Merge "msm: ipa3: fix the Use-after-free in ipa3_dma_destroy"
diff --git a/Documentation/arm/msm/remote_debug_drv.txt b/Documentation/arm/msm/remote_debug_drv.txt
new file mode 100644
index 0000000..13a35f4
--- /dev/null
+++ b/Documentation/arm/msm/remote_debug_drv.txt
@@ -0,0 +1,468 @@
+Introduction
+============
+
+The goal of this debug feature is to provide a reliable, responsive,
+accurate and secure debug capability to developers interested in
+debugging MSM subsystem processor images without the use of a hardware
+debugger.
+
+The Debug Agent along with the Remote Debug Driver implements a shared
+memory based transport mechanism that allows for a debugger (ex. GDB)
+running on a host PC to communicate with a remote stub running on
+peripheral subsystems such as the ADSP, MODEM etc.
+
+The diagram below depicts end to end the components involved to
+support remote debugging:
+
+
+: :
+: HOST (PC) : MSM
+: ,--------, : ,-------,
+: | | : | Debug | ,--------,
+: |Debugger|<--:-->| Agent | | Remote |
+: | | : | App | +----->| Debug |
+: `--------` : |-------| ,--------, | | Stub |
+: : | Remote| | |<---+ `--------`
+: : | Debug |<-->|--------|
+: : | Driver| | |<---+ ,--------,
+: : `-------` `--------` | | Remote |
+: : LA Shared +----->| Debug |
+: : Memory | Stub |
+: : `--------`
+: : Peripheral Subsystems
+: : (ADSP, MODEM, ...)
+
+
+Debugger: Debugger application running on the host PC that
+ communicates with the remote stub.
+ Examples: GDB, LLDB
+
+Debug Agent: Software that runs on the Linux Android platform
+ that provides connectivity from the MSM to the
+ host PC. This involves two portions:
+ 1) User mode Debug Agent application that discovers
+ processes running on the subsystems and creates
+ TCP/IP sockets for the host to connect to. In addition
+ to this, it creates an info (or meta) port that
+ users can connect to discover the various
+ processes and their corresponding debug ports.
+
+Remote Debug A character based driver that the Debug
+Driver: Agent uses to transport the payload received from the
+ host to the debug stub running on the subsystem
+ processor over shared memory and vice versa.
+
+Shared Memory: Shared memory from the SMEM pool that is accessible
+ from the Applications Processor (AP) and the
+ subsystem processors.
+
+Remote Debug Privileged code that runs in the kernels of the
+Stub: subsystem processors that receives debug commands
+ from the debugger running on the host and
+ acts on these commands. These commands include reading
+ and writing to registers and memory belonging to the
+ subsystem's address space, setting breakpoints,
+ single stepping etc.
+
+Hardware description
+====================
+
+The Remote Debug Driver interfaces with the Remote Debug stubs
+running on the subsystem processors and does not drive or
+manage any hardware resources.
+
+Software description
+====================
+
+The debugger and the remote stubs use Remote Serial Protocol (RSP)
+to communicate with each other. This is a protocol widely used by both
+software and hardware debuggers. RSP is an ASCII based protocol
+and used when it is not possible to run GDB server on the target under
+debug.
+
+The Debug Agent application along with the Remote Debug Driver
+is responsible for establishing a bi-directional connection from
+the debugger application running on the host to the remote debug
+stub running on a subsystem. The Debug Agent establishes connectivity
+to the host PC via TCP/IP sockets.
+
+This feature uses ADB port forwarding to establish connectivity
+between the debugger running on the host and the target under debug.
+
+Please note the Debug Agent does not expose HLOS memory to the
+remote subsystem processors.
+
+Design
+======
+
+Here is the overall flow:
+
+1) When the Debug Agent application starts up, it opens up a shared memory
+based transport channel to the various subsystem processor images.
+
+2) The Debug Agent application sends messages across to the remote stubs
+to discover the various processes that are running on the subsystem and
+creates debug sockets for each of them.
+
+3) Whenever a process running on a subsystem exits, the Debug Agent
+is notified by the stub so that the debug port and other resources
+can be reclaimed.
+
+4) The Debug Agent uses the services of the Remote Debug Driver to
+transport payload from the host debugger to the remote stub and vice versa.
+
+5) Communication between the Remote Debug Driver and the Remote Debug stub
+running on the subsystem processor is done over shared memory (see figure).
+SMEM services are used to allocate the shared memory that will
+be readable and writeable by the AP and the subsystem image under debug.
+
+A separate SMEM allocation takes place for each subsystem processor
+involved in remote debugging. The remote stub running on each of the
+subsystems allocates a SMEM buffer using a unique identifier so that both
+the AP and subsystem get the same physical block of memory. It should be
+noted that subsystem images can be restarted at any time.
+However, when a subsystem comes back up, its stub uses the same unique
+SMEM identifier to allocate the SMEM block. This would not result in a
+new allocation rather the same block of memory in the first bootup instance
+is provided back to the stub running on the subsystem.
+
+An 8KB chunk of shared memory is allocated and used for communication
+per subsystem. For multi-process capable subsystems, 16KB chunk of shared
+memory is allocated to allow for simultaneous debugging of more than one
+process running on a single subsystem.
+
+The shared memory is used as a circular ring buffer in each direction.
+Thus we have a bi-directional shared memory channel between the AP
+and a subsystem. We call this SMQ. Each memory channel contains a header,
+data and a control mechanism that is used to synchronize read and write
+of data between the AP and the remote subsystem.
+
+Overall SMQ memory view:
+:
+: +------------------------------------------------+
+: | SMEM buffer |
+: |-----------------------+------------------------|
+: |Producer: LA | Producer: Remote |
+: |Consumer: Remote | subsystem |
+: | subsystem | Consumer: LA |
+: | | |
+: | Producer| Consumer|
+: +-----------------------+------------------------+
+: | |
+: | |
+: | +--------------------------------------+
+: | |
+: | |
+: v v
+: +--------------------------------------------------------------+
+: | Header | Data | Control |
+: +-----------+---+---+---+-----+----+--+--+-----+---+--+--+-----+
+: | | b | b | b | | S |n |n | | S |n |n | |
+: | Producer | l | l | l | | M |o |o | | M |o |o | |
+: | Ver | o | o | o | | Q |d |d | | Q |d |d | |
+: |-----------| c | c | c | ... | |e |e | ... | |e |e | ... |
+: | | k | k | k | | O | | | | I | | | |
+: | Consumer | | | | | u |0 |1 | | n |0 |1 | |
+: | Ver | 0 | 1 | 2 | | t | | | | | | | |
+: +-----------+---+---+---+-----+----+--+--+-----+---+--+--+-----+
+: | |
+: + |
+: |
+: +------------------------+
+: |
+: v
+: +----+----+----+----+
+: | SMQ Nodes |
+: |----|----|----|----|
+: Node # | 0 | 1 | 2 | ...|
+: |----|----|----|----|
+: Starting Block Index # | 0 | 3 | 8 | ...|
+: |----|----|----|----|
+: # of blocks | 3 | 5 | 1 | ...|
+: +----+----+----+----+
+:
+
+Header: Contains version numbers for software compatibility to ensure
+that both producers and consumers on the AP and subsystems know how to
+read from and write to the queue.
+Both the producer and consumer versions are 1.
+: +---------+-------------------+
+: | Size | Field |
+: +---------+-------------------+
+: | 1 byte | Producer Version |
+: +---------+-------------------+
+: | 1 byte | Consumer Version |
+: +---------+-------------------+
+
+
+Data: The data portion contains multiple blocks [0..N] of a fixed size.
+The block size SM_BLOCKSIZE is fixed to 128 bytes for header version #1.
+Payload sent from the debug agent app is split (if necessary) and placed
+in these blocks. The first data block is placed at the next 8 byte aligned
+address after the header.
+
+The number of blocks for a given SMEM allocation is derived as follows:
+ Number of Blocks = ((Total Size - Alignment - Size of Header
+ - Size of SMQIn - Size of SMQOut)/(SM_BLOCKSIZE))
+
+The producer maintains a private block map of each of these blocks to
+determine which of these blocks in the queue is available and which are free.
+
+Control:
+The control portion contains a list of nodes [0..N] where N is number
+of available data blocks. Each node identifies the data
+block indexes that contain a particular debug message to be transferred,
+and the number of blocks it took to hold the contents of the message.
+
+Each node has the following structure:
+: +---------+--------------------+
+: | Size    | Field              |
+: +---------+--------------------+
+: | 2 bytes |Starting Block Index|
+: +---------+--------------------+
+: | 2 bytes |Number of Blocks    |
+: +---------+--------------------+
+
+The producer and the consumer update different parts of the control channel
+(SMQOut / SMQIn) respectively. Each of these control data structures contains
+information about the last node that was written / read, and the actual nodes
+that were written/read.
+
+SMQOut Structure (R/W by producer, R by consumer):
+: +---------+-------------------+
+: | Size | Field |
+: +---------+-------------------+
+: | 4 bytes | Magic Init Number |
+: +---------+-------------------+
+: | 4 bytes | Reset |
+: +---------+-------------------+
+: | 4 bytes | Last Sent Index |
+: +---------+-------------------+
+: | 4 bytes | Index Free Read |
+: +---------+-------------------+
+
+SMQIn Structure (R/W by consumer, R by producer):
+: +---------+-------------------+
+: | Size | Field |
+: +---------+-------------------+
+: | 4 bytes | Magic Init Number |
+: +---------+-------------------+
+: | 4 bytes | Reset ACK |
+: +---------+-------------------+
+: | 4 bytes | Last Read Index |
+: +---------+-------------------+
+: | 4 bytes | Index Free Write |
+: +---------+-------------------+
+
+Magic Init Number:
+Both SMQ Out and SMQ In initialize this field with a predefined magic
+number so as to make sure that both the consumer and producer blocks
+have fully initialized and have valid data in the shared memory control area.
+ Producer Magic #: 0xFF00FF01
+ Consumer Magic #: 0xFF00FF02
+
+SMQ Out's Last Sent Index and Index Free Read:
+ Only a producer can write to these indexes and they are updated whenever
+ there is new payload to be inserted into the SMQ in order to be sent to a
+ consumer.
+
+ The number of blocks required for the SMQ allocation is determined as:
+ (payload size + SM_BLOCKSIZE - 1) / SM_BLOCKSIZE
+
+ The private block map is searched for a large enough continuous set of blocks
+ and the user data is copied into the data blocks.
+
+ The starting index of the free block(s) is updated in the SMQOut's Last Sent
+ Index. This update keeps track of which index was last written to and the
+  producer uses it to determine where the next allocation could be done.
+
+ Every allocation, a producer updates the Index Free Read from its
+ collaborating consumer's Index Free Write field (if they are unequal).
+ This index value indicates that the consumer has read all blocks associated
+ with allocation on the SMQ and that the producer can reuse these blocks for
+  subsequent allocations since this is a circular queue.
+
+ At cold boot and restart, these indexes are initialized to zero and all
+ blocks are marked as available for allocation.
+
+SMQ In's Last Read Index and Index Free Write:
+ These indexes are written to only by a consumer and are updated whenever
+ there is new payload to be read from the SMQ. The Last Read Index keeps
+ track of which index was last read by the consumer and using this, it
+ determines where the next read should be done.
+ After completing a read, Last Read Index is incremented to the
+ next block index. A consumer updates Index Free Write to the starting
+ index of an allocation whenever it has completed processing the blocks.
+ This is an optimization that can be used to prevent an additional copy
+ of data from the queue into a client's data buffer and the data in the queue
+ itself can be used.
+ Once Index Free Write is updated, the collaborating producer (on the next
+ data allocation) reads the updated Index Free Write value and it then
+ updates its corresponding SMQ Out's Index Free Read and marks the blocks
+ associated with that index as available for allocation. At cold boot and
+ restart, these indexes are initialized to zero.
+
+SMQ Out Reset# and SMQ In Reset ACK #:
+ Since subsystems can restart at anytime, the data blocks and control channel
+ can be in an inconsistent state when a producer or consumer comes up.
+ We use Reset and Reset ACK to manage this. At cold boot, the producer
+ initializes the Reset# to a known number ex. 1. Every other reset that the
+  producer undergoes, the Reset# is simply incremented by 1. All the producer
+ indexes are reset.
+ When the producer notifies the consumer of data availability, the consumer
+  reads the producer's Reset # and copies that into its SMQ In Reset ACK#
+ field when they differ. When that occurs, the consumer resets its
+ indexes to 0.
+
+6) Asynchronous notifications between a producer and consumer are
+done using the SMP2P service which is interrupt based.
+
+Power Management
+================
+
+None
+
+SMP/multi-core
+==============
+
+The driver uses completion to wake up the Debug Agent client threads.
+
+Security
+========
+
+From the perspective of the subsystem, the AP is untrusted. The remote
+stubs consult the secure debug fuses to determine whether or not the
+remote debugging will be enabled at the subsystem.
+
+If the hardware debug fuses indicate that debugging is disabled, the
+remote stubs will not be functional on the subsystem. Writes to the
+queue will only be done if the driver sees that the remote stub has been
+initialized on the subsystem.
+
+Therefore even if any untrusted software running on the AP requests
+the services of the Remote Debug Driver and inject RSP messages
+into the shared memory buffer, these RSP messages will be discarded and
+an appropriate error code will be sent up to the invoking application.
+
+Performance
+===========
+
+During operation, the Remote Debug Driver copies RSP messages
+asynchronously sent from the host debugger to the remote stub and vice
+versa. The debug messages are ASCII based and relatively short
+(<25 bytes) and may once in a while go up to a maximum 700 bytes
+depending on the command the user requested. Thus we do not
+anticipate any major performance impact. Moreover, in a typical
+functional debug scenario performance should not be a concern.
+
+Interface
+=========
+
+The Remote Debug Driver is a character based device that manages
+a piece of shared memory that is used as a bi-directional
+single producer/consumer circular queue using a next fit allocator.
+Every subsystem, has its own shared memory buffer that is managed
+like a separate device.
+
+The driver distinguishes each subsystem processor's buffer by
+registering a node with a different minor number.
+
+For each subsystem that is supported, the driver exposes a user space
+interface through the following node:
+ - /dev/rdbg-<subsystem>
+ Ex. /dev/rdbg-adsp (for the ADSP subsystem)
+
+The standard open(), close(), read() and write() API set is
+implemented.
+
+The open() syscall will fail if a subsystem is not present or supported
+by the driver or a shared memory buffer cannot be allocated for the
+AP - subsystem communication. It will also fail if the subsystem has
+not initialized the queue on its side. Here are the error codes returned
+in case a call to open() fails:
+ENODEV - memory was not yet allocated for the device
+EEXIST - device is already opened
+ENOMEM - SMEM allocation failed
+ECOMM  - Subsystem queue is not yet setup
+ENOMEM - Failure to initialize SMQ
+
+read() is a blocking call that will return with the number of bytes written
+by the subsystem whenever the subsystem sends it some payload. Here are the
+error codes returned in case a call to read() fails:
+EINVAL - Invalid input
+ENODEV - Device has not been opened yet
+ERESTARTSYS - call to wait_for_completion_interruptible is interrupted
+ENODATA - call to smq_receive failed
+
+write() attempts to send user mode payload out to the subsystem. It can fail
+if the SMQ is full. The number of bytes written is returned back to the user.
+Here are the error codes returned in case a call to write() fails:
+EINVAL - Invalid input
+ECOMM - SMQ send failed
+
+In the close() syscall, the control information state of the SMQ is
+initialized to zero thereby preventing any further communication between
+the AP and the subsystem. Here is the error code returned in case
+a call to close() fails:
+ENODEV - device wasn't opened/initialized
+
+The Remote Debug driver uses SMP2P for bi-directional AP to subsystem
+notification. Notifications are sent to indicate that there are new
+debug messages available for processing. Each subsystem that is
+supported will need to add a device tree entry per the usage
+specification of SMP2P driver.
+
+In case the remote stub becomes non operational or the security configuration
+on the subsystem does not permit debugging, any messages put in the SMQ will
+not be responded to. It is the responsibility of the Debug Agent app and the
+host debugger application such as GDB to timeout and notify the user of the
+non availability of remote debugging.
+
+Driver parameters
+=================
+
+None
+
+Config options
+==============
+
+The driver is configured with a device tree entry to map an SMP2P entry
+to the device. The SMP2P entry name used is "rdbg". Please see
+kernel/Documentation/arm/msm/msm_smp2p.txt for information about the
+device tree entry required to configure SMP2P.
+
+The driver uses the SMEM allocation type SMEM_LC_DEBUGGER to allocate memory
+for the queue that is used to share data with the subsystems.
+
+Dependencies
+============
+
+The Debug Agent driver requires services of SMEM to
+allocate shared memory buffers.
+
+SMP2P is used as a bi-directional notification
+mechanism between the AP and a subsystem processor.
+
+User space utilities
+====================
+
+This driver is meant to be used in conjunction with the user mode
+Remote Debug Agent application.
+
+Other
+=====
+
+None
+
+Known issues
+============
+For targets with an external subsystem, we cannot use
+shared memory for communication and would have to use the prevailing
+transport mechanisms that exists between the AP and the external subsystem.
+
+This driver cannot be leveraged for such targets.
+
+To do
+=====
+
+None
diff --git a/Documentation/devicetree/bindings/arm/msm/heap-sharing.txt b/Documentation/devicetree/bindings/arm/msm/heap-sharing.txt
new file mode 100644
index 0000000..e63d09b
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/heap-sharing.txt
@@ -0,0 +1,44 @@
+* Memory Share Driver (MEMSHARE)
+
+The Memshare driver implements a Kernel QMI service on the
+LA-APSS, which is responsible for providing contiguous physical
+memory to MPSS for use cases when the modem requires additional
+memory (e.g. GPS).
+
+Required properties for Memshare
+
+-Root Node-
+
+- compatible: Must be "qcom,memshare"
+
+Required properties for child nodes:
+
+- compatible: Must be "qcom,memshare-peripheral"
+
+- qcom,peripheral-size: Indicates the size (in bytes) required for that child.
+
+- qcom,client-id: Indicates the client id of the child node.
+
+- label: Indicates the peripheral information for the node. Should be one of
+ the following:
+ - modem /* Represent Modem Peripheral */
+ - adsp /* Represent ADSP Peripheral */
+ - wcnss /* Represent WCNSS Peripheral */
+
+Optional properties for child nodes:
+
+- qcom,allocate-boot-time: Indicates whether clients need boot time memory allocation.
+
+Example:
+
+qcom,memshare {
+ compatible = "qcom,memshare";
+
+ qcom,client_1 {
+ compatible = "qcom,memshare-peripheral";
+ qcom,peripheral-size = <0x200000>;
+ qcom,client-id = <0>;
+ qcom,allocate-boot-time;
+ label = "modem";
+ };
+};
diff --git a/Documentation/devicetree/bindings/arm/msm/lpm-levels.txt b/Documentation/devicetree/bindings/arm/msm/lpm-levels.txt
index ae476d0..797dbcc 100644
--- a/Documentation/devicetree/bindings/arm/msm/lpm-levels.txt
+++ b/Documentation/devicetree/bindings/arm/msm/lpm-levels.txt
@@ -28,9 +28,6 @@
- qcom,default-level: The default low power level that a cluster is
programmed. The SPM of the corresponding device is configured at this
low power mode by default.
- - qcom,cpu: List of CPU phandles to identify the CPUs associated with
- this cluster. This property is required if and only if the cluster
- node contains a qcom,pm-cpu node.
qcom,pm-cluster contains qcom,pm-cluster-level nodes which identify
the various low power modes that the cluster can enter. The
@@ -103,9 +100,13 @@
power collapse (PC)
[Node bindings for qcom,pm-cpu]
-qcom,pm-cpu contains the low power modes that a cpu could enter. Currently it
-doesn't have any required properties and is a container for
-qcom,pm-cpu-levels.
+qcom,pm-cpu contains the low power modes that a cpu could enter and the CPUs
+that share the parameters. It contains the following properties.
+ - qcom,cpu: List of CPU phandles to identify the CPUs associated with
+ this cluster.
+ - qcom,pm-cpu-levels: The different low power modes that a CPU could
+ enter. The following section explains the required properties of this
+ node.
[Node bindings for qcom,pm-cpu-levels]
Required properties:
@@ -184,7 +185,6 @@
label = "a53";
qcom,spm-device-names = "l2";
qcom,default-level=<0>;
- qcom,cpu = <&CPU0 &CPU1 &CPU2 &CPU3>;
qcom,pm-cluster-level@0{
reg = <0>;
@@ -210,6 +210,7 @@
qcom,pm-cpu {
#address-cells = <1>;
#size-cells = <0>;
+ qcom,cpu = <&CPU0 &CPU1 &CPU2 &CPU3>;
qcom,pm-cpu-level@0 {
reg = <0>;
qcom,spm-cpu-mode = "wfi";
@@ -255,7 +256,6 @@
label = "a57";
qcom,spm-device-names = "l2";
qcom,default-level=<0>;
- qcom,cpu = <&CPU4 &CPU5 &CPU6 &CPU7>;
qcom,pm-cluster-level@0{
reg = <0>;
@@ -281,6 +281,7 @@
qcom,pm-cpu {
#address-cells = <1>;
#size-cells = <0>;
+ qcom,cpu = <&CPU4 &CPU5 &CPU6 &CPU7>;
qcom,pm-cpu-level@0 {
reg = <0>;
qcom,spm-cpu-mode = "wfi";
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_glink_xprt.txt b/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_glink_xprt.txt
index 9e1d230..c5d052c 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_glink_xprt.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_glink_xprt.txt
@@ -17,6 +17,8 @@
by pil. Absence of this property indicates that
subsystem loading through pil voting is disabled for
that subsystem.
+-qcom,dynamic-wakeup-source: Boolean property to indicate that G-Link
+ transport supports dynamic wakeup source
Example:
qcom,ipc_router_modem_xprt {
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_qmp.txt b/Documentation/devicetree/bindings/arm/msm/msm_qmp.txt
index 0a5c0b3..5fb3e65 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm_qmp.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm_qmp.txt
@@ -13,6 +13,7 @@
- qcom,irq-mask : the bitmask to trigger an interrupt.
- interrupt : the receiving interrupt line.
- mbox-desc-offset : offset of mailbox descriptor from start of the msgram.
+- priority : the priority of this mailbox compared to other mailboxes.
- #mbox-cells: Common mailbox binding property to identify the number of cells
required for the mailbox specifier, should be 1.
@@ -33,6 +34,7 @@
qcom,irq-mask = <0x1>;
interrupt = <0 389 1>;
mbox-desc-offset = <0x100>;
+ priority = <1>;
mbox-offset = <0x500>;
mbox-size = <0x400>;
#mbox-cells = <1>;
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt b/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt
index 8a3e704..cc4c3cc 100644
--- a/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt
@@ -9,7 +9,7 @@
- compatible
Usage: required
Value type: <string>
- Definition: must be "qcom,clk-cpu-osm".
+ Definition: must be "qcom,clk-cpu-osm" or "qcom,clk-cpu-osm-v2".
- reg
Usage: required
@@ -85,24 +85,6 @@
by the OSM hardware for each supported DCVS setpoint
of the Performance cluster.
-- qcom,l3-min-cpr-vc-binX
- Usage: required
- Value type: <u32>
- Definition: First virtual corner which does not use PLL post-divider
- for the L3 clock domain.
-
-- qcom,pwrcl-min-cpr-vc-binX
- Usage: required
- Value type: <u32>
- Definition: First virtual corner which does not use PLL post-divider
- for the power cluster.
-
-- qcom,perfcl-min-cpr-vc-binX
- Usage: required
- Value type: <u32>
- Definition: First virtual corner which does not use PLL post-divider
- for the performance cluster.
-
- qcom,osm-no-tz
Usage: optional
Value type: <empty>
@@ -251,8 +233,6 @@
configuration registers for the Performance cluster.
The array must contain exactly three elements.
- corresponding CPRh device.
-
- qcom,perfcl-apcs-mem-acc-threshold-voltage
Usage: optional
Value type: <u32>
@@ -263,6 +243,27 @@
the MEM ACC threshold voltage specified for the
corresponding CPRh device.
+- qcom,l3-memacc-level-vc-binX
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Array which defines the NOM and TURBO VCs for the L3 clock
+ on that BIN part.
+ The array must contain exactly two elements.
+
+- qcom,pwrcl-memacc-level-vc-binX
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Array which defines the NOM and TURBO VCs for the Power
+ cluster clock on that BIN part.
+ The array must contain exactly two elements.
+
+- qcom,perfcl-memacc-level-vc-binX
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Array which defines the NOM and TURBO VCs for the
+ Performance cluster clock on that BIN part.
+ The array must contain exactly two elements.
+
- qcom,apcs-cbc-addr
Usage: required
Value type: <prop-encoded-array>
@@ -501,9 +502,17 @@
< 1881600000 0x404c1462 0x00004e4e 0x2 21 >,
< 1958400000 0x404c1566 0x00005252 0x3 22 >;
- qcom,l3-min-cpr-vc-bin0 = <7>;
- qcom,pwrcl-min-cpr-vc-bin0 = <6>;
- qcom,perfcl-min-cpr-vc-bin0 = <7>;
+ qcom,l3-memacc-level-vc-bin0 = <7 63>;
+ qcom,l3-memacc-level-vc-bin1 = <7 9>;
+ qcom,l3-memacc-level-vc-bin2 = <7 9>;
+
+ qcom,pwrcl-memacc-level-vc-bin0 = <12 63>;
+ qcom,pwrcl-memacc-level-vc-bin1 = <12 17>;
+ qcom,pwrcl-memacc-level-vc-bin2 = <12 17>;
+
+ qcom,perfcl-memacc-level-vc-bin0 = <12 18>;
+ qcom,perfcl-memacc-level-vc-bin1 = <12 18>;
+ qcom,perfcl-memacc-level-vc-bin2 = <12 18>;
qcom,up-timer =
<1000 1000 1000>;
diff --git a/Documentation/devicetree/bindings/arm/msm/qmp-debugfs-client.txt b/Documentation/devicetree/bindings/arm/msm/qmp-debugfs-client.txt
new file mode 100644
index 0000000..655bf89
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/qmp-debugfs-client.txt
@@ -0,0 +1,17 @@
+QMP debugfs client:
+-----------------
+
+QTI Messaging Protocol(QMP) debugfs client is an interface for clients to
+send data to the Always on processor using QMP.
+
+Required properties :
+- compatible : must be "qcom,debugfs-qmp-client"
+- mboxes : list of QMP mailbox phandle and channel identifier tuples.
+- mbox-names : names of the listed mboxes
+
+Example :
+ qcom,qmp-client {
+ compatible = "qcom,debugfs-qmp-client";
+ mboxes = <&qmp_aop 0>;
+ mbox-names = "aop";
+ };
diff --git a/Documentation/devicetree/bindings/arm/msm/qsee_ipc_irq_bridge.txt b/Documentation/devicetree/bindings/arm/msm/qsee_ipc_irq_bridge.txt
new file mode 100644
index 0000000..442ad52
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/qsee_ipc_irq_bridge.txt
@@ -0,0 +1,30 @@
+Qualcomm Technologies, Inc. Secure Execution Environment IPC Interrupt Bridge
+
+[Root level node]
+Required properties:
+-compatible : should be "qcom,qsee-ipc-irq-bridge";
+
+[Second level nodes]
+qcom,qsee-ipc-irq-subsystem
+Required properties:
+-qcom,dev-name: the bridge device name
+-interrupt: IPC interrupt line from remote subsystem to QSEE
+-label : The name of this subsystem.
+
+Required properties if interrupt type is IRQ_TYPE_LEVEL_HIGH[4]:
+-qcom,rx-irq-clr : the register to clear the level triggered rx interrupt
+-qcom,rx-irq-clr-mask : the bitmask to clear the rx interrupt
+
+Example:
+
+ qcom,qsee_ipc_irq_bridge {
+ compatible = "qcom,qsee-ipc-irq-bridge";
+
+ qcom,qsee-ipc-irq-spss {
+ qcom,rx-irq-clr = <0x1d08008 0x4>;
+ qcom,rx-irq-clr-mask = <0x2>;
+ qcom,dev-name = "qsee_ipc_irq_spss";
+ interrupts = <0 349 4>;
+ label = "spss";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/arm/msm/rdbg-smp2p.txt b/Documentation/devicetree/bindings/arm/msm/rdbg-smp2p.txt
new file mode 100644
index 0000000..1114308
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/rdbg-smp2p.txt
@@ -0,0 +1,20 @@
+Qualcomm Technologies, Inc. Remote Debugger (RDBG) driver
+
+Required properties:
+-compatible : Should be one of
+ To communicate with adsp
+ qcom,smp2pgpio_client_rdbg_2_in (inbound)
+ qcom,smp2pgpio_client_rdbg_2_out (outbound)
+ To communicate with modem
+ qcom,smp2pgpio_client_rdbg_1_in (inbound)
+ qcom,smp2pgpio_client_rdbg_1_out (outbound)
+ To communicate with cdsp
+ qcom,smp2pgpio_client_rdbg_5_in (inbound)
+ qcom,smp2pgpio_client_rdbg_5_out (outbound)
+-gpios : the relevant gpio pins of the entry.
+
+Example:
+ qcom,smp2pgpio_client_rdbg_2_in {
+ compatible = "qcom,smp2pgpio_client_rdbg_2_in";
+ gpios = <&smp2pgpio_rdbg_2_in 0 0>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/qcom,aop-qmp.txt b/Documentation/devicetree/bindings/clock/qcom,aop-qmp.txt
index 231b8a3..37c48ad 100644
--- a/Documentation/devicetree/bindings/clock/qcom,aop-qmp.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,aop-qmp.txt
@@ -2,7 +2,7 @@
------------------------------------------------------------------------
Required properties :
-- compatible : must be "qcom,aop-qmp-clk"
+- compatible : must be "qcom,aop-qmp-clk-v1" or "qcom,aop-qmp-clk-v2".
- #clock-cells : must contain 1
- mboxes : list of QMP mailbox phandle and channel identifier tuples.
- mbox-names: List of identifier strings for each mailbox channel.
@@ -10,7 +10,7 @@
Example :
clock_qdss: qcom,aopclk {
- compatible = "qcom,aop-qmp-clk";
+ compatible = "qcom,aop-qmp-clk-v1";
#clock-cells = <1>;
mboxes = <&qmp_aop 0>;
mbox-names = "qdss_clk";
diff --git a/Documentation/devicetree/bindings/clock/qcom,gpucc.txt b/Documentation/devicetree/bindings/clock/qcom,gpucc.txt
index f214c58..12676b7 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gpucc.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,gpucc.txt
@@ -4,7 +4,9 @@
Required properties :
- compatible : shall contain only one of the following:
"qcom,gpucc-sdm845",
- "qcom,gfxcc-sdm845"
+ "qcom,gpucc-sdm845-v2",
+ "qcom,gfxcc-sdm845",
+ "qcom,gfxcc-sdm845-v2"
- reg : shall contain base register offset and size.
- #clock-cells : shall contain 1.
diff --git a/Documentation/devicetree/bindings/cnss/icnss.txt b/Documentation/devicetree/bindings/cnss/icnss.txt
index c801e848..700a8f7 100644
--- a/Documentation/devicetree/bindings/cnss/icnss.txt
+++ b/Documentation/devicetree/bindings/cnss/icnss.txt
@@ -28,6 +28,7 @@
- qcom,icnss-vadc: VADC handle for vph_pwr read APIs.
- qcom,icnss-adc_tm: VADC handle for vph_pwr notification APIs.
- qcom,smmu-s1-bypass: Boolean context flag to set SMMU to S1 bypass
+ - qcom,wlan-msa-fixed-region: phandle, specifier pairs to children of /reserved-memory
Example:
@@ -54,6 +55,7 @@
<0 140 0 /* CE10 */ >,
<0 141 0 /* CE11 */ >;
qcom,wlan-msa-memory = <0x200000>;
+ qcom,wlan-msa-fixed-region = <&wlan_msa_mem>;
qcom,smmu-s1-bypass;
vdd-0.8-cx-mx-supply = <&pm8998_l5>;
qcom,vdd-0.8-cx-mx-config = <800000 800000 2400 1000>;
diff --git a/Documentation/devicetree/bindings/crypto/msm/qcedev.txt b/Documentation/devicetree/bindings/crypto/msm/qcedev.txt
index c8077cb..051b315 100644
--- a/Documentation/devicetree/bindings/crypto/msm/qcedev.txt
+++ b/Documentation/devicetree/bindings/crypto/msm/qcedev.txt
@@ -20,6 +20,8 @@
- qcom,ce-hw-key : optional, indicates if the hardware supports use of HW KEY.
- qcom,support-core-clk-only : optional, indicates if the HW supports single crypto core clk.
- qcom,bsm-ee : optional, indicate the BAM EE value, changes from target to target. Default value is 1 if not specified.
+ - qcom,smmu-s1-bypass : Boolean flag to bypass SMMU stage 1 translation.
+ - iommus : A list of phandle and IOMMU specifier pairs that describe the IOMMU master interfaces of the device.
Example:
diff --git a/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt b/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
index 3c8a79a..fa27198 100644
--- a/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
+++ b/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
@@ -40,6 +40,9 @@
required. For other targets such as fsm, they do not perform
bus scaling. It is not required for those targets.
+ - qcom,smmu-s1-bypass : Boolean flag to bypass SMMU stage 1 translation.
+ - iommus : A list of phandle and IOMMU specifier pairs that describe the IOMMU master interfaces of the device.
+
Example:
qcom,qcrypto@fd444000 {
diff --git a/Documentation/devicetree/bindings/display/msm/sde.txt b/Documentation/devicetree/bindings/display/msm/sde.txt
index 863a169..dd668cb 100644
--- a/Documentation/devicetree/bindings/display/msm/sde.txt
+++ b/Documentation/devicetree/bindings/display/msm/sde.txt
@@ -52,6 +52,9 @@
The number of offsets defined should reflect the
amount of mixers that can drive data to a panel
interface.
+- qcom,sde-dspp-top-off: Offset address for the dspp top block.
+ The offset is calculated from register "mdp_phys"
+ defined in reg property.
- qcom,sde-dspp-off: Array of offset addresses for the available dspp
blocks. These offsets are calculated from
register "mdp_phys" defined in reg property.
@@ -178,6 +181,9 @@
- qcom,sde-te-size: A u32 value indicates the te block address range.
- qcom,sde-te2-size: A u32 value indicates the te2 block address range.
- qcom,sde-dsc-off: A u32 offset indicates the dsc block offset on pingpong.
+- qcom,sde-dither-off: A u32 offset indicates the dither block offset on pingpong.
+- qcom,sde-dither-version: A u32 value indicates the dither block version.
+- qcom,sde-dither-size: A u32 value indicates the dither block address range.
- qcom,sde-sspp-vig-blocks: A node that lists the blocks inside the VIG hardware. The
block entries will contain the offset and version (if needed)
of each feature block. The presence of a block entry
@@ -204,6 +210,7 @@
e.g. qcom,sde-dspp-blocks
-- qcom,sde-dspp-pcc: offset and version of PCC hardware
-- qcom,sde-dspp-gc: offset and version of GC hardware
+ -- qcom,sde-dspp-igc: offset and version of IGC hardware
-- qcom,sde-dspp-hsic: offset and version of global PA adjustment
-- qcom,sde-dspp-memcolor: offset and version of PA memcolor hardware
-- qcom,sde-dspp-sixzone: offset and version of PA sixzone hardware
@@ -361,17 +368,23 @@
* Current values of src & dst are defined at
include/linux/msm-bus-board.h
+SMMU Subnodes:
+- smmu_sde_****: Child nodes representing sde smmu virtual
+ devices
+
Subnode properties:
-- compatible : Compatible name used in smmu v2.
- smmu_v2 names should be:
- "qcom,smmu-mdp-unsec" - smmu context bank device for
- unsecure mdp domain.
- "qcom,smmu-rot-unsec" - smmu context bank device for
- unsecure rotation domain.
- "qcom,smmu-mdp-sec" - smmu context bank device for
- secure mdp domain.
- "qcom,smmu-rot-sec" - smmu context bank device for
- secure rotation domain.
+- compatible: Compatible names used for smmu devices.
+ names should be:
+ "qcom,smmu_sde_unsec": smmu context bank device
+ for unsecure sde real time domain.
+ "qcom,smmu_sde_sec": smmu context bank device
+ for secure sde real time domain.
+ "qcom,smmu_sde_nrt_unsec": smmu context bank device
+ for unsecure sde non-real time domain.
+ "qcom,smmu_sde_nrt_sec": smmu context bank device
+ for secure sde non-real time domain.
+
+
Please refer to ../../interrupt-controller/interrupts.txt for a general
description of interrupt bindings.
@@ -415,6 +428,7 @@
0x00002600 0x00002800>;
qcom,sde-mixer-off = <0x00045000 0x00046000
0x00047000 0x0004a000>;
+ qcom,sde-dspp-top-off = <0x1300>;
qcom,sde-dspp-off = <0x00055000 0x00057000>;
qcom,sde-dspp-ad-off = <0x24000 0x22800>;
qcom,sde-dspp-ad-version = <0x00030000>;
@@ -472,6 +486,7 @@
qcom,sde-sspp-src-size = <0x100>;
qcom,sde-mixer-size = <0x100>;
qcom,sde-ctl-size = <0x100>;
+ qcom,sde-dspp-top-size = <0xc>;
qcom,sde-dspp-size = <0x100>;
qcom,sde-intf-size = <0x100>;
qcom,sde-dsc-size = <0x100>;
@@ -590,6 +605,7 @@
};
qcom,sde-dspp-blocks {
+ qcom,sde-dspp-igc = <0x0 0x00010000>;
qcom,sde-dspp-pcc = <0x1700 0x00010000>;
qcom,sde-dspp-gc = <0x17c0 0x00010000>;
qcom,sde-dspp-hsic = <0x0 0x00010000>;
@@ -673,4 +689,14 @@
<1 590 0 160000>,
<1 590 0 320000>;
};
+
+ smmu_kms_unsec: qcom,smmu_kms_unsec_cb {
+ compatible = "qcom,smmu_sde_unsec";
+ iommus = <&mmss_smmu 0>;
+ };
+
+ smmu_kms_sec: qcom,smmu_kms_sec_cb {
+ compatible = "qcom,smmu_sde_sec";
+ iommus = <&mmss_smmu 1>;
+ };
};
diff --git a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
index 669997c..32c31af 100644
--- a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
+++ b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
@@ -144,10 +144,11 @@
0xff = default value.
- qcom,mdss-dsi-border-color: Defines the border color value if border is present.
0 = default value.
-- qcom,mdss-dsi-panel-jitter: An integer value defines the panel jitter timing for rsc
- backoff time. The jitter configurition causes the early
- wakeup if panel needs to adjust before vsync.
- Default jitter value is 5%. Max allowed value is 25%.
+- qcom,mdss-dsi-panel-jitter: Panel jitter value is expressed in terms of numerator
+ and denominator. It contains two u32 values - numerator
+ followed by denominator. The jitter configuration causes
+ the early wakeup if panel needs to adjust before vsync.
+ Default jitter value is 2.0%. Max allowed value is 10%.
- qcom,mdss-dsi-panel-prefill-lines: An integer value defines the panel prefill lines required to
calculate the backoff time of rsc.
Default value is 16 lines. Max allowed value is vtotal.
@@ -400,6 +401,24 @@
String that specifies the ctrl state for reading the panel status.
"dsi_lp_mode" = DSI low power mode
"dsi_hs_mode" = DSI high speed mode
+- qcom,mdss-dsi-lp1-command: An optional byte stream to request low
+ power mode on a panel
+- qcom,mdss-dsi-lp1-command-mode: String that specifies the ctrl state for
+ setting the panel power mode.
+ "dsi_lp_mode" = DSI low power mode
+ "dsi_hs_mode" = DSI high speed mode
+- qcom,mdss-dsi-lp2-command: An optional byte stream to request ultra
+ low power mode on a panel
+- qcom,mdss-dsi-lp2-command-mode: String that specifies the ctrl state for
+ setting the panel power mode.
+ "dsi_lp_mode" = DSI low power mode
+ "dsi_hs_mode" = DSI high speed mode
+- qcom,mdss-dsi-nolp-command: An optional byte stream to disable low
+ power and ultra low power panel modes
+- qcom,mdss-dsi-nolp-command-mode: String that specifies the ctrl state for
+ setting the panel power mode.
+ "dsi_lp_mode" = DSI low power mode
+ "dsi_hs_mode" = DSI high speed mode
- qcom,mdss-dsi-panel-status-check-mode:Specifies the panel status check method for ESD recovery.
"bta_check" = Uses BTA to check the panel status
"reg_read" = Reads panel status register to check the panel status
@@ -664,7 +683,7 @@
<40 120 128>,
<128 240 64>;
qcom,mdss-dsi-panel-orientation = "180"
- qcom,mdss-dsi-panel-jitter = <0x8>;
+ qcom,mdss-dsi-panel-jitter = <0x8 0x10>;
qcom,mdss-dsi-panel-prefill-lines = <0x10>;
qcom,mdss-dsi-force-clock-lane-hs;
qcom,compression-mode = "dsc";
diff --git a/Documentation/devicetree/bindings/drm/msm/sde-dp.txt b/Documentation/devicetree/bindings/drm/msm/sde-dp.txt
index 790da12..c811c28 100644
--- a/Documentation/devicetree/bindings/drm/msm/sde-dp.txt
+++ b/Documentation/devicetree/bindings/drm/msm/sde-dp.txt
@@ -25,7 +25,46 @@
- qcom,aux-en-gpio: Specifies the aux-channel enable gpio.
- qcom,aux-sel-gpio: Specifies the aux-channel select gpio.
- qcom,usbplug-cc-gpio: Specifies the usbplug orientation gpio.
-- qcom,aux-cfg-settings: An array that specifies the DP AUX configuration settings.
+- qcom,aux-cfg0-settings: Specifies the DP AUX configuration 0 settings. The first
+ entry in this array corresponds to the register offset
+ within DP AUX, while the remaining entries indicate the
+ programmable values.
+- qcom,aux-cfg1-settings: Specifies the DP AUX configuration 1 settings. The first
+ entry in this array corresponds to the register offset
+ within DP AUX, while the remaining entries indicate the
+ programmable values.
+- qcom,aux-cfg2-settings: Specifies the DP AUX configuration 2 settings. The first
+ entry in this array corresponds to the register offset
+ within DP AUX, while the remaining entries indicate the
+ programmable values.
+- qcom,aux-cfg3-settings: Specifies the DP AUX configuration 3 settings. The first
+ entry in this array corresponds to the register offset
+ within DP AUX, while the remaining entries indicate the
+ programmable values.
+- qcom,aux-cfg4-settings: Specifies the DP AUX configuration 4 settings. The first
+ entry in this array corresponds to the register offset
+ within DP AUX, while the remaining entries indicate the
+ programmable values.
+- qcom,aux-cfg5-settings: Specifies the DP AUX configuration 5 settings. The first
+ entry in this array corresponds to the register offset
+ within DP AUX, while the remaining entries indicate the
+ programmable values.
+- qcom,aux-cfg6-settings: Specifies the DP AUX configuration 6 settings. The first
+ entry in this array corresponds to the register offset
+ within DP AUX, while the remaining entries indicate the
+ programmable values.
+- qcom,aux-cfg7-settings: Specifies the DP AUX configuration 7 settings. The first
+ entry in this array corresponds to the register offset
+ within DP AUX, while the remaining entries indicate the
+ programmable values.
+- qcom,aux-cfg8-settings: Specifies the DP AUX configuration 8 settings. The first
+ entry in this array corresponds to the register offset
+ within DP AUX, while the remaining entries indicate the
+ programmable values.
+- qcom,aux-cfg9-settings: Specifies the DP AUX configuration 9 settings. The first
+ entry in this array corresponds to the register offset
+ within DP AUX, while the remaining entries indicate the
+ programmable values.
- qcom,max-pclk-frequency-khz: An integer specifying the max. pixel clock in KHz supported by Display Port.
- qcom,dp-usbpd-detection: Phandle for the PMI regulator node for USB PHY PD detection.
- qcom,<type>-supply-entries: A node that lists the elements of the supply used by the a particular "type" of DSI module. The module "types"
@@ -93,7 +132,16 @@
qcom,dp-usbpd-detection = <&pmi8998_pdphy>;
- qcom,aux-cfg-settings = [00 13 04 00 0a 26 0a 03 bb 03];
+ qcom,aux-cfg0-settings = [1c 00];
+ qcom,aux-cfg1-settings = [20 13 23 1d];
+ qcom,aux-cfg2-settings = [24 00];
+ qcom,aux-cfg3-settings = [28 00];
+ qcom,aux-cfg4-settings = [2c 0a];
+ qcom,aux-cfg5-settings = [30 26];
+ qcom,aux-cfg6-settings = [34 0a];
+ qcom,aux-cfg7-settings = [38 03];
+ qcom,aux-cfg8-settings = [3c bb];
+ qcom,aux-cfg9-settings = [40 03];
qcom,max-pclk-frequency-khz = <593470>;
pinctrl-names = "mdss_dp_active", "mdss_dp_sleep";
pinctrl-0 = <&sde_dp_aux_active &sde_dp_usbplug_cc_active>;
diff --git a/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt b/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt
index 51abe56..21edaa0 100644
--- a/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt
+++ b/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt
@@ -14,6 +14,10 @@
- #size-cells: Should be <0> as i2c addresses have no size component
- qcom,wrapper-core: Wrapper QUPv3 core containing this I2C controller.
+Optional property:
+ - qcom,clk-freq-out : Desired I2C bus clock frequency in Hz.
+ When missing, defaults to 400000 Hz.
+
Child nodes should conform to i2c bus binding.
Example:
@@ -32,4 +36,5 @@
#address-cells = <1>;
#size-cells = <0>;
qcom,wrapper-core = <&qupv3_0>;
+ qcom,clk-freq-out = <400000>;
};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/qti,pdc.txt b/Documentation/devicetree/bindings/interrupt-controller/qti,pdc.txt
index 8598d0c..26cd70e 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/qti,pdc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/qti,pdc.txt
@@ -28,6 +28,9 @@
Value type: <string>
Definition: Should contain "qcom,pdc-<target>"
+ * "qcom,pdc-sdm845": For sdm845 pin data
+ * "qcom,pdc-sdm670": For sdm670 pin data
+
- reg:
Usage: required
Value type: <prop-encoded-array>
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.txt b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
index 375eaf2..0f8dc27 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
@@ -91,6 +91,17 @@
a four level page table configuration. Set to use a three
level page table instead.
+- qcom,no-asid-retention:
+ Some hardware may lose internal state for asid after
+ retention. No cache invalidation operations involving asid
+ may be used.
+
+- qcom,deferred-regulator-disable-delay : The time delay for deferred regulator
+ disable in ms. In case of unmap call, regulator is
+ enabled/disabled. This may introduce additional delay. For
+ clients who do not detach, it's not possible to keep regulator
+ vote while smmu is attached. Type is <u32>.
+
- clocks : List of clocks to be used during SMMU register access. See
Documentation/devicetree/bindings/clock/clock-bindings.txt
for information about the format. For each clock specified
diff --git a/Documentation/devicetree/bindings/leds/leds-qpnp-wled.txt b/Documentation/devicetree/bindings/leds/leds-qpnp-wled.txt
index 1e6aac5..42e97f7 100644
--- a/Documentation/devicetree/bindings/leds/leds-qpnp-wled.txt
+++ b/Documentation/devicetree/bindings/leds/leds-qpnp-wled.txt
@@ -78,6 +78,8 @@
- qcom,lcd-psm-ctrl : A boolean property to specify if PSM needs to be
controlled dynamically when WLED module is enabled
or disabled.
+- qcom,auto-calibration-enable : A boolean property which enables auto-calibration
+ of the WLED sink configuration.
Optional properties if 'qcom,disp-type-amoled' is mentioned in DT:
- qcom,loop-comp-res-kohm : control to select the compensation resistor in kohm. default is 320.
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-cci.txt b/Documentation/devicetree/bindings/media/video/msm-cam-cci.txt
index c9aaa00..54365b1 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cam-cci.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-cci.txt
@@ -13,16 +13,16 @@
property defined.
- gpios : should contain phandle to gpio controller node and array of
#gpio-cells specifying specific gpio (controller specific)
-- qcom,gpio-req-tbl-num : should contain index to gpios specific to this sensor
-- qcom,gpio-req-tbl-flags : should contain direction of gpios present in
- qcom,gpio-req-tbl-num property (in the same order)
-- qcom,gpio-req-tbl-label : should contain name of gpios present in
- qcom,gpio-req-tbl-num property (in the same order)
+- gpio-req-tbl-num : should contain index to gpios specific to this sensor
+- gpio-req-tbl-flags : should contain direction of gpios present in
+ gpio-req-tbl-num property (in the same order)
+- gpio-req-tbl-label : should contain name of gpios present in
+ gpio-req-tbl-num property (in the same order)
- clock-names: name of the clocks required for the device
- clock-rates: clock rate in Hz
Optional properties:
-- qcom,cam-vreg-name : name of the voltage regulators required for the device.
+- regulator-names : name of the voltage regulators required for the device.
- gdscr-supply : should contain gdsr regulator used for cci clocks.
- mmagic-supply : should contain mmagic regulator used for mmagic clocks.
@@ -43,18 +43,16 @@
* Qualcomm Technologies, Inc. CCI clock settings
Optional properties:
-- qcom,hw-thigh : should contain high period of the SCL clock in terms of CCI
- clock cycle
-- qcom,hw-tlow : should contain high period of the SCL clock in terms of CCI
- clock cycle
-- qcom,hw-tsu-sto : should contain setup time for STOP condition
-- qcom,hw-tsu-sta : should contain setup time for Repeated START condition
-- qcom,hw-thd-dat : should contain hold time for the data
-- qcom,hw-thd-sta : should contain hold time for START condition
-- qcom,hw-tbuf : should contain free time between a STOP and a START condition
-- qcom,hw-scl-stretch-en : should contain enable or disable clock stretching
-- qcom,hw-trdhld : should contain internal hold time for SDA
-- qcom,hw-tsp : should contain filtering of glitches
+- hw-thigh : should contain high period of the SCL clock in terms of CCI clock cycle
+- hw-tlow : should contain low period of the SCL clock in terms of CCI clock cycle
+- hw-tsu-sto : should contain setup time for STOP condition
+- hw-tsu-sta : should contain setup time for Repeated START condition
+- hw-thd-dat : should contain hold time for the data
+- hw-thd-sta : should contain hold time for START condition
+- hw-tbuf : should contain free time between a STOP and a START condition
+- hw-scl-stretch-en : should contain enable or disable clock stretching
+- hw-trdhld : should contain internal hold time for SDA
+- hw-tsp : should contain filtering of glitches
* Qualcomm Technologies, Inc. MSM Sensor
@@ -64,7 +62,7 @@
- compatible : should be manufacturer name followed by sensor name
- "qcom,camera"
- reg : should contain i2c slave address of the device
-- qcom,csiphy-sd-index : should contain csiphy instance that will used to
+- csiphy-sd-index : should contain csiphy instance that will be used to
receive sensor data
- 0, 1, 2
- cam_vdig-supply : should contain regulator from which digital voltage is
@@ -72,67 +70,69 @@
- cam_vana-supply : should contain regulator from which analog voltage is
supplied
- cam_vio-supply : should contain regulator from which IO voltage is supplied
-- qcom,cam-vreg-name : should contain names of all regulators needed by this
+- regulator-names : should contain names of all regulators needed by this
sensor
- "cam_vdig", "cam_vana", "cam_vio", "cam_vaf"
-- qcom,cam-vreg-min-voltage : should contain minimum voltage level for
- regulators mentioned in qcom,cam-vreg-name property (in the same order)
-- qcom,cam-vreg-max-voltage : should contain maximum voltage level for
- regulators mentioned in qcom,cam-vreg-name property (in the same order)
-- qcom,cam-vreg-op-mode : should contain optimum voltage level for regulators
- mentioned in qcom,cam-vreg-name property (in the same order)
-- qcom,sensor-position-roll : should contain sensor rotational angle with respect
+- rgltr-cntrl-support : It is a boolean property. This property is required
+	if the node has regulator control parameters e.g. rgltr-min-voltage
+- rgltr-min-voltage : should contain minimum voltage level for
+	regulators mentioned in regulator-names property (in the same order)
+- rgltr-max-voltage : should contain maximum voltage level for
+	regulators mentioned in regulator-names property (in the same order)
+- rgltr-load-current : should contain the load current in microamps for regulators
+ mentioned in regulator-names property (in the same order)
+- sensor-position-roll : should contain sensor rotational angle with respect
to axis of reference
- 0, 90, 180, 360
-- qcom,sensor-position-pitch : should contain sensor rotational angle with respect
+- sensor-position-pitch : should contain sensor rotational angle with respect
to axis of reference
- 0, 90, 180, 360
-- qcom,sensor-position-yaw : should contain sensor rotational angle with respect
+- sensor-position-yaw : should contain sensor rotational angle with respect
to axis of reference
- 0, 90, 180, 360
Optional properties:
-- qcom,slave-id : should contain i2c slave address, device id address, expected
+- slave-id : should contain i2c slave address, device id address, expected
id read value and device id mask
-- qcom,sensor-name : should contain unique sensor name to differentiate from
+- sensor-name : should contain unique sensor name to differentiate from
other sensor
- "s5k3l1yx"
-- qcom,sensor-mode : should contain sensor mode supported
+- sensor-mode : should contain sensor mode supported
- 0 -> back camera 2D
- 1 -> front camera 2D
- 2 -> back camera 3D
- 3 -> back camera int 3D
-- qcom,sensor-type : should contain format of data that sensor streams
+- sensor-type : should contain format of data that sensor streams
- 0 -> bayer format
- 1 -> yuv format
- qcom,secure : should be enabled to operate the camera in secure mode
- 0, 1
-- qcom,gpio-no-mux : should contain field to indicate whether gpio mux table is
+- gpio-no-mux : should contain field to indicate whether gpio mux table is
available
- 1 if gpio mux is not available, 0 otherwise
- cam_vaf-supply : should contain regulator from which AF voltage is supplied
- gpios : should contain phandle to gpio controller node and array of
- #gpio-cells specifying specific gpio (controller specific)
-- qcom,gpio-reset : should contain index to gpio used by sensors reset_n
-- qcom,gpio-standby : should contain index to gpio used by sensors standby_n
-- qcom,gpio-vio : should contain index to gpio used by sensors io vreg enable
-- qcom,gpio-vana : should contain index to gpio used by sensors analog vreg enable
-- qcom,gpio-vdig : should contain index to gpio used by sensors digital vreg enable
-- qcom,gpio-vaf : should contain index to gpio used by sensors af vreg enable
-- qcom,gpio-af-pwdm : should contain index to gpio used by sensors af pwdm_n
-- qcom,gpio-req-tbl-num : should contain index to gpios specific to this sensor
-- qcom,gpio-req-tbl-flags : should contain direction of gpios present in
- qcom,gpio-req-tbl-num property (in the same order)
-- qcom,gpio-req-tbl-label : should contain name of gpios present in
- qcom,gpio-req-tbl-num property (in the same order)
-- qcom,gpio-set-tbl-num : should contain index of gpios that need to be
+ #gpio-cells specifying specific gpio (controller specific)
+- gpio-reset : should contain index to gpio used by sensors reset_n
+- gpio-standby : should contain index to gpio used by sensors standby_n
+- gpio-vio : should contain index to gpio used by sensors io vreg enable
+- gpio-vana : should contain index to gpio used by sensors analog vreg enable
+- gpio-vdig : should contain index to gpio used by sensors digital vreg enable
+- gpio-vaf : should contain index to gpio used by sensors af vreg enable
+- gpio-af-pwdm : should contain index to gpio used by sensors af pwdm_n
+- gpio-req-tbl-num : should contain index to gpios specific to this sensor
+- gpio-req-tbl-flags : should contain direction of gpios present in
+ gpio-req-tbl-num property (in the same order)
+- gpio-req-tbl-label : should contain name of gpios present in
+ gpio-req-tbl-num property (in the same order)
+- gpio-set-tbl-num : should contain index of gpios that need to be
configured by msm
-- qcom,gpio-set-tbl-flags : should contain value to be configured for the gpios
- present in qcom,gpio-set-tbl-num property (in the same order)
-- qcom,gpio-set-tbl-delay : should contain amount of delay after configuring
+- gpio-set-tbl-flags : should contain value to be configured for the gpios
+ present in gpio-set-tbl-num property (in the same order)
+- gpio-set-tbl-delay : should contain amount of delay after configuring
gpios as specified in gpio_set_tbl_flags property (in the same order)
-- qcom,csi-phy-sel : should contain CSIPHY core instance from which CSID should
+- csi-phy-sel : should contain CSIPHY core instance from which CSID should
receive data
-- qcom,actuator-cam-name : should contain actuator cam name associated with
+- actuator-cam-name : should contain actuator cam name associated with
this sensor
- If actuator does not exist, this property should not be initialized
- If actuator exist, this field should indicate the index of actuator to
@@ -141,39 +141,40 @@
for actuator
- qcom,actuator-vcm-enable : should contain value to be set for actuator vcm
gpio
-- qcom,sensor-position : should contain the mount angle of the camera sensor
+- sensor-position : should contain the mount angle of the camera sensor
- 0 -> back camera
- 1 -> front camera
-- qcom,cci-master : should contain i2c master id to be used for this camera
+- cci-master : should contain i2c master id to be used for this camera
sensor
- 0 -> MASTER 0
- 1 -> MASTER 1
-- qcom,actuator-src : if auto focus is supported by this sensor, this
+- actuator-src : if auto focus is supported by this sensor, this
property should contain phandle of respective actuator node
-- qcom,led-flash-src : if LED flash is supported by this sensor, this
+- led-flash-src : if LED flash is supported by this sensor, this
property should contain phandle of respective LED flash node
- qcom,vdd-cx-supply : should contain regulator from which cx voltage is
supplied
- qcom,vdd-cx-name : should contain names of cx regulator
-- qcom,eeprom-src : if eeprom memory is supported by this sensor, this
+- eeprom-src : if eeprom memory is supported by this sensor, this
property should contain phandle of respective eeprom nodes
-- qcom,ois-src : if optical image stabilization is supported by this sensor,
+- ois-src : if optical image stabilization is supported by this sensor,
this property should contain phandle of respective ois node
-- qcom,ir-led-src : if ir led is supported by this sensor, this property
+- ir-led-src : if ir led is supported by this sensor, this property
should contain phandle of respective ir-led node
- qcom,ir-cut-src : if ir cut is supported by this sensor, this property
should contain phandle of respective ir-cut node
- qcom,special-support-sensors: if only some special sensors are supported
on this board, add sensor name in this property.
-- qcom,clock-rates: clock rate in Hz.
-- qcom,clock-cntl-support: Says whether clock control support is present or not
-- qcom,clock-control: The valid fields are "NO_SET_RATE", "INIT_RATE" and
+- clock-rates: clock rate in Hz.
+- clock-cntl-level: specifies the different clock levels that the node supports.
+- clock-cntl-support: Says whether clock control support is present or not
+- clock-control: The valid fields are "NO_SET_RATE", "INIT_RATE" and
"SET_RATE". "NO_SET_RATE" the corresponding clock is enabled without setting
the rate assuming some other driver has already set it to appropriate rate.
"INIT_RATE" clock rate is not queried assuming some other driver has set
the clock rate and ispif will set the the clock to this rate.
"SET_RATE" clock is enabled and the rate is set to the value specified
- in the property qcom,clock-rates.
+ in the property clock-rates.
* Qualcomm Technologies, Inc. MSM ACTUATOR
@@ -184,20 +185,22 @@
data field which is 0x0
- compatible :
- "qcom,actuator"
-- qcom,cci-master : should contain i2c master id to be used for this camera
+- cci-master : should contain i2c master id to be used for this camera
sensor
- 0 -> MASTER 0
- 1 -> MASTER 1
Optional properties:
-- qcom,cam-vreg-name : should contain names of all regulators needed by this
+- regulator-names : should contain names of all regulators needed by this
actuator
- "cam_vaf"
-- qcom,cam-vreg-min-voltage : should contain minimum voltage level in mcrovolts
- for regulators mentioned in qcom,cam-vreg-name property (in the same order)
-- qcom,cam-vreg-max-voltage : should contain maximum voltage level in mcrovolts
- for regulators mentioned in qcom,cam-vreg-name property (in the same order)
-- qcom,cam-vreg-op-mode : should contain the maximum current in microamps
- required from the regulators mentioned in the qcom,cam-vreg-name property
+- rgltr-cntrl-support : It is a boolean property. This property is required
+	if the node has regulator control parameters e.g. rgltr-min-voltage
+- rgltr-min-voltage : should contain minimum voltage level in microvolts
+	for regulators mentioned in regulator-names property (in the same order)
+- rgltr-max-voltage : should contain maximum voltage level in microvolts
+ for regulators mentioned in regulator-names property (in the same order)
+- rgltr-load-current : should contain the maximum current in microamps
+ required from the regulators mentioned in the regulator-names property
(in the same order).
- cam_vaf-supply : should contain regulator from which AF voltage is supplied
@@ -210,22 +213,24 @@
data field which is 0x0
- compatible :
- "qcom,ois"
-- qcom,cci-master : should contain i2c master id to be used for this camera
+- cci-master : should contain i2c master id to be used for this camera
sensor
- 0 -> MASTER 0
- 1 -> MASTER 1
-- qcom,clock-rates: clock rate in Hz.
+- clock-rates: clock rate in Hz.
Optional properties:
-- qcom,cam-vreg-name : should contain names of all regulators needed by this
+- regulator-names : should contain names of all regulators needed by this
ois
- "cam_vaf"
-- qcom,cam-vreg-min-voltage : should contain minimum voltage level in mcrovolts
- for regulators mentioned in qcom,cam-vreg-name property (in the same order)
-- qcom,cam-vreg-max-voltage : should contain maximum voltage level in mcrovolts
- for regulators mentioned in qcom,cam-vreg-name property (in the same order)
-- qcom,cam-vreg-op-mode : should contain the maximum current in microamps
- required from the regulators mentioned in the qcom,cam-vreg-name property
+- rgltr-cntrl-support : It is a boolean property. This property is required
+	if the node has regulator control parameters e.g. rgltr-min-voltage
+- rgltr-min-voltage : should contain minimum voltage level in microvolts
+	for regulators mentioned in regulator-names property (in the same order)
+- rgltr-max-voltage : should contain maximum voltage level in microvolts
+ for regulators mentioned in regulator-names property (in the same order)
+- rgltr-load-current : should contain the maximum current in microamps
+ required from the regulators mentioned in the regulator-names property
(in the same order).
- cam_vaf-supply : should contain regulator from which ois voltage is supplied
@@ -234,9 +239,9 @@
led_flash0: qcom,camera-flash@0 {
cell-index = <0>;
compatible = "qcom,camera-flash";
- qcom,flash-source = <&pmi8994_flash0 &pmi8994_flash1>;
- qcom,torch-source = <&pmi8998_torch0 &pmi8998_torch1>;
- qcom,switch-source = <&pmi8998_switch>;
+ flash-source = <&pmi8994_flash0 &pmi8994_flash1>;
+ torch-source = <&pmi8998_torch0 &pmi8998_torch1>;
+ switch-source = <&pmi8998_switch>;
status = "ok";
}
@@ -248,70 +253,71 @@
interrupts = <0 50 0>;
interrupt-names = "cci";
clock-names = "camnoc_axi_clk", "soc_ahb_clk",
- "slow_ahb_src_clk", "cpas_ahb_clk",
- "cci_clk", "cci_clk_src";
- qcom,clock-rates = <0 0 80000000 0 0 37500000>;
+ "slow_ahb_src_clk", "cpas_ahb_clk",
+ "cci_clk", "cci_clk_src";
+ clock-rates = <0 0 80000000 0 0 37500000>;
+ clock-cntl-level = "turbo";
gpios = <&tlmm 17 0>,
<&tlmm 18 0>,
<&tlmm 19 0>,
<&tlmm 20 0>;
- qcom,gpio-tbl-num = <0 1 2 3>;
- qcom,gpio-tbl-flags = <1 1 1 1>;
- qcom,gpio-tbl-label = "CCI_I2C_DATA0",
+ gpio-tbl-num = <0 1 2 3>;
+ gpio-tbl-flags = <1 1 1 1>;
+ gpio-tbl-label = "CCI_I2C_DATA0",
"CCI_I2C_CLK0",
"CCI_I2C_DATA1",
"CCI_I2C_CLK1";
i2c_freq_100Khz: qcom,i2c_standard_mode {
- qcom,hw-thigh = <78>;
- qcom,hw-tlow = <114>;
- qcom,hw-tsu-sto = <28>;
- qcom,hw-tsu-sta = <28>;
- qcom,hw-thd-dat = <10>;
- qcom,hw-thd-sta = <77>;
- qcom,hw-tbuf = <118>;
- qcom,hw-scl-stretch-en = <0>;
- qcom,hw-trdhld = <6>;
- qcom,hw-tsp = <1>;
+ hw-thigh = <78>;
+ hw-tlow = <114>;
+ hw-tsu-sto = <28>;
+ hw-tsu-sta = <28>;
+ hw-thd-dat = <10>;
+ hw-thd-sta = <77>;
+ hw-tbuf = <118>;
+ hw-scl-stretch-en = <0>;
+ hw-trdhld = <6>;
+ hw-tsp = <1>;
status = "ok";
};
i2c_freq_400Khz: qcom,i2c_fast_mode {
- qcom,hw-thigh = <20>;
- qcom,hw-tlow = <28>;
- qcom,hw-tsu-sto = <21>;
- qcom,hw-tsu-sta = <21>;
- qcom,hw-thd-dat = <13>;
- qcom,hw-thd-sta = <18>;
- qcom,hw-tbuf = <25>;
- qcom,hw-scl-stretch-en = <0>;
- qcom,hw-trdhld = <6>;
- qcom,hw-tsp = <3>;
+ hw-thigh = <20>;
+ hw-tlow = <28>;
+ hw-tsu-sto = <21>;
+ hw-tsu-sta = <21>;
+ hw-thd-dat = <13>;
+ hw-thd-sta = <18>;
+ hw-tbuf = <25>;
+ hw-scl-stretch-en = <0>;
+ hw-trdhld = <6>;
+ hw-tsp = <3>;
status = "ok";
};
i2c_freq_custom: qcom,i2c_custom_mode {
- qcom,hw-thigh = <15>;
- qcom,hw-tlow = <28>;
- qcom,hw-tsu-sto = <21>;
- qcom,hw-tsu-sta = <21>;
- qcom,hw-thd-dat = <13>;
- qcom,hw-thd-sta = <18>;
- qcom,hw-tbuf = <25>;
- qcom,hw-scl-stretch-en = <1>;
- qcom,hw-trdhld = <6>;
- qcom,hw-tsp = <3>;
+ hw-thigh = <15>;
+ hw-tlow = <28>;
+ hw-tsu-sto = <21>;
+ hw-tsu-sta = <21>;
+ hw-thd-dat = <13>;
+ hw-thd-sta = <18>;
+ hw-tbuf = <25>;
+ hw-scl-stretch-en = <1>;
+ hw-trdhld = <6>;
+ hw-tsp = <3>;
status = "ok";
};
i2c_freq_1Mhz: qcom,i2c_fast_plus_mode {
- qcom,hw-thigh = <16>;
- qcom,hw-tlow = <22>;
- qcom,hw-tsu-sto = <17>;
- qcom,hw-tsu-sta = <18>;
- qcom,hw-thd-dat = <16>;
- qcom,hw-thd-sta = <15>;
- qcom,hw-tbuf = <19>;
- qcom,hw-scl-stretch-en = <1>;
- qcom,hw-trdhld = <3>;
- qcom,hw-tsp = <3>;
- qcom,cci-clk-src = <37500000>;
+ hw-thigh = <16>;
+ hw-tlow = <22>;
+ hw-tsu-sto = <17>;
+ hw-tsu-sta = <18>;
+ hw-thd-dat = <16>;
+ hw-thd-sta = <15>;
+ hw-tbuf = <19>;
+ hw-scl-stretch-en = <1>;
+ hw-trdhld = <3>;
+ hw-tsp = <3>;
+ cci-clk-src = <37500000>;
status = "ok";
};
@@ -319,34 +325,36 @@
cell-index = <0>;
reg = <0x0>;
compatible = "qcom,actuator";
- qcom,cci-master = <0>;
+ cci-master = <0>;
cam_vaf-supply = <&pmi8998_bob>;
- qcom,cam-vreg-name = "cam_vaf";
- qcom,cam-vreg-min-voltage = <2800000>;
- qcom,cam-vreg-max-voltage = <2800000>;
- qcom,cam-vreg-op-mode = <100000>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2800000>;
+ rgltr-max-voltage = <2800000>;
+ rgltr-load-current = <100000>;
};
qcom,cam-sensor@0 {
cell-index = <0>;
compatible = "qcom,camera";
reg = <0x0>;
- qcom,csiphy-sd-index = <0>;
- qcom,sensor-position-roll = <90>;
- qcom,sensor-position-pitch = <0>;
- qcom,sensor-position-yaw = <180>;
- qcom,secure = <1>;
- qcom,led-flash-src = <&led_flash0>;
- qcom,actuator-src = <&actuator0>;
- qcom,eeprom-src = <&eeprom0>;
+ csiphy-sd-index = <0>;
+ sensor-position-roll = <90>;
+ sensor-position-pitch = <0>;
+ sensor-position-yaw = <180>;
+ secure = <1>;
+ led-flash-src = <&led_flash0>;
+ actuator-src = <&actuator0>;
+ eeprom-src = <&eeprom0>;
cam_vdig-supply = <&pm845_s3>;
cam_vio-supply = <&pm845_lvs1>;
cam_vana-supply = <&pmi8998_bob>;
- qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana";
- qcom,cam-vreg-min-voltage = <0 3312000 1352000>;
- qcom,cam-vreg-max-voltage = <0 3312000 1352000>;
- qcom,cam-vreg-op-mode = <0 80000 105000>;
- qcom,gpio-no-mux = <0>;
+ regulator-names = "cam_vdig", "cam_vio", "cam_vana";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <0 3312000 1352000>;
+ rgltr-max-voltage = <0 3312000 1352000>;
+ rgltr-load-current = <0 80000 105000>;
+ gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk0_active
&cam_sensor_rear_active>;
@@ -355,19 +363,20 @@
gpios = <&tlmm 13 0>,
<&tlmm 80 0>,
<&tlmm 79 0>;
- qcom,gpio-reset = <1>;
- qcom,gpio-standby = <2>;
- qcom,gpio-req-tbl-num = <0 1 2>;
- qcom,gpio-req-tbl-flags = <1 0 0>;
- qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+ gpio-reset = <1>;
+ gpio-standby = <2>;
+ gpio-req-tbl-num = <0 1 2>;
+ gpio-req-tbl-flags = <1 0 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK0",
"CAM_RESET0",
"CAM_VANA";
- qcom,sensor-position = <0>;
- qcom,sensor-mode = <0>;
- qcom,cci-master = <0>;
+ sensor-position = <0>;
+ sensor-mode = <0>;
+ cci-master = <0>;
status = "ok";
clocks = <&clock_mmss clk_mclk0_clk_src>,
<&clock_mmss clk_camss_mclk0_clk>;
clock-names = "cam_src_clk", "cam_clk";
+ clock-cntl-level;
};
};
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-cdm.txt b/Documentation/devicetree/bindings/media/video/msm-cam-cdm.txt
index d62910a..3dc661f 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cam-cdm.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-cdm.txt
@@ -118,6 +118,12 @@
Value type: <string>
Definition: List of Clients supported by CDM HW node.
+- clock-cntl-level
+ Usage: required
+ Value type: <string>
+ Definition: List of strings corresponding to the clock-rates levels.
+ Supported strings: minsvs, lowsvs, svs, svs_l1, nominal, turbo.
+
Example:
qcom,cpas-cdm0@ac48000 {
cell-index = <0>;
@@ -143,5 +149,6 @@
<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
qcom,clock-rates = <0 80000000 80000000 80000000 80000000 80000000>;
cdm-client-names = "ife";
+ clock-cntl-level = "turbo";
status = "ok";
};
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt b/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt
index 62a51cf..99f3ba2 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt
@@ -97,6 +97,12 @@
Value type: <u32>
Definition: List of clocks rates.
+- clock-cntl-level
+ Usage: required
+ Value type: <string>
+ Definition: List of strings corresponding to the clock-rates levels.
+ Supported strings: minsvs, lowsvs, svs, svs_l1, nominal, turbo.
+
- qcom,msm-bus,name
- qcom,msm-bus,num-cases
- qcom,msm-bus,num-paths
@@ -191,6 +197,7 @@
<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
src-clock-name = "slow_ahb_clk_src";
clock-rates = <0 0 0 0 80000000 0>;
+ clock-cntl-level = "turbo";
qcom,msm-bus,name = "cam_ahb";
qcom,msm-bus,num-cases = <4>;
qcom,msm-bus,num-paths = <1>;
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-csiphy.txt b/Documentation/devicetree/bindings/media/video/msm-cam-csiphy.txt
index e8a74b3..dd8668c 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cam-csiphy.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-csiphy.txt
@@ -6,13 +6,15 @@
- "qcom,csiphy-v5.01"
- reg : offset and length of the register set for the device
for the csiphy operating in compatible mode.
+- reg-cam-base : offset of csiphy in camera hw block
- reg-names : should specify relevant names to each reg property defined.
- interrupts : should contain the csiphy interrupt.
- interrupt-names : should specify relevant names to each interrupts
property defined.
- clock-names: name of the clocks required for the device
-- qcom,clock-rates: clock rate in Hz
+- clock-rates: clock rate in Hz
- 0 if appropriate clock is required but doesn't have to apply the rate
+- clock-cntl-level: says what all the different clock levels the node has.
Example:
@@ -20,6 +22,7 @@
cell-index = <0>;
compatible = "qcom,csiphy-v1.0", "qcom,csiphy";
reg = <0xac65000 0x200>;
+ reg-cam-base = <0x65000>;
reg-names = "csiphy";
interrupts = <0 477 0>;
interrupt-names = "csiphy";
@@ -28,7 +31,9 @@
"cphy_rx_clk_src", "csiphy0_clk",
"csi0phytimer_clk_src", "csi0phytimer_clk",
"ife_0_csid_clk", "ife_0_csid_clk_src";
- qcom,clock-rates =
+ clock-rates =
<0 0 80000000 0 320000000 0 269333333 0 0 384000000>;
+ clock-cntl-level = "turbo";
+ regulator-names = "gdscr";
status = "ok";
};
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-eeprom.txt b/Documentation/devicetree/bindings/media/video/msm-cam-eeprom.txt
new file mode 100644
index 0000000..933ad85
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-eeprom.txt
@@ -0,0 +1,438 @@
+* Qualcomm Technologies, Inc. MSM EEPROM
+
+EEPROM is a one-time programmed (OTP) device that stores the calibration data
+used for the camera sensor. It may either be integrated in the sensor module or in
+the sensor itself. As a result, the power, clock and GPIOs may be the same as
+the camera sensor. The following describes the page block map, power supply,
+clock, GPIO and power on sequence properties of the EEPROM device.
+
+=======================================================
+Required Node Structure if probe happens from userspace
+=======================================================
+The EEPROM device is described in one level of the device node.
+
+======================================
+First Level Node - CAM EEPROM device
+======================================
+- compatible
+ Usage: required
+ Value type: <string>
+ Definition: Should be "qcom,eeprom".
+
+- cell-index
+ Usage: required
+ Value type: <u32>
+ Definition: Should specify the hardware index id.
+
+- reg
+ Usage: required
+ Value type: <u32>
+ Definition: Register values.
+
+- regulator-names
+ Usage: required
+ Value type: <string>
+ Definition: Name of the regulator resources for EEPROM HW.
+
+- xxxx-supply
+ Usage: required
+ Value type: <phandle>
+ Definition: Regulator reference corresponding to the names listed in
+ "regulator-names".
+
+- rgltr-cntrl-support
+ Usage: required
+ Value type: <bool>
+ Definition: This property specifies if the regulator control is supported
+ e.g. rgltr-min-voltage.
+
+- rgltr-min-voltage
+ Usage: required
+ Value type: <u32>
+ Definition: should contain minimum voltage level for regulators
+ mentioned in regulator-names property.
+
+- rgltr-max-voltage
+ Usage: required
+ Value type: <u32>
+ Definition: should contain maximum voltage level for regulators
+ mentioned in regulator-names property.
+
+- rgltr-load-current
+ Usage: required
+ Value type: <u32>
+ Definition: should contain the maximum current in microamps required for
+ the regulators mentioned in regulator-names property.
+
+- gpio-no-mux
+ Usage: required
+ Value type: <u32>
+ Definition: should specify the gpio mux type.
+
+- gpios
+ Usage: required
+ Value type: <phandle>
+ Definition: should specify the gpios to be used for the eeprom.
+
+- gpio-reset
+ Usage: required
+ Value type: <u32>
+ Definition: should specify the reset gpio index.
+
+- gpio-standby
+ Usage: required
+ Value type: <u32>
+ Definition: should specify the standby gpio index.
+
+- gpio-req-tbl-num
+ Usage: required
+ Value type: <u32>
+ Definition: should specify the gpio table index.
+
+- gpio-req-tbl-flags
+ Usage: required
+ Value type: <u32>
+ Definition: should specify the gpio functions.
+
+- gpio-req-tbl-label
+ Usage: required
+ Value type: <string>
+ Definition: should specify the gpio labels.
+
+- sensor-position
+ Usage: required
+ Value type: <u32>
+ Definition: should contain the mount angle of the camera sensor.
+
+- cci-master
+ Usage: required
+ Value type: <u32>
+ Definition: should contain i2c master id to be used for this camera
+ sensor.
+
+- sensor-mode
+ Usage: required
+ Value type: <u32>
+ Definition: should contain sensor mode supported.
+
+- clock-names
+ Usage: required
+ Value type: <string>
+ Definition: List of clock names required for EEPROM HW.
+
+- clocks
+ Usage: required
+ Value type: <phandle>
+ Definition: List of clocks used for EEPROM HW.
+
+- clock-cntl-level
+ Usage: required
+ Value type: <string>
+ Definition: says what all the different clock levels the eeprom node has.
+
+- clock-rates
+ Usage: required
+ Value type: <u32>
+ Definition: List of clocks rates.
+
+Example:
+
+ eprom0: qcom,eeprom@0 {
+ cell-index = <0>;
+ reg = <0x0>;
+ compatible = "qcom,eeprom";
+ cam_vdig-supply = <&pm8998_l5>;
+ cam_vio-supply = <&pm8998_lvs1>;
+ regulator-names = "cam_vdig", "cam_vio";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <1200000 0>;
+ rgltr-max-voltage = <1200000 0>;
+ rgltr-load-current = <0 80000 105000 0>;
+ gpio-no-mux = <0>;
+ gpios = <&msmgpio 26 0>,
+ <&msmgpio 37 0>,
+ <&msmgpio 36 0>;
+ gpio-reset = <1>;
+ gpio-standby = <2>;
+ gpio-req-tbl-num = <0 1 2>;
+ gpio-req-tbl-flags = <1 0 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK",
+ "CAM_RESET1",
+ "CAM_STANDBY";
+ sensor-position = <0>;
+ sensor-mode = <0>;
+ cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+
+=======================================================
+Required Node Structure if probe happens from kernel
+=======================================================
+The EEPROM device is described in one level of the device node.
+
+======================================
+First Level Node - CAM EEPROM device
+======================================
+- compatible
+ Usage: required
+ Value type: <string>
+ Definition: Should be "qcom,eeprom".
+
+- cell-index
+ Usage: required
+ Value type: <u32>
+ Definition: Should specify the hardware index id.
+
+- reg
+ Usage: required
+ Value type: <u32>
+ Definition: Register values.
+
+- qcom,eeprom-name
+ Usage: required
+ Value type: <string>
+ Definition: Name of the EEPROM HW.
+
+- qcom,slave-addr
+ Usage: required
+ Value type: <u32>
+ Definition: Slave address of the EEPROM HW.
+
+- qcom,num-blocks
+ Usage: required
+ Value type: <u32>
+ Definition: Total block number that eeprom contains.
+
+- qcom,pageX
+ Usage: required
+ Value type: <u32>
+ Definition: List of values specifying page size, start address,
+ address type, data, data type, delay in ms.
+ size 0 stands for non-paged.
+
+- qcom,pollX
+ Usage: required
+ Value type: <u32>
+ Definition: List of values specifying poll size, poll reg address,
+ address type, data, data type, delay in ms.
+ size 0 stands for not used.
+
+- qcom,memX
+ Usage: required
+ Value type: <u32>
+ Definition: List of values specifying memory size, start address,
+ address type, data, data type, delay in ms.
+ size 0 stands for not used.
+
+- qcom,saddrX
+ Usage: required
+ Value type: <u32>
+ Definition: property should specify the slave address for block (%d).
+
+- regulator-names
+ Usage: required
+ Value type: <string>
+ Definition: Name of the regulator resources for EEPROM HW.
+
+- qcom,cmm-data-support
+ Usage: required
+ Value type: <u32>
+ Definition: Camera MultiModule data capability flag.
+
+- qcom,cmm-data-compressed
+ Usage: required
+ Value type: <u32>
+ Definition: Camera MultiModule data compression flag.
+
+- qcom,cmm-data-offset
+ Usage: required
+ Value type: <u32>
+ Definition: Camera MultiModule data start offset.
+
+- qcom,cmm-data-size
+ Usage: required
+ Value type: <u32>
+ Definition: Camera MultiModule data size.
+
+- qcom,cam-power-seq-type
+ Usage: required
+ Value type: <string>
+ Definition: should specify the power on sequence types.
+
+- qcom,cam-power-seq-val
+ Usage: required
+ Value type: <string>
+ Definition: should specify the power on sequence values.
+
+- qcom,cam-power-seq-cfg-val
+ Usage: required
+ Value type: <u32>
+ Definition: should specify the power on sequence config values.
+
+- qcom,cam-power-seq-delay
+ Usage: required
+ Value type: <u32>
+ Definition: should specify the power on sequence delay time in ms.
+
+- xxxx-supply
+ Usage: required
+ Value type: <phandle>
+ Definition: Regulator reference corresponding to the names listed in
+ "regulator-names".
+
+- rgltr-cntrl-support
+ Usage: required
+ Value type: <bool>
+ Definition: This property specifies if the regulator control is supported
+ e.g. rgltr-min-voltage.
+
+- rgltr-min-voltage
+ Usage: required
+ Value type: <u32>
+ Definition: should contain minimum voltage level for regulators
+ mentioned in regulator-names property.
+
+- rgltr-max-voltage
+ Usage: required
+ Value type: <u32>
+ Definition: should contain maximum voltage level for regulators
+ mentioned in regulator-names property.
+
+- rgltr-load-current
+ Usage: required
+ Value type: <u32>
+ Definition: should contain the maximum current in microamps required for
+ the regulators mentioned in regulator-names property.
+
+- gpio-no-mux
+ Usage: required
+ Value type: <u32>
+ Definition: should specify the gpio mux type.
+
+- gpios
+ Usage: required
+ Value type: <phandle>
+ Definition: should specify the gpios to be used for the eeprom.
+
+- gpio-reset
+ Usage: required
+ Value type: <u32>
+ Definition: should specify the reset gpio index.
+
+- gpio-standby
+ Usage: required
+ Value type: <u32>
+ Definition: should specify the standby gpio index.
+
+- gpio-req-tbl-num
+ Usage: required
+ Value type: <u32>
+ Definition: should specify the gpio table index.
+
+- gpio-req-tbl-flags
+ Usage: required
+ Value type: <u32>
+ Definition: should specify the gpio functions.
+
+- gpio-req-tbl-label
+ Usage: required
+ Value type: <string>
+ Definition: should specify the gpio labels.
+
+- sensor-position
+ Usage: required
+ Value type: <u32>
+ Definition: should contain the mount angle of the camera sensor.
+
+- cci-master
+ Usage: required
+ Value type: <u32>
+ Definition: should contain i2c master id to be used for this camera
+ sensor.
+
+- sensor-mode
+ Usage: required
+ Value type: <u32>
+ Definition: should contain sensor mode supported.
+
+- clock-cntl-level
+ Usage: required
+ Value type: <string>
+ Definition: says what all the different clock levels the eeprom node has.
+
+- clock-names
+ Usage: required
+ Value type: <string>
+ Definition: List of clock names required for EEPROM HW.
+
+- clocks
+ Usage: required
+ Value type: <phandle>
+ Definition: List of clocks used for EEPROM HW.
+
+- clock-rates
+ Usage: required
+ Value type: <u32>
+ Definition: List of clocks rates.
+
+Example:
+
+ eeprom0: qcom,eeprom@0 {
+ cell-index = <0>;
+ reg = <0x0>;
+ qcom,eeprom-name = "msm_eeprom";
+ compatible = "qcom,eeprom";
+ qcom,slave-addr = <0x60>;
+ qcom,num-blocks = <2>;
+ qcom,page0 = <1 0x100 2 0x01 1 1>;
+ qcom,poll0 = <0 0x0 2 0 1 1>;
+ qcom,mem0 = <0 0x0 2 0 1 0>;
+ qcom,page1 = <1 0x0200 2 0x8 1 1>;
+ qcom,pageen1 = <1 0x0202 2 0x01 1 10>;
+ qcom,poll1 = <0 0x0 2 0 1 1>;
+ qcom,mem1 = <32 0x3000 2 0 1 0>;
+ qcom,saddr1 = <0x62>;
+ qcom,cmm-data-support;
+ qcom,cmm-data-compressed;
+ qcom,cmm-data-offset = <0>;
+ qcom,cmm-data-size = <0>;
+ qcom,cam-power-seq-type = "sensor_vreg",
+ "sensor_vreg", "sensor_clk",
+ "sensor_gpio", "sensor_gpio";
+ qcom,cam-power-seq-val = "cam_vdig",
+ "cam_vio", "sensor_cam_mclk",
+ "sensor_gpio_reset",
+ "sensor_gpio_standby";
+ qcom,cam-power-seq-cfg-val = <1 1 24000000 1 1>;
+ qcom,cam-power-seq-delay = <1 1 5 5 10>;
+ cam_vdig-supply = <&pm8998_l5>;
+ cam_vio-supply = <&pm8998_lvs1>;
+ regulator-names = "cam_vdig", "cam_vio";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <1200000 0>;
+ rgltr-max-voltage = <1200000 0>;
+ rgltr-load-current = <0 80000 105000 0>;
+ qcom,gpio-no-mux = <0>;
+ gpios = <&msmgpio 26 0>,
+ <&msmgpio 37 0>,
+ <&msmgpio 36 0>;
+ gpio-reset = <1>;
+ gpio-standby = <2>;
+ gpio-req-tbl-num = <0 1 2>;
+ gpio-req-tbl-flags = <1 0 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK",
+ "CAM_RESET1",
+ "CAM_STANDBY";
+ sensor-position = <0>;
+ sensor-mode = <0>;
+ cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+ clock-cntl-level = "turbo";
+ clock-names = "cam_clk";
+ clock-rates = <24000000>;
+ };
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-icp.txt b/Documentation/devicetree/bindings/media/video/msm-cam-icp.txt
index c560a05..28a0920 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cam-icp.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-icp.txt
@@ -111,6 +111,12 @@
Value type: <phandle>
Definition: List of clocks used for CDM HW.
+- clock-cntl-level
+ Usage: required
+ Value type: <string>
+ Definition: List of strings corresponding to the clock-rates levels.
+ Supported strings: minsvs, lowsvs, svs, svs_l1, nominal, turbo.
+
- clock-rates
Usage: required
Value type: <u32>
@@ -157,6 +163,7 @@
<&clock_camcc CAM_CC_ICP_TS_CLK>;
clock-rates = <0 0 0 80000000 0 0 0 0 600000000 0 0>;
+ clock-cntl-level = "turbo";
fw_name = "CAMERA_ICP.elf";
};
@@ -177,6 +184,7 @@
<&clock_camcc CAM_CC_IPE_0_CLK_SRC>;
clock-rates = <80000000 400000000 0 0 600000000>;
+ clock-cntl-level = "turbo";
};
qcom,ipe1 {
@@ -195,7 +203,8 @@
<&clock_camcc CAM_CC_IPE_1_CLK>,
<&clock_camcc CAM_CC_IPE_1_CLK_SRC>;
- clock-rates = <80000000 400000000 0 0 600000000>;
+ clock-rates = <80000000 400000000 0 0 600000000>;
+ clock-cntl-level = "turbo";
};
bps: qcom,bps {
@@ -215,5 +224,6 @@
<&clock_camcc CAM_CC_BPS_CLK_SRC>;
clock-rates = <80000000 400000000 0 0 600000000>;
+ clock-cntl-level = "turbo";
};
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-jpeg.txt b/Documentation/devicetree/bindings/media/video/msm-cam-jpeg.txt
new file mode 100644
index 0000000..a2ed98e
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-jpeg.txt
@@ -0,0 +1,187 @@
+* Qualcomm Technologies, Inc. MSM Camera JPEG
+
+The MSM camera JPEG devices are implemented as multiple device nodes.
+The root JPEG device node has properties defined to hint the driver
+about the number of Encoder and DMA nodes available during the
+probe sequence. Each node has multiple properties defined
+for interrupts, clocks and regulators.
+
+=======================
+Required Node Structure
+=======================
+JPEG root interface node takes care of the handling account for number
+of Encoder and DMA devices present on the hardware.
+
+- compatible
+ Usage: required
+ Value type: <string>
+ Definition: Should be "qcom,cam-jpeg".
+
+- compat-hw-name
+ Usage: required
+ Value type: <string>
+ Definition: Should be "qcom,jpegenc" or "qcom,jpegdma".
+
+- num-jpeg-enc
+ Usage: required
+ Value type: <u32>
+ Definition: Number of supported Encoder HW blocks.
+
+- num-jpeg-dma
+ Usage: required
+ Value type: <u32>
+ Definition: Number of supported DMA HW blocks.
+
+Example:
+ qcom,cam-jpeg {
+ compatible = "qcom,cam-jpeg";
+ compat-hw-name = "qcom,jpegenc",
+ "qcom,jpegdma";
+ num-jpeg-enc = <1>;
+ num-jpeg-dma = <1>;
+ status = "ok";
+ };
+
+
+=======================
+Required Node Structure
+=======================
+Encoder/DMA Nodes provide interface for JPEG driver about
+the device register map, interrupt map, clocks and regulators.
+
+- cell-index
+ Usage: required
+ Value type: <u32>
+ Definition: Node instance number.
+
+- compatible
+ Usage: required
+ Value type: <string>
+ Definition: Should be "qcom,cam_jpeg_enc".
+
+- reg-names
+ Usage: optional
+ Value type: <string>
+ Definition: Name of the register resources.
+
+- reg
+ Usage: optional
+ Value type: <u32>
+ Definition: Register values.
+
+- reg-cam-base
+ Usage: optional
+ Value type: <u32>
+ Definition: Offset of the register space compared to
+ the Camera base register space.
+
+- interrupt-names
+ Usage: optional
+ Value type: <string>
+ Definition: Name of the interrupt.
+
+- interrupts
+ Usage: optional
+ Value type: <u32>
+ Definition: Interrupt associated with JPEG HW.
+
+- regulator-names
+ Usage: required
+ Value type: <string>
+ Definition: Name of the regulator resources for JPEG HW.
+
+- camss-vdd-supply
+ Usage: required
+ Value type: <phandle>
+ Definition: Regulator reference corresponding to the names listed
+ in "regulator-names".
+
+- clock-names
+ Usage: required
+ Value type: <string>
+ Definition: List of clock names required for JPEG HW.
+
+- clocks
+ Usage: required
+ Value type: <phandle>
+ Definition: List of clocks used for JPEG HW.
+
+- clock-rates
+ Usage: required
+ Value type: <u32>
+ Definition: List of clocks rates.
+
+- src-clock-name
+ Usage: required
+ Value type: <string>
+ Definition: Source clock name.
+
+- clock-cntl-level
+ Usage: required
+ Value type: <string>
+ Definition: List of strings corresponding to the clock-rates levels.
+ Supported strings: minsvs, lowsvs, svs, svs_l1, nominal, turbo.
+
+Examples:
+ cam_jpeg_enc: qcom,jpegenc@ac4e000 {
+ cell-index = <0>;
+ compatible = "qcom,cam_jpeg_enc";
+ reg-names = "jpege_hw";
+ reg = <0xac4e000 0x4000>;
+ reg-cam-base = <0x4e000>;
+ interrupt-names = "jpeg";
+ interrupts = <0 474 0>;
+ regulator-names = "camss-vdd";
+ camss-vdd-supply = <&titan_top_gdsc>;
+ clock-names = "camera_ahb",
+ "camera_axi",
+ "soc_ahb_clk",
+ "cpas_ahb_clk",
+ "camnoc_axi_clk",
+ "jpegenc_clk_src",
+ "jpegenc_clk";
+ clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+ <&clock_gcc GCC_CAMERA_AXI_CLK>,
+ <&clock_camcc CAM_CC_SOC_AHB_CLK>,
+ <&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+ <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+ <&clock_camcc CAM_CC_JPEG_CLK_SRC>,
+ <&clock_camcc CAM_CC_JPEG_CLK>;
+
+ clock-rates = <0 0 0 0 0 600000000 0>;
+ src-clock-name = "jpegenc_clk_src";
+ clock-cntl-level = "nominal";
+ status = "ok";
+ };
+
+ cam_jpeg_dma: qcom,jpegdma@0xac52000{
+ cell-index = <0>;
+ compatible = "qcom,cam_jpeg_dma";
+ reg-names = "jpegdma_hw";
+ reg = <0xac52000 0x4000>;
+ reg-cam-base = <0x52000>;
+ interrupt-names = "jpegdma";
+ interrupts = <0 475 0>;
+ regulator-names = "camss-vdd";
+ camss-vdd-supply = <&titan_top_gdsc>;
+ clock-names = "camera_ahb",
+ "camera_axi",
+ "soc_ahb_clk",
+ "cpas_ahb_clk",
+ "camnoc_axi_clk",
+ "jpegdma_clk_src",
+ "jpegdma_clk";
+ clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+ <&clock_gcc GCC_CAMERA_AXI_CLK>,
+ <&clock_camcc CAM_CC_SOC_AHB_CLK>,
+ <&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+ <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+ <&clock_camcc CAM_CC_JPEG_CLK_SRC>,
+ <&clock_camcc CAM_CC_JPEG_CLK>;
+
+ clock-rates = <0 0 0 0 0 600000000 0>;
+ src-clock-name = "jpegdma_clk_src";
+ clock-cntl-level = "nominal";
+ status = "ok";
+ };
+
diff --git a/Documentation/devicetree/bindings/media/video/msm-camera-flash.txt b/Documentation/devicetree/bindings/media/video/msm-camera-flash.txt
new file mode 100644
index 0000000..d24314a
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-camera-flash.txt
@@ -0,0 +1,126 @@
+* Qualcomm Technologies, Inc. MSM FLASH
+
+The MSM camera Flash driver provides the definitions for
+enabling and disabling LED Torch/Flash by requesting it to
+PMIC/I2C/GPIO based hardware. It provides the functions for
+the Client to control the Flash hardware.
+
+=======================================================
+Required Node Structure
+=======================================================
+The Flash device is described in one level of the device node.
+
+======================================
+First Level Node - CAM FLASH device
+======================================
+- compatible
+ Usage: required
+ Value type: <string>
+ Definition: Should be "qcom,camera-flash".
+
+- cell-index
+ Usage: required
+ Value type: <u32>
+ Definition: Should specify the hardware index id.
+
+- reg
+ Usage: required
+ Value type: <u32>
+ Definition: Register values.
+
+- flash-source
+ Usage: required
+ Value type: <phandle>
+ Definition: Should contain array of phandles to Flash source nodes.
+
+- torch-source
+ Usage: required
+ Value type: <phandle>
+ Definition: Should contain array of phandles to torch source nodes.
+
+- switch-source
+ Usage: Optional
+ Value type: <phandle>
+ Definition: Should contain phandle to switch source nodes.
+
+- slave-id
+ Usage: optional
+ Value type: <u32>
+ Definition: should contain i2c slave address, device id address
+ and expected id read value.
+
+- cci-master
+ Usage: optional
+ Value type: <u32>
+ Definition: should contain i2c master id to be used for this camera
+ flash.
+
+- max-current
+ Usage: optional
+ Value type: <u32>
+ Definition: Max current in mA supported by flash
+
+- max-duration
+ Usage: optional
+ Value type: <u32>
+ Definition: Max duration in ms flash can glow.
+
+- gpios
+ Usage: optional
+ Value type: <u32>
+ Definition: should specify the gpios to be used for the flash.
+
+- gpio-req-tbl-num
+ Usage: optional
+ Value type: <u32>
+ Definition: should specify the gpio table index.
+
+- gpio-req-tbl-flags
+ Usage: optional
+ Value type: <u32>
+ Definition: should specify the gpio functions.
+
+- gpio-req-tbl-label
+ Usage: optional
+ Value type: <u32>
+ Definition: should specify the gpio labels.
+
+- gpio-flash-reset
+ Usage: optional
+ Value type: <u32>
+ Definition: should contain index to gpio used by flash's "flash reset" pin.
+
+- gpio-flash-en
+ Usage: optional
+ Value type: <u32>
+ Definition: should contain index to gpio used by flash's "flash enable" pin.
+
+- gpio-flash-now
+ Usage: optional
+ Value type: <u32>
+ Definition: should contain index to gpio used by flash's "flash now" pin.
+
+Example:
+
+led_flash_rear: qcom,camera-flash@0 {
+ reg = <0x00 0x00>;
+ cell-index = <0>;
+ compatible = "qcom,camera-flash";
+ flash-source = <&pmi8998_flash0 &pmi8998_flash1>;
+ torch-source = <&pmi8998_torch0 &pmi8998_torch1>;
+ switch-source = <&pmi8998_switch0>;
+ slave-id = <0x00 0x00 0x0011>;
+ cci-master = <0>;
+ gpios = <&msmgpio 23 0>,
+ <&msmgpio 24 0>,
+ <&msmgpio 25 0>;
+ gpio-flash-reset = <0>;
+ gpio-flash-en = <0>;
+ gpio-flash-now = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <0 0>;
+ gpio-req-tbl-label = "FLASH_EN",
+ "FLASH_NOW";
+ max-current = <1500>;
+ max-duration = <1200>;
+ };
diff --git a/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt b/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
index 937ccb9..46649af 100644
--- a/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
@@ -81,6 +81,7 @@
limits.
- qcom,mdss-rot-vbif-qos-setting: This array is used to program vbif qos remapper register
priority for rotator clients.
+- qcom,mdss-rot-vbif-memtype: Array of u32 vbif memory type settings for each xin port.
- qcom,mdss-rot-cdp-setting: Integer array of size two, to indicate client driven
prefetch is available or not. Index 0 represents
if CDP is enabled for read and index 1, if CDP
@@ -120,6 +121,8 @@
minimum allowable length configuration value.
- qcom,sde-ubwc-swizzle: A u32 property to specify the default UBWC
swizzle configuration value.
+- qcom,rot-reg-bus: Property to provide Bus scaling for register
+ access for rotator blocks.
Subnode properties:
- compatible: Compatible name used in smmu v2.
@@ -171,6 +174,7 @@
/* VBIF QoS remapper settings*/
qcom,mdss-rot-vbif-qos-setting = <1 1 1 1>;
+ qcom,mdss-rot-vbif-memtype = <3 3>;
com,mdss-rot-cdp-setting = <1 1>;
@@ -189,6 +193,16 @@
cache-slice-names = "rotator";
cache-slices = <&llcc 4>;
+ rot_reg: qcom,rot-reg-bus {
+ qcom,msm-bus,name = "mdss_rot_reg";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,active-only;
+ qcom,msm-bus,vectors-KBps =
+ <1 590 0 0>,
+ <1 590 0 76800>;
+ };
+
smmu_rot_unsec: qcom,smmu_rot_unsec_cb {
compatible = "qcom,smmu_sde_rot_unsec";
iommus = <&mdp_smmu 0xe00>;
diff --git a/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.txt b/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.txt
index 6ac06c1..5b6bd97 100644
--- a/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.txt
+++ b/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.txt
@@ -42,6 +42,8 @@
see:
Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb.txt
- interrupt-names: Corresponding interrupt name to the interrupts property
+- qcom,can-sleep: Boolean flag indicating that processes waiting on SPMI
+ transactions may sleep
Each child node of SPMI slave id represents a function of the PMIC. In the
example below the rtc device node represents a peripheral of pm8941
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
index da9a632..24c75e2 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
+++ b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
@@ -16,6 +16,9 @@
Required "interrupt-names" are "hc_irq" and "pwr_irq".
- <supply-name>-supply: phandle to the regulator device tree node
Required "supply-name" are "vdd" and "vdd-io".
+ - qcom,ice-clk-rates: this is an array that specifies supported Inline
+ Crypto Engine (ICE) clock frequencies, Units - Hz.
+ - sdhc-msm-crypto: phandle to SDHC ICE node
Required alias:
- The slot number is specified via an alias with the following format
@@ -77,6 +80,11 @@
register dumps on CRC errors and also downgrade bus speed mode to
SDR50/DDR50 in case of continuous CRC errors. Set this flag to enable
this workaround.
+ - qcom,restore-after-cx-collapse - specifies whether the SDCC registers contents need
+ to be saved and restored by software when the CX Power Collapse feature is enabled.
+ On certain chipsets, coming out of the CX Power Collapse event, the SDCC registers
+ contents will not be retained. It is the software's responsibility to restore the
+ SDCC registers before resuming normal operation.
In the following, <supply> can be vdd (flash core voltage) or vdd-io (I/O voltage).
- qcom,<supply>-always-on - specifies whether supply should be kept "on" always.
@@ -116,6 +124,7 @@
reg-names = "hc_mem", "core_mem";
interrupts = <0 123 0>, <0 138 0>;
interrupt-names = "hc_irq", "pwr_irq";
+ sdhc-msm-crypto = <&sdcc1_ice>;
vdd-supply = <&pm8941_l21>;
vdd-io-supply = <&pm8941_l13>;
@@ -138,6 +147,7 @@
qcom,nonremovable;
qcom,large-address-bus;
qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
+ qcom,ice-clk-rates = <300000000 150000000>;
qcom,scaling-lower-bus-speed-mode = "DDR52";
diff --git a/Documentation/devicetree/bindings/msm_hdcp/msm_hdcp.txt b/Documentation/devicetree/bindings/msm_hdcp/msm_hdcp.txt
new file mode 100644
index 0000000..8d5f55d
--- /dev/null
+++ b/Documentation/devicetree/bindings/msm_hdcp/msm_hdcp.txt
@@ -0,0 +1,14 @@
+MSM HDCP driver
+
+Standalone driver managing HDCP related communications
+between TZ and HLOS for MSM chipset.
+
+Required properties:
+
+compatible = "qcom,msm-hdcp";
+
+Example:
+
+qcom_msmhdcp: qcom,msm_hdcp {
+ compatible = "qcom,msm-hdcp";
+};
diff --git a/Documentation/devicetree/bindings/net/mediatek-net.txt b/Documentation/devicetree/bindings/net/mediatek-net.txt
index c010faf..c7194e8 100644
--- a/Documentation/devicetree/bindings/net/mediatek-net.txt
+++ b/Documentation/devicetree/bindings/net/mediatek-net.txt
@@ -7,7 +7,7 @@
* Ethernet controller node
Required properties:
-- compatible: Should be "mediatek,mt7623-eth"
+- compatible: Should be "mediatek,mt2701-eth"
- reg: Address and length of the register set for the device
- interrupts: Should contain the three frame engines interrupts in numeric
order. These are fe_int0, fe_int1 and fe_int2.
diff --git a/Documentation/devicetree/bindings/net/phy.txt b/Documentation/devicetree/bindings/net/phy.txt
index bc1c3c8..62bdc5f 100644
--- a/Documentation/devicetree/bindings/net/phy.txt
+++ b/Documentation/devicetree/bindings/net/phy.txt
@@ -35,6 +35,15 @@
- broken-turn-around: If set, indicates the PHY device does not correctly
release the turn around line low at the end of a MDIO transaction.
+- eee-broken-100tx:
+- eee-broken-1000t:
+- eee-broken-10gt:
+- eee-broken-1000kx:
+- eee-broken-10gkx4:
+- eee-broken-10gkr:
+ Mark the corresponding energy efficient ethernet mode as broken and
+ request the ethernet to stop advertising it.
+
Example:
ethernet-phy@0 {
diff --git a/Documentation/devicetree/bindings/net/ti,dp83867.txt b/Documentation/devicetree/bindings/net/ti,dp83867.txt
index 5d21141..75bcaa3 100644
--- a/Documentation/devicetree/bindings/net/ti,dp83867.txt
+++ b/Documentation/devicetree/bindings/net/ti,dp83867.txt
@@ -3,9 +3,11 @@
Required properties:
- reg - The ID number for the phy, usually a small integer
- ti,rx-internal-delay - RGMII Receive Clock Delay - see dt-bindings/net/ti-dp83867.h
- for applicable values
+ for applicable values. Required only if interface type is
+ PHY_INTERFACE_MODE_RGMII_ID or PHY_INTERFACE_MODE_RGMII_RXID
- ti,tx-internal-delay - RGMII Transmit Clock Delay - see dt-bindings/net/ti-dp83867.h
- for applicable values
+ for applicable values. Required only if interface type is
+ PHY_INTERFACE_MODE_RGMII_ID or PHY_INTERFACE_MODE_RGMII_TXID
- ti,fifo-depth - Transmit FIFO depth - see dt-bindings/net/ti-dp83867.h
for applicable values
diff --git a/Documentation/devicetree/bindings/platform/msm/msm_tspp.txt b/Documentation/devicetree/bindings/platform/msm/msm_tspp.txt
new file mode 100644
index 0000000..b4ce7cb
--- /dev/null
+++ b/Documentation/devicetree/bindings/platform/msm/msm_tspp.txt
@@ -0,0 +1,83 @@
+* TSPP ( QTI Transport Stream Packet Processor )
+
+Hardware driver for QTI TSIF 12seg wrapper core, which consists of a TSPP, a
+BAM (Bus access manager, used for DMA) and two TSIF inputs.
+
+The TSPP driver is responsible for:
+ - TSPP/TSIF hardware configuration (using SPS driver to configure BAM hardware)
+ - TSIF GPIO/Clocks configuration
+ - Memory resource management
+ - Handling TSIF/TSPP interrupts and BAM events
+ - TSPP Power management
+
+Required properties:
+- compatible : Should be "qcom,msm_tspp"
+- reg : Specifies the base physical addresses and sizes of TSIF, TSPP & BAM registers.
+- reg-names : Specifies the register names of TSIF, TSPP & BAM base registers.
+- interrupts : Specifies the interrupts associated with TSIF 12 seg core.
+- interrupt-names: Specifies interrupt names for TSIF, TSPP & BAM interrupts.
+- clock-names: Specifies the clock names used for interface & reference clocks.
+- clocks: GCC_TSIF_AHB_CLK clock for interface clock & GCC_TSIF_REF_CLK clock for reference clock.
+- qcom,msm-bus,name: Should be "tsif"
+- qcom,msm-bus,num-cases: Depends on the use cases for bus scaling
+- qcom,msm-bus,num-paths: The paths for source and destination ports
+- qcom,msm-bus,vectors-KBps: Vectors for bus topology.
+- pinctrl-names: Names for the TSIF mode configuration to specify which TSIF interface is active.
+- qcom,smmu-s1-bypass : Boolean flag to bypass SMMU stage 1 translation.
+- iommus : A list of phandle and IOMMU specifier pairs that describe the IOMMU master interfaces of the device.
+
+Example:
+
+ tspp: msm_tspp@0x8880000 {
+ compatible = "qcom,msm_tspp";
+ reg = <0x088a7000 0x200>, /* MSM_TSIF0_PHYS */
+ <0x088a8000 0x200>, /* MSM_TSIF1_PHYS */
+ <0x088a9000 0x1000>, /* MSM_TSPP_PHYS */
+ <0x08884000 0x23000>; /* MSM_TSPP_BAM_PHYS */
+ reg-names = "MSM_TSIF0_PHYS",
+ "MSM_TSIF1_PHYS",
+ "MSM_TSPP_PHYS",
+ "MSM_TSPP_BAM_PHYS";
+ interrupts = <0 121 0>, /* TSIF_TSPP_IRQ */
+ <0 119 0>, /* TSIF0_IRQ */
+ <0 120 0>, /* TSIF1_IRQ */
+ <0 122 0>; /* TSIF_BAM_IRQ */
+ interrupt-names = "TSIF_TSPP_IRQ",
+ "TSIF0_IRQ",
+ "TSIF1_IRQ",
+ "TSIF_BAM_IRQ";
+
+ clock-names = "iface_clk", "ref_clk";
+ clocks = <&clock_gcc GCC_TSIF_AHB_CLK>,
+ <&clock_gcc GCC_TSIF_REF_CLK>;
+
+ qcom,msm-bus,name = "tsif";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <82 512 0 0>, /* No vote */
+ <82 512 12288 24576>;
+ /* Max. bandwidth, 2xTSIF, each max of 96Mbps */
+
+ pinctrl-names = "disabled",
+ "tsif0-mode1", "tsif0-mode2",
+ "tsif1-mode1", "tsif1-mode2",
+ "dual-tsif-mode1", "dual-tsif-mode2";
+
+ pinctrl-0 = <>; /* disabled */
+ pinctrl-1 = <&tsif0_signals_active>; /* tsif0-mode1 */
+ pinctrl-2 = <&tsif0_signals_active
+ &tsif0_sync_active>; /* tsif0-mode2 */
+ pinctrl-3 = <&tsif1_signals_active>; /* tsif1-mode1 */
+ pinctrl-4 = <&tsif1_signals_active
+ &tsif1_sync_active>; /* tsif1-mode2 */
+ pinctrl-5 = <&tsif0_signals_active
+ &tsif1_signals_active>; /* dual-tsif-mode1 */
+ pinctrl-6 = <&tsif0_signals_active
+ &tsif0_sync_active
+ &tsif1_signals_active
+ &tsif1_sync_active>; /* dual-tsif-mode2 */
+
+ qcom,smmu-s1-bypass;
+ iommus = <&apps_smmu 0x20 0x0f>;
+ };
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
index 0123682..d0d878b 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
@@ -385,6 +385,11 @@
property "qcom,slope-limit-temp-threshold" to make dynamic
slope limit adjustment functional.
+- qcom,fg-bmd-en-delay-ms
+ Usage: optional
+ Value type: <u32>
+ Definition: The delay in ms for FG to enable BMD after reading RID.
+
==========================================================
Second Level Nodes - Peripherals managed by FG Gen3 driver
==========================================================
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
index 441d771..05fa6e4 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
@@ -85,21 +85,6 @@
maximum charge current in mA for each thermal
level.
-- qcom,step-soc-thresholds
- Usage: optional
- Value type: Array of <u32>
- Definition: Array of SOC threshold values, size of 4. This should be a
- flat array that denotes the percentage ranging from 0 to 100.
- If the array is not present, step charging is disabled.
-
-- qcom,step-current-deltas
- Usage: optional
- Value type: Array of <s32>
- Definition: Array of delta values for charging current, size of 5, with
- FCC as base. This should be a flat array that denotes the
- offset of charging current in uA, from -3100000 to 3200000.
- If the array is not present, step charging is disabled.
-
- io-channels
Usage: optional
Value type: List of <phandle u32>
@@ -182,6 +167,22 @@
Definition: Specifies the deglitch interval for OTG detection.
If the value is not present, 50 msec is used as default.
+- qcom,step-charging-enable
+ Usage: optional
+ Value type: bool
+ Definition: Boolean flag which when present enables step-charging.
+
+- qcom,wd-bark-time-secs
+ Usage: optional
+ Value type: <u32>
+ Definition: WD bark-timeout in seconds. The possible values are
+ 16, 32, 64, 128. If not defined it defaults to 64.
+
+- qcom,sw-jeita-enable
+ Usage: optional
+ Value type: bool
+ Definition: Boolean flag which when present enables sw compensation for jeita
+
=============================================
Second Level Nodes - SMB2 Charger Peripherals
=============================================
@@ -217,9 +218,6 @@
dpdm-supply = <&qusb_phy0>;
- qcom,step-soc-thresholds = <60 70 80 90>;
- qcom,step-current-deltas = <500000 250000 150000 0 (-150000)>;
-
qcom,chgr@1000 {
reg = <0x1000 0x100>;
interrupts = <0x2 0x10 0x0 IRQ_TYPE_NONE>,
diff --git a/Documentation/devicetree/bindings/slimbus/slim-msm-ctrl.txt b/Documentation/devicetree/bindings/slimbus/slim-msm-ctrl.txt
index 95cc85a..7711b8b 100644
--- a/Documentation/devicetree/bindings/slimbus/slim-msm-ctrl.txt
+++ b/Documentation/devicetree/bindings/slimbus/slim-msm-ctrl.txt
@@ -65,6 +65,17 @@
and follow appropriate steps to ensure communication on the bus
can be resumed after subsystem restart. By default slimbus driver
register with ADSP subsystem.
+ - qcom,iommu-s1-bypass: Boolean flag to bypass IOMMU stage 1 translation.
+
+Optional subnodes:
+qcom,iommu_slim_ctrl_cb : Child node representing the Slimbus controller
+ context bank.
+
+Subnode Required properties:
+- compatible : Must be "qcom,iommu-slim-ctrl-cb";
+- iommus : A list of phandle and IOMMU specifier pairs that
+ describe the IOMMU master interfaces of the device.
+
Example:
slim@fe12f000 {
cell-index = <1>;
@@ -78,4 +89,9 @@
qcom,rxreg-access;
qcom,apps-ch-pipes = <0x60000000>;
qcom,ea-pc = <0x30>;
+
+ iommu_slim_ctrl_cb: qcom,iommu_slim_ctrl_cb {
+ compatible = "qcom,iommu-slim-ctrl-cb";
+ iommus = <&apps_smmu 0x1 0x0>;
+ };
};
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index c116e42..4d05e50 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -329,7 +329,7 @@
- qcom,gpio-connect Gpio that connects to parent
interrupt controller
-* audio-ext-clk
+* audio-ext-clk-up
Required properties:
@@ -2579,3 +2579,256 @@
qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrRight",
"SpkrLeft", "SpkrRight";
};
+
+* SDM670 ASoC Machine driver
+
+Required properties:
+- compatible : "qcom,sdm670-asoc-snd"
+- qcom,model : The user-visible name of this sound card.
+- qcom,msm-hs-micbias-type : This property is used to recognize the headset
+ micbias type, internal or external.
+- qcom,msm-mbhc-hphl-swh: This property is used to distinguish the headset HPHL
+switch type on the target; typically the switch type will be normally open or
+normally closed. The value for this property is 0 for normally closed and 1 for
+normally open.
+- qcom,msm-mbhc-gnd-swh: This property is used to distinguish the headset GND
+switch type on the target; typically the switch type will be normally open or
+normally closed. The value for this property is 0 for normally closed and 1 for
+normally open.
+- qcom,audio-routing : A list of the connections between audio components.
+- qcom,msm-gpios : Lists down all the gpio sets that are supported.
+- qcom,pinctrl-names : Lists all the possible combinations of the gpio sets
+mentioned in qcom,msm-gpios.
+- pinctrl-names : The combinations of gpio sets from above that are supported in
+the flavor.
+- pinctrl-# : Pinctrl states as mentioned in pinctrl-names.
+
+Optional properties:
+- qcom,cdc-us-euro-gpios : GPIO on which gnd/mic swap signal is coming.
+- qcom,msm-micbias1-ext-cap : Boolean. Enable micbias1 external
+capacitor mode.
+- qcom,msm-micbias2-ext-cap : Boolean. Enable micbias2 external
+capacitor mode.
+- qcom,wsa-disable : Boolean. Disables WSA speaker dai-links from sound node.
+- qcom,msm-spk-ext-pa : GPIO which enables external speaker pa.
+- qcom,msm-mclk-freq : This property is used to inform machine driver about
+mclk frequency needs to be configured for internal and external PA.
+- asoc-platform: This is phandle list containing the references to platform device
+ nodes that are used as part of the sound card dai-links.
+- asoc-platform-names: This property contains list of platform names. The order of
+ the platform names should match to that of the phandle order
+ given in "asoc-platform".
+- asoc-cpu: This is phandle list containing the references to cpu dai device nodes
+ that are used as part of the sound card dai-links.
+- asoc-cpu-names: This property contains list of cpu dai names. The order of the
+ cpu dai names should match to that of the phandle order given.
+- asoc-codec: This is phandle list containing the references to codec dai device
+ nodes that are used as part of the sound card dai-links.
+- asoc-codec-names: This property contains list of codec dai names. The order of the
+ codec dai names should match to that of the phandle order given
+ in "asoc-codec".
+- qcom,wsa-max-devs : Maximum number of WSA881x devices present in the target
+- qcom,wsa-devs : List of phandles for all possible WSA881x devices supported for the target
+- qcom,wsa-aux-dev-prefix : Name prefix with Left/Right configuration for WSA881x device
+- qcom,cdc-pdm-gpios : phandle for pdm gpios.
+- qcom,cdc-comp-gpios : phandle for compander gpios.
+- qcom,cdc-dmic-gpios : phandle for Digital mic clk and data gpios.
+- qcom,cdc-sdw-gpios : phandle for soundwire clk and data gpios.
+- qcom,msm-mbhc-moist-cfg: This property is used to set moisture detection
+ threshold values for different codecs. First parameter is V(voltage)
+ second one is i(current), third one is r (resistance). Depending on the
+ codec set corresponding element in array and set others to 0.
+
+Example:
+ sound {
+ compatible = "qcom,sdm670-asoc-snd";
+ qcom,model = "sdm670-snd-card";
+ qcom,msm-mclk-freq = <9600000>;
+ qcom,msm-mbhc-hphl-swh = <0>;
+ qcom,msm-mbhc-gnd-swh = <0>;
+ qcom,msm-hs-micbias-type = "internal";
+ qcom,msm-micbias1-ext-cap;
+ qcom,audio-routing =
+ "RX_BIAS", "MCLK",
+ "SPK_RX_BIAS", "MCLK",
+ "INT_LDO_H", "MCLK",
+ "MIC BIAS External", "Handset Mic",
+ "MIC BIAS Internal2", "Headset Mic",
+ "MIC BIAS External", "Secondary Mic",
+ "AMIC1", "MIC BIAS External",
+ "AMIC2", "MIC BIAS Internal2",
+ "AMIC3", "MIC BIAS External";
+ qcom,cdc-us-euro-gpios = <&msm_gpio 63 0>;
+ qcom,cdc-pdm-gpios = <&cdc_pdm_gpios>;
+ qcom,cdc-comp-gpios = <&cdc_comp_gpios>;
+ qcom,cdc-dmic-gpios = <&cdc_dmic_gpios>;
+ qcom,cdc-sdw-gpios = <&cdc_sdw_gpios>;
+ asoc-platform = <&pcm0>, <&pcm1>, <&voip>, <&voice>,
+ <&loopback>, <&compress>, <&hostless>,
+ <&afe>, <&lsm>, <&routing>, <&lpa>;
+ asoc-platform-names = "msm-pcm-dsp.0", "msm-pcm-dsp.1",
+ "msm-voip-dsp", "msm-pcm-voice", "msm-pcm-loopback",
+ "msm-compress-dsp", "msm-pcm-hostless", "msm-pcm-afe",
+ "msm-lsm-client", "msm-pcm-routing", "msm-pcm-lpa";
+ asoc-cpu = <&dai_pri_auxpcm>, <&dai_hdmi>,
+ <&dai_mi2s0>, <&dai_mi2s1>, <&dai_mi2s2>, <&dai_mi2s3>,
+ <&sb_0_rx>, <&sb_0_tx>, <&sb_1_rx>, <&sb_1_tx>,
+ <&sb_3_rx>, <&sb_3_tx>, <&sb_4_rx>, <&sb_4_tx>,
+ <&bt_sco_rx>, <&bt_sco_tx>, <&int_fm_rx>, <&int_fm_tx>,
+ <&afe_pcm_rx>, <&afe_pcm_tx>, <&afe_proxy_rx>, <&afe_proxy_tx>,
+ <&incall_record_rx>, <&incall_record_tx>, <&incall_music_rx>,
+ <&incall_music_2_rx>;
+ asoc-cpu-names = "msm-dai-q6-auxpcm.1", "msm-dai-q6-hdmi.8",
+ "msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
+ "msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
+ "msm-dai-q6-dev.16384", "msm-dai-q6-dev.16385",
+ "msm-dai-q6-dev.16386", "msm-dai-q6-dev.16387",
+ "msm-dai-q6-dev.16390", "msm-dai-q6-dev.16391",
+ "msm-dai-q6-dev.16392", "msm-dai-q6-dev.16393",
+ "msm-dai-q6-dev.12288", "msm-dai-q6-dev.12289",
+ "msm-dai-q6-dev.12292", "msm-dai-q6-dev.12293",
+ "msm-dai-q6-dev.224", "msm-dai-q6-dev.225",
+ "msm-dai-q6-dev.241", "msm-dai-q6-dev.240",
+ "msm-dai-q6-dev.32771", "msm-dai-q6-dev.32772",
+ "msm-dai-q6-dev.32773", "msm-dai-q6-dev.32770";
+ asoc-codec = <&stub_codec>;
+ asoc-codec-names = "msm-stub-codec.1";
+ qcom,wsa-max-devs = <2>;
+ qcom,wsa-devs = <&wsa881x_211>, <&wsa881x_212>,
+ <&wsa881x_213>, <&wsa881x_214>;
+ qcom,wsa-aux-dev-prefix = "SpkrRight", "SpkrLeft",
+ "SpkrRight", "SpkrLeft";
+ };
+
+* SDM670 ASoC Slimbus Machine driver
+
+Required properties:
+- compatible : "qcom,sdm670-asoc-snd-tasha" for tasha codec,
+ "qcom,sdm670-asoc-snd-tavil" for tavil codec.
+- qcom,model : The user-visible name of this sound card.
+- qcom,msm-mclk-freq : MCLK frequency value for external codec
+- qcom,msm-gpios : Lists down all the gpio sets that are supported.
+- qcom,pinctrl-names : Lists all the possible combinations of the gpio sets
+mentioned in qcom,msm-gpios. Say we have 2^N combinations for N GPIOs,
+this would list all the 2^N combinations.
+- pinctrl-names : The combinations of gpio sets from above that are supported in
+the flavor. This can sometimes be the same as qcom,pinctrl-names, i.e. with 2^N
+combinations, or will have fewer in case some combination is not supported.
+- pinctrl-# : Pinctrl states as mentioned in pinctrl-names.
+- qcom,audio-routing : A list of the connections between audio components.
+- asoc-platform: This is phandle list containing the references to platform device
+ nodes that are used as part of the sound card dai-links.
+- asoc-platform-names: This property contains list of platform names. The order of
+ the platform names should match to that of the phandle order
+ given in "asoc-platform".
+- asoc-cpu: This is phandle list containing the references to cpu dai device nodes
+ that are used as part of the sound card dai-links.
+- asoc-cpu-names: This property contains list of cpu dai names. The order of the
+ cpu dai names should match to that of the phandle order given
+ in "asoc-cpu". The cpu names are in the form of "%s.%d" form,
+ where the id (%d) field represents the back-end AFE port id that
+ this CPU dai is associated with.
+- asoc-codec: This is phandle list containing the references to codec dai device
+ nodes that are used as part of the sound card dai-links.
+- asoc-codec-names: This property contains list of codec dai names. The order of the
+ codec dai names should match to that of the phandle order given
+ in "asoc-codec".
+Optional properties:
+- qcom,cdc-us-euro-gpios : GPIO on which gnd/mic swap signal is coming.
+- clock-names : clock name defined for external clock.
+- clocks : external clock defined for codec clock.
+- qcom,wsa-max-devs : Maximum number of WSA881x devices present in the target
+- qcom,wsa-devs : List of phandles for all possible WSA881x devices supported for the target
+- qcom,wsa-aux-dev-prefix : Name prefix with Left/Right configuration for WSA881x device
+
+Example:
+
+ sound-9335 {
+ compatible = "qcom,sdm670-asoc-snd-tasha";
+ qcom,model = "sdm670-tasha-snd-card";
+
+ qcom,audio-routing =
+ "RX_BIAS", "MCLK",
+ "LDO_H", "MCLK",
+ "AIF4 MAD", "MCLK",
+ "ultrasound amp", "LINEOUT1",
+ "ultrasound amp", "LINEOUT3",
+ "AMIC1", "MIC BIAS1 Internal1",
+ "MIC BIAS1 Internal1", "Handset Mic",
+ "AMIC2", "MIC BIAS2 External",
+ "MIC BIAS2 External", "Headset Mic",
+ "AMIC3", "MIC BIAS2 External",
+ "MIC BIAS2 External", "ANCRight Headset Mic",
+ "AMIC4", "MIC BIAS2 External",
+ "MIC BIAS2 External", "ANCLeft Headset Mic",
+ "DMIC1", "MIC BIAS1 External",
+ "MIC BIAS1 External", "Digital Mic1",
+ "DMIC2", "MIC BIAS1 External",
+ "MIC BIAS1 External", "Digital Mic2",
+ "DMIC3", "MIC BIAS3 External",
+ "MIC BIAS3 External", "Digital Mic3",
+ "DMIC4", "MIC BIAS3 External",
+ "MIC BIAS3 External", "Digital Mic4",
+ "DMIC5", "MIC BIAS4 External",
+ "MIC BIAS4 External", "Digital Mic5",
+ "DMIC6", "MIC BIAS4 External",
+ "MIC BIAS4 External", "Digital Mic6";
+
+ qcom,msm-mbhc-hphl-swh = <0>;
+ qcom,msm-mbhc-gnd-swh = <0>;
+ qcom,msm-mclk-freq = <9600000>;
+ qcom,msm-gpios =
+ "slim",
+ "us_eu_gpio";
+ qcom,pinctrl-names =
+ "all_off",
+ "slim_act",
+ "us_eu_gpio_act",
+ "slim_us_eu_gpio_act";
+ pinctrl-names =
+ "all_off",
+ "slim_act",
+ "us_eu_gpio_act",
+ "slim_us_eu_gpio_act";
+ pinctrl-0 = <&cdc_slim_lines_sus &cross_conn_det_sus>;
+ pinctrl-1 = <&cdc_slim_lines_act &cross_conn_det_sus>;
+ pinctrl-2 = <&cdc_slim_lines_sus &cross_conn_det_act>;
+ pinctrl-3 = <&cdc_slim_lines_act &cross_conn_det_act>;
+ qcom,cdc-us-euro-gpios = <&msm_gpio 63 0>;
+ asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
+ <&loopback>, <&compress>, <&hostless>,
+ <&afe>, <&lsm>, <&routing>, <&cpe>, <&compr>;
+ asoc-platform-names = "msm-pcm-dsp.0", "msm-pcm-dsp.1",
+ "msm-pcm-dsp.2", "msm-voip-dsp",
+ "msm-pcm-voice", "msm-pcm-loopback",
+ "msm-compress-dsp", "msm-pcm-hostless",
+ "msm-pcm-afe", "msm-lsm-client",
+ "msm-pcm-routing", "msm-cpe-lsm",
+ "msm-compr-dsp";
+ asoc-cpu = <&dai_hdmi>,
+ <&sb_0_rx>, <&sb_0_tx>, <&sb_1_rx>, <&sb_1_tx>,
+ <&sb_2_rx>, <&sb_2_tx>, <&sb_3_rx>, <&sb_3_tx>,
+ <&sb_4_rx>, <&sb_4_tx>, <&sb_5_tx>,
+ <&afe_pcm_rx>, <&afe_pcm_tx>, <&afe_proxy_rx>,
+ <&afe_proxy_tx>, <&incall_record_rx>,
+ <&incall_record_tx>, <&incall_music_rx>,
+ <&incall_music_2_rx>, <&sb_5_rx>;
+ asoc-cpu-names = "msm-dai-q6-hdmi.8",
+ "msm-dai-q6-dev.16384", "msm-dai-q6-dev.16385",
+ "msm-dai-q6-dev.16386", "msm-dai-q6-dev.16387",
+ "msm-dai-q6-dev.16388", "msm-dai-q6-dev.16389",
+ "msm-dai-q6-dev.16390", "msm-dai-q6-dev.16391",
+ "msm-dai-q6-dev.16392", "msm-dai-q6-dev.16393",
+ "msm-dai-q6-dev.16395", "msm-dai-q6-dev.224",
+ "msm-dai-q6-dev.225", "msm-dai-q6-dev.241",
+ "msm-dai-q6-dev.240", "msm-dai-q6-dev.32771",
+ "msm-dai-q6-dev.32772", "msm-dai-q6-dev.32773",
+ "msm-dai-q6-dev.32770", "msm-dai-q6-dev.16394";
+ asoc-codec = <&stub_codec>;
+ asoc-codec-names = "msm-stub-codec.1";
+ qcom,wsa-max-devs = <2>;
+ qcom,wsa-devs = <&wsa881x_211>, <&wsa881x_212>,
+ <&wsa881x_213>, <&wsa881x_214>;
+ qcom,wsa-aux-dev-prefix = "SpkrRight", "SpkrLeft",
+ "SpkrRight", "SpkrLeft";
+ };
diff --git a/Documentation/devicetree/bindings/sound/wcd_codec.txt b/Documentation/devicetree/bindings/sound/wcd_codec.txt
index 0df9417..c848ab5 100644
--- a/Documentation/devicetree/bindings/sound/wcd_codec.txt
+++ b/Documentation/devicetree/bindings/sound/wcd_codec.txt
@@ -12,7 +12,6 @@
- qcom,wcd-rst-gpio-node: Phandle reference to the DT node having codec reset gpio
configuration. If this property is not defined, it is
expected to at least define "qcom,cdc-reset-gpio" property.
-
- cdc-vdd-buck-supply: phandle of buck supply's regulator device tree node.
- qcom,cdc-vdd-buck-voltage: buck supply's voltage level min and max in mV.
- qcom,cdc-vdd-buck-current: buck supply's max current in mA.
@@ -142,6 +141,11 @@
- clock-names : clock name defined for external clock.
- clocks : external clock defined for codec clock.
+ - qcom,has-buck-vsel-gpio: Boolean property to select if WCD_BUCK has VSEL
+ controlled by GPIO.
+ - qcom,buck-vsel-gpio-node: Phandle reference to the DT node having wcd buck
+ VSEL gpio configuration.
+
Example:
taiko_codec {
diff --git a/Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb-debug.txt b/Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb-debug.txt
index ceac719..2131c33 100644
--- a/Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb-debug.txt
+++ b/Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb-debug.txt
@@ -35,6 +35,19 @@
the corresponding addresses are specified in the reg
property.
+- clocks
+ Usage: optional
+ Value type: <prop-encoded-array>
+ Definition: Clock tuple consisting of a phandle to a clock controller
+ device and the clock ID number for the SPMI debug controller
+ clock.
+
+- clock-names
+ Usage: required if clocks property is specified
+ Value type: <string>
+ Definition: Defines the name of the clock defined in the "clocks"
+ property. This must be "core_clk".
+
- #address-cells
Usage: required
Value type: <u32>
@@ -57,6 +70,8 @@
compatible = "qcom,spmi-pmic-arb-debug";
reg = <0x6b22000 0x60>, <0x7820A8 4>;
reg-names = "core", "fuse";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "core_clk";
qcom,fuse-disable-bit = <12>;
#address-cells = <2>;
#size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/thermal/qcom-lmh-dcvs.txt b/Documentation/devicetree/bindings/thermal/qcom-lmh-dcvs.txt
index 8bead0d..be50d45 100644
--- a/Documentation/devicetree/bindings/thermal/qcom-lmh-dcvs.txt
+++ b/Documentation/devicetree/bindings/thermal/qcom-lmh-dcvs.txt
@@ -31,12 +31,29 @@
Definition: Should specify the cluster affinity this hardware
corresponds to.
+- isens_vref-supply:
+ Usage: optional
+ Value type: <phandle>
+ Definition: Should specify the phandle of the vref regulator used by
+ the isens hardware. This active only regulator will be
+ enabled by LMH DCVSh.
+
+- isens-vref-settings:
+ Usage: optional
+ Value type: <u32 array>
+ Definition: Should specify the min voltage(uV), max voltage(uV) and
+ max load(uA) for the isens vref regulator. This
+ property is valid only if there is valid entry for
+ isens_vref-supply.
+
Example:
lmh_dcvs0: qcom,limits-dcvs@0 {
compatible = "qcom,msm-hw-limits";
interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
qcom,affinity = <0>;
+ isens_vref-supply = <&pm8998_l1_ao>;
+ isens-vref-settings = <880000 880000 36000>;
};
CPU0: cpu@0 {
diff --git a/Documentation/devicetree/bindings/thermal/qti-qmi-cdev.txt b/Documentation/devicetree/bindings/thermal/qti-qmi-cdev.txt
new file mode 100644
index 0000000..51c5eac
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/qti-qmi-cdev.txt
@@ -0,0 +1,113 @@
+QMI thermal mitigation(TMD) cooling devices.
+
+The QMI TMD cooling device, will be used for various mitigations for remote
+subsystem including remote processor mitigation, rail voltage restriction etc.
+This cooling device uses kernel qti QMI interface to send the message to
+remote subsystem.
+
+Each child node of the QMI TMD devicetree node represents each remote
+subsystem and each child of this subsystem represents separate cooling
+devices. It requires minimum one remote subsystem node and each subsystem
+node requires minimum one cooling device node.
+
+Properties:
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: should be "qcom,qmi_cooling_devices"
+
+
+Subsystem properties:
+- qcom,instance-id:
+ Usage: required
+ Value type: <integer>
+ Definition: Remote subsystem QMI server instance id to be used for
+ communicating with QMI.
+
+ Minimum one child node is required. Child node name and its alias are
+ used as cooling device name and phandle for that cooling device.
+
+ cooling device node properties:
+ -qcom,qmi-dev-name:
+ Usage: required
+ Value type: <string>
+ Definition: Remote subsystem device identifier. Below strings
+ are the only acceptable device names,
+ "pa" -> for pa cooling device,
+ "cpuv_restriction_cold" -> for vdd restriction,
+ "cx_vdd_limit" -> for vdd limit,
+ "modem" -> for processor passive cooling device,
+ "modem_current" -> for current limiting device,
+ "modem_bw" -> for bus bandwidth limiting device,
+ "cpr_cold" -> for cpr restriction.
+
+ -#cooling-cells:
+ Usage: required
+ Value type: <integer>
+ Definition: Must be 2. Needed for of_thermal as cooling device
+ identifier. Please refer to
+ <devicetree/bindings/thermal/thermal.txt> for more
+ details.
+Example:
+
+ qmi-tmd-devices {
+ compatible = "qcom,qmi_cooling_devices";
+
+ modem {
+ qcom,instance-id = <0x0>;
+
+ modem_pa: modem_pa {
+ qcom,qmi-dev-name = "pa";
+ #cooling-cells = <2>;
+ };
+
+ modem_proc: modem_proc {
+ qcom,qmi-dev-name = "modem";
+ #cooling-cells = <2>;
+ };
+
+ modem_vdd: modem_vdd {
+ qcom,qmi-dev-name = "cpuv_restriction_cold";
+ #cooling-cells = <2>;
+ };
+
+ modem_current: modem_current {
+ qcom,qmi-dev-name = "modem_current";
+ #cooling-cells = <2>;
+ };
+
+ modem_cpr_cold: modem_cpr_cold {
+ qcom,qmi-dev-name = "cpr_cold";
+ #cooling-cells = <2>;
+ };
+ };
+
+ adsp {
+ qcom,instance-id = <0x1>;
+
+ adsp_vdd: adsp_vdd {
+ qcom,qmi-dev-name = "cpuv_restriction_cold";
+ #cooling-cells = <2>;
+ };
+ };
+
+ cdsp {
+ qcom,instance-id = <0x43>;
+
+ cdsp_vdd: cdsp_vdd {
+ qcom,qmi-dev-name = "cpuv_restriction_cold";
+ #cooling-cells = <2>;
+ };
+ };
+
+ slpi {
+ qcom,instance-id = <0x53>;
+
+ slpi_vdd: slpi_vdd {
+ qcom,qmi-dev-name = "cpuv_restriction_cold";
+ #cooling-cells = <2>;
+ };
+ };
+ };
+
diff --git a/Documentation/devicetree/bindings/thermal/qti-rpmh-reg-cdev.txt b/Documentation/devicetree/bindings/thermal/qti-rpmh-reg-cdev.txt
new file mode 100644
index 0000000..b7734ad
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/qti-rpmh-reg-cdev.txt
@@ -0,0 +1,44 @@
+RPMh regulator cooling device.
+
+The RPMh regulator cooling device, will be used to place a voltage floor
+restriction on a rail. This cooling device will use a QMP AOP mail box to send
+the message to apply and clear voltage floor restriction.
+
+The cooling device node should be a child of the regulator devicetree node,
+which it is trying to place the floor restriction.
+
+Properties:
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: shall be "qcom,rpmh-reg-cdev"
+
+- qcom,reg-resource-name:
+ Usage: required
+ Value type: <string>
+ Definition: The regulator resource name to be used for communicating
+ with RPMh. This value should be any of the below
+ resource name,
+ cx -> For CX rail,
+ mx -> For MX rail,
+ ebi -> For EBI rail.
+
+- mboxes:
+ Usage: required
+ Value type: <phandle>
+ Definition: A phandle to the QMP AOP mail box, that needs to be used
+ for sending the floor restriction message.
+
+- #cooling-cells: Must be 2. Please refer to
+ <devicetree/bindings/thermal/thermal.txt> for more
+ details.
+
+Example:
+
+ vdd_cx: rpmh-cx-regulator-cdev {
+ compatible = "qcom,rpmh-reg-cdev";
+ mboxes = <&qmp_aop 0>;
+ qcom,reg-resource-name = "cx";
+ #cooling-cells = <2>;
+ };
diff --git a/Documentation/devicetree/bindings/thermal/tsens.txt b/Documentation/devicetree/bindings/thermal/tsens.txt
index 3e59c43..67ffaed 100644
--- a/Documentation/devicetree/bindings/thermal/tsens.txt
+++ b/Documentation/devicetree/bindings/thermal/tsens.txt
@@ -18,6 +18,7 @@
should be "qcom,sdm660-tsens" for 660 TSENS driver.
should be "qcom,sdm630-tsens" for 630 TSENS driver.
should be "qcom,sdm845-tsens" for SDM845 TSENS driver.
+ should be "qcom,tsens24xx" for 2.4 TSENS controller.
The compatible property is used to identify the respective controller to use
for the corresponding SoC.
- reg : offset and length of the TSENS registers with associated property in reg-names
diff --git a/Documentation/devicetree/bindings/usb/msm-phy.txt b/Documentation/devicetree/bindings/usb/msm-phy.txt
index a9480be..6109fad 100644
--- a/Documentation/devicetree/bindings/usb/msm-phy.txt
+++ b/Documentation/devicetree/bindings/usb/msm-phy.txt
@@ -152,6 +152,7 @@
and reset lines used by this controller.
- reset-names: reset signal name strings sorted in the same order as the resets
property.
+ - qcom,qusb-phy-reg-offset: Provides important phy register offsets in an order defined in phy driver.
Optional properties:
- reg-names: Additional registers corresponding with the following:
@@ -174,7 +175,6 @@
- qcom,hold-reset: Indicates that hold QUSB PHY into reset state.
- qcom,phy-clk-scheme: Should be one of "cml" or "cmos" if ref_clk_addr is provided.
- qcom,major-rev: provide major revision number to differentiate power up sequence. default is 2.0
- - qcom,phy-auto-resume-offset: Provides phy auto-resume register offset.
Example:
qusb_phy: qusb@f9b39000 {
@@ -185,6 +185,13 @@
vdda18-supply = <&pm8994_l6>;
vdda33-supply = <&pm8994_l24>;
qcom,vdd-voltage-level = <1 5 7>;
+ qcom,qusb-phy-reg-offset =
+ <0x240 /* QUSB2PHY_PORT_TUNE1 */
+ 0x1a0 /* QUSB2PHY_PLL_COMMON_STATUS_ONE */
+ 0x210 /* QUSB2PHY_PWR_CTRL1 */
+ 0x230 /* QUSB2PHY_INTR_CTRL */
+ 0x0a8 /* QUSB2PHY_PLL_CORE_INPUT_OVERRIDE */
+ 0x254>; /* QUSB2PHY_TEST1 */
qcom,efuse-bit-pos = <21>;
qcom,efuse-num-bits = <3>;
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 6e027ae..ea40927 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -138,6 +138,7 @@
maps Memory maps to executables and library files (2.4)
mem Memory held by this process
root Link to the root directory of this process
+ reclaim Reclaim pages in this process
stat Process status
statm Process memory status information
status Process status in human readable form
@@ -528,6 +529,25 @@
Any other value written to /proc/PID/clear_refs will have no effect.
+The file /proc/PID/reclaim is used to reclaim pages in this process.
+To reclaim file-backed pages,
+ > echo file > /proc/PID/reclaim
+
+To reclaim anonymous pages,
+ > echo anon > /proc/PID/reclaim
+
+To reclaim all pages,
+ > echo all > /proc/PID/reclaim
+
+Also, you can specify address range of process so part of address space
+will be reclaimed. The format is following as
+ > echo addr size-byte > /proc/PID/reclaim
+
+NOTE: addr should be page-aligned.
+
+Below is example which try to reclaim 2M from 0x100000.
+ > echo 0x100000 2M > /proc/PID/reclaim
+
The /proc/pid/pagemap gives the PFN, which can be used to find the pageflags
using /proc/kpageflags and number of times a page is mapped using
/proc/kpagecount. For detailed explanation, see Documentation/vm/pagemap.txt.
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 8527965..2b576cc 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -3948,6 +3948,13 @@
spia_pedr=
spia_peddr=
+ stack_guard_gap= [MM]
+ override the default stack gap protection. The value
+ is in page units and it defines how many pages prior
+ to (for stacks growing down) resp. after (for stacks
+ growing up) the main stack are reserved for no other
+ mapping. Default value is 256 pages.
+
stacktrace [FTRACE]
Enabled the stack tracer on boot up.
diff --git a/Documentation/misc-devices/qcom_invoke_driver.txt b/Documentation/misc-devices/qcom_invoke_driver.txt
new file mode 100644
index 0000000..38c976a
--- /dev/null
+++ b/Documentation/misc-devices/qcom_invoke_driver.txt
@@ -0,0 +1,54 @@
+Introduction:
+=============
+Invoke driver is a misc driver which helps communication between non secure
+and secure world. Invoke driver communicates with secure side using SCM
+driver. To use invoke driver, open must be called on invoke device i.e.
+/dev/invoke. Invoke driver exposes only one IOCTL invoke which passes
+userspace request to TZ.
+
+SW Architecture
+===============
+Following is SW stack for Invoke driver.
+
++++++++++++++++++++++++++++++++++++++++++
++ Applications +
++++++++++++++++++++++++++++++++++++++++++
++ System Layer +
++++++++++++++++++++++++++++++++++++++++++
++ Kernel +
++ +++++++++++++++++++ +
++ + Invoke driver + +
++ +++++++++++++++++++ +
++ + SCM Driver + +
++++++++++++++++++++++++++++++++++++++++++
+ ||
+ ||
+ \/
++++++++++++++++++++++++++++++++++++++++++
++ Trust Zone +
++ +++++++++++ +++++++++++ +
++ + TZ App1 + + TZ App2 + +
++++++++++++++++++++++++++++++++++++++++++
+
+
+Interfaces
+==========
+Invoke driver exposes INVOKE_IOCTL_INVOKE_REQ IOCTL for userspace to
+communicate with the driver. More details of the IOCTL are available in the
+corresponding header file.
+
+
+Driver Parameters
+=================
+This driver is built and statically linked into the kernel; therefore,
+there are no module parameters supported by this driver.
+
+There are no kernel command line parameters supported by this driver.
+
+Power Management
+================
+TBD
+
+Dependencies
+============
+Invoke driver depends on SCM driver to communicate with TZ.
diff --git a/Makefile b/Makefile
index 9e428c5..5894331 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 9
-SUBLEVEL = 32
+SUBLEVEL = 40
EXTRAVERSION =
NAME = Roaring Lionus
@@ -565,7 +565,7 @@
# Objects we will link into vmlinux / subdirs we need to visit
init-y := init/
-drivers-y := drivers/ sound/ firmware/
+drivers-y := drivers/ sound/ firmware/ techpack/
net-y := net/
libs-y := lib/
core-y := usr/
@@ -633,6 +633,9 @@
KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
+KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation)
+KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow)
+KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context)
ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
KBUILD_CFLAGS += $(call cc-option,-ffunction-sections,)
@@ -655,6 +658,12 @@
# Tell gcc to never replace conditional load with a non-conditional one
KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
+# check for 'asm goto'
+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
+ KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
+ KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
+endif
+
include scripts/Makefile.gcc-plugins
ifdef CONFIG_READABLE_ASM
@@ -800,12 +809,6 @@
# use the deterministic mode of AR if available
KBUILD_ARFLAGS := $(call ar-option,D)
-# check for 'asm goto'
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y)
- KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
- KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
-endif
-
include scripts/Makefile.kasan
include scripts/Makefile.extrawarn
include scripts/Makefile.ubsan
@@ -1152,6 +1155,7 @@
$(error Headers not exportable for the $(SRCARCH) architecture))
$(Q)$(MAKE) $(hdr-inst)=include/uapi
$(Q)$(MAKE) $(hdr-inst)=arch/$(hdr-arch)/include/uapi/asm $(hdr-dst)
+ $(Q)$(MAKE) $(hdr-inst)=techpack
PHONY += headers_check_all
headers_check_all: headers_install_all
@@ -1161,6 +1165,7 @@
headers_check: headers_install
$(Q)$(MAKE) $(hdr-inst)=include/uapi HDRCHECK=1
$(Q)$(MAKE) $(hdr-inst)=arch/$(hdr-arch)/include/uapi/asm $(hdr-dst) HDRCHECK=1
+ $(Q)$(MAKE) $(hdr-inst)=techpack HDRCHECK=1
# ---------------------------------------------------------------------------
# Kernel selftest
diff --git a/arch/Kconfig b/arch/Kconfig
index 659bdd0..babac73 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -218,6 +218,12 @@
config GENERIC_IDLE_POLL_SETUP
bool
+config ARCH_HAS_FORTIFY_SOURCE
+ bool
+ help
+ An architecture should select this when it can successfully
+ build and run with CONFIG_FORTIFY_SOURCE.
+
# Select if arch init_task initializer is different to init/init_task.c
config ARCH_INIT_TASK
bool
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 689dd86..8b90d25 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -71,14 +71,14 @@
GET_CPU_ID r5
cmp r5, 0
mov.nz r0, r5
-#ifdef CONFIG_ARC_SMP_HALT_ON_RESET
- ; Non-Master can proceed as system would be booted sufficiently
- jnz first_lines_of_secondary
-#else
+ bz .Lmaster_proceed
+
; Non-Masters wait for Master to boot enough and bring them up
- jnz arc_platform_smp_wait_to_boot
-#endif
- ; Master falls thru
+ ; when they resume, tail-call to entry point
+ mov blink, @first_lines_of_secondary
+ j arc_platform_smp_wait_to_boot
+
+.Lmaster_proceed:
#endif
; Clear BSS before updating any globals
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index 88674d9..2afbafa 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -90,22 +90,37 @@
*/
static volatile int wake_flag;
+#ifdef CONFIG_ISA_ARCOMPACT
+
+#define __boot_read(f) f
+#define __boot_write(f, v) f = v
+
+#else
+
+#define __boot_read(f) arc_read_uncached_32(&f)
+#define __boot_write(f, v) arc_write_uncached_32(&f, v)
+
+#endif
+
static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
{
BUG_ON(cpu == 0);
- wake_flag = cpu;
+
+ __boot_write(wake_flag, cpu);
}
void arc_platform_smp_wait_to_boot(int cpu)
{
- while (wake_flag != cpu)
+ /* for halt-on-reset, we've waited already */
+ if (IS_ENABLED(CONFIG_ARC_SMP_HALT_ON_RESET))
+ return;
+
+ while (__boot_read(wake_flag) != cpu)
;
- wake_flag = 0;
- __asm__ __volatile__("j @first_lines_of_secondary \n");
+ __boot_write(wake_flag, 0);
}
-
const char *arc_platform_smp_cpuinfo(void)
{
return plat_smp_ops.info ? : "";
diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c
index 2e06d56..cf4ae69 100644
--- a/arch/arc/mm/mmap.c
+++ b/arch/arc/mm/mmap.c
@@ -64,7 +64,7 @@
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index d04e168..21c66eb 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -117,7 +117,7 @@
config ARM_DMA_IOMMU_ALIGNMENT
int "Maximum PAGE_SIZE order of alignment for DMA IOMMU buffers"
range 4 9
- default 8
+ default 9
help
DMA mapping framework by default aligns all buffers to the smallest
PAGE_SIZE order which is greater than or equal to the requested buffer
@@ -230,6 +230,9 @@
config ARCH_MTD_XIP
bool
+config ARCH_WANT_KMAP_ATOMIC_FLUSH
+ bool
+
config VECTORS_BASE
hex
default 0xffff0000 if MMU || CPU_HIGH_VECTOR
@@ -567,6 +570,7 @@
select SPARSE_IRQ
select USE_OF
select PINCTRL
+ select ARCH_WANT_KMAP_ATOMIC_FLUSH
help
Support for Qualcomm MSM/QSD based systems. This runs on the
apps processor of the MSM/QSD and depends on a shared memory
@@ -1760,6 +1764,29 @@
source "mm/Kconfig"
+choice
+ prompt "Virtual Memory Reclaim"
+ default NO_VM_RECLAIM
+ help
+ Select the method of reclaiming virtual memory
+
+config ENABLE_VMALLOC_SAVING
+ bool "Reclaim memory for each subsystem"
+ help
+ Enable this config to reclaim the virtual space belonging
+ to any subsystem which is expected to have a lifetime of
+ the entire system. This feature allows lowmem to be non-
+ contiguous.
+
+config NO_VM_RECLAIM
+ bool "Do not reclaim memory"
+ help
+ Do not reclaim any memory. This might result in less lowmem
+ and wasting virtual memory space which could otherwise be
+ reclaimed by using any of the other two config options.
+
+endchoice
+
config FORCE_MAX_ZONEORDER
int "Maximum zone order"
default "12" if SOC_AM33XX
diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
index 8f9a69c..efe5399 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
+++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
@@ -121,7 +121,7 @@
&i2c3 {
clock-frequency = <400000>;
at24@50 {
- compatible = "at24,24c02";
+ compatible = "atmel,24c64";
readonly;
reg = <0x50>;
};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
index a2a3231..45a0fdc 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
@@ -211,6 +211,94 @@
reg-names = "pshold-base", "tcsr-boot-misc-detect";
};
+ tsens0: tsens@c222000 {
+ compatible = "qcom,tsens24xx";
+ reg = <0xc222000 0x4>,
+ <0xc263000 0x1ff>;
+ reg-names = "tsens_srot_physical",
+ "tsens_tm_physical";
+ interrupts = <0 163 0>, <0 165 0>;
+ interrupt-names = "tsens-upper-lower", "tsens-critical";
+ #thermal-sensor-cells = <1>;
+ };
+
+ thermal_zones: thermal-zones {
+ mpm-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&tsens0 0>;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ q6-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&tsens0 1>;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ ctile-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&tsens0 2>;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ cpu-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&tsens0 3>;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ mdm-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&tsens0 4>;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+ };
+
+ qcom,ipa_fws {
+ compatible = "qcom,pil-tz-generic";
+ qcom,pas-id = <0xf>;
+ qcom,firmware-name = "ipa_fws";
+ };
};
#include "sdxpoorwills-regulator.dtsi"
diff --git a/arch/arm/configs/ezx_defconfig b/arch/arm/configs/ezx_defconfig
index ea316c4..d3f1768 100644
--- a/arch/arm/configs/ezx_defconfig
+++ b/arch/arm/configs/ezx_defconfig
@@ -64,8 +64,8 @@
CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_SCTP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
diff --git a/arch/arm/configs/imote2_defconfig b/arch/arm/configs/imote2_defconfig
index 18e59fe..7f479cd 100644
--- a/arch/arm/configs/imote2_defconfig
+++ b/arch/arm/configs/imote2_defconfig
@@ -56,8 +56,8 @@
CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_SCTP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
diff --git a/arch/arm/configs/sdxpoorwills-perf_defconfig b/arch/arm/configs/sdxpoorwills-perf_defconfig
index 1674c11..b531fa5 100644
--- a/arch/arm/configs/sdxpoorwills-perf_defconfig
+++ b/arch/arm/configs/sdxpoorwills-perf_defconfig
@@ -175,8 +175,6 @@
# CONFIG_NET_VENDOR_INTEL is not set
CONFIG_KS8851=y
# CONFIG_NET_VENDOR_MICROCHIP is not set
-CONFIG_ECM_IPA=y
-CONFIG_RNDIS_IPA=y
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SMSC is not set
@@ -212,6 +210,7 @@
CONFIG_QCOM_DLOAD_MODE=y
CONFIG_POWER_SUPPLY=y
CONFIG_THERMAL=y
+CONFIG_THERMAL_TSENS=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_SOUND=y
@@ -270,6 +269,8 @@
CONFIG_STAGING=y
CONFIG_GSI=y
CONFIG_IPA3=y
+CONFIG_ECM_IPA=y
+CONFIG_RNDIS_IPA=y
CONFIG_IPA_UT=y
CONFIG_SPS=y
CONFIG_SPS_SUPPORT_NDP_BAM=y
diff --git a/arch/arm/configs/sdxpoorwills_defconfig b/arch/arm/configs/sdxpoorwills_defconfig
index 38a531f..88bd16c 100644
--- a/arch/arm/configs/sdxpoorwills_defconfig
+++ b/arch/arm/configs/sdxpoorwills_defconfig
@@ -167,8 +167,6 @@
# CONFIG_NET_VENDOR_INTEL is not set
CONFIG_KS8851=y
# CONFIG_NET_VENDOR_MICROCHIP is not set
-CONFIG_ECM_IPA=y
-CONFIG_RNDIS_IPA=y
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SMSC is not set
@@ -205,6 +203,7 @@
CONFIG_QCOM_DLOAD_MODE=y
CONFIG_POWER_SUPPLY=y
CONFIG_THERMAL=y
+CONFIG_THERMAL_TSENS=y
CONFIG_MSM_CDC_PINCTRL=y
CONFIG_MSM_CDC_SUPPLY=y
CONFIG_REGULATOR=y
@@ -266,6 +265,8 @@
CONFIG_STAGING=y
CONFIG_GSI=y
CONFIG_IPA3=y
+CONFIG_ECM_IPA=y
+CONFIG_RNDIS_IPA=y
CONFIG_IPA_UT=y
CONFIG_SPS=y
CONFIG_SPS_SUPPORT_NDP_BAM=y
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 58c6398..8e349ce 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -178,10 +178,26 @@
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/
+extern void __dma_map_area(const void *addr, size_t size, int dir);
+extern void __dma_unmap_area(const void *addr, size_t size, int dir);
extern void dmac_inv_range(const void *, const void *);
extern void dmac_clean_range(const void *, const void *);
extern void dmac_flush_range(const void *, const void *);
+static inline void __dma_inv_area(const void *start, size_t len)
+{
+ dmac_inv_range(start, start + len);
+}
+
+static inline void __dma_clean_area(const void *start, size_t len)
+{
+ dmac_clean_range(start, start + len);
+}
+
+static inline void __dma_flush_area(const void *start, size_t len)
+{
+ dmac_flush_range(start, start + len);
+}
#endif
/*
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
index b4e74af..74643f5 100644
--- a/arch/arm/include/asm/dma-iommu.h
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -8,6 +8,7 @@
#include <linux/dma-debug.h>
#include <linux/kmemcheck.h>
#include <linux/kref.h>
+#include <linux/dma-mapping-fast.h>
struct dma_iommu_mapping {
/* iommu specific data */
@@ -22,6 +23,8 @@
spinlock_t lock;
struct kref kref;
+
+ struct dma_fast_smmu_mapping *fast;
};
#ifdef CONFIG_ARM_DMA_USE_IOMMU
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index d2315ff..f13ae15 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -112,12 +112,8 @@
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE 4096
-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
- use of this is to invoke "./ld.so someprog" to test out a new version of
- the loader. We need to make sure that it is out of the way of the program
- that it will "exec", and that there is sufficient room for the brk. */
-
-#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+/* This is the base location for PIE (ET_DYN with INTERP) loads. */
+#define ELF_ET_DYN_BASE 0x400000UL
/* When the program starts, a1 contains a pointer to a function to be
registered with atexit, as per the SVR4 ABI. A value of 0 means we
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
index d14f310..f7c75dc 100644
--- a/arch/arm/include/asm/glue-cache.h
+++ b/arch/arm/include/asm/glue-cache.h
@@ -157,6 +157,11 @@
#define dmac_flush_range __glue(_CACHE,_dma_flush_range)
#define dmac_inv_range __glue(_CACHE, _dma_inv_range)
#define dmac_clean_range __glue(_CACHE, _dma_clean_range)
+#define dmac_map_area __glue(_CACHE, _dma_map_area)
+#define dmac_unmap_area __glue(_CACHE, _dma_unmap_area)
+
+#define __dma_map_area dmac_map_area
+#define __dma_unmap_area dmac_unmap_area
#endif
#endif
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 776757d..f23454d 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -148,6 +148,7 @@
#define TIF_USING_IWMMXT 17
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 20
+#define TIF_MM_RELEASED 21 /* task MM has been released */
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index e920dd8..f989145 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -222,6 +222,14 @@
dev_err(dev, "failed to idle\n");
}
break;
+ case BUS_NOTIFY_BIND_DRIVER:
+ od = to_omap_device(pdev);
+ if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED) &&
+ pm_runtime_status_suspended(dev)) {
+ od->_driver_status = BUS_NOTIFY_BIND_DRIVER;
+ pm_runtime_set_active(dev);
+ }
+ break;
case BUS_NOTIFY_ADD_DEVICE:
if (pdev->dev.of_node)
omap_device_build_from_dt(pdev);
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 19f444e..cb2c9f4 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -29,6 +29,7 @@
#include <linux/sizes.h>
#include <linux/cma.h>
#include <linux/msm_dma_iommu_mapping.h>
+#include <linux/dma-mapping-fast.h>
#include <asm/memory.h>
#include <asm/highmem.h>
@@ -50,6 +51,8 @@
pgprot_t prot;
const void *caller;
bool want_vaddr;
+ bool skip_cpu_sync;
+ bool skip_zeroing;
int coherent_flag;
};
@@ -113,6 +116,21 @@
static void __dma_page_dev_to_cpu(struct page *, unsigned long,
size_t, enum dma_data_direction);
+static void *
+__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
+ const void *caller);
+
+static void __dma_free_remap(void *cpu_addr, size_t size, bool no_warn);
+
+static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot);
+
+static void *arm_dma_remap(struct device *dev, void *cpu_addr,
+ dma_addr_t handle, size_t size,
+ unsigned long attrs);
+
+static void arm_dma_unremap(struct device *dev, void *remapped_addr,
+ size_t size);
+
/**
* arm_dma_map_page - map a portion of a page for streaming DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -194,6 +212,8 @@
.sync_single_for_device = arm_dma_sync_single_for_device,
.sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
.sync_sg_for_device = arm_dma_sync_sg_for_device,
+ .remap = arm_dma_remap,
+ .unremap = arm_dma_unremap,
};
EXPORT_SYMBOL(arm_dma_ops);
@@ -276,7 +296,8 @@
return mask;
}
-static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
+static void __dma_clear_buffer(struct page *page, size_t size,
+ bool skip_zeroing, int coherent_flag)
{
/*
* Ensure that the allocated pages are zeroed, and that any data
@@ -287,7 +308,8 @@
phys_addr_t end = base + size;
while (size > 0) {
void *ptr = kmap_atomic(page);
- memset(ptr, 0, PAGE_SIZE);
+ if (!skip_zeroing)
+ memset(ptr, 0, PAGE_SIZE);
if (coherent_flag != COHERENT)
dmac_flush_range(ptr, ptr + PAGE_SIZE);
kunmap_atomic(ptr);
@@ -298,7 +320,8 @@
outer_flush_range(base, end);
} else {
void *ptr = page_address(page);
- memset(ptr, 0, size);
+ if (!skip_zeroing)
+ memset(ptr, 0, size);
if (coherent_flag != COHERENT) {
dmac_flush_range(ptr, ptr + size);
outer_flush_range(__pa(ptr), __pa(ptr) + size);
@@ -327,7 +350,7 @@
for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
__free_page(p);
- __dma_clear_buffer(page, size, coherent_flag);
+ __dma_clear_buffer(page, size, false, coherent_flag);
return page;
}
@@ -350,6 +373,7 @@
static void *__alloc_from_contiguous(struct device *dev, size_t size,
pgprot_t prot, struct page **ret_page,
const void *caller, bool want_vaddr,
+ bool skip_cpu_sync, bool skip_zeroing,
int coherent_flag);
static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
@@ -369,10 +393,10 @@
prot, caller);
}
-static void __dma_free_remap(void *cpu_addr, size_t size)
+static void __dma_free_remap(void *cpu_addr, size_t size, bool no_warn)
{
dma_common_free_remap(cpu_addr, size,
- VM_ARM_DMA_CONSISTENT | VM_USERMAP, false);
+ VM_ARM_DMA_CONSISTENT | VM_USERMAP, no_warn);
}
#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
@@ -421,7 +445,8 @@
*/
if (dev_get_cma_area(NULL))
ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
- &page, atomic_pool_init, true, NORMAL);
+ &page, atomic_pool_init, true, false,
+ false, NORMAL);
else
ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
&page, atomic_pool_init, true);
@@ -520,21 +545,39 @@
return 0;
}
-static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
+static int __dma_clear_pte(pte_t *pte, pgtable_t token, unsigned long addr,
+ void *data)
+{
+ pte_clear(&init_mm, addr, pte);
+ return 0;
+}
+
+static void __dma_remap(struct page *page, size_t size, pgprot_t prot,
+ bool want_vaddr)
{
unsigned long start = (unsigned long) page_address(page);
unsigned end = start + size;
+ int (*func)(pte_t *pte, pgtable_t token, unsigned long addr,
+ void *data);
- apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
+ if (!want_vaddr)
+ func = __dma_clear_pte;
+ else
+ func = __dma_update_pte;
+
+ apply_to_page_range(&init_mm, start, size, func, &prot);
+ mb(); /*Ensure pte's are updated */
flush_tlb_kernel_range(start, end);
}
+
+#define NO_KERNEL_MAPPING_DUMMY 0x2222
static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
pgprot_t prot, struct page **ret_page,
const void *caller, bool want_vaddr)
{
struct page *page;
- void *ptr = NULL;
+ void *ptr = (void *)NO_KERNEL_MAPPING_DUMMY;
/*
* __alloc_remap_buffer is only called when the device is
* non-coherent
@@ -595,6 +638,7 @@
static void *__alloc_from_contiguous(struct device *dev, size_t size,
pgprot_t prot, struct page **ret_page,
const void *caller, bool want_vaddr,
+ bool skip_cpu_sync, bool skip_zeroing,
int coherent_flag)
{
unsigned long order = get_order(size);
@@ -606,23 +650,37 @@
if (!page)
return NULL;
- __dma_clear_buffer(page, size, coherent_flag);
-
- if (!want_vaddr)
- goto out;
+ /*
+ * skip completely if we neither need to zero nor sync.
+ */
+ if (!(skip_cpu_sync && skip_zeroing))
+ __dma_clear_buffer(page, size, skip_zeroing, coherent_flag);
if (PageHighMem(page)) {
- ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
- if (!ptr) {
- dma_release_from_contiguous(dev, page, count);
- return NULL;
+ if (!want_vaddr) {
+ /*
+ * Something non-NULL needs to be returned here. Give
+ * back a dummy address that is unmapped to catch
+ * clients trying to use the address incorrectly
+ */
+ ptr = (void *)NO_KERNEL_MAPPING_DUMMY;
+
+ /* also flush out the stale highmem mappings */
+ kmap_flush_unused();
+ kmap_atomic_flush_unused();
+ } else {
+ ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot,
+ caller);
+ if (!ptr) {
+ dma_release_from_contiguous(dev, page, count);
+ return NULL;
+ }
}
} else {
- __dma_remap(page, size, prot);
+ __dma_remap(page, size, prot, want_vaddr);
ptr = page_address(page);
}
- out:
*ret_page = page;
return ptr;
}
@@ -630,12 +688,10 @@
static void __free_from_contiguous(struct device *dev, struct page *page,
void *cpu_addr, size_t size, bool want_vaddr)
{
- if (want_vaddr) {
- if (PageHighMem(page))
- __dma_free_remap(cpu_addr, size);
- else
- __dma_remap(page, size, PAGE_KERNEL);
- }
+ if (PageHighMem(page))
+ __dma_free_remap(cpu_addr, size, true);
+ else
+ __dma_remap(page, size, PAGE_KERNEL, true);
dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}
@@ -656,10 +712,11 @@
#define __get_dma_pgprot(attrs, prot) __pgprot(0)
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c, wv) NULL
#define __alloc_from_pool(size, ret_page) NULL
-#define __alloc_from_contiguous(dev, size, prot, ret, c, wv, coherent_flag) NULL
+#define __alloc_from_contiguous(dev, size, prot, ret, c, \
+ wv, scs, sz, coherent_flag) NULL
#define __free_from_pool(cpu_addr, size) do { } while (0)
#define __free_from_contiguous(dev, page, cpu_addr, size, wv) do { } while (0)
-#define __dma_free_remap(cpu_addr, size) do { } while (0)
+#define __dma_free_remap(cpu_addr, size, w) do { } while (0)
#endif /* CONFIG_MMU */
@@ -698,7 +755,8 @@
{
return __alloc_from_contiguous(args->dev, args->size, args->prot,
ret_page, args->caller,
- args->want_vaddr, args->coherent_flag);
+ args->want_vaddr, args->skip_cpu_sync,
+ args->skip_zeroing, args->coherent_flag);
}
static void cma_allocator_free(struct arm_dma_free_args *args)
@@ -739,7 +797,7 @@
static void remap_allocator_free(struct arm_dma_free_args *args)
{
if (args->want_vaddr)
- __dma_free_remap(args->cpu_addr, args->size);
+ __dma_free_remap(args->cpu_addr, args->size, false);
__dma_free_buffer(args->page, args->size);
}
@@ -765,6 +823,8 @@
.prot = prot,
.caller = caller,
.want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
+ .skip_cpu_sync = (attrs & DMA_ATTR_SKIP_CPU_SYNC),
+ .skip_zeroing = (attrs & DMA_ATTR_SKIP_ZEROING),
.coherent_flag = is_coherent ? COHERENT : NORMAL,
};
@@ -826,7 +886,7 @@
kfree(buf);
}
- return args.want_vaddr ? addr : page;
+ return addr;
}
/*
@@ -874,6 +934,38 @@
return ret;
}
+static void *arm_dma_remap(struct device *dev, void *cpu_addr,
+ dma_addr_t handle, size_t size,
+ unsigned long attrs)
+{
+ void *ptr;
+ struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
+ pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
+ unsigned long offset = handle & ~PAGE_MASK;
+
+ size = PAGE_ALIGN(size + offset);
+ ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot,
+ __builtin_return_address(0));
+ return ptr ? ptr + offset : ptr;
+}
+
+static void arm_dma_unremap(struct device *dev, void *remapped_addr,
+ size_t size)
+{
+ unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP;
+ struct vm_struct *area;
+
+ remapped_addr = (void *)((unsigned long)remapped_addr & PAGE_MASK);
+
+ area = find_vm_area(remapped_addr);
+ if (!area || (area->flags & flags) != flags) {
+ WARN(1, "trying to free invalid coherent area: %p\n",
+ remapped_addr);
+ return;
+ }
+
+ vunmap(remapped_addr);
+}
/*
* Create userspace mapping for the DMA-coherent memory.
*/
@@ -1298,7 +1390,7 @@
if (!page)
goto error;
- __dma_clear_buffer(page, size, coherent_flag);
+ __dma_clear_buffer(page, size, false, coherent_flag);
for (i = 0; i < count; i++)
pages[i] = page + i;
@@ -1348,7 +1440,8 @@
pages[i + j] = pages[i] + j;
}
- __dma_clear_buffer(pages[i], PAGE_SIZE << order, coherent_flag);
+ __dma_clear_buffer(pages[i], PAGE_SIZE << order,
+ false, coherent_flag);
i += 1 << order;
count -= 1 << order;
}
@@ -2170,6 +2263,9 @@
if (!bitmap_size)
return ERR_PTR(-EINVAL);
+ WARN(!IS_ALIGNED(size, SZ_128M),
+ "size is not aligned to 128M, alignment enforced");
+
if (bitmap_size > PAGE_SIZE) {
extensions = bitmap_size / PAGE_SIZE;
bitmap_size = PAGE_SIZE;
@@ -2192,7 +2288,7 @@
mapping->nr_bitmaps = 1;
mapping->extensions = extensions;
mapping->base = base;
- mapping->bits = bits;
+ mapping->bits = BITS_PER_BYTE * bitmap_size;
spin_lock_init(&mapping->lock);
@@ -2284,12 +2380,21 @@
struct dma_iommu_mapping *mapping)
{
int err;
+ int s1_bypass = 0;
+ int is_fast = 0;
+
+ iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_FAST, &is_fast);
+ if (is_fast)
+ return fast_smmu_attach_device(dev, mapping);
err = __arm_iommu_attach_device(dev, mapping);
if (err)
return err;
- set_dma_ops(dev, &iommu_ops);
+ iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_S1_BYPASS,
+ &s1_bypass);
+ if (!s1_bypass)
+ set_dma_ops(dev, &iommu_ops);
return 0;
}
EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
@@ -2297,6 +2402,7 @@
static void __arm_iommu_detach_device(struct device *dev)
{
struct dma_iommu_mapping *mapping;
+ int is_fast;
mapping = to_dma_iommu_mapping(dev);
if (!mapping) {
@@ -2306,6 +2412,9 @@
if (msm_dma_unmap_all_for_dev(dev))
dev_warn(dev, "IOMMU detach with outstanding mappings\n");
+ iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_FAST, &is_fast);
+ if (is_fast)
+ return fast_smmu_detach_device(dev, mapping);
iommu_detach_device(mapping->domain, dev);
kref_put(&mapping->kref, release_iommu_mapping);
@@ -2323,8 +2432,21 @@
*/
void arm_iommu_detach_device(struct device *dev)
{
+ struct dma_iommu_mapping *mapping;
+ int s1_bypass = 0;
+
+ mapping = to_dma_iommu_mapping(dev);
+ if (!mapping) {
+ dev_warn(dev, "Not attached\n");
+ return;
+ }
+
__arm_iommu_detach_device(dev);
- set_dma_ops(dev, NULL);
+
+ iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_S1_BYPASS,
+ &s1_bypass);
+ if (!s1_bypass)
+ set_dma_ops(dev, NULL);
}
EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
diff --git a/arch/arm/mm/dma.h b/arch/arm/mm/dma.h
index 70ea6852..29c54f7 100644
--- a/arch/arm/mm/dma.h
+++ b/arch/arm/mm/dma.h
@@ -4,9 +4,6 @@
#include <asm/glue-cache.h>
#ifndef MULTI_CACHE
-#define dmac_map_area __glue(_CACHE,_dma_map_area)
-#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area)
-
/*
* These are private to the dma-mapping API. Do not use directly.
* Their sole purpose is to ensure that data held in the cache
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index d02f818..5d73327 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -10,6 +10,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
@@ -147,3 +148,58 @@
return (void *)vaddr;
}
+
+#ifdef CONFIG_ARCH_WANT_KMAP_ATOMIC_FLUSH
+static void kmap_remove_unused_cpu(int cpu)
+{
+ int start_idx, idx, type;
+
+ pagefault_disable();
+ type = kmap_atomic_idx();
+ start_idx = type + 1 + KM_TYPE_NR * cpu;
+
+ for (idx = start_idx; idx < KM_TYPE_NR + KM_TYPE_NR * cpu; idx++) {
+ unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+ pte_t ptep;
+
+ ptep = get_top_pte(vaddr);
+ if (ptep)
+ set_top_pte(vaddr, __pte(0));
+ }
+ pagefault_enable();
+}
+
+static void kmap_remove_unused(void *unused)
+{
+ kmap_remove_unused_cpu(smp_processor_id());
+}
+
+void kmap_atomic_flush_unused(void)
+{
+ on_each_cpu(kmap_remove_unused, NULL, 1);
+}
+
+static int hotplug_kmap_atomic_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ switch (action & (~CPU_TASKS_FROZEN)) {
+ case CPU_DYING:
+ kmap_remove_unused_cpu((int)hcpu);
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block hotplug_kmap_atomic_notifier = {
+ .notifier_call = hotplug_kmap_atomic_callback,
+};
+
+static int __init init_kmap_atomic(void)
+{
+ return register_hotcpu_notifier(&hotplug_kmap_atomic_notifier);
+}
+early_initcall(init_kmap_atomic);
+#endif
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 370581a..51496dd 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -469,6 +469,54 @@
#endif
}
+#define MLK(b, t) (b), (t), (((t) - (b)) >> 10)
+#define MLM(b, t) (b), (t), (((t) - (b)) >> 20)
+#define MLK_ROUNDUP(b, t) (b), (t), (DIV_ROUND_UP(((t) - (b)), SZ_1K))
+
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+static void print_vmalloc_lowmem_info(void)
+{
+ struct memblock_region *reg, *prev_reg = NULL;
+
+ pr_notice(
+ " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n",
+ MLM((unsigned long)high_memory, VMALLOC_END));
+
+ for_each_memblock_rev(memory, reg) {
+ phys_addr_t start_phys = reg->base;
+ phys_addr_t end_phys = reg->base + reg->size;
+
+ if (start_phys > arm_lowmem_limit)
+ continue;
+
+ if (end_phys > arm_lowmem_limit)
+ end_phys = arm_lowmem_limit;
+
+ if (prev_reg == NULL) {
+ prev_reg = reg;
+
+ pr_notice(
+ " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n",
+ MLM((unsigned long)__va(start_phys),
+ (unsigned long)__va(end_phys)));
+
+ continue;
+ }
+
+ pr_notice(
+ " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n",
+ MLM((unsigned long)__va(end_phys),
+ (unsigned long)__va(prev_reg->base)));
+
+
+ pr_notice(
+ " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n",
+ MLM((unsigned long)__va(start_phys),
+ (unsigned long)__va(end_phys)));
+ }
+}
+#endif
+
/*
* mem_init() marks the free areas in the mem_map and tells us how much
* memory is free. This is done after various parts of the system have
@@ -497,9 +545,6 @@
mem_init_print_info(NULL);
-#define MLK(b, t) b, t, ((t) - (b)) >> 10
-#define MLM(b, t) b, t, ((t) - (b)) >> 20
-#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
pr_notice("Virtual kernel memory layout:\n"
" vector : 0x%08lx - 0x%08lx (%4ld kB)\n"
@@ -507,29 +552,34 @@
" DTCM : 0x%08lx - 0x%08lx (%4ld kB)\n"
" ITCM : 0x%08lx - 0x%08lx (%4ld kB)\n"
#endif
- " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
- " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n"
- " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n"
-#ifdef CONFIG_HIGHMEM
- " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n"
-#endif
-#ifdef CONFIG_MODULES
- " modules : 0x%08lx - 0x%08lx (%4ld MB)\n"
-#endif
- " .text : 0x%p" " - 0x%p" " (%4td kB)\n"
- " .init : 0x%p" " - 0x%p" " (%4td kB)\n"
- " .data : 0x%p" " - 0x%p" " (%4td kB)\n"
- " .bss : 0x%p" " - 0x%p" " (%4td kB)\n",
-
+ " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n",
MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
(PAGE_SIZE)),
#ifdef CONFIG_HAVE_TCM
MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
MLK(ITCM_OFFSET, (unsigned long) itcm_end),
#endif
- MLK(FIXADDR_START, FIXADDR_END),
+ MLK(FIXADDR_START, FIXADDR_END));
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+ print_vmalloc_lowmem_info();
+#else
+ printk(KERN_NOTICE
+ " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n"
+ " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n",
MLM(VMALLOC_START, VMALLOC_END),
- MLM(PAGE_OFFSET, (unsigned long)high_memory),
+ MLM(PAGE_OFFSET, (unsigned long)high_memory));
+#endif
+ printk(KERN_NOTICE
+#ifdef CONFIG_HIGHMEM
+ " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n"
+#endif
+#ifdef CONFIG_MODULES
+ " modules : 0x%08lx - 0x%08lx (%4ld MB)\n"
+#endif
+ " .text : 0x%p" " - 0x%p" " (%4d kB)\n"
+ " .init : 0x%p" " - 0x%p" " (%4d kB)\n"
+ " .data : 0x%p" " - 0x%p" " (%4d kB)\n"
+ " .bss : 0x%p" " - 0x%p" " (%4d kB)\n",
#ifdef CONFIG_HIGHMEM
MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
(PAGE_SIZE)),
@@ -543,10 +593,6 @@
MLK_ROUNDUP(_sdata, _edata),
MLK_ROUNDUP(__bss_start, __bss_stop));
-#undef MLK
-#undef MLM
-#undef MLK_ROUNDUP
-
/*
* Check boundaries twice: Some fundamental inconsistencies can
* be detected at build time already.
@@ -573,6 +619,10 @@
}
#ifdef CONFIG_DEBUG_RODATA
+#undef MLK
+#undef MLM
+#undef MLK_ROUNDUP
+
struct section_perm {
const char *name;
unsigned long start;
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index ff0eed2..203728d 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -92,7 +92,8 @@
void *vaddr;
vm = &svm->vm;
- vm_area_add_early(vm);
+ if (!vm_area_check_early(vm))
+ vm_area_add_early(vm);
vaddr = vm->addr;
list_for_each_entry(curr_svm, &static_vmlist, list) {
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 66353ca..641334e 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -89,7 +89,7 @@
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
@@ -140,7 +140,7 @@
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 5cbfd9f..ddc72dc 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1168,6 +1168,19 @@
*/
vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET;
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+ struct memblock_region *prev_reg = NULL;
+
+ for_each_memblock(memory, reg) {
+ if (prev_reg == NULL) {
+ prev_reg = reg;
+ continue;
+ }
+ vmalloc_limit += reg->base - (prev_reg->base + prev_reg->size);
+ prev_reg = reg;
+ }
+#endif
+
for_each_memblock(memory, reg) {
phys_addr_t block_start = reg->base;
phys_addr_t block_end = reg->base + reg->size;
@@ -1211,15 +1224,15 @@
high_memory = __va(arm_lowmem_limit - 1) + 1;
+ if (!memblock_limit)
+ memblock_limit = arm_lowmem_limit;
+
/*
* Round the memblock limit down to a pmd size. This
* helps to ensure that we will allocate memory from the
* last full pmd, which should be mapped.
*/
- if (memblock_limit)
- memblock_limit = round_down(memblock_limit, PMD_SIZE);
- if (!memblock_limit)
- memblock_limit = arm_lowmem_limit;
+ memblock_limit = round_down(memblock_limit, PMD_SIZE);
if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) {
if (memblock_end_of_DRAM() > arm_lowmem_limit) {
@@ -1428,12 +1441,21 @@
phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
#endif
phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+ struct static_vm *svm;
+ phys_addr_t start;
+ phys_addr_t end;
+ unsigned long vaddr;
+ unsigned long pfn;
+ unsigned long length;
+ unsigned int type;
+ int nr = 0;
/* Map all the lowmem memory banks. */
for_each_memblock(memory, reg) {
- phys_addr_t start = reg->base;
- phys_addr_t end = start + reg->size;
struct map_desc map;
+ start = reg->base;
+ end = start + reg->size;
+ nr++;
if (memblock_is_nomap(reg))
continue;
@@ -1485,6 +1507,34 @@
}
}
}
+ svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));
+
+ for_each_memblock(memory, reg) {
+ struct vm_struct *vm;
+
+ start = reg->base;
+ end = start + reg->size;
+
+ if (end > arm_lowmem_limit)
+ end = arm_lowmem_limit;
+ if (start >= end)
+ break;
+
+ vm = &svm->vm;
+ pfn = __phys_to_pfn(start);
+ vaddr = __phys_to_virt(start);
+ length = end - start;
+ type = MT_MEMORY_RW;
+
+ vm->addr = (void *)(vaddr & PAGE_MASK);
+ vm->size = PAGE_ALIGN(length + (vaddr & ~PAGE_MASK));
+ vm->phys_addr = __pfn_to_phys(pfn);
+ vm->flags = VM_LOWMEM;
+ vm->flags |= VM_ARM_MTYPE(type);
+ vm->caller = map_lowmem;
+ add_static_vm_early(svm++);
+ mark_vmalloc_reserved_area(vm->addr, vm->size);
+ }
}
#ifdef CONFIG_ARM_PV_FIXUP
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 84867ba..b5f9be7 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -9,6 +9,7 @@
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_ELF_RANDOMIZE
+ select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_GIGANTIC_PAGE
select ARCH_HAS_KCOV
@@ -108,6 +109,7 @@
select POWER_SUPPLY
select SPARSE_IRQ
select SYSCTL_EXCEPTION_TRACE
+ select THREAD_INFO_IN_TASK
help
ARM 64-bit (AArch64) Linux support.
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index dae2f9f..f96fba6 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -116,6 +116,8 @@
select PINCTRL
select SOC_BUS
select PM_OPP
+ select MFD_CORE
+ select SND_SOC_COMPRESS
help
This enables support for the ARMv8 based Qualcomm chipsets.
diff --git a/arch/arm64/boot/Makefile b/arch/arm64/boot/Makefile
index 92dc1e6..b97f1de 100644
--- a/arch/arm64/boot/Makefile
+++ b/arch/arm64/boot/Makefile
@@ -28,6 +28,17 @@
DTB_OBJS := $(shell find $(obj)/dts/ -name \*.dtb)
endif
+# Add RTIC DTB to the DTB list if RTIC MPGen is enabled
+ifdef RTIC_MPGEN
+DTB_OBJS += rtic_mp.dtb
+endif
+
+rtic_mp.dtb: vmlinux FORCE
+ $(RTIC_MPGEN) --objcopy="${OBJCOPY}" --objdump="${OBJDUMP}" \
+ --binpath="" --vmlinux="vmlinux" --config=${KCONFIG_CONFIG} \
+ --cc="${CC}" --dts=rtic_mp.dts && \
+ $(DTC) -O dtb -o rtic_mp.dtb -b 0 $(DTC_FLAGS) rtic_mp.dts
+
$(obj)/Image: vmlinux FORCE
$(call if_changed,objcopy)
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
index e6e3491..f150a4c 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
@@ -85,6 +85,18 @@
status = "okay";
pinctrl-0 = <ð_pins>;
pinctrl-names = "default";
+ phy-handle = <ð_phy0>;
+
+ mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eth_phy0: ethernet-phy@0 {
+ reg = <0>;
+ eee-broken-1000t;
+ };
+ };
};
&ir {
diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
index e9bd587..49a5d8c 100644
--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
@@ -75,14 +75,10 @@
timer {
compatible = "arm,armv8-timer";
- interrupts = <GIC_PPI 13
- (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>,
- <GIC_PPI 14
- (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>,
- <GIC_PPI 11
- (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>,
- <GIC_PPI 10
- (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>;
+ interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_PPI 10 IRQ_TYPE_LEVEL_HIGH>;
};
soc {
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index 64b250d..ee7f735 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -2,6 +2,32 @@
dtb-$(CONFIG_ARCH_QCOM) += msm8996-mtp.dtb
dtb-$(CONFIG_ARCH_QCOM) += apq8096-db820c.dtb
+ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y)
+ dtbo-$(CONFIG_ARCH_SDM845) += \
+ sdm845-cdp-overlay.dtbo \
+ sdm845-mtp-overlay.dtbo \
+ sdm845-qrd-overlay.dtbo \
+ sdm845-v2-cdp-overlay.dtbo \
+ sdm845-v2-mtp-overlay.dtbo \
+ sdm845-v2-qrd-overlay.dtbo \
+ sdm845-4k-panel-mtp-overlay.dtbo \
+ sdm845-4k-panel-cdp-overlay.dtbo \
+ sdm845-4k-panel-qrd-overlay.dtbo \
+ sdm845-interposer-sdm670-cdp-overlay.dtbo \
+ sdm845-interposer-sdm670-mtp-overlay.dtbo
+
+sdm845-cdp-overlay.dtbo-base := sdm845.dtb
+sdm845-mtp-overlay.dtbo-base := sdm845.dtb
+sdm845-qrd-overlay.dtbo-base := sdm845.dtb
+sdm845-v2-cdp-overlay.dtbo-base := sdm845-v2.dtb
+sdm845-v2-mtp-overlay.dtbo-base := sdm845-v2.dtb
+sdm845-v2-qrd-overlay.dtbo-base := sdm845-v2.dtb
+sdm845-4k-panel-mtp-overlay.dtbo-base := sdm845.dtb
+sdm845-4k-panel-cdp-overlay.dtbo-base := sdm845.dtb
+sdm845-4k-panel-qrd-overlay.dtbo-base := sdm845.dtb
+sdm845-interposer-sdm670-cdp-overlay.dtbo-base := sdm845-interposer-sdm670.dtb
+sdm845-interposer-sdm670-mtp-overlay.dtbo-base := sdm845-interposer-sdm670.dtb
+else
dtb-$(CONFIG_ARCH_SDM845) += sdm845-sim.dtb \
sdm845-rumi.dtb \
sdm845-mtp.dtb \
@@ -13,15 +39,9 @@
sdm845-v2-qrd.dtb \
sdm845-4k-panel-mtp.dtb \
sdm845-4k-panel-cdp.dtb \
- sdm845-4k-panel-qrd.dtb
-
-ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y)
- dtbo-$(CONFIG_ARCH_SDM845) += \
- sdm845-cdp-overlay.dtbo \
- sdm845-mtp-overlay.dtbo
-
-sdm845-cdp-overlay.dtbo-base := sdm845.dtb
-sdm845-mtp-overlay.dtbo-base := sdm845.dtb
+ sdm845-4k-panel-qrd.dtb \
+ sdm845-interposer-sdm670-mtp.dtb \
+ sdm845-interposer-sdm670-cdp.dtb
endif
dtb-$(CONFIG_ARCH_SDM670) += sdm670-rumi.dtb \
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-cmd.dtsi
index c52c18b..0ca1175 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-cmd.dtsi
@@ -15,191 +15,19 @@
qcom,mdss-dsi-panel-name =
"nt35597 cmd mode dsi truly panel with DSC";
qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
- qcom,mdss-dsi-panel-framerate = <60>;
qcom,mdss-dsi-virtual-channel-id = <0>;
qcom,mdss-dsi-stream = <0>;
- qcom,mdss-dsi-panel-width = <1440>;
- qcom,mdss-dsi-panel-height = <2560>;
- qcom,mdss-dsi-h-front-porch = <100>;
- qcom,mdss-dsi-h-back-porch = <32>;
- qcom,mdss-dsi-h-pulse-width = <16>;
- qcom,mdss-dsi-h-sync-skew = <0>;
- qcom,mdss-dsi-v-back-porch = <8>;
- qcom,mdss-dsi-v-front-porch = <10>;
- qcom,mdss-dsi-v-pulse-width = <2>;
- qcom,mdss-dsi-h-left-border = <0>;
- qcom,mdss-dsi-h-right-border = <0>;
- qcom,mdss-dsi-v-top-border = <0>;
- qcom,mdss-dsi-v-bottom-border = <0>;
+ qcom,mdss-pan-physical-width-dimension = <74>;
+ qcom,mdss-pan-physical-height-dimension = <131>;
qcom,mdss-dsi-bpp = <24>;
qcom,mdss-dsi-color-order = "rgb_swap_rgb";
qcom,mdss-dsi-underflow-color = <0xff>;
qcom,mdss-dsi-border-color = <0>;
- qcom,mdss-dsi-on-command = [
- /* CMD2_P0 */
- 15 01 00 00 00 00 02 ff 20
- 15 01 00 00 00 00 02 fb 01
- 15 01 00 00 00 00 02 00 01
- 15 01 00 00 00 00 02 01 55
- 15 01 00 00 00 00 02 02 45
- 15 01 00 00 00 00 02 05 40
- 15 01 00 00 00 00 02 06 19
- 15 01 00 00 00 00 02 07 1e
- 15 01 00 00 00 00 02 0b 73
- 15 01 00 00 00 00 02 0c 73
- 15 01 00 00 00 00 02 0e b0
- 15 01 00 00 00 00 02 0f ae
- 15 01 00 00 00 00 02 11 b8
- 15 01 00 00 00 00 02 13 00
- 15 01 00 00 00 00 02 58 80
- 15 01 00 00 00 00 02 59 01
- 15 01 00 00 00 00 02 5a 00
- 15 01 00 00 00 00 02 5b 01
- 15 01 00 00 00 00 02 5c 80
- 15 01 00 00 00 00 02 5d 81
- 15 01 00 00 00 00 02 5e 00
- 15 01 00 00 00 00 02 5f 01
- 15 01 00 00 00 00 02 72 31
- 15 01 00 00 00 00 02 68 03
- /* CMD2_P4 */
- 15 01 00 00 00 00 02 ff 24
- 15 01 00 00 00 00 02 fb 01
- 15 01 00 00 00 00 02 00 1c
- 15 01 00 00 00 00 02 01 0b
- 15 01 00 00 00 00 02 02 0c
- 15 01 00 00 00 00 02 03 01
- 15 01 00 00 00 00 02 04 0f
- 15 01 00 00 00 00 02 05 10
- 15 01 00 00 00 00 02 06 10
- 15 01 00 00 00 00 02 07 10
- 15 01 00 00 00 00 02 08 89
- 15 01 00 00 00 00 02 09 8a
- 15 01 00 00 00 00 02 0a 13
- 15 01 00 00 00 00 02 0b 13
- 15 01 00 00 00 00 02 0c 15
- 15 01 00 00 00 00 02 0d 15
- 15 01 00 00 00 00 02 0e 17
- 15 01 00 00 00 00 02 0f 17
- 15 01 00 00 00 00 02 10 1c
- 15 01 00 00 00 00 02 11 0b
- 15 01 00 00 00 00 02 12 0c
- 15 01 00 00 00 00 02 13 01
- 15 01 00 00 00 00 02 14 0f
- 15 01 00 00 00 00 02 15 10
- 15 01 00 00 00 00 02 16 10
- 15 01 00 00 00 00 02 17 10
- 15 01 00 00 00 00 02 18 89
- 15 01 00 00 00 00 02 19 8a
- 15 01 00 00 00 00 02 1a 13
- 15 01 00 00 00 00 02 1b 13
- 15 01 00 00 00 00 02 1c 15
- 15 01 00 00 00 00 02 1d 15
- 15 01 00 00 00 00 02 1e 17
- 15 01 00 00 00 00 02 1f 17
- /* STV */
- 15 01 00 00 00 00 02 20 40
- 15 01 00 00 00 00 02 21 01
- 15 01 00 00 00 00 02 22 00
- 15 01 00 00 00 00 02 23 40
- 15 01 00 00 00 00 02 24 40
- 15 01 00 00 00 00 02 25 6d
- 15 01 00 00 00 00 02 26 40
- 15 01 00 00 00 00 02 27 40
- /* Vend */
- 15 01 00 00 00 00 02 e0 00
- 15 01 00 00 00 00 02 dc 21
- 15 01 00 00 00 00 02 dd 22
- 15 01 00 00 00 00 02 de 07
- 15 01 00 00 00 00 02 df 07
- 15 01 00 00 00 00 02 e3 6D
- 15 01 00 00 00 00 02 e1 07
- 15 01 00 00 00 00 02 e2 07
- /* UD */
- 15 01 00 00 00 00 02 29 d8
- 15 01 00 00 00 00 02 2a 2a
- /* CLK */
- 15 01 00 00 00 00 02 4b 03
- 15 01 00 00 00 00 02 4c 11
- 15 01 00 00 00 00 02 4d 10
- 15 01 00 00 00 00 02 4e 01
- 15 01 00 00 00 00 02 4f 01
- 15 01 00 00 00 00 02 50 10
- 15 01 00 00 00 00 02 51 00
- 15 01 00 00 00 00 02 52 80
- 15 01 00 00 00 00 02 53 00
- 15 01 00 00 00 00 02 56 00
- 15 01 00 00 00 00 02 54 07
- 15 01 00 00 00 00 02 58 07
- 15 01 00 00 00 00 02 55 25
- /* Reset XDONB */
- 15 01 00 00 00 00 02 5b 43
- 15 01 00 00 00 00 02 5c 00
- 15 01 00 00 00 00 02 5f 73
- 15 01 00 00 00 00 02 60 73
- 15 01 00 00 00 00 02 63 22
- 15 01 00 00 00 00 02 64 00
- 15 01 00 00 00 00 02 67 08
- 15 01 00 00 00 00 02 68 04
- /* Resolution:1440x2560*/
- 15 01 00 00 00 00 02 72 02
- /* mux */
- 15 01 00 00 00 00 02 7a 80
- 15 01 00 00 00 00 02 7b 91
- 15 01 00 00 00 00 02 7c D8
- 15 01 00 00 00 00 02 7d 60
- 15 01 00 00 00 00 02 7f 15
- 15 01 00 00 00 00 02 75 15
- /* ABOFF */
- 15 01 00 00 00 00 02 b3 C0
- 15 01 00 00 00 00 02 b4 00
- 15 01 00 00 00 00 02 b5 00
- /* Source EQ */
- 15 01 00 00 00 00 02 78 00
- 15 01 00 00 00 00 02 79 00
- 15 01 00 00 00 00 02 80 00
- 15 01 00 00 00 00 02 83 00
- /* FP BP */
- 15 01 00 00 00 00 02 93 0a
- 15 01 00 00 00 00 02 94 0a
- /* Inversion Type */
- 15 01 00 00 00 00 02 8a 00
- 15 01 00 00 00 00 02 9b ff
- /* IMGSWAP =1 @PortSwap=1 */
- 15 01 00 00 00 00 02 9d b0
- 15 01 00 00 00 00 02 9f 63
- 15 01 00 00 00 00 02 98 10
- /* FRM */
- 15 01 00 00 00 00 02 ec 00
- /* CMD1 */
- 15 01 00 00 00 00 02 ff 10
- /* VESA DSC PPS settings(1440x2560 slide 16H) */
- 39 01 00 00 00 00 11 c1 09 20 00 10 02 00 02 68
- 01 bb 00 0a 06 67 04 c5
- 39 01 00 00 00 00 03 c2 10 f0
- /* C0h = 0x0(2 Port SDC)0x01(1 PortA FBC)
- * 0x02(MTK) 0x03(1 PortA VESA)
- */
- 15 01 00 00 00 00 02 c0 03
- /* VBP+VSA=,VFP = 10H */
- 15 01 00 00 00 00 04 3b 03 0a 0a
- /* FTE on */
- 15 01 00 00 00 00 02 35 00
- /* EN_BK =1(auto black) */
- 15 01 00 00 00 00 02 e5 01
- /* CMD mode(10) VDO mode(03) */
- 15 01 00 00 00 00 02 bb 10
- /* Non Reload MTP */
- 15 01 00 00 00 00 02 fb 01
- /* SlpOut + DispOn */
- 05 01 00 00 78 00 02 11 00
- 05 01 00 00 78 00 02 29 00
- ];
- qcom,mdss-dsi-off-command = [05 01 00 00 78 00 02 28 00
- 05 01 00 00 78 00 02 10 00];
-
- qcom,mdss-dsi-on-command-state = "dsi_hs_mode";
- qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
- qcom,mdss-dsi-h-sync-pulse = <0>;
+ qcom,mdss-dsi-panel-hdr-enabled;
+ qcom,mdss-dsi-panel-hdr-color-primaries = <14500 15500 32000
+ 17000 15500 30000 8000 3000>;
+ qcom,mdss-dsi-panel-peak-brightness = <4200000>;
+ qcom,mdss-dsi-panel-blackness-level = <3230>;
qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
qcom,mdss-dsi-bllp-eof-power-mode;
qcom,mdss-dsi-bllp-power-mode;
@@ -210,7 +38,6 @@
qcom,mdss-dsi-dma-trigger = "trigger_sw";
qcom,mdss-dsi-mdp-trigger = "none";
qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
-
qcom,mdss-dsi-bl-max-level = <4095>;
qcom,adjust-timer-wakeup-ms = <1>;
qcom,mdss-dsi-te-pin-select = <1>;
@@ -220,13 +47,201 @@
qcom,mdss-dsi-te-check-enable;
qcom,mdss-dsi-te-using-te-pin;
- qcom,compression-mode = "dsc";
- qcom,mdss-dsc-slice-height = <16>;
- qcom,mdss-dsc-slice-width = <720>;
- qcom,mdss-dsc-slice-per-pkt = <2>;
- qcom,mdss-dsc-bit-per-component = <8>;
- qcom,mdss-dsc-bit-per-pixel = <8>;
- qcom,mdss-dsc-block-prediction-enable;
+ qcom,mdss-dsi-display-timings {
+ timing@0{
+ qcom,mdss-dsi-panel-framerate = <60>;
+ qcom,mdss-dsi-panel-width = <1440>;
+ qcom,mdss-dsi-panel-height = <2560>;
+ qcom,mdss-dsi-h-front-porch = <100>;
+ qcom,mdss-dsi-h-back-porch = <32>;
+ qcom,mdss-dsi-h-pulse-width = <16>;
+ qcom,mdss-dsi-h-sync-skew = <0>;
+ qcom,mdss-dsi-v-back-porch = <8>;
+ qcom,mdss-dsi-v-front-porch = <10>;
+ qcom,mdss-dsi-v-pulse-width = <2>;
+ qcom,mdss-dsi-h-left-border = <0>;
+ qcom,mdss-dsi-h-right-border = <0>;
+ qcom,mdss-dsi-v-top-border = <0>;
+ qcom,mdss-dsi-v-bottom-border = <0>;
+ qcom,mdss-dsi-panel-jitter = <0x1 0x1>;
+ qcom,mdss-dsi-on-command = [
+ /* CMD2_P0 */
+ 15 01 00 00 00 00 02 ff 20
+ 15 01 00 00 00 00 02 fb 01
+ 15 01 00 00 00 00 02 00 01
+ 15 01 00 00 00 00 02 01 55
+ 15 01 00 00 00 00 02 02 45
+ 15 01 00 00 00 00 02 05 40
+ 15 01 00 00 00 00 02 06 19
+ 15 01 00 00 00 00 02 07 1e
+ 15 01 00 00 00 00 02 0b 73
+ 15 01 00 00 00 00 02 0c 73
+ 15 01 00 00 00 00 02 0e b0
+ 15 01 00 00 00 00 02 0f ae
+ 15 01 00 00 00 00 02 11 b8
+ 15 01 00 00 00 00 02 13 00
+ 15 01 00 00 00 00 02 58 80
+ 15 01 00 00 00 00 02 59 01
+ 15 01 00 00 00 00 02 5a 00
+ 15 01 00 00 00 00 02 5b 01
+ 15 01 00 00 00 00 02 5c 80
+ 15 01 00 00 00 00 02 5d 81
+ 15 01 00 00 00 00 02 5e 00
+ 15 01 00 00 00 00 02 5f 01
+ 15 01 00 00 00 00 02 72 31
+ 15 01 00 00 00 00 02 68 03
+ /* CMD2_P4 */
+ 15 01 00 00 00 00 02 ff 24
+ 15 01 00 00 00 00 02 fb 01
+ 15 01 00 00 00 00 02 00 1c
+ 15 01 00 00 00 00 02 01 0b
+ 15 01 00 00 00 00 02 02 0c
+ 15 01 00 00 00 00 02 03 01
+ 15 01 00 00 00 00 02 04 0f
+ 15 01 00 00 00 00 02 05 10
+ 15 01 00 00 00 00 02 06 10
+ 15 01 00 00 00 00 02 07 10
+ 15 01 00 00 00 00 02 08 89
+ 15 01 00 00 00 00 02 09 8a
+ 15 01 00 00 00 00 02 0a 13
+ 15 01 00 00 00 00 02 0b 13
+ 15 01 00 00 00 00 02 0c 15
+ 15 01 00 00 00 00 02 0d 15
+ 15 01 00 00 00 00 02 0e 17
+ 15 01 00 00 00 00 02 0f 17
+ 15 01 00 00 00 00 02 10 1c
+ 15 01 00 00 00 00 02 11 0b
+ 15 01 00 00 00 00 02 12 0c
+ 15 01 00 00 00 00 02 13 01
+ 15 01 00 00 00 00 02 14 0f
+ 15 01 00 00 00 00 02 15 10
+ 15 01 00 00 00 00 02 16 10
+ 15 01 00 00 00 00 02 17 10
+ 15 01 00 00 00 00 02 18 89
+ 15 01 00 00 00 00 02 19 8a
+ 15 01 00 00 00 00 02 1a 13
+ 15 01 00 00 00 00 02 1b 13
+ 15 01 00 00 00 00 02 1c 15
+ 15 01 00 00 00 00 02 1d 15
+ 15 01 00 00 00 00 02 1e 17
+ 15 01 00 00 00 00 02 1f 17
+ /* STV */
+ 15 01 00 00 00 00 02 20 40
+ 15 01 00 00 00 00 02 21 01
+ 15 01 00 00 00 00 02 22 00
+ 15 01 00 00 00 00 02 23 40
+ 15 01 00 00 00 00 02 24 40
+ 15 01 00 00 00 00 02 25 6d
+ 15 01 00 00 00 00 02 26 40
+ 15 01 00 00 00 00 02 27 40
+ /* Vend */
+ 15 01 00 00 00 00 02 e0 00
+ 15 01 00 00 00 00 02 dc 21
+ 15 01 00 00 00 00 02 dd 22
+ 15 01 00 00 00 00 02 de 07
+ 15 01 00 00 00 00 02 df 07
+ 15 01 00 00 00 00 02 e3 6D
+ 15 01 00 00 00 00 02 e1 07
+ 15 01 00 00 00 00 02 e2 07
+ /* UD */
+ 15 01 00 00 00 00 02 29 d8
+ 15 01 00 00 00 00 02 2a 2a
+ /* CLK */
+ 15 01 00 00 00 00 02 4b 03
+ 15 01 00 00 00 00 02 4c 11
+ 15 01 00 00 00 00 02 4d 10
+ 15 01 00 00 00 00 02 4e 01
+ 15 01 00 00 00 00 02 4f 01
+ 15 01 00 00 00 00 02 50 10
+ 15 01 00 00 00 00 02 51 00
+ 15 01 00 00 00 00 02 52 80
+ 15 01 00 00 00 00 02 53 00
+ 15 01 00 00 00 00 02 56 00
+ 15 01 00 00 00 00 02 54 07
+ 15 01 00 00 00 00 02 58 07
+ 15 01 00 00 00 00 02 55 25
+ /* Reset XDONB */
+ 15 01 00 00 00 00 02 5b 43
+ 15 01 00 00 00 00 02 5c 00
+ 15 01 00 00 00 00 02 5f 73
+ 15 01 00 00 00 00 02 60 73
+ 15 01 00 00 00 00 02 63 22
+ 15 01 00 00 00 00 02 64 00
+ 15 01 00 00 00 00 02 67 08
+ 15 01 00 00 00 00 02 68 04
+ /* Resolution:1440x2560*/
+ 15 01 00 00 00 00 02 72 02
+ /* mux */
+ 15 01 00 00 00 00 02 7a 80
+ 15 01 00 00 00 00 02 7b 91
+ 15 01 00 00 00 00 02 7c D8
+ 15 01 00 00 00 00 02 7d 60
+ 15 01 00 00 00 00 02 7f 15
+ 15 01 00 00 00 00 02 75 15
+ /* ABOFF */
+ 15 01 00 00 00 00 02 b3 C0
+ 15 01 00 00 00 00 02 b4 00
+ 15 01 00 00 00 00 02 b5 00
+ /* Source EQ */
+ 15 01 00 00 00 00 02 78 00
+ 15 01 00 00 00 00 02 79 00
+ 15 01 00 00 00 00 02 80 00
+ 15 01 00 00 00 00 02 83 00
+ /* FP BP */
+ 15 01 00 00 00 00 02 93 0a
+ 15 01 00 00 00 00 02 94 0a
+ /* Inversion Type */
+ 15 01 00 00 00 00 02 8a 00
+ 15 01 00 00 00 00 02 9b ff
+ /* IMGSWAP =1 @PortSwap=1 */
+ 15 01 00 00 00 00 02 9d b0
+ 15 01 00 00 00 00 02 9f 63
+ 15 01 00 00 00 00 02 98 10
+ /* FRM */
+ 15 01 00 00 00 00 02 ec 00
+ /* CMD1 */
+ 15 01 00 00 00 00 02 ff 10
+ /* VESA DSC PPS settings
+ * (1440x2560 slide 16H)
+ */
+ 39 01 00 00 00 00 11 c1 09
+ 20 00 10 02 00 02 68 01 bb
+ 00 0a 06 67 04 c5
+ 39 01 00 00 00 00 03 c2 10 f0
+ /* C0h = 0x0(2 Port SDC)
+ * 0x01(1 PortA FBC)
+ * 0x02(MTK) 0x03(1 PortA VESA)
+ */
+ 15 01 00 00 00 00 02 c0 03
+ /* VBP+VSA=,VFP = 10H */
+ 15 01 00 00 00 00 04 3b 03 0a 0a
+ /* FTE on */
+ 15 01 00 00 00 00 02 35 00
+ /* EN_BK =1(auto black) */
+ 15 01 00 00 00 00 02 e5 01
+ /* CMD mode(10) VDO mode(03) */
+ 15 01 00 00 00 00 02 bb 10
+ /* Non Reload MTP */
+ 15 01 00 00 00 00 02 fb 01
+ /* SlpOut + DispOn */
+ 05 01 00 00 78 00 02 11 00
+ 05 01 00 00 78 00 02 29 00
+ ];
+ qcom,mdss-dsi-off-command = [05 01 00 00 78 00
+ 02 28 00 05 01 00 00 78 00 02 10 00];
+
+ qcom,mdss-dsi-on-command-state = "dsi_hs_mode";
+ qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+ qcom,mdss-dsi-h-sync-pulse = <0>;
+ qcom,compression-mode = "dsc";
+ qcom,mdss-dsc-slice-height = <16>;
+ qcom,mdss-dsc-slice-width = <720>;
+ qcom,mdss-dsc-slice-per-pkt = <2>;
+ qcom,mdss-dsc-bit-per-component = <8>;
+ qcom,mdss-dsc-bit-per-pixel = <8>;
+ qcom,mdss-dsc-block-prediction-enable;
+ };
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-video.dtsi
index fe9129c..ac8a956 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-video.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-video.dtsi
@@ -15,185 +15,16 @@
qcom,mdss-dsi-panel-name =
"nt35597 video mode dsi truly panel with DSC";
qcom,mdss-dsi-panel-type = "dsi_video_mode";
- qcom,mdss-dsi-panel-framerate = <60>;
qcom,mdss-dsi-virtual-channel-id = <0>;
qcom,mdss-dsi-stream = <0>;
- qcom,mdss-dsi-panel-width = <1440>;
- qcom,mdss-dsi-panel-height = <2560>;
- qcom,mdss-dsi-h-front-porch = <100>;
- qcom,mdss-dsi-h-back-porch = <32>;
- qcom,mdss-dsi-h-pulse-width = <16>;
- qcom,mdss-dsi-h-sync-skew = <0>;
- qcom,mdss-dsi-v-back-porch = <8>;
- qcom,mdss-dsi-v-front-porch = <10>;
- qcom,mdss-dsi-v-pulse-width = <2>;
qcom,mdss-dsi-bpp = <24>;
qcom,mdss-dsi-underflow-color = <0xff>;
qcom,mdss-dsi-border-color = <0>;
- qcom,mdss-dsi-on-command = [
- /* CMD2_P0 */
- 15 01 00 00 00 00 02 ff 20
- 15 01 00 00 00 00 02 fb 01
- 15 01 00 00 00 00 02 00 01
- 15 01 00 00 00 00 02 01 55
- 15 01 00 00 00 00 02 02 45
- 15 01 00 00 00 00 02 05 40
- 15 01 00 00 00 00 02 06 19
- 15 01 00 00 00 00 02 07 1e
- 15 01 00 00 00 00 02 0b 73
- 15 01 00 00 00 00 02 0c 73
- 15 01 00 00 00 00 02 0e b0
- 15 01 00 00 00 00 02 0f aE
- 15 01 00 00 00 00 02 11 b8
- 15 01 00 00 00 00 02 13 00
- 15 01 00 00 00 00 02 58 80
- 15 01 00 00 00 00 02 59 01
- 15 01 00 00 00 00 02 5a 00
- 15 01 00 00 00 00 02 5b 01
- 15 01 00 00 00 00 02 5c 80
- 15 01 00 00 00 00 02 5d 81
- 15 01 00 00 00 00 02 5e 00
- 15 01 00 00 00 00 02 5f 01
- 15 01 00 00 00 00 02 72 31
- 15 01 00 00 00 00 02 68 03
- /* CMD2_P4 */
- 15 01 00 00 00 00 02 ff 24
- 15 01 00 00 00 00 02 fb 01
- 15 01 00 00 00 00 02 00 1c
- 15 01 00 00 00 00 02 01 0b
- 15 01 00 00 00 00 02 02 0c
- 15 01 00 00 00 00 02 03 01
- 15 01 00 00 00 00 02 04 0f
- 15 01 00 00 00 00 02 05 10
- 15 01 00 00 00 00 02 06 10
- 15 01 00 00 00 00 02 07 10
- 15 01 00 00 00 00 02 08 89
- 15 01 00 00 00 00 02 09 8a
- 15 01 00 00 00 00 02 0a 13
- 15 01 00 00 00 00 02 0b 13
- 15 01 00 00 00 00 02 0c 15
- 15 01 00 00 00 00 02 0d 15
- 15 01 00 00 00 00 02 0e 17
- 15 01 00 00 00 00 02 0f 17
- 15 01 00 00 00 00 02 10 1c
- 15 01 00 00 00 00 02 11 0b
- 15 01 00 00 00 00 02 12 0c
- 15 01 00 00 00 00 02 13 01
- 15 01 00 00 00 00 02 14 0f
- 15 01 00 00 00 00 02 15 10
- 15 01 00 00 00 00 02 16 10
- 15 01 00 00 00 00 02 17 10
- 15 01 00 00 00 00 02 18 89
- 15 01 00 00 00 00 02 19 8a
- 15 01 00 00 00 00 02 1a 13
- 15 01 00 00 00 00 02 1b 13
- 15 01 00 00 00 00 02 1c 15
- 15 01 00 00 00 00 02 1d 15
- 15 01 00 00 00 00 02 1e 17
- 15 01 00 00 00 00 02 1f 17
- /* STV */
- 15 01 00 00 00 00 02 20 40
- 15 01 00 00 00 00 02 21 01
- 15 01 00 00 00 00 02 22 00
- 15 01 00 00 00 00 02 23 40
- 15 01 00 00 00 00 02 24 40
- 15 01 00 00 00 00 02 25 6d
- 15 01 00 00 00 00 02 26 40
- 15 01 00 00 00 00 02 27 40
- /* Vend */
- 15 01 00 00 00 00 02 e0 00
- 15 01 00 00 00 00 02 dc 21
- 15 01 00 00 00 00 02 dd 22
- 15 01 00 00 00 00 02 de 07
- 15 01 00 00 00 00 02 df 07
- 15 01 00 00 00 00 02 e3 6d
- 15 01 00 00 00 00 02 e1 07
- 15 01 00 00 00 00 02 e2 07
- /* UD */
- 15 01 00 00 00 00 02 29 d8
- 15 01 00 00 00 00 02 2a 2a
- /* CLK */
- 15 01 00 00 00 00 02 4b 03
- 15 01 00 00 00 00 02 4c 11
- 15 01 00 00 00 00 02 4d 10
- 15 01 00 00 00 00 02 4e 01
- 15 01 00 00 00 00 02 4f 01
- 15 01 00 00 00 00 02 50 10
- 15 01 00 00 00 00 02 51 00
- 15 01 00 00 00 00 02 52 80
- 15 01 00 00 00 00 02 53 00
- 15 01 00 00 00 00 02 56 00
- 15 01 00 00 00 00 02 54 07
- 15 01 00 00 00 00 02 58 07
- 15 01 00 00 00 00 02 55 25
- /* Reset XDONB */
- 15 01 00 00 00 00 02 5b 43
- 15 01 00 00 00 00 02 5c 00
- 15 01 00 00 00 00 02 5f 73
- 15 01 00 00 00 00 02 60 73
- 15 01 00 00 00 00 02 63 22
- 15 01 00 00 00 00 02 64 00
- 15 01 00 00 00 00 02 67 08
- 15 01 00 00 00 00 02 68 04
- /* Resolution:1440x2560*/
- 15 01 00 00 00 00 02 72 02
- /* mux */
- 15 01 00 00 00 00 02 7a 80
- 15 01 00 00 00 00 02 7b 91
- 15 01 00 00 00 00 02 7c d8
- 15 01 00 00 00 00 02 7d 60
- 15 01 00 00 00 00 02 7f 15
- 15 01 00 00 00 00 02 75 15
- /* ABOFF */
- 15 01 00 00 00 00 02 b3 c0
- 15 01 00 00 00 00 02 b4 00
- 15 01 00 00 00 00 02 b5 00
- /* Source EQ */
- 15 01 00 00 00 00 02 78 00
- 15 01 00 00 00 00 02 79 00
- 15 01 00 00 00 00 02 80 00
- 15 01 00 00 00 00 02 83 00
- /* FP BP */
- 15 01 00 00 00 00 02 93 0a
- 15 01 00 00 00 00 02 94 0a
- /* Inversion Type */
- 15 01 00 00 00 00 02 8a 00
- 15 01 00 00 00 00 02 9b ff
- /* IMGSWAP =1 @PortSwap=1 */
- 15 01 00 00 00 00 02 9d b0
- 15 01 00 00 00 00 02 9f 63
- 15 01 00 00 00 00 02 98 10
- /* FRM */
- 15 01 00 00 00 00 02 ec 00
- /* CMD1 */
- 15 01 00 00 00 00 02 ff 10
- /* VESA DSC PPS settings(1440x2560 slide 16H) */
- 39 01 00 00 00 00 11 c1 09 20 00 10 02 00 02 68 01
- bb 00 0a 06 67 04 c5
- 39 01 00 00 00 00 03 c2 10 f0
- /* C0h = 0x00(2 Port SDC); 0x01(1 PortA FBC);
- * 0x02(MTK); 0x03(1 PortA VESA)
- */
- 15 01 00 00 00 00 02 c0 03
- /* VBP+VSA=,VFP = 10H */
- 39 01 00 00 00 00 04 3b 03 0a 0a
- /* FTE on */
- 15 01 00 00 00 00 02 35 00
- /* EN_BK =1(auto black) */
- 15 01 00 00 00 00 02 e5 01
- /* CMD mode(10) VDO mode(03) */
- 15 01 00 00 00 00 02 bb 03
- /* Non Reload MTP */
- 15 01 00 00 00 00 02 fb 01
- /* SlpOut + DispOn */
- 05 01 00 00 78 00 02 11 00
- 05 01 00 00 78 00 02 29 00
- ];
- qcom,mdss-dsi-off-command = [05 01 00 00 78 00 02 28 00
- 05 01 00 00 78 00 02 10 00];
- qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
- qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
- qcom,mdss-dsi-h-sync-pulse = <0>;
+ qcom,mdss-dsi-panel-hdr-enabled;
+ qcom,mdss-dsi-panel-hdr-color-primaries = <14500 15500 32000
+ 17000 15500 30000 8000 3000>;
+ qcom,mdss-dsi-panel-peak-brightness = <4200000>;
+ qcom,mdss-dsi-panel-blackness-level = <3230>;
qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
qcom,mdss-dsi-bllp-eof-power-mode;
qcom,mdss-dsi-bllp-power-mode;
@@ -207,12 +38,195 @@
qcom,mdss-pan-physical-width-dimension = <74>;
qcom,mdss-pan-physical-height-dimension = <131>;
- qcom,compression-mode = "dsc";
- qcom,mdss-dsc-slice-height = <16>;
- qcom,mdss-dsc-slice-width = <720>;
- qcom,mdss-dsc-slice-per-pkt = <2>;
- qcom,mdss-dsc-bit-per-component = <8>;
- qcom,mdss-dsc-bit-per-pixel = <8>;
- qcom,mdss-dsc-block-prediction-enable;
+ qcom,mdss-dsi-display-timings {
+ timing@0{
+ qcom,mdss-dsi-panel-width = <1440>;
+ qcom,mdss-dsi-panel-height = <2560>;
+ qcom,mdss-dsi-h-front-porch = <100>;
+ qcom,mdss-dsi-h-back-porch = <32>;
+ qcom,mdss-dsi-h-pulse-width = <16>;
+ qcom,mdss-dsi-h-sync-skew = <0>;
+ qcom,mdss-dsi-v-back-porch = <8>;
+ qcom,mdss-dsi-v-front-porch = <10>;
+ qcom,mdss-dsi-v-pulse-width = <2>;
+ qcom,mdss-dsi-panel-framerate = <60>;
+ qcom,mdss-dsi-on-command = [
+ /* CMD2_P0 */
+ 15 01 00 00 00 00 02 ff 20
+ 15 01 00 00 00 00 02 fb 01
+ 15 01 00 00 00 00 02 00 01
+ 15 01 00 00 00 00 02 01 55
+ 15 01 00 00 00 00 02 02 45
+ 15 01 00 00 00 00 02 05 40
+ 15 01 00 00 00 00 02 06 19
+ 15 01 00 00 00 00 02 07 1e
+ 15 01 00 00 00 00 02 0b 73
+ 15 01 00 00 00 00 02 0c 73
+ 15 01 00 00 00 00 02 0e b0
+ 15 01 00 00 00 00 02 0f aE
+ 15 01 00 00 00 00 02 11 b8
+ 15 01 00 00 00 00 02 13 00
+ 15 01 00 00 00 00 02 58 80
+ 15 01 00 00 00 00 02 59 01
+ 15 01 00 00 00 00 02 5a 00
+ 15 01 00 00 00 00 02 5b 01
+ 15 01 00 00 00 00 02 5c 80
+ 15 01 00 00 00 00 02 5d 81
+ 15 01 00 00 00 00 02 5e 00
+ 15 01 00 00 00 00 02 5f 01
+ 15 01 00 00 00 00 02 72 31
+ 15 01 00 00 00 00 02 68 03
+ /* CMD2_P4 */
+ 15 01 00 00 00 00 02 ff 24
+ 15 01 00 00 00 00 02 fb 01
+ 15 01 00 00 00 00 02 00 1c
+ 15 01 00 00 00 00 02 01 0b
+ 15 01 00 00 00 00 02 02 0c
+ 15 01 00 00 00 00 02 03 01
+ 15 01 00 00 00 00 02 04 0f
+ 15 01 00 00 00 00 02 05 10
+ 15 01 00 00 00 00 02 06 10
+ 15 01 00 00 00 00 02 07 10
+ 15 01 00 00 00 00 02 08 89
+ 15 01 00 00 00 00 02 09 8a
+ 15 01 00 00 00 00 02 0a 13
+ 15 01 00 00 00 00 02 0b 13
+ 15 01 00 00 00 00 02 0c 15
+ 15 01 00 00 00 00 02 0d 15
+ 15 01 00 00 00 00 02 0e 17
+ 15 01 00 00 00 00 02 0f 17
+ 15 01 00 00 00 00 02 10 1c
+ 15 01 00 00 00 00 02 11 0b
+ 15 01 00 00 00 00 02 12 0c
+ 15 01 00 00 00 00 02 13 01
+ 15 01 00 00 00 00 02 14 0f
+ 15 01 00 00 00 00 02 15 10
+ 15 01 00 00 00 00 02 16 10
+ 15 01 00 00 00 00 02 17 10
+ 15 01 00 00 00 00 02 18 89
+ 15 01 00 00 00 00 02 19 8a
+ 15 01 00 00 00 00 02 1a 13
+ 15 01 00 00 00 00 02 1b 13
+ 15 01 00 00 00 00 02 1c 15
+ 15 01 00 00 00 00 02 1d 15
+ 15 01 00 00 00 00 02 1e 17
+ 15 01 00 00 00 00 02 1f 17
+ /* STV */
+ 15 01 00 00 00 00 02 20 40
+ 15 01 00 00 00 00 02 21 01
+ 15 01 00 00 00 00 02 22 00
+ 15 01 00 00 00 00 02 23 40
+ 15 01 00 00 00 00 02 24 40
+ 15 01 00 00 00 00 02 25 6d
+ 15 01 00 00 00 00 02 26 40
+ 15 01 00 00 00 00 02 27 40
+ /* Vend */
+ 15 01 00 00 00 00 02 e0 00
+ 15 01 00 00 00 00 02 dc 21
+ 15 01 00 00 00 00 02 dd 22
+ 15 01 00 00 00 00 02 de 07
+ 15 01 00 00 00 00 02 df 07
+ 15 01 00 00 00 00 02 e3 6d
+ 15 01 00 00 00 00 02 e1 07
+ 15 01 00 00 00 00 02 e2 07
+ /* UD */
+ 15 01 00 00 00 00 02 29 d8
+ 15 01 00 00 00 00 02 2a 2a
+ /* CLK */
+ 15 01 00 00 00 00 02 4b 03
+ 15 01 00 00 00 00 02 4c 11
+ 15 01 00 00 00 00 02 4d 10
+ 15 01 00 00 00 00 02 4e 01
+ 15 01 00 00 00 00 02 4f 01
+ 15 01 00 00 00 00 02 50 10
+ 15 01 00 00 00 00 02 51 00
+ 15 01 00 00 00 00 02 52 80
+ 15 01 00 00 00 00 02 53 00
+ 15 01 00 00 00 00 02 56 00
+ 15 01 00 00 00 00 02 54 07
+ 15 01 00 00 00 00 02 58 07
+ 15 01 00 00 00 00 02 55 25
+ /* Reset XDONB */
+ 15 01 00 00 00 00 02 5b 43
+ 15 01 00 00 00 00 02 5c 00
+ 15 01 00 00 00 00 02 5f 73
+ 15 01 00 00 00 00 02 60 73
+ 15 01 00 00 00 00 02 63 22
+ 15 01 00 00 00 00 02 64 00
+ 15 01 00 00 00 00 02 67 08
+ 15 01 00 00 00 00 02 68 04
+ /* Resolution:1440x2560*/
+ 15 01 00 00 00 00 02 72 02
+ /* mux */
+ 15 01 00 00 00 00 02 7a 80
+ 15 01 00 00 00 00 02 7b 91
+ 15 01 00 00 00 00 02 7c d8
+ 15 01 00 00 00 00 02 7d 60
+ 15 01 00 00 00 00 02 7f 15
+ 15 01 00 00 00 00 02 75 15
+ /* ABOFF */
+ 15 01 00 00 00 00 02 b3 c0
+ 15 01 00 00 00 00 02 b4 00
+ 15 01 00 00 00 00 02 b5 00
+ /* Source EQ */
+ 15 01 00 00 00 00 02 78 00
+ 15 01 00 00 00 00 02 79 00
+ 15 01 00 00 00 00 02 80 00
+ 15 01 00 00 00 00 02 83 00
+ /* FP BP */
+ 15 01 00 00 00 00 02 93 0a
+ 15 01 00 00 00 00 02 94 0a
+ /* Inversion Type */
+ 15 01 00 00 00 00 02 8a 00
+ 15 01 00 00 00 00 02 9b ff
+ /* IMGSWAP =1 @PortSwap=1 */
+ 15 01 00 00 00 00 02 9d b0
+ 15 01 00 00 00 00 02 9f 63
+ 15 01 00 00 00 00 02 98 10
+ /* FRM */
+ 15 01 00 00 00 00 02 ec 00
+ /* CMD1 */
+ 15 01 00 00 00 00 02 ff 10
+ /* VESA DSC PPS settings
+ * (1440x2560 slide 16H)
+ */
+ 39 01 00 00 00 00 11 c1 09
+ 20 00 10 02 00 02 68 01 bb
+ 00 0a 06 67 04 c5
+
+ 39 01 00 00 00 00 03 c2 10 f0
+ /* C0h = 0x00(2 Port SDC);
+ * 0x01(1 PortA FBC);
+ * 0x02(MTK); 0x03(1 PortA VESA)
+ */
+ 15 01 00 00 00 00 02 c0 03
+ /* VBP+VSA=,VFP = 10H */
+ 39 01 00 00 00 00 04 3b 03 0a 0a
+ /* FTE on */
+ 15 01 00 00 00 00 02 35 00
+ /* EN_BK =1(auto black) */
+ 15 01 00 00 00 00 02 e5 01
+ /* CMD mode(10) VDO mode(03) */
+ 15 01 00 00 00 00 02 bb 03
+ /* Non Reload MTP */
+ 15 01 00 00 00 00 02 fb 01
+ /* SlpOut + DispOn */
+ 05 01 00 00 78 00 02 11 00
+ 05 01 00 00 78 00 02 29 00
+ ];
+ qcom,mdss-dsi-off-command = [05 01 00 00 78 00
+ 02 28 00 05 01 00 00 78 00 02 10 00];
+ qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+ qcom,mdss-dsi-h-sync-pulse = <0>;
+ qcom,compression-mode = "dsc";
+ qcom,mdss-dsc-slice-height = <16>;
+ qcom,mdss-dsc-slice-width = <720>;
+ qcom,mdss-dsc-slice-per-pkt = <2>;
+ qcom,mdss-dsc-bit-per-component = <8>;
+ qcom,mdss-dsc-bit-per-pixel = <8>;
+ qcom,mdss-dsc-block-prediction-enable;
+ };
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi
index e4a0370..87cabae 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi
@@ -15,27 +15,12 @@
qcom,mdss-dsi-panel-name =
"Dual nt35597 cmd mode dsi truly panel without DSC";
qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
- qcom,mdss-dsi-panel-framerate = <60>;
qcom,mdss-dsi-virtual-channel-id = <0>;
qcom,mdss-dsi-stream = <0>;
- qcom,mdss-dsi-panel-width = <720>;
- qcom,mdss-dsi-panel-height = <2560>;
- qcom,mdss-dsi-h-front-porch = <100>;
- qcom,mdss-dsi-h-back-porch = <32>;
- qcom,mdss-dsi-h-pulse-width = <16>;
- qcom,mdss-dsi-h-sync-skew = <0>;
- qcom,mdss-dsi-v-back-porch = <7>;
- qcom,mdss-dsi-v-front-porch = <8>;
- qcom,mdss-dsi-v-pulse-width = <1>;
- qcom,mdss-dsi-h-left-border = <0>;
- qcom,mdss-dsi-h-right-border = <0>;
- qcom,mdss-dsi-v-top-border = <0>;
- qcom,mdss-dsi-v-bottom-border = <0>;
qcom,mdss-dsi-bpp = <24>;
qcom,mdss-dsi-color-order = "rgb_swap_rgb";
qcom,mdss-dsi-underflow-color = <0xff>;
qcom,mdss-dsi-border-color = <0>;
- qcom,mdss-dsi-h-sync-pulse = <0>;
qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
qcom,mdss-dsi-bllp-eof-power-mode;
qcom,mdss-dsi-bllp-power-mode;
@@ -45,6 +30,8 @@
qcom,mdss-dsi-lane-3-state;
qcom,adjust-timer-wakeup-ms = <1>;
qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+ qcom,mdss-pan-physical-width-dimension = <74>;
+ qcom,mdss-pan-physical-height-dimension = <131>;
qcom,mdss-dsi-bl-max-level = <4095>;
qcom,mdss-dsi-dma-trigger = "trigger_sw";
qcom,mdss-dsi-mdp-trigger = "none";
@@ -54,167 +41,186 @@
qcom,mdss-dsi-te-dcs-command = <1>;
qcom,mdss-dsi-te-check-enable;
qcom,mdss-dsi-te-using-te-pin;
- qcom,mdss-dsi-on-command = [
- /* CMD2_P0 */
- 15 01 00 00 00 00 02 FF 20
- 15 01 00 00 00 00 02 fb 01
- 15 01 00 00 00 00 02 00 01
- 15 01 00 00 00 00 02 01 55
- 15 01 00 00 00 00 02 02 45
- 15 01 00 00 00 00 02 05 40
- 15 01 00 00 00 00 02 06 19
- 15 01 00 00 00 00 02 07 1E
- 15 01 00 00 00 00 02 0B 73
- 15 01 00 00 00 00 02 0C 73
- 15 01 00 00 00 00 02 0E B0
- 15 01 00 00 00 00 02 0F AE
- 15 01 00 00 00 00 02 11 B8
- 15 01 00 00 00 00 02 13 00
- 15 01 00 00 00 00 02 58 80
- 15 01 00 00 00 00 02 59 01
- 15 01 00 00 00 00 02 5A 00
- 15 01 00 00 00 00 02 5B 01
- 15 01 00 00 00 00 02 5C 80
- 15 01 00 00 00 00 02 5D 81
- 15 01 00 00 00 00 02 5E 00
- 15 01 00 00 00 00 02 5F 01
- 15 01 00 00 00 00 02 72 31
- 15 01 00 00 00 00 02 68 03
- /* CMD2_P4 */
- 15 01 00 00 00 00 02 ff 24
- 15 01 00 00 00 00 02 fb 01
- 15 01 00 00 00 00 02 00 1C
- 15 01 00 00 00 00 02 01 0B
- 15 01 00 00 00 00 02 02 0C
- 15 01 00 00 00 00 02 03 01
- 15 01 00 00 00 00 02 04 0F
- 15 01 00 00 00 00 02 05 10
- 15 01 00 00 00 00 02 06 10
- 15 01 00 00 00 00 02 07 10
- 15 01 00 00 00 00 02 08 89
- 15 01 00 00 00 00 02 09 8A
- 15 01 00 00 00 00 02 0A 13
- 15 01 00 00 00 00 02 0B 13
- 15 01 00 00 00 00 02 0C 15
- 15 01 00 00 00 00 02 0D 15
- 15 01 00 00 00 00 02 0E 17
- 15 01 00 00 00 00 02 0F 17
- 15 01 00 00 00 00 02 10 1C
- 15 01 00 00 00 00 02 11 0B
- 15 01 00 00 00 00 02 12 0C
- 15 01 00 00 00 00 02 13 01
- 15 01 00 00 00 00 02 14 0F
- 15 01 00 00 00 00 02 15 10
- 15 01 00 00 00 00 02 16 10
- 15 01 00 00 00 00 02 17 10
- 15 01 00 00 00 00 02 18 89
- 15 01 00 00 00 00 02 19 8A
- 15 01 00 00 00 00 02 1A 13
- 15 01 00 00 00 00 02 1B 13
- 15 01 00 00 00 00 02 1C 15
- 15 01 00 00 00 00 02 1D 15
- 15 01 00 00 00 00 02 1E 17
- 15 01 00 00 00 00 02 1F 17
- /* STV */
- 15 01 00 00 00 00 02 20 40
- 15 01 00 00 00 00 02 21 01
- 15 01 00 00 00 00 02 22 00
- 15 01 00 00 00 00 02 23 40
- 15 01 00 00 00 00 02 24 40
- 15 01 00 00 00 00 02 25 6D
- 15 01 00 00 00 00 02 26 40
- 15 01 00 00 00 00 02 27 40
- /* Vend */
- 15 01 00 00 00 00 02 E0 00
- 15 01 00 00 00 00 02 DC 21
- 15 01 00 00 00 00 02 DD 22
- 15 01 00 00 00 00 02 DE 07
- 15 01 00 00 00 00 02 DF 07
- 15 01 00 00 00 00 02 E3 6D
- 15 01 00 00 00 00 02 E1 07
- 15 01 00 00 00 00 02 E2 07
- /* UD */
- 15 01 00 00 00 00 02 29 D8
- 15 01 00 00 00 00 02 2A 2A
- /* CLK */
- 15 01 00 00 00 00 02 4B 03
- 15 01 00 00 00 00 02 4C 11
- 15 01 00 00 00 00 02 4D 10
- 15 01 00 00 00 00 02 4E 01
- 15 01 00 00 00 00 02 4F 01
- 15 01 00 00 00 00 02 50 10
- 15 01 00 00 00 00 02 51 00
- 15 01 00 00 00 00 02 52 80
- 15 01 00 00 00 00 02 53 00
- 15 01 00 00 00 00 02 56 00
- 15 01 00 00 00 00 02 54 07
- 15 01 00 00 00 00 02 58 07
- 15 01 00 00 00 00 02 55 25
- /* Reset XDONB */
- 15 01 00 00 00 00 02 5B 43
- 15 01 00 00 00 00 02 5C 00
- 15 01 00 00 00 00 02 5F 73
- 15 01 00 00 00 00 02 60 73
- 15 01 00 00 00 00 02 63 22
- 15 01 00 00 00 00 02 64 00
- 15 01 00 00 00 00 02 67 08
- 15 01 00 00 00 00 02 68 04
- /* Resolution:1440x2560*/
- 15 01 00 00 00 00 02 72 02
- /* mux */
- 15 01 00 00 00 00 02 7A 80
- 15 01 00 00 00 00 02 7B 91
- 15 01 00 00 00 00 02 7C D8
- 15 01 00 00 00 00 02 7D 60
- 15 01 00 00 00 00 02 7F 15
- 15 01 00 00 00 00 02 75 15
- /* ABOFF */
- 15 01 00 00 00 00 02 B3 C0
- 15 01 00 00 00 00 02 B4 00
- 15 01 00 00 00 00 02 B5 00
- /* Source EQ */
- 15 01 00 00 00 00 02 78 00
- 15 01 00 00 00 00 02 79 00
- 15 01 00 00 00 00 02 80 00
- 15 01 00 00 00 00 02 83 00
- /* FP BP */
- 15 01 00 00 00 00 02 93 0A
- 15 01 00 00 00 00 02 94 0A
- /* Inversion Type */
- 15 01 00 00 00 00 02 8A 00
- 15 01 00 00 00 00 02 9B FF
- /* IMGSWAP =1 @PortSwap=1 */
- 15 01 00 00 00 00 02 9D B0
- 15 01 00 00 00 00 02 9F 63
- 15 01 00 00 00 00 02 98 10
- /* FRM */
- 15 01 00 00 00 00 02 EC 00
- /* CMD1 */
- 15 01 00 00 00 00 02 ff 10
- /* VBP+VSA=,VFP = 10H */
- 15 01 00 00 00 00 04 3B 03 0A 0A
- /* FTE on */
- 15 01 00 00 00 00 02 35 00
- /* EN_BK =1(auto black) */
- 15 01 00 00 00 00 02 E5 01
- /* CMD mode(10) VDO mode(03) */
- 15 01 00 00 00 00 02 BB 10
- /* Non Reload MTP */
- 15 01 00 00 00 00 02 FB 01
- /* SlpOut + DispOn */
- 05 01 00 00 78 00 02 11 00
- 05 01 00 00 78 00 02 29 00
- ];
- qcom,mdss-dsi-off-command = [05 01 00 00 78 00 02 28 00
- 05 01 00 00 78 00 02 10 00];
+ qcom,mdss-dsi-panel-hdr-enabled;
+ qcom,mdss-dsi-panel-hdr-color-primaries = <14500 15500 32000
+ 17000 15500 30000 8000 3000>;
+ qcom,mdss-dsi-panel-peak-brightness = <4200000>;
+ qcom,mdss-dsi-panel-blackness-level = <3230>;
- qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
- qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
-
- qcom,config-select = <&dsi_dual_nt35597_truly_cmd_config0>;
-
- dsi_dual_nt35597_truly_cmd_config0: config0 {
- qcom,split-mode = "dualctl-split";
+ qcom,mdss-dsi-display-timings {
+ timing@0{
+ qcom,mdss-dsi-panel-framerate = <60>;
+ qcom,mdss-dsi-panel-width = <720>;
+ qcom,mdss-dsi-panel-height = <2560>;
+ qcom,mdss-dsi-h-front-porch = <100>;
+ qcom,mdss-dsi-h-back-porch = <32>;
+ qcom,mdss-dsi-h-pulse-width = <16>;
+ qcom,mdss-dsi-h-sync-skew = <0>;
+ qcom,mdss-dsi-v-back-porch = <7>;
+ qcom,mdss-dsi-v-front-porch = <8>;
+ qcom,mdss-dsi-v-pulse-width = <1>;
+ qcom,mdss-dsi-h-left-border = <0>;
+ qcom,mdss-dsi-h-right-border = <0>;
+ qcom,mdss-dsi-v-top-border = <0>;
+ qcom,mdss-dsi-v-bottom-border = <0>;
+ qcom,mdss-dsi-h-sync-pulse = <0>;
+ qcom,mdss-dsi-panel-jitter = <0x1 0x1>;
+ qcom,mdss-dsi-on-command = [
+ /* CMD2_P0 */
+ 15 01 00 00 00 00 02 FF 20
+ 15 01 00 00 00 00 02 fb 01
+ 15 01 00 00 00 00 02 00 01
+ 15 01 00 00 00 00 02 01 55
+ 15 01 00 00 00 00 02 02 45
+ 15 01 00 00 00 00 02 05 40
+ 15 01 00 00 00 00 02 06 19
+ 15 01 00 00 00 00 02 07 1E
+ 15 01 00 00 00 00 02 0B 73
+ 15 01 00 00 00 00 02 0C 73
+ 15 01 00 00 00 00 02 0E B0
+ 15 01 00 00 00 00 02 0F AE
+ 15 01 00 00 00 00 02 11 B8
+ 15 01 00 00 00 00 02 13 00
+ 15 01 00 00 00 00 02 58 80
+ 15 01 00 00 00 00 02 59 01
+ 15 01 00 00 00 00 02 5A 00
+ 15 01 00 00 00 00 02 5B 01
+ 15 01 00 00 00 00 02 5C 80
+ 15 01 00 00 00 00 02 5D 81
+ 15 01 00 00 00 00 02 5E 00
+ 15 01 00 00 00 00 02 5F 01
+ 15 01 00 00 00 00 02 72 31
+ 15 01 00 00 00 00 02 68 03
+ /* CMD2_P4 */
+ 15 01 00 00 00 00 02 ff 24
+ 15 01 00 00 00 00 02 fb 01
+ 15 01 00 00 00 00 02 00 1C
+ 15 01 00 00 00 00 02 01 0B
+ 15 01 00 00 00 00 02 02 0C
+ 15 01 00 00 00 00 02 03 01
+ 15 01 00 00 00 00 02 04 0F
+ 15 01 00 00 00 00 02 05 10
+ 15 01 00 00 00 00 02 06 10
+ 15 01 00 00 00 00 02 07 10
+ 15 01 00 00 00 00 02 08 89
+ 15 01 00 00 00 00 02 09 8A
+ 15 01 00 00 00 00 02 0A 13
+ 15 01 00 00 00 00 02 0B 13
+ 15 01 00 00 00 00 02 0C 15
+ 15 01 00 00 00 00 02 0D 15
+ 15 01 00 00 00 00 02 0E 17
+ 15 01 00 00 00 00 02 0F 17
+ 15 01 00 00 00 00 02 10 1C
+ 15 01 00 00 00 00 02 11 0B
+ 15 01 00 00 00 00 02 12 0C
+ 15 01 00 00 00 00 02 13 01
+ 15 01 00 00 00 00 02 14 0F
+ 15 01 00 00 00 00 02 15 10
+ 15 01 00 00 00 00 02 16 10
+ 15 01 00 00 00 00 02 17 10
+ 15 01 00 00 00 00 02 18 89
+ 15 01 00 00 00 00 02 19 8A
+ 15 01 00 00 00 00 02 1A 13
+ 15 01 00 00 00 00 02 1B 13
+ 15 01 00 00 00 00 02 1C 15
+ 15 01 00 00 00 00 02 1D 15
+ 15 01 00 00 00 00 02 1E 17
+ 15 01 00 00 00 00 02 1F 17
+ /* STV */
+ 15 01 00 00 00 00 02 20 40
+ 15 01 00 00 00 00 02 21 01
+ 15 01 00 00 00 00 02 22 00
+ 15 01 00 00 00 00 02 23 40
+ 15 01 00 00 00 00 02 24 40
+ 15 01 00 00 00 00 02 25 6D
+ 15 01 00 00 00 00 02 26 40
+ 15 01 00 00 00 00 02 27 40
+ /* Vend */
+ 15 01 00 00 00 00 02 E0 00
+ 15 01 00 00 00 00 02 DC 21
+ 15 01 00 00 00 00 02 DD 22
+ 15 01 00 00 00 00 02 DE 07
+ 15 01 00 00 00 00 02 DF 07
+ 15 01 00 00 00 00 02 E3 6D
+ 15 01 00 00 00 00 02 E1 07
+ 15 01 00 00 00 00 02 E2 07
+ /* UD */
+ 15 01 00 00 00 00 02 29 D8
+ 15 01 00 00 00 00 02 2A 2A
+ /* CLK */
+ 15 01 00 00 00 00 02 4B 03
+ 15 01 00 00 00 00 02 4C 11
+ 15 01 00 00 00 00 02 4D 10
+ 15 01 00 00 00 00 02 4E 01
+ 15 01 00 00 00 00 02 4F 01
+ 15 01 00 00 00 00 02 50 10
+ 15 01 00 00 00 00 02 51 00
+ 15 01 00 00 00 00 02 52 80
+ 15 01 00 00 00 00 02 53 00
+ 15 01 00 00 00 00 02 56 00
+ 15 01 00 00 00 00 02 54 07
+ 15 01 00 00 00 00 02 58 07
+ 15 01 00 00 00 00 02 55 25
+ /* Reset XDONB */
+ 15 01 00 00 00 00 02 5B 43
+ 15 01 00 00 00 00 02 5C 00
+ 15 01 00 00 00 00 02 5F 73
+ 15 01 00 00 00 00 02 60 73
+ 15 01 00 00 00 00 02 63 22
+ 15 01 00 00 00 00 02 64 00
+ 15 01 00 00 00 00 02 67 08
+ 15 01 00 00 00 00 02 68 04
+ /* Resolution:1440x2560*/
+ 15 01 00 00 00 00 02 72 02
+ /* mux */
+ 15 01 00 00 00 00 02 7A 80
+ 15 01 00 00 00 00 02 7B 91
+ 15 01 00 00 00 00 02 7C D8
+ 15 01 00 00 00 00 02 7D 60
+ 15 01 00 00 00 00 02 7F 15
+ 15 01 00 00 00 00 02 75 15
+ /* ABOFF */
+ 15 01 00 00 00 00 02 B3 C0
+ 15 01 00 00 00 00 02 B4 00
+ 15 01 00 00 00 00 02 B5 00
+ /* Source EQ */
+ 15 01 00 00 00 00 02 78 00
+ 15 01 00 00 00 00 02 79 00
+ 15 01 00 00 00 00 02 80 00
+ 15 01 00 00 00 00 02 83 00
+ /* FP BP */
+ 15 01 00 00 00 00 02 93 0A
+ 15 01 00 00 00 00 02 94 0A
+ /* Inversion Type */
+ 15 01 00 00 00 00 02 8A 00
+ 15 01 00 00 00 00 02 9B FF
+ /* IMGSWAP =1 @PortSwap=1 */
+ 15 01 00 00 00 00 02 9D B0
+ 15 01 00 00 00 00 02 9F 63
+ 15 01 00 00 00 00 02 98 10
+ /* FRM */
+ 15 01 00 00 00 00 02 EC 00
+ /* CMD1 */
+ 15 01 00 00 00 00 02 ff 10
+ /* VBP+VSA=,VFP = 10H */
+ 15 01 00 00 00 00 04 3B 03 0A 0A
+ /* FTE on */
+ 15 01 00 00 00 00 02 35 00
+ /* EN_BK =1(auto black) */
+ 15 01 00 00 00 00 02 E5 01
+ /* CMD mode(10) VDO mode(03) */
+ 15 01 00 00 00 00 02 BB 10
+ /* Non Reload MTP */
+ 15 01 00 00 00 00 02 FB 01
+ /* SlpOut + DispOn */
+ 05 01 00 00 78 00 02 11 00
+ 05 01 00 00 78 00 02 29 00
+ ];
+ qcom,mdss-dsi-off-command = [05 01 00 00 78 00
+ 02 28 00 05 01 00 00 78 00 02 10 00];
+ qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+ };
};
};
};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi
index d6ef3d8..0d0e7f7 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi
@@ -15,177 +15,13 @@
qcom,mdss-dsi-panel-name =
"Dual nt35597 video mode dsi truly panel without DSC";
qcom,mdss-dsi-panel-type = "dsi_video_mode";
- qcom,mdss-dsi-panel-framerate = <60>;
qcom,mdss-dsi-virtual-channel-id = <0>;
qcom,mdss-dsi-stream = <0>;
- qcom,mdss-dsi-panel-width = <720>;
- qcom,mdss-dsi-panel-height = <2560>;
- qcom,mdss-dsi-h-front-porch = <100>;
- qcom,mdss-dsi-h-back-porch = <32>;
- qcom,mdss-dsi-h-pulse-width = <16>;
- qcom,mdss-dsi-h-sync-skew = <0>;
- qcom,mdss-dsi-v-back-porch = <7>;
- qcom,mdss-dsi-v-front-porch = <8>;
- qcom,mdss-dsi-v-pulse-width = <1>;
- qcom,mdss-dsi-bpp = <24>;
- qcom,mdss-dsi-underflow-color = <0x3ff>;
- qcom,mdss-dsi-border-color = <0>;
- qcom,mdss-dsi-on-command = [
- /* CMD2_P0 */
- 15 01 00 00 00 00 02 FF 20
- 15 01 00 00 00 00 02 FB 01
- 15 01 00 00 00 00 02 00 01
- 15 01 00 00 00 00 02 01 55
- 15 01 00 00 00 00 02 02 45
- 15 01 00 00 00 00 02 05 40
- 15 01 00 00 00 00 02 06 19
- 15 01 00 00 00 00 02 07 1E
- 15 01 00 00 00 00 02 0B 73
- 15 01 00 00 00 00 02 0C 73
- 15 01 00 00 00 00 02 0E B0
- 15 01 00 00 00 00 02 0F AE
- 15 01 00 00 00 00 02 11 B8
- 15 01 00 00 00 00 02 13 00
- 15 01 00 00 00 00 02 58 80
- 15 01 00 00 00 00 02 59 01
- 15 01 00 00 00 00 02 5A 00
- 15 01 00 00 00 00 02 5B 01
- 15 01 00 00 00 00 02 5C 80
- 15 01 00 00 00 00 02 5D 81
- 15 01 00 00 00 00 02 5E 00
- 15 01 00 00 00 00 02 5F 01
- 15 01 00 00 00 00 02 72 31
- 15 01 00 00 00 00 02 68 03
- /* CMD2_P4 */
- 15 01 00 00 00 00 02 FF 24
- 15 01 00 00 00 00 02 FB 01
- 15 01 00 00 00 00 02 00 1C
- 15 01 00 00 00 00 02 01 0B
- 15 01 00 00 00 00 02 02 0C
- 15 01 00 00 00 00 02 03 01
- 15 01 00 00 00 00 02 04 0F
- 15 01 00 00 00 00 02 05 10
- 15 01 00 00 00 00 02 06 10
- 15 01 00 00 00 00 02 07 10
- 15 01 00 00 00 00 02 08 89
- 15 01 00 00 00 00 02 09 8A
- 15 01 00 00 00 00 02 0A 13
- 15 01 00 00 00 00 02 0B 13
- 15 01 00 00 00 00 02 0C 15
- 15 01 00 00 00 00 02 0D 15
- 15 01 00 00 00 00 02 0E 17
- 15 01 00 00 00 00 02 0F 17
- 15 01 00 00 00 00 02 10 1C
- 15 01 00 00 00 00 02 11 0B
- 15 01 00 00 00 00 02 12 0C
- 15 01 00 00 00 00 02 13 01
- 15 01 00 00 00 00 02 14 0F
- 15 01 00 00 00 00 02 15 10
- 15 01 00 00 00 00 02 16 10
- 15 01 00 00 00 00 02 17 10
- 15 01 00 00 00 00 02 18 89
- 15 01 00 00 00 00 02 19 8A
- 15 01 00 00 00 00 02 1A 13
- 15 01 00 00 00 00 02 1B 13
- 15 01 00 00 00 00 02 1C 15
- 15 01 00 00 00 00 02 1D 15
- 15 01 00 00 00 00 02 1E 17
- 15 01 00 00 00 00 02 1F 17
- /* STV */
- 15 01 00 00 00 00 02 20 40
- 15 01 00 00 00 00 02 21 01
- 15 01 00 00 00 00 02 22 00
- 15 01 00 00 00 00 02 23 40
- 15 01 00 00 00 00 02 24 40
- 15 01 00 00 00 00 02 25 6D
- 15 01 00 00 00 00 02 26 40
- 15 01 00 00 00 00 02 27 40
- /* Vend */
- 15 01 00 00 00 00 02 E0 00
- 15 01 00 00 00 00 02 DC 21
- 15 01 00 00 00 00 02 DD 22
- 15 01 00 00 00 00 02 DE 07
- 15 01 00 00 00 00 02 DF 07
- 15 01 00 00 00 00 02 E3 6D
- 15 01 00 00 00 00 02 E1 07
- 15 01 00 00 00 00 02 E2 07
- /* UD */
- 15 01 00 00 00 00 02 29 D8
- 15 01 00 00 00 00 02 2A 2A
- /* CLK */
- 15 01 00 00 00 00 02 4B 03
- 15 01 00 00 00 00 02 4C 11
- 15 01 00 00 00 00 02 4D 10
- 15 01 00 00 00 00 02 4E 01
- 15 01 00 00 00 00 02 4F 01
- 15 01 00 00 00 00 02 50 10
- 15 01 00 00 00 00 02 51 00
- 15 01 00 00 00 00 02 52 80
- 15 01 00 00 00 00 02 53 00
- 15 01 00 00 00 00 02 56 00
- 15 01 00 00 00 00 02 54 07
- 15 01 00 00 00 00 02 58 07
- 15 01 00 00 00 00 02 55 25
- /* Reset XDONB */
- 15 01 00 00 00 00 02 5B 43
- 15 01 00 00 00 00 02 5C 00
- 15 01 00 00 00 00 02 5F 73
- 15 01 00 00 00 00 02 60 73
- 15 01 00 00 00 00 02 63 22
- 15 01 00 00 00 00 02 64 00
- 15 01 00 00 00 00 02 67 08
- 15 01 00 00 00 00 02 68 04
- /* Resolution:1440x2560*/
- 15 01 00 00 00 00 02 72 02
- /* mux */
- 15 01 00 00 00 00 02 7A 80
- 15 01 00 00 00 00 02 7B 91
- 15 01 00 00 00 00 02 7C D8
- 15 01 00 00 00 00 02 7D 60
- 15 01 00 00 00 00 02 7F 15
- 15 01 00 00 00 00 02 75 15
- /* ABOFF */
- 15 01 00 00 00 00 02 B3 C0
- 15 01 00 00 00 00 02 B4 00
- 15 01 00 00 00 00 02 B5 00
- /* Source EQ */
- 15 01 00 00 00 00 02 78 00
- 15 01 00 00 00 00 02 79 00
- 15 01 00 00 00 00 02 80 00
- 15 01 00 00 00 00 02 83 00
- /* FP BP */
- 15 01 00 00 00 00 02 93 0A
- 15 01 00 00 00 00 02 94 0A
- /* Inversion Type */
- 15 01 00 00 00 00 02 8A 00
- 15 01 00 00 00 00 02 9B FF
- /* IMGSWAP =1 @PortSwap=1 */
- 15 01 00 00 00 00 02 9D B0
- 15 01 00 00 00 00 02 9F 63
- 15 01 00 00 00 00 02 98 10
- /* FRM */
- 15 01 00 00 00 00 02 EC 00
- /* CMD1 */
- 15 01 00 00 00 00 02 FF 10
- /* VBP+VSA=,VFP = 10H */
- 15 01 00 00 00 00 04 3B 03 0A 0A
- /* FTE on */
- 15 01 00 00 00 00 02 35 00
- /* EN_BK =1(auto black) */
- 15 01 00 00 00 00 02 E5 01
- /* CMD mode(10) VDO mode(03) */
- 15 01 00 00 00 00 02 BB 03
- /* Non Reload MTP */
- 15 01 00 00 00 00 02 FB 01
- /* SlpOut + DispOn */
- 05 01 00 00 78 00 02 11 00
- 05 01 00 00 78 00 02 29 00
- ];
- qcom,mdss-dsi-off-command = [05 01 00 00 78 00 02 28 00
- 05 01 00 00 78 00 02 10 00];
- qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
- qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
- qcom,mdss-dsi-h-sync-pulse = <0>;
+ qcom,mdss-dsi-panel-hdr-enabled;
+ qcom,mdss-dsi-panel-hdr-color-primaries = <14500 15500 32000
+ 17000 15500 30000 8000 3000>;
+ qcom,mdss-dsi-panel-peak-brightness = <4200000>;
+ qcom,mdss-dsi-panel-blackness-level = <3230>;
qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
qcom,mdss-dsi-bllp-eof-power-mode;
qcom,mdss-dsi-bllp-power-mode;
@@ -197,14 +33,182 @@
qcom,mdss-dsi-dma-trigger = "trigger_sw";
qcom,mdss-dsi-mdp-trigger = "none";
qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 50>;
+ qcom,mdss-pan-physical-width-dimension = <74>;
+ qcom,mdss-pan-physical-height-dimension = <131>;
qcom,mdss-dsi-tx-eot-append;
+ qcom,mdss-dsi-underflow-color = <0x3ff>;
+ qcom,mdss-dsi-border-color = <0>;
+ qcom,mdss-dsi-bpp = <24>;
- qcom,config-select = <&dsi_dual_nt35597_truly_video_config0>;
-
- dsi_dual_nt35597_truly_video_config0: config0 {
- qcom,split-mode = "dualctl-split";
+ qcom,mdss-dsi-display-timings {
+ timing@0{
+ qcom,mdss-dsi-panel-width = <720>;
+ qcom,mdss-dsi-panel-height = <2560>;
+ qcom,mdss-dsi-h-front-porch = <100>;
+ qcom,mdss-dsi-h-back-porch = <32>;
+ qcom,mdss-dsi-h-pulse-width = <16>;
+ qcom,mdss-dsi-h-sync-skew = <0>;
+ qcom,mdss-dsi-v-back-porch = <7>;
+ qcom,mdss-dsi-v-front-porch = <8>;
+ qcom,mdss-dsi-v-pulse-width = <1>;
+ qcom,mdss-dsi-panel-framerate = <60>;
+ qcom,mdss-dsi-on-command = [
+ /* CMD2_P0 */
+ 15 01 00 00 00 00 02 FF 20
+ 15 01 00 00 00 00 02 FB 01
+ 15 01 00 00 00 00 02 00 01
+ 15 01 00 00 00 00 02 01 55
+ 15 01 00 00 00 00 02 02 45
+ 15 01 00 00 00 00 02 05 40
+ 15 01 00 00 00 00 02 06 19
+ 15 01 00 00 00 00 02 07 1E
+ 15 01 00 00 00 00 02 0B 73
+ 15 01 00 00 00 00 02 0C 73
+ 15 01 00 00 00 00 02 0E B0
+ 15 01 00 00 00 00 02 0F AE
+ 15 01 00 00 00 00 02 11 B8
+ 15 01 00 00 00 00 02 13 00
+ 15 01 00 00 00 00 02 58 80
+ 15 01 00 00 00 00 02 59 01
+ 15 01 00 00 00 00 02 5A 00
+ 15 01 00 00 00 00 02 5B 01
+ 15 01 00 00 00 00 02 5C 80
+ 15 01 00 00 00 00 02 5D 81
+ 15 01 00 00 00 00 02 5E 00
+ 15 01 00 00 00 00 02 5F 01
+ 15 01 00 00 00 00 02 72 31
+ 15 01 00 00 00 00 02 68 03
+ /* CMD2_P4 */
+ 15 01 00 00 00 00 02 FF 24
+ 15 01 00 00 00 00 02 FB 01
+ 15 01 00 00 00 00 02 00 1C
+ 15 01 00 00 00 00 02 01 0B
+ 15 01 00 00 00 00 02 02 0C
+ 15 01 00 00 00 00 02 03 01
+ 15 01 00 00 00 00 02 04 0F
+ 15 01 00 00 00 00 02 05 10
+ 15 01 00 00 00 00 02 06 10
+ 15 01 00 00 00 00 02 07 10
+ 15 01 00 00 00 00 02 08 89
+ 15 01 00 00 00 00 02 09 8A
+ 15 01 00 00 00 00 02 0A 13
+ 15 01 00 00 00 00 02 0B 13
+ 15 01 00 00 00 00 02 0C 15
+ 15 01 00 00 00 00 02 0D 15
+ 15 01 00 00 00 00 02 0E 17
+ 15 01 00 00 00 00 02 0F 17
+ 15 01 00 00 00 00 02 10 1C
+ 15 01 00 00 00 00 02 11 0B
+ 15 01 00 00 00 00 02 12 0C
+ 15 01 00 00 00 00 02 13 01
+ 15 01 00 00 00 00 02 14 0F
+ 15 01 00 00 00 00 02 15 10
+ 15 01 00 00 00 00 02 16 10
+ 15 01 00 00 00 00 02 17 10
+ 15 01 00 00 00 00 02 18 89
+ 15 01 00 00 00 00 02 19 8A
+ 15 01 00 00 00 00 02 1A 13
+ 15 01 00 00 00 00 02 1B 13
+ 15 01 00 00 00 00 02 1C 15
+ 15 01 00 00 00 00 02 1D 15
+ 15 01 00 00 00 00 02 1E 17
+ 15 01 00 00 00 00 02 1F 17
+ /* STV */
+ 15 01 00 00 00 00 02 20 40
+ 15 01 00 00 00 00 02 21 01
+ 15 01 00 00 00 00 02 22 00
+ 15 01 00 00 00 00 02 23 40
+ 15 01 00 00 00 00 02 24 40
+ 15 01 00 00 00 00 02 25 6D
+ 15 01 00 00 00 00 02 26 40
+ 15 01 00 00 00 00 02 27 40
+ /* Vend */
+ 15 01 00 00 00 00 02 E0 00
+ 15 01 00 00 00 00 02 DC 21
+ 15 01 00 00 00 00 02 DD 22
+ 15 01 00 00 00 00 02 DE 07
+ 15 01 00 00 00 00 02 DF 07
+ 15 01 00 00 00 00 02 E3 6D
+ 15 01 00 00 00 00 02 E1 07
+ 15 01 00 00 00 00 02 E2 07
+ /* UD */
+ 15 01 00 00 00 00 02 29 D8
+ 15 01 00 00 00 00 02 2A 2A
+ /* CLK */
+ 15 01 00 00 00 00 02 4B 03
+ 15 01 00 00 00 00 02 4C 11
+ 15 01 00 00 00 00 02 4D 10
+ 15 01 00 00 00 00 02 4E 01
+ 15 01 00 00 00 00 02 4F 01
+ 15 01 00 00 00 00 02 50 10
+ 15 01 00 00 00 00 02 51 00
+ 15 01 00 00 00 00 02 52 80
+ 15 01 00 00 00 00 02 53 00
+ 15 01 00 00 00 00 02 56 00
+ 15 01 00 00 00 00 02 54 07
+ 15 01 00 00 00 00 02 58 07
+ 15 01 00 00 00 00 02 55 25
+ /* Reset XDONB */
+ 15 01 00 00 00 00 02 5B 43
+ 15 01 00 00 00 00 02 5C 00
+ 15 01 00 00 00 00 02 5F 73
+ 15 01 00 00 00 00 02 60 73
+ 15 01 00 00 00 00 02 63 22
+ 15 01 00 00 00 00 02 64 00
+ 15 01 00 00 00 00 02 67 08
+ 15 01 00 00 00 00 02 68 04
+ /* Resolution:1440x2560*/
+ 15 01 00 00 00 00 02 72 02
+ /* mux */
+ 15 01 00 00 00 00 02 7A 80
+ 15 01 00 00 00 00 02 7B 91
+ 15 01 00 00 00 00 02 7C D8
+ 15 01 00 00 00 00 02 7D 60
+ 15 01 00 00 00 00 02 7F 15
+ 15 01 00 00 00 00 02 75 15
+ /* ABOFF */
+ 15 01 00 00 00 00 02 B3 C0
+ 15 01 00 00 00 00 02 B4 00
+ 15 01 00 00 00 00 02 B5 00
+ /* Source EQ */
+ 15 01 00 00 00 00 02 78 00
+ 15 01 00 00 00 00 02 79 00
+ 15 01 00 00 00 00 02 80 00
+ 15 01 00 00 00 00 02 83 00
+ /* FP BP */
+ 15 01 00 00 00 00 02 93 0A
+ 15 01 00 00 00 00 02 94 0A
+ /* Inversion Type */
+ 15 01 00 00 00 00 02 8A 00
+ 15 01 00 00 00 00 02 9B FF
+ /* IMGSWAP =1 @PortSwap=1 */
+ 15 01 00 00 00 00 02 9D B0
+ 15 01 00 00 00 00 02 9F 63
+ 15 01 00 00 00 00 02 98 10
+ /* FRM */
+ 15 01 00 00 00 00 02 EC 00
+ /* CMD1 */
+ 15 01 00 00 00 00 02 FF 10
+ /* VBP+VSA=,VFP = 10H */
+ 15 01 00 00 00 00 04 3B 03 0A 0A
+ /* FTE on */
+ 15 01 00 00 00 00 02 35 00
+ /* EN_BK =1(auto black) */
+ 15 01 00 00 00 00 02 E5 01
+ /* CMD mode(10) VDO mode(03) */
+ 15 01 00 00 00 00 02 BB 03
+ /* Non Reload MTP */
+ 15 01 00 00 00 00 02 FB 01
+ /* SlpOut + DispOn */
+ 05 01 00 00 78 00 02 11 00
+ 05 01 00 00 78 00 02 29 00
+ ];
+ qcom,mdss-dsi-off-command = [05 01 00 00 78 00
+ 02 28 00 05 01 00 00 78 00 02 10 00];
+ qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+ qcom,mdss-dsi-h-sync-pulse = <0>;
+ };
};
-
-
};
};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-1080p-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-1080p-cmd.dtsi
index c2b054a..aebc8b9 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-1080p-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-1080p-cmd.dtsi
@@ -16,48 +16,15 @@
qcom,mdss-dsi-panel-controller = <&mdss_dsi0>;
qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
qcom,mdss-dsi-panel-destination = "display_1";
- qcom,mdss-dsi-panel-framerate = <60>;
qcom,mdss-dsi-panel-clockrate = <850000000>;
qcom,mdss-dsi-virtual-channel-id = <0>;
qcom,mdss-dsi-stream = <0>;
- qcom,mdss-dsi-panel-width = <1080>;
- qcom,mdss-dsi-panel-height = <1920>;
- qcom,mdss-dsi-h-front-porch = <0>;
- qcom,mdss-dsi-h-back-porch = <0>;
- qcom,mdss-dsi-h-pulse-width = <0>;
- qcom,mdss-dsi-h-sync-skew = <0>;
- qcom,mdss-dsi-v-back-porch = <0>;
- qcom,mdss-dsi-v-front-porch = <0>;
- qcom,mdss-dsi-v-pulse-width = <0>;
- qcom,mdss-dsi-h-left-border = <0>;
- qcom,mdss-dsi-h-right-border = <0>;
- qcom,mdss-dsi-v-top-border = <0>;
- qcom,mdss-dsi-v-bottom-border = <0>;
qcom,mdss-dsi-bpp = <24>;
qcom,mdss-dsi-underflow-color = <0xff>;
qcom,mdss-dsi-border-color = <0>;
qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
- qcom,mdss-dsi-on-command = [
- 15 01 00 00 00 00 02 bb 10
- 15 01 00 00 00 00 02 b0 03
- 05 01 00 00 78 00 01 11
- 15 01 00 00 00 00 02 51 ff
- 15 01 00 00 00 00 02 53 24
- 15 01 00 00 00 00 02 ff 23
- 15 01 00 00 00 00 02 08 05
- 15 01 00 00 00 00 02 46 90
- 15 01 00 00 00 00 02 ff 10
- 15 01 00 00 00 00 02 ff f0
- 15 01 00 00 00 00 02 92 01
- 15 01 00 00 00 00 02 ff 10
- 15 01 00 00 00 00 02 35 00 /* enable TE generation */
- 05 01 00 00 28 00 01 29];
- qcom,mdss-dsi-off-command = [
- 05 01 00 00 10 00 01 28
- 05 01 00 00 40 00 01 10];
- qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
- qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
- qcom,mdss-dsi-h-sync-pulse = <0>;
+ qcom,mdss-pan-physical-width-dimension = <64>;
+ qcom,mdss-pan-physical-height-dimension = <117>;
qcom,mdss-dsi-traffic-mode = "burst_mode";
qcom,mdss-dsi-bllp-eof-power-mode;
qcom,mdss-dsi-bllp-power-mode;
@@ -74,5 +41,46 @@
qcom,mdss-dsi-te-dcs-command = <1>;
qcom,mdss-dsi-te-check-enable;
qcom,mdss-dsi-te-using-te-pin;
+
+ qcom,mdss-dsi-display-timings {
+ timing@0{
+ qcom,mdss-dsi-panel-width = <1080>;
+ qcom,mdss-dsi-panel-height = <1920>;
+ qcom,mdss-dsi-h-front-porch = <0>;
+ qcom,mdss-dsi-h-back-porch = <0>;
+ qcom,mdss-dsi-h-pulse-width = <0>;
+ qcom,mdss-dsi-h-sync-skew = <0>;
+ qcom,mdss-dsi-v-back-porch = <0>;
+ qcom,mdss-dsi-v-front-porch = <0>;
+ qcom,mdss-dsi-v-pulse-width = <0>;
+ qcom,mdss-dsi-h-left-border = <0>;
+ qcom,mdss-dsi-h-right-border = <0>;
+ qcom,mdss-dsi-v-top-border = <0>;
+ qcom,mdss-dsi-v-bottom-border = <0>;
+ qcom,mdss-dsi-panel-framerate = <60>;
+ qcom,mdss-dsi-on-command = [
+ 15 01 00 00 00 00 02 bb 10
+ 15 01 00 00 00 00 02 b0 03
+ 05 01 00 00 78 00 01 11
+ 15 01 00 00 00 00 02 51 ff
+ 15 01 00 00 00 00 02 53 24
+ 15 01 00 00 00 00 02 ff 23
+ 15 01 00 00 00 00 02 08 05
+ 15 01 00 00 00 00 02 46 90
+ 15 01 00 00 00 00 02 ff 10
+ 15 01 00 00 00 00 02 ff f0
+ 15 01 00 00 00 00 02 92 01
+ 15 01 00 00 00 00 02 ff 10
+ /* enable TE generation */
+ 15 01 00 00 00 00 02 35 00
+ 05 01 00 00 28 00 01 29];
+ qcom,mdss-dsi-off-command = [
+ 05 01 00 00 10 00 01 28
+ 05 01 00 00 40 00 01 10];
+ qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+ qcom,mdss-dsi-h-sync-pulse = <0>;
+ };
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi
index 061f1d9..ce849c6 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi
@@ -14,23 +14,10 @@
dsi_sharp_4k_dsc_cmd: qcom,mdss_dsi_sharp_4k_dsc_cmd {
qcom,mdss-dsi-panel-name = "Sharp 4k cmd mode dsc dsi panel";
qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
- qcom,mdss-dsi-panel-framerate = <60>;
qcom,mdss-dsi-virtual-channel-id = <0>;
qcom,mdss-dsi-stream = <0>;
- qcom,mdss-dsi-panel-width = <1080>;
- qcom,mdss-dsi-panel-height = <3840>;
- qcom,mdss-dsi-h-front-porch = <30>;
- qcom,mdss-dsi-h-back-porch = <100>;
- qcom,mdss-dsi-h-pulse-width = <4>;
- qcom,mdss-dsi-h-sync-skew = <0>;
- qcom,mdss-dsi-v-back-porch = <7>;
- qcom,mdss-dsi-v-front-porch = <8>;
- qcom,mdss-dsi-v-pulse-width = <1>;
qcom,mdss-dsi-bpp = <24>;
qcom,mdss-dsi-border-color = <0>;
- qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
- qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
- qcom,mdss-dsi-h-sync-pulse = <0>;
qcom,mdss-dsi-traffic-mode = "burst_mode";
qcom,mdss-dsi-bllp-eof-power-mode;
qcom,mdss-dsi-bllp-power-mode;
@@ -40,7 +27,9 @@
qcom,mdss-dsi-lane-3-state;
qcom,mdss-dsi-dma-trigger = "trigger_sw";
qcom,mdss-dsi-mdp-trigger = "none";
- qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 20>;
+ qcom,mdss-dsi-reset-sequence = <1 100>, <0 100>, <1 100>;
+ qcom,mdss-pan-physical-width-dimension = <71>;
+ qcom,mdss-pan-physical-height-dimension = <129>;
qcom,mdss-dsi-te-pin-select = <1>;
qcom,mdss-dsi-wr-mem-start = <0x2c>;
qcom,mdss-dsi-wr-mem-continue = <0x3c>;
@@ -49,39 +38,61 @@
qcom,mdss-dsi-te-using-te-pin;
qcom,dcs-cmd-by-left;
qcom,mdss-dsi-tx-eot-append;
-
qcom,adjust-timer-wakeup-ms = <1>;
- qcom,mdss-dsi-on-command = [
- 39 01 00 00 00 00 11 91 09 20 00 20 02 00 03 1c 04 21 00
- 0f 03 19 01 97
- 39 01 00 00 00 00 03 92 10 f0
- 15 01 00 00 00 00 02 90 03
- 15 01 00 00 00 00 02 03 01
- 39 01 00 00 00 00 06 f0 55 aa 52 08 04
- 15 01 00 00 00 00 02 c0 03
- 39 01 00 00 00 00 06 f0 55 aa 52 08 07
- 15 01 00 00 00 00 02 ef 01
- 39 01 00 00 00 00 06 f0 55 aa 52 08 00
- 15 01 00 00 00 00 02 b4 01
- 15 01 00 00 00 00 02 35 00
- 39 01 00 00 00 00 06 f0 55 aa 52 08 01
- 39 01 00 00 00 00 05 ff aa 55 a5 80
- 15 01 00 00 00 00 02 6f 01
- 15 01 00 00 00 00 02 f3 10
- 39 01 00 00 00 00 05 ff aa 55 a5 00
- 05 01 00 00 78 00 01 11 /* sleep out + delay 120ms */
- 05 01 00 00 78 00 01 29 /* display on + delay 120ms */
- ];
- qcom,mdss-dsi-off-command = [05 01 00 00 78 00 02 28 00
- 05 01 00 00 78 00 02 10 00];
+ qcom,mdss-dsi-display-timings {
+ timing@0{
+ qcom,mdss-dsi-panel-width = <1080>;
+ qcom,mdss-dsi-panel-height = <3840>;
+ qcom,mdss-dsi-h-front-porch = <30>;
+ qcom,mdss-dsi-h-back-porch = <100>;
+ qcom,mdss-dsi-h-pulse-width = <4>;
+ qcom,mdss-dsi-h-sync-skew = <0>;
+ qcom,mdss-dsi-v-back-porch = <7>;
+ qcom,mdss-dsi-v-front-porch = <8>;
+ qcom,mdss-dsi-v-pulse-width = <1>;
+ qcom,mdss-dsi-h-sync-pulse = <0>;
+ qcom,mdss-dsi-panel-framerate = <60>;
+ qcom,mdss-dsi-panel-jitter = <0x8 0xa>;
- qcom,compression-mode = "dsc";
- qcom,mdss-dsc-slice-height = <32>;
- qcom,mdss-dsc-slice-width = <1080>;
- qcom,mdss-dsc-slice-per-pkt = <1>;
- qcom,mdss-dsc-bit-per-component = <8>;
- qcom,mdss-dsc-bit-per-pixel = <8>;
- qcom,mdss-dsc-block-prediction-enable;
+ qcom,mdss-dsi-on-command = [
+ 39 01 00 00 00 00 11 91 09 20 00 20 02
+ 00 03 1c 04 21 00
+ 0f 03 19 01 97
+ 39 01 00 00 00 00 03 92 10 f0
+ 15 01 00 00 00 00 02 90 03
+ 15 01 00 00 00 00 02 03 01
+ 39 01 00 00 00 00 06 f0 55 aa 52 08 04
+ 15 01 00 00 00 00 02 c0 03
+ 39 01 00 00 00 00 06 f0 55 aa 52 08 07
+ 15 01 00 00 00 00 02 ef 01
+ 39 01 00 00 00 00 06 f0 55 aa 52 08 00
+ 15 01 00 00 00 00 02 b4 01
+ 15 01 00 00 00 00 02 35 00
+ 39 01 00 00 00 00 06 f0 55 aa 52 08 01
+ 39 01 00 00 00 00 05 ff aa 55 a5 80
+ 15 01 00 00 00 00 02 6f 01
+ 15 01 00 00 00 00 02 f3 10
+ 39 01 00 00 00 00 05 ff aa 55 a5 00
+ /* sleep out + delay 120ms */
+ 05 01 00 00 78 00 01 11
+ /* display on + delay 120ms */
+ 05 01 00 00 78 00 01 29
+ ];
+ qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-off-command =
+ [05 01 00 00 78 00 02 28 00
+ 05 01 00 00 78 00 02 10 00];
+ qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+
+ qcom,compression-mode = "dsc";
+ qcom,mdss-dsc-slice-height = <32>;
+ qcom,mdss-dsc-slice-width = <1080>;
+ qcom,mdss-dsc-slice-per-pkt = <1>;
+ qcom,mdss-dsc-bit-per-component = <8>;
+ qcom,mdss-dsc-bit-per-pixel = <8>;
+ qcom,mdss-dsc-block-prediction-enable;
+ };
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi
index b314e99..d3411c8 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi
@@ -14,23 +14,10 @@
dsi_sharp_4k_dsc_video: qcom,mdss_dsi_sharp_4k_dsc_video {
qcom,mdss-dsi-panel-name = "Sharp 4k video mode dsc dsi panel";
qcom,mdss-dsi-panel-type = "dsi_video_mode";
- qcom,mdss-dsi-panel-framerate = <60>;
qcom,mdss-dsi-virtual-channel-id = <0>;
qcom,mdss-dsi-stream = <0>;
- qcom,mdss-dsi-panel-width = <1080>;
- qcom,mdss-dsi-panel-height = <3840>;
- qcom,mdss-dsi-h-front-porch = <30>;
- qcom,mdss-dsi-h-back-porch = <100>;
- qcom,mdss-dsi-h-pulse-width = <4>;
- qcom,mdss-dsi-h-sync-skew = <0>;
- qcom,mdss-dsi-v-back-porch = <7>;
- qcom,mdss-dsi-v-front-porch = <8>;
- qcom,mdss-dsi-v-pulse-width = <1>;
qcom,mdss-dsi-bpp = <24>;
qcom,mdss-dsi-border-color = <0>;
- qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
- qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
- qcom,mdss-dsi-h-sync-pulse = <0>;
qcom,mdss-dsi-traffic-mode = "burst_mode";
qcom,mdss-dsi-bllp-eof-power-mode;
qcom,mdss-dsi-bllp-power-mode;
@@ -41,40 +28,64 @@
qcom,mdss-dsi-dma-trigger = "trigger_sw";
qcom,mdss-dsi-mdp-trigger = "none";
qcom,mdss-dsi-reset-sequence = <1 100>, <0 100>, <1 100>;
+ qcom,mdss-pan-physical-width-dimension = <71>;
+ qcom,mdss-pan-physical-height-dimension = <129>;
qcom,mdss-dsi-tx-eot-append;
qcom,adjust-timer-wakeup-ms = <1>;
- qcom,mdss-dsi-on-command = [
- 39 01 00 00 00 00 11 91 09 20 00 20 02 00 03 1c 04 21 00
- 0f 03 19 01 97
- 39 01 00 00 00 00 03 92 10 f0
- 15 01 00 00 00 00 02 90 03
- 15 01 00 00 00 00 02 03 01
- 39 01 00 00 00 00 06 f0 55 aa 52 08 04
- 15 01 00 00 00 00 02 c0 03
- 39 01 00 00 00 00 06 f0 55 aa 52 08 07
- 15 01 00 00 00 00 02 ef 01
- 39 01 00 00 00 00 06 f0 55 aa 52 08 00
- 15 01 00 00 00 00 02 b4 10
- 15 01 00 00 00 00 02 35 00
- 39 01 00 00 00 00 06 f0 55 aa 52 08 01
- 39 01 00 00 00 00 05 ff aa 55 a5 80
- 15 01 00 00 00 00 02 6f 01
- 15 01 00 00 00 00 02 f3 10
- 39 01 00 00 00 00 05 ff aa 55 a5 00
- 05 01 00 00 78 00 01 11 /* sleep out + delay 120ms */
- 05 01 00 00 78 00 01 29 /* display on + delay 120ms */
- ];
- qcom,mdss-dsi-off-command = [05 01 00 00 78 00 02 28 00
- 05 01 00 00 78 00 02 10 00];
+ qcom,mdss-dsi-display-timings {
+ timing@0{
+ qcom,mdss-dsi-panel-width = <1080>;
+ qcom,mdss-dsi-panel-height = <3840>;
+ qcom,mdss-dsi-h-front-porch = <30>;
+ qcom,mdss-dsi-h-back-porch = <100>;
+ qcom,mdss-dsi-h-pulse-width = <4>;
+ qcom,mdss-dsi-h-sync-skew = <0>;
+ qcom,mdss-dsi-v-back-porch = <7>;
+ qcom,mdss-dsi-v-front-porch = <8>;
+ qcom,mdss-dsi-v-pulse-width = <1>;
+ qcom,mdss-dsi-h-sync-pulse = <0>;
+ qcom,mdss-dsi-panel-framerate = <60>;
- qcom,compression-mode = "dsc";
- qcom,mdss-dsc-slice-height = <32>;
- qcom,mdss-dsc-slice-width = <1080>;
- qcom,mdss-dsc-slice-per-pkt = <1>;
- qcom,mdss-dsc-bit-per-component = <8>;
- qcom,mdss-dsc-bit-per-pixel = <8>;
- qcom,mdss-dsc-block-prediction-enable;
+ qcom,mdss-dsi-on-command = [
+ 39 01 00 00 00 00 11 91 09 20 00 20 02
+ 00 03 1c 04 21 00
+ 0f 03 19 01 97
+ 39 01 00 00 00 00 03 92 10 f0
+ 15 01 00 00 00 00 02 90 03
+ 15 01 00 00 00 00 02 03 01
+ 39 01 00 00 00 00 06 f0 55 aa 52 08 04
+ 15 01 00 00 00 00 02 c0 03
+ 39 01 00 00 00 00 06 f0 55 aa 52 08 07
+ 15 01 00 00 00 00 02 ef 01
+ 39 01 00 00 00 00 06 f0 55 aa 52 08 00
+ 15 01 00 00 00 00 02 b4 10
+ 15 01 00 00 00 00 02 35 00
+ 39 01 00 00 00 00 06 f0 55 aa 52 08 01
+ 39 01 00 00 00 00 05 ff aa 55 a5 80
+ 15 01 00 00 00 00 02 6f 01
+ 15 01 00 00 00 00 02 f3 10
+ 39 01 00 00 00 00 05 ff aa 55 a5 00
+ /* sleep out + delay 120ms */
+ 05 01 00 00 78 00 01 11
+ /* display on + delay 120ms */
+ 05 01 00 00 78 00 01 29
+ ];
+ qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-off-command =
+ [05 01 00 00 78 00 02 28 00
+ 05 01 00 00 78 00 02 10 00];
+ qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+
+ qcom,compression-mode = "dsc";
+ qcom,mdss-dsc-slice-height = <32>;
+ qcom,mdss-dsc-slice-width = <1080>;
+ qcom,mdss-dsc-slice-per-pkt = <1>;
+ qcom,mdss-dsc-bit-per-component = <8>;
+ qcom,mdss-dsc-bit-per-pixel = <8>;
+ qcom,mdss-dsc-block-prediction-enable;
+ };
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dualmipi-1080p-120hz.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dualmipi-1080p-120hz.dtsi
index 2071649..6dc621e 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dualmipi-1080p-120hz.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dualmipi-1080p-120hz.dtsi
@@ -15,595 +15,12 @@
qcom,mdss-dsi-panel-name =
"sharp 1080p 120hz dual dsi cmd mode panel";
qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
- qcom,mdss-dsi-panel-framerate = <120>;
qcom,mdss-dsi-virtual-channel-id = <0>;
qcom,mdss-dsi-stream = <0>;
- qcom,mdss-dsi-panel-width = <540>;
- qcom,mdss-dsi-panel-height = <1920>;
- qcom,mdss-dsi-h-front-porch = <28>;
- qcom,mdss-dsi-h-back-porch = <4>;
- qcom,mdss-dsi-h-pulse-width = <4>;
- qcom,mdss-dsi-h-sync-skew = <0>;
- qcom,mdss-dsi-v-back-porch = <12>;
- qcom,mdss-dsi-v-front-porch = <12>;
- qcom,mdss-dsi-v-pulse-width = <2>;
- qcom,mdss-dsi-h-left-border = <0>;
- qcom,mdss-dsi-h-right-border = <0>;
- qcom,mdss-dsi-v-top-border = <0>;
- qcom,mdss-dsi-v-bottom-border = <0>;
qcom,mdss-dsi-bpp = <24>;
qcom,mdss-dsi-underflow-color = <0xff>;
qcom,mdss-dsi-border-color = <0>;
qcom,mdss-dsi-reset-sequence = <1 20>, <0 1>, <1 10>;
- qcom,mdss-dsi-on-command = [15 01 00 00 00 00 02 ff 10
- 15 01 00 00 00 00 02 fb 01
- 15 01 00 00 00 00 02 ba 07
- 15 01 00 00 00 00 02 c0 00
- 15 01 00 00 00 00 02 bb 10
- 15 01 00 00 00 00 02 d9 00
- 15 01 00 00 00 00 02 ef 70
- 15 01 00 00 00 00 02 f7 80
- 39 01 00 00 00 00 06 3b 03 0e 0c 08 1c
- 15 01 00 00 00 00 02 e9 0e
- 15 01 00 00 00 00 02 ea 0c
- 15 01 00 00 00 00 02 35 00
- 15 01 00 00 00 00 02 c0 00
- 15 01 00 00 00 00 02 ff 20
- 15 01 00 00 00 00 02 fb 01
- 15 01 00 00 00 00 02 59 6a
- 15 01 00 00 00 00 02 0b 1b
- 15 01 00 00 00 00 02 61 f7
- 15 01 00 00 00 00 02 62 6c
- 15 01 00 00 00 00 02 00 01
- 15 01 00 00 00 00 02 01 55
- 15 01 00 00 00 00 02 04 c8
- 15 01 00 00 00 00 02 05 1a
- 15 01 00 00 00 00 02 0d 93
- 15 01 00 00 00 00 02 0e 93
- 15 01 00 00 00 00 02 0f 7e
- 15 01 00 00 00 00 02 06 69
- 15 01 00 00 00 00 02 07 bc
- 15 01 00 00 00 00 02 10 03
- 15 01 00 00 00 00 02 11 64
- 15 01 00 00 00 00 02 12 5a
- 15 01 00 00 00 00 02 13 40
- 15 01 00 00 00 00 02 14 40
- 15 01 00 00 00 00 02 15 00
- 15 01 00 00 00 00 02 33 13
- 15 01 00 00 00 00 02 5a 40
- 15 01 00 00 00 00 02 5b 40
- 15 01 00 00 00 00 02 5e 80
- 15 01 00 00 00 00 02 ff 24
- 15 01 00 00 00 00 02 fb 01
- 15 01 00 00 00 00 02 00 80
- 15 01 00 00 00 00 02 14 80
- 15 01 00 00 00 00 02 01 80
- 15 01 00 00 00 00 02 15 80
- 15 01 00 00 00 00 02 02 80
- 15 01 00 00 00 00 02 16 80
- 15 01 00 00 00 00 02 03 0a
- 15 01 00 00 00 00 02 17 0c
- 15 01 00 00 00 00 02 04 06
- 15 01 00 00 00 00 02 18 08
- 15 01 00 00 00 00 02 05 80
- 15 01 00 00 00 00 02 19 80
- 15 01 00 00 00 00 02 06 80
- 15 01 00 00 00 00 02 1a 80
- 15 01 00 00 00 00 02 07 80
- 15 01 00 00 00 00 02 1b 80
- 15 01 00 00 00 00 02 08 80
- 15 01 00 00 00 00 02 1c 80
- 15 01 00 00 00 00 02 09 80
- 15 01 00 00 00 00 02 1d 80
- 15 01 00 00 00 00 02 0a 80
- 15 01 00 00 00 00 02 1e 80
- 15 01 00 00 00 00 02 0b 1a
- 15 01 00 00 00 00 02 1f 1b
- 15 01 00 00 00 00 02 0c 16
- 15 01 00 00 00 00 02 20 17
- 15 01 00 00 00 00 02 0d 1c
- 15 01 00 00 00 00 02 21 1d
- 15 01 00 00 00 00 02 0e 18
- 15 01 00 00 00 00 02 22 19
- 15 01 00 00 00 00 02 0f 0e
- 15 01 00 00 00 00 02 23 10
- 15 01 00 00 00 00 02 10 80
- 15 01 00 00 00 00 02 24 80
- 15 01 00 00 00 00 02 11 80
- 15 01 00 00 00 00 02 25 80
- 15 01 00 00 00 00 02 12 80
- 15 01 00 00 00 00 02 26 80
- 15 01 00 00 00 00 02 13 80
- 15 01 00 00 00 00 02 27 80
- 15 01 00 00 00 00 02 74 ff
- 15 01 00 00 00 00 02 75 ff
- 15 01 00 00 00 00 02 8d 00
- 15 01 00 00 00 00 02 8e 00
- 15 01 00 00 00 00 02 8f 9c
- 15 01 00 00 00 00 02 90 0c
- 15 01 00 00 00 00 02 91 0e
- 15 01 00 00 00 00 02 d6 00
- 15 01 00 00 00 00 02 d7 20
- 15 01 00 00 00 00 02 d8 00
- 15 01 00 00 00 00 02 d9 88
- 15 01 00 00 00 00 02 e5 05
- 15 01 00 00 00 00 02 e6 10
- 15 01 00 00 00 00 02 54 06
- 15 01 00 00 00 00 02 55 05
- 15 01 00 00 00 00 02 56 04
- 15 01 00 00 00 00 02 58 03
- 15 01 00 00 00 00 02 59 33
- 15 01 00 00 00 00 02 5a 33
- 15 01 00 00 00 00 02 5b 01
- 15 01 00 00 00 00 02 5c 00
- 15 01 00 00 00 00 02 5d 01
- 15 01 00 00 00 00 02 5e 0a
- 15 01 00 00 00 00 02 5f 0a
- 15 01 00 00 00 00 02 60 0a
- 15 01 00 00 00 00 02 61 0a
- 15 01 00 00 00 00 02 62 10
- 15 01 00 00 00 00 02 63 01
- 15 01 00 00 00 00 02 64 00
- 15 01 00 00 00 00 02 65 00
- 15 01 00 00 00 00 02 ef 00
- 15 01 00 00 00 00 02 f0 00
- 15 01 00 00 00 00 02 6d 20
- 15 01 00 00 00 00 02 66 44
- 15 01 00 00 00 00 02 68 01
- 15 01 00 00 00 00 02 69 00
- 15 01 00 00 00 00 02 67 11
- 15 01 00 00 00 00 02 6a 06
- 15 01 00 00 00 00 02 6b 31
- 15 01 00 00 00 00 02 6c 90
- 15 01 00 00 00 00 02 ab c3
- 15 01 00 00 00 00 02 b1 49
- 15 01 00 00 00 00 02 aa 80
- 15 01 00 00 00 00 02 b0 90
- 15 01 00 00 00 00 02 b2 a4
- 15 01 00 00 00 00 02 b3 00
- 15 01 00 00 00 00 02 b4 23
- 15 01 00 00 00 00 02 b5 00
- 15 01 00 00 00 00 02 b6 00
- 15 01 00 00 00 00 02 b7 00
- 15 01 00 00 00 00 02 b8 00
- 15 01 00 00 00 00 02 b9 00
- 15 01 00 00 00 00 02 ba 00
- 15 01 00 00 00 00 02 bb 00
- 15 01 00 00 00 00 02 bc 00
- 15 01 00 00 00 00 02 bd 00
- 15 01 00 00 00 00 02 be 00
- 15 01 00 00 00 00 02 bf 00
- 15 01 00 00 00 00 02 c0 00
- 15 01 00 00 00 00 02 c7 40
- 15 01 00 00 00 00 02 c9 00
- 15 01 00 00 00 00 02 c1 2a
- 15 01 00 00 00 00 02 c2 2a
- 15 01 00 00 00 00 02 c3 00
- 15 01 00 00 00 00 02 c4 00
- 15 01 00 00 00 00 02 c5 00
- 15 01 00 00 00 00 02 c6 00
- 15 01 00 00 00 00 02 c8 ab
- 15 01 00 00 00 00 02 ca 00
- 15 01 00 00 00 00 02 cb 00
- 15 01 00 00 00 00 02 cc 20
- 15 01 00 00 00 00 02 cd 40
- 15 01 00 00 00 00 02 ce a8
- 15 01 00 00 00 00 02 cf a8
- 15 01 00 00 00 00 02 d0 00
- 15 01 00 00 00 00 02 d1 00
- 15 01 00 00 00 00 02 d2 00
- 15 01 00 00 00 00 02 d3 00
- 15 01 00 00 00 00 02 af 01
- 15 01 00 00 00 00 02 a4 1e
- 15 01 00 00 00 00 02 95 41
- 15 01 00 00 00 00 02 96 03
- 15 01 00 00 00 00 02 98 00
- 15 01 00 00 00 00 02 9a 9a
- 15 01 00 00 00 00 02 9b 03
- 15 01 00 00 00 00 02 9d 80
- 15 01 00 00 00 00 02 ff 26
- 15 01 00 00 00 00 02 fb 01
- 15 01 00 00 00 00 02 fa d0
- 15 01 00 00 00 00 02 6b 80
- 15 01 00 00 00 00 02 6c 5c
- 15 01 00 00 00 00 02 6d 0c
- 15 01 00 00 00 00 02 6e 0e
- 15 01 00 00 00 00 02 58 01
- 15 01 00 00 00 00 02 59 15
- 15 01 00 00 00 00 02 5a 01
- 15 01 00 00 00 00 02 5b 00
- 15 01 00 00 00 00 02 5c 01
- 15 01 00 00 00 00 02 5d 2b
- 15 01 00 00 00 00 02 74 00
- 15 01 00 00 00 00 02 75 ba
- 15 01 00 00 00 00 02 81 0a
- 15 01 00 00 00 00 02 4e 81
- 15 01 00 00 00 00 02 4f 83
- 15 01 00 00 00 00 02 51 00
- 15 01 00 00 00 00 02 53 4d
- 15 01 00 00 00 00 02 54 03
- 15 01 00 00 00 00 02 ff e0
- 15 01 00 00 00 00 02 fb 01
- 15 01 00 00 00 00 02 b2 81
- 15 01 00 00 00 00 02 62 28
- 15 01 00 00 00 00 02 a2 09
- 15 01 00 00 00 00 02 b3 01
- 15 01 00 00 00 00 02 ed 00
- 15 01 00 00 00 00 02 ff 10
- 05 01 00 00 78 00 01 11
- 15 01 00 00 00 00 02 ff 20
- 15 01 00 00 00 00 02 75 00
- 15 01 00 00 00 00 02 76 71
- 15 01 00 00 00 00 02 77 00
- 15 01 00 00 00 00 02 78 84
- 15 01 00 00 00 00 02 79 00
- 15 01 00 00 00 00 02 7a a5
- 15 01 00 00 00 00 02 7b 00
- 15 01 00 00 00 00 02 7c bb
- 15 01 00 00 00 00 02 7d 00
- 15 01 00 00 00 00 02 7e ce
- 15 01 00 00 00 00 02 7f 00
- 15 01 00 00 00 00 02 80 e0
- 15 01 00 00 00 00 02 81 00
- 15 01 00 00 00 00 02 82 ef
- 15 01 00 00 00 00 02 83 00
- 15 01 00 00 00 00 02 84 ff
- 15 01 00 00 00 00 02 85 01
- 15 01 00 00 00 00 02 86 0b
- 15 01 00 00 00 00 02 87 01
- 15 01 00 00 00 00 02 88 38
- 15 01 00 00 00 00 02 89 01
- 15 01 00 00 00 00 02 8a 5b
- 15 01 00 00 00 00 02 8b 01
- 15 01 00 00 00 00 02 8c 95
- 15 01 00 00 00 00 02 8d 01
- 15 01 00 00 00 00 02 8e c4
- 15 01 00 00 00 00 02 8f 02
- 15 01 00 00 00 00 02 90 0d
- 15 01 00 00 00 00 02 91 02
- 15 01 00 00 00 00 02 92 4a
- 15 01 00 00 00 00 02 93 02
- 15 01 00 00 00 00 02 94 4c
- 15 01 00 00 00 00 02 95 02
- 15 01 00 00 00 00 02 96 85
- 15 01 00 00 00 00 02 97 02
- 15 01 00 00 00 00 02 98 c3
- 15 01 00 00 00 00 02 99 02
- 15 01 00 00 00 00 02 9a e9
- 15 01 00 00 00 00 02 9b 03
- 15 01 00 00 00 00 02 9c 16
- 15 01 00 00 00 00 02 9d 03
- 15 01 00 00 00 00 02 9e 34
- 15 01 00 00 00 00 02 9f 03
- 15 01 00 00 00 00 02 a0 56
- 15 01 00 00 00 00 02 a2 03
- 15 01 00 00 00 00 02 a3 62
- 15 01 00 00 00 00 02 a4 03
- 15 01 00 00 00 00 02 a5 6c
- 15 01 00 00 00 00 02 a6 03
- 15 01 00 00 00 00 02 a7 74
- 15 01 00 00 00 00 02 a9 03
- 15 01 00 00 00 00 02 aa 80
- 15 01 00 00 00 00 02 ab 03
- 15 01 00 00 00 00 02 ac 89
- 15 01 00 00 00 00 02 ad 03
- 15 01 00 00 00 00 02 ae 8b
- 15 01 00 00 00 00 02 af 03
- 15 01 00 00 00 00 02 b0 8d
- 15 01 00 00 00 00 02 b1 03
- 15 01 00 00 00 00 02 b2 8e
- 15 01 00 00 00 00 02 b3 00
- 15 01 00 00 00 00 02 b4 71
- 15 01 00 00 00 00 02 b5 00
- 15 01 00 00 00 00 02 b6 84
- 15 01 00 00 00 00 02 b7 00
- 15 01 00 00 00 00 02 b8 a5
- 15 01 00 00 00 00 02 b9 00
- 15 01 00 00 00 00 02 ba bb
- 15 01 00 00 00 00 02 bb 00
- 15 01 00 00 00 00 02 bc ce
- 15 01 00 00 00 00 02 bd 00
- 15 01 00 00 00 00 02 be e0
- 15 01 00 00 00 00 02 bf 00
- 15 01 00 00 00 00 02 c0 ef
- 15 01 00 00 00 00 02 c1 00
- 15 01 00 00 00 00 02 c2 ff
- 15 01 00 00 00 00 02 c3 01
- 15 01 00 00 00 00 02 c4 0b
- 15 01 00 00 00 00 02 c5 01
- 15 01 00 00 00 00 02 c6 38
- 15 01 00 00 00 00 02 c7 01
- 15 01 00 00 00 00 02 c8 5b
- 15 01 00 00 00 00 02 c9 01
- 15 01 00 00 00 00 02 ca 95
- 15 01 00 00 00 00 02 cb 01
- 15 01 00 00 00 00 02 cc c4
- 15 01 00 00 00 00 02 cd 02
- 15 01 00 00 00 00 02 ce 0d
- 15 01 00 00 00 00 02 cf 02
- 15 01 00 00 00 00 02 d0 4a
- 15 01 00 00 00 00 02 d1 02
- 15 01 00 00 00 00 02 d2 4c
- 15 01 00 00 00 00 02 d3 02
- 15 01 00 00 00 00 02 d4 85
- 15 01 00 00 00 00 02 d5 02
- 15 01 00 00 00 00 02 d6 c3
- 15 01 00 00 00 00 02 d7 02
- 15 01 00 00 00 00 02 d8 e9
- 15 01 00 00 00 00 02 d9 03
- 15 01 00 00 00 00 02 da 16
- 15 01 00 00 00 00 02 db 03
- 15 01 00 00 00 00 02 dc 34
- 15 01 00 00 00 00 02 dd 03
- 15 01 00 00 00 00 02 de 56
- 15 01 00 00 00 00 02 df 03
- 15 01 00 00 00 00 02 e0 62
- 15 01 00 00 00 00 02 e1 03
- 15 01 00 00 00 00 02 e2 6c
- 15 01 00 00 00 00 02 e3 03
- 15 01 00 00 00 00 02 e4 74
- 15 01 00 00 00 00 02 e5 03
- 15 01 00 00 00 00 02 e6 80
- 15 01 00 00 00 00 02 e7 03
- 15 01 00 00 00 00 02 e8 89
- 15 01 00 00 00 00 02 e9 03
- 15 01 00 00 00 00 02 ea 8b
- 15 01 00 00 00 00 02 eb 03
- 15 01 00 00 00 00 02 ec 8d
- 15 01 00 00 00 00 02 ed 03
- 15 01 00 00 00 00 02 ee 8e
- 15 01 00 00 00 00 02 ef 00
- 15 01 00 00 00 00 02 f0 71
- 15 01 00 00 00 00 02 f1 00
- 15 01 00 00 00 00 02 f2 84
- 15 01 00 00 00 00 02 f3 00
- 15 01 00 00 00 00 02 f4 a5
- 15 01 00 00 00 00 02 f5 00
- 15 01 00 00 00 00 02 f6 bb
- 15 01 00 00 00 00 02 f7 00
- 15 01 00 00 00 00 02 f8 ce
- 15 01 00 00 00 00 02 f9 00
- 15 01 00 00 00 00 02 fa e0
- 15 01 00 00 00 00 02 ff 21
- 15 01 00 00 00 00 02 fb 01
- 15 01 00 00 00 00 02 00 00
- 15 01 00 00 00 00 02 01 ef
- 15 01 00 00 00 00 02 02 00
- 15 01 00 00 00 00 02 03 ff
- 15 01 00 00 00 00 02 04 01
- 15 01 00 00 00 00 02 05 0b
- 15 01 00 00 00 00 02 06 01
- 15 01 00 00 00 00 02 07 38
- 15 01 00 00 00 00 02 08 01
- 15 01 00 00 00 00 02 09 5b
- 15 01 00 00 00 00 02 0a 01
- 15 01 00 00 00 00 02 0b 95
- 15 01 00 00 00 00 02 0c 01
- 15 01 00 00 00 00 02 0d c4
- 15 01 00 00 00 00 02 0e 02
- 15 01 00 00 00 00 02 0f 0d
- 15 01 00 00 00 00 02 10 02
- 15 01 00 00 00 00 02 11 4a
- 15 01 00 00 00 00 02 12 02
- 15 01 00 00 00 00 02 13 4c
- 15 01 00 00 00 00 02 14 02
- 15 01 00 00 00 00 02 15 85
- 15 01 00 00 00 00 02 16 02
- 15 01 00 00 00 00 02 17 c3
- 15 01 00 00 00 00 02 18 02
- 15 01 00 00 00 00 02 19 e9
- 15 01 00 00 00 00 02 1a 03
- 15 01 00 00 00 00 02 1b 16
- 15 01 00 00 00 00 02 1c 03
- 15 01 00 00 00 00 02 1d 34
- 15 01 00 00 00 00 02 1e 03
- 15 01 00 00 00 00 02 1f 56
- 15 01 00 00 00 00 02 20 03
- 15 01 00 00 00 00 02 21 62
- 15 01 00 00 00 00 02 22 03
- 15 01 00 00 00 00 02 23 6c
- 15 01 00 00 00 00 02 24 03
- 15 01 00 00 00 00 02 25 74
- 15 01 00 00 00 00 02 26 03
- 15 01 00 00 00 00 02 27 80
- 15 01 00 00 00 00 02 28 03
- 15 01 00 00 00 00 02 29 89
- 15 01 00 00 00 00 02 2a 03
- 15 01 00 00 00 00 02 2b 8b
- 15 01 00 00 00 00 02 2d 03
- 15 01 00 00 00 00 02 2f 8d
- 15 01 00 00 00 00 02 30 03
- 15 01 00 00 00 00 02 31 8e
- 15 01 00 00 00 00 02 32 00
- 15 01 00 00 00 00 02 33 71
- 15 01 00 00 00 00 02 34 00
- 15 01 00 00 00 00 02 35 84
- 15 01 00 00 00 00 02 36 00
- 15 01 00 00 00 00 02 37 a5
- 15 01 00 00 00 00 02 38 00
- 15 01 00 00 00 00 02 39 bb
- 15 01 00 00 00 00 02 3a 00
- 15 01 00 00 00 00 02 3b ce
- 15 01 00 00 00 00 02 3d 00
- 15 01 00 00 00 00 02 3f e0
- 15 01 00 00 00 00 02 40 00
- 15 01 00 00 00 00 02 41 ef
- 15 01 00 00 00 00 02 42 00
- 15 01 00 00 00 00 02 43 ff
- 15 01 00 00 00 00 02 44 01
- 15 01 00 00 00 00 02 45 0b
- 15 01 00 00 00 00 02 46 01
- 15 01 00 00 00 00 02 47 38
- 15 01 00 00 00 00 02 48 01
- 15 01 00 00 00 00 02 49 5b
- 15 01 00 00 00 00 02 4a 01
- 15 01 00 00 00 00 02 4b 95
- 15 01 00 00 00 00 02 4c 01
- 15 01 00 00 00 00 02 4d c4
- 15 01 00 00 00 00 02 4e 02
- 15 01 00 00 00 00 02 4f 0d
- 15 01 00 00 00 00 02 50 02
- 15 01 00 00 00 00 02 51 4a
- 15 01 00 00 00 00 02 52 02
- 15 01 00 00 00 00 02 53 4c
- 15 01 00 00 00 00 02 54 02
- 15 01 00 00 00 00 02 55 85
- 15 01 00 00 00 00 02 56 02
- 15 01 00 00 00 00 02 58 c3
- 15 01 00 00 00 00 02 59 02
- 15 01 00 00 00 00 02 5a e9
- 15 01 00 00 00 00 02 5b 03
- 15 01 00 00 00 00 02 5c 16
- 15 01 00 00 00 00 02 5d 03
- 15 01 00 00 00 00 02 5e 34
- 15 01 00 00 00 00 02 5f 03
- 15 01 00 00 00 00 02 60 56
- 15 01 00 00 00 00 02 61 03
- 15 01 00 00 00 00 02 62 62
- 15 01 00 00 00 00 02 63 03
- 15 01 00 00 00 00 02 64 6c
- 15 01 00 00 00 00 02 65 03
- 15 01 00 00 00 00 02 66 74
- 15 01 00 00 00 00 02 67 03
- 15 01 00 00 00 00 02 68 80
- 15 01 00 00 00 00 02 69 03
- 15 01 00 00 00 00 02 6a 89
- 15 01 00 00 00 00 02 6b 03
- 15 01 00 00 00 00 02 6c 8b
- 15 01 00 00 00 00 02 6d 03
- 15 01 00 00 00 00 02 6e 8d
- 15 01 00 00 00 00 02 6f 03
- 15 01 00 00 00 00 02 70 8e
- 15 01 00 00 00 00 02 71 00
- 15 01 00 00 00 00 02 72 71
- 15 01 00 00 00 00 02 73 00
- 15 01 00 00 00 00 02 74 84
- 15 01 00 00 00 00 02 75 00
- 15 01 00 00 00 00 02 76 a5
- 15 01 00 00 00 00 02 77 00
- 15 01 00 00 00 00 02 78 bb
- 15 01 00 00 00 00 02 79 00
- 15 01 00 00 00 00 02 7a ce
- 15 01 00 00 00 00 02 7b 00
- 15 01 00 00 00 00 02 7c e0
- 15 01 00 00 00 00 02 7d 00
- 15 01 00 00 00 00 02 7e ef
- 15 01 00 00 00 00 02 7f 00
- 15 01 00 00 00 00 02 80 ff
- 15 01 00 00 00 00 02 81 01
- 15 01 00 00 00 00 02 82 0b
- 15 01 00 00 00 00 02 83 01
- 15 01 00 00 00 00 02 84 38
- 15 01 00 00 00 00 02 85 01
- 15 01 00 00 00 00 02 86 5b
- 15 01 00 00 00 00 02 87 01
- 15 01 00 00 00 00 02 88 95
- 15 01 00 00 00 00 02 89 01
- 15 01 00 00 00 00 02 8a c4
- 15 01 00 00 00 00 02 8b 02
- 15 01 00 00 00 00 02 8c 0d
- 15 01 00 00 00 00 02 8d 02
- 15 01 00 00 00 00 02 8e 4a
- 15 01 00 00 00 00 02 8f 02
- 15 01 00 00 00 00 02 90 4c
- 15 01 00 00 00 00 02 91 02
- 15 01 00 00 00 00 02 92 85
- 15 01 00 00 00 00 02 93 02
- 15 01 00 00 00 00 02 94 c3
- 15 01 00 00 00 00 02 95 02
- 15 01 00 00 00 00 02 96 e9
- 15 01 00 00 00 00 02 97 03
- 15 01 00 00 00 00 02 98 16
- 15 01 00 00 00 00 02 99 03
- 15 01 00 00 00 00 02 9a 34
- 15 01 00 00 00 00 02 9b 03
- 15 01 00 00 00 00 02 9c 56
- 15 01 00 00 00 00 02 9d 03
- 15 01 00 00 00 00 02 9e 62
- 15 01 00 00 00 00 02 9f 03
- 15 01 00 00 00 00 02 a0 6c
- 15 01 00 00 00 00 02 a2 03
- 15 01 00 00 00 00 02 a3 74
- 15 01 00 00 00 00 02 a4 03
- 15 01 00 00 00 00 02 a5 80
- 15 01 00 00 00 00 02 a6 03
- 15 01 00 00 00 00 02 a7 89
- 15 01 00 00 00 00 02 a9 03
- 15 01 00 00 00 00 02 aa 8b
- 15 01 00 00 00 00 02 ab 03
- 15 01 00 00 00 00 02 ac 8d
- 15 01 00 00 00 00 02 ad 03
- 15 01 00 00 00 00 02 ae 8e
- 15 01 00 00 00 00 02 af 00
- 15 01 00 00 00 00 02 b0 71
- 15 01 00 00 00 00 02 b1 00
- 15 01 00 00 00 00 02 b2 84
- 15 01 00 00 00 00 02 b3 00
- 15 01 00 00 00 00 02 b4 a5
- 15 01 00 00 00 00 02 b5 00
- 15 01 00 00 00 00 02 b6 bb
- 15 01 00 00 00 00 02 b7 00
- 15 01 00 00 00 00 02 b8 ce
- 15 01 00 00 00 00 02 b9 00
- 15 01 00 00 00 00 02 ba e0
- 15 01 00 00 00 00 02 bb 00
- 15 01 00 00 00 00 02 bc ef
- 15 01 00 00 00 00 02 bd 00
- 15 01 00 00 00 00 02 be ff
- 15 01 00 00 00 00 02 bf 01
- 15 01 00 00 00 00 02 c0 0b
- 15 01 00 00 00 00 02 c1 01
- 15 01 00 00 00 00 02 c2 38
- 15 01 00 00 00 00 02 c3 01
- 15 01 00 00 00 00 02 c4 5b
- 15 01 00 00 00 00 02 c5 01
- 15 01 00 00 00 00 02 c6 95
- 15 01 00 00 00 00 02 c7 01
- 15 01 00 00 00 00 02 c8 c4
- 15 01 00 00 00 00 02 c9 02
- 15 01 00 00 00 00 02 ca 0d
- 15 01 00 00 00 00 02 cb 02
- 15 01 00 00 00 00 02 cc 4a
- 15 01 00 00 00 00 02 cd 02
- 15 01 00 00 00 00 02 ce 4c
- 15 01 00 00 00 00 02 cf 02
- 15 01 00 00 00 00 02 d0 85
- 15 01 00 00 00 00 02 d1 02
- 15 01 00 00 00 00 02 d2 c3
- 15 01 00 00 00 00 02 d3 02
- 15 01 00 00 00 00 02 d4 e9
- 15 01 00 00 00 00 02 d5 03
- 15 01 00 00 00 00 02 d6 16
- 15 01 00 00 00 00 02 d7 03
- 15 01 00 00 00 00 02 d8 34
- 15 01 00 00 00 00 02 d9 03
- 15 01 00 00 00 00 02 da 56
- 15 01 00 00 00 00 02 db 03
- 15 01 00 00 00 00 02 dc 62
- 15 01 00 00 00 00 02 dd 03
- 15 01 00 00 00 00 02 de 6c
- 15 01 00 00 00 00 02 df 03
- 15 01 00 00 00 00 02 e0 74
- 15 01 00 00 00 00 02 e1 03
- 15 01 00 00 00 00 02 e2 80
- 15 01 00 00 00 00 02 e3 03
- 15 01 00 00 00 00 02 e4 89
- 15 01 00 00 00 00 02 e5 03
- 15 01 00 00 00 00 02 e6 8b
- 15 01 00 00 00 00 02 e7 03
- 15 01 00 00 00 00 02 e8 8d
- 15 01 00 00 00 00 02 e9 03
- 15 01 00 00 00 00 02 ea 8e
- 15 01 00 00 00 00 02 FF 10
- 05 01 00 00 00 00 01 29];
- qcom,mdss-dsi-off-command = [15 01 00 00 00 00 02 ff 10
- 05 01 00 00 10 00 01 28
- 15 01 00 00 00 00 02 b0 00
- 05 01 00 00 40 00 01 10
- 15 01 00 00 00 00 02 4f 01];
- qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
- qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
- qcom,mdss-dsi-h-sync-pulse = <0>;
qcom,mdss-dsi-traffic-mode = "burst_mode";
qcom,mdss-dsi-bllp-eof-power-mode;
qcom,mdss-dsi-bllp-power-mode;
@@ -623,10 +40,594 @@
qcom,mdss-dsi-te-check-enable;
qcom,mdss-dsi-te-using-te-pin;
- qcom,config-select = <&dsi_dual_sharp_cmd_config0>;
-
- dsi_dual_sharp_cmd_config0: config0 {
- qcom,split-mode = "dualctl-split";
+ qcom,mdss-dsi-display-timings {
+ timing@0{
+ qcom,mdss-dsi-panel-width = <540>;
+ qcom,mdss-dsi-panel-height = <1920>;
+ qcom,mdss-dsi-h-front-porch = <28>;
+ qcom,mdss-dsi-h-back-porch = <4>;
+ qcom,mdss-dsi-h-pulse-width = <4>;
+ qcom,mdss-dsi-h-sync-skew = <0>;
+ qcom,mdss-dsi-v-back-porch = <12>;
+ qcom,mdss-dsi-v-front-porch = <12>;
+ qcom,mdss-dsi-v-pulse-width = <2>;
+ qcom,mdss-dsi-h-left-border = <0>;
+ qcom,mdss-dsi-h-right-border = <0>;
+ qcom,mdss-dsi-v-top-border = <0>;
+ qcom,mdss-dsi-v-bottom-border = <0>;
+ qcom,mdss-dsi-panel-framerate = <120>;
+ qcom,mdss-dsi-on-command =
+ [15 01 00 00 00 00 02 ff 10
+ 15 01 00 00 00 00 02 fb 01
+ 15 01 00 00 00 00 02 ba 07
+ 15 01 00 00 00 00 02 c0 00
+ 15 01 00 00 00 00 02 bb 10
+ 15 01 00 00 00 00 02 d9 00
+ 15 01 00 00 00 00 02 ef 70
+ 15 01 00 00 00 00 02 f7 80
+ 39 01 00 00 00 00 06 3b 03 0e 0c 08 1c
+ 15 01 00 00 00 00 02 e9 0e
+ 15 01 00 00 00 00 02 ea 0c
+ 15 01 00 00 00 00 02 35 00
+ 15 01 00 00 00 00 02 c0 00
+ 15 01 00 00 00 00 02 ff 20
+ 15 01 00 00 00 00 02 fb 01
+ 15 01 00 00 00 00 02 59 6a
+ 15 01 00 00 00 00 02 0b 1b
+ 15 01 00 00 00 00 02 61 f7
+ 15 01 00 00 00 00 02 62 6c
+ 15 01 00 00 00 00 02 00 01
+ 15 01 00 00 00 00 02 01 55
+ 15 01 00 00 00 00 02 04 c8
+ 15 01 00 00 00 00 02 05 1a
+ 15 01 00 00 00 00 02 0d 93
+ 15 01 00 00 00 00 02 0e 93
+ 15 01 00 00 00 00 02 0f 7e
+ 15 01 00 00 00 00 02 06 69
+ 15 01 00 00 00 00 02 07 bc
+ 15 01 00 00 00 00 02 10 03
+ 15 01 00 00 00 00 02 11 64
+ 15 01 00 00 00 00 02 12 5a
+ 15 01 00 00 00 00 02 13 40
+ 15 01 00 00 00 00 02 14 40
+ 15 01 00 00 00 00 02 15 00
+ 15 01 00 00 00 00 02 33 13
+ 15 01 00 00 00 00 02 5a 40
+ 15 01 00 00 00 00 02 5b 40
+ 15 01 00 00 00 00 02 5e 80
+ 15 01 00 00 00 00 02 ff 24
+ 15 01 00 00 00 00 02 fb 01
+ 15 01 00 00 00 00 02 00 80
+ 15 01 00 00 00 00 02 14 80
+ 15 01 00 00 00 00 02 01 80
+ 15 01 00 00 00 00 02 15 80
+ 15 01 00 00 00 00 02 02 80
+ 15 01 00 00 00 00 02 16 80
+ 15 01 00 00 00 00 02 03 0a
+ 15 01 00 00 00 00 02 17 0c
+ 15 01 00 00 00 00 02 04 06
+ 15 01 00 00 00 00 02 18 08
+ 15 01 00 00 00 00 02 05 80
+ 15 01 00 00 00 00 02 19 80
+ 15 01 00 00 00 00 02 06 80
+ 15 01 00 00 00 00 02 1a 80
+ 15 01 00 00 00 00 02 07 80
+ 15 01 00 00 00 00 02 1b 80
+ 15 01 00 00 00 00 02 08 80
+ 15 01 00 00 00 00 02 1c 80
+ 15 01 00 00 00 00 02 09 80
+ 15 01 00 00 00 00 02 1d 80
+ 15 01 00 00 00 00 02 0a 80
+ 15 01 00 00 00 00 02 1e 80
+ 15 01 00 00 00 00 02 0b 1a
+ 15 01 00 00 00 00 02 1f 1b
+ 15 01 00 00 00 00 02 0c 16
+ 15 01 00 00 00 00 02 20 17
+ 15 01 00 00 00 00 02 0d 1c
+ 15 01 00 00 00 00 02 21 1d
+ 15 01 00 00 00 00 02 0e 18
+ 15 01 00 00 00 00 02 22 19
+ 15 01 00 00 00 00 02 0f 0e
+ 15 01 00 00 00 00 02 23 10
+ 15 01 00 00 00 00 02 10 80
+ 15 01 00 00 00 00 02 24 80
+ 15 01 00 00 00 00 02 11 80
+ 15 01 00 00 00 00 02 25 80
+ 15 01 00 00 00 00 02 12 80
+ 15 01 00 00 00 00 02 26 80
+ 15 01 00 00 00 00 02 13 80
+ 15 01 00 00 00 00 02 27 80
+ 15 01 00 00 00 00 02 74 ff
+ 15 01 00 00 00 00 02 75 ff
+ 15 01 00 00 00 00 02 8d 00
+ 15 01 00 00 00 00 02 8e 00
+ 15 01 00 00 00 00 02 8f 9c
+ 15 01 00 00 00 00 02 90 0c
+ 15 01 00 00 00 00 02 91 0e
+ 15 01 00 00 00 00 02 d6 00
+ 15 01 00 00 00 00 02 d7 20
+ 15 01 00 00 00 00 02 d8 00
+ 15 01 00 00 00 00 02 d9 88
+ 15 01 00 00 00 00 02 e5 05
+ 15 01 00 00 00 00 02 e6 10
+ 15 01 00 00 00 00 02 54 06
+ 15 01 00 00 00 00 02 55 05
+ 15 01 00 00 00 00 02 56 04
+ 15 01 00 00 00 00 02 58 03
+ 15 01 00 00 00 00 02 59 33
+ 15 01 00 00 00 00 02 5a 33
+ 15 01 00 00 00 00 02 5b 01
+ 15 01 00 00 00 00 02 5c 00
+ 15 01 00 00 00 00 02 5d 01
+ 15 01 00 00 00 00 02 5e 0a
+ 15 01 00 00 00 00 02 5f 0a
+ 15 01 00 00 00 00 02 60 0a
+ 15 01 00 00 00 00 02 61 0a
+ 15 01 00 00 00 00 02 62 10
+ 15 01 00 00 00 00 02 63 01
+ 15 01 00 00 00 00 02 64 00
+ 15 01 00 00 00 00 02 65 00
+ 15 01 00 00 00 00 02 ef 00
+ 15 01 00 00 00 00 02 f0 00
+ 15 01 00 00 00 00 02 6d 20
+ 15 01 00 00 00 00 02 66 44
+ 15 01 00 00 00 00 02 68 01
+ 15 01 00 00 00 00 02 69 00
+ 15 01 00 00 00 00 02 67 11
+ 15 01 00 00 00 00 02 6a 06
+ 15 01 00 00 00 00 02 6b 31
+ 15 01 00 00 00 00 02 6c 90
+ 15 01 00 00 00 00 02 ab c3
+ 15 01 00 00 00 00 02 b1 49
+ 15 01 00 00 00 00 02 aa 80
+ 15 01 00 00 00 00 02 b0 90
+ 15 01 00 00 00 00 02 b2 a4
+ 15 01 00 00 00 00 02 b3 00
+ 15 01 00 00 00 00 02 b4 23
+ 15 01 00 00 00 00 02 b5 00
+ 15 01 00 00 00 00 02 b6 00
+ 15 01 00 00 00 00 02 b7 00
+ 15 01 00 00 00 00 02 b8 00
+ 15 01 00 00 00 00 02 b9 00
+ 15 01 00 00 00 00 02 ba 00
+ 15 01 00 00 00 00 02 bb 00
+ 15 01 00 00 00 00 02 bc 00
+ 15 01 00 00 00 00 02 bd 00
+ 15 01 00 00 00 00 02 be 00
+ 15 01 00 00 00 00 02 bf 00
+ 15 01 00 00 00 00 02 c0 00
+ 15 01 00 00 00 00 02 c7 40
+ 15 01 00 00 00 00 02 c9 00
+ 15 01 00 00 00 00 02 c1 2a
+ 15 01 00 00 00 00 02 c2 2a
+ 15 01 00 00 00 00 02 c3 00
+ 15 01 00 00 00 00 02 c4 00
+ 15 01 00 00 00 00 02 c5 00
+ 15 01 00 00 00 00 02 c6 00
+ 15 01 00 00 00 00 02 c8 ab
+ 15 01 00 00 00 00 02 ca 00
+ 15 01 00 00 00 00 02 cb 00
+ 15 01 00 00 00 00 02 cc 20
+ 15 01 00 00 00 00 02 cd 40
+ 15 01 00 00 00 00 02 ce a8
+ 15 01 00 00 00 00 02 cf a8
+ 15 01 00 00 00 00 02 d0 00
+ 15 01 00 00 00 00 02 d1 00
+ 15 01 00 00 00 00 02 d2 00
+ 15 01 00 00 00 00 02 d3 00
+ 15 01 00 00 00 00 02 af 01
+ 15 01 00 00 00 00 02 a4 1e
+ 15 01 00 00 00 00 02 95 41
+ 15 01 00 00 00 00 02 96 03
+ 15 01 00 00 00 00 02 98 00
+ 15 01 00 00 00 00 02 9a 9a
+ 15 01 00 00 00 00 02 9b 03
+ 15 01 00 00 00 00 02 9d 80
+ 15 01 00 00 00 00 02 ff 26
+ 15 01 00 00 00 00 02 fb 01
+ 15 01 00 00 00 00 02 fa d0
+ 15 01 00 00 00 00 02 6b 80
+ 15 01 00 00 00 00 02 6c 5c
+ 15 01 00 00 00 00 02 6d 0c
+ 15 01 00 00 00 00 02 6e 0e
+ 15 01 00 00 00 00 02 58 01
+ 15 01 00 00 00 00 02 59 15
+ 15 01 00 00 00 00 02 5a 01
+ 15 01 00 00 00 00 02 5b 00
+ 15 01 00 00 00 00 02 5c 01
+ 15 01 00 00 00 00 02 5d 2b
+ 15 01 00 00 00 00 02 74 00
+ 15 01 00 00 00 00 02 75 ba
+ 15 01 00 00 00 00 02 81 0a
+ 15 01 00 00 00 00 02 4e 81
+ 15 01 00 00 00 00 02 4f 83
+ 15 01 00 00 00 00 02 51 00
+ 15 01 00 00 00 00 02 53 4d
+ 15 01 00 00 00 00 02 54 03
+ 15 01 00 00 00 00 02 ff e0
+ 15 01 00 00 00 00 02 fb 01
+ 15 01 00 00 00 00 02 b2 81
+ 15 01 00 00 00 00 02 62 28
+ 15 01 00 00 00 00 02 a2 09
+ 15 01 00 00 00 00 02 b3 01
+ 15 01 00 00 00 00 02 ed 00
+ 15 01 00 00 00 00 02 ff 10
+ 05 01 00 00 78 00 01 11
+ 15 01 00 00 00 00 02 ff 20
+ 15 01 00 00 00 00 02 75 00
+ 15 01 00 00 00 00 02 76 71
+ 15 01 00 00 00 00 02 77 00
+ 15 01 00 00 00 00 02 78 84
+ 15 01 00 00 00 00 02 79 00
+ 15 01 00 00 00 00 02 7a a5
+ 15 01 00 00 00 00 02 7b 00
+ 15 01 00 00 00 00 02 7c bb
+ 15 01 00 00 00 00 02 7d 00
+ 15 01 00 00 00 00 02 7e ce
+ 15 01 00 00 00 00 02 7f 00
+ 15 01 00 00 00 00 02 80 e0
+ 15 01 00 00 00 00 02 81 00
+ 15 01 00 00 00 00 02 82 ef
+ 15 01 00 00 00 00 02 83 00
+ 15 01 00 00 00 00 02 84 ff
+ 15 01 00 00 00 00 02 85 01
+ 15 01 00 00 00 00 02 86 0b
+ 15 01 00 00 00 00 02 87 01
+ 15 01 00 00 00 00 02 88 38
+ 15 01 00 00 00 00 02 89 01
+ 15 01 00 00 00 00 02 8a 5b
+ 15 01 00 00 00 00 02 8b 01
+ 15 01 00 00 00 00 02 8c 95
+ 15 01 00 00 00 00 02 8d 01
+ 15 01 00 00 00 00 02 8e c4
+ 15 01 00 00 00 00 02 8f 02
+ 15 01 00 00 00 00 02 90 0d
+ 15 01 00 00 00 00 02 91 02
+ 15 01 00 00 00 00 02 92 4a
+ 15 01 00 00 00 00 02 93 02
+ 15 01 00 00 00 00 02 94 4c
+ 15 01 00 00 00 00 02 95 02
+ 15 01 00 00 00 00 02 96 85
+ 15 01 00 00 00 00 02 97 02
+ 15 01 00 00 00 00 02 98 c3
+ 15 01 00 00 00 00 02 99 02
+ 15 01 00 00 00 00 02 9a e9
+ 15 01 00 00 00 00 02 9b 03
+ 15 01 00 00 00 00 02 9c 16
+ 15 01 00 00 00 00 02 9d 03
+ 15 01 00 00 00 00 02 9e 34
+ 15 01 00 00 00 00 02 9f 03
+ 15 01 00 00 00 00 02 a0 56
+ 15 01 00 00 00 00 02 a2 03
+ 15 01 00 00 00 00 02 a3 62
+ 15 01 00 00 00 00 02 a4 03
+ 15 01 00 00 00 00 02 a5 6c
+ 15 01 00 00 00 00 02 a6 03
+ 15 01 00 00 00 00 02 a7 74
+ 15 01 00 00 00 00 02 a9 03
+ 15 01 00 00 00 00 02 aa 80
+ 15 01 00 00 00 00 02 ab 03
+ 15 01 00 00 00 00 02 ac 89
+ 15 01 00 00 00 00 02 ad 03
+ 15 01 00 00 00 00 02 ae 8b
+ 15 01 00 00 00 00 02 af 03
+ 15 01 00 00 00 00 02 b0 8d
+ 15 01 00 00 00 00 02 b1 03
+ 15 01 00 00 00 00 02 b2 8e
+ 15 01 00 00 00 00 02 b3 00
+ 15 01 00 00 00 00 02 b4 71
+ 15 01 00 00 00 00 02 b5 00
+ 15 01 00 00 00 00 02 b6 84
+ 15 01 00 00 00 00 02 b7 00
+ 15 01 00 00 00 00 02 b8 a5
+ 15 01 00 00 00 00 02 b9 00
+ 15 01 00 00 00 00 02 ba bb
+ 15 01 00 00 00 00 02 bb 00
+ 15 01 00 00 00 00 02 bc ce
+ 15 01 00 00 00 00 02 bd 00
+ 15 01 00 00 00 00 02 be e0
+ 15 01 00 00 00 00 02 bf 00
+ 15 01 00 00 00 00 02 c0 ef
+ 15 01 00 00 00 00 02 c1 00
+ 15 01 00 00 00 00 02 c2 ff
+ 15 01 00 00 00 00 02 c3 01
+ 15 01 00 00 00 00 02 c4 0b
+ 15 01 00 00 00 00 02 c5 01
+ 15 01 00 00 00 00 02 c6 38
+ 15 01 00 00 00 00 02 c7 01
+ 15 01 00 00 00 00 02 c8 5b
+ 15 01 00 00 00 00 02 c9 01
+ 15 01 00 00 00 00 02 ca 95
+ 15 01 00 00 00 00 02 cb 01
+ 15 01 00 00 00 00 02 cc c4
+ 15 01 00 00 00 00 02 cd 02
+ 15 01 00 00 00 00 02 ce 0d
+ 15 01 00 00 00 00 02 cf 02
+ 15 01 00 00 00 00 02 d0 4a
+ 15 01 00 00 00 00 02 d1 02
+ 15 01 00 00 00 00 02 d2 4c
+ 15 01 00 00 00 00 02 d3 02
+ 15 01 00 00 00 00 02 d4 85
+ 15 01 00 00 00 00 02 d5 02
+ 15 01 00 00 00 00 02 d6 c3
+ 15 01 00 00 00 00 02 d7 02
+ 15 01 00 00 00 00 02 d8 e9
+ 15 01 00 00 00 00 02 d9 03
+ 15 01 00 00 00 00 02 da 16
+ 15 01 00 00 00 00 02 db 03
+ 15 01 00 00 00 00 02 dc 34
+ 15 01 00 00 00 00 02 dd 03
+ 15 01 00 00 00 00 02 de 56
+ 15 01 00 00 00 00 02 df 03
+ 15 01 00 00 00 00 02 e0 62
+ 15 01 00 00 00 00 02 e1 03
+ 15 01 00 00 00 00 02 e2 6c
+ 15 01 00 00 00 00 02 e3 03
+ 15 01 00 00 00 00 02 e4 74
+ 15 01 00 00 00 00 02 e5 03
+ 15 01 00 00 00 00 02 e6 80
+ 15 01 00 00 00 00 02 e7 03
+ 15 01 00 00 00 00 02 e8 89
+ 15 01 00 00 00 00 02 e9 03
+ 15 01 00 00 00 00 02 ea 8b
+ 15 01 00 00 00 00 02 eb 03
+ 15 01 00 00 00 00 02 ec 8d
+ 15 01 00 00 00 00 02 ed 03
+ 15 01 00 00 00 00 02 ee 8e
+ 15 01 00 00 00 00 02 ef 00
+ 15 01 00 00 00 00 02 f0 71
+ 15 01 00 00 00 00 02 f1 00
+ 15 01 00 00 00 00 02 f2 84
+ 15 01 00 00 00 00 02 f3 00
+ 15 01 00 00 00 00 02 f4 a5
+ 15 01 00 00 00 00 02 f5 00
+ 15 01 00 00 00 00 02 f6 bb
+ 15 01 00 00 00 00 02 f7 00
+ 15 01 00 00 00 00 02 f8 ce
+ 15 01 00 00 00 00 02 f9 00
+ 15 01 00 00 00 00 02 fa e0
+ 15 01 00 00 00 00 02 ff 21
+ 15 01 00 00 00 00 02 fb 01
+ 15 01 00 00 00 00 02 00 00
+ 15 01 00 00 00 00 02 01 ef
+ 15 01 00 00 00 00 02 02 00
+ 15 01 00 00 00 00 02 03 ff
+ 15 01 00 00 00 00 02 04 01
+ 15 01 00 00 00 00 02 05 0b
+ 15 01 00 00 00 00 02 06 01
+ 15 01 00 00 00 00 02 07 38
+ 15 01 00 00 00 00 02 08 01
+ 15 01 00 00 00 00 02 09 5b
+ 15 01 00 00 00 00 02 0a 01
+ 15 01 00 00 00 00 02 0b 95
+ 15 01 00 00 00 00 02 0c 01
+ 15 01 00 00 00 00 02 0d c4
+ 15 01 00 00 00 00 02 0e 02
+ 15 01 00 00 00 00 02 0f 0d
+ 15 01 00 00 00 00 02 10 02
+ 15 01 00 00 00 00 02 11 4a
+ 15 01 00 00 00 00 02 12 02
+ 15 01 00 00 00 00 02 13 4c
+ 15 01 00 00 00 00 02 14 02
+ 15 01 00 00 00 00 02 15 85
+ 15 01 00 00 00 00 02 16 02
+ 15 01 00 00 00 00 02 17 c3
+ 15 01 00 00 00 00 02 18 02
+ 15 01 00 00 00 00 02 19 e9
+ 15 01 00 00 00 00 02 1a 03
+ 15 01 00 00 00 00 02 1b 16
+ 15 01 00 00 00 00 02 1c 03
+ 15 01 00 00 00 00 02 1d 34
+ 15 01 00 00 00 00 02 1e 03
+ 15 01 00 00 00 00 02 1f 56
+ 15 01 00 00 00 00 02 20 03
+ 15 01 00 00 00 00 02 21 62
+ 15 01 00 00 00 00 02 22 03
+ 15 01 00 00 00 00 02 23 6c
+ 15 01 00 00 00 00 02 24 03
+ 15 01 00 00 00 00 02 25 74
+ 15 01 00 00 00 00 02 26 03
+ 15 01 00 00 00 00 02 27 80
+ 15 01 00 00 00 00 02 28 03
+ 15 01 00 00 00 00 02 29 89
+ 15 01 00 00 00 00 02 2a 03
+ 15 01 00 00 00 00 02 2b 8b
+ 15 01 00 00 00 00 02 2d 03
+ 15 01 00 00 00 00 02 2f 8d
+ 15 01 00 00 00 00 02 30 03
+ 15 01 00 00 00 00 02 31 8e
+ 15 01 00 00 00 00 02 32 00
+ 15 01 00 00 00 00 02 33 71
+ 15 01 00 00 00 00 02 34 00
+ 15 01 00 00 00 00 02 35 84
+ 15 01 00 00 00 00 02 36 00
+ 15 01 00 00 00 00 02 37 a5
+ 15 01 00 00 00 00 02 38 00
+ 15 01 00 00 00 00 02 39 bb
+ 15 01 00 00 00 00 02 3a 00
+ 15 01 00 00 00 00 02 3b ce
+ 15 01 00 00 00 00 02 3d 00
+ 15 01 00 00 00 00 02 3f e0
+ 15 01 00 00 00 00 02 40 00
+ 15 01 00 00 00 00 02 41 ef
+ 15 01 00 00 00 00 02 42 00
+ 15 01 00 00 00 00 02 43 ff
+ 15 01 00 00 00 00 02 44 01
+ 15 01 00 00 00 00 02 45 0b
+ 15 01 00 00 00 00 02 46 01
+ 15 01 00 00 00 00 02 47 38
+ 15 01 00 00 00 00 02 48 01
+ 15 01 00 00 00 00 02 49 5b
+ 15 01 00 00 00 00 02 4a 01
+ 15 01 00 00 00 00 02 4b 95
+ 15 01 00 00 00 00 02 4c 01
+ 15 01 00 00 00 00 02 4d c4
+ 15 01 00 00 00 00 02 4e 02
+ 15 01 00 00 00 00 02 4f 0d
+ 15 01 00 00 00 00 02 50 02
+ 15 01 00 00 00 00 02 51 4a
+ 15 01 00 00 00 00 02 52 02
+ 15 01 00 00 00 00 02 53 4c
+ 15 01 00 00 00 00 02 54 02
+ 15 01 00 00 00 00 02 55 85
+ 15 01 00 00 00 00 02 56 02
+ 15 01 00 00 00 00 02 58 c3
+ 15 01 00 00 00 00 02 59 02
+ 15 01 00 00 00 00 02 5a e9
+ 15 01 00 00 00 00 02 5b 03
+ 15 01 00 00 00 00 02 5c 16
+ 15 01 00 00 00 00 02 5d 03
+ 15 01 00 00 00 00 02 5e 34
+ 15 01 00 00 00 00 02 5f 03
+ 15 01 00 00 00 00 02 60 56
+ 15 01 00 00 00 00 02 61 03
+ 15 01 00 00 00 00 02 62 62
+ 15 01 00 00 00 00 02 63 03
+ 15 01 00 00 00 00 02 64 6c
+ 15 01 00 00 00 00 02 65 03
+ 15 01 00 00 00 00 02 66 74
+ 15 01 00 00 00 00 02 67 03
+ 15 01 00 00 00 00 02 68 80
+ 15 01 00 00 00 00 02 69 03
+ 15 01 00 00 00 00 02 6a 89
+ 15 01 00 00 00 00 02 6b 03
+ 15 01 00 00 00 00 02 6c 8b
+ 15 01 00 00 00 00 02 6d 03
+ 15 01 00 00 00 00 02 6e 8d
+ 15 01 00 00 00 00 02 6f 03
+ 15 01 00 00 00 00 02 70 8e
+ 15 01 00 00 00 00 02 71 00
+ 15 01 00 00 00 00 02 72 71
+ 15 01 00 00 00 00 02 73 00
+ 15 01 00 00 00 00 02 74 84
+ 15 01 00 00 00 00 02 75 00
+ 15 01 00 00 00 00 02 76 a5
+ 15 01 00 00 00 00 02 77 00
+ 15 01 00 00 00 00 02 78 bb
+ 15 01 00 00 00 00 02 79 00
+ 15 01 00 00 00 00 02 7a ce
+ 15 01 00 00 00 00 02 7b 00
+ 15 01 00 00 00 00 02 7c e0
+ 15 01 00 00 00 00 02 7d 00
+ 15 01 00 00 00 00 02 7e ef
+ 15 01 00 00 00 00 02 7f 00
+ 15 01 00 00 00 00 02 80 ff
+ 15 01 00 00 00 00 02 81 01
+ 15 01 00 00 00 00 02 82 0b
+ 15 01 00 00 00 00 02 83 01
+ 15 01 00 00 00 00 02 84 38
+ 15 01 00 00 00 00 02 85 01
+ 15 01 00 00 00 00 02 86 5b
+ 15 01 00 00 00 00 02 87 01
+ 15 01 00 00 00 00 02 88 95
+ 15 01 00 00 00 00 02 89 01
+ 15 01 00 00 00 00 02 8a c4
+ 15 01 00 00 00 00 02 8b 02
+ 15 01 00 00 00 00 02 8c 0d
+ 15 01 00 00 00 00 02 8d 02
+ 15 01 00 00 00 00 02 8e 4a
+ 15 01 00 00 00 00 02 8f 02
+ 15 01 00 00 00 00 02 90 4c
+ 15 01 00 00 00 00 02 91 02
+ 15 01 00 00 00 00 02 92 85
+ 15 01 00 00 00 00 02 93 02
+ 15 01 00 00 00 00 02 94 c3
+ 15 01 00 00 00 00 02 95 02
+ 15 01 00 00 00 00 02 96 e9
+ 15 01 00 00 00 00 02 97 03
+ 15 01 00 00 00 00 02 98 16
+ 15 01 00 00 00 00 02 99 03
+ 15 01 00 00 00 00 02 9a 34
+ 15 01 00 00 00 00 02 9b 03
+ 15 01 00 00 00 00 02 9c 56
+ 15 01 00 00 00 00 02 9d 03
+ 15 01 00 00 00 00 02 9e 62
+ 15 01 00 00 00 00 02 9f 03
+ 15 01 00 00 00 00 02 a0 6c
+ 15 01 00 00 00 00 02 a2 03
+ 15 01 00 00 00 00 02 a3 74
+ 15 01 00 00 00 00 02 a4 03
+ 15 01 00 00 00 00 02 a5 80
+ 15 01 00 00 00 00 02 a6 03
+ 15 01 00 00 00 00 02 a7 89
+ 15 01 00 00 00 00 02 a9 03
+ 15 01 00 00 00 00 02 aa 8b
+ 15 01 00 00 00 00 02 ab 03
+ 15 01 00 00 00 00 02 ac 8d
+ 15 01 00 00 00 00 02 ad 03
+ 15 01 00 00 00 00 02 ae 8e
+ 15 01 00 00 00 00 02 af 00
+ 15 01 00 00 00 00 02 b0 71
+ 15 01 00 00 00 00 02 b1 00
+ 15 01 00 00 00 00 02 b2 84
+ 15 01 00 00 00 00 02 b3 00
+ 15 01 00 00 00 00 02 b4 a5
+ 15 01 00 00 00 00 02 b5 00
+ 15 01 00 00 00 00 02 b6 bb
+ 15 01 00 00 00 00 02 b7 00
+ 15 01 00 00 00 00 02 b8 ce
+ 15 01 00 00 00 00 02 b9 00
+ 15 01 00 00 00 00 02 ba e0
+ 15 01 00 00 00 00 02 bb 00
+ 15 01 00 00 00 00 02 bc ef
+ 15 01 00 00 00 00 02 bd 00
+ 15 01 00 00 00 00 02 be ff
+ 15 01 00 00 00 00 02 bf 01
+ 15 01 00 00 00 00 02 c0 0b
+ 15 01 00 00 00 00 02 c1 01
+ 15 01 00 00 00 00 02 c2 38
+ 15 01 00 00 00 00 02 c3 01
+ 15 01 00 00 00 00 02 c4 5b
+ 15 01 00 00 00 00 02 c5 01
+ 15 01 00 00 00 00 02 c6 95
+ 15 01 00 00 00 00 02 c7 01
+ 15 01 00 00 00 00 02 c8 c4
+ 15 01 00 00 00 00 02 c9 02
+ 15 01 00 00 00 00 02 ca 0d
+ 15 01 00 00 00 00 02 cb 02
+ 15 01 00 00 00 00 02 cc 4a
+ 15 01 00 00 00 00 02 cd 02
+ 15 01 00 00 00 00 02 ce 4c
+ 15 01 00 00 00 00 02 cf 02
+ 15 01 00 00 00 00 02 d0 85
+ 15 01 00 00 00 00 02 d1 02
+ 15 01 00 00 00 00 02 d2 c3
+ 15 01 00 00 00 00 02 d3 02
+ 15 01 00 00 00 00 02 d4 e9
+ 15 01 00 00 00 00 02 d5 03
+ 15 01 00 00 00 00 02 d6 16
+ 15 01 00 00 00 00 02 d7 03
+ 15 01 00 00 00 00 02 d8 34
+ 15 01 00 00 00 00 02 d9 03
+ 15 01 00 00 00 00 02 da 56
+ 15 01 00 00 00 00 02 db 03
+ 15 01 00 00 00 00 02 dc 62
+ 15 01 00 00 00 00 02 dd 03
+ 15 01 00 00 00 00 02 de 6c
+ 15 01 00 00 00 00 02 df 03
+ 15 01 00 00 00 00 02 e0 74
+ 15 01 00 00 00 00 02 e1 03
+ 15 01 00 00 00 00 02 e2 80
+ 15 01 00 00 00 00 02 e3 03
+ 15 01 00 00 00 00 02 e4 89
+ 15 01 00 00 00 00 02 e5 03
+ 15 01 00 00 00 00 02 e6 8b
+ 15 01 00 00 00 00 02 e7 03
+ 15 01 00 00 00 00 02 e8 8d
+ 15 01 00 00 00 00 02 e9 03
+ 15 01 00 00 00 00 02 ea 8e
+ 15 01 00 00 00 00 02 FF 10
+ 05 01 00 00 00 00 01 29];
+ qcom,mdss-dsi-off-command =
+ [15 01 00 00 00 00 02 ff 10
+ 05 01 00 00 10 00 01 28
+ 15 01 00 00 00 00 02 b0 00
+ 05 01 00 00 40 00 01 10
+ 15 01 00 00 00 00 02 4f 01];
+ qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+ qcom,mdss-dsi-h-sync-pulse = <0>;
+ };
};
};
};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sim-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sim-cmd.dtsi
index 1f08294..50da1bf 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sim-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sim-cmd.dtsi
@@ -14,27 +14,12 @@
dsi_sim_cmd: qcom,mdss_dsi_sim_cmd{
qcom,mdss-dsi-panel-name = "Simulator cmd mode dsi panel";
qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
- qcom,mdss-dsi-panel-framerate = <60>;
qcom,mdss-dsi-virtual-channel-id = <0>;
qcom,mdss-dsi-stream = <0>;
- qcom,mdss-dsi-panel-width = <640>;
- qcom,mdss-dsi-panel-height = <480>;
- qcom,mdss-dsi-h-front-porch = <20>;
- qcom,mdss-dsi-h-back-porch = <20>;
- qcom,mdss-dsi-h-pulse-width = <16>;
- qcom,mdss-dsi-h-sync-skew = <0>;
- qcom,mdss-dsi-v-back-porch = <16>;
- qcom,mdss-dsi-v-front-porch = <4>;
- qcom,mdss-dsi-v-pulse-width = <1>;
- qcom,mdss-dsi-h-left-border = <0>;
- qcom,mdss-dsi-h-right-border = <0>;
- qcom,mdss-dsi-v-top-border = <0>;
- qcom,mdss-dsi-v-bottom-border = <0>;
qcom,mdss-dsi-bpp = <24>;
qcom,mdss-dsi-color-order = "rgb_swap_rgb";
qcom,mdss-dsi-underflow-color = <0xff>;
qcom,mdss-dsi-border-color = <0>;
- qcom,mdss-dsi-h-sync-pulse = <0>;
qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
qcom,mdss-dsi-bllp-eof-power-mode;
qcom,mdss-dsi-bllp-power-mode;
@@ -42,11 +27,6 @@
qcom,mdss-dsi-lane-1-state;
qcom,mdss-dsi-lane-2-state;
qcom,mdss-dsi-lane-3-state;
- qcom,mdss-dsi-hor-line-idle = <0 40 256>,
- <40 120 128>,
- <120 240 64>;
- qcom,mdss-dsi-panel-timings = [cd 32 22 00 60 64 26 34 29 03
- 04 00];
qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
qcom,mdss-dsi-t-clk-post = <0x03>;
qcom,mdss-dsi-t-clk-pre = <0x27>;
@@ -65,33 +45,60 @@
17000 15500 30000 8000 3000>;
qcom,mdss-dsi-panel-peak-brightness = <4200000>;
qcom,mdss-dsi-panel-blackness-level = <3230>;
- qcom,mdss-dsi-on-command = [29 01 00 00 00 00 02 b0 03
- 05 01 00 00 0a 00 01 00
- /* Soft reset, wait 10ms */
- 15 01 00 00 0a 00 02 3a 77
- /* Set Pixel format (24 bpp) */
- 39 01 00 00 0a 00 05 2a 00 00 04 ff
- /* Set Column address */
- 39 01 00 00 0a 00 05 2b 00 00 05 9f
- /* Set page address */
- 15 01 00 00 0a 00 02 35 00
- /* Set tear on */
- 39 01 00 00 0a 00 03 44 00 00
- /* Set tear scan line */
- 15 01 00 00 0a 00 02 51 ff
- /* write display brightness */
- 15 01 00 00 0a 00 02 53 24
- /* write control brightness */
- 15 01 00 00 0a 00 02 55 00
- /* CABC brightness */
- 05 01 00 00 78 00 01 11
- /* exit sleep mode, wait 120ms */
- 05 01 00 00 10 00 01 29];
- /* Set display on, wait 16ms */
- qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
- qcom,mdss-dsi-off-command = [05 01 00 00 32 00 02 28 00
- 05 01 00 00 78 00 02 10 00];
- qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
qcom,panel-ack-disabled;
+
+ qcom,mdss-dsi-display-timings {
+ timing@0{
+ qcom,mdss-dsi-panel-width = <640>;
+ qcom,mdss-dsi-panel-height = <480>;
+ qcom,mdss-dsi-h-front-porch = <20>;
+ qcom,mdss-dsi-h-back-porch = <20>;
+ qcom,mdss-dsi-h-pulse-width = <16>;
+ qcom,mdss-dsi-h-sync-skew = <0>;
+ qcom,mdss-dsi-v-back-porch = <16>;
+ qcom,mdss-dsi-v-front-porch = <4>;
+ qcom,mdss-dsi-v-pulse-width = <1>;
+ qcom,mdss-dsi-h-left-border = <0>;
+ qcom,mdss-dsi-h-right-border = <0>;
+ qcom,mdss-dsi-v-top-border = <0>;
+ qcom,mdss-dsi-v-bottom-border = <0>;
+ qcom,mdss-dsi-h-sync-pulse = <0>;
+ qcom,mdss-dsi-panel-framerate = <60>;
+ qcom,mdss-dsi-hor-line-idle = <0 40 256>,
+ <40 120 128>,
+ <120 240 64>;
+ qcom,mdss-dsi-panel-timings =
+ [cd 32 22 00 60 64 26 34 29 03 04 00];
+ qcom,mdss-dsi-on-command =
+ [29 01 00 00 00 00 02 b0 03
+ 05 01 00 00 0a 00 01 00
+ /* Soft reset, wait 10ms */
+ 15 01 00 00 0a 00 02 3a 77
+ /* Set Pixel format (24 bpp) */
+ 39 01 00 00 0a 00 05 2a 00 00 04 ff
+ /* Set Column address */
+ 39 01 00 00 0a 00 05 2b 00 00 05 9f
+ /* Set page address */
+ 15 01 00 00 0a 00 02 35 00
+ /* Set tear on */
+ 39 01 00 00 0a 00 03 44 00 00
+ /* Set tear scan line */
+ 15 01 00 00 0a 00 02 51 ff
+ /* write display brightness */
+ 15 01 00 00 0a 00 02 53 24
+ /* write control brightness */
+ 15 01 00 00 0a 00 02 55 00
+ /* CABC brightness */
+ 05 01 00 00 78 00 01 11
+ /* exit sleep mode, wait 120ms */
+ 05 01 00 00 10 00 01 29];
+ /* Set display on, wait 16ms */
+ qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-off-command =
+ [05 01 00 00 32 00 02 28 00
+ 05 01 00 00 78 00 02 10 00];
+ qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+ };
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-cmd.dtsi
index 36f36fb..a93deb5 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-cmd.dtsi
@@ -14,27 +14,12 @@
dsi_dual_sim_cmd: qcom,mdss_dsi_dual_sim_cmd {
qcom,mdss-dsi-panel-name = "Sim dual cmd mode dsi panel";
qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
- qcom,mdss-dsi-panel-framerate = <60>;
qcom,mdss-dsi-virtual-channel-id = <0>;
qcom,mdss-dsi-stream = <0>;
- qcom,mdss-dsi-panel-width = <1280>;
- qcom,mdss-dsi-panel-height = <1440>;
- qcom,mdss-dsi-h-front-porch = <120>;
- qcom,mdss-dsi-h-back-porch = <44>;
- qcom,mdss-dsi-h-pulse-width = <16>;
- qcom,mdss-dsi-h-sync-skew = <0>;
- qcom,mdss-dsi-v-back-porch = <4>;
- qcom,mdss-dsi-v-front-porch = <8>;
- qcom,mdss-dsi-v-pulse-width = <4>;
- qcom,mdss-dsi-h-left-border = <0>;
- qcom,mdss-dsi-h-right-border = <0>;
- qcom,mdss-dsi-v-top-border = <0>;
- qcom,mdss-dsi-v-bottom-border = <0>;
qcom,mdss-dsi-bpp = <24>;
qcom,mdss-dsi-color-order = "rgb_swap_rgb";
qcom,mdss-dsi-underflow-color = <0xff>;
qcom,mdss-dsi-border-color = <0>;
- qcom,mdss-dsi-h-sync-pulse = <0>;
qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
qcom,mdss-dsi-bllp-eof-power-mode;
qcom,mdss-dsi-bllp-power-mode;
@@ -57,33 +42,55 @@
qcom,mdss-dsi-te-check-enable;
qcom,mdss-dsi-te-using-wd;
qcom,mdss-dsi-te-using-te-pin;
- qcom,mdss-dsi-on-command = [29 01 00 00 00 00 02 b0 03
- 05 01 00 00 0a 00 01 00
- /* Soft reset, wait 10ms */
- 15 01 00 00 0a 00 02 3a 77
- /* Set Pixel format (24 bpp) */
- 39 01 00 00 0a 00 05 2a 00 00 04 ff
- /* Set Column address */
- 39 01 00 00 0a 00 05 2b 00 00 05 9f
- /* Set page address */
- 15 01 00 00 0a 00 02 35 00
- /* Set tear on */
- 39 01 00 00 0a 00 03 44 00 00
- /* Set tear scan line */
- 15 01 00 00 0a 00 02 51 ff
- /* write display brightness */
- 15 01 00 00 0a 00 02 53 24
- /* write control brightness */
- 15 01 00 00 0a 00 02 55 00
- /* CABC brightness */
- 05 01 00 00 78 00 01 11
- /* exit sleep mode, wait 120ms */
- 05 01 00 00 10 00 01 29];
- /* Set display on, wait 16ms */
- qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
- qcom,mdss-dsi-off-command = [05 01 00 00 32 00 02 28 00
- 05 01 00 00 78 00 02 10 00];
- qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
qcom,panel-ack-disabled;
+
+ qcom,mdss-dsi-display-timings {
+ timing@0{
+ qcom,mdss-dsi-panel-width = <1280>;
+ qcom,mdss-dsi-panel-height = <1440>;
+ qcom,mdss-dsi-h-front-porch = <120>;
+ qcom,mdss-dsi-h-back-porch = <44>;
+ qcom,mdss-dsi-h-pulse-width = <16>;
+ qcom,mdss-dsi-h-sync-skew = <0>;
+ qcom,mdss-dsi-v-back-porch = <4>;
+ qcom,mdss-dsi-v-front-porch = <8>;
+ qcom,mdss-dsi-v-pulse-width = <4>;
+ qcom,mdss-dsi-h-left-border = <0>;
+ qcom,mdss-dsi-h-right-border = <0>;
+ qcom,mdss-dsi-v-top-border = <0>;
+ qcom,mdss-dsi-v-bottom-border = <0>;
+ qcom,mdss-dsi-h-sync-pulse = <0>;
+ qcom,mdss-dsi-panel-framerate = <60>;
+ qcom,mdss-dsi-on-command =
+ [29 01 00 00 00 00 02 b0 03
+ 05 01 00 00 0a 00 01 00
+ /* Soft reset, wait 10ms */
+ 15 01 00 00 0a 00 02 3a 77
+ /* Set Pixel format (24 bpp) */
+ 39 01 00 00 0a 00 05 2a 00 00 04 ff
+ /* Set Column address */
+ 39 01 00 00 0a 00 05 2b 00 00 05 9f
+ /* Set page address */
+ 15 01 00 00 0a 00 02 35 00
+ /* Set tear on */
+ 39 01 00 00 0a 00 03 44 00 00
+ /* Set tear scan line */
+ 15 01 00 00 0a 00 02 51 ff
+ /* write display brightness */
+ 15 01 00 00 0a 00 02 53 24
+ /* write control brightness */
+ 15 01 00 00 0a 00 02 55 00
+ /* CABC brightness */
+ 05 01 00 00 78 00 01 11
+ /* exit sleep mode, wait 120ms */
+ 05 01 00 00 10 00 01 29];
+ /* Set display on, wait 16ms */
+ qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-off-command =
+ [05 01 00 00 32 00 02 28 00
+ 05 01 00 00 78 00 02 10 00];
+ qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+ };
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-video.dtsi
index cca28c7..dbfedb9 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-video.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-video.dtsi
@@ -14,26 +14,11 @@
dsi_dual_sim_vid: qcom,mdss_dsi_dual_sim_video {
qcom,mdss-dsi-panel-name = "Sim dual video mode dsi panel";
qcom,mdss-dsi-panel-type = "dsi_video_mode";
- qcom,mdss-dsi-panel-framerate = <60>;
qcom,mdss-dsi-virtual-channel-id = <0>;
qcom,mdss-dsi-stream = <0>;
- qcom,mdss-dsi-panel-width = <1280>;
- qcom,mdss-dsi-panel-height = <1440>;
- qcom,mdss-dsi-h-front-porch = <120>;
- qcom,mdss-dsi-h-back-porch = <44>;
- qcom,mdss-dsi-h-pulse-width = <16>;
- qcom,mdss-dsi-h-sync-skew = <0>;
- qcom,mdss-dsi-v-back-porch = <4>;
- qcom,mdss-dsi-v-front-porch = <8>;
- qcom,mdss-dsi-v-pulse-width = <4>;
- qcom,mdss-dsi-h-left-border = <0>;
- qcom,mdss-dsi-h-right-border = <0>;
- qcom,mdss-dsi-v-top-border = <0>;
- qcom,mdss-dsi-v-bottom-border = <0>;
qcom,mdss-dsi-bpp = <24>;
qcom,mdss-dsi-underflow-color = <0xff>;
qcom,mdss-dsi-border-color = <0>;
- qcom,mdss-dsi-h-sync-pulse = <0>;
qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
qcom,mdss-dsi-bllp-eof-power-mode;
qcom,mdss-dsi-bllp-power-mode;
@@ -45,11 +30,32 @@
qcom,mdss-dsi-bl-max-level = <4095>;
qcom,mdss-dsi-dma-trigger = "trigger_sw";
qcom,mdss-dsi-mdp-trigger = "none";
- qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
- qcom,mdss-dsi-off-command = [05 01 00 00 32 00 02 28 00
- 05 01 00 00 78 00 02 10 00];
- qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
qcom,mdss-dsi-reset-sequence = <1 20>, <0 200>, <1 20>;
qcom,panel-ack-disabled;
+
+ qcom,mdss-dsi-display-timings {
+ timing@0{
+ qcom,mdss-dsi-panel-width = <1280>;
+ qcom,mdss-dsi-panel-height = <1440>;
+ qcom,mdss-dsi-h-front-porch = <120>;
+ qcom,mdss-dsi-h-back-porch = <44>;
+ qcom,mdss-dsi-h-pulse-width = <16>;
+ qcom,mdss-dsi-h-sync-skew = <0>;
+ qcom,mdss-dsi-v-back-porch = <4>;
+ qcom,mdss-dsi-v-front-porch = <8>;
+ qcom,mdss-dsi-v-pulse-width = <4>;
+ qcom,mdss-dsi-h-left-border = <0>;
+ qcom,mdss-dsi-h-right-border = <0>;
+ qcom,mdss-dsi-v-top-border = <0>;
+ qcom,mdss-dsi-v-bottom-border = <0>;
+ qcom,mdss-dsi-h-sync-pulse = <0>;
+ qcom,mdss-dsi-panel-framerate = <60>;
+ qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-off-command =
+ [05 01 00 00 32 00 02 28 00
+ 05 01 00 00 78 00 02 10 00];
+ qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+ };
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sim-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sim-video.dtsi
index 98a1f61..40bedd0 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sim-video.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sim-video.dtsi
@@ -14,22 +14,8 @@
dsi_sim_vid: qcom,mdss_dsi_sim_video {
qcom,mdss-dsi-panel-name = "Simulator video mode dsi panel";
qcom,mdss-dsi-panel-type = "dsi_video_mode";
- qcom,mdss-dsi-panel-framerate = <60>;
qcom,mdss-dsi-virtual-channel-id = <0>;
qcom,mdss-dsi-stream = <0>;
- qcom,mdss-dsi-panel-width = <640>;
- qcom,mdss-dsi-panel-height = <480>;
- qcom,mdss-dsi-h-front-porch = <8>;
- qcom,mdss-dsi-h-back-porch = <8>;
- qcom,mdss-dsi-h-pulse-width = <8>;
- qcom,mdss-dsi-h-sync-skew = <0>;
- qcom,mdss-dsi-v-back-porch = <6>;
- qcom,mdss-dsi-v-front-porch = <6>;
- qcom,mdss-dsi-v-pulse-width = <2>;
- qcom,mdss-dsi-h-left-border = <0>;
- qcom,mdss-dsi-h-right-border = <0>;
- qcom,mdss-dsi-v-top-border = <0>;
- qcom,mdss-dsi-v-bottom-border = <0>;
qcom,mdss-dsi-bpp = <24>;
qcom,mdss-dsi-underflow-color = <0xff>;
qcom,mdss-dsi-border-color = <0>;
@@ -38,11 +24,6 @@
17000 15500 30000 8000 3000>;
qcom,mdss-dsi-panel-peak-brightness = <4200000>;
qcom,mdss-dsi-panel-blackness-level = <3230>;
- qcom,mdss-dsi-on-command = [32 01 00 00 00 00 02 00 00];
- qcom,mdss-dsi-off-command = [22 01 00 00 00 00 02 00 00];
- qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
- qcom,mdss-dsi-off-command-state = "dsi_lp_mode";
- qcom,mdss-dsi-h-sync-pulse = <0>;
qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
qcom,mdss-dsi-bllp-eof-power-mode;
qcom,mdss-dsi-bllp-power-mode;
@@ -50,13 +31,39 @@
qcom,mdss-dsi-lane-1-state;
qcom,mdss-dsi-lane-2-state;
qcom,mdss-dsi-lane-3-state;
- qcom,mdss-dsi-panel-timings =
- [00 00 00 00 00 00 00 00 00 00 00 00];
qcom,mdss-dsi-t-clk-post = <0x04>;
qcom,mdss-dsi-t-clk-pre = <0x1b>;
qcom,mdss-dsi-dma-trigger = "trigger_sw";
qcom,mdss-dsi-mdp-trigger = "none";
qcom,mdss-dsi-reset-sequence = <1 0>, <0 0>, <1 0>;
qcom,panel-ack-disabled;
+
+ qcom,mdss-dsi-display-timings {
+ timing@0{
+ qcom,mdss-dsi-panel-width = <640>;
+ qcom,mdss-dsi-panel-height = <480>;
+ qcom,mdss-dsi-h-front-porch = <8>;
+ qcom,mdss-dsi-h-back-porch = <8>;
+ qcom,mdss-dsi-h-pulse-width = <8>;
+ qcom,mdss-dsi-h-sync-skew = <0>;
+ qcom,mdss-dsi-v-back-porch = <6>;
+ qcom,mdss-dsi-v-front-porch = <6>;
+ qcom,mdss-dsi-v-pulse-width = <2>;
+ qcom,mdss-dsi-h-left-border = <0>;
+ qcom,mdss-dsi-h-right-border = <0>;
+ qcom,mdss-dsi-v-top-border = <0>;
+ qcom,mdss-dsi-v-bottom-border = <0>;
+ qcom,mdss-dsi-panel-framerate = <60>;
+ qcom,mdss-dsi-panel-timings =
+ [00 00 00 00 00 00 00 00 00 00 00 00];
+ qcom,mdss-dsi-on-command =
+ [32 01 00 00 00 00 02 00 00];
+ qcom,mdss-dsi-off-command =
+ [22 01 00 00 00 00 02 00 00];
+ qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-off-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-h-sync-pulse = <0>;
+ };
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
index 1a2ca5b..56e74be 100644
--- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
@@ -34,14 +34,10 @@
<GIC_SPI 369 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 370 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 371 IRQ_TYPE_EDGE_RISING>;
- clock-names = "gcc_ddrss_gpu_axi_clk",
- "gcc_gpu_memnoc_gfx_clk",
- "gpu_cc_ahb_clk",
- "gpu_cc_cx_gmu_clk";
- clocks = <&clock_gcc GCC_DDRSS_GPU_AXI_CLK>,
- <&clock_gcc GCC_GPU_MEMNOC_GFX_CLK>,
- <&clock_gpucc GPU_CC_AHB_CLK>,
- <&clock_gpucc GPU_CC_CX_GMU_CLK>;
+ clock-names = "gcc_gpu_memnoc_gfx_clk",
+ "gpu_cc_ahb_clk";
+ clocks = <&clock_gcc GCC_GPU_MEMNOC_GFX_CLK>,
+ <&clock_gpucc GPU_CC_AHB_CLK>;
attach-impl-defs =
<0x6000 0x2378>,
<0x6060 0x1055>,
@@ -65,6 +61,7 @@
#iommu-cells = <2>;
qcom,skip-init;
qcom,use-3-lvl-tables;
+ qcom,no-asid-retention;
#global-interrupts = <1>;
#size-cells = <1>;
#address-cells = <1>;
@@ -339,10 +336,10 @@
apps_iommu_coherent_test_device {
compatible = "iommu-debug-test";
/*
- * This SID belongs to QUP1-DMA. We can't use a fake SID for
+ * This SID belongs to TSIF. We can't use a fake SID for
* the apps_smmu device.
*/
- iommus = <&apps_smmu 0x3 0>;
+ iommus = <&apps_smmu 0x20 0>;
dma-coherent;
};
};
diff --git a/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi b/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi
index bc0b118..0d2f9e8 100644
--- a/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi
@@ -85,7 +85,7 @@
compatible = "qcom,msm-pcm-loopback";
};
- qcom,msm-dai-mi2s {
+ msm_dai_mi2s: qcom,msm-dai-mi2s {
compatible = "qcom,msm-dai-mi2s";
dai_mi2s0: qcom,msm-dai-q6-mi2s-prim {
compatible = "qcom,msm-dai-q6-mi2s";
diff --git a/arch/arm64/boot/dts/qcom/msm-gdsc-sdm845.dtsi b/arch/arm64/boot/dts/qcom/msm-gdsc-sdm845.dtsi
index 2194a42..dcc646c93b 100644
--- a/arch/arm64/boot/dts/qcom/msm-gdsc-sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-gdsc-sdm845.dtsi
@@ -196,28 +196,15 @@
hw-ctrl-addr = <&gpu_cx_hw_ctrl>;
qcom,no-status-check-on-disable;
qcom,gds-timeout = <500>;
+ qcom,clk-dis-wait-val = <8>;
status = "disabled";
};
- gpu_gx_domain_addr: syscon@0x5091508 {
- compatible = "syscon";
- reg = <0x5091508 0x4>;
- };
-
- gpu_gx_sw_reset: syscon@0x5091008 {
- compatible = "syscon";
- reg = <0x5091008 0x4>;
- };
-
gpu_gx_gdsc: qcom,gdsc@0x509100c {
compatible = "qcom,gdsc";
regulator-name = "gpu_gx_gdsc";
reg = <0x509100c 0x4>;
- domain-addr = <&gpu_gx_domain_addr>;
- sw-reset = <&gpu_gx_sw_reset>;
- qcom,reset-aon-logic;
qcom,poll-cfg-gdscr;
- qcom,toggle-sw-collapse-in-disable;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/qcom/msm-rdbg.dtsi b/arch/arm64/boot/dts/qcom/msm-rdbg.dtsi
new file mode 100644
index 0000000..d9d1be4
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm-rdbg.dtsi
@@ -0,0 +1,106 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ smp2pgpio_rdbg_2_in: qcom,smp2pgpio-rdbg-2-in {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "rdbg";
+ qcom,remote-pid = <2>;
+ qcom,is-inbound;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ qcom,smp2pgpio_client_rdbg_2_in {
+ compatible = "qcom,smp2pgpio_client_rdbg_2_in";
+ gpios = <&smp2pgpio_rdbg_2_in 0 0>;
+ };
+
+ smp2pgpio_rdbg_2_out: qcom,smp2pgpio-rdbg-2-out {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "rdbg";
+ qcom,remote-pid = <2>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ qcom,smp2pgpio_client_rdbg_2_out {
+ compatible = "qcom,smp2pgpio_client_rdbg_2_out";
+ gpios = <&smp2pgpio_rdbg_2_out 0 0>;
+ };
+
+ smp2pgpio_rdbg_1_in: qcom,smp2pgpio-rdbg-1-in {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "rdbg";
+ qcom,remote-pid = <1>;
+ qcom,is-inbound;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ qcom,smp2pgpio_client_rdbg_1_in {
+ compatible = "qcom,smp2pgpio_client_rdbg_1_in";
+ gpios = <&smp2pgpio_rdbg_1_in 0 0>;
+ };
+
+ smp2pgpio_rdbg_1_out: qcom,smp2pgpio-rdbg-1-out {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "rdbg";
+ qcom,remote-pid = <1>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ qcom,smp2pgpio_client_rdbg_1_out {
+ compatible = "qcom,smp2pgpio_client_rdbg_1_out";
+ gpios = <&smp2pgpio_rdbg_1_out 0 0>;
+ };
+
+ smp2pgpio_rdbg_5_in: qcom,smp2pgpio-rdbg-5-in {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "rdbg";
+ qcom,remote-pid = <5>;
+ qcom,is-inbound;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ qcom,smp2pgpio_client_rdbg_5_in {
+ compatible = "qcom,smp2pgpio_client_rdbg_5_in";
+ gpios = <&smp2pgpio_rdbg_5_in 0 0>;
+ };
+
+ smp2pgpio_rdbg_5_out: qcom,smp2pgpio-rdbg-5-out {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "rdbg";
+ qcom,remote-pid = <5>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ qcom,smp2pgpio_client_rdbg_5_out {
+ compatible = "qcom,smp2pgpio_client_rdbg_5_out";
+ gpios = <&smp2pgpio_rdbg_5_out 0 0>;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/pm660.dtsi b/arch/arm64/boot/dts/qcom/pm660.dtsi
index 4abf260..5d71f2d 100644
--- a/arch/arm64/boot/dts/qcom/pm660.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm660.dtsi
@@ -488,6 +488,7 @@
qcom,adc_tm-vadc = <&pm660_vadc>;
qcom,decimation = <0>;
qcom,fast-avg-setup = <0>;
+ #thermal-sensor-cells = <1>;
chan@83 {
label = "vph_pwr";
@@ -531,6 +532,28 @@
qcom,btm-channel-number = <0x78>;
qcom,thermal-node;
};
+
+ chan@4e {
+ label = "emmc_therm";
+ reg = <0x4e>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <2>;
+ qcom,hw-settle-time = <2>;
+ qcom,btm-channel-number = <0x80>;
+ qcom,vadc-thermal-node;
+ };
+
+ chan@4f {
+ label = "pa_therm0";
+ reg = <0x4f>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <2>;
+ qcom,hw-settle-time = <2>;
+ qcom,btm-channel-number = <0x88>;
+ qcom,vadc-thermal-node;
+ };
};
pm660_rradc: rradc@4500 {
@@ -631,3 +654,80 @@
#size-cells = <0>;
};
};
+
+&thermal_zones {
+ xo-therm-adc {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm660_adc_tm 0x4c>;
+ thermal-governor = "user_space";
+
+ trips {
+ active-config0 {
+ temperature = <65000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ msm-therm-adc {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm660_adc_tm 0x4d>;
+ thermal-governor = "user_space";
+
+ trips {
+ active-config0 {
+ temperature = <65000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ emmc-therm-adc {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm660_adc_tm 0x4e>;
+ thermal-governor = "user_space";
+
+ trips {
+ active-config0 {
+ temperature = <65000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ pa-therm0-adc {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm660_adc_tm 0x4f>;
+ thermal-governor = "user_space";
+
+ trips {
+ active-config0 {
+ temperature = <65000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ quiet-therm-adc {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm660_adc_tm 0x51>;
+ thermal-governor = "user_space";
+
+ trips {
+ active-config0 {
+ temperature = <65000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/pm660l.dtsi b/arch/arm64/boot/dts/qcom/pm660l.dtsi
index 0f18ba5..075eaef2 100644
--- a/arch/arm64/boot/dts/qcom/pm660l.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm660l.dtsi
@@ -250,9 +250,8 @@
<0xd900 0x100>;
reg-names = "qpnp-wled-ctrl-base",
"qpnp-wled-sink-base";
- interrupts = <0x3 0xd8 0x1 IRQ_TYPE_EDGE_RISING>,
- <0x3 0xd8 0x2 IRQ_TYPE_EDGE_RISING>;
- interrupt-names = "ovp-irq", "sc-irq";
+ interrupts = <0x3 0xd8 0x1 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "ovp-irq";
linux,name = "wled";
linux,default-trigger = "bkl-trigger";
qcom,fdbk-output = "auto";
@@ -268,9 +267,9 @@
qcom,fs-curr-ua = <25000>;
qcom,cons-sync-write-delay-us = <1000>;
qcom,led-strings-list = [00 01 02];
- qcom,en-ext-pfet-sc-pro;
qcom,loop-auto-gm-en;
qcom,pmic-revid = <&pm660l_revid>;
+ qcom,auto-calibration-enable;
status = "ok";
};
diff --git a/arch/arm64/boot/dts/qcom/pmi8998.dtsi b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
index 71eee1f..12b469c 100644
--- a/arch/arm64/boot/dts/qcom/pmi8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
@@ -97,6 +97,7 @@
qcom,boost-threshold-ua = <100000>;
qcom,wipower-max-uw = <5000000>;
+ dpdm-supply = <&qusb_phy0>;
qcom,thermal-mitigation
= <3000000 1500000 1000000 500000>;
@@ -436,7 +437,6 @@
qcom,max-current = <12>;
qcom,default-state = "off";
linux,name = "red";
- linux,default-trigger = "battery-charging";
};
green_led: qcom,rgb_1 {
@@ -448,7 +448,6 @@
qcom,max-current = <12>;
qcom,default-state = "off";
linux,name = "green";
- linux,default-trigger = "battery-full";
};
blue_led: qcom,rgb_2 {
@@ -460,7 +459,6 @@
qcom,max-current = <12>;
qcom,default-state = "off";
linux,name = "blue";
- linux,default-trigger = "boot-indication";
};
};
@@ -565,6 +563,7 @@
qcom,en-ext-pfet-sc-pro;
qcom,pmic-revid = <&pmi8998_revid>;
qcom,loop-auto-gm-en;
+ qcom,auto-calibration-enable;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-audio.dtsi b/arch/arm64/boot/dts/qcom/sdm670-audio.dtsi
new file mode 100644
index 0000000..3bd0350
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-audio.dtsi
@@ -0,0 +1,568 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm-audio-lpass.dtsi"
+#include "sdm670-wsa881x.dtsi"
+#include "sdm670-wcd.dtsi"
+#include "sdm670-lpi.dtsi"
+#include <dt-bindings/clock/qcom,audio-ext-clk.h>
+
+&msm_audio_ion {
+ iommus = <&apps_smmu 0x1801 0x0>;
+ qcom,smmu-sid-mask = /bits/ 64 <0xf>;
+};
+
+&soc {
+ qcom,avtimer@62cf700c {
+ compatible = "qcom,avtimer";
+ reg = <0x62cf700c 0x4>,
+ <0x62cf7010 0x4>;
+ reg-names = "avtimer_lsb_addr", "avtimer_msb_addr";
+ qcom,clk-div = <192>;
+ qcom,clk-mult = <10>;
+ };
+
+ tavil_snd: sound-tavil {
+ status = "disabled";
+ compatible = "qcom,sdm670-asoc-snd-tavil";
+ qcom,model = "sdm670-tavil-snd-card";
+ qcom,wcn-btfm;
+ qcom,mi2s-audio-intf;
+ qcom,auxpcm-audio-intf;
+ qcom,msm-mi2s-master = <1>, <1>, <1>, <1>;
+ qcom,audio-routing =
+ "AIF4 VI", "MCLK",
+ "RX_BIAS", "MCLK",
+ "MADINPUT", "MCLK",
+ "hifi amp", "LINEOUT1",
+ "hifi amp", "LINEOUT2",
+ "AMIC2", "MIC BIAS2",
+ "MIC BIAS2", "Headset Mic",
+ "AMIC3", "MIC BIAS2",
+ "MIC BIAS2", "ANCRight Headset Mic",
+ "AMIC4", "MIC BIAS2",
+ "MIC BIAS2", "ANCLeft Headset Mic",
+ "AMIC5", "MIC BIAS3",
+ "MIC BIAS3", "Handset Mic",
+ "DMIC0", "MIC BIAS1",
+ "MIC BIAS1", "Digital Mic0",
+ "DMIC1", "MIC BIAS1",
+ "MIC BIAS1", "Digital Mic1",
+ "DMIC2", "MIC BIAS3",
+ "MIC BIAS3", "Digital Mic2",
+ "DMIC3", "MIC BIAS3",
+ "MIC BIAS3", "Digital Mic3",
+ "DMIC4", "MIC BIAS4",
+ "MIC BIAS4", "Digital Mic4",
+ "DMIC5", "MIC BIAS4",
+ "MIC BIAS4", "Digital Mic5",
+ "SpkrLeft IN", "SPK1 OUT",
+ "SpkrRight IN", "SPK2 OUT";
+
+ qcom,msm-mbhc-hphl-swh = <1>;
+ qcom,msm-mbhc-gnd-swh = <1>;
+ qcom,hph-en0-gpio = <&tavil_hph_en0>;
+ qcom,hph-en1-gpio = <&tavil_hph_en1>;
+ qcom,msm-mclk-freq = <9600000>;
+ qcom,usbc-analog-en1_gpio = <&wcd_usbc_analog_en1_gpio>;
+ asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
+ <&loopback>, <&compress>, <&hostless>,
+ <&afe>, <&lsm>, <&routing>, <&cpe>, <&compr>,
+ <&pcm_noirq>;
+ asoc-platform-names = "msm-pcm-dsp.0", "msm-pcm-dsp.1",
+ "msm-pcm-dsp.2", "msm-voip-dsp",
+ "msm-pcm-voice", "msm-pcm-loopback",
+ "msm-compress-dsp", "msm-pcm-hostless",
+ "msm-pcm-afe", "msm-lsm-client",
+ "msm-pcm-routing", "msm-cpe-lsm",
+ "msm-compr-dsp", "msm-pcm-dsp-noirq";
+ asoc-cpu = <&dai_mi2s0>, <&dai_mi2s1>,
+ <&dai_mi2s2>, <&dai_mi2s3>,
+ <&dai_pri_auxpcm>, <&dai_sec_auxpcm>,
+ <&dai_tert_auxpcm>, <&dai_quat_auxpcm>,
+ <&sb_0_rx>, <&sb_0_tx>, <&sb_1_rx>, <&sb_1_tx>,
+ <&sb_2_rx>, <&sb_2_tx>, <&sb_3_rx>, <&sb_3_tx>,
+ <&sb_4_rx>, <&sb_4_tx>, <&sb_5_rx>, <&sb_5_tx>,
+ <&sb_6_rx>, <&sb_7_rx>, <&sb_7_tx>,
+ <&sb_8_rx>, <&sb_8_tx>,
+ <&afe_pcm_rx>, <&afe_pcm_tx>, <&afe_proxy_rx>,
+ <&afe_proxy_tx>, <&incall_record_rx>,
+ <&incall_record_tx>, <&incall_music_rx>,
+ <&incall_music_2_rx>,
+ <&usb_audio_rx>, <&usb_audio_tx>,
+ <&dai_pri_tdm_rx_0>, <&dai_pri_tdm_tx_0>,
+ <&dai_sec_tdm_rx_0>, <&dai_sec_tdm_tx_0>,
+ <&dai_tert_tdm_rx_0>, <&dai_tert_tdm_tx_0>,
+ <&dai_quat_tdm_rx_0>, <&dai_quat_tdm_tx_0>;
+ asoc-cpu-names = "msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
+ "msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
+ "msm-dai-q6-auxpcm.1", "msm-dai-q6-auxpcm.2",
+ "msm-dai-q6-auxpcm.3", "msm-dai-q6-auxpcm.4",
+ "msm-dai-q6-dev.16384", "msm-dai-q6-dev.16385",
+ "msm-dai-q6-dev.16386", "msm-dai-q6-dev.16387",
+ "msm-dai-q6-dev.16388", "msm-dai-q6-dev.16389",
+ "msm-dai-q6-dev.16390", "msm-dai-q6-dev.16391",
+ "msm-dai-q6-dev.16392", "msm-dai-q6-dev.16393",
+ "msm-dai-q6-dev.16394", "msm-dai-q6-dev.16395",
+ "msm-dai-q6-dev.16396",
+ "msm-dai-q6-dev.16398", "msm-dai-q6-dev.16399",
+ "msm-dai-q6-dev.16400", "msm-dai-q6-dev.16401",
+ "msm-dai-q6-dev.224", "msm-dai-q6-dev.225",
+ "msm-dai-q6-dev.241", "msm-dai-q6-dev.240",
+ "msm-dai-q6-dev.32771", "msm-dai-q6-dev.32772",
+ "msm-dai-q6-dev.32773", "msm-dai-q6-dev.32770",
+ "msm-dai-q6-dev.28672", "msm-dai-q6-dev.28673",
+ "msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36865",
+ "msm-dai-q6-tdm.36880", "msm-dai-q6-tdm.36881",
+ "msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36897",
+ "msm-dai-q6-tdm.36912", "msm-dai-q6-tdm.36913";
+ asoc-codec = <&stub_codec>;
+ asoc-codec-names = "msm-stub-codec.1";
+ qcom,wsa-max-devs = <2>;
+ qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0212>,
+ <&wsa881x_0213>, <&wsa881x_0214>;
+ qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrRight",
+ "SpkrLeft", "SpkrRight";
+ };
+
+int_codec: sound {
+ status = "okay";
+ compatible = "qcom,sdm670-asoc-snd";
+ qcom,model = "sdm670-snd-card";
+ qcom,wcn-btfm;
+ qcom,mi2s-audio-intf;
+ qcom,auxpcm-audio-intf;
+ qcom,msm-mi2s-master = <1>, <1>, <1>, <1>;
+ qcom,msm-mclk-freq = <9600000>;
+ qcom,msm-mbhc-hphl-swh = <1>;
+ qcom,msm-mbhc-gnd-swh = <1>;
+ qcom,msm-micbias2-ext-cap;
+ qcom,msm-hs-micbias-type = "external";
+ qcom,cdc-pdm-gpios = <&cdc_pdm_gpios>;
+ qcom,cdc-comp-gpios = <&cdc_comp_gpios>;
+ qcom,cdc-dmic-gpios = <&cdc_dmic_gpios>;
+ qcom,audio-routing =
+ "RX_BIAS", "INT_MCLK0",
+ "SPK_RX_BIAS", "INT_MCLK0",
+ "INT_LDO_H", "INT_MCLK0",
+ "MIC BIAS External", "Handset Mic",
+ "MIC BIAS External2", "Headset Mic",
+ "MIC BIAS External", "Secondary Mic",
+ "AMIC1", "MIC BIAS External",
+ "AMIC2", "MIC BIAS External2",
+ "AMIC3", "MIC BIAS External",
+ "DMIC1", "MIC BIAS External",
+ "MIC BIAS External", "Digital Mic1",
+ "DMIC2", "MIC BIAS External",
+ "MIC BIAS External", "Digital Mic2",
+ "DMIC3", "MIC BIAS External",
+ "MIC BIAS External", "Digital Mic3",
+ "DMIC4", "MIC BIAS External",
+ "MIC BIAS External", "Digital Mic4",
+ "SpkrLeft IN", "SPK1 OUT",
+ "SpkrRight IN", "SPK2 OUT",
+ "PDM_IN_RX1", "PDM_OUT_RX1",
+ "PDM_IN_RX2", "PDM_OUT_RX2",
+ "PDM_IN_RX3", "PDM_OUT_RX3",
+ "ADC1_IN", "ADC1_OUT",
+ "ADC2_IN", "ADC2_OUT",
+ "ADC3_IN", "ADC3_OUT";
+
+ asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
+ <&loopback>, <&compress>, <&hostless>,
+ <&afe>, <&lsm>, <&routing>, <&compr>,
+ <&pcm_noirq>;
+ asoc-platform-names = "msm-pcm-dsp.0", "msm-pcm-dsp.1",
+ "msm-pcm-dsp.2", "msm-voip-dsp",
+ "msm-pcm-voice", "msm-pcm-loopback",
+ "msm-compress-dsp", "msm-pcm-hostless",
+ "msm-pcm-afe", "msm-lsm-client",
+ "msm-pcm-routing", "msm-compr-dsp",
+ "msm-pcm-dsp-noirq";
+ asoc-cpu = <&dai_mi2s0>, <&dai_mi2s1>,
+ <&dai_mi2s2>, <&dai_mi2s3>,
+ <&dai_int_mi2s0>, <&dai_int_mi2s1>,
+ <&dai_int_mi2s2>, <&dai_int_mi2s3>,
+ <&dai_int_mi2s4>, <&dai_int_mi2s5>,
+ <&dai_pri_auxpcm>, <&dai_sec_auxpcm>,
+ <&dai_tert_auxpcm>, <&dai_quat_auxpcm>,
+ <&afe_pcm_rx>, <&afe_pcm_tx>, <&afe_proxy_rx>,
+ <&afe_proxy_tx>, <&incall_record_rx>,
+ <&incall_record_tx>, <&incall_music_rx>,
+ <&incall_music_2_rx>, <&sb_7_rx>, <&sb_7_tx>,
+ <&sb_8_tx>, <&sb_8_rx>,
+ <&usb_audio_rx>, <&usb_audio_tx>,
+ <&dai_pri_tdm_rx_0>, <&dai_pri_tdm_tx_0>,
+ <&dai_sec_tdm_rx_0>, <&dai_sec_tdm_tx_0>,
+ <&dai_tert_tdm_rx_0>, <&dai_tert_tdm_tx_0>,
+ <&dai_quat_tdm_rx_0>, <&dai_quat_tdm_tx_0>;
+ asoc-cpu-names = "msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
+ "msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
+ "msm-dai-q6-mi2s.7", "msm-dai-q6-mi2s.8",
+ "msm-dai-q6-mi2s.9", "msm-dai-q6-mi2s.10",
+ "msm-dai-q6-mi2s.11", "msm-dai-q6-mi2s.12",
+ "msm-dai-q6-auxpcm.1", "msm-dai-q6-auxpcm.2",
+ "msm-dai-q6-auxpcm.3", "msm-dai-q6-auxpcm.4",
+ "msm-dai-q6-dev.224", "msm-dai-q6-dev.225",
+ "msm-dai-q6-dev.241", "msm-dai-q6-dev.240",
+ "msm-dai-q6-dev.32771", "msm-dai-q6-dev.32772",
+ "msm-dai-q6-dev.32773", "msm-dai-q6-dev.32770",
+ "msm-dai-q6-dev.16398", "msm-dai-q6-dev.16399",
+ "msm-dai-q6-dev.16401", "msm-dai-q6-dev.16400",
+ "msm-dai-q6-dev.28672", "msm-dai-q6-dev.28673",
+ "msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36865",
+ "msm-dai-q6-tdm.36880", "msm-dai-q6-tdm.36881",
+ "msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36897",
+ "msm-dai-q6-tdm.36912", "msm-dai-q6-tdm.36913";
+ asoc-codec = <&stub_codec>, <&msm_digital_codec>,
+ <&pmic_analog_codec>, <&msm_sdw_codec>;
+ asoc-codec-names = "msm-stub-codec.1", "msm-dig-codec",
+ "analog-codec", "msm_sdw_codec";
+
+ qcom,wsa-max-devs = <2>;
+ qcom,wsa-devs = <&wsa881x_211_en>, <&wsa881x_212_en>,
+ <&wsa881x_213_en>, <&wsa881x_214_en>;
+ qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrRight",
+ "SpkrLeft", "SpkrRight";
+ };
+
+ cdc_pdm_gpios: cdc_pdm_pinctrl {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&cdc_pdm_clk_active &cdc_pdm_sync_active
+ &cdc_pdm_rx0_active &cdc_pdm_rx1_2_active
+ &cdc_pdm_2_gpios_active>;
+ pinctrl-1 = <&cdc_pdm_clk_sleep &cdc_pdm_sync_sleep
+ &cdc_pdm_rx0_sleep &cdc_pdm_rx1_2_sleep
+ &cdc_pdm_2_gpios_sleep>;
+ qcom,lpi-gpios;
+ };
+
+ cdc_comp_gpios: cdc_comp_pinctrl {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&cdc_rx0_comp_active &cdc_rx1_comp_active>;
+ pinctrl-1 = <&cdc_rx0_comp_sleep &cdc_rx1_comp_sleep>;
+ qcom,lpi-gpios;
+ };
+
+ cdc_dmic_gpios: cdc_dmic_pinctrl {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&cdc_dmic12_gpios_active
+ &cdc_dmic34_gpios_active>;
+ pinctrl-1 = <&cdc_dmic12_gpios_sleep
+ &cdc_dmic34_gpios_sleep>;
+ qcom,lpi-gpios;
+ };
+
+ cdc_sdw_gpios: sdw_clk_data_pinctrl {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&sdw_clk_active &sdw_data_active>;
+ pinctrl-1 = <&sdw_clk_sleep &sdw_data_sleep>;
+ };
+
+ wsa_spkr_en1: wsa_spkr_en1_pinctrl {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&spkr_1_sd_n_active>;
+ pinctrl-1 = <&spkr_1_sd_n_sleep>;
+ };
+
+ wsa_spkr_en2: wsa_spkr_en2_pinctrl {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&spkr_2_sd_n_active>;
+ pinctrl-1 = <&spkr_2_sd_n_sleep>;
+ };
+
+ msm_sdw_codec: msm-sdw-codec@62ec1000 {
+ status = "okay";
+ compatible = "qcom,msm-sdw-codec";
+ reg = <0x62ec1000 0x0>;
+ interrupts = <0 161 0>;
+ interrupt-names = "swr_master_irq";
+ qcom,cdc-sdw-gpios = <&cdc_sdw_gpios>;
+
+ swr_master {
+ compatible = "qcom,swr-wcd";
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ wsa881x_211_en: wsa881x_en@20170211 {
+ compatible = "qcom,wsa881x";
+ reg = <0x0 0x20170211>;
+ qcom,spkr-sd-n-node = <&wsa_spkr_en1>;
+ };
+
+ wsa881x_212_en: wsa881x_en@20170212 {
+ compatible = "qcom,wsa881x";
+ reg = <0x0 0x20170212>;
+ qcom,spkr-sd-n-node = <&wsa_spkr_en2>;
+ };
+
+ wsa881x_213_en: wsa881x_en@21170213 {
+ compatible = "qcom,wsa881x";
+ reg = <0x0 0x21170213>;
+ qcom,spkr-sd-n-node = <&wsa_spkr_en1>;
+ };
+
+ wsa881x_214_en: wsa881x_en@21170214 {
+ compatible = "qcom,wsa881x";
+ reg = <0x0 0x21170214>;
+ qcom,spkr-sd-n-node = <&wsa_spkr_en2>;
+ };
+ };
+ };
+
+ wcd9xxx_intc: wcd9xxx-irq {
+ status = "disabled";
+ compatible = "qcom,wcd9xxx-irq";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tlmm>;
+ qcom,gpio-connect = <&tlmm 80 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&wcd_intr_default>;
+ };
+
+ clock_audio_lnbb: audio_ext_clk_lnbb {
+ status = "disabled";
+ compatible = "qcom,audio-ref-clk";
+ clock-names = "osr_clk";
+ clocks = <&clock_rpmh RPMH_LN_BB_CLK2>;
+ qcom,node_has_rpm_clock;
+ #clock-cells = <1>;
+ };
+
+ wcd_rst_gpio: msm_cdc_pinctrl@64 {
+ status = "disabled";
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&lpi_cdc_reset_active>;
+ pinctrl-1 = <&lpi_cdc_reset_sleep>;
+ qcom,lpi-gpios;
+ };
+
+ cpe: qcom,msm-cpe-lsm {
+ compatible = "qcom,msm-cpe-lsm";
+ };
+
+ cpe3: qcom,msm-cpe-lsm@3 {
+ compatible = "qcom,msm-cpe-lsm";
+ qcom,msm-cpe-lsm-id = <3>;
+ };
+
+ wdsp_mgr: qcom,wcd-dsp-mgr {
+ compatible = "qcom,wcd-dsp-mgr";
+ qcom,wdsp-components = <&wcd934x_cdc 0>,
+ <&wcd_spi_0 1>,
+ <&glink_spi_xprt_wdsp 2>;
+ qcom,img-filename = "cpe_9340";
+ };
+
+ wdsp_glink: qcom,wcd-dsp-glink {
+ compatible = "qcom,wcd-dsp-glink";
+ };
+};
+
+&slim_aud {
+ status = "disabled";
+ dai_slim: msm_dai_slim {
+ status = "disabled";
+ compatible = "qcom,msm-dai-slim";
+ elemental-addr = [ff ff ff fe 17 02];
+ };
+
+ wcd934x_cdc: tavil_codec {
+ status = "disabled";
+ compatible = "qcom,tavil-slim-pgd";
+ elemental-addr = [00 01 50 02 17 02];
+
+ interrupt-parent = <&wcd9xxx_intc>;
+ interrupts = <0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
+ 17 18 19 20 21 22 23 24 25 26 27 28 29
+ 30 31>;
+
+ qcom,wcd-rst-gpio-node = <&wcd_rst_gpio>;
+
+ clock-names = "wcd_clk";
+ clocks = <&clock_audio_lnbb AUDIO_PMIC_LNBB_CLK>;
+
+ cdc-vdd-mic-bias-supply = <&pm660l_bob>;
+ qcom,cdc-vdd-mic-bias-voltage = <3300000 3300000>;
+ qcom,cdc-vdd-mic-bias-current = <30400>;
+
+ qcom,cdc-static-supplies = "cdc-vdd-mic-bias";
+
+ qcom,cdc-micbias1-mv = <1800>;
+ qcom,cdc-micbias2-mv = <1800>;
+ qcom,cdc-micbias3-mv = <1800>;
+ qcom,cdc-micbias4-mv = <1800>;
+
+ qcom,cdc-mclk-clk-rate = <9600000>;
+ qcom,cdc-slim-ifd = "tavil-slim-ifd";
+ qcom,cdc-slim-ifd-elemental-addr = [00 00 50 02 17 02];
+ qcom,cdc-dmic-sample-rate = <4800000>;
+ qcom,cdc-mad-dmic-rate = <600000>;
+
+ qcom,wdsp-cmpnt-dev-name = "tavil_codec";
+
+ wcd_spi_0: wcd_spi {
+ compatible = "qcom,wcd-spi-v2";
+ qcom,master-bus-num = <8>;
+ qcom,chip-select = <0>;
+ qcom,max-frequency = <24000000>;
+ qcom,mem-base-addr = <0x100000>;
+ };
+
+ wcd_usbc_analog_en1_gpio: msm_cdc_pinctrl_usbc_audio_en1 {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&wcd_usbc_analog_en1_active>;
+ pinctrl-1 = <&wcd_usbc_analog_en1_idle>;
+ };
+ };
+};
+
+&msm_dai_mi2s {
+ dai_int_mi2s0: qcom,msm-dai-q6-int-mi2s0 {
+ compatible = "qcom,msm-dai-q6-mi2s";
+ qcom,msm-dai-q6-mi2s-dev-id = <7>;
+ qcom,msm-mi2s-rx-lines = <3>;
+ qcom,msm-mi2s-tx-lines = <0>;
+ };
+
+ dai_int_mi2s1: qcom,msm-dai-q6-int-mi2s1 {
+ compatible = "qcom,msm-dai-q6-mi2s";
+ qcom,msm-dai-q6-mi2s-dev-id = <8>;
+ qcom,msm-mi2s-rx-lines = <3>;
+ qcom,msm-mi2s-tx-lines = <0>;
+ };
+
+ dai_int_mi2s2: qcom,msm-dai-q6-int-mi2s2 {
+ compatible = "qcom,msm-dai-q6-mi2s";
+ qcom,msm-dai-q6-mi2s-dev-id = <9>;
+ qcom,msm-mi2s-rx-lines = <0>;
+ qcom,msm-mi2s-tx-lines = <3>;
+ };
+
+ dai_int_mi2s3: qcom,msm-dai-q6-int-mi2s3 {
+ compatible = "qcom,msm-dai-q6-mi2s";
+ qcom,msm-dai-q6-mi2s-dev-id = <10>;
+ qcom,msm-mi2s-rx-lines = <0>;
+ qcom,msm-mi2s-tx-lines = <3>;
+ };
+
+ dai_int_mi2s4: qcom,msm-dai-q6-int-mi2s4 {
+ compatible = "qcom,msm-dai-q6-mi2s";
+ qcom,msm-dai-q6-mi2s-dev-id = <11>;
+ qcom,msm-mi2s-rx-lines = <3>;
+ qcom,msm-mi2s-tx-lines = <0>;
+ };
+
+ dai_int_mi2s5: qcom,msm-dai-q6-int-mi2s5 {
+ compatible = "qcom,msm-dai-q6-mi2s";
+ qcom,msm-dai-q6-mi2s-dev-id = <12>;
+ qcom,msm-mi2s-rx-lines = <0>;
+ qcom,msm-mi2s-tx-lines = <3>;
+ };
+
+ dai_int_mi2s6: qcom,msm-dai-q6-int-mi2s6 {
+ compatible = "qcom,msm-dai-q6-mi2s";
+ qcom,msm-dai-q6-mi2s-dev-id = <13>;
+ qcom,msm-mi2s-rx-lines = <0>;
+ qcom,msm-mi2s-tx-lines = <3>;
+ };
+};
+
+&pm660l_3 {
+ pmic_analog_codec: analog-codec@f000 {
+ status = "okay";
+ compatible = "qcom,pmic-analog-codec";
+ reg = <0xf000 0x200>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ interrupt-parent = <&spmi_bus>;
+ interrupts = <0x3 0xf0 0x0 IRQ_TYPE_NONE>,
+ <0x3 0xf0 0x1 IRQ_TYPE_NONE>,
+ <0x3 0xf0 0x2 IRQ_TYPE_NONE>,
+ <0x3 0xf0 0x3 IRQ_TYPE_NONE>,
+ <0x3 0xf0 0x4 IRQ_TYPE_NONE>,
+ <0x3 0xf0 0x5 IRQ_TYPE_NONE>,
+ <0x3 0xf0 0x6 IRQ_TYPE_NONE>,
+ <0x3 0xf0 0x7 IRQ_TYPE_NONE>,
+ <0x3 0xf1 0x0 IRQ_TYPE_NONE>,
+ <0x3 0xf1 0x1 IRQ_TYPE_NONE>,
+ <0x3 0xf1 0x2 IRQ_TYPE_NONE>,
+ <0x3 0xf1 0x3 IRQ_TYPE_NONE>,
+ <0x3 0xf1 0x4 IRQ_TYPE_NONE>,
+ <0x3 0xf1 0x5 IRQ_TYPE_NONE>;
+ interrupt-names = "spk_cnp_int",
+ "spk_clip_int",
+ "spk_ocp_int",
+ "ins_rem_det1",
+ "but_rel_det",
+ "but_press_det",
+ "ins_rem_det",
+ "mbhc_int",
+ "ear_ocp_int",
+ "hphr_ocp_int",
+ "hphl_ocp_det",
+ "ear_cnp_int",
+ "hphr_cnp_int",
+ "hphl_cnp_int";
+
+
+ cdc-vdda-cp-supply = <&pm660_s4>;
+ qcom,cdc-vdda-cp-voltage = <1900000 2050000>;
+ qcom,cdc-vdda-cp-current = <50000>;
+
+ cdc-vdd-pa-supply = <&pm660_s4>;
+ qcom,cdc-vdd-pa-voltage = <2040000 2040000>;
+ qcom,cdc-vdd-pa-current = <260000>;
+
+ cdc-vdd-mic-bias-supply = <&pm660l_l7>;
+ qcom,cdc-vdd-mic-bias-voltage = <3088000 3088000>;
+ qcom,cdc-vdd-mic-bias-current = <5000>;
+
+ qcom,cdc-mclk-clk-rate = <9600000>;
+
+ qcom,cdc-static-supplies = "cdc-vdda-cp",
+ "cdc-vdd-pa";
+
+ qcom,cdc-on-demand-supplies = "cdc-vdd-mic-bias";
+
+ /*
+ * Not marking address @ as driver searches this child
+ * with name msm-dig-codec
+ */
+ msm_digital_codec: msm-dig-codec {
+ compatible = "qcom,msm-digital-codec";
+ reg = <0x62ec0000 0x0>;
+ };
+ };
+};
+
+&pm660_gpios {
+ gpio@c200 {
+ status = "ok";
+ qcom,mode = <1>;
+ qcom,pull = <4>;
+ qcom,vin-sel = <0>;
+ qcom,src-sel = <2>;
+ qcom,master-en = <1>;
+ qcom,out-strength = <2>;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi
index 6ea92ee..0cf48a3 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi
@@ -9,3 +9,27 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
+
+&qupv3_se9_2uart {
+ status = "disabled";
+};
+
+&qupv3_se12_2uart {
+ status = "ok";
+};
+
+&qupv3_se8_spi {
+ status = "disabled";
+};
+
+&qupv3_se3_i2c {
+ status = "disabled";
+};
+
+&qupv3_se10_i2c {
+ status = "disabled";
+};
+
+&qupv3_se6_4uart {
+ status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-lpi.dtsi b/arch/arm64/boot/dts/qcom/sdm670-lpi.dtsi
new file mode 100644
index 0000000..6e92f0e
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-lpi.dtsi
@@ -0,0 +1,284 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ lpi_tlmm: lpi_pinctrl@62b40000 {
+ compatible = "qcom,lpi-pinctrl";
+ reg = <0x62b40000 0x0>;
+ qcom,num-gpios = <32>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ cdc_pdm_clk_active: cdc_pdm_clk_active {
+ mux {
+ pins = "gpio18";
+ function = "func2";
+ };
+
+ config {
+ pins = "gpio18";
+ drive-strength = <8>;
+ output-high;
+ };
+ };
+
+ cdc_pdm_clk_sleep: cdc_pdm_clk_sleep {
+ mux {
+ pins = "gpio18";
+ function = "func2";
+ };
+
+ config {
+ pins = "gpio18";
+ drive-strength = <2>;
+ bias-disable;
+ output-low;
+ };
+ };
+
+ cdc_pdm_sync_active: cdc_pdm_sync_active {
+ mux {
+ pins = "gpio19";
+ function = "func3";
+ };
+
+ config {
+ pins = "gpio19";
+ drive-strength = <8>;
+ output-high;
+ };
+ };
+
+ cdc_pdm_sync_sleep: cdc_pdm_sync_sleep {
+ mux {
+ pins = "gpio19";
+ function = "func3";
+ };
+
+ config {
+ pins = "gpio19";
+ drive-strength = <2>;
+ bias-disable;
+ output-low;
+ };
+ };
+
+ cdc_pdm_rx0_active: cdc_pdm_rx0_active {
+ mux {
+ pins = "gpio21";
+ function = "func2";
+ };
+
+ config {
+ pins = "gpio21";
+ drive-strength = <8>;
+ output-high;
+ };
+ };
+
+ cdc_pdm_rx0_sleep: cdc_pdm_rx0_sleep {
+ mux {
+ pins = "gpio21";
+ function = "func2";
+ };
+
+ config {
+ pins = "gpio21";
+ drive-strength = <2>;
+ bias-disable;
+ output-low;
+ };
+ };
+
+ cdc_pdm_rx1_2_active: cdc_pdm_rx1_2_active {
+ mux {
+ pins = "gpio23", "gpio25";
+ function = "func1";
+ };
+
+ config {
+ pins = "gpio23", "gpio25";
+ drive-strength = <8>;
+ output-high;
+ };
+ };
+
+ cdc_pdm_rx1_2_sleep: cdc_pdm_rx1_2_sleep {
+ mux {
+ pins = "gpio23", "gpio25";
+ function = "func1";
+ };
+
+ config {
+ pins = "gpio23", "gpio25";
+ drive-strength = <2>;
+ bias-disable;
+ output-low;
+ };
+ };
+
+ cdc_pdm_2_gpios_active: cdc_pdm_2_gpios_active {
+ mux {
+ pins = "gpio20";
+ function = "func2";
+ };
+
+ config {
+ pins = "gpio20";
+ drive-strength = <8>;
+ };
+ };
+
+ cdc_pdm_2_gpios_sleep: cdc_pdm_2_gpios_sleep {
+ mux {
+ pins = "gpio20";
+ function = "func2";
+ };
+
+ config {
+ pins = "gpio20";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ cdc_rx0_comp_active: cdc_pdm_rx0_comp_active {
+ mux {
+ pins = "gpio22";
+ function = "func2";
+ };
+
+ config {
+ pins = "gpio22";
+ drive-strength = <8>;
+ };
+ };
+
+ cdc_rx0_comp_sleep: cdc_pdm_rx0_comp_sleep {
+ mux {
+ pins = "gpio22";
+ function = "func2";
+ };
+
+ config {
+ pins = "gpio22";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ cdc_rx1_comp_active: cdc_pdm_rx1_comp_active {
+ mux {
+ pins = "gpio24";
+ function = "func1";
+ };
+
+ config {
+ pins = "gpio24";
+ drive-strength = <8>;
+ };
+ };
+
+ cdc_rx1_comp_sleep: cdc_pdm_rx1_comp_sleep {
+ mux {
+ pins = "gpio24";
+ function = "func1";
+ };
+
+ config {
+ pins = "gpio24";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ lpi_cdc_reset_active: lpi_cdc_reset_active {
+ mux {
+ pins = "gpio29";
+ function = "func2";
+ };
+ config {
+ pins = "gpio29";
+ drive-strength = <16>;
+ output-high;
+ };
+ };
+
+ lpi_cdc_reset_sleep: lpi_cdc_reset_sleep {
+ mux {
+ pins = "gpio29";
+ function = "func2";
+ };
+
+ config {
+ pins = "gpio29";
+ drive-strength = <16>;
+ bias-disable;
+ output-low;
+ };
+ };
+
+ cdc_dmic12_gpios_active: dmic12_gpios_active {
+ mux {
+ pins = "gpio26", "gpio28";
+ function = "func1";
+ };
+
+ config {
+ pins = "gpio26", "gpio28";
+ drive-strength = <8>;
+ output-high;
+ };
+ };
+
+ cdc_dmic12_gpios_sleep: dmic12_gpios_sleep {
+ mux {
+ pins = "gpio26", "gpio28";
+ function = "func1";
+ };
+
+ config {
+ pins = "gpio26", "gpio28";
+ drive-strength = <2>;
+ bias-disable;
+ output-low;
+ };
+ };
+
+ cdc_dmic34_gpios_active: dmic34_gpios_active {
+ mux {
+ pins = "gpio27", "gpio29";
+ function = "func1";
+ };
+
+ config {
+ pins = "gpio27", "gpio29";
+ drive-strength = <8>;
+ input-enable;
+ };
+ };
+
+ cdc_dmic34_gpios_sleep: dmic34_gpios_sleep {
+ mux {
+ pins = "gpio27", "gpio29";
+ function = "func1";
+ };
+
+ config {
+ pins = "gpio27", "gpio29";
+ drive-strength = <2>;
+ pull-down;
+ input-enable;
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi
index 6ea92ee..0cf48a3 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi
@@ -9,3 +9,27 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
+
+&qupv3_se9_2uart {
+ status = "disabled";
+};
+
+&qupv3_se12_2uart {
+ status = "ok";
+};
+
+&qupv3_se8_spi {
+ status = "disabled";
+};
+
+&qupv3_se3_i2c {
+ status = "disabled";
+};
+
+&qupv3_se10_i2c {
+ status = "disabled";
+};
+
+&qupv3_se6_4uart {
+ status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
index 09ce9d2..73df253 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
@@ -19,5 +19,1423 @@
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
+ interrupt-parent = <&pdc>;
+
+ /* QUPv3 South SE mappings */
+ /* SE 0 pin mappings */
+ qupv3_se0_i2c_pins: qupv3_se0_i2c_pins {
+ qupv3_se0_i2c_active: qupv3_se0_i2c_active {
+ mux {
+ pins = "gpio0", "gpio1";
+ function = "qup0";
+ };
+
+ config {
+ pins = "gpio0", "gpio1";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ qupv3_se0_i2c_sleep: qupv3_se0_i2c_sleep {
+ mux {
+ pins = "gpio0", "gpio1";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio0", "gpio1";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+ };
+
+ qupv3_se0_spi_pins: qupv3_se0_spi_pins {
+ qupv3_se0_spi_active: qupv3_se0_spi_active {
+ mux {
+ pins = "gpio0", "gpio1", "gpio2",
+ "gpio3";
+ function = "qup0";
+ };
+
+ config {
+ pins = "gpio0", "gpio1", "gpio2",
+ "gpio3";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+
+ qupv3_se0_spi_sleep: qupv3_se0_spi_sleep {
+ mux {
+ pins = "gpio0", "gpio1", "gpio2",
+ "gpio3";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio0", "gpio1", "gpio2",
+ "gpio3";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+ };
+
+ /* SE 1 pin mappings */
+ qupv3_se1_i2c_pins: qupv3_se1_i2c_pins {
+ qupv3_se1_i2c_active: qupv3_se1_i2c_active {
+ mux {
+ pins = "gpio17", "gpio18";
+ function = "qup1";
+ };
+
+ config {
+ pins = "gpio17", "gpio18";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ qupv3_se1_i2c_sleep: qupv3_se1_i2c_sleep {
+ mux {
+ pins = "gpio17", "gpio18";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio17", "gpio18";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+ };
+
+ qupv3_se1_spi_pins: qupv3_se1_spi_pins {
+ qupv3_se1_spi_active: qupv3_se1_spi_active {
+ mux {
+ pins = "gpio17", "gpio18", "gpio19",
+ "gpio20";
+ function = "qup1";
+ };
+
+ config {
+ pins = "gpio17", "gpio18", "gpio19",
+ "gpio20";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+
+ qupv3_se1_spi_sleep: qupv3_se1_spi_sleep {
+ mux {
+ pins = "gpio17", "gpio18", "gpio19",
+ "gpio20";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio17", "gpio18", "gpio19",
+ "gpio20";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+ };
+
+ /* SE 2 pin mappings */
+ qupv3_se2_i2c_pins: qupv3_se2_i2c_pins {
+ qupv3_se2_i2c_active: qupv3_se2_i2c_active {
+ mux {
+ pins = "gpio27", "gpio28";
+ function = "qup2";
+ };
+
+ config {
+ pins = "gpio27", "gpio28";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ qupv3_se2_i2c_sleep: qupv3_se2_i2c_sleep {
+ mux {
+ pins = "gpio27", "gpio28";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio27", "gpio28";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+ };
+
+ qupv3_se2_spi_pins: qupv3_se2_spi_pins {
+ qupv3_se2_spi_active: qupv3_se2_spi_active {
+ mux {
+ pins = "gpio27", "gpio28", "gpio29",
+ "gpio30";
+ function = "qup2";
+ };
+
+ config {
+ pins = "gpio27", "gpio28", "gpio29",
+ "gpio30";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+
+ qupv3_se2_spi_sleep: qupv3_se2_spi_sleep {
+ mux {
+ pins = "gpio27", "gpio28", "gpio29",
+ "gpio30";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio27", "gpio28", "gpio29",
+ "gpio30";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+ };
+
+ /* SE 3 pin mappings */
+ qupv3_se3_i2c_pins: qupv3_se3_i2c_pins {
+ qupv3_se3_i2c_active: qupv3_se3_i2c_active {
+ mux {
+ pins = "gpio41", "gpio42";
+ function = "qup3";
+ };
+
+ config {
+ pins = "gpio41", "gpio42";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ qupv3_se3_i2c_sleep: qupv3_se3_i2c_sleep {
+ mux {
+ pins = "gpio41", "gpio42";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio41", "gpio42";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+ };
+
+ qupv3_se3_spi_pins: qupv3_se3_spi_pins {
+ qupv3_se3_spi_active: qupv3_se3_spi_active {
+ mux {
+ pins = "gpio41", "gpio42", "gpio43",
+ "gpio44";
+ function = "qup3";
+ };
+
+ config {
+ pins = "gpio41", "gpio42", "gpio43",
+ "gpio44";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+
+ qupv3_se3_spi_sleep: qupv3_se3_spi_sleep {
+ mux {
+ pins = "gpio41", "gpio42", "gpio43",
+ "gpio44";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio41", "gpio42", "gpio43",
+ "gpio44";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+ };
+
+ /* SE 4 pin mappings */
+ qupv3_se4_i2c_pins: qupv3_se4_i2c_pins {
+ qupv3_se4_i2c_active: qupv3_se4_i2c_active {
+ mux {
+ pins = "gpio89", "gpio90";
+ function = "qup4";
+ };
+
+ config {
+ pins = "gpio89", "gpio90";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ qupv3_se4_i2c_sleep: qupv3_se4_i2c_sleep {
+ mux {
+ pins = "gpio89", "gpio90";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio89", "gpio90";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+ };
+
+ qupv3_se4_spi_pins: qupv3_se4_spi_pins {
+ qupv3_se4_spi_active: qupv3_se4_spi_active {
+ mux {
+ pins = "gpio89", "gpio90", "gpio91",
+ "gpio92";
+ function = "qup4";
+ };
+
+ config {
+ pins = "gpio89", "gpio90", "gpio91",
+ "gpio92";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+
+ qupv3_se4_spi_sleep: qupv3_se4_spi_sleep {
+ mux {
+ pins = "gpio89", "gpio90", "gpio91",
+ "gpio92";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio89", "gpio90", "gpio91",
+ "gpio92";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+ };
+
+ /* SE 5 pin mappings */
+ qupv3_se5_i2c_pins: qupv3_se5_i2c_pins {
+ qupv3_se5_i2c_active: qupv3_se5_i2c_active {
+ mux {
+ pins = "gpio85", "gpio86";
+ function = "qup5";
+ };
+
+ config {
+ pins = "gpio85", "gpio86";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ qupv3_se5_i2c_sleep: qupv3_se5_i2c_sleep {
+ mux {
+ pins = "gpio85", "gpio86";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio85", "gpio86";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+ };
+
+ qupv3_se5_spi_pins: qupv3_se5_spi_pins {
+ qupv3_se5_spi_active: qupv3_se5_spi_active {
+ mux {
+ pins = "gpio85", "gpio86", "gpio87",
+ "gpio88";
+ function = "qup5";
+ };
+
+ config {
+ pins = "gpio85", "gpio86", "gpio87",
+ "gpio88";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+
+ qupv3_se5_spi_sleep: qupv3_se5_spi_sleep {
+ mux {
+ pins = "gpio85", "gpio86", "gpio87",
+ "gpio88";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio85", "gpio86", "gpio87",
+ "gpio88";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+ };
+
+ /* SE 6 pin mappings */
+ qupv3_se6_i2c_pins: qupv3_se6_i2c_pins {
+ qupv3_se6_i2c_active: qupv3_se6_i2c_active {
+ mux {
+ pins = "gpio45", "gpio46";
+ function = "qup6";
+ };
+
+ config {
+ pins = "gpio45", "gpio46";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ qupv3_se6_i2c_sleep: qupv3_se6_i2c_sleep {
+ mux {
+ pins = "gpio45", "gpio46";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio45", "gpio46";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+ };
+
+ qupv3_se6_4uart_pins: qupv3_se6_4uart_pins {
+ qupv3_se6_4uart_active: qupv3_se6_4uart_active {
+ mux {
+ pins = "gpio45", "gpio46", "gpio47",
+ "gpio48";
+ function = "qup6";
+ };
+
+ config {
+ pins = "gpio45", "gpio46", "gpio47",
+ "gpio48";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ qupv3_se6_4uart_sleep: qupv3_se6_4uart_sleep {
+ mux {
+ pins = "gpio45", "gpio46", "gpio47",
+ "gpio48";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio45", "gpio46", "gpio47",
+ "gpio48";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+ };
+
+ qupv3_se6_spi_pins: qupv3_se6_spi_pins {
+ qupv3_se6_spi_active: qupv3_se6_spi_active {
+ mux {
+ pins = "gpio45", "gpio46", "gpio47",
+ "gpio48";
+ function = "qup6";
+ };
+
+ config {
+ pins = "gpio45", "gpio46", "gpio47",
+ "gpio48";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+
+ qupv3_se6_spi_sleep: qupv3_se6_spi_sleep {
+ mux {
+ pins = "gpio45", "gpio46", "gpio47",
+ "gpio48";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio45", "gpio46", "gpio47",
+ "gpio48";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+ };
+
+ /* SE 7 pin mappings */
+ qupv3_se7_i2c_pins: qupv3_se7_i2c_pins {
+ qupv3_se7_i2c_active: qupv3_se7_i2c_active {
+ mux {
+ pins = "gpio93", "gpio94";
+ function = "qup7";
+ };
+
+ config {
+ pins = "gpio93", "gpio94";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ qupv3_se7_i2c_sleep: qupv3_se7_i2c_sleep {
+ mux {
+ pins = "gpio93", "gpio94";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio93", "gpio94";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+ };
+
+ qupv3_se7_4uart_pins: qupv3_se7_4uart_pins {
+ qupv3_se7_4uart_active: qupv3_se7_4uart_active {
+ mux {
+ pins = "gpio93", "gpio94", "gpio95",
+ "gpio96";
+ function = "qup7";
+ };
+
+ config {
+ pins = "gpio93", "gpio94", "gpio95",
+ "gpio96";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ qupv3_se7_4uart_sleep: qupv3_se7_4uart_sleep {
+ mux {
+ pins = "gpio93", "gpio94", "gpio95",
+ "gpio96";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio93", "gpio94", "gpio95",
+ "gpio96";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+ };
+
+ qupv3_se7_spi_pins: qupv3_se7_spi_pins {
+ qupv3_se7_spi_active: qupv3_se7_spi_active {
+ mux {
+ pins = "gpio93", "gpio94", "gpio95",
+ "gpio96";
+ function = "qup7";
+ };
+
+ config {
+ pins = "gpio93", "gpio94", "gpio95",
+ "gpio96";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+
+ qupv3_se7_spi_sleep: qupv3_se7_spi_sleep {
+ mux {
+ pins = "gpio93", "gpio94", "gpio95",
+ "gpio96";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio93", "gpio94", "gpio95",
+ "gpio96";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+ };
+
+ /* QUPv3 North instances */
+ /* SE 8 pin mappings */
+ qupv3_se8_i2c_pins: qupv3_se8_i2c_pins {
+ qupv3_se8_i2c_active: qupv3_se8_i2c_active {
+ mux {
+ pins = "gpio65", "gpio66";
+ function = "qup8";
+ };
+
+ config {
+ pins = "gpio65", "gpio66";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ qupv3_se8_i2c_sleep: qupv3_se8_i2c_sleep {
+ mux {
+ pins = "gpio65", "gpio66";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio65", "gpio66";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+ };
+
+ qupv3_se8_spi_pins: qupv3_se8_spi_pins {
+ qupv3_se8_spi_active: qupv3_se8_spi_active {
+ mux {
+ pins = "gpio65", "gpio66", "gpio67",
+ "gpio68";
+ function = "qup8";
+ };
+
+ config {
+ pins = "gpio65", "gpio66", "gpio67",
+ "gpio68";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+
+ qupv3_se8_spi_sleep: qupv3_se8_spi_sleep {
+ mux {
+ pins = "gpio65", "gpio66", "gpio67",
+ "gpio68";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio65", "gpio66", "gpio67",
+ "gpio68";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+ };
+
+ /* SE 9 pin mappings */
+ qupv3_se9_i2c_pins: qupv3_se9_i2c_pins {
+ qupv3_se9_i2c_active: qupv3_se9_i2c_active {
+ mux {
+ pins = "gpio6", "gpio7";
+ function = "qup9";
+ };
+
+ config {
+ pins = "gpio6", "gpio7";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ qupv3_se9_i2c_sleep: qupv3_se9_i2c_sleep {
+ mux {
+ pins = "gpio6", "gpio7";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio6", "gpio7";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+ };
+
+ qupv3_se9_2uart_pins: qupv3_se9_2uart_pins {
+ qupv3_se9_2uart_active: qupv3_se9_2uart_active {
+ mux {
+ pins = "gpio4", "gpio5";
+ function = "qup9";
+ };
+
+ config {
+ pins = "gpio4", "gpio5";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ qupv3_se9_2uart_sleep: qupv3_se9_2uart_sleep {
+ mux {
+ pins = "gpio4", "gpio5";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio4", "gpio5";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+ };
+
+ qupv3_se9_spi_pins: qupv3_se9_spi_pins {
+ qupv3_se9_spi_active: qupv3_se9_spi_active {
+ mux {
+ pins = "gpio4", "gpio5", "gpio6",
+ "gpio7";
+ function = "qup9";
+ };
+
+ config {
+ pins = "gpio4", "gpio5", "gpio6",
+ "gpio7";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+
+ qupv3_se9_spi_sleep: qupv3_se9_spi_sleep {
+ mux {
+ pins = "gpio4", "gpio5", "gpio6",
+ "gpio7";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio4", "gpio5", "gpio6",
+ "gpio7";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+ };
+
+ /* SE 10 pin mappings */
+ qupv3_se10_i2c_pins: qupv3_se10_i2c_pins {
+ qupv3_se10_i2c_active: qupv3_se10_i2c_active {
+ mux {
+ pins = "gpio55", "gpio56";
+ function = "qup10";
+ };
+
+ config {
+ pins = "gpio55", "gpio56";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ qupv3_se10_i2c_sleep: qupv3_se10_i2c_sleep {
+ mux {
+ pins = "gpio55", "gpio56";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio55", "gpio56";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+ };
+
+ qupv3_se10_2uart_pins: qupv3_se10_2uart_pins {
+ qupv3_se10_2uart_active: qupv3_se10_2uart_active {
+ mux {
+ pins = "gpio53", "gpio54";
+ function = "qup10";
+ };
+
+ config {
+ pins = "gpio53", "gpio54";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ qupv3_se10_2uart_sleep: qupv3_se10_2uart_sleep {
+ mux {
+ pins = "gpio53", "gpio54";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio53", "gpio54";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+ };
+
+ qupv3_se10_spi_pins: qupv3_se10_spi_pins {
+ qupv3_se10_spi_active: qupv3_se10_spi_active {
+ mux {
+ pins = "gpio53", "gpio54", "gpio55",
+ "gpio56";
+ function = "qup10";
+ };
+
+ config {
+ pins = "gpio53", "gpio54", "gpio55",
+ "gpio56";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+
+ qupv3_se10_spi_sleep: qupv3_se10_spi_sleep {
+ mux {
+ pins = "gpio53", "gpio54", "gpio55",
+ "gpio56";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio53", "gpio54", "gpio55",
+ "gpio56";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+ };
+
+ /* SE 11 pin mappings */
+ qupv3_se11_i2c_pins: qupv3_se11_i2c_pins {
+ qupv3_se11_i2c_active: qupv3_se11_i2c_active {
+ mux {
+ pins = "gpio31", "gpio32";
+ function = "qup11";
+ };
+
+ config {
+ pins = "gpio31", "gpio32";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ qupv3_se11_i2c_sleep: qupv3_se11_i2c_sleep {
+ mux {
+ pins = "gpio31", "gpio32";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio31", "gpio32";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+ };
+
+ qupv3_se11_spi_pins: qupv3_se11_spi_pins {
+ qupv3_se11_spi_active: qupv3_se11_spi_active {
+ mux {
+ pins = "gpio31", "gpio32", "gpio33",
+ "gpio34";
+ function = "qup11";
+ };
+
+ config {
+ pins = "gpio31", "gpio32", "gpio33",
+ "gpio34";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+
+ qupv3_se11_spi_sleep: qupv3_se11_spi_sleep {
+ mux {
+ pins = "gpio31", "gpio32", "gpio33",
+ "gpio34";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio31", "gpio32", "gpio33",
+ "gpio34";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+ };
+
+ /* SE 12 pin mappings */
+ qupv3_se12_i2c_pins: qupv3_se12_i2c_pins {
+ qupv3_se12_i2c_active: qupv3_se12_i2c_active {
+ mux {
+ pins = "gpio49", "gpio50";
+ function = "qup12";
+ };
+
+ config {
+ pins = "gpio49", "gpio50";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ qupv3_se12_i2c_sleep: qupv3_se12_i2c_sleep {
+ mux {
+ pins = "gpio49", "gpio50";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio49", "gpio50";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+ };
+
+ qupv3_se12_2uart_pins: qupv3_se12_2uart_pins {
+ qupv3_se12_2uart_active: qupv3_se12_2uart_active {
+			mux {
+				pins = "gpio51", "gpio52";
+				/* was "qup9" — copy-paste from se9_2uart; gpio51/52 are
+				 * muxed as "qup12" by qupv3_se12_spi below, and every other
+				 * SE pin group uses its own SE function number.
+				 */
+				function = "qup12";
+			};
+
+ config {
+ pins = "gpio51", "gpio52";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ qupv3_se12_2uart_sleep: qupv3_se12_2uart_sleep {
+ mux {
+ pins = "gpio51", "gpio52";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio51", "gpio52";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+ };
+
+ qupv3_se12_spi_pins: qupv3_se12_spi_pins {
+ qupv3_se12_spi_active: qupv3_se12_spi_active {
+ mux {
+ pins = "gpio49", "gpio50", "gpio51",
+ "gpio52";
+ function = "qup12";
+ };
+
+ config {
+ pins = "gpio49", "gpio50", "gpio51",
+ "gpio52";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+
+ qupv3_se12_spi_sleep: qupv3_se12_spi_sleep {
+ mux {
+ pins = "gpio49", "gpio50", "gpio51",
+ "gpio52";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio49", "gpio50", "gpio51",
+ "gpio52";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+ };
+
+ /* SE 13 pin mappings */
+ qupv3_se13_i2c_pins: qupv3_se13_i2c_pins {
+ qupv3_se13_i2c_active: qupv3_se13_i2c_active {
+ mux {
+ pins = "gpio105", "gpio106";
+ function = "qup13";
+ };
+
+ config {
+ pins = "gpio105", "gpio106";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ qupv3_se13_i2c_sleep: qupv3_se13_i2c_sleep {
+ mux {
+ pins = "gpio105", "gpio106";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio105", "gpio106";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+ };
+
+ qupv3_se13_spi_pins: qupv3_se13_spi_pins {
+ qupv3_se13_spi_active: qupv3_se13_spi_active {
+ mux {
+ pins = "gpio105", "gpio106", "gpio107",
+ "gpio108";
+ function = "qup13";
+ };
+
+ config {
+ pins = "gpio105", "gpio106", "gpio107",
+ "gpio108";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+
+ qupv3_se13_spi_sleep: qupv3_se13_spi_sleep {
+ mux {
+ pins = "gpio105", "gpio106", "gpio107",
+ "gpio108";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio105", "gpio106", "gpio107",
+ "gpio108";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+ };
+
+ /* SE 14 pin mappings */
+ qupv3_se14_i2c_pins: qupv3_se14_i2c_pins {
+ qupv3_se14_i2c_active: qupv3_se14_i2c_active {
+ mux {
+ pins = "gpio33", "gpio34";
+ function = "qup14";
+ };
+
+ config {
+ pins = "gpio33", "gpio34";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ qupv3_se14_i2c_sleep: qupv3_se14_i2c_sleep {
+ mux {
+ pins = "gpio33", "gpio34";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio33", "gpio34";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+ };
+
+ qupv3_se14_spi_pins: qupv3_se14_spi_pins {
+ qupv3_se14_spi_active: qupv3_se14_spi_active {
+ mux {
+ pins = "gpio31", "gpio32", "gpio33",
+ "gpio34";
+ function = "qup14";
+ };
+
+ config {
+ pins = "gpio31", "gpio32", "gpio33",
+ "gpio34";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+
+ qupv3_se14_spi_sleep: qupv3_se14_spi_sleep {
+ mux {
+ pins = "gpio31", "gpio32", "gpio33",
+ "gpio34";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio31", "gpio32", "gpio33",
+ "gpio34";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+ };
+
+ /* SE 15 pin mappings */
+ qupv3_se15_i2c_pins: qupv3_se15_i2c_pins {
+ qupv3_se15_i2c_active: qupv3_se15_i2c_active {
+ mux {
+ pins = "gpio81", "gpio82";
+ function = "qup15";
+ };
+
+ config {
+ pins = "gpio81", "gpio82";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ qupv3_se15_i2c_sleep: qupv3_se15_i2c_sleep {
+ mux {
+ pins = "gpio81", "gpio82";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio81", "gpio82";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+ };
+
+ qupv3_se15_spi_pins: qupv3_se15_spi_pins {
+ qupv3_se15_spi_active: qupv3_se15_spi_active {
+ mux {
+ pins = "gpio81", "gpio82", "gpio83",
+ "gpio84";
+ function = "qup15";
+ };
+
+ config {
+ pins = "gpio81", "gpio82", "gpio83",
+ "gpio84";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+
+ qupv3_se15_spi_sleep: qupv3_se15_spi_sleep {
+ mux {
+ pins = "gpio81", "gpio82", "gpio83",
+ "gpio84";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio81", "gpio82", "gpio83",
+ "gpio84";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+ };
+ /* SDC pin type */
+ sdc1_clk_on: sdc1_clk_on {
+ config {
+ pins = "sdc1_clk";
+ bias-disable; /* NO pull */
+ drive-strength = <16>; /* 16 MA */
+ };
+ };
+
+ sdc1_clk_off: sdc1_clk_off {
+ config {
+ pins = "sdc1_clk";
+ bias-disable; /* NO pull */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ sdc1_cmd_on: sdc1_cmd_on {
+ config {
+ pins = "sdc1_cmd";
+ bias-pull-up; /* pull up */
+ drive-strength = <10>; /* 10 MA */
+ };
+ };
+
+ sdc1_cmd_off: sdc1_cmd_off {
+ config {
+ pins = "sdc1_cmd";
+			num-grp-pins = <1>; /* NOTE(review): legacy property set only here — no other sdc on/off node uses it; confirm with pinctrl-msm binding and drop */
+ bias-pull-up; /* pull up */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ sdc1_data_on: sdc1_data_on {
+ config {
+ pins = "sdc1_data";
+ bias-pull-up; /* pull up */
+ drive-strength = <10>; /* 10 MA */
+ };
+ };
+
+ sdc1_data_off: sdc1_data_off {
+ config {
+ pins = "sdc1_data";
+ bias-pull-up; /* pull up */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ sdc2_clk_on: sdc2_clk_on {
+ config {
+ pins = "sdc2_clk";
+ bias-disable; /* NO pull */
+ drive-strength = <16>; /* 16 MA */
+ };
+ };
+
+ sdc2_clk_off: sdc2_clk_off {
+ config {
+ pins = "sdc2_clk";
+ bias-disable; /* NO pull */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ sdc2_cmd_on: sdc2_cmd_on {
+ config {
+ pins = "sdc2_cmd";
+ bias-pull-up; /* pull up */
+ drive-strength = <10>; /* 10 MA */
+ };
+ };
+
+ sdc2_cmd_off: sdc2_cmd_off {
+ config {
+ pins = "sdc2_cmd";
+ bias-pull-up; /* pull up */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ sdc2_data_on: sdc2_data_on {
+ config {
+ pins = "sdc2_data";
+ bias-pull-up; /* pull up */
+ drive-strength = <10>; /* 10 MA */
+ };
+ };
+
+ sdc2_data_off: sdc2_data_off {
+ config {
+ pins = "sdc2_data";
+ bias-pull-up; /* pull up */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ /* USB C analog configuration */
+ wcd_usbc_analog_en1 {
+ wcd_usbc_analog_en1_idle: wcd_usbc_ana_en1_idle {
+ mux {
+ pins = "gpio49";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio49";
+ drive-strength = <2>;
+ bias-pull-down;
+ output-low;
+ };
+ };
+
+ wcd_usbc_analog_en1_active: wcd_usbc_ana_en1_active {
+ mux {
+ pins = "gpio49";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio49";
+ drive-strength = <2>;
+ bias-disable;
+ output-high;
+ };
+ };
+ };
+
+ sdw_clk_pin {
+ sdw_clk_sleep: sdw_clk_sleep {
+ mux {
+ pins = "gpio65";
+ function = "wsa_clk";
+ };
+
+ config {
+ pins = "gpio65";
+ drive-strength = <2>;
+ bias-bus-hold;
+ };
+ };
+
+ sdw_clk_active: sdw_clk_active {
+ mux {
+ pins = "gpio65";
+ function = "wsa_clk";
+ };
+
+ config {
+ pins = "gpio65";
+ drive-strength = <2>;
+ bias-bus-hold;
+ };
+ };
+ };
+
+ sdw_data_pin {
+ sdw_data_sleep: sdw_data_sleep {
+ mux {
+ pins = "gpio66";
+ function = "wsa_data";
+ };
+
+ config {
+ pins = "gpio66";
+ drive-strength = <4>;
+ bias-bus-hold;
+ };
+ };
+
+ sdw_data_active: sdw_data_active {
+ mux {
+ pins = "gpio66";
+ function = "wsa_data";
+ };
+
+ config {
+ pins = "gpio66";
+ drive-strength = <4>;
+ bias-bus-hold;
+ };
+ };
+ };
+
+ /* WSA speaker reset pins */
+ spkr_1_sd_n {
+ spkr_1_sd_n_sleep: spkr_1_sd_n_sleep {
+ mux {
+ pins = "gpio67";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio67";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down;
+ input-enable;
+ };
+ };
+
+ spkr_1_sd_n_active: spkr_1_sd_n_active {
+ mux {
+ pins = "gpio67";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio67";
+ drive-strength = <16>; /* 16 mA */
+ bias-disable;
+ output-high;
+ };
+ };
+ };
+
+ spkr_2_sd_n {
+ spkr_2_sd_n_sleep: spkr_2_sd_n_sleep {
+ mux {
+ pins = "gpio68";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio68";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down;
+ input-enable;
+ };
+ };
+
+ spkr_2_sd_n_active: spkr_2_sd_n_active {
+ mux {
+ pins = "gpio68";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio68";
+ drive-strength = <16>; /* 16 mA */
+ bias-disable;
+ output-high;
+ };
+ };
+ };
+
+ wcd_gnd_mic_swap {
+ wcd_gnd_mic_swap_idle: wcd_gnd_mic_swap_idle {
+ mux {
+ pins = "gpio40";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio40";
+ drive-strength = <2>;
+ bias-pull-down;
+ output-low;
+ };
+ };
+
+ wcd_gnd_mic_swap_active: wcd_gnd_mic_swap_active {
+ mux {
+ pins = "gpio40";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio40";
+ drive-strength = <2>;
+ bias-disable;
+ output-high;
+ };
+ };
+ };
+
+ wcd9xxx_intr {
+		wcd_intr_default: wcd_intr_default {
+ mux {
+ pins = "gpio80";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio80";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* pull down */
+ input-enable;
+ };
+ };
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi
new file mode 100644
index 0000000..f03d9c2
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi
@@ -0,0 +1,206 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ qcom,lpm-levels {
+ compatible = "qcom,lpm-levels";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,pm-cluster@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ label = "L3";
+ qcom,psci-mode-shift = <4>;
+ qcom,psci-mode-mask = <0xfff>;
+
+ qcom,pm-cluster-level@0 { /* D1 */
+ reg = <0>;
+ label = "l3-wfi";
+ qcom,psci-mode = <0x1>;
+ qcom,latency-us = <51>;
+ qcom,ss-power = <452>;
+ qcom,energy-overhead = <69355>;
+ qcom,time-overhead = <99>;
+ };
+
+ qcom,pm-cluster-level@1 { /* D2 */
+ reg = <1>;
+ label = "l3-dyn-ret";
+ qcom,psci-mode = <0x2>;
+ qcom,latency-us = <659>;
+ qcom,ss-power = <434>;
+ qcom,energy-overhead = <465725>;
+ qcom,time-overhead = <976>;
+ qcom,min-child-idx = <1>;
+ };
+
+ qcom,pm-cluster-level@2 { /* D4, D3 is not supported */
+ reg = <2>;
+ label = "l3-pc";
+ qcom,psci-mode = <0x4>;
+ qcom,latency-us = <3201>;
+ qcom,ss-power = <408>;
+ qcom,energy-overhead = <2421840>;
+ qcom,time-overhead = <5376>;
+ qcom,min-child-idx = <2>;
+ qcom,is-reset;
+ };
+
+ qcom,pm-cluster-level@3 { /* Cx off */
+ reg = <3>;
+ label = "cx-off";
+ qcom,psci-mode = <0x224>;
+ qcom,latency-us = <5562>;
+ qcom,ss-power = <308>;
+ qcom,energy-overhead = <2521840>;
+ qcom,time-overhead = <6376>;
+ qcom,min-child-idx = <3>;
+ qcom,is-reset;
+ qcom,notify-rpm;
+ };
+
+ qcom,pm-cluster-level@4 { /* AOSS sleep */
+ reg = <4>;
+ label = "llcc-off";
+ qcom,psci-mode = <0xC24>;
+ qcom,latency-us = <6562>;
+ qcom,ss-power = <108>;
+ qcom,energy-overhead = <2621840>;
+ qcom,time-overhead = <7376>;
+ qcom,min-child-idx = <3>;
+ qcom,is-reset;
+ qcom,notify-rpm;
+ };
+
+ qcom,pm-cpu@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ qcom,psci-mode-shift = <0>;
+ qcom,psci-mode-mask = <0xf>;
+ qcom,cpu = <&CPU0 &CPU1 &CPU2 &CPU3 &CPU4
+ &CPU5>;
+
+ qcom,pm-cpu-level@0 { /* C1 */
+ reg = <0>;
+ qcom,spm-cpu-mode = "wfi";
+ qcom,psci-cpu-mode = <0x1>;
+ qcom,latency-us = <43>;
+ qcom,ss-power = <454>;
+ qcom,energy-overhead = <38639>;
+ qcom,time-overhead = <83>;
+ };
+
+ qcom,pm-cpu-level@1 { /* C2D */
+ reg = <1>;
+ qcom,spm-cpu-mode = "ret";
+ qcom,psci-cpu-mode = <0x2>;
+ qcom,latency-us = <119>;
+ qcom,ss-power = <449>;
+ qcom,energy-overhead = <78456>;
+ qcom,time-overhead = <167>;
+ };
+
+ qcom,pm-cpu-level@2 { /* C3 */
+ reg = <2>;
+ qcom,spm-cpu-mode = "pc";
+ qcom,psci-cpu-mode = <0x3>;
+ qcom,latency-us = <461>;
+ qcom,ss-power = <436>;
+ qcom,energy-overhead = <418225>;
+ qcom,time-overhead = <885>;
+ qcom,is-reset;
+ qcom,use-broadcast-timer;
+ };
+
+ qcom,pm-cpu-level@3 { /* C4 */
+ reg = <3>;
+ qcom,spm-cpu-mode = "rail-pc";
+ qcom,psci-cpu-mode = <0x4>;
+ qcom,latency-us = <531>;
+ qcom,ss-power = <400>;
+ qcom,energy-overhead = <428225>;
+ qcom,time-overhead = <1000>;
+ qcom,is-reset;
+ qcom,use-broadcast-timer;
+ };
+ };
+
+ qcom,pm-cpu@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ qcom,psci-mode-shift = <0>;
+ qcom,psci-mode-mask = <0xf>;
+ qcom,cpu = <&CPU6 &CPU7>;
+
+ qcom,pm-cpu-level@0 { /* C1 */
+ reg = <0>;
+ qcom,spm-cpu-mode = "wfi";
+ qcom,psci-cpu-mode = <0x1>;
+ qcom,latency-us = <43>;
+ qcom,ss-power = <454>;
+ qcom,energy-overhead = <38639>;
+ qcom,time-overhead = <83>;
+ };
+
+ qcom,pm-cpu-level@1 { /* C2D */
+ reg = <1>;
+ qcom,spm-cpu-mode = "ret";
+ qcom,psci-cpu-mode = <0x2>;
+ qcom,latency-us = <116>;
+ qcom,ss-power = <449>;
+ qcom,energy-overhead = <78456>;
+ qcom,time-overhead = <167>;
+ };
+
+ qcom,pm-cpu-level@2 { /* C3 */
+ reg = <2>;
+ qcom,spm-cpu-mode = "pc";
+ qcom,psci-cpu-mode = <0x3>;
+ qcom,latency-us = <621>;
+ qcom,ss-power = <436>;
+ qcom,energy-overhead = <418225>;
+ qcom,time-overhead = <885>;
+ qcom,is-reset;
+ qcom,use-broadcast-timer;
+ };
+
+ qcom,pm-cpu-level@3 { /* C4 */
+ reg = <3>;
+ qcom,spm-cpu-mode = "rail-pc";
+ qcom,psci-cpu-mode = <0x4>;
+ qcom,latency-us = <1061>;
+ qcom,ss-power = <400>;
+ qcom,energy-overhead = <428225>;
+ qcom,time-overhead = <1000>;
+ qcom,is-reset;
+ qcom,use-broadcast-timer;
+ };
+ };
+ };
+ };
+
+ qcom,rpm-stats@c300000 {
+ compatible = "qcom,rpm-stats";
+ reg = <0xc300000 0x1000>, <0xc3f0004 0x4>;
+ reg-names = "phys_addr_base", "offset_addr";
+ };
+
+ pdc: interrupt-controller@b220000{
+ compatible = "qcom,pdc-sdm670";
+ reg = <0xb220000 0x400>;
+ #interrupt-cells = <3>;
+ interrupt-parent = <&intc>;
+ interrupt-controller;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qupv3.dtsi b/arch/arm64/boot/dts/qcom/sdm670-qupv3.dtsi
new file mode 100644
index 0000000..657363f
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-qupv3.dtsi
@@ -0,0 +1,716 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/msm/msm-bus-ids.h>
+
+&soc {
+ /* QUPv3 South instances */
+ qupv3_0: qcom,qupv3_0_geni_se@8c0000 {
+ compatible = "qcom,qupv3-geni-se";
+ reg = <0x8c0000 0x6000>;
+ qcom,bus-mas-id = <MSM_BUS_MASTER_BLSP_1>;
+ qcom,bus-slv-id = <MSM_BUS_SLAVE_EBI_CH0>;
+ qcom,iommu-s1-bypass;
+
+ iommu_qupv3_0_geni_se_cb: qcom,iommu_qupv3_0_geni_se_cb {
+ compatible = "qcom,qupv3-geni-se-cb";
+ iommus = <&apps_smmu 0x003 0x0>;
+ };
+ };
+
+	/*
+	 * High Speed (HS) UART instances. HS UART use cases are
+	 * supported only on these serial engine instances.
+	 */
+	qupv3_se6_4uart: qcom,qup_uart@898000 {
+		compatible = "qcom,msm-geni-serial-hs", "qcom,msm-geni-uart";
+		reg = <0x898000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S6_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se6_4uart_active>;
+		pinctrl-1 = <&qupv3_se6_4uart_sleep>;
+		interrupts-extended = <&pdc GIC_SPI 607 0>,
+			<&tlmm 48 0>;
+		status = "disabled";
+		qcom,wakeup-byte = <0xFD>;
+		qcom,wrapper-core = <&qupv3_0>;
+	};
+
+	qupv3_se7_4uart: qcom,qup_uart@89c000 {
+		compatible = "qcom,msm-geni-serial-hs", "qcom,msm-geni-uart";
+		reg = <0x89c000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S7_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se7_4uart_active>;
+		pinctrl-1 = <&qupv3_se7_4uart_sleep>;
+		interrupts-extended = <&pdc GIC_SPI 608 0>,
+			<&tlmm 96 0>;
+		status = "disabled";
+		qcom,wakeup-byte = <0xFD>;
+		qcom,wrapper-core = <&qupv3_0>;
+	};
+
+ /* I2C */
+ qupv3_se0_i2c: i2c@880000 {
+ compatible = "qcom,i2c-geni";
+ reg = <0x880000 0x4000>;
+ interrupts = <GIC_SPI 601 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-names = "se-clk", "m-ahb", "s-ahb";
+ clocks = <&clock_gcc GCC_QUPV3_WRAP0_S0_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qupv3_se0_i2c_active>;
+ pinctrl-1 = <&qupv3_se0_i2c_sleep>;
+ qcom,wrapper-core = <&qupv3_0>;
+ status = "disabled";
+ };
+
+ qupv3_se1_i2c: i2c@884000 {
+ compatible = "qcom,i2c-geni";
+ reg = <0x884000 0x4000>;
+ interrupts = <GIC_SPI 602 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-names = "se-clk", "m-ahb", "s-ahb";
+ clocks = <&clock_gcc GCC_QUPV3_WRAP0_S1_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qupv3_se1_i2c_active>;
+ pinctrl-1 = <&qupv3_se1_i2c_sleep>;
+ qcom,wrapper-core = <&qupv3_0>;
+ status = "disabled";
+ };
+
+ qupv3_se2_i2c: i2c@888000 {
+ compatible = "qcom,i2c-geni";
+ reg = <0x888000 0x4000>;
+ interrupts = <GIC_SPI 603 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-names = "se-clk", "m-ahb", "s-ahb";
+ clocks = <&clock_gcc GCC_QUPV3_WRAP0_S2_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qupv3_se2_i2c_active>;
+ pinctrl-1 = <&qupv3_se2_i2c_sleep>;
+ qcom,wrapper-core = <&qupv3_0>;
+ status = "disabled";
+ };
+
+ qupv3_se3_i2c: i2c@88c000 {
+ compatible = "qcom,i2c-geni";
+ reg = <0x88c000 0x4000>;
+ interrupts = <GIC_SPI 604 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-names = "se-clk", "m-ahb", "s-ahb";
+ clocks = <&clock_gcc GCC_QUPV3_WRAP0_S3_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qupv3_se3_i2c_active>;
+ pinctrl-1 = <&qupv3_se3_i2c_sleep>;
+ qcom,wrapper-core = <&qupv3_0>;
+ status = "disabled";
+ };
+
+ qupv3_se4_i2c: i2c@890000 {
+ compatible = "qcom,i2c-geni";
+ reg = <0x890000 0x4000>;
+ interrupts = <GIC_SPI 605 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-names = "se-clk", "m-ahb", "s-ahb";
+ clocks = <&clock_gcc GCC_QUPV3_WRAP0_S4_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qupv3_se4_i2c_active>;
+ pinctrl-1 = <&qupv3_se4_i2c_sleep>;
+ qcom,wrapper-core = <&qupv3_0>;
+ status = "disabled";
+ };
+
+ qupv3_se5_i2c: i2c@894000 {
+ compatible = "qcom,i2c-geni";
+ reg = <0x894000 0x4000>;
+ interrupts = <GIC_SPI 606 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-names = "se-clk", "m-ahb", "s-ahb";
+ clocks = <&clock_gcc GCC_QUPV3_WRAP0_S5_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qupv3_se5_i2c_active>;
+ pinctrl-1 = <&qupv3_se5_i2c_sleep>;
+ qcom,wrapper-core = <&qupv3_0>;
+ status = "disabled";
+ };
+
+ qupv3_se6_i2c: i2c@898000 {
+ compatible = "qcom,i2c-geni";
+ reg = <0x898000 0x4000>;
+ interrupts = <GIC_SPI 607 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-names = "se-clk", "m-ahb", "s-ahb";
+ clocks = <&clock_gcc GCC_QUPV3_WRAP0_S6_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qupv3_se6_i2c_active>;
+ pinctrl-1 = <&qupv3_se6_i2c_sleep>;
+ qcom,wrapper-core = <&qupv3_0>;
+ status = "disabled";
+ };
+
+ qupv3_se7_i2c: i2c@89c000 {
+ compatible = "qcom,i2c-geni";
+ reg = <0x89c000 0x4000>;
+ interrupts = <GIC_SPI 608 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-names = "se-clk", "m-ahb", "s-ahb";
+ clocks = <&clock_gcc GCC_QUPV3_WRAP0_S7_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qupv3_se7_i2c_active>;
+ pinctrl-1 = <&qupv3_se7_i2c_sleep>;
+ qcom,wrapper-core = <&qupv3_0>;
+ status = "disabled";
+ };
+
+ /* SPI */
+ qupv3_se0_spi: spi@880000 {
+ compatible = "qcom,spi-geni";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x880000 0x4000>;
+ reg-names = "se_phys";
+ clock-names = "se-clk", "m-ahb", "s-ahb";
+ clocks = <&clock_gcc GCC_QUPV3_WRAP0_S0_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qupv3_se0_spi_active>;
+ pinctrl-1 = <&qupv3_se0_spi_sleep>;
+ interrupts = <GIC_SPI 601 0>;
+ spi-max-frequency = <50000000>;
+ qcom,wrapper-core = <&qupv3_0>;
+ status = "disabled";
+ };
+
+ qupv3_se1_spi: spi@884000 {
+ compatible = "qcom,spi-geni";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x884000 0x4000>;
+ reg-names = "se_phys";
+ clock-names = "se-clk", "m-ahb", "s-ahb";
+ clocks = <&clock_gcc GCC_QUPV3_WRAP0_S1_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qupv3_se1_spi_active>;
+ pinctrl-1 = <&qupv3_se1_spi_sleep>;
+ interrupts = <GIC_SPI 602 0>;
+ spi-max-frequency = <50000000>;
+ qcom,wrapper-core = <&qupv3_0>;
+ status = "disabled";
+ };
+
+ qupv3_se2_spi: spi@888000 {
+ compatible = "qcom,spi-geni";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x888000 0x4000>;
+ reg-names = "se_phys";
+ clock-names = "se-clk", "m-ahb", "s-ahb";
+ clocks = <&clock_gcc GCC_QUPV3_WRAP0_S2_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qupv3_se2_spi_active>;
+ pinctrl-1 = <&qupv3_se2_spi_sleep>;
+ interrupts = <GIC_SPI 603 0>;
+ spi-max-frequency = <50000000>;
+ qcom,wrapper-core = <&qupv3_0>;
+ status = "disabled";
+ };
+
+ qupv3_se3_spi: spi@88c000 {
+ compatible = "qcom,spi-geni";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x88c000 0x4000>;
+ reg-names = "se_phys";
+ clock-names = "se-clk", "m-ahb", "s-ahb";
+ clocks = <&clock_gcc GCC_QUPV3_WRAP0_S3_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qupv3_se3_spi_active>;
+ pinctrl-1 = <&qupv3_se3_spi_sleep>;
+ interrupts = <GIC_SPI 604 0>;
+ spi-max-frequency = <50000000>;
+ qcom,wrapper-core = <&qupv3_0>;
+ status = "disabled";
+ };
+
+ qupv3_se4_spi: spi@890000 {
+ compatible = "qcom,spi-geni";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x890000 0x4000>;
+ reg-names = "se_phys";
+ clock-names = "se-clk", "m-ahb", "s-ahb";
+ clocks = <&clock_gcc GCC_QUPV3_WRAP0_S4_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qupv3_se4_spi_active>;
+ pinctrl-1 = <&qupv3_se4_spi_sleep>;
+ interrupts = <GIC_SPI 605 0>;
+ spi-max-frequency = <50000000>;
+ qcom,wrapper-core = <&qupv3_0>;
+ status = "disabled";
+ };
+
+ qupv3_se5_spi: spi@894000 {
+ compatible = "qcom,spi-geni";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x894000 0x4000>;
+ reg-names = "se_phys";
+ clock-names = "se-clk", "m-ahb", "s-ahb";
+ clocks = <&clock_gcc GCC_QUPV3_WRAP0_S5_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qupv3_se5_spi_active>;
+ pinctrl-1 = <&qupv3_se5_spi_sleep>;
+ interrupts = <GIC_SPI 606 0>;
+ spi-max-frequency = <50000000>;
+ qcom,wrapper-core = <&qupv3_0>;
+ status = "disabled";
+ };
+
+ qupv3_se6_spi: spi@898000 {
+ compatible = "qcom,spi-geni";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x898000 0x4000>;
+ reg-names = "se_phys";
+ clock-names = "se-clk", "m-ahb", "s-ahb";
+ clocks = <&clock_gcc GCC_QUPV3_WRAP0_S6_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qupv3_se6_spi_active>;
+ pinctrl-1 = <&qupv3_se6_spi_sleep>;
+ interrupts = <GIC_SPI 607 0>;
+ spi-max-frequency = <50000000>;
+ qcom,wrapper-core = <&qupv3_0>;
+ status = "disabled";
+ };
+
+ qupv3_se7_spi: spi@89c000 {
+ compatible = "qcom,spi-geni";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x89c000 0x4000>;
+ reg-names = "se_phys";
+ clock-names = "se-clk", "m-ahb", "s-ahb";
+ clocks = <&clock_gcc GCC_QUPV3_WRAP0_S7_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qupv3_se7_spi_active>;
+ pinctrl-1 = <&qupv3_se7_spi_sleep>;
+ interrupts = <GIC_SPI 608 0>;
+ spi-max-frequency = <50000000>;
+ qcom,wrapper-core = <&qupv3_0>;
+ status = "disabled";
+ };
+
+ /* QUPv3 North Instances */
+ qupv3_1: qcom,qupv3_1_geni_se@ac0000 {
+ compatible = "qcom,qupv3-geni-se";
+ reg = <0xac0000 0x6000>;
+ qcom,bus-mas-id = <MSM_BUS_MASTER_BLSP_2>;
+ qcom,bus-slv-id = <MSM_BUS_SLAVE_EBI_CH0>;
+ qcom,iommu-s1-bypass;
+
+ iommu_qupv3_1_geni_se_cb: qcom,iommu_qupv3_1_geni_se_cb {
+ compatible = "qcom,qupv3-geni-se-cb";
+ iommus = <&apps_smmu 0x6c3 0x0>;
+ };
+ };
+
+ /* 2-wire UART */
+
+ /* Debug UART Instance for CDP/MTP platform */
+	qupv3_se9_2uart: qcom,qup_uart@a84000 {
+		compatible = "qcom,msm-geni-console", "qcom,msm-geni-uart";
+		reg = <0xa84000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S1_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se9_2uart_active>;
+		pinctrl-1 = <&qupv3_se9_2uart_sleep>;
+		interrupts = <GIC_SPI 354 0>;
+		qcom,wrapper-core = <&qupv3_1>;
+		status = "disabled";
+	};
+
+ /* Debug UART Instance for RUMI platform */
+	qupv3_se10_2uart: qcom,qup_uart@a88000 {
+		compatible = "qcom,msm-geni-console", "qcom,msm-geni-uart";
+		reg = <0xa88000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S2_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se10_2uart_active>;
+		pinctrl-1 = <&qupv3_se10_2uart_sleep>;
+		interrupts = <GIC_SPI 355 0>;
+		qcom,wrapper-core = <&qupv3_1>;
+		status = "disabled";
+	};
+
+ /* Debug UART Instance for CDP/MTP platform on SDM670 */
+	qupv3_se12_2uart: qcom,qup_uart@a90000 {
+		compatible = "qcom,msm-geni-console", "qcom,msm-geni-uart";
+		reg = <0xa90000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S4_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se12_2uart_active>;
+		pinctrl-1 = <&qupv3_se12_2uart_sleep>;
+		interrupts = <GIC_SPI 357 0>;
+		qcom,wrapper-core = <&qupv3_1>;
+		status = "disabled";
+	};
+
+ /* I2C */
+ qupv3_se8_i2c: i2c@a80000 {
+ compatible = "qcom,i2c-geni";
+ reg = <0xa80000 0x4000>;
+ interrupts = <GIC_SPI 353 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-names = "se-clk", "m-ahb", "s-ahb";
+ clocks = <&clock_gcc GCC_QUPV3_WRAP1_S0_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qupv3_se8_i2c_active>;
+ pinctrl-1 = <&qupv3_se8_i2c_sleep>;
+ qcom,wrapper-core = <&qupv3_1>;
+ status = "disabled";
+ };
+
+ qupv3_se9_i2c: i2c@a84000 {
+ compatible = "qcom,i2c-geni";
+ reg = <0xa84000 0x4000>;
+ interrupts = <GIC_SPI 354 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-names = "se-clk", "m-ahb", "s-ahb";
+ clocks = <&clock_gcc GCC_QUPV3_WRAP1_S1_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qupv3_se9_i2c_active>;
+ pinctrl-1 = <&qupv3_se9_i2c_sleep>;
+ qcom,wrapper-core = <&qupv3_1>;
+ status = "disabled";
+ };
+
+ qupv3_se10_i2c: i2c@a88000 {
+ compatible = "qcom,i2c-geni";
+ reg = <0xa88000 0x4000>;
+ interrupts = <GIC_SPI 355 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-names = "se-clk", "m-ahb", "s-ahb";
+ clocks = <&clock_gcc GCC_QUPV3_WRAP1_S2_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qupv3_se10_i2c_active>;
+ pinctrl-1 = <&qupv3_se10_i2c_sleep>;
+ qcom,wrapper-core = <&qupv3_1>;
+ status = "disabled";
+ };
+
+ qupv3_se11_i2c: i2c@a8c000 {
+ compatible = "qcom,i2c-geni";
+ reg = <0xa8c000 0x4000>;
+ interrupts = <GIC_SPI 356 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-names = "se-clk", "m-ahb", "s-ahb";
+ clocks = <&clock_gcc GCC_QUPV3_WRAP1_S3_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qupv3_se11_i2c_active>;
+ pinctrl-1 = <&qupv3_se11_i2c_sleep>;
+ qcom,wrapper-core = <&qupv3_1>;
+ status = "disabled";
+ };
+
+ qupv3_se12_i2c: i2c@a90000 {
+ compatible = "qcom,i2c-geni";
+ reg = <0xa90000 0x4000>;
+ interrupts = <GIC_SPI 357 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-names = "se-clk", "m-ahb", "s-ahb";
+ clocks = <&clock_gcc GCC_QUPV3_WRAP1_S4_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qupv3_se12_i2c_active>;
+ pinctrl-1 = <&qupv3_se12_i2c_sleep>;
+ qcom,wrapper-core = <&qupv3_1>;
+ status = "disabled";
+ };
+
+ qupv3_se13_i2c: i2c@a94000 {
+ compatible = "qcom,i2c-geni";
+ reg = <0xa94000 0x4000>;
+ interrupts = <GIC_SPI 358 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-names = "se-clk", "m-ahb", "s-ahb";
+ clocks = <&clock_gcc GCC_QUPV3_WRAP1_S5_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qupv3_se13_i2c_active>;
+ pinctrl-1 = <&qupv3_se13_i2c_sleep>;
+ qcom,wrapper-core = <&qupv3_1>;
+ status = "disabled";
+ };
+
+ qupv3_se14_i2c: i2c@a98000 {
+ compatible = "qcom,i2c-geni";
+ reg = <0xa98000 0x4000>;
+ interrupts = <GIC_SPI 359 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-names = "se-clk", "m-ahb", "s-ahb";
+ clocks = <&clock_gcc GCC_QUPV3_WRAP1_S6_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qupv3_se14_i2c_active>;
+ pinctrl-1 = <&qupv3_se14_i2c_sleep>;
+ qcom,wrapper-core = <&qupv3_1>;
+ status = "disabled";
+ };
+
+ qupv3_se15_i2c: i2c@a9c000 {
+ compatible = "qcom,i2c-geni";
+ reg = <0xa9c000 0x4000>;
+ interrupts = <GIC_SPI 360 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-names = "se-clk", "m-ahb", "s-ahb";
+ clocks = <&clock_gcc GCC_QUPV3_WRAP1_S7_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qupv3_se15_i2c_active>;
+ pinctrl-1 = <&qupv3_se15_i2c_sleep>;
+ qcom,wrapper-core = <&qupv3_1>;
+ status = "disabled";
+ };
+
+ /* SPI */
+ qupv3_se8_spi: spi@a80000 {
+ compatible = "qcom,spi-geni";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0xa80000 0x4000>;
+ reg-names = "se_phys";
+ clock-names = "se-clk", "m-ahb", "s-ahb";
+ clocks = <&clock_gcc GCC_QUPV3_WRAP1_S0_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qupv3_se8_spi_active>;
+ pinctrl-1 = <&qupv3_se8_spi_sleep>;
+ interrupts = <GIC_SPI 353 0>;
+ spi-max-frequency = <50000000>;
+ qcom,wrapper-core = <&qupv3_1>;
+ status = "disabled";
+ };
+
+ qupv3_se9_spi: spi@a84000 {
+ compatible = "qcom,spi-geni";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0xa84000 0x4000>;
+ reg-names = "se_phys";
+ clock-names = "se-clk", "m-ahb", "s-ahb";
+ clocks = <&clock_gcc GCC_QUPV3_WRAP1_S1_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qupv3_se9_spi_active>;
+ pinctrl-1 = <&qupv3_se9_spi_sleep>;
+ interrupts = <GIC_SPI 354 0>;
+ spi-max-frequency = <50000000>;
+ qcom,wrapper-core = <&qupv3_1>;
+ status = "disabled";
+ };
+
+ qupv3_se10_spi: spi@a88000 {
+ compatible = "qcom,spi-geni";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0xa88000 0x4000>;
+ reg-names = "se_phys";
+ clock-names = "se-clk", "m-ahb", "s-ahb";
+ clocks = <&clock_gcc GCC_QUPV3_WRAP1_S2_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qupv3_se10_spi_active>;
+ pinctrl-1 = <&qupv3_se10_spi_sleep>;
+ interrupts = <GIC_SPI 355 0>;
+ spi-max-frequency = <50000000>;
+ qcom,wrapper-core = <&qupv3_1>;
+ status = "disabled";
+ };
+
+ qupv3_se11_spi: spi@a8c000 {
+ compatible = "qcom,spi-geni";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0xa8c000 0x4000>;
+ reg-names = "se_phys";
+ clock-names = "se-clk", "m-ahb", "s-ahb";
+ clocks = <&clock_gcc GCC_QUPV3_WRAP1_S3_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qupv3_se11_spi_active>;
+ pinctrl-1 = <&qupv3_se11_spi_sleep>;
+ interrupts = <GIC_SPI 356 0>;
+ spi-max-frequency = <50000000>;
+ qcom,wrapper-core = <&qupv3_1>;
+ status = "disabled";
+ };
+
+ qupv3_se12_spi: spi@a90000 {
+ compatible = "qcom,spi-geni";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0xa90000 0x4000>;
+ reg-names = "se_phys";
+ clock-names = "se-clk", "m-ahb", "s-ahb";
+ clocks = <&clock_gcc GCC_QUPV3_WRAP1_S4_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qupv3_se12_spi_active>;
+ pinctrl-1 = <&qupv3_se12_spi_sleep>;
+ interrupts = <GIC_SPI 357 0>;
+ spi-max-frequency = <50000000>;
+ qcom,wrapper-core = <&qupv3_1>;
+ status = "disabled";
+ };
+
+ qupv3_se13_spi: spi@a94000 {
+ compatible = "qcom,spi-geni";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0xa94000 0x4000>;
+ reg-names = "se_phys";
+ clock-names = "se-clk", "m-ahb", "s-ahb";
+ clocks = <&clock_gcc GCC_QUPV3_WRAP1_S5_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qupv3_se13_spi_active>;
+ pinctrl-1 = <&qupv3_se13_spi_sleep>;
+ interrupts = <GIC_SPI 358 0>;
+ spi-max-frequency = <50000000>;
+ qcom,wrapper-core = <&qupv3_1>;
+ status = "disabled";
+ };
+
+ qupv3_se14_spi: spi@a98000 {
+ compatible = "qcom,spi-geni";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0xa98000 0x4000>;
+ reg-names = "se_phys";
+ clock-names = "se-clk", "m-ahb", "s-ahb";
+ clocks = <&clock_gcc GCC_QUPV3_WRAP1_S6_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qupv3_se14_spi_active>;
+ pinctrl-1 = <&qupv3_se14_spi_sleep>;
+ interrupts = <GIC_SPI 359 0>;
+ spi-max-frequency = <50000000>;
+ qcom,wrapper-core = <&qupv3_1>;
+ status = "disabled";
+ };
+
+ qupv3_se15_spi: spi@a9c000 {
+ compatible = "qcom,spi-geni";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0xa9c000 0x4000>;
+ reg-names = "se_phys";
+ clock-names = "se-clk", "m-ahb", "s-ahb";
+ clocks = <&clock_gcc GCC_QUPV3_WRAP1_S7_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qupv3_se15_spi_active>;
+ pinctrl-1 = <&qupv3_se15_spi_sleep>;
+ interrupts = <GIC_SPI 360 0>;
+ spi-max-frequency = <50000000>;
+ qcom,wrapper-core = <&qupv3_1>;
+ status = "disabled";
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm670-regulator.dtsi
index b0c436f..0a8c49f 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-regulator.dtsi
@@ -15,331 +15,6 @@
/* Stub regulators */
/ {
- pm660_s4: regulator-pm660-s4 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pm660_s4";
- qcom,hpm-min-load = <100000>;
- regulator-min-microvolt = <2040000>;
- regulator-max-microvolt = <2040000>;
- };
-
- /* pm660 S5 - VDD_MODEM supply */
- pm660_s5_level: regulator-pm660-s5 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pm660_s5_level";
- qcom,hpm-min-load = <100000>;
- regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
- regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
- };
-
- pm660_s6: regulator-pm660-s6 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pm660_s6";
- qcom,hpm-min-load = <100000>;
- regulator-min-microvolt = <1352000>;
- regulator-max-microvolt = <1352000>;
- };
-
- /* pm660l S1 - VDD_MX supply */
- pm660l_s1_level: regulator-pm660l-s1 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pm660l_s1_level";
- qcom,hpm-min-load = <100000>;
- regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
- regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
- };
-
- pm660l_s1_floor_level: regulator-pm660l-s1-floor-level {
- compatible = "qcom,stub-regulator";
- regulator-name = "pm660l_s1_floor_level";
- qcom,hpm-min-load = <100000>;
- regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
- regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
- };
-
- pm660l_s1_level_ao: regulator-pm660l-s1-level-ao {
- compatible = "qcom,stub-regulator";
- regulator-name = "pm660l_s1_level_ao";
- qcom,hpm-min-load = <100000>;
- regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
- regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
- };
-
- /* pm660l S2 - VDD_GFX supply */
- pm660l_s2_level: regulator-pm660l-s2 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pm660l_s2_level";
- qcom,hpm-min-load = <100000>;
- regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
- regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
- };
-
- /* pm660l S3 + S4 - VDD_CX supply */
- pm660l_s3_level: regulator-pm660l-s3-level {
- compatible = "qcom,stub-regulator";
- regulator-name = "pm660l_s3_level";
- qcom,hpm-min-load = <100000>;
- regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
- regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
- };
-
- pm660l_s3_floor_level: regulator-pm660l-s3-floor-level {
- compatible = "qcom,stub-regulator";
- regulator-name = "pm660l_s3_floor_level";
- qcom,hpm-min-load = <100000>;
- regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
- regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
- };
-
- pm660l_s3_level_ao: regulator-pm660l-s3-level-ao {
- compatible = "qcom,stub-regulator";
- regulator-name = "pm660_s3_level_ao";
- qcom,hpm-min-load = <100000>;
- regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
- regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
- };
-
- pm660_l1: regulator-pm660-l1 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pm660_l1";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1250000>;
- };
-
- pm660_l2: regulator-pm660-l2 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pm660_l2";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <1000000>;
- regulator-max-microvolt = <1000000>;
- };
-
- pm660_l3: regulator-pm660-l3 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pm660_l3";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <1000000>;
- regulator-max-microvolt = <1000000>;
- };
-
- pm660_l5: regulator-pm660-l5 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pm660_l5";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <800000>;
- regulator-max-microvolt = <800000>;
- };
-
- pm660_l6: regulator-pm660-l6 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pm660_l6";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <1304000>;
- regulator-max-microvolt = <1304000>;
- };
-
- pm660_l7: regulator-pm660-l7 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pm660_l7";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
- };
-
- pm660_l8: regulator-pm660-l8 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pm660_l8";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- };
-
- pm660_l9: regulator-pm660-l9 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pm660_l9";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- };
-
- pm660_l10: regulator-pm660-l10 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pm660_l10";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- };
-
- pm660_l11: regulator-pm660-l11 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pm660_l11";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- };
-
- pm660_l12: regulator-pm660-l12 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pm660_l12";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- };
-
- pm660_l13: regulator-pm660-l13 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pm660_l13";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- };
-
- pm660_l14: regulator-pm660-l14 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pm660_l14";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- };
-
- pm660_l15: regulator-pm660-l15 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pm660_l15";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <2950000>;
- };
-
- pm660_l16: regulator-pm660-l16 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pm660_l16";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <2700000>;
- regulator-max-microvolt = <2700000>;
- };
-
- pm660_l17: regulator-pm660-l17 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pm660_l17";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <2950000>;
- };
-
- pm660_l19: regulator-pm660-l19 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pm660_l19";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <3312000>;
- regulator-max-microvolt = <3312000>;
- };
-
- pm660l_l1: regulator-pm660l-l1 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pm660l_l1";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <880000>;
- regulator-max-microvolt = <900000>;
- };
-
- pm660l_l2: regulator-pm660l-l2 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pm660l_l2";
- qcom,hpm-min-load = <5000>;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <2960000>;
- };
-
- pm660l_l3: regulator-pm660l-l3 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pm660l_l3";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <2850000>;
- regulator-max-microvolt = <3008000>;
- };
-
- pm660l_l4: regulator-pm660l-l4 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pm660l_l4";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <2960000>;
- regulator-max-microvolt = <2960000>;
- };
-
- pm660l_l5: regulator-pm660l-l5 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pm660l_l5";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <2960000>;
- regulator-max-microvolt = <2960000>;
- };
-
- pm660l_l6: regulator-pm660l-l6 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pm660l_l6";
- qcom,hpm-min-load = <5000>;
- regulator-min-microvolt = <3008000>;
- regulator-max-microvolt = <3300000>;
- };
-
- pm660l_l7: regulator-pm660l-l7 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pm660l_l7";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <3088000>;
- regulator-max-microvolt = <3100000>;
- };
-
- pm660l_l8: regulator-pm660l-l8 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pm660l_l8";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3312000>;
- };
-
- /* pm660l L9 = VDD_LPI_CX supply */
- pm660l_l9_level: regulator-pm660l-l9-level {
- compatible = "qcom,stub-regulator";
- regulator-name = "pm660l_l9_level";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
- regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
- };
-
- pm660l_l9_floor_level: regulator-pm660l-l9-floor-level {
- compatible = "qcom,stub-regulator";
- regulator-name = "pm660l_l9_floor_level";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
- regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
- };
-
- /* pm660l L10 = VDD_LPI_MX supply */
- pm660l_l10_level: regulator-pm660l-l10-level {
- compatible = "qcom,stub-regulator";
- regulator-name = "pm660l_l10_level";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
- regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
- };
-
- pm660l_l10_floor_level: regulator-pm660l-l10-floor-level {
- compatible = "qcom,stub-regulator";
- regulator-name = "pm660l_l10_floor_level";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
- regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
- };
-
- pm660l_bob: regulator-pm660l-bob {
- compatible = "qcom,stub-regulator";
- regulator-name = "pm660l_bob";
- regulator-min-microvolt = <3312000>;
- regulator-max-microvolt = <3312000>;
- };
-
apc0_pwrcl_vreg: regulator-pwrcl {
compatible = "qcom,stub-regulator";
regulator-name = "apc0_pwrcl_corner";
@@ -362,6 +37,598 @@
};
};
+&soc {
+ /* RPMh regulators: */
+ rpmh-regulator-smpa4 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "smpa4";
+ pm660_s4: regulator-pm660-s4 {
+ regulator-name = "pm660_s4";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <2040000>;
+ regulator-max-microvolt = <2040000>;
+ qcom,init-voltage = <2040000>;
+ };
+ };
+
+ /* pm660 S5 - VDD_MODEM supply */
+ rpmh-regulator-modemlvl {
+ compatible = "qcom,rpmh-arc-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "mss.lvl";
+ pm660_s5_level: regulator-pm660-s5 {
+ regulator-name = "pm660_s5_level";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+ regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+ };
+ };
+
+ rpmh-regulator-smpa6 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "smpa6";
+ pm660_s6: regulator-pm660-s6 {
+ regulator-name = "pm660_s6";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <1352000>;
+ regulator-max-microvolt = <1352000>;
+ qcom,init-voltage = <1352000>;
+ };
+ };
+
+ /* pm660l S1 - VDD_MX supply */
+ rpmh-regulator-mxlvl {
+ compatible = "qcom,rpmh-arc-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "mx.lvl";
+ pm660l_s1_level: regulator-pm660l-s1 {
+ regulator-name = "pm660l_s1_level";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+ regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+ };
+
+ pm660l_s1_level_ao: regulator-pm660l-s1-level-ao {
+ regulator-name = "pm660l_s1_level_ao";
+ qcom,set = <RPMH_REGULATOR_SET_ACTIVE>;
+ regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+ regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+ };
+ };
+
+ /* pm660l S2 - VDD_GFX supply */
+ rpmh-regulator-gfxlvl {
+ compatible = "qcom,rpmh-arc-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "gfx.lvl";
+ pm660l_s2_level: regulator-pm660l-s2 {
+ regulator-name = "pm660l_s2_level";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt
+ = <RPMH_REGULATOR_LEVEL_MIN_SVS>;
+ regulator-max-microvolt
+ = <RPMH_REGULATOR_LEVEL_MAX>;
+ qcom,init-voltage-level
+ = <RPMH_REGULATOR_LEVEL_MIN_SVS>;
+ };
+ };
+
+ /* pm660l S3 + S4 - VDD_CX supply */
+ rpmh-regulator-cxlvl {
+ compatible = "qcom,rpmh-arc-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "cx.lvl";
+ pm660l_s3_level-parent-supply = <&pm660l_s1_level>;
+ pm660l_s3_level_ao-parent-supply = <&pm660l_s1_level_ao>;
+ pm660l_s3_level: regulator-pm660l-s3-level {
+ regulator-name = "pm660l_s3_level";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+ regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+ qcom,min-dropout-voltage-level = <(-1)>;
+ };
+
+ pm660l_s3_level_ao: regulator-pm660l-s3-level-ao {
+ regulator-name = "pm660l_s3_level_ao";
+ qcom,set = <RPMH_REGULATOR_SET_ACTIVE>;
+ regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+ regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+ qcom,min-dropout-voltage-level = <(-1)>;
+ };
+ };
+
+ rpmh-regulator-ldoa1 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa1";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pm660_l1: regulator-pm660-l1 {
+ regulator-name = "pm660_l1";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1250000>;
+ qcom,init-voltage = <1200000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
+ };
+
+ rpmh-regulator-ldoa2 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa2";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pm660_l2: regulator-pm660-l2 {
+ regulator-name = "pm660_l2";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ qcom,init-voltage = <1000000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
+ };
+
+ rpmh-regulator-ldoa3 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa3";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pm660_l3: regulator-pm660-l3 {
+ regulator-name = "pm660_l3";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ qcom,init-voltage = <1000000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
+ };
+
+ rpmh-regulator-ldoa5 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa5";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pm660_l5: regulator-pm660-l5 {
+ regulator-name = "pm660_l5";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ qcom,init-voltage = <800000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
+ };
+
+ rpmh-regulator-ldoa6 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa6";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pm660_l6: regulator-pm660-l6 {
+ regulator-name = "pm660_l6";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <1304000>;
+ regulator-max-microvolt = <1304000>;
+ qcom,init-voltage = <1304000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
+ };
+
+ rpmh-regulator-ldoa7 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa7";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pm660_l7: regulator-pm660-l7 {
+ regulator-name = "pm660_l7";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ qcom,init-voltage = <1200000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
+ };
+
+ rpmh-regulator-ldoa8 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa8";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pm660_l8: regulator-pm660-l8 {
+ regulator-name = "pm660_l8";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ qcom,init-voltage = <1800000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
+ };
+
+ rpmh-regulator-ldoa9 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa9";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pm660_l9: regulator-pm660-l9 {
+ regulator-name = "pm660_l9";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ qcom,init-voltage = <1800000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
+ };
+
+ rpmh-regulator-ldoa10 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa10";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pm660_l10: regulator-pm660-l10 {
+ regulator-name = "pm660_l10";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ qcom,init-voltage = <1800000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
+ };
+
+ rpmh-regulator-ldoa11 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa11";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pm660_l11: regulator-pm660-l11 {
+ regulator-name = "pm660_l11";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ qcom,init-voltage = <1800000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
+ };
+
+ rpmh-regulator-ldoa12 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa12";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pm660_l12: regulator-pm660-l12 {
+ regulator-name = "pm660_l12";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ qcom,init-voltage = <1800000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
+ };
+
+ rpmh-regulator-ldoa13 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa13";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pm660_l13: regulator-pm660-l13 {
+ regulator-name = "pm660_l13";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ qcom,init-voltage = <1800000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
+ };
+
+ rpmh-regulator-ldoa14 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa14";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pm660_l14: regulator-pm660-l14 {
+ regulator-name = "pm660_l14";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ qcom,init-voltage = <1800000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
+ };
+
+ rpmh-regulator-ldoa15 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa15";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pm660_l15: regulator-pm660-l15 {
+ regulator-name = "pm660_l15";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ qcom,init-voltage = <1800000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
+ };
+
+ rpmh-regulator-ldoa16 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa16";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pm660_l16: regulator-pm660-l16 {
+ regulator-name = "pm660_l16";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2700000>;
+ qcom,init-voltage = <2700000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
+ };
+
+ rpmh-regulator-ldoa17 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa17";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pm660_l17: regulator-pm660-l17 {
+ regulator-name = "pm660_l17";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ qcom,init-voltage = <1800000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
+ };
+
+ rpmh-regulator-ldoa19 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa19";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pm660_l19: regulator-pm660-l19 {
+ regulator-name = "pm660_l19";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <3312000>;
+ regulator-max-microvolt = <3312000>;
+ qcom,init-voltage = <3312000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
+ };
+
+ rpmh-regulator-ldob1 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldob1";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pm660l_l1: regulator-pm660l-l1 {
+ regulator-name = "pm660l_l1";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <900000>;
+ qcom,init-voltage = <880000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
+ };
+
+ rpmh-regulator-ldob2 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldob2";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pm660l_l2: regulator-pm660l-l2 {
+ regulator-name = "pm660l_l2";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2960000>;
+ qcom,init-voltage = <1800000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
+ };
+
+ rpmh-regulator-ldob3 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldob3";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pm660l_l3: regulator-pm660l-l3 {
+ regulator-name = "pm660l_l3";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <3008000>;
+ qcom,init-voltage = <2850000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
+ };
+
+ rpmh-regulator-ldob4 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldob4";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pm660l_l4: regulator-pm660l-l4 {
+ regulator-name = "pm660l_l4";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <2960000>;
+ regulator-max-microvolt = <2960000>;
+ qcom,init-voltage = <2960000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
+ };
+
+ rpmh-regulator-ldob5 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldob5";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pm660l_l5: regulator-pm660l-l5 {
+ regulator-name = "pm660l_l5";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <2960000>;
+ regulator-max-microvolt = <2960000>;
+ qcom,init-voltage = <2960000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
+ };
+
+ rpmh-regulator-ldob6 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldob6";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pm660l_l6: regulator-pm660l-l6 {
+ regulator-name = "pm660l_l6";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <3008000>;
+ regulator-max-microvolt = <3300000>;
+ qcom,init-voltage = <3008000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
+ };
+
+ rpmh-regulator-ldob7 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldob7";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pm660l_l7: regulator-pm660l-l7 {
+ regulator-name = "pm660l_l7";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <3088000>;
+ regulator-max-microvolt = <3100000>;
+ qcom,init-voltage = <3088000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
+ };
+
+ rpmh-regulator-ldob8 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldob8";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pm660l_l8: regulator-pm660l-l8 {
+ regulator-name = "pm660l_l8";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3312000>;
+ qcom,init-voltage = <3300000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
+ };
+
+ /* pm660l L9 = VDD_LPI_CX supply */
+ rpmh-regulator-lcxlvl {
+ compatible = "qcom,rpmh-arc-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "lcx.lvl";
+ pm660l_l9_level: regulator-pm660l-l9-level {
+ regulator-name = "pm660l_l9_level";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+ regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+ };
+ };
+
+ /* pm660l L10 = VDD_LPI_MX supply */
+ rpmh-regulator-lmxlvl {
+ compatible = "qcom,rpmh-arc-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "lmx.lvl";
+ pm660l_l10_level: regulator-pm660l-l10-level {
+ regulator-name = "pm660l_l10_level";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+ regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+ };
+ };
+
+ rpmh-regulator-bobb1 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "bobb1";
+ pm660l_bob: regulator-pm660l-bob {
+ regulator-name = "pm660l_bob";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <3312000>;
+ regulator-max-microvolt = <3312000>;
+ qcom,init-voltage = <3312000>;
+ };
+ };
+};
+
&pm660_charger {
smb2_vbus: qcom,smb2-vbus {
regulator-name = "smb2-vbus";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-rumi.dtsi b/arch/arm64/boot/dts/qcom/sdm670-rumi.dtsi
index 6ea92ee..f2f41fd 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-rumi.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-rumi.dtsi
@@ -9,3 +9,92 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
+
+/{
+ aliases {
+ serial0 = &qupv3_se10_2uart;
+ serial1 = &qupv3_se9_2uart;
+ spi0 = &qupv3_se8_spi;
+ i2c0 = &qupv3_se10_i2c;
+ i2c1 = &qupv3_se3_i2c;
+ hsuart0 = &qupv3_se6_4uart;
+ };
+
+};
+
+&qupv3_se9_2uart {
+ status = "disabled";
+};
+
+&qupv3_se8_spi {
+ status = "disabled";
+};
+
+&qupv3_se10_2uart {
+ status = "ok";
+};
+
+&qupv3_se3_i2c {
+ status = "disabled";
+};
+
+&qupv3_se10_i2c {
+ status = "disabled";
+};
+
+&qupv3_se6_4uart {
+ status = "disabled";
+};
+
+&ufsphy_mem {
+ compatible = "qcom,ufs-phy-qrbtc-sdm845";
+
+ vdda-phy-supply = <&pm660l_l1>; /* 0.88v */
+ vdda-pll-supply = <&pm660_l1>; /* 1.2v */
+ vdda-phy-max-microamp = <62900>;
+ vdda-pll-max-microamp = <18300>;
+
+ status = "ok";
+};
+
+&ufshc_mem {
+ limit-tx-hs-gear = <1>;
+ limit-rx-hs-gear = <1>;
+ scsi-cmd-timeout = <300000>;
+
+ vdd-hba-supply = <&ufs_phy_gdsc>;
+ vdd-hba-fixed-regulator;
+ vcc-supply = <&pm660l_l4>;
+ vccq2-supply = <&pm660_l8>;
+ vcc-max-microamp = <600000>;
+ vccq2-max-microamp = <600000>;
+
+ qcom,vddp-ref-clk-supply = <&pm660_l1>;
+ qcom,vddp-ref-clk-max-microamp = <100>;
+
+ qcom,disable-lpm;
+ rpm-level = <0>;
+ spm-level = <0>;
+ status = "ok";
+};
+
+&sdhc_1 {
+ vdd-supply = <&pm660l_l4>;
+ qcom,vdd-voltage-level = <2960000 2960000>;
+ qcom,vdd-current-level = <200 570000>;
+
+ vdd-io-supply = <&pm660_l8>;
+ qcom,vdd-io-always-on;
+ qcom,vdd-io-lpm-sup;
+ qcom,vdd-io-voltage-level = <1800000 1800000>;
+ qcom,vdd-io-current-level = <200 325000>;
+
+ pinctrl-names = "active", "sleep";
+ pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on>;
+ pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off>;
+
+ qcom,clk-rates = <400000 20000000 25000000 50000000>;
+ qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
+
+ status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-smp2p.dtsi b/arch/arm64/boot/dts/qcom/sdm670-smp2p.dtsi
new file mode 100644
index 0000000..f3e5ddb
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-smp2p.dtsi
@@ -0,0 +1,271 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+&soc {
+ qcom,smp2p-modem@1799000c {
+ compatible = "qcom,smp2p";
+ reg = <0x1799000c 0x4>;
+ qcom,remote-pid = <1>;
+ qcom,irq-bitmask = <0x4000>;
+ interrupts = <GIC_SPI 451 IRQ_TYPE_EDGE_RISING>;
+ };
+
+ qcom,smp2p-adsp@1799000c {
+ compatible = "qcom,smp2p";
+ reg = <0x1799000c 0x4>;
+ qcom,remote-pid = <2>;
+ qcom,irq-bitmask = <0x4000000>;
+ interrupts = <GIC_SPI 172 IRQ_TYPE_EDGE_RISING>;
+ };
+
+ qcom,smp2p-cdsp@1799000c {
+ compatible = "qcom,smp2p";
+ reg = <0x1799000c 0x4>;
+ qcom,remote-pid = <5>;
+ qcom,irq-bitmask = <0x40>;
+ interrupts = <GIC_SPI 576 IRQ_TYPE_EDGE_RISING>;
+ };
+
+
+ smp2pgpio_smp2p_15_in: qcom,smp2pgpio-smp2p-15-in {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "smp2p";
+ qcom,remote-pid = <15>;
+ qcom,is-inbound;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ qcom,smp2pgpio_test_smp2p_15_in {
+ compatible = "qcom,smp2pgpio_test_smp2p_15_in";
+ gpios = <&smp2pgpio_smp2p_15_in 0 0>;
+ };
+
+ smp2pgpio_smp2p_15_out: qcom,smp2pgpio-smp2p-15-out {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "smp2p";
+ qcom,remote-pid = <15>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ qcom,smp2pgpio_test_smp2p_15_out {
+ compatible = "qcom,smp2pgpio_test_smp2p_15_out";
+ gpios = <&smp2pgpio_smp2p_15_out 0 0>;
+ };
+
+ smp2pgpio_smp2p_1_in: qcom,smp2pgpio-smp2p-1-in {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "smp2p";
+ qcom,remote-pid = <1>;
+ qcom,is-inbound;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ qcom,smp2pgpio_test_smp2p_1_in {
+ compatible = "qcom,smp2pgpio_test_smp2p_1_in";
+ gpios = <&smp2pgpio_smp2p_1_in 0 0>;
+ };
+
+ smp2pgpio_smp2p_1_out: qcom,smp2pgpio-smp2p-1-out {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "smp2p";
+ qcom,remote-pid = <1>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ qcom,smp2pgpio_test_smp2p_1_out {
+ compatible = "qcom,smp2pgpio_test_smp2p_1_out";
+ gpios = <&smp2pgpio_smp2p_1_out 0 0>;
+ };
+
+ smp2pgpio_smp2p_2_in: qcom,smp2pgpio-smp2p-2-in {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "smp2p";
+ qcom,remote-pid = <2>;
+ qcom,is-inbound;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ qcom,smp2pgpio_test_smp2p_2_in {
+ compatible = "qcom,smp2pgpio_test_smp2p_2_in";
+ gpios = <&smp2pgpio_smp2p_2_in 0 0>;
+ };
+
+ smp2pgpio_smp2p_2_out: qcom,smp2pgpio-smp2p-2-out {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "smp2p";
+ qcom,remote-pid = <2>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ qcom,smp2pgpio_test_smp2p_2_out {
+ compatible = "qcom,smp2pgpio_test_smp2p_2_out";
+ gpios = <&smp2pgpio_smp2p_2_out 0 0>;
+ };
+
+ smp2pgpio_sleepstate_2_out: qcom,smp2pgpio-sleepstate-gpio-2-out {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "sleepstate";
+ qcom,remote-pid = <2>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ qcom,smp2pgpio-sleepstate-2-out {
+ compatible = "qcom,smp2pgpio-sleepstate-out";
+ gpios = <&smp2pgpio_sleepstate_2_out 0 0>;
+ };
+
+ smp2pgpio_smp2p_5_in: qcom,smp2pgpio-smp2p-5-in {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "smp2p";
+ qcom,remote-pid = <5>;
+ qcom,is-inbound;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ qcom,smp2pgpio_test_smp2p_5_in {
+ compatible = "qcom,smp2pgpio_test_smp2p_5_in";
+ gpios = <&smp2pgpio_smp2p_5_in 0 0>;
+ };
+
+ smp2pgpio_smp2p_5_out: qcom,smp2pgpio-smp2p-5-out {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "smp2p";
+ qcom,remote-pid = <5>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ qcom,smp2pgpio_test_smp2p_5_out {
+ compatible = "qcom,smp2pgpio_test_smp2p_5_out";
+ gpios = <&smp2pgpio_smp2p_5_out 0 0>;
+ };
+
+ /* ssr - inbound entry from lpass */
+ smp2pgpio_ssr_smp2p_2_in: qcom,smp2pgpio-ssr-smp2p-2-in {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "slave-kernel";
+ qcom,remote-pid = <2>;
+ qcom,is-inbound;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ /* ssr - outbound entry to lpass */
+ smp2pgpio_ssr_smp2p_2_out: qcom,smp2pgpio-ssr-smp2p-2-out {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "master-kernel";
+ qcom,remote-pid = <2>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ /* ipa - outbound entry to mss */
+ smp2pgpio_ipa_1_out: qcom,smp2pgpio-ipa-1-out {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "ipa";
+ qcom,remote-pid = <1>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ /* ipa - inbound entry from mss */
+ smp2pgpio_ipa_1_in: qcom,smp2pgpio-ipa-1-in {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "ipa";
+ qcom,remote-pid = <1>;
+ qcom,is-inbound;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ /* ssr - inbound entry from mss */
+ smp2pgpio_ssr_smp2p_1_in: qcom,smp2pgpio-ssr-smp2p-1-in {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "slave-kernel";
+ qcom,remote-pid = <1>;
+ qcom,is-inbound;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ /* ssr - outbound entry to mss */
+ smp2pgpio_ssr_smp2p_1_out: qcom,smp2pgpio-ssr-smp2p-1-out {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "master-kernel";
+ qcom,remote-pid = <1>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ /* ssr - inbound entry from cdsp */
+ smp2pgpio_ssr_smp2p_5_in: qcom,smp2pgpio-ssr-smp2p-5-in {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "slave-kernel";
+ qcom,remote-pid = <5>;
+ qcom,is-inbound;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ /* ssr - outbound entry to cdsp */
+ smp2pgpio_ssr_smp2p_5_out: qcom,smp2pgpio-ssr-smp2p-5-out {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "master-kernel";
+ qcom,remote-pid = <5>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-wcd.dtsi b/arch/arm64/boot/dts/qcom/sdm670-wcd.dtsi
new file mode 100644
index 0000000..f8d2a04
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-wcd.dtsi
@@ -0,0 +1,167 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&slim_aud {
+ tavil_codec {
+ wcd: wcd_pinctrl@5 {
+ compatible = "qcom,wcd-pinctrl";
+ qcom,num-gpios = <5>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ us_euro_sw_wcd_active: us_euro_sw_wcd_active {
+ mux {
+ pins = "gpio1";
+ };
+
+ config {
+ pins = "gpio1";
+ output-high;
+ };
+ };
+
+ us_euro_sw_wcd_sleep: us_euro_sw_wcd_sleep {
+ mux {
+ pins = "gpio1";
+ };
+
+ config {
+ pins = "gpio1";
+ output-low;
+ };
+ };
+
+ spkr_1_wcd_en_active: spkr_1_wcd_en_active {
+ mux {
+ pins = "gpio2";
+ };
+
+ config {
+ pins = "gpio2";
+ output-high;
+ };
+ };
+
+ spkr_1_wcd_en_sleep: spkr_1_wcd_en_sleep {
+ mux {
+ pins = "gpio2";
+ };
+
+ config {
+ pins = "gpio2";
+ input-enable;
+ };
+ };
+
+ spkr_2_wcd_en_active: spkr_2_sd_n_active {
+ mux {
+ pins = "gpio3";
+ };
+
+ config {
+ pins = "gpio3";
+ output-high;
+ };
+ };
+
+ spkr_2_wcd_en_sleep: spkr_2_sd_n_sleep {
+ mux {
+ pins = "gpio3";
+ };
+
+ config {
+ pins = "gpio3";
+ input-enable;
+ };
+ };
+
+ hph_en0_wcd_active: hph_en0_wcd_active {
+ mux {
+ pins = "gpio4";
+ };
+
+ config {
+ pins = "gpio4";
+ output-high;
+ };
+ };
+
+ hph_en0_wcd_sleep: hph_en0_wcd_sleep {
+ mux {
+ pins = "gpio4";
+ };
+
+ config {
+ pins = "gpio4";
+ output-low;
+ };
+ };
+
+ hph_en1_wcd_active: hph_en1_wcd_active {
+ mux {
+ pins = "gpio5";
+ };
+
+ config {
+ pins = "gpio5";
+ output-high;
+ };
+ };
+
+ hph_en1_wcd_sleep: hph_en1_wcd_sleep {
+ mux {
+ pins = "gpio5";
+ };
+
+ config {
+ pins = "gpio5";
+ output-low;
+ };
+ };
+ };
+
+ wsa_spkr_wcd_sd1: msm_cdc_pinctrll {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&spkr_1_wcd_en_active>;
+ pinctrl-1 = <&spkr_1_wcd_en_sleep>;
+ };
+
+ wsa_spkr_wcd_sd2: msm_cdc_pinctrlr {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&spkr_2_wcd_en_active>;
+ pinctrl-1 = <&spkr_2_wcd_en_sleep>;
+ };
+
+ tavil_us_euro_sw: msm_cdc_pinctrl_us_euro_sw {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&us_euro_sw_wcd_active>;
+ pinctrl-1 = <&us_euro_sw_wcd_sleep>;
+ };
+
+ tavil_hph_en0: msm_cdc_pinctrl_hph_en0 {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&hph_en0_wcd_active>;
+ pinctrl-1 = <&hph_en0_wcd_sleep>;
+ };
+
+ tavil_hph_en1: msm_cdc_pinctrl_hph_en1 {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&hph_en1_wcd_active>;
+ pinctrl-1 = <&hph_en1_wcd_sleep>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-wsa881x.dtsi b/arch/arm64/boot/dts/qcom/sdm670-wsa881x.dtsi
new file mode 100644
index 0000000..c35850d
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-wsa881x.dtsi
@@ -0,0 +1,45 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&slim_aud {
+ tavil_codec {
+ swr_master {
+ compatible = "qcom,swr-wcd";
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ wsa881x_0211: wsa881x@20170211 {
+ compatible = "qcom,wsa881x";
+ reg = <0x0 0x20170211>;
+ qcom,spkr-sd-n-node = <&wsa_spkr_wcd_sd1>;
+ };
+
+ wsa881x_0212: wsa881x@20170212 {
+ compatible = "qcom,wsa881x";
+ reg = <0x0 0x20170212>;
+ qcom,spkr-sd-n-node = <&wsa_spkr_wcd_sd2>;
+ };
+
+ wsa881x_0213: wsa881x@21170213 {
+ compatible = "qcom,wsa881x";
+ reg = <0x0 0x21170213>;
+ qcom,spkr-sd-n-node = <&wsa_spkr_wcd_sd1>;
+ };
+
+ wsa881x_0214: wsa881x@21170214 {
+ compatible = "qcom,wsa881x";
+ reg = <0x0 0x21170214>;
+ qcom,spkr-sd-n-node = <&wsa_spkr_wcd_sd2>;
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
index bb5217e..0dec428 100644
--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
@@ -19,15 +19,27 @@
#include <dt-bindings/clock/qcom,videocc-sdm845.h>
#include <dt-bindings/clock/qcom,cpucc-sdm845.h>
#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/soc/qcom,tcs-mbox.h>
#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
/ {
model = "Qualcomm Technologies, Inc. SDM670";
compatible = "qcom,sdm670";
qcom,msm-id = <336 0x0>;
- interrupt-parent = <&intc>;
+ interrupt-parent = <&pdc>;
- aliases { };
+ aliases {
+ ufshc1 = &ufshc_mem; /* Embedded UFS slot */
+ sdhc1 = &sdhc_1; /* SDC1 eMMC slot */
+ };
+
+ aliases {
+ serial0 = &qupv3_se12_2uart;
+ spi0 = &qupv3_se8_spi;
+ i2c0 = &qupv3_se10_i2c;
+ i2c1 = &qupv3_se3_i2c;
+ hsuart0 = &qupv3_se6_4uart;
+ };
cpus {
#address-cells = <2>;
@@ -315,37 +327,37 @@
pil_cdsp_mem: cdsp_regions@93300000 {
compatible = "removed-dma-pool";
no-map;
- reg = <0 0x93300000 0 0x600000>;
+ reg = <0 0x93300000 0 0x800000>;
};
- pil_mba_mem: pil_mba_region@0x93900000 {
+ pil_mba_mem: pil_mba_region@0x93b00000 {
compatible = "removed-dma-pool";
no-map;
- reg = <0 0x93900000 0 0x200000>;
+ reg = <0 0x93b00000 0 0x200000>;
};
- pil_adsp_mem: pil_adsp_region@93b00000 {
+ pil_adsp_mem: pil_adsp_region@93d00000 {
compatible = "removed-dma-pool";
no-map;
- reg = <0 0x93b00000 0 0x1e00000>;
+ reg = <0 0x93d00000 0 0x1e00000>;
};
- pil_ipa_fw_mem: pil_ipa_fw_region@95900000 {
+ pil_ipa_fw_mem: pil_ipa_fw_region@95b00000 {
compatible = "removed-dma-pool";
no-map;
- reg = <0 0x95900000 0 0x10000>;
+ reg = <0 0x95b00000 0 0x10000>;
};
- pil_ipa_gsi_mem: pil_ipa_gsi_region@95910000 {
+ pil_ipa_gsi_mem: pil_ipa_gsi_region@95b10000 {
compatible = "removed-dma-pool";
no-map;
- reg = <0 0x95910000 0 0x5000>;
+ reg = <0 0x95b10000 0 0x5000>;
};
- pil_gpu_mem: pil_gpu_region@95915000 {
+ pil_gpu_mem: pil_gpu_region@95b15000 {
compatible = "removed-dma-pool";
no-map;
- reg = <0 0x95915000 0 0x1000>;
+ reg = <0 0x95b15000 0 0x1000>;
};
adsp_mem: adsp_region {
@@ -394,6 +406,10 @@
#include "sdm670-ion.dtsi"
+#include "sdm670-smp2p.dtsi"
+
+#include "sdm670-qupv3.dtsi"
+
&soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -409,6 +425,7 @@
reg = <0x17a00000 0x10000>, /* GICD */
<0x17a60000 0x100000>; /* GICR * 8 */
interrupts = <1 9 4>;
+ interrupt-parent = <&intc>;
};
timer {
@@ -425,6 +442,324 @@
qcom,pipe-attr-ee;
};
+ thermal_zones: thermal-zones {
+ aoss0-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&tsens0 0>;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ cpu0-silver-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&tsens0 1>;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ cpu1-silver-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&tsens0 2>;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ cpu2-silver-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&tsens0 3>;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ cpu3-silver-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 4>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ cpu4-silver-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 5>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ cpu5-silver-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 6>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ kryo-l3-0-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 7>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ kryo-l3-1-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 8>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ cpu0-gold-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 9>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ cpu1-gold-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 10>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ gpu0-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 11>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ gpu1-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&tsens0 12>;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ aoss1-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens1 0>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ mdm-dsp-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens1 1>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ ddr-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens1 2>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ wlan-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens1 3>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ compute-hvx-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens1 4>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ camera-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens1 5>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ mmss-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens1 6>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ mdm-core-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens1 7>;
+ thermal-governor = "user_space";
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+ };
+
+ tsens0: tsens@c222000 {
+ compatible = "qcom,tsens24xx";
+ reg = <0xc222000 0x4>,
+ <0xc263000 0x1ff>;
+ reg-names = "tsens_srot_physical",
+ "tsens_tm_physical";
+ interrupts = <0 506 0>, <0 508 0>;
+ interrupt-names = "tsens-upper-lower", "tsens-critical";
+ #thermal-sensor-cells = <1>;
+ };
+
+ tsens1: tsens@c223000 {
+ compatible = "qcom,tsens24xx";
+ reg = <0xc223000 0x4>,
+ <0xc265000 0x1ff>;
+ reg-names = "tsens_srot_physical",
+ "tsens_tm_physical";
+ interrupts = <0 507 0>, <0 509 0>;
+ interrupt-names = "tsens-upper-lower", "tsens-critical";
+ #thermal-sensor-cells = <1>;
+ };
+
timer@0x17c90000{
#address-cells = <1>;
#size-cells = <1>;
@@ -491,6 +826,12 @@
reg-names = "pshold-base", "tcsr-boot-misc-detect";
};
+ aop-msg-client {
+ compatible = "qcom,debugfs-qmp-client";
+ mboxes = <&qmp_aop 0>;
+ mbox-names = "aop";
+ };
+
clock_rpmh: qcom,rpmhclk {
compatible = "qcom,dummycc";
clock-output-names = "rpmh_clocks";
@@ -546,6 +887,30 @@
#reset-cells = <1>;
};
+ slim_aud: slim@62dc0000 {
+ cell-index = <1>;
+ compatible = "qcom,slim-ngd";
+ reg = <0x62dc0000 0x2c000>,
+ <0x62d84000 0x2a000>;
+ reg-names = "slimbus_physical", "slimbus_bam_physical";
+ interrupts = <0 163 0>, <0 164 0>;
+ interrupt-names = "slimbus_irq", "slimbus_bam_irq";
+ qcom,apps-ch-pipes = <0x780000>;
+ qcom,ea-pc = <0x290>;
+ status = "disabled";
+ };
+
+ slim_qca: slim@62e40000 {
+ cell-index = <3>;
+ compatible = "qcom,slim-ngd";
+ reg = <0x62e40000 0x2c000>,
+ <0x62e04000 0x20000>;
+ reg-names = "slimbus_physical", "slimbus_bam_physical";
+ interrupts = <0 291 0>, <0 292 0>;
+ interrupt-names = "slimbus_irq", "slimbus_bam_irq";
+ status = "disabled";
+ };
+
wdog: qcom,wdt@17980000{
compatible = "qcom,msm-watchdog";
reg = <0x17980000 0x1000>;
@@ -590,6 +955,38 @@
};
};
+ gpi_dma0: qcom,gpi-dma@0x800000 {
+ #dma-cells = <6>;
+ compatible = "qcom,gpi-dma";
+ reg = <0x800000 0x60000>;
+ reg-names = "gpi-top";
+ interrupts = <0 244 0>, <0 245 0>, <0 246 0>, <0 247 0>,
+ <0 248 0>, <0 249 0>, <0 250 0>, <0 251 0>,
+ <0 252 0>, <0 253 0>, <0 254 0>, <0 255 0>,
+ <0 256 0>;
+ qcom,max-num-gpii = <13>;
+ qcom,gpii-mask = <0xfa>;
+ qcom,ev-factor = <2>;
+ iommus = <&apps_smmu 0x0016 0x0>;
+ status = "ok";
+ };
+
+ gpi_dma1: qcom,gpi-dma@0xa00000 {
+ #dma-cells = <6>;
+ compatible = "qcom,gpi-dma";
+ reg = <0xa00000 0x60000>;
+ reg-names = "gpi-top";
+ interrupts = <0 279 0>, <0 280 0>, <0 281 0>, <0 282 0>,
+ <0 283 0>, <0 284 0>, <0 293 0>, <0 294 0>,
+ <0 295 0>, <0 296 0>, <0 297 0>, <0 298 0>,
+ <0 299 0>;
+ qcom,max-num-gpii = <13>;
+ qcom,gpii-mask = <0xfa>;
+ qcom,ev-factor = <2>;
+ iommus = <&apps_smmu 0x06d6 0x0>;
+ status = "ok";
+ };
+
cpuss_dump {
compatible = "qcom,cpuss-dump";
qcom,l1_i_cache0 {
@@ -679,6 +1076,226 @@
"l3-scu-faultirq";
};
+ qcom,ipc-spinlock@1f40000 {
+ compatible = "qcom,ipc-spinlock-sfpb";
+ reg = <0x1f40000 0x8000>;
+ qcom,num-locks = <8>;
+ };
+
+ qcom,smem@86000000 {
+ compatible = "qcom,smem";
+ reg = <0x86000000 0x200000>,
+ <0x17911008 0x4>,
+ <0x778000 0x7000>,
+ <0x1fd4000 0x8>;
+ reg-names = "smem", "irq-reg-base", "aux-mem1",
+ "smem_targ_info_reg";
+ qcom,mpu-enabled;
+ };
+
+ qmp_aop: qcom,qmp-aop@c300000 {
+ compatible = "qcom,qmp-mbox";
+ label = "aop";
+ reg = <0xc300000 0x100000>,
+ <0x1799000c 0x4>;
+ reg-names = "msgram", "irq-reg-base";
+ qcom,irq-mask = <0x1>;
+ interrupts = <0 389 1>;
+ priority = <0>;
+ mbox-desc-offset = <0x0>;
+ #mbox-cells = <1>;
+ };
+
+ qcom,glink-smem-native-xprt-modem@86000000 {
+ compatible = "qcom,glink-smem-native-xprt";
+ reg = <0x86000000 0x200000>,
+ <0x1799000c 0x4>;
+ reg-names = "smem", "irq-reg-base";
+ qcom,irq-mask = <0x1000>;
+ interrupts = <GIC_SPI 449 IRQ_TYPE_EDGE_RISING>;
+ label = "mpss";
+ };
+
+ qcom,glink-smem-native-xprt-adsp@86000000 {
+ compatible = "qcom,glink-smem-native-xprt";
+ reg = <0x86000000 0x200000>,
+ <0x1799000c 0x4>;
+ reg-names = "smem", "irq-reg-base";
+ qcom,irq-mask = <0x1000000>;
+ interrupts = <GIC_SPI 170 IRQ_TYPE_EDGE_RISING>;
+ label = "lpass";
+ qcom,qos-config = <&glink_qos_adsp>;
+ qcom,ramp-time = <0xaf>;
+ };
+
+ glink_qos_adsp: qcom,glink-qos-config-adsp {
+ compatible = "qcom,glink-qos-config";
+ qcom,flow-info = <0x3c 0x0>,
+ <0x3c 0x0>,
+ <0x3c 0x0>,
+ <0x3c 0x0>;
+ qcom,mtu-size = <0x800>;
+ qcom,tput-stats-cycle = <0xa>;
+ };
+
+ glink_spi_xprt_wdsp: qcom,glink-spi-xprt-wdsp {
+ compatible = "qcom,glink-spi-xprt";
+ label = "wdsp";
+ qcom,remote-fifo-config = <&glink_fifo_wdsp>;
+ qcom,qos-config = <&glink_qos_wdsp>;
+ qcom,ramp-time = <0x10>,
+ <0x20>,
+ <0x30>,
+ <0x40>;
+ };
+
+ glink_fifo_wdsp: qcom,glink-fifo-config-wdsp {
+ compatible = "qcom,glink-fifo-config";
+ qcom,out-read-idx-reg = <0x12000>;
+ qcom,out-write-idx-reg = <0x12004>;
+ qcom,in-read-idx-reg = <0x1200C>;
+ qcom,in-write-idx-reg = <0x12010>;
+ };
+
+ glink_qos_wdsp: qcom,glink-qos-config-wdsp {
+ compatible = "qcom,glink-qos-config";
+ qcom,flow-info = <0x80 0x0>,
+ <0x70 0x1>,
+ <0x60 0x2>,
+ <0x50 0x3>;
+ qcom,mtu-size = <0x800>;
+ qcom,tput-stats-cycle = <0xa>;
+ };
+
+ qcom,glink-smem-native-xprt-cdsp@86000000 {
+ compatible = "qcom,glink-smem-native-xprt";
+ reg = <0x86000000 0x200000>,
+ <0x1799000c 0x4>;
+ reg-names = "smem", "irq-reg-base";
+ qcom,irq-mask = <0x10>;
+ interrupts = <GIC_SPI 574 IRQ_TYPE_EDGE_RISING>;
+ label = "cdsp";
+ };
+
+ glink_mpss: qcom,glink-ssr-modem {
+ compatible = "qcom,glink_ssr";
+ label = "modem";
+ qcom,edge = "mpss";
+ qcom,notify-edges = <&glink_lpass>, <&glink_cdsp>;
+ qcom,xprt = "smem";
+ };
+
+ glink_lpass: qcom,glink-ssr-adsp {
+ compatible = "qcom,glink_ssr";
+ label = "adsp";
+ qcom,edge = "lpass";
+ qcom,notify-edges = <&glink_mpss>, <&glink_cdsp>;
+ qcom,xprt = "smem";
+ };
+
+ glink_cdsp: qcom,glink-ssr-cdsp {
+ compatible = "qcom,glink_ssr";
+ label = "cdsp";
+ qcom,edge = "cdsp";
+ qcom,notify-edges = <&glink_mpss>, <&glink_lpass>;
+ qcom,xprt = "smem";
+ };
+
+ qcom,ipc_router {
+ compatible = "qcom,ipc_router";
+ qcom,node-id = <1>;
+ };
+
+ qcom,ipc_router_modem_xprt {
+ compatible = "qcom,ipc_router_glink_xprt";
+ qcom,ch-name = "IPCRTR";
+ qcom,xprt-remote = "mpss";
+ qcom,glink-xprt = "smem";
+ qcom,xprt-linkid = <1>;
+ qcom,xprt-version = <1>;
+ qcom,fragmented-data;
+ };
+
+ qcom,ipc_router_q6_xprt {
+ compatible = "qcom,ipc_router_glink_xprt";
+ qcom,ch-name = "IPCRTR";
+ qcom,xprt-remote = "lpass";
+ qcom,glink-xprt = "smem";
+ qcom,xprt-linkid = <1>;
+ qcom,xprt-version = <1>;
+ qcom,fragmented-data;
+ };
+
+ qcom,ipc_router_cdsp_xprt {
+ compatible = "qcom,ipc_router_glink_xprt";
+ qcom,ch-name = "IPCRTR";
+ qcom,xprt-remote = "cdsp";
+ qcom,glink-xprt = "smem";
+ qcom,xprt-linkid = <1>;
+ qcom,xprt-version = <1>;
+ qcom,fragmented-data;
+ };
+
+ qcom,glink_pkt {
+ compatible = "qcom,glinkpkt";
+
+ qcom,glinkpkt-at-mdm0 {
+ qcom,glinkpkt-transport = "smem";
+ qcom,glinkpkt-edge = "mpss";
+ qcom,glinkpkt-ch-name = "DS";
+ qcom,glinkpkt-dev-name = "at_mdm0";
+ };
+
+ qcom,glinkpkt-loopback_cntl {
+ qcom,glinkpkt-transport = "lloop";
+ qcom,glinkpkt-edge = "local";
+ qcom,glinkpkt-ch-name = "LOCAL_LOOPBACK_CLNT";
+ qcom,glinkpkt-dev-name = "glink_pkt_loopback_ctrl";
+ };
+
+ qcom,glinkpkt-loopback_data {
+ qcom,glinkpkt-transport = "lloop";
+ qcom,glinkpkt-edge = "local";
+ qcom,glinkpkt-ch-name = "glink_pkt_lloop_CLNT";
+ qcom,glinkpkt-dev-name = "glink_pkt_loopback";
+ };
+
+ qcom,glinkpkt-apr-apps2 {
+ qcom,glinkpkt-transport = "smem";
+ qcom,glinkpkt-edge = "adsp";
+ qcom,glinkpkt-ch-name = "apr_apps2";
+ qcom,glinkpkt-dev-name = "apr_apps2";
+ };
+
+ qcom,glinkpkt-data40-cntl {
+ qcom,glinkpkt-transport = "smem";
+ qcom,glinkpkt-edge = "mpss";
+ qcom,glinkpkt-ch-name = "DATA40_CNTL";
+ qcom,glinkpkt-dev-name = "smdcntl8";
+ };
+
+ qcom,glinkpkt-data1 {
+ qcom,glinkpkt-transport = "smem";
+ qcom,glinkpkt-edge = "mpss";
+ qcom,glinkpkt-ch-name = "DATA1";
+ qcom,glinkpkt-dev-name = "smd7";
+ };
+
+ qcom,glinkpkt-data4 {
+ qcom,glinkpkt-transport = "smem";
+ qcom,glinkpkt-edge = "mpss";
+ qcom,glinkpkt-ch-name = "DATA4";
+ qcom,glinkpkt-dev-name = "smd8";
+ };
+
+ qcom,glinkpkt-data11 {
+ qcom,glinkpkt-transport = "smem";
+ qcom,glinkpkt-edge = "mpss";
+ qcom,glinkpkt-ch-name = "DATA11";
+ qcom,glinkpkt-dev-name = "smd11";
+ };
+ };
+
qcom,chd_sliver {
compatible = "qcom,core-hang-detect";
label = "silver";
@@ -741,6 +1358,42 @@
};
};
+ cmd_db: qcom,cmd-db@c3f000c {
+ compatible = "qcom,cmd-db";
+ reg = <0xc3f000c 0x8>;
+ };
+
+ apps_rsc: mailbox@179e0000 {
+ compatible = "qcom,tcs-drv";
+ label = "apps_rsc";
+ reg = <0x179e0000 0x100>, <0x179e0d00 0x3000>;
+ interrupts = <0 5 0>;
+ #mbox-cells = <1>;
+ qcom,drv-id = <2>;
+ qcom,tcs-config = <ACTIVE_TCS 2>,
+ <SLEEP_TCS 3>,
+ <WAKE_TCS 3>,
+ <CONTROL_TCS 1>;
+ };
+
+ disp_rsc: mailbox@af20000 {
+ compatible = "qcom,tcs-drv";
+ label = "display_rsc";
+ reg = <0xaf20000 0x100>, <0xaf21c00 0x3000>;
+ interrupts = <0 129 0>;
+ #mbox-cells = <1>;
+ qcom,drv-id = <0>;
+ qcom,tcs-config = <SLEEP_TCS 1>,
+ <WAKE_TCS 1>,
+ <ACTIVE_TCS 0>,
+ <CONTROL_TCS 1>;
+ };
+
+ system_pm {
+ compatible = "qcom,system-pm";
+ mboxes = <&apps_rsc 0>;
+ };
+
dcc: dcc_v2@10a2000 {
compatible = "qcom,dcc_v2";
reg = <0x10a2000 0x1000>,
@@ -766,11 +1419,491 @@
#interrupt-cells = <4>;
cell-index = <0>;
};
+
+ ufsphy_mem: ufsphy_mem@1d87000 {
+ reg = <0x1d87000 0xe00>; /* PHY regs */
+ reg-names = "phy_mem";
+ #phy-cells = <0>;
+
+ lanes-per-direction = <1>;
+
+ clock-names = "ref_clk_src",
+ "ref_clk",
+ "ref_aux_clk";
+ clocks = <&clock_rpmh RPMH_CXO_CLK>,
+ <&clock_gcc GCC_UFS_MEM_CLKREF_CLK>,
+ <&clock_gcc GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK>;
+
+ status = "disabled";
+ };
+
+ ufshc_mem: ufshc@1d84000 {
+ compatible = "qcom,ufshc";
+ reg = <0x1d84000 0x3000>;
+ interrupts = <0 265 0>;
+ phys = <&ufsphy_mem>;
+ phy-names = "ufsphy";
+
+ lanes-per-direction = <1>;
+ dev-ref-clk-freq = <0>; /* 19.2 MHz */
+
+ clock-names =
+ "core_clk",
+ "bus_aggr_clk",
+ "iface_clk",
+ "core_clk_unipro",
+ "core_clk_ice",
+ "ref_clk",
+ "tx_lane0_sync_clk",
+ "rx_lane0_sync_clk";
+ clocks =
+ <&clock_gcc GCC_UFS_PHY_AXI_HW_CTL_CLK>,
+ <&clock_gcc GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK>,
+ <&clock_gcc GCC_UFS_PHY_AHB_CLK>,
+ <&clock_gcc GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK>,
+ <&clock_gcc GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK>,
+ <&clock_rpmh RPMH_CXO_CLK>,
+ <&clock_gcc GCC_UFS_PHY_TX_SYMBOL_0_CLK>,
+ <&clock_gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>;
+ freq-table-hz =
+ <50000000 200000000>,
+ <0 0>,
+ <0 0>,
+ <37500000 150000000>,
+ <75000000 300000000>,
+ <0 0>,
+ <0 0>,
+ <0 0>;
+
+ resets = <&clock_gcc GCC_UFS_PHY_BCR>;
+ reset-names = "core_reset";
+
+ status = "disabled";
+ };
+
+ qcom,lpass@62400000 {
+ compatible = "qcom,pil-tz-generic";
+ reg = <0x62400000 0x00100>;
+ interrupts = <0 162 1>;
+
+ vdd_cx-supply = <&pm660l_l9_level>;
+ qcom,proxy-reg-names = "vdd_cx";
+ qcom,vdd_cx-uV-uA = <RPMH_REGULATOR_LEVEL_TURBO 100000>;
+
+ clocks = <&clock_rpmh RPMH_CXO_CLK>;
+ clock-names = "xo";
+ qcom,proxy-clock-names = "xo";
+
+ qcom,pas-id = <1>;
+ qcom,proxy-timeout-ms = <10000>;
+ qcom,smem-id = <423>;
+ qcom,sysmon-id = <1>;
+ qcom,ssctl-instance-id = <0x14>;
+ qcom,firmware-name = "adsp";
+ memory-region = <&pil_adsp_mem>;
+
+ /* GPIO inputs from lpass */
+ qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_2_in 0 0>;
+ qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_2_in 2 0>;
+ qcom,gpio-err-ready = <&smp2pgpio_ssr_smp2p_2_in 1 0>;
+ qcom,gpio-stop-ack = <&smp2pgpio_ssr_smp2p_2_in 3 0>;
+
+ /* GPIO output to lpass */
+ qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_2_out 0 0>;
+ status = "ok";
+ };
+
+ qcom,rmnet-ipa {
+ compatible = "qcom,rmnet-ipa3";
+ qcom,rmnet-ipa-ssr;
+ qcom,ipa-loaduC;
+ qcom,ipa-advertise-sg-support;
+ qcom,ipa-napi-enable;
+ };
+
+ ipa_hw: qcom,ipa@01e00000 {
+ compatible = "qcom,ipa";
+ reg = <0x1e00000 0x34000>,
+ <0x1e04000 0x2c000>;
+ reg-names = "ipa-base", "gsi-base";
+ interrupts =
+ <0 311 0>,
+ <0 432 0>;
+ interrupt-names = "ipa-irq", "gsi-irq";
+ qcom,ipa-hw-ver = <13>; /* IPA core version = IPAv3.5.1 */
+ qcom,ipa-hw-mode = <1>;
+ qcom,ee = <0>;
+ qcom,use-ipa-tethering-bridge;
+ qcom,modem-cfg-emb-pipe-flt;
+ qcom,ipa-wdi2;
+ qcom,use-64-bit-dma-mask;
+ qcom,arm-smmu;
+ qcom,smmu-s1-bypass;
+ qcom,bandwidth-vote-for-ipa;
+ qcom,msm-bus,name = "ipa";
+ qcom,msm-bus,num-cases = <4>;
+ qcom,msm-bus,num-paths = <4>;
+ qcom,msm-bus,vectors-KBps =
+ /* No vote */
+ <90 512 0 0>,
+ <90 585 0 0>,
+ <1 676 0 0>,
+ <143 777 0 0>,
+ /* SVS */
+ <90 512 80000 640000>,
+ <90 585 80000 640000>,
+ <1 676 80000 80000>,
+ <143 777 0 150000>,
+ /* NOMINAL */
+ <90 512 206000 960000>,
+ <90 585 206000 960000>,
+ <1 676 206000 160000>,
+ <143 777 0 300000>,
+ /* TURBO */
+ <90 512 206000 3600000>,
+ <90 585 206000 3600000>,
+ <1 676 206000 300000>,
+ <143 777 0 355333>;
+ qcom,bus-vector-names = "MIN", "SVS", "NOMINAL", "TURBO";
+
+ /* IPA RAM mmap */
+ qcom,ipa-ram-mmap = <
+ 0x280 /* ofst_start; */
+ 0x0 /* nat_ofst; */
+ 0x0 /* nat_size; */
+ 0x288 /* v4_flt_hash_ofst; */
+ 0x78 /* v4_flt_hash_size; */
+ 0x4000 /* v4_flt_hash_size_ddr; */
+ 0x308 /* v4_flt_nhash_ofst; */
+ 0x78 /* v4_flt_nhash_size; */
+ 0x4000 /* v4_flt_nhash_size_ddr; */
+ 0x388 /* v6_flt_hash_ofst; */
+ 0x78 /* v6_flt_hash_size; */
+ 0x4000 /* v6_flt_hash_size_ddr; */
+ 0x408 /* v6_flt_nhash_ofst; */
+ 0x78 /* v6_flt_nhash_size; */
+ 0x4000 /* v6_flt_nhash_size_ddr; */
+ 0xf /* v4_rt_num_index; */
+ 0x0 /* v4_modem_rt_index_lo; */
+ 0x7 /* v4_modem_rt_index_hi; */
+ 0x8 /* v4_apps_rt_index_lo; */
+ 0xe /* v4_apps_rt_index_hi; */
+ 0x488 /* v4_rt_hash_ofst; */
+ 0x78 /* v4_rt_hash_size; */
+ 0x4000 /* v4_rt_hash_size_ddr; */
+ 0x508 /* v4_rt_nhash_ofst; */
+ 0x78 /* v4_rt_nhash_size; */
+ 0x4000 /* v4_rt_nhash_size_ddr; */
+ 0xf /* v6_rt_num_index; */
+ 0x0 /* v6_modem_rt_index_lo; */
+ 0x7 /* v6_modem_rt_index_hi; */
+ 0x8 /* v6_apps_rt_index_lo; */
+ 0xe /* v6_apps_rt_index_hi; */
+ 0x588 /* v6_rt_hash_ofst; */
+ 0x78 /* v6_rt_hash_size; */
+ 0x4000 /* v6_rt_hash_size_ddr; */
+ 0x608 /* v6_rt_nhash_ofst; */
+ 0x78 /* v6_rt_nhash_size; */
+ 0x4000 /* v6_rt_nhash_size_ddr; */
+ 0x688 /* modem_hdr_ofst; */
+ 0x140 /* modem_hdr_size; */
+ 0x7c8 /* apps_hdr_ofst; */
+ 0x0 /* apps_hdr_size; */
+ 0x800 /* apps_hdr_size_ddr; */
+ 0x7d0 /* modem_hdr_proc_ctx_ofst; */
+ 0x200 /* modem_hdr_proc_ctx_size; */
+ 0x9d0 /* apps_hdr_proc_ctx_ofst; */
+ 0x200 /* apps_hdr_proc_ctx_size; */
+ 0x0 /* apps_hdr_proc_ctx_size_ddr; */
+ 0x0 /* modem_comp_decomp_ofst; diff */
+ 0x0 /* modem_comp_decomp_size; diff */
+ 0xbd8 /* modem_ofst; */
+ 0x1024 /* modem_size; */
+ 0x2000 /* apps_v4_flt_hash_ofst; */
+ 0x0 /* apps_v4_flt_hash_size; */
+ 0x2000 /* apps_v4_flt_nhash_ofst; */
+ 0x0 /* apps_v4_flt_nhash_size; */
+ 0x2000 /* apps_v6_flt_hash_ofst; */
+ 0x0 /* apps_v6_flt_hash_size; */
+ 0x2000 /* apps_v6_flt_nhash_ofst; */
+ 0x0 /* apps_v6_flt_nhash_size; */
+ 0x80 /* uc_info_ofst; */
+ 0x200 /* uc_info_size; */
+ 0x2000 /* end_ofst; */
+ 0x2000 /* apps_v4_rt_hash_ofst; */
+ 0x0 /* apps_v4_rt_hash_size; */
+ 0x2000 /* apps_v4_rt_nhash_ofst; */
+ 0x0 /* apps_v4_rt_nhash_size; */
+ 0x2000 /* apps_v6_rt_hash_ofst; */
+ 0x0 /* apps_v6_rt_hash_size; */
+ 0x2000 /* apps_v6_rt_nhash_ofst; */
+ 0x0 /* apps_v6_rt_nhash_size; */
+ 0x1c00 /* uc_event_ring_ofst; */
+ 0x400 /* uc_event_ring_size; */
+ >;
+
+ /* smp2p gpio information */
+ qcom,smp2pgpio_map_ipa_1_out {
+ compatible = "qcom,smp2pgpio-map-ipa-1-out";
+ gpios = <&smp2pgpio_ipa_1_out 0 0>;
+ };
+
+ qcom,smp2pgpio_map_ipa_1_in {
+ compatible = "qcom,smp2pgpio-map-ipa-1-in";
+ gpios = <&smp2pgpio_ipa_1_in 0 0>;
+ };
+
+ ipa_smmu_ap: ipa_smmu_ap {
+ compatible = "qcom,ipa-smmu-ap-cb";
+ iommus = <&apps_smmu 0x720 0x0>;
+ qcom,iova-mapping = <0x20000000 0x40000000>;
+ };
+
+ ipa_smmu_wlan: ipa_smmu_wlan {
+ compatible = "qcom,ipa-smmu-wlan-cb";
+ iommus = <&apps_smmu 0x721 0x0>;
+ };
+
+ ipa_smmu_uc: ipa_smmu_uc {
+ compatible = "qcom,ipa-smmu-uc-cb";
+ iommus = <&apps_smmu 0x722 0x0>;
+ qcom,iova-mapping = <0x40000000 0x20000000>;
+ };
+ };
+
+ qcom,ipa_fws {
+ compatible = "qcom,pil-tz-generic";
+ qcom,pas-id = <0xf>;
+ qcom,firmware-name = "ipa_fws";
+ };
+
+ pil_modem: qcom,mss@4080000 {
+ compatible = "qcom,pil-q6v55-mss";
+ reg = <0x4080000 0x100>,
+ <0x1f63000 0x008>,
+ <0x1f65000 0x008>,
+ <0x1f64000 0x008>,
+ <0x4180000 0x020>,
+ <0xc2b0000 0x004>,
+ <0xb2e0100 0x004>,
+ <0x4180044 0x004>;
+ reg-names = "qdsp6_base", "halt_q6", "halt_modem",
+ "halt_nc", "rmb_base", "restart_reg",
+ "pdc_sync", "alt_reset";
+
+ clocks = <&clock_rpmh RPMH_CXO_CLK>,
+ <&clock_gcc GCC_MSS_CFG_AHB_CLK>,
+ <&clock_gcc GCC_MSS_Q6_MEMNOC_AXI_CLK>,
+ <&clock_gcc GCC_BOOT_ROM_AHB_CLK>,
+ <&clock_gcc GCC_MSS_GPLL0_DIV_CLK_SRC>,
+ <&clock_gcc GCC_MSS_SNOC_AXI_CLK>,
+ <&clock_gcc GCC_MSS_MFAB_AXIS_CLK>,
+ <&clock_gcc GCC_PRNG_AHB_CLK>;
+ clock-names = "xo", "iface_clk", "bus_clk",
+ "mem_clk", "gpll0_mss_clk", "snoc_axi_clk",
+ "mnoc_axi_clk", "prng_clk";
+ qcom,proxy-clock-names = "xo", "prng_clk";
+ qcom,active-clock-names = "iface_clk", "bus_clk", "mem_clk",
+ "gpll0_mss_clk", "snoc_axi_clk",
+ "mnoc_axi_clk";
+
+ interrupts = <0 266 1>;
+ vdd_cx-supply = <&pm660l_s3_level>;
+ vdd_cx-voltage = <RPMH_REGULATOR_LEVEL_TURBO>;
+ vdd_mx-supply = <&pm660l_s1_level>;
+ vdd_mx-uV = <RPMH_REGULATOR_LEVEL_TURBO>;
+ qcom,firmware-name = "modem";
+ qcom,pil-self-auth;
+ qcom,sysmon-id = <0>;
+ qcom,ssctl-instance-id = <0x12>;
+ qcom,override-acc;
+ qcom,qdsp6v65-1-0;
+ status = "ok";
+ memory-region = <&pil_modem_mem>;
+ qcom,mem-protect-id = <0xF>;
+
+ /* GPIO inputs from mss */
+ qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_1_in 0 0>;
+ qcom,gpio-err-ready = <&smp2pgpio_ssr_smp2p_1_in 1 0>;
+ qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_1_in 2 0>;
+ qcom,gpio-stop-ack = <&smp2pgpio_ssr_smp2p_1_in 3 0>;
+ qcom,gpio-shutdown-ack = <&smp2pgpio_ssr_smp2p_1_in 7 0>;
+
+ /* GPIO output to mss */
+ qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_1_out 0 0>;
+ qcom,mba-mem@0 {
+ compatible = "qcom,pil-mba-mem";
+ memory-region = <&pil_mba_mem>;
+ };
+ };
+
+ qcom,venus@aae0000 {
+ compatible = "qcom,pil-tz-generic";
+ reg = <0xaae0000 0x4000>;
+
+ vdd-supply = <&venus_gdsc>;
+ qcom,proxy-reg-names = "vdd";
+
+ clocks = <&clock_videocc VIDEO_CC_VENUS_CTL_CORE_CLK>,
+ <&clock_videocc VIDEO_CC_VENUS_AHB_CLK>,
+ <&clock_videocc VIDEO_CC_VENUS_CTL_AXI_CLK>;
+ clock-names = "core_clk", "iface_clk", "bus_clk";
+ qcom,proxy-clock-names = "core_clk", "iface_clk", "bus_clk";
+
+ qcom,pas-id = <9>;
+ qcom,msm-bus,name = "pil-venus";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <63 512 0 0>,
+ <63 512 0 304000>;
+ qcom,proxy-timeout-ms = <100>;
+ qcom,firmware-name = "venus";
+ memory-region = <&pil_video_mem>;
+ status = "ok";
+ };
+
+ qcom,turing@8300000 {
+ compatible = "qcom,pil-tz-generic";
+ reg = <0x8300000 0x100000>;
+ interrupts = <0 578 1>;
+
+ vdd_cx-supply = <&pm660l_s3_level>;
+ qcom,proxy-reg-names = "vdd_cx";
+ qcom,vdd_cx-uV-uA = <RPMH_REGULATOR_LEVEL_TURBO 100000>;
+
+ clocks = <&clock_rpmh RPMH_CXO_CLK>;
+ clock-names = "xo";
+ qcom,proxy-clock-names = "xo";
+
+ qcom,pas-id = <18>;
+ qcom,proxy-timeout-ms = <10000>;
+ qcom,smem-id = <601>;
+ qcom,sysmon-id = <7>;
+ qcom,ssctl-instance-id = <0x17>;
+ qcom,firmware-name = "cdsp";
+ memory-region = <&pil_cdsp_mem>;
+
+ /* GPIO inputs from turing */
+ qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_5_in 0 0>;
+ qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_5_in 2 0>;
+ qcom,gpio-err-ready = <&smp2pgpio_ssr_smp2p_5_in 1 0>;
+ qcom,gpio-stop-ack = <&smp2pgpio_ssr_smp2p_5_in 3 0>;
+
+ /* GPIO output to turing */
+ qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_5_out 0 0>;
+ status = "ok";
+ };
+
+ sdhc_1: sdhci@7c4000 {
+ compatible = "qcom,sdhci-msm-v5";
+ reg = <0x7C4000 0x1000>, <0x7C5000 0x1000>;
+ reg-names = "hc_mem", "cmdq_mem";
+
+ interrupts = <0 641 0>, <0 644 0>;
+ interrupt-names = "hc_irq", "pwr_irq";
+
+ qcom,bus-width = <8>;
+ qcom,large-address-bus;
+
+ clocks = <&clock_gcc GCC_SDCC1_AHB_CLK>,
+ <&clock_gcc GCC_SDCC1_APPS_CLK>;
+ clock-names = "iface_clk", "core_clk";
+
+ qcom,nonremovable;
+
+ qcom,scaling-lower-bus-speed-mode = "DDR52";
+ status = "disabled";
+ };
+
+ qcom,msm-cdsp-loader {
+ compatible = "qcom,cdsp-loader";
+ qcom,proc-img-to-load = "cdsp";
+ };
+
+ qcom,msm-adsprpc-mem {
+ compatible = "qcom,msm-adsprpc-mem-region";
+ memory-region = <&adsp_mem>;
+ };
+
+ qcom,msm_fastrpc {
+ compatible = "qcom,msm-fastrpc-compute";
+
+ qcom,msm_fastrpc_compute_cb1 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "cdsprpc-smd";
+ iommus = <&apps_smmu 0x1421 0x30>;
+ dma-coherent;
+ };
+ qcom,msm_fastrpc_compute_cb2 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "cdsprpc-smd";
+ iommus = <&apps_smmu 0x1422 0x30>;
+ dma-coherent;
+ };
+ qcom,msm_fastrpc_compute_cb3 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "cdsprpc-smd";
+ iommus = <&apps_smmu 0x1423 0x30>;
+ dma-coherent;
+ };
+ qcom,msm_fastrpc_compute_cb4 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "cdsprpc-smd";
+ iommus = <&apps_smmu 0x1424 0x30>;
+ dma-coherent;
+ };
+ qcom,msm_fastrpc_compute_cb5 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "cdsprpc-smd";
+ iommus = <&apps_smmu 0x1425 0x30>;
+ dma-coherent;
+ };
+ qcom,msm_fastrpc_compute_cb6 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "cdsprpc-smd";
+ iommus = <&apps_smmu 0x1426 0x30>;
+ dma-coherent;
+ };
+ qcom,msm_fastrpc_compute_cb7 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "cdsprpc-smd";
+ qcom,secure-context-bank;
+ iommus = <&apps_smmu 0x1429 0x30>;
+ dma-coherent;
+ };
+ qcom,msm_fastrpc_compute_cb8 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "cdsprpc-smd";
+ qcom,secure-context-bank;
+ iommus = <&apps_smmu 0x142A 0x30>;
+ dma-coherent;
+ };
+ qcom,msm_fastrpc_compute_cb9 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "adsprpc-smd";
+ iommus = <&apps_smmu 0x1803 0x0>;
+ dma-coherent;
+ };
+ qcom,msm_fastrpc_compute_cb10 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "adsprpc-smd";
+ iommus = <&apps_smmu 0x1804 0x0>;
+ dma-coherent;
+ };
+ qcom,msm_fastrpc_compute_cb11 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "adsprpc-smd";
+ iommus = <&apps_smmu 0x1805 0x0>;
+ dma-coherent;
+ };
+ };
};
#include "sdm670-pinctrl.dtsi"
#include "msm-arm-smmu-sdm670.dtsi"
#include "msm-gdsc-sdm845.dtsi"
+#include "sdm670-pm.dtsi"
&usb30_prim_gdsc {
status = "ok";
@@ -792,6 +1925,18 @@
status = "ok";
};
+&hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc {
+ status = "ok";
+};
+
+&hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc {
+ status = "ok";
+};
+
+&hlos1_vote_mmnoc_mmu_tbu_sf_gdsc {
+ status = "ok";
+};
+
&bps_gdsc {
status = "ok";
};
@@ -828,6 +1973,7 @@
clock-names = "core_root_clk";
clocks = <&clock_gfx GPU_CC_GX_GFX3D_CLK_SRC>;
qcom,force-enable-root-clk;
+ parent-supply = <&pm660l_s2_level>;
status = "ok";
};
@@ -848,3 +1994,4 @@
#include "pm660.dtsi"
#include "pm660l.dtsi"
#include "sdm670-regulator.dtsi"
+#include "sdm670-audio.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp-overlay.dts
new file mode 100644
index 0000000..a78672d
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp-overlay.dts
@@ -0,0 +1,66 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sdm845-sde-display.dtsi"
+#include "sdm845-cdp.dtsi"
+#include "sdm845-cdp-audio-overlay.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. sdm845 4K Display Panel CDP";
+ compatible = "qcom,sdm845-cdp", "qcom,sdm845", "qcom,cdp";
+ qcom,msm-id = <321 0x10000>;
+ qcom,board-id = <1 1>;
+};
+
+&dsi_nt35597_truly_dsc_cmd_display {
+ /delete-property/ qcom,dsi-display-active;
+};
+
+&mdss_mdp {
+ connectors = <&sde_rscc &sde_wb>;
+};
+
+&dsi_sharp_4k_dsc_video {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+ qcom,panel-mode-gpio = <&tlmm 52 0>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+ qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_sharp_4k_dsc_cmd {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+ qcom,panel-mode-gpio = <&tlmm 52 0>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+ qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_sharp_4k_dsc_video_display {
+ qcom,dsi-display-active;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
index 4b7a680..faf09c4 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
@@ -14,6 +14,7 @@
/dts-v1/;
#include "sdm845.dtsi"
+#include "sdm845-sde-display.dtsi"
#include "sdm845-cdp.dtsi"
/ {
@@ -22,7 +23,7 @@
qcom,board-id = <1 1>;
};
-&dsi_dual_nt35597_truly_cmd_display {
+&dsi_nt35597_truly_dsc_cmd_display {
/delete-property/ qcom,dsi-display-active;
};
@@ -41,6 +42,17 @@
qcom,platform-reset-gpio = <&tlmm 6 0>;
};
+&dsi_sharp_4k_dsc_cmd {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+ qcom,panel-mode-gpio = <&tlmm 52 0>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+ qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
&dsi_sharp_4k_dsc_video_display {
qcom,dsi-display-active;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp-overlay.dts
new file mode 100644
index 0000000..a776d42
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp-overlay.dts
@@ -0,0 +1,66 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sdm845-sde-display.dtsi"
+#include "sdm845-mtp.dtsi"
+#include "sdm845-audio-overlay.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. sdm845 4K Display Panel MTP";
+ compatible = "qcom,sdm845-mtp", "qcom,sdm845", "qcom,mtp";
+ qcom,msm-id = <321 0x10000>;
+ qcom,board-id = <8 1>;
+};
+
+&dsi_nt35597_truly_dsc_cmd_display {
+ /delete-property/ qcom,dsi-display-active;
+};
+
+&mdss_mdp {
+ connectors = <&sde_rscc &sde_wb &sde_dp>;
+};
+
+&dsi_sharp_4k_dsc_video {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+ qcom,panel-mode-gpio = <&tlmm 52 0>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+ qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_sharp_4k_dsc_cmd {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+ qcom,panel-mode-gpio = <&tlmm 52 0>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+ qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_sharp_4k_dsc_video_display {
+ qcom,dsi-display-active;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
index 67c3bcd..2ae9345 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
@@ -14,6 +14,7 @@
/dts-v1/;
#include "sdm845.dtsi"
+#include "sdm845-sde-display.dtsi"
#include "sdm845-mtp.dtsi"
/ {
@@ -22,7 +23,7 @@
qcom,board-id = <8 1>;
};
-&dsi_dual_nt35597_truly_cmd_display {
+&dsi_nt35597_truly_dsc_cmd_display {
/delete-property/ qcom,dsi-display-active;
};
@@ -41,6 +42,17 @@
qcom,platform-reset-gpio = <&tlmm 6 0>;
};
+&dsi_sharp_4k_dsc_cmd {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+ qcom,panel-mode-gpio = <&tlmm 52 0>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+ qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
&dsi_sharp_4k_dsc_video_display {
qcom,dsi-display-active;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-qrd-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-qrd-overlay.dts
new file mode 100644
index 0000000..c6622d4
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-qrd-overlay.dts
@@ -0,0 +1,64 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sdm845-sde-display.dtsi"
+#include "sdm845-qrd.dtsi"
+#include "sdm845-qrd-audio-overlay.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. sdm845 4K Display Panel QRD";
+ compatible = "qcom,sdm845-qrd", "qcom,sdm845", "qcom,qrd";
+ qcom,msm-id = <321 0x10000>;
+ qcom,board-id = <11 1>;
+};
+
+&dsi_nt35597_truly_dsc_cmd_display {
+ /delete-property/ qcom,dsi-display-active;
+};
+
+&dsi_sharp_4k_dsc_video {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+ qcom,panel-mode-gpio = <&tlmm 52 0>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+ qcom,platform-reset-gpio = <&tlmm 6 0>;
+ qcom,mdss-dsi-panel-orientation = "180";
+};
+
+&dsi_sharp_4k_dsc_cmd {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+ qcom,panel-mode-gpio = <&tlmm 52 0>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+ qcom,platform-reset-gpio = <&tlmm 6 0>;
+ qcom,mdss-dsi-panel-orientation = "180";
+};
+
+&dsi_sharp_4k_dsc_video_display {
+ qcom,dsi-display-active;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-qrd.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-qrd.dts
index 6171c7b..20f80c9 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-qrd.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-qrd.dts
@@ -14,6 +14,7 @@
/dts-v1/;
#include "sdm845.dtsi"
+#include "sdm845-sde-display.dtsi"
#include "sdm845-qrd.dtsi"
/ {
@@ -21,3 +22,35 @@
compatible = "qcom,sdm845-qrd", "qcom,sdm845", "qcom,qrd";
qcom,board-id = <11 1>;
};
+
+&dsi_nt35597_truly_dsc_cmd_display {
+ /delete-property/ qcom,dsi-display-active;
+};
+
+&dsi_sharp_4k_dsc_video {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+ qcom,panel-mode-gpio = <&tlmm 52 0>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+ qcom,platform-reset-gpio = <&tlmm 6 0>;
+ qcom,mdss-dsi-panel-orientation = "180";
+};
+
+&dsi_sharp_4k_dsc_cmd {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+ qcom,panel-mode-gpio = <&tlmm 52 0>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+ qcom,platform-reset-gpio = <&tlmm 6 0>;
+ qcom,mdss-dsi-panel-orientation = "180";
+};
+
+&dsi_sharp_4k_dsc_video_display {
+ qcom,dsi-display-active;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm845-audio-overlay.dtsi
new file mode 100644
index 0000000..9208302
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-audio-overlay.dtsi
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sdm845-wcd.dtsi"
+#include "msm-wsa881x.dtsi"
+#include <dt-bindings/clock/qcom,audio-ext-clk.h>
+
+&snd_934x {
+ qcom,audio-routing =
+ "AIF4 VI", "MCLK",
+ "RX_BIAS", "MCLK",
+ "MADINPUT", "MCLK",
+ "hifi amp", "LINEOUT1",
+ "hifi amp", "LINEOUT2",
+ "AMIC2", "MIC BIAS2",
+ "MIC BIAS2", "Headset Mic",
+ "AMIC3", "MIC BIAS2",
+ "MIC BIAS2", "ANCRight Headset Mic",
+ "AMIC4", "MIC BIAS2",
+ "MIC BIAS2", "ANCLeft Headset Mic",
+ "AMIC5", "MIC BIAS3",
+ "MIC BIAS3", "Handset Mic",
+ "DMIC0", "MIC BIAS1",
+ "MIC BIAS1", "Digital Mic0",
+ "DMIC1", "MIC BIAS1",
+ "MIC BIAS1", "Digital Mic1",
+ "DMIC2", "MIC BIAS3",
+ "MIC BIAS3", "Digital Mic2",
+ "DMIC3", "MIC BIAS3",
+ "MIC BIAS3", "Digital Mic3",
+ "DMIC4", "MIC BIAS4",
+ "MIC BIAS4", "Digital Mic4",
+ "DMIC5", "MIC BIAS4",
+ "MIC BIAS4", "Digital Mic5",
+ "SpkrLeft IN", "SPK1 OUT",
+ "SpkrRight IN", "SPK2 OUT";
+
+ qcom,msm-mbhc-hphl-swh = <1>;
+ qcom,msm-mbhc-gnd-swh = <1>;
+ qcom,hph-en0-gpio = <&tavil_hph_en0>;
+ qcom,hph-en1-gpio = <&tavil_hph_en1>;
+ qcom,tavil-mclk-clk-freq = <9600000>;
+
+ asoc-codec = <&stub_codec>;
+ asoc-codec-names = "msm-stub-codec.1";
+
+ qcom,usbc-analog-en1-gpio = <&wcd_usbc_analog_en1_gpio>;
+ qcom,usbc-analog-en2-gpio = <&tlmm 51 0>;
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&wcd_usbc_analog_en2_active>;
+ pinctrl-1 = <&wcd_usbc_analog_en2_idle>;
+
+ qcom,wsa-max-devs = <2>;
+ qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0212>,
+ <&wsa881x_0213>, <&wsa881x_0214>;
+ qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrRight",
+ "SpkrLeft", "SpkrRight";
+};
+
+&soc {
+ wcd_usbc_analog_en1_gpio: msm_cdc_pinctrl@49 {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&wcd_usbc_analog_en1_active>;
+ pinctrl-1 = <&wcd_usbc_analog_en1_idle>;
+ };
+
+ wcd9xxx_intc: wcd9xxx-irq {
+ status = "ok";
+ compatible = "qcom,wcd9xxx-irq";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tlmm>;
+ qcom,gpio-connect = <&tlmm 54 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&wcd_intr_default>;
+ };
+
+ clock_audio_lnbb: audio_ext_clk_lnbb {
+ status = "ok";
+ compatible = "qcom,audio-ref-clk";
+ clock-names = "osr_clk";
+ clocks = <&clock_rpmh RPMH_LN_BB_CLK2>;
+ qcom,node_has_rpm_clock;
+ #clock-cells = <1>;
+ };
+
+ wcd_rst_gpio: msm_cdc_pinctrl@64 {
+ compatible = "qcom,msm-cdc-pinctrl";
+ qcom,cdc-rst-n-gpio = <&tlmm 64 0>;
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&cdc_reset_active>;
+ pinctrl-1 = <&cdc_reset_sleep>;
+ };
+
+ qocm,wcd-dsp-glink {
+ compatible = "qcom,wcd-dsp-glink";
+ };
+
+ qcom,wcd-dsp-mgr {
+ compatible = "qcom,wcd-dsp-mgr";
+ qcom,wdsp-components = <&wcd934x_cdc 0>,
+ <&wcd_spi_0 1>,
+ <&glink_spi_xprt_wdsp 2>;
+ qcom,img-filename = "cpe_9340";
+ };
+};
+
+&slim_aud {
+ wcd934x_cdc: tavil_codec {
+ compatible = "qcom,tavil-slim-pgd";
+ elemental-addr = [00 01 50 02 17 02];
+
+ interrupt-parent = <&wcd9xxx_intc>;
+ interrupts = <0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
+ 17 18 19 20 21 22 23 24 25 26 27 28 29
+ 30 31>;
+
+ qcom,wcd-rst-gpio-node = <&wcd_rst_gpio>;
+
+ clock-names = "wcd_clk";
+ clocks = <&clock_audio_lnbb AUDIO_PMIC_LNBB_CLK>;
+
+ cdc-vdd-buck-supply = <&pm8998_s4>;
+ qcom,cdc-vdd-buck-voltage = <1800000 1800000>;
+ qcom,cdc-vdd-buck-current = <650000>;
+
+ cdc-buck-sido-supply = <&pm8998_s4>;
+ qcom,cdc-buck-sido-voltage = <1800000 1800000>;
+ qcom,cdc-buck-sido-current = <250000>;
+
+ cdc-vdd-tx-h-supply = <&pm8998_s4>;
+ qcom,cdc-vdd-tx-h-voltage = <1800000 1800000>;
+ qcom,cdc-vdd-tx-h-current = <25000>;
+
+ cdc-vdd-rx-h-supply = <&pm8998_s4>;
+ qcom,cdc-vdd-rx-h-voltage = <1800000 1800000>;
+ qcom,cdc-vdd-rx-h-current = <25000>;
+
+ cdc-vddpx-1-supply = <&pm8998_s4>;
+ qcom,cdc-vddpx-1-voltage = <1800000 1800000>;
+ qcom,cdc-vddpx-1-current = <10000>;
+
+ qcom,cdc-static-supplies = "cdc-vdd-buck",
+ "cdc-buck-sido",
+ "cdc-vdd-tx-h",
+ "cdc-vdd-rx-h",
+ "cdc-vddpx-1";
+
+ qcom,cdc-micbias1-mv = <1800>;
+ qcom,cdc-micbias2-mv = <1800>;
+ qcom,cdc-micbias3-mv = <1800>;
+ qcom,cdc-micbias4-mv = <1800>;
+
+ qcom,cdc-mclk-clk-rate = <9600000>;
+ qcom,cdc-slim-ifd = "tavil-slim-ifd";
+ qcom,cdc-slim-ifd-elemental-addr = [00 00 50 02 17 02];
+ qcom,cdc-dmic-sample-rate = <4800000>;
+ qcom,cdc-mad-dmic-rate = <600000>;
+
+ qcom,wdsp-cmpnt-dev-name = "tavil_codec";
+
+ wcd_spi_0: wcd_spi {
+ compatible = "qcom,wcd-spi-v2";
+ qcom,master-bus-num = <0>;
+ qcom,chip-select = <0>;
+ qcom,max-frequency = <9600000>;
+ qcom,mem-base-addr = <0x100000>;
+ };
+
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi b/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi
index ad046e9..dd82ad7 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi
@@ -12,9 +12,6 @@
*/
#include "msm-audio-lpass.dtsi"
-#include "sdm845-wcd.dtsi"
-#include "msm-wsa881x.dtsi"
-#include <dt-bindings/clock/qcom,audio-ext-clk.h>
&msm_audio_ion {
iommus = <&apps_smmu 0x1821 0x0>;
@@ -31,7 +28,7 @@
qcom,clk-mult = <10>;
};
- sound-tavil {
+ snd_934x: sound-tavil {
compatible = "qcom,sdm845-asoc-snd-tavil";
qcom,model = "sdm845-tavil-snd-card";
qcom,wcn-btfm;
@@ -48,43 +45,6 @@
"lpaif_tert_mode_muxsel",
"lpaif_quat_mode_muxsel";
- qcom,audio-routing =
- "AIF4 VI", "MCLK",
- "RX_BIAS", "MCLK",
- "MADINPUT", "MCLK",
- "hifi amp", "LINEOUT1",
- "hifi amp", "LINEOUT2",
- "AMIC2", "MIC BIAS2",
- "MIC BIAS2", "Headset Mic",
- "AMIC3", "MIC BIAS2",
- "MIC BIAS2", "ANCRight Headset Mic",
- "AMIC4", "MIC BIAS2",
- "MIC BIAS2", "ANCLeft Headset Mic",
- "AMIC5", "MIC BIAS3",
- "MIC BIAS3", "Handset Mic",
- "DMIC0", "MIC BIAS1",
- "MIC BIAS1", "Digital Mic0",
- "DMIC1", "MIC BIAS1",
- "MIC BIAS1", "Digital Mic1",
- "DMIC2", "MIC BIAS3",
- "MIC BIAS3", "Digital Mic2",
- "DMIC3", "MIC BIAS3",
- "MIC BIAS3", "Digital Mic3",
- "DMIC4", "MIC BIAS4",
- "MIC BIAS4", "Digital Mic4",
- "DMIC5", "MIC BIAS4",
- "MIC BIAS4", "Digital Mic5",
- "SpkrLeft IN", "SPK1 OUT",
- "SpkrRight IN", "SPK2 OUT";
-
- qcom,msm-mbhc-hphl-swh = <1>;
- qcom,msm-mbhc-gnd-swh = <1>;
- qcom,hph-en0-gpio = <&tavil_hph_en0>;
- qcom,hph-en1-gpio = <&tavil_hph_en1>;
- qcom,tavil-mclk-clk-freq = <9600000>;
-
- qcom,usbc-analog-en1-gpio = <&wcd_usbc_analog_en1_gpio>;
-
asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
<&loopback>, <&compress>, <&hostless>,
<&afe>, <&lsm>, <&routing>, <&compr>,
@@ -136,65 +96,6 @@
"msm-dai-q6-tdm.36880", "msm-dai-q6-tdm.36881",
"msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36897",
"msm-dai-q6-tdm.36912", "msm-dai-q6-tdm.36913";
- asoc-codec = <&stub_codec>;
- asoc-codec-names = "msm-stub-codec.1";
- qcom,wsa-max-devs = <2>;
- qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0212>,
- <&wsa881x_0213>, <&wsa881x_0214>;
- qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrRight",
- "SpkrLeft", "SpkrRight";
-
- qcom,usbc-analog-en2-gpio = <&tlmm 51 0>;
- pinctrl-names = "aud_active", "aud_sleep";
- pinctrl-0 = <&wcd_usbc_analog_en2_active>;
- pinctrl-1 = <&wcd_usbc_analog_en2_idle>;
- };
-
- wcd_usbc_analog_en1_gpio: msm_cdc_pinctrl@49 {
- compatible = "qcom,msm-cdc-pinctrl";
- pinctrl-names = "aud_active", "aud_sleep";
- pinctrl-0 = <&wcd_usbc_analog_en1_active>;
- pinctrl-1 = <&wcd_usbc_analog_en1_idle>;
- };
-
- wcd9xxx_intc: wcd9xxx-irq {
- status = "ok";
- compatible = "qcom,wcd9xxx-irq";
- interrupt-controller;
- #interrupt-cells = <1>;
- interrupt-parent = <&tlmm>;
- qcom,gpio-connect = <&tlmm 54 0>;
- pinctrl-names = "default";
- pinctrl-0 = <&wcd_intr_default>;
- };
-
- clock_audio_lnbb: audio_ext_clk_lnbb {
- status = "ok";
- compatible = "qcom,audio-ref-clk";
- clock-names = "osr_clk";
- clocks = <&clock_rpmh RPMH_LN_BB_CLK2>;
- qcom,node_has_rpm_clock;
- #clock-cells = <1>;
- };
-
- wcd_rst_gpio: msm_cdc_pinctrl@64 {
- compatible = "qcom,msm-cdc-pinctrl";
- qcom,cdc-rst-n-gpio = <&tlmm 64 0>;
- pinctrl-names = "aud_active", "aud_sleep";
- pinctrl-0 = <&cdc_reset_active>;
- pinctrl-1 = <&cdc_reset_sleep>;
- };
-
- qocm,wcd-dsp-glink {
- compatible = "qcom,wcd-dsp-glink";
- };
-
- qcom,wcd-dsp-mgr {
- compatible = "qcom,wcd-dsp-mgr";
- qcom,wdsp-components = <&wcd934x_cdc 0>,
- <&wcd_spi_0 1>,
- <&glink_spi_xprt_wdsp 2>;
- qcom,img-filename = "cpe_9340";
};
};
@@ -203,66 +104,4 @@
compatible = "qcom,msm-dai-slim";
elemental-addr = [ff ff ff fe 17 02];
};
-
- wcd934x_cdc: tavil_codec {
- compatible = "qcom,tavil-slim-pgd";
- elemental-addr = [00 01 50 02 17 02];
-
- interrupt-parent = <&wcd9xxx_intc>;
- interrupts = <0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
- 17 18 19 20 21 22 23 24 25 26 27 28 29
- 30 31>;
-
- qcom,wcd-rst-gpio-node = <&wcd_rst_gpio>;
-
- clock-names = "wcd_clk";
- clocks = <&clock_audio_lnbb AUDIO_PMIC_LNBB_CLK>;
-
- cdc-vdd-buck-supply = <&pm8998_s4>;
- qcom,cdc-vdd-buck-voltage = <1800000 1800000>;
- qcom,cdc-vdd-buck-current = <650000>;
-
- cdc-buck-sido-supply = <&pm8998_s4>;
- qcom,cdc-buck-sido-voltage = <1800000 1800000>;
- qcom,cdc-buck-sido-current = <250000>;
-
- cdc-vdd-tx-h-supply = <&pm8998_s4>;
- qcom,cdc-vdd-tx-h-voltage = <1800000 1800000>;
- qcom,cdc-vdd-tx-h-current = <25000>;
-
- cdc-vdd-rx-h-supply = <&pm8998_s4>;
- qcom,cdc-vdd-rx-h-voltage = <1800000 1800000>;
- qcom,cdc-vdd-rx-h-current = <25000>;
-
- cdc-vddpx-1-supply = <&pm8998_s4>;
- qcom,cdc-vddpx-1-voltage = <1800000 1800000>;
- qcom,cdc-vddpx-1-current = <10000>;
-
- qcom,cdc-static-supplies = "cdc-vdd-buck",
- "cdc-buck-sido",
- "cdc-vdd-tx-h",
- "cdc-vdd-rx-h",
- "cdc-vddpx-1";
-
- qcom,cdc-micbias1-mv = <1800>;
- qcom,cdc-micbias2-mv = <1800>;
- qcom,cdc-micbias3-mv = <1800>;
- qcom,cdc-micbias4-mv = <1800>;
-
- qcom,cdc-mclk-clk-rate = <9600000>;
- qcom,cdc-slim-ifd = "tavil-slim-ifd";
- qcom,cdc-slim-ifd-elemental-addr = [00 00 50 02 17 02];
- qcom,cdc-dmic-sample-rate = <4800000>;
- qcom,cdc-mad-dmic-rate = <600000>;
-
- qcom,wdsp-cmpnt-dev-name = "tavil_codec";
-
- wcd_spi_0: wcd_spi {
- compatible = "qcom,wcd-spi-v2";
- qcom,master-bus-num = <0>;
- qcom,chip-select = <0>;
- qcom,max-frequency = <9600000>;
- qcom,mem-base-addr = <0x100000>;
- };
- };
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi b/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
index b33b525..5fbb1db 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
@@ -572,6 +572,18 @@
qcom,prio = <2>;
};
+ mas_xm_pcie_0: mas-xm-pcie-0 {
+ cell-id = <MSM_BUS_MASTER_PCIE>;
+ label = "mas-xm-pcie-0";
+ qcom,buswidth = <8>;
+ qcom,agg-ports = <1>;
+ qcom,qport = <5>;
+ qcom,connections = <&slv_qns_pcie_a1noc_snoc>;
+ qcom,bus-dev = <&fab_aggre1_noc>;
+ qcom,ap-owned;
+ qcom,prio = <2>;
+ };
+
mas_qhm_a2noc_cfg: mas-qhm-a2noc-cfg {
cell-id = <MSM_BUS_MASTER_A2NOC_CFG>;
label = "mas-qhm-a2noc-cfg";
@@ -648,18 +660,6 @@
qcom,prio = <2>;
};
- mas_xm_pcie_0: mas-xm-pcie-0 {
- cell-id = <MSM_BUS_MASTER_PCIE>;
- label = "mas-xm-pcie-0";
- qcom,buswidth = <8>;
- qcom,agg-ports = <1>;
- qcom,qport = <5>;
- qcom,connections = <&slv_qns_pcie_snoc>;
- qcom,bus-dev = <&fab_aggre1_noc>;
- qcom,ap-owned;
- qcom,prio = <2>;
- };
-
mas_xm_qdss_etr: mas-xm-qdss-etr {
cell-id = <MSM_BUS_MASTER_QDSS_ETR>;
label = "mas-xm-qdss-etr";
@@ -680,7 +680,14 @@
qcom,qport = <10>;
qcom,connections = <&slv_qns_a2noc_snoc>;
qcom,bus-dev = <&fab_aggre2_noc>;
+ qcom,ap-owned;
qcom,prio = <2>;
+ qcom,node-qos-clks {
+ clocks =
+ <&clock_gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>;
+ clock-names =
+ "clk-usb3-prim-axi-no-rate";
+ };
};
mas_xm_usb3_1: mas-xm-usb3-1 {
@@ -691,7 +698,14 @@
qcom,qport = <11>;
qcom,connections = <&slv_qns_a2noc_snoc>;
qcom,bus-dev = <&fab_aggre2_noc>;
+ qcom,ap-owned;
qcom,prio = <2>;
+ qcom,node-qos-clks {
+ clocks =
+ <&clock_gcc GCC_AGGRE_USB3_SEC_AXI_CLK>;
+ clock-names =
+ "clk-usb3-sec-axi-no-rate";
+ };
};
mas_qxm_camnoc_hf0_uncomp: mas-qxm-camnoc-hf0-uncomp {
@@ -701,6 +715,7 @@
qcom,agg-ports = <1>;
qcom,connections = <&slv_qns_camnoc_uncomp>;
qcom,bus-dev = <&fab_camnoc_virt>;
+ qcom,bcms = <&bcm_mm1>;
};
mas_qxm_camnoc_hf1_uncomp: mas-qxm-camnoc-hf1-uncomp {
@@ -710,6 +725,7 @@
qcom,agg-ports = <1>;
qcom,connections = <&slv_qns_camnoc_uncomp>;
qcom,bus-dev = <&fab_camnoc_virt>;
+ qcom,bcms = <&bcm_mm1>;
};
mas_qxm_camnoc_sf_uncomp: mas-qxm-camnoc-sf-uncomp {
@@ -719,6 +735,7 @@
qcom,agg-ports = <1>;
qcom,connections = <&slv_qns_camnoc_uncomp>;
qcom,bus-dev = <&fab_camnoc_virt>;
+ qcom,bcms = <&bcm_mm1>;
};
mas_qhm_spdm: mas-qhm-spdm {
@@ -1208,6 +1225,19 @@
qcom,prio = <2>;
};
+ mas_xm_gic: mas-xm-gic {
+ cell-id = <MSM_BUS_MASTER_GIC>;
+ label = "mas-xm-gic";
+ qcom,buswidth = <8>;
+ qcom,agg-ports = <1>;
+ qcom,qport = <0>;
+ qcom,connections = <&slv_qxs_imem &slv_qns_memnoc_gc>;
+ qcom,bus-dev = <&fab_system_noc>;
+ qcom,bcms = <&bcm_sn12>;
+ qcom,ap-owned;
+ qcom,prio = <1>;
+ };
+
mas_alc: mas-alc {
cell-id = <MSM_BUS_MASTER_ALC>;
label = "mas-alc";
@@ -1301,6 +1331,15 @@
qcom,bcms = <&bcm_sn9>;
};
+ slv_qns_pcie_a1noc_snoc:slv-qns-pcie-a1noc-snoc {
+ cell-id = <MSM_BUS_SLAVE_ANOC_PCIE_A1NOC_SNOC>;
+ label = "slv-qns-pcie-a1noc-snoc";
+ qcom,buswidth = <16>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_aggre1_noc>;
+ qcom,connections = <&mas_qnm_pcie_anoc>;
+ };
+
slv_qns_a2noc_snoc:slv-qns-a2noc-snoc {
cell-id = <MSM_BUS_A2NOC_SNOC_SLV>;
label = "slv-qns-a2noc-snoc";
@@ -1334,7 +1373,6 @@
qcom,buswidth = <32>;
qcom,agg-ports = <1>;
qcom,bus-dev = <&fab_camnoc_virt>;
- qcom,bcms = <&bcm_mm1>;
};
slv_qhs_a1_noc_cfg:slv-qhs-a1-noc-cfg {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
index a715025..9d799cb 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
@@ -16,9 +16,9 @@
cell-index = <0>;
reg = <0x00 0x00>;
compatible = "qcom,camera-flash";
- qcom,flash-source = <&pmi8998_flash0 &pmi8998_flash1>;
- qcom,torch-source = <&pmi8998_torch0 &pmi8998_torch1>;
- qcom,switch-source = <&pmi8998_switch0>;
+ flash-source = <&pmi8998_flash0 &pmi8998_flash1>;
+ torch-source = <&pmi8998_torch0 &pmi8998_torch1>;
+ switch-source = <&pmi8998_switch0>;
status = "ok";
};
@@ -26,9 +26,9 @@
cell-index = <1>;
reg = <0x01 0x00>;
compatible = "qcom,camera-flash";
- qcom,flash-source = <&pmi8998_flash2>;
- qcom,torch-source = <&pmi8998_torch2>;
- qcom,switch-source = <&pmi8998_switch1>;
+ flash-source = <&pmi8998_flash2>;
+ torch-source = <&pmi8998_torch2>;
+ switch-source = <&pmi8998_switch1>;
status = "ok";
};
@@ -78,36 +78,39 @@
cell-index = <0>;
reg = <0x0>;
compatible = "qcom,actuator";
- qcom,cci-master = <0>;
+ cci-master = <0>;
cam_vaf-supply = <&actuator_regulator>;
- qcom,cam-vreg-name = "cam_vaf";
- qcom,cam-vreg-min-voltage = <2800000>;
- qcom,cam-vreg-max-voltage = <2800000>;
- qcom,cam-vreg-op-mode = <0>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2800000>;
+ rgltr-max-voltage = <2800000>;
+ rgltr-load-current = <0>;
};
actuator_front: qcom,actuator@1 {
cell-index = <1>;
reg = <0x1>;
compatible = "qcom,actuator";
- qcom,cci-master = <1>;
+ cci-master = <1>;
cam_vaf-supply = <&actuator_regulator>;
- qcom,cam-vreg-name = "cam_vaf";
- qcom,cam-vreg-min-voltage = <2800000>;
- qcom,cam-vreg-max-voltage = <2800000>;
- qcom,cam-vreg-op-mode = <0>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2800000>;
+ rgltr-max-voltage = <2800000>;
+ rgltr-load-current = <0>;
};
ois_rear: qcom,ois@0 {
cell-index = <0>;
reg = <0x0>;
compatible = "qcom,ois";
- qcom,cci-master = <0>;
+ cci-master = <0>;
cam_vaf-supply = <&actuator_regulator>;
- qcom,cam-vreg-name = "cam_vaf";
- qcom,cam-vreg-min-voltage = <2800000>;
- qcom,cam-vreg-max-voltage = <2800000>;
- qcom,cam-vreg-op-mode = <0>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2800000>;
+ rgltr-max-voltage = <2800000>;
+ rgltr-load-current = <0>;
status = "disabled";
};
@@ -119,12 +122,13 @@
cam_vana-supply = <&pmi8998_bob>;
cam_vdig-supply = <&camera_rear_ldo>;
cam_clk-supply = <&titan_top_gdsc>;
- qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
"cam_clk";
- qcom,cam-vreg-min-voltage = <0 3312000 1050000 0>;
- qcom,cam-vreg-max-voltage = <0 3600000 1050000 0>;
- qcom,cam-vreg-op-mode = <0 80000 105000 0>;
- qcom,gpio-no-mux = <0>;
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <0 3312000 1050000 0>;
+ rgltr-max-voltage = <0 3600000 1050000 0>;
+ rgltr-load-current = <0 80000 105000 0>;
+ gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk0_active
&cam_sensor_rear_active>;
@@ -134,22 +138,23 @@
<&tlmm 80 0>,
<&tlmm 79 0>,
<&tlmm 27 0>;
- qcom,gpio-reset = <1>;
- qcom,gpio-vana = <2>;
- qcom,gpio-vaf = <3>;
- qcom,gpio-req-tbl-num = <0 1 2 3>;
- qcom,gpio-req-tbl-flags = <1 0 0 0>;
- qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+ gpio-reset = <1>;
+ gpio-vana = <2>;
+ gpio-vaf = <3>;
+ gpio-req-tbl-num = <0 1 2 3>;
+ gpio-req-tbl-flags = <1 0 0 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK0",
"CAM_RESET0",
"CAM_VANA0",
"CAM_VAF";
- qcom,sensor-position = <0>;
- qcom,sensor-mode = <0>;
- qcom,cci-master = <0>;
+ sensor-position = <0>;
+ sensor-mode = <0>;
+ cci-master = <0>;
status = "ok";
clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
clock-names = "cam_clk";
- qcom,clock-rates = <24000000>;
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
};
eeprom_rear_aux: qcom,eeprom@1 {
@@ -160,12 +165,13 @@
cam_vio-supply = <&pm8998_lvs1>;
cam_vana-supply = <&pmi8998_bob>;
cam_clk-supply = <&titan_top_gdsc>;
- qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana",
+ regulator-names = "cam_vdig", "cam_vio", "cam_vana",
"cam_clk";
- qcom,cam-vreg-min-voltage = <1050000 0 3312000 0>;
- qcom,cam-vreg-max-voltage = <1050000 0 3600000 0>;
- qcom,cam-vreg-op-mode = <105000 0 80000 0>;
- qcom,gpio-no-mux = <0>;
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <1050000 0 3312000 0>;
+ rgltr-max-voltage = <1050000 0 3600000 0>;
+ rgltr-load-current = <105000 0 80000 0>;
+ gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk2_active
&cam_sensor_rear2_active>;
@@ -174,20 +180,21 @@
gpios = <&tlmm 15 0>,
<&tlmm 9 0>,
<&tlmm 8 0>;
- qcom,gpio-reset = <1>;
- qcom,gpio-vana = <2>;
- qcom,gpio-req-tbl-num = <0 1 2>;
- qcom,gpio-req-tbl-flags = <1 0 0>;
- qcom,gpio-req-tbl-label = "CAMIF_MCLK1",
+ gpio-reset = <1>;
+ gpio-vana = <2>;
+ gpio-req-tbl-num = <0 1 2>;
+ gpio-req-tbl-flags = <1 0 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK1",
"CAM_RESET1",
"CAM_VANA1";
- qcom,sensor-position = <0>;
- qcom,sensor-mode = <0>;
- qcom,cci-master = <1>;
+ sensor-position = <0>;
+ sensor-mode = <0>;
+ cci-master = <1>;
status = "ok";
clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
clock-names = "cam_clk";
- qcom,clock-rates = <24000000>;
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
};
eeprom_front: qcom,eeprom@2 {
@@ -198,12 +205,13 @@
cam_vana-supply = <&pmi8998_bob>;
cam_vdig-supply = <&camera_ldo>;
cam_clk-supply = <&titan_top_gdsc>;
- qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
"cam_clk";
- qcom,cam-vreg-min-voltage = <0 3312000 1050000 0>;
- qcom,cam-vreg-max-voltage = <0 3600000 1050000 0>;
- qcom,cam-vreg-op-mode = <0 80000 105000 0>;
- qcom,gpio-no-mux = <0>;
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <0 3312000 1050000 0>;
+ rgltr-max-voltage = <0 3600000 1050000 0>;
+ rgltr-load-current = <0 80000 105000 0>;
+ gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk1_active
&cam_sensor_front_active>;
@@ -213,46 +221,48 @@
<&tlmm 28 0>,
<&tlmm 8 0>,
<&tlmm 27 0>;
- qcom,gpio-reset = <1>;
- qcom,gpio-vana = <2>;
- qcom,gpio-vaf = <3>;
- qcom,gpio-req-tbl-num = <0 1 2 3>;
- qcom,gpio-req-tbl-flags = <1 0 0 0>;
- qcom,gpio-req-tbl-label = "CAMIF_MCLK2",
+ gpio-reset = <1>;
+ gpio-vana = <2>;
+ gpio-vaf = <3>;
+ gpio-req-tbl-num = <0 1 2 3>;
+ gpio-req-tbl-flags = <1 0 0 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK2",
"CAM_RESET2",
"CAM_VANA2",
"CAM_VAF";
- qcom,sensor-position = <1>;
- qcom,sensor-mode = <0>;
- qcom,cci-master = <1>;
+ sensor-position = <1>;
+ sensor-mode = <0>;
+ cci-master = <1>;
status = "ok";
clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
clock-names = "cam_clk";
- qcom,clock-rates = <24000000>;
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
};
qcom,cam-sensor@0 {
cell-index = <0>;
compatible = "qcom,cam-sensor";
reg = <0x0>;
- qcom,csiphy-sd-index = <0>;
- qcom,sensor-position-roll = <90>;
- qcom,sensor-position-pitch = <0>;
- qcom,sensor-position-yaw = <180>;
- qcom,led-flash-src = <&led_flash_rear>;
- qcom,actuator-src = <&actuator_rear>;
- qcom,ois-src = <&ois_rear>;
- qcom,eeprom-src = <&eeprom_rear>;
+ csiphy-sd-index = <0>;
+ sensor-position-roll = <90>;
+ sensor-position-pitch = <0>;
+ sensor-position-yaw = <180>;
+ led-flash-src = <&led_flash_rear>;
+ actuator-src = <&actuator_rear>;
+ ois-src = <&ois_rear>;
+ eeprom-src = <&eeprom_rear>;
cam_vio-supply = <&pm8998_lvs1>;
cam_vana-supply = <&pmi8998_bob>;
cam_vdig-supply = <&camera_rear_ldo>;
cam_clk-supply = <&titan_top_gdsc>;
- qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
"cam_clk";
- qcom,cam-vreg-min-voltage = <0 3312000 1050000 0>;
- qcom,cam-vreg-max-voltage = <0 3600000 1050000 0>;
- qcom,cam-vreg-op-mode = <0 80000 105000 0>;
- qcom,gpio-no-mux = <0>;
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <0 3312000 1050000 0>;
+ rgltr-max-voltage = <0 3600000 1050000 0>;
+ rgltr-load-current = <0 80000 105000 0>;
+ gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk0_active
&cam_sensor_rear_active>;
@@ -261,40 +271,42 @@
gpios = <&tlmm 13 0>,
<&tlmm 80 0>,
<&tlmm 79 0>;
- qcom,gpio-reset = <1>;
- qcom,gpio-vana = <2>;
- qcom,gpio-req-tbl-num = <0 1 2>;
- qcom,gpio-req-tbl-flags = <1 0 0>;
- qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+ gpio-reset = <1>;
+ gpio-vana = <2>;
+ gpio-req-tbl-num = <0 1 2>;
+ gpio-req-tbl-flags = <1 0 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK0",
"CAM_RESET0",
"CAM_VANA";
- qcom,sensor-mode = <0>;
- qcom,cci-master = <0>;
+ sensor-mode = <0>;
+ cci-master = <0>;
status = "ok";
clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
clock-names = "cam_clk";
- qcom,clock-rates = <24000000>;
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
};
qcom,cam-sensor@1 {
cell-index = <1>;
compatible = "qcom,cam-sensor";
reg = <0x1>;
- qcom,csiphy-sd-index = <1>;
- qcom,sensor-position-roll = <90>;
- qcom,sensor-position-pitch = <0>;
- qcom,sensor-position-yaw = <180>;
- qcom,eeprom-src = <&eeprom_rear_aux>;
+ csiphy-sd-index = <1>;
+ sensor-position-roll = <90>;
+ sensor-position-pitch = <0>;
+ sensor-position-yaw = <180>;
+ eeprom-src = <&eeprom_rear_aux>;
cam_vdig-supply = <&camera_ldo>;
cam_vio-supply = <&pm8998_lvs1>;
cam_vana-supply = <&pmi8998_bob>;
cam_clk-supply = <&titan_top_gdsc>;
- qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana",
+ regulator-names = "cam_vdig", "cam_vio", "cam_vana",
"cam_clk";
- qcom,cam-vreg-min-voltage = <1050000 0 3312000 0>;
- qcom,cam-vreg-max-voltage = <1050000 0 3600000 0>;
- qcom,cam-vreg-op-mode = <105000 0 80000 0>;
- qcom,gpio-no-mux = <0>;
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <1050000 0 3312000 0>;
+ rgltr-max-voltage = <1050000 0 3600000 0>;
+ rgltr-load-current = <105000 0 80000 0>;
+ gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk2_active
&cam_sensor_rear2_active>;
@@ -303,42 +315,44 @@
gpios = <&tlmm 15 0>,
<&tlmm 9 0>,
<&tlmm 8 0>;
- qcom,gpio-reset = <1>;
- qcom,gpio-vana = <2>;
- qcom,gpio-req-tbl-num = <0 1 2>;
- qcom,gpio-req-tbl-flags = <1 0 0>;
- qcom,gpio-req-tbl-label = "CAMIF_MCLK1",
+ gpio-reset = <1>;
+ gpio-vana = <2>;
+ gpio-req-tbl-num = <0 1 2>;
+ gpio-req-tbl-flags = <1 0 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK1",
"CAM_RESET1",
"CAM_VANA1";
- qcom,sensor-mode = <0>;
- qcom,cci-master = <1>;
+ sensor-mode = <0>;
+ cci-master = <1>;
status = "ok";
clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
clock-names = "cam_clk";
- qcom,clock-rates = <24000000>;
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
};
qcom,cam-sensor@2 {
cell-index = <2>;
compatible = "qcom,cam-sensor";
reg = <0x02>;
- qcom,csiphy-sd-index = <2>;
- qcom,sensor-position-roll = <90>;
- qcom,sensor-position-pitch = <0>;
- qcom,sensor-position-yaw = <0>;
- qcom,eeprom-src = <&eeprom_front>;
- qcom,actuator-src = <&actuator_front>;
- qcom,led-flash-src = <&led_flash_front>;
+ csiphy-sd-index = <2>;
+ sensor-position-roll = <90>;
+ sensor-position-pitch = <0>;
+ sensor-position-yaw = <0>;
+ eeprom-src = <&eeprom_front>;
+ actuator-src = <&actuator_front>;
+ led-flash-src = <&led_flash_front>;
cam_vio-supply = <&pm8998_lvs1>;
cam_vana-supply = <&pmi8998_bob>;
cam_vdig-supply = <&camera_ldo>;
cam_clk-supply = <&titan_top_gdsc>;
- qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
"cam_clk";
- qcom,cam-vreg-min-voltage = <0 3312000 1050000 0>;
- qcom,cam-vreg-max-voltage = <0 3600000 1050000 0>;
- qcom,cam-vreg-op-mode = <0 80000 105000 0>;
- qcom,gpio-no-mux = <0>;
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <0 3312000 1050000 0>;
+ rgltr-max-voltage = <0 3600000 1050000 0>;
+ rgltr-load-current = <0 80000 105000 0>;
+ gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk1_active
&cam_sensor_front_active>;
@@ -347,18 +361,19 @@
gpios = <&tlmm 14 0>,
<&tlmm 28 0>,
<&tlmm 8 0>;
- qcom,gpio-reset = <1>;
- qcom,gpio-vana = <2>;
- qcom,gpio-req-tbl-num = <0 1 2>;
- qcom,gpio-req-tbl-flags = <1 0 0>;
- qcom,gpio-req-tbl-label = "CAMIF_MCLK2",
+ gpio-reset = <1>;
+ gpio-vana = <2>;
+ gpio-req-tbl-num = <0 1 2>;
+ gpio-req-tbl-flags = <1 0 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK2",
"CAM_RESET2",
"CAM_VANA1";
- qcom,sensor-mode = <0>;
- qcom,cci-master = <1>;
+ sensor-mode = <0>;
+ cci-master = <1>;
status = "ok";
clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
clock-names = "cam_clk";
- qcom,clock-rates = <24000000>;
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
index a715025..f18137c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
@@ -16,9 +16,9 @@
cell-index = <0>;
reg = <0x00 0x00>;
compatible = "qcom,camera-flash";
- qcom,flash-source = <&pmi8998_flash0 &pmi8998_flash1>;
- qcom,torch-source = <&pmi8998_torch0 &pmi8998_torch1>;
- qcom,switch-source = <&pmi8998_switch0>;
+ flash-source = <&pmi8998_flash0 &pmi8998_flash1>;
+ torch-source = <&pmi8998_torch0 &pmi8998_torch1>;
+ switch-source = <&pmi8998_switch0>;
status = "ok";
};
@@ -26,9 +26,9 @@
cell-index = <1>;
reg = <0x01 0x00>;
compatible = "qcom,camera-flash";
- qcom,flash-source = <&pmi8998_flash2>;
- qcom,torch-source = <&pmi8998_torch2>;
- qcom,switch-source = <&pmi8998_switch1>;
+ flash-source = <&pmi8998_flash2>;
+ torch-source = <&pmi8998_torch2>;
+ switch-source = <&pmi8998_switch1>;
status = "ok";
};
@@ -78,36 +78,39 @@
cell-index = <0>;
reg = <0x0>;
compatible = "qcom,actuator";
- qcom,cci-master = <0>;
+ cci-master = <0>;
cam_vaf-supply = <&actuator_regulator>;
- qcom,cam-vreg-name = "cam_vaf";
- qcom,cam-vreg-min-voltage = <2800000>;
- qcom,cam-vreg-max-voltage = <2800000>;
- qcom,cam-vreg-op-mode = <0>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2800000>;
+ rgltr-max-voltage = <2800000>;
+ rgltr-load-current = <0>;
};
actuator_front: qcom,actuator@1 {
cell-index = <1>;
reg = <0x1>;
compatible = "qcom,actuator";
- qcom,cci-master = <1>;
+ cci-master = <1>;
cam_vaf-supply = <&actuator_regulator>;
- qcom,cam-vreg-name = "cam_vaf";
- qcom,cam-vreg-min-voltage = <2800000>;
- qcom,cam-vreg-max-voltage = <2800000>;
- qcom,cam-vreg-op-mode = <0>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2800000>;
+ rgltr-max-voltage = <2800000>;
+ rgltr-load-current = <0>;
};
ois_rear: qcom,ois@0 {
cell-index = <0>;
reg = <0x0>;
compatible = "qcom,ois";
- qcom,cci-master = <0>;
+ cci-master = <0>;
cam_vaf-supply = <&actuator_regulator>;
- qcom,cam-vreg-name = "cam_vaf";
- qcom,cam-vreg-min-voltage = <2800000>;
- qcom,cam-vreg-max-voltage = <2800000>;
- qcom,cam-vreg-op-mode = <0>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2800000>;
+ rgltr-max-voltage = <2800000>;
+ rgltr-load-current = <0>;
status = "disabled";
};
@@ -119,12 +122,13 @@
cam_vana-supply = <&pmi8998_bob>;
cam_vdig-supply = <&camera_rear_ldo>;
cam_clk-supply = <&titan_top_gdsc>;
- qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
"cam_clk";
- qcom,cam-vreg-min-voltage = <0 3312000 1050000 0>;
- qcom,cam-vreg-max-voltage = <0 3600000 1050000 0>;
- qcom,cam-vreg-op-mode = <0 80000 105000 0>;
- qcom,gpio-no-mux = <0>;
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <0 3312000 1050000 0>;
+ rgltr-max-voltage = <0 3600000 1050000 0>;
+ rgltr-load-current = <0 80000 105000 0>;
+ gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk0_active
&cam_sensor_rear_active>;
@@ -134,22 +138,23 @@
<&tlmm 80 0>,
<&tlmm 79 0>,
<&tlmm 27 0>;
- qcom,gpio-reset = <1>;
- qcom,gpio-vana = <2>;
- qcom,gpio-vaf = <3>;
- qcom,gpio-req-tbl-num = <0 1 2 3>;
- qcom,gpio-req-tbl-flags = <1 0 0 0>;
- qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+ gpio-reset = <1>;
+ gpio-vana = <2>;
+ gpio-vaf = <3>;
+ gpio-req-tbl-num = <0 1 2 3>;
+ gpio-req-tbl-flags = <1 0 0 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK0",
"CAM_RESET0",
"CAM_VANA0",
"CAM_VAF";
- qcom,sensor-position = <0>;
- qcom,sensor-mode = <0>;
- qcom,cci-master = <0>;
+ sensor-position = <0>;
+ sensor-mode = <0>;
+ cci-master = <0>;
status = "ok";
clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
clock-names = "cam_clk";
- qcom,clock-rates = <24000000>;
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
};
eeprom_rear_aux: qcom,eeprom@1 {
@@ -160,12 +165,13 @@
cam_vio-supply = <&pm8998_lvs1>;
cam_vana-supply = <&pmi8998_bob>;
cam_clk-supply = <&titan_top_gdsc>;
- qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana",
+ regulator-names = "cam_vdig", "cam_vio", "cam_vana",
"cam_clk";
- qcom,cam-vreg-min-voltage = <1050000 0 3312000 0>;
- qcom,cam-vreg-max-voltage = <1050000 0 3600000 0>;
- qcom,cam-vreg-op-mode = <105000 0 80000 0>;
- qcom,gpio-no-mux = <0>;
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <1050000 0 3312000 0>;
+ rgltr-max-voltage = <1050000 0 3600000 0>;
+ rgltr-load-current = <105000 0 80000 0>;
+ gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk2_active
&cam_sensor_rear2_active>;
@@ -174,20 +180,21 @@
gpios = <&tlmm 15 0>,
<&tlmm 9 0>,
<&tlmm 8 0>;
- qcom,gpio-reset = <1>;
- qcom,gpio-vana = <2>;
- qcom,gpio-req-tbl-num = <0 1 2>;
- qcom,gpio-req-tbl-flags = <1 0 0>;
- qcom,gpio-req-tbl-label = "CAMIF_MCLK1",
+ gpio-reset = <1>;
+ gpio-vana = <2>;
+ gpio-req-tbl-num = <0 1 2>;
+ gpio-req-tbl-flags = <1 0 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK1",
"CAM_RESET1",
"CAM_VANA1";
- qcom,sensor-position = <0>;
- qcom,sensor-mode = <0>;
- qcom,cci-master = <1>;
+ sensor-position = <0>;
+ sensor-mode = <0>;
+ cci-master = <1>;
status = "ok";
clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
clock-names = "cam_clk";
- qcom,clock-rates = <24000000>;
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
};
eeprom_front: qcom,eeprom@2 {
@@ -198,12 +205,13 @@
cam_vana-supply = <&pmi8998_bob>;
cam_vdig-supply = <&camera_ldo>;
cam_clk-supply = <&titan_top_gdsc>;
- qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
"cam_clk";
- qcom,cam-vreg-min-voltage = <0 3312000 1050000 0>;
- qcom,cam-vreg-max-voltage = <0 3600000 1050000 0>;
- qcom,cam-vreg-op-mode = <0 80000 105000 0>;
- qcom,gpio-no-mux = <0>;
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <0 3312000 1050000 0>;
+ rgltr-max-voltage = <0 3600000 1050000 0>;
+ rgltr-load-current = <0 80000 105000 0>;
+ gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk1_active
&cam_sensor_front_active>;
@@ -213,46 +221,48 @@
<&tlmm 28 0>,
<&tlmm 8 0>,
<&tlmm 27 0>;
- qcom,gpio-reset = <1>;
- qcom,gpio-vana = <2>;
- qcom,gpio-vaf = <3>;
- qcom,gpio-req-tbl-num = <0 1 2 3>;
- qcom,gpio-req-tbl-flags = <1 0 0 0>;
- qcom,gpio-req-tbl-label = "CAMIF_MCLK2",
+ gpio-reset = <1>;
+ gpio-vana = <2>;
+ gpio-vaf = <3>;
+ gpio-req-tbl-num = <0 1 2 3>;
+ gpio-req-tbl-flags = <1 0 0 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK2",
"CAM_RESET2",
"CAM_VANA2",
"CAM_VAF";
- qcom,sensor-position = <1>;
- qcom,sensor-mode = <0>;
- qcom,cci-master = <1>;
+ sensor-position = <1>;
+ sensor-mode = <0>;
+ cci-master = <1>;
status = "ok";
clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
clock-names = "cam_clk";
- qcom,clock-rates = <24000000>;
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
};
qcom,cam-sensor@0 {
cell-index = <0>;
compatible = "qcom,cam-sensor";
reg = <0x0>;
- qcom,csiphy-sd-index = <0>;
- qcom,sensor-position-roll = <90>;
- qcom,sensor-position-pitch = <0>;
- qcom,sensor-position-yaw = <180>;
- qcom,led-flash-src = <&led_flash_rear>;
- qcom,actuator-src = <&actuator_rear>;
- qcom,ois-src = <&ois_rear>;
- qcom,eeprom-src = <&eeprom_rear>;
+ csiphy-sd-index = <0>;
+ sensor-position-roll = <270>;
+ sensor-position-pitch = <0>;
+ sensor-position-yaw = <180>;
+ led-flash-src = <&led_flash_rear>;
+ actuator-src = <&actuator_rear>;
+ ois-src = <&ois_rear>;
+ eeprom-src = <&eeprom_rear>;
cam_vio-supply = <&pm8998_lvs1>;
cam_vana-supply = <&pmi8998_bob>;
cam_vdig-supply = <&camera_rear_ldo>;
cam_clk-supply = <&titan_top_gdsc>;
- qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
"cam_clk";
- qcom,cam-vreg-min-voltage = <0 3312000 1050000 0>;
- qcom,cam-vreg-max-voltage = <0 3600000 1050000 0>;
- qcom,cam-vreg-op-mode = <0 80000 105000 0>;
- qcom,gpio-no-mux = <0>;
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <0 3312000 1050000 0>;
+ rgltr-max-voltage = <0 3600000 1050000 0>;
+ rgltr-load-current = <0 80000 105000 0>;
+ gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk0_active
&cam_sensor_rear_active>;
@@ -261,40 +271,42 @@
gpios = <&tlmm 13 0>,
<&tlmm 80 0>,
<&tlmm 79 0>;
- qcom,gpio-reset = <1>;
- qcom,gpio-vana = <2>;
- qcom,gpio-req-tbl-num = <0 1 2>;
- qcom,gpio-req-tbl-flags = <1 0 0>;
- qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+ gpio-reset = <1>;
+ gpio-vana = <2>;
+ gpio-req-tbl-num = <0 1 2>;
+ gpio-req-tbl-flags = <1 0 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK0",
"CAM_RESET0",
"CAM_VANA";
- qcom,sensor-mode = <0>;
- qcom,cci-master = <0>;
+ sensor-mode = <0>;
+ cci-master = <0>;
status = "ok";
clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
clock-names = "cam_clk";
- qcom,clock-rates = <24000000>;
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
};
qcom,cam-sensor@1 {
cell-index = <1>;
compatible = "qcom,cam-sensor";
reg = <0x1>;
- qcom,csiphy-sd-index = <1>;
- qcom,sensor-position-roll = <90>;
- qcom,sensor-position-pitch = <0>;
- qcom,sensor-position-yaw = <180>;
- qcom,eeprom-src = <&eeprom_rear_aux>;
+ csiphy-sd-index = <1>;
+ sensor-position-roll = <90>;
+ sensor-position-pitch = <0>;
+ sensor-position-yaw = <180>;
+ eeprom-src = <&eeprom_rear_aux>;
cam_vdig-supply = <&camera_ldo>;
cam_vio-supply = <&pm8998_lvs1>;
cam_vana-supply = <&pmi8998_bob>;
cam_clk-supply = <&titan_top_gdsc>;
- qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana",
+ regulator-names = "cam_vdig", "cam_vio", "cam_vana",
"cam_clk";
- qcom,cam-vreg-min-voltage = <1050000 0 3312000 0>;
- qcom,cam-vreg-max-voltage = <1050000 0 3600000 0>;
- qcom,cam-vreg-op-mode = <105000 0 80000 0>;
- qcom,gpio-no-mux = <0>;
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <1050000 0 3312000 0>;
+ rgltr-max-voltage = <1050000 0 3600000 0>;
+ rgltr-load-current = <105000 0 80000 0>;
+ gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk2_active
&cam_sensor_rear2_active>;
@@ -303,42 +315,44 @@
gpios = <&tlmm 15 0>,
<&tlmm 9 0>,
<&tlmm 8 0>;
- qcom,gpio-reset = <1>;
- qcom,gpio-vana = <2>;
- qcom,gpio-req-tbl-num = <0 1 2>;
- qcom,gpio-req-tbl-flags = <1 0 0>;
- qcom,gpio-req-tbl-label = "CAMIF_MCLK1",
+ gpio-reset = <1>;
+ gpio-vana = <2>;
+ gpio-req-tbl-num = <0 1 2>;
+ gpio-req-tbl-flags = <1 0 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK1",
"CAM_RESET1",
"CAM_VANA1";
- qcom,sensor-mode = <0>;
- qcom,cci-master = <1>;
+ sensor-mode = <0>;
+ cci-master = <1>;
status = "ok";
clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
clock-names = "cam_clk";
- qcom,clock-rates = <24000000>;
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
};
qcom,cam-sensor@2 {
cell-index = <2>;
compatible = "qcom,cam-sensor";
reg = <0x02>;
- qcom,csiphy-sd-index = <2>;
- qcom,sensor-position-roll = <90>;
- qcom,sensor-position-pitch = <0>;
- qcom,sensor-position-yaw = <0>;
- qcom,eeprom-src = <&eeprom_front>;
- qcom,actuator-src = <&actuator_front>;
- qcom,led-flash-src = <&led_flash_front>;
+ csiphy-sd-index = <2>;
+ sensor-position-roll = <270>;
+ sensor-position-pitch = <0>;
+ sensor-position-yaw = <0>;
+ eeprom-src = <&eeprom_front>;
+ actuator-src = <&actuator_front>;
+ led-flash-src = <&led_flash_front>;
cam_vio-supply = <&pm8998_lvs1>;
cam_vana-supply = <&pmi8998_bob>;
cam_vdig-supply = <&camera_ldo>;
cam_clk-supply = <&titan_top_gdsc>;
- qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
"cam_clk";
- qcom,cam-vreg-min-voltage = <0 3312000 1050000 0>;
- qcom,cam-vreg-max-voltage = <0 3600000 1050000 0>;
- qcom,cam-vreg-op-mode = <0 80000 105000 0>;
- qcom,gpio-no-mux = <0>;
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <0 3312000 1050000 0>;
+ rgltr-max-voltage = <0 3600000 1050000 0>;
+ rgltr-load-current = <0 80000 105000 0>;
+ gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk1_active
&cam_sensor_front_active>;
@@ -347,18 +361,19 @@
gpios = <&tlmm 14 0>,
<&tlmm 28 0>,
<&tlmm 8 0>;
- qcom,gpio-reset = <1>;
- qcom,gpio-vana = <2>;
- qcom,gpio-req-tbl-num = <0 1 2>;
- qcom,gpio-req-tbl-flags = <1 0 0>;
- qcom,gpio-req-tbl-label = "CAMIF_MCLK2",
+ gpio-reset = <1>;
+ gpio-vana = <2>;
+ gpio-req-tbl-num = <0 1 2>;
+ gpio-req-tbl-flags = <1 0 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK2",
"CAM_RESET2",
"CAM_VANA1";
- qcom,sensor-mode = <0>;
- qcom,cci-master = <1>;
+ sensor-mode = <0>;
+ cci-master = <1>;
status = "ok";
clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
clock-names = "cam_clk";
- qcom,clock-rates = <24000000>;
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
index 91b8738..3fa0ab3 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
@@ -22,12 +22,13 @@
compatible = "qcom,csiphy-v1.0", "qcom,csiphy";
reg = <0x0ac65000 0x1000>;
reg-names = "csiphy";
+ reg-cam-base = <0x65000>;
interrupts = <0 477 0>;
interrupt-names = "csiphy";
gdscr-supply = <&titan_top_gdsc>;
- qcom,cam-vreg-name = "gdscr";
- qcom,csi-vdd-voltage = <1200000>;
- qcom,mipi-csi-vdd-supply = <&pm8998_l26>;
+ regulator-names = "gdscr";
+ csi-vdd-voltage = <1200000>;
+ mipi-csi-vdd-supply = <&pm8998_l26>;
clocks = <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
<&clock_camcc CAM_CC_SOC_AHB_CLK>,
<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
@@ -35,9 +36,7 @@
<&clock_camcc CAM_CC_CPHY_RX_CLK_SRC>,
<&clock_camcc CAM_CC_CSIPHY0_CLK>,
<&clock_camcc CAM_CC_CSI0PHYTIMER_CLK_SRC>,
- <&clock_camcc CAM_CC_CSI0PHYTIMER_CLK>,
- <&clock_camcc CAM_CC_IFE_0_CSID_CLK>,
- <&clock_camcc CAM_CC_IFE_0_CSID_CLK_SRC>;
+ <&clock_camcc CAM_CC_CSI0PHYTIMER_CLK>;
clock-names = "camnoc_axi_clk",
"soc_ahb_clk",
"slow_ahb_src_clk",
@@ -45,11 +44,10 @@
"cphy_rx_clk_src",
"csiphy0_clk",
"csi0phytimer_clk_src",
- "csi0phytimer_clk",
- "ife_0_csid_clk",
- "ife_0_csid_clk_src";
- qcom,clock-rates =
- <0 0 80000000 0 320000000 0 269333333 0 0 384000000>;
+ "csi0phytimer_clk";
+ clock-cntl-level = "turbo";
+ clock-rates =
+ <0 0 0 0 320000000 0 269333333 0>;
status = "ok";
};
@@ -58,12 +56,13 @@
compatible = "qcom,csiphy-v1.0", "qcom,csiphy";
reg = <0xac66000 0x1000>;
reg-names = "csiphy";
+ reg-cam-base = <0x66000>;
interrupts = <0 478 0>;
interrupt-names = "csiphy";
gdscr-supply = <&titan_top_gdsc>;
- qcom,cam-vreg-name = "gdscr";
- qcom,csi-vdd-voltage = <1200000>;
- qcom,mipi-csi-vdd-supply = <&pm8998_l26>;
+ regulator-names = "gdscr";
+ csi-vdd-voltage = <1200000>;
+ mipi-csi-vdd-supply = <&pm8998_l26>;
clocks = <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
<&clock_camcc CAM_CC_SOC_AHB_CLK>,
<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
@@ -71,9 +70,7 @@
<&clock_camcc CAM_CC_CPHY_RX_CLK_SRC>,
<&clock_camcc CAM_CC_CSIPHY1_CLK>,
<&clock_camcc CAM_CC_CSI1PHYTIMER_CLK_SRC>,
- <&clock_camcc CAM_CC_CSI1PHYTIMER_CLK>,
- <&clock_camcc CAM_CC_IFE_1_CSID_CLK>,
- <&clock_camcc CAM_CC_IFE_1_CSID_CLK_SRC>;
+ <&clock_camcc CAM_CC_CSI1PHYTIMER_CLK>;
clock-names = "camnoc_axi_clk",
"soc_ahb_clk",
"slow_ahb_src_clk",
@@ -81,11 +78,10 @@
"cphy_rx_clk_src",
"csiphy1_clk",
"csi1phytimer_clk_src",
- "csi1phytimer_clk",
- "ife_1_csid_clk",
- "ife_1_csid_clk_src";
- qcom,clock-rates =
- <0 0 80000000 0 320000000 0 269333333 0 0 384000000>;
+ "csi1phytimer_clk";
+ clock-cntl-level = "turbo";
+ clock-rates =
+ <0 0 0 0 320000000 0 269333333 0>;
status = "ok";
};
@@ -95,12 +91,13 @@
compatible = "qcom,csiphy-v1.0", "qcom,csiphy";
reg = <0xac67000 0x1000>;
reg-names = "csiphy";
+ reg-cam-base = <0x67000>;
interrupts = <0 479 0>;
interrupt-names = "csiphy";
gdscr-supply = <&titan_top_gdsc>;
- qcom,cam-vreg-name = "gdscr";
- qcom,csi-vdd-voltage = <1200000>;
- qcom,mipi-csi-vdd-supply = <&pm8998_l26>;
+ regulator-names = "gdscr";
+ csi-vdd-voltage = <1200000>;
+ mipi-csi-vdd-supply = <&pm8998_l26>;
clocks = <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
<&clock_camcc CAM_CC_SOC_AHB_CLK>,
<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
@@ -108,9 +105,7 @@
<&clock_camcc CAM_CC_CPHY_RX_CLK_SRC>,
<&clock_camcc CAM_CC_CSIPHY2_CLK>,
<&clock_camcc CAM_CC_CSI2PHYTIMER_CLK_SRC>,
- <&clock_camcc CAM_CC_CSI2PHYTIMER_CLK>,
- <&clock_camcc CAM_CC_IFE_LITE_CSID_CLK>,
- <&clock_camcc CAM_CC_IFE_LITE_CSID_CLK_SRC>;
+ <&clock_camcc CAM_CC_CSI2PHYTIMER_CLK>;
clock-names = "camnoc_axi_clk",
"soc_ahb_clk",
"slow_ahb_src_clk",
@@ -118,26 +113,26 @@
"cphy_rx_clk_src",
"csiphy2_clk",
"csi2phytimer_clk_src",
- "csi2phytimer_clk",
- "ife_lite_csid_clk",
- "ife_lite_csid_clk_src";
- qcom,clock-rates =
- <0 0 80000000 0 320000000 0 269333333 0 0 384000000>;
+ "csi2phytimer_clk";
+ clock-cntl-level = "turbo";
+ clock-rates =
+ <0 0 0 0 320000000 0 269333333 0>;
status = "ok";
};
cam_cci: qcom,cci@ac4a000 {
cell-index = <0>;
compatible = "qcom,cci";
- reg = <0xac4a000 0x4000>;
#address-cells = <1>;
#size-cells = <0>;
+ reg = <0xac4a000 0x4000>;
reg-names = "cci";
- interrupts = <0 460 0>;
+ reg-cam-base = <0x4a000>;
interrupt-names = "cci";
+ interrupts = <0 460 0>;
status = "ok";
gdscr-supply = <&titan_top_gdsc>;
- qcom,cam-vreg-name = "gdscr";
+ regulator-names = "gdscr";
clocks = <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
<&clock_camcc CAM_CC_SOC_AHB_CLK>,
<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
@@ -150,17 +145,19 @@
"cpas_ahb_clk",
"cci_clk",
"cci_clk_src";
- qcom,clock-rates = <0 0 80000000 0 0 37500000>;
- pinctrl-names = "cci_default", "cci_suspend";
+ src-clock-name = "cci_clk_src";
+ clock-cntl-level = "turbo";
+ clock-rates = <0 0 0 0 0 37500000>;
+ pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cci0_active &cci1_active>;
pinctrl-1 = <&cci0_suspend &cci1_suspend>;
gpios = <&tlmm 17 0>,
<&tlmm 18 0>,
<&tlmm 19 0>,
<&tlmm 20 0>;
- qcom,gpio-tbl-num = <0 1 2 3>;
- qcom,gpio-tbl-flags = <1 1 1 1>;
- qcom,gpio-tbl-label = "CCI_I2C_DATA0",
+ gpio-req-tbl-num = <0 1 2 3>;
+ gpio-req-tbl-flags = <1 1 1 1>;
+ gpio-req-tbl-label = "CCI_I2C_DATA0",
"CCI_I2C_CLK0",
"CCI_I2C_DATA1",
"CCI_I2C_CLK1";
@@ -249,6 +246,23 @@
};
};
+ msm_cam_smmu_jpeg {
+ compatible = "qcom,msm-cam-smmu-cb";
+ iommus = <&apps_smmu 0x1060 0x8>,
+ <&apps_smmu 0x1068 0x8>;
+ label = "jpeg";
+ jpeg_iova_mem_map: iova-mem-map {
+ /* IO region is approximately 3.4 GB */
+ iova-mem-region-io {
+ iova-region-name = "io";
+ iova-region-start = <0x7400000>;
+ iova-region-len = <0xd8c00000>;
+ iova-region-id = <0x3>;
+ status = "ok";
+ };
+ };
+ };
+
msm_cam_icp_fw {
compatible = "qcom,msm-cam-smmu-fw-dev";
label="icp";
@@ -353,14 +367,28 @@
<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
src-clock-name = "slow_ahb_clk_src";
- clock-rates = <0 0 0 80000000 0 0>;
+ clock-rates = <0 0 0 0 0 0>,
+ <0 0 0 19200000 0 0>,
+ <0 0 0 60000000 0 0>,
+ <0 0 0 66660000 0 0>,
+ <0 0 0 73840000 0 0>,
+ <0 0 0 80000000 0 0>,
+ <0 0 0 80000000 0 0>;
+ clock-cntl-level = "suspend", "minsvs", "lowsvs", "svs",
+ "svs_l1", "nominal", "turbo";
qcom,msm-bus,name = "cam_ahb";
- qcom,msm-bus,num-cases = <4>;
+ qcom,msm-bus,num-cases = <7>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<MSM_BUS_MASTER_AMPSS_M0
MSM_BUS_SLAVE_CAMERA_CFG 0 0>,
<MSM_BUS_MASTER_AMPSS_M0
+ MSM_BUS_SLAVE_CAMERA_CFG 0 180000>,
+ <MSM_BUS_MASTER_AMPSS_M0
+ MSM_BUS_SLAVE_CAMERA_CFG 0 180000>,
+ <MSM_BUS_MASTER_AMPSS_M0
+ MSM_BUS_SLAVE_CAMERA_CFG 0 300000>,
+ <MSM_BUS_MASTER_AMPSS_M0
MSM_BUS_SLAVE_CAMERA_CFG 0 300000>,
<MSM_BUS_MASTER_AMPSS_M0
MSM_BUS_SLAVE_CAMERA_CFG 0 640000>,
@@ -378,7 +406,7 @@
RPMH_REGULATOR_LEVEL_TURBO
RPMH_REGULATOR_LEVEL_TURBO_L1>;
vdd-corner-ahb-mapping = "suspend", "suspend",
- "svs", "svs", "svs", "svs",
+ "minsvs", "lowsvs", "svs", "svs_l1",
"nominal", "nominal", "nominal",
"turbo", "turbo";
client-id-based;
@@ -387,7 +415,7 @@
"csid0", "csid1", "csid2",
"ife0", "ife1", "ife2", "ipe0",
"ipe1", "cam-cdm-intf0", "cpas-cdm0", "bps0",
- "icp0", "jpeg-dma0", "jpeg0", "fd0";
+ "icp0", "jpeg-dma0", "jpeg-enc0", "fd0";
client-axi-port-names =
"cam_hf_1", "cam_hf_2", "cam_hf_2", "cam_sf_1",
"cam_hf_1", "cam_hf_2", "cam_hf_2",
@@ -480,8 +508,8 @@
label = "cam-cdm-intf";
num-hw-cdm = <1>;
cdm-client-names = "vfe",
- "jpeg-dma",
- "jpeg",
+ "jpegdma",
+ "jpegenc",
"fd";
status = "ok";
};
@@ -508,6 +536,7 @@
<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
clock-rates = <0 0 0 0 0>;
+ clock-cntl-level = "svs";
cdm-client-names = "ife";
status = "ok";
};
@@ -555,7 +584,8 @@
<&clock_camcc CAM_CC_IFE_0_CLK_SRC>,
<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
<&clock_camcc CAM_CC_IFE_0_AXI_CLK>;
- clock-rates = <0 0 0 0 0 0 500000000 0 0 0 60000000 0 0>;
+ clock-rates = <0 0 0 0 0 0 500000000 0 0 0 600000000 0 0>;
+ clock-cntl-level = "turbo";
src-clock-name = "ife_csid_clk_src";
status = "ok";
};
@@ -590,6 +620,7 @@
<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
<&clock_camcc CAM_CC_IFE_0_AXI_CLK>;
clock-rates = <0 0 0 0 0 0 600000000 0 0>;
+ clock-cntl-level = "turbo";
src-clock-name = "ife_clk_src";
clock-names-option = "ife_dsp_clk";
clocks-option = <&clock_camcc CAM_CC_IFE_0_DSP_CLK>;
@@ -634,7 +665,8 @@
<&clock_camcc CAM_CC_IFE_1_CLK_SRC>,
<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
<&clock_camcc CAM_CC_IFE_1_AXI_CLK>;
- clock-rates = <0 0 0 0 0 0 500000000 0 0 0 60000000 0 0>;
+ clock-rates = <0 0 0 0 0 0 500000000 0 0 0 600000000 0 0>;
+ clock-cntl-level = "turbo";
src-clock-name = "ife_csid_clk_src";
status = "ok";
};
@@ -669,6 +701,7 @@
<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
<&clock_camcc CAM_CC_IFE_1_AXI_CLK>;
clock-rates = <0 0 0 0 0 0 600000000 0 0>;
+ clock-cntl-level = "turbo";
src-clock-name = "ife_clk_src";
clock-names-option = "ife_dsp_clk";
clocks-option = <&clock_camcc CAM_CC_IFE_1_DSP_CLK>;
@@ -710,7 +743,8 @@
<&clock_camcc CAM_CC_IFE_LITE_CLK>,
<&clock_camcc CAM_CC_IFE_LITE_CLK_SRC>,
<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
- clock-rates = <0 0 0 0 0 0 384000000 0 0 0 40400000 0>;
+ clock-rates = <0 0 0 0 0 0 384000000 0 0 0 404000000 0>;
+ clock-cntl-level = "turbo";
src-clock-name = "ife_csid_clk_src";
status = "ok";
};
@@ -741,7 +775,8 @@
<&clock_camcc CAM_CC_IFE_LITE_CLK>,
<&clock_camcc CAM_CC_IFE_LITE_CLK_SRC>,
<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
- qcom,clock-rates = <0 0 0 0 0 0 404000000 0>;
+ clock-rates = <0 0 0 0 0 0 404000000 0>;
+ clock-cntl-level = "turbo";
src-clock-name = "ife_clk_src";
status = "ok";
};
@@ -790,6 +825,7 @@
<&clock_camcc CAM_CC_ICP_CLK_SRC>;
clock-rates = <0 0 400000000 0 0 0 0 0 600000000>;
+ clock-cntl-level = "turbo";
fw_name = "CAMERA_ICP.elf";
status = "ok";
};
@@ -811,6 +847,7 @@
<&clock_camcc CAM_CC_IPE_0_CLK_SRC>;
clock-rates = <0 0 0 0 600000000>;
+ clock-cntl-level = "turbo";
status = "ok";
};
@@ -831,6 +868,7 @@
<&clock_camcc CAM_CC_IPE_1_CLK_SRC>;
clock-rates = <0 0 0 0 600000000>;
+ clock-cntl-level = "turbo";
status = "ok";
};
@@ -851,6 +889,79 @@
<&clock_camcc CAM_CC_BPS_CLK_SRC>;
clock-rates = <0 0 0 0 600000000>;
+ clock-cntl-level = "turbo";
status = "ok";
};
+
+ qcom,cam-jpeg {
+ compatible = "qcom,cam-jpeg";
+ compat-hw-name = "qcom,jpegenc",
+ "qcom,jpegdma";
+ num-jpeg-enc = <1>;
+ num-jpeg-dma = <1>;
+ status = "ok";
+ };
+
+ cam_jpeg_enc: qcom,jpegenc@ac4e000 {
+ cell-index = <0>;
+ compatible = "qcom,cam_jpeg_enc";
+ reg-names = "jpege_hw";
+ reg = <0xac4e000 0x4000>;
+ reg-cam-base = <0x4e000>;
+ interrupt-names = "jpeg";
+ interrupts = <0 474 0>;
+ regulator-names = "camss-vdd";
+ camss-vdd-supply = <&titan_top_gdsc>;
+ clock-names = "camera_ahb",
+ "camera_axi",
+ "soc_ahb_clk",
+ "cpas_ahb_clk",
+ "camnoc_axi_clk",
+ "jpegenc_clk_src",
+ "jpegenc_clk";
+ clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+ <&clock_gcc GCC_CAMERA_AXI_CLK>,
+ <&clock_camcc CAM_CC_SOC_AHB_CLK>,
+ <&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+ <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+ <&clock_camcc CAM_CC_JPEG_CLK_SRC>,
+ <&clock_camcc CAM_CC_JPEG_CLK>;
+
+ clock-rates = <0 0 0 0 0 600000000 0>;
+ src-clock-name = "jpegenc_clk_src";
+ clock-cntl-level = "nominal";
+ status = "ok";
+ };
+
+ cam_jpeg_dma: qcom,jpegdma@0xac52000{
+ cell-index = <0>;
+ compatible = "qcom,cam_jpeg_dma";
+ reg-names = "jpegdma_hw";
+ reg = <0xac52000 0x4000>;
+ reg-cam-base = <0x52000>;
+ interrupt-names = "jpegdma";
+ interrupts = <0 475 0>;
+ regulator-names = "camss-vdd";
+ camss-vdd-supply = <&titan_top_gdsc>;
+ clock-names = "camera_ahb",
+ "camera_axi",
+ "soc_ahb_clk",
+ "cpas_ahb_clk",
+ "camnoc_axi_clk",
+ "jpegdma_clk_src",
+ "jpegdma_clk";
+ clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+ <&clock_gcc GCC_CAMERA_AXI_CLK>,
+ <&clock_camcc CAM_CC_SOC_AHB_CLK>,
+ <&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+ <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+ <&clock_camcc CAM_CC_JPEG_CLK_SRC>,
+ <&clock_camcc CAM_CC_JPEG_CLK>;
+
+ clock-rates = <0 0 0 0 0 600000000 0>;
+ src-clock-name = "jpegdma_clk_src";
+ clock-cntl-level = "nominal";
+ status = "ok";
+ };
+
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cdp-audio-overlay.dtsi
new file mode 100644
index 0000000..68f2e51
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp-audio-overlay.dtsi
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sdm845-audio-overlay.dtsi"
+
+&soc {
+ sound-tavil {
+ qcom,us-euro-gpios = <&tavil_us_euro_sw>;
+ };
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-cdp-overlay.dts
index 7d7c9cf..7991aad 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp-overlay.dts
@@ -15,14 +15,17 @@
#include <dt-bindings/clock/qcom,gcc-sdm845.h>
#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
#include <dt-bindings/clock/qcom,rpmh.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include "sdm845-sde-display.dtsi"
#include "sdm845-cdp.dtsi"
+#include "sdm845-cdp-audio-overlay.dtsi"
/ {
model = "Qualcomm Technologies, Inc. SDM845 v1 CDP";
compatible = "qcom,sdm845-cdp", "qcom,sdm845", "qcom,cdp";
- qcom,msm-id = <321 0x0>;
+ qcom,msm-id = <321 0x10000>;
qcom,board-id = <1 0>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp.dts b/arch/arm64/boot/dts/qcom/sdm845-cdp.dts
index 22e3aea..0a6aa5e 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,6 +14,7 @@
/dts-v1/;
#include "sdm845.dtsi"
+#include "sdm845-sde-display.dtsi"
#include "sdm845-cdp.dtsi"
/ {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
index 0430ea4..8fca29c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
@@ -13,7 +13,7 @@
#include <dt-bindings/gpio/gpio.h>
#include "sdm845-camera-sensor-cdp.dtsi"
-/ {
+&vendor {
bluetooth: bt_wcn3990 {
compatible = "qca,wcn3990";
qca,bt-vdd-io-supply = <&pm8998_s3>;
@@ -37,10 +37,6 @@
};
&soc {
- sound-tavil {
- qcom,us-euro-gpios = <&tavil_us_euro_sw>;
- };
-
gpio_keys {
compatible = "gpio-keys";
label = "gpio-keys";
@@ -173,10 +169,6 @@
pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on>;
pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off>;
- qcom,clk-rates = <400000 20000000 25000000
- 50000000 100000000 200000000>;
- qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
-
extcon = <&extcon_storage_cd>;
status = "ok";
@@ -192,7 +184,7 @@
qcom,batteryless-platform;
};
-/ {
+&vendor {
extcon_usb1: extcon_usb1 {
compatible = "linux,extcon-usb-gpio";
id-gpio = <&pmi8998_gpios 9 GPIO_ACTIVE_HIGH>;
@@ -300,7 +292,7 @@
qcom,platform-reset-gpio = <&tlmm 6 0>;
};
-&dsi_dual_nt35597_truly_cmd_display {
+&dsi_nt35597_truly_dsc_cmd_display {
qcom,dsi-display-active;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
index 04a332e..d2189a7 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
@@ -245,6 +245,8 @@
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
+ qcom,msr-fix-req;
+
port {
tpdm_swao1_out_tpda_swao: endpoint {
remote-endpoint = <&tpda_swao_in_tpdm_swao1>;
@@ -819,6 +821,8 @@
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
+ qcom,msr-fix-req;
+
port {
tpdm_lpass_out_funnel_lpass: endpoint {
remote-endpoint = <&funnel_lpass_in_tpdm_lpass>;
@@ -837,6 +841,8 @@
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
+ qcom,msr-fix-req;
+
port {
tpdm_center_out_tpda: endpoint {
remote-endpoint = <&tpda_in_tpdm_center>;
@@ -855,6 +861,8 @@
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
+ qcom,msr-fix-req;
+
port {
tpdm_north_out_tpda: endpoint {
remote-endpoint = <&tpda_in_tpdm_north>;
@@ -1090,6 +1098,8 @@
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
+ qcom,msr-fix-req;
+
port {
tpdm_mm_out_funnel_dl_mm: endpoint {
remote-endpoint = <&funnel_dl_mm_in_tpdm_mm>;
@@ -1181,6 +1191,8 @@
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
+ qcom,msr-fix-req;
+
port {
tpdm_turing_out_funnel_turing: endpoint {
remote-endpoint =
@@ -1235,6 +1247,8 @@
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
+ qcom,msr-fix-req;
+
port {
tpdm_ddr_out_funnel_ddr_0: endpoint {
remote-endpoint = <&funnel_ddr_0_in_tpdm_ddr>;
@@ -1376,7 +1390,6 @@
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
- qcom,msr-fix-req;
port{
tpdm_spss_out_tpda_spss: endpoint {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
index 1ce68e1..de50aec 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
@@ -27,26 +27,20 @@
compatible = "qcom,devbw";
governor = "bw_vbif";
qcom,src-dst-ports = <26 512>;
- /*
- * active-only flag is used while registering the bus
- * governor.It helps release the bus vote when the CPU
- * subsystem is inactiv3
- */
- qcom,active-only;
qcom,bw-tbl =
< 0 /* off */ >,
- < 762 /* 100 MHz */ >,
- < 1144 /* 150 MHz */ >,
- < 1525 /* 200 MHz */ >,
- < 2288 /* 300 MHz */ >,
- < 3143 /* 412 MHz */ >,
- < 4173 /* 547 MHz */ >,
- < 5195 /* 681 MHz */ >,
- < 5859 /* 768 MHz */ >,
- < 7759 /* 1017 MHz */ >,
- < 9887 /* 1296 MHz */ >,
- < 11863 /* 1555 MHz */ >,
- < 13763 /* 1804 MHz */ >;
+ < 381 /* 100 MHz */ >,
+ < 572 /* 150 MHz */ >,
+ < 762 /* 200 MHz */ >,
+ < 1144 /* 300 MHz */ >,
+ < 1571 /* 412 MHz */ >,
+ < 2086 /* 547 MHz */ >,
+ < 2597 /* 681 MHz */ >,
+ < 2929 /* 768 MHz */ >,
+ < 3879 /* 1017 MHz */ >,
+ < 4943 /* 1296 MHz */ >,
+ < 5931 /* 1555 MHz */ >,
+ < 6881 /* 1804 MHz */ >;
};
msm_gpu: qcom,kgsl-3d0@5000000 {
@@ -61,7 +55,7 @@
qcom,chipid = <0x06030000>;
- qcom,initial-pwrlevel = <2>;
+ qcom,initial-pwrlevel = <5>;
qcom,gpu-quirk-hfi-use-reg;
@@ -99,23 +93,24 @@
qcom,gpubw-dev = <&gpubw>;
qcom,bus-control;
qcom,msm-bus,name = "grp3d";
+ qcom,bus-width = <32>;
qcom,msm-bus,num-cases = <13>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<26 512 0 0>,
- <26 512 0 800000>, // 1 bus=100
- <26 512 0 1200000>, // 2 bus=150
- <26 512 0 1600000>, // 3 bus=200
- <26 512 0 2400000>, // 4 bus=300
- <26 512 0 3296000>, // 5 bus=412
- <26 512 0 4376000>, // 6 bus=547
- <26 512 0 5448000>, // 7 bus=681
- <26 512 0 6144000>, // 8 bus=768
- <26 512 0 8136000>, // 9 bus=1017
- <26 512 0 10368000>, // 10 bus=1296
- <26 512 0 12440000>, // 11 bus=1555
- <26 512 0 14432000>; // 12 bus=1804
+ <26 512 0 400000>, // 1 bus=100
+ <26 512 0 600000>, // 2 bus=150
+ <26 512 0 800000>, // 3 bus=200
+ <26 512 0 1200000>, // 4 bus=300
+ <26 512 0 1648000>, // 5 bus=412
+ <26 512 0 2188000>, // 6 bus=547
+ <26 512 0 2724000>, // 7 bus=681
+ <26 512 0 3072000>, // 8 bus=768
+ <26 512 0 4068000>, // 9 bus=1017
+ <26 512 0 5184000>, // 10 bus=1296
+ <26 512 0 6220000>, // 11 bus=1555
+ <26 512 0 7216000>; // 12 bus=1804
/* GDSC regulator names */
regulator-names = "vddcx", "vdd";
@@ -170,35 +165,69 @@
qcom,gpu-pwrlevel@0 {
reg = <0>;
- qcom,gpu-freq = <280000000>;
- qcom,bus-freq = <4>;
- qcom,bus-min = <3>;
- qcom,bus-max = <5>;
+ qcom,gpu-freq = <600000000>;
+ qcom,bus-freq = <12>;
+ qcom,bus-min = <11>;
+ qcom,bus-max = <12>;
};
qcom,gpu-pwrlevel@1 {
reg = <1>;
- qcom,gpu-freq = <280000000>;
- qcom,bus-freq = <4>;
- qcom,bus-min = <3>;
- qcom,bus-max = <5>;
+ qcom,gpu-freq = <548000000>;
+ qcom,bus-freq = <12>;
+ qcom,bus-min = <10>;
+ qcom,bus-max = <12>;
};
qcom,gpu-pwrlevel@2 {
reg = <2>;
+ qcom,gpu-freq = <487000000>;
+ qcom,bus-freq = <10>;
+ qcom,bus-min = <9>;
+ qcom,bus-max = <11>;
+ };
+
+
+ qcom,gpu-pwrlevel@3 {
+ reg = <3>;
+ qcom,gpu-freq = <425000000>;
+ qcom,bus-freq = <9>;
+ qcom,bus-min = <8>;
+ qcom,bus-max = <10>;
+ };
+
+ qcom,gpu-pwrlevel@4 {
+ reg = <4>;
+ qcom,gpu-freq = <338000000>;
+ qcom,bus-freq = <8>;
+ qcom,bus-min = <7>;
+ qcom,bus-max = <9>;
+ };
+
+
+ qcom,gpu-pwrlevel@5 {
+ reg = <5>;
qcom,gpu-freq = <280000000>;
+ qcom,bus-freq = <5>;
+ qcom,bus-min = <5>;
+ qcom,bus-max = <7>;
+ };
+
+ qcom,gpu-pwrlevel@6 {
+ reg = <6>;
+ qcom,gpu-freq = <210000000>;
qcom,bus-freq = <4>;
qcom,bus-min = <3>;
qcom,bus-max = <5>;
};
- qcom,gpu-pwrlevel@3 {
- reg = <3>;
- qcom,gpu-freq = <280000000>;
- qcom,bus-freq = <4>;
- qcom,bus-min = <3>;
- qcom,bus-max = <5>;
+ qcom,gpu-pwrlevel@7 {
+ reg = <7>;
+ qcom,gpu-freq = <0>;
+ qcom,bus-freq = <0>;
+ qcom,bus-min = <0>;
+ qcom,bus-max = <0>;
};
};
@@ -218,7 +247,6 @@
clock-names = "iface_clk", "mem_clk", "mem_iface_clk";
qcom,secure_align_mask = <0xfff>;
- qcom,global_pt;
qcom,hyp_secure_alloc;
gfx3d_user: gfx3d_user {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi b/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi
new file mode 100644
index 0000000..b9e9c34
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi
@@ -0,0 +1,393 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/ {
+ /delete-node/regulator-pm8998-s4;
+};
+
+&dsi_sharp_4k_dsc_video_display {
+ /delete-property/ vddio-supply;
+};
+
+&dsi_sharp_4k_dsc_cmd_display {
+ /delete-property/ vddio-supply;
+};
+
+&dsi_sharp_1080_cmd_display {
+ /delete-property/ vddio-supply;
+};
+
+&dsi_dual_sharp_1080_120hz_cmd_display {
+ /delete-property/ vddio-supply;
+};
+
+&dsi_dual_nt35597_truly_video_display {
+ vddio-supply = <&pm660_l11>;
+ lab-supply = <&lcdb_ldo_vreg>;
+ ibb-supply = <&lcdb_ncp_vreg>;
+};
+
+&dsi_dual_nt35597_truly_cmd_display {
+ vddio-supply = <&pm660_l11>;
+ lab-supply = <&lcdb_ldo_vreg>;
+ ibb-supply = <&lcdb_ncp_vreg>;
+};
+
+&dsi_nt35597_truly_dsc_cmd_display {
+ vddio-supply = <&pm660_l11>;
+ lab-supply = <&lcdb_ldo_vreg>;
+ ibb-supply = <&lcdb_ncp_vreg>;
+};
+
+&dsi_nt35597_truly_dsc_video_display {
+ vddio-supply = <&pm660_l11>;
+ lab-supply = <&lcdb_ldo_vreg>;
+ ibb-supply = <&lcdb_ncp_vreg>;
+};
+
+&sde_dp {
+ status = "disabled";
+ /delete-property/ vdda-1p2-supply;
+ /delete-property/ vdda-0p9-supply;
+ /delete-property/ qcom,dp-usbpd-detection;
+};
+
+&mdss_dp_pll {
+ status = "disabled";
+};
+
+&bluetooth {
+ /delete-property/ qca,bt-vdd-io-supply;
+ /delete-property/ qca,bt-vdd-xtal-supply;
+ /delete-property/ qca,bt-vdd-core-supply;
+ /delete-property/ qca,bt-vdd-pa-supply;
+ /delete-property/ qca,bt-vdd-ldo-supply;
+};
+
+&ufsphy_mem {
+ /delete-property/ vdda-phy-supply;
+ /delete-property/ vdda-pll-supply;
+};
+
+&ufshc_mem {
+ /delete-property/ vcc-supply;
+ /delete-property/ vccq2-supply;
+ /delete-property/ qcom,vddp-ref-clk-supply;
+};
+
+&ufsphy_card {
+ /delete-property/ vdda-phy-supply;
+ /delete-property/ vdda-pll-supply;
+};
+
+&ufshc_card {
+ /delete-property/ vcc-supply;
+ /delete-property/ vccq2-supply;
+ /delete-property/ qcom,vddp-ref-clk-supply;
+};
+
+&sdhc_2 {
+ /delete-property/ vdd-supply;
+ /delete-property/ vdd-io-supply;
+};
+
+&vendor {
+ extcon_usb1 {
+ /delete-property/ id-gpio;
+ /delete-property/ vbus-gpio;
+ /delete-property/ pinctrl-names;
+ /delete-property/ pinctrl-0;
+ };
+
+ usb1_vbus_vreg {
+ /delete-property/ gpio;
+ /delete-property/ pinctrl-names;
+ /delete-property/ pinctrl-0;
+ };
+};
+
+&qupv3_se3_i2c {
+ nq@28 {
+ /delete-property/ qcom,nq-clkreq;
+ /* delete "nfc_clk_default" -- PMIC GPIO */
+ pinctrl-0 = <&nfc_int_active &nfc_enable_active>;
+ };
+};
+
+&pcie0 {
+ /delete-property/ vreg-1.8-supply;
+ /delete-property/ vreg-0.9-supply;
+ /delete-property/ vreg-cx-supply;
+};
+
+&cam_csiphy0 {
+ /delete-property/ mipi-csi-vdd-supply;
+};
+
+&cam_csiphy1 {
+ /delete-property/ mipi-csi-vdd-supply;
+};
+
+&cam_csiphy2 {
+ /delete-property/ mipi-csi-vdd-supply;
+};
+
+&led_flash_rear {
+ /delete-property/ flash-source;
+ /delete-property/ torch-source;
+ /delete-property/ switch-source;
+};
+
+&led_flash_front {
+ /delete-property/ flash-source;
+ /delete-property/ torch-source;
+ /delete-property/ switch-source;
+};
+
+&actuator_regulator {
+ /delete-property/ vin-supply;
+};
+
+
+&eeprom_rear {
+ /delete-property/ cam_vio-supply;
+ /delete-property/ cam_vana-supply;
+};
+
+&eeprom_rear_aux {
+ /delete-property/ cam_vio-supply;
+ /delete-property/ cam_vana-supply;
+};
+
+&eeprom_front {
+ /delete-property/ cam_vio-supply;
+ /delete-property/ cam_vana-supply;
+};
+
+&cam_cci {
+ qcom,cam-sensor@0 {
+ /delete-property/ cam_vio-supply;
+ /delete-property/ cam_vana-supply;
+ };
+
+ qcom,cam-sensor@1 {
+ /delete-property/ cam_vio-supply;
+ /delete-property/ cam_vana-supply;
+ };
+
+ qcom,cam-sensor@2 {
+ /delete-property/ cam_vio-supply;
+ /delete-property/ cam_vana-supply;
+ };
+};
+
+&clock_gcc {
+ /delete-property/ vdd_cx-supply;
+ /delete-property/ vdd_cx_ao-supply;
+};
+
+&clock_videocc {
+ /delete-property/ vdd_cx-supply;
+};
+
+&clock_camcc {
+ /delete-property/ vdd_cx-supply;
+ /delete-property/ vdd_mx-supply;
+};
+
+&clock_dispcc {
+ /delete-property/ vdd_cx-supply;
+};
+
+&clock_gpucc {
+ /delete-property/ vdd_cx-supply;
+};
+
+&clock_gfx {
+ /delete-property/ vdd_gfx-supply;
+ /delete-property/ vdd_mx-supply;
+};
+
+&pil_modem {
+ /delete-property/ vdd_cx-supply;
+ /delete-property/ vdd_mx-supply;
+};
+
+&gpu_gx_gdsc {
+ /delete-property/ parent-supply;
+};
+
+&soc {
+ /delete-node/ gpio_keys;
+
+ qcom,lpass@17300000 {
+ /delete-property/ vdd_cx-supply;
+ };
+
+ qcom,ssc@5c00000 {
+ /delete-property/ vdd_cx-supply;
+ };
+
+ qcom,spss@1880000 {
+ /delete-property/ vdd_cx-supply;
+ /delete-property/ vdd_mx-supply;
+ };
+
+ qcom,turing@8300000 {
+ /delete-property/ vdd_cx-supply;
+ };
+
+ qcom,qbt1000 {
+ /delete-property/ qcom,finger-detect-gpio;
+ };
+
+ qcom,icnss@18800000 {
+ /delete-property/ vdd-0.8-cx-mx-supply;
+ /delete-property/ vdd-1.8-xo-supply;
+ /delete-property/ vdd-1.3-rfa-supply;
+ /delete-property/ vdd-3.3-ch0-supply;
+ };
+
+ qcom,mdss_dsi_ctrl0@ae94000 {
+ vdda-1p2-supply = <&pm660_l1>;
+ };
+
+ qcom,mdss_dsi_ctrl1@ae96000 {
+ vdda-1p2-supply = <&pm660_l1>;
+ };
+
+ qcom,mdss_dsi_phy0@ae94400 {
+ vdda-0p9-supply = <&pm660l_l1>;
+ };
+
+ qcom,mdss_dsi_phy0@ae96400 {
+ vdda-0p9-supply = <&pm660l_l1>;
+ };
+
+ gpio-regulator@1 {
+ /delete-property/ gpio;
+ /delete-property/ vin-supply;
+ /delete-property/ pinctrl-names;
+ /delete-property/ pinctrl-0;
+ };
+
+ gpio-regulator@2 {
+ /delete-property/ gpio;
+ /delete-property/ vin-supply;
+ /delete-property/ pinctrl-names;
+ /delete-property/ pinctrl-0;
+ };
+
+ /delete-node/ qcom,spmi-debug@6b22000;
+
+};
+
+&wil6210 {
+ /delete-property/ vdd-supply;
+ /delete-property/ vddio-supply;
+};
+
+&usb0 {
+ /delete-property/ extcon;
+};
+
+&qusb_phy0 {
+ /delete-property/ vdd-supply;
+ /delete-property/ vdda18-supply;
+ /delete-property/ vdda33-supply;
+};
+
+&usb_qmp_dp_phy {
+ /delete-property/ vdd-supply;
+ /delete-property/ core-supply;
+};
+
+&qusb_phy1 {
+ /delete-property/ vdd-supply;
+ /delete-property/ vdda18-supply;
+ /delete-property/ vdda33-supply;
+};
+
+&usb_qmp_phy {
+ /delete-property/ vdd-supply;
+ /delete-property/ core-supply;
+};
+
+&soc {
+ /* Delete all regulators */
+ /delete-node/ cprh-ctrl@17dc0000;
+ /delete-node/ cprh-ctrl@17db0000;
+ /delete-node/ rpmh-regulator-ebilvl;
+ /delete-node/ rpmh-regulator-smpa2;
+ /delete-node/ rpmh-regulator-smpa3;
+ /delete-node/ rpmh-regulator-smpa5;
+ /delete-node/ rpmh-regulator-mxlvl;
+ /delete-node/ rpmh-regulator-smpa7;
+ /delete-node/ rpmh-regulator-cxlvl;
+ /delete-node/ rpmh-regulator-ldoa1;
+ /delete-node/ rpmh-regulator-ldoa2;
+ /delete-node/ rpmh-regulator-ldoa3;
+ /delete-node/ rpmh-regulator-lmxlvl;
+ /delete-node/ rpmh-regulator-ldoa5;
+ /delete-node/ rpmh-regulator-ldoa6;
+ /delete-node/ rpmh-regulator-ldoa7;
+ /delete-node/ rpmh-regulator-ldoa8;
+ /delete-node/ rpmh-regulator-ldoa9;
+ /delete-node/ rpmh-regulator-ldoa10;
+ /delete-node/ rpmh-regulator-ldoa11;
+ /delete-node/ rpmh-regulator-ldoa12;
+ /delete-node/ rpmh-regulator-ldoa13;
+ /delete-node/ rpmh-regulator-ldoa14;
+ /delete-node/ rpmh-regulator-ldoa15;
+ /delete-node/ rpmh-regulator-ldoa16;
+ /delete-node/ rpmh-regulator-ldoa17;
+ /delete-node/ rpmh-regulator-ldoa18;
+ /delete-node/ rpmh-regulator-ldoa19;
+ /delete-node/ rpmh-regulator-ldoa20;
+ /delete-node/ rpmh-regulator-ldoa21;
+ /delete-node/ rpmh-regulator-ldoa22;
+ /delete-node/ rpmh-regulator-ldoa23;
+ /delete-node/ rpmh-regulator-ldoa24;
+ /delete-node/ rpmh-regulator-ldoa25;
+ /delete-node/ rpmh-regulator-ldoa26;
+ /delete-node/ rpmh-regulator-lcxlvl;
+ /delete-node/ rpmh-regulator-ldoa28;
+ /delete-node/ rpmh-regulator-vsa1;
+ /delete-node/ rpmh-regulator-vsa2;
+ /delete-node/ rpmh-regulator-bobb1;
+ /delete-node/ rpmh-regulator-gfxlvl;
+ /delete-node/ rpmh-regulator-msslvl;
+ /delete-node/ rpmh-regulator-smpc3;
+ /delete-node/ ext_5v_boost;
+};
+
+&spmi_bus {
+ /delete-node/ qcom,pm8998@0;
+ /delete-node/ qcom,pm8998@1;
+ /delete-node/ qcom,pmi8998@2;
+ /delete-node/ qcom,pmi8998@3;
+ /delete-node/ qcom,pm8005@4;
+ /delete-node/ qcom,pm8005@5;
+};
+
+
+#include "pm660.dtsi"
+#include "pm660l.dtsi"
+#include "sdm670-regulator.dtsi"
+
+&soc {
+ /delete-node/ thermal-zones;
+};
+
+&pm660l_wled {
+ qcom,led-strings-list = [01 02];
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-audio.dtsi b/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-audio.dtsi
new file mode 100644
index 0000000..f861ca3
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-audio.dtsi
@@ -0,0 +1,81 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sdm670-audio.dtsi"
+
+&msm_audio_ion {
+ iommus = <&apps_smmu 0x1821 0x0>;
+ qcom,smmu-sid-mask = /bits/ 64 <0xf>;
+};
+
+&qupv3_se8_spi {
+ status = "okay";
+};
+
+&pm660l_3 {
+ /delete-node/analog-codec;
+};
+
+&soc {
+ /delete-node/msm-sdw-codec@62ec1000;
+ /delete-node/sound;
+ /delete-node/cdc_pdm_pinctrl;
+ /delete-node/wsa_spkr_en1_pinctrl;
+ /delete-node/wsa_spkr_en2_pinctrl;
+ /delete-node/sdw_clk_data_pinctrl;
+};
+
+&msm_audio_ion {
+ iommus = <&apps_smmu 0x1821 0x0>;
+};
+
+&wcd9xxx_intc {
+ status = "okay";
+ qcom,gpio-connect = <&tlmm 54 0>;
+};
+
+&wdsp_mgr {
+ status = "okay";
+};
+
+&wdsp_glink {
+ status = "okay";
+};
+
+&slim_aud {
+ status = "okay";
+};
+
+&dai_slim {
+ status = "okay";
+};
+
+&wcd934x_cdc {
+ status = "okay";
+};
+
+&clock_audio_lnbb {
+ status = "okay";
+};
+
+&wcd_rst_gpio {
+ status = "okay";
+};
+
+&wcd9xxx_intc {
+ status = "okay";
+};
+
+&tavil_snd {
+ status = "okay";
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-cdp-overlay.dts
new file mode 100644
index 0000000..da59bcf
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-cdp-overlay.dts
@@ -0,0 +1,30 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sdm845-sde-display.dtsi"
+#include "sdm845-interposer-sdm670-cdp.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDM845 v1 Interposer SDM670 CDP";
+ compatible = "qcom,sdm845-cdp", "qcom,sdm845", "qcom,cdp";
+ qcom,msm-id = <321 0x0>;
+ qcom,board-id = <1 4>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-cdp.dts b/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-cdp.dts
new file mode 100644
index 0000000..ebb5e8f
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-cdp.dts
@@ -0,0 +1,24 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sdm845-interposer-sdm670.dtsi"
+#include "sdm845-sde-display.dtsi"
+#include "sdm845-interposer-sdm670-cdp.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. MSM sdm845 v1 Interposer SDM670 CDP";
+ compatible = "qcom,sdm845-cdp", "qcom,sdm845", "qcom,cdp";
+ qcom,board-id = <1 4>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-cdp.dtsi
new file mode 100644
index 0000000..ad15615
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-cdp.dtsi
@@ -0,0 +1,15 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sdm845-cdp.dtsi"
+#include "sdm845-interposer-pm660.dtsi"
+#include "sdm845-interposer-sdm670-audio.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-mtp-overlay.dts
new file mode 100644
index 0000000..3ca15b9
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-mtp-overlay.dts
@@ -0,0 +1,30 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sdm845-sde-display.dtsi"
+#include "sdm845-interposer-sdm670-mtp.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDM845 v1 Interposer SDM670 MTP";
+ compatible = "qcom,sdm845-mtp", "qcom,sdm845", "qcom,mtp";
+ qcom,msm-id = <321 0x0>;
+ qcom,board-id = <8 4>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-mtp.dts
new file mode 100644
index 0000000..39664f1
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-mtp.dts
@@ -0,0 +1,24 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sdm845-interposer-sdm670.dtsi"
+#include "sdm845-sde-display.dtsi"
+#include "sdm845-interposer-sdm670-mtp.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. MSM sdm845 v1 Interposer SDM670 MTP";
+ compatible = "qcom,sdm845-mtp", "qcom,sdm845", "qcom,mtp";
+ qcom,board-id = <8 4>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-mtp.dtsi
new file mode 100644
index 0000000..c709770
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-mtp.dtsi
@@ -0,0 +1,19 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sdm845-mtp.dtsi"
+#include "sdm845-interposer-pm660.dtsi"
+#include "sdm845-interposer-sdm670-audio.dtsi"
+
+&qupv3_se10_i2c {
+ /delete-node/ qcom,smb1355@8;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670.dts b/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670.dts
new file mode 100644
index 0000000..c5e4ae1
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670.dts
@@ -0,0 +1,21 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "sdm845-interposer-sdm670.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDM845 Interposer SDM670";
+ compatible = "qcom,sdm845";
+ qcom,msm-id = <321 0x0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670.dtsi
new file mode 100644
index 0000000..9341507
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670.dtsi
@@ -0,0 +1,20 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sdm845.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDM845 Interposer SDM670";
+ compatible = "qcom,sdm845";
+ qcom,msm-id = <321 0x0>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm845-ion.dtsi b/arch/arm64/boot/dts/qcom/sdm845-ion.dtsi
index 2579819..829dfcc 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-ion.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-ion.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -33,10 +33,10 @@
qcom,ion-heap-type = "DMA";
};
- qcom,ion-heap@13 { /* SPSS HEAP */
+ qcom,ion-heap@13 { /* SECURE SPSS HEAP */
reg = <13>;
- memory-region = <&sp_mem>;
- qcom,ion-heap-type = "DMA";
+ memory-region = <&secure_sp_mem>;
+ qcom,ion-heap-type = "HYP_CMA";
};
qcom,ion-heap@10 { /* SECURE DISPLAY HEAP */
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp-overlay.dts
index e299744..2d1d9b6 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp-overlay.dts
@@ -15,14 +15,17 @@
#include <dt-bindings/clock/qcom,gcc-sdm845.h>
#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
#include <dt-bindings/clock/qcom,rpmh.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include "sdm845-sde-display.dtsi"
#include "sdm845-mtp.dtsi"
+#include "sdm845-audio-overlay.dtsi"
/ {
model = "Qualcomm Technologies, Inc. SDM845 v1 MTP";
compatible = "qcom,sdm845-mtp", "qcom,sdm845", "qcom,mtp";
- qcom,msm-id = <321 0x0>;
+ qcom,msm-id = <321 0x10000>;
qcom,board-id = <8 0>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
index f7af60c..e74b342 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,6 +14,7 @@
/dts-v1/;
#include "sdm845.dtsi"
+#include "sdm845-sde-display.dtsi"
#include "sdm845-mtp.dtsi"
/ {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
index c3217e7..29d80a7 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
@@ -14,7 +14,7 @@
#include "sdm845-camera-sensor-mtp.dtsi"
#include "smb1355.dtsi"
-/ {
+&vendor {
bluetooth: bt_wcn3990 {
compatible = "qca,wcn3990";
qca,bt-vdd-io-supply = <&pm8998_s3>;
@@ -150,7 +150,7 @@
qcom,platform-reset-gpio = <&tlmm 6 0>;
};
-&dsi_dual_nt35597_truly_cmd_display {
+&dsi_nt35597_truly_dsc_cmd_display {
qcom,dsi-display-active;
};
@@ -245,10 +245,6 @@
pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on>;
pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off>;
- qcom,clk-rates = <400000 20000000 25000000
- 50000000 100000000 200000000>;
- qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
-
extcon = <&extcon_storage_cd>;
status = "ok";
@@ -260,7 +256,7 @@
pinctrl-1 = <&flash_led3_front_dis>;
};
-/{
+&vendor {
mtp_batterydata: qcom,battery-data {
qcom,batt-id-range-pct = <15>;
#include "fg-gen3-batterydata-itech-3000mah.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
index dc58f9c..f691740 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
@@ -1400,6 +1400,90 @@
};
};
+ quat_tdm {
+ quat_tdm_sleep: quat_tdm_sleep {
+ mux {
+ pins = "gpio58", "gpio59";
+ function = "qua_mi2s";
+ };
+
+ config {
+ pins = "gpio58", "gpio59";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ };
+ };
+
+ quat_tdm_active: quat_tdm_active {
+ mux {
+ pins = "gpio58", "gpio59";
+ function = "qua_mi2s";
+ };
+
+ config {
+ pins = "gpio58", "gpio59";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL */
+ };
+ };
+ };
+
+ quat_tdm_dout {
+ quat_tdm_dout_sleep: quat_tdm_dout_sleep {
+ mux {
+ pins = "gpio61";
+ function = "qua_mi2s";
+ };
+
+ config {
+ pins = "gpio61";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ };
+ };
+
+ quat_tdm_dout_active: quat_tdm_dout_active {
+ mux {
+ pins = "gpio61";
+ function = "qua_mi2s";
+ };
+
+ config {
+ pins = "gpio61";
+ drive-strength = <2>; /* 2 mA */
+ bias-disable; /* NO PULL */
+ };
+ };
+ };
+
+ quat_tdm_din {
+ quat_tdm_din_sleep: quat_tdm_din_sleep {
+ mux {
+ pins = "gpio60";
+ function = "qua_mi2s";
+ };
+
+ config {
+ pins = "gpio60";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ };
+ };
+
+ quat_tdm_din_active: quat_tdm_din_active {
+ mux {
+ pins = "gpio60";
+ function = "qua_mi2s";
+ };
+
+ config {
+ pins = "gpio60";
+ drive-strength = <2>; /* 2 mA */
+ bias-disable; /* NO PULL */
+ };
+ };
+ };
+
/* QUPv3 South SE mappings */
/* SE 0 pin mappings */
qupv3_se0_i2c_pins: qupv3_se0_i2c_pins {
@@ -2667,7 +2751,7 @@
config {
pins = "gpio80","gpio79";
- bias-disable; /* No PULL */
+ bias-pull-down; /* PULL DOWN */
drive-strength = <2>; /* 2 MA */
};
};
@@ -2723,7 +2807,7 @@
config {
pins = "gpio28";
- bias-disable; /* No PULL */
+ bias-pull-down; /* PULL DOWN */
drive-strength = <2>; /* 2 MA */
};
};
@@ -2780,7 +2864,7 @@
};
config {
pins = "gpio9","gpio8";
- bias-disable; /* No PULL */
+ bias-pull-down; /* PULL DOWN */
drive-strength = <2>; /* 2 MA */
};
};
@@ -2796,6 +2880,66 @@
bias-disable;
};
};
+
+	tsif0_signals_active: tsif0_signals_active {
+		tsif1_clk {
+			pins = "gpio89"; /* TSIF0 CLK */
+			function = "tsif1_clk";
+		};
+		tsif1_en {
+			pins = "gpio90"; /* TSIF0 Enable */
+			function = "tsif1_en";
+		};
+		tsif1_data {
+			pins = "gpio91"; /* TSIF0 DATA */
+			function = "tsif1_data";
+		};
+		signals_cfg {
+			pins = "gpio89", "gpio90", "gpio91";
+			drive-strength = <2>; /* 2 mA */
+			bias-pull-down; /* pull down */
+		};
+	};
+
+	/* sync signal is only used if configured to mode-2 */
+	tsif0_sync_active: tsif0_sync_active {
+		tsif1_sync {
+			pins = "gpio12"; /* TSIF0 SYNC */
+			function = "tsif1_sync";
+			drive-strength = <2>; /* 2 mA */
+			bias-pull-down; /* pull down */
+		};
+	};
+
+	tsif1_signals_active: tsif1_signals_active {
+		tsif2_clk {
+			pins = "gpio93"; /* TSIF1 CLK */
+			function = "tsif2_clk";
+		};
+		tsif2_en {
+			pins = "gpio94"; /* TSIF1 Enable */
+			function = "tsif2_en";
+		};
+		tsif2_data {
+			pins = "gpio95"; /* TSIF1 DATA */
+			function = "tsif2_data";
+		};
+		signals_cfg {
+			pins = "gpio93", "gpio94", "gpio95";
+			drive-strength = <2>; /* 2 mA */
+			bias-pull-down; /* pull down */
+		};
+	};
+
+	/* sync signal is only used if configured to mode-2 */
+	tsif1_sync_active: tsif1_sync_active {
+		tsif2_sync {
+			pins = "gpio96"; /* TSIF1 SYNC */
+			function = "tsif2_sync";
+			drive-strength = <2>; /* 2 mA */
+			bias-pull-down; /* pull down */
+		};
+	};
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
index 6806145..c8698c28 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
@@ -23,8 +23,6 @@
#size-cells = <0>;
label = "L3";
qcom,spm-device-names = "L3";
- qcom,cpu = <&CPU0 &CPU1 &CPU2 &CPU3 &CPU4 &CPU5 &CPU6
- &CPU7>;
qcom,psci-mode-shift = <4>;
qcom,psci-mode-mask = <0xfff>;
@@ -53,7 +51,7 @@
reg = <2>;
label = "l3-pc";
qcom,psci-mode = <0x4>;
- qcom,latency-us = <4562>;
+ qcom,latency-us = <3201>;
qcom,ss-power = <408>;
qcom,energy-overhead = <2421840>;
qcom,time-overhead = <5376>;
@@ -86,12 +84,12 @@
qcom,is-reset;
qcom,notify-rpm;
};
-
- qcom,pm-cpu {
+ qcom,pm-cpu@0 {
#address-cells = <1>;
#size-cells = <0>;
qcom,psci-mode-shift = <0>;
qcom,psci-mode-mask = <0xf>;
+ qcom,cpu = <&CPU0 &CPU1 &CPU2 &CPU3>;
qcom,pm-cpu-level@0 { /* C1 */
reg = <0>;
@@ -107,7 +105,7 @@
reg = <1>;
qcom,psci-cpu-mode = <0x2>;
qcom,spm-cpu-mode = "ret";
- qcom,latency-us = <86>;
+ qcom,latency-us = <119>;
qcom,ss-power = <449>;
qcom,energy-overhead = <78456>;
qcom,time-overhead = <167>;
@@ -117,7 +115,7 @@
reg = <2>;
qcom,spm-cpu-mode = "pc";
qcom,psci-cpu-mode = <0x3>;
- qcom,latency-us = <612>;
+ qcom,latency-us = <461>;
qcom,ss-power = <436>;
qcom,energy-overhead = <418225>;
qcom,time-overhead = <885>;
@@ -129,7 +127,59 @@
reg = <3>;
qcom,spm-cpu-mode = "rail-pc";
qcom,psci-cpu-mode = <0x4>;
- qcom,latency-us = <700>;
+ qcom,latency-us = <531>;
+ qcom,ss-power = <400>;
+ qcom,energy-overhead = <428225>;
+ qcom,time-overhead = <1000>;
+ qcom,is-reset;
+ qcom,use-broadcast-timer;
+ };
+ };
+
+ qcom,pm-cpu@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ qcom,psci-mode-shift = <0>;
+ qcom,psci-mode-mask = <0xf>;
+ qcom,cpu = <&CPU4 &CPU5 &CPU6 &CPU7>;
+
+ qcom,pm-cpu-level@0 { /* C1 */
+ reg = <0>;
+ qcom,spm-cpu-mode = "wfi";
+ qcom,psci-cpu-mode = <0x1>;
+ qcom,latency-us = <43>;
+ qcom,ss-power = <454>;
+ qcom,energy-overhead = <38639>;
+ qcom,time-overhead = <83>;
+ };
+
+ qcom,pm-cpu-level@1 { /* C2D */
+ reg = <1>;
+ qcom,psci-cpu-mode = <0x2>;
+ qcom,spm-cpu-mode = "ret";
+ qcom,latency-us = <116>;
+ qcom,ss-power = <449>;
+ qcom,energy-overhead = <78456>;
+ qcom,time-overhead = <167>;
+ };
+
+ qcom,pm-cpu-level@2 { /* C3 */
+ reg = <2>;
+ qcom,spm-cpu-mode = "pc";
+ qcom,psci-cpu-mode = <0x3>;
+ qcom,latency-us = <621>;
+ qcom,ss-power = <436>;
+ qcom,energy-overhead = <418225>;
+ qcom,time-overhead = <885>;
+ qcom,is-reset;
+ qcom,use-broadcast-timer;
+ };
+
+ qcom,pm-cpu-level@3 { /* C4 */
+ reg = <3>;
+ qcom,spm-cpu-mode = "rail-pc";
+ qcom,psci-cpu-mode = <0x4>;
+ qcom,latency-us = <1061>;
qcom,ss-power = <400>;
qcom,energy-overhead = <428225>;
qcom,time-overhead = <1000>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qrd-audio-overlay.dtsi
new file mode 100644
index 0000000..b11c912
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd-audio-overlay.dtsi
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sdm845-audio-overlay.dtsi"
+
+&soc {
+ sound-tavil {
+ qcom,model = "sdm845-tavil-qrd-snd-card";
+
+ qcom,wsa-max-devs = <1>;
+ qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0213>;
+ qcom,wsa-aux-dev-prefix = "SpkrRight", "SpkrRight";
+
+ qcom,msm-mbhc-usbc-audio-supported = <1>;
+
+ qcom,usbc-analog-en2-gpio = <&tlmm 51 0>;
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&wcd_usbc_analog_en2_active>;
+ pinctrl-1 = <&wcd_usbc_analog_en2_idle>;
+ };
+};
+
+&wcd934x_cdc {
+ wcd_pinctrl@5 {
+ us_euro_sw_wcd_active {
+ mux {
+ pins = "gpio1";
+ };
+
+ config {
+ pins = "gpio1";
+ /delete-property/ output-high;
+ bias-high-impedance;
+ };
+ };
+
+ us_euro_sw_wcd_sleep {
+ mux {
+ pins = "gpio1";
+ };
+
+ config {
+ pins = "gpio1";
+ /delete-property/ output-low;
+ bias-high-impedance;
+ };
+ };
+ };
+
+ swr_master {
+ wsa881x@20170211 {
+ compatible = "qcom,wsa881x";
+ reg = <0x00 0x20170211>;
+ qcom,spkr-sd-n-node = <&wsa_spkr_wcd_sd2>;
+ };
+
+ wsa881x@21170213 {
+ compatible = "qcom,wsa881x";
+ reg = <0x00 0x21170213>;
+ qcom,spkr-sd-n-node = <&wsa_spkr_wcd_sd2>;
+ };
+ };
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-qrd-overlay.dts
new file mode 100644
index 0000000..c8136de
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd-overlay.dts
@@ -0,0 +1,31 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sdm845-sde-display.dtsi"
+#include "sdm845-qrd.dtsi"
+#include "sdm845-qrd-audio-overlay.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDM845 v1 QRD";
+ compatible = "qcom,sdm845-qrd", "qcom,sdm845", "qcom,qrd";
+ qcom,msm-id = <321 0x10000>;
+ qcom,board-id = <11 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dts b/arch/arm64/boot/dts/qcom/sdm845-qrd.dts
index 228b924..6cb7815 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qrd.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dts
@@ -14,6 +14,7 @@
/dts-v1/;
#include "sdm845.dtsi"
+#include "sdm845-sde-display.dtsi"
#include "sdm845-qrd.dtsi"
/ {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
index f14293b..c0afb74 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
@@ -13,7 +13,7 @@
#include "smb1355.dtsi"
#include <dt-bindings/gpio/gpio.h>
-/{
+&vendor {
bluetooth: bt_wcn3990 {
compatible = "qca,wcn3990";
qca,bt-vdd-io-supply = <&pm8998_s3>;
@@ -40,14 +40,6 @@
#include "fg-gen3-batterydata-itech-3000mah.dtsi"
#include "fg-gen3-batterydata-ascent-3450mah.dtsi"
};
-
- aliases {
- serial0 = &qupv3_se9_2uart;
- spi0 = &qupv3_se8_spi;
- i2c0 = &qupv3_se10_i2c;
- i2c1 = &qupv3_se3_i2c;
- hsuart0 = &qupv3_se6_4uart;
- };
};
&qupv3_se9_2uart {
@@ -92,6 +84,7 @@
&pmi8998_fg {
qcom,battery-data = <&qrd_batterydata>;
+ qcom,fg-bmd-en-delay-ms = <300>;
};
&smb1355_charger {
@@ -102,49 +95,6 @@
#cooling-cells = <2>;
};
-&soc {
- sound-tavil {
- qcom,wsa-max-devs = <1>;
- qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0213>;
- qcom,wsa-aux-dev-prefix = "SpkrRight", "SpkrRight";
-
- qcom,msm-mbhc-usbc-audio-supported = <1>;
-
- qcom,usbc-analog-en2-gpio = <&tlmm 51 0>;
- pinctrl-names = "aud_active", "aud_sleep";
- pinctrl-0 = <&wcd_usbc_analog_en2_active>;
- pinctrl-1 = <&wcd_usbc_analog_en2_idle>;
- };
-};
-
-&wcd934x_cdc {
- wcd: wcd_pinctrl@5 {
- us_euro_sw_wcd_active: us_euro_sw_wcd_active {
- mux {
- pins = "gpio1";
- };
-
- config {
- pins = "gpio1";
- /delete-property/ output-high;
- bias-high-impedance;
- };
- };
-
- us_euro_sw_wcd_sleep: us_euro_sw_wcd_sleep {
- mux {
- pins = "gpio1";
- };
-
- config {
- pins = "gpio1";
- /delete-property/ output-low;
- bias-high-impedance;
- };
- };
- };
-};
-
&ufsphy_mem {
compatible = "qcom,ufs-phy-qmp-v3";
@@ -221,10 +171,6 @@
pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on>;
pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off>;
- qcom,clk-rates = <400000 20000000 25000000
- 50000000 100000000 200000000>;
- qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
-
extcon = <&extcon_storage_cd>;
status = "ok";
@@ -240,23 +186,42 @@
qcom,led-strings-list = [01 02];
};
+&pmi8998_haptics {
+ qcom,vmax-mv = <1800>;
+ qcom,wave-play-rate-us = <4347>;
+ qcom,lra-auto-mode;
+ status = "okay";
+};
+
&mdss_mdp {
connectors = <&sde_rscc &sde_wb>;
};
-&dsi_sharp_4k_dsc_video {
+&dsi_nt35597_truly_dsc_cmd {
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = <4095>;
- qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+ qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
qcom,panel-mode-gpio = <&tlmm 52 0>;
qcom,platform-te-gpio = <&tlmm 10 0>;
qcom,platform-reset-gpio = <&tlmm 6 0>;
qcom,mdss-dsi-panel-orientation = "180";
};
-&dsi_sharp_4k_dsc_video_display {
+&dsi_nt35597_truly_dsc_video {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
+ qcom,panel-mode-gpio = <&tlmm 52 0>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+ qcom,platform-reset-gpio = <&tlmm 6 0>;
+ qcom,mdss-dsi-panel-orientation = "180";
+};
+
+&dsi_nt35597_truly_dsc_cmd_display {
qcom,dsi-display-active;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
index 7befe3b..8350d90 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
@@ -131,19 +131,19 @@
qcom,cpr-voltage-floor =
/* Speed bin 0 */
<568000 568000 568000 568000 568000
- 568000 568000 568000 568000 584000
- 584000 584000 632000 632000 632000
- 632000 672000 996000 996000>,
+ 568000 568000 568000 568000 568000
+ 568000 568000 568000 568000 568000
+ 568000 568000 996000 996000>,
/* Speed bin 1 */
<568000 568000 568000 568000 568000
- 568000 568000 568000 568000 584000
- 584000 584000 632000 632000 632000
- 632000 672000 712000 712000>,
+ 568000 568000 568000 568000 568000
+ 568000 568000 568000 568000 568000
+ 568000 568000 568000 568000>,
/* Speed bin 2 */
<568000 568000 568000 568000 568000
- 568000 568000 568000 568000 584000
- 584000 584000 632000 632000 632000
- 632000 672000 712000 712000>;
+ 568000 568000 568000 568000 568000
+ 568000 568000 568000 568000 568000
+ 568000 568000 568000 568000>;
qcom,cpr-floor-to-ceiling-max-range =
<32000 32000 32000 32000 32000
@@ -192,10 +192,62 @@
1950 2632>;
qcom,cpr-open-loop-voltage-fuse-adjustment =
- <100000 100000 100000 100000>;
+ /* Speed bin 0 */
+ <100000 100000 100000 100000>,
+ < 0 0 0 100000>,
+ < 0 0 0 0>,
+ < 0 0 0 0>,
+ < 0 0 0 0>,
+ < 0 0 0 0>,
+ < 0 0 0 0>,
+ < 0 0 0 0>,
+ /* Speed bin 1 */
+ <100000 100000 100000 100000>,
+ < 0 0 0 100000>,
+ < 0 0 0 0>,
+ < 0 0 0 0>,
+ < 0 0 0 0>,
+ < 0 0 0 0>,
+ < 0 0 0 0>,
+ < 0 0 0 0>,
+ /* Speed bin 2 */
+ <100000 100000 100000 100000>,
+ < 0 0 0 100000>,
+ < 0 0 0 0>,
+ < 0 0 0 0>,
+ < 0 0 0 0>,
+ < 0 0 0 0>,
+ < 0 0 0 0>,
+ < 0 0 0 0>;
qcom,cpr-closed-loop-voltage-fuse-adjustment =
- <100000 100000 100000 100000>;
+ /* Speed bin 0 */
+ <100000 100000 100000 100000>,
+ < 0 0 0 100000>,
+ < 0 0 0 0>,
+ < 0 0 0 0>,
+ < 0 0 0 0>,
+ < 0 0 0 0>,
+ < 0 0 0 0>,
+ < 0 0 0 0>,
+ /* Speed bin 1 */
+ <100000 100000 100000 100000>,
+ < 0 0 0 100000>,
+ < 0 0 0 0>,
+ < 0 0 0 0>,
+ < 0 0 0 0>,
+ < 0 0 0 0>,
+ < 0 0 0 0>,
+ < 0 0 0 0>,
+ /* Speed bin 2 */
+ <100000 100000 100000 100000>,
+ < 0 0 0 100000>,
+ < 0 0 0 0>,
+ < 0 0 0 0>,
+ < 0 0 0 0>,
+ < 0 0 0 0>,
+ < 0 0 0 0>,
+ < 0 0 0 0>;
qcom,allow-voltage-interpolation;
qcom,allow-quotient-interpolation;
@@ -265,16 +317,16 @@
qcom,cpr-voltage-floor =
/* Speed bin 0 */
<568000 568000 568000 568000 568000
- 584000 584000 632000 672000 996000
+ 568000 568000 568000 568000 996000
996000>,
/* Speed bin 1 */
<568000 568000 568000 568000 568000
- 584000 584000 632000 672000 712000
- 712000>,
+ 568000 568000 568000 568000 568000
+ 568000>,
/* Speed bin 2 */
<568000 568000 568000 568000 568000
- 584000 584000 632000 672000 712000
- 712000 712000 712000>;
+ 568000 568000 568000 568000 568000
+ 568000 568000 568000>;
qcom,cpr-floor-to-ceiling-max-range =
/* Speed bin 0 */
@@ -323,10 +375,62 @@
2501 2095>;
qcom,cpr-open-loop-voltage-fuse-adjustment =
- <100000 100000 100000 100000>;
+ /* Speed bin 0 */
+ <100000 100000 100000 100000>,
+ < 0 24000 4000 100000>,
+ < 0 24000 4000 0>,
+ < 0 24000 4000 0>,
+ < 0 24000 4000 0>,
+ < 0 24000 4000 0>,
+ < 0 24000 4000 0>,
+ < 0 24000 4000 0>,
+ /* Speed bin 1 */
+ <100000 100000 100000 100000>,
+ < 0 24000 4000 100000>,
+ < 0 24000 4000 20000>,
+ < 0 24000 4000 20000>,
+ < 0 24000 4000 20000>,
+ < 0 24000 4000 20000>,
+ < 0 24000 4000 20000>,
+ < 0 24000 4000 20000>,
+ /* Speed bin 2 */
+ <100000 100000 100000 100000>,
+ < 0 24000 4000 100000>,
+ < 0 24000 4000 40000>,
+ < 0 24000 4000 40000>,
+ < 0 24000 4000 40000>,
+ < 0 24000 4000 40000>,
+ < 0 24000 4000 40000>,
+ < 0 24000 4000 40000>;
qcom,cpr-closed-loop-voltage-fuse-adjustment =
- <100000 100000 100000 100000>;
+ /* Speed bin 0 */
+ <100000 100000 100000 100000>,
+ < 0 29000 6000 100000>,
+ < 0 29000 6000 0>,
+ < 0 29000 6000 0>,
+ < 0 29000 6000 0>,
+ < 0 29000 6000 0>,
+ < 0 29000 6000 0>,
+ < 0 29000 6000 0>,
+ /* Speed bin 1 */
+ <100000 100000 100000 100000>,
+ < 0 29000 6000 100000>,
+ < 0 29000 6000 20000>,
+ < 0 29000 6000 20000>,
+ < 0 29000 6000 20000>,
+ < 0 29000 6000 20000>,
+ < 0 29000 6000 20000>,
+ < 0 29000 6000 20000>,
+ /* Speed bin 2 */
+ <100000 100000 100000 100000>,
+ < 0 29000 6000 100000>,
+ < 0 29000 6000 40000>,
+ < 0 29000 6000 40000>,
+ < 0 29000 6000 40000>,
+ < 0 29000 6000 40000>,
+ < 0 29000 6000 40000>,
+ < 0 29000 6000 40000>;
qcom,allow-voltage-interpolation;
qcom,allow-quotient-interpolation;
@@ -349,7 +453,7 @@
};
apc1_cpr: cprh-ctrl@17db0000 {
- compatible = "qcom,cprh-sdm845-kbss-regulator";
+ compatible = "qcom,cprh-sdm845-v1-kbss-regulator";
reg = <0x17db0000 0x4000>,
<0x00784000 0x1000>,
<0x17830000 0x1000>;
@@ -453,22 +557,22 @@
/* Speed bin 0 */
<568000 568000 568000 568000 568000
568000 568000 568000 568000 568000
- 584000 584000 632000 632000 632000
- 632000 632000 672000 712000 712000
- 772000 772000>,
+ 568000 568000 568000 568000 568000
+ 568000 568000 568000 568000 568000
+ 568000 568000>,
/* Speed bin 1 */
<568000 568000 568000 568000 568000
568000 568000 568000 568000 568000
- 584000 584000 632000 632000 632000
- 632000 632000 672000 712000 712000
- 772000 772000 772000 772000>,
+ 568000 568000 568000 568000 568000
+ 568000 568000 568000 568000 568000
+ 568000 568000 568000 568000>,
/* Speed bin 2 */
<568000 568000 568000 568000 568000
568000 568000 568000 568000 568000
- 584000 584000 632000 632000 632000
- 632000 632000 672000 712000 712000
- 772000 772000 772000 772000
- 772000>;
+ 568000 568000 568000 568000 568000
+ 568000 568000 568000 568000 568000
+ 568000 568000 568000 568000
+ 568000>;
qcom,cpr-floor-to-ceiling-max-range =
/* Speed bin 0 */
@@ -622,6 +726,13 @@
regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
};
+
+ ebi_cdev: regulator-cdev {
+ compatible = "qcom,rpmh-reg-cdev";
+ mboxes = <&qmp_aop 0>;
+ qcom,reg-resource-name = "ebi";
+ #cooling-cells = <2>;
+ };
};
rpmh-regulator-smpa2 {
@@ -718,6 +829,13 @@
regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
qcom,min-dropout-voltage-level = <(-1)>;
};
+
+ cx_cdev: regulator-cdev {
+ compatible = "qcom,rpmh-reg-cdev";
+ mboxes = <&qmp_aop 0>;
+ qcom,reg-resource-name = "cx";
+ #cooling-cells = <2>;
+ };
};
rpmh-regulator-ldoa1 {
@@ -736,6 +854,25 @@
qcom,init-voltage = <880000>;
qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
};
+
+ pm8998_l1_ao: regulator-l1-ao {
+ regulator-name = "pm8998_l1_ao";
+ qcom,set = <RPMH_REGULATOR_SET_ACTIVE>;
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <880000>;
+ qcom,init-voltage = <880000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
+
+ regulator-l1-so {
+ regulator-name = "pm8998_l1_so";
+ qcom,set = <RPMH_REGULATOR_SET_SLEEP>;
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <880000>;
+ qcom,init-voltage = <880000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ qcom,init-enable = <0>;
+ };
};
rpmh-regulator-ldoa2 {
@@ -786,6 +923,13 @@
regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
};
+
+ mx_cdev: regulator-cdev {
+ compatible = "qcom,rpmh-reg-cdev";
+ mboxes = <&qmp_aop 0>;
+ qcom,reg-resource-name = "mx";
+ #cooling-cells = <2>;
+ };
};
rpmh-regulator-ldoa5 {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
index 726a63f..21aedbf 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
@@ -399,19 +399,30 @@
<&clock_gcc GCC_USB3_PRIM_PHY_PIPE_CLK>,
<&clock_dispcc DISP_CC_MDSS_DP_LINK_CLK>,
<&clock_dispcc DISP_CC_MDSS_DP_LINK_INTF_CLK>,
- <&clock_dispcc DISP_CC_MDSS_DP_CRYPTO_CLK>,
<&clock_dispcc DISP_CC_MDSS_DP_PIXEL_CLK>,
+ <&clock_dispcc DISP_CC_MDSS_DP_CRYPTO_CLK>,
<&clock_dispcc DISP_CC_MDSS_DP_PIXEL_CLK_SRC>,
<&mdss_dp_pll DP_VCO_DIVIDED_CLK_SRC_MUX>;
clock-names = "core_aux_clk", "core_usb_ref_clk_src",
"core_usb_ref_clk", "core_usb_cfg_ahb_clk",
"core_usb_pipe_clk", "ctrl_link_clk",
- "ctrl_link_iface_clk", "ctrl_crypto_clk",
- "ctrl_pixel_clk", "pixel_clk_rcg", "pixel_parent";
+ "ctrl_link_iface_clk", "ctrl_pixel_clk",
+ "crypto_clk", "pixel_clk_rcg", "pixel_parent";
qcom,dp-usbpd-detection = <&pmi8998_pdphy>;
- qcom,aux-cfg-settings = [00 13 04 00 0a 26 0a 03 bb 03];
+ qcom,aux-cfg0-settings = [20 00];
+ qcom,aux-cfg1-settings = [24 13 23 1d];
+ qcom,aux-cfg2-settings = [28 24];
+ qcom,aux-cfg3-settings = [2c 00];
+ qcom,aux-cfg4-settings = [30 0a];
+ qcom,aux-cfg5-settings = [34 26];
+ qcom,aux-cfg6-settings = [38 0a];
+ qcom,aux-cfg7-settings = [3c 03];
+ qcom,aux-cfg8-settings = [40 bb];
+ qcom,aux-cfg9-settings = [44 03];
+
+ qcom,max-pclk-frequency-khz = <576000>;
qcom,core-supply-entries {
#address-cells = <1>;
@@ -471,96 +482,168 @@
};
&dsi_dual_nt35597_truly_video {
- qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07 07 05 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x0D>;
qcom,mdss-dsi-t-clk-pre = <0x2D>;
- qcom,display-topology = <2 0 2>,
- <1 0 2>;
- qcom,default-topology-index = <0>;
+ qcom,mdss-dsi-display-timings {
+ timing@0{
+ qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07
+ 07 05 03 04 00];
+ qcom,display-topology = <2 0 2>,
+ <1 0 2>;
+ qcom,default-topology-index = <0>;
+ };
+ };
};
&dsi_dual_nt35597_truly_cmd {
- qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07 07 05 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x0D>;
qcom,mdss-dsi-t-clk-pre = <0x2D>;
- qcom,display-topology = <2 0 2>,
- <1 0 2>;
- qcom,default-topology-index = <0>;
+ qcom,mdss-dsi-display-timings {
+ timing@0{
+ qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07
+ 07 05 03 04 00];
+ qcom,display-topology = <2 0 2>,
+ <1 0 2>;
+ qcom,default-topology-index = <0>;
+ };
+ };
};
&dsi_nt35597_truly_dsc_cmd {
- qcom,mdss-dsi-panel-phy-timings = [00 15 05 05 20 1f 05 05 03 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x0b>;
qcom,mdss-dsi-t-clk-pre = <0x23>;
- qcom,display-topology = <1 1 1>,
- <2 2 1>, /* dsc merge */
- <2 1 1>; /* 3d mux */
- qcom,default-topology-index = <0>;
+ qcom,mdss-dsi-display-timings {
+ timing@0{
+ qcom,mdss-dsi-panel-phy-timings = [00 15 05 05 20 1f 05
+ 05 03 03 04 00];
+ qcom,display-topology = <1 1 1>,
+ <2 2 1>, /* dsc merge */
+ <2 1 1>; /* 3d mux */
+ qcom,default-topology-index = <1>;
+ };
+ };
};
&dsi_nt35597_truly_dsc_video {
- qcom,mdss-dsi-panel-phy-timings = [00 15 05 05 20 1f 05 05 03 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x0b>;
qcom,mdss-dsi-t-clk-pre = <0x23>;
- qcom,display-topology = <1 1 1>,
- <2 2 1>, /* dsc merge */
- <2 1 1>; /* 3d mux */
- qcom,default-topology-index = <0>;
+ qcom,mdss-dsi-display-timings {
+ timing@0{
+ qcom,mdss-dsi-panel-phy-timings = [00 15 05 05 20 1f 05
+ 04 03 03 04 00];
+ qcom,display-topology = <1 1 1>,
+ <2 2 1>, /* dsc merge */
+ <2 1 1>; /* 3d mux */
+ qcom,default-topology-index = <1>;
+ };
+ };
};
&dsi_sharp_4k_dsc_video {
- qcom,mdss-dsi-panel-phy-timings = [00 18 06 06 21 20 06 06 04 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x0c>;
qcom,mdss-dsi-t-clk-pre = <0x27>;
- qcom,display-topology = <2 2 2>;
- qcom,default-topology-index = <0>;
+ qcom,mdss-dsi-display-timings {
+ timing@0{
+ qcom,mdss-dsi-panel-phy-timings = [00 18 06 06 21 20 06
+ 06 04 03 04 00];
+ qcom,display-topology = <2 2 2>;
+ qcom,default-topology-index = <0>;
+ };
+ };
};
&dsi_sharp_4k_dsc_cmd {
- qcom,mdss-dsi-panel-phy-timings = [00 18 06 06 21 20 06 06 04 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x0c>;
qcom,mdss-dsi-t-clk-pre = <0x27>;
- qcom,display-topology = <2 2 2>;
- qcom,default-topology-index = <0>;
+ qcom,mdss-dsi-display-timings {
+ timing@0{
+ qcom,mdss-dsi-panel-phy-timings = [00 18 06 06 21 20 06
+ 06 04 03 04 00];
+ qcom,display-topology = <2 2 2>;
+ qcom,default-topology-index = <0>;
+ };
+ };
};
&dsi_dual_sharp_1080_120hz_cmd {
- qcom,mdss-dsi-panel-phy-timings = [00 24 09 09 26 24 09 09 06 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x0f>;
qcom,mdss-dsi-t-clk-pre = <0x36>;
- qcom,display-topology = <2 0 2>,
- <1 0 2>;
- qcom,default-topology-index = <0>;
+ qcom,mdss-dsi-display-timings {
+ timing@0{
+ qcom,mdss-dsi-panel-phy-timings = [00 24 09 09 26 24 09
+ 09 06 03 04 00];
+ qcom,display-topology = <2 0 2>,
+ <1 0 2>;
+ qcom,default-topology-index = <0>;
+ };
+ };
};
&dsi_sharp_1080_cmd {
- qcom,mdss-dsi-panel-phy-timings = [00 1A 06 06 22 20 07 07 04 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x0c>;
qcom,mdss-dsi-t-clk-pre = <0x29>;
- qcom,display-topology = <1 0 1>;
- qcom,default-topology-index = <0>;
+ qcom,mdss-dsi-display-timings {
+ timing@0{
+ qcom,mdss-dsi-panel-phy-timings = [00 1A 06 06 22 20 07
+ 07 04 03 04 00];
+ qcom,display-topology = <1 0 1>;
+ qcom,default-topology-index = <0>;
+ };
+ };
};
&dsi_sim_vid {
- qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07 07 05 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x0d>;
qcom,mdss-dsi-t-clk-pre = <0x2d>;
+ qcom,mdss-dsi-display-timings {
+ timing@0{
+ qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07
+ 07 05 03 04 00];
+ qcom,display-topology = <1 0 1>,
+ <2 0 1>;
+ qcom,default-topology-index = <0>;
+ };
+ };
};
&dsi_dual_sim_vid {
- qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07 07 05 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x0d>;
qcom,mdss-dsi-t-clk-pre = <0x2d>;
+ qcom,mdss-dsi-display-timings {
+ timing@0{
+ qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07
+ 07 05 03 04 00];
+ qcom,display-topology = <2 0 2>,
+ <1 0 2>;
+ qcom,default-topology-index = <0>;
+ };
+ };
};
&dsi_sim_cmd {
- qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07 07 05 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x0d>;
qcom,mdss-dsi-t-clk-pre = <0x2d>;
+ qcom,mdss-dsi-display-timings {
+ timing@0{
+ qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07
+ 07 05 03 04 00];
+ qcom,display-topology = <1 0 1>,
+ <2 0 1>;
+ qcom,default-topology-index = <0>;
+ };
+ };
};
&dsi_dual_sim_cmd {
- qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07 07 05 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x0d>;
qcom,mdss-dsi-t-clk-pre = <0x2d>;
+ qcom,mdss-dsi-display-timings {
+ timing@0{
+ qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07
+ 07 05 03 04 00];
+ qcom,display-topology = <2 0 2>,
+ <1 0 2>;
+ qcom,default-topology-index = <0>;
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
index 2ae3832..121565e 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
@@ -14,11 +14,9 @@
mdss_mdp: qcom,mdss_mdp@ae00000 {
compatible = "qcom,sde-kms";
reg = <0x0ae00000 0x81d40>,
- <0x0aeb0000 0x2008>,
- <0x0aeac000 0xf0>;
+ <0x0aeb0000 0x2008>;
reg-names = "mdp_phys",
- "vbif_phys",
- "regdma_phys";
+ "vbif_phys";
clocks =
<&clock_gcc GCC_DISP_AHB_CLK>,
@@ -30,7 +28,7 @@
clock-names = "gcc_iface", "gcc_bus", "iface_clk",
"bus_clk", "core_clk", "vsync_clk";
clock-rate = <0 0 0 0 300000000 19200000 0>;
- clock-max-rate = <0 0 0 0 430000000 19200000 0>;
+ clock-max-rate = <0 0 0 0 412500000 19200000 0>;
sde-vdd-supply = <&mdss_core_gdsc>;
@@ -57,14 +55,18 @@
0x48000 0x49000 0x4a000>;
qcom,sde-mixer-size = <0x320>;
+ qcom,sde-dspp-top-off = <0x1300>;
+ qcom,sde-dspp-top-size = <0xc>;
+
qcom,sde-dspp-off = <0x55000 0x57000 0x59000 0x5b000>;
qcom,sde-dspp-size = <0x17e0>;
qcom,sde-wb-off = <0x66000>;
qcom,sde-wb-size = <0x2c8>;
-
qcom,sde-wb-xin-id = <6>;
qcom,sde-wb-id = <2>;
+ qcom,sde-wb-clk-ctrl = <0x3b8 24>;
+
qcom,sde-intf-off = <0x6b000 0x6b800
0x6c000 0x6c800>;
qcom,sde-intf-size = <0x280>;
@@ -82,7 +84,7 @@
qcom,sde-dsc-off = <0x81000 0x81400 0x81800 0x81c00>;
qcom,sde-dsc-size = <0x140>;
- qcom,sde-dither-off = <0x30e0 0x30e0 0x30e0 0x30e0>;
+ qcom,sde-dither-off = <0x30e0 0x30e0 0x30e0 0x30e0 0x0>;
qcom,sde-dither-version = <0x00010000>;
qcom,sde-dither-size = <0x20>;
@@ -177,9 +179,6 @@
/* offsets are relative to "mdp_phys + qcom,sde-off */
qcom,sde-inline-rot-clk-ctrl = <0x2bc 0x8>, <0x2bc 0xc>;
- qcom,sde-reg-dma-off = <0>;
- qcom,sde-reg-dma-version = <0x1>;
- qcom,sde-reg-dma-trigger-off = <0x119c>;
qcom,sde-sspp-vig-blocks {
qcom,sde-vig-csc-off = <0x1a00>;
@@ -188,8 +187,10 @@
};
qcom,sde-dspp-blocks {
+ qcom,sde-dspp-igc = <0x0 0x00030001>;
qcom,sde-dspp-vlut = <0xa00 0x00010008>;
qcom,sde-dspp-gamut = <0x1000 0x00040000>;
+ qcom,sde-dspp-pcc = <0x1700 0x00040000>;
qcom,sde-dspp-gc = <0x17c0 0x00010008>;
};
@@ -207,15 +208,41 @@
};
};
+ smmu_sde_sec: qcom,smmu_sde_sec_cb {
+ compatible = "qcom,smmu_sde_sec";
+ iommus = <&apps_smmu 0x881 0x8>,
+ <&apps_smmu 0xc81 0x8>;
+ };
+
/* data and reg bus scale settings */
qcom,sde-data-bus {
- qcom,msm-bus,name = "mdss_sde";
+ qcom,msm-bus,name = "mdss_sde_mnoc";
qcom,msm-bus,num-cases = <3>;
qcom,msm-bus,num-paths = <2>;
qcom,msm-bus,vectors-KBps =
- <22 512 0 0>, <23 512 0 0>,
- <22 512 0 6400000>, <23 512 0 6400000>,
- <22 512 0 6400000>, <23 512 0 6400000>;
+ <22 773 0 0>, <23 773 0 0>,
+ <22 773 0 6400000>, <23 773 0 6400000>,
+ <22 773 0 6400000>, <23 773 0 6400000>;
+ };
+
+ qcom,sde-llcc-bus {
+ qcom,msm-bus,name = "mdss_sde_llcc";
+ qcom,msm-bus,num-cases = <3>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <132 770 0 0>,
+ <132 770 0 6400000>,
+ <132 770 0 6400000>;
+ };
+
+ qcom,sde-ebi-bus {
+ qcom,msm-bus,name = "mdss_sde_ebi";
+ qcom,msm-bus,num-cases = <3>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <129 512 0 0>,
+ <129 512 0 6400000>,
+ <129 512 0 6400000>;
};
qcom,sde-reg-bus {
@@ -252,14 +279,36 @@
/* data and reg bus scale settings */
qcom,sde-data-bus {
- qcom,msm-bus,name = "disp_rsc";
+ qcom,msm-bus,name = "disp_rsc_mnoc";
qcom,msm-bus,active-only;
qcom,msm-bus,num-cases = <3>;
qcom,msm-bus,num-paths = <2>;
qcom,msm-bus,vectors-KBps =
- <20003 20512 0 0>, <20004 20512 0 0>,
- <20003 20512 0 6400000>, <20004 20512 0 6400000>,
- <20003 20512 0 6400000>, <20004 20512 0 6400000>;
+ <20003 20515 0 0>, <20004 20515 0 0>,
+ <20003 20515 0 6400000>, <20004 20515 0 6400000>,
+ <20003 20515 0 6400000>, <20004 20515 0 6400000>;
+ };
+
+ qcom,sde-llcc-bus {
+ qcom,msm-bus,name = "disp_rsc_llcc";
+ qcom,msm-bus,active-only;
+ qcom,msm-bus,num-cases = <3>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <20001 20513 0 0>,
+ <20001 20513 0 6400000>,
+ <20001 20513 0 6400000>;
+ };
+
+ qcom,sde-ebi-bus {
+ qcom,msm-bus,name = "disp_rsc_ebi";
+ qcom,msm-bus,active-only;
+ qcom,msm-bus,num-cases = <3>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <20000 20512 0 0>,
+ <20000 20512 0 6400000>,
+ <20000 20512 0 6400000>;
};
};
@@ -301,6 +350,7 @@
/* Offline rotator QoS setting */
qcom,mdss-rot-vbif-qos-setting = <3 3 3 3 3 3 3 3>;
+ qcom,mdss-rot-vbif-memtype = <3 3>;
qcom,mdss-rot-cdp-setting = <1 1>;
qcom,mdss-rot-qos-lut = <0x0 0x0 0x0 0x0>;
qcom,mdss-rot-danger-lut = <0x0 0x0>;
@@ -321,6 +371,17 @@
cache-slice-names = "rotator";
cache-slices = <&llcc 4>;
+ /* reg bus scale settings */
+ rot_reg: qcom,rot-reg-bus {
+ qcom,msm-bus,name = "mdss_rot_reg";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,active-only;
+ qcom,msm-bus,vectors-KBps =
+ <1 590 0 0>,
+ <1 590 0 76800>;
+ };
+
smmu_rot_unsec: qcom,smmu_rot_unsec_cb {
compatible = "qcom,smmu_sde_rot_unsec";
iommus = <&apps_smmu 0x1090 0x0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
index 53cb27e..40c677f 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
@@ -21,6 +21,8 @@
reg = <0x0a600000 0xf8c00>,
<0x088ee000 0x400>;
reg-names = "core_base", "ahb2phy_base";
+ iommus = <&apps_smmu 0x740 0x0>;
+ qcom,smmu-s1-bypass;
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -61,10 +63,10 @@
<MSM_BUS_MASTER_USB3 MSM_BUS_SLAVE_IPA_CFG 0 0>,
<MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_USB3 0 0>,
<MSM_BUS_MASTER_USB3
- MSM_BUS_SLAVE_EBI_CH0 240000 800000>,
+ MSM_BUS_SLAVE_EBI_CH0 240000 700000>,
<MSM_BUS_MASTER_USB3
MSM_BUS_SLAVE_IPA_CFG 0 2400>,
- <MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_USB3 0 80000>;
+ <MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_USB3 0 40000>;
dwc3@a600000 {
compatible = "snps,dwc3";
@@ -113,13 +115,24 @@
/* Primary USB port related QUSB2 PHY */
qusb_phy0: qusb@88e2000 {
compatible = "qcom,qusb2phy-v2";
- reg = <0x088e2000 0x400>;
- reg-names = "qusb_phy_base";
+ reg = <0x088e2000 0x400>,
+ <0x007801e8 0x4>;
+ reg-names = "qusb_phy_base", "efuse_addr";
+ qcom,efuse-bit-pos = <25>;
+ qcom,efuse-num-bits = <3>;
vdd-supply = <&pm8998_l1>;
vdda18-supply = <&pm8998_l12>;
vdda33-supply = <&pm8998_l24>;
qcom,vdd-voltage-level = <0 880000 880000>;
+ qcom,qusb-phy-reg-offset =
+ <0x240 /* QUSB2PHY_PORT_TUNE1 */
+ 0x1a0 /* QUSB2PHY_PLL_COMMON_STATUS_ONE */
+ 0x210 /* QUSB2PHY_PWR_CTRL1 */
+ 0x230 /* QUSB2PHY_INTR_CTRL */
+ 0x0a8 /* QUSB2PHY_PLL_CORE_INPUT_OVERRIDE */
+ 0x254>; /* QUSB2PHY_TEST1 */
+
qcom,qusb-phy-init-seq =
/* <value reg_offset> */
<0x23 0x210 /* PWR_CTRL1 */
@@ -133,7 +146,7 @@
0x21 0x214 /* PWR_CTRL2 */
0x00 0x220 /* IMP_CTRL1 */
0x58 0x224 /* IMP_CTRL2 */
- 0x32 0x240 /* TUNE1 */
+ 0x30 0x240 /* TUNE1 */
0x29 0x244 /* TUNE2 */
0xca 0x248 /* TUNE3 */
0x04 0x24c /* TUNE4 */
@@ -141,8 +154,6 @@
0x00 0x23c /* CHG_CTRL2 */
0x22 0x210>; /* PWR_CTRL1 */
- qcom,phy-auto-resume-offset = <0x254>;
-
phy_type= "utmi";
clocks = <&clock_rpmh RPMH_CXO_CLK>,
<&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
@@ -220,12 +231,12 @@
0x12a4 0x12 0x00 /* TXA_RCV_DETECT_LVL_2 */
0x128c 0x16 0x00 /* TXA_LANE_MODE_1 */
0x1248 0x09 0x00 /* TXA_RES_CODE_LANE_OFFSET_RX */
- 0x1244 0x0d 0x00 /* TXA_RES_CODE_LANE_OFFSET_TX */
+ 0x1244 0x06 0x00 /* TXA_RES_CODE_LANE_OFFSET_TX */
0x1660 0x10 0x00 /* TXB_HIGHZ_DRVR_EN */
0x16a4 0x12 0x00 /* TXB_RCV_DETECT_LVL_2 */
0x168c 0x16 0x00 /* TXB_LANE_MODE_1 */
0x1648 0x09 0x00 /* TXB_RES_CODE_LANE_OFFSET_RX */
- 0x1644 0x0d 0x00 /* TXB_RES_CODE_LANE_OFFSET_TX */
+ 0x1644 0x06 0x00 /* TXB_RES_CODE_LANE_OFFSET_TX */
0x1cc8 0x83 0x00 /* PCS_FLL_CNTRL2 */
0x1ccc 0x09 0x00 /* PCS_FLL_CNT_VAL_L */
0x1cd0 0xa2 0x00 /* PCS_FLL_CNT_VAL_H_TOL */
@@ -322,6 +333,8 @@
reg = <0x0a800000 0xf8c00>,
<0x088ee000 0x400>;
reg-names = "core_base", "ahb2phy_base";
+ iommus = <&apps_smmu 0x760 0x0>;
+ qcom,smmu-s1-bypass;
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -359,8 +372,8 @@
<MSM_BUS_MASTER_USB3_1 MSM_BUS_SLAVE_EBI_CH0 0 0>,
<MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_USB3_1 0 0>,
<MSM_BUS_MASTER_USB3_1
- MSM_BUS_SLAVE_EBI_CH0 240000 800000>,
- <MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_USB3_1 0 80000>;
+ MSM_BUS_SLAVE_EBI_CH0 240000 700000>,
+ <MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_USB3_1 0 40000>;
dwc3@a600000 {
compatible = "snps,dwc3";
@@ -385,6 +398,14 @@
vdda18-supply = <&pm8998_l12>;
vdda33-supply = <&pm8998_l24>;
qcom,vdd-voltage-level = <0 880000 880000>;
+ qcom,qusb-phy-reg-offset =
+ <0x240 /* QUSB2PHY_PORT_TUNE1 */
+ 0x1a0 /* QUSB2PHY_PLL_COMMON_STATUS_ONE */
+ 0x210 /* QUSB2PHY_PWR_CTRL1 */
+ 0x230 /* QUSB2PHY_INTR_CTRL */
+ 0x0a8 /* QUSB2PHY_PLL_CORE_INPUT_OVERRIDE */
+ 0x254>; /* QUSB2PHY_TEST1 */
+
qcom,qusb-phy-init-seq =
/* <value reg_offset> */
<0x23 0x210 /* PWR_CTRL1 */
@@ -398,7 +419,7 @@
0x21 0x214 /* PWR_CTRL2 */
0x00 0x220 /* IMP_CTRL1 */
0x58 0x224 /* IMP_CTRL2 */
- 0x32 0x240 /* TUNE1 */
+ 0x20 0x240 /* TUNE1 */
0x29 0x244 /* TUNE2 */
0xca 0x248 /* TUNE3 */
0x04 0x24c /* TUNE4 */
@@ -485,8 +506,8 @@
0x260 0x10 0x00 /* QSERDES_TX_HIGHZ_DRVR_EN */
0x2a4 0x12 0x00 /* QSERDES_TX_RCV_DETECT_LVL_2 */
0x28c 0xc6 0x00 /* QSERDES_TX_LANE_MODE_1 */
- 0x248 0x09 0x00 /* TX_RES_CODE_LANE_OFFSET_RX */
- 0x244 0x0d 0x00 /* TX_RES_CODE_LANE_OFFSET_TX */
+ 0x248 0x06 0x00 /* TX_RES_CODE_LANE_OFFSET_RX */
+ 0x244 0x06 0x00 /* TX_RES_CODE_LANE_OFFSET_TX */
0x8c8 0x83 0x00 /* USB3_UNI_PCS_FLL_CNTRL2 */
0x8cc 0x09 0x00 /* USB3_UNI_PCS_FLL_CNT_VAL_L */
0x8d0 0xa2 0x00 /* USB3_UNI_PCS_FLL_CNT_VAL_H_TOL */
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-v2-cdp-overlay.dts
new file mode 100644
index 0000000..3cd7678
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-cdp-overlay.dts
@@ -0,0 +1,31 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sdm845-sde-display.dtsi"
+#include "sdm845-cdp.dtsi"
+#include "sdm845-cdp-audio-overlay.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDM845 v2 CDP";
+ compatible = "qcom,sdm845-cdp", "qcom,sdm845", "qcom,cdp";
+ qcom,msm-id = <321 0x20000>;
+ qcom,board-id = <1 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-cdp.dts b/arch/arm64/boot/dts/qcom/sdm845-v2-cdp.dts
index 8ab0593..66ee4c7 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-cdp.dts
@@ -14,6 +14,7 @@
/dts-v1/;
#include "sdm845-v2.dtsi"
+#include "sdm845-sde-display.dtsi"
#include "sdm845-cdp.dtsi"
/ {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-v2-mtp-overlay.dts
new file mode 100644
index 0000000..e049357
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-mtp-overlay.dts
@@ -0,0 +1,31 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sdm845-sde-display.dtsi"
+#include "sdm845-mtp.dtsi"
+#include "sdm845-audio-overlay.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDM845 v2 MTP";
+ compatible = "qcom,sdm845-mtp", "qcom,sdm845", "qcom,mtp";
+ qcom,msm-id = <321 0x20000>;
+ qcom,board-id = <8 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-v2-mtp.dts
index 57c3e71..cea38e6 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-mtp.dts
@@ -14,6 +14,7 @@
/dts-v1/;
#include "sdm845-v2.dtsi"
+#include "sdm845-sde-display.dtsi"
#include "sdm845-mtp.dtsi"
/ {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qrd-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-v2-qrd-overlay.dts
new file mode 100644
index 0000000..a5a32ab
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-qrd-overlay.dts
@@ -0,0 +1,31 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sdm845-sde-display.dtsi"
+#include "sdm845-qrd.dtsi"
+#include "sdm845-qrd-audio-overlay.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDM845 v2 QRD";
+ compatible = "qcom,sdm845-qrd", "qcom,sdm845", "qcom,qrd";
+ qcom,msm-id = <321 0x20000>;
+ qcom,board-id = <11 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qrd.dts b/arch/arm64/boot/dts/qcom/sdm845-v2-qrd.dts
index 8a9a544..9a87617 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-qrd.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-qrd.dts
@@ -14,6 +14,7 @@
/dts-v1/;
#include "sdm845-v2.dtsi"
+#include "sdm845-sde-display.dtsi"
#include "sdm845-qrd.dtsi"
/ {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2.dts b/arch/arm64/boot/dts/qcom/sdm845-v2.dts
new file mode 100644
index 0000000..d36d0fd
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2.dts
@@ -0,0 +1,21 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "sdm845-v2.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDM845 v2 SoC";
+ compatible = "qcom,sdm845";
+ qcom,board-id = <0 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
index bf72741..761efea 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
@@ -17,22 +17,569 @@
qcom,msm-id = <321 0x20000>;
};
-&spmi_debug_bus {
- status = "ok";
+&sdhc_2 {
+ /delete-property/ qcom,sdr104-wa;
+};
+
+/delete-node/ &apc0_cpr;
+/delete-node/ &apc1_cpr;
+
+&soc {
+ /* CPR controller regulators */
+ apc0_cpr: cprh-ctrl@17dc0000 {
+ compatible = "qcom,cprh-sdm845-v2-kbss-regulator";
+ reg = <0x17dc0000 0x4000>,
+ <0x00784000 0x1000>,
+ <0x17840000 0x1000>;
+ reg-names = "cpr_ctrl", "fuse_base", "saw";
+ clocks = <&clock_gcc GCC_CPUSS_RBCPR_CLK>;
+ clock-names = "core_clk";
+ qcom,cpr-ctrl-name = "apc0";
+ qcom,cpr-controller-id = <0>;
+
+ qcom,cpr-sensor-time = <1000>;
+ qcom,cpr-loop-time = <5000000>;
+ qcom,cpr-idle-cycles = <15>;
+ qcom,cpr-up-down-delay-time = <3000>;
+ qcom,cpr-step-quot-init-min = <11>;
+ qcom,cpr-step-quot-init-max = <12>;
+ qcom,cpr-count-mode = <0>; /* All at once */
+ qcom,cpr-count-repeat = <20>;
+ qcom,cpr-down-error-step-limit = <1>;
+ qcom,cpr-up-error-step-limit = <1>;
+ qcom,cpr-corner-switch-delay-time = <1042>;
+ qcom,cpr-voltage-settling-time = <1760>;
+ qcom,cpr-reset-step-quot-loop-en;
+
+ qcom,voltage-step = <4000>;
+ qcom,voltage-base = <352000>;
+ qcom,cpr-saw-use-unit-mV;
+
+ qcom,saw-avs-ctrl = <0x101C031>;
+ qcom,saw-avs-limit = <0x3B803B8>;
+
+ qcom,cpr-enable;
+ qcom,cpr-hw-closed-loop;
+
+ qcom,cpr-panic-reg-addr-list =
+ <0x17dc3a84 0x17dc3a88 0x17840c18>;
+ qcom,cpr-panic-reg-name-list =
+ "APSS_SILVER_CPRH_STATUS_0",
+ "APSS_SILVER_CPRH_STATUS_1",
+ "SILVER_SAW4_PMIC_STS";
+
+ qcom,cpr-aging-ref-voltage = <952000>;
+ vdd-supply = <&pm8998_s13>;
+
+ thread@0 {
+ qcom,cpr-thread-id = <0>;
+ qcom,cpr-consecutive-up = <0>;
+ qcom,cpr-consecutive-down = <0>;
+ qcom,cpr-up-threshold = <2>;
+ qcom,cpr-down-threshold = <2>;
+
+ apc0_pwrcl_vreg: regulator {
+ regulator-name = "apc0_pwrcl_corner";
+ regulator-min-microvolt = <1>;
+ regulator-max-microvolt = <18>;
+
+ qcom,cpr-fuse-corners = <4>;
+ qcom,cpr-fuse-combos = <16>;
+ qcom,cpr-speed-bins = <2>;
+ qcom,cpr-speed-bin-corners = <18 18>;
+ qcom,cpr-corners = <18>;
+
+ qcom,cpr-corner-fmax-map = <6 12 15 18>;
+
+ qcom,cpr-voltage-ceiling =
+ <828000 828000 828000 828000 828000
+ 828000 828000 828000 828000 828000
+ 828000 828000 828000 828000 828000
+ 884000 952000 952000>;
+
+ qcom,cpr-voltage-floor =
+ <568000 568000 568000 568000 568000
+ 568000 568000 568000 568000 568000
+ 568000 568000 568000 568000 568000
+ 568000 568000 568000>;
+
+ qcom,cpr-floor-to-ceiling-max-range =
+ <32000 32000 32000 32000 32000
+ 32000 32000 32000 32000 32000
+ 32000 32000 32000 32000 32000
+ 32000 40000 40000>;
+
+ qcom,corner-frequencies =
+ <300000000 403200000 480000000
+ 576000000 652800000 748800000
+ 825600000 902400000 979200000
+ 1056000000 1132800000 1228800000
+ 1324800000 1420800000 1516800000
+ 1612800000 1689600000 1766400000>;
+
+ qcom,cpr-ro-scaling-factor =
+ <2594 2795 2576 2761 2469 2673 2198
+ 2553 3188 3255 3191 2962 3055 2984
+ 2043 2947>,
+ <2594 2795 2576 2761 2469 2673 2198
+ 2553 3188 3255 3191 2962 3055 2984
+ 2043 2947>,
+ <2259 2389 2387 2531 2294 2464 2218
+ 2476 2525 2855 2817 2836 2740 2490
+ 1950 2632>,
+ <2259 2389 2387 2531 2294 2464 2218
+ 2476 2525 2855 2817 2836 2740 2490
+ 1950 2632>;
+
+ qcom,cpr-open-loop-voltage-fuse-adjustment =
+ <100000 100000 100000 100000>;
+
+ qcom,cpr-closed-loop-voltage-fuse-adjustment =
+ <100000 100000 100000 100000>;
+
+ qcom,allow-voltage-interpolation;
+ qcom,allow-quotient-interpolation;
+ qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+ qcom,cpr-aging-max-voltage-adjustment = <15000>;
+ qcom,cpr-aging-ref-corner = <18>;
+ qcom,cpr-aging-ro-scaling-factor = <1620>;
+ qcom,allow-aging-voltage-adjustment =
+ /* Speed bin 0 */
+ <0 1 1 1 1 1 1 1>,
+ /* Speed bin 1 */
+ <0 1 1 1 1 1 1 1>;
+ qcom,allow-aging-open-loop-voltage-adjustment =
+ <1>;
+ };
+ };
+
+ thread@1 {
+ qcom,cpr-thread-id = <1>;
+ qcom,cpr-consecutive-up = <0>;
+ qcom,cpr-consecutive-down = <0>;
+ qcom,cpr-up-threshold = <2>;
+ qcom,cpr-down-threshold = <2>;
+
+ apc0_l3_vreg: regulator {
+ regulator-name = "apc0_l3_corner";
+ regulator-min-microvolt = <1>;
+ regulator-max-microvolt = <14>;
+
+ qcom,cpr-fuse-corners = <4>;
+ qcom,cpr-fuse-combos = <16>;
+ qcom,cpr-speed-bins = <2>;
+ qcom,cpr-speed-bin-corners = <14 14>;
+ qcom,cpr-corners = <14>;
+
+ qcom,cpr-corner-fmax-map = <4 8 11 14>;
+
+ qcom,cpr-voltage-ceiling =
+ <828000 828000 828000 828000 828000
+ 828000 828000 828000 828000 828000
+ 828000 884000 884000 952000>;
+
+ qcom,cpr-voltage-floor =
+ <568000 568000 568000 568000 568000
+ 568000 568000 568000 568000 568000
+ 568000 568000 568000 568000>;
+
+ qcom,cpr-floor-to-ceiling-max-range =
+ <32000 32000 32000 32000 32000
+ 32000 32000 32000 32000 32000
+ 32000 32000 32000 40000>;
+
+ qcom,corner-frequencies =
+ <300000000 403200000 480000000
+ 576000000 652800000 748800000
+ 844800000 940800000 1036800000
+ 1132800000 1209600000 1305600000
+ 1401600000 1478400000>;
+
+ qcom,cpr-ro-scaling-factor =
+ <2857 3056 2828 2952 2699 2796 2447
+ 2631 2630 2579 2244 3343 3287 3137
+ 3164 2656>,
+ <2857 3056 2828 2952 2699 2796 2447
+ 2631 2630 2579 2244 3343 3287 3137
+ 3164 2656>,
+ <2439 2577 2552 2667 2461 2577 2394
+ 2536 2132 2307 2191 2903 2838 2912
+ 2501 2095>,
+ <2439 2577 2552 2667 2461 2577 2394
+ 2536 2132 2307 2191 2903 2838 2912
+ 2501 2095>;
+
+ qcom,cpr-open-loop-voltage-fuse-adjustment =
+ <100000 100000 100000 100000>;
+
+ qcom,cpr-closed-loop-voltage-fuse-adjustment =
+ <100000 100000 100000 100000>;
+
+ qcom,allow-voltage-interpolation;
+ qcom,allow-quotient-interpolation;
+ qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+ qcom,cpr-aging-max-voltage-adjustment = <15000>;
+ qcom,cpr-aging-ref-corner = <14>;
+ qcom,cpr-aging-ro-scaling-factor = <1620>;
+ qcom,allow-aging-voltage-adjustment =
+ /* Speed bin 0 */
+ <0 1 1 1 1 1 1 1>,
+ /* Speed bin 1 */
+ <0 1 1 1 1 1 1 1>;
+ qcom,allow-aging-open-loop-voltage-adjustment =
+ <1>;
+ };
+ };
+ };
+
+ apc1_cpr: cprh-ctrl@17db0000 {
+ compatible = "qcom,cprh-sdm845-v2-kbss-regulator";
+ reg = <0x17db0000 0x4000>,
+ <0x00784000 0x1000>,
+ <0x17830000 0x1000>;
+ reg-names = "cpr_ctrl", "fuse_base", "saw";
+ clocks = <&clock_gcc GCC_CPUSS_RBCPR_CLK>;
+ clock-names = "core_clk";
+ qcom,cpr-ctrl-name = "apc1";
+ qcom,cpr-controller-id = <1>;
+
+ qcom,cpr-sensor-time = <1000>;
+ qcom,cpr-loop-time = <5000000>;
+ qcom,cpr-idle-cycles = <15>;
+ qcom,cpr-up-down-delay-time = <3000>;
+ qcom,cpr-step-quot-init-min = <9>;
+ qcom,cpr-step-quot-init-max = <14>;
+ qcom,cpr-count-mode = <0>; /* All at once */
+ qcom,cpr-count-repeat = <20>;
+ qcom,cpr-down-error-step-limit = <1>;
+ qcom,cpr-up-error-step-limit = <1>;
+ qcom,cpr-corner-switch-delay-time = <1042>;
+ qcom,cpr-voltage-settling-time = <1760>;
+ qcom,cpr-reset-step-quot-loop-en;
+
+ qcom,apm-threshold-voltage = <800000>;
+ qcom,apm-crossover-voltage = <880000>;
+ qcom,mem-acc-threshold-voltage = <852000>;
+ qcom,mem-acc-crossover-voltage = <852000>;
+
+ qcom,voltage-step = <4000>;
+ qcom,voltage-base = <352000>;
+ qcom,cpr-saw-use-unit-mV;
+
+ qcom,saw-avs-ctrl = <0x101C031>;
+ qcom,saw-avs-limit = <0x4700470>;
+
+ qcom,cpr-enable;
+ qcom,cpr-hw-closed-loop;
+
+ qcom,cpr-panic-reg-addr-list =
+ <0x17db3a84 0x17830c18>;
+ qcom,cpr-panic-reg-name-list =
+ "APSS_GOLD_CPRH_STATUS_0", "GOLD_SAW4_PMIC_STS";
+
+ qcom,cpr-aging-ref-voltage = <1136000>;
+ vdd-supply = <&pm8998_s12>;
+
+ thread@0 {
+ qcom,cpr-thread-id = <0>;
+ qcom,cpr-consecutive-up = <0>;
+ qcom,cpr-consecutive-down = <0>;
+ qcom,cpr-up-threshold = <2>;
+ qcom,cpr-down-threshold = <2>;
+
+ apc1_perfcl_vreg: regulator {
+ regulator-name = "apc1_perfcl_corner";
+ regulator-min-microvolt = <1>;
+ regulator-max-microvolt = <33>;
+
+ qcom,cpr-fuse-corners = <5>;
+ qcom,cpr-fuse-combos = <16>;
+ qcom,cpr-speed-bins = <2>;
+ qcom,cpr-speed-bin-corners = <28 31>;
+ qcom,cpr-corners =
+ /* Speed bin 0 */
+ <28 28 28 28 28 28 28 28>,
+ /* Speed bin 1 */
+ <31 31 31 31 31 31 31 31>;
+
+ qcom,cpr-corner-fmax-map =
+ /* Speed bin 0 */
+ <7 14 22 27 28>,
+ /* Speed bin 1 */
+ <7 14 22 27 31>;
+
+ qcom,cpr-voltage-ceiling =
+ /* Speed bin 0 */
+ <828000 828000 828000 828000 828000
+ 828000 828000 828000 828000 828000
+ 828000 828000 828000 828000 828000
+ 828000 828000 828000 884000 884000
+ 884000 884000 1104000 1104000 1104000
+ 1104000 1136000 1136000>,
+ /* Speed bin 1 */
+ <828000 828000 828000 828000 828000
+ 828000 828000 828000 828000 828000
+ 828000 828000 828000 828000 828000
+ 828000 828000 828000 884000 884000
+ 884000 884000 1104000 1104000 1104000
+ 1104000 1136000 1136000 1136000 1136000
+ 1136000>;
+
+ qcom,cpr-voltage-floor =
+ /* Speed bin 0 */
+ <568000 568000 568000 568000 568000
+ 568000 568000 568000 568000 568000
+ 568000 568000 568000 568000 568000
+ 568000 568000 568000 568000 568000
+ 568000 568000 568000 568000 568000
+ 568000 568000 568000>,
+ /* Speed bin 1 */
+ <568000 568000 568000 568000 568000
+ 568000 568000 568000 568000 568000
+ 568000 568000 568000 568000 568000
+ 568000 568000 568000 568000 568000
+ 568000 568000 568000 568000 568000
+ 568000 568000 568000 568000 568000
+ 568000>;
+
+ qcom,cpr-floor-to-ceiling-max-range =
+ /* Speed bin 0 */
+ <32000 32000 32000 32000 32000
+ 32000 32000 32000 32000 32000
+ 32000 32000 32000 32000 32000
+ 32000 32000 32000 32000 32000
+ 32000 32000 32000 32000 32000
+ 32000 32000 32000>,
+ /* Speed bin 1 */
+ <32000 32000 32000 32000 32000
+ 32000 32000 32000 32000 32000
+ 32000 32000 32000 32000 32000
+ 32000 32000 32000 32000 32000
+ 32000 32000 32000 32000 32000
+ 32000 32000 40000 40000 40000
+ 40000>;
+
+ qcom,corner-frequencies =
+ /* Speed bin 0 */
+ <300000000 403200000 480000000
+ 576000000 652800000 748800000
+ 825600000 902400000 979200000
+ 1056000000 1132800000 1209600000
+ 1286400000 1363200000 1459200000
+ 1536000000 1612800000 1689600000
+ 1766400000 1843200000 1920000000
+ 1996800000 2092800000 2169600000
+ 2246400000 2323200000 2400000000
+ 2400000000>,
+ /* Speed bin 1 */
+ <300000000 403200000 480000000
+ 576000000 652800000 748800000
+ 825600000 902400000 979200000
+ 1056000000 1132800000 1209600000
+ 1286400000 1363200000 1459200000
+ 1536000000 1612800000 1689600000
+ 1766400000 1843200000 1920000000
+ 1996800000 2092800000 2169600000
+ 2246400000 2323200000 2400000000
+ 2476800000 2553600000 2630400000
+ 2707200000>;
+
+ qcom,cpr-ro-scaling-factor =
+ <2857 3056 2828 2952 2699 2796 2447
+ 2631 2630 2579 2244 3343 3287 3137
+ 3164 2656>,
+ <2857 3056 2828 2952 2699 2796 2447
+ 2631 2630 2579 2244 3343 3287 3137
+ 3164 2656>,
+ <2086 2208 2273 2408 2203 2327 2213
+ 2340 1755 2039 2049 2474 2437 2618
+ 2003 1675>,
+ <2086 2208 2273 2408 2203 2327 2213
+ 2340 1755 2039 2049 2474 2437 2618
+ 2003 1675>,
+ <2086 2208 2273 2408 2203 2327 2213
+ 2340 1755 2039 2049 2474 2437 2618
+ 2003 1675>;
+
+ qcom,cpr-open-loop-voltage-fuse-adjustment =
+ <100000 100000 100000 100000 100000>;
+
+ qcom,cpr-closed-loop-voltage-fuse-adjustment =
+ <100000 100000 100000 100000 100000>;
+
+ qcom,allow-voltage-interpolation;
+ qcom,allow-quotient-interpolation;
+ qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+ qcom,cpr-aging-max-voltage-adjustment = <15000>;
+ qcom,cpr-aging-ref-corner = <27 31>;
+ qcom,cpr-aging-ro-scaling-factor = <1700>;
+ qcom,allow-aging-voltage-adjustment =
+ /* Speed bin 0 */
+ <0 1 1 1 1 1 1 1>,
+ /* Speed bin 1 */
+ <0 1 1 1 1 1 1 1>;
+ qcom,allow-aging-open-loop-voltage-adjustment =
+ <1>;
+ };
+ };
+ };
+};
+
+&clock_cpucc {
+ compatible = "qcom,clk-cpu-osm-v2";
+
+ vdd-l3-supply = <&apc0_l3_vreg>;
+ vdd-pwrcl-supply = <&apc0_pwrcl_vreg>;
+ vdd-perfcl-supply = <&apc1_perfcl_vreg>;
+
+ qcom,l3-speedbin0-v0 =
+ < 300000000 0x000c000f 0x00002020 0x1 1 >,
+ < 403200000 0x500c0115 0x00002020 0x1 2 >,
+ < 480000000 0x50140219 0x00002020 0x1 3 >,
+ < 576000000 0x5014031e 0x00002020 0x1 4 >,
+ < 652800000 0x401c0422 0x00002020 0x1 5 >,
+ < 748800000 0x401c0527 0x00002020 0x1 6 >,
+ < 844800000 0x4024062c 0x00002323 0x2 7 >,
+ < 940800000 0x40240731 0x00002727 0x2 8 >,
+ < 1036800000 0x40240836 0x00002b2b 0x2 9 >,
+ < 1132800000 0x402c093b 0x00002f2f 0x2 10 >,
+ < 1209600000 0x402c0a3f 0x00003232 0x2 11 >,
+ < 1305600000 0x40340b44 0x00003636 0x2 12 >,
+ < 1401600000 0x40340c49 0x00003a3a 0x2 13 >,
+ < 1478400000 0x403c0d4d 0x00003e3e 0x2 14 >;
+
+ qcom,pwrcl-speedbin0-v0 =
+ < 300000000 0x000c000f 0x00002020 0x1 1 >,
+ < 403200000 0x500c0115 0x00002020 0x1 2 >,
+ < 480000000 0x50140219 0x00002020 0x1 3 >,
+ < 576000000 0x5014031e 0x00002020 0x1 4 >,
+ < 652800000 0x401c0422 0x00002020 0x1 5 >,
+ < 748800000 0x401c0527 0x00002020 0x1 6 >,
+ < 825600000 0x401c062b 0x00002222 0x1 7 >,
+ < 902400000 0x4024072f 0x00002626 0x1 8 >,
+ < 979200000 0x40240833 0x00002929 0x1 9 >,
+ < 1056000000 0x402c0937 0x00002c2c 0x2 10 >,
+ < 1132800000 0x402c0a3b 0x00002f2f 0x2 11 >,
+ < 1228800000 0x402c0b40 0x00003333 0x2 12 >,
+ < 1324800000 0x40340c45 0x00003737 0x2 13 >,
+ < 1420800000 0x40340d4a 0x00003b3b 0x2 14 >,
+ < 1516800000 0x403c0e4f 0x00003f3f 0x2 15 >,
+ < 1612800000 0x403c0f54 0x00004343 0x2 16 >,
+ < 1689600000 0x40441058 0x00004646 0x2 17 >,
+ < 1766400000 0x4044115c 0x00004a4a 0x2 18 >;
+
+ qcom,perfcl-speedbin0-v0 =
+ < 300000000 0x000c000f 0x00002020 0x1 1 >,
+ < 403200000 0x500c0115 0x00002020 0x1 2 >,
+ < 480000000 0x50140219 0x00002020 0x1 3 >,
+ < 576000000 0x5014031e 0x00002020 0x1 4 >,
+ < 652800000 0x401c0422 0x00002020 0x1 5 >,
+ < 748800000 0x401c0527 0x00002020 0x1 6 >,
+ < 825600000 0x401c062b 0x00002222 0x1 7 >,
+ < 902400000 0x4024072f 0x00002626 0x1 8 >,
+ < 979200000 0x40240833 0x00002929 0x1 9 >,
+ < 1056000000 0x402c0937 0x00002c2c 0x1 10 >,
+ < 1132800000 0x402c0a3b 0x00002f2f 0x1 11 >,
+ < 1209600000 0x402c0b3f 0x00003232 0x2 12 >,
+ < 1286400000 0x40340c43 0x00003636 0x2 13 >,
+ < 1363200000 0x40340d47 0x00003939 0x2 14 >,
+ < 1459200000 0x403c0e4c 0x00003d3d 0x2 15 >,
+ < 1536000000 0x403c0f50 0x00004040 0x2 16 >,
+ < 1612800000 0x403c1054 0x00004343 0x2 17 >,
+ < 1689600000 0x40441158 0x00004646 0x2 18 >,
+ < 1766400000 0x4044125c 0x00004a4a 0x2 19 >,
+ < 1843200000 0x40441360 0x00004d4d 0x2 20 >,
+ < 1920000000 0x404c1464 0x00005050 0x2 21 >,
+ < 1996800000 0x404c1568 0x00005353 0x2 22 >,
+ < 2092800000 0x4054166d 0x00005757 0x2 23 >,
+ < 2169600000 0x40541771 0x00005a5a 0x2 24 >,
+ < 2246400000 0x40541875 0x00005e5e 0x2 25 >,
+ < 2323200000 0x40541979 0x00006161 0x2 26 >,
+ < 2400000000 0x40541a7d 0x00006464 0x2 27 >;
+
+ qcom,perfcl-speedbin1-v0 =
+ < 300000000 0x000c000f 0x00002020 0x1 1 >,
+ < 403200000 0x500c0115 0x00002020 0x1 2 >,
+ < 480000000 0x50140219 0x00002020 0x1 3 >,
+ < 576000000 0x5014031e 0x00002020 0x1 4 >,
+ < 652800000 0x401c0422 0x00002020 0x1 5 >,
+ < 748800000 0x401c0527 0x00002020 0x1 6 >,
+ < 825600000 0x401c062b 0x00002222 0x1 7 >,
+ < 902400000 0x4024072f 0x00002626 0x1 8 >,
+ < 979200000 0x40240833 0x00002929 0x1 9 >,
+ < 1056000000 0x402c0937 0x00002c2c 0x1 10 >,
+ < 1132800000 0x402c0a3b 0x00002f2f 0x1 11 >,
+ < 1209600000 0x402c0b3f 0x00003232 0x2 12 >,
+ < 1286400000 0x40340c43 0x00003636 0x2 13 >,
+ < 1363200000 0x40340d47 0x00003939 0x2 14 >,
+ < 1459200000 0x403c0e4c 0x00003d3d 0x2 15 >,
+ < 1536000000 0x403c0f50 0x00004040 0x2 16 >,
+ < 1612800000 0x403c1054 0x00004343 0x2 17 >,
+ < 1689600000 0x40441158 0x00004646 0x2 18 >,
+ < 1766400000 0x4044125c 0x00004a4a 0x2 19 >,
+ < 1843200000 0x40441360 0x00004d4d 0x2 20 >,
+ < 1920000000 0x404c1464 0x00005050 0x2 21 >,
+ < 1996800000 0x404c1568 0x00005353 0x2 22 >,
+ < 2092800000 0x4054166d 0x00005757 0x2 23 >,
+ < 2169600000 0x40541771 0x00005a5a 0x2 24 >,
+ < 2246400000 0x40541875 0x00005e5e 0x2 25 >,
+ < 2323200000 0x40541979 0x00006161 0x2 26 >,
+ < 2400000000 0x40541a7d 0x00006464 0x2 27 >,
+ < 2476800000 0x40541b81 0x00006767 0x2 28 >,
+ < 2553600000 0x40541c85 0x00006a6a 0x2 29 >,
+ < 2630400000 0x40541d89 0x00006e6e 0x2 30 >,
+ < 2707200000 0x40541e8d 0x00007171 0x2 31 >;
+
+ qcom,l3-memacc-level-vc-bin0 = <8 13>;
+
+ qcom,pwrcl-memacc-level-vc-bin0 = <12 16>;
+
+ qcom,perfcl-memacc-level-vc-bin0 = <14 22>;
+ qcom,perfcl-memacc-level-vc-bin1 = <14 22>;
};
&clock_gcc {
- compatible = "qcom,gcc-sdm845-v2";
+ compatible = "qcom,gcc-sdm845-v2", "syscon";
};
&clock_camcc {
- compatible = "qcom,cam_cc-sdm845-v2";
+ compatible = "qcom,cam_cc-sdm845-v2", "syscon";
};
&clock_dispcc {
- compatible = "qcom,dispcc-sdm845-v2";
+ compatible = "qcom,dispcc-sdm845-v2", "syscon";
+};
+
+&clock_gpucc {
+ compatible = "qcom,gpucc-sdm845-v2", "syscon";
+};
+
+&clock_gfx {
+ compatible = "qcom,gfxcc-sdm845-v2";
};
&clock_videocc {
- compatible = "qcom,video_cc-sdm845-v2";
+ compatible = "qcom,video_cc-sdm845-v2", "syscon";
+};
+
+&clock_aop {
+ compatible = "qcom,aop-qmp-clk-v2";
+};
+
+&msm_vidc {
+ qcom,allowed-clock-rates = <100000000 200000000 330000000
+ 404000000 444000000 533000000>;
+};
+
+&spss_utils {
+ qcom,spss-dev-firmware-name = "spss2d"; /* 8 chars max */
+ qcom,spss-test-firmware-name = "spss2t"; /* 8 chars max */
+ qcom,spss-prod-firmware-name = "spss2p"; /* 8 chars max */
+};
+
+&mdss_mdp {
+ clock-max-rate = <0 0 0 0 430000000 19200000 0>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi b/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
index 71c521a..42299cd 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
@@ -45,8 +45,8 @@
"bus_clk", "core0_clk", "core0_bus_clk",
"core1_clk", "core1_bus_clk";
qcom,clock-configs = <0x1 0x0 0x0 0x1 0x0 0x1 0x0>;
- qcom,allowed-clock-rates = <200000000 320000000 380000000
- 444000000 533000000>;
+ qcom,allowed-clock-rates = <100000000 200000000 320000000
+ 380000000 444000000 533000000>;
/* Buses */
bus_cnoc {
@@ -79,7 +79,7 @@
label = "venus-llcc";
qcom,bus-master = <MSM_BUS_MASTER_VIDEO_P0>;
qcom,bus-slave = <MSM_BUS_SLAVE_LLCC>;
- qcom,bus-governor = "performance";
+ qcom,bus-governor = "msm-vidc-llcc";
qcom,bus-range-kbps = <17000 125700>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 1eaeb59..d4ab8f4 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -29,7 +29,7 @@
/ {
model = "Qualcomm Technologies, Inc. SDM845";
compatible = "qcom,sdm845";
- qcom,msm-id = <321 0x0>;
+ qcom,msm-id = <321 0x10000>;
interrupt-parent = <&pdc>;
aliases {
@@ -476,6 +476,10 @@
method = "smc";
};
+ chosen {
+ bootargs = "rcupdate.rcu_expedited=1";
+ };
+
soc: soc { };
vendor: vendor {
@@ -511,12 +515,17 @@
#size-cells = <2>;
ranges;
- removed_region1: removed_region1@85700000 {
+ hyp_region: hyp_region@85700000 {
no-map;
- reg = <0 0x85700000 0 0x800000>;
+ reg = <0 0x85700000 0 0x600000>;
};
- removed_region2: removed_region2@85fc0000 {
+ xbl_region: xbl_region@85e00000 {
+ no-map;
+ reg = <0 0x85e00000 0 0x100000>;
+ };
+
+ removed_region: removed_region@85fc0000 {
no-map;
reg = <0 0x85fc0000 0 0x2f40000>;
};
@@ -527,64 +536,70 @@
reg = <0 0x8ab00000 0 0x500000>;
};
- pil_modem_mem: modem_region@8b000000 {
+ pil_ipa_fw_mem: pil_ipa_fw_region@8b000000 {
compatible = "removed-dma-pool";
no-map;
- reg = <0 0x8b000000 0 0x7300000>;
+ reg = <0 0x8b000000 0 0x10000>;
};
- pil_video_mem: pil_video_region@92300000 {
+ pil_ipa_gsi_mem: pil_ipa_gsi_region@8b010000 {
compatible = "removed-dma-pool";
no-map;
- reg = <0 0x92300000 0 0x500000>;
+ reg = <0 0x8b010000 0 0x5000>;
};
- pil_cdsp_mem: cdsp_regions@92800000 {
+ pil_gpu_mem: pil_gpu_region@8b015000 {
compatible = "removed-dma-pool";
no-map;
- reg = <0 0x92800000 0 0x800000>;
+ reg = <0 0x8b015000 0 0x1000>;
};
- pil_adsp_mem: pil_adsp_region@93000000 {
+ pil_adsp_mem: pil_adsp_region@8b100000 {
compatible = "removed-dma-pool";
no-map;
- reg = <0 0x93000000 0 0x1a00000>;
+ reg = <0 0x8b100000 0 0x1a00000>;
};
- pil_mba_mem: pil_mba_region@0x94a00000 {
- compatible = "removed-dma-pool";
- no-map;
- reg = <0 0x94a00000 0 0x200000>;
+ wlan_fw_region: wlan_fw_region@8cb00000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x8cb00000 0 0x100000>;
};
- pil_slpi_mem: pil_slpi_region@94c00000 {
+ pil_modem_mem: modem_region@8cc00000 {
compatible = "removed-dma-pool";
no-map;
- reg = <0 0x94c00000 0 0x1400000>;
+ reg = <0 0x8cc00000 0 0x7600000>;
};
- pil_ipa_fw_mem: pil_ipa_fw_region@96000000 {
+ pil_video_mem: pil_video_region@94200000 {
compatible = "removed-dma-pool";
no-map;
- reg = <0 0x96000000 0 0x10000>;
+ reg = <0 0x94200000 0 0x500000>;
};
- pil_ipa_gsi_mem: pil_ipa_gsi_region@96010000 {
+ pil_cdsp_mem: cdsp_regions@94700000 {
compatible = "removed-dma-pool";
no-map;
- reg = <0 0x96010000 0 0x5000>;
+ reg = <0 0x94700000 0 0x800000>;
};
- pil_gpu_mem: pil_gpu_region@96015000 {
+ pil_mba_mem: pil_mba_region@0x94f00000 {
compatible = "removed-dma-pool";
no-map;
- reg = <0 0x96015000 0 0x1000>;
+ reg = <0 0x94f00000 0 0x200000>;
};
- pil_spss_mem: spss_region@96100000 {
+ pil_slpi_mem: pil_slpi_region@95100000 {
compatible = "removed-dma-pool";
no-map;
- reg = <0 0x96100000 0 0x100000>;
+ reg = <0 0x95100000 0 0x1400000>;
+ };
+
+
+ pil_spss_mem: spss_region@96500000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0 0x96500000 0 0x100000>;
};
adsp_mem: adsp_region {
@@ -598,12 +613,12 @@
qseecom_mem: qseecom_region {
compatible = "shared-dma-pool";
alloc-ranges = <0 0x00000000 0 0xffffffff>;
- reusable;
+ no-map;
alignment = <0 0x400000>;
size = <0 0x1400000>;
};
- sp_mem: sp_region { /* SPSS-HLOS ION shared mem */
+ secure_sp_mem: secure_sp_region { /* SPSS-HLOS ION shared mem */
compatible = "shared-dma-pool";
alloc-ranges = <0 0x00000000 0 0xffffffff>; /* 32-bit */
reusable;
@@ -639,8 +654,8 @@
#include "msm-gdsc-sdm845.dtsi"
#include "sdm845-sde-pll.dtsi"
+#include "msm-rdbg.dtsi"
#include "sdm845-sde.dtsi"
-#include "sdm845-sde-display.dtsi"
#include "sdm845-qupv3.dtsi"
&soc {
@@ -736,6 +751,12 @@
reg-names = "pshold-base", "tcsr-boot-misc-detect";
};
+ aop-msg-client {
+ compatible = "qcom,debugfs-qmp-client";
+ mboxes = <&qmp_aop 0>;
+ mbox-names = "aop";
+ };
+
spmi_bus: qcom,spmi@c440000 {
compatible = "qcom,spmi-pmic-arb";
reg = <0xc440000 0x1100>,
@@ -759,16 +780,18 @@
compatible = "qcom,spmi-pmic-arb-debug";
reg = <0x6b22000 0x60>, <0x7820A8 4>;
reg-names = "core", "fuse";
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "core_clk";
qcom,fuse-disable-bit = <12>;
#address-cells = <2>;
#size-cells = <0>;
- status = "disabled";
qcom,pm8998-debug@0 {
compatible = "qcom,spmi-pmic";
reg = <0x0 SPMI_USID>;
#address-cells = <2>;
#size-cells = <0>;
+ qcom,can-sleep;
};
qcom,pm8998-debug@1 {
@@ -776,6 +799,7 @@
reg = <0x1 SPMI_USID>;
#address-cells = <2>;
#size-cells = <0>;
+ qcom,can-sleep;
};
qcom,pmi8998-debug@2 {
@@ -783,6 +807,7 @@
reg = <0x2 SPMI_USID>;
#address-cells = <2>;
#size-cells = <0>;
+ qcom,can-sleep;
};
qcom,pmi8998-debug@3 {
@@ -790,6 +815,7 @@
reg = <0x3 SPMI_USID>;
#address-cells = <2>;
#size-cells = <0>;
+ qcom,can-sleep;
};
qcom,pm8005-debug@4 {
@@ -797,6 +823,7 @@
reg = <0x4 SPMI_USID>;
#address-cells = <2>;
#size-cells = <0>;
+ qcom,can-sleep;
};
qcom,pm8005-debug@5 {
@@ -804,6 +831,7 @@
reg = <0x5 SPMI_USID>;
#address-cells = <2>;
#size-cells = <0>;
+ qcom,can-sleep;
};
};
@@ -918,10 +946,9 @@
qcom,core-dev-table =
< 300000 762 >,
< 748800 1720 >,
- < 979200 2929 >,
- < 1209600 3879 >,
- < 1516800 4943 >,
- < 1593600 5931 >;
+ < 1132800 2086 >,
+ < 1440000 2929 >,
+ < 1593600 3879 >;
};
devfreq_memlat_4: qcom,cpu4-memlat-mon {
@@ -931,10 +958,12 @@
qcom,cachemiss-ev = <0x2A>;
qcom,core-dev-table =
< 300000 762 >,
+ < 499200 1720 >,
+ < 806400 2086 >,
< 1036800 2929 >,
< 1190400 3879 >,
< 1574400 4943 >,
- < 1804800 5931 >,
+ < 1728000 5931 >,
< 1958400 6881 >;
};
@@ -943,7 +972,6 @@
clock-names = "devfreq_clk";
clocks = <&clock_cpucc L3_CLUSTER0_VOTE_CLK>;
governor = "performance";
- qcom,prepare-clk;
};
l3_cpu4: qcom,l3-cpu4 {
@@ -951,7 +979,6 @@
clock-names = "devfreq_clk";
clocks = <&clock_cpucc L3_CLUSTER1_VOTE_CLK>;
governor = "performance";
- qcom,prepare-clk;
};
devfreq_l3lat_0: qcom,cpu0-l3lat-mon {
@@ -960,12 +987,13 @@
qcom,target-dev = <&l3_cpu0>;
qcom,cachemiss-ev = <0x17>;
qcom,core-dev-table =
- < 300000 300000 >,
- < 748800 576000 >,
- < 979200 652800 >,
- < 1209600 806400 >,
- < 1516800 883200 >,
- < 1593600 960000 >;
+ < 300000 300000000 >,
+ < 748800 576000000 >,
+ < 979200 652800000 >,
+ < 1209600 806400000 >,
+ < 1516800 883200000 >,
+ < 1593600 960000000 >,
+ < 1708800 1305600000 >;
};
devfreq_l3lat_4: qcom,cpu4-l3lat-mon {
@@ -974,11 +1002,12 @@
qcom,target-dev = <&l3_cpu4>;
qcom,cachemiss-ev = <0x17>;
qcom,core-dev-table =
- < 300000 300000 >,
- < 1036800 652800 >,
- < 1190400 806400 >,
- < 1574400 883200 >,
- < 1651200 960000 >;
+ < 300000 300000000 >,
+ < 1036800 576000000 >,
+ < 1190400 806400000 >,
+ < 1574400 883200000 >,
+ < 1804800 960000000 >,
+ < 1958400 1305600000 >;
};
cpu_pmu: cpu-pmu {
@@ -1135,8 +1164,8 @@
< 652800000 0x401c0422 0x00002020 0x1 5 >,
< 729600000 0x401c0526 0x00002020 0x1 6 >,
< 806400000 0x401c062a 0x00002222 0x1 7 >,
- < 883200000 0x4024072e 0x00002525 0x2 8 >,
- < 960000000 0x40240832 0x00002828 0x2 9 >;
+ < 883200000 0x4024072e 0x00002525 0x1 8 >,
+ < 960000000 0x40240832 0x00002828 0x1 9 >;
qcom,l3-speedbin1-v0 =
< 300000000 0x000c000f 0x00002020 0x1 1 >,
@@ -1146,10 +1175,10 @@
< 652800000 0x401c0422 0x00002020 0x1 5 >,
< 729600000 0x401c0526 0x00002020 0x1 6 >,
< 806400000 0x401c062a 0x00002222 0x1 7 >,
- < 883200000 0x4024072e 0x00002525 0x2 8 >,
- < 960000000 0x40240832 0x00002828 0x2 9 >,
- < 1036800000 0x40240936 0x00002b2b 0x3 10 >,
- < 1094400000 0x402c0a39 0x00002e2e 0x3 11 >;
+ < 883200000 0x4024072e 0x00002525 0x1 8 >,
+ < 960000000 0x40240832 0x00002828 0x1 9 >,
+ < 1036800000 0x40240936 0x00002b2b 0x1 10 >,
+ < 1094400000 0x402c0a39 0x00002e2e 0x1 11 >;
qcom,l3-speedbin2-v0 =
< 300000000 0x000c000f 0x00002020 0x1 1 >,
@@ -1159,12 +1188,12 @@
< 652800000 0x401c0422 0x00002020 0x1 5 >,
< 729600000 0x401c0526 0x00002020 0x1 6 >,
< 806400000 0x401c062a 0x00002222 0x1 7 >,
- < 883200000 0x4024072e 0x00002525 0x2 8 >,
- < 960000000 0x40240832 0x00002828 0x2 9 >,
- < 1036800000 0x40240936 0x00002b2b 0x3 10 >,
- < 1113600000 0x402c0a3a 0x00002e2e 0x3 11 >,
- < 1209600000 0x402c0b3f 0x00003232 0x3 12 >,
- < 1305600000 0x40340c44 0x00003636 0x3 13 >;
+ < 883200000 0x4024072e 0x00002525 0x1 8 >,
+ < 960000000 0x40240832 0x00002828 0x1 9 >,
+ < 1036800000 0x40240936 0x00002b2b 0x1 10 >,
+ < 1113600000 0x402c0a3a 0x00002e2e 0x1 11 >,
+ < 1209600000 0x402c0b3f 0x00003232 0x1 12 >,
+ < 1305600000 0x40340c44 0x00003636 0x1 13 >;
qcom,pwrcl-speedbin0-v0 =
< 300000000 0x000c000f 0x00002020 0x1 1 >,
@@ -1179,11 +1208,11 @@
< 1056000000 0x402c0937 0x00002c2c 0x1 10 >,
< 1132800000 0x402c0a3b 0x00002f2f 0x1 11 >,
< 1209600000 0x402c0b3f 0x00003232 0x1 12 >,
- < 1286400000 0x40340c43 0x00003636 0x2 13 >,
- < 1363200000 0x40340d47 0x00003939 0x2 14 >,
- < 1440000000 0x40340e4b 0x00003c3c 0x2 15 >,
- < 1516800000 0x403c0f4f 0x00003f3f 0x2 16 >,
- < 1593600000 0x403c1053 0x00004242 0x2 17 >;
+ < 1286400000 0x40340c43 0x00003636 0x1 13 >,
+ < 1363200000 0x40340d47 0x00003939 0x1 14 >,
+ < 1440000000 0x40340e4b 0x00003c3c 0x1 15 >,
+ < 1516800000 0x403c0f4f 0x00003f3f 0x1 16 >,
+ < 1593600000 0x403c1053 0x00004242 0x1 17 >;
qcom,pwrcl-speedbin1-v0 =
< 300000000 0x000c000f 0x00002020 0x1 1 >,
@@ -1198,13 +1227,13 @@
< 1056000000 0x402c0937 0x00002c2c 0x1 10 >,
< 1132800000 0x402c0a3b 0x00002f2f 0x1 11 >,
< 1209600000 0x402c0b3f 0x00003232 0x1 12 >,
- < 1286400000 0x40340c43 0x00003636 0x2 13 >,
- < 1363200000 0x40340d47 0x00003939 0x2 14 >,
- < 1440000000 0x40340e4b 0x00003c3c 0x2 15 >,
- < 1516800000 0x403c0f4f 0x00003f3f 0x2 16 >,
- < 1593600000 0x403c1053 0x00004242 0x2 17 >,
- < 1651200000 0x403c1156 0x00004545 0x3 18 >,
- < 1708800000 0x40441259 0x00004747 0x3 19 >;
+ < 1286400000 0x40340c43 0x00003636 0x1 13 >,
+ < 1363200000 0x40340d47 0x00003939 0x1 14 >,
+ < 1440000000 0x40340e4b 0x00003c3c 0x1 15 >,
+ < 1516800000 0x403c0f4f 0x00003f3f 0x1 16 >,
+ < 1593600000 0x403c1053 0x00004242 0x1 17 >,
+ < 1651200000 0x403c1156 0x00004545 0x1 18 >,
+ < 1708800000 0x40441259 0x00004747 0x1 19 >;
qcom,pwrcl-speedbin2-v0 =
< 300000000 0x000c000f 0x00002020 0x1 1 >,
@@ -1219,13 +1248,13 @@
< 1056000000 0x402c0937 0x00002c2c 0x1 10 >,
< 1132800000 0x402c0a3b 0x00002f2f 0x1 11 >,
< 1209600000 0x402c0b3f 0x00003232 0x1 12 >,
- < 1286400000 0x40340c43 0x00003636 0x2 13 >,
- < 1363200000 0x40340d47 0x00003939 0x2 14 >,
- < 1440000000 0x40340e4b 0x00003c3c 0x2 15 >,
- < 1516800000 0x403c0f4f 0x00003f3f 0x2 16 >,
- < 1593600000 0x403c1053 0x00004242 0x2 17 >,
- < 1670400000 0x40441157 0x00004646 0x3 18 >,
- < 1747200000 0x4044125b 0x00004949 0x3 19 >;
+ < 1286400000 0x40340c43 0x00003636 0x1 13 >,
+ < 1363200000 0x40340d47 0x00003939 0x1 14 >,
+ < 1440000000 0x40340e4b 0x00003c3c 0x1 15 >,
+ < 1516800000 0x403c0f4f 0x00003f3f 0x1 16 >,
+ < 1593600000 0x403c1053 0x00004242 0x1 17 >,
+ < 1670400000 0x40441157 0x00004646 0x1 18 >,
+ < 1747200000 0x4044125b 0x00004949 0x1 19 >;
qcom,perfcl-speedbin0-v0 =
< 300000000 0x000c000f 0x00002020 0x1 1 >,
@@ -1240,16 +1269,16 @@
< 1036800000 0x40240936 0x00002b2b 0x1 10 >,
< 1113600000 0x402c0a3a 0x00002e2e 0x1 11 >,
< 1190400000 0x402c0b3e 0x00003232 0x1 12 >,
- < 1267200000 0x40340c42 0x00003535 0x2 13 >,
- < 1344000000 0x40340d46 0x00003838 0x2 14 >,
- < 1420800000 0x40340e4a 0x00003b3b 0x2 15 >,
- < 1497600000 0x403c0f4e 0x00003e3e 0x2 16 >,
- < 1574400000 0x403c1052 0x00004242 0x2 17 >,
- < 1651200000 0x403c1156 0x00004545 0x2 18 >,
- < 1728000000 0x4044125a 0x00004848 0x3 19 >,
- < 1804800000 0x4044135e 0x00004b4b 0x3 20 >,
- < 1881600000 0x404c1462 0x00004e4e 0x3 21 >,
- < 1958400000 0x404c1566 0x00005252 0x3 22 >;
+ < 1267200000 0x40340c42 0x00003535 0x1 13 >,
+ < 1344000000 0x40340d46 0x00003838 0x1 14 >,
+ < 1420800000 0x40340e4a 0x00003b3b 0x1 15 >,
+ < 1497600000 0x403c0f4e 0x00003e3e 0x1 16 >,
+ < 1574400000 0x403c1052 0x00004242 0x1 17 >,
+ < 1651200000 0x403c1156 0x00004545 0x1 18 >,
+ < 1728000000 0x4044125a 0x00004848 0x1 19 >,
+ < 1804800000 0x4044135e 0x00004b4b 0x1 20 >,
+ < 1881600000 0x404c1462 0x00004e4e 0x1 21 >,
+ < 1958400000 0x404c1566 0x00005252 0x1 22 >;
qcom,perfcl-speedbin1-v0 =
< 300000000 0x000c000f 0x00002020 0x1 1 >,
@@ -1264,18 +1293,18 @@
< 1036800000 0x40240936 0x00002b2b 0x1 10 >,
< 1113600000 0x402c0a3a 0x00002e2e 0x1 11 >,
< 1190400000 0x402c0b3e 0x00003232 0x1 12 >,
- < 1267200000 0x40340c42 0x00003535 0x2 13 >,
- < 1344000000 0x40340d46 0x00003838 0x2 14 >,
- < 1420800000 0x40340e4a 0x00003b3b 0x2 15 >,
- < 1497600000 0x403c0f4e 0x00003e3e 0x2 16 >,
- < 1574400000 0x403c1052 0x00004242 0x2 17 >,
- < 1651200000 0x403c1156 0x00004545 0x2 18 >,
- < 1728000000 0x4044125a 0x00004848 0x3 19 >,
- < 1804800000 0x4044135e 0x00004b4b 0x3 20 >,
- < 1881600000 0x404c1462 0x00004e4e 0x3 21 >,
- < 1958400000 0x404c1566 0x00005252 0x3 22 >,
- < 2035200000 0x404c166a 0x00005555 0x3 23 >,
- < 2092800000 0x4054176d 0x00005757 0x3 24 >;
+ < 1267200000 0x40340c42 0x00003535 0x1 13 >,
+ < 1344000000 0x40340d46 0x00003838 0x1 14 >,
+ < 1420800000 0x40340e4a 0x00003b3b 0x1 15 >,
+ < 1497600000 0x403c0f4e 0x00003e3e 0x1 16 >,
+ < 1574400000 0x403c1052 0x00004242 0x1 17 >,
+ < 1651200000 0x403c1156 0x00004545 0x1 18 >,
+ < 1728000000 0x4044125a 0x00004848 0x1 19 >,
+ < 1804800000 0x4044135e 0x00004b4b 0x1 20 >,
+ < 1881600000 0x404c1462 0x00004e4e 0x1 21 >,
+ < 1958400000 0x404c1566 0x00005252 0x1 22 >,
+ < 2035200000 0x404c166a 0x00005555 0x1 23 >,
+ < 2092800000 0x4054176d 0x00005757 0x1 24 >;
qcom,perfcl-speedbin2-v0 =
< 300000000 0x000c000f 0x00002020 0x1 1 >,
@@ -1290,30 +1319,36 @@
< 1036800000 0x40240936 0x00002b2b 0x1 10 >,
< 1113600000 0x402c0a3a 0x00002e2e 0x1 11 >,
< 1190400000 0x402c0b3e 0x00003232 0x1 12 >,
- < 1267200000 0x40340c42 0x00003535 0x2 13 >,
- < 1344000000 0x40340d46 0x00003838 0x2 14 >,
- < 1420800000 0x40340e4a 0x00003b3b 0x2 15 >,
- < 1497600000 0x403c0f4e 0x00003e3e 0x2 16 >,
- < 1574400000 0x403c1052 0x00004242 0x2 17 >,
- < 1651200000 0x403c1156 0x00004545 0x2 18 >,
- < 1728000000 0x4044125a 0x00004848 0x3 19 >,
- < 1804800000 0x4044135e 0x00004b4b 0x3 20 >,
- < 1881600000 0x404c1462 0x00004e4e 0x3 21 >,
- < 1958400000 0x404c1566 0x00005252 0x3 22 >,
- < 2035200000 0x404c166a 0x00005555 0x3 23 >,
- < 2112000000 0x4054176e 0x00005858 0x3 24 >,
- < 2208000000 0x40541873 0x00005c5c 0x3 25 >;
+ < 1267200000 0x40340c42 0x00003535 0x1 13 >,
+ < 1344000000 0x40340d46 0x00003838 0x1 14 >,
+ < 1420800000 0x40340e4a 0x00003b3b 0x1 15 >,
+ < 1497600000 0x403c0f4e 0x00003e3e 0x1 16 >,
+ < 1574400000 0x403c1052 0x00004242 0x1 17 >,
+ < 1651200000 0x403c1156 0x00004545 0x1 18 >,
+ < 1728000000 0x4044125a 0x00004848 0x1 19 >,
+ < 1804800000 0x4044135e 0x00004b4b 0x1 20 >,
+ < 1881600000 0x404c1462 0x00004e4e 0x1 21 >,
+ < 1958400000 0x404c1566 0x00005252 0x1 22 >,
+ < 2035200000 0x404c166a 0x00005555 0x1 23 >,
+ < 2112000000 0x4054176e 0x00005858 0x1 24 >,
+ < 2208000000 0x40541873 0x00005c5c 0x1 25 >;
- qcom,l3-min-cpr-vc-bin0 = <7>;
- qcom,pwrcl-min-cpr-vc-bin0 = <6>;
- qcom,perfcl-min-cpr-vc-bin0 = <7>;
+ qcom,l3-memacc-level-vc-bin0 = <7 63>;
+ qcom,l3-memacc-level-vc-bin1 = <7 9>;
+ qcom,l3-memacc-level-vc-bin2 = <7 9>;
+
+ qcom,pwrcl-memacc-level-vc-bin0 = <12 63>;
+ qcom,pwrcl-memacc-level-vc-bin1 = <12 17>;
+ qcom,pwrcl-memacc-level-vc-bin2 = <12 17>;
+
+ qcom,perfcl-memacc-level-vc-bin0 = <12 18>;
+ qcom,perfcl-memacc-level-vc-bin1 = <12 18>;
+ qcom,perfcl-memacc-level-vc-bin2 = <12 18>;
qcom,up-timer =
<1000 1000 1000>;
qcom,down-timer =
<100000 100000 100000>;
- qcom,pc-override-index =
- <0 0 0>;
qcom,set-ret-inactive;
qcom,enable-llm-freq-vote;
qcom,llm-freq-up-timer =
@@ -1377,7 +1412,7 @@
};
clock_aop: qcom,aopclk {
- compatible = "qcom,aop-qmp-clk";
+ compatible = "qcom,aop-qmp-clk-v1";
#clock-cells = <1>;
mboxes = <&qmp_aop 0>;
mbox-names = "qdss_clk";
@@ -1661,11 +1696,18 @@
<81 512 1338562 4096000>,
<1 608 1338562 4096000>;
qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000 50000000
- 100000000 200000000 4294967295>;
+ 100750000 200000000 4294967295>;
qcom,sdr104-wa;
- qcom,devfreq,freq-table = <50000000 200000000>;
+ qcom,restore-after-cx-collapse;
+
+ qcom,clk-rates = <400000 20000000 25000000
+ 50000000 100000000 201500000>;
+ qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50",
+ "SDR104";
+
+ qcom,devfreq,freq-table = <50000000 201500000>;
clocks = <&clock_gcc GCC_SDCC2_AHB_CLK>,
<&clock_gcc GCC_SDCC2_APPS_CLK>;
clock-names = "iface_clk", "core_clk";
@@ -1808,6 +1850,15 @@
interrupt-names = "slimbus_irq", "slimbus_bam_irq";
qcom,apps-ch-pipes = <0x780000>;
qcom,ea-pc = <0x270>;
+ qcom,iommu-s1-bypass;
+
+ iommu_slim_aud_ctrl_cb: qcom,iommu_slim_ctrl_cb {
+ compatible = "qcom,iommu-slim-ctrl-cb";
+ iommus = <&apps_smmu 0x1806 0x0>,
+ <&apps_smmu 0x180d 0x0>,
+ <&apps_smmu 0x180e 0x1>,
+ <&apps_smmu 0x1810 0x1>;
+ };
};
slim_qca: slim@17240000 {
@@ -1819,6 +1870,12 @@
reg-names = "slimbus_physical", "slimbus_bam_physical";
interrupts = <0 291 0>, <0 292 0>;
interrupt-names = "slimbus_irq", "slimbus_bam_irq";
+ qcom,iommu-s1-bypass;
+
+ iommu_slim_qca_ctrl_cb: qcom,iommu_slim_ctrl_cb {
+ compatible = "qcom,iommu-slim-ctrl-cb";
+ iommus = <&apps_smmu 0x1813 0x0>;
+ };
/* Slimbus Slave DT for WCN3990 */
btfmslim_codec: wcn3990 {
@@ -1939,63 +1996,75 @@
compatible = "qcom,msm-fastrpc-compute-cb";
label = "cdsprpc-smd";
iommus = <&apps_smmu 0x1401 0x30>;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb2 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "cdsprpc-smd";
iommus = <&apps_smmu 0x1402 0x30>;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb3 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "cdsprpc-smd";
iommus = <&apps_smmu 0x1403 0x30>;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb4 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "cdsprpc-smd";
iommus = <&apps_smmu 0x1404 0x30>;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb5 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "cdsprpc-smd";
iommus = <&apps_smmu 0x1405 0x30>;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb6 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "cdsprpc-smd";
iommus = <&apps_smmu 0x1406 0x30>;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb7 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "cdsprpc-smd";
iommus = <&apps_smmu 0x1407 0x30>;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb8 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "cdsprpc-smd";
iommus = <&apps_smmu 0x1408 0x30>;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb9 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "cdsprpc-smd";
qcom,secure-context-bank;
iommus = <&apps_smmu 0x1409 0x30>;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb10 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "cdsprpc-smd";
qcom,secure-context-bank;
iommus = <&apps_smmu 0x140A 0x30>;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb11 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "adsprpc-smd";
iommus = <&apps_smmu 0x1823 0x0>;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb12 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "adsprpc-smd";
iommus = <&apps_smmu 0x1824 0x0>;
+ dma-coherent;
};
};
@@ -2209,7 +2278,6 @@
compatible = "qcom,sdm845-llcc";
#cache-cells = <1>;
max-slices = <32>;
- qcom,dump-size = <0x3c0000>;
};
qcom,llcc-erp {
@@ -2223,19 +2291,19 @@
};
LLCC_1: llcc_1_dcache {
- qcom,dump-size = <0xd8000>;
+ qcom,dump-size = <0x114100>;
};
LLCC_2: llcc_2_dcache {
- qcom,dump-size = <0xd8000>;
+ qcom,dump-size = <0x114100>;
};
LLCC_3: llcc_3_dcache {
- qcom,dump-size = <0xd8000>;
+ qcom,dump-size = <0x114100>;
};
LLCC_4: llcc_4_dcache {
- qcom,dump-size = <0xd8000>;
+ qcom,dump-size = <0x114100>;
};
};
@@ -2271,7 +2339,7 @@
qcom,rx-ring-size = <0x400>;
};
- qmp_aop: mailbox@1799000c {
+ qmp_aop: qcom,qmp-aop@c300000 {
compatible = "qcom,qmp-mbox";
label = "aop";
reg = <0xc300000 0x100000>,
@@ -2279,6 +2347,7 @@
reg-names = "msgram", "irq-reg-base";
qcom,irq-mask = <0x1>;
interrupts = <0 389 1>;
+ priority = <0>;
mbox-desc-offset = <0x0>;
#mbox-cells = <1>;
};
@@ -2471,6 +2540,7 @@
qcom,xprt-linkid = <1>;
qcom,xprt-version = <1>;
qcom,fragmented-data;
+ qcom,dynamic-wakeup-source;
};
qcom,ipc_router_cdsp_xprt {
@@ -2588,6 +2658,7 @@
qcom,disk-encrypt-pipe-pair = <2>;
qcom,support-fde;
qcom,no-clock-support;
+ qcom,fde-key-size;
qcom,msm-bus,name = "qseecom-noc";
qcom,msm-bus,num-cases = <4>;
qcom,msm-bus,num-paths = <1>;
@@ -2654,6 +2725,13 @@
<&clock_gcc GCC_CE1_AXI_CLK>;
qcom,ce-opp-freq = <171430000>;
qcom,request-bw-before-clk;
+ qcom,smmu-s1-bypass;
+ iommus = <&apps_smmu 0x702 0x1>,
+ <&apps_smmu 0x712 0x1>;
+ };
+
+ qcom_msmhdcp: qcom,msm_hdcp {
+ compatible = "qcom,msm-hdcp";
};
qcom_crypto: qcrypto@1de0000 {
@@ -2688,6 +2766,9 @@
qcom,use-sw-ahash-algo;
qcom,use-sw-aead-algo;
qcom,use-sw-hmac-algo;
+ qcom,smmu-s1-bypass;
+ iommus = <&apps_smmu 0x704 0x3>,
+ <&apps_smmu 0x714 0x3>;
};
qcom,msm_gsi {
@@ -2741,17 +2822,17 @@
<90 512 80000 640000>,
<90 585 80000 640000>,
<1 676 80000 80000>,
- <143 777 0 150000000>,
+ <143 777 0 150>, /* IB defined for IPA clk in MHz*/
/* NOMINAL */
<90 512 206000 960000>,
<90 585 206000 960000>,
<1 676 206000 160000>,
- <143 777 0 300000000>,
+ <143 777 0 300>, /* IB defined for IPA clk in MHz*/
/* TURBO */
<90 512 206000 3600000>,
<90 585 206000 3600000>,
<1 676 206000 300000>,
- <143 777 0 355333333>;
+ <143 777 0 355>; /* IB defined for IPA clk in MHz*/
qcom,bus-vector-names = "MIN", "SVS", "NOMINAL", "TURBO";
/* IPA RAM mmap */
@@ -2942,6 +3023,62 @@
vdd-3.3-ch0-supply = <&pm8998_l25>;
qcom,vdd-0.8-cx-mx-config = <800000 800000>;
qcom,vdd-3.3-ch0-config = <3104000 3312000>;
+ qcom,smmu-s1-bypass;
+ };
+
+ qmi-tmd-devices {
+ compatible = "qcom,qmi_cooling_devices";
+
+ modem {
+ qcom,instance-id = <0x0>;
+
+ modem_pa: modem_pa {
+ qcom,qmi-dev-name = "pa";
+ #cooling-cells = <2>;
+ };
+
+ modem_proc: modem_proc {
+ qcom,qmi-dev-name = "modem";
+ #cooling-cells = <2>;
+ };
+
+ modem_current: modem_current {
+ qcom,qmi-dev-name = "modem_current";
+ #cooling-cells = <2>;
+ };
+
+ modem_vdd: modem_vdd {
+ qcom,qmi-dev-name = "cpuv_restriction_cold";
+ #cooling-cells = <2>;
+ };
+ };
+
+ adsp {
+ qcom,instance-id = <0x1>;
+
+ adsp_vdd: adsp_vdd {
+ qcom,qmi-dev-name = "cpuv_restriction_cold";
+ #cooling-cells = <2>;
+ };
+ };
+
+ cdsp {
+ qcom,instance-id = <0x43>;
+
+ cdsp_vdd: cdsp_vdd {
+ qcom,qmi-dev-name = "cpuv_restriction_cold";
+ #cooling-cells = <2>;
+ };
+ };
+
+ slpi {
+ qcom,instance-id = <0x53>;
+
+ slpi_vdd: slpi_vdd {
+ qcom,qmi-dev-name = "cpuv_restriction_cold";
+ #cooling-cells = <2>;
+ };
+ };
};
thermal_zones: thermal-zones {
@@ -3261,10 +3398,10 @@
};
};
- silver-virt-max-usr {
- polling-delay-passive = <100>;
- polling-delay = <100>;
- thermal-governor = "user_space";
+ silv-virt-max-step {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "step_wise";
trips {
silver-trip {
temperature = <120000>;
@@ -3274,10 +3411,10 @@
};
};
- gold-virt-max-usr {
- polling-delay-passive = <100>;
- polling-delay = <100>;
- thermal-governor = "user_space";
+ gold-virt-max-step {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "step_wise";
trips {
gold-trip {
temperature = <120000>;
@@ -3327,615 +3464,6 @@
};
};
- aoss0-lowf {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "low_limits_floor";
- thermal-sensors = <&tsens0 0>;
- tracks-low;
- trips {
- aoss0_trip: aoss0-trip {
- temperature = <5000>;
- hysteresis = <5000>;
- type = "passive";
- };
- };
- cooling-maps {
- cpu0_vdd_cdev {
- trip = <&aoss0_trip>;
- cooling-device = <&CPU0 4 4>;
- };
- cpu4_vdd_cdev {
- trip = <&aoss0_trip>;
- cooling-device = <&CPU4 9 9>;
- };
- gpu_vdd_cdev {
- trip = <&aoss0_trip>;
- cooling-device = <&msm_gpu 1 1>;
- };
- };
- };
-
- cpu0-silver-lowf {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "low_limits_floor";
- thermal-sensors = <&tsens0 1>;
- tracks-low;
- trips {
- cpu0_trip: cpu0-trip {
- temperature = <5000>;
- hysteresis = <5000>;
- type = "passive";
- };
- };
- cooling-maps {
- cpu0_vdd_cdev {
- trip = <&cpu0_trip>;
- cooling-device = <&CPU0 4 4>;
- };
- cpu4_vdd_cdev {
- trip = <&cpu0_trip>;
- cooling-device = <&CPU4 9 9>;
- };
- gpu_vdd_cdev {
- trip = <&cpu0_trip>;
- cooling-device = <&msm_gpu 1 1>;
- };
- };
- };
-
- cpu1-silver-lowf {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "low_limits_floor";
- thermal-sensors = <&tsens0 2>;
- tracks-low;
- trips {
- cpu1_trip: cpu1-trip {
- temperature = <5000>;
- hysteresis = <5000>;
- type = "passive";
- };
- };
- cooling-maps {
- cpu0_vdd_cdev {
- trip = <&cpu1_trip>;
- cooling-device = <&CPU0 4 4>;
- };
- cpu4_vdd_cdev {
- trip = <&cpu1_trip>;
- cooling-device = <&CPU4 9 9>;
- };
- gpu_vdd_cdev {
- trip = <&cpu1_trip>;
- cooling-device = <&msm_gpu 1 1>;
- };
- };
- };
-
- cpu2-silver-lowf {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "low_limits_floor";
- thermal-sensors = <&tsens0 3>;
- tracks-low;
- trips {
- cpu2_trip: cpu2-trip {
- temperature = <5000>;
- hysteresis = <5000>;
- type = "passive";
- };
- };
- cooling-maps {
- cpu0_vdd_cdev {
- trip = <&cpu2_trip>;
- cooling-device = <&CPU0 4 4>;
- };
- cpu4_vdd_cdev {
- trip = <&cpu2_trip>;
- cooling-device = <&CPU4 9 9>;
- };
- gpu_vdd_cdev {
- trip = <&cpu2_trip>;
- cooling-device = <&msm_gpu 1 1>;
- };
- };
- };
-
- cpu3-silver-lowf {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "low_limits_floor";
- thermal-sensors = <&tsens0 4>;
- tracks-low;
- trips {
- cpu3_trip: cpu3-trip {
- temperature = <5000>;
- hysteresis = <5000>;
- type = "passive";
- };
- };
- cooling-maps {
- cpu0_vdd_cdev {
- trip = <&cpu3_trip>;
- cooling-device = <&CPU0 4 4>;
- };
- cpu4_vdd_cdev {
- trip = <&cpu3_trip>;
- cooling-device = <&CPU4 9 9>;
- };
- gpu_vdd_cdev {
- trip = <&cpu3_trip>;
- cooling-device = <&msm_gpu 1 1>;
- };
- };
- };
-
- kryo-l3-0-lowf {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "low_limits_floor";
- thermal-sensors = <&tsens0 5>;
- tracks-low;
- trips {
- l3_0_trip: l3-0-trip {
- temperature = <5000>;
- hysteresis = <5000>;
- type = "passive";
- };
- };
- cooling-maps {
- cpu0_vdd_cdev {
- trip = <&l3_0_trip>;
- cooling-device = <&CPU0 4 4>;
- };
- cpu4_vdd_cdev {
- trip = <&l3_0_trip>;
- cooling-device = <&CPU4 9 9>;
- };
- gpu_vdd_cdev {
- trip = <&l3_0_trip>;
- cooling-device = <&msm_gpu 1 1>;
- };
- };
- };
-
- kryo-l3-1-lowf {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "low_limits_floor";
- thermal-sensors = <&tsens0 6>;
- tracks-low;
- trips {
- l3_1_trip: l3-1-trip {
- temperature = <5000>;
- hysteresis = <5000>;
- type = "passive";
- };
- };
- cooling-maps {
- cpu0_vdd_cdev {
- trip = <&l3_1_trip>;
- cooling-device = <&CPU0 4 4>;
- };
- cpu4_vdd_cdev {
- trip = <&l3_1_trip>;
- cooling-device = <&CPU4 9 9>;
- };
- gpu_vdd_cdev {
- trip = <&l3_1_trip>;
- cooling-device = <&msm_gpu 1 1>;
- };
- };
- };
-
- cpu0-gold-lowf {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "low_limits_floor";
- thermal-sensors = <&tsens0 7>;
- tracks-low;
- trips {
- cpug0_trip: cpug0-trip {
- temperature = <5000>;
- hysteresis = <5000>;
- type = "passive";
- };
- };
- cooling-maps {
- cpu0_vdd_cdev {
- trip = <&cpug0_trip>;
- cooling-device = <&CPU0 4 4>;
- };
- cpu4_vdd_cdev {
- trip = <&cpug0_trip>;
- cooling-device = <&CPU4 9 9>;
- };
- gpu_vdd_cdev {
- trip = <&cpug0_trip>;
- cooling-device = <&msm_gpu 1 1>;
- };
- };
- };
-
- cpu1-gold-lowf {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "low_limits_floor";
- thermal-sensors = <&tsens0 8>;
- tracks-low;
- trips {
- cpug1_trip: cpug1-trip {
- temperature = <5000>;
- hysteresis = <5000>;
- type = "passive";
- };
- };
- cooling-maps {
- cpu0_vdd_cdev {
- trip = <&cpug1_trip>;
- cooling-device = <&CPU0 4 4>;
- };
- cpu4_vdd_cdev {
- trip = <&cpug1_trip>;
- cooling-device = <&CPU4 9 9>;
- };
- gpu_vdd_cdev {
- trip = <&cpug1_trip>;
- cooling-device = <&msm_gpu 1 1>;
- };
- };
- };
-
- cpu2-gold-lowf {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "low_limits_floor";
- thermal-sensors = <&tsens0 9>;
- tracks-low;
- trips {
- cpug2_trip: cpug2-trip {
- temperature = <5000>;
- hysteresis = <5000>;
- type = "passive";
- };
- };
- cooling-maps {
- cpu0_vdd_cdev {
- trip = <&cpug2_trip>;
- cooling-device = <&CPU0 4 4>;
- };
- cpu4_vdd_cdev {
- trip = <&cpug2_trip>;
- cooling-device = <&CPU4 9 9>;
- };
- gpu_vdd_cdev {
- trip = <&cpug2_trip>;
- cooling-device = <&msm_gpu 1 1>;
- };
- };
- };
-
- cpu3-gold-lowf {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "low_limits_floor";
- thermal-sensors = <&tsens0 10>;
- tracks-low;
- trips {
- cpug3_trip: cpug3-trip {
- temperature = <5000>;
- hysteresis = <5000>;
- type = "passive";
- };
- };
- cooling-maps {
- cpu0_vdd_cdev {
- trip = <&cpug3_trip>;
- cooling-device = <&CPU0 4 4>;
- };
- cpu4_vdd_cdev {
- trip = <&cpug3_trip>;
- cooling-device = <&CPU4 9 9>;
- };
- gpu_vdd_cdev {
- trip = <&cpug3_trip>;
- cooling-device = <&msm_gpu 1 1>;
- };
- };
- };
-
- gpu0-lowf {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "low_limits_floor";
- thermal-sensors = <&tsens0 11>;
- tracks-low;
- trips {
- gpu0_trip_l: gpu0-trip {
- temperature = <5000>;
- hysteresis = <5000>;
- type = "passive";
- };
- };
- cooling-maps {
- cpu0_vdd_cdev {
- trip = <&gpu0_trip_l>;
- cooling-device = <&CPU0 4 4>;
- };
- cpu4_vdd_cdev {
- trip = <&gpu0_trip_l>;
- cooling-device = <&CPU4 9 9>;
- };
- gpu_vdd_cdev {
- trip = <&gpu0_trip_l>;
- cooling-device = <&msm_gpu 1 1>;
- };
- };
- };
-
- gpu1-lowf {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "low_limits_floor";
- thermal-sensors = <&tsens0 12>;
- tracks-low;
- trips {
- gpu1_trip_l: gpu1-trip_l {
- temperature = <5000>;
- hysteresis = <5000>;
- type = "passive";
- };
- };
- cooling-maps {
- cpu0_vdd_cdev {
- trip = <&gpu1_trip_l>;
- cooling-device = <&CPU0 4 4>;
- };
- cpu4_vdd_cdev {
- trip = <&gpu1_trip_l>;
- cooling-device = <&CPU4 9 9>;
- };
- gpu_vdd_cdev {
- trip = <&gpu1_trip_l>;
- cooling-device = <&msm_gpu 1 1>;
- };
- };
- };
-
- aoss1-lowf {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "low_limits_floor";
- thermal-sensors = <&tsens1 0>;
- tracks-low;
- trips {
- aoss1_trip: aoss1-trip {
- temperature = <5000>;
- hysteresis = <5000>;
- type = "passive";
- };
- };
- cooling-maps {
- cpu0_vdd_cdev {
- trip = <&aoss1_trip>;
- cooling-device = <&CPU0 4 4>;
- };
- cpu4_vdd_cdev {
- trip = <&aoss1_trip>;
- cooling-device = <&CPU4 9 9>;
- };
- gpu_vdd_cdev {
- trip = <&aoss1_trip>;
- cooling-device = <&msm_gpu 1 1>;
- };
- };
- };
-
- mdm-dsp-lowf {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "low_limits_floor";
- thermal-sensors = <&tsens1 1>;
- tracks-low;
- trips {
- dsp_trip: dsp-trip {
- temperature = <5000>;
- hysteresis = <5000>;
- type = "passive";
- };
- };
- cooling-maps {
- cpu0_vdd_cdev {
- trip = <&dsp_trip>;
- cooling-device = <&CPU0 4 4>;
- };
- cpu4_vdd_cdev {
- trip = <&dsp_trip>;
- cooling-device = <&CPU4 9 9>;
- };
- gpu_vdd_cdev {
- trip = <&dsp_trip>;
- cooling-device = <&msm_gpu 1 1>;
- };
- };
- };
-
- ddr-lowf {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "low_limits_floor";
- thermal-sensors = <&tsens1 2>;
- tracks-low;
- trips {
- ddr_trip: ddr-trip {
- temperature = <5000>;
- hysteresis = <5000>;
- type = "passive";
- };
- };
- cooling-maps {
- cpu0_vdd_cdev {
- trip = <&ddr_trip>;
- cooling-device = <&CPU0 4 4>;
- };
- cpu4_vdd_cdev {
- trip = <&ddr_trip>;
- cooling-device = <&CPU4 9 9>;
- };
- gpu_vdd_cdev {
- trip = <&ddr_trip>;
- cooling-device = <&msm_gpu 1 1>;
- };
- };
- };
-
- wlan-lowf {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "low_limits_floor";
- thermal-sensors = <&tsens1 3>;
- tracks-low;
- trips {
- wlan_trip: wlan-trip {
- temperature = <5000>;
- hysteresis = <5000>;
- type = "passive";
- };
- };
- cooling-maps {
- cpu0_vdd_cdev {
- trip = <&wlan_trip>;
- cooling-device = <&CPU0 4 4>;
- };
- cpu4_vdd_cdev {
- trip = <&wlan_trip>;
- cooling-device = <&CPU4 9 9>;
- };
- gpu_vdd_cdev {
- trip = <&wlan_trip>;
- cooling-device = <&msm_gpu 1 1>;
- };
- };
- };
-
- compute-hvx-lowf {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "low_limits_floor";
- thermal-sensors = <&tsens1 4>;
- tracks-low;
- trips {
- hvx_trip: hvx-trip {
- temperature = <5000>;
- hysteresis = <5000>;
- type = "passive";
- };
- };
- cooling-maps {
- cpu0_vdd_cdev {
- trip = <&hvx_trip>;
- cooling-device = <&CPU0 4 4>;
- };
- cpu4_vdd_cdev {
- trip = <&hvx_trip>;
- cooling-device = <&CPU4 9 9>;
- };
- gpu_vdd_cdev {
- trip = <&hvx_trip>;
- cooling-device = <&msm_gpu 1 1>;
- };
- };
- };
-
- camera-lowf {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "low_limits_floor";
- thermal-sensors = <&tsens1 5>;
- tracks-low;
- trips {
- camera_trip: camera-trip {
- temperature = <5000>;
- hysteresis = <5000>;
- type = "passive";
- };
- };
- cooling-maps {
- cpu0_vdd_cdev {
- trip = <&camera_trip>;
- cooling-device = <&CPU0 4 4>;
- };
- cpu4_vdd_cdev {
- trip = <&camera_trip>;
- cooling-device = <&CPU4 9 9>;
- };
- gpu_vdd_cdev {
- trip = <&camera_trip>;
- cooling-device = <&msm_gpu 1 1>;
- };
- };
- };
-
- mmss-lowf {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "low_limits_floor";
- thermal-sensors = <&tsens1 6>;
- tracks-low;
- trips {
- mmss_trip: mmss-trip {
- temperature = <5000>;
- hysteresis = <5000>;
- type = "passive";
- };
- };
- cooling-maps {
- cpu0_vdd_cdev {
- trip = <&mmss_trip>;
- cooling-device = <&CPU0 4 4>;
- };
- cpu4_vdd_cdev {
- trip = <&mmss_trip>;
- cooling-device = <&CPU4 9 9>;
- };
- gpu_vdd_cdev {
- trip = <&mmss_trip>;
- cooling-device = <&msm_gpu 1 1>;
- };
- };
- };
-
- mdm-core-lowf {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "low_limits_floor";
- thermal-sensors = <&tsens1 7>;
- tracks-low;
- trips {
- mdm_trip: mdm-trip {
- temperature = <5000>;
- hysteresis = <5000>;
- type = "passive";
- };
- };
- cooling-maps {
- cpu0_vdd_cdev {
- trip = <&mdm_trip>;
- cooling-device = <&CPU0 4 4>;
- };
- cpu4_vdd_cdev {
- trip = <&mdm_trip>;
- cooling-device = <&CPU4 9 9>;
- };
- gpu_vdd_cdev {
- trip = <&mdm_trip>;
- cooling-device = <&msm_gpu 1 1>;
- };
- };
- };
-
lmh-dcvs-01 {
polling-delay-passive = <0>;
polling-delay = <0>;
@@ -4071,6 +3599,60 @@
iommus = <&apps_smmu 0x06d6 0x0>;
status = "ok";
};
+
+ tspp: msm_tspp@0x8880000 {
+ compatible = "qcom,msm_tspp";
+ reg = <0x088a7000 0x200>, /* MSM_TSIF0_PHYS */
+ <0x088a8000 0x200>, /* MSM_TSIF1_PHYS */
+ <0x088a9000 0x1000>, /* MSM_TSPP_PHYS */
+ <0x08884000 0x23000>; /* MSM_TSPP_BAM_PHYS */
+ reg-names = "MSM_TSIF0_PHYS",
+ "MSM_TSIF1_PHYS",
+ "MSM_TSPP_PHYS",
+ "MSM_TSPP_BAM_PHYS";
+ interrupts = <0 121 0>, /* TSIF_TSPP_IRQ */
+ <0 119 0>, /* TSIF0_IRQ */
+ <0 120 0>, /* TSIF1_IRQ */
+ <0 122 0>; /* TSIF_BAM_IRQ */
+ interrupt-names = "TSIF_TSPP_IRQ",
+ "TSIF0_IRQ",
+ "TSIF1_IRQ",
+ "TSIF_BAM_IRQ";
+
+ clock-names = "iface_clk", "ref_clk";
+ clocks = <&clock_gcc GCC_TSIF_AHB_CLK>,
+ <&clock_gcc GCC_TSIF_REF_CLK>;
+
+ qcom,msm-bus,name = "tsif";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <82 512 0 0>, /* No vote */
+ <82 512 12288 24576>;
+ /* Max. bandwidth, 2xTSIF, each max of 96Mbps */
+
+ pinctrl-names = "disabled",
+ "tsif0-mode1", "tsif0-mode2",
+ "tsif1-mode1", "tsif1-mode2",
+ "dual-tsif-mode1", "dual-tsif-mode2";
+
+ pinctrl-0 = <>; /* disabled */
+ pinctrl-1 = <&tsif0_signals_active>; /* tsif0-mode1 */
+ pinctrl-2 = <&tsif0_signals_active
+ &tsif0_sync_active>; /* tsif0-mode2 */
+ pinctrl-3 = <&tsif1_signals_active>; /* tsif1-mode1 */
+ pinctrl-4 = <&tsif1_signals_active
+ &tsif1_sync_active>; /* tsif1-mode2 */
+ pinctrl-5 = <&tsif0_signals_active
+ &tsif1_signals_active>; /* dual-tsif-mode1 */
+ pinctrl-6 = <&tsif0_signals_active
+ &tsif0_sync_active
+ &tsif1_signals_active
+ &tsif1_sync_active>; /* dual-tsif-mode2 */
+
+ qcom,smmu-s1-bypass;
+ iommus = <&apps_smmu 0x20 0x0f>;
+ };
};
&clock_cpucc {
@@ -4163,6 +3745,7 @@
};
&bps_gdsc {
+ qcom,support-hw-trigger;
status = "ok";
};
@@ -4175,10 +3758,12 @@
};
&ipe_0_gdsc {
+ qcom,support-hw-trigger;
status = "ok";
};
&ipe_1_gdsc {
+ qcom,support-hw-trigger;
status = "ok";
};
@@ -4321,3 +3906,1202 @@
};
};
};
+
+&thermal_zones {
+ aoss0-lowf {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens0 0>;
+ tracks-low;
+ trips {
+ aoss0_trip: aoss0-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ cpu0_vdd_cdev {
+ trip = <&aoss0_trip>;
+ cooling-device = <&CPU0 4 4>;
+ };
+ cpu4_vdd_cdev {
+ trip = <&aoss0_trip>;
+ cooling-device = <&CPU4 9 9>;
+ };
+ gpu_vdd_cdev {
+ trip = <&aoss0_trip>;
+ cooling-device = <&msm_gpu 1 1>;
+ };
+ cx_vdd_cdev {
+ trip = <&aoss0_trip>;
+ cooling-device = <&cx_cdev 0 0>;
+ };
+ mx_vdd_cdev {
+ trip = <&aoss0_trip>;
+ cooling-device = <&mx_cdev 0 0>;
+ };
+ ebi_vdd_cdev {
+ trip = <&aoss0_trip>;
+ cooling-device = <&ebi_cdev 0 0>;
+ };
+ modem_vdd_cdev {
+ trip = <&aoss0_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&aoss0_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ cdsp_vdd_cdev {
+ trip = <&aoss0_trip>;
+ cooling-device = <&cdsp_vdd 0 0>;
+ };
+ slpi_vdd_cdev {
+ trip = <&aoss0_trip>;
+ cooling-device = <&slpi_vdd 0 0>;
+ };
+ };
+ };
+
+ cpu0-silver-lowf {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens0 1>;
+ tracks-low;
+ trips {
+ cpu0_trip: cpu0-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ cpu0_vdd_cdev {
+ trip = <&cpu0_trip>;
+ cooling-device = <&CPU0 4 4>;
+ };
+ cpu4_vdd_cdev {
+ trip = <&cpu0_trip>;
+ cooling-device = <&CPU4 9 9>;
+ };
+ gpu_vdd_cdev {
+ trip = <&cpu0_trip>;
+ cooling-device = <&msm_gpu 1 1>;
+ };
+ cx_vdd_cdev {
+ trip = <&cpu0_trip>;
+ cooling-device = <&cx_cdev 0 0>;
+ };
+ mx_vdd_cdev {
+ trip = <&cpu0_trip>;
+ cooling-device = <&mx_cdev 0 0>;
+ };
+ ebi_vdd_cdev {
+ trip = <&cpu0_trip>;
+ cooling-device = <&ebi_cdev 0 0>;
+ };
+ modem_vdd_cdev {
+ trip = <&cpu0_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&cpu0_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ cdsp_vdd_cdev {
+ trip = <&cpu0_trip>;
+ cooling-device = <&cdsp_vdd 0 0>;
+ };
+ slpi_vdd_cdev {
+ trip = <&cpu0_trip>;
+ cooling-device = <&slpi_vdd 0 0>;
+ };
+ };
+ };
+
+ cpu1-silver-lowf {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens0 2>;
+ tracks-low;
+ trips {
+ cpu1_trip: cpu1-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ cpu0_vdd_cdev {
+ trip = <&cpu1_trip>;
+ cooling-device = <&CPU0 4 4>;
+ };
+ cpu4_vdd_cdev {
+ trip = <&cpu1_trip>;
+ cooling-device = <&CPU4 9 9>;
+ };
+ gpu_vdd_cdev {
+ trip = <&cpu1_trip>;
+ cooling-device = <&msm_gpu 1 1>;
+ };
+ cx_vdd_cdev {
+ trip = <&cpu1_trip>;
+ cooling-device = <&cx_cdev 0 0>;
+ };
+ mx_vdd_cdev {
+ trip = <&cpu1_trip>;
+ cooling-device = <&mx_cdev 0 0>;
+ };
+ ebi_vdd_cdev {
+ trip = <&cpu1_trip>;
+ cooling-device = <&ebi_cdev 0 0>;
+ };
+ modem_vdd_cdev {
+ trip = <&cpu1_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&cpu1_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ cdsp_vdd_cdev {
+ trip = <&cpu1_trip>;
+ cooling-device = <&cdsp_vdd 0 0>;
+ };
+ slpi_vdd_cdev {
+ trip = <&cpu1_trip>;
+ cooling-device = <&slpi_vdd 0 0>;
+ };
+ };
+ };
+
+ cpu2-silver-lowf {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens0 3>;
+ tracks-low;
+ trips {
+ cpu2_trip: cpu2-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ cpu0_vdd_cdev {
+ trip = <&cpu2_trip>;
+ cooling-device = <&CPU0 4 4>;
+ };
+ cpu4_vdd_cdev {
+ trip = <&cpu2_trip>;
+ cooling-device = <&CPU4 9 9>;
+ };
+ gpu_vdd_cdev {
+ trip = <&cpu2_trip>;
+ cooling-device = <&msm_gpu 1 1>;
+ };
+ cx_vdd_cdev {
+ trip = <&cpu2_trip>;
+ cooling-device = <&cx_cdev 0 0>;
+ };
+ mx_vdd_cdev {
+ trip = <&cpu2_trip>;
+ cooling-device = <&mx_cdev 0 0>;
+ };
+ ebi_vdd_cdev {
+ trip = <&cpu2_trip>;
+ cooling-device = <&ebi_cdev 0 0>;
+ };
+ modem_vdd_cdev {
+ trip = <&cpu2_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&cpu2_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ cdsp_vdd_cdev {
+ trip = <&cpu2_trip>;
+ cooling-device = <&cdsp_vdd 0 0>;
+ };
+ slpi_vdd_cdev {
+ trip = <&cpu2_trip>;
+ cooling-device = <&slpi_vdd 0 0>;
+ };
+ };
+ };
+
+ cpu3-silver-lowf {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens0 4>;
+ tracks-low;
+ trips {
+ cpu3_trip: cpu3-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ cpu0_vdd_cdev {
+ trip = <&cpu3_trip>;
+ cooling-device = <&CPU0 4 4>;
+ };
+ cpu4_vdd_cdev {
+ trip = <&cpu3_trip>;
+ cooling-device = <&CPU4 9 9>;
+ };
+ gpu_vdd_cdev {
+ trip = <&cpu3_trip>;
+ cooling-device = <&msm_gpu 1 1>;
+ };
+ cx_vdd_cdev {
+ trip = <&cpu3_trip>;
+ cooling-device = <&cx_cdev 0 0>;
+ };
+ mx_vdd_cdev {
+ trip = <&cpu3_trip>;
+ cooling-device = <&mx_cdev 0 0>;
+ };
+ ebi_vdd_cdev {
+ trip = <&cpu3_trip>;
+ cooling-device = <&ebi_cdev 0 0>;
+ };
+ modem_vdd_cdev {
+ trip = <&cpu3_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&cpu3_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ cdsp_vdd_cdev {
+ trip = <&cpu3_trip>;
+ cooling-device = <&cdsp_vdd 0 0>;
+ };
+ slpi_vdd_cdev {
+ trip = <&cpu3_trip>;
+ cooling-device = <&slpi_vdd 0 0>;
+ };
+ };
+ };
+
+ kryo-l3-0-lowf {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens0 5>;
+ tracks-low;
+ trips {
+ l3_0_trip: l3-0-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ cpu0_vdd_cdev {
+ trip = <&l3_0_trip>;
+ cooling-device = <&CPU0 4 4>;
+ };
+ cpu4_vdd_cdev {
+ trip = <&l3_0_trip>;
+ cooling-device = <&CPU4 9 9>;
+ };
+ gpu_vdd_cdev {
+ trip = <&l3_0_trip>;
+ cooling-device = <&msm_gpu 1 1>;
+ };
+ cx_vdd_cdev {
+ trip = <&l3_0_trip>;
+ cooling-device = <&cx_cdev 0 0>;
+ };
+ mx_vdd_cdev {
+ trip = <&l3_0_trip>;
+ cooling-device = <&mx_cdev 0 0>;
+ };
+ ebi_vdd_cdev {
+ trip = <&l3_0_trip>;
+ cooling-device = <&ebi_cdev 0 0>;
+ };
+ modem_vdd_cdev {
+ trip = <&l3_0_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&l3_0_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ cdsp_vdd_cdev {
+ trip = <&l3_0_trip>;
+ cooling-device = <&cdsp_vdd 0 0>;
+ };
+ slpi_vdd_cdev {
+ trip = <&l3_0_trip>;
+ cooling-device = <&slpi_vdd 0 0>;
+ };
+ };
+ };
+
+ kryo-l3-1-lowf {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens0 6>;
+ tracks-low;
+ trips {
+ l3_1_trip: l3-1-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ cpu0_vdd_cdev {
+ trip = <&l3_1_trip>;
+ cooling-device = <&CPU0 4 4>;
+ };
+ cpu4_vdd_cdev {
+ trip = <&l3_1_trip>;
+ cooling-device = <&CPU4 9 9>;
+ };
+ gpu_vdd_cdev {
+ trip = <&l3_1_trip>;
+ cooling-device = <&msm_gpu 1 1>;
+ };
+ cx_vdd_cdev {
+ trip = <&l3_1_trip>;
+ cooling-device = <&cx_cdev 0 0>;
+ };
+ mx_vdd_cdev {
+ trip = <&l3_1_trip>;
+ cooling-device = <&mx_cdev 0 0>;
+ };
+ ebi_vdd_cdev {
+ trip = <&l3_1_trip>;
+ cooling-device = <&ebi_cdev 0 0>;
+ };
+ modem_vdd_cdev {
+ trip = <&l3_1_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&l3_1_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ cdsp_vdd_cdev {
+ trip = <&l3_1_trip>;
+ cooling-device = <&cdsp_vdd 0 0>;
+ };
+ slpi_vdd_cdev {
+ trip = <&l3_1_trip>;
+ cooling-device = <&slpi_vdd 0 0>;
+ };
+ };
+ };
+
+ cpu0-gold-lowf {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens0 7>;
+ tracks-low;
+ trips {
+ cpug0_trip: cpug0-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ cpu0_vdd_cdev {
+ trip = <&cpug0_trip>;
+ cooling-device = <&CPU0 4 4>;
+ };
+ cpu4_vdd_cdev {
+ trip = <&cpug0_trip>;
+ cooling-device = <&CPU4 9 9>;
+ };
+ gpu_vdd_cdev {
+ trip = <&cpug0_trip>;
+ cooling-device = <&msm_gpu 1 1>;
+ };
+ cx_vdd_cdev {
+ trip = <&cpug0_trip>;
+ cooling-device = <&cx_cdev 0 0>;
+ };
+ mx_vdd_cdev {
+ trip = <&cpug0_trip>;
+ cooling-device = <&mx_cdev 0 0>;
+ };
+ ebi_vdd_cdev {
+ trip = <&cpug0_trip>;
+ cooling-device = <&ebi_cdev 0 0>;
+ };
+ modem_vdd_cdev {
+ trip = <&cpug0_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&cpug0_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ cdsp_vdd_cdev {
+ trip = <&cpug0_trip>;
+ cooling-device = <&cdsp_vdd 0 0>;
+ };
+ slpi_vdd_cdev {
+ trip = <&cpug0_trip>;
+ cooling-device = <&slpi_vdd 0 0>;
+ };
+ };
+ };
+
+ cpu1-gold-lowf {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens0 8>;
+ tracks-low;
+ trips {
+ cpug1_trip: cpug1-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ cpu0_vdd_cdev {
+ trip = <&cpug1_trip>;
+ cooling-device = <&CPU0 4 4>;
+ };
+ cpu4_vdd_cdev {
+ trip = <&cpug1_trip>;
+ cooling-device = <&CPU4 9 9>;
+ };
+ gpu_vdd_cdev {
+ trip = <&cpug1_trip>;
+ cooling-device = <&msm_gpu 1 1>;
+ };
+ cx_vdd_cdev {
+ trip = <&cpug1_trip>;
+ cooling-device = <&cx_cdev 0 0>;
+ };
+ mx_vdd_cdev {
+ trip = <&cpug1_trip>;
+ cooling-device = <&mx_cdev 0 0>;
+ };
+ ebi_vdd_cdev {
+ trip = <&cpug1_trip>;
+ cooling-device = <&ebi_cdev 0 0>;
+ };
+ modem_vdd_cdev {
+ trip = <&cpug1_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&cpug1_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ cdsp_vdd_cdev {
+ trip = <&cpug1_trip>;
+ cooling-device = <&cdsp_vdd 0 0>;
+ };
+ slpi_vdd_cdev {
+ trip = <&cpug1_trip>;
+ cooling-device = <&slpi_vdd 0 0>;
+ };
+ };
+ };
+
+ cpu2-gold-lowf {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens0 9>;
+ tracks-low;
+ trips {
+ cpug2_trip: cpug2-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ cpu0_vdd_cdev {
+ trip = <&cpug2_trip>;
+ cooling-device = <&CPU0 4 4>;
+ };
+ cpu4_vdd_cdev {
+ trip = <&cpug2_trip>;
+ cooling-device = <&CPU4 9 9>;
+ };
+ gpu_vdd_cdev {
+ trip = <&cpug2_trip>;
+ cooling-device = <&msm_gpu 1 1>;
+ };
+ cx_vdd_cdev {
+ trip = <&cpug2_trip>;
+ cooling-device = <&cx_cdev 0 0>;
+ };
+ mx_vdd_cdev {
+ trip = <&cpug2_trip>;
+ cooling-device = <&mx_cdev 0 0>;
+ };
+ ebi_vdd_cdev {
+ trip = <&cpug2_trip>;
+ cooling-device = <&ebi_cdev 0 0>;
+ };
+ modem_vdd_cdev {
+ trip = <&cpug2_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&cpug2_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ cdsp_vdd_cdev {
+ trip = <&cpug2_trip>;
+ cooling-device = <&cdsp_vdd 0 0>;
+ };
+ slpi_vdd_cdev {
+ trip = <&cpug2_trip>;
+ cooling-device = <&slpi_vdd 0 0>;
+ };
+ };
+ };
+
+ cpu3-gold-lowf {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens0 10>;
+ tracks-low;
+ trips {
+ cpug3_trip: cpug3-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ cpu0_vdd_cdev {
+ trip = <&cpug3_trip>;
+ cooling-device = <&CPU0 4 4>;
+ };
+ cpu4_vdd_cdev {
+ trip = <&cpug3_trip>;
+ cooling-device = <&CPU4 9 9>;
+ };
+ gpu_vdd_cdev {
+ trip = <&cpug3_trip>;
+ cooling-device = <&msm_gpu 1 1>;
+ };
+ cx_vdd_cdev {
+ trip = <&cpug3_trip>;
+ cooling-device = <&cx_cdev 0 0>;
+ };
+ mx_vdd_cdev {
+ trip = <&cpug3_trip>;
+ cooling-device = <&mx_cdev 0 0>;
+ };
+ ebi_vdd_cdev {
+ trip = <&cpug3_trip>;
+ cooling-device = <&ebi_cdev 0 0>;
+ };
+ modem_vdd_cdev {
+ trip = <&cpug3_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&cpug3_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ cdsp_vdd_cdev {
+ trip = <&cpug3_trip>;
+ cooling-device = <&cdsp_vdd 0 0>;
+ };
+ slpi_vdd_cdev {
+ trip = <&cpug3_trip>;
+ cooling-device = <&slpi_vdd 0 0>;
+ };
+ };
+ };
+
+ gpu0-lowf {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens0 11>;
+ tracks-low;
+ trips {
+ gpu0_trip_l: gpu0-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ cpu0_vdd_cdev {
+ trip = <&gpu0_trip_l>;
+ cooling-device = <&CPU0 4 4>;
+ };
+ cpu4_vdd_cdev {
+ trip = <&gpu0_trip_l>;
+ cooling-device = <&CPU4 9 9>;
+ };
+ gpu_vdd_cdev {
+ trip = <&gpu0_trip_l>;
+ cooling-device = <&msm_gpu 1 1>;
+ };
+ cx_vdd_cdev {
+ trip = <&gpu0_trip_l>;
+ cooling-device = <&cx_cdev 0 0>;
+ };
+ mx_vdd_cdev {
+ trip = <&gpu0_trip_l>;
+ cooling-device = <&mx_cdev 0 0>;
+ };
+ ebi_vdd_cdev {
+ trip = <&gpu0_trip_l>;
+ cooling-device = <&ebi_cdev 0 0>;
+ };
+ modem_vdd_cdev {
+ trip = <&gpu0_trip_l>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&gpu0_trip_l>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ cdsp_vdd_cdev {
+ trip = <&gpu0_trip_l>;
+ cooling-device = <&cdsp_vdd 0 0>;
+ };
+ slpi_vdd_cdev {
+ trip = <&gpu0_trip_l>;
+ cooling-device = <&slpi_vdd 0 0>;
+ };
+ };
+ };
+
+ gpu1-lowf {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens0 12>;
+ tracks-low;
+ trips {
+ gpu1_trip_l: gpu1-trip_l {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ cpu0_vdd_cdev {
+ trip = <&gpu1_trip_l>;
+ cooling-device = <&CPU0 4 4>;
+ };
+ cpu4_vdd_cdev {
+ trip = <&gpu1_trip_l>;
+ cooling-device = <&CPU4 9 9>;
+ };
+ gpu_vdd_cdev {
+ trip = <&gpu1_trip_l>;
+ cooling-device = <&msm_gpu 1 1>;
+ };
+ cx_vdd_cdev {
+ trip = <&gpu1_trip_l>;
+ cooling-device = <&cx_cdev 0 0>;
+ };
+ mx_vdd_cdev {
+ trip = <&gpu1_trip_l>;
+ cooling-device = <&mx_cdev 0 0>;
+ };
+ ebi_vdd_cdev {
+ trip = <&gpu1_trip_l>;
+ cooling-device = <&ebi_cdev 0 0>;
+ };
+ modem_vdd_cdev {
+ trip = <&gpu1_trip_l>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&gpu1_trip_l>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ cdsp_vdd_cdev {
+ trip = <&gpu1_trip_l>;
+ cooling-device = <&cdsp_vdd 0 0>;
+ };
+ slpi_vdd_cdev {
+ trip = <&gpu1_trip_l>;
+ cooling-device = <&slpi_vdd 0 0>;
+ };
+ };
+ };
+
+ aoss1-lowf {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens1 0>;
+ tracks-low;
+ trips {
+ aoss1_trip: aoss1-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ cpu0_vdd_cdev {
+ trip = <&aoss1_trip>;
+ cooling-device = <&CPU0 4 4>;
+ };
+ cpu4_vdd_cdev {
+ trip = <&aoss1_trip>;
+ cooling-device = <&CPU4 9 9>;
+ };
+ gpu_vdd_cdev {
+ trip = <&aoss1_trip>;
+ cooling-device = <&msm_gpu 1 1>;
+ };
+ cx_vdd_cdev {
+ trip = <&aoss1_trip>;
+ cooling-device = <&cx_cdev 0 0>;
+ };
+ mx_vdd_cdev {
+ trip = <&aoss1_trip>;
+ cooling-device = <&mx_cdev 0 0>;
+ };
+ ebi_vdd_cdev {
+ trip = <&aoss1_trip>;
+ cooling-device = <&ebi_cdev 0 0>;
+ };
+ modem_vdd_cdev {
+ trip = <&aoss1_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&aoss1_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ cdsp_vdd_cdev {
+ trip = <&aoss1_trip>;
+ cooling-device = <&cdsp_vdd 0 0>;
+ };
+ slpi_vdd_cdev {
+ trip = <&aoss1_trip>;
+ cooling-device = <&slpi_vdd 0 0>;
+ };
+ };
+ };
+
+ mdm-dsp-lowf {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens1 1>;
+ tracks-low;
+ trips {
+ dsp_trip: dsp-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ cpu0_vdd_cdev {
+ trip = <&dsp_trip>;
+ cooling-device = <&CPU0 4 4>;
+ };
+ cpu4_vdd_cdev {
+ trip = <&dsp_trip>;
+ cooling-device = <&CPU4 9 9>;
+ };
+ gpu_vdd_cdev {
+ trip = <&dsp_trip>;
+ cooling-device = <&msm_gpu 1 1>;
+ };
+ cx_vdd_cdev {
+ trip = <&dsp_trip>;
+ cooling-device = <&cx_cdev 0 0>;
+ };
+ mx_vdd_cdev {
+ trip = <&dsp_trip>;
+ cooling-device = <&mx_cdev 0 0>;
+ };
+ ebi_vdd_cdev {
+ trip = <&dsp_trip>;
+ cooling-device = <&ebi_cdev 0 0>;
+ };
+ modem_vdd_cdev {
+ trip = <&dsp_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&dsp_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ cdsp_vdd_cdev {
+ trip = <&dsp_trip>;
+ cooling-device = <&cdsp_vdd 0 0>;
+ };
+ slpi_vdd_cdev {
+ trip = <&dsp_trip>;
+ cooling-device = <&slpi_vdd 0 0>;
+ };
+ };
+ };
+
+ ddr-lowf {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens1 2>;
+ tracks-low;
+ trips {
+ ddr_trip: ddr-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ cpu0_vdd_cdev {
+ trip = <&ddr_trip>;
+ cooling-device = <&CPU0 4 4>;
+ };
+ cpu4_vdd_cdev {
+ trip = <&ddr_trip>;
+ cooling-device = <&CPU4 9 9>;
+ };
+ gpu_vdd_cdev {
+ trip = <&ddr_trip>;
+ cooling-device = <&msm_gpu 1 1>;
+ };
+ cx_vdd_cdev {
+ trip = <&ddr_trip>;
+ cooling-device = <&cx_cdev 0 0>;
+ };
+ mx_vdd_cdev {
+ trip = <&ddr_trip>;
+ cooling-device = <&mx_cdev 0 0>;
+ };
+ ebi_vdd_cdev {
+ trip = <&ddr_trip>;
+ cooling-device = <&ebi_cdev 0 0>;
+ };
+ modem_vdd_cdev {
+ trip = <&ddr_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&ddr_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ cdsp_vdd_cdev {
+ trip = <&ddr_trip>;
+ cooling-device = <&cdsp_vdd 0 0>;
+ };
+ slpi_vdd_cdev {
+ trip = <&ddr_trip>;
+ cooling-device = <&slpi_vdd 0 0>;
+ };
+ };
+ };
+
+ wlan-lowf {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens1 3>;
+ tracks-low;
+ trips {
+ wlan_trip: wlan-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ cpu0_vdd_cdev {
+ trip = <&wlan_trip>;
+ cooling-device = <&CPU0 4 4>;
+ };
+ cpu4_vdd_cdev {
+ trip = <&wlan_trip>;
+ cooling-device = <&CPU4 9 9>;
+ };
+ gpu_vdd_cdev {
+ trip = <&wlan_trip>;
+ cooling-device = <&msm_gpu 1 1>;
+ };
+ cx_vdd_cdev {
+ trip = <&wlan_trip>;
+ cooling-device = <&cx_cdev 0 0>;
+ };
+ mx_vdd_cdev {
+ trip = <&wlan_trip>;
+ cooling-device = <&mx_cdev 0 0>;
+ };
+ ebi_vdd_cdev {
+ trip = <&wlan_trip>;
+ cooling-device = <&ebi_cdev 0 0>;
+ };
+ modem_vdd_cdev {
+ trip = <&wlan_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&wlan_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ cdsp_vdd_cdev {
+ trip = <&wlan_trip>;
+ cooling-device = <&cdsp_vdd 0 0>;
+ };
+ slpi_vdd_cdev {
+ trip = <&wlan_trip>;
+ cooling-device = <&slpi_vdd 0 0>;
+ };
+ };
+ };
+
+ compute-hvx-lowf {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens1 4>;
+ tracks-low;
+ trips {
+ hvx_trip: hvx-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ cpu0_vdd_cdev {
+ trip = <&hvx_trip>;
+ cooling-device = <&CPU0 4 4>;
+ };
+ cpu4_vdd_cdev {
+ trip = <&hvx_trip>;
+ cooling-device = <&CPU4 9 9>;
+ };
+ gpu_vdd_cdev {
+ trip = <&hvx_trip>;
+ cooling-device = <&msm_gpu 1 1>;
+ };
+ cx_vdd_cdev {
+ trip = <&hvx_trip>;
+ cooling-device = <&cx_cdev 0 0>;
+ };
+ mx_vdd_cdev {
+ trip = <&hvx_trip>;
+ cooling-device = <&mx_cdev 0 0>;
+ };
+ ebi_vdd_cdev {
+ trip = <&hvx_trip>;
+ cooling-device = <&ebi_cdev 0 0>;
+ };
+ modem_vdd_cdev {
+ trip = <&hvx_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&hvx_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ cdsp_vdd_cdev {
+ trip = <&hvx_trip>;
+ cooling-device = <&cdsp_vdd 0 0>;
+ };
+ slpi_vdd_cdev {
+ trip = <&hvx_trip>;
+ cooling-device = <&slpi_vdd 0 0>;
+ };
+ };
+ };
+
+ camera-lowf {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens1 5>;
+ tracks-low;
+ trips {
+ camera_trip: camera-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ cpu0_vdd_cdev {
+ trip = <&camera_trip>;
+ cooling-device = <&CPU0 4 4>;
+ };
+ cpu4_vdd_cdev {
+ trip = <&camera_trip>;
+ cooling-device = <&CPU4 9 9>;
+ };
+ gpu_vdd_cdev {
+ trip = <&camera_trip>;
+ cooling-device = <&msm_gpu 1 1>;
+ };
+ cx_vdd_cdev {
+ trip = <&camera_trip>;
+ cooling-device = <&cx_cdev 0 0>;
+ };
+ mx_vdd_cdev {
+ trip = <&camera_trip>;
+ cooling-device = <&mx_cdev 0 0>;
+ };
+ ebi_vdd_cdev {
+ trip = <&camera_trip>;
+ cooling-device = <&ebi_cdev 0 0>;
+ };
+ modem_vdd_cdev {
+ trip = <&camera_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&camera_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ cdsp_vdd_cdev {
+ trip = <&camera_trip>;
+ cooling-device = <&cdsp_vdd 0 0>;
+ };
+ slpi_vdd_cdev {
+ trip = <&camera_trip>;
+ cooling-device = <&slpi_vdd 0 0>;
+ };
+ };
+ };
+
+ mmss-lowf {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens1 6>;
+ tracks-low;
+ trips {
+ mmss_trip: mmss-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ cpu0_vdd_cdev {
+ trip = <&mmss_trip>;
+ cooling-device = <&CPU0 4 4>;
+ };
+ cpu4_vdd_cdev {
+ trip = <&mmss_trip>;
+ cooling-device = <&CPU4 9 9>;
+ };
+ gpu_vdd_cdev {
+ trip = <&mmss_trip>;
+ cooling-device = <&msm_gpu 1 1>;
+ };
+ cx_vdd_cdev {
+ trip = <&mmss_trip>;
+ cooling-device = <&cx_cdev 0 0>;
+ };
+ mx_vdd_cdev {
+ trip = <&mmss_trip>;
+ cooling-device = <&mx_cdev 0 0>;
+ };
+ ebi_vdd_cdev {
+ trip = <&mmss_trip>;
+ cooling-device = <&ebi_cdev 0 0>;
+ };
+ modem_vdd_cdev {
+ trip = <&mmss_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&mmss_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ cdsp_vdd_cdev {
+ trip = <&mmss_trip>;
+ cooling-device = <&cdsp_vdd 0 0>;
+ };
+ slpi_vdd_cdev {
+ trip = <&mmss_trip>;
+ cooling-device = <&slpi_vdd 0 0>;
+ };
+ };
+ };
+
+ mdm-core-lowf {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens1 7>;
+ tracks-low;
+ trips {
+ mdm_trip: mdm-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ cpu0_vdd_cdev {
+ trip = <&mdm_trip>;
+ cooling-device = <&CPU0 4 4>;
+ };
+ cpu4_vdd_cdev {
+ trip = <&mdm_trip>;
+ cooling-device = <&CPU4 9 9>;
+ };
+ gpu_vdd_cdev {
+ trip = <&mdm_trip>;
+ cooling-device = <&msm_gpu 1 1>;
+ };
+ cx_vdd_cdev {
+ trip = <&mdm_trip>;
+ cooling-device = <&cx_cdev 0 0>;
+ };
+ mx_vdd_cdev {
+ trip = <&mdm_trip>;
+ cooling-device = <&mx_cdev 0 0>;
+ };
+ ebi_vdd_cdev {
+ trip = <&mdm_trip>;
+ cooling-device = <&ebi_cdev 0 0>;
+ };
+ modem_vdd_cdev {
+ trip = <&mdm_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&mdm_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ cdsp_vdd_cdev {
+ trip = <&mdm_trip>;
+ cooling-device = <&cdsp_vdd 0 0>;
+ };
+ slpi_vdd_cdev {
+ trip = <&mdm_trip>;
+ cooling-device = <&slpi_vdd 0 0>;
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/smb1355.dtsi b/arch/arm64/boot/dts/qcom/smb1355.dtsi
index 33c5e97..999d87a 100644
--- a/arch/arm64/boot/dts/qcom/smb1355.dtsi
+++ b/arch/arm64/boot/dts/qcom/smb1355.dtsi
@@ -39,6 +39,11 @@
interrupt-parent = <&smb1355>;
status = "disabled";
+ io-channels = <&pmi8998_rradc 2>,
+ <&pmi8998_rradc 12>;
+ io-channel-names = "charger_temp",
+ "charger_temp_max";
+
qcom,chgr@1000 {
reg = <0x1000 0x100>;
interrupts = <0x10 0x1 IRQ_TYPE_EDGE_RISING>;
diff --git a/arch/arm64/configs/sdm670-perf_defconfig b/arch/arm64/configs/sdm670-perf_defconfig
new file mode 100644
index 0000000..5437417
--- /dev/null
+++ b/arch/arm64/configs/sdm670-perf_defconfig
@@ -0,0 +1,579 @@
+CONFIG_LOCALVERSION="-perf"
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_FHANDLE is not set
+CONFIG_AUDIT=y
+# CONFIG_AUDITSYSCALL is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_SCHED_WALT=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_RCU_NOCB_CPU_ALL=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHEDTUNE=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_BPF=y
+CONFIG_SCHED_CORE_CTL=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCHED_TUNE=y
+CONFIG_DEFAULT_USE_ENERGY_AWARE=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_KALLSYMS_ALL=y
+CONFIG_BPF_SYSCALL=y
+# CONFIG_AIO is not set
+# CONFIG_MEMBARRIER is not set
+CONFIG_EMBEDDED=y
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_CC_STACKPROTECTOR_STRONG=y
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_SHA512=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_SDM670=y
+CONFIG_PCI=y
+CONFIG_PCI_MSM=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=8
+CONFIG_PREEMPT=y
+CONFIG_HZ_100=y
+CONFIG_CMA=y
+CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_SECCOMP=y
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+# CONFIG_ARM64_VHE is not set
+CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_COMPAT=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_CPU_IDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_BOOST=y
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_L2TP=y
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=y
+CONFIG_L2TP_ETH=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_RMNET_DATA=y
+CONFIG_RMNET_DATA_FC=y
+CONFIG_RMNET_DATA_DEBUG_PKT=y
+CONFIG_BT=y
+CONFIG_MSM_BT_POWER=y
+CONFIG_CFG80211=y
+CONFIG_CFG80211_INTERNAL_REGDB=y
+CONFIG_RFKILL=y
+CONFIG_NFC_NQ=y
+CONFIG_IPC_ROUTER=y
+CONFIG_IPC_ROUTER_SECURITY=y
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_DMA_CMA=y
+CONFIG_ZRAM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_QSEECOM=y
+CONFIG_MEMORY_STATE_TIME=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFS_QCOM_ICE=y
+CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_DEBUG=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_REQ_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=y
+CONFIG_DUMMY=y
+CONFIG_TUN=y
+CONFIG_SKY2=y
+CONFIG_SMSC911X=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_USB_USBNET=y
+CONFIG_WIL6210=m
+# CONFIG_WIL6210_TRACING is not set
+CONFIG_WCNSS_MEM_PRE_ALLOC=y
+CONFIG_CLD_LL_CORE=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_HBTP_INPUT=y
+CONFIG_INPUT_QPNP_POWER_ON=y
+CONFIG_INPUT_UINPUT=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_MSM_GENI=y
+CONFIG_DIAG_CHAR=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
+CONFIG_MSM_ADSPRPC=y
+CONFIG_MSM_RDBG=m
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_QCOM_GENI=y
+CONFIG_SOUNDWIRE=y
+CONFIG_SPI=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_QCOM_GENI=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SLIMBUS_MSM_NGD=y
+CONFIG_SPMI=y
+CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
+CONFIG_PINCTRL_SDM670=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_RESET_QCOM=y
+CONFIG_QCOM_DLOAD_MODE=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_QPNP_FG_GEN3=y
+CONFIG_SMB1355_SLAVE_CHARGER=y
+CONFIG_QPNP_SMB2=y
+CONFIG_QPNP_QNOVO=y
+CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_STEP_WISE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
+CONFIG_QCOM_SPMI_TEMP_ALARM=y
+CONFIG_THERMAL_QPNP=y
+CONFIG_THERMAL_QPNP_ADC_TM=y
+CONFIG_THERMAL_TSENS=y
+CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_QTI_VIRTUAL_SENSOR=y
+CONFIG_QTI_REG_COOLING_DEVICE=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_MFD_I2C_PMIC=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_WCD9XXX_CODEC_CORE=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_CPRH_KBSS=y
+CONFIG_REGULATOR_QPNP_LABIBB=y
+CONFIG_REGULATOR_QPNP=y
+CONFIG_REGULATOR_RPMH=y
+CONFIG_REGULATOR_STUB=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_VIDEO_ADV_DEBUG=y
+CONFIG_VIDEO_FIXED_MINOR_RANGES=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_SPECTRA_CAMERA=y
+CONFIG_MSM_VIDC_V4L2=y
+CONFIG_MSM_VIDC_GOVERNORS=y
+CONFIG_MSM_SDE_ROTATOR=y
+CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
+CONFIG_DVB_MPQ=m
+CONFIG_DVB_MPQ_DEMUX=m
+CONFIG_DVB_MPQ_TSPP1=y
+CONFIG_TSPP=m
+CONFIG_QCOM_KGSL=y
+CONFIG_DRM=y
+CONFIG_DRM_SDE_EVTLOG_DEBUG=y
+CONFIG_DRM_SDE_RSC=y
+CONFIG_FB_VIRTUAL=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
+CONFIG_SND_SOC=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_PLANTRONICS=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_MSM=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_ISP1760_HOST_ROLE=y
+CONFIG_USB_PD_POLICY=y
+CONFIG_QPNP_USB_PDPHY=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_DUAL_ROLE_USB_INTF=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_QUSB_PHY=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_CDEV=y
+CONFIG_USB_CONFIGFS_F_CCID=y
+CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
+CONFIG_MMC=y
+CONFIG_MMC_PERF_PROFILING=y
+CONFIG_MMC_PARANOID_SD_INIT=y
+CONFIG_MMC_CLKGATE=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
+CONFIG_MMC_TEST=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_QPNP=y
+CONFIG_LEDS_QPNP_FLASH_V2=y
+CONFIG_LEDS_QPNP_WLED=y
+CONFIG_LEDS_QPNP_HAPTICS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_QPNP=y
+CONFIG_DMADEVICES=y
+CONFIG_QCOM_GPI_DMA=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_ION=y
+CONFIG_ION_MSM=y
+CONFIG_GSI=y
+CONFIG_IPA3=y
+CONFIG_RMNET_IPA3=y
+CONFIG_RNDIS_IPA=y
+CONFIG_IPA_UT=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_QPNP_COINCELL=y
+CONFIG_QPNP_REVID=y
+CONFIG_USB_BAM=y
+CONFIG_MSM_11AD=m
+CONFIG_SEEMP_CORE=y
+CONFIG_QCOM_GENI_SE=y
+CONFIG_CLOCK_QPNP_DIV=y
+CONFIG_MSM_CLK_RPMH=y
+CONFIG_CLOCK_CPU_OSM=y
+CONFIG_MSM_CLK_AOP_QMP=y
+CONFIG_QCOM_MDSS_PLL=y
+CONFIG_REMOTE_SPINLOCK_MSM=y
+CONFIG_MSM_QMP=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
+CONFIG_ARM_SMMU=y
+CONFIG_QCOM_LAZY_MAPPING=y
+CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_DEBUG_TRACKING=y
+CONFIG_IOMMU_TESTS=y
+CONFIG_QCOM_RUN_QUEUE_STATS=y
+CONFIG_QCOM_LLCC=y
+CONFIG_QCOM_SDM670_LLCC=y
+CONFIG_MSM_SERVICE_LOCATOR=y
+CONFIG_MSM_SERVICE_NOTIFIER=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_QCOM_EUD=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_QCOM_SECURE_BUFFER=y
+CONFIG_QCOM_EARLY_RANDOM=y
+CONFIG_MSM_SMEM=y
+CONFIG_MSM_GLINK=y
+CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
+CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
+CONFIG_MSM_GLINK_SPI_XPRT=y
+CONFIG_MSM_SPCOM=y
+CONFIG_MSM_SPSS_UTILS=y
+CONFIG_TRACER_PKT=y
+CONFIG_QTI_RPMH_API=y
+CONFIG_MSM_SMP2P=y
+CONFIG_MSM_SMP2P_TEST=y
+CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
+CONFIG_MSM_QMI_INTERFACE=y
+CONFIG_MSM_GLINK_PKT=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_PIL_MSS_QDSP6V5=y
+CONFIG_ICNSS=y
+CONFIG_QCOM_COMMAND_DB=y
+CONFIG_MSM_PERFORMANCE=y
+CONFIG_MSM_CDSP_LOADER=y
+CONFIG_MSM_EVENT_TIMER=y
+CONFIG_MSM_PM=y
+CONFIG_MSM_QBT1000=y
+CONFIG_APSS_CORE_EA=y
+CONFIG_QTI_RPM_STATS_LOG=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_QCOM_BIMC_BWMON=y
+CONFIG_ARM_MEMLAT_MON=y
+CONFIG_QCOMCCI_HWMON=y
+CONFIG_QCOM_M4M_HWMON=y
+CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
+CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON=y
+CONFIG_DEVFREQ_GOV_MEMLAT=y
+CONFIG_DEVFREQ_SIMPLE_DEV=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
+CONFIG_EXTCON_USB_GPIO=y
+CONFIG_IIO=y
+CONFIG_QCOM_RRADC=y
+CONFIG_PWM=y
+CONFIG_PWM_QPNP=y
+CONFIG_ARM_GIC_V3_ACL=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_SENSORS_SSC=y
+CONFIG_MSM_TZ_LOG=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_SCHEDSTATS=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_IPC_LOGGING=y
+CONFIG_CPU_FREQ_SWITCH_PROFILER=y
+CONFIG_DEBUG_ALIGN_RODATA=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_QCOM_REPLICATOR=y
+CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_TPDA=y
+CONFIG_CORESIGHT_TPDM=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_CORESIGHT_DUMMY=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
+CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_CRYPTO_CRC32_ARM64=y
+CONFIG_QMI_ENCDEC=y
diff --git a/arch/arm64/configs/sdm670_defconfig b/arch/arm64/configs/sdm670_defconfig
new file mode 100644
index 0000000..6d41d516
--- /dev/null
+++ b/arch/arm64/configs/sdm670_defconfig
@@ -0,0 +1,636 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_FHANDLE is not set
+CONFIG_AUDIT=y
+# CONFIG_AUDITSYSCALL is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_SCHED_WALT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_RCU_NOCB_CPU_ALL=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHEDTUNE=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_SCHED_CORE_CTL=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCHED_TUNE=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_AIO is not set
+# CONFIG_MEMBARRIER is not set
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_CC_STACKPROTECTOR_REGULAR=y
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_SHA512=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_SDM670=y
+CONFIG_PCI=y
+CONFIG_PCI_MSM=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=8
+CONFIG_PREEMPT=y
+CONFIG_HZ_100=y
+CONFIG_CLEANCACHE=y
+CONFIG_CMA=y
+CONFIG_CMA_DEBUGFS=y
+CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_SECCOMP=y
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+# CONFIG_ARM64_VHE is not set
+CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_COMPAT=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_PM_DEBUG=y
+CONFIG_CPU_IDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_BOOST=y
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_L2TP=y
+CONFIG_L2TP_DEBUGFS=y
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=y
+CONFIG_L2TP_ETH=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_RMNET_DATA=y
+CONFIG_RMNET_DATA_FC=y
+CONFIG_RMNET_DATA_DEBUG_PKT=y
+CONFIG_BT=y
+CONFIG_MSM_BT_POWER=y
+CONFIG_CFG80211=y
+CONFIG_CFG80211_INTERNAL_REGDB=y
+# CONFIG_CFG80211_CRDA_SUPPORT is not set
+CONFIG_RFKILL=y
+CONFIG_NFC_NQ=y
+CONFIG_IPC_ROUTER=y
+CONFIG_IPC_ROUTER_SECURITY=y
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_DMA_CMA=y
+CONFIG_ZRAM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_QSEECOM=y
+CONFIG_UID_SYS_STATS=y
+CONFIG_MEMORY_STATE_TIME=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFS_QCOM_ICE=y
+CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=y
+CONFIG_DUMMY=y
+CONFIG_TUN=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_USB_USBNET=y
+CONFIG_WIL6210=m
+CONFIG_WCNSS_MEM_PRE_ALLOC=y
+CONFIG_CLD_LL_CORE=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_HBTP_INPUT=y
+CONFIG_INPUT_QPNP_POWER_ON=y
+CONFIG_INPUT_UINPUT=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_MSM_GENI=y
+CONFIG_SERIAL_MSM_GENI_CONSOLE=y
+CONFIG_DIAG_CHAR=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
+CONFIG_MSM_ADSPRPC=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_QCOM_GENI=y
+CONFIG_SOUNDWIRE=y
+CONFIG_SPI=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_QCOM_GENI=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SLIMBUS_MSM_NGD=y
+CONFIG_SPMI=y
+CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
+CONFIG_PINCTRL_SDM670=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_RESET_QCOM=y
+CONFIG_QCOM_DLOAD_MODE=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_QPNP_FG_GEN3=y
+CONFIG_SMB1355_SLAVE_CHARGER=y
+CONFIG_QPNP_SMB2=y
+CONFIG_QPNP_QNOVO=y
+CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_STEP_WISE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
+CONFIG_QCOM_SPMI_TEMP_ALARM=y
+CONFIG_THERMAL_QPNP=y
+CONFIG_THERMAL_QPNP_ADC_TM=y
+CONFIG_THERMAL_TSENS=y
+CONFIG_MSM_BCL_PERIPHERAL_CTL=y
+CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_QTI_VIRTUAL_SENSOR=y
+CONFIG_MFD_I2C_PMIC=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_CPRH_KBSS=y
+CONFIG_REGULATOR_QPNP_LABIBB=y
+CONFIG_REGULATOR_QPNP=y
+CONFIG_REGULATOR_RPMH=y
+CONFIG_REGULATOR_STUB=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_VIDEO_ADV_DEBUG=y
+CONFIG_VIDEO_FIXED_MINOR_RANGES=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_SPECTRA_CAMERA=y
+CONFIG_MSM_VIDC_V4L2=y
+CONFIG_MSM_VIDC_GOVERNORS=y
+CONFIG_MSM_SDE_ROTATOR=y
+CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
+CONFIG_QCOM_KGSL=y
+CONFIG_DRM=y
+CONFIG_DRM_SDE_EVTLOG_DEBUG=y
+CONFIG_DRM_SDE_RSC=y
+CONFIG_FB_VIRTUAL=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_SDM845=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_USB=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_MSM=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_ISP1760_HOST_ROLE=y
+CONFIG_USB_PD_POLICY=y
+CONFIG_QPNP_USB_PDPHY=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_DUAL_ROLE_USB_INTF=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_QUSB_PHY=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_CDEV=y
+CONFIG_USB_CONFIGFS_F_CCID=y
+CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
+CONFIG_MMC=y
+CONFIG_MMC_PERF_PROFILING=y
+CONFIG_MMC_RING_BUFFER=y
+CONFIG_MMC_CLKGATE=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
+CONFIG_MMC_TEST=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_QPNP=y
+CONFIG_LEDS_QPNP_FLASH_V2=y
+CONFIG_LEDS_QPNP_WLED=y
+CONFIG_LEDS_QPNP_HAPTICS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_EDAC=y
+CONFIG_EDAC_MM_EDAC=y
+CONFIG_EDAC_KRYO3XX_ARM64=y
+CONFIG_EDAC_KRYO3XX_ARM64_PANIC_ON_UE=y
+CONFIG_EDAC_QCOM_LLCC=y
+CONFIG_EDAC_QCOM_LLCC_PANIC_ON_UE=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_QPNP=y
+CONFIG_DMADEVICES=y
+CONFIG_QCOM_GPI_DMA=y
+CONFIG_QCOM_GPI_DMA_DEBUG=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_ION=y
+CONFIG_ION_MSM=y
+CONFIG_GSI=y
+CONFIG_IPA3=y
+CONFIG_RMNET_IPA3=y
+CONFIG_RNDIS_IPA=y
+CONFIG_IPA_UT=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_QPNP_COINCELL=y
+CONFIG_QPNP_REVID=y
+CONFIG_USB_BAM=y
+CONFIG_MSM_11AD=m
+CONFIG_SEEMP_CORE=y
+CONFIG_QCOM_GENI_SE=y
+CONFIG_CLOCK_QPNP_DIV=y
+CONFIG_MSM_CLK_RPMH=y
+CONFIG_CLOCK_CPU_OSM=y
+CONFIG_MSM_CLK_AOP_QMP=y
+CONFIG_QCOM_MDSS_PLL=y
+CONFIG_REMOTE_SPINLOCK_MSM=y
+CONFIG_MSM_QMP=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
+CONFIG_ARM_SMMU=y
+CONFIG_QCOM_LAZY_MAPPING=y
+CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_DEBUG_TRACKING=y
+CONFIG_IOMMU_TESTS=y
+CONFIG_QCOM_CPUSS_DUMP=y
+CONFIG_QCOM_RUN_QUEUE_STATS=y
+CONFIG_QCOM_LLCC=y
+CONFIG_QCOM_SDM670_LLCC=y
+CONFIG_MSM_SERVICE_LOCATOR=y
+CONFIG_MSM_SERVICE_NOTIFIER=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_MSM_CORE_HANG_DETECT=y
+CONFIG_MSM_GLADIATOR_HANG_DETECT=y
+CONFIG_MSM_GLADIATOR_ERP=y
+CONFIG_QCOM_EUD=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_QCOM_SECURE_BUFFER=y
+CONFIG_QCOM_EARLY_RANDOM=y
+CONFIG_MSM_SMEM=y
+CONFIG_MSM_GLINK=y
+CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
+CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
+CONFIG_MSM_GLINK_SPI_XPRT=y
+CONFIG_MSM_SPCOM=y
+CONFIG_MSM_SPSS_UTILS=y
+CONFIG_TRACER_PKT=y
+CONFIG_QTI_RPMH_API=y
+CONFIG_MSM_SMP2P=y
+CONFIG_MSM_SMP2P_TEST=y
+CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
+CONFIG_MSM_QMI_INTERFACE=y
+CONFIG_MSM_GLINK_PKT=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_PIL_MSS_QDSP6V5=y
+CONFIG_ICNSS=y
+CONFIG_ICNSS_DEBUG=y
+CONFIG_QCOM_COMMAND_DB=y
+CONFIG_MSM_ADSP_LOADER=y
+CONFIG_MSM_PERFORMANCE=y
+CONFIG_MSM_CDSP_LOADER=y
+CONFIG_MSM_AVTIMER=y
+CONFIG_MSM_EVENT_TIMER=y
+CONFIG_MSM_PM=y
+CONFIG_MSM_QBT1000=y
+CONFIG_QCOM_DCC_V2=y
+CONFIG_QTI_RPM_STATS_LOG=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_QCOM_BIMC_BWMON=y
+CONFIG_ARM_MEMLAT_MON=y
+CONFIG_QCOMCCI_HWMON=y
+CONFIG_QCOM_M4M_HWMON=y
+CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
+CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON=y
+CONFIG_DEVFREQ_GOV_MEMLAT=y
+CONFIG_DEVFREQ_SIMPLE_DEV=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
+CONFIG_EXTCON_USB_GPIO=y
+CONFIG_IIO=y
+CONFIG_QCOM_RRADC=y
+CONFIG_PWM=y
+CONFIG_PWM_QPNP=y
+CONFIG_ARM_GIC_V3_ACL=y
+CONFIG_PHY_XGENE=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_SENSORS_SSC=y
+CONFIG_MSM_TZ_LOG=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_FUSE_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_EFIVAR_FS=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+CONFIG_PAGE_OWNER=y
+CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_SLUB_DEBUG_PANIC_ON=y
+CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y
+CONFIG_PAGE_POISONING=y
+CONFIG_DEBUG_OBJECTS=y
+CONFIG_DEBUG_OBJECTS_FREE=y
+CONFIG_DEBUG_OBJECTS_TIMERS=y
+CONFIG_DEBUG_OBJECTS_WORK=y
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
+CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
+CONFIG_SLUB_DEBUG_ON=y
+CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000
+CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_WQ_WATCHDOG=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_PANIC_ON_SCHED_BUG=y
+CONFIG_PANIC_ON_RT_THROTTLING=y
+CONFIG_SCHEDSTATS=y
+CONFIG_SCHED_STACK_END_CHECK=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_LIST=y
+CONFIG_FAULT_INJECTION=y
+CONFIG_FAIL_PAGE_ALLOC=y
+CONFIG_FAULT_INJECTION_DEBUG_FS=y
+CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
+CONFIG_IPC_LOGGING=y
+CONFIG_QCOM_RTB=y
+CONFIG_QCOM_RTB_SEPARATE_CPUS=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_IRQSOFF_TRACER=y
+CONFIG_PREEMPT_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_CPU_FREQ_SWITCH_PROFILER=y
+CONFIG_LKDTM=y
+CONFIG_MEMTEST=y
+CONFIG_PANIC_ON_DATA_CORRUPTION=y
+CONFIG_ARM64_PTDUMP=y
+CONFIG_PID_IN_CONTEXTIDR=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_SOURCE_ETM4X=y
+CONFIG_CORESIGHT_REMOTE_ETM=y
+CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
+CONFIG_CORESIGHT_QCOM_REPLICATOR=y
+CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_TPDA=y
+CONFIG_CORESIGHT_TPDM=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_CORESIGHT_DUMMY=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
+CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_CRYPTO_CRC32_ARM64=y
+CONFIG_XZ_DEC=y
+CONFIG_QMI_ENCDEC=y
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index 8a5b17d..0e8aef9 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -39,7 +39,7 @@
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_CC_STACKPROTECTOR_REGULAR=y
+CONFIG_CC_STACKPROTECTOR_STRONG=y
CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@@ -51,7 +51,6 @@
CONFIG_PARTITION_ADVANCED=y
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_SDM845=y
-CONFIG_ARCH_SDM670=y
CONFIG_PCI=y
CONFIG_PCI_MSM=y
CONFIG_SCHED_MC=y
@@ -131,6 +130,7 @@
CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
CONFIG_NETFILTER_XT_TARGET_LOG=y
CONFIG_NETFILTER_XT_TARGET_MARK=y
CONFIG_NETFILTER_XT_TARGET_NFLOG=y
@@ -260,14 +260,19 @@
CONFIG_DUMMY=y
CONFIG_TUN=y
CONFIG_SKY2=y
-CONFIG_RNDIS_IPA=y
CONFIG_SMSC911X=y
CONFIG_PPP=y
CONFIG_PPP_BSDCOMP=y
CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_FILTER=y
CONFIG_PPP_MPPE=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=y
+CONFIG_PPPOL2TP=y
CONFIG_PPPOLAC=y
CONFIG_PPPOPNS=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
CONFIG_USB_USBNET=y
CONFIG_WIL6210=m
# CONFIG_WIL6210_TRACING is not set
@@ -291,6 +296,7 @@
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_MSM_LEGACY=y
CONFIG_MSM_ADSPRPC=y
+CONFIG_MSM_RDBG=m
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_QCOM_GENI=y
CONFIG_SOUNDWIRE=y
@@ -302,7 +308,6 @@
CONFIG_SPMI=y
CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
CONFIG_PINCTRL_SDM845=y
-CONFIG_PINCTRL_SDM670=y
CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
@@ -329,9 +334,11 @@
CONFIG_MSM_BCL_PERIPHERAL_CTL=y
CONFIG_QTI_THERMAL_LIMITS_DCVS=y
CONFIG_QTI_VIRTUAL_SENSOR=y
+CONFIG_QTI_REG_COOLING_DEVICE=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
CONFIG_MFD_I2C_PMIC=y
CONFIG_MFD_SPMI_PMIC=y
-CONFIG_WCD934X_CODEC=y
+CONFIG_WCD9XXX_CODEC_CORE=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_CPRH_KBSS=y
CONFIG_REGULATOR_QPNP_LABIBB=y
@@ -340,6 +347,7 @@
CONFIG_REGULATOR_STUB=y
CONFIG_MEDIA_SUPPORT=y
CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
CONFIG_MEDIA_CONTROLLER=y
CONFIG_VIDEO_V4L2_SUBDEV_API=y
CONFIG_VIDEO_ADV_DEBUG=y
@@ -350,6 +358,10 @@
CONFIG_MSM_VIDC_GOVERNORS=y
CONFIG_MSM_SDE_ROTATOR=y
CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
+CONFIG_DVB_MPQ=m
+CONFIG_DVB_MPQ_DEMUX=m
+CONFIG_DVB_MPQ_TSPP1=y
+CONFIG_TSPP=m
CONFIG_QCOM_KGSL=y
CONFIG_DRM=y
CONFIG_DRM_SDE_EVTLOG_DEBUG=y
@@ -365,10 +377,14 @@
CONFIG_SND_USB_AUDIO=y
CONFIG_SND_USB_AUDIO_QMI=y
CONFIG_SND_SOC=y
+CONFIG_SND_SOC_MACHINE_SDM845=y
CONFIG_SND_SOC_SDM845=y
CONFIG_UHID=y
CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
CONFIG_HID_PLANTRONICS=y
CONFIG_USB=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
@@ -385,6 +401,7 @@
CONFIG_USB_PD_POLICY=y
CONFIG_QPNP_USB_PDPHY=y
CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_LINK_LAYER_TEST=y
CONFIG_NOP_USB_XCEIV=y
CONFIG_DUAL_ROLE_USB_INTF=y
CONFIG_USB_MSM_SSPHY_QMP=y
@@ -438,6 +455,7 @@
CONFIG_GSI=y
CONFIG_IPA3=y
CONFIG_RMNET_IPA3=y
+CONFIG_RNDIS_IPA=y
CONFIG_IPA_UT=y
CONFIG_SPS=y
CONFIG_SPS_SUPPORT_NDP_BAM=y
@@ -463,6 +481,7 @@
CONFIG_ARM_SMMU=y
CONFIG_QCOM_LAZY_MAPPING=y
CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_DEBUG_TRACKING=y
CONFIG_IOMMU_TESTS=y
CONFIG_QCOM_RUN_QUEUE_STATS=y
CONFIG_QCOM_LLCC=y
@@ -493,6 +512,7 @@
CONFIG_MSM_GLINK_PKT=y
CONFIG_MSM_SUBSYSTEM_RESTART=y
CONFIG_MSM_PIL=y
+CONFIG_MSM_SYSMON_GLINK_COMM=y
CONFIG_MSM_PIL_SSR_GENERIC=y
CONFIG_MSM_PIL_MSS_QDSP6V5=y
CONFIG_ICNSS=y
@@ -501,12 +521,14 @@
CONFIG_MSM_PERFORMANCE=y
CONFIG_MSM_CDSP_LOADER=y
CONFIG_MSM_AVTIMER=y
+CONFIG_QCOM_SMCINVOKE=y
CONFIG_MSM_EVENT_TIMER=y
CONFIG_MSM_PM=y
CONFIG_MSM_QBT1000=y
CONFIG_APSS_CORE_EA=y
CONFIG_QTI_RPM_STATS_LOG=y
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_QMP_DEBUGFS_CLIENT=y
CONFIG_QCOM_BIMC_BWMON=y
CONFIG_ARM_MEMLAT_MON=y
CONFIG_QCOMCCI_HWMON=y
@@ -557,12 +579,15 @@
CONFIG_CORESIGHT_TPDA=y
CONFIG_CORESIGHT_TPDM=y
CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_HWEVENT=y
CONFIG_CORESIGHT_DUMMY=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_HARDENED_USERCOPY=y
+CONFIG_FORTIFY_SOURCE=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_CTR=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index e70963a..c516cd3 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -42,7 +42,7 @@
CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_CC_STACKPROTECTOR_REGULAR=y
+CONFIG_CC_STACKPROTECTOR_STRONG=y
CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@@ -56,7 +56,6 @@
# CONFIG_IOSCHED_DEADLINE is not set
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_SDM845=y
-CONFIG_ARCH_SDM670=y
CONFIG_PCI=y
CONFIG_PCI_MSM=y
CONFIG_SCHED_MC=y
@@ -137,6 +136,7 @@
CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
CONFIG_NETFILTER_XT_TARGET_LOG=y
CONFIG_NETFILTER_XT_TARGET_MARK=y
CONFIG_NETFILTER_XT_TARGET_NFLOG=y
@@ -269,13 +269,18 @@
CONFIG_BONDING=y
CONFIG_DUMMY=y
CONFIG_TUN=y
-CONFIG_RNDIS_IPA=y
CONFIG_PPP=y
CONFIG_PPP_BSDCOMP=y
CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_FILTER=y
CONFIG_PPP_MPPE=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=y
+CONFIG_PPPOL2TP=y
CONFIG_PPPOLAC=y
CONFIG_PPPOPNS=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
CONFIG_USB_USBNET=y
CONFIG_WIL6210=m
CONFIG_WCNSS_MEM_PRE_ALLOC=y
@@ -298,6 +303,7 @@
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_MSM_LEGACY=y
CONFIG_MSM_ADSPRPC=y
+CONFIG_MSM_RDBG=m
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_QCOM_GENI=y
CONFIG_SOUNDWIRE=y
@@ -309,7 +315,6 @@
CONFIG_SPMI=y
CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
CONFIG_PINCTRL_SDM845=y
-CONFIG_PINCTRL_SDM670=y
CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
@@ -336,9 +341,11 @@
CONFIG_MSM_BCL_PERIPHERAL_CTL=y
CONFIG_QTI_THERMAL_LIMITS_DCVS=y
CONFIG_QTI_VIRTUAL_SENSOR=y
+CONFIG_QTI_REG_COOLING_DEVICE=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
CONFIG_MFD_I2C_PMIC=y
CONFIG_MFD_SPMI_PMIC=y
-CONFIG_WCD934X_CODEC=y
+CONFIG_WCD9XXX_CODEC_CORE=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_CPRH_KBSS=y
CONFIG_REGULATOR_QPNP_LABIBB=y
@@ -347,6 +354,7 @@
CONFIG_REGULATOR_STUB=y
CONFIG_MEDIA_SUPPORT=y
CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
CONFIG_MEDIA_CONTROLLER=y
CONFIG_VIDEO_V4L2_SUBDEV_API=y
CONFIG_VIDEO_ADV_DEBUG=y
@@ -357,6 +365,10 @@
CONFIG_MSM_VIDC_GOVERNORS=y
CONFIG_MSM_SDE_ROTATOR=y
CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
+CONFIG_DVB_MPQ=m
+CONFIG_DVB_MPQ_DEMUX=m
+CONFIG_DVB_MPQ_TSPP1=y
+CONFIG_TSPP=m
CONFIG_QCOM_KGSL=y
CONFIG_DRM=y
CONFIG_DRM_SDE_EVTLOG_DEBUG=y
@@ -372,10 +384,14 @@
CONFIG_SND_USB_AUDIO=y
CONFIG_SND_USB_AUDIO_QMI=y
CONFIG_SND_SOC=y
+CONFIG_SND_SOC_MACHINE_SDM845=y
CONFIG_SND_SOC_SDM845=y
CONFIG_UHID=y
CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
CONFIG_HID_PLANTRONICS=y
CONFIG_USB=y
CONFIG_USB_XHCI_HCD=y
@@ -391,6 +407,7 @@
CONFIG_USB_PD_POLICY=y
CONFIG_QPNP_USB_PDPHY=y
CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_LINK_LAYER_TEST=y
CONFIG_NOP_USB_XCEIV=y
CONFIG_DUAL_ROLE_USB_INTF=y
CONFIG_USB_MSM_SSPHY_QMP=y
@@ -454,6 +471,7 @@
CONFIG_GSI=y
CONFIG_IPA3=y
CONFIG_RMNET_IPA3=y
+CONFIG_RNDIS_IPA=y
CONFIG_IPA_UT=y
CONFIG_SPS=y
CONFIG_SPS_SUPPORT_NDP_BAM=y
@@ -485,7 +503,6 @@
CONFIG_QCOM_RUN_QUEUE_STATS=y
CONFIG_QCOM_LLCC=y
CONFIG_QCOM_SDM845_LLCC=y
-CONFIG_QCOM_SDM670_LLCC=y
CONFIG_MSM_SERVICE_LOCATOR=y
CONFIG_MSM_SERVICE_NOTIFIER=y
CONFIG_MSM_BOOT_STATS=y
@@ -514,6 +531,7 @@
CONFIG_MSM_GLINK_PKT=y
CONFIG_MSM_SUBSYSTEM_RESTART=y
CONFIG_MSM_PIL=y
+CONFIG_MSM_SYSMON_GLINK_COMM=y
CONFIG_MSM_PIL_SSR_GENERIC=y
CONFIG_MSM_PIL_MSS_QDSP6V5=y
CONFIG_ICNSS=y
@@ -523,6 +541,7 @@
CONFIG_MSM_PERFORMANCE=y
CONFIG_MSM_CDSP_LOADER=y
CONFIG_MSM_AVTIMER=y
+CONFIG_QCOM_SMCINVOKE=y
CONFIG_MSM_EVENT_TIMER=y
CONFIG_MSM_PM=y
CONFIG_MSM_QBT1000=y
@@ -530,6 +549,7 @@
CONFIG_QCOM_DCC_V2=y
CONFIG_QTI_RPM_STATS_LOG=y
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_QMP_DEBUGFS_CLIENT=y
CONFIG_QCOM_BIMC_BWMON=y
CONFIG_ARM_MEMLAT_MON=y
CONFIG_QCOMCCI_HWMON=y
@@ -590,7 +610,6 @@
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_LOCKUP_DETECTOR=y
-CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y
CONFIG_WQ_WATCHDOG=y
CONFIG_PANIC_TIMEOUT=5
CONFIG_PANIC_ON_SCHED_BUG=y
@@ -635,8 +654,10 @@
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_HARDENED_USERCOPY=y
+CONFIG_FORTIFY_SOURCE=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_CTR=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 44e1d7f..28196b1 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -1,7 +1,6 @@
generic-y += bugs.h
generic-y += clkdev.h
generic-y += cputime.h
-generic-y += current.h
generic-y += delay.h
generic-y += div64.h
generic-y += dma.h
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index e517088..de04879 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -22,9 +22,9 @@
#define ACPI_MADT_GICC_LENGTH \
(acpi_gbl_FADT.header.revision < 6 ? 76 : 80)
-#define BAD_MADT_GICC_ENTRY(entry, end) \
- (!(entry) || (unsigned long)(entry) + sizeof(*(entry)) > (end) || \
- (entry)->header.length != ACPI_MADT_GICC_LENGTH)
+#define BAD_MADT_GICC_ENTRY(entry, end) \
+ (!(entry) || (entry)->header.length != ACPI_MADT_GICC_LENGTH || \
+ (unsigned long)(entry) + ACPI_MADT_GICC_LENGTH > (end))
/* Basic configuration for ACPI */
#ifdef CONFIG_ACPI
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index e3c80f1..ef5970e 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -176,22 +176,25 @@
/*
* Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
- * <symbol> is within the range +/- 4 GB of the PC.
+ * <symbol> is within the range +/- 4 GB of the PC when running
+ * in core kernel context. In module context, a movz/movk sequence
+ * is used, since modules may be loaded far away from the kernel
+ * when KASLR is in effect.
*/
/*
* @dst: destination register (64 bit wide)
* @sym: name of the symbol
- * @tmp: optional scratch register to be used if <dst> == sp, which
- * is not allowed in an adrp instruction
*/
- .macro adr_l, dst, sym, tmp=
- .ifb \tmp
+ .macro adr_l, dst, sym
+#ifndef MODULE
adrp \dst, \sym
add \dst, \dst, :lo12:\sym
- .else
- adrp \tmp, \sym
- add \dst, \tmp, :lo12:\sym
- .endif
+#else
+ movz \dst, #:abs_g3:\sym
+ movk \dst, #:abs_g2_nc:\sym
+ movk \dst, #:abs_g1_nc:\sym
+ movk \dst, #:abs_g0_nc:\sym
+#endif
.endm
/*
@@ -202,6 +205,7 @@
* the address
*/
.macro ldr_l, dst, sym, tmp=
+#ifndef MODULE
.ifb \tmp
adrp \dst, \sym
ldr \dst, [\dst, :lo12:\sym]
@@ -209,6 +213,15 @@
adrp \tmp, \sym
ldr \dst, [\tmp, :lo12:\sym]
.endif
+#else
+ .ifb \tmp
+ adr_l \dst, \sym
+ ldr \dst, [\dst]
+ .else
+ adr_l \tmp, \sym
+ ldr \dst, [\tmp]
+ .endif
+#endif
.endm
/*
@@ -218,19 +231,35 @@
* while <src> needs to be preserved.
*/
.macro str_l, src, sym, tmp
+#ifndef MODULE
adrp \tmp, \sym
str \src, [\tmp, :lo12:\sym]
+#else
+ adr_l \tmp, \sym
+ str \src, [\tmp]
+#endif
.endm
/*
+ * @dst: Result of per_cpu(sym, smp_processor_id())
* @sym: The name of the per-cpu variable
- * @reg: Result of per_cpu(sym, smp_processor_id())
* @tmp: scratch register
*/
- .macro this_cpu_ptr, sym, reg, tmp
- adr_l \reg, \sym
+ .macro adr_this_cpu, dst, sym, tmp
+ adr_l \dst, \sym
mrs \tmp, tpidr_el1
- add \reg, \reg, \tmp
+ add \dst, \dst, \tmp
+ .endm
+
+ /*
+ * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
+ * @sym: The name of the per-cpu variable
+ * @tmp: scratch register
+ */
+ .macro ldr_this_cpu dst, sym, tmp
+ adr_l \dst, \sym
+ mrs \tmp, tpidr_el1
+ ldr \dst, [\dst, \tmp]
.endm
/*
diff --git a/arch/arm64/include/asm/current.h b/arch/arm64/include/asm/current.h
new file mode 100644
index 0000000..86c4041
--- /dev/null
+++ b/arch/arm64/include/asm/current.h
@@ -0,0 +1,30 @@
+#ifndef __ASM_CURRENT_H
+#define __ASM_CURRENT_H
+
+#include <linux/compiler.h>
+
+#include <asm/sysreg.h>
+
+#ifndef __ASSEMBLY__
+
+struct task_struct;
+
+/*
+ * We don't use read_sysreg() as we want the compiler to cache the value where
+ * possible.
+ */
+static __always_inline struct task_struct *get_current(void)
+{
+ unsigned long sp_el0;
+
+ asm ("mrs %0, sp_el0" : "=r" (sp_el0));
+
+ return (struct task_struct *)sp_el0;
+}
+
+#define current get_current()
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_CURRENT_H */
+
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index a55384f..afa23b0 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -113,12 +113,11 @@
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/*
- * This is the location that an ET_DYN program is loaded if exec'ed. Typical
- * use of this is to invoke "./ld.so someprog" to test out a new version of
- * the loader. We need to make sure that it is out of the way of the program
- * that it will "exec", and that there is sufficient room for the brk.
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
*/
-#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3)
+#define ELF_ET_DYN_BASE 0x100000000UL
#ifndef __ASSEMBLY__
@@ -169,7 +168,8 @@
#ifdef CONFIG_COMPAT
-#define COMPAT_ELF_ET_DYN_BASE (2 * TASK_SIZE_32 / 3)
+/* PIE load location for compat arm. Must match ARM ELF_ET_DYN_BASE. */
+#define COMPAT_ELF_ET_DYN_BASE 0x000400000UL
/* AArch32 registers. */
#define COMPAT_ELF_NGREG 18
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 6d22017..ef305f8 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -47,7 +47,7 @@
* If the page is in the bottom half, we have to use the top half. If
* the page is in the top half, we have to use the bottom half:
*
- * T = __virt_to_phys(__hyp_idmap_text_start)
+ * T = __pa_symbol(__hyp_idmap_text_start)
* if (T & BIT(VA_BITS - 1))
* HYP_VA_MIN = 0 //idmap in upper half
* else
@@ -270,7 +270,7 @@
kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
}
-#define kvm_virt_to_phys(x) __virt_to_phys((unsigned long)(x))
+#define kvm_virt_to_phys(x) __pa_symbol(x)
void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 53211a0..5edb6ed 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -204,7 +204,8 @@
#define __pa(x) __virt_to_phys((unsigned long)(x))
#define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
-#define virt_to_pfn(x) __phys_to_pfn(__virt_to_phys(x))
+#define virt_to_pfn(x) __phys_to_pfn(__virt_to_phys((unsigned long)(x)))
+#define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x))
/*
* virt_to_page(k) convert a _valid_ virtual address to struct page *
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index dc06a33..88025ba 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -51,7 +51,7 @@
*/
static inline void cpu_set_reserved_ttbr0(void)
{
- unsigned long ttbr = virt_to_phys(empty_zero_page);
+ unsigned long ttbr = __pa_symbol(empty_zero_page);
write_sysreg(ttbr, ttbr0_el1);
isb();
@@ -120,7 +120,7 @@
local_flush_tlb_all();
cpu_set_idmap_tcr_t0sz();
- cpu_switch_mm(idmap_pg_dir, &init_mm);
+ cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm);
}
/*
@@ -135,7 +135,7 @@
phys_addr_t pgd_phys = virt_to_phys(pgd);
- replace_phys = (void *)virt_to_phys(idmap_cpu_replace_ttbr1);
+ replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);
cpu_install_idmap();
replace_phys(pgd_phys);
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 5394c84..d7a3c62 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -16,6 +16,8 @@
#ifndef __ASM_PERCPU_H
#define __ASM_PERCPU_H
+#include <asm/stack_pointer.h>
+
static inline void set_my_cpu_offset(unsigned long off)
{
asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
index 38b6a2b..8d5cbec 100644
--- a/arch/arm64/include/asm/perf_event.h
+++ b/arch/arm64/include/asm/perf_event.h
@@ -17,6 +17,8 @@
#ifndef __ASM_PERF_EVENT_H
#define __ASM_PERF_EVENT_H
+#include <asm/stack_pointer.h>
+
#define ARMV8_PMU_MAX_COUNTERS 32
#define ARMV8_PMU_COUNTER_MASK (ARMV8_PMU_MAX_COUNTERS - 1)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 3845f33..c05ee84 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -53,7 +53,7 @@
* for zero-mapped memory areas etc..
*/
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
-#define ZERO_PAGE(vaddr) pfn_to_page(PHYS_PFN(__pa(empty_zero_page)))
+#define ZERO_PAGE(vaddr) phys_to_page(__pa_symbol(empty_zero_page))
#define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte_val(pte))
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 0226447..d050d72 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -29,11 +29,22 @@
#ifndef __ASSEMBLY__
+#include <asm/percpu.h>
+
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/thread_info.h>
-#define raw_smp_processor_id() (current_thread_info()->cpu)
+DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
+
+/*
+ * We don't use this_cpu_read(cpu_number) as that has implicit writes to
+ * preempt_count, and associated (compiler) barriers, that we'd like to avoid
+ * the expense of. If we're preemptible, the value can be stale at use anyway.
+ * And we can't use this_cpu_ptr() either, as that winds up recursing back
+ * here under CONFIG_DEBUG_PREEMPT=y.
+ */
+#define raw_smp_processor_id() (*raw_cpu_ptr(&cpu_number))
struct seq_file;
@@ -73,6 +84,7 @@
*/
struct secondary_data {
void *stack;
+ struct task_struct *task;
long status;
};
diff --git a/arch/arm64/include/asm/stack_pointer.h b/arch/arm64/include/asm/stack_pointer.h
new file mode 100644
index 0000000..ffcdf74
--- /dev/null
+++ b/arch/arm64/include/asm/stack_pointer.h
@@ -0,0 +1,9 @@
+#ifndef __ASM_STACK_POINTER_H
+#define __ASM_STACK_POINTER_H
+
+/*
+ * how to get the current stack pointer from C
+ */
+register unsigned long current_stack_pointer asm ("sp");
+
+#endif /* __ASM_STACK_POINTER_H */
diff --git a/arch/arm64/include/asm/stackprotector.h b/arch/arm64/include/asm/stackprotector.h
index fe5e287..b86a086 100644
--- a/arch/arm64/include/asm/stackprotector.h
+++ b/arch/arm64/include/asm/stackprotector.h
@@ -30,6 +30,7 @@
/* Try to get a semi random initial value. */
get_random_bytes(&canary, sizeof(canary));
canary ^= LINUX_VERSION_CODE;
+ canary &= CANARY_MASK;
current->stack_canary = canary;
__stack_chk_guard = current->stack_canary;
diff --git a/arch/arm64/include/asm/string.h b/arch/arm64/include/asm/string.h
index 2eb714c..d0aa429 100644
--- a/arch/arm64/include/asm/string.h
+++ b/arch/arm64/include/asm/string.h
@@ -63,6 +63,11 @@
#define memcpy(dst, src, len) __memcpy(dst, src, len)
#define memmove(dst, src, len) __memmove(dst, src, len)
#define memset(s, c, n) __memset(s, c, n)
+
+#ifndef __NO_FORTIFY
+#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
+#endif
+
#endif
#endif
diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h
index b8a313f..de5600f 100644
--- a/arch/arm64/include/asm/suspend.h
+++ b/arch/arm64/include/asm/suspend.h
@@ -1,7 +1,7 @@
#ifndef __ASM_SUSPEND_H
#define __ASM_SUSPEND_H
-#define NR_CTX_REGS 10
+#define NR_CTX_REGS 12
#define NR_CALLEE_SAVED_REGS 12
/*
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 4ad25a5..ebd18b7 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -36,61 +36,31 @@
struct task_struct;
+#include <asm/stack_pointer.h>
#include <asm/types.h>
typedef unsigned long mm_segment_t;
/*
* low level task data that entry.S needs immediate access to.
- * __switch_to() assumes cpu_context follows immediately after cpu_domain.
*/
struct thread_info {
unsigned long flags; /* low level flags */
mm_segment_t addr_limit; /* address limit */
- struct task_struct *task; /* main task structure */
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
u64 ttbr0; /* saved TTBR0_EL1 */
#endif
int preempt_count; /* 0 => preemptable, <0 => bug */
- int cpu; /* cpu */
};
#define INIT_THREAD_INFO(tsk) \
{ \
- .task = &tsk, \
- .flags = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
}
-#define init_thread_info (init_thread_union.thread_info)
#define init_stack (init_thread_union.stack)
-/*
- * how to get the current stack pointer from C
- */
-register unsigned long current_stack_pointer asm ("sp");
-
-/*
- * how to get the thread information struct from C
- */
-static inline struct thread_info *current_thread_info(void) __attribute_const__;
-
-/*
- * struct thread_info can be accessed directly via sp_el0.
- *
- * We don't use read_sysreg() as we want the compiler to cache the value where
- * possible.
- */
-static inline struct thread_info *current_thread_info(void)
-{
- unsigned long sp_el0;
-
- asm ("mrs %0, sp_el0" : "=r" (sp_el0));
-
- return (struct thread_info *)sp_el0;
-}
-
#define thread_saved_pc(tsk) \
((unsigned long)(tsk->thread.cpu_context.pc))
#define thread_saved_sp(tsk) \
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 2df5d5f..4d9222a 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -383,9 +383,9 @@
{
unsigned long res = n;
kasan_check_write(to, n);
+ check_object_size(to, n, false);
if (access_ok(VERIFY_READ, from, n)) {
- check_object_size(to, n, false);
res = __arch_copy_from_user(to, from, n);
}
if (unlikely(res))
@@ -396,9 +396,9 @@
static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
{
kasan_check_read(from, n);
+ check_object_size(from, n, true);
if (access_ok(VERIFY_WRITE, to, n)) {
- check_object_size(from, n, true);
n = __arch_copy_to_user(to, from, n);
}
return n;
diff --git a/arch/arm64/kernel/acpi_parking_protocol.c b/arch/arm64/kernel/acpi_parking_protocol.c
index a32b401..1f5655c 100644
--- a/arch/arm64/kernel/acpi_parking_protocol.c
+++ b/arch/arm64/kernel/acpi_parking_protocol.c
@@ -17,6 +17,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/acpi.h>
+#include <linux/mm.h>
#include <linux/types.h>
#include <asm/cpu_ops.h>
@@ -109,7 +110,7 @@
* that read this address need to convert this address to the
* Boot-Loader's endianness before jumping.
*/
- writeq_relaxed(__pa(secondary_entry), &mailbox->entry_point);
+ writeq_relaxed(__pa_symbol(secondary_entry), &mailbox->entry_point);
writel_relaxed(cpu_entry->gic_cpu_id, &mailbox->cpu_id);
arch_send_wakeup_ipi_mask(cpumask_of(cpu));
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 5cdbc55..b3bb7ef 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -36,17 +36,13 @@
{
DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
BLANK();
- DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
- DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
- DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
+ DEFINE(TSK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags));
+ DEFINE(TSK_TI_PREEMPT, offsetof(struct task_struct, thread_info.preempt_count));
+ DEFINE(TSK_TI_ADDR_LIMIT, offsetof(struct task_struct, thread_info.addr_limit));
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
- DEFINE(TI_TTBR0, offsetof(struct thread_info, ttbr0));
+ DEFINE(TSK_TI_TTBR0, offsetof(struct task_struct, thread_info.ttbr0));
#endif
- DEFINE(TI_TASK, offsetof(struct thread_info, task));
- DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
- DEFINE(TSK_TI_TTBR0, offsetof(struct thread_info, ttbr0));
-#endif
+ DEFINE(TSK_STACK, offsetof(struct task_struct, stack));
BLANK();
DEFINE(THREAD_CPU_CONTEXT, offsetof(struct task_struct, thread.cpu_context));
BLANK();
@@ -129,6 +125,7 @@
DEFINE(TZ_DSTTIME, offsetof(struct timezone, tz_dsttime));
BLANK();
DEFINE(CPU_BOOT_STACK, offsetof(struct secondary_data, stack));
+ DEFINE(CPU_BOOT_TASK, offsetof(struct secondary_data, task));
BLANK();
#ifdef CONFIG_KVM_ARM_HOST
DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt));
diff --git a/arch/arm64/kernel/cpu-reset.h b/arch/arm64/kernel/cpu-reset.h
index d4e9ecb..6c2b1b4 100644
--- a/arch/arm64/kernel/cpu-reset.h
+++ b/arch/arm64/kernel/cpu-reset.h
@@ -24,7 +24,7 @@
el2_switch = el2_switch && !is_kernel_in_hyp_mode() &&
is_hyp_mode_available();
- restart = (void *)virt_to_phys(__cpu_soft_restart);
+ restart = (void *)__pa_symbol(__cpu_soft_restart);
cpu_install_idmap();
restart(el2_switch, entry, arg0, arg1, arg2);
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index af5a1e3..0127e1b 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -23,6 +23,7 @@
#include <linux/sort.h>
#include <linux/stop_machine.h>
#include <linux/types.h>
+#include <linux/mm.h>
#include <asm/cpu.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
@@ -737,7 +738,7 @@
static bool hyp_offset_low(const struct arm64_cpu_capabilities *entry,
int __unused)
{
- phys_addr_t idmap_addr = virt_to_phys(__hyp_idmap_text_start);
+ phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
/*
* Activate the lower HYP offset only if:
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index c44a933..718c4c8 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -93,9 +93,8 @@
.if \el == 0
mrs x21, sp_el0
- mov tsk, sp
- and tsk, tsk, #~(THREAD_SIZE - 1) // Ensure MDSCR_EL1.SS is clear,
- ldr x19, [tsk, #TI_FLAGS] // since we can unmask debug
+ ldr_this_cpu tsk, __entry_task, x20 // Ensure MDSCR_EL1.SS is clear,
+ ldr x19, [tsk, #TSK_TI_FLAGS] // since we can unmask debug
disable_step_tsk x19, x20 // exceptions when scheduling.
mov x29, xzr // fp pointed to user-space
@@ -103,10 +102,10 @@
add x21, sp, #S_FRAME_SIZE
get_thread_info tsk
/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
- ldr x20, [tsk, #TI_ADDR_LIMIT]
+ ldr x20, [tsk, #TSK_TI_ADDR_LIMIT]
str x20, [sp, #S_ORIG_ADDR_LIMIT]
mov x20, #TASK_SIZE_64
- str x20, [tsk, #TI_ADDR_LIMIT]
+ str x20, [tsk, #TSK_TI_ADDR_LIMIT]
/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
.endif /* \el == 0 */
mrs x22, elr_el1
@@ -168,7 +167,7 @@
.if \el != 0
/* Restore the task's original addr_limit. */
ldr x20, [sp, #S_ORIG_ADDR_LIMIT]
- str x20, [tsk, #TI_ADDR_LIMIT]
+ str x20, [tsk, #TSK_TI_ADDR_LIMIT]
/* No need to restore UAO, it will be restored from SPSR_EL1 */
.endif
@@ -252,15 +251,16 @@
mov x19, sp // preserve the original sp
/*
- * Compare sp with the current thread_info, if the top
- * ~(THREAD_SIZE - 1) bits match, we are on a task stack, and
- * should switch to the irq stack.
+ * Compare sp with the base of the task stack.
+ * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
+ * and should switch to the irq stack.
*/
- and x25, x19, #~(THREAD_SIZE - 1)
- cmp x25, tsk
- b.ne 9998f
+ ldr x25, [tsk, TSK_STACK]
+ eor x25, x25, x19
+ and x25, x25, #~(THREAD_SIZE - 1)
+ cbnz x25, 9998f
- this_cpu_ptr irq_stack, x25, x26
+ adr_this_cpu x25, irq_stack, x26
mov x26, #IRQ_STACK_START_SP
add x26, x25, x26
@@ -488,9 +488,9 @@
irq_handler
#ifdef CONFIG_PREEMPT
- ldr w24, [tsk, #TI_PREEMPT] // get preempt count
+ ldr w24, [tsk, #TSK_TI_PREEMPT] // get preempt count
cbnz w24, 1f // preempt count != 0
- ldr x0, [tsk, #TI_FLAGS] // get flags
+ ldr x0, [tsk, #TSK_TI_FLAGS] // get flags
tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling?
bl el1_preempt
1:
@@ -505,7 +505,7 @@
el1_preempt:
mov x24, lr
1: bl preempt_schedule_irq // irq en/disable is done inside
- ldr x0, [tsk, #TI_FLAGS] // get new tasks TI_FLAGS
+ ldr x0, [tsk, #TSK_TI_FLAGS] // get new tasks TI_FLAGS
tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling?
ret x24
#endif
@@ -735,8 +735,7 @@
ldp x29, x9, [x8], #16
ldr lr, [x8]
mov sp, x9
- and x9, x9, #~(THREAD_SIZE - 1)
- msr sp_el0, x9
+ msr sp_el0, x1
ret
ENDPROC(cpu_switch_to)
@@ -747,7 +746,7 @@
ret_fast_syscall:
disable_irq // disable interrupts
str x0, [sp, #S_X0] // returned x0
- ldr x1, [tsk, #TI_FLAGS] // re-check for syscall tracing
+ ldr x1, [tsk, #TSK_TI_FLAGS] // re-check for syscall tracing
and x2, x1, #_TIF_SYSCALL_WORK
cbnz x2, ret_fast_syscall_trace
and x2, x1, #_TIF_WORK_MASK
@@ -767,14 +766,14 @@
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_on // enabled while in userspace
#endif
- ldr x1, [tsk, #TI_FLAGS] // re-check for single-step
+ ldr x1, [tsk, #TSK_TI_FLAGS] // re-check for single-step
b finish_ret_to_user
/*
* "slow" syscall return path.
*/
ret_to_user:
disable_irq // disable interrupts
- ldr x1, [tsk, #TI_FLAGS]
+ ldr x1, [tsk, #TSK_TI_FLAGS]
and x2, x1, #_TIF_WORK_MASK
cbnz x2, work_pending
finish_ret_to_user:
@@ -807,7 +806,7 @@
enable_dbg_and_irq
ct_user_exit 1
- ldr x16, [tsk, #TI_FLAGS] // check for syscall hooks
+ ldr x16, [tsk, #TSK_TI_FLAGS] // check for syscall hooks
tst x16, #_TIF_SYSCALL_WORK
b.ne __sys_trace
cmp scno, sc_nr // check upper syscall limit
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index c7d26bb..7ee6d74 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -428,7 +428,8 @@
__primary_switched:
adrp x4, init_thread_union
add sp, x4, #THREAD_SIZE
- msr sp_el0, x4 // Save thread_info
+ adr_l x5, init_task
+ msr sp_el0, x5 // Save thread_info
adr_l x8, vectors // load VBAR_EL1 with virtual
msr vbar_el1, x8 // vector table address
@@ -699,10 +700,10 @@
isb
adr_l x0, secondary_data
- ldr x0, [x0, #CPU_BOOT_STACK] // get secondary_data.stack
- mov sp, x0
- and x0, x0, #~(THREAD_SIZE - 1)
- msr sp_el0, x0 // save thread_info
+ ldr x1, [x0, #CPU_BOOT_STACK] // get secondary_data.stack
+ mov sp, x1
+ ldr x2, [x0, #CPU_BOOT_TASK]
+ msr sp_el0, x2
mov x29, #0
b secondary_start_kernel
ENDPROC(__secondary_switched)
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index d55a7b0..8bed26a 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -50,9 +50,6 @@
*/
extern int in_suspend;
-/* Find a symbols alias in the linear map */
-#define LMADDR(x) phys_to_virt(virt_to_phys(x))
-
/* Do we need to reset el2? */
#define el2_reset_needed() (is_hyp_mode_available() && !is_kernel_in_hyp_mode())
@@ -102,8 +99,8 @@
int pfn_is_nosave(unsigned long pfn)
{
- unsigned long nosave_begin_pfn = virt_to_pfn(&__nosave_begin);
- unsigned long nosave_end_pfn = virt_to_pfn(&__nosave_end - 1);
+ unsigned long nosave_begin_pfn = sym_to_pfn(&__nosave_begin);
+ unsigned long nosave_end_pfn = sym_to_pfn(&__nosave_end - 1);
return (pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn);
}
@@ -125,12 +122,12 @@
return -EOVERFLOW;
arch_hdr_invariants(&hdr->invariants);
- hdr->ttbr1_el1 = virt_to_phys(swapper_pg_dir);
+ hdr->ttbr1_el1 = __pa_symbol(swapper_pg_dir);
hdr->reenter_kernel = _cpu_resume;
/* We can't use __hyp_get_vectors() because kvm may still be loaded */
if (el2_reset_needed())
- hdr->__hyp_stub_vectors = virt_to_phys(__hyp_stub_vectors);
+ hdr->__hyp_stub_vectors = __pa_symbol(__hyp_stub_vectors);
else
hdr->__hyp_stub_vectors = 0;
@@ -460,7 +457,6 @@
void *zero_page;
size_t exit_size;
pgd_t *tmp_pg_dir;
- void *lm_restore_pblist;
phys_addr_t phys_hibernate_exit;
void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *,
void *, phys_addr_t, phys_addr_t);
@@ -481,12 +477,6 @@
goto out;
/*
- * Since we only copied the linear map, we need to find restore_pblist's
- * linear map address.
- */
- lm_restore_pblist = LMADDR(restore_pblist);
-
- /*
* We need a zero page that is zero before & after resume in order to
* to break before make on the ttbr1 page tables.
*/
@@ -537,7 +527,7 @@
}
hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
- resume_hdr.reenter_kernel, lm_restore_pblist,
+ resume_hdr.reenter_kernel, restore_pblist,
resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));
out:
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 6f2ac4f..f607b38 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -97,7 +97,7 @@
if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
page = vmalloc_to_page(addr);
else if (!module)
- page = pfn_to_page(PHYS_PFN(__pa(addr)));
+ page = phys_to_page(__pa_symbol(addr));
else
return addr;
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index bb24b4e..b325f74 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -1189,20 +1189,24 @@
{
int ret, cpu;
- for_each_possible_cpu(cpu)
- per_cpu(is_hotplugging, cpu) = false;
+ /* set to true so armv8pmu_idle_update doesn't try to load
+ * hw_events before arm_pmu_device_probe has initialized it.
+ */
+ for_each_possible_cpu(cpu) {
+ per_cpu(is_hotplugging, cpu) = true;
+ }
- ret = perf_event_cpu_hp_init();
+ ret = arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids,
+ (acpi_disabled ? NULL : armv8_pmu_probe_table));
- if (ret)
- return ret;
+ if (!ret) {
+ for_each_possible_cpu(cpu)
+ per_cpu(is_hotplugging, cpu) = false;
- if (acpi_disabled)
- return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids,
- NULL);
+ ret = perf_event_cpu_hp_init();
+ }
- return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids,
- armv8_pmu_probe_table);
+ return ret;
}
static struct platform_driver armv8_pmu_driver = {
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index fc1a286..5fe594e 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -45,6 +45,7 @@
#include <linux/personality.h>
#include <linux/notifier.h>
#include <trace/events/power.h>
+#include <linux/percpu.h>
#include <asm/alternative.h>
#include <asm/compat.h>
@@ -390,6 +391,20 @@
}
/*
+ * We store our current task in sp_el0, which is clobbered by userspace. Keep a
+ * shadow copy so that we can restore this upon entry from userspace.
+ *
+ * This is *only* for exception entry from EL0, and is not valid until we
+ * __switch_to() a user task.
+ */
+DEFINE_PER_CPU(struct task_struct *, __entry_task);
+
+static void entry_task_switch(struct task_struct *next)
+{
+ __this_cpu_write(__entry_task, next);
+}
+
+/*
* Thread switching.
*/
struct task_struct *__switch_to(struct task_struct *prev,
@@ -401,6 +416,7 @@
tls_thread_switch(next);
hw_breakpoint_thread_switch(next);
contextidr_thread_switch(next);
+ entry_task_switch(next);
uao_thread_switch(next);
/*
@@ -418,27 +434,35 @@
unsigned long get_wchan(struct task_struct *p)
{
struct stackframe frame;
- unsigned long stack_page;
+ unsigned long stack_page, ret = 0;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
+ stack_page = (unsigned long)try_get_task_stack(p);
+ if (!stack_page)
+ return 0;
+
frame.fp = thread_saved_fp(p);
frame.sp = thread_saved_sp(p);
frame.pc = thread_saved_pc(p);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
frame.graph = p->curr_ret_stack;
#endif
- stack_page = (unsigned long)task_stack_page(p);
do {
if (frame.sp < stack_page ||
frame.sp >= stack_page + THREAD_SIZE ||
unwind_frame(p, &frame))
- return 0;
- if (!in_sched_functions(frame.pc))
- return frame.pc;
+ goto out;
+ if (!in_sched_functions(frame.pc)) {
+ ret = frame.pc;
+ goto out;
+ }
} while (count ++ < 16);
- return 0;
+
+out:
+ put_task_stack(p);
+ return ret;
}
unsigned long arch_align_stack(unsigned long sp)
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 81762dd..716a5c2 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -20,6 +20,7 @@
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/psci.h>
+#include <linux/mm.h>
#include <uapi/linux/psci.h>
@@ -45,7 +46,7 @@
static int cpu_psci_cpu_boot(unsigned int cpu)
{
- int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_entry));
+ int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa_symbol(secondary_entry));
if (err)
pr_err("failed to boot CPU%d (%d)\n", cpu, err);
diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c
index 1718706..12a87f2 100644
--- a/arch/arm64/kernel/return_address.c
+++ b/arch/arm64/kernel/return_address.c
@@ -12,6 +12,7 @@
#include <linux/export.h>
#include <linux/ftrace.h>
+#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
struct return_address_data {
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 49f3ae0..ba29095 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -44,6 +44,7 @@
#include <linux/psci.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
+#include <linux/mm.h>
#include <asm/acpi.h>
#include <asm/fixmap.h>
@@ -212,10 +213,10 @@
struct memblock_region *region;
struct resource *res;
- kernel_code.start = virt_to_phys(_text);
- kernel_code.end = virt_to_phys(__init_begin - 1);
- kernel_data.start = virt_to_phys(_sdata);
- kernel_data.end = virt_to_phys(_end - 1);
+ kernel_code.start = __pa_symbol(_text);
+ kernel_code.end = __pa_symbol(__init_begin - 1);
+ kernel_data.start = __pa_symbol(_sdata);
+ kernel_data.end = __pa_symbol(_end - 1);
for_each_memblock(memory, region) {
res = alloc_bootmem_low(sizeof(*res));
@@ -312,7 +313,7 @@
* faults in case uaccess_enable() is inadvertently called by the init
* thread.
*/
- init_thread_info.ttbr0 = virt_to_phys(empty_zero_page);
+ init_task.thread_info.ttbr0 = virt_to_phys(empty_zero_page);
#endif
#ifdef CONFIG_VT
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index 1bec41b..df67652 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -125,9 +125,6 @@
/* load sp from context */
ldr x2, [x0, #CPU_CTX_SP]
mov sp, x2
- /* save thread_info */
- and x2, x2, #~(THREAD_SIZE - 1)
- msr sp_el0, x2
/*
* cpu_do_resume expects x0 to contain context address pointer
*/
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 0f62709..2437f15 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -58,6 +58,9 @@
#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>
+DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
+EXPORT_PER_CPU_SYMBOL(cpu_number);
+
/*
* as from 2.5, kernels no longer have an init_tasks structure
* so we need some other way of telling a new secondary core
@@ -147,6 +150,7 @@
* We need to tell the secondary core where to find its stack and the
* page tables.
*/
+ secondary_data.task = idle;
secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
update_cpu_boot_status(CPU_MMU_OFF);
__flush_dcache_area(&secondary_data, sizeof(secondary_data));
@@ -171,6 +175,7 @@
pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
}
+ secondary_data.task = NULL;
secondary_data.stack = NULL;
status = READ_ONCE(secondary_data.status);
if (ret && status) {
@@ -209,7 +214,10 @@
asmlinkage void secondary_start_kernel(void)
{
struct mm_struct *mm = &init_mm;
- unsigned int cpu = smp_processor_id();
+ unsigned int cpu;
+
+ cpu = task_cpu(current);
+ set_my_cpu_offset(per_cpu_offset(cpu));
/*
* All kernel threads share the same mm context; grab a
@@ -218,8 +226,6 @@
atomic_inc(&mm->mm_count);
current->active_mm = mm;
- set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
-
pr_debug("CPU%u: Booted secondary processor\n", cpu);
/*
@@ -733,6 +739,8 @@
*/
for_each_possible_cpu(cpu) {
+ per_cpu(cpu_number, cpu) = cpu;
+
if (cpu == smp_processor_id())
continue;
@@ -1035,7 +1043,7 @@
#ifdef CONFIG_HOTPLUG_CPU
int any_cpu = raw_smp_processor_id();
- if (cpu_ops[any_cpu]->cpu_die)
+ if (cpu_ops[any_cpu] && cpu_ops[any_cpu]->cpu_die)
return true;
#endif
return false;
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index 9a00eee..9303465 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -21,6 +21,7 @@
#include <linux/of.h>
#include <linux/smp.h>
#include <linux/types.h>
+#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/cpu_ops.h>
@@ -98,7 +99,7 @@
* boot-loader's endianess before jumping. This is mandated by
* the boot protocol.
*/
- writeq_relaxed(__pa(secondary_holding_pen), release_addr);
+ writeq_relaxed(__pa_symbol(secondary_holding_pen), release_addr);
__flush_dcache_area((__force void *)release_addr,
sizeof(*release_addr));
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index bedf97d..900c1ec 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -22,6 +22,7 @@
#include <linux/stacktrace.h>
#include <asm/irq.h>
+#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
/*
@@ -128,7 +129,6 @@
break;
}
}
-EXPORT_SYMBOL(walk_stackframe);
#ifdef CONFIG_STACKTRACE
struct stack_trace_data {
@@ -181,6 +181,9 @@
struct stack_trace_data data;
struct stackframe frame;
+ if (!try_get_task_stack(tsk))
+ return;
+
data.trace = trace;
data.skip = trace->skip;
@@ -202,6 +205,8 @@
walk_stackframe(tsk, &frame, save_trace, &data);
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
+
+ put_task_stack(tsk);
}
EXPORT_SYMBOL(save_stack_trace_tsk);
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index bb0cd78..1e3be90 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -47,12 +47,6 @@
cpu_uninstall_idmap();
/*
- * Restore per-cpu offset before any kernel
- * subsystem relying on it has a chance to run.
- */
- set_my_cpu_offset(per_cpu_offset(cpu));
-
- /*
* PSTATE was not saved over suspend/resume, re-enable any detected
* features that might not have been set correctly.
*/
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index d8253fb..e576c1d 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -38,6 +38,7 @@
#include <asm/esr.h>
#include <asm/insn.h>
#include <asm/traps.h>
+#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
#include <asm/exception.h>
#include <asm/system_misc.h>
@@ -147,6 +148,9 @@
if (!tsk)
tsk = current;
+ if (!try_get_task_stack(tsk))
+ return;
+
/*
* Switching between stacks is valid when tracing current and in
* non-preemptible context.
@@ -212,6 +216,8 @@
stack + sizeof(struct pt_regs));
}
}
+
+ put_task_stack(tsk);
}
void show_stack(struct task_struct *tsk, unsigned long *sp)
@@ -227,10 +233,9 @@
#endif
#define S_SMP " SMP"
-static int __die(const char *str, int err, struct thread_info *thread,
- struct pt_regs *regs)
+static int __die(const char *str, int err, struct pt_regs *regs)
{
- struct task_struct *tsk = thread->task;
+ struct task_struct *tsk = current;
static int die_counter;
int ret;
@@ -245,7 +250,8 @@
print_modules();
__show_regs(regs);
pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
- TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1);
+ TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk),
+ end_of_stack(tsk));
if (!user_mode(regs)) {
dump_backtrace(regs, tsk);
@@ -310,7 +316,6 @@
*/
void die(const char *str, struct pt_regs *regs, int err)
{
- struct thread_info *thread = current_thread_info();
enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE;
unsigned long flags = oops_begin();
int ret;
@@ -320,7 +325,7 @@
if (bug_type != BUG_TRAP_TYPE_NONE && !strlen(str))
str = "Oops - BUG";
- ret = __die(str, err, thread, regs);
+ ret = __die(str, err, regs);
oops_end(flags, regs, ret);
}
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index a2c2478..ef3bdfd 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -37,7 +37,7 @@
#include <asm/vdso.h>
#include <asm/vdso_datapage.h>
-extern char vdso_start, vdso_end;
+extern char vdso_start[], vdso_end[];
static unsigned long vdso_pages __ro_after_init;
/*
@@ -123,15 +123,16 @@
{
int i;
struct page **vdso_pagelist;
+ unsigned long pfn;
- if (memcmp(&vdso_start, "\177ELF", 4)) {
+ if (memcmp(vdso_start, "\177ELF", 4)) {
pr_err("vDSO is not a valid ELF object!\n");
return -EINVAL;
}
- vdso_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
+ vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n",
- vdso_pages + 1, vdso_pages, &vdso_start, 1L, vdso_data);
+ vdso_pages + 1, vdso_pages, vdso_start, 1L, vdso_data);
/* Allocate the vDSO pagelist, plus a page for the data. */
vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *),
@@ -140,11 +141,14 @@
return -ENOMEM;
/* Grab the vDSO data page. */
- vdso_pagelist[0] = pfn_to_page(PHYS_PFN(__pa(vdso_data)));
+ vdso_pagelist[0] = phys_to_page(__pa_symbol(vdso_data));
+
/* Grab the vDSO code pages. */
+ pfn = sym_to_pfn(vdso_start);
+
for (i = 0; i < vdso_pages; i++)
- vdso_pagelist[i + 1] = pfn_to_page(PHYS_PFN(__pa(&vdso_start)) + i);
+ vdso_pagelist[i + 1] = pfn_to_page(pfn + i);
vdso_spec[0].pages = &vdso_pagelist[0];
vdso_spec[1].pages = &vdso_pagelist[1];
@@ -217,10 +221,11 @@
/* tkr_mono.cycle_last == tkr_raw.cycle_last */
vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
vdso_data->raw_time_sec = tk->raw_time.tv_sec;
- vdso_data->raw_time_nsec = tk->raw_time.tv_nsec;
+ vdso_data->raw_time_nsec = (tk->raw_time.tv_nsec <<
+ tk->tkr_raw.shift) +
+ tk->tkr_raw.xtime_nsec;
vdso_data->xtime_clock_sec = tk->xtime_sec;
vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec;
- /* tkr_raw.xtime_nsec == 0 */
vdso_data->cs_mono_mult = tk->tkr_mono.mult;
vdso_data->cs_raw_mult = tk->tkr_raw.mult;
/* tkr_mono.shift == tkr_raw.shift */
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
index e00b467..76320e9 100644
--- a/arch/arm64/kernel/vdso/gettimeofday.S
+++ b/arch/arm64/kernel/vdso/gettimeofday.S
@@ -256,7 +256,6 @@
seqcnt_check fail=monotonic_raw
/* All computations are done with left-shifted nsecs. */
- lsl x14, x14, x12
get_nsec_per_sec res=x9
lsl x9, x9, x12
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index acbe515..7f90b7e 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -157,12 +157,13 @@
dma_addr_t *dma_handle, gfp_t flags,
unsigned long attrs)
{
+ void *addr;
+
if (IS_ENABLED(CONFIG_ZONE_DMA) &&
dev->coherent_dma_mask <= DMA_BIT_MASK(32))
flags |= GFP_DMA;
if (dev_get_cma_area(dev) && gfpflags_allow_blocking(flags)) {
struct page *page;
- void *addr;
page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
get_order(size));
@@ -172,20 +173,20 @@
*dma_handle = phys_to_dma(dev, page_to_phys(page));
addr = page_address(page);
memset(addr, 0, size);
-
- if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) ||
- (attrs & DMA_ATTR_STRONGLY_ORDERED)) {
- /*
- * flush the caches here because we can't later
- */
- __dma_flush_area(addr, size);
- __dma_remap(page, size, __pgprot(0), true);
- }
-
- return addr;
} else {
- return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
+ addr = swiotlb_alloc_coherent(dev, size, dma_handle, flags);
}
+
+ if (addr && ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) ||
+ (attrs & DMA_ATTR_STRONGLY_ORDERED))) {
+ /*
+ * flush the caches here because we can't later
+ */
+ __dma_flush_area(addr, size);
+ __dma_remap(virt_to_page(addr), size, __pgprot(0), true);
+ }
+
+ return addr;
}
static void __dma_free_coherent(struct device *dev, size_t size,
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 0a34644..d0ffade 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -40,7 +40,6 @@
#include <asm/system_misc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
-#include <asm/kryo3xx-arm64-edac.h>
#include <soc/qcom/scm.h>
struct fault_info {
@@ -521,7 +520,6 @@
*/
static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
- kryo3xx_poll_cache_errors(NULL);
return 1;
}
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 0b9492e..f8ef496 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -36,6 +36,7 @@
#include <linux/efi.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
+#include <linux/mm.h>
#include <asm/boot.h>
#include <asm/fixmap.h>
@@ -211,8 +212,8 @@
* linear mapping. Take care not to clip the kernel which may be
* high in memory.
*/
- memblock_remove(max_t(u64, memstart_addr + linear_region_size, __pa(_end)),
- ULLONG_MAX);
+ memblock_remove(max_t(u64, memstart_addr + linear_region_size,
+ __pa_symbol(_end)), ULLONG_MAX);
if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
/* ensure that memstart_addr remains sufficiently aligned */
memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,
@@ -227,7 +228,7 @@
*/
if (memory_limit != (phys_addr_t)ULLONG_MAX) {
memblock_mem_limit_remove_map(memory_limit);
- memblock_add(__pa(_text), (u64)(_end - _text));
+ memblock_add(__pa_symbol(_text), (u64)(_end - _text));
}
if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_start) {
@@ -280,7 +281,7 @@
* Register the kernel text, kernel data, initrd, and initial
* pagetables with memblock.
*/
- memblock_reserve(__pa(_text), _end - _text);
+ memblock_reserve(__pa_symbol(_text), _end - _text);
#ifdef CONFIG_BLK_DEV_INITRD
if (initrd_start) {
memblock_reserve(initrd_start, initrd_end - initrd_start);
@@ -488,7 +489,8 @@
void free_initmem(void)
{
- free_reserved_area(__va(__pa(__init_begin)), __va(__pa(__init_end)),
+ free_reserved_area(lm_alias(__init_begin),
+ lm_alias(__init_end),
0, "unused kernel");
/*
* Unmap the __init region but leave the VM area in place. This
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 757009d..201d918 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/start_kernel.h>
+#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/kernel-pgtable.h>
@@ -26,6 +27,13 @@
static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);
+/*
+ * The p*d_populate functions call virt_to_phys implicitly so they can't be used
+ * directly on kernel symbols (bm_p*d). All the early functions are called too
+ * early to use lm_alias so __p*d_populate functions must be used to populate
+ * with the physical address from __pa_symbol.
+ */
+
static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr,
unsigned long end)
{
@@ -33,12 +41,12 @@
unsigned long next;
if (pmd_none(*pmd))
- pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
+ __pmd_populate(pmd, __pa_symbol(kasan_zero_pte), PMD_TYPE_TABLE);
pte = pte_offset_kimg(pmd, addr);
do {
next = addr + PAGE_SIZE;
- set_pte(pte, pfn_pte(virt_to_pfn(kasan_zero_page),
+ set_pte(pte, pfn_pte(sym_to_pfn(kasan_zero_page),
PAGE_KERNEL));
} while (pte++, addr = next, addr != end && pte_none(*pte));
}
@@ -51,7 +59,7 @@
unsigned long next;
if (pud_none(*pud))
- pud_populate(&init_mm, pud, kasan_zero_pmd);
+ __pud_populate(pud, __pa_symbol(kasan_zero_pmd), PMD_TYPE_TABLE);
pmd = pmd_offset_kimg(pud, addr);
do {
@@ -68,7 +76,7 @@
unsigned long next;
if (pgd_none(*pgd))
- pgd_populate(&init_mm, pgd, kasan_zero_pud);
+ __pgd_populate(pgd, __pa_symbol(kasan_zero_pud), PUD_TYPE_TABLE);
pud = pud_offset_kimg(pgd, addr);
do {
@@ -148,7 +156,7 @@
*/
memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
dsb(ishst);
- cpu_replace_ttbr1(tmp_pg_dir);
+ cpu_replace_ttbr1(lm_alias(tmp_pg_dir));
clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
@@ -199,10 +207,10 @@
*/
for (i = 0; i < PTRS_PER_PTE; i++)
set_pte(&kasan_zero_pte[i],
- pfn_pte(virt_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));
+ pfn_pte(sym_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));
memset(kasan_zero_page, 0, PAGE_SIZE);
- cpu_replace_ttbr1(swapper_pg_dir);
+ cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
/* At this point kasan is fully initialized. Enable error messages */
init_task.kasan_depth = 0;
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index f70b433..41efd5e 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -32,6 +32,7 @@
#include <linux/stop_machine.h>
#include <linux/dma-contiguous.h>
#include <linux/cma.h>
+#include <linux/mm.h>
#include <asm/barrier.h>
#include <asm/cputype.h>
@@ -357,8 +358,8 @@
static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end)
{
- unsigned long kernel_start = __pa(_text);
- unsigned long kernel_end = __pa(__init_begin);
+ unsigned long kernel_start = __pa_symbol(_text);
+ unsigned long kernel_end = __pa_symbol(__init_begin);
/*
* Take care not to create a writable alias for the
@@ -425,21 +426,21 @@
unsigned long section_size;
section_size = (unsigned long)_etext - (unsigned long)_text;
- create_mapping_late(__pa(_text), (unsigned long)_text,
+ create_mapping_late(__pa_symbol(_text), (unsigned long)_text,
section_size, PAGE_KERNEL_ROX);
/*
* mark .rodata as read only. Use __init_begin rather than __end_rodata
* to cover NOTES and EXCEPTION_TABLE.
*/
section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
- create_mapping_late(__pa(__start_rodata), (unsigned long)__start_rodata,
+ create_mapping_late(__pa_symbol(__start_rodata), (unsigned long)__start_rodata,
section_size, PAGE_KERNEL_RO);
}
static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
pgprot_t prot, struct vm_struct *vma)
{
- phys_addr_t pa_start = __pa(va_start);
+ phys_addr_t pa_start = __pa_symbol(va_start);
unsigned long size = va_end - va_start;
BUG_ON(!PAGE_ALIGNED(pa_start));
@@ -487,7 +488,7 @@
*/
BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
set_pud(pud_set_fixmap_offset(pgd, FIXADDR_START),
- __pud(__pa(bm_pmd) | PUD_TYPE_TABLE));
+ __pud(__pa_symbol(bm_pmd) | PUD_TYPE_TABLE));
pud_clear_fixmap();
} else {
BUG();
@@ -518,7 +519,7 @@
*/
cpu_replace_ttbr1(__va(pgd_phys));
memcpy(swapper_pg_dir, pgd, PAGE_SIZE);
- cpu_replace_ttbr1(swapper_pg_dir);
+ cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
pgd_clear_fixmap();
memblock_free(pgd_phys, PAGE_SIZE);
@@ -527,7 +528,7 @@
* We only reuse the PGD from the swapper_pg_dir, not the pud + pmd
* allocated with it.
*/
- memblock_free(__pa(swapper_pg_dir) + PAGE_SIZE,
+ memblock_free(__pa_symbol(swapper_pg_dir) + PAGE_SIZE,
SWAPPER_DIR_SIZE - PAGE_SIZE);
}
@@ -638,6 +639,12 @@
return &bm_pte[pte_index(addr)];
}
+/*
+ * The p*d_populate functions call virt_to_phys implicitly so they can't be used
+ * directly on kernel symbols (bm_p*d). This function is called too early to use
+ * lm_alias so __p*d_populate functions must be used to populate with the
+ * physical address from __pa_symbol.
+ */
void __init early_fixmap_init(void)
{
pgd_t *pgd;
@@ -647,7 +654,7 @@
pgd = pgd_offset_k(addr);
if (CONFIG_PGTABLE_LEVELS > 3 &&
- !(pgd_none(*pgd) || pgd_page_paddr(*pgd) == __pa(bm_pud))) {
+ !(pgd_none(*pgd) || pgd_page_paddr(*pgd) == __pa_symbol(bm_pud))) {
/*
* We only end up here if the kernel mapping and the fixmap
* share the top level pgd entry, which should only happen on
@@ -656,12 +663,14 @@
BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
pud = pud_offset_kimg(pgd, addr);
} else {
- pgd_populate(&init_mm, pgd, bm_pud);
+ if (pgd_none(*pgd))
+ __pgd_populate(pgd, __pa_symbol(bm_pud), PUD_TYPE_TABLE);
pud = fixmap_pud(addr);
}
- pud_populate(&init_mm, pud, bm_pmd);
+ if (pud_none(*pud))
+ __pud_populate(pud, __pa_symbol(bm_pmd), PMD_TYPE_TABLE);
pmd = fixmap_pmd(addr);
- pmd_populate_kernel(&init_mm, pmd, bm_pte);
+ __pmd_populate(pmd, __pa_symbol(bm_pte), PMD_TYPE_TABLE);
/*
* The boot-ioremap range spans multiple pmds, for which
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 61330c9..8d21250 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -116,11 +116,14 @@
mrs x8, mdscr_el1
mrs x9, oslsr_el1
mrs x10, sctlr_el1
+ mrs x11, tpidr_el1
+ mrs x12, sp_el0
stp x2, x3, [x0]
stp x4, xzr, [x0, #16]
stp x5, x6, [x0, #32]
stp x7, x8, [x0, #48]
stp x9, x10, [x0, #64]
+ stp x11, x12, [x0, #80]
ret
ENDPROC(cpu_do_suspend)
@@ -136,6 +139,7 @@
ldp x6, x8, [x0, #32]
ldp x9, x10, [x0, #48]
ldp x11, x12, [x0, #64]
+ ldp x13, x14, [x0, #80]
msr tpidr_el0, x2
msr tpidrro_el0, x3
msr contextidr_el1, x4
@@ -158,6 +162,8 @@
msr mdscr_el1, x10
msr sctlr_el1, x12
+ msr tpidr_el1, x13
+ msr sp_el0, x14
/*
* Restore oslsr_el1 by writing oslar_el1
*/
diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
index 1c2a5e2..e93c949 100644
--- a/arch/frv/include/asm/atomic.h
+++ b/arch/frv/include/asm/atomic.h
@@ -139,7 +139,7 @@
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_inc_return((v)) == 0)
-
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
#define atomic_cmpxchg(v, old, new) (cmpxchg(&(v)->counter, old, new))
#define atomic_xchg(v, new) (xchg(&(v)->counter, new))
@@ -161,6 +161,39 @@
return c;
}
+static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
+{
+ long long c, old;
+
+ c = atomic64_read(v);
+ for (;;) {
+ if (unlikely(c == u))
+ break;
+ old = atomic64_cmpxchg(v, c, c + i);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return c != u;
+}
+
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
+{
+ long long c, old, dec;
+
+ c = atomic64_read(v);
+ for (;;) {
+ dec = c - 1;
+ if (unlikely(dec < 0))
+ break;
+ old = atomic64_cmpxchg((v), c, dec);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return dec;
+}
+
#define ATOMIC_OP(op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
index 836f147..efa59f1 100644
--- a/arch/frv/mm/elf-fdpic.c
+++ b/arch/frv/mm/elf-fdpic.c
@@ -74,7 +74,7 @@
addr = PAGE_ALIGN(addr);
vma = find_vma(current->mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
goto success;
}
diff --git a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile
index 2728a9a..145b5ce 100644
--- a/arch/mips/boot/Makefile
+++ b/arch/mips/boot/Makefile
@@ -128,19 +128,19 @@
-DADDR_BITS=$(ADDR_BITS) \
-DADDR_CELLS=$(itb_addr_cells)
-$(obj)/vmlinux.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
$(call if_changed_dep,cpp_its_S,none,vmlinux.bin)
-$(obj)/vmlinux.gz.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.gz.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
$(call if_changed_dep,cpp_its_S,gzip,vmlinux.bin.gz)
-$(obj)/vmlinux.bz2.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.bz2.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
$(call if_changed_dep,cpp_its_S,bzip2,vmlinux.bin.bz2)
-$(obj)/vmlinux.lzma.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.lzma.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
$(call if_changed_dep,cpp_its_S,lzma,vmlinux.bin.lzma)
-$(obj)/vmlinux.lzo.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.lzo.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
$(call if_changed_dep,cpp_its_S,lzo,vmlinux.bin.lzo)
quiet_cmd_itb-image = ITB $@
diff --git a/arch/mips/include/asm/branch.h b/arch/mips/include/asm/branch.h
index de781cf..da80878 100644
--- a/arch/mips/include/asm/branch.h
+++ b/arch/mips/include/asm/branch.h
@@ -74,10 +74,7 @@
return __microMIPS_compute_return_epc(regs);
if (cpu_has_mips16)
return __MIPS16e_compute_return_epc(regs);
- return regs->cp0_epc;
- }
-
- if (!delay_slot(regs)) {
+ } else if (!delay_slot(regs)) {
regs->cp0_epc += 4;
return 0;
}
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 12c7181..c3f2fb3 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -399,7 +399,7 @@
*
* @regs: Pointer to pt_regs
* @insn: branch instruction to decode
- * @returns: -EFAULT on error and forces SIGBUS, and on success
+ * @returns: -EFAULT on error and forces SIGILL, and on success
* returns 0 or BRANCH_LIKELY_TAKEN as appropriate after
* evaluating the branch.
*
@@ -431,7 +431,7 @@
/* Fall through */
case jr_op:
if (NO_R6EMU && insn.r_format.func == jr_op)
- goto sigill_r6;
+ goto sigill_r2r6;
regs->cp0_epc = regs->regs[insn.r_format.rs];
break;
}
@@ -446,7 +446,7 @@
switch (insn.i_format.rt) {
case bltzl_op:
if (NO_R6EMU)
- goto sigill_r6;
+ goto sigill_r2r6;
case bltz_op:
if ((long)regs->regs[insn.i_format.rs] < 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -459,7 +459,7 @@
case bgezl_op:
if (NO_R6EMU)
- goto sigill_r6;
+ goto sigill_r2r6;
case bgez_op:
if ((long)regs->regs[insn.i_format.rs] >= 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -473,10 +473,8 @@
case bltzal_op:
case bltzall_op:
if (NO_R6EMU && (insn.i_format.rs ||
- insn.i_format.rt == bltzall_op)) {
- ret = -SIGILL;
- break;
- }
+ insn.i_format.rt == bltzall_op))
+ goto sigill_r2r6;
regs->regs[31] = epc + 8;
/*
* OK we are here either because we hit a NAL
@@ -507,10 +505,8 @@
case bgezal_op:
case bgezall_op:
if (NO_R6EMU && (insn.i_format.rs ||
- insn.i_format.rt == bgezall_op)) {
- ret = -SIGILL;
- break;
- }
+ insn.i_format.rt == bgezall_op))
+ goto sigill_r2r6;
regs->regs[31] = epc + 8;
/*
* OK we are here either because we hit a BAL
@@ -556,6 +552,7 @@
/*
* These are unconditional and in j_format.
*/
+ case jalx_op:
case jal_op:
regs->regs[31] = regs->cp0_epc + 8;
case j_op:
@@ -573,7 +570,7 @@
*/
case beql_op:
if (NO_R6EMU)
- goto sigill_r6;
+ goto sigill_r2r6;
case beq_op:
if (regs->regs[insn.i_format.rs] ==
regs->regs[insn.i_format.rt]) {
@@ -587,7 +584,7 @@
case bnel_op:
if (NO_R6EMU)
- goto sigill_r6;
+ goto sigill_r2r6;
case bne_op:
if (regs->regs[insn.i_format.rs] !=
regs->regs[insn.i_format.rt]) {
@@ -601,7 +598,7 @@
case blezl_op: /* not really i_format */
if (!insn.i_format.rt && NO_R6EMU)
- goto sigill_r6;
+ goto sigill_r2r6;
case blez_op:
/*
* Compact branches for R6 for the
@@ -636,7 +633,7 @@
case bgtzl_op:
if (!insn.i_format.rt && NO_R6EMU)
- goto sigill_r6;
+ goto sigill_r2r6;
case bgtz_op:
/*
* Compact branches for R6 for the
@@ -774,48 +771,40 @@
#else
case bc6_op:
/* Only valid for MIPS R6 */
- if (!cpu_has_mips_r6) {
- ret = -SIGILL;
- break;
- }
+ if (!cpu_has_mips_r6)
+ goto sigill_r6;
regs->cp0_epc += 8;
break;
case balc6_op:
- if (!cpu_has_mips_r6) {
- ret = -SIGILL;
- break;
- }
+ if (!cpu_has_mips_r6)
+ goto sigill_r6;
/* Compact branch: BALC */
regs->regs[31] = epc + 4;
epc += 4 + (insn.i_format.simmediate << 2);
regs->cp0_epc = epc;
break;
case pop66_op:
- if (!cpu_has_mips_r6) {
- ret = -SIGILL;
- break;
- }
+ if (!cpu_has_mips_r6)
+ goto sigill_r6;
/* Compact branch: BEQZC || JIC */
regs->cp0_epc += 8;
break;
case pop76_op:
- if (!cpu_has_mips_r6) {
- ret = -SIGILL;
- break;
- }
+ if (!cpu_has_mips_r6)
+ goto sigill_r6;
/* Compact branch: BNEZC || JIALC */
- if (insn.i_format.rs)
+ if (!insn.i_format.rs) {
+ /* JIALC: set $31/ra */
regs->regs[31] = epc + 4;
+ }
regs->cp0_epc += 8;
break;
#endif
case pop10_op:
case pop30_op:
/* Only valid for MIPS R6 */
- if (!cpu_has_mips_r6) {
- ret = -SIGILL;
- break;
- }
+ if (!cpu_has_mips_r6)
+ goto sigill_r6;
/*
* Compact branches:
* bovc, beqc, beqzalc, bnvc, bnec, bnezlac
@@ -829,11 +818,17 @@
return ret;
sigill_dsp:
- printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm);
- force_sig(SIGBUS, current);
+ pr_info("%s: DSP branch but not DSP ASE - sending SIGILL.\n",
+ current->comm);
+ force_sig(SIGILL, current);
+ return -EFAULT;
+sigill_r2r6:
+ pr_info("%s: R2 branch but r2-to-r6 emulator is not present - sending SIGILL.\n",
+ current->comm);
+ force_sig(SIGILL, current);
return -EFAULT;
sigill_r6:
- pr_info("%s: R2 branch but r2-to-r6 emulator is not preset - sending SIGILL.\n",
+ pr_info("%s: R6 branch but no MIPSr6 ISA support - sending SIGILL.\n",
current->comm);
force_sig(SIGILL, current);
return -EFAULT;
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index 7791840..db07793 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -11,6 +11,7 @@
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/compiler.h>
+#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
@@ -137,6 +138,7 @@
andi t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS
beqz t0, work_notifysig
work_resched:
+ TRACE_IRQS_OFF
jal schedule
local_irq_disable # make sure need_resched and
@@ -173,6 +175,7 @@
beqz t0, work_pending # trace bit set?
local_irq_enable # could let syscall_trace_leave()
# call schedule() instead
+ TRACE_IRQS_ON
move a0, sp
jal syscall_trace_leave
b resume_userspace
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index cf05220..d1bb506 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -106,8 +106,8 @@
beq t0, t1, dtb_found
#endif
li t1, -2
- beq a0, t1, dtb_found
move t2, a1
+ beq a0, t1, dtb_found
li t2, 0
dtb_found:
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index 7cf653e..60c4d45 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -56,7 +56,6 @@
* state. Actually per-core rather than per-CPU.
*/
static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);
-static DEFINE_PER_CPU_ALIGNED(void*, ready_count_alloc);
/* Indicates online CPUs coupled with the current CPU */
static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);
@@ -642,7 +641,6 @@
{
enum cps_pm_state state;
unsigned core = cpu_data[cpu].core;
- unsigned dlinesz = cpu_data[cpu].dcache.linesz;
void *entry_fn, *core_rc;
for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
@@ -662,16 +660,11 @@
}
if (!per_cpu(ready_count, core)) {
- core_rc = kmalloc(dlinesz * 2, GFP_KERNEL);
+ core_rc = kmalloc(sizeof(u32), GFP_KERNEL);
if (!core_rc) {
pr_err("Failed allocate core %u ready_count\n", core);
return -ENOMEM;
}
- per_cpu(ready_count_alloc, core) = core_rc;
-
- /* Ensure ready_count is aligned to a cacheline boundary */
- core_rc += dlinesz - 1;
- core_rc = (void *)((unsigned long)core_rc & ~(dlinesz - 1));
per_cpu(ready_count, core) = core_rc;
}
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 4eff2ae..4c01ee5 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -83,7 +83,7 @@
}
seq_printf(m, "isa\t\t\t:");
- if (cpu_has_mips_r1)
+ if (cpu_has_mips_1)
seq_printf(m, " mips1");
if (cpu_has_mips_2)
seq_printf(m, "%s", " mips2");
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index bf83dc1..3de0260 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -924,7 +924,7 @@
audit_syscall_exit(regs);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
- trace_sys_exit(regs, regs->regs[2]);
+ trace_sys_exit(regs, regs_return_value(regs));
if (test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, 0);
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index c29d397..e6be1f62 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -371,7 +371,7 @@
PTR sys_writev
PTR sys_cacheflush
PTR sys_cachectl
- PTR sys_sysmips
+ PTR __sys_sysmips
PTR sys_ni_syscall /* 4150 */
PTR sys_getsid
PTR sys_fdatasync
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 0687f96..aa27daf 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -311,7 +311,7 @@
PTR sys_sched_getaffinity
PTR sys_cacheflush
PTR sys_cachectl
- PTR sys_sysmips
+ PTR __sys_sysmips
PTR sys_io_setup /* 5200 */
PTR sys_io_destroy
PTR sys_io_getevents
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 0331ba3..37f608f 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -302,7 +302,7 @@
PTR compat_sys_sched_getaffinity
PTR sys_cacheflush
PTR sys_cachectl
- PTR sys_sysmips
+ PTR __sys_sysmips
PTR compat_sys_io_setup /* 6200 */
PTR sys_io_destroy
PTR compat_sys_io_getevents
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 5a47042..7913a5c 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -371,7 +371,7 @@
PTR compat_sys_writev
PTR sys_cacheflush
PTR sys_cachectl
- PTR sys_sysmips
+ PTR __sys_sysmips
PTR sys_ni_syscall /* 4150 */
PTR sys_getsid
PTR sys_fdatasync
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 53a7ef9..4234b2d 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -28,6 +28,7 @@
#include <linux/elf.h>
#include <asm/asm.h>
+#include <asm/asm-eva.h>
#include <asm/branch.h>
#include <asm/cachectl.h>
#include <asm/cacheflush.h>
@@ -138,10 +139,12 @@
__asm__ __volatile__ (
" .set "MIPS_ISA_ARCH_LEVEL" \n"
" li %[err], 0 \n"
- "1: ll %[old], (%[addr]) \n"
+ "1: \n"
+ user_ll("%[old]", "(%[addr])")
" move %[tmp], %[new] \n"
- "2: sc %[tmp], (%[addr]) \n"
- " bnez %[tmp], 4f \n"
+ "2: \n"
+ user_sc("%[tmp]", "(%[addr])")
+ " beqz %[tmp], 4f \n"
"3: \n"
" .insn \n"
" .subsection 2 \n"
@@ -199,6 +202,12 @@
unreachable();
}
+/*
+ * mips_atomic_set() normally returns directly via syscall_exit potentially
+ * clobbering static registers, so be sure to preserve them.
+ */
+save_static_function(sys_sysmips);
+
SYSCALL_DEFINE3(sysmips, long, cmd, long, arg1, long, arg2)
{
switch (cmd) {
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index ec87ef9..b0b29cb 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -199,6 +199,8 @@
{
struct pt_regs regs;
mm_segment_t old_fs = get_fs();
+
+ regs.cp0_status = KSU_KERNEL;
if (sp) {
regs.regs[29] = (unsigned long)sp;
regs.regs[31] = 0;
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index f8b7bf8..e9385bc 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -2522,6 +2522,35 @@
return 0;
}
+/*
+ * Emulate FPU instructions.
+ *
+ * If we use FPU hardware, then we have been typically called to handle
+ * an unimplemented operation, such as where an operand is a NaN or
+ * denormalized. In that case exit the emulation loop after a single
+ * iteration so as to let hardware execute any subsequent instructions.
+ *
+ * If we have no FPU hardware or it has been disabled, then continue
+ * emulating floating-point instructions until one of these conditions
+ * has occurred:
+ *
+ * - a non-FPU instruction has been encountered,
+ *
+ * - an attempt to emulate has ended with a signal,
+ *
+ * - the ISA mode has been switched.
+ *
+ * We need to terminate the emulation loop if we got switched to the
+ * MIPS16 mode, whether supported or not, so that we do not attempt
+ * to emulate a MIPS16 instruction as a regular MIPS FPU instruction.
+ * Similarly if we got switched to the microMIPS mode and only the
+ * regular MIPS mode is supported, so that we do not attempt to emulate
+ * a microMIPS instruction as a regular MIPS FPU instruction. Or if
+ * we got switched to the regular MIPS mode and only the microMIPS mode
+ * is supported, so that we do not attempt to emulate a regular MIPS
+ * instruction that should cause an Address Error exception instead.
+ * For simplicity we always terminate upon an ISA mode switch.
+ */
int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
int has_fpu, void *__user *fault_addr)
{
@@ -2607,6 +2636,15 @@
break;
if (sig)
break;
+ /*
+ * We have to check for the ISA bit explicitly here,
+ * because `get_isa16_mode' may return 0 if support
+ * for code compression has been globally disabled,
+ * or otherwise we may produce the wrong signal or
+ * even proceed successfully where we must not.
+ */
+ if ((xcp->cp0_epc ^ prevepc) & 0x1)
+ break;
cond_resched();
} while (xcp->cp0_epc > prevepc);
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index d08ea3f..a44052c 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -92,7 +92,7 @@
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/arch/mn10300/include/asm/switch_to.h b/arch/mn10300/include/asm/switch_to.h
index 393d311..67e333a 100644
--- a/arch/mn10300/include/asm/switch_to.h
+++ b/arch/mn10300/include/asm/switch_to.h
@@ -16,7 +16,7 @@
struct task_struct;
struct thread_struct;
-#if !defined(CONFIG_LAZY_SAVE_FPU)
+#if defined(CONFIG_FPU) && !defined(CONFIG_LAZY_SAVE_FPU)
struct fpu_state_struct;
extern asmlinkage void fpu_save(struct fpu_state_struct *);
#define switch_fpu(prev, next) \
diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h
index 16e0246..cb7697d 100644
--- a/arch/parisc/include/asm/dma-mapping.h
+++ b/arch/parisc/include/asm/dma-mapping.h
@@ -20,6 +20,8 @@
** flush/purge and allocate "regular" cacheable pages for everything.
*/
+#define DMA_ERROR_CODE (~(dma_addr_t)0)
+
#ifdef CONFIG_PA11
extern struct dma_map_ops pcxl_dma_ops;
extern struct dma_map_ops pcx_dma_ops;
@@ -54,12 +56,13 @@
break;
}
}
- BUG_ON(!dev->platform_data);
return dev->platform_data;
}
-
-#define GET_IOC(dev) (HBA_DATA(parisc_walk_tree(dev))->iommu)
-
+
+#define GET_IOC(dev) ({ \
+ void *__pdata = parisc_walk_tree(dev); \
+ __pdata ? HBA_DATA(__pdata)->iommu : NULL; \
+})
#ifdef CONFIG_IOMMU_CCIO
struct parisc_device;
diff --git a/arch/parisc/include/asm/mmu_context.h b/arch/parisc/include/asm/mmu_context.h
index 59be257..a812262 100644
--- a/arch/parisc/include/asm/mmu_context.h
+++ b/arch/parisc/include/asm/mmu_context.h
@@ -49,15 +49,26 @@
mtctl(__space_to_prot(context), 8);
}
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
+static inline void switch_mm_irqs_off(struct mm_struct *prev,
+ struct mm_struct *next, struct task_struct *tsk)
{
-
if (prev != next) {
mtctl(__pa(next->pgd), 25);
load_context(next->context);
}
}
+static inline void switch_mm(struct mm_struct *prev,
+ struct mm_struct *next, struct task_struct *tsk)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ switch_mm_irqs_off(prev, next, tsk);
+ local_irq_restore(flags);
+}
+#define switch_mm_irqs_off switch_mm_irqs_off
+
#define deactivate_mm(tsk,mm) do { } while (0)
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index 0a393a0..1d7691f 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -88,7 +88,7 @@
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
+ struct vm_area_struct *vma, *prev;
unsigned long task_size = TASK_SIZE;
int do_color_align, last_mmap;
struct vm_unmapped_area_info info;
@@ -115,9 +115,10 @@
else
addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
+ vma = find_vma_prev(mm, addr, &prev);
if (task_size - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)) &&
+ (!prev || addr >= vm_end_gap(prev)))
goto found_addr;
}
@@ -141,7 +142,7 @@
const unsigned long len, const unsigned long pgoff,
const unsigned long flags)
{
- struct vm_area_struct *vma;
+ struct vm_area_struct *vma, *prev;
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
int do_color_align, last_mmap;
@@ -175,9 +176,11 @@
addr = COLOR_ALIGN(addr, last_mmap, pgoff);
else
addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
+
+ vma = find_vma_prev(mm, addr, &prev);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)) &&
+ (!prev || addr >= vm_end_gap(prev)))
goto found_addr;
}
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 3cfef1d..8ec2ff8 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -361,7 +361,7 @@
ENTRY_SAME(ni_syscall) /* 263: reserved for vserver */
ENTRY_SAME(add_key)
ENTRY_SAME(request_key) /* 265 */
- ENTRY_SAME(keyctl)
+ ENTRY_COMP(keyctl)
ENTRY_SAME(ioprio_set)
ENTRY_SAME(ioprio_get)
ENTRY_SAME(inotify_init)
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 040c48f..b6f3b5e 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -366,7 +366,7 @@
case 15: /* Data TLB miss fault/Data page fault */
/* send SIGSEGV when outside of vma */
if (!vma ||
- address < vma->vm_start || address > vma->vm_end) {
+ address < vma->vm_start || address >= vma->vm_end) {
si.si_signo = SIGSEGV;
si.si_code = SEGV_MAPERR;
break;
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 8f01f21..b4758f5 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -84,6 +84,7 @@
select ARCH_MIGHT_HAVE_PC_SERIO
select BINFMT_ELF
select ARCH_HAS_ELF_RANDOMIZE
+ select ARCH_HAS_FORTIFY_SOURCE
select OF
select OF_EARLY_FLATTREE
select OF_RESERVED_MEM
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 2b90335..a2cc801 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -560,7 +560,7 @@
* Atomically increments @v by 1, so long as @v is non-zero.
* Returns non-zero if @v was non-zero, and zero otherwise.
*/
-static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
+static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
{
long t1, t2;
@@ -579,7 +579,7 @@
: "r" (&v->counter)
: "cc", "xer", "memory");
- return t1;
+ return t1 != 0;
}
#endif /* __powerpc64__ */
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index ee46ffe..743ad7a 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -23,12 +23,13 @@
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE PAGE_SIZE
-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
- use of this is to invoke "./ld.so someprog" to test out a new version of
- the loader. We need to make sure that it is out of the way of the program
- that it will "exec", and that there is sufficient room for the brk. */
-
-#define ELF_ET_DYN_BASE 0x20000000
+/*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+#define ELF_ET_DYN_BASE (is_32bit_task() ? 0x000400000UL : \
+ 0x100000000UL)
#define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index e7d9eca..ceb168c 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1283,7 +1283,7 @@
" .llong 0\n" \
".previous" \
: "=r" (rval) \
- : "i" (CPU_FTR_CELL_TB_BUG), "i" (SPRN_TBRL)); \
+ : "i" (CPU_FTR_CELL_TB_BUG), "i" (SPRN_TBRL) : "cr0"); \
rval;})
#else
#define mftb() ({unsigned long rval; \
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index f257316..e5bfbf6 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -298,9 +298,17 @@
*
* For pHyp, we have to enable IO for log retrieval. Otherwise,
* 0xFF's is always returned from PCI config space.
+ *
+ * When the @severity is EEH_LOG_PERM, the PE is going to be
+ * removed. Prior to that, the drivers for devices included in
+ * the PE will be closed. The drivers rely on working IO path
+ * to bring the devices to quiet state. Otherwise, PCI traffic
+ * from those devices after they are removed is likely to cause
+ * another unexpected EEH error.
*/
if (!(pe->type & EEH_PE_PHB)) {
- if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG))
+ if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG) ||
+ severity == EEH_LOG_PERM)
eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
/*
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 1ba82ea..2e2fc1e 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1411,10 +1411,8 @@
.align 7
do_hash_page:
#ifdef CONFIG_PPC_STD_MMU_64
- andis. r0,r4,0xa410 /* weird error? */
+ andis. r0,r4,0xa450 /* weird error? */
bne- handle_page_fault /* if not, try to insert a HPTE */
- andis. r0,r4,DSISR_DABRMATCH@h
- bne- handle_dabr_fault
CURRENT_THREAD_INFO(r11, r1)
lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */
andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */
@@ -1438,11 +1436,16 @@
/* Error */
blt- 13f
+
+ /* Reload DSISR into r4 for the DABR check below */
+ ld r4,_DSISR(r1)
#endif /* CONFIG_PPC_STD_MMU_64 */
/* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
-11: ld r4,_DAR(r1)
+11: andis. r0,r4,DSISR_DABRMATCH@h
+ bne- handle_dabr_fault
+ ld r4,_DAR(r1)
ld r5,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_page_fault
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index e785cc9..fe97cbe 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -511,6 +511,15 @@
regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
#endif
+ /*
+ * jprobes use jprobe_return() which skips the normal return
+ * path of the function, and this messes up the accounting of the
+ * function graph tracer.
+ *
+ * Pause function graph tracing while performing the jprobe function.
+ */
+ pause_graph_tracing();
+
return 1;
}
@@ -533,6 +542,8 @@
* saved regs...
*/
memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
+ /* It's OK to start function graph tracing again */
+ unpause_graph_tracing();
preempt_enable_no_resched();
return 1;
}
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 094deb6..5c02984 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2807,12 +2807,34 @@
{
int r;
int srcu_idx;
+ unsigned long ebb_regs[3] = {}; /* shut up GCC */
if (!vcpu->arch.sane) {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return -EINVAL;
}
+ /*
+ * Don't allow entry with a suspended transaction, because
+ * the guest entry/exit code will lose it.
+ * If the guest has TM enabled, save away their TM-related SPRs
+ * (they will get restored by the TM unavailable interrupt).
+ */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
+ (current->thread.regs->msr & MSR_TM)) {
+ if (MSR_TM_ACTIVE(current->thread.regs->msr)) {
+ run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+ run->fail_entry.hardware_entry_failure_reason = 0;
+ return -EINVAL;
+ }
+ current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
+ current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
+ current->thread.tm_texasr = mfspr(SPRN_TEXASR);
+ current->thread.regs->msr &= ~MSR_TM;
+ }
+#endif
+
kvmppc_core_prepare_to_enter(vcpu);
/* No need to go into the guest when all we'll do is come back out */
@@ -2834,6 +2856,13 @@
flush_all_to_thread(current);
+ /* Save userspace EBB register values */
+ if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+ ebb_regs[0] = mfspr(SPRN_EBBHR);
+ ebb_regs[1] = mfspr(SPRN_EBBRR);
+ ebb_regs[2] = mfspr(SPRN_BESCR);
+ }
+
vcpu->arch.wqp = &vcpu->arch.vcore->wq;
vcpu->arch.pgdir = current->mm->pgd;
vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
@@ -2856,6 +2885,13 @@
r = kvmppc_xics_rm_complete(vcpu, 0);
} while (is_kvmppc_resume_guest(r));
+ /* Restore userspace EBB register values */
+ if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+ mtspr(SPRN_EBBHR, ebb_regs[0]);
+ mtspr(SPRN_EBBRR, ebb_regs[1]);
+ mtspr(SPRN_BESCR, ebb_regs[2]);
+ }
+
out:
vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
atomic_dec(&vcpu->kvm->arch.vcpus_running);
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 0c84d6b..7e2af42 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -97,7 +97,8 @@
(unsigned long)selected_size / SZ_1M);
align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
cma_declare_contiguous(0, selected_size, 0, align_size,
- KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, &kvm_cma);
+ KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, "kvm_cma",
+ &kvm_cma);
}
}
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 6ca3b90..776c1a1 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -687,8 +687,10 @@
case 19:
switch ((instr >> 1) & 0x3ff) {
case 0: /* mcrf */
- rd = (instr >> 21) & 0x1c;
- ra = (instr >> 16) & 0x1c;
+ rd = 7 - ((instr >> 23) & 0x7);
+ ra = 7 - ((instr >> 18) & 0x7);
+ rd *= 4;
+ ra *= 4;
val = (regs->ccr >> ra) & 0xf;
regs->ccr = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
goto instr_done;
@@ -968,6 +970,19 @@
#endif
case 19: /* mfcr */
+ if ((instr >> 20) & 1) {
+ imm = 0xf0000000UL;
+ for (sh = 0; sh < 8; ++sh) {
+ if (instr & (0x80000 >> sh)) {
+ regs->gpr[rd] = regs->ccr & imm;
+ break;
+ }
+ imm >>= 4;
+ }
+
+ goto instr_done;
+ }
+
regs->gpr[rd] = regs->ccr;
regs->gpr[rd] &= 0xffffffffUL;
goto instr_done;
diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c
index 35254a6..a2b2d97 100644
--- a/arch/powerpc/mm/hugetlbpage-radix.c
+++ b/arch/powerpc/mm/hugetlbpage-radix.c
@@ -65,7 +65,7 @@
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
/*
diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
index 2f1e443..5bc2845 100644
--- a/arch/powerpc/mm/mmap.c
+++ b/arch/powerpc/mm/mmap.c
@@ -106,7 +106,7 @@
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
@@ -142,7 +142,7 @@
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index 73bf6e1..a006f82 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -167,9 +167,15 @@
mm->context.cop_lockp = NULL;
#endif /* CONFIG_PPC_ICSWX */
- if (radix_enabled())
- process_tb[mm->context.id].prtb1 = 0;
- else
+ if (radix_enabled()) {
+ /*
+ * Radix doesn't have a valid bit in the process table
+ * entries. However we know that at least P9 implementation
+ * will avoid caching an entry with an invalid RTS field,
+ * and 0 is invalid. So this will do.
+ */
+ process_tb[mm->context.id].prtb0 = 0;
+ } else
subpage_prot_free(mm);
destroy_pagetable_page(mm);
__destroy_context(mm->context.id);
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 2b27458..c4d5c9c 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -105,7 +105,7 @@
if ((mm->task_size - len) < addr)
return 0;
vma = find_vma(mm, addr);
- return (!vma || (addr + len) <= vma->vm_start);
+ return (!vma || (addr + len) <= vm_start_gap(vma));
}
static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
diff --git a/arch/powerpc/perf/perf_regs.c b/arch/powerpc/perf/perf_regs.c
index d24a8a3..28ae8bd 100644
--- a/arch/powerpc/perf/perf_regs.c
+++ b/arch/powerpc/perf/perf_regs.c
@@ -100,5 +100,6 @@
struct pt_regs *regs_user_copy)
{
regs_user->regs = task_pt_regs(current);
- regs_user->abi = perf_reg_abi(current);
+ regs_user->abi = (regs_user->regs) ? perf_reg_abi(current) :
+ PERF_SAMPLE_REGS_ABI_NONE;
}
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index f2c98f6..a7bb872 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -279,7 +279,7 @@
int ssize, unsigned long inv_flags)
{
unsigned long lpar_rc;
- unsigned long flags = (newpp & 7) | H_AVPN;
+ unsigned long flags;
unsigned long want_v;
want_v = hpte_encode_avpn(vpn, psize, ssize);
@@ -287,6 +287,11 @@
pr_devel(" update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
want_v, slot, flags, psize);
+ flags = (newpp & 7) | H_AVPN;
+ if (mmu_has_feature(MMU_FTR_KERNEL_RO))
+ /* Move pp0 into bit 8 (IBM 55) */
+ flags |= (newpp & HPTE_R_PP0) >> 55;
+
lpar_rc = plpar_pte_protect(flags, slot, want_v);
if (lpar_rc == H_NOT_FOUND) {
@@ -358,6 +363,10 @@
BUG_ON(slot == -1);
flags = newpp & 7;
+ if (mmu_has_feature(MMU_FTR_KERNEL_RO))
+ /* Move pp0 into bit 8 (IBM 55) */
+ flags |= (newpp & HPTE_R_PP0) >> 55;
+
lpar_rc = plpar_pte_protect(flags, slot, 0);
BUG_ON(lpar_rc != H_SUCCESS);
diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
index 32c46b4..b53f80f 100644
--- a/arch/powerpc/sysdev/xics/icp-opal.c
+++ b/arch/powerpc/sysdev/xics/icp-opal.c
@@ -130,14 +130,16 @@
{
int hw_cpu = get_hard_smp_processor_id(cpu);
+ kvmppc_set_host_ipi(cpu, 1);
opal_int_set_mfrr(hw_cpu, IPI_PRIORITY);
}
static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id)
{
- int hw_cpu = hard_smp_processor_id();
+ int cpu = smp_processor_id();
- opal_int_set_mfrr(hw_cpu, 0xff);
+ kvmppc_set_host_ipi(cpu, 0);
+ opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
return smp_ipi_demux();
}
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index d7697ab..8e136b88 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -15,7 +15,9 @@
BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
asm volatile( \
" lctlg %1,%2,%0\n" \
- : : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\
+ : \
+ : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high) \
+ : "memory"); \
}
#define __ctl_store(array, low, high) { \
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 1736c7d..8d665f1 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -158,14 +158,13 @@
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE 4096
-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
- use of this is to invoke "./ld.so someprog" to test out a new version of
- the loader. We need to make sure that it is out of the way of the program
- that it will "exec", and that there is sufficient room for the brk. 64-bit
- tasks are aligned to 4GB. */
-#define ELF_ET_DYN_BASE (is_compat_task() ? \
- (STACK_TOP / 3 * 2) : \
- (STACK_TOP / 3 * 2) & ~((1UL << 32) - 1))
+/*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+#define ELF_ET_DYN_BASE (is_compat_task() ? 0x000400000UL : \
+ 0x100000000UL)
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. */
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index 6ba0bf9..6bc941b 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -64,6 +64,12 @@
{
unsigned long mask = -1UL;
+ /*
+ * No arguments for this syscall, there's nothing to do.
+ */
+ if (!n)
+ return;
+
BUG_ON(i + n > 6);
#ifdef CONFIG_COMPAT
if (test_tsk_thread_flag(task, TIF_31BIT))
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index c438168..3bc2825 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -240,12 +240,17 @@
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
.Lsie_done:
# some program checks are suppressing. C code (e.g. do_protection_exception)
-# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
-# instructions between sie64a and .Lsie_done should not cause program
-# interrupts. So lets use a nop (47 00 00 00) as a landing pad.
+# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
+# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
+# Other instructions between sie64a and .Lsie_done should not cause program
+# interrupts. So lets use 3 nops as a landing pad for all possible rewinds.
# See also .Lcleanup_sie
-.Lrewind_pad:
- nop 0
+.Lrewind_pad6:
+ nopr 7
+.Lrewind_pad4:
+ nopr 7
+.Lrewind_pad2:
+ nopr 7
.globl sie_exit
sie_exit:
lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
@@ -258,7 +263,9 @@
stg %r14,__SF_EMPTY+16(%r15) # set exit reason code
j sie_exit
- EX_TABLE(.Lrewind_pad,.Lsie_fault)
+ EX_TABLE(.Lrewind_pad6,.Lsie_fault)
+ EX_TABLE(.Lrewind_pad4,.Lsie_fault)
+ EX_TABLE(.Lrewind_pad2,.Lsie_fault)
EX_TABLE(sie_exit,.Lsie_fault)
EXPORT_SYMBOL(sie64a)
EXPORT_SYMBOL(sie_exit)
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 4aa8a7e..f5d7984 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -972,11 +972,12 @@
ptr = asce.origin * 4096;
if (asce.r) {
*fake = 1;
+ ptr = 0;
asce.dt = ASCE_TYPE_REGION1;
}
switch (asce.dt) {
case ASCE_TYPE_REGION1:
- if (vaddr.rfx01 > asce.tl && !asce.r)
+ if (vaddr.rfx01 > asce.tl && !*fake)
return PGM_REGION_FIRST_TRANS;
break;
case ASCE_TYPE_REGION2:
@@ -1004,8 +1005,7 @@
union region1_table_entry rfte;
if (*fake) {
- /* offset in 16EB guest memory block */
- ptr = ptr + ((unsigned long) vaddr.rsx << 53UL);
+ ptr += (unsigned long) vaddr.rfx << 53;
rfte.val = ptr;
goto shadow_r2t;
}
@@ -1031,8 +1031,7 @@
union region2_table_entry rste;
if (*fake) {
- /* offset in 8PB guest memory block */
- ptr = ptr + ((unsigned long) vaddr.rtx << 42UL);
+ ptr += (unsigned long) vaddr.rsx << 42;
rste.val = ptr;
goto shadow_r3t;
}
@@ -1059,8 +1058,7 @@
union region3_table_entry rtte;
if (*fake) {
- /* offset in 4TB guest memory block */
- ptr = ptr + ((unsigned long) vaddr.sx << 31UL);
+ ptr += (unsigned long) vaddr.rtx << 31;
rtte.val = ptr;
goto shadow_sgt;
}
@@ -1096,8 +1094,7 @@
union segment_table_entry ste;
if (*fake) {
- /* offset in 2G guest memory block */
- ptr = ptr + ((unsigned long) vaddr.sx << 20UL);
+ ptr += (unsigned long) vaddr.sx << 20;
ste.val = ptr;
goto shadow_pgt;
}
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index eb9df28..812368f 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -98,7 +98,7 @@
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
@@ -136,7 +136,7 @@
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index 6777177..7df7d59 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -63,7 +63,7 @@
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
@@ -113,7 +113,7 @@
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index e1b1ce6..5cbf03c 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -1021,7 +1021,7 @@
unsigned long order = get_order(size);
unsigned long p;
- p = __get_free_pages(GFP_KERNEL, order);
+ p = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!p) {
prom_printf("SUN4V: Error, cannot allocate queue.\n");
prom_halt();
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index fe8b8ee..02e05e2 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -118,7 +118,7 @@
vma = find_vma(mm, addr);
if (task_size - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
@@ -181,7 +181,7 @@
vma = find_vma(mm, addr);
if (task_size - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 4094a51..d44fb80 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -85,7 +85,7 @@
void bad_trap(struct pt_regs *regs, long lvl)
{
- char buffer[32];
+ char buffer[36];
siginfo_t info;
if (notify_die(DIE_TRAP, "bad trap", regs,
@@ -116,7 +116,7 @@
void bad_trap_tl1(struct pt_regs *regs, long lvl)
{
- char buffer[32];
+ char buffer[36];
if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
0, lvl, SIGTRAP) == NOTIFY_STOP)
@@ -2051,6 +2051,73 @@
atomic_inc(&sun4v_resum_oflow_cnt);
}
+/* Given a set of registers, get the virtual address that was being accessed
+ * by the faulting instruction at tpc.
+ */
+static unsigned long sun4v_get_vaddr(struct pt_regs *regs)
+{
+ unsigned int insn;
+
+ if (!copy_from_user(&insn, (void __user *)regs->tpc, 4)) {
+ return compute_effective_address(regs, insn,
+ (insn >> 25) & 0x1f);
+ }
+ return 0;
+}
+
+/* Attempt to handle non-resumable errors generated from userspace.
+ * Returns true if the signal was handled, false otherwise.
+ */
+bool sun4v_nonresum_error_user_handled(struct pt_regs *regs,
+ struct sun4v_error_entry *ent) {
+
+ unsigned int attrs = ent->err_attrs;
+
+ if (attrs & SUN4V_ERR_ATTRS_MEMORY) {
+ unsigned long addr = ent->err_raddr;
+ siginfo_t info;
+
+ if (addr == ~(u64)0) {
+ /* This seems highly unlikely to ever occur */
+ pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory error detected in unknown location!\n");
+ } else {
+ unsigned long page_cnt = DIV_ROUND_UP(ent->err_size,
+ PAGE_SIZE);
+
+ /* Break the unfortunate news. */
+ pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory failed at %016lX\n",
+ addr);
+ pr_emerg("SUN4V NON-RECOVERABLE ERROR: Claiming %lu ages.\n",
+ page_cnt);
+
+ while (page_cnt-- > 0) {
+ if (pfn_valid(addr >> PAGE_SHIFT))
+ get_page(pfn_to_page(addr >> PAGE_SHIFT));
+ addr += PAGE_SIZE;
+ }
+ }
+ info.si_signo = SIGKILL;
+ info.si_errno = 0;
+ info.si_trapno = 0;
+ force_sig_info(info.si_signo, &info, current);
+
+ return true;
+ }
+ if (attrs & SUN4V_ERR_ATTRS_PIO) {
+ siginfo_t info;
+
+ info.si_signo = SIGBUS;
+ info.si_code = BUS_ADRERR;
+ info.si_addr = (void __user *)sun4v_get_vaddr(regs);
+ force_sig_info(info.si_signo, &info, current);
+
+ return true;
+ }
+
+ /* Default to doing nothing */
+ return false;
+}
+
/* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
* Log the event, clear the first word of the entry, and die.
*/
@@ -2075,6 +2142,12 @@
put_cpu();
+ if (!(regs->tstate & TSTATE_PRIV) &&
+ sun4v_nonresum_error_user_handled(regs, &local_copy)) {
+ /* DON'T PANIC: This userspace error was handled. */
+ return;
+ }
+
#ifdef CONFIG_PCI
/* Check for the special PCI poke sequence. */
if (pci_poke_in_progress && pci_poke_cpu == cpu) {
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 988acc8..58cde8d 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -116,7 +116,7 @@
addr = ALIGN(addr, HPAGE_SIZE);
vma = find_vma(mm, addr);
if (task_size - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
if (mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index 77ceaa3..67508b2 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -232,7 +232,7 @@
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
if (current->mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/um/include/asm/thread_info.h b/arch/um/include/asm/thread_info.h
index 053baff..14fc4d2 100644
--- a/arch/um/include/asm/thread_info.h
+++ b/arch/um/include/asm/thread_info.h
@@ -63,6 +63,7 @@
#define TIF_RESTORE_SIGMASK 7
#define TIF_NOTIFY_RESUME 8
#define TIF_SECCOMP 9 /* secure computing */
+#define TIF_MM_RELEASED 10 /* task MM has been released */
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index bada636..3735222 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -26,6 +26,7 @@
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FAST_MULTIPLIER
+ select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_GIGANTIC_PAGE if X86_64
select ARCH_HAS_KCOV if X86_64
diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
index a66854d..6de58f1 100644
--- a/arch/x86/boot/compressed/kaslr.c
+++ b/arch/x86/boot/compressed/kaslr.c
@@ -430,9 +430,6 @@
{
unsigned long random_addr, min_addr;
- /* By default, keep output position unchanged. */
- *virt_addr = *output;
-
if (cmdline_find_option_bool("nokaslr")) {
warn("KASLR disabled: 'nokaslr' on cmdline.");
return;
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index b3c5a5f0..5955954 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -338,7 +338,7 @@
unsigned long output_len)
{
const unsigned long kernel_total_size = VO__end - VO__text;
- unsigned long virt_addr = (unsigned long)output;
+ unsigned long virt_addr = LOAD_PHYSICAL_ADDR;
/* Retain x86 boot parameters pointer passed from startup_32/64. */
boot_params = rmode;
@@ -397,7 +397,7 @@
#ifndef CONFIG_RELOCATABLE
if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
error("Destination address does not match LOAD_PHYSICAL_ADDR");
- if ((unsigned long)output != virt_addr)
+ if (virt_addr != LOAD_PHYSICAL_ADDR)
error("Destination virtual address changed when not relocatable");
#endif
@@ -409,3 +409,8 @@
debug_putstr("done.\nBooting the kernel.\n");
return output;
}
+
+void fortify_panic(const char *name)
+{
+ error("detected buffer overflow");
+}
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index 1c8355e..766a521 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -81,8 +81,6 @@
unsigned long output_size,
unsigned long *virt_addr)
{
- /* No change from existing output location. */
- *virt_addr = *output;
}
#endif
diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
index fc61739..f960a04 100644
--- a/arch/x86/crypto/sha1_ssse3_glue.c
+++ b/arch/x86/crypto/sha1_ssse3_glue.c
@@ -201,7 +201,7 @@
static bool avx2_usable(void)
{
- if (avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
+ if (false && avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
&& boot_cpu_has(X86_FEATURE_BMI1)
&& boot_cpu_has(X86_FEATURE_BMI2))
return true;
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 38623e2..9604b25 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -505,6 +505,10 @@
if (event->attr.precise_ip > precise)
return -EOPNOTSUPP;
+
+ /* There's no sense in having PEBS for non sampling events: */
+ if (!is_sampling_event(event))
+ return -EINVAL;
}
/*
* check that PEBS LBR correction does not conflict with
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index cb85222..f0f197f 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -431,11 +431,11 @@
[ C(DTLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */
- [ C(RESULT_MISS) ] = 0x608, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
+ [ C(RESULT_MISS) ] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */
- [ C(RESULT_MISS) ] = 0x649, /* DTLB_STORE_MISSES.WALK_COMPLETED */
+ [ C(RESULT_MISS) ] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0x0,
@@ -3164,13 +3164,16 @@
if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
for_each_cpu(i, topology_sibling_cpumask(cpu)) {
+ struct cpu_hw_events *sibling;
struct intel_excl_cntrs *c;
- c = per_cpu(cpu_hw_events, i).excl_cntrs;
+ sibling = &per_cpu(cpu_hw_events, i);
+ c = sibling->excl_cntrs;
if (c && c->core_id == core_id) {
cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
cpuc->excl_cntrs = c;
- cpuc->excl_thread_id = 1;
+ if (!sibling->excl_thread_id)
+ cpuc->excl_thread_id = 1;
break;
}
}
@@ -3975,7 +3978,7 @@
x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
}
- x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
+ x86_pmu.intel_ctrl = (1ULL << x86_pmu.num_counters) - 1;
if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 2724277..afe8024 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -2686,7 +2686,7 @@
void hswep_uncore_cpu_init(void)
{
- int pkg = topology_phys_to_logical_pkg(0);
+ int pkg = boot_cpu_data.logical_proc_id;
if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 94aad63..c152db2 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -245,12 +245,13 @@
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE 4096
-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
- use of this is to invoke "./ld.so someprog" to test out a new version of
- the loader. We need to make sure that it is out of the way of the program
- that it will "exec", and that there is sufficient room for the brk. */
-
-#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+/*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+#define ELF_ET_DYN_BASE (mmap_is_ia32() ? 0x000400000UL : \
+ 0x100000000UL)
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. This could be done in user space,
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index e9cd7be..19d14ac 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -221,6 +221,9 @@
void (*get_cpuid)(struct x86_emulate_ctxt *ctxt,
u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);
void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked);
+
+ unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt);
+ void (*set_hflags)(struct x86_emulate_ctxt *ctxt, unsigned hflags);
};
typedef u32 __attribute__((vector_size(16))) sse128_t;
@@ -290,7 +293,6 @@
/* interruptibility state, as a result of execution of STI or MOV SS */
int interruptibility;
- int emul_flags;
bool perm_ok; /* do not check permissions if true */
bool ud; /* inject an #UD if host doesn't support insn */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 78f3760..b601dda 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -405,6 +405,8 @@
#define MSR_IA32_TSC_ADJUST 0x0000003b
#define MSR_IA32_BNDCFGS 0x00000d90
+#define MSR_IA32_BNDCFGS_RSVD 0x00000ffc
+
#define MSR_IA32_XSS 0x00000da0
#define FEATURE_CONTROL_LOCKED (1<<0)
diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
index 0b1ff4c..fffb279 100644
--- a/arch/x86/include/asm/pat.h
+++ b/arch/x86/include/asm/pat.h
@@ -7,6 +7,7 @@
bool pat_enabled(void);
void pat_disable(const char *reason);
extern void pat_init(void);
+extern void init_cache_modes(void);
extern int reserve_memtype(u64 start, u64 end,
enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm);
diff --git a/arch/x86/include/asm/string_32.h b/arch/x86/include/asm/string_32.h
index 3d3e835..e9ee848 100644
--- a/arch/x86/include/asm/string_32.h
+++ b/arch/x86/include/asm/string_32.h
@@ -142,7 +142,9 @@
}
#define __HAVE_ARCH_MEMCPY
+extern void *memcpy(void *, const void *, size_t);
+#ifndef CONFIG_FORTIFY_SOURCE
#ifdef CONFIG_X86_USE_3DNOW
#include <asm/mmx.h>
@@ -195,11 +197,15 @@
#endif
#endif
+#endif /* !CONFIG_FORTIFY_SOURCE */
#define __HAVE_ARCH_MEMMOVE
void *memmove(void *dest, const void *src, size_t n);
+extern int memcmp(const void *, const void *, size_t);
+#ifndef CONFIG_FORTIFY_SOURCE
#define memcmp __builtin_memcmp
+#endif
#define __HAVE_ARCH_MEMCHR
extern void *memchr(const void *cs, int c, size_t count);
@@ -321,6 +327,8 @@
: __memset_generic((s), (c), (count)))
#define __HAVE_ARCH_MEMSET
+extern void *memset(void *, int, size_t);
+#ifndef CONFIG_FORTIFY_SOURCE
#if (__GNUC__ >= 4)
#define memset(s, c, count) __builtin_memset(s, c, count)
#else
@@ -330,6 +338,7 @@
(count)) \
: __memset((s), (c), (count)))
#endif
+#endif /* !CONFIG_FORTIFY_SOURCE */
/*
* find the first occurrence of byte 'c', or 1 past the area if none
diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h
index a164862..f942deb 100644
--- a/arch/x86/include/asm/string_64.h
+++ b/arch/x86/include/asm/string_64.h
@@ -31,6 +31,7 @@
extern void *memcpy(void *to, const void *from, size_t len);
extern void *__memcpy(void *to, const void *from, size_t len);
+#ifndef CONFIG_FORTIFY_SOURCE
#ifndef CONFIG_KMEMCHECK
#if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4
#define memcpy(dst, src, len) \
@@ -51,6 +52,7 @@
*/
#define memcpy(dst, src, len) __inline_memcpy((dst), (src), (len))
#endif
+#endif /* !CONFIG_FORTIFY_SOURCE */
#define __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t n);
@@ -77,6 +79,11 @@
#define memcpy(dst, src, len) __memcpy(dst, src, len)
#define memmove(dst, src, len) __memmove(dst, src, len)
#define memset(s, c, n) __memset(s, c, n)
+
+#ifndef __NO_FORTIFY
+#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
+#endif
+
#endif
__must_check int memcpy_mcsafe_unrolled(void *dst, const void *src, size_t cnt);
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index a12a047..8b678af 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -43,6 +43,7 @@
#include <asm/page.h>
#include <asm/pgtable.h>
+#include <asm/smap.h>
#include <xen/interface/xen.h>
#include <xen/interface/sched.h>
@@ -214,10 +215,12 @@
__HYPERCALL_DECLS;
__HYPERCALL_5ARG(a1, a2, a3, a4, a5);
+ stac();
asm volatile("call *%[call]"
: __HYPERCALL_5PARAM
: [call] "a" (&hypercall_page[call])
: __HYPERCALL_CLOBBER5);
+ clac();
return (long)__res;
}
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 931ced8..d3e0d04 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -338,6 +338,14 @@
struct mpc_intsrc mp_irq;
/*
+ * Check bus_irq boundary.
+ */
+ if (bus_irq >= NR_IRQS_LEGACY) {
+ pr_warn("Invalid bus_irq %u for legacy override\n", bus_irq);
+ return;
+ }
+
+ /*
* Convert 'gsi' to 'ioapic.pin'.
*/
ioapic = mp_find_ioapic(gsi);
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 7249f15..cf89928 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2116,7 +2116,7 @@
int idx;
idx = find_irq_entry(apic1, pin1, mp_INT);
if (idx != -1 && irq_trigger(idx))
- unmask_ioapic_irq(irq_get_chip_data(0));
+ unmask_ioapic_irq(irq_get_irq_data(0));
}
irq_domain_deactivate_irq(irq_data);
irq_domain_activate_irq(irq_data);
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 9c337b0..feaab07 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1054,6 +1054,13 @@
max_possible_pfn = max_pfn;
/*
+ * This call is required when the CPU does not support PAT. If
+ * mtrr_bp_init() invoked it already via pat_init() the call has no
+ * effect.
+ */
+ init_cache_modes();
+
+ /*
* Define random base addresses for memory sections after max_pfn is
* defined and before each memory section base is used.
*/
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index a55ed63..1119414 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -140,7 +140,7 @@
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (end - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
@@ -183,7 +183,7 @@
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index eea88fe..6e57edf 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -694,6 +694,7 @@
crystal_khz = 24000; /* 24.0 MHz */
break;
case INTEL_FAM6_SKYLAKE_X:
+ case INTEL_FAM6_ATOM_DENVERTON:
crystal_khz = 25000; /* 25.0 MHz */
break;
case INTEL_FAM6_ATOM_GOLDMONT:
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 35058c2..9368fec 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -144,6 +144,14 @@
return best && (best->ebx & bit(X86_FEATURE_RTM));
}
+static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 7, 0);
+ return best && (best->ebx & bit(X86_FEATURE_MPX));
+}
+
static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 9f676ad..de36660 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2543,7 +2543,7 @@
u64 smbase;
int ret;
- if ((ctxt->emul_flags & X86EMUL_SMM_MASK) == 0)
+ if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
return emulate_ud(ctxt);
/*
@@ -2592,11 +2592,11 @@
return X86EMUL_UNHANDLEABLE;
}
- if ((ctxt->emul_flags & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
+ if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
ctxt->ops->set_nmi_mask(ctxt, false);
- ctxt->emul_flags &= ~X86EMUL_SMM_INSIDE_NMI_MASK;
- ctxt->emul_flags &= ~X86EMUL_SMM_MASK;
+ ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
+ ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
return X86EMUL_CONTINUE;
}
@@ -5312,6 +5312,7 @@
const struct x86_emulate_ops *ops = ctxt->ops;
int rc = X86EMUL_CONTINUE;
int saved_dst_type = ctxt->dst.type;
+ unsigned emul_flags;
ctxt->mem_read.pos = 0;
@@ -5326,6 +5327,7 @@
goto done;
}
+ emul_flags = ctxt->ops->get_hflags(ctxt);
if (unlikely(ctxt->d &
(No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
@@ -5359,7 +5361,7 @@
fetch_possible_mmx_operand(ctxt, &ctxt->dst);
}
- if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
+ if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
rc = emulator_check_intercept(ctxt, ctxt->intercept,
X86_ICPT_PRE_EXCEPT);
if (rc != X86EMUL_CONTINUE)
@@ -5388,7 +5390,7 @@
goto done;
}
- if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
+ if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
rc = emulator_check_intercept(ctxt, ctxt->intercept,
X86_ICPT_POST_EXCEPT);
if (rc != X86EMUL_CONTINUE)
@@ -5442,7 +5444,7 @@
special_insn:
- if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
+ if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
rc = emulator_check_intercept(ctxt, ctxt->intercept,
X86_ICPT_POST_MEMACCESS);
if (rc != X86EMUL_CONTINUE)
diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c
index 9d4a850..5ab4a36 100644
--- a/arch/x86/kvm/pmu_intel.c
+++ b/arch/x86/kvm/pmu_intel.c
@@ -294,7 +294,7 @@
((u64)1 << edx.split.bit_width_fixed) - 1;
}
- pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) |
+ pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
pmu->global_ctrl_mask = ~pmu->global_ctrl;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 89b98e0..3dc6d80 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2455,7 +2455,7 @@
if (!(vmcs12->exception_bitmap & (1u << nr)))
return 0;
- nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason,
+ nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
vmcs_read32(VM_EXIT_INTR_INFO),
vmcs_readl(EXIT_QUALIFICATION));
return 1;
@@ -2987,7 +2987,8 @@
msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
break;
case MSR_IA32_BNDCFGS:
- if (!kvm_mpx_supported())
+ if (!kvm_mpx_supported() ||
+ (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu)))
return 1;
msr_info->data = vmcs_read64(GUEST_BNDCFGS);
break;
@@ -3069,7 +3070,11 @@
vmcs_writel(GUEST_SYSENTER_ESP, data);
break;
case MSR_IA32_BNDCFGS:
- if (!kvm_mpx_supported())
+ if (!kvm_mpx_supported() ||
+ (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu)))
+ return 1;
+ if (is_noncanonical_address(data & PAGE_MASK) ||
+ (data & MSR_IA32_BNDCFGS_RSVD))
return 1;
vmcs_write64(GUEST_BNDCFGS, data);
break;
@@ -6474,7 +6479,6 @@
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
- vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
memcpy(vmx_msr_bitmap_legacy_x2apic,
vmx_msr_bitmap_legacy, PAGE_SIZE);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 62cde4f..3dbcb09 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4999,6 +4999,8 @@
if (var.unusable) {
memset(desc, 0, sizeof(*desc));
+ if (base3)
+ *base3 = 0;
return false;
}
@@ -5154,6 +5156,16 @@
kvm_x86_ops->set_nmi_mask(emul_to_vcpu(ctxt), masked);
}
+static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
+{
+ return emul_to_vcpu(ctxt)->arch.hflags;
+}
+
+static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags)
+{
+ kvm_set_hflags(emul_to_vcpu(ctxt), emul_flags);
+}
+
static const struct x86_emulate_ops emulate_ops = {
.read_gpr = emulator_read_gpr,
.write_gpr = emulator_write_gpr,
@@ -5193,6 +5205,8 @@
.intercept = emulator_intercept,
.get_cpuid = emulator_get_cpuid,
.set_nmi_mask = emulator_set_nmi_mask,
+ .get_hflags = emulator_get_hflags,
+ .set_hflags = emulator_set_hflags,
};
static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
@@ -5245,7 +5259,6 @@
BUILD_BUG_ON(HF_GUEST_MASK != X86EMUL_GUEST_MASK);
BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK);
BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK);
- ctxt->emul_flags = vcpu->arch.hflags;
init_decode_cache(ctxt);
vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
@@ -5636,8 +5649,6 @@
unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
toggle_interruptibility(vcpu, ctxt->interruptibility);
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
- if (vcpu->arch.hflags != ctxt->emul_flags)
- kvm_set_hflags(vcpu, ctxt->emul_flags);
kvm_rip_write(vcpu, ctxt->eip);
if (r == EMULATE_DONE)
kvm_vcpu_check_singlestep(vcpu, rflags, &r);
@@ -6111,7 +6122,8 @@
kvm_x86_ops->patch_hypercall(vcpu, instruction);
- return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
+ return emulator_write_emulated(ctxt, rip, instruction, 3,
+ &ctxt->exception);
}
static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index d376e4b..04c067b 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -84,7 +84,7 @@
movl %edx,%ecx
andl $63,%edx
shrl $6,%ecx
- jz 17f
+ jz .L_copy_short_string
1: movq (%rsi),%r8
2: movq 1*8(%rsi),%r9
3: movq 2*8(%rsi),%r10
@@ -105,7 +105,8 @@
leaq 64(%rdi),%rdi
decl %ecx
jnz 1b
-17: movl %edx,%ecx
+.L_copy_short_string:
+ movl %edx,%ecx
andl $7,%edx
shrl $3,%ecx
jz 20f
@@ -221,6 +222,8 @@
*/
ENTRY(copy_user_enhanced_fast_string)
ASM_STAC
+ cmpl $64,%edx
+ jb .L_copy_short_string /* less then 64 bytes, avoid the costly 'rep' */
movl %edx,%ecx
1: rep
movsb
diff --git a/arch/x86/lib/memcpy_32.c b/arch/x86/lib/memcpy_32.c
index cad1263..2eab7d0 100644
--- a/arch/x86/lib/memcpy_32.c
+++ b/arch/x86/lib/memcpy_32.c
@@ -6,7 +6,7 @@
__visible void *memcpy(void *to, const void *from, size_t n)
{
-#ifdef CONFIG_X86_USE_3DNOW
+#if defined(CONFIG_X86_USE_3DNOW) && !defined(CONFIG_FORTIFY_SOURCE)
return __memcpy3d(to, from, n);
#else
return __memcpy(to, from, n);
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index 2ae8584..fe342e8 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -144,7 +144,7 @@
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
if (mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 14b9dd7..9a324fc 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -94,10 +94,10 @@
*/
void sync_global_pgds(unsigned long start, unsigned long end, int removed)
{
- unsigned long address;
+ unsigned long addr;
- for (address = start; address <= end; address += PGDIR_SIZE) {
- const pgd_t *pgd_ref = pgd_offset_k(address);
+ for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
+ const pgd_t *pgd_ref = pgd_offset_k(addr);
struct page *page;
/*
@@ -113,7 +113,7 @@
pgd_t *pgd;
spinlock_t *pgt_lock;
- pgd = (pgd_t *)page_address(page) + pgd_index(address);
+ pgd = (pgd_t *)page_address(page) + pgd_index(addr);
/* the pgt_lock only for Xen */
pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
spin_lock(pgt_lock);
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index e4f8009..a75103e 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -293,7 +293,7 @@
* We were not able to extract an address from the instruction,
* probably because there was something invalid in it.
*/
- if (info->si_addr == (void *)-1) {
+ if (info->si_addr == (void __user *)-1) {
err = -EINVAL;
goto err_out;
}
@@ -525,15 +525,7 @@
if (!kernel_managing_mpx_tables(current->mm))
return -EINVAL;
- if (do_mpx_bt_fault()) {
- force_sig(SIGSEGV, current);
- /*
- * The force_sig() is essentially "handling" this
- * exception, so we do not pass up the error
- * from do_mpx_bt_fault().
- */
- }
- return 0;
+ return do_mpx_bt_fault();
}
/*
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 6b7ce62..aca6295 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -100,5 +100,6 @@
printk(KERN_DEBUG "High memory starts at vaddr %08lx\n",
(ulong) pfn_to_kaddr(highstart_pfn));
+ __vmalloc_start_set = true;
setup_bootmem_allocator();
}
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 83e701f..89d7907 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -36,14 +36,14 @@
#undef pr_fmt
#define pr_fmt(fmt) "" fmt
-static bool boot_cpu_done;
-
-static int __read_mostly __pat_enabled = IS_ENABLED(CONFIG_X86_PAT);
-static void init_cache_modes(void);
+static bool __read_mostly boot_cpu_done;
+static bool __read_mostly pat_disabled = !IS_ENABLED(CONFIG_X86_PAT);
+static bool __read_mostly pat_initialized;
+static bool __read_mostly init_cm_done;
void pat_disable(const char *reason)
{
- if (!__pat_enabled)
+ if (pat_disabled)
return;
if (boot_cpu_done) {
@@ -51,10 +51,8 @@
return;
}
- __pat_enabled = 0;
+ pat_disabled = true;
pr_info("x86/PAT: %s\n", reason);
-
- init_cache_modes();
}
static int __init nopat(char *str)
@@ -66,7 +64,7 @@
bool pat_enabled(void)
{
- return !!__pat_enabled;
+ return pat_initialized;
}
EXPORT_SYMBOL_GPL(pat_enabled);
@@ -204,6 +202,8 @@
update_cache_mode_entry(i, cache);
}
pr_info("x86/PAT: Configuration [0-7]: %s\n", pat_msg);
+
+ init_cm_done = true;
}
#define PAT(x, y) ((u64)PAT_ ## y << ((x)*8))
@@ -224,6 +224,7 @@
}
wrmsrl(MSR_IA32_CR_PAT, pat);
+ pat_initialized = true;
__init_cache_modes(pat);
}
@@ -241,10 +242,9 @@
wrmsrl(MSR_IA32_CR_PAT, pat);
}
-static void init_cache_modes(void)
+void init_cache_modes(void)
{
u64 pat = 0;
- static int init_cm_done;
if (init_cm_done)
return;
@@ -286,8 +286,6 @@
}
__init_cache_modes(pat);
-
- init_cm_done = 1;
}
/**
@@ -305,10 +303,8 @@
u64 pat;
struct cpuinfo_x86 *c = &boot_cpu_data;
- if (!pat_enabled()) {
- init_cache_modes();
+ if (pat_disabled)
return;
- }
if ((c->x86_vendor == X86_VENDOR_INTEL) &&
(((c->x86 == 0x6) && (c->x86_model <= 0xd)) ||
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index a7655f6..75fb011 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -263,8 +263,6 @@
{
struct flush_tlb_info info;
- if (end == 0)
- end = start + PAGE_SIZE;
info.flush_mm = mm;
info.flush_start = start;
info.flush_end = end;
@@ -393,7 +391,7 @@
}
if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
- flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);
+ flush_tlb_others(mm_cpumask(mm), mm, start, start + PAGE_SIZE);
preempt_enable();
}
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 6d52b94..20fa7c8 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -571,3 +571,35 @@
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_invalid_bar);
+
+/*
+ * Apple MacBook Pro: Avoid [mem 0x7fa00000-0x7fbfffff]
+ *
+ * Using the [mem 0x7fa00000-0x7fbfffff] region, e.g., by assigning it to
+ * the 00:1c.0 Root Port, causes a conflict with [io 0x1804], which is used
+ * for soft poweroff and suspend-to-RAM.
+ *
+ * As far as we know, this is related to the address space, not to the Root
+ * Port itself. Attaching the quirk to the Root Port is a convenience, but
+ * it could probably also be a standalone DMI quirk.
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=103211
+ */
+static void quirk_apple_mbp_poweroff(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+
+ if ((!dmi_match(DMI_PRODUCT_NAME, "MacBookPro11,4") &&
+ !dmi_match(DMI_PRODUCT_NAME, "MacBookPro11,5")) ||
+ pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x1c, 0))
+ return;
+
+ res = request_mem_region(0x7fa00000, 0x200000,
+ "MacBook Pro poweroff workaround");
+ if (res)
+ dev_info(dev, "claimed %s %pR\n", res->name, res);
+ else
+ dev_info(dev, "can't work around MacBook Pro poweroff issue\n");
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8c10, quirk_apple_mbp_poweroff);
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index 0c2fae8..73eb7fd 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -992,11 +992,12 @@
die("Segment relocations found but --realmode not specified\n");
/* Order the relocations for more efficient processing */
- sort_relocs(&relocs16);
sort_relocs(&relocs32);
#if ELF_BITS == 64
sort_relocs(&relocs32neg);
sort_relocs(&relocs64);
+#else
+ sort_relocs(&relocs16);
#endif
/* Print the relocations */
diff --git a/arch/xtensa/include/asm/irq.h b/arch/xtensa/include/asm/irq.h
index f71f88e..19707db 100644
--- a/arch/xtensa/include/asm/irq.h
+++ b/arch/xtensa/include/asm/irq.h
@@ -29,7 +29,8 @@
# define PLATFORM_NR_IRQS 0
#endif
#define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS
-#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS)
+#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS + 1)
+#define XTENSA_PIC_LINUX_IRQ(hwirq) ((hwirq) + 1)
#if VARIANT_NR_IRQS == 0
static inline void variant_init_irq(void) { }
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index 4ac3d23..4416944 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -34,11 +34,6 @@
{
int irq = irq_find_mapping(NULL, hwirq);
- if (hwirq >= NR_IRQS) {
- printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
- __func__, hwirq);
- }
-
#ifdef CONFIG_DEBUG_STACKOVERFLOW
/* Debugging check for stack overflow: is there less than 1KB free? */
{
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
index 83cf496..3aaaae1 100644
--- a/arch/xtensa/kernel/syscall.c
+++ b/arch/xtensa/kernel/syscall.c
@@ -87,7 +87,7 @@
/* At this point: (!vmm || addr < vmm->vm_end). */
if (TASK_SIZE - len < addr)
return -ENOMEM;
- if (!vmm || addr + len <= vmm->vm_start)
+ if (!vmm || addr + len <= vm_start_gap(vmm))
return addr;
addr = vmm->vm_end;
if (flags & MAP_SHARED)
diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
index dbeea2b..1fda7e2 100644
--- a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
+++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
@@ -24,16 +24,18 @@
/* Interrupt configuration. */
-#define PLATFORM_NR_IRQS 10
+#define PLATFORM_NR_IRQS 0
/* Default assignment of LX60 devices to external interrupts. */
#ifdef CONFIG_XTENSA_MX
#define DUART16552_INTNUM XCHAL_EXTINT3_NUM
#define OETH_IRQ XCHAL_EXTINT4_NUM
+#define C67X00_IRQ XCHAL_EXTINT8_NUM
#else
#define DUART16552_INTNUM XCHAL_EXTINT0_NUM
#define OETH_IRQ XCHAL_EXTINT1_NUM
+#define C67X00_IRQ XCHAL_EXTINT5_NUM
#endif
/*
@@ -63,5 +65,5 @@
#define C67X00_PADDR (XCHAL_KIO_PADDR + 0x0D0D0000)
#define C67X00_SIZE 0x10
-#define C67X00_IRQ 5
+
#endif /* __XTENSA_XTAVNET_HARDWARE_H */
diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c
index 779be72..42285f3 100644
--- a/arch/xtensa/platforms/xtfpga/setup.c
+++ b/arch/xtensa/platforms/xtfpga/setup.c
@@ -175,8 +175,8 @@
.flags = IORESOURCE_MEM,
},
[2] = { /* IRQ number */
- .start = OETH_IRQ,
- .end = OETH_IRQ,
+ .start = XTENSA_PIC_LINUX_IRQ(OETH_IRQ),
+ .end = XTENSA_PIC_LINUX_IRQ(OETH_IRQ),
.flags = IORESOURCE_IRQ,
},
};
@@ -213,8 +213,8 @@
.flags = IORESOURCE_MEM,
},
[1] = { /* IRQ number */
- .start = C67X00_IRQ,
- .end = C67X00_IRQ,
+ .start = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ),
+ .end = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ),
.flags = IORESOURCE_IRQ,
},
};
@@ -247,7 +247,7 @@
static struct plat_serial8250_port serial_platform_data[] = {
[0] = {
.mapbase = DUART16552_PADDR,
- .irq = DUART16552_INTNUM,
+ .irq = XTENSA_PIC_LINUX_IRQ(DUART16552_INTNUM),
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
UPF_IOREMAP,
.iotype = XCHAL_HAVE_BE ? UPIO_MEM32BE : UPIO_MEM32,
diff --git a/block/partitions/msdos.c b/block/partitions/msdos.c
index 93e7c1b..5610cd5 100644
--- a/block/partitions/msdos.c
+++ b/block/partitions/msdos.c
@@ -300,6 +300,8 @@
continue;
bsd_start = le32_to_cpu(p->p_offset);
bsd_size = le32_to_cpu(p->p_size);
+ if (memcmp(flavour, "bsd\0", 4) == 0)
+ bsd_start += offset;
if (offset == bsd_start && size == bsd_size)
/* full parent partition, we have it already */
continue;
diff --git a/crypto/drbg.c b/crypto/drbg.c
index 123d211..8cac3d3 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1691,6 +1691,7 @@
return PTR_ERR(sk_tfm);
}
drbg->ctr_handle = sk_tfm;
+ init_completion(&drbg->ctr_completion);
req = skcipher_request_alloc(sk_tfm, GFP_KERNEL);
if (!req) {
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index 8baab43..7830d30 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -496,7 +496,7 @@
goto done;
pos++;
- if (memcmp(out_buf + pos, digest_info->data, digest_info->size))
+ if (crypto_memneq(out_buf + pos, digest_info->data, digest_info->size))
goto done;
pos += digest_info->size;
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 22ca892..79152db 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -147,7 +147,7 @@
module_param(ec_storm_threshold, uint, 0644);
MODULE_PARM_DESC(ec_storm_threshold, "Maxim false GPE numbers not considered as GPE storm");
-static bool ec_freeze_events __read_mostly = true;
+static bool ec_freeze_events __read_mostly = false;
module_param(ec_freeze_events, bool, 0644);
MODULE_PARM_DESC(ec_freeze_events, "Disabling event handling during suspend/resume");
@@ -1865,24 +1865,6 @@
}
#ifdef CONFIG_PM_SLEEP
-static int acpi_ec_suspend_noirq(struct device *dev)
-{
- struct acpi_ec *ec =
- acpi_driver_data(to_acpi_device(dev));
-
- acpi_ec_enter_noirq(ec);
- return 0;
-}
-
-static int acpi_ec_resume_noirq(struct device *dev)
-{
- struct acpi_ec *ec =
- acpi_driver_data(to_acpi_device(dev));
-
- acpi_ec_leave_noirq(ec);
- return 0;
-}
-
static int acpi_ec_suspend(struct device *dev)
{
struct acpi_ec *ec =
@@ -1904,7 +1886,6 @@
#endif
static const struct dev_pm_ops acpi_ec_pm = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend_noirq, acpi_ec_resume_noirq)
SET_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend, acpi_ec_resume)
};
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 9ef3941..f3bc901 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -2945,6 +2945,8 @@
static __init int nfit_init(void)
{
+ int ret;
+
BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
@@ -2972,8 +2974,14 @@
return -ENOMEM;
nfit_mce_register();
+ ret = acpi_bus_register_driver(&acpi_nfit_driver);
+ if (ret) {
+ nfit_mce_unregister();
+ destroy_workqueue(nfit_wq);
+ }
- return acpi_bus_register_driver(&acpi_nfit_driver);
+ return ret;
+
}
static __exit void nfit_exit(void)
diff --git a/drivers/android/Makefile b/drivers/android/Makefile
index 3b7e4b0..4b7c726 100644
--- a/drivers/android/Makefile
+++ b/drivers/android/Makefile
@@ -1,3 +1,3 @@
ccflags-y += -I$(src) # needed for trace events
-obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o
+obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o binder_alloc.o
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 6485c77..632c814 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -15,6 +15,40 @@
*
*/
+/*
+ * Locking overview
+ *
+ * There are 3 main spinlocks which must be acquired in the
+ * order shown:
+ *
+ * 1) proc->outer_lock : protects binder_ref
+ * binder_proc_lock() and binder_proc_unlock() are
+ * used to acq/rel.
+ * 2) node->lock : protects most fields of binder_node.
+ * binder_node_lock() and binder_node_unlock() are
+ * used to acq/rel
+ * 3) proc->inner_lock : protects the thread and node lists
+ * (proc->threads, proc->waiting_threads, proc->nodes)
+ * and all todo lists associated with the binder_proc
+ * (proc->todo, thread->todo, proc->delivered_death and
+ * node->async_todo), as well as thread->transaction_stack
+ * binder_inner_proc_lock() and binder_inner_proc_unlock()
+ * are used to acq/rel
+ *
+ * Any lock under procA must never be nested under any lock at the same
+ * level or below on procB.
+ *
+ * Functions that require a lock held on entry indicate which lock
+ * in the suffix of the function name:
+ *
+ * foo_olocked() : requires node->outer_lock
+ * foo_nlocked() : requires node->lock
+ * foo_ilocked() : requires proc->inner_lock
+ * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
+ * foo_nilocked(): requires node->lock and proc->inner_lock
+ * ...
+ */
+
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <asm/cacheflush.h>
@@ -24,7 +58,6 @@
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
-#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
@@ -34,30 +67,31 @@
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
+#include <linux/spinlock.h>
#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif
#include <uapi/linux/android/binder.h>
+#include "binder_alloc.h"
#include "binder_trace.h"
-static DEFINE_MUTEX(binder_main_lock);
+static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);
-static DEFINE_MUTEX(binder_mmap_lock);
static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
-static HLIST_HEAD(binder_deferred_list);
+static DEFINE_MUTEX(binder_procs_lock);
+
static HLIST_HEAD(binder_dead_nodes);
+static DEFINE_SPINLOCK(binder_dead_nodes_lock);
static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
-static int binder_last_id;
+static atomic_t binder_last_id;
#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
@@ -103,17 +137,13 @@
BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
BINDER_DEBUG_FREE_BUFFER = 1U << 11,
BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
- BINDER_DEBUG_BUFFER_ALLOC = 1U << 13,
- BINDER_DEBUG_PRIORITY_CAP = 1U << 14,
- BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15,
+ BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
+ BINDER_DEBUG_SPINLOCKS = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
-static bool binder_debug_no_lock;
-module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
-
static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, S_IRUGO);
@@ -170,26 +200,27 @@
};
struct binder_stats {
- int br[_IOC_NR(BR_FAILED_REPLY) + 1];
- int bc[_IOC_NR(BC_REPLY_SG) + 1];
- int obj_created[BINDER_STAT_COUNT];
- int obj_deleted[BINDER_STAT_COUNT];
+ atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
+ atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
+ atomic_t obj_created[BINDER_STAT_COUNT];
+ atomic_t obj_deleted[BINDER_STAT_COUNT];
};
static struct binder_stats binder_stats;
static inline void binder_stats_deleted(enum binder_stat_types type)
{
- binder_stats.obj_deleted[type]++;
+ atomic_inc(&binder_stats.obj_deleted[type]);
}
static inline void binder_stats_created(enum binder_stat_types type)
{
- binder_stats.obj_created[type]++;
+ atomic_inc(&binder_stats.obj_created[type]);
}
struct binder_transaction_log_entry {
int debug_id;
+ int debug_id_done;
int call_type;
int from_proc;
int from_thread;
@@ -199,11 +230,14 @@
int to_node;
int data_size;
int offsets_size;
+ int return_error_line;
+ uint32_t return_error;
+ uint32_t return_error_param;
const char *context_name;
};
struct binder_transaction_log {
- int next;
- int full;
+ atomic_t cur;
+ bool full;
struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
@@ -213,19 +247,26 @@
struct binder_transaction_log *log)
{
struct binder_transaction_log_entry *e;
+ unsigned int cur = atomic_inc_return(&log->cur);
- e = &log->entry[log->next];
- memset(e, 0, sizeof(*e));
- log->next++;
- if (log->next == ARRAY_SIZE(log->entry)) {
- log->next = 0;
+ if (cur >= ARRAY_SIZE(log->entry))
log->full = 1;
- }
+ e = &log->entry[cur % ARRAY_SIZE(log->entry)];
+ WRITE_ONCE(e->debug_id_done, 0);
+ /*
+ * write-barrier to synchronize access to e->debug_id_done.
+	 * We make sure the initialized 0 value is seen before
+	 * the other fields are zeroed by the memset() below.
+ */
+ smp_wmb();
+ memset(e, 0, sizeof(*e));
return e;
}
struct binder_context {
struct binder_node *binder_context_mgr_node;
+ struct mutex context_mgr_node_lock;
+
kuid_t binder_context_mgr_uid;
const char *name;
};
@@ -236,11 +277,20 @@
struct binder_context context;
};
+/**
+ * struct binder_work - work enqueued on a worklist
+ * @entry: node enqueued on list
+ * @type: type of work to be performed
+ *
+ * There are separate work lists for proc, thread, and node (async).
+ */
struct binder_work {
struct list_head entry;
+
enum {
BINDER_WORK_TRANSACTION = 1,
BINDER_WORK_TRANSACTION_COMPLETE,
+ BINDER_WORK_RETURN_ERROR,
BINDER_WORK_NODE,
BINDER_WORK_DEAD_BINDER,
BINDER_WORK_DEAD_BINDER_AND_CLEAR,
@@ -248,8 +298,76 @@
} type;
};
+struct binder_error {
+ struct binder_work work;
+ uint32_t cmd;
+};
+
+/**
+ * struct binder_node - binder node bookkeeping
+ * @debug_id: unique ID for debugging
+ * (invariant after initialized)
+ * @lock: lock for node fields
+ * @work: worklist element for node work
+ * (protected by @proc->inner_lock)
+ * @rb_node: element for proc->nodes tree
+ * (protected by @proc->inner_lock)
+ * @dead_node: element for binder_dead_nodes list
+ * (protected by binder_dead_nodes_lock)
+ * @proc: binder_proc that owns this node
+ * (invariant after initialized)
+ * @refs: list of references on this node
+ * (protected by @lock)
+ * @internal_strong_refs: used to take strong references when
+ * initiating a transaction
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @local_weak_refs: weak user refs from local process
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @local_strong_refs: strong user refs from local process
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @tmp_refs: temporary kernel refs
+ * (protected by @proc->inner_lock while @proc
+ * is valid, and by binder_dead_nodes_lock
+ * if @proc is NULL. During inc/dec and node release
+ * it is also protected by @lock to provide safety
+ * as the node dies and @proc becomes NULL)
+ * @ptr: userspace pointer for node
+ * (invariant, no lock needed)
+ * @cookie: userspace cookie for node
+ * (invariant, no lock needed)
+ * @has_strong_ref: userspace notified of strong ref
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @pending_strong_ref: userspace has acked notification of strong ref
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @has_weak_ref: userspace notified of weak ref
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @pending_weak_ref: userspace has acked notification of weak ref
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @has_async_transaction: async transaction to node in progress
+ * (protected by @lock)
+ * @sched_policy: minimum scheduling policy for node
+ * (invariant after initialized)
+ * @accept_fds: file descriptor operations supported for node
+ * (invariant after initialized)
+ * @min_priority: minimum scheduling priority
+ * (invariant after initialized)
+ * @inherit_rt: inherit RT scheduling policy from caller
+ * (invariant after initialized)
+ * @async_todo: list of async work items
+ * (protected by @proc->inner_lock)
+ *
+ * Bookkeeping structure for binder nodes.
+ */
struct binder_node {
int debug_id;
+ spinlock_t lock;
struct binder_work work;
union {
struct rb_node rb_node;
@@ -260,88 +378,185 @@
int internal_strong_refs;
int local_weak_refs;
int local_strong_refs;
+ int tmp_refs;
binder_uintptr_t ptr;
binder_uintptr_t cookie;
- unsigned has_strong_ref:1;
- unsigned pending_strong_ref:1;
- unsigned has_weak_ref:1;
- unsigned pending_weak_ref:1;
- unsigned has_async_transaction:1;
- unsigned accept_fds:1;
- unsigned min_priority:8;
+ struct {
+ /*
+ * bitfield elements protected by
+ * proc inner_lock
+ */
+ u8 has_strong_ref:1;
+ u8 pending_strong_ref:1;
+ u8 has_weak_ref:1;
+ u8 pending_weak_ref:1;
+ };
+ struct {
+ /*
+ * invariant after initialization
+ */
+ u8 sched_policy:2;
+ u8 inherit_rt:1;
+ u8 accept_fds:1;
+ u8 min_priority;
+ };
+ bool has_async_transaction;
struct list_head async_todo;
};
struct binder_ref_death {
+ /**
+ * @work: worklist element for death notifications
+ * (protected by inner_lock of the proc that
+ * this ref belongs to)
+ */
struct binder_work work;
binder_uintptr_t cookie;
};
+/**
+ * struct binder_ref_data - binder_ref counts and id
+ * @debug_id: unique ID for the ref
+ * @desc: unique userspace handle for ref
+ * @strong: strong ref count (debugging only if not locked)
+ * @weak: weak ref count (debugging only if not locked)
+ *
+ * Structure to hold ref count and ref id information. Since
+ * the actual ref can only be accessed with a lock, this structure
+ * is used to return information about the ref to callers of
+ * ref inc/dec functions.
+ */
+struct binder_ref_data {
+ int debug_id;
+ uint32_t desc;
+ int strong;
+ int weak;
+};
+
+/**
+ * struct binder_ref - struct to track references on nodes
+ * @data: binder_ref_data containing id, handle, and current refcounts
+ * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
+ * @rb_node_node: node for lookup by @node in proc's rb_tree
+ * @node_entry: list entry for node->refs list in target node
+ * (protected by @node->lock)
+ * @proc: binder_proc containing ref
+ * @node: binder_node of target node. When cleaning up a
+ * ref for deletion in binder_cleanup_ref, a non-NULL
+ * @node indicates the node must be freed
+ * @death: pointer to death notification (ref_death) if requested
+ * (protected by @node->lock)
+ *
+ * Structure to track references from procA to target node (on procB). This
+ * structure is unsafe to access without holding @proc->outer_lock.
+ */
struct binder_ref {
/* Lookups needed: */
/* node + proc => ref (transaction) */
/* desc + proc => ref (transaction, inc/dec ref) */
/* node => refs + procs (proc exit) */
- int debug_id;
+ struct binder_ref_data data;
struct rb_node rb_node_desc;
struct rb_node rb_node_node;
struct hlist_node node_entry;
struct binder_proc *proc;
struct binder_node *node;
- uint32_t desc;
- int strong;
- int weak;
struct binder_ref_death *death;
};
-struct binder_buffer {
- struct list_head entry; /* free and allocated entries by address */
- struct rb_node rb_node; /* free entry by size or allocated entry */
- /* by address */
- unsigned free:1;
- unsigned allow_user_free:1;
- unsigned async_transaction:1;
- unsigned debug_id:29;
-
- struct binder_transaction *transaction;
-
- struct binder_node *target_node;
- size_t data_size;
- size_t offsets_size;
- size_t extra_buffers_size;
- uint8_t data[0];
-};
-
enum binder_deferred_state {
BINDER_DEFERRED_PUT_FILES = 0x01,
BINDER_DEFERRED_FLUSH = 0x02,
BINDER_DEFERRED_RELEASE = 0x04,
};
+/**
+ * struct binder_priority - scheduler policy and priority
+ * @sched_policy:     scheduler policy
+ * @prio:             [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT
+ *
+ * The binder driver supports inheriting the following scheduler policies:
+ * SCHED_NORMAL
+ * SCHED_BATCH
+ * SCHED_FIFO
+ * SCHED_RR
+ */
+struct binder_priority {
+ unsigned int sched_policy;
+ int prio;
+};
+
+/**
+ * struct binder_proc - binder process bookkeeping
+ * @proc_node: element for binder_procs list
+ * @threads: rbtree of binder_threads in this proc
+ * (protected by @inner_lock)
+ * @nodes: rbtree of binder nodes associated with
+ * this proc ordered by node->ptr
+ * (protected by @inner_lock)
+ * @refs_by_desc: rbtree of refs ordered by ref->desc
+ * (protected by @outer_lock)
+ * @refs_by_node: rbtree of refs ordered by ref->node
+ * (protected by @outer_lock)
+ * @waiting_threads: threads currently waiting for proc work
+ * (protected by @inner_lock)
+ * @pid PID of group_leader of process
+ * (invariant after initialized)
+ * @tsk task_struct for group_leader of process
+ * (invariant after initialized)
+ * @files files_struct for process
+ * (invariant after initialized)
+ * @deferred_work_node: element for binder_deferred_list
+ * (protected by binder_deferred_lock)
+ * @deferred_work: bitmap of deferred work to perform
+ * (protected by binder_deferred_lock)
+ * @is_dead: process is dead and awaiting free
+ * when outstanding transactions are cleaned up
+ * (protected by @inner_lock)
+ * @todo: list of work for this process
+ * (protected by @inner_lock)
+ * @wait: wait queue head to wait for proc work
+ * (invariant after initialized)
+ * @stats: per-process binder statistics
+ * (atomics, no lock needed)
+ * @delivered_death: list of delivered death notification
+ * (protected by @inner_lock)
+ * @max_threads: cap on number of binder threads
+ * (protected by @inner_lock)
+ * @requested_threads: number of binder threads requested but not
+ * yet started. In current implementation, can
+ * only be 0 or 1.
+ * (protected by @inner_lock)
+ * @requested_threads_started: number binder threads started
+ * (protected by @inner_lock)
+ * @tmp_ref: temporary reference to indicate proc is in use
+ * (protected by @inner_lock)
+ * @default_priority: default scheduler priority
+ * (invariant after initialized)
+ * @debugfs_entry: debugfs node
+ * @alloc: binder allocator bookkeeping
+ * @context: binder_context for this proc
+ * (invariant after initialized)
+ * @inner_lock: can nest under outer_lock and/or node lock
+ * @outer_lock:           no nesting under inner or node lock
+ * Lock order: 1) outer, 2) node, 3) inner
+ *
+ * Bookkeeping structure for binder processes
+ */
struct binder_proc {
struct hlist_node proc_node;
struct rb_root threads;
struct rb_root nodes;
struct rb_root refs_by_desc;
struct rb_root refs_by_node;
+ struct list_head waiting_threads;
int pid;
- struct vm_area_struct *vma;
- struct mm_struct *vma_vm_mm;
struct task_struct *tsk;
struct files_struct *files;
struct hlist_node deferred_work_node;
int deferred_work;
- void *buffer;
- ptrdiff_t user_buffer_offset;
+ bool is_dead;
- struct list_head buffers;
- struct rb_root free_buffers;
- struct rb_root allocated_buffers;
- size_t free_async_space;
-
- struct page **pages;
- size_t buffer_size;
- uint32_t buffer_free;
struct list_head todo;
wait_queue_head_t wait;
struct binder_stats stats;
@@ -349,10 +564,13 @@
int max_threads;
int requested_threads;
int requested_threads_started;
- int ready_threads;
- long default_priority;
+ int tmp_ref;
+ struct binder_priority default_priority;
struct dentry *debugfs_entry;
+ struct binder_alloc alloc;
struct binder_context *context;
+ spinlock_t inner_lock;
+ spinlock_t outer_lock;
};
enum {
@@ -361,22 +579,60 @@
BINDER_LOOPER_STATE_EXITED = 0x04,
BINDER_LOOPER_STATE_INVALID = 0x08,
BINDER_LOOPER_STATE_WAITING = 0x10,
- BINDER_LOOPER_STATE_NEED_RETURN = 0x20
+ BINDER_LOOPER_STATE_POLL = 0x20,
};
+/**
+ * struct binder_thread - binder thread bookkeeping
+ * @proc: binder process for this thread
+ * (invariant after initialization)
+ * @rb_node: element for proc->threads rbtree
+ * (protected by @proc->inner_lock)
+ * @waiting_thread_node: element for @proc->waiting_threads list
+ * (protected by @proc->inner_lock)
+ * @pid: PID for this thread
+ * (invariant after initialization)
+ * @looper: bitmap of looping state
+ * (only accessed by this thread)
+ * @looper_needs_return: looping thread needs to exit driver
+ * (no lock needed)
+ * @transaction_stack: stack of in-progress transactions for this thread
+ * (protected by @proc->inner_lock)
+ * @todo: list of work to do for this thread
+ * (protected by @proc->inner_lock)
+ * @return_error: transaction errors reported by this thread
+ * (only accessed by this thread)
+ * @reply_error: transaction errors reported by target thread
+ * (protected by @proc->inner_lock)
+ * @wait: wait queue for thread work
+ * @stats: per-thread statistics
+ * (atomics, no lock needed)
+ * @tmp_ref: temporary reference to indicate thread is in use
+ * (atomic since @proc->inner_lock cannot
+ * always be acquired)
+ * @is_dead: thread is dead and awaiting free
+ * when outstanding transactions are cleaned up
+ * (protected by @proc->inner_lock)
+ * @task: struct task_struct for this thread
+ *
+ * Bookkeeping structure for binder threads.
+ */
struct binder_thread {
struct binder_proc *proc;
struct rb_node rb_node;
+ struct list_head waiting_thread_node;
int pid;
- int looper;
+ int looper; /* only modified by this thread */
+ bool looper_need_return; /* can be written by other thread */
struct binder_transaction *transaction_stack;
struct list_head todo;
- uint32_t return_error; /* Write failed, return error code in read buf */
- uint32_t return_error2; /* Write failed, return error code in read */
- /* buffer. Used when sending a reply to a dead process that */
- /* we are also waiting on */
+ struct binder_error return_error;
+ struct binder_error reply_error;
wait_queue_head_t wait;
struct binder_stats stats;
+ atomic_t tmp_ref;
+ bool is_dead;
+ struct task_struct *task;
};
struct binder_transaction {
@@ -393,20 +649,263 @@
struct binder_buffer *buffer;
unsigned int code;
unsigned int flags;
- long priority;
- long saved_priority;
+ struct binder_priority priority;
+ struct binder_priority saved_priority;
+ bool set_priority_called;
kuid_t sender_euid;
+ /**
+ * @lock: protects @from, @to_proc, and @to_thread
+ *
+ * @from, @to_proc, and @to_thread can be set to NULL
+ * during thread teardown
+ */
+ spinlock_t lock;
};
+/**
+ * binder_proc_lock() - Acquire outer lock for given binder_proc
+ * @proc: struct binder_proc to acquire
+ *
+ * Acquires proc->outer_lock. Used to protect binder_ref
+ * structures associated with the given proc.
+ */
+#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
+static void
+_binder_proc_lock(struct binder_proc *proc, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_lock(&proc->outer_lock);
+}
+
+/**
+ * binder_proc_unlock() - Release spinlock for given binder_proc
+ * @proc: struct binder_proc to acquire
+ *
+ * Release lock acquired via binder_proc_lock()
+ */
+#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
+static void
+_binder_proc_unlock(struct binder_proc *proc, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_unlock(&proc->outer_lock);
+}
+
+/**
+ * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
+ * @proc: struct binder_proc to acquire
+ *
+ * Acquires proc->inner_lock. Used to protect todo lists
+ */
+#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
+static void
+_binder_inner_proc_lock(struct binder_proc *proc, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_lock(&proc->inner_lock);
+}
+
+/**
+ * binder_inner_proc_unlock() - Release inner lock for given binder_proc
+ * @proc: struct binder_proc to acquire
+ *
+ * Release lock acquired via binder_inner_proc_lock()
+ */
+#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
+static void
+_binder_inner_proc_unlock(struct binder_proc *proc, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_unlock(&proc->inner_lock);
+}
+
+/**
+ * binder_node_lock() - Acquire spinlock for given binder_node
+ * @node: struct binder_node to acquire
+ *
+ * Acquires node->lock. Used to protect binder_node fields
+ */
+#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
+static void
+_binder_node_lock(struct binder_node *node, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_lock(&node->lock);
+}
+
+/**
+ * binder_node_unlock() - Release spinlock for given binder_node
+ * @node: struct binder_node to acquire
+ *
+ * Release lock acquired via binder_node_lock()
+ */
+#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
+static void
+_binder_node_unlock(struct binder_node *node, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_unlock(&node->lock);
+}
+
+/**
+ * binder_node_inner_lock() - Acquire node and inner locks
+ * @node: struct binder_node to acquire
+ *
+ * Acquires node->lock. If node->proc also acquires
+ * proc->inner_lock. Used to protect binder_node fields
+ */
+#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
+static void
+_binder_node_inner_lock(struct binder_node *node, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_lock(&node->lock);
+ if (node->proc)
+ binder_inner_proc_lock(node->proc);
+}
+
+/**
+ * binder_node_inner_unlock() - Release node and inner locks
+ * @node:         struct binder_node to release
+ *
+ * Release locks acquired via binder_node_inner_lock()
+ */
+#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
+static void
+_binder_node_inner_unlock(struct binder_node *node, int line)
+{
+ struct binder_proc *proc = node->proc;
+
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ if (proc)
+ binder_inner_proc_unlock(proc);
+ spin_unlock(&node->lock);
+}
+
+static bool binder_worklist_empty_ilocked(struct list_head *list)
+{
+ return list_empty(list);
+}
+
+/**
+ * binder_worklist_empty() - Check if no items on the work list
+ * @proc: binder_proc associated with list
+ * @list: list to check
+ *
+ * Return: true if there are no items on list, else false
+ */
+static bool binder_worklist_empty(struct binder_proc *proc,
+ struct list_head *list)
+{
+ bool ret;
+
+ binder_inner_proc_lock(proc);
+ ret = binder_worklist_empty_ilocked(list);
+ binder_inner_proc_unlock(proc);
+ return ret;
+}
+
+static void
+binder_enqueue_work_ilocked(struct binder_work *work,
+ struct list_head *target_list)
+{
+ BUG_ON(target_list == NULL);
+ BUG_ON(work->entry.next && !list_empty(&work->entry));
+ list_add_tail(&work->entry, target_list);
+}
+
+/**
+ * binder_enqueue_work() - Add an item to the work list
+ * @proc: binder_proc associated with list
+ * @work: struct binder_work to add to list
+ * @target_list: list to add work to
+ *
+ * Adds the work to the specified list. Asserts that work
+ * is not already on a list.
+ */
+static void
+binder_enqueue_work(struct binder_proc *proc,
+ struct binder_work *work,
+ struct list_head *target_list)
+{
+ binder_inner_proc_lock(proc);
+ binder_enqueue_work_ilocked(work, target_list);
+ binder_inner_proc_unlock(proc);
+}
+
+static void
+binder_dequeue_work_ilocked(struct binder_work *work)
+{
+ list_del_init(&work->entry);
+}
+
+/**
+ * binder_dequeue_work() - Removes an item from the work list
+ * @proc: binder_proc associated with list
+ * @work: struct binder_work to remove from list
+ *
+ * Removes the specified work item from whatever list it is on.
+ * Can safely be called if work is not on any list.
+ */
+static void
+binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
+{
+ binder_inner_proc_lock(proc);
+ binder_dequeue_work_ilocked(work);
+ binder_inner_proc_unlock(proc);
+}
+
+static struct binder_work *binder_dequeue_work_head_ilocked(
+ struct list_head *list)
+{
+ struct binder_work *w;
+
+ w = list_first_entry_or_null(list, struct binder_work, entry);
+ if (w)
+ list_del_init(&w->entry);
+ return w;
+}
+
+/**
+ * binder_dequeue_work_head() - Dequeues the item at head of list
+ * @proc: binder_proc associated with list
+ * @list: list to dequeue head
+ *
+ * Removes the head of the list if there are items on the list
+ *
+ * Return: pointer dequeued binder_work, NULL if list was empty
+ */
+static struct binder_work *binder_dequeue_work_head(
+ struct binder_proc *proc,
+ struct list_head *list)
+{
+ struct binder_work *w;
+
+ binder_inner_proc_lock(proc);
+ w = binder_dequeue_work_head_ilocked(list);
+ binder_inner_proc_unlock(proc);
+ return w;
+}
+
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
+static void binder_free_thread(struct binder_thread *thread);
+static void binder_free_proc(struct binder_proc *proc);
+static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
struct files_struct *files = proc->files;
unsigned long rlim_cur;
unsigned long irqs;
- int ret;
if (files == NULL)
return -ESRCH;
@@ -417,11 +916,7 @@
rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
unlock_task_sighand(proc->tsk, &irqs);
- preempt_enable_no_resched();
- ret = __alloc_fd(files, 0, rlim_cur, flags);
- preempt_disable();
-
- return ret;
+ return __alloc_fd(files, 0, rlim_cur, flags);
}
/*
@@ -430,11 +925,8 @@
static void task_fd_install(
struct binder_proc *proc, unsigned int fd, struct file *file)
{
- if (proc->files) {
- preempt_enable_no_resched();
+ if (proc->files)
__fd_install(proc->files, fd, file);
- preempt_disable();
- }
}
/*
@@ -458,525 +950,281 @@
return retval;
}
-static inline void binder_lock(const char *tag)
+static bool binder_has_work_ilocked(struct binder_thread *thread,
+ bool do_proc_work)
{
- trace_binder_lock(tag);
- mutex_lock(&binder_main_lock);
- preempt_disable();
- trace_binder_locked(tag);
+ return !binder_worklist_empty_ilocked(&thread->todo) ||
+ thread->looper_need_return ||
+ (do_proc_work &&
+ !binder_worklist_empty_ilocked(&thread->proc->todo));
}
-static inline void binder_unlock(const char *tag)
+static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
- trace_binder_unlock(tag);
- mutex_unlock(&binder_main_lock);
- preempt_enable();
+ bool has_work;
+
+ binder_inner_proc_lock(thread->proc);
+ has_work = binder_has_work_ilocked(thread, do_proc_work);
+ binder_inner_proc_unlock(thread->proc);
+
+ return has_work;
}
-static inline void *kzalloc_preempt_disabled(size_t size)
+static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
- void *ptr;
-
- ptr = kzalloc(size, GFP_NOWAIT);
- if (ptr)
- return ptr;
-
- preempt_enable_no_resched();
- ptr = kzalloc(size, GFP_KERNEL);
- preempt_disable();
-
- return ptr;
+ return !thread->transaction_stack &&
+ binder_worklist_empty_ilocked(&thread->todo) &&
+ (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
+ BINDER_LOOPER_STATE_REGISTERED));
}
-static inline long copy_to_user_preempt_disabled(void __user *to, const void *from, long n)
+static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
+ bool sync)
{
- long ret;
+ struct rb_node *n;
+ struct binder_thread *thread;
- preempt_enable_no_resched();
- ret = copy_to_user(to, from, n);
- preempt_disable();
- return ret;
+ for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
+ thread = rb_entry(n, struct binder_thread, rb_node);
+ if (thread->looper & BINDER_LOOPER_STATE_POLL &&
+ binder_available_for_proc_work_ilocked(thread)) {
+ if (sync)
+ wake_up_interruptible_sync(&thread->wait);
+ else
+ wake_up_interruptible(&thread->wait);
+ }
+ }
}
-static inline long copy_from_user_preempt_disabled(void *to, const void __user *from, long n)
+/**
+ * binder_select_thread_ilocked() - selects a thread for doing proc work.
+ * @proc: process to select a thread from
+ *
+ * Note that calling this function moves the thread off the waiting_threads
+ * list, so it can only be woken up by the caller of this function, or a
+ * signal. Therefore, callers *should* always wake up the thread this function
+ * returns.
+ *
+ * Return: If there's a thread currently waiting for process work,
+ * returns that thread. Otherwise returns NULL.
+ */
+static struct binder_thread *
+binder_select_thread_ilocked(struct binder_proc *proc)
{
- long ret;
+ struct binder_thread *thread;
- preempt_enable_no_resched();
- ret = copy_from_user(to, from, n);
- preempt_disable();
- return ret;
+ BUG_ON(!spin_is_locked(&proc->inner_lock));
+ thread = list_first_entry_or_null(&proc->waiting_threads,
+ struct binder_thread,
+ waiting_thread_node);
+
+ if (thread)
+ list_del_init(&thread->waiting_thread_node);
+
+ return thread;
}
-#define get_user_preempt_disabled(x, ptr) \
-({ \
- int __ret; \
- preempt_enable_no_resched(); \
- __ret = get_user(x, ptr); \
- preempt_disable(); \
- __ret; \
-})
-
-#define put_user_preempt_disabled(x, ptr) \
-({ \
- int __ret; \
- preempt_enable_no_resched(); \
- __ret = put_user(x, ptr); \
- preempt_disable(); \
- __ret; \
-})
-
-static void binder_set_nice(long nice)
+/**
+ * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
+ * @proc: process to wake up a thread in
+ * @thread: specific thread to wake-up (may be NULL)
+ * @sync: whether to do a synchronous wake-up
+ *
+ * This function wakes up a thread in the @proc process.
+ * The caller may provide a specific thread to wake-up in
+ * the @thread parameter. If @thread is NULL, this function
+ * will wake up threads that have called poll().
+ *
+ * Note that for this function to work as expected, callers
+ * should first call binder_select_thread() to find a thread
+ * to handle the work (if they don't have a thread already),
+ * and pass the result into the @thread parameter.
+ */
+static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
+ struct binder_thread *thread,
+ bool sync)
{
- long min_nice;
+ BUG_ON(!spin_is_locked(&proc->inner_lock));
- if (can_nice(current, nice)) {
- set_user_nice(current, nice);
+ if (thread) {
+ if (sync)
+ wake_up_interruptible_sync(&thread->wait);
+ else
+ wake_up_interruptible(&thread->wait);
return;
}
- min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
- binder_debug(BINDER_DEBUG_PRIORITY_CAP,
- "%d: nice value %ld not allowed use %ld instead\n",
- current->pid, nice, min_nice);
- set_user_nice(current, min_nice);
- if (min_nice <= MAX_NICE)
- return;
- binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
+
+ /* Didn't find a thread waiting for proc work; this can happen
+ * in two scenarios:
+ * 1. All threads are busy handling transactions
+ * In that case, one of those threads should call back into
+ * the kernel driver soon and pick up this work.
+ * 2. Threads are using the (e)poll interface, in which case
+ * they may be blocked on the waitqueue without having been
+ * added to waiting_threads. For this case, we just iterate
+ * over all threads not handling transaction work, and
+ * wake them all up. We wake all because we don't know whether
+ * a thread that called into (e)poll is handling non-binder
+ * work currently.
+ */
+ binder_wakeup_poll_threads_ilocked(proc, sync);
}
-static size_t binder_buffer_size(struct binder_proc *proc,
- struct binder_buffer *buffer)
+static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
- if (list_is_last(&buffer->entry, &proc->buffers))
- return proc->buffer + proc->buffer_size - (void *)buffer->data;
- return (size_t)list_entry(buffer->entry.next,
- struct binder_buffer, entry) - (size_t)buffer->data;
+ struct binder_thread *thread = binder_select_thread_ilocked(proc);
+
+ binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}
-static void binder_insert_free_buffer(struct binder_proc *proc,
- struct binder_buffer *new_buffer)
+static bool is_rt_policy(int policy)
{
- struct rb_node **p = &proc->free_buffers.rb_node;
- struct rb_node *parent = NULL;
- struct binder_buffer *buffer;
- size_t buffer_size;
- size_t new_buffer_size;
-
- BUG_ON(!new_buffer->free);
-
- new_buffer_size = binder_buffer_size(proc, new_buffer);
-
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: add free buffer, size %zd, at %p\n",
- proc->pid, new_buffer_size, new_buffer);
-
- while (*p) {
- parent = *p;
- buffer = rb_entry(parent, struct binder_buffer, rb_node);
- BUG_ON(!buffer->free);
-
- buffer_size = binder_buffer_size(proc, buffer);
-
- if (new_buffer_size < buffer_size)
- p = &parent->rb_left;
- else
- p = &parent->rb_right;
- }
- rb_link_node(&new_buffer->rb_node, parent, p);
- rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
+ return policy == SCHED_FIFO || policy == SCHED_RR;
}
-static void binder_insert_allocated_buffer(struct binder_proc *proc,
- struct binder_buffer *new_buffer)
+static bool is_fair_policy(int policy)
{
- struct rb_node **p = &proc->allocated_buffers.rb_node;
- struct rb_node *parent = NULL;
- struct binder_buffer *buffer;
-
- BUG_ON(new_buffer->free);
-
- while (*p) {
- parent = *p;
- buffer = rb_entry(parent, struct binder_buffer, rb_node);
- BUG_ON(buffer->free);
-
- if (new_buffer < buffer)
- p = &parent->rb_left;
- else if (new_buffer > buffer)
- p = &parent->rb_right;
- else
- BUG();
- }
- rb_link_node(&new_buffer->rb_node, parent, p);
- rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
+ return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}
-static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
- uintptr_t user_ptr)
+static bool binder_supported_policy(int policy)
{
- struct rb_node *n = proc->allocated_buffers.rb_node;
- struct binder_buffer *buffer;
- struct binder_buffer *kern_ptr;
-
- kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
- - offsetof(struct binder_buffer, data));
-
- while (n) {
- buffer = rb_entry(n, struct binder_buffer, rb_node);
- BUG_ON(buffer->free);
-
- if (kern_ptr < buffer)
- n = n->rb_left;
- else if (kern_ptr > buffer)
- n = n->rb_right;
- else
- return buffer;
- }
- return NULL;
+ return is_fair_policy(policy) || is_rt_policy(policy);
}
-static int binder_update_page_range(struct binder_proc *proc, int allocate,
- void *start, void *end,
- struct vm_area_struct *vma)
+static int to_userspace_prio(int policy, int kernel_priority)
{
- void *page_addr;
- unsigned long user_page_addr;
- struct page **page;
- struct mm_struct *mm;
-
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: %s pages %p-%p\n", proc->pid,
- allocate ? "allocate" : "free", start, end);
-
- if (end <= start)
- return 0;
-
- trace_binder_update_page_range(proc, allocate, start, end);
-
- if (vma)
- mm = NULL;
+ if (is_fair_policy(policy))
+ return PRIO_TO_NICE(kernel_priority);
else
- mm = get_task_mm(proc->tsk);
-
- preempt_enable_no_resched();
-
- if (mm) {
- down_write(&mm->mmap_sem);
- vma = proc->vma;
- if (vma && mm != proc->vma_vm_mm) {
- pr_err("%d: vma mm and task mm mismatch\n",
- proc->pid);
- vma = NULL;
- }
- }
-
- if (allocate == 0)
- goto free_range;
-
- if (vma == NULL) {
- pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
- proc->pid);
- goto err_no_vma;
- }
-
- for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
- int ret;
-
- page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
-
- BUG_ON(*page);
- *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
- if (*page == NULL) {
- pr_err("%d: binder_alloc_buf failed for page at %p\n",
- proc->pid, page_addr);
- goto err_alloc_page_failed;
- }
- ret = map_kernel_range_noflush((unsigned long)page_addr,
- PAGE_SIZE, PAGE_KERNEL, page);
- flush_cache_vmap((unsigned long)page_addr,
- (unsigned long)page_addr + PAGE_SIZE);
- if (ret != 1) {
- pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
- proc->pid, page_addr);
- goto err_map_kernel_failed;
- }
- user_page_addr =
- (uintptr_t)page_addr + proc->user_buffer_offset;
- ret = vm_insert_page(vma, user_page_addr, page[0]);
- if (ret) {
- pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
- proc->pid, user_page_addr);
- goto err_vm_insert_page_failed;
- }
- /* vm_insert_page does not seem to increment the refcount */
- }
- if (mm) {
- up_write(&mm->mmap_sem);
- mmput(mm);
- }
-
- preempt_disable();
-
- return 0;
-
-free_range:
- for (page_addr = end - PAGE_SIZE; page_addr >= start;
- page_addr -= PAGE_SIZE) {
- page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
- if (vma)
- zap_page_range(vma, (uintptr_t)page_addr +
- proc->user_buffer_offset, PAGE_SIZE, NULL);
-err_vm_insert_page_failed:
- unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
-err_map_kernel_failed:
- __free_page(*page);
- *page = NULL;
-err_alloc_page_failed:
- ;
- }
-err_no_vma:
- if (mm) {
- up_write(&mm->mmap_sem);
- mmput(mm);
- }
-
- preempt_disable();
-
- return -ENOMEM;
+ return MAX_USER_RT_PRIO - 1 - kernel_priority;
}
-static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
- size_t data_size,
- size_t offsets_size,
- size_t extra_buffers_size,
- int is_async)
+static int to_kernel_prio(int policy, int user_priority)
{
- struct rb_node *n = proc->free_buffers.rb_node;
- struct binder_buffer *buffer;
- size_t buffer_size;
- struct rb_node *best_fit = NULL;
- void *has_page_addr;
- void *end_page_addr;
- size_t size, data_offsets_size;
+ if (is_fair_policy(policy))
+ return NICE_TO_PRIO(user_priority);
+ else
+ return MAX_USER_RT_PRIO - 1 - user_priority;
+}
- if (proc->vma == NULL) {
- pr_err("%d: binder_alloc_buf, no vma\n",
- proc->pid);
- return NULL;
- }
+static void binder_do_set_priority(struct task_struct *task,
+ struct binder_priority desired,
+ bool verify)
+{
+ int priority; /* user-space prio value */
+ bool has_cap_nice;
+ unsigned int policy = desired.sched_policy;
- data_offsets_size = ALIGN(data_size, sizeof(void *)) +
- ALIGN(offsets_size, sizeof(void *));
+ if (task->policy == policy && task->normal_prio == desired.prio)
+ return;
- if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
- binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
- proc->pid, data_size, offsets_size);
- return NULL;
- }
- size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
- if (size < data_offsets_size || size < extra_buffers_size) {
- binder_user_error("%d: got transaction with invalid extra_buffers_size %zd\n",
- proc->pid, extra_buffers_size);
- return NULL;
- }
- if (is_async &&
- proc->free_async_space < size + sizeof(struct binder_buffer)) {
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: binder_alloc_buf size %zd failed, no async space left\n",
- proc->pid, size);
- return NULL;
- }
+ has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE);
- while (n) {
- buffer = rb_entry(n, struct binder_buffer, rb_node);
- BUG_ON(!buffer->free);
- buffer_size = binder_buffer_size(proc, buffer);
+ priority = to_userspace_prio(policy, desired.prio);
- if (size < buffer_size) {
- best_fit = n;
- n = n->rb_left;
- } else if (size > buffer_size)
- n = n->rb_right;
- else {
- best_fit = n;
- break;
+ if (verify && is_rt_policy(policy) && !has_cap_nice) {
+ long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO);
+
+ if (max_rtprio == 0) {
+ policy = SCHED_NORMAL;
+ priority = MIN_NICE;
+ } else if (priority > max_rtprio) {
+ priority = max_rtprio;
}
}
- if (best_fit == NULL) {
- pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
- proc->pid, size);
- return NULL;
- }
- if (n == NULL) {
- buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
- buffer_size = binder_buffer_size(proc, buffer);
- }
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: binder_alloc_buf size %zd got buffer %p size %zd\n",
- proc->pid, size, buffer, buffer_size);
+ if (verify && is_fair_policy(policy) && !has_cap_nice) {
+ long min_nice = rlimit_to_nice(task_rlimit(task, RLIMIT_NICE));
- has_page_addr =
- (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
- if (n == NULL) {
- if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
- buffer_size = size; /* no room for other buffers */
- else
- buffer_size = size + sizeof(struct binder_buffer);
- }
- end_page_addr =
- (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
- if (end_page_addr > has_page_addr)
- end_page_addr = has_page_addr;
- if (binder_update_page_range(proc, 1,
- (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
- return NULL;
-
- rb_erase(best_fit, &proc->free_buffers);
- buffer->free = 0;
- binder_insert_allocated_buffer(proc, buffer);
- if (buffer_size != size) {
- struct binder_buffer *new_buffer = (void *)buffer->data + size;
-
- list_add(&new_buffer->entry, &buffer->entry);
- new_buffer->free = 1;
- binder_insert_free_buffer(proc, new_buffer);
- }
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: binder_alloc_buf size %zd got %p\n",
- proc->pid, size, buffer);
- buffer->data_size = data_size;
- buffer->offsets_size = offsets_size;
- buffer->extra_buffers_size = extra_buffers_size;
- buffer->async_transaction = is_async;
- if (is_async) {
- proc->free_async_space -= size + sizeof(struct binder_buffer);
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
- "%d: binder_alloc_buf size %zd async free %zd\n",
- proc->pid, size, proc->free_async_space);
- }
-
- return buffer;
-}
-
-static void *buffer_start_page(struct binder_buffer *buffer)
-{
- return (void *)((uintptr_t)buffer & PAGE_MASK);
-}
-
-static void *buffer_end_page(struct binder_buffer *buffer)
-{
- return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
-}
-
-static void binder_delete_free_buffer(struct binder_proc *proc,
- struct binder_buffer *buffer)
-{
- struct binder_buffer *prev, *next = NULL;
- int free_page_end = 1;
- int free_page_start = 1;
-
- BUG_ON(proc->buffers.next == &buffer->entry);
- prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
- BUG_ON(!prev->free);
- if (buffer_end_page(prev) == buffer_start_page(buffer)) {
- free_page_start = 0;
- if (buffer_end_page(prev) == buffer_end_page(buffer))
- free_page_end = 0;
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %p share page with %p\n",
- proc->pid, buffer, prev);
- }
-
- if (!list_is_last(&buffer->entry, &proc->buffers)) {
- next = list_entry(buffer->entry.next,
- struct binder_buffer, entry);
- if (buffer_start_page(next) == buffer_end_page(buffer)) {
- free_page_end = 0;
- if (buffer_start_page(next) ==
- buffer_start_page(buffer))
- free_page_start = 0;
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %p share page with %p\n",
- proc->pid, buffer, prev);
+ if (min_nice > MAX_NICE) {
+ binder_user_error("%d RLIMIT_NICE not set\n",
+ task->pid);
+ return;
+ } else if (priority < min_nice) {
+ priority = min_nice;
}
}
- list_del(&buffer->entry);
- if (free_page_start || free_page_end) {
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %p do not share page%s%s with %p or %p\n",
- proc->pid, buffer, free_page_start ? "" : " end",
- free_page_end ? "" : " start", prev, next);
- binder_update_page_range(proc, 0, free_page_start ?
- buffer_start_page(buffer) : buffer_end_page(buffer),
- (free_page_end ? buffer_end_page(buffer) :
- buffer_start_page(buffer)) + PAGE_SIZE, NULL);
+
+ if (policy != desired.sched_policy ||
+ to_kernel_prio(policy, priority) != desired.prio)
+ binder_debug(BINDER_DEBUG_PRIORITY_CAP,
+ "%d: priority %d not allowed, using %d instead\n",
+ task->pid, desired.prio,
+ to_kernel_prio(policy, priority));
+
+ /* Set the actual priority */
+ if (task->policy != policy || is_rt_policy(policy)) {
+ struct sched_param params;
+
+ params.sched_priority = is_rt_policy(policy) ? priority : 0;
+
+ sched_setscheduler_nocheck(task,
+ policy | SCHED_RESET_ON_FORK,
+ ¶ms);
}
+ if (is_fair_policy(policy))
+ set_user_nice(task, priority);
}
-static void binder_free_buf(struct binder_proc *proc,
- struct binder_buffer *buffer)
+static void binder_set_priority(struct task_struct *task,
+ struct binder_priority desired)
{
- size_t size, buffer_size;
-
- buffer_size = binder_buffer_size(proc, buffer);
-
- size = ALIGN(buffer->data_size, sizeof(void *)) +
- ALIGN(buffer->offsets_size, sizeof(void *)) +
- ALIGN(buffer->extra_buffers_size, sizeof(void *));
-
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: binder_free_buf %p size %zd buffer_size %zd\n",
- proc->pid, buffer, size, buffer_size);
-
- BUG_ON(buffer->free);
- BUG_ON(size > buffer_size);
- BUG_ON(buffer->transaction != NULL);
- BUG_ON((void *)buffer < proc->buffer);
- BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);
-
- if (buffer->async_transaction) {
- proc->free_async_space += size + sizeof(struct binder_buffer);
-
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
- "%d: binder_free_buf size %zd async free %zd\n",
- proc->pid, size, proc->free_async_space);
- }
-
- binder_update_page_range(proc, 0,
- (void *)PAGE_ALIGN((uintptr_t)buffer->data),
- (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
- NULL);
- rb_erase(&buffer->rb_node, &proc->allocated_buffers);
- buffer->free = 1;
- if (!list_is_last(&buffer->entry, &proc->buffers)) {
- struct binder_buffer *next = list_entry(buffer->entry.next,
- struct binder_buffer, entry);
-
- if (next->free) {
- rb_erase(&next->rb_node, &proc->free_buffers);
- binder_delete_free_buffer(proc, next);
- }
- }
- if (proc->buffers.next != &buffer->entry) {
- struct binder_buffer *prev = list_entry(buffer->entry.prev,
- struct binder_buffer, entry);
-
- if (prev->free) {
- binder_delete_free_buffer(proc, buffer);
- rb_erase(&prev->rb_node, &proc->free_buffers);
- buffer = prev;
- }
- }
- binder_insert_free_buffer(proc, buffer);
+ binder_do_set_priority(task, desired, /* verify = */ true);
}
-static struct binder_node *binder_get_node(struct binder_proc *proc,
- binder_uintptr_t ptr)
+static void binder_restore_priority(struct task_struct *task,
+ struct binder_priority desired)
+{
+ binder_do_set_priority(task, desired, /* verify = */ false);
+}
+
+static void binder_transaction_priority(struct task_struct *task,
+ struct binder_transaction *t,
+ struct binder_priority node_prio,
+ bool inherit_rt)
+{
+ struct binder_priority desired_prio;
+
+ if (t->set_priority_called)
+ return;
+
+ t->set_priority_called = true;
+ t->saved_priority.sched_policy = task->policy;
+ t->saved_priority.prio = task->normal_prio;
+
+ if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) {
+ desired_prio.prio = NICE_TO_PRIO(0);
+ desired_prio.sched_policy = SCHED_NORMAL;
+ } else {
+ desired_prio.prio = t->priority.prio;
+ desired_prio.sched_policy = t->priority.sched_policy;
+ }
+
+ if (node_prio.prio < t->priority.prio ||
+ (node_prio.prio == t->priority.prio &&
+ node_prio.sched_policy == SCHED_FIFO)) {
+ /*
+ * In case the minimum priority on the node is
+ * higher (lower value), use that priority. If
+ * the priority is the same, but the node uses
+ * SCHED_FIFO, prefer SCHED_FIFO, since it can
+ * run unbounded, unlike SCHED_RR.
+ */
+ desired_prio = node_prio;
+ }
+
+ binder_set_priority(task, desired_prio);
+}
+
+static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
+ binder_uintptr_t ptr)
{
struct rb_node *n = proc->nodes.rb_node;
struct binder_node *node;
+ BUG_ON(!spin_is_locked(&proc->inner_lock));
+
while (n) {
node = rb_entry(n, struct binder_node, rb_node);
@@ -984,21 +1232,46 @@
n = n->rb_left;
else if (ptr > node->ptr)
n = n->rb_right;
- else
+ else {
+ /*
+ * take an implicit weak reference
+ * to ensure node stays alive until
+ * call to binder_put_node()
+ */
+ binder_inc_node_tmpref_ilocked(node);
return node;
+ }
}
return NULL;
}
-static struct binder_node *binder_new_node(struct binder_proc *proc,
- binder_uintptr_t ptr,
- binder_uintptr_t cookie)
+static struct binder_node *binder_get_node(struct binder_proc *proc,
+ binder_uintptr_t ptr)
+{
+ struct binder_node *node;
+
+ binder_inner_proc_lock(proc);
+ node = binder_get_node_ilocked(proc, ptr);
+ binder_inner_proc_unlock(proc);
+ return node;
+}
+
+static struct binder_node *binder_init_node_ilocked(
+ struct binder_proc *proc,
+ struct binder_node *new_node,
+ struct flat_binder_object *fp)
{
struct rb_node **p = &proc->nodes.rb_node;
struct rb_node *parent = NULL;
struct binder_node *node;
+ binder_uintptr_t ptr = fp ? fp->binder : 0;
+ binder_uintptr_t cookie = fp ? fp->cookie : 0;
+ __u32 flags = fp ? fp->flags : 0;
+ s8 priority;
+ BUG_ON(!spin_is_locked(&proc->inner_lock));
while (*p) {
+
parent = *p;
node = rb_entry(parent, struct binder_node, rb_node);
@@ -1006,33 +1279,78 @@
p = &(*p)->rb_left;
else if (ptr > node->ptr)
p = &(*p)->rb_right;
- else
- return NULL;
+ else {
+ /*
+ * A matching node is already in
+ * the rb tree. Abandon the init
+ * and return it.
+ */
+ binder_inc_node_tmpref_ilocked(node);
+ return node;
+ }
}
-
- node = kzalloc_preempt_disabled(sizeof(*node));
- if (node == NULL)
- return NULL;
+ node = new_node;
binder_stats_created(BINDER_STAT_NODE);
+ node->tmp_refs++;
rb_link_node(&node->rb_node, parent, p);
rb_insert_color(&node->rb_node, &proc->nodes);
- node->debug_id = ++binder_last_id;
+ node->debug_id = atomic_inc_return(&binder_last_id);
node->proc = proc;
node->ptr = ptr;
node->cookie = cookie;
node->work.type = BINDER_WORK_NODE;
+ priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
+ node->sched_policy = (flags & FLAT_BINDER_FLAG_PRIORITY_MASK) >>
+ FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
+ node->min_priority = to_kernel_prio(node->sched_policy, priority);
+ node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
+ node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT);
+ spin_lock_init(&node->lock);
INIT_LIST_HEAD(&node->work.entry);
INIT_LIST_HEAD(&node->async_todo);
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"%d:%d node %d u%016llx c%016llx created\n",
proc->pid, current->pid, node->debug_id,
(u64)node->ptr, (u64)node->cookie);
+
return node;
}
-static int binder_inc_node(struct binder_node *node, int strong, int internal,
- struct list_head *target_list)
+static struct binder_node *binder_new_node(struct binder_proc *proc,
+ struct flat_binder_object *fp)
{
+ struct binder_node *node;
+ struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
+
+ if (!new_node)
+ return NULL;
+ binder_inner_proc_lock(proc);
+ node = binder_init_node_ilocked(proc, new_node, fp);
+ binder_inner_proc_unlock(proc);
+ if (node != new_node)
+ /*
+ * The node was already added by another thread
+ */
+ kfree(new_node);
+
+ return node;
+}
+
+static void binder_free_node(struct binder_node *node)
+{
+ kfree(node);
+ binder_stats_deleted(BINDER_STAT_NODE);
+}
+
+static int binder_inc_node_nilocked(struct binder_node *node, int strong,
+ int internal,
+ struct list_head *target_list)
+{
+ struct binder_proc *proc = node->proc;
+
+ BUG_ON(!spin_is_locked(&node->lock));
+ if (proc)
+ BUG_ON(!spin_is_locked(&proc->inner_lock));
if (strong) {
if (internal) {
if (target_list == NULL &&
@@ -1049,8 +1367,8 @@
} else
node->local_strong_refs++;
if (!node->has_strong_ref && target_list) {
- list_del_init(&node->work.entry);
- list_add_tail(&node->work.entry, target_list);
+ binder_dequeue_work_ilocked(&node->work);
+ binder_enqueue_work_ilocked(&node->work, target_list);
}
} else {
if (!internal)
@@ -1061,58 +1379,169 @@
node->debug_id);
return -EINVAL;
}
- list_add_tail(&node->work.entry, target_list);
+ binder_enqueue_work_ilocked(&node->work, target_list);
}
}
return 0;
}
-static int binder_dec_node(struct binder_node *node, int strong, int internal)
+static int binder_inc_node(struct binder_node *node, int strong, int internal,
+ struct list_head *target_list)
{
+ int ret;
+
+ binder_node_inner_lock(node);
+ ret = binder_inc_node_nilocked(node, strong, internal, target_list);
+ binder_node_inner_unlock(node);
+
+ return ret;
+}
+
+static bool binder_dec_node_nilocked(struct binder_node *node,
+ int strong, int internal)
+{
+ struct binder_proc *proc = node->proc;
+
+ BUG_ON(!spin_is_locked(&node->lock));
+ if (proc)
+ BUG_ON(!spin_is_locked(&proc->inner_lock));
if (strong) {
if (internal)
node->internal_strong_refs--;
else
node->local_strong_refs--;
if (node->local_strong_refs || node->internal_strong_refs)
- return 0;
+ return false;
} else {
if (!internal)
node->local_weak_refs--;
- if (node->local_weak_refs || !hlist_empty(&node->refs))
- return 0;
+ if (node->local_weak_refs || node->tmp_refs ||
+ !hlist_empty(&node->refs))
+ return false;
}
- if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
+
+ if (proc && (node->has_strong_ref || node->has_weak_ref)) {
if (list_empty(&node->work.entry)) {
- list_add_tail(&node->work.entry, &node->proc->todo);
- wake_up_interruptible(&node->proc->wait);
+ binder_enqueue_work_ilocked(&node->work, &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
}
} else {
if (hlist_empty(&node->refs) && !node->local_strong_refs &&
- !node->local_weak_refs) {
- list_del_init(&node->work.entry);
- if (node->proc) {
- rb_erase(&node->rb_node, &node->proc->nodes);
+ !node->local_weak_refs && !node->tmp_refs) {
+ if (proc) {
+ binder_dequeue_work_ilocked(&node->work);
+ rb_erase(&node->rb_node, &proc->nodes);
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"refless node %d deleted\n",
node->debug_id);
} else {
+ BUG_ON(!list_empty(&node->work.entry));
+ spin_lock(&binder_dead_nodes_lock);
+ /*
+ * tmp_refs could have changed so
+ * check it again
+ */
+ if (node->tmp_refs) {
+ spin_unlock(&binder_dead_nodes_lock);
+ return false;
+ }
hlist_del(&node->dead_node);
+ spin_unlock(&binder_dead_nodes_lock);
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"dead node %d deleted\n",
node->debug_id);
}
- kfree(node);
- binder_stats_deleted(BINDER_STAT_NODE);
+ return true;
}
}
-
- return 0;
+ return false;
}
+static void binder_dec_node(struct binder_node *node, int strong, int internal)
+{
+ bool free_node;
-static struct binder_ref *binder_get_ref(struct binder_proc *proc,
- u32 desc, bool need_strong_ref)
+ binder_node_inner_lock(node);
+ free_node = binder_dec_node_nilocked(node, strong, internal);
+ binder_node_inner_unlock(node);
+ if (free_node)
+ binder_free_node(node);
+}
+
+static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
+{
+ /*
+ * No call to binder_inc_node() is needed since we
+ * don't need to inform userspace of any changes to
+ * tmp_refs
+ */
+ node->tmp_refs++;
+}
+
+/**
+ * binder_inc_node_tmpref() - take a temporary reference on node
+ * @node: node to reference
+ *
+ * Take reference on node to prevent the node from being freed
+ * while referenced only by a local variable. The inner lock is
+ * needed to serialize with the node work on the queue (which
+ * isn't needed after the node is dead). If the node is dead
+ * (node->proc is NULL), use binder_dead_nodes_lock to protect
+ * node->tmp_refs against dead-node-only cases where the node
+ * lock cannot be acquired (eg traversing the dead node list to
+ * print nodes)
+ */
+static void binder_inc_node_tmpref(struct binder_node *node)
+{
+ binder_node_lock(node);
+ if (node->proc)
+ binder_inner_proc_lock(node->proc);
+ else
+ spin_lock(&binder_dead_nodes_lock);
+ binder_inc_node_tmpref_ilocked(node);
+ if (node->proc)
+ binder_inner_proc_unlock(node->proc);
+ else
+ spin_unlock(&binder_dead_nodes_lock);
+ binder_node_unlock(node);
+}
+
+/**
+ * binder_dec_node_tmpref() - remove a temporary reference on node
+ * @node: node to reference
+ *
+ * Release temporary reference on node taken via binder_inc_node_tmpref()
+ */
+static void binder_dec_node_tmpref(struct binder_node *node)
+{
+ bool free_node;
+
+ binder_node_inner_lock(node);
+ if (!node->proc)
+ spin_lock(&binder_dead_nodes_lock);
+ node->tmp_refs--;
+ BUG_ON(node->tmp_refs < 0);
+ if (!node->proc)
+ spin_unlock(&binder_dead_nodes_lock);
+ /*
+ * Call binder_dec_node() to check if all refcounts are 0
+ * and cleanup is needed. Calling with strong=0 and internal=1
+ * causes no actual reference to be released in binder_dec_node().
+ * If that changes, a change is needed here too.
+ */
+ free_node = binder_dec_node_nilocked(node, 0, 1);
+ binder_node_inner_unlock(node);
+ if (free_node)
+ binder_free_node(node);
+}
+
+static void binder_put_node(struct binder_node *node)
+{
+ binder_dec_node_tmpref(node);
+}
+
+static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
+ u32 desc, bool need_strong_ref)
{
struct rb_node *n = proc->refs_by_desc.rb_node;
struct binder_ref *ref;
@@ -1120,11 +1549,11 @@
while (n) {
ref = rb_entry(n, struct binder_ref, rb_node_desc);
- if (desc < ref->desc) {
+ if (desc < ref->data.desc) {
n = n->rb_left;
- } else if (desc > ref->desc) {
+ } else if (desc > ref->data.desc) {
n = n->rb_right;
- } else if (need_strong_ref && !ref->strong) {
+ } else if (need_strong_ref && !ref->data.strong) {
binder_user_error("tried to use weak ref as strong ref\n");
return NULL;
} else {
@@ -1134,14 +1563,34 @@
return NULL;
}
-static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
- struct binder_node *node)
+/**
+ * binder_get_ref_for_node_olocked() - get the ref associated with given node
+ * @proc: binder_proc that owns the ref
+ * @node: binder_node of target
+ * @new_ref: newly allocated binder_ref to be initialized or %NULL
+ *
+ * Look up the ref for the given node and return it if it exists
+ *
+ * If it doesn't exist and the caller provides a newly allocated
+ * ref, initialize the fields of the newly allocated ref and insert
+ * into the given proc rb_trees and node refs list.
+ *
+ * Return: the ref for node. It is possible that another thread
+ * allocated/initialized the ref first in which case the
+ * returned ref would be different than the passed-in
+ * new_ref. new_ref must be kfree'd by the caller in
+ * this case.
+ */
+static struct binder_ref *binder_get_ref_for_node_olocked(
+ struct binder_proc *proc,
+ struct binder_node *node,
+ struct binder_ref *new_ref)
{
- struct rb_node *n;
+ struct binder_context *context = proc->context;
struct rb_node **p = &proc->refs_by_node.rb_node;
struct rb_node *parent = NULL;
- struct binder_ref *ref, *new_ref;
- struct binder_context *context = proc->context;
+ struct binder_ref *ref;
+ struct rb_node *n;
while (*p) {
parent = *p;
@@ -1154,22 +1603,22 @@
else
return ref;
}
- new_ref = kzalloc_preempt_disabled(sizeof(*ref));
- if (new_ref == NULL)
+ if (!new_ref)
return NULL;
+
binder_stats_created(BINDER_STAT_REF);
- new_ref->debug_id = ++binder_last_id;
+ new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
new_ref->proc = proc;
new_ref->node = node;
rb_link_node(&new_ref->rb_node_node, parent, p);
rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
- new_ref->desc = (node == context->binder_context_mgr_node) ? 0 : 1;
+ new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
ref = rb_entry(n, struct binder_ref, rb_node_desc);
- if (ref->desc > new_ref->desc)
+ if (ref->data.desc > new_ref->data.desc)
break;
- new_ref->desc = ref->desc + 1;
+ new_ref->data.desc = ref->data.desc + 1;
}
p = &proc->refs_by_desc.rb_node;
@@ -1177,121 +1626,423 @@
parent = *p;
ref = rb_entry(parent, struct binder_ref, rb_node_desc);
- if (new_ref->desc < ref->desc)
+ if (new_ref->data.desc < ref->data.desc)
p = &(*p)->rb_left;
- else if (new_ref->desc > ref->desc)
+ else if (new_ref->data.desc > ref->data.desc)
p = &(*p)->rb_right;
else
BUG();
}
rb_link_node(&new_ref->rb_node_desc, parent, p);
rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
- if (node) {
- hlist_add_head(&new_ref->node_entry, &node->refs);
- binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d new ref %d desc %d for node %d\n",
- proc->pid, new_ref->debug_id, new_ref->desc,
- node->debug_id);
- } else {
- binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d new ref %d desc %d for dead node\n",
- proc->pid, new_ref->debug_id, new_ref->desc);
- }
+ binder_node_lock(node);
+ hlist_add_head(&new_ref->node_entry, &node->refs);
+
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "%d new ref %d desc %d for node %d\n",
+ proc->pid, new_ref->data.debug_id, new_ref->data.desc,
+ node->debug_id);
+ binder_node_unlock(node);
return new_ref;
}
-static void binder_delete_ref(struct binder_ref *ref)
+static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
+ bool delete_node = false;
+
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"%d delete ref %d desc %d for node %d\n",
- ref->proc->pid, ref->debug_id, ref->desc,
+ ref->proc->pid, ref->data.debug_id, ref->data.desc,
ref->node->debug_id);
rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
- if (ref->strong)
- binder_dec_node(ref->node, 1, 1);
+
+ binder_node_inner_lock(ref->node);
+ if (ref->data.strong)
+ binder_dec_node_nilocked(ref->node, 1, 1);
+
hlist_del(&ref->node_entry);
- binder_dec_node(ref->node, 0, 1);
+ delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
+ binder_node_inner_unlock(ref->node);
+ /*
+ * Clear ref->node unless we want the caller to free the node
+ */
+ if (!delete_node) {
+ /*
+ * The caller uses ref->node to determine
+ * whether the node needs to be freed. Clear
+ * it since the node is still alive.
+ */
+ ref->node = NULL;
+ }
+
if (ref->death) {
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"%d delete ref %d desc %d has death notification\n",
- ref->proc->pid, ref->debug_id, ref->desc);
- list_del(&ref->death->work.entry);
- kfree(ref->death);
+ ref->proc->pid, ref->data.debug_id,
+ ref->data.desc);
+ binder_dequeue_work(ref->proc, &ref->death->work);
binder_stats_deleted(BINDER_STAT_DEATH);
}
- kfree(ref);
binder_stats_deleted(BINDER_STAT_REF);
}
-static int binder_inc_ref(struct binder_ref *ref, int strong,
- struct list_head *target_list)
+/**
+ * binder_inc_ref_olocked() - increment the ref for given handle
+ * @ref: ref to be incremented
+ * @strong: if true, strong increment, else weak
+ * @target_list: list to queue node work on
+ *
+ * Increment the ref. @ref->proc->outer_lock must be held on entry
+ *
+ * Return: 0, if successful, else errno
+ */
+static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
+ struct list_head *target_list)
{
int ret;
if (strong) {
- if (ref->strong == 0) {
+ if (ref->data.strong == 0) {
ret = binder_inc_node(ref->node, 1, 1, target_list);
if (ret)
return ret;
}
- ref->strong++;
+ ref->data.strong++;
} else {
- if (ref->weak == 0) {
+ if (ref->data.weak == 0) {
ret = binder_inc_node(ref->node, 0, 1, target_list);
if (ret)
return ret;
}
- ref->weak++;
+ ref->data.weak++;
}
return 0;
}
-
-static int binder_dec_ref(struct binder_ref *ref, int strong)
+/**
+ * binder_dec_ref_olocked() - dec the ref for given handle
+ * @ref: ref to be decremented
+ * @strong: if true, strong decrement, else weak
+ *
+ * Decrement the ref. The ref->proc->outer_lock must be held on entry.
+ *
+ * Return: true if ref is cleaned up and ready to be freed
+ */
+static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
if (strong) {
- if (ref->strong == 0) {
+ if (ref->data.strong == 0) {
binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
- ref->proc->pid, ref->debug_id,
- ref->desc, ref->strong, ref->weak);
- return -EINVAL;
+ ref->proc->pid, ref->data.debug_id,
+ ref->data.desc, ref->data.strong,
+ ref->data.weak);
+ return false;
}
- ref->strong--;
- if (ref->strong == 0) {
- int ret;
-
- ret = binder_dec_node(ref->node, strong, 1);
- if (ret)
- return ret;
- }
+ ref->data.strong--;
+ if (ref->data.strong == 0)
+ binder_dec_node(ref->node, strong, 1);
} else {
- if (ref->weak == 0) {
+ if (ref->data.weak == 0) {
binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
- ref->proc->pid, ref->debug_id,
- ref->desc, ref->strong, ref->weak);
- return -EINVAL;
+ ref->proc->pid, ref->data.debug_id,
+ ref->data.desc, ref->data.strong,
+ ref->data.weak);
+ return false;
}
- ref->weak--;
+ ref->data.weak--;
}
- if (ref->strong == 0 && ref->weak == 0)
- binder_delete_ref(ref);
- return 0;
+ if (ref->data.strong == 0 && ref->data.weak == 0) {
+ binder_cleanup_ref_olocked(ref);
+ return true;
+ }
+ return false;
}
-static void binder_pop_transaction(struct binder_thread *target_thread,
- struct binder_transaction *t)
+/**
+ * binder_get_node_from_ref() - get the node from the given proc/desc
+ * @proc: proc containing the ref
+ * @desc: the handle associated with the ref
+ * @need_strong_ref: if true, only return node if ref is strong
+ * @rdata: the id/refcount data for the ref
+ *
+ * Given a proc and ref handle, return the associated binder_node
+ *
+ * Return: a binder_node (with tmpref incremented) or NULL if the ref was not found or was weak when a strong ref was required
+ */
+static struct binder_node *binder_get_node_from_ref(
+ struct binder_proc *proc,
+ u32 desc, bool need_strong_ref,
+ struct binder_ref_data *rdata)
{
- if (target_thread) {
- BUG_ON(target_thread->transaction_stack != t);
- BUG_ON(target_thread->transaction_stack->from != target_thread);
- target_thread->transaction_stack =
- target_thread->transaction_stack->from_parent;
- t->from = NULL;
+ struct binder_node *node;
+ struct binder_ref *ref;
+
+ binder_proc_lock(proc);
+ ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
+ if (!ref)
+ goto err_no_ref;
+ node = ref->node;
+ /*
+ * Take an implicit reference on the node to ensure
+ * it stays alive until the call to binder_put_node()
+ */
+ binder_inc_node_tmpref(node);
+ if (rdata)
+ *rdata = ref->data;
+ binder_proc_unlock(proc);
+
+ return node;
+
+err_no_ref:
+ binder_proc_unlock(proc);
+ return NULL;
+}
+
+/**
+ * binder_free_ref() - free the binder_ref
+ * @ref: ref to free
+ *
+ * Free the binder_ref. Free the binder_node indicated by ref->node
+ * (if non-NULL) and the binder_ref_death indicated by ref->death.
+ */
+static void binder_free_ref(struct binder_ref *ref)
+{
+ if (ref->node)
+ binder_free_node(ref->node);
+ kfree(ref->death);
+ kfree(ref);
+}
+
+/**
+ * binder_update_ref_for_handle() - inc/dec the ref for given handle
+ * @proc: proc containing the ref
+ * @desc: the handle associated with the ref
+ * @increment: true=inc reference, false=dec reference
+ * @strong: true=strong reference, false=weak reference
+ * @rdata: the id/refcount data for the ref
+ *
+ * Given a proc and ref handle, increment or decrement the ref
+ * according to "increment" arg.
+ *
+ * Return: 0 if successful, else errno
+ */
+static int binder_update_ref_for_handle(struct binder_proc *proc,
+ uint32_t desc, bool increment, bool strong,
+ struct binder_ref_data *rdata)
+{
+ int ret = 0;
+ struct binder_ref *ref;
+ bool delete_ref = false;
+
+ binder_proc_lock(proc);
+ ref = binder_get_ref_olocked(proc, desc, strong);
+ if (!ref) {
+ ret = -EINVAL;
+ goto err_no_ref;
}
- t->need_reply = 0;
+ if (increment)
+ ret = binder_inc_ref_olocked(ref, strong, NULL);
+ else
+ delete_ref = binder_dec_ref_olocked(ref, strong);
+
+ if (rdata)
+ *rdata = ref->data;
+ binder_proc_unlock(proc);
+
+ if (delete_ref)
+ binder_free_ref(ref);
+ return ret;
+
+err_no_ref:
+ binder_proc_unlock(proc);
+ return ret;
+}
+
+/**
+ * binder_dec_ref_for_handle() - dec the ref for given handle
+ * @proc: proc containing the ref
+ * @desc: the handle associated with the ref
+ * @strong: true=strong reference, false=weak reference
+ * @rdata: the id/refcount data for the ref
+ *
+ * Just calls binder_update_ref_for_handle() to decrement the ref.
+ *
+ * Return: 0 if successful, else errno
+ */
+static int binder_dec_ref_for_handle(struct binder_proc *proc,
+ uint32_t desc, bool strong, struct binder_ref_data *rdata)
+{
+ return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
+}
+
+
+/**
+ * binder_inc_ref_for_node() - increment the ref for given proc/node
+ * @proc: proc containing the ref
+ * @node: target node
+ * @strong: true=strong reference, false=weak reference
+ * @target_list: worklist to use if node is incremented
+ * @rdata: the id/refcount data for the ref
+ *
+ * Given a proc and node, increment the ref. Create the ref if it
+ * doesn't already exist
+ *
+ * Return: 0 if successful, else errno
+ */
+static int binder_inc_ref_for_node(struct binder_proc *proc,
+ struct binder_node *node,
+ bool strong,
+ struct list_head *target_list,
+ struct binder_ref_data *rdata)
+{
+ struct binder_ref *ref;
+ struct binder_ref *new_ref = NULL;
+ int ret = 0;
+
+ binder_proc_lock(proc);
+ ref = binder_get_ref_for_node_olocked(proc, node, NULL);
+ if (!ref) {
+ binder_proc_unlock(proc);
+ new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+ if (!new_ref)
+ return -ENOMEM;
+ binder_proc_lock(proc);
+ ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
+ }
+ ret = binder_inc_ref_olocked(ref, strong, target_list);
+ *rdata = ref->data;
+ binder_proc_unlock(proc);
+ if (new_ref && ref != new_ref)
+ /*
+ * Another thread created the ref first so
+ * free the one we allocated
+ */
+ kfree(new_ref);
+ return ret;
+}
+
+static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
+ struct binder_transaction *t)
+{
+ BUG_ON(!target_thread);
+ BUG_ON(!spin_is_locked(&target_thread->proc->inner_lock));
+ BUG_ON(target_thread->transaction_stack != t);
+ BUG_ON(target_thread->transaction_stack->from != target_thread);
+ target_thread->transaction_stack =
+ target_thread->transaction_stack->from_parent;
+ t->from = NULL;
+}
+
+/**
+ * binder_thread_dec_tmpref() - decrement thread->tmp_ref
+ * @thread: thread to decrement
+ *
+ * A thread needs to be kept alive while being used to create or
+ * handle a transaction. binder_get_txn_from() is used to safely
+ * extract t->from from a binder_transaction and keep the thread
+ * indicated by t->from from being freed. When done with that
+ * binder_thread, this function is called to decrement the
+ * tmp_ref and free if appropriate (thread has been released
+ * and no transaction being processed by the driver)
+ */
+static void binder_thread_dec_tmpref(struct binder_thread *thread)
+{
+ /*
+ * atomic is used to protect the counter value while
+ * it cannot reach zero or thread->is_dead is false
+ */
+ binder_inner_proc_lock(thread->proc);
+ atomic_dec(&thread->tmp_ref);
+ if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
+ binder_inner_proc_unlock(thread->proc);
+ binder_free_thread(thread);
+ return;
+ }
+ binder_inner_proc_unlock(thread->proc);
+}
+
+/**
+ * binder_proc_dec_tmpref() - decrement proc->tmp_ref
+ * @proc: proc to decrement
+ *
+ * A binder_proc needs to be kept alive while being used to create or
+ * handle a transaction. proc->tmp_ref is incremented when
+ * creating a new transaction or the binder_proc is currently in-use
+ * by threads that are being released. When done with the binder_proc,
+ * this function is called to decrement the counter and free the
+ * proc if appropriate (proc has been released, all threads have
+ * been released and not currently in-use to process a transaction).
+ */
+static void binder_proc_dec_tmpref(struct binder_proc *proc)
+{
+ binder_inner_proc_lock(proc);
+ proc->tmp_ref--;
+ if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
+ !proc->tmp_ref) {
+ binder_inner_proc_unlock(proc);
+ binder_free_proc(proc);
+ return;
+ }
+ binder_inner_proc_unlock(proc);
+}
+
+/**
+ * binder_get_txn_from() - safely extract the "from" thread in transaction
+ * @t: binder transaction for t->from
+ *
+ * Atomically return the "from" thread and increment the tmp_ref
+ * count for the thread to ensure it stays alive until
+ * binder_thread_dec_tmpref() is called.
+ *
+ * Return: the value of t->from
+ */
+static struct binder_thread *binder_get_txn_from(
+ struct binder_transaction *t)
+{
+ struct binder_thread *from;
+
+ spin_lock(&t->lock);
+ from = t->from;
+ if (from)
+ atomic_inc(&from->tmp_ref);
+ spin_unlock(&t->lock);
+ return from;
+}
+
+/**
+ * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
+ * @t: binder transaction for t->from
+ *
+ * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
+ * to guarantee that the thread cannot be released while operating on it.
+ * The caller must call binder_inner_proc_unlock() to release the inner lock
+ * as well as call binder_dec_thread_txn() to release the reference.
+ *
+ * Return: the value of t->from
+ */
+static struct binder_thread *binder_get_txn_from_and_acq_inner(
+ struct binder_transaction *t)
+{
+ struct binder_thread *from;
+
+ from = binder_get_txn_from(t);
+ if (!from)
+ return NULL;
+ binder_inner_proc_lock(from->proc);
+ if (t->from) {
+ BUG_ON(from != t->from);
+ return from;
+ }
+ binder_inner_proc_unlock(from->proc);
+ binder_thread_dec_tmpref(from);
+ return NULL;
+}
+
+static void binder_free_transaction(struct binder_transaction *t)
+{
if (t->buffer)
t->buffer->transaction = NULL;
kfree(t);
@@ -1306,30 +2057,28 @@
BUG_ON(t->flags & TF_ONE_WAY);
while (1) {
- target_thread = t->from;
+ target_thread = binder_get_txn_from_and_acq_inner(t);
if (target_thread) {
- if (target_thread->return_error != BR_OK &&
- target_thread->return_error2 == BR_OK) {
- target_thread->return_error2 =
- target_thread->return_error;
- target_thread->return_error = BR_OK;
- }
- if (target_thread->return_error == BR_OK) {
- binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
- "send failed reply for transaction %d to %d:%d\n",
- t->debug_id,
- target_thread->proc->pid,
- target_thread->pid);
+ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+ "send failed reply for transaction %d to %d:%d\n",
+ t->debug_id,
+ target_thread->proc->pid,
+ target_thread->pid);
- binder_pop_transaction(target_thread, t);
- target_thread->return_error = error_code;
+ binder_pop_transaction_ilocked(target_thread, t);
+ if (target_thread->reply_error.cmd == BR_OK) {
+ target_thread->reply_error.cmd = error_code;
+ binder_enqueue_work_ilocked(
+ &target_thread->reply_error.work,
+ &target_thread->todo);
wake_up_interruptible(&target_thread->wait);
} else {
- pr_err("reply failed, target thread, %d:%d, has error code %d already\n",
- target_thread->proc->pid,
- target_thread->pid,
- target_thread->return_error);
+ WARN(1, "Unexpected reply error: %u\n",
+ target_thread->reply_error.cmd);
}
+ binder_inner_proc_unlock(target_thread->proc);
+ binder_thread_dec_tmpref(target_thread);
+ binder_free_transaction(t);
return;
}
next = t->from_parent;
@@ -1338,7 +2087,7 @@
"send failed reply for transaction %d, target dead\n",
t->debug_id);
- binder_pop_transaction(target_thread, t);
+ binder_free_transaction(t);
if (next == NULL) {
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"reply failed, no target thread at root\n");
@@ -1547,24 +2296,26 @@
node->debug_id, (u64)node->ptr);
binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
0);
+ binder_put_node(node);
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
struct flat_binder_object *fp;
- struct binder_ref *ref;
+ struct binder_ref_data rdata;
+ int ret;
fp = to_flat_binder_object(hdr);
- ref = binder_get_ref(proc, fp->handle,
- hdr->type == BINDER_TYPE_HANDLE);
- if (ref == NULL) {
- pr_err("transaction release %d bad handle %d\n",
- debug_id, fp->handle);
+ ret = binder_dec_ref_for_handle(proc, fp->handle,
+ hdr->type == BINDER_TYPE_HANDLE, &rdata);
+
+ if (ret) {
+ pr_err("transaction release %d bad handle %d, ret = %d\n",
+ debug_id, fp->handle, ret);
break;
}
binder_debug(BINDER_DEBUG_TRANSACTION,
- " ref %d desc %d (node %d)\n",
- ref->debug_id, ref->desc, ref->node->debug_id);
- binder_dec_ref(ref, hdr->type == BINDER_TYPE_HANDLE);
+ " ref %d desc %d\n",
+ rdata.debug_id, rdata.desc);
} break;
case BINDER_TYPE_FD: {
@@ -1603,7 +2354,8 @@
* back to kernel address space to access it
*/
parent_buffer = parent->buffer -
- proc->user_buffer_offset;
+ binder_alloc_get_user_buffer_offset(
+ &proc->alloc);
fd_buf_size = sizeof(u32) * fda->num_fds;
if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
@@ -1635,102 +2387,122 @@
struct binder_thread *thread)
{
struct binder_node *node;
- struct binder_ref *ref;
struct binder_proc *proc = thread->proc;
struct binder_proc *target_proc = t->to_proc;
+ struct binder_ref_data rdata;
+ int ret = 0;
node = binder_get_node(proc, fp->binder);
if (!node) {
- node = binder_new_node(proc, fp->binder, fp->cookie);
+ node = binder_new_node(proc, fp);
if (!node)
return -ENOMEM;
-
- node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
- node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
}
if (fp->cookie != node->cookie) {
binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
proc->pid, thread->pid, (u64)fp->binder,
node->debug_id, (u64)fp->cookie,
(u64)node->cookie);
- return -EINVAL;
+ ret = -EINVAL;
+ goto done;
}
- if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
- return -EPERM;
+ if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
+ ret = -EPERM;
+ goto done;
+ }
- ref = binder_get_ref_for_node(target_proc, node);
- if (!ref)
- return -EINVAL;
+ ret = binder_inc_ref_for_node(target_proc, node,
+ fp->hdr.type == BINDER_TYPE_BINDER,
+ &thread->todo, &rdata);
+ if (ret)
+ goto done;
if (fp->hdr.type == BINDER_TYPE_BINDER)
fp->hdr.type = BINDER_TYPE_HANDLE;
else
fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
fp->binder = 0;
- fp->handle = ref->desc;
+ fp->handle = rdata.desc;
fp->cookie = 0;
- binder_inc_ref(ref, fp->hdr.type == BINDER_TYPE_HANDLE, &thread->todo);
- trace_binder_transaction_node_to_ref(t, node, ref);
+ trace_binder_transaction_node_to_ref(t, node, &rdata);
binder_debug(BINDER_DEBUG_TRANSACTION,
" node %d u%016llx -> ref %d desc %d\n",
node->debug_id, (u64)node->ptr,
- ref->debug_id, ref->desc);
-
- return 0;
+ rdata.debug_id, rdata.desc);
+done:
+ binder_put_node(node);
+ return ret;
}
static int binder_translate_handle(struct flat_binder_object *fp,
struct binder_transaction *t,
struct binder_thread *thread)
{
- struct binder_ref *ref;
struct binder_proc *proc = thread->proc;
struct binder_proc *target_proc = t->to_proc;
+ struct binder_node *node;
+ struct binder_ref_data src_rdata;
+ int ret = 0;
- ref = binder_get_ref(proc, fp->handle,
- fp->hdr.type == BINDER_TYPE_HANDLE);
- if (!ref) {
+ node = binder_get_node_from_ref(proc, fp->handle,
+ fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
+ if (!node) {
binder_user_error("%d:%d got transaction with invalid handle, %d\n",
proc->pid, thread->pid, fp->handle);
return -EINVAL;
}
- if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
- return -EPERM;
+ if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
+ ret = -EPERM;
+ goto done;
+ }
- if (ref->node->proc == target_proc) {
+ binder_node_lock(node);
+ if (node->proc == target_proc) {
if (fp->hdr.type == BINDER_TYPE_HANDLE)
fp->hdr.type = BINDER_TYPE_BINDER;
else
fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
- fp->binder = ref->node->ptr;
- fp->cookie = ref->node->cookie;
- binder_inc_node(ref->node, fp->hdr.type == BINDER_TYPE_BINDER,
- 0, NULL);
- trace_binder_transaction_ref_to_node(t, ref);
+ fp->binder = node->ptr;
+ fp->cookie = node->cookie;
+ if (node->proc)
+ binder_inner_proc_lock(node->proc);
+ binder_inc_node_nilocked(node,
+ fp->hdr.type == BINDER_TYPE_BINDER,
+ 0, NULL);
+ if (node->proc)
+ binder_inner_proc_unlock(node->proc);
+ trace_binder_transaction_ref_to_node(t, node, &src_rdata);
binder_debug(BINDER_DEBUG_TRANSACTION,
" ref %d desc %d -> node %d u%016llx\n",
- ref->debug_id, ref->desc, ref->node->debug_id,
- (u64)ref->node->ptr);
+ src_rdata.debug_id, src_rdata.desc, node->debug_id,
+ (u64)node->ptr);
+ binder_node_unlock(node);
} else {
- struct binder_ref *new_ref;
+ int ret;
+ struct binder_ref_data dest_rdata;
- new_ref = binder_get_ref_for_node(target_proc, ref->node);
- if (!new_ref)
- return -EINVAL;
+ binder_node_unlock(node);
+ ret = binder_inc_ref_for_node(target_proc, node,
+ fp->hdr.type == BINDER_TYPE_HANDLE,
+ NULL, &dest_rdata);
+ if (ret)
+ goto done;
fp->binder = 0;
- fp->handle = new_ref->desc;
+ fp->handle = dest_rdata.desc;
fp->cookie = 0;
- binder_inc_ref(new_ref, fp->hdr.type == BINDER_TYPE_HANDLE,
- NULL);
- trace_binder_transaction_ref_to_ref(t, ref, new_ref);
+ trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
+ &dest_rdata);
binder_debug(BINDER_DEBUG_TRANSACTION,
" ref %d desc %d -> ref %d desc %d (node %d)\n",
- ref->debug_id, ref->desc, new_ref->debug_id,
- new_ref->desc, ref->node->debug_id);
+ src_rdata.debug_id, src_rdata.desc,
+ dest_rdata.debug_id, dest_rdata.desc,
+ node->debug_id);
}
- return 0;
+done:
+ binder_put_node(node);
+ return ret;
}
static int binder_translate_fd(int fd,
@@ -1765,9 +2537,7 @@
ret = -EBADF;
goto err_fget;
}
- preempt_enable_no_resched();
ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
- preempt_disable();
if (ret < 0) {
ret = -EPERM;
goto err_security;
@@ -1823,7 +2593,8 @@
* Since the parent was already fixed up, convert it
* back to the kernel address space to access it
*/
- parent_buffer = parent->buffer - target_proc->user_buffer_offset;
+ parent_buffer = parent->buffer -
+ binder_alloc_get_user_buffer_offset(&target_proc->alloc);
fd_array = (u32 *)(parent_buffer + fda->parent_offset);
if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
binder_user_error("%d:%d parent offset not aligned correctly.\n",
@@ -1891,12 +2662,87 @@
return -EINVAL;
}
parent_buffer = (u8 *)(parent->buffer -
- target_proc->user_buffer_offset);
+ binder_alloc_get_user_buffer_offset(
+ &target_proc->alloc));
*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
return 0;
}
+/**
+ * binder_proc_transaction() - sends a transaction to a process and wakes it up
+ * @t: transaction to send
+ * @proc: process to send the transaction to
+ * @thread: thread in @proc to send the transaction to (may be NULL)
+ *
+ * This function queues a transaction to the specified process. It will try
+ * to find a thread in the target process to handle the transaction and
+ * wake it up. If no thread is found, the work is queued to the proc
+ * waitqueue.
+ *
+ * If the @thread parameter is not NULL, the transaction is always queued
+ * to the waitlist of that specific thread.
+ *
+ * Return: true if the transaction was successfully queued
+ * false if the target process or thread is dead
+ */
+static bool binder_proc_transaction(struct binder_transaction *t,
+ struct binder_proc *proc,
+ struct binder_thread *thread)
+{
+ struct list_head *target_list = NULL;
+ struct binder_node *node = t->buffer->target_node;
+ struct binder_priority node_prio;
+ bool oneway = !!(t->flags & TF_ONE_WAY);
+ bool wakeup = true;
+
+ BUG_ON(!node);
+ binder_node_lock(node);
+ node_prio.prio = node->min_priority;
+ node_prio.sched_policy = node->sched_policy;
+
+ if (oneway) {
+ BUG_ON(thread);
+ if (node->has_async_transaction) {
+ target_list = &node->async_todo;
+ wakeup = false;
+ } else {
+ node->has_async_transaction = 1;
+ }
+ }
+
+ binder_inner_proc_lock(proc);
+
+ if (proc->is_dead || (thread && thread->is_dead)) {
+ binder_inner_proc_unlock(proc);
+ binder_node_unlock(node);
+ return false;
+ }
+
+ if (!thread && !target_list)
+ thread = binder_select_thread_ilocked(proc);
+
+ if (thread) {
+ target_list = &thread->todo;
+ binder_transaction_priority(thread->task, t, node_prio,
+ node->inherit_rt);
+ } else if (!target_list) {
+ target_list = &proc->todo;
+ } else {
+ BUG_ON(target_list != &node->async_todo);
+ }
+
+ binder_enqueue_work_ilocked(&t->work, target_list);
+
+ if (wakeup)
+ binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
+
+ binder_inner_proc_unlock(proc);
+ binder_node_unlock(node);
+
+ return true;
+}
+
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply,
@@ -1908,19 +2754,21 @@
binder_size_t *offp, *off_end, *off_start;
binder_size_t off_min;
u8 *sg_bufp, *sg_buf_end;
- struct binder_proc *target_proc;
+ struct binder_proc *target_proc = NULL;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
- struct list_head *target_list;
- wait_queue_head_t *target_wait;
struct binder_transaction *in_reply_to = NULL;
struct binder_transaction_log_entry *e;
- uint32_t return_error;
+ uint32_t return_error = 0;
+ uint32_t return_error_param = 0;
+ uint32_t return_error_line = 0;
struct binder_buffer_object *last_fixup_obj = NULL;
binder_size_t last_fixup_min_off = 0;
struct binder_context *context = proc->context;
+ int t_debug_id = atomic_inc_return(&binder_last_id);
e = binder_transaction_log_add(&binder_transaction_log);
+ e->debug_id = t_debug_id;
e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
e->from_proc = proc->pid;
e->from_thread = thread->pid;
@@ -1930,29 +2778,39 @@
e->context_name = proc->context->name;
if (reply) {
+ binder_inner_proc_lock(proc);
in_reply_to = thread->transaction_stack;
if (in_reply_to == NULL) {
+ binder_inner_proc_unlock(proc);
binder_user_error("%d:%d got reply transaction with no transaction stack\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EPROTO;
+ return_error_line = __LINE__;
goto err_empty_call_stack;
}
- binder_set_nice(in_reply_to->saved_priority);
if (in_reply_to->to_thread != thread) {
+ spin_lock(&in_reply_to->lock);
binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
proc->pid, thread->pid, in_reply_to->debug_id,
in_reply_to->to_proc ?
in_reply_to->to_proc->pid : 0,
in_reply_to->to_thread ?
in_reply_to->to_thread->pid : 0);
+ spin_unlock(&in_reply_to->lock);
+ binder_inner_proc_unlock(proc);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EPROTO;
+ return_error_line = __LINE__;
in_reply_to = NULL;
goto err_bad_call_stack;
}
thread->transaction_stack = in_reply_to->to_parent;
- target_thread = in_reply_to->from;
+ binder_inner_proc_unlock(proc);
+ target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
if (target_thread == NULL) {
return_error = BR_DEAD_REPLY;
+ return_error_line = __LINE__;
goto err_dead_binder;
}
if (target_thread->transaction_stack != in_reply_to) {
@@ -1961,89 +2819,137 @@
target_thread->transaction_stack ?
target_thread->transaction_stack->debug_id : 0,
in_reply_to->debug_id);
+ binder_inner_proc_unlock(target_thread->proc);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EPROTO;
+ return_error_line = __LINE__;
in_reply_to = NULL;
target_thread = NULL;
goto err_dead_binder;
}
target_proc = target_thread->proc;
+ target_proc->tmp_ref++;
+ binder_inner_proc_unlock(target_thread->proc);
} else {
if (tr->target.handle) {
struct binder_ref *ref;
- ref = binder_get_ref(proc, tr->target.handle, true);
- if (ref == NULL) {
+ /*
+ * There must already be a strong ref
+ * on this node. If so, do a strong
+ * increment on the node to ensure it
+ * stays alive until the transaction is
+ * done.
+ */
+ binder_proc_lock(proc);
+ ref = binder_get_ref_olocked(proc, tr->target.handle,
+ true);
+ if (ref) {
+ binder_inc_node(ref->node, 1, 0, NULL);
+ target_node = ref->node;
+ }
+ binder_proc_unlock(proc);
+ if (target_node == NULL) {
binder_user_error("%d:%d got transaction to invalid handle\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_invalid_target_handle;
}
- target_node = ref->node;
} else {
+ mutex_lock(&context->context_mgr_node_lock);
target_node = context->binder_context_mgr_node;
if (target_node == NULL) {
return_error = BR_DEAD_REPLY;
+ mutex_unlock(&context->context_mgr_node_lock);
+ return_error_line = __LINE__;
goto err_no_context_mgr_node;
}
+ binder_inc_node(target_node, 1, 0, NULL);
+ mutex_unlock(&context->context_mgr_node_lock);
}
e->to_node = target_node->debug_id;
+ binder_node_lock(target_node);
target_proc = target_node->proc;
if (target_proc == NULL) {
+ binder_node_unlock(target_node);
return_error = BR_DEAD_REPLY;
+ return_error_line = __LINE__;
goto err_dead_binder;
}
+ binder_inner_proc_lock(target_proc);
+ target_proc->tmp_ref++;
+ binder_inner_proc_unlock(target_proc);
+ binder_node_unlock(target_node);
if (security_binder_transaction(proc->tsk,
target_proc->tsk) < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = -EPERM;
+ return_error_line = __LINE__;
goto err_invalid_target_handle;
}
+ binder_inner_proc_lock(proc);
if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
struct binder_transaction *tmp;
tmp = thread->transaction_stack;
if (tmp->to_thread != thread) {
+ spin_lock(&tmp->lock);
binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
proc->pid, thread->pid, tmp->debug_id,
tmp->to_proc ? tmp->to_proc->pid : 0,
tmp->to_thread ?
tmp->to_thread->pid : 0);
+ spin_unlock(&tmp->lock);
+ binder_inner_proc_unlock(proc);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EPROTO;
+ return_error_line = __LINE__;
goto err_bad_call_stack;
}
while (tmp) {
- if (tmp->from && tmp->from->proc == target_proc)
- target_thread = tmp->from;
+ struct binder_thread *from;
+
+ spin_lock(&tmp->lock);
+ from = tmp->from;
+ if (from && from->proc == target_proc) {
+ atomic_inc(&from->tmp_ref);
+ target_thread = from;
+ spin_unlock(&tmp->lock);
+ break;
+ }
+ spin_unlock(&tmp->lock);
tmp = tmp->from_parent;
}
}
+ binder_inner_proc_unlock(proc);
}
- if (target_thread) {
+ if (target_thread)
e->to_thread = target_thread->pid;
- target_list = &target_thread->todo;
- target_wait = &target_thread->wait;
- } else {
- target_list = &target_proc->todo;
- target_wait = &target_proc->wait;
- }
e->to_proc = target_proc->pid;
/* TODO: reuse incoming transaction for reply */
- t = kzalloc_preempt_disabled(sizeof(*t));
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
if (t == NULL) {
return_error = BR_FAILED_REPLY;
+ return_error_param = -ENOMEM;
+ return_error_line = __LINE__;
goto err_alloc_t_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION);
+ spin_lock_init(&t->lock);
- tcomplete = kzalloc_preempt_disabled(sizeof(*tcomplete));
+ tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
if (tcomplete == NULL) {
return_error = BR_FAILED_REPLY;
+ return_error_param = -ENOMEM;
+ return_error_line = __LINE__;
goto err_alloc_tcomplete_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
- t->debug_id = ++binder_last_id;
- e->debug_id = t->debug_id;
+ t->debug_id = t_debug_id;
if (reply)
binder_debug(BINDER_DEBUG_TRANSACTION,
@@ -2073,15 +2979,30 @@
t->to_thread = target_thread;
t->code = tr->code;
t->flags = tr->flags;
- t->priority = task_nice(current);
+ if (!(t->flags & TF_ONE_WAY) &&
+ binder_supported_policy(current->policy)) {
+ /* Inherit supported policies for synchronous transactions */
+ t->priority.sched_policy = current->policy;
+ t->priority.prio = current->normal_prio;
+ } else {
+ /* Otherwise, fall back to the default priority */
+ t->priority = target_proc->default_priority;
+ }
trace_binder_transaction(reply, t, target_node);
- t->buffer = binder_alloc_buf(target_proc, tr->data_size,
+ t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
tr->offsets_size, extra_buffers_size,
!reply && (t->flags & TF_ONE_WAY));
- if (t->buffer == NULL) {
- return_error = BR_FAILED_REPLY;
+ if (IS_ERR(t->buffer)) {
+ /*
+ * -ESRCH indicates VMA cleared. The target is dying.
+ */
+ return_error_param = PTR_ERR(t->buffer);
+ return_error = return_error_param == -ESRCH ?
+ BR_DEAD_REPLY : BR_FAILED_REPLY;
+ return_error_line = __LINE__;
+ t->buffer = NULL;
goto err_binder_alloc_buf_failed;
}
t->buffer->allow_user_free = 0;
@@ -2089,31 +3010,34 @@
t->buffer->transaction = t;
t->buffer->target_node = target_node;
trace_binder_transaction_alloc_buf(t->buffer);
- if (target_node)
- binder_inc_node(target_node, 1, 0, NULL);
-
off_start = (binder_size_t *)(t->buffer->data +
ALIGN(tr->data_size, sizeof(void *)));
offp = off_start;
- if (copy_from_user_preempt_disabled(t->buffer->data, (const void __user *)(uintptr_t)
+ if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
tr->data.ptr.buffer, tr->data_size)) {
binder_user_error("%d:%d got transaction with invalid data ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EFAULT;
+ return_error_line = __LINE__;
goto err_copy_data_failed;
}
- if (copy_from_user_preempt_disabled(offp, (const void __user *)(uintptr_t)
+ if (copy_from_user(offp, (const void __user *)(uintptr_t)
tr->data.ptr.offsets, tr->offsets_size)) {
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EFAULT;
+ return_error_line = __LINE__;
goto err_copy_data_failed;
}
if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
proc->pid, thread->pid, (u64)tr->offsets_size);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_offset;
}
if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
@@ -2121,6 +3045,8 @@
proc->pid, thread->pid,
(u64)extra_buffers_size);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_offset;
}
off_end = (void *)off_start + tr->offsets_size;
@@ -2137,6 +3063,8 @@
(u64)off_min,
(u64)t->buffer->data_size);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_offset;
}
@@ -2151,6 +3079,8 @@
ret = binder_translate_binder(fp, t, thread);
if (ret < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
goto err_translate_failed;
}
} break;
@@ -2162,6 +3092,8 @@
ret = binder_translate_handle(fp, t, thread);
if (ret < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
goto err_translate_failed;
}
} break;
@@ -2173,6 +3105,8 @@
if (target_fd < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = target_fd;
+ return_error_line = __LINE__;
goto err_translate_failed;
}
fp->pad_binder = 0;
@@ -2189,6 +3123,8 @@
binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_parent;
}
if (!binder_validate_fixup(t->buffer, off_start,
@@ -2198,12 +3134,16 @@
binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_parent;
}
ret = binder_translate_fd_array(fda, parent, t, thread,
in_reply_to);
if (ret < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
goto err_translate_failed;
}
last_fixup_obj = parent;
@@ -2219,20 +3159,24 @@
binder_user_error("%d:%d got transaction with too large buffer\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_offset;
}
- if (copy_from_user_preempt_disabled(
- sg_bufp,
- (const void __user *)(uintptr_t)
- bp->buffer, bp->length)) {
+ if (copy_from_user(sg_bufp,
+ (const void __user *)(uintptr_t)
+ bp->buffer, bp->length)) {
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
proc->pid, thread->pid);
+ return_error_param = -EFAULT;
return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
goto err_copy_data_failed;
}
/* Fixup buffer pointer to target proc address space */
bp->buffer = (uintptr_t)sg_bufp +
- target_proc->user_buffer_offset;
+ binder_alloc_get_user_buffer_offset(
+ &target_proc->alloc);
sg_bufp += ALIGN(bp->length, sizeof(u64));
ret = binder_fixup_parent(t, thread, bp, off_start,
@@ -2241,6 +3185,8 @@
last_fixup_min_off);
if (ret < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
goto err_translate_failed;
}
last_fixup_obj = bp;
@@ -2250,34 +3196,61 @@
binder_user_error("%d:%d got transaction with invalid object type, %x\n",
proc->pid, thread->pid, hdr->type);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_object_type;
}
}
+ tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
+ binder_enqueue_work(proc, tcomplete, &thread->todo);
+ t->work.type = BINDER_WORK_TRANSACTION;
+
if (reply) {
+ binder_inner_proc_lock(target_proc);
+ if (target_thread->is_dead) {
+ binder_inner_proc_unlock(target_proc);
+ goto err_dead_proc_or_thread;
+ }
BUG_ON(t->buffer->async_transaction != 0);
- binder_pop_transaction(target_thread, in_reply_to);
+ binder_pop_transaction_ilocked(target_thread, in_reply_to);
+ binder_enqueue_work_ilocked(&t->work, &target_thread->todo);
+ binder_inner_proc_unlock(target_proc);
+ wake_up_interruptible_sync(&target_thread->wait);
+ binder_restore_priority(current, in_reply_to->saved_priority);
+ binder_free_transaction(in_reply_to);
} else if (!(t->flags & TF_ONE_WAY)) {
BUG_ON(t->buffer->async_transaction != 0);
+ binder_inner_proc_lock(proc);
t->need_reply = 1;
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
+ binder_inner_proc_unlock(proc);
+ if (!binder_proc_transaction(t, target_proc, target_thread)) {
+ binder_inner_proc_lock(proc);
+ binder_pop_transaction_ilocked(thread, t);
+ binder_inner_proc_unlock(proc);
+ goto err_dead_proc_or_thread;
+ }
} else {
BUG_ON(target_node == NULL);
BUG_ON(t->buffer->async_transaction != 1);
- if (target_node->has_async_transaction) {
- target_list = &target_node->async_todo;
- target_wait = NULL;
- } else
- target_node->has_async_transaction = 1;
+ if (!binder_proc_transaction(t, target_proc, NULL))
+ goto err_dead_proc_or_thread;
}
- t->work.type = BINDER_WORK_TRANSACTION;
- list_add_tail(&t->work.entry, target_list);
- tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
- list_add_tail(&tcomplete->entry, &thread->todo);
- if (target_wait)
- wake_up_interruptible(target_wait);
+ if (target_thread)
+ binder_thread_dec_tmpref(target_thread);
+ binder_proc_dec_tmpref(target_proc);
+ /*
+ * write barrier to synchronize with initialization
+ * of log entry
+ */
+ smp_wmb();
+ WRITE_ONCE(e->debug_id_done, t_debug_id);
return;
+err_dead_proc_or_thread:
+ return_error = BR_DEAD_REPLY;
+ return_error_line = __LINE__;
err_translate_failed:
err_bad_object_type:
err_bad_offset:
@@ -2285,8 +3258,9 @@
err_copy_data_failed:
trace_binder_transaction_failed_buffer_release(t->buffer);
binder_transaction_buffer_release(target_proc, t->buffer, offp);
+ target_node = NULL;
t->buffer->transaction = NULL;
- binder_free_buf(target_proc, t->buffer);
+ binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
kfree(tcomplete);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
@@ -2299,24 +3273,50 @@
err_dead_binder:
err_invalid_target_handle:
err_no_context_mgr_node:
+ if (target_thread)
+ binder_thread_dec_tmpref(target_thread);
+ if (target_proc)
+ binder_proc_dec_tmpref(target_proc);
+ if (target_node)
+ binder_dec_node(target_node, 1, 0);
+
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
- "%d:%d transaction failed %d, size %lld-%lld\n",
- proc->pid, thread->pid, return_error,
- (u64)tr->data_size, (u64)tr->offsets_size);
+ "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
+ proc->pid, thread->pid, return_error, return_error_param,
+ (u64)tr->data_size, (u64)tr->offsets_size,
+ return_error_line);
{
struct binder_transaction_log_entry *fe;
+ e->return_error = return_error;
+ e->return_error_param = return_error_param;
+ e->return_error_line = return_error_line;
fe = binder_transaction_log_add(&binder_transaction_log_failed);
*fe = *e;
+ /*
+ * write barrier to synchronize with initialization
+ * of log entry
+ */
+ smp_wmb();
+ WRITE_ONCE(e->debug_id_done, t_debug_id);
+ WRITE_ONCE(fe->debug_id_done, t_debug_id);
}
- BUG_ON(thread->return_error != BR_OK);
+ BUG_ON(thread->return_error.cmd != BR_OK);
if (in_reply_to) {
- thread->return_error = BR_TRANSACTION_COMPLETE;
+ binder_restore_priority(current, in_reply_to->saved_priority);
+ thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
+ binder_enqueue_work(thread->proc,
+ &thread->return_error.work,
+ &thread->todo);
binder_send_failed_reply(in_reply_to, return_error);
- } else
- thread->return_error = return_error;
+ } else {
+ thread->return_error.cmd = return_error;
+ binder_enqueue_work(thread->proc,
+ &thread->return_error.work,
+ &thread->todo);
+ }
}
static int binder_thread_write(struct binder_proc *proc,
@@ -2330,15 +3330,17 @@
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
- while (ptr < end && thread->return_error == BR_OK) {
- if (get_user_preempt_disabled(cmd, (uint32_t __user *)ptr))
+ while (ptr < end && thread->return_error.cmd == BR_OK) {
+ int ret;
+
+ if (get_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
trace_binder_command(cmd);
if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
- binder_stats.bc[_IOC_NR(cmd)]++;
- proc->stats.bc[_IOC_NR(cmd)]++;
- thread->stats.bc[_IOC_NR(cmd)]++;
+ atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
+ atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
+ atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
}
switch (cmd) {
case BC_INCREFS:
@@ -2346,53 +3348,61 @@
case BC_RELEASE:
case BC_DECREFS: {
uint32_t target;
- struct binder_ref *ref;
const char *debug_string;
+ bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
+ bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
+ struct binder_ref_data rdata;
- if (get_user_preempt_disabled(target, (uint32_t __user *)ptr))
+ if (get_user(target, (uint32_t __user *)ptr))
return -EFAULT;
+
ptr += sizeof(uint32_t);
- if (target == 0 && context->binder_context_mgr_node &&
- (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
- ref = binder_get_ref_for_node(proc,
- context->binder_context_mgr_node);
- if (ref->desc != target) {
- binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
- proc->pid, thread->pid,
- ref->desc);
- }
- } else
- ref = binder_get_ref(proc, target,
- cmd == BC_ACQUIRE ||
- cmd == BC_RELEASE);
- if (ref == NULL) {
- binder_user_error("%d:%d refcount change on invalid ref %d\n",
- proc->pid, thread->pid, target);
- break;
+ ret = -1;
+ if (increment && !target) {
+ struct binder_node *ctx_mgr_node;
+ mutex_lock(&context->context_mgr_node_lock);
+ ctx_mgr_node = context->binder_context_mgr_node;
+ if (ctx_mgr_node)
+ ret = binder_inc_ref_for_node(
+ proc, ctx_mgr_node,
+ strong, NULL, &rdata);
+ mutex_unlock(&context->context_mgr_node_lock);
+ }
+ if (ret)
+ ret = binder_update_ref_for_handle(
+ proc, target, increment, strong,
+ &rdata);
+ if (!ret && rdata.desc != target) {
+ binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
+ proc->pid, thread->pid,
+ target, rdata.desc);
}
switch (cmd) {
case BC_INCREFS:
debug_string = "IncRefs";
- binder_inc_ref(ref, 0, NULL);
break;
case BC_ACQUIRE:
debug_string = "Acquire";
- binder_inc_ref(ref, 1, NULL);
break;
case BC_RELEASE:
debug_string = "Release";
- binder_dec_ref(ref, 1);
break;
case BC_DECREFS:
default:
debug_string = "DecRefs";
- binder_dec_ref(ref, 0);
+ break;
+ }
+ if (ret) {
+ binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
+ proc->pid, thread->pid, debug_string,
+ strong, target, ret);
break;
}
binder_debug(BINDER_DEBUG_USER_REFS,
- "%d:%d %s ref %d desc %d s %d w %d for node %d\n",
- proc->pid, thread->pid, debug_string, ref->debug_id,
- ref->desc, ref->strong, ref->weak, ref->node->debug_id);
+ "%d:%d %s ref %d desc %d s %d w %d\n",
+ proc->pid, thread->pid, debug_string,
+ rdata.debug_id, rdata.desc, rdata.strong,
+ rdata.weak);
break;
}
case BC_INCREFS_DONE:
@@ -2400,11 +3410,12 @@
binder_uintptr_t node_ptr;
binder_uintptr_t cookie;
struct binder_node *node;
+ bool free_node;
- if (get_user_preempt_disabled(node_ptr, (binder_uintptr_t __user *)ptr))
+ if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
- if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr))
+ if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
node = binder_get_node(proc, node_ptr);
@@ -2424,13 +3435,17 @@
"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
(u64)node_ptr, node->debug_id,
(u64)cookie, (u64)node->cookie);
+ binder_put_node(node);
break;
}
+ binder_node_inner_lock(node);
if (cmd == BC_ACQUIRE_DONE) {
if (node->pending_strong_ref == 0) {
binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
proc->pid, thread->pid,
node->debug_id);
+ binder_node_inner_unlock(node);
+ binder_put_node(node);
break;
}
node->pending_strong_ref = 0;
@@ -2439,16 +3454,23 @@
binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
proc->pid, thread->pid,
node->debug_id);
+ binder_node_inner_unlock(node);
+ binder_put_node(node);
break;
}
node->pending_weak_ref = 0;
}
- binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
+ free_node = binder_dec_node_nilocked(node,
+ cmd == BC_ACQUIRE_DONE, 0);
+ WARN_ON(free_node);
binder_debug(BINDER_DEBUG_USER_REFS,
- "%d:%d %s node %d ls %d lw %d\n",
+ "%d:%d %s node %d ls %d lw %d tr %d\n",
proc->pid, thread->pid,
cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
- node->debug_id, node->local_strong_refs, node->local_weak_refs);
+ node->debug_id, node->local_strong_refs,
+ node->local_weak_refs, node->tmp_refs);
+ binder_node_inner_unlock(node);
+ binder_put_node(node);
break;
}
case BC_ATTEMPT_ACQUIRE:
@@ -2462,11 +3484,12 @@
binder_uintptr_t data_ptr;
struct binder_buffer *buffer;
- if (get_user_preempt_disabled(data_ptr, (binder_uintptr_t __user *)ptr))
+ if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
- buffer = binder_buffer_lookup(proc, data_ptr);
+ buffer = binder_alloc_prepare_to_free(&proc->alloc,
+ data_ptr);
if (buffer == NULL) {
binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
proc->pid, thread->pid, (u64)data_ptr);
@@ -2488,15 +3511,25 @@
buffer->transaction = NULL;
}
if (buffer->async_transaction && buffer->target_node) {
- BUG_ON(!buffer->target_node->has_async_transaction);
- if (list_empty(&buffer->target_node->async_todo))
- buffer->target_node->has_async_transaction = 0;
+ struct binder_node *buf_node;
+ struct binder_work *w;
+
+ buf_node = buffer->target_node;
+ binder_node_inner_lock(buf_node);
+ BUG_ON(!buf_node->has_async_transaction);
+ BUG_ON(buf_node->proc != proc);
+ w = binder_dequeue_work_head_ilocked(
+ &buf_node->async_todo);
+ if (!w)
+ buf_node->has_async_transaction = 0;
else
- list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
+ binder_enqueue_work_ilocked(
+ w, &thread->todo);
+ binder_node_inner_unlock(buf_node);
}
trace_binder_transaction_buffer_release(buffer);
binder_transaction_buffer_release(proc, buffer, NULL);
- binder_free_buf(proc, buffer);
+ binder_alloc_free_buf(&proc->alloc, buffer);
break;
}
@@ -2504,8 +3537,7 @@
case BC_REPLY_SG: {
struct binder_transaction_data_sg tr;
- if (copy_from_user_preempt_disabled(&tr, ptr,
- sizeof(tr)))
+ if (copy_from_user(&tr, ptr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
binder_transaction(proc, thread, &tr.transaction_data,
@@ -2516,7 +3548,7 @@
case BC_REPLY: {
struct binder_transaction_data tr;
- if (copy_from_user_preempt_disabled(&tr, ptr, sizeof(tr)))
+ if (copy_from_user(&tr, ptr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
binder_transaction(proc, thread, &tr,
@@ -2528,6 +3560,7 @@
binder_debug(BINDER_DEBUG_THREADS,
"%d:%d BC_REGISTER_LOOPER\n",
proc->pid, thread->pid);
+ binder_inner_proc_lock(proc);
if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
thread->looper |= BINDER_LOOPER_STATE_INVALID;
binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
@@ -2541,6 +3574,7 @@
proc->requested_threads_started++;
}
thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
+ binder_inner_proc_unlock(proc);
break;
case BC_ENTER_LOOPER:
binder_debug(BINDER_DEBUG_THREADS,
@@ -2565,15 +3599,37 @@
uint32_t target;
binder_uintptr_t cookie;
struct binder_ref *ref;
- struct binder_ref_death *death;
+ struct binder_ref_death *death = NULL;
- if (get_user_preempt_disabled(target, (uint32_t __user *)ptr))
+ if (get_user(target, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
- if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr))
+ if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
- ref = binder_get_ref(proc, target, false);
+ if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
+ /*
+ * Allocate memory for death notification
+ * before taking lock
+ */
+ death = kzalloc(sizeof(*death), GFP_KERNEL);
+ if (death == NULL) {
+ WARN_ON(thread->return_error.cmd !=
+ BR_OK);
+ thread->return_error.cmd = BR_ERROR;
+ binder_enqueue_work(
+ thread->proc,
+ &thread->return_error.work,
+ &thread->todo);
+ binder_debug(
+ BINDER_DEBUG_FAILED_TRANSACTION,
+ "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
+ proc->pid, thread->pid);
+ break;
+ }
+ }
+ binder_proc_lock(proc);
+ ref = binder_get_ref_olocked(proc, target, false);
if (ref == NULL) {
binder_user_error("%d:%d %s invalid ref %d\n",
proc->pid, thread->pid,
@@ -2581,6 +3637,8 @@
"BC_REQUEST_DEATH_NOTIFICATION" :
"BC_CLEAR_DEATH_NOTIFICATION",
target);
+ binder_proc_unlock(proc);
+ kfree(death);
break;
}
@@ -2590,21 +3648,18 @@
cmd == BC_REQUEST_DEATH_NOTIFICATION ?
"BC_REQUEST_DEATH_NOTIFICATION" :
"BC_CLEAR_DEATH_NOTIFICATION",
- (u64)cookie, ref->debug_id, ref->desc,
- ref->strong, ref->weak, ref->node->debug_id);
+ (u64)cookie, ref->data.debug_id,
+ ref->data.desc, ref->data.strong,
+ ref->data.weak, ref->node->debug_id);
+ binder_node_lock(ref->node);
if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
if (ref->death) {
binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
proc->pid, thread->pid);
- break;
- }
- death = kzalloc_preempt_disabled(sizeof(*death));
- if (death == NULL) {
- thread->return_error = BR_ERROR;
- binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
- "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
- proc->pid, thread->pid);
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
+ kfree(death);
break;
}
binder_stats_created(BINDER_STAT_DEATH);
@@ -2613,17 +3668,29 @@
ref->death = death;
if (ref->node->proc == NULL) {
ref->death->work.type = BINDER_WORK_DEAD_BINDER;
- if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
- list_add_tail(&ref->death->work.entry, &thread->todo);
- } else {
- list_add_tail(&ref->death->work.entry, &proc->todo);
- wake_up_interruptible(&proc->wait);
+ if (thread->looper &
+ (BINDER_LOOPER_STATE_REGISTERED |
+ BINDER_LOOPER_STATE_ENTERED))
+ binder_enqueue_work(
+ proc,
+ &ref->death->work,
+ &thread->todo);
+ else {
+ binder_inner_proc_lock(proc);
+ binder_enqueue_work_ilocked(
+ &ref->death->work,
+ &proc->todo);
+ binder_wakeup_proc_ilocked(
+ proc);
+ binder_inner_proc_unlock(proc);
}
}
} else {
if (ref->death == NULL) {
binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
proc->pid, thread->pid);
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
break;
}
death = ref->death;
@@ -2632,33 +3699,52 @@
proc->pid, thread->pid,
(u64)death->cookie,
(u64)cookie);
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
break;
}
ref->death = NULL;
+ binder_inner_proc_lock(proc);
if (list_empty(&death->work.entry)) {
death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
- if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
- list_add_tail(&death->work.entry, &thread->todo);
- } else {
- list_add_tail(&death->work.entry, &proc->todo);
- wake_up_interruptible(&proc->wait);
+ if (thread->looper &
+ (BINDER_LOOPER_STATE_REGISTERED |
+ BINDER_LOOPER_STATE_ENTERED))
+ binder_enqueue_work_ilocked(
+ &death->work,
+ &thread->todo);
+ else {
+ binder_enqueue_work_ilocked(
+ &death->work,
+ &proc->todo);
+ binder_wakeup_proc_ilocked(
+ proc);
}
} else {
BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
}
+ binder_inner_proc_unlock(proc);
}
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
} break;
case BC_DEAD_BINDER_DONE: {
struct binder_work *w;
binder_uintptr_t cookie;
struct binder_ref_death *death = NULL;
- if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr))
+
+ if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(cookie);
- list_for_each_entry(w, &proc->delivered_death, entry) {
- struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
+ binder_inner_proc_lock(proc);
+ list_for_each_entry(w, &proc->delivered_death,
+ entry) {
+ struct binder_ref_death *tmp_death =
+ container_of(w,
+ struct binder_ref_death,
+ work);
if (tmp_death->cookie == cookie) {
death = tmp_death;
@@ -2672,21 +3758,26 @@
if (death == NULL) {
binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
proc->pid, thread->pid, (u64)cookie);
+ binder_inner_proc_unlock(proc);
break;
}
-
- list_del_init(&death->work.entry);
+ binder_dequeue_work_ilocked(&death->work);
if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
- if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
- list_add_tail(&death->work.entry, &thread->todo);
- } else {
- list_add_tail(&death->work.entry, &proc->todo);
- wake_up_interruptible(&proc->wait);
+ if (thread->looper &
+ (BINDER_LOOPER_STATE_REGISTERED |
+ BINDER_LOOPER_STATE_ENTERED))
+ binder_enqueue_work_ilocked(
+ &death->work, &thread->todo);
+ else {
+ binder_enqueue_work_ilocked(
+ &death->work,
+ &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
}
}
- }
- break;
+ binder_inner_proc_unlock(proc);
+ } break;
default:
pr_err("%d:%d unknown command %d\n",
@@ -2703,23 +3794,79 @@
{
trace_binder_return(cmd);
if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
- binder_stats.br[_IOC_NR(cmd)]++;
- proc->stats.br[_IOC_NR(cmd)]++;
- thread->stats.br[_IOC_NR(cmd)]++;
+ atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
+ atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
+ atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
}
}
-static int binder_has_proc_work(struct binder_proc *proc,
- struct binder_thread *thread)
-{
- return !list_empty(&proc->todo) ||
- (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
-}
-
static int binder_has_thread_work(struct binder_thread *thread)
{
- return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
- (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
+ return !binder_worklist_empty(thread->proc, &thread->todo) ||
+ thread->looper_need_return;
+}
+
+static int binder_put_node_cmd(struct binder_proc *proc,
+ struct binder_thread *thread,
+ void __user **ptrp,
+ binder_uintptr_t node_ptr,
+ binder_uintptr_t node_cookie,
+ int node_debug_id,
+ uint32_t cmd, const char *cmd_name)
+{
+ void __user *ptr = *ptrp;
+
+ if (put_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+
+ if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(binder_uintptr_t);
+
+ if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(binder_uintptr_t);
+
+ binder_stat_br(proc, thread, cmd);
+ binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
+ proc->pid, thread->pid, cmd_name, node_debug_id,
+ (u64)node_ptr, (u64)node_cookie);
+
+ *ptrp = ptr;
+ return 0;
+}
+
+static int binder_wait_for_work(struct binder_thread *thread,
+ bool do_proc_work)
+{
+ DEFINE_WAIT(wait);
+ struct binder_proc *proc = thread->proc;
+ int ret = 0;
+
+ freezer_do_not_count();
+ binder_inner_proc_lock(proc);
+ for (;;) {
+ prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
+ if (binder_has_work_ilocked(thread, do_proc_work))
+ break;
+ if (do_proc_work)
+ list_add(&thread->waiting_thread_node,
+ &proc->waiting_threads);
+ binder_inner_proc_unlock(proc);
+ schedule();
+ binder_inner_proc_lock(proc);
+ list_del_init(&thread->waiting_thread_node);
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ }
+ finish_wait(&thread->wait, &wait);
+ binder_inner_proc_unlock(proc);
+ freezer_count();
+
+ return ret;
}
static int binder_thread_read(struct binder_proc *proc,
@@ -2735,43 +3882,21 @@
int wait_for_proc_work;
if (*consumed == 0) {
- if (put_user_preempt_disabled(BR_NOOP, (uint32_t __user *)ptr))
+ if (put_user(BR_NOOP, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
}
retry:
- wait_for_proc_work = thread->transaction_stack == NULL &&
- list_empty(&thread->todo);
-
- if (thread->return_error != BR_OK && ptr < end) {
- if (thread->return_error2 != BR_OK) {
- if (put_user_preempt_disabled(thread->return_error2, (uint32_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(uint32_t);
- binder_stat_br(proc, thread, thread->return_error2);
- if (ptr == end)
- goto done;
- thread->return_error2 = BR_OK;
- }
- if (put_user_preempt_disabled(thread->return_error, (uint32_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(uint32_t);
- binder_stat_br(proc, thread, thread->return_error);
- thread->return_error = BR_OK;
- goto done;
- }
-
+ binder_inner_proc_lock(proc);
+ wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
+ binder_inner_proc_unlock(proc);
thread->looper |= BINDER_LOOPER_STATE_WAITING;
- if (wait_for_proc_work)
- proc->ready_threads++;
-
- binder_unlock(__func__);
trace_binder_wait_for_work(wait_for_proc_work,
!!thread->transaction_stack,
- !list_empty(&thread->todo));
+ !binder_worklist_empty(proc, &thread->todo));
if (wait_for_proc_work) {
if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED))) {
@@ -2780,24 +3905,16 @@
wait_event_interruptible(binder_user_error_wait,
binder_stop_on_user_error < 2);
}
- binder_set_nice(proc->default_priority);
- if (non_block) {
- if (!binder_has_proc_work(proc, thread))
- ret = -EAGAIN;
- } else
- ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
- } else {
- if (non_block) {
- if (!binder_has_thread_work(thread))
- ret = -EAGAIN;
- } else
- ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
+ binder_restore_priority(current, proc->default_priority);
}
- binder_lock(__func__);
+ if (non_block) {
+ if (!binder_has_work(thread, wait_for_proc_work))
+ ret = -EAGAIN;
+ } else {
+ ret = binder_wait_for_work(thread, wait_for_proc_work);
+ }
- if (wait_for_proc_work)
- proc->ready_threads--;
thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
if (ret)
@@ -2806,33 +3923,54 @@
while (1) {
uint32_t cmd;
struct binder_transaction_data tr;
- struct binder_work *w;
+ struct binder_work *w = NULL;
+ struct list_head *list = NULL;
struct binder_transaction *t = NULL;
+ struct binder_thread *t_from;
- if (!list_empty(&thread->todo)) {
- w = list_first_entry(&thread->todo, struct binder_work,
- entry);
- } else if (!list_empty(&proc->todo) && wait_for_proc_work) {
- w = list_first_entry(&proc->todo, struct binder_work,
- entry);
- } else {
+ binder_inner_proc_lock(proc);
+ if (!binder_worklist_empty_ilocked(&thread->todo))
+ list = &thread->todo;
+ else if (!binder_worklist_empty_ilocked(&proc->todo) &&
+ wait_for_proc_work)
+ list = &proc->todo;
+ else {
+ binder_inner_proc_unlock(proc);
+
/* no data added */
- if (ptr - buffer == 4 &&
- !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
+ if (ptr - buffer == 4 && !thread->looper_need_return)
goto retry;
break;
}
- if (end - ptr < sizeof(tr) + 4)
+ if (end - ptr < sizeof(tr) + 4) {
+ binder_inner_proc_unlock(proc);
break;
+ }
+ w = binder_dequeue_work_head_ilocked(list);
switch (w->type) {
case BINDER_WORK_TRANSACTION: {
+ binder_inner_proc_unlock(proc);
t = container_of(w, struct binder_transaction, work);
} break;
+ case BINDER_WORK_RETURN_ERROR: {
+ struct binder_error *e = container_of(
+ w, struct binder_error, work);
+
+ WARN_ON(e->cmd == BR_OK);
+ binder_inner_proc_unlock(proc);
+ if (put_user(e->cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ e->cmd = BR_OK;
+ ptr += sizeof(uint32_t);
+
+ binder_stat_br(proc, thread, cmd);
+ } break;
case BINDER_WORK_TRANSACTION_COMPLETE: {
+ binder_inner_proc_unlock(proc);
cmd = BR_TRANSACTION_COMPLETE;
- if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr))
+ if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
@@ -2840,112 +3978,134 @@
binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
"%d:%d BR_TRANSACTION_COMPLETE\n",
proc->pid, thread->pid);
-
- list_del(&w->entry);
kfree(w);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
} break;
case BINDER_WORK_NODE: {
struct binder_node *node = container_of(w, struct binder_node, work);
- uint32_t cmd = BR_NOOP;
- const char *cmd_name;
- int strong = node->internal_strong_refs || node->local_strong_refs;
- int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
+ int strong, weak;
+ binder_uintptr_t node_ptr = node->ptr;
+ binder_uintptr_t node_cookie = node->cookie;
+ int node_debug_id = node->debug_id;
+ int has_weak_ref;
+ int has_strong_ref;
+ void __user *orig_ptr = ptr;
- if (weak && !node->has_weak_ref) {
- cmd = BR_INCREFS;
- cmd_name = "BR_INCREFS";
+ BUG_ON(proc != node->proc);
+ strong = node->internal_strong_refs ||
+ node->local_strong_refs;
+ weak = !hlist_empty(&node->refs) ||
+ node->local_weak_refs ||
+ node->tmp_refs || strong;
+ has_strong_ref = node->has_strong_ref;
+ has_weak_ref = node->has_weak_ref;
+
+ if (weak && !has_weak_ref) {
node->has_weak_ref = 1;
node->pending_weak_ref = 1;
node->local_weak_refs++;
- } else if (strong && !node->has_strong_ref) {
- cmd = BR_ACQUIRE;
- cmd_name = "BR_ACQUIRE";
+ }
+ if (strong && !has_strong_ref) {
node->has_strong_ref = 1;
node->pending_strong_ref = 1;
node->local_strong_refs++;
- } else if (!strong && node->has_strong_ref) {
- cmd = BR_RELEASE;
- cmd_name = "BR_RELEASE";
+ }
+ if (!strong && has_strong_ref)
node->has_strong_ref = 0;
- } else if (!weak && node->has_weak_ref) {
- cmd = BR_DECREFS;
- cmd_name = "BR_DECREFS";
+ if (!weak && has_weak_ref)
node->has_weak_ref = 0;
- }
- if (cmd != BR_NOOP) {
- if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr))
- return -EFAULT;
- ptr += sizeof(uint32_t);
- if (put_user_preempt_disabled(node->ptr, (binder_uintptr_t __user *)
- (binder_uintptr_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(binder_uintptr_t);
- if (put_user_preempt_disabled(node->cookie, (binder_uintptr_t __user *)
- (binder_uintptr_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(binder_uintptr_t);
+ if (!weak && !strong) {
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "%d:%d node %d u%016llx c%016llx deleted\n",
+ proc->pid, thread->pid,
+ node_debug_id,
+ (u64)node_ptr,
+ (u64)node_cookie);
+ rb_erase(&node->rb_node, &proc->nodes);
+ binder_inner_proc_unlock(proc);
+ binder_node_lock(node);
+ /*
+ * Acquire the node lock before freeing the
+ * node to serialize with other threads that
+ * may have been holding the node lock while
+ * decrementing this node (avoids race where
+ * this thread frees while the other thread
+ * is unlocking the node after the final
+ * decrement)
+ */
+ binder_node_unlock(node);
+ binder_free_node(node);
+ } else
+ binder_inner_proc_unlock(proc);
- binder_stat_br(proc, thread, cmd);
- binder_debug(BINDER_DEBUG_USER_REFS,
- "%d:%d %s %d u%016llx c%016llx\n",
- proc->pid, thread->pid, cmd_name,
- node->debug_id,
- (u64)node->ptr, (u64)node->cookie);
- } else {
- list_del_init(&w->entry);
- if (!weak && !strong) {
- binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d:%d node %d u%016llx c%016llx deleted\n",
- proc->pid, thread->pid,
- node->debug_id,
- (u64)node->ptr,
- (u64)node->cookie);
- rb_erase(&node->rb_node, &proc->nodes);
- kfree(node);
- binder_stats_deleted(BINDER_STAT_NODE);
- } else {
- binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d:%d node %d u%016llx c%016llx state unchanged\n",
- proc->pid, thread->pid,
- node->debug_id,
- (u64)node->ptr,
- (u64)node->cookie);
- }
- }
+ if (weak && !has_weak_ref)
+ ret = binder_put_node_cmd(
+ proc, thread, &ptr, node_ptr,
+ node_cookie, node_debug_id,
+ BR_INCREFS, "BR_INCREFS");
+ if (!ret && strong && !has_strong_ref)
+ ret = binder_put_node_cmd(
+ proc, thread, &ptr, node_ptr,
+ node_cookie, node_debug_id,
+ BR_ACQUIRE, "BR_ACQUIRE");
+ if (!ret && !strong && has_strong_ref)
+ ret = binder_put_node_cmd(
+ proc, thread, &ptr, node_ptr,
+ node_cookie, node_debug_id,
+ BR_RELEASE, "BR_RELEASE");
+ if (!ret && !weak && has_weak_ref)
+ ret = binder_put_node_cmd(
+ proc, thread, &ptr, node_ptr,
+ node_cookie, node_debug_id,
+ BR_DECREFS, "BR_DECREFS");
+ if (orig_ptr == ptr)
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "%d:%d node %d u%016llx c%016llx state unchanged\n",
+ proc->pid, thread->pid,
+ node_debug_id,
+ (u64)node_ptr,
+ (u64)node_cookie);
+ if (ret)
+ return ret;
} break;
case BINDER_WORK_DEAD_BINDER:
case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
struct binder_ref_death *death;
uint32_t cmd;
+ binder_uintptr_t cookie;
death = container_of(w, struct binder_ref_death, work);
if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
else
cmd = BR_DEAD_BINDER;
- if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr))
- return -EFAULT;
- ptr += sizeof(uint32_t);
- if (put_user_preempt_disabled(death->cookie, (binder_uintptr_t __user *) ptr))
- return -EFAULT;
- ptr += sizeof(binder_uintptr_t);
- binder_stat_br(proc, thread, cmd);
+ cookie = death->cookie;
+
binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
"%d:%d %s %016llx\n",
proc->pid, thread->pid,
cmd == BR_DEAD_BINDER ?
"BR_DEAD_BINDER" :
"BR_CLEAR_DEATH_NOTIFICATION_DONE",
- (u64)death->cookie);
-
+ (u64)cookie);
if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
- list_del(&w->entry);
+ binder_inner_proc_unlock(proc);
kfree(death);
binder_stats_deleted(BINDER_STAT_DEATH);
- } else
- list_move(&w->entry, &proc->delivered_death);
+ } else {
+ binder_enqueue_work_ilocked(
+ w, &proc->delivered_death);
+ binder_inner_proc_unlock(proc);
+ }
+ if (put_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (put_user(cookie,
+ (binder_uintptr_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(binder_uintptr_t);
+ binder_stat_br(proc, thread, cmd);
if (cmd == BR_DEAD_BINDER)
goto done; /* DEAD_BINDER notifications can cause transactions */
} break;
@@ -2957,16 +4117,14 @@
BUG_ON(t->buffer == NULL);
if (t->buffer->target_node) {
struct binder_node *target_node = t->buffer->target_node;
+ struct binder_priority node_prio;
tr.target.ptr = target_node->ptr;
tr.cookie = target_node->cookie;
- t->saved_priority = task_nice(current);
- if (t->priority < target_node->min_priority &&
- !(t->flags & TF_ONE_WAY))
- binder_set_nice(t->priority);
- else if (!(t->flags & TF_ONE_WAY) ||
- t->saved_priority > target_node->min_priority)
- binder_set_nice(target_node->min_priority);
+ node_prio.sched_policy = target_node->sched_policy;
+ node_prio.prio = target_node->min_priority;
+ binder_transaction_priority(current, t, node_prio,
+ target_node->inherit_rt);
cmd = BR_TRANSACTION;
} else {
tr.target.ptr = 0;
@@ -2977,8 +4135,9 @@
tr.flags = t->flags;
tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
- if (t->from) {
- struct task_struct *sender = t->from->proc->tsk;
+ t_from = binder_get_txn_from(t);
+ if (t_from) {
+ struct task_struct *sender = t_from->proc->tsk;
tr.sender_pid = task_tgid_nr_ns(sender,
task_active_pid_ns(current));
@@ -2988,18 +4147,24 @@
tr.data_size = t->buffer->data_size;
tr.offsets_size = t->buffer->offsets_size;
- tr.data.ptr.buffer = (binder_uintptr_t)(
- (uintptr_t)t->buffer->data +
- proc->user_buffer_offset);
+ tr.data.ptr.buffer = (binder_uintptr_t)
+ ((uintptr_t)t->buffer->data +
+ binder_alloc_get_user_buffer_offset(&proc->alloc));
tr.data.ptr.offsets = tr.data.ptr.buffer +
ALIGN(t->buffer->data_size,
sizeof(void *));
- if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr))
+ if (put_user(cmd, (uint32_t __user *)ptr)) {
+ if (t_from)
+ binder_thread_dec_tmpref(t_from);
return -EFAULT;
+ }
ptr += sizeof(uint32_t);
- if (copy_to_user_preempt_disabled(ptr, &tr, sizeof(tr)))
+ if (copy_to_user(ptr, &tr, sizeof(tr))) {
+ if (t_from)
+ binder_thread_dec_tmpref(t_from);
return -EFAULT;
+ }
ptr += sizeof(tr);
trace_binder_transaction_received(t);
@@ -3009,21 +4174,22 @@
proc->pid, thread->pid,
(cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
"BR_REPLY",
- t->debug_id, t->from ? t->from->proc->pid : 0,
- t->from ? t->from->pid : 0, cmd,
+ t->debug_id, t_from ? t_from->proc->pid : 0,
+ t_from ? t_from->pid : 0, cmd,
t->buffer->data_size, t->buffer->offsets_size,
(u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
- list_del(&t->work.entry);
+ if (t_from)
+ binder_thread_dec_tmpref(t_from);
t->buffer->allow_user_free = 1;
if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
+ binder_inner_proc_lock(thread->proc);
t->to_parent = thread->transaction_stack;
t->to_thread = thread;
thread->transaction_stack = t;
+ binder_inner_proc_unlock(thread->proc);
} else {
- t->buffer->transaction = NULL;
- kfree(t);
- binder_stats_deleted(BINDER_STAT_TRANSACTION);
+ binder_free_transaction(t);
}
break;
}
@@ -3031,29 +4197,36 @@
done:
*consumed = ptr - buffer;
- if (proc->requested_threads + proc->ready_threads == 0 &&
+ binder_inner_proc_lock(proc);
+ if (proc->requested_threads == 0 &&
+ list_empty(&thread->proc->waiting_threads) &&
proc->requested_threads_started < proc->max_threads &&
(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
/*spawn a new thread if we leave this out */) {
proc->requested_threads++;
+ binder_inner_proc_unlock(proc);
binder_debug(BINDER_DEBUG_THREADS,
"%d:%d BR_SPAWN_LOOPER\n",
proc->pid, thread->pid);
- if (put_user_preempt_disabled(BR_SPAWN_LOOPER, (uint32_t __user *) buffer))
+ if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
return -EFAULT;
binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
- }
+ } else
+ binder_inner_proc_unlock(proc);
return 0;
}
-static void binder_release_work(struct list_head *list)
+static void binder_release_work(struct binder_proc *proc,
+ struct list_head *list)
{
struct binder_work *w;
- while (!list_empty(list)) {
- w = list_first_entry(list, struct binder_work, entry);
- list_del_init(&w->entry);
+ while (1) {
+ w = binder_dequeue_work_head(proc, list);
+ if (!w)
+ return;
+
switch (w->type) {
case BINDER_WORK_TRANSACTION: {
struct binder_transaction *t;
@@ -3066,11 +4239,17 @@
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
"undelivered transaction %d\n",
t->debug_id);
- t->buffer->transaction = NULL;
- kfree(t);
- binder_stats_deleted(BINDER_STAT_TRANSACTION);
+ binder_free_transaction(t);
}
} break;
+ case BINDER_WORK_RETURN_ERROR: {
+ struct binder_error *e = container_of(
+ w, struct binder_error, work);
+
+ binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
+ "undelivered TRANSACTION_ERROR: %u\n",
+ e->cmd);
+ } break;
case BINDER_WORK_TRANSACTION_COMPLETE: {
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
"undelivered TRANSACTION_COMPLETE\n");
@@ -3097,7 +4276,8 @@
}
-static struct binder_thread *binder_get_thread(struct binder_proc *proc)
+static struct binder_thread *binder_get_thread_ilocked(
+ struct binder_proc *proc, struct binder_thread *new_thread)
{
struct binder_thread *thread = NULL;
struct rb_node *parent = NULL;
@@ -3112,38 +4292,102 @@
else if (current->pid > thread->pid)
p = &(*p)->rb_right;
else
- break;
+ return thread;
}
- if (*p == NULL) {
- thread = kzalloc_preempt_disabled(sizeof(*thread));
- if (thread == NULL)
+ if (!new_thread)
+ return NULL;
+ thread = new_thread;
+ binder_stats_created(BINDER_STAT_THREAD);
+ thread->proc = proc;
+ thread->pid = current->pid;
+ get_task_struct(current);
+ thread->task = current;
+ atomic_set(&thread->tmp_ref, 0);
+ init_waitqueue_head(&thread->wait);
+ INIT_LIST_HEAD(&thread->todo);
+ rb_link_node(&thread->rb_node, parent, p);
+ rb_insert_color(&thread->rb_node, &proc->threads);
+ thread->looper_need_return = true;
+ thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
+ thread->return_error.cmd = BR_OK;
+ thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
+ thread->reply_error.cmd = BR_OK;
+ INIT_LIST_HEAD(&new_thread->waiting_thread_node);
+ return thread;
+}
+
+static struct binder_thread *binder_get_thread(struct binder_proc *proc)
+{
+ struct binder_thread *thread;
+ struct binder_thread *new_thread;
+
+ binder_inner_proc_lock(proc);
+ thread = binder_get_thread_ilocked(proc, NULL);
+ binder_inner_proc_unlock(proc);
+ if (!thread) {
+ new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
+ if (new_thread == NULL)
return NULL;
- binder_stats_created(BINDER_STAT_THREAD);
- thread->proc = proc;
- thread->pid = current->pid;
- init_waitqueue_head(&thread->wait);
- INIT_LIST_HEAD(&thread->todo);
- rb_link_node(&thread->rb_node, parent, p);
- rb_insert_color(&thread->rb_node, &proc->threads);
- thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
- thread->return_error = BR_OK;
- thread->return_error2 = BR_OK;
+ binder_inner_proc_lock(proc);
+ thread = binder_get_thread_ilocked(proc, new_thread);
+ binder_inner_proc_unlock(proc);
+ if (thread != new_thread)
+ kfree(new_thread);
}
return thread;
}
-static int binder_free_thread(struct binder_proc *proc,
- struct binder_thread *thread)
+static void binder_free_proc(struct binder_proc *proc)
+{
+ BUG_ON(!list_empty(&proc->todo));
+ BUG_ON(!list_empty(&proc->delivered_death));
+ binder_alloc_deferred_release(&proc->alloc);
+ put_task_struct(proc->tsk);
+ binder_stats_deleted(BINDER_STAT_PROC);
+ kfree(proc);
+}
+
+static void binder_free_thread(struct binder_thread *thread)
+{
+ BUG_ON(!list_empty(&thread->todo));
+ binder_stats_deleted(BINDER_STAT_THREAD);
+ binder_proc_dec_tmpref(thread->proc);
+ put_task_struct(thread->task);
+ kfree(thread);
+}
+
+static int binder_thread_release(struct binder_proc *proc,
+ struct binder_thread *thread)
{
struct binder_transaction *t;
struct binder_transaction *send_reply = NULL;
int active_transactions = 0;
+ struct binder_transaction *last_t = NULL;
+ binder_inner_proc_lock(thread->proc);
+ /*
+ * take a ref on the proc so it survives
+ * after we remove this thread from proc->threads.
+ * The corresponding dec is when we actually
+ * free the thread in binder_free_thread()
+ */
+ proc->tmp_ref++;
+ /*
+ * take a ref on this thread to ensure it
+ * survives while we are releasing it
+ */
+ atomic_inc(&thread->tmp_ref);
rb_erase(&thread->rb_node, &proc->threads);
t = thread->transaction_stack;
- if (t && t->to_thread == thread)
- send_reply = t;
+ if (t) {
+ spin_lock(&t->lock);
+ if (t->to_thread == thread)
+ send_reply = t;
+ }
+ thread->is_dead = true;
+
while (t) {
+ last_t = t;
active_transactions++;
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
"release %d:%d transaction %d %s, still active\n",
@@ -3164,12 +4408,16 @@
t = t->from_parent;
} else
BUG();
+ spin_unlock(&last_t->lock);
+ if (t)
+ spin_lock(&t->lock);
}
+ binder_inner_proc_unlock(thread->proc);
+
if (send_reply)
binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
- binder_release_work(&thread->todo);
- kfree(thread);
- binder_stats_deleted(BINDER_STAT_THREAD);
+ binder_release_work(proc, &thread->todo);
+ binder_thread_dec_tmpref(thread);
return active_transactions;
}
@@ -3178,30 +4426,24 @@
{
struct binder_proc *proc = filp->private_data;
struct binder_thread *thread = NULL;
- int wait_for_proc_work;
-
- binder_lock(__func__);
+ bool wait_for_proc_work;
thread = binder_get_thread(proc);
- wait_for_proc_work = thread->transaction_stack == NULL &&
- list_empty(&thread->todo) && thread->return_error == BR_OK;
+ binder_inner_proc_lock(thread->proc);
+ thread->looper |= BINDER_LOOPER_STATE_POLL;
+ wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
- binder_unlock(__func__);
+ binder_inner_proc_unlock(thread->proc);
- if (wait_for_proc_work) {
- if (binder_has_proc_work(proc, thread))
- return POLLIN;
- poll_wait(filp, &proc->wait, wait);
- if (binder_has_proc_work(proc, thread))
- return POLLIN;
- } else {
- if (binder_has_thread_work(thread))
- return POLLIN;
- poll_wait(filp, &thread->wait, wait);
- if (binder_has_thread_work(thread))
- return POLLIN;
- }
+ if (binder_has_work(thread, wait_for_proc_work))
+ return POLLIN;
+
+ poll_wait(filp, &thread->wait, wait);
+
+ if (binder_has_thread_work(thread))
+ return POLLIN;
+
return 0;
}
@@ -3219,7 +4461,7 @@
ret = -EINVAL;
goto out;
}
- if (copy_from_user_preempt_disabled(&bwr, ubuf, sizeof(bwr))) {
+ if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
ret = -EFAULT;
goto out;
}
@@ -3237,7 +4479,7 @@
trace_binder_write_done(ret);
if (ret < 0) {
bwr.read_consumed = 0;
- if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr)))
+ if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto out;
}
@@ -3248,10 +4490,12 @@
&bwr.read_consumed,
filp->f_flags & O_NONBLOCK);
trace_binder_read_done(ret);
- if (!list_empty(&proc->todo))
- wake_up_interruptible(&proc->wait);
+ binder_inner_proc_lock(proc);
+ if (!binder_worklist_empty_ilocked(&proc->todo))
+ binder_wakeup_proc_ilocked(proc);
+ binder_inner_proc_unlock(proc);
if (ret < 0) {
- if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr)))
+ if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto out;
}
@@ -3261,7 +4505,7 @@
proc->pid, thread->pid,
(u64)bwr.write_consumed, (u64)bwr.write_size,
(u64)bwr.read_consumed, (u64)bwr.read_size);
- if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr))) {
+ if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
ret = -EFAULT;
goto out;
}
@@ -3274,9 +4518,10 @@
int ret = 0;
struct binder_proc *proc = filp->private_data;
struct binder_context *context = proc->context;
-
+ struct binder_node *new_node;
kuid_t curr_euid = current_euid();
+ mutex_lock(&context->context_mgr_node_lock);
if (context->binder_context_mgr_node) {
pr_err("BINDER_SET_CONTEXT_MGR already set\n");
ret = -EBUSY;
@@ -3297,19 +4542,48 @@
} else {
context->binder_context_mgr_uid = curr_euid;
}
- context->binder_context_mgr_node = binder_new_node(proc, 0, 0);
- if (!context->binder_context_mgr_node) {
+ new_node = binder_new_node(proc, NULL);
+ if (!new_node) {
ret = -ENOMEM;
goto out;
}
- context->binder_context_mgr_node->local_weak_refs++;
- context->binder_context_mgr_node->local_strong_refs++;
- context->binder_context_mgr_node->has_strong_ref = 1;
- context->binder_context_mgr_node->has_weak_ref = 1;
+ binder_node_lock(new_node);
+ new_node->local_weak_refs++;
+ new_node->local_strong_refs++;
+ new_node->has_strong_ref = 1;
+ new_node->has_weak_ref = 1;
+ context->binder_context_mgr_node = new_node;
+ binder_node_unlock(new_node);
+ binder_put_node(new_node);
out:
+ mutex_unlock(&context->context_mgr_node_lock);
return ret;
}
+static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
+ struct binder_node_debug_info *info) {
+ struct rb_node *n;
+ binder_uintptr_t ptr = info->ptr;
+
+ memset(info, 0, sizeof(*info));
+
+ binder_inner_proc_lock(proc);
+ for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
+ struct binder_node *node = rb_entry(n, struct binder_node,
+ rb_node);
+ if (node->ptr > ptr) {
+ info->ptr = node->ptr;
+ info->cookie = node->cookie;
+ info->has_strong_ref = node->has_strong_ref;
+ info->has_weak_ref = node->has_weak_ref;
+ break;
+ }
+ }
+ binder_inner_proc_unlock(proc);
+
+ return 0;
+}
+
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
@@ -3321,17 +4595,12 @@
/*pr_info("binder_ioctl: %d:%d %x %lx\n",
proc->pid, current->pid, cmd, arg);*/
- if (unlikely(current->mm != proc->vma_vm_mm)) {
- pr_err("current mm mismatch proc mm\n");
- return -EINVAL;
- }
trace_binder_ioctl(cmd, arg);
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret)
goto err_unlocked;
- binder_lock(__func__);
thread = binder_get_thread(proc);
if (thread == NULL) {
ret = -ENOMEM;
@@ -3344,12 +4613,19 @@
if (ret)
goto err;
break;
- case BINDER_SET_MAX_THREADS:
- if (copy_from_user_preempt_disabled(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
+ case BINDER_SET_MAX_THREADS: {
+ int max_threads;
+
+ if (copy_from_user(&max_threads, ubuf,
+ sizeof(max_threads))) {
ret = -EINVAL;
goto err;
}
+ binder_inner_proc_lock(proc);
+ proc->max_threads = max_threads;
+ binder_inner_proc_unlock(proc);
break;
+ }
case BINDER_SET_CONTEXT_MGR:
ret = binder_ioctl_set_ctx_mgr(filp);
if (ret)
@@ -3358,7 +4634,7 @@
case BINDER_THREAD_EXIT:
binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
proc->pid, thread->pid);
- binder_free_thread(proc, thread);
+ binder_thread_release(proc, thread);
thread = NULL;
break;
case BINDER_VERSION: {
@@ -3368,8 +4644,27 @@
ret = -EINVAL;
goto err;
}
- if (put_user_preempt_disabled(BINDER_CURRENT_PROTOCOL_VERSION, &ver->protocol_version)) {
- ret = -EINVAL;
+ if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
+ &ver->protocol_version)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ break;
+ }
+ case BINDER_GET_NODE_DEBUG_INFO: {
+ struct binder_node_debug_info info;
+
+ if (copy_from_user(&info, ubuf, sizeof(info))) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ ret = binder_ioctl_get_node_debug_info(proc, &info);
+ if (ret < 0)
+ goto err;
+
+ if (copy_to_user(ubuf, &info, sizeof(info))) {
+ ret = -EFAULT;
goto err;
}
break;
@@ -3381,8 +4676,7 @@
ret = 0;
err:
if (thread)
- thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
- binder_unlock(__func__);
+ thread->looper_need_return = false;
wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret && ret != -ERESTARTSYS)
pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
@@ -3411,8 +4705,7 @@
proc->pid, vma->vm_start, vma->vm_end,
(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
(unsigned long)pgprot_val(vma->vm_page_prot));
- proc->vma = NULL;
- proc->vma_vm_mm = NULL;
+ binder_alloc_vma_close(&proc->alloc);
binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}
@@ -3430,11 +4723,8 @@
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
int ret;
-
- struct vm_struct *area;
struct binder_proc *proc = filp->private_data;
const char *failure_string;
- struct binder_buffer *buffer;
if (proc->tsk != current->group_leader)
return -EINVAL;
@@ -3443,8 +4733,8 @@
vma->vm_end = vma->vm_start + SZ_4M;
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
- "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
- proc->pid, vma->vm_start, vma->vm_end,
+ "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
+ __func__, proc->pid, vma->vm_start, vma->vm_end,
(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
(unsigned long)pgprot_val(vma->vm_page_prot));
@@ -3454,77 +4744,15 @@
goto err_bad_arg;
}
vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
-
- mutex_lock(&binder_mmap_lock);
- if (proc->buffer) {
- ret = -EBUSY;
- failure_string = "already mapped";
- goto err_already_mapped;
- }
-
- area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
- if (area == NULL) {
- ret = -ENOMEM;
- failure_string = "get_vm_area";
- goto err_get_vm_area_failed;
- }
- proc->buffer = area->addr;
- proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
- mutex_unlock(&binder_mmap_lock);
-
-#ifdef CONFIG_CPU_CACHE_VIPT
- if (cache_is_vipt_aliasing()) {
- while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
- pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
- vma->vm_start += PAGE_SIZE;
- }
- }
-#endif
- proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
- if (proc->pages == NULL) {
- ret = -ENOMEM;
- failure_string = "alloc page array";
- goto err_alloc_pages_failed;
- }
- proc->buffer_size = vma->vm_end - vma->vm_start;
-
vma->vm_ops = &binder_vm_ops;
vma->vm_private_data = proc;
- /* binder_update_page_range assumes preemption is disabled */
- preempt_disable();
- ret = binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma);
- preempt_enable_no_resched();
- if (ret) {
- ret = -ENOMEM;
- failure_string = "alloc small buf";
- goto err_alloc_small_buf_failed;
- }
- buffer = proc->buffer;
- INIT_LIST_HEAD(&proc->buffers);
- list_add(&buffer->entry, &proc->buffers);
- buffer->free = 1;
- binder_insert_free_buffer(proc, buffer);
- proc->free_async_space = proc->buffer_size / 2;
- barrier();
+ ret = binder_alloc_mmap_handler(&proc->alloc, vma);
+ if (ret)
+ return ret;
proc->files = get_files_struct(current);
- proc->vma = vma;
- proc->vma_vm_mm = vma->vm_mm;
-
- /*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
- proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
return 0;
-err_alloc_small_buf_failed:
- kfree(proc->pages);
- proc->pages = NULL;
-err_alloc_pages_failed:
- mutex_lock(&binder_mmap_lock);
- vfree(proc->buffer);
- proc->buffer = NULL;
-err_get_vm_area_failed:
-err_already_mapped:
- mutex_unlock(&binder_mmap_lock);
err_bad_arg:
pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
@@ -3542,25 +4770,33 @@
proc = kzalloc(sizeof(*proc), GFP_KERNEL);
if (proc == NULL)
return -ENOMEM;
+ spin_lock_init(&proc->inner_lock);
+ spin_lock_init(&proc->outer_lock);
get_task_struct(current->group_leader);
proc->tsk = current->group_leader;
- proc->vma_vm_mm = current->group_leader->mm;
INIT_LIST_HEAD(&proc->todo);
- init_waitqueue_head(&proc->wait);
- proc->default_priority = task_nice(current);
+ if (binder_supported_policy(current->policy)) {
+ proc->default_priority.sched_policy = current->policy;
+ proc->default_priority.prio = current->normal_prio;
+ } else {
+ proc->default_priority.sched_policy = SCHED_NORMAL;
+ proc->default_priority.prio = NICE_TO_PRIO(0);
+ }
+
binder_dev = container_of(filp->private_data, struct binder_device,
miscdev);
proc->context = &binder_dev->context;
-
- binder_lock(__func__);
+ binder_alloc_init(&proc->alloc);
binder_stats_created(BINDER_STAT_PROC);
- hlist_add_head(&proc->proc_node, &binder_procs);
proc->pid = current->group_leader->pid;
INIT_LIST_HEAD(&proc->delivered_death);
+ INIT_LIST_HEAD(&proc->waiting_threads);
filp->private_data = proc;
- binder_unlock(__func__);
+ mutex_lock(&binder_procs_lock);
+ hlist_add_head(&proc->proc_node, &binder_procs);
+ mutex_unlock(&binder_procs_lock);
if (binder_debugfs_dir_entry_proc) {
char strbuf[11];
@@ -3596,16 +4832,17 @@
struct rb_node *n;
int wake_count = 0;
+ binder_inner_proc_lock(proc);
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
- thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
+ thread->looper_need_return = true;
if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
wake_up_interruptible(&thread->wait);
wake_count++;
}
}
- wake_up_interruptible_all(&proc->wait);
+ binder_inner_proc_unlock(proc);
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
"binder_flush: %d woke %d threads\n", proc->pid,
@@ -3626,13 +4863,21 @@
{
struct binder_ref *ref;
int death = 0;
+ struct binder_proc *proc = node->proc;
- list_del_init(&node->work.entry);
- binder_release_work(&node->async_todo);
+ binder_release_work(proc, &node->async_todo);
- if (hlist_empty(&node->refs)) {
- kfree(node);
- binder_stats_deleted(BINDER_STAT_NODE);
+ binder_node_lock(node);
+ binder_inner_proc_lock(proc);
+ binder_dequeue_work_ilocked(&node->work);
+ /*
+ * The caller must have taken a temporary ref on the node,
+ */
+ BUG_ON(!node->tmp_refs);
+ if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
+ binder_inner_proc_unlock(proc);
+ binder_node_unlock(node);
+ binder_free_node(node);
return refs;
}
@@ -3640,45 +4885,58 @@
node->proc = NULL;
node->local_strong_refs = 0;
node->local_weak_refs = 0;
+ binder_inner_proc_unlock(proc);
+
+ spin_lock(&binder_dead_nodes_lock);
hlist_add_head(&node->dead_node, &binder_dead_nodes);
+ spin_unlock(&binder_dead_nodes_lock);
hlist_for_each_entry(ref, &node->refs, node_entry) {
refs++;
-
- if (!ref->death)
+ /*
+ * Need the node lock to synchronize
+ * with new notification requests and the
+ * inner lock to synchronize with queued
+ * death notifications.
+ */
+ binder_inner_proc_lock(ref->proc);
+ if (!ref->death) {
+ binder_inner_proc_unlock(ref->proc);
continue;
+ }
death++;
- if (list_empty(&ref->death->work.entry)) {
- ref->death->work.type = BINDER_WORK_DEAD_BINDER;
- list_add_tail(&ref->death->work.entry,
- &ref->proc->todo);
- wake_up_interruptible(&ref->proc->wait);
- } else
- BUG();
+ BUG_ON(!list_empty(&ref->death->work.entry));
+ ref->death->work.type = BINDER_WORK_DEAD_BINDER;
+ binder_enqueue_work_ilocked(&ref->death->work,
+ &ref->proc->todo);
+ binder_wakeup_proc_ilocked(ref->proc);
+ binder_inner_proc_unlock(ref->proc);
}
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"node %d now dead, refs %d, death %d\n",
node->debug_id, refs, death);
+ binder_node_unlock(node);
+ binder_put_node(node);
return refs;
}
static void binder_deferred_release(struct binder_proc *proc)
{
- struct binder_transaction *t;
struct binder_context *context = proc->context;
struct rb_node *n;
- int threads, nodes, incoming_refs, outgoing_refs, buffers,
- active_transactions, page_count;
+ int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
- BUG_ON(proc->vma);
BUG_ON(proc->files);
+ mutex_lock(&binder_procs_lock);
hlist_del(&proc->proc_node);
+ mutex_unlock(&binder_procs_lock);
+ mutex_lock(&context->context_mgr_node_lock);
if (context->binder_context_mgr_node &&
context->binder_context_mgr_node->proc == proc) {
binder_debug(BINDER_DEBUG_DEAD_BINDER,
@@ -3686,15 +4944,25 @@
__func__, proc->pid);
context->binder_context_mgr_node = NULL;
}
+ mutex_unlock(&context->context_mgr_node_lock);
+ binder_inner_proc_lock(proc);
+ /*
+ * Make sure proc stays alive after we
+ * remove all the threads
+ */
+ proc->tmp_ref++;
+ proc->is_dead = true;
threads = 0;
active_transactions = 0;
while ((n = rb_first(&proc->threads))) {
struct binder_thread *thread;
thread = rb_entry(n, struct binder_thread, rb_node);
+ binder_inner_proc_unlock(proc);
threads++;
- active_transactions += binder_free_thread(proc, thread);
+ active_transactions += binder_thread_release(proc, thread);
+ binder_inner_proc_lock(proc);
}
nodes = 0;
@@ -3704,73 +4972,42 @@
node = rb_entry(n, struct binder_node, rb_node);
nodes++;
+ /*
+ * take a temporary ref on the node before
+ * calling binder_node_release() which will either
+ * kfree() the node or call binder_put_node()
+ */
+ binder_inc_node_tmpref_ilocked(node);
rb_erase(&node->rb_node, &proc->nodes);
+ binder_inner_proc_unlock(proc);
incoming_refs = binder_node_release(node, incoming_refs);
+ binder_inner_proc_lock(proc);
}
+ binder_inner_proc_unlock(proc);
outgoing_refs = 0;
+ binder_proc_lock(proc);
while ((n = rb_first(&proc->refs_by_desc))) {
struct binder_ref *ref;
ref = rb_entry(n, struct binder_ref, rb_node_desc);
outgoing_refs++;
- binder_delete_ref(ref);
+ binder_cleanup_ref_olocked(ref);
+ binder_proc_unlock(proc);
+ binder_free_ref(ref);
+ binder_proc_lock(proc);
}
+ binder_proc_unlock(proc);
- binder_release_work(&proc->todo);
- binder_release_work(&proc->delivered_death);
-
- buffers = 0;
- while ((n = rb_first(&proc->allocated_buffers))) {
- struct binder_buffer *buffer;
-
- buffer = rb_entry(n, struct binder_buffer, rb_node);
-
- t = buffer->transaction;
- if (t) {
- t->buffer = NULL;
- buffer->transaction = NULL;
- pr_err("release proc %d, transaction %d, not freed\n",
- proc->pid, t->debug_id);
- /*BUG();*/
- }
-
- binder_free_buf(proc, buffer);
- buffers++;
- }
-
- binder_stats_deleted(BINDER_STAT_PROC);
-
- page_count = 0;
- if (proc->pages) {
- int i;
-
- for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
- void *page_addr;
-
- if (!proc->pages[i])
- continue;
-
- page_addr = proc->buffer + i * PAGE_SIZE;
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%s: %d: page %d at %p not freed\n",
- __func__, proc->pid, i, page_addr);
- unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
- __free_page(proc->pages[i]);
- page_count++;
- }
- kfree(proc->pages);
- vfree(proc->buffer);
- }
-
- put_task_struct(proc->tsk);
+ binder_release_work(proc, &proc->todo);
+ binder_release_work(proc, &proc->delivered_death);
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
- "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n",
+ "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
__func__, proc->pid, threads, nodes, incoming_refs,
- outgoing_refs, active_transactions, buffers, page_count);
+ outgoing_refs, active_transactions);
- kfree(proc);
+ binder_proc_dec_tmpref(proc);
}
static void binder_deferred_func(struct work_struct *work)
@@ -3781,12 +5018,7 @@
int defer;
do {
- trace_binder_lock(__func__);
- mutex_lock(&binder_main_lock);
- trace_binder_locked(__func__);
-
mutex_lock(&binder_deferred_lock);
- preempt_disable();
if (!hlist_empty(&binder_deferred_list)) {
proc = hlist_entry(binder_deferred_list.first,
struct binder_proc, deferred_work_node);
@@ -3812,9 +5044,6 @@
if (defer & BINDER_DEFERRED_RELEASE)
binder_deferred_release(proc); /* frees proc */
- trace_binder_unlock(__func__);
- mutex_unlock(&binder_main_lock);
- preempt_enable_no_resched();
if (files)
put_files_struct(files);
} while (proc);
@@ -3834,41 +5063,53 @@
mutex_unlock(&binder_deferred_lock);
}
-static void print_binder_transaction(struct seq_file *m, const char *prefix,
- struct binder_transaction *t)
+static void print_binder_transaction_ilocked(struct seq_file *m,
+ struct binder_proc *proc,
+ const char *prefix,
+ struct binder_transaction *t)
{
+ struct binder_proc *to_proc;
+ struct binder_buffer *buffer = t->buffer;
+
+ WARN_ON(!spin_is_locked(&proc->inner_lock));
+ spin_lock(&t->lock);
+ to_proc = t->to_proc;
seq_printf(m,
- "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
+ "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
prefix, t->debug_id, t,
t->from ? t->from->proc->pid : 0,
t->from ? t->from->pid : 0,
- t->to_proc ? t->to_proc->pid : 0,
+ to_proc ? to_proc->pid : 0,
t->to_thread ? t->to_thread->pid : 0,
- t->code, t->flags, t->priority, t->need_reply);
- if (t->buffer == NULL) {
+ t->code, t->flags, t->priority.sched_policy,
+ t->priority.prio, t->need_reply);
+ spin_unlock(&t->lock);
+
+ if (proc != to_proc) {
+ /*
+ * Can only safely deref buffer if we are holding the
+ * correct proc inner lock for this node
+ */
+ seq_puts(m, "\n");
+ return;
+ }
+
+ if (buffer == NULL) {
seq_puts(m, " buffer free\n");
return;
}
- if (t->buffer->target_node)
- seq_printf(m, " node %d",
- t->buffer->target_node->debug_id);
+ if (buffer->target_node)
+ seq_printf(m, " node %d", buffer->target_node->debug_id);
seq_printf(m, " size %zd:%zd data %p\n",
- t->buffer->data_size, t->buffer->offsets_size,
- t->buffer->data);
-}
-
-static void print_binder_buffer(struct seq_file *m, const char *prefix,
- struct binder_buffer *buffer)
-{
- seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
- prefix, buffer->debug_id, buffer->data,
buffer->data_size, buffer->offsets_size,
- buffer->transaction ? "active" : "delivered");
+ buffer->data);
}
-static void print_binder_work(struct seq_file *m, const char *prefix,
- const char *transaction_prefix,
- struct binder_work *w)
+static void print_binder_work_ilocked(struct seq_file *m,
+ struct binder_proc *proc,
+ const char *prefix,
+ const char *transaction_prefix,
+ struct binder_work *w)
{
struct binder_node *node;
struct binder_transaction *t;
@@ -3876,8 +5117,16 @@
switch (w->type) {
case BINDER_WORK_TRANSACTION:
t = container_of(w, struct binder_transaction, work);
- print_binder_transaction(m, transaction_prefix, t);
+ print_binder_transaction_ilocked(
+ m, proc, transaction_prefix, t);
break;
+ case BINDER_WORK_RETURN_ERROR: {
+ struct binder_error *e = container_of(
+ w, struct binder_error, work);
+
+ seq_printf(m, "%stransaction error: %u\n",
+ prefix, e->cmd);
+ } break;
case BINDER_WORK_TRANSACTION_COMPLETE:
seq_printf(m, "%stransaction complete\n", prefix);
break;
@@ -3902,70 +5151,90 @@
}
}
-static void print_binder_thread(struct seq_file *m,
- struct binder_thread *thread,
- int print_always)
+static void print_binder_thread_ilocked(struct seq_file *m,
+ struct binder_thread *thread,
+ int print_always)
{
struct binder_transaction *t;
struct binder_work *w;
size_t start_pos = m->count;
size_t header_pos;
- seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper);
+ WARN_ON(!spin_is_locked(&thread->proc->inner_lock));
+ seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
+ thread->pid, thread->looper,
+ thread->looper_need_return,
+ atomic_read(&thread->tmp_ref));
header_pos = m->count;
t = thread->transaction_stack;
while (t) {
if (t->from == thread) {
- print_binder_transaction(m,
- " outgoing transaction", t);
+ print_binder_transaction_ilocked(m, thread->proc,
+ " outgoing transaction", t);
t = t->from_parent;
} else if (t->to_thread == thread) {
- print_binder_transaction(m,
+ print_binder_transaction_ilocked(m, thread->proc,
" incoming transaction", t);
t = t->to_parent;
} else {
- print_binder_transaction(m, " bad transaction", t);
+ print_binder_transaction_ilocked(m, thread->proc,
+ " bad transaction", t);
t = NULL;
}
}
list_for_each_entry(w, &thread->todo, entry) {
- print_binder_work(m, " ", " pending transaction", w);
+ print_binder_work_ilocked(m, thread->proc, " ",
+ " pending transaction", w);
}
if (!print_always && m->count == header_pos)
m->count = start_pos;
}
-static void print_binder_node(struct seq_file *m, struct binder_node *node)
+static void print_binder_node_nilocked(struct seq_file *m,
+ struct binder_node *node)
{
struct binder_ref *ref;
struct binder_work *w;
int count;
+ WARN_ON(!spin_is_locked(&node->lock));
+ if (node->proc)
+ WARN_ON(!spin_is_locked(&node->proc->inner_lock));
+
count = 0;
hlist_for_each_entry(ref, &node->refs, node_entry)
count++;
- seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
+ seq_printf(m, " node %d: u%016llx c%016llx pri %d:%d hs %d hw %d ls %d lw %d is %d iw %d tr %d",
node->debug_id, (u64)node->ptr, (u64)node->cookie,
+ node->sched_policy, node->min_priority,
node->has_strong_ref, node->has_weak_ref,
node->local_strong_refs, node->local_weak_refs,
- node->internal_strong_refs, count);
+ node->internal_strong_refs, count, node->tmp_refs);
if (count) {
seq_puts(m, " proc");
hlist_for_each_entry(ref, &node->refs, node_entry)
seq_printf(m, " %d", ref->proc->pid);
}
seq_puts(m, "\n");
- list_for_each_entry(w, &node->async_todo, entry)
- print_binder_work(m, " ",
- " pending async transaction", w);
+ if (node->proc) {
+ list_for_each_entry(w, &node->async_todo, entry)
+ print_binder_work_ilocked(m, node->proc, " ",
+ " pending async transaction", w);
+ }
}
-static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
+static void print_binder_ref_olocked(struct seq_file *m,
+ struct binder_ref *ref)
{
- seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n",
- ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
- ref->node->debug_id, ref->strong, ref->weak, ref->death);
+ WARN_ON(!spin_is_locked(&ref->proc->outer_lock));
+ binder_node_lock(ref->node);
+ seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
+ ref->data.debug_id, ref->data.desc,
+ ref->node->proc ? "" : "dead ",
+ ref->node->debug_id, ref->data.strong,
+ ref->data.weak, ref->death);
+ binder_node_unlock(ref->node);
}
static void print_binder_proc(struct seq_file *m,
@@ -3975,36 +5244,60 @@
struct rb_node *n;
size_t start_pos = m->count;
size_t header_pos;
+ struct binder_node *last_node = NULL;
seq_printf(m, "proc %d\n", proc->pid);
seq_printf(m, "context %s\n", proc->context->name);
header_pos = m->count;
+ binder_inner_proc_lock(proc);
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
- print_binder_thread(m, rb_entry(n, struct binder_thread,
+ print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
rb_node), print_all);
+
for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
struct binder_node *node = rb_entry(n, struct binder_node,
rb_node);
- if (print_all || node->has_async_transaction)
- print_binder_node(m, node);
+ /*
+ * take a temporary reference on the node so it
+ * survives and isn't removed from the tree
+ * while we print it.
+ */
+ binder_inc_node_tmpref_ilocked(node);
+ /* Need to drop inner lock to take node lock */
+ binder_inner_proc_unlock(proc);
+ if (last_node)
+ binder_put_node(last_node);
+ binder_node_inner_lock(node);
+ print_binder_node_nilocked(m, node);
+ binder_node_inner_unlock(node);
+ last_node = node;
+ binder_inner_proc_lock(proc);
}
+ binder_inner_proc_unlock(proc);
+ if (last_node)
+ binder_put_node(last_node);
+
if (print_all) {
+ binder_proc_lock(proc);
for (n = rb_first(&proc->refs_by_desc);
n != NULL;
n = rb_next(n))
- print_binder_ref(m, rb_entry(n, struct binder_ref,
- rb_node_desc));
+ print_binder_ref_olocked(m, rb_entry(n,
+ struct binder_ref,
+ rb_node_desc));
+ binder_proc_unlock(proc);
}
- for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
- print_binder_buffer(m, " buffer",
- rb_entry(n, struct binder_buffer, rb_node));
+ binder_alloc_print_allocated(m, &proc->alloc);
+ binder_inner_proc_lock(proc);
list_for_each_entry(w, &proc->todo, entry)
- print_binder_work(m, " ", " pending transaction", w);
+ print_binder_work_ilocked(m, proc, " ",
+ " pending transaction", w);
list_for_each_entry(w, &proc->delivered_death, entry) {
seq_puts(m, " has delivered dead binder\n");
break;
}
+ binder_inner_proc_unlock(proc);
if (!print_all && m->count == header_pos)
m->count = start_pos;
}
@@ -4070,17 +5363,21 @@
BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
ARRAY_SIZE(binder_command_strings));
for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
- if (stats->bc[i])
+ int temp = atomic_read(&stats->bc[i]);
+
+ if (temp)
seq_printf(m, "%s%s: %d\n", prefix,
- binder_command_strings[i], stats->bc[i]);
+ binder_command_strings[i], temp);
}
BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
ARRAY_SIZE(binder_return_strings));
for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
- if (stats->br[i])
+ int temp = atomic_read(&stats->br[i]);
+
+ if (temp)
seq_printf(m, "%s%s: %d\n", prefix,
- binder_return_strings[i], stats->br[i]);
+ binder_return_strings[i], temp);
}
BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
@@ -4088,11 +5385,15 @@
BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
ARRAY_SIZE(stats->obj_deleted));
for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
- if (stats->obj_created[i] || stats->obj_deleted[i])
- seq_printf(m, "%s%s: active %d total %d\n", prefix,
+ int created = atomic_read(&stats->obj_created[i]);
+ int deleted = atomic_read(&stats->obj_deleted[i]);
+
+ if (created || deleted)
+ seq_printf(m, "%s%s: active %d total %d\n",
+ prefix,
binder_objstat_strings[i],
- stats->obj_created[i] - stats->obj_deleted[i],
- stats->obj_created[i]);
+ created - deleted,
+ created);
}
}
@@ -4100,51 +5401,59 @@
struct binder_proc *proc)
{
struct binder_work *w;
+ struct binder_thread *thread;
struct rb_node *n;
- int count, strong, weak;
+ int count, strong, weak, ready_threads;
+ size_t free_async_space =
+ binder_alloc_get_free_async_space(&proc->alloc);
seq_printf(m, "proc %d\n", proc->pid);
seq_printf(m, "context %s\n", proc->context->name);
count = 0;
+ ready_threads = 0;
+ binder_inner_proc_lock(proc);
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
count++;
+
+ list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
+ ready_threads++;
+
seq_printf(m, " threads: %d\n", count);
seq_printf(m, " requested threads: %d+%d/%d\n"
" ready threads %d\n"
" free async space %zd\n", proc->requested_threads,
proc->requested_threads_started, proc->max_threads,
- proc->ready_threads, proc->free_async_space);
+ ready_threads,
+ free_async_space);
count = 0;
for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
count++;
+ binder_inner_proc_unlock(proc);
seq_printf(m, " nodes: %d\n", count);
count = 0;
strong = 0;
weak = 0;
+ binder_proc_lock(proc);
for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
struct binder_ref *ref = rb_entry(n, struct binder_ref,
rb_node_desc);
count++;
- strong += ref->strong;
- weak += ref->weak;
+ strong += ref->data.strong;
+ weak += ref->data.weak;
}
+ binder_proc_unlock(proc);
seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
- count = 0;
- for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
- count++;
+ count = binder_alloc_get_allocated_count(&proc->alloc);
seq_printf(m, " buffers: %d\n", count);
count = 0;
+ binder_inner_proc_lock(proc);
list_for_each_entry(w, &proc->todo, entry) {
- switch (w->type) {
- case BINDER_WORK_TRANSACTION:
+ if (w->type == BINDER_WORK_TRANSACTION)
count++;
- break;
- default:
- break;
- }
}
+ binder_inner_proc_unlock(proc);
seq_printf(m, " pending transactions: %d\n", count);
print_binder_stats(m, " ", &proc->stats);
@@ -4155,57 +5464,67 @@
{
struct binder_proc *proc;
struct binder_node *node;
- int do_lock = !binder_debug_no_lock;
-
- if (do_lock)
- binder_lock(__func__);
+ struct binder_node *last_node = NULL;
seq_puts(m, "binder state:\n");
+ spin_lock(&binder_dead_nodes_lock);
if (!hlist_empty(&binder_dead_nodes))
seq_puts(m, "dead nodes:\n");
- hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
- print_binder_node(m, node);
+ hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
+ /*
+ * take a temporary reference on the node so it
+ * survives and isn't removed from the list
+ * while we print it.
+ */
+ node->tmp_refs++;
+ spin_unlock(&binder_dead_nodes_lock);
+ if (last_node)
+ binder_put_node(last_node);
+ binder_node_lock(node);
+ print_binder_node_nilocked(m, node);
+ binder_node_unlock(node);
+ last_node = node;
+ spin_lock(&binder_dead_nodes_lock);
+ }
+ spin_unlock(&binder_dead_nodes_lock);
+ if (last_node)
+ binder_put_node(last_node);
+ mutex_lock(&binder_procs_lock);
hlist_for_each_entry(proc, &binder_procs, proc_node)
print_binder_proc(m, proc, 1);
- if (do_lock)
- binder_unlock(__func__);
+ mutex_unlock(&binder_procs_lock);
+
return 0;
}
static int binder_stats_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
- int do_lock = !binder_debug_no_lock;
-
- if (do_lock)
- binder_lock(__func__);
seq_puts(m, "binder stats:\n");
print_binder_stats(m, "", &binder_stats);
+ mutex_lock(&binder_procs_lock);
hlist_for_each_entry(proc, &binder_procs, proc_node)
print_binder_proc_stats(m, proc);
- if (do_lock)
- binder_unlock(__func__);
+ mutex_unlock(&binder_procs_lock);
+
return 0;
}
static int binder_transactions_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
- int do_lock = !binder_debug_no_lock;
-
- if (do_lock)
- binder_lock(__func__);
seq_puts(m, "binder transactions:\n");
+ mutex_lock(&binder_procs_lock);
hlist_for_each_entry(proc, &binder_procs, proc_node)
print_binder_proc(m, proc, 0);
- if (do_lock)
- binder_unlock(__func__);
+ mutex_unlock(&binder_procs_lock);
+
return 0;
}
@@ -4213,44 +5532,63 @@
{
struct binder_proc *itr;
int pid = (unsigned long)m->private;
- int do_lock = !binder_debug_no_lock;
- if (do_lock)
- binder_lock(__func__);
-
+ mutex_lock(&binder_procs_lock);
hlist_for_each_entry(itr, &binder_procs, proc_node) {
if (itr->pid == pid) {
seq_puts(m, "binder proc state:\n");
print_binder_proc(m, itr, 1);
}
}
- if (do_lock)
- binder_unlock(__func__);
+ mutex_unlock(&binder_procs_lock);
+
return 0;
}
static void print_binder_transaction_log_entry(struct seq_file *m,
struct binder_transaction_log_entry *e)
{
+ int debug_id = READ_ONCE(e->debug_id_done);
+ /*
+ * read barrier to guarantee debug_id_done read before
+ * we print the log values
+ */
+ smp_rmb();
seq_printf(m,
- "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d\n",
+ "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
e->debug_id, (e->call_type == 2) ? "reply" :
((e->call_type == 1) ? "async" : "call "), e->from_proc,
e->from_thread, e->to_proc, e->to_thread, e->context_name,
- e->to_node, e->target_handle, e->data_size, e->offsets_size);
+ e->to_node, e->target_handle, e->data_size, e->offsets_size,
+ e->return_error, e->return_error_param,
+ e->return_error_line);
+ /*
+ * read-barrier to guarantee read of debug_id_done after
+ * done printing the fields of the entry
+ */
+ smp_rmb();
+ seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
+ "\n" : " (incomplete)\n");
}
static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
struct binder_transaction_log *log = m->private;
+ unsigned int log_cur = atomic_read(&log->cur);
+ unsigned int count;
+ unsigned int cur;
int i;
- if (log->full) {
- for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
- print_binder_transaction_log_entry(m, &log->entry[i]);
+ count = log_cur + 1;
+ cur = count < ARRAY_SIZE(log->entry) && !log->full ?
+ 0 : count % ARRAY_SIZE(log->entry);
+ if (count > ARRAY_SIZE(log->entry) || log->full)
+ count = ARRAY_SIZE(log->entry);
+ for (i = 0; i < count; i++) {
+ unsigned int index = cur++ % ARRAY_SIZE(log->entry);
+
+ print_binder_transaction_log_entry(m, &log->entry[index]);
}
- for (i = 0; i < log->next; i++)
- print_binder_transaction_log_entry(m, &log->entry[i]);
return 0;
}
@@ -4285,6 +5623,7 @@
binder_device->context.binder_context_mgr_uid = INVALID_UID;
binder_device->context.name = name;
+ mutex_init(&binder_device->context.context_mgr_node_lock);
ret = misc_register(&binder_device->miscdev);
if (ret < 0) {
@@ -4304,6 +5643,9 @@
struct binder_device *device;
struct hlist_node *tmp;
+ atomic_set(&binder_transaction_log.cur, ~0U);
+ atomic_set(&binder_transaction_log_failed.cur, ~0U);
+
binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
if (binder_debugfs_dir_entry_root)
binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
new file mode 100644
index 0000000..b90222a
--- /dev/null
+++ b/drivers/android/binder_alloc.c
@@ -0,0 +1,802 @@
+/* binder_alloc.c
+ *
+ * Android IPC Subsystem
+ *
+ * Copyright (C) 2007-2017 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <asm/cacheflush.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/rtmutex.h>
+#include <linux/rbtree.h>
+#include <linux/seq_file.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include "binder_alloc.h"
+#include "binder_trace.h"
+
+static DEFINE_MUTEX(binder_alloc_mmap_lock);
+
+enum {
+ BINDER_DEBUG_OPEN_CLOSE = 1U << 1,
+ BINDER_DEBUG_BUFFER_ALLOC = 1U << 2,
+ BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3,
+};
+static uint32_t binder_alloc_debug_mask;
+
+module_param_named(debug_mask, binder_alloc_debug_mask,
+ uint, 0644);
+
+#define binder_alloc_debug(mask, x...) \
+ do { \
+ if (binder_alloc_debug_mask & mask) \
+ pr_info(x); \
+ } while (0)
+
+static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
+ struct binder_buffer *buffer)
+{
+ if (list_is_last(&buffer->entry, &alloc->buffers))
+ return alloc->buffer +
+ alloc->buffer_size - (void *)buffer->data;
+ return (size_t)list_entry(buffer->entry.next,
+ struct binder_buffer, entry) - (size_t)buffer->data;
+}
+
+static void binder_insert_free_buffer(struct binder_alloc *alloc,
+ struct binder_buffer *new_buffer)
+{
+ struct rb_node **p = &alloc->free_buffers.rb_node;
+ struct rb_node *parent = NULL;
+ struct binder_buffer *buffer;
+ size_t buffer_size;
+ size_t new_buffer_size;
+
+ BUG_ON(!new_buffer->free);
+
+ new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);
+
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: add free buffer, size %zd, at %pK\n",
+ alloc->pid, new_buffer_size, new_buffer);
+
+ while (*p) {
+ parent = *p;
+ buffer = rb_entry(parent, struct binder_buffer, rb_node);
+ BUG_ON(!buffer->free);
+
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+
+ if (new_buffer_size < buffer_size)
+ p = &parent->rb_left;
+ else
+ p = &parent->rb_right;
+ }
+ rb_link_node(&new_buffer->rb_node, parent, p);
+ rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
+}
+
+static void binder_insert_allocated_buffer_locked(
+ struct binder_alloc *alloc, struct binder_buffer *new_buffer)
+{
+ struct rb_node **p = &alloc->allocated_buffers.rb_node;
+ struct rb_node *parent = NULL;
+ struct binder_buffer *buffer;
+
+ BUG_ON(new_buffer->free);
+
+ while (*p) {
+ parent = *p;
+ buffer = rb_entry(parent, struct binder_buffer, rb_node);
+ BUG_ON(buffer->free);
+
+ if (new_buffer < buffer)
+ p = &parent->rb_left;
+ else if (new_buffer > buffer)
+ p = &parent->rb_right;
+ else
+ BUG();
+ }
+ rb_link_node(&new_buffer->rb_node, parent, p);
+ rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
+}
+
+static struct binder_buffer *binder_alloc_prepare_to_free_locked(
+ struct binder_alloc *alloc,
+ uintptr_t user_ptr)
+{
+ struct rb_node *n = alloc->allocated_buffers.rb_node;
+ struct binder_buffer *buffer;
+ struct binder_buffer *kern_ptr;
+
+ kern_ptr = (struct binder_buffer *)(user_ptr - alloc->user_buffer_offset
+ - offsetof(struct binder_buffer, data));
+
+ while (n) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ BUG_ON(buffer->free);
+
+ if (kern_ptr < buffer)
+ n = n->rb_left;
+ else if (kern_ptr > buffer)
+ n = n->rb_right;
+ else {
+ /*
+ * Guard against user threads attempting to
+ * free the buffer twice
+ */
+ if (buffer->free_in_progress) {
+ pr_err("%d:%d FREE_BUFFER u%016llx user freed buffer twice\n",
+ alloc->pid, current->pid, (u64)user_ptr);
+ return NULL;
+ }
+ buffer->free_in_progress = 1;
+ return buffer;
+ }
+ }
+ return NULL;
+}
+
+/**
+ * binder_alloc_prepare_to_free() - get buffer given user ptr
+ * @alloc: binder_alloc for this proc
+ * @user_ptr: User pointer to buffer data
+ *
+ * Validate userspace pointer to buffer data and return buffer corresponding to
+ * that user pointer. Search the rb tree for buffer that matches user data
+ * pointer.
+ *
+ * Return: Pointer to buffer or NULL
+ */
+struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
+ uintptr_t user_ptr)
+{
+ struct binder_buffer *buffer;
+
+ mutex_lock(&alloc->mutex);
+ buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
+ mutex_unlock(&alloc->mutex);
+ return buffer;
+}
+
+static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
+ void *start, void *end,
+ struct vm_area_struct *vma)
+{
+ void *page_addr;
+ unsigned long user_page_addr;
+ struct page **page;
+ struct mm_struct *mm;
+
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: %s pages %pK-%pK\n", alloc->pid,
+ allocate ? "allocate" : "free", start, end);
+
+ if (end <= start)
+ return 0;
+
+ trace_binder_update_page_range(alloc, allocate, start, end);
+
+ if (vma)
+ mm = NULL;
+ else
+ mm = get_task_mm(alloc->tsk);
+
+ if (mm) {
+ down_write(&mm->mmap_sem);
+ vma = alloc->vma;
+ if (vma && mm != alloc->vma_vm_mm) {
+ pr_err("%d: vma mm and task mm mismatch\n",
+ alloc->pid);
+ vma = NULL;
+ }
+ }
+
+ if (allocate == 0)
+ goto free_range;
+
+ if (vma == NULL) {
+ pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
+ alloc->pid);
+ goto err_no_vma;
+ }
+
+ for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+ int ret;
+
+ page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
+
+ BUG_ON(*page);
+ *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
+ if (*page == NULL) {
+ pr_err("%d: binder_alloc_buf failed for page at %pK\n",
+ alloc->pid, page_addr);
+ goto err_alloc_page_failed;
+ }
+ ret = map_kernel_range_noflush((unsigned long)page_addr,
+ PAGE_SIZE, PAGE_KERNEL, page);
+ flush_cache_vmap((unsigned long)page_addr,
+ (unsigned long)page_addr + PAGE_SIZE);
+ if (ret != 1) {
+ pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
+ alloc->pid, page_addr);
+ goto err_map_kernel_failed;
+ }
+ user_page_addr =
+ (uintptr_t)page_addr + alloc->user_buffer_offset;
+ ret = vm_insert_page(vma, user_page_addr, page[0]);
+ if (ret) {
+ pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
+ alloc->pid, user_page_addr);
+ goto err_vm_insert_page_failed;
+ }
+ /* vm_insert_page does not seem to increment the refcount */
+ }
+ if (mm) {
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+ return 0;
+
+free_range:
+ for (page_addr = end - PAGE_SIZE; page_addr >= start;
+ page_addr -= PAGE_SIZE) {
+ page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
+ if (vma)
+ zap_page_range(vma, (uintptr_t)page_addr +
+ alloc->user_buffer_offset, PAGE_SIZE, NULL);
+err_vm_insert_page_failed:
+ unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
+err_map_kernel_failed:
+ __free_page(*page);
+ *page = NULL;
+err_alloc_page_failed:
+ ;
+ }
+err_no_vma:
+ if (mm) {
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+ return vma ? -ENOMEM : -ESRCH;
+}
+
+struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
+ size_t data_size,
+ size_t offsets_size,
+ size_t extra_buffers_size,
+ int is_async)
+{
+ struct rb_node *n = alloc->free_buffers.rb_node;
+ struct binder_buffer *buffer;
+ size_t buffer_size;
+ struct rb_node *best_fit = NULL;
+ void *has_page_addr;
+ void *end_page_addr;
+ size_t size, data_offsets_size;
+ int ret;
+
+ if (alloc->vma == NULL) {
+ pr_err("%d: binder_alloc_buf, no vma\n",
+ alloc->pid);
+ return ERR_PTR(-ESRCH);
+ }
+
+ data_offsets_size = ALIGN(data_size, sizeof(void *)) +
+ ALIGN(offsets_size, sizeof(void *));
+
+ if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: got transaction with invalid size %zd-%zd\n",
+ alloc->pid, data_size, offsets_size);
+ return ERR_PTR(-EINVAL);
+ }
+ size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
+ if (size < data_offsets_size || size < extra_buffers_size) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: got transaction with invalid extra_buffers_size %zd\n",
+ alloc->pid, extra_buffers_size);
+ return ERR_PTR(-EINVAL);
+ }
+ if (is_async &&
+ alloc->free_async_space < size + sizeof(struct binder_buffer)) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: binder_alloc_buf size %zd failed, no async space left\n",
+ alloc->pid, size);
+ return ERR_PTR(-ENOSPC);
+ }
+
+ while (n) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ BUG_ON(!buffer->free);
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+
+ if (size < buffer_size) {
+ best_fit = n;
+ n = n->rb_left;
+ } else if (size > buffer_size)
+ n = n->rb_right;
+ else {
+ best_fit = n;
+ break;
+ }
+ }
+ if (best_fit == NULL) {
+ size_t allocated_buffers = 0;
+ size_t largest_alloc_size = 0;
+ size_t total_alloc_size = 0;
+ size_t free_buffers = 0;
+ size_t largest_free_size = 0;
+ size_t total_free_size = 0;
+
+ for (n = rb_first(&alloc->allocated_buffers); n != NULL;
+ n = rb_next(n)) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+ allocated_buffers++;
+ total_alloc_size += buffer_size;
+ if (buffer_size > largest_alloc_size)
+ largest_alloc_size = buffer_size;
+ }
+ for (n = rb_first(&alloc->free_buffers); n != NULL;
+ n = rb_next(n)) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+ free_buffers++;
+ total_free_size += buffer_size;
+ if (buffer_size > largest_free_size)
+ largest_free_size = buffer_size;
+ }
+ pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
+ alloc->pid, size);
+ pr_err("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
+ total_alloc_size, allocated_buffers, largest_alloc_size,
+ total_free_size, free_buffers, largest_free_size);
+ return ERR_PTR(-ENOSPC);
+ }
+ if (n == NULL) {
+ buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+ }
+
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
+ alloc->pid, size, buffer, buffer_size);
+
+ has_page_addr =
+ (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
+ if (n == NULL) {
+ if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
+ buffer_size = size; /* no room for other buffers */
+ else
+ buffer_size = size + sizeof(struct binder_buffer);
+ }
+ end_page_addr =
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
+ if (end_page_addr > has_page_addr)
+ end_page_addr = has_page_addr;
+ ret = binder_update_page_range(alloc, 1,
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
+ rb_erase(best_fit, &alloc->free_buffers);
+ buffer->free = 0;
+ buffer->free_in_progress = 0;
+ binder_insert_allocated_buffer_locked(alloc, buffer);
+ if (buffer_size != size) {
+ struct binder_buffer *new_buffer = (void *)buffer->data + size;
+
+ list_add(&new_buffer->entry, &buffer->entry);
+ new_buffer->free = 1;
+ binder_insert_free_buffer(alloc, new_buffer);
+ }
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: binder_alloc_buf size %zd got %pK\n",
+ alloc->pid, size, buffer);
+ buffer->data_size = data_size;
+ buffer->offsets_size = offsets_size;
+ buffer->async_transaction = is_async;
+ buffer->extra_buffers_size = extra_buffers_size;
+ if (is_async) {
+ alloc->free_async_space -= size + sizeof(struct binder_buffer);
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+ "%d: binder_alloc_buf size %zd async free %zd\n",
+ alloc->pid, size, alloc->free_async_space);
+ }
+ return buffer;
+}
+
+/**
+ * binder_alloc_new_buf() - Allocate a new binder buffer
+ * @alloc: binder_alloc for this proc
+ * @data_size: size of user data buffer
+ * @offsets_size: user specified buffer offset
+ * @extra_buffers_size: size of extra space for meta-data (eg, security context)
+ * @is_async: buffer for async transaction
+ *
+ * Allocate a new buffer given the requested sizes. Returns
+ * the kernel version of the buffer pointer. The size allocated
+ * is the sum of the three given sizes (each rounded up to
+ * pointer-sized boundary)
+ *
+ * Return: The allocated buffer or an ERR_PTR-encoded errno on failure
+ */
+struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
+ size_t data_size,
+ size_t offsets_size,
+ size_t extra_buffers_size,
+ int is_async)
+{
+ struct binder_buffer *buffer;
+
+ mutex_lock(&alloc->mutex);
+ buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
+ extra_buffers_size, is_async);
+ mutex_unlock(&alloc->mutex);
+ return buffer;
+}
+
+static void *buffer_start_page(struct binder_buffer *buffer)
+{
+ return (void *)((uintptr_t)buffer & PAGE_MASK);
+}
+
+static void *buffer_end_page(struct binder_buffer *buffer)
+{
+ return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
+}
+
+static void binder_delete_free_buffer(struct binder_alloc *alloc,
+ struct binder_buffer *buffer)
+{
+ struct binder_buffer *prev, *next = NULL;
+ int free_page_end = 1;
+ int free_page_start = 1;
+
+ BUG_ON(alloc->buffers.next == &buffer->entry);
+ prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
+ BUG_ON(!prev->free);
+ if (buffer_end_page(prev) == buffer_start_page(buffer)) {
+ free_page_start = 0;
+ if (buffer_end_page(prev) == buffer_end_page(buffer))
+ free_page_end = 0;
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: merge free, buffer %pK share page with %pK\n",
+ alloc->pid, buffer, prev);
+ }
+
+ if (!list_is_last(&buffer->entry, &alloc->buffers)) {
+ next = list_entry(buffer->entry.next,
+ struct binder_buffer, entry);
+ if (buffer_start_page(next) == buffer_end_page(buffer)) {
+ free_page_end = 0;
+ if (buffer_start_page(next) ==
+ buffer_start_page(buffer))
+ free_page_start = 0;
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: merge free, buffer %pK share page with %pK\n",
+ alloc->pid, buffer, prev);
+ }
+ }
+ list_del(&buffer->entry);
+ if (free_page_start || free_page_end) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: merge free, buffer %pK do not share page%s%s with %pK or %pK\n",
+ alloc->pid, buffer, free_page_start ? "" : " end",
+ free_page_end ? "" : " start", prev, next);
+ binder_update_page_range(alloc, 0, free_page_start ?
+ buffer_start_page(buffer) : buffer_end_page(buffer),
+ (free_page_end ? buffer_end_page(buffer) :
+ buffer_start_page(buffer)) + PAGE_SIZE, NULL);
+ }
+}
+
+static void binder_free_buf_locked(struct binder_alloc *alloc,
+ struct binder_buffer *buffer)
+{
+ size_t size, buffer_size;
+
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+
+ size = ALIGN(buffer->data_size, sizeof(void *)) +
+ ALIGN(buffer->offsets_size, sizeof(void *)) +
+ ALIGN(buffer->extra_buffers_size, sizeof(void *));
+
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
+ alloc->pid, buffer, size, buffer_size);
+
+ BUG_ON(buffer->free);
+ BUG_ON(size > buffer_size);
+ BUG_ON(buffer->transaction != NULL);
+ BUG_ON((void *)buffer < alloc->buffer);
+ BUG_ON((void *)buffer > alloc->buffer + alloc->buffer_size);
+
+ if (buffer->async_transaction) {
+ alloc->free_async_space += size + sizeof(struct binder_buffer);
+
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+ "%d: binder_free_buf size %zd async free %zd\n",
+ alloc->pid, size, alloc->free_async_space);
+ }
+
+ binder_update_page_range(alloc, 0,
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data),
+ (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
+ NULL);
+
+ rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
+ buffer->free = 1;
+ if (!list_is_last(&buffer->entry, &alloc->buffers)) {
+ struct binder_buffer *next = list_entry(buffer->entry.next,
+ struct binder_buffer, entry);
+
+ if (next->free) {
+ rb_erase(&next->rb_node, &alloc->free_buffers);
+ binder_delete_free_buffer(alloc, next);
+ }
+ }
+ if (alloc->buffers.next != &buffer->entry) {
+ struct binder_buffer *prev = list_entry(buffer->entry.prev,
+ struct binder_buffer, entry);
+
+ if (prev->free) {
+ binder_delete_free_buffer(alloc, buffer);
+ rb_erase(&prev->rb_node, &alloc->free_buffers);
+ buffer = prev;
+ }
+ }
+ binder_insert_free_buffer(alloc, buffer);
+}
+
+/**
+ * binder_alloc_free_buf() - free a binder buffer
+ * @alloc: binder_alloc for this proc
+ * @buffer: kernel pointer to buffer
+ *
+ * Free the buffer allocated via binder_alloc_new_buf()
+ */
+void binder_alloc_free_buf(struct binder_alloc *alloc,
+ struct binder_buffer *buffer)
+{
+ mutex_lock(&alloc->mutex);
+ binder_free_buf_locked(alloc, buffer);
+ mutex_unlock(&alloc->mutex);
+}
+
+/**
+ * binder_alloc_mmap_handler() - map virtual address space for proc
+ * @alloc: alloc structure for this proc
+ * @vma: vma passed to mmap()
+ *
+ * Called by binder_mmap() to initialize the space specified in
+ * vma for allocating binder buffers
+ *
+ * Return:
+ * 0 = success
+ * -EBUSY = address space already mapped
+ * -ENOMEM = failed to map memory to given address space
+ */
+int binder_alloc_mmap_handler(struct binder_alloc *alloc,
+ struct vm_area_struct *vma)
+{
+ int ret;
+ struct vm_struct *area;
+ const char *failure_string;
+ struct binder_buffer *buffer;
+
+ mutex_lock(&binder_alloc_mmap_lock);
+ if (alloc->buffer) {
+ ret = -EBUSY;
+ failure_string = "already mapped";
+ goto err_already_mapped;
+ }
+
+ area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
+ if (area == NULL) {
+ ret = -ENOMEM;
+ failure_string = "get_vm_area";
+ goto err_get_vm_area_failed;
+ }
+ alloc->buffer = area->addr;
+ alloc->user_buffer_offset =
+ vma->vm_start - (uintptr_t)alloc->buffer;
+ mutex_unlock(&binder_alloc_mmap_lock);
+
+#ifdef CONFIG_CPU_CACHE_VIPT
+ if (cache_is_vipt_aliasing()) {
+ while (CACHE_COLOUR(
+ (vma->vm_start ^ (uint32_t)alloc->buffer))) {
+ pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
+ __func__, alloc->pid, vma->vm_start,
+ vma->vm_end, alloc->buffer);
+ vma->vm_start += PAGE_SIZE;
+ }
+ }
+#endif
+ alloc->pages = kzalloc(sizeof(alloc->pages[0]) *
+ ((vma->vm_end - vma->vm_start) / PAGE_SIZE),
+ GFP_KERNEL);
+ if (alloc->pages == NULL) {
+ ret = -ENOMEM;
+ failure_string = "alloc page array";
+ goto err_alloc_pages_failed;
+ }
+ alloc->buffer_size = vma->vm_end - vma->vm_start;
+
+ if (binder_update_page_range(alloc, 1, alloc->buffer,
+ alloc->buffer + PAGE_SIZE, vma)) {
+ ret = -ENOMEM;
+ failure_string = "alloc small buf";
+ goto err_alloc_small_buf_failed;
+ }
+ buffer = alloc->buffer;
+ INIT_LIST_HEAD(&alloc->buffers);
+ list_add(&buffer->entry, &alloc->buffers);
+ buffer->free = 1;
+ binder_insert_free_buffer(alloc, buffer);
+ alloc->free_async_space = alloc->buffer_size / 2;
+ barrier();
+ alloc->vma = vma;
+ alloc->vma_vm_mm = vma->vm_mm;
+
+ return 0;
+
+err_alloc_small_buf_failed:
+ kfree(alloc->pages);
+ alloc->pages = NULL;
+err_alloc_pages_failed:
+ mutex_lock(&binder_alloc_mmap_lock);
+ vfree(alloc->buffer);
+ alloc->buffer = NULL;
+err_get_vm_area_failed:
+err_already_mapped:
+ mutex_unlock(&binder_alloc_mmap_lock);
+ pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
+ alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
+ return ret;
+}
+
+
+void binder_alloc_deferred_release(struct binder_alloc *alloc)
+{
+ struct rb_node *n;
+ int buffers, page_count;
+
+ BUG_ON(alloc->vma);
+
+ buffers = 0;
+ mutex_lock(&alloc->mutex);
+ while ((n = rb_first(&alloc->allocated_buffers))) {
+ struct binder_buffer *buffer;
+
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+
+ /* Transaction should already have been freed */
+ BUG_ON(buffer->transaction);
+
+ binder_free_buf_locked(alloc, buffer);
+ buffers++;
+ }
+
+ page_count = 0;
+ if (alloc->pages) {
+ int i;
+
+ for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+ void *page_addr;
+
+ if (!alloc->pages[i])
+ continue;
+
+ page_addr = alloc->buffer + i * PAGE_SIZE;
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%s: %d: page %d at %pK not freed\n",
+ __func__, alloc->pid, i, page_addr);
+ unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
+ __free_page(alloc->pages[i]);
+ page_count++;
+ }
+ kfree(alloc->pages);
+ vfree(alloc->buffer);
+ }
+ mutex_unlock(&alloc->mutex);
+
+ binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
+ "%s: %d buffers %d, pages %d\n",
+ __func__, alloc->pid, buffers, page_count);
+}
+
+static void print_binder_buffer(struct seq_file *m, const char *prefix,
+ struct binder_buffer *buffer)
+{
+ seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
+ prefix, buffer->debug_id, buffer->data,
+ buffer->data_size, buffer->offsets_size,
+ buffer->extra_buffers_size,
+ buffer->transaction ? "active" : "delivered");
+}
+
+/**
+ * binder_alloc_print_allocated() - print buffer info
+ * @m: seq_file for output via seq_printf()
+ * @alloc: binder_alloc for this proc
+ *
+ * Prints information about every buffer associated with
+ * the binder_alloc state to the given seq_file
+ */
+void binder_alloc_print_allocated(struct seq_file *m,
+ struct binder_alloc *alloc)
+{
+ struct rb_node *n;
+
+ mutex_lock(&alloc->mutex);
+ for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
+ print_binder_buffer(m, " buffer",
+ rb_entry(n, struct binder_buffer, rb_node));
+ mutex_unlock(&alloc->mutex);
+}
+
+/**
+ * binder_alloc_get_allocated_count() - return count of buffers
+ * @alloc: binder_alloc for this proc
+ *
+ * Return: count of allocated buffers
+ */
+int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
+{
+ struct rb_node *n;
+ int count = 0;
+
+ mutex_lock(&alloc->mutex);
+ for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
+ count++;
+ mutex_unlock(&alloc->mutex);
+ return count;
+}
+
+
+/**
+ * binder_alloc_vma_close() - invalidate address space
+ * @alloc: binder_alloc for this proc
+ *
+ * Called from binder_vma_close() when releasing address space.
+ * Clears alloc->vma to prevent new incoming transactions from
+ * allocating more buffers.
+ */
+void binder_alloc_vma_close(struct binder_alloc *alloc)
+{
+ WRITE_ONCE(alloc->vma, NULL);
+ WRITE_ONCE(alloc->vma_vm_mm, NULL);
+}
+
+/**
+ * binder_alloc_init() - called by binder_open() for per-proc initialization
+ * @alloc: binder_alloc for this proc
+ *
+ * Called from binder_open() to initialize binder_alloc fields for
+ * new binder proc
+ */
+void binder_alloc_init(struct binder_alloc *alloc)
+{
+ alloc->tsk = current->group_leader;
+ alloc->pid = current->group_leader->pid;
+ mutex_init(&alloc->mutex);
+}
+
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
new file mode 100644
index 0000000..088e4ff
--- /dev/null
+++ b/drivers/android/binder_alloc.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_BINDER_ALLOC_H
+#define _LINUX_BINDER_ALLOC_H
+
+#include <linux/rbtree.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/rtmutex.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+
+struct binder_transaction;
+
+/**
+ * struct binder_buffer - buffer used for binder transactions
+ * @entry:              entry in alloc->buffers list (ordered by address)
+ * @rb_node:            node for allocated_buffers/free_buffers rb trees
+ * @free:               true if buffer is free
+ * @allow_user_free:    true if user is allowed to free buffer
+ * @async_transaction:  true if buffer is in use for an async transaction
+ * @debug_id:           unique ID for debugging
+ * @transaction:        pointer to associated struct binder_transaction
+ * @target_node:        struct binder_node associated with this buffer
+ * @data_size:          size of @transaction data
+ * @offsets_size:       size of array of offsets
+ * @extra_buffers_size: size of space for other objects (like sg lists)
+ * @data:               start of the buffer's data area
+ *
+ * Bookkeeping structure for binder transaction buffers
+ */
+struct binder_buffer {
+ struct list_head entry; /* free and allocated entries by address */
+ struct rb_node rb_node; /* free entry by size or allocated entry */
+ /* by address */
+ unsigned free:1;
+ unsigned allow_user_free:1;
+ unsigned async_transaction:1;
+ unsigned free_in_progress:1;
+ unsigned debug_id:28;
+
+ struct binder_transaction *transaction;
+
+ struct binder_node *target_node;
+ size_t data_size;
+ size_t offsets_size;
+ size_t extra_buffers_size;
+ uint8_t data[0];
+};
+
+/**
+ * struct binder_alloc - per-binder proc state for binder allocator
+ * @vma: vm_area_struct passed to mmap_handler
+ *                      (invariant after mmap)
+ * @tsk:                task_struct of the group leader that called init
+ *                      for this proc (invariant after init)
+ * @vma_vm_mm:          copy of vma->vm_mm (invariant after mmap)
+ * @buffer: base of per-proc address space mapped via mmap
+ * @user_buffer_offset: offset between user and kernel VAs for buffer
+ * @buffers: list of all buffers for this proc
+ * @free_buffers: rb tree of buffers available for allocation
+ * sorted by size
+ * @allocated_buffers: rb tree of allocated buffers sorted by address
+ * @free_async_space: VA space available for async buffers. This is
+ * initialized at mmap time to 1/2 the full VA space
+ * @pages: array of physical page addresses for each
+ * page of mmap'd space
+ * @buffer_size: size of address space specified via mmap
+ * @pid: pid for associated binder_proc (invariant after init)
+ *
+ * Bookkeeping structure for per-proc address space management for binder
+ * buffers. It is normally initialized during binder_init() and binder_mmap()
+ * calls. The address space is used for both user-visible buffers and for
+ * struct binder_buffer objects used to track the user buffers
+ */
+struct binder_alloc {
+ struct mutex mutex;
+ struct task_struct *tsk;
+ struct vm_area_struct *vma;
+ struct mm_struct *vma_vm_mm;
+ void *buffer;
+ ptrdiff_t user_buffer_offset;
+ struct list_head buffers;
+ struct rb_root free_buffers;
+ struct rb_root allocated_buffers;
+ size_t free_async_space;
+ struct page **pages;
+ size_t buffer_size;
+ uint32_t buffer_free;
+ int pid;
+};
+
+extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
+ size_t data_size,
+ size_t offsets_size,
+ size_t extra_buffers_size,
+ int is_async);
+extern void binder_alloc_init(struct binder_alloc *alloc);
+extern void binder_alloc_vma_close(struct binder_alloc *alloc);
+extern struct binder_buffer *
+binder_alloc_prepare_to_free(struct binder_alloc *alloc,
+ uintptr_t user_ptr);
+extern void binder_alloc_free_buf(struct binder_alloc *alloc,
+ struct binder_buffer *buffer);
+extern int binder_alloc_mmap_handler(struct binder_alloc *alloc,
+ struct vm_area_struct *vma);
+extern void binder_alloc_deferred_release(struct binder_alloc *alloc);
+extern int binder_alloc_get_allocated_count(struct binder_alloc *alloc);
+extern void binder_alloc_print_allocated(struct seq_file *m,
+ struct binder_alloc *alloc);
+
+/**
+ * binder_alloc_get_free_async_space() - get free space available for async
+ * @alloc: binder_alloc for this proc
+ *
+ * Return: the bytes remaining in the address-space for async transactions
+ */
+static inline size_t
+binder_alloc_get_free_async_space(struct binder_alloc *alloc)
+{
+ size_t free_async_space;
+
+ mutex_lock(&alloc->mutex);
+ free_async_space = alloc->free_async_space;
+ mutex_unlock(&alloc->mutex);
+ return free_async_space;
+}
+
+/**
+ * binder_alloc_get_user_buffer_offset() - get offset between kernel/user addrs
+ * @alloc: binder_alloc for this proc
+ *
+ * Return: the offset between kernel and user-space addresses to use for
+ * virtual address conversion
+ */
+static inline ptrdiff_t
+binder_alloc_get_user_buffer_offset(struct binder_alloc *alloc)
+{
+ /*
+ * user_buffer_offset is constant if vma is set and
+ * undefined if vma is not set. It is possible to
+ * get here with !alloc->vma if the target process
+ * is dying while a transaction is being initiated.
+ * Returning the old value is ok in this case and
+ * the transaction will fail.
+ */
+ return alloc->user_buffer_offset;
+}
+
+#endif /* _LINUX_BINDER_ALLOC_H */
+
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index 7f20f3d..7967db1 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -23,7 +23,8 @@
struct binder_buffer;
struct binder_node;
struct binder_proc;
-struct binder_ref;
+struct binder_alloc;
+struct binder_ref_data;
struct binder_thread;
struct binder_transaction;
@@ -146,8 +147,8 @@
TRACE_EVENT(binder_transaction_node_to_ref,
TP_PROTO(struct binder_transaction *t, struct binder_node *node,
- struct binder_ref *ref),
- TP_ARGS(t, node, ref),
+ struct binder_ref_data *rdata),
+ TP_ARGS(t, node, rdata),
TP_STRUCT__entry(
__field(int, debug_id)
@@ -160,8 +161,8 @@
__entry->debug_id = t->debug_id;
__entry->node_debug_id = node->debug_id;
__entry->node_ptr = node->ptr;
- __entry->ref_debug_id = ref->debug_id;
- __entry->ref_desc = ref->desc;
+ __entry->ref_debug_id = rdata->debug_id;
+ __entry->ref_desc = rdata->desc;
),
TP_printk("transaction=%d node=%d src_ptr=0x%016llx ==> dest_ref=%d dest_desc=%d",
__entry->debug_id, __entry->node_debug_id,
@@ -170,8 +171,9 @@
);
TRACE_EVENT(binder_transaction_ref_to_node,
- TP_PROTO(struct binder_transaction *t, struct binder_ref *ref),
- TP_ARGS(t, ref),
+ TP_PROTO(struct binder_transaction *t, struct binder_node *node,
+ struct binder_ref_data *rdata),
+ TP_ARGS(t, node, rdata),
TP_STRUCT__entry(
__field(int, debug_id)
@@ -182,10 +184,10 @@
),
TP_fast_assign(
__entry->debug_id = t->debug_id;
- __entry->ref_debug_id = ref->debug_id;
- __entry->ref_desc = ref->desc;
- __entry->node_debug_id = ref->node->debug_id;
- __entry->node_ptr = ref->node->ptr;
+ __entry->ref_debug_id = rdata->debug_id;
+ __entry->ref_desc = rdata->desc;
+ __entry->node_debug_id = node->debug_id;
+ __entry->node_ptr = node->ptr;
),
TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ptr=0x%016llx",
__entry->debug_id, __entry->node_debug_id,
@@ -194,9 +196,10 @@
);
TRACE_EVENT(binder_transaction_ref_to_ref,
- TP_PROTO(struct binder_transaction *t, struct binder_ref *src_ref,
- struct binder_ref *dest_ref),
- TP_ARGS(t, src_ref, dest_ref),
+ TP_PROTO(struct binder_transaction *t, struct binder_node *node,
+ struct binder_ref_data *src_ref,
+ struct binder_ref_data *dest_ref),
+ TP_ARGS(t, node, src_ref, dest_ref),
TP_STRUCT__entry(
__field(int, debug_id)
@@ -208,7 +211,7 @@
),
TP_fast_assign(
__entry->debug_id = t->debug_id;
- __entry->node_debug_id = src_ref->node->debug_id;
+ __entry->node_debug_id = node->debug_id;
__entry->src_ref_debug_id = src_ref->debug_id;
__entry->src_ref_desc = src_ref->desc;
__entry->dest_ref_debug_id = dest_ref->debug_id;
@@ -268,9 +271,9 @@
TP_ARGS(buffer));
TRACE_EVENT(binder_update_page_range,
- TP_PROTO(struct binder_proc *proc, bool allocate,
+ TP_PROTO(struct binder_alloc *alloc, bool allocate,
void *start, void *end),
- TP_ARGS(proc, allocate, start, end),
+ TP_ARGS(alloc, allocate, start, end),
TP_STRUCT__entry(
__field(int, proc)
__field(bool, allocate)
@@ -278,9 +281,9 @@
__field(size_t, size)
),
TP_fast_assign(
- __entry->proc = proc->pid;
+ __entry->proc = alloc->pid;
__entry->allocate = allocate;
- __entry->offset = start - proc->buffer;
+ __entry->offset = start - alloc->buffer;
__entry->size = end - start;
),
TP_printk("proc=%d allocate=%d offset=%zu size=%zu",
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 5ba619a..35ab4d5 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -2106,7 +2106,11 @@
pm_runtime_get_noresume(dev);
pm_runtime_barrier(dev);
- if (dev->bus && dev->bus->shutdown) {
+ if (dev->class && dev->class->shutdown) {
+ if (initcall_debug)
+ dev_info(dev, "shutdown\n");
+ dev->class->shutdown(dev);
+ } else if (dev->bus && dev->bus->shutdown) {
if (initcall_debug)
dev_info(dev, "shutdown\n");
dev->bus->shutdown(dev);
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 624f069..55687b8 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -208,145 +208,10 @@
#endif
-#ifdef CONFIG_SCHED_HMP
-
-static ssize_t show_sched_static_cpu_pwr_cost(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cpu *cpu = container_of(dev, struct cpu, dev);
- ssize_t rc;
- int cpuid = cpu->dev.id;
- unsigned int pwr_cost;
-
- pwr_cost = sched_get_static_cpu_pwr_cost(cpuid);
-
- rc = snprintf(buf, PAGE_SIZE-2, "%d\n", pwr_cost);
-
- return rc;
-}
-
-static ssize_t __ref store_sched_static_cpu_pwr_cost(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cpu *cpu = container_of(dev, struct cpu, dev);
- int err;
- int cpuid = cpu->dev.id;
- unsigned int pwr_cost;
-
- err = kstrtouint(strstrip((char *)buf), 0, &pwr_cost);
- if (err)
- return err;
-
- err = sched_set_static_cpu_pwr_cost(cpuid, pwr_cost);
-
- if (err >= 0)
- err = count;
-
- return err;
-}
-
-static ssize_t show_sched_static_cluster_pwr_cost(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cpu *cpu = container_of(dev, struct cpu, dev);
- ssize_t rc;
- int cpuid = cpu->dev.id;
- unsigned int pwr_cost;
-
- pwr_cost = sched_get_static_cluster_pwr_cost(cpuid);
-
- rc = snprintf(buf, PAGE_SIZE-2, "%d\n", pwr_cost);
-
- return rc;
-}
-
-static ssize_t __ref store_sched_static_cluster_pwr_cost(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cpu *cpu = container_of(dev, struct cpu, dev);
- int err;
- int cpuid = cpu->dev.id;
- unsigned int pwr_cost;
-
- err = kstrtouint(strstrip((char *)buf), 0, &pwr_cost);
- if (err)
- return err;
-
- err = sched_set_static_cluster_pwr_cost(cpuid, pwr_cost);
-
- if (err >= 0)
- err = count;
-
- return err;
-}
-
-static ssize_t show_sched_cluser_wake_idle(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cpu *cpu = container_of(dev, struct cpu, dev);
- ssize_t rc;
- int cpuid = cpu->dev.id;
- unsigned int wake_up_idle;
-
- wake_up_idle = sched_get_cluster_wake_idle(cpuid);
-
- rc = scnprintf(buf, PAGE_SIZE-2, "%d\n", wake_up_idle);
-
- return rc;
-}
-
-static ssize_t __ref store_sched_cluster_wake_idle(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cpu *cpu = container_of(dev, struct cpu, dev);
- int err;
- int cpuid = cpu->dev.id;
- unsigned int wake_up_idle;
-
- err = kstrtouint(strstrip((char *)buf), 0, &wake_up_idle);
- if (err)
- return err;
-
- err = sched_set_cluster_wake_idle(cpuid, wake_up_idle);
-
- if (err >= 0)
- err = count;
-
- return err;
-}
-
-static DEVICE_ATTR(sched_static_cpu_pwr_cost, 0644,
- show_sched_static_cpu_pwr_cost,
- store_sched_static_cpu_pwr_cost);
-static DEVICE_ATTR(sched_static_cluster_pwr_cost, 0644,
- show_sched_static_cluster_pwr_cost,
- store_sched_static_cluster_pwr_cost);
-static DEVICE_ATTR(sched_cluster_wake_up_idle, 0644,
- show_sched_cluser_wake_idle,
- store_sched_cluster_wake_idle);
-
-static struct attribute *hmp_sched_cpu_attrs[] = {
- &dev_attr_sched_static_cpu_pwr_cost.attr,
- &dev_attr_sched_static_cluster_pwr_cost.attr,
- &dev_attr_sched_cluster_wake_up_idle.attr,
- NULL
-};
-
-static struct attribute_group sched_hmp_cpu_attr_group = {
- .attrs = hmp_sched_cpu_attrs,
-};
-
-#endif /* CONFIG_SCHED_HMP */
static const struct attribute_group *common_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
&crash_note_cpu_attr_group,
#endif
-#ifdef CONFIG_SCHED_HMP
- &sched_hmp_cpu_attr_group,
-#endif
#ifdef CONFIG_HOTPLUG_CPU
&cpu_isolated_attr_group,
#endif
@@ -357,9 +222,6 @@
#ifdef CONFIG_KEXEC
&crash_note_cpu_attr_group,
#endif
-#ifdef CONFIG_SCHED_HMP
- &sched_hmp_cpu_attr_group,
-#endif
#ifdef CONFIG_HOTPLUG_CPU
&cpu_isolated_attr_group,
#endif
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index e167a1e1..4f638ab 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -165,7 +165,8 @@
{
int ret;
- ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed, res_cma);
+ ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed,
+ "reserved", res_cma);
if (ret)
return ret;
@@ -257,7 +258,7 @@
return -EINVAL;
}
- err = cma_init_reserved_mem(rmem->base, rmem->size, 0, &cma);
+ err = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name, &cma);
if (err) {
pr_err("Reserved memory: unable to setup CMA region\n");
return err;
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index aa5e22c..7c8f6bf 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -306,7 +306,7 @@
unsigned long vm_flags,
pgprot_t prot, const void *caller)
{
- int i;
+ unsigned long i;
struct page **pages;
void *ptr;
unsigned long pfn;
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index c4af003..5eba478 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -856,7 +856,7 @@
const char *buf, size_t count)
{
struct platform_device *pdev = to_platform_device(dev);
- char *driver_override, *old = pdev->driver_override, *cp;
+ char *driver_override, *old, *cp;
if (count > PATH_MAX)
return -EINVAL;
@@ -869,12 +869,15 @@
if (cp)
*cp = '\0';
+ device_lock(dev);
+ old = pdev->driver_override;
if (strlen(driver_override)) {
pdev->driver_override = driver_override;
} else {
kfree(driver_override);
pdev->driver_override = NULL;
}
+ device_unlock(dev);
kfree(old);
@@ -885,8 +888,12 @@
struct device_attribute *attr, char *buf)
{
struct platform_device *pdev = to_platform_device(dev);
+ ssize_t len;
- return sprintf(buf, "%s\n", pdev->driver_override);
+ device_lock(dev);
+ len = sprintf(buf, "%s\n", pdev->driver_override);
+ device_unlock(dev);
+ return len;
}
static DEVICE_ATTR_RW(driver_override);
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index e023066..8c7d0f3 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -1029,8 +1029,6 @@
spin_unlock_irq(&dev->power.lock);
- dev_pm_domain_set(dev, &genpd->domain);
-
return gpd_data;
err_free:
@@ -1044,8 +1042,6 @@
static void genpd_free_dev_data(struct device *dev,
struct generic_pm_domain_data *gpd_data)
{
- dev_pm_domain_set(dev, NULL);
-
spin_lock_irq(&dev->power.lock);
dev->power.subsys_data->domain_data = NULL;
@@ -1082,6 +1078,8 @@
if (ret)
goto out;
+ dev_pm_domain_set(dev, &genpd->domain);
+
genpd->device_count++;
genpd->max_off_time_changed = true;
@@ -1143,6 +1141,8 @@
if (genpd->detach_dev)
genpd->detach_dev(genpd, dev);
+ dev_pm_domain_set(dev, NULL);
+
list_del_init(&pdd->list_node);
mutex_unlock(&genpd->lock);
@@ -1244,7 +1244,7 @@
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
struct generic_pm_domain *subdomain)
{
- struct gpd_link *link;
+ struct gpd_link *l, *link;
int ret = -EINVAL;
if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
@@ -1260,7 +1260,7 @@
goto out;
}
- list_for_each_entry(link, &genpd->master_links, master_node) {
+ list_for_each_entry_safe(link, l, &genpd->master_links, master_node) {
if (link->slave != subdomain)
continue;
@@ -1607,12 +1607,12 @@
*/
void of_genpd_del_provider(struct device_node *np)
{
- struct of_genpd_provider *cp;
+ struct of_genpd_provider *cp, *tmp;
struct generic_pm_domain *gpd;
mutex_lock(&gpd_list_lock);
mutex_lock(&of_genpd_mutex);
- list_for_each_entry(cp, &of_genpd_providers, link) {
+ list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
if (cp->node == np) {
/*
* For each PM domain associated with the
@@ -1752,14 +1752,14 @@
*/
struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
{
- struct generic_pm_domain *gpd, *genpd = ERR_PTR(-ENOENT);
+ struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
int ret;
if (IS_ERR_OR_NULL(np))
return ERR_PTR(-EINVAL);
mutex_lock(&gpd_list_lock);
- list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
+ list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
if (gpd->provider == &np->fwnode) {
ret = genpd_remove(gpd);
genpd = ret ? ERR_PTR(ret) : gpd;
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 23f3b95..147d2e3 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -889,13 +889,13 @@
unsigned long flags;
int retval;
- might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
-
if (rpmflags & RPM_GET_PUT) {
if (!atomic_dec_and_test(&dev->power.usage_count))
return 0;
}
+ might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
spin_lock_irqsave(&dev->power.lock, flags);
retval = rpm_idle(dev, rpmflags);
spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -921,13 +921,13 @@
unsigned long flags;
int retval;
- might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
-
if (rpmflags & RPM_GET_PUT) {
if (!atomic_dec_and_test(&dev->power.usage_count))
return 0;
}
+ might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
spin_lock_irqsave(&dev->power.lock, flags);
retval = rpm_suspend(dev, rpmflags);
spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -952,7 +952,8 @@
unsigned long flags;
int retval;
- might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+ might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
+ dev->power.runtime_status != RPM_ACTIVE);
if (rpmflags & RPM_GET_PUT)
atomic_inc(&dev->power.usage_count);
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index a7b4679..39efa7e 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -268,6 +268,8 @@
value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
else if (!strcmp(buf, "any") || !strcmp(buf, "any\n"))
value = PM_QOS_LATENCY_ANY;
+ else
+ return -EINVAL;
}
ret = dev_pm_qos_update_user_latency_tolerance(dev, value);
return ret < 0 ? ret : n;
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 270cdd4..90c16d8 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -61,6 +61,8 @@
static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);
+DEFINE_STATIC_SRCU(wakeup_srcu);
+
static struct wakeup_source deleted_ws = {
.name = "deleted",
.lock = __SPIN_LOCK_UNLOCKED(deleted_ws.lock),
@@ -199,7 +201,7 @@
spin_lock_irqsave(&events_lock, flags);
list_del_rcu(&ws->entry);
spin_unlock_irqrestore(&events_lock, flags);
- synchronize_rcu();
+ synchronize_srcu(&wakeup_srcu);
}
EXPORT_SYMBOL_GPL(wakeup_source_remove);
@@ -333,12 +335,12 @@
void device_wakeup_arm_wake_irqs(void)
{
struct wakeup_source *ws;
+ int srcuidx;
- rcu_read_lock();
+ srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu(ws, &wakeup_sources, entry)
dev_pm_arm_wake_irq(ws->wakeirq);
-
- rcu_read_unlock();
+ srcu_read_unlock(&wakeup_srcu, srcuidx);
}
/**
@@ -349,12 +351,12 @@
void device_wakeup_disarm_wake_irqs(void)
{
struct wakeup_source *ws;
+ int srcuidx;
- rcu_read_lock();
+ srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu(ws, &wakeup_sources, entry)
dev_pm_disarm_wake_irq(ws->wakeirq);
-
- rcu_read_unlock();
+ srcu_read_unlock(&wakeup_srcu, srcuidx);
}
/**
@@ -837,10 +839,10 @@
void pm_print_active_wakeup_sources(void)
{
struct wakeup_source *ws;
- int active = 0;
+ int srcuidx, active = 0;
struct wakeup_source *last_activity_ws = NULL;
- rcu_read_lock();
+ srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
if (ws->active) {
pr_info("active wakeup source: %s\n", ws->name);
@@ -856,7 +858,7 @@
if (!active && last_activity_ws)
pr_info("last active wakeup source: %s\n",
last_activity_ws->name);
- rcu_read_unlock();
+ srcu_read_unlock(&wakeup_srcu, srcuidx);
}
EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources);
@@ -983,8 +985,9 @@
{
struct wakeup_source *ws;
ktime_t now = ktime_get();
+ int srcuidx;
- rcu_read_lock();
+ srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
spin_lock_irq(&ws->lock);
if (ws->autosleep_enabled != set) {
@@ -998,7 +1001,7 @@
}
spin_unlock_irq(&ws->lock);
}
- rcu_read_unlock();
+ srcu_read_unlock(&wakeup_srcu, srcuidx);
}
#endif /* CONFIG_PM_AUTOSLEEP */
@@ -1059,15 +1062,16 @@
static int wakeup_sources_stats_show(struct seq_file *m, void *unused)
{
struct wakeup_source *ws;
+ int srcuidx;
seq_puts(m, "name\t\t\t\t\tactive_count\tevent_count\twakeup_count\t"
"expire_count\tactive_since\ttotal_time\tmax_time\t"
"last_change\tprevent_suspend_time\n");
- rcu_read_lock();
+ srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu(ws, &wakeup_sources, entry)
print_wakeup_source_stats(m, ws);
- rcu_read_unlock();
+ srcu_read_unlock(&wakeup_srcu, srcuidx);
print_wakeup_source_stats(m, &deleted_ws);
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 4a80ee7..d6eaaa2 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -609,8 +609,6 @@
unsigned long timeout;
int ret;
- xen_blkif_get(blkif);
-
set_freezable();
while (!kthread_should_stop()) {
if (try_to_freeze())
@@ -665,7 +663,6 @@
print_stats(ring);
ring->xenblkd = NULL;
- xen_blkif_put(blkif);
return 0;
}
@@ -1436,34 +1433,35 @@
static void make_response(struct xen_blkif_ring *ring, u64 id,
unsigned short op, int st)
{
- struct blkif_response resp;
+ struct blkif_response *resp;
unsigned long flags;
union blkif_back_rings *blk_rings;
int notify;
- resp.id = id;
- resp.operation = op;
- resp.status = st;
-
spin_lock_irqsave(&ring->blk_ring_lock, flags);
blk_rings = &ring->blk_rings;
/* Place on the response ring for the relevant domain. */
switch (ring->blkif->blk_protocol) {
case BLKIF_PROTOCOL_NATIVE:
- memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
- &resp, sizeof(resp));
+ resp = RING_GET_RESPONSE(&blk_rings->native,
+ blk_rings->native.rsp_prod_pvt);
break;
case BLKIF_PROTOCOL_X86_32:
- memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
- &resp, sizeof(resp));
+ resp = RING_GET_RESPONSE(&blk_rings->x86_32,
+ blk_rings->x86_32.rsp_prod_pvt);
break;
case BLKIF_PROTOCOL_X86_64:
- memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
- &resp, sizeof(resp));
+ resp = RING_GET_RESPONSE(&blk_rings->x86_64,
+ blk_rings->x86_64.rsp_prod_pvt);
break;
default:
BUG();
}
+
+ resp->id = id;
+ resp->operation = op;
+ resp->status = st;
+
blk_rings->common.rsp_prod_pvt++;
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
spin_unlock_irqrestore(&ring->blk_ring_lock, flags);
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index dea61f6..ecb35fe 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -75,9 +75,8 @@
struct blkif_common_request {
char dummy;
};
-struct blkif_common_response {
- char dummy;
-};
+
+/* i386 protocol version */
struct blkif_x86_32_request_rw {
uint8_t nr_segments; /* number of segments */
@@ -129,14 +128,6 @@
} u;
} __attribute__((__packed__));
-/* i386 protocol version */
-#pragma pack(push, 4)
-struct blkif_x86_32_response {
- uint64_t id; /* copied from request */
- uint8_t operation; /* copied from request */
- int16_t status; /* BLKIF_RSP_??? */
-};
-#pragma pack(pop)
/* x86_64 protocol version */
struct blkif_x86_64_request_rw {
@@ -193,18 +184,12 @@
} u;
} __attribute__((__packed__));
-struct blkif_x86_64_response {
- uint64_t __attribute__((__aligned__(8))) id;
- uint8_t operation; /* copied from request */
- int16_t status; /* BLKIF_RSP_??? */
-};
-
DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
- struct blkif_common_response);
+ struct blkif_response);
DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
- struct blkif_x86_32_response);
+ struct blkif_response __packed);
DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
- struct blkif_x86_64_response);
+ struct blkif_response);
union blkif_back_rings {
struct blkif_back_ring native;
@@ -281,6 +266,7 @@
wait_queue_head_t wq;
atomic_t inflight;
+ bool active;
/* One thread per blkif ring. */
struct task_struct *xenblkd;
unsigned int waiting_reqs;
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 3cc6d1d..5dfe6e8 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -159,7 +159,7 @@
init_waitqueue_head(&ring->shutdown_wq);
ring->blkif = blkif;
ring->st_print = jiffies;
- xen_blkif_get(blkif);
+ ring->active = true;
}
return 0;
@@ -249,10 +249,12 @@
struct xen_blkif_ring *ring = &blkif->rings[r];
unsigned int i = 0;
+ if (!ring->active)
+ continue;
+
if (ring->xenblkd) {
kthread_stop(ring->xenblkd);
wake_up(&ring->shutdown_wq);
- ring->xenblkd = NULL;
}
/* The above kthread_stop() guarantees that at this point we
@@ -296,7 +298,7 @@
BUG_ON(ring->free_pages_num != 0);
BUG_ON(ring->persistent_gnt_c != 0);
WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
- xen_blkif_put(blkif);
+ ring->active = false;
}
blkif->nr_ring_pages = 0;
/*
@@ -313,8 +315,10 @@
static void xen_blkif_free(struct xen_blkif *blkif)
{
- xen_blkif_disconnect(blkif);
+ WARN_ON(xen_blkif_disconnect(blkif));
xen_vbd_free(&blkif->vbd);
+ kfree(blkif->be->mode);
+ kfree(blkif->be);
/* Make sure everything is drained before shutting down */
kmem_cache_free(xen_blkif_cachep, blkif);
@@ -509,8 +513,6 @@
/* Put the reference we set in xen_blkif_alloc(). */
xen_blkif_put(be->blkif);
- kfree(be->mode);
- kfree(be);
return 0;
}
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index c9914d65..3868665 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1271,6 +1271,7 @@
zram->disk->private_data = zram;
snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
+ __set_bit(QUEUE_FLAG_FAST, &zram->disk->queue->queue_flags);
/* Actual capacity set using syfs (/sys/block/zram<id>/disksize */
set_capacity(zram->disk, 0);
/* zram devices sort of resembles non-rotational disks */
diff --git a/drivers/bluetooth/bluetooth-power.c b/drivers/bluetooth/bluetooth-power.c
index bfc3648..f927756 100644
--- a/drivers/bluetooth/bluetooth-power.c
+++ b/drivers/bluetooth/bluetooth-power.c
@@ -708,6 +708,7 @@
} else {
BT_PWR_ERR("BT chip state is already :%d no change d\n"
, pwr_state);
+ ret = 0;
}
break;
default:
diff --git a/drivers/bluetooth/btfm_slim.c b/drivers/bluetooth/btfm_slim.c
index dc9bb0b..8f0e632 100644
--- a/drivers/bluetooth/btfm_slim.c
+++ b/drivers/bluetooth/btfm_slim.c
@@ -127,7 +127,7 @@
if (!btfmslim || !ch)
return -EINVAL;
- BTFMSLIM_DBG("port:%d", ch->port);
+ BTFMSLIM_DBG("port: %d ch: %d", ch->port, ch->ch);
/* Define the channel with below parameters */
prop.prot = SLIM_AUTO_ISO;
@@ -496,9 +496,18 @@
/* Driver specific data allocation */
btfm_slim->dev = &slim->dev;
ret = btfm_slim_register_codec(&slim->dev);
+ if (ret) {
+ BTFMSLIM_ERR("error, registering slimbus codec failed");
+ goto free;
+ }
ret = bt_register_slimdev(&slim->dev);
+ if (ret < 0) {
+ btfm_slim_unregister_codec(&slim->dev);
+ goto free;
+ }
return ret;
-
+free:
+ slim_remove_device(&btfm_slim->slim_ifd);
dealloc:
mutex_destroy(&btfm_slim->io_lock);
mutex_destroy(&btfm_slim->xfer_lock);
diff --git a/drivers/bluetooth/btfm_slim.h b/drivers/bluetooth/btfm_slim.h
index 00d46a5..cc9d14d 100644
--- a/drivers/bluetooth/btfm_slim.h
+++ b/drivers/bluetooth/btfm_slim.h
@@ -68,6 +68,7 @@
uint32_t num_rx_port;
uint32_t num_tx_port;
+ uint32_t sample_rate;
struct btfmslim_ch *rx_chs;
struct btfmslim_ch *tx_chs;
@@ -161,4 +162,12 @@
* 0
*/
int btfm_slim_register_codec(struct device *dev);
+
+/**
+ * btfm_slim_unregister_codec: Unregister codec driver in slimbus device node
+ * @dev: device node
+ * Returns:
+ * VOID
+ */
+void btfm_slim_unregister_codec(struct device *dev);
#endif /* BTFM_SLIM_H */
diff --git a/drivers/bluetooth/btfm_slim_codec.c b/drivers/bluetooth/btfm_slim_codec.c
index 86760cd..309648f 100644
--- a/drivers/bluetooth/btfm_slim_codec.c
+++ b/drivers/bluetooth/btfm_slim_codec.c
@@ -26,6 +26,9 @@
#include <sound/tlv.h>
#include <btfm_slim.h>
+static int bt_soc_enable_status;
+
+
static int btfm_slim_codec_write(struct snd_soc_codec *codec, unsigned int reg,
unsigned int value)
{
@@ -38,8 +41,31 @@
return 0;
}
+static int bt_soc_status_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] = bt_soc_enable_status;
+ return 1;
+}
+
+static int bt_soc_status_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ return 1;
+}
+
+static const struct snd_kcontrol_new status_controls[] = {
+ SOC_SINGLE_EXT("BT SOC status", 0, 0, 1, 0,
+ bt_soc_status_get,
+ bt_soc_status_put)
+
+};
+
+
static int btfm_slim_codec_probe(struct snd_soc_codec *codec)
{
+ snd_soc_add_codec_controls(codec, status_controls,
+ ARRAY_SIZE(status_controls));
return 0;
}
@@ -54,8 +80,8 @@
int ret;
struct btfmslim *btfmslim = dai->dev->platform_data;
- BTFMSLIM_DBG("substream = %s stream = %d",
- substream->name, substream->stream);
+ BTFMSLIM_DBG("substream = %s stream = %d dai->name = %s",
+ substream->name, substream->stream, dai->name);
ret = btfm_slim_hw_init(btfmslim);
return ret;
}
@@ -63,10 +89,48 @@
static void btfm_slim_dai_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
+ int i;
struct btfmslim *btfmslim = dai->dev->platform_data;
+ struct btfmslim_ch *ch;
+ uint8_t rxport, grp = false, nchan = 1;
- BTFMSLIM_DBG("substream = %s stream = %d",
- substream->name, substream->stream);
+ BTFMSLIM_DBG("dai->name: %s, dai->id: %d, dai->rate: %d", dai->name,
+ dai->id, dai->rate);
+
+ switch (dai->id) {
+ case BTFM_FM_SLIM_TX:
+ grp = true; nchan = 2;
+ ch = btfmslim->tx_chs;
+ rxport = 0;
+ break;
+ case BTFM_BT_SCO_SLIM_TX:
+ ch = btfmslim->tx_chs;
+ rxport = 0;
+ break;
+ case BTFM_BT_SCO_A2DP_SLIM_RX:
+ case BTFM_BT_SPLIT_A2DP_SLIM_RX:
+ ch = btfmslim->rx_chs;
+ rxport = 1;
+ break;
+ case BTFM_SLIM_NUM_CODEC_DAIS:
+ default:
+ BTFMSLIM_ERR("dai->id is invalid:%d", dai->id);
+ return;
+ }
+
+ /* Search for dai->id matched port handler */
+ for (i = 0; (i < BTFM_SLIM_NUM_CODEC_DAIS) &&
+ (ch->id != BTFM_SLIM_NUM_CODEC_DAIS) &&
+ (ch->id != dai->id); ch++, i++)
+ ;
+
+ if ((ch->port == BTFM_SLIM_PGD_PORT_LAST) ||
+ (ch->id == BTFM_SLIM_NUM_CODEC_DAIS)) {
+ BTFMSLIM_ERR("ch is invalid!!");
+ return;
+ }
+
+ btfm_slim_disable_ch(btfmslim, ch, rxport, grp, nchan);
btfm_slim_hw_deinit(btfmslim);
}
@@ -74,24 +138,28 @@
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
- BTFMSLIM_DBG("dai_name = %s DAI-ID %x rate %d num_ch %d",
+ BTFMSLIM_DBG("dai->name = %s DAI-ID %x rate %d num_ch %d",
dai->name, dai->id, params_rate(params),
params_channels(params));
return 0;
}
-int btfm_slim_dai_prepare(struct snd_pcm_substream *substream,
+static int btfm_slim_dai_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
int i, ret = -EINVAL;
struct btfmslim *btfmslim = dai->dev->platform_data;
struct btfmslim_ch *ch;
uint8_t rxport, grp = false, nchan = 1;
+ bt_soc_enable_status = 0;
- BTFMSLIM_DBG("dai->name:%s, dai->id: %d, dai->rate: %d", dai->name,
+ BTFMSLIM_DBG("dai->name: %s, dai->id: %d, dai->rate: %d", dai->name,
dai->id, dai->rate);
+ /* save sample rate */
+ btfmslim->sample_rate = dai->rate;
+
switch (dai->id) {
case BTFM_FM_SLIM_TX:
grp = true; nchan = 2;
@@ -126,53 +194,10 @@
}
ret = btfm_slim_enable_ch(btfmslim, ch, rxport, dai->rate, grp, nchan);
- return ret;
-}
-int btfm_slim_dai_hw_free(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- int i, ret = -EINVAL;
- struct btfmslim *btfmslim = dai->dev->platform_data;
- struct btfmslim_ch *ch;
- uint8_t rxport, grp = false, nchan = 1;
-
- BTFMSLIM_DBG("dai->name:%s, dai->id: %d, dai->rate: %d", dai->name,
- dai->id, dai->rate);
-
- switch (dai->id) {
- case BTFM_FM_SLIM_TX:
- grp = true; nchan = 2;
- ch = btfmslim->tx_chs;
- rxport = 0;
- break;
- case BTFM_BT_SCO_SLIM_TX:
- ch = btfmslim->tx_chs;
- rxport = 0;
- break;
- case BTFM_BT_SCO_A2DP_SLIM_RX:
- case BTFM_BT_SPLIT_A2DP_SLIM_RX:
- ch = btfmslim->rx_chs;
- rxport = 1;
- break;
- case BTFM_SLIM_NUM_CODEC_DAIS:
- default:
- BTFMSLIM_ERR("dai->id is invalid:%d", dai->id);
- return ret;
- }
-
- /* Search for dai->id matched port handler */
- for (i = 0; (i < BTFM_SLIM_NUM_CODEC_DAIS) &&
- (ch->id != BTFM_SLIM_NUM_CODEC_DAIS) &&
- (ch->id != dai->id); ch++, i++)
- ;
-
- if ((ch->port == BTFM_SLIM_PGD_PORT_LAST) ||
- (ch->id == BTFM_SLIM_NUM_CODEC_DAIS)) {
- BTFMSLIM_ERR("ch is invalid!!");
- return ret;
- }
- ret = btfm_slim_disable_ch(btfmslim, ch, rxport, grp, nchan);
+ /* save the enable channel status */
+ if (ret == 0)
+ bt_soc_enable_status = 1;
return ret;
}
@@ -282,6 +307,9 @@
*tx_num = 0;
*rx_num = num;
break;
+ default:
+ BTFMSLIM_ERR("Unsupported DAI %d", dai->id);
+ return -EINVAL;
}
do {
@@ -318,7 +346,6 @@
.shutdown = btfm_slim_dai_shutdown,
.hw_params = btfm_slim_dai_hw_params,
.prepare = btfm_slim_dai_prepare,
- .hw_free = btfm_slim_dai_hw_free,
.set_channel_map = btfm_slim_dai_set_channel_map,
.get_channel_map = btfm_slim_dai_get_channel_map,
};
@@ -387,7 +414,7 @@
static struct snd_soc_codec_driver btfmslim_codec = {
.probe = btfm_slim_codec_probe,
.remove = btfm_slim_codec_remove,
- .read = btfm_slim_codec_read,
+ .read = btfm_slim_codec_read,
.write = btfm_slim_codec_write,
};
@@ -406,5 +433,12 @@
return ret;
}
+void btfm_slim_unregister_codec(struct device *dev)
+{
+ BTFMSLIM_DBG("");
+ /* Unregister Codec driver */
+ snd_soc_unregister_codec(dev);
+}
+
MODULE_DESCRIPTION("BTFM Slimbus Codec driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/bluetooth/btfm_slim_wcn3990.c b/drivers/bluetooth/btfm_slim_wcn3990.c
index c2d5b7b..3d66fff 100644
--- a/drivers/bluetooth/btfm_slim_wcn3990.c
+++ b/drivers/bluetooth/btfm_slim_wcn3990.c
@@ -39,6 +39,7 @@
{
int ret = 0;
uint8_t reg_val;
+ uint16_t reg;
BTFMSLIM_DBG("");
@@ -46,20 +47,20 @@
return -EINVAL;
/* Get SB_SLAVE_HW_REV_MSB value*/
- ret = btfm_slim_read(btfmslim, CHRK_SB_SLAVE_HW_REV_MSB, 1,
- ®_val, IFD);
+ reg = CHRK_SB_SLAVE_HW_REV_MSB;
+ ret = btfm_slim_read(btfmslim, reg, 1, ®_val, IFD);
if (ret) {
- BTFMSLIM_ERR("failed to read (%d)", ret);
+ BTFMSLIM_ERR("failed to read (%d) reg 0x%x", ret, reg);
goto error;
}
BTFMSLIM_DBG("Major Rev: 0x%x, Minor Rev: 0x%x",
(reg_val & 0xF0) >> 4, (reg_val & 0x0F));
/* Get SB_SLAVE_HW_REV_LSB value*/
- ret = btfm_slim_read(btfmslim, CHRK_SB_SLAVE_HW_REV_LSB, 1,
- ®_val, IFD);
+ reg = CHRK_SB_SLAVE_HW_REV_LSB;
+ ret = btfm_slim_read(btfmslim, reg, 1, ®_val, IFD);
if (ret) {
- BTFMSLIM_ERR("failed to read (%d)", ret);
+ BTFMSLIM_ERR("failed to read (%d) reg 0x%x", ret, reg);
goto error;
}
BTFMSLIM_DBG("Step Rev: 0x%x", reg_val);
@@ -68,62 +69,87 @@
return ret;
}
+static inline int is_fm_port(uint8_t port_num)
+{
+ if (port_num == CHRK_SB_PGD_PORT_TX1_FM ||
+ port_num == CHRK_SB_PGD_PORT_TX2_FM)
+ return 1;
+ else
+ return 0;
+}
int btfm_slim_chrk_enable_port(struct btfmslim *btfmslim, uint8_t port_num,
uint8_t rxport, uint8_t enable)
{
int ret = 0;
- uint8_t reg_val = 0;
+ uint8_t reg_val = 0, en;
+ uint8_t port_bit = 0;
uint16_t reg;
- BTFMSLIM_DBG("enable(%d)", enable);
+ BTFMSLIM_DBG("port(%d) enable(%d)", port_num, enable);
if (rxport) {
- /* Port enable */
- reg = CHRK_SB_PGD_PORT_RX_CFGN(port_num - 0x10);
- } else { /* txport */
- /* Multiple Channel Setting - only FM Tx will be multiple
- * channel
- */
- if (enable && (port_num == CHRK_SB_PGD_PORT_TX1_FM ||
- port_num == CHRK_SB_PGD_PORT_TX2_FM)) {
-
- reg_val = (0x1 << CHRK_SB_PGD_PORT_TX1_FM) |
- (0x1 << CHRK_SB_PGD_PORT_TX2_FM);
- reg = CHRK_SB_PGD_TX_PORTn_MULTI_CHNL_0(port_num);
+ if (enable) {
+ /* For SCO Rx, A2DP Rx */
+ reg_val = 0x1;
+ port_bit = port_num - 0x10;
+ reg = CHRK_SB_PGD_RX_PORTn_MULTI_CHNL_0(port_bit);
+ BTFMSLIM_DBG("writing reg_val (%d) to reg(%x)",
+ reg_val, reg);
ret = btfm_slim_write(btfmslim, reg, 1, ®_val, IFD);
if (ret) {
- BTFMSLIM_ERR("failed to write (%d)", ret);
+ BTFMSLIM_ERR("failed to write (%d) reg 0x%x",
+ ret, reg);
goto error;
}
}
+ /* Port enable */
+ reg = CHRK_SB_PGD_PORT_RX_CFGN(port_num - 0x10);
+ goto enable_disable_rxport;
+ }
+ if (!enable)
+ goto enable_disable_txport;
- /* Enable Tx port hw auto recovery for underrun or
- * overrun error
- */
- reg_val = (enable) ? (CHRK_ENABLE_OVERRUN_AUTO_RECOVERY |
- CHRK_ENABLE_UNDERRUN_AUTO_RECOVERY) : 0x0;
-
- ret = btfm_slim_write(btfmslim,
- CHRK_SB_PGD_PORT_TX_OR_UR_CFGN(port_num), 1,
- ®_val, IFD);
+ /* txport */
+ /* Multiple Channel Setting */
+ if (is_fm_port(port_num)) {
+ reg_val = (0x1 << CHRK_SB_PGD_PORT_TX1_FM) |
+ (0x1 << CHRK_SB_PGD_PORT_TX2_FM);
+ reg = CHRK_SB_PGD_TX_PORTn_MULTI_CHNL_0(port_num);
+ ret = btfm_slim_write(btfmslim, reg, 1, ®_val, IFD);
if (ret) {
- BTFMSLIM_ERR("failed to write (%d)", ret);
+ BTFMSLIM_ERR("failed to write (%d) reg 0x%x", ret, reg);
goto error;
}
-
- /* Port enable */
- reg = CHRK_SB_PGD_PORT_TX_CFGN(port_num);
}
+ /* Enable Tx port hw auto recovery for underrun or overrun error */
+ reg_val = (CHRK_ENABLE_OVERRUN_AUTO_RECOVERY |
+ CHRK_ENABLE_UNDERRUN_AUTO_RECOVERY);
+ reg = CHRK_SB_PGD_PORT_TX_OR_UR_CFGN(port_num);
+ ret = btfm_slim_write(btfmslim, reg, 1, ®_val, IFD);
+ if (ret) {
+ BTFMSLIM_ERR("failed to write (%d) reg 0x%x", ret, reg);
+ goto error;
+ }
+
+enable_disable_txport:
+ /* Port enable */
+ reg = CHRK_SB_PGD_PORT_TX_CFGN(port_num);
+
+enable_disable_rxport:
if (enable)
- /* Set water mark to 1 and enable the port */
- reg_val = CHRK_SB_PGD_PORT_ENABLE | CHRK_SB_PGD_PORT_WM_LB;
+ en = CHRK_SB_PGD_PORT_ENABLE;
else
- reg_val = CHRK_SB_PGD_PORT_DISABLE;
+ en = CHRK_SB_PGD_PORT_DISABLE;
+
+ if (is_fm_port(port_num))
+ reg_val = en | CHRK_SB_PGD_PORT_WM_L8;
+ else
+ reg_val = enable ? en | CHRK_SB_PGD_PORT_WM_LB : en;
ret = btfm_slim_write(btfmslim, reg, 1, ®_val, IFD);
if (ret)
- BTFMSLIM_ERR("failed to write (%d)", ret);
+ BTFMSLIM_ERR("failed to write (%d) reg 0x%x", ret, reg);
error:
return ret;
diff --git a/drivers/bluetooth/btfm_slim_wcn3990.h b/drivers/bluetooth/btfm_slim_wcn3990.h
index 6bbdb6b..b2723ff 100644
--- a/drivers/bluetooth/btfm_slim_wcn3990.h
+++ b/drivers/bluetooth/btfm_slim_wcn3990.h
@@ -68,6 +68,7 @@
#define CHRK_SB_PGD_PORT_WM_L1 (0x1 << 1)
#define CHRK_SB_PGD_PORT_WM_L2 (0x2 << 1)
#define CHRK_SB_PGD_PORT_WM_L3 (0x3 << 1)
+#define CHRK_SB_PGD_PORT_WM_L8 (0x8 << 1)
#define CHRK_SB_PGD_PORT_WM_LB (0xB << 1)
#define CHRK_SB_PGD_PORT_RX_NUM 16
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 3e1367a..49fb8e5 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -604,5 +604,12 @@
applications DSP processor. Say M if you want to enable this
module.
+config MSM_RDBG
+ tristate "QTI Remote debug driver"
+ help
+ Implements a shared memory based transport mechanism that allows
+ for a debugger running on a host PC to communicate with a remote
+ stub running on peripheral subsystems such as the ADSP, MODEM etc.
+
endmenu
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index b73165a..19c3c98 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -65,3 +65,4 @@
ifdef CONFIG_COMPAT
obj-$(CONFIG_MSM_ADSPRPC) += adsprpc_compat.o
endif
+obj-$(CONFIG_MSM_RDBG) += rdbg.o
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 9102df7..2ede69e 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -58,7 +58,8 @@
#define BALIGN 128
#define NUM_CHANNELS 4 /* adsp, mdsp, slpi, cdsp*/
#define NUM_SESSIONS 9 /*8 compute, 1 cpz*/
-#define M_FDLIST 16
+#define M_FDLIST (16)
+#define M_CRCLIST (64)
#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)
@@ -172,6 +173,7 @@
struct overlap *overs;
struct overlap **overps;
struct smq_msg msg;
+ uint32_t *crc;
};
struct fastrpc_ctx_lst {
@@ -209,6 +211,7 @@
struct device *dev;
struct fastrpc_session_ctx session[NUM_SESSIONS];
struct completion work;
+ struct completion workport;
struct notifier_block nb;
struct kref kref;
int sesscount;
@@ -281,6 +284,7 @@
int cid;
int ssrcount;
int pd;
+ int file_close;
struct fastrpc_apps *apps;
struct fastrpc_perf perf;
struct dentry *debugfs_file;
@@ -477,7 +481,7 @@
if (!IS_ERR_OR_NULL(map->handle))
ion_free(fl->apps->client, map->handle);
- if (sess->smmu.enabled) {
+ if (sess && sess->smmu.enabled) {
if (map->size || map->phys)
msm_dma_unmap_sg(sess->dev,
map->table->sgl,
@@ -555,7 +559,9 @@
sess = fl->secsctx;
else
sess = fl->sctx;
-
+ VERIFY(err, !IS_ERR_OR_NULL(sess));
+ if (err)
+ goto bail;
VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd)));
if (err)
goto bail;
@@ -570,6 +576,13 @@
goto bail;
if (sess->smmu.enabled) {
attrs = DMA_ATTR_EXEC_MAPPING;
+
+ if (map->attr & FASTRPC_ATTR_NON_COHERENT ||
+ (sess->smmu.coherent && map->uncached))
+ attrs |= DMA_ATTR_FORCE_NON_COHERENT;
+ else if (map->attr & FASTRPC_ATTR_COHERENT)
+ attrs |= DMA_ATTR_FORCE_COHERENT;
+
VERIFY(err, map->table->nents ==
msm_dma_map_sg_attrs(sess->dev,
map->table->sgl, map->table->nents,
@@ -681,7 +694,7 @@
static int context_restore_interrupted(struct fastrpc_file *fl,
- struct fastrpc_ioctl_invoke_attrs *inv,
+ struct fastrpc_ioctl_invoke_crc *inv,
struct smq_invoke_ctx **po)
{
int err = 0;
@@ -788,7 +801,7 @@
static void context_free(struct smq_invoke_ctx *ctx);
static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
- struct fastrpc_ioctl_invoke_attrs *invokefd,
+ struct fastrpc_ioctl_invoke_crc *invokefd,
struct smq_invoke_ctx **po)
{
int err = 0, bufs, size = 0;
@@ -834,7 +847,7 @@
if (err)
goto bail;
}
-
+ ctx->crc = (uint32_t *)invokefd->crc;
ctx->sc = invoke->sc;
if (bufs) {
VERIFY(err, 0 == context_build_overlap(ctx));
@@ -993,6 +1006,7 @@
int err = 0;
int mflags = 0;
uint64_t *fdlist;
+ uint32_t *crclist;
/* calculate size of the metadata */
rpra = 0;
@@ -1018,7 +1032,9 @@
goto bail;
ipage += 1;
}
- metalen = copylen = (ssize_t)&ipage[0] + (sizeof(uint64_t) * M_FDLIST);
+ metalen = copylen = (ssize_t)&ipage[0] + (sizeof(uint64_t) * M_FDLIST) +
+ (sizeof(uint32_t) * M_CRCLIST);
+
/* calculate len requreed for copying */
for (oix = 0; oix < inbufs + outbufs; ++oix) {
int i = ctx->overps[oix]->raix;
@@ -1049,6 +1065,9 @@
if (err)
goto bail;
}
+ if (ctx->buf->virt && metalen <= copylen)
+ memset(ctx->buf->virt, 0, metalen);
+
/* copy metadata */
rpra = ctx->buf->virt;
ctx->rpra = rpra;
@@ -1112,6 +1131,8 @@
fdlist = (uint64_t *)&pages[bufs + handles];
for (i = 0; i < M_FDLIST; i++)
fdlist[i] = 0;
+ crclist = (uint32_t *)&fdlist[M_FDLIST];
+ memset(crclist, 0, sizeof(uint32_t)*M_CRCLIST);
/* copy non ion buffers */
PERF(ctx->fl->profile, ctx->fl->perf.copy,
@@ -1159,10 +1180,14 @@
int i = ctx->overps[oix]->raix;
struct fastrpc_mmap *map = ctx->maps[i];
- if (ctx->fl->sctx->smmu.coherent)
- continue;
if (map && map->uncached)
continue;
+ if (ctx->fl->sctx->smmu.coherent &&
+ !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
+ continue;
+ if (map && (map->attr & FASTRPC_ATTR_COHERENT))
+ continue;
+
if (rpra[i].buf.len && ctx->overps[oix]->mstart)
dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv),
uint64_to_ptr(rpra[i].buf.pv + rpra[i].buf.len));
@@ -1191,6 +1216,8 @@
struct smq_phy_page *pages;
struct fastrpc_mmap *mmap;
uint64_t *fdlist;
+ uint32_t *crclist = NULL;
+
remote_arg64_t *rpra = ctx->rpra;
int i, inbufs, outbufs, handles;
int err = 0;
@@ -1201,6 +1228,8 @@
list = smq_invoke_buf_start(ctx->rpra, sc);
pages = smq_phy_page_start(sc, list);
fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
+ crclist = (uint32_t *)(fdlist + M_FDLIST);
+
for (i = inbufs; i < inbufs + outbufs; ++i) {
if (!ctx->maps[i]) {
K_COPY_TO_USER(err, kernel,
@@ -1223,6 +1252,10 @@
fastrpc_mmap_free(mmap);
}
}
+ if (ctx->crc && crclist && rpra)
+ K_COPY_TO_USER(err, kernel, (void __user *)ctx->crc,
+ crclist, M_CRCLIST*sizeof(uint32_t));
+
bail:
return err;
}
@@ -1243,6 +1276,12 @@
continue;
if (!rpra[i].buf.len)
continue;
+ if (ctx->fl->sctx->smmu.coherent &&
+ !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
+ continue;
+ if (map && (map->attr & FASTRPC_ATTR_COHERENT))
+ continue;
+
if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
buf_page_start(rpra[i].buf.pv))
continue;
@@ -1273,6 +1312,12 @@
continue;
if (!rpra[i].buf.len)
continue;
+ if (ctx->fl->sctx->smmu.coherent &&
+ !(map && (map->attr & FASTRPC_ATTR_NON_COHERENT)))
+ continue;
+ if (map && (map->attr & FASTRPC_ATTR_COHERENT))
+ continue;
+
if (buf_page_start(ptr_to_uint64((void *)rpra)) ==
buf_page_start(rpra[i].buf.pv)) {
continue;
@@ -1337,6 +1382,7 @@
me->channel = &gcinfo[0];
for (i = 0; i < NUM_CHANNELS; i++) {
init_completion(&me->channel[i].work);
+ init_completion(&me->channel[i].workport);
me->channel[i].sesscount = 0;
}
}
@@ -1345,7 +1391,7 @@
static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
uint32_t kernel,
- struct fastrpc_ioctl_invoke_attrs *inv)
+ struct fastrpc_ioctl_invoke_crc *inv)
{
struct smq_invoke_ctx *ctx = 0;
struct fastrpc_ioctl_invoke *invoke = &inv->inv;
@@ -1356,6 +1402,14 @@
if (fl->profile)
getnstimeofday(&invoket);
+
+ VERIFY(err, fl->sctx != NULL);
+ if (err)
+ goto bail;
+ VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
+ if (err)
+ goto bail;
+
if (!kernel) {
VERIFY(err, 0 == context_restore_interrupted(fl, inv,
&ctx));
@@ -1436,7 +1490,7 @@
struct fastrpc_ioctl_init_attrs *uproc)
{
int err = 0;
- struct fastrpc_ioctl_invoke_attrs ioctl;
+ struct fastrpc_ioctl_invoke_crc ioctl;
struct fastrpc_ioctl_init *init = &uproc->init;
struct smq_phy_page pages[1];
struct fastrpc_mmap *file = 0, *mem = 0;
@@ -1455,6 +1509,7 @@
ioctl.inv.pra = ra;
ioctl.fds = 0;
ioctl.attrs = 0;
+ ioctl.crc = NULL;
fl->pd = 0;
VERIFY(err, !(err = fastrpc_internal_invoke(fl,
FASTRPC_MODE_PARALLEL, 1, &ioctl)));
@@ -1524,6 +1579,7 @@
ioctl.inv.pra = ra;
ioctl.fds = fds;
ioctl.attrs = 0;
+ ioctl.crc = NULL;
VERIFY(err, !(err = fastrpc_internal_invoke(fl,
FASTRPC_MODE_PARALLEL, 1, &ioctl)));
if (err)
@@ -1542,7 +1598,7 @@
static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
{
int err = 0;
- struct fastrpc_ioctl_invoke_attrs ioctl;
+ struct fastrpc_ioctl_invoke_crc ioctl;
remote_arg_t ra[1];
int tgid = 0;
@@ -1560,6 +1616,7 @@
ioctl.inv.pra = ra;
ioctl.fds = 0;
ioctl.attrs = 0;
+ ioctl.crc = NULL;
VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
FASTRPC_MODE_PARALLEL, 1, &ioctl)));
bail:
@@ -1569,7 +1626,7 @@
static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
struct fastrpc_mmap *map)
{
- struct fastrpc_ioctl_invoke_attrs ioctl;
+ struct fastrpc_ioctl_invoke_crc ioctl;
struct smq_phy_page page;
int num = 1;
remote_arg_t ra[3];
@@ -1606,6 +1663,7 @@
ioctl.inv.pra = ra;
ioctl.fds = 0;
ioctl.attrs = 0;
+ ioctl.crc = NULL;
VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
FASTRPC_MODE_PARALLEL, 1, &ioctl)));
map->raddr = (uintptr_t)routargs.vaddrout;
@@ -1616,7 +1674,7 @@
static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
struct fastrpc_mmap *map)
{
- struct fastrpc_ioctl_invoke_attrs ioctl;
+ struct fastrpc_ioctl_invoke_crc ioctl;
remote_arg_t ra[1];
int err = 0;
struct {
@@ -1639,6 +1697,7 @@
ioctl.inv.pra = ra;
ioctl.fds = 0;
ioctl.attrs = 0;
+ ioctl.crc = NULL;
VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
FASTRPC_MODE_PARALLEL, 1, &ioctl)));
return err;
@@ -1703,6 +1762,8 @@
cid = ctx - &gcinfo[0];
fastrpc_glink_close(ctx->chan, cid);
ctx->chan = 0;
+ glink_unregister_link_state_cb(ctx->link.link_notify_handle);
+ ctx->link.link_notify_handle = NULL;
mutex_unlock(&me->smd_mutex);
pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
MAJOR(me->dev_no), cid);
@@ -1780,7 +1841,7 @@
switch (event) {
case GLINK_CONNECTED:
link->port_state = FASTRPC_LINK_CONNECTED;
- complete(&me->channel[cid].work);
+ complete(&me->channel[cid].workport);
break;
case GLINK_LOCAL_DISCONNECTED:
link->port_state = FASTRPC_LINK_DISCONNECTED;
@@ -1838,6 +1899,9 @@
return 0;
}
(void)fastrpc_release_current_dsp_process(fl);
+ spin_lock(&fl->hlock);
+ fl->file_close = 1;
+ spin_unlock(&fl->hlock);
fastrpc_context_list_dtor(fl);
fastrpc_buf_list_free(fl);
hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
@@ -1930,8 +1994,7 @@
return;
link = &gfa.channel[cid].link;
- if (link->port_state == FASTRPC_LINK_CONNECTED ||
- link->port_state == FASTRPC_LINK_CONNECTING) {
+ if (link->port_state == FASTRPC_LINK_CONNECTED) {
link->port_state = FASTRPC_LINK_DISCONNECTING;
glink_close(chan);
}
@@ -2113,7 +2176,8 @@
if (err)
goto bail;
- VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
+ VERIFY(err,
+ wait_for_completion_timeout(&me->channel[cid].workport,
RPC_TIMEOUT));
if (err) {
me->channel[cid].chan = 0;
@@ -2122,6 +2186,11 @@
kref_init(&me->channel[cid].kref);
pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
MAJOR(me->dev_no), cid);
+ err = glink_queue_rx_intent(me->channel[cid].chan, NULL, 16);
+ err |= glink_queue_rx_intent(me->channel[cid].chan, NULL, 64);
+ if (err)
+ pr_warn("adsprpc: initial intent fail for %d err %d\n",
+ cid, err);
if (me->channel[cid].ssrcount !=
me->channel[cid].prevssrcount) {
me->channel[cid].prevssrcount =
@@ -2185,6 +2254,9 @@
if (err)
goto bail;
}
+ VERIFY(err, fl->sctx != NULL);
+ if (err)
+ goto bail;
*info = (fl->sctx->smmu.enabled ? 1 : 0);
bail:
return err;
@@ -2194,7 +2266,7 @@
unsigned long ioctl_param)
{
union {
- struct fastrpc_ioctl_invoke_attrs inv;
+ struct fastrpc_ioctl_invoke_crc inv;
struct fastrpc_ioctl_mmap mmap;
struct fastrpc_ioctl_munmap munmap;
struct fastrpc_ioctl_init_attrs init;
@@ -2207,10 +2279,20 @@
p.inv.fds = 0;
p.inv.attrs = 0;
+ p.inv.crc = NULL;
+ spin_lock(&fl->hlock);
+ if (fl->file_close == 1) {
+ err = EBADF;
+ pr_warn("ADSPRPC: fastrpc_device_release is happening, So not sending any new requests to DSP");
+ spin_unlock(&fl->hlock);
+ goto bail;
+ }
+ spin_unlock(&fl->hlock);
switch (ioctl_num) {
case FASTRPC_IOCTL_INVOKE:
size = sizeof(struct fastrpc_ioctl_invoke);
+ /* fall through */
case FASTRPC_IOCTL_INVOKE_FD:
if (!size)
size = sizeof(struct fastrpc_ioctl_invoke_fd);
@@ -2218,6 +2300,10 @@
case FASTRPC_IOCTL_INVOKE_ATTRS:
if (!size)
size = sizeof(struct fastrpc_ioctl_invoke_attrs);
+ /* fall through */
+ case FASTRPC_IOCTL_INVOKE_CRC:
+ if (!size)
+ size = sizeof(struct fastrpc_ioctl_invoke_crc);
VERIFY(err, 0 == copy_from_user(&p.inv, param, size));
if (err)
goto bail;
@@ -2403,7 +2489,7 @@
start = 0x60000000;
VERIFY(err, !IS_ERR_OR_NULL(sess->smmu.mapping =
arm_iommu_create_mapping(&platform_bus_type,
- start, 0x7fffffff)));
+ start, 0x78000000)));
if (err)
goto bail;
diff --git a/drivers/char/adsprpc_compat.c b/drivers/char/adsprpc_compat.c
index 8e72b4d..078b4d9 100644
--- a/drivers/char/adsprpc_compat.c
+++ b/drivers/char/adsprpc_compat.c
@@ -36,6 +36,8 @@
_IOWR('R', 9, struct compat_fastrpc_ioctl_perf)
#define COMPAT_FASTRPC_IOCTL_INIT_ATTRS \
_IOWR('R', 10, struct compat_fastrpc_ioctl_init_attrs)
+#define COMPAT_FASTRPC_IOCTL_INVOKE_CRC \
+ _IOWR('R', 11, struct compat_fastrpc_ioctl_invoke_crc)
struct compat_remote_buf {
compat_uptr_t pv; /* buffer pointer */
@@ -64,6 +66,13 @@
compat_uptr_t attrs; /* attribute list */
};
+struct compat_fastrpc_ioctl_invoke_crc {
+ struct compat_fastrpc_ioctl_invoke inv;
+ compat_uptr_t fds; /* fd list */
+ compat_uptr_t attrs; /* attribute list */
+ compat_uptr_t crc; /* crc list */
+};
+
struct compat_fastrpc_ioctl_mmap {
compat_int_t fd; /* ion fd */
compat_uint_t flags; /* flags for dsp to map with */
@@ -100,14 +109,14 @@
};
static int compat_get_fastrpc_ioctl_invoke(
- struct compat_fastrpc_ioctl_invoke_attrs __user *inv32,
- struct fastrpc_ioctl_invoke_attrs __user **inva,
+ struct compat_fastrpc_ioctl_invoke_crc __user *inv32,
+ struct fastrpc_ioctl_invoke_crc __user **inva,
unsigned int cmd)
{
compat_uint_t u, sc;
compat_ssize_t s;
compat_uptr_t p;
- struct fastrpc_ioctl_invoke_attrs *inv;
+ struct fastrpc_ioctl_invoke_crc *inv;
union compat_remote_arg *pra32;
union remote_arg *pra;
int err, len, j;
@@ -146,10 +155,16 @@
err |= put_user(p, (compat_uptr_t *)&inv->fds);
}
err |= put_user(NULL, &inv->attrs);
- if (cmd == COMPAT_FASTRPC_IOCTL_INVOKE_ATTRS) {
+ if ((cmd == COMPAT_FASTRPC_IOCTL_INVOKE_ATTRS) ||
+ (cmd == COMPAT_FASTRPC_IOCTL_INVOKE_CRC)) {
err |= get_user(p, &inv32->attrs);
err |= put_user(p, (compat_uptr_t *)&inv->attrs);
}
+ err |= put_user(NULL, (compat_uptr_t __user **)&inv->crc);
+ if (cmd == COMPAT_FASTRPC_IOCTL_INVOKE_CRC) {
+ err |= get_user(p, &inv32->crc);
+ err |= put_user(p, (compat_uptr_t __user *)&inv->crc);
+ }
*inva = inv;
return err;
@@ -273,9 +288,10 @@
case COMPAT_FASTRPC_IOCTL_INVOKE:
case COMPAT_FASTRPC_IOCTL_INVOKE_FD:
case COMPAT_FASTRPC_IOCTL_INVOKE_ATTRS:
+ case COMPAT_FASTRPC_IOCTL_INVOKE_CRC:
{
- struct compat_fastrpc_ioctl_invoke_attrs __user *inv32;
- struct fastrpc_ioctl_invoke_attrs __user *inv;
+ struct compat_fastrpc_ioctl_invoke_crc __user *inv32;
+ struct fastrpc_ioctl_invoke_crc __user *inv;
inv32 = compat_ptr(arg);
VERIFY(err, 0 == compat_get_fastrpc_ioctl_invoke(inv32,
@@ -283,7 +299,7 @@
if (err)
return err;
return filp->f_op->unlocked_ioctl(filp,
- FASTRPC_IOCTL_INVOKE_ATTRS, (unsigned long)inv);
+ FASTRPC_IOCTL_INVOKE_CRC, (unsigned long)inv);
}
case COMPAT_FASTRPC_IOCTL_MMAP:
{
diff --git a/drivers/char/adsprpc_shared.h b/drivers/char/adsprpc_shared.h
index 7175b9e..0441451 100644
--- a/drivers/char/adsprpc_shared.h
+++ b/drivers/char/adsprpc_shared.h
@@ -27,6 +27,7 @@
#define FASTRPC_IOCTL_GETINFO _IOWR('R', 8, uint32_t)
#define FASTRPC_IOCTL_GETPERF _IOWR('R', 9, struct fastrpc_ioctl_perf)
#define FASTRPC_IOCTL_INIT_ATTRS _IOWR('R', 10, struct fastrpc_ioctl_init_attrs)
+#define FASTRPC_IOCTL_INVOKE_CRC _IOWR('R', 11, struct fastrpc_ioctl_invoke_crc)
#define FASTRPC_GLINK_GUID "fastrpcglink-apps-dsp"
#define FASTRPC_SMD_GUID "fastrpcsmd-apps-dsp"
@@ -35,6 +36,12 @@
/* Set for buffers that have no virtual mapping in userspace */
#define FASTRPC_ATTR_NOVA 0x1
+/* Set for buffers that are NOT dma coherent */
+#define FASTRPC_ATTR_NON_COHERENT 0x2
+
+/* Set for buffers that are dma coherent */
+#define FASTRPC_ATTR_COHERENT 0x4
+
/* Driver should operate in parallel with the co-processor */
#define FASTRPC_MODE_PARALLEL 0
@@ -93,7 +100,7 @@
#define VERIFY(err, val) \
do {\
VERIFY_IPRINTF(__FILE_LINE__"info: calling: " #val "\n");\
- if (0 == (val)) {\
+ if ((val) == 0) {\
(err) = (err) == 0 ? -1 : (err);\
VERIFY_EPRINTF(__FILE_LINE__"error: %d: " #val "\n", (err));\
} else {\
@@ -156,6 +163,13 @@
unsigned int *attrs; /* attribute list */
};
+struct fastrpc_ioctl_invoke_crc {
+ struct fastrpc_ioctl_invoke inv;
+ int *fds; /* fd list */
+ unsigned int *attrs; /* attribute list */
+ unsigned int *crc;
+};
+
struct fastrpc_ioctl_init {
uint32_t flags; /* one of FASTRPC_INIT_* macros */
uintptr_t __user file; /* pointer to elf file */
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index 8f0597f..e2d39e7 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -2208,11 +2208,28 @@
{
struct list_head *start, *temp;
struct diag_dci_client_tbl *entry = NULL;
+ struct pid *pid_struct = NULL;
+ struct task_struct *task_s = NULL;
list_for_each_safe(start, temp, &driver->dci_client_list) {
entry = list_entry(start, struct diag_dci_client_tbl, track);
- if (entry->client->tgid == tgid)
- return entry;
+ pid_struct = find_get_pid(entry->tgid);
+ if (!pid_struct) {
+ DIAG_LOG(DIAG_DEBUG_DCI,
+ "diag: valid pid doesn't exist for pid = %d\n",
+ entry->tgid);
+ continue;
+ }
+ task_s = get_pid_task(pid_struct, PIDTYPE_PID);
+ if (!task_s) {
+ DIAG_LOG(DIAG_DEBUG_DCI,
+ "diag: valid task doesn't exist for pid = %d\n",
+ entry->tgid);
+ continue;
+ }
+ if (task_s == entry->client)
+ if (entry->client->tgid == tgid)
+ return entry;
}
return NULL;
}
diff --git a/drivers/char/diag/diag_debugfs.c b/drivers/char/diag/diag_debugfs.c
index 89fba64..177bbdb 100644
--- a/drivers/char/diag/diag_debugfs.c
+++ b/drivers/char/diag/diag_debugfs.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -69,6 +69,7 @@
"Uses Device Tree: %d\n"
"Apps Supports Separate CMDRSP: %d\n"
"Apps Supports HDLC Encoding: %d\n"
+ "Apps Supports Header Untagging: %d\n"
"Apps Supports Sockets: %d\n"
"Logging Mode: %d\n"
"RSP Buffer is Busy: %d\n"
@@ -83,6 +84,7 @@
driver->use_device_tree,
driver->supports_separate_cmdrsp,
driver->supports_apps_hdlc_encoding,
+ driver->supports_apps_header_untagging,
driver->supports_sockets,
driver->logging_mode,
driver->rsp_buf_busy,
@@ -94,7 +96,7 @@
for (i = 0; i < NUM_PERIPHERALS; i++) {
ret += scnprintf(buf+ret, buf_size-ret,
- "p: %s Feature: %02x %02x |%c%c%c%c%c%c%c%c|\n",
+ "p: %s Feature: %02x %02x |%c%c%c%c%c%c%c%c%c|\n",
PERIPHERAL_STRING(i),
driver->feature[i].feature_mask[0],
driver->feature[i].feature_mask[1],
@@ -105,7 +107,8 @@
driver->feature[i].mask_centralization ? 'M':'m',
driver->feature[i].stm_support ? 'Q':'q',
driver->feature[i].sockets_enabled ? 'S':'s',
- driver->feature[i].sent_feature_mask ? 'T':'t');
+ driver->feature[i].sent_feature_mask ? 'T':'t',
+ driver->feature[i].untag_header ? 'U':'u');
}
#ifdef CONFIG_DIAG_OVER_USB
@@ -264,8 +267,10 @@
struct list_head *temp;
struct diag_cmd_reg_t *item = NULL;
+ mutex_lock(&driver->cmd_reg_mutex);
if (diag_dbgfs_table_index == driver->cmd_reg_count) {
diag_dbgfs_table_index = 0;
+ mutex_unlock(&driver->cmd_reg_mutex);
return 0;
}
@@ -274,6 +279,7 @@
buf = kcalloc(buf_size, sizeof(char), GFP_KERNEL);
if (ZERO_OR_NULL_PTR(buf)) {
pr_err("diag: %s, Error allocating memory\n", __func__);
+ mutex_unlock(&driver->cmd_reg_mutex);
return -ENOMEM;
}
buf_size = ksize(buf);
@@ -318,6 +324,7 @@
break;
}
diag_dbgfs_table_index = i;
+ mutex_unlock(&driver->cmd_reg_mutex);
*ppos = 0;
ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c
index b5a594a..d734e29 100644
--- a/drivers/char/diag/diag_masks.c
+++ b/drivers/char/diag/diag_masks.c
@@ -28,7 +28,8 @@
#define DIAG_SET_FEATURE_MASK(x) (feature_bytes[(x)/8] |= (1 << (x & 0x7)))
#define diag_check_update(x) \
- (!info || (info && (info->peripheral_mask & MD_PERIPHERAL_MASK(x)))) \
+ (!info || (info && (info->peripheral_mask & MD_PERIPHERAL_MASK(x))) \
+ || (info && (info->peripheral_mask & MD_PERIPHERAL_PD_MASK(x)))) \
struct diag_mask_info msg_mask;
struct diag_mask_info msg_bt_mask;
@@ -90,8 +91,8 @@
int err = 0;
int send_once = 0;
int header_len = sizeof(struct diag_ctrl_log_mask);
- uint8_t *buf = NULL;
- uint8_t *temp = NULL;
+ uint8_t *buf = NULL, *temp = NULL;
+ uint8_t upd = 0;
uint32_t mask_size = 0;
struct diag_ctrl_log_mask ctrl_pkt;
struct diag_mask_info *mask_info = NULL;
@@ -107,16 +108,32 @@
return;
}
- if (driver->md_session_mask != 0 &&
- driver->md_session_mask & MD_PERIPHERAL_MASK(peripheral))
- mask_info = driver->md_session_map[peripheral]->log_mask;
- else
+ if (driver->md_session_mask != 0) {
+ if (driver->md_session_mask & MD_PERIPHERAL_MASK(peripheral)) {
+ if (driver->md_session_map[peripheral])
+ mask_info =
+ driver->md_session_map[peripheral]->log_mask;
+ } else if (driver->md_session_mask &
+ MD_PERIPHERAL_PD_MASK(peripheral)) {
+ upd = diag_mask_to_pd_value(driver->md_session_mask);
+ if (upd && driver->md_session_map[upd])
+ mask_info =
+ driver->md_session_map[upd]->log_mask;
+ } else {
+ DIAG_LOG(DIAG_DEBUG_MASKS,
+ "asking for mask update with unknown session mask\n");
+ return;
+ }
+ } else {
mask_info = &log_mask;
+ }
- if (!mask_info)
+ if (!mask_info || !mask_info->ptr || !mask_info->update_buf)
return;
mask = (struct diag_log_mask_t *)mask_info->ptr;
+ if (!mask->ptr)
+ return;
buf = mask_info->update_buf;
switch (mask_info->status) {
@@ -194,8 +211,8 @@
static void diag_send_event_mask_update(uint8_t peripheral)
{
- uint8_t *buf = NULL;
- uint8_t *temp = NULL;
+ uint8_t *buf = NULL, *temp = NULL;
+ uint8_t upd = 0;
struct diag_ctrl_event_mask header;
struct diag_mask_info *mask_info = NULL;
int num_bytes = EVENT_COUNT_TO_BYTES(driver->last_event_id);
@@ -219,13 +236,27 @@
return;
}
- if (driver->md_session_mask != 0 &&
- (driver->md_session_mask & MD_PERIPHERAL_MASK(peripheral)))
- mask_info = driver->md_session_map[peripheral]->event_mask;
- else
+ if (driver->md_session_mask != 0) {
+ if (driver->md_session_mask & MD_PERIPHERAL_MASK(peripheral)) {
+ if (driver->md_session_map[peripheral])
+ mask_info =
+ driver->md_session_map[peripheral]->event_mask;
+ } else if (driver->md_session_mask &
+ MD_PERIPHERAL_PD_MASK(peripheral)) {
+ upd = diag_mask_to_pd_value(driver->md_session_mask);
+ if (upd && driver->md_session_map[upd])
+ mask_info =
+ driver->md_session_map[upd]->event_mask;
+ } else {
+ DIAG_LOG(DIAG_DEBUG_MASKS,
+ "asking for mask update with unknown session mask\n");
+ return;
+ }
+ } else {
mask_info = &event_mask;
+ }
- if (!mask_info)
+ if (!mask_info || !mask_info->ptr || !mask_info->update_buf)
return;
buf = mask_info->update_buf;
@@ -283,12 +314,13 @@
int err = 0;
int header_len = sizeof(struct diag_ctrl_msg_mask);
int temp_len = 0;
- uint8_t *buf = NULL;
- uint8_t *temp = NULL;
+ uint8_t *buf = NULL, *temp = NULL;
+ uint8_t upd = 0;
uint32_t mask_size = 0;
struct diag_mask_info *mask_info = NULL;
struct diag_msg_mask_t *mask = NULL;
struct diag_ctrl_msg_mask header;
+ uint8_t msg_mask_tbl_count_local;
if (peripheral >= NUM_PERIPHERALS)
return;
@@ -300,17 +332,37 @@
return;
}
- if (driver->md_session_mask != 0 &&
- (driver->md_session_mask & MD_PERIPHERAL_MASK(peripheral)))
- mask_info = driver->md_session_map[peripheral]->msg_mask;
- else
+ if (driver->md_session_mask != 0) {
+ if (driver->md_session_mask & MD_PERIPHERAL_MASK(peripheral)) {
+ if (driver->md_session_map[peripheral])
+ mask_info =
+ driver->md_session_map[peripheral]->msg_mask;
+ } else if (driver->md_session_mask &
+ MD_PERIPHERAL_PD_MASK(peripheral)) {
+ upd = diag_mask_to_pd_value(driver->md_session_mask);
+ if (upd && driver->md_session_map[upd])
+ mask_info =
+ driver->md_session_map[upd]->msg_mask;
+ } else {
+ DIAG_LOG(DIAG_DEBUG_MASKS,
+ "asking for mask update with unknown session mask\n");
+ return;
+ }
+ } else {
mask_info = &msg_mask;
+ }
- if (!mask_info)
+ if (!mask_info || !mask_info->ptr || !mask_info->update_buf)
return;
-
+ mutex_lock(&driver->msg_mask_lock);
mask = (struct diag_msg_mask_t *)mask_info->ptr;
+ if (!mask->ptr) {
+ mutex_unlock(&driver->msg_mask_lock);
+ return;
+ }
buf = mask_info->update_buf;
+ msg_mask_tbl_count_local = driver->msg_mask_tbl_count;
+ mutex_unlock(&driver->msg_mask_lock);
mutex_lock(&mask_info->lock);
switch (mask_info->status) {
case DIAG_CTRL_MASK_ALL_DISABLED:
@@ -327,9 +379,11 @@
goto err;
}
- for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
- if (((first < mask->ssid_first) ||
- (last > mask->ssid_last_tools)) && first != ALL_SSID) {
+ for (i = 0; i < msg_mask_tbl_count_local; i++, mask++) {
+ mutex_lock(&driver->msg_mask_lock);
+ if (((mask->ssid_first > first) ||
+ (mask->ssid_last_tools < last)) && first != ALL_SSID) {
+ mutex_unlock(&driver->msg_mask_lock);
continue;
}
@@ -370,12 +424,13 @@
if (mask_size > 0)
memcpy(buf + header_len, mask->ptr, mask_size);
mutex_unlock(&mask->lock);
+ mutex_unlock(&driver->msg_mask_lock);
err = diagfwd_write(peripheral, TYPE_CNTL, buf,
header_len + mask_size);
if (err && err != -ENODEV)
- pr_err_ratelimited("diag: Unable to send msg masks to peripheral %d\n",
- peripheral);
+ pr_err_ratelimited("diag: Unable to send msg masks to peripheral %d, error = %d\n",
+ peripheral, err);
if (first != ALL_SSID)
break;
@@ -454,6 +509,13 @@
DIAG_SET_FEATURE_MASK(F_DIAG_REQ_RSP_SUPPORT);
if (driver->supports_apps_hdlc_encoding)
DIAG_SET_FEATURE_MASK(F_DIAG_APPS_HDLC_ENCODE);
+ if (driver->supports_apps_header_untagging) {
+ if (peripheral == PERIPHERAL_MODEM) {
+ DIAG_SET_FEATURE_MASK(F_DIAG_PKT_HEADER_UNTAG);
+ driver->peripheral_untag[peripheral] =
+ ENABLE_PKT_HEADER_UNTAGGING;
+ }
+ }
DIAG_SET_FEATURE_MASK(F_DIAG_MASK_CENTRALIZATION);
if (driver->supports_sockets)
DIAG_SET_FEATURE_MASK(F_DIAG_SOCKETS_ENABLED);
@@ -495,7 +557,7 @@
if (!diag_apps_responds())
return 0;
-
+ mutex_lock(&driver->msg_mask_lock);
rsp.cmd_code = DIAG_CMD_MSG_CONFIG;
rsp.sub_cmd = DIAG_CMD_OP_GET_SSID_RANGE;
rsp.status = MSG_STATUS_SUCCESS;
@@ -503,7 +565,6 @@
rsp.count = driver->msg_mask_tbl_count;
memcpy(dest_buf, &rsp, sizeof(rsp));
write_len += sizeof(rsp);
-
mask_ptr = (struct diag_msg_mask_t *)mask_info->ptr;
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask_ptr++) {
if (write_len + sizeof(ssid_range) > dest_len) {
@@ -516,7 +577,7 @@
memcpy(dest_buf + write_len, &ssid_range, sizeof(ssid_range));
write_len += sizeof(ssid_range);
}
-
+ mutex_unlock(&driver->msg_mask_lock);
return write_len;
}
@@ -540,7 +601,7 @@
if (!diag_apps_responds())
return 0;
-
+ mutex_lock(&driver->msg_mask_lock);
req = (struct diag_build_mask_req_t *)src_buf;
rsp.cmd_code = DIAG_CMD_MSG_CONFIG;
rsp.sub_cmd = DIAG_CMD_OP_GET_BUILD_MASK;
@@ -548,9 +609,8 @@
rsp.ssid_last = req->ssid_last;
rsp.status = MSG_STATUS_FAIL;
rsp.padding = 0;
-
build_mask = (struct diag_msg_mask_t *)msg_bt_mask.ptr;
- for (i = 0; i < driver->msg_mask_tbl_count; i++, build_mask++) {
+ for (i = 0; i < driver->bt_msg_mask_tbl_count; i++, build_mask++) {
if (build_mask->ssid_first != req->ssid_first)
continue;
num_entries = req->ssid_last - req->ssid_first + 1;
@@ -571,7 +631,7 @@
}
memcpy(dest_buf, &rsp, sizeof(rsp));
write_len += sizeof(rsp);
-
+ mutex_unlock(&driver->msg_mask_lock);
return write_len;
}
@@ -599,6 +659,7 @@
if (!diag_apps_responds())
return 0;
+ mutex_lock(&driver->msg_mask_lock);
req = (struct diag_build_mask_req_t *)src_buf;
rsp.cmd_code = DIAG_CMD_MSG_CONFIG;
rsp.sub_cmd = DIAG_CMD_OP_GET_MSG_MASK;
@@ -606,7 +667,6 @@
rsp.ssid_last = req->ssid_last;
rsp.status = MSG_STATUS_FAIL;
rsp.padding = 0;
-
mask = (struct diag_msg_mask_t *)mask_info->ptr;
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
if ((req->ssid_first < mask->ssid_first) ||
@@ -624,7 +684,7 @@
}
memcpy(dest_buf, &rsp, sizeof(rsp));
write_len += sizeof(rsp);
-
+ mutex_unlock(&driver->msg_mask_lock);
return write_len;
}
@@ -655,8 +715,8 @@
}
req = (struct diag_msg_build_mask_t *)src_buf;
-
mutex_lock(&mask_info->lock);
+ mutex_lock(&driver->msg_mask_lock);
mask = (struct diag_msg_mask_t *)mask_info->ptr;
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
if (i < (driver->msg_mask_tbl_count - 1)) {
@@ -696,6 +756,8 @@
pr_err_ratelimited("diag: In %s, unable to allocate memory for msg mask ptr, mask_size: %d\n",
__func__, mask_size);
mutex_unlock(&mask->lock);
+ mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&mask_info->lock);
return -ENOMEM;
}
mask->ptr = temp;
@@ -714,8 +776,8 @@
mask_info->status = DIAG_CTRL_MASK_VALID;
break;
}
+ mutex_unlock(&driver->msg_mask_lock);
mutex_unlock(&mask_info->lock);
-
if (diag_check_update(APPS_DATA))
diag_update_userspace_clients(MSG_MASKS_TYPE);
@@ -740,7 +802,9 @@
for (i = 0; i < NUM_PERIPHERALS; i++) {
if (!diag_check_update(i))
continue;
+ mutex_lock(&driver->md_session_lock);
diag_send_msg_mask_update(i, req->ssid_first, req->ssid_last);
+ mutex_unlock(&driver->md_session_lock);
}
end:
return write_len;
@@ -769,8 +833,10 @@
req = (struct diag_msg_config_rsp_t *)src_buf;
- mask = (struct diag_msg_mask_t *)mask_info->ptr;
mutex_lock(&mask_info->lock);
+ mutex_lock(&driver->msg_mask_lock);
+
+ mask = (struct diag_msg_mask_t *)mask_info->ptr;
mask_info->status = (req->rt_mask) ? DIAG_CTRL_MASK_ALL_ENABLED :
DIAG_CTRL_MASK_ALL_DISABLED;
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
@@ -779,6 +845,7 @@
mask->range * sizeof(uint32_t));
mutex_unlock(&mask->lock);
}
+ mutex_unlock(&driver->msg_mask_lock);
mutex_unlock(&mask_info->lock);
if (diag_check_update(APPS_DATA))
@@ -799,7 +866,9 @@
for (i = 0; i < NUM_PERIPHERALS; i++) {
if (!diag_check_update(i))
continue;
+ mutex_lock(&driver->md_session_lock);
diag_send_msg_mask_update(i, ALL_SSID, ALL_SSID);
+ mutex_unlock(&driver->md_session_lock);
}
return write_len;
@@ -893,7 +962,9 @@
for (i = 0; i < NUM_PERIPHERALS; i++) {
if (!diag_check_update(i))
continue;
+ mutex_lock(&driver->md_session_lock);
diag_send_event_mask_update(i);
+ mutex_unlock(&driver->md_session_lock);
}
return write_len;
@@ -940,7 +1011,9 @@
for (i = 0; i < NUM_PERIPHERALS; i++) {
if (!diag_check_update(i))
continue;
+ mutex_lock(&driver->md_session_lock);
diag_send_event_mask_update(i);
+ mutex_unlock(&driver->md_session_lock);
}
memcpy(dest_buf, &header, sizeof(header));
write_len += sizeof(header);
@@ -1194,7 +1267,9 @@
for (i = 0; i < NUM_PERIPHERALS; i++) {
if (!diag_check_update(i))
continue;
+ mutex_lock(&driver->md_session_lock);
diag_send_log_mask_update(i, req->equip_id);
+ mutex_unlock(&driver->md_session_lock);
}
end:
return write_len;
@@ -1245,7 +1320,9 @@
for (i = 0; i < NUM_PERIPHERALS; i++) {
if (!diag_check_update(i))
continue;
+ mutex_lock(&driver->md_session_lock);
diag_send_log_mask_update(i, ALL_EQUIP_ID);
+ mutex_unlock(&driver->md_session_lock);
}
return write_len;
@@ -1284,6 +1361,7 @@
struct diag_ssid_range_t range;
mutex_lock(&msg_mask.lock);
+ mutex_lock(&driver->msg_mask_lock);
driver->msg_mask_tbl_count = MSG_MASK_TBL_CNT;
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
range.ssid_first = msg_mask_tbl[i].ssid_first;
@@ -1292,6 +1370,7 @@
if (err)
break;
}
+ mutex_unlock(&driver->msg_mask_lock);
mutex_unlock(&msg_mask.lock);
return err;
}
@@ -1306,8 +1385,10 @@
struct diag_ssid_range_t range;
mutex_lock(&msg_bt_mask.lock);
+ mutex_lock(&driver->msg_mask_lock);
+ driver->bt_msg_mask_tbl_count = MSG_MASK_TBL_CNT;
build_mask = (struct diag_msg_mask_t *)msg_bt_mask.ptr;
- for (i = 0; i < driver->msg_mask_tbl_count; i++, build_mask++) {
+ for (i = 0; i < driver->bt_msg_mask_tbl_count; i++, build_mask++) {
range.ssid_first = msg_mask_tbl[i].ssid_first;
range.ssid_last = msg_mask_tbl[i].ssid_last;
err = diag_create_msg_mask_table_entry(build_mask, &range);
@@ -1417,8 +1498,8 @@
}
memcpy(build_mask->ptr, tbl, tbl_size);
}
+ mutex_unlock(&driver->msg_mask_lock);
mutex_unlock(&msg_bt_mask.lock);
-
return err;
}
@@ -1565,10 +1646,11 @@
pr_err("diag: Unable to create msg masks, err: %d\n", err);
return err;
}
+ mutex_lock(&driver->msg_mask_lock);
driver->msg_mask = &msg_mask;
-
for (i = 0; i < NUM_PERIPHERALS; i++)
driver->max_ssid_count[i] = 0;
+ mutex_unlock(&driver->msg_mask_lock);
return 0;
}
@@ -1587,8 +1669,8 @@
err = __diag_mask_init(dest, MSG_MASK_SIZE, APPS_BUF_SIZE);
if (err)
return err;
-
mutex_lock(&dest->lock);
+ mutex_lock(&driver->msg_mask_lock);
src_mask = (struct diag_msg_mask_t *)src->ptr;
dest_mask = (struct diag_msg_mask_t *)dest->ptr;
@@ -1605,8 +1687,8 @@
src_mask++;
dest_mask++;
}
+ mutex_unlock(&driver->msg_mask_lock);
mutex_unlock(&dest->lock);
-
return err;
}
@@ -1617,15 +1699,15 @@
if (!mask_info)
return;
-
mutex_lock(&mask_info->lock);
+ mutex_lock(&driver->msg_mask_lock);
mask = (struct diag_msg_mask_t *)mask_info->ptr;
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
kfree(mask->ptr);
mask->ptr = NULL;
}
+ mutex_unlock(&driver->msg_mask_lock);
mutex_unlock(&mask_info->lock);
-
__diag_mask_exit(mask_info);
}
@@ -1633,15 +1715,17 @@
{
int i;
struct diag_msg_mask_t *mask = NULL;
-
+ mutex_lock(&driver->msg_mask_lock);
mask = (struct diag_msg_mask_t *)(msg_mask.ptr);
if (mask) {
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++)
kfree(mask->ptr);
kfree(msg_mask.ptr);
+ msg_mask.ptr = NULL;
}
-
kfree(msg_mask.update_buf);
+ msg_mask.update_buf = NULL;
+ mutex_unlock(&driver->msg_mask_lock);
}
static int diag_build_time_mask_init(void)
@@ -1666,13 +1750,15 @@
{
int i;
struct diag_msg_mask_t *mask = NULL;
-
+ mutex_lock(&driver->msg_mask_lock);
mask = (struct diag_msg_mask_t *)(msg_bt_mask.ptr);
if (mask) {
- for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++)
+ for (i = 0; i < driver->bt_msg_mask_tbl_count; i++, mask++)
kfree(mask->ptr);
- kfree(msg_mask.ptr);
+ kfree(msg_bt_mask.ptr);
+ msg_bt_mask.ptr = NULL;
}
+ mutex_unlock(&driver->msg_mask_lock);
}
static int diag_log_mask_init(void)
@@ -1790,8 +1876,9 @@
return -EIO;
}
mutex_unlock(&driver->diag_maskclear_mutex);
-
mutex_lock(&mask_info->lock);
+ mutex_lock(&driver->msg_mask_lock);
+
mask = (struct diag_msg_mask_t *)(mask_info->ptr);
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
ptr = mask_info->update_buf;
@@ -1828,8 +1915,8 @@
}
total_len += len;
}
+ mutex_unlock(&driver->msg_mask_lock);
mutex_unlock(&mask_info->lock);
-
return err ? err : total_len;
}
@@ -1896,15 +1983,27 @@
void diag_send_updates_peripheral(uint8_t peripheral)
{
diag_send_feature_mask_update(peripheral);
- if (driver->time_sync_enabled)
- diag_send_time_sync_update(peripheral);
- diag_send_msg_mask_update(peripheral, ALL_SSID, ALL_SSID);
- diag_send_log_mask_update(peripheral, ALL_EQUIP_ID);
- diag_send_event_mask_update(peripheral);
- diag_send_real_time_update(peripheral,
+ /*
+ * Masks (F3, logs and events) will be sent to
+ * peripheral immediately following feature mask update only
+ * if diag_id support is not present or
+ * diag_id support is present and diag_id has been sent to
+ * peripheral.
+ */
+ if (!driver->feature[peripheral].diag_id_support ||
+ driver->diag_id_sent[peripheral]) {
+ if (driver->time_sync_enabled)
+ diag_send_time_sync_update(peripheral);
+ mutex_lock(&driver->md_session_lock);
+ diag_send_msg_mask_update(peripheral, ALL_SSID, ALL_SSID);
+ diag_send_log_mask_update(peripheral, ALL_EQUIP_ID);
+ diag_send_event_mask_update(peripheral);
+ mutex_unlock(&driver->md_session_lock);
+ diag_send_real_time_update(peripheral,
driver->real_time_mode[DIAG_LOCAL_PROC]);
- diag_send_peripheral_buffering_mode(
- &driver->buffering_mode[peripheral]);
+ diag_send_peripheral_buffering_mode(
+ &driver->buffering_mode[peripheral]);
+ }
}
int diag_process_apps_masks(unsigned char *buf, int len,
diff --git a/drivers/char/diag/diag_memorydevice.c b/drivers/char/diag/diag_memorydevice.c
index 13ad402..7e3fe90 100644
--- a/drivers/char/diag/diag_memorydevice.c
+++ b/drivers/char/diag/diag_memorydevice.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -29,6 +29,7 @@
#include "diagmem.h"
#include "diagfwd.h"
#include "diagfwd_peripheral.h"
+#include "diag_ipc_logging.h"
struct diag_md_info diag_md[NUM_DIAG_MD_DEV] = {
{
@@ -132,7 +133,7 @@
uint8_t found = 0;
unsigned long flags;
struct diag_md_info *ch = NULL;
- uint8_t peripheral;
+ int peripheral;
struct diag_md_session_t *session_info = NULL;
if (id < 0 || id >= NUM_DIAG_MD_DEV || id >= DIAG_NUM_PROC)
@@ -141,11 +142,12 @@
if (!buf || len < 0)
return -EINVAL;
- peripheral = GET_BUF_PERIPHERAL(ctx);
- if (peripheral > NUM_PERIPHERALS)
+ peripheral = diag_md_get_peripheral(ctx);
+ if (peripheral < 0)
return -EINVAL;
- session_info = diag_md_session_get_peripheral(peripheral);
+ session_info =
+ diag_md_session_get_peripheral(peripheral);
if (!session_info)
return -EIO;
@@ -214,7 +216,7 @@
struct diag_md_info *ch = NULL;
struct diag_buf_tbl_t *entry = NULL;
uint8_t drain_again = 0;
- uint8_t peripheral = 0;
+ int peripheral = 0;
struct diag_md_session_t *session_info = NULL;
for (i = 0; i < NUM_DIAG_MD_DEV && !err; i++) {
@@ -223,12 +225,15 @@
entry = &ch->tbl[j];
if (entry->len <= 0)
continue;
- peripheral = GET_BUF_PERIPHERAL(entry->ctx);
- /* Account for Apps data as well */
- if (peripheral > NUM_PERIPHERALS)
+
+ peripheral = diag_md_get_peripheral(entry->ctx);
+ if (peripheral < 0)
goto drop_data;
session_info =
diag_md_session_get_peripheral(peripheral);
+ if (!session_info)
+ goto drop_data;
+
if (session_info && info &&
(session_info->pid != info->pid))
continue;
@@ -320,8 +325,15 @@
spin_lock_irqsave(&ch->lock, flags);
for (i = 0; i < ch->num_tbl_entries && !found; i++) {
entry = &ch->tbl[i];
- if (GET_BUF_PERIPHERAL(entry->ctx) != peripheral)
- continue;
+
+ if (peripheral > NUM_PERIPHERALS) {
+ if (GET_PD_CTXT(entry->ctx) != peripheral)
+ continue;
+ } else {
+ if (GET_BUF_PERIPHERAL(entry->ctx) !=
+ peripheral)
+ continue;
+ }
found = 1;
if (ch->ops && ch->ops->write_done) {
ch->ops->write_done(entry->buf, entry->len,
diff --git a/drivers/char/diag/diag_mux.c b/drivers/char/diag/diag_mux.c
index 8f5a002..e65b493 100644
--- a/drivers/char/diag/diag_mux.c
+++ b/drivers/char/diag/diag_mux.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -27,7 +27,8 @@
#include "diag_mux.h"
#include "diag_usb.h"
#include "diag_memorydevice.h"
-
+#include "diagfwd_peripheral.h"
+#include "diag_ipc_logging.h"
struct diag_mux_state_t *diag_mux;
static struct diag_logger_t usb_logger;
@@ -141,9 +142,13 @@
if (!diag_mux)
return -EIO;
- peripheral = GET_BUF_PERIPHERAL(ctx);
- if (peripheral > NUM_PERIPHERALS)
+ peripheral = diag_md_get_peripheral(ctx);
+ if (peripheral < 0) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag:%s:%d invalid peripheral = %d\n",
+ __func__, __LINE__, peripheral);
return -EINVAL;
+ }
if (MD_PERIPHERAL_MASK(peripheral) & diag_mux->mux_mask)
logger = diag_mux->md_ptr;
@@ -162,8 +167,13 @@
if (proc < 0 || proc >= NUM_MUX_PROC)
return -EINVAL;
/* Peripheral should account for Apps data as well */
- if (peripheral > NUM_PERIPHERALS)
- return -EINVAL;
+ if (peripheral > NUM_PERIPHERALS) {
+ if (!driver->num_pd_session)
+ return -EINVAL;
+ if (peripheral > NUM_MD_SESSIONS)
+ return -EINVAL;
+ }
+
if (!diag_mux)
return -EIO;
@@ -184,7 +194,8 @@
if (!req_mode)
return -EINVAL;
- if (*peripheral_mask <= 0 || *peripheral_mask > DIAG_CON_ALL) {
+ if (*peripheral_mask <= 0 ||
+ (*peripheral_mask > (DIAG_CON_ALL | DIAG_CON_UPD_ALL))) {
pr_err("diag: mask %d in %s\n", *peripheral_mask, __func__);
return -EINVAL;
}
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index d3dde50..ac3c1fd 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -58,19 +58,23 @@
#define DIAG_CTRL_MSG_F3_MASK 11
#define CONTROL_CHAR 0x7E
+#define DIAG_ID_ROOT_STRING "root"
+
#define DIAG_CON_APSS (0x0001) /* Bit mask for APSS */
#define DIAG_CON_MPSS (0x0002) /* Bit mask for MPSS */
#define DIAG_CON_LPASS (0x0004) /* Bit mask for LPASS */
#define DIAG_CON_WCNSS (0x0008) /* Bit mask for WCNSS */
#define DIAG_CON_SENSORS (0x0010) /* Bit mask for Sensors */
-#define DIAG_CON_WDSP (0x0020) /* Bit mask for WDSP */
-#define DIAG_CON_CDSP (0x0040)
+#define DIAG_CON_WDSP (0x0020) /* Bit mask for WDSP */
+#define DIAG_CON_CDSP (0x0040) /* Bit mask for CDSP */
+#define DIAG_CON_UPD_WLAN (0x1000) /*Bit mask for WLAN PD*/
#define DIAG_CON_NONE (0x0000) /* Bit mask for No SS*/
#define DIAG_CON_ALL (DIAG_CON_APSS | DIAG_CON_MPSS \
| DIAG_CON_LPASS | DIAG_CON_WCNSS \
| DIAG_CON_SENSORS | DIAG_CON_WDSP \
| DIAG_CON_CDSP)
+#define DIAG_CON_UPD_ALL (DIAG_CON_UPD_WLAN)
#define DIAG_STM_MODEM 0x01
#define DIAG_STM_LPASS 0x02
@@ -165,7 +169,7 @@
#define PKT_ALLOC 1
#define PKT_RESET 2
-#define FEATURE_MASK_LEN 2
+#define FEATURE_MASK_LEN 4
#define DIAG_MD_NONE 0
#define DIAG_MD_PERIPHERAL 1
@@ -209,11 +213,18 @@
#define NUM_PERIPHERALS 6
#define APPS_DATA (NUM_PERIPHERALS)
+#define UPD_WLAN 7
+#define NUM_UPD 1
+#define MAX_PERIPHERAL_UPD 1
/* Number of sessions possible in Memory Device Mode. +1 for Apps data */
-#define NUM_MD_SESSIONS (NUM_PERIPHERALS + 1)
+#define NUM_MD_SESSIONS (NUM_PERIPHERALS \
+ + NUM_UPD + 1)
#define MD_PERIPHERAL_MASK(x) (1 << x)
+#define MD_PERIPHERAL_PD_MASK(x) \
+ ((x == PERIPHERAL_MODEM) ? (1 << UPD_WLAN) : 0)\
+
/*
* Number of stm processors includes all the peripherals and
* apps.Added 1 below to indicate apps
@@ -439,6 +450,7 @@
struct diag_logging_mode_param_t {
uint32_t req_mode;
uint32_t peripheral_mask;
+ uint32_t pd_mask;
uint8_t mode_param;
} __packed;
@@ -485,11 +497,13 @@
uint8_t log_on_demand;
uint8_t separate_cmd_rsp;
uint8_t encode_hdlc;
+ uint8_t untag_header;
uint8_t peripheral_buffering;
uint8_t mask_centralization;
uint8_t stm_support;
uint8_t sockets_enabled;
uint8_t sent_feature_mask;
+ uint8_t diag_id_support;
};
struct diagchar_dev {
@@ -516,6 +530,8 @@
int use_device_tree;
int supports_separate_cmdrsp;
int supports_apps_hdlc_encoding;
+ int supports_apps_header_untagging;
+ int peripheral_untag[NUM_PERIPHERALS];
int supports_sockets;
/* The state requested in the STM command */
int stm_state_requested[NUM_STM_PROCESSORS];
@@ -580,6 +596,7 @@
unsigned char *buf_feature_mask_update;
uint8_t hdlc_disabled;
struct mutex hdlc_disable_mutex;
+ struct mutex hdlc_recovery_mutex;
struct timer_list hdlc_reset_timer;
struct mutex diag_hdlc_mutex;
unsigned char *hdlc_buf;
@@ -611,6 +628,10 @@
int in_busy_dcipktdata;
int logging_mode;
int logging_mask;
+ int pd_logging_mode[NUM_UPD];
+ int pd_session_clear[NUM_UPD];
+ int num_pd_session;
+ int diag_id_sent[NUM_PERIPHERALS];
int mask_check;
uint32_t md_session_mask;
uint8_t md_session_mode;
@@ -625,8 +646,10 @@
struct diag_mask_info *event_mask;
struct diag_mask_info *build_time_mask;
uint8_t msg_mask_tbl_count;
+ uint8_t bt_msg_mask_tbl_count;
uint16_t event_mask_size;
uint16_t last_event_id;
+ struct mutex msg_mask_lock;
/* Variables for Mask Centralization */
uint16_t num_event_id[NUM_PERIPHERALS];
uint32_t num_equip_id[NUM_PERIPHERALS];
@@ -669,6 +692,7 @@
int diag_cmd_chk_polling(struct diag_cmd_reg_entry_t *entry);
int diag_mask_param(void);
void diag_clear_masks(struct diag_md_session_t *info);
+uint8_t diag_mask_to_pd_value(uint32_t peripheral_mask);
void diag_record_stats(int type, int flag);
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 128d6ce..f0e69ef 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -393,9 +393,28 @@
ret |= DIAG_CON_WDSP;
if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_CDSP))
ret |= DIAG_CON_CDSP;
-
+ if (peripheral_mask & MD_PERIPHERAL_MASK(UPD_WLAN))
+ ret |= DIAG_CON_UPD_WLAN;
return ret;
}
+
+uint8_t diag_mask_to_pd_value(uint32_t peripheral_mask)
+{
+ uint8_t upd = 0;
+ uint32_t pd_mask = 0;
+
+ pd_mask = diag_translate_kernel_to_user_mask(peripheral_mask);
+ switch (pd_mask) {
+ case DIAG_CON_UPD_WLAN:
+ upd = UPD_WLAN;
+ break;
+ default:
+ DIAG_LOG(DIAG_DEBUG_MASKS,
+ "asking for mask update with no pd mask set\n");
+ }
+ return upd;
+}
+
int diag_mask_param(void)
{
return diag_mask_clear_param;
@@ -423,8 +442,9 @@
static void diag_close_logging_process(const int pid)
{
- int i;
- int session_peripheral_mask;
+ int i, j;
+ int session_mask;
+ uint32_t p_mask;
struct diag_md_session_t *session_info = NULL;
struct diag_logging_mode_param_t params;
@@ -440,18 +460,33 @@
mutex_unlock(&driver->diag_maskclear_mutex);
mutex_lock(&driver->diagchar_mutex);
- session_peripheral_mask = session_info->peripheral_mask;
+ session_mask = session_info->peripheral_mask;
diag_md_session_close(session_info);
- mutex_unlock(&driver->diagchar_mutex);
+
+ p_mask =
+ diag_translate_kernel_to_user_mask(session_mask);
+
for (i = 0; i < NUM_MD_SESSIONS; i++)
- if (MD_PERIPHERAL_MASK(i) & session_peripheral_mask)
+ if (MD_PERIPHERAL_MASK(i) & session_mask)
diag_mux_close_peripheral(DIAG_LOCAL_PROC, i);
params.req_mode = USB_MODE;
params.mode_param = 0;
- params.peripheral_mask =
- diag_translate_kernel_to_user_mask(session_peripheral_mask);
- mutex_lock(&driver->diagchar_mutex);
+ params.pd_mask = 0;
+ params.peripheral_mask = p_mask;
+
+ if (driver->num_pd_session > 0) {
+ for (i = UPD_WLAN; (i < NUM_MD_SESSIONS); i++) {
+ if (session_mask & MD_PERIPHERAL_MASK(i)) {
+ j = i - UPD_WLAN;
+ driver->pd_session_clear[j] = 1;
+ driver->pd_logging_mode[j] = 0;
+ driver->num_pd_session -= 1;
+ params.pd_mask = p_mask;
+ }
+ }
+ }
+
diag_switch_logging(¶ms);
mutex_unlock(&driver->diagchar_mutex);
}
@@ -654,6 +689,11 @@
driver->polling_reg_flag = 0;
list_for_each_safe(start, temp, &driver->cmd_reg_list) {
item = list_entry(start, struct diag_cmd_reg_t, link);
+ if (&item->entry == NULL) {
+ pr_err("diag: In %s, unable to search command\n",
+ __func__);
+ return;
+ }
polling = diag_cmd_chk_polling(&item->entry);
if (polling == DIAG_CMD_POLLING) {
driver->polling_reg_flag = 1;
@@ -793,6 +833,12 @@
mutex_lock(&driver->cmd_reg_mutex);
list_for_each_safe(start, temp, &driver->cmd_reg_list) {
item = list_entry(start, struct diag_cmd_reg_t, link);
+ if (&item->entry == NULL) {
+ pr_err("diag: In %s, unable to search command\n",
+ __func__);
+ mutex_unlock(&driver->cmd_reg_mutex);
+ return;
+ }
if (item->pid == pid) {
list_del(&item->link);
kfree(item);
@@ -811,6 +857,12 @@
mutex_lock(&driver->cmd_reg_mutex);
list_for_each_safe(start, temp, &driver->cmd_reg_list) {
item = list_entry(start, struct diag_cmd_reg_t, link);
+ if (&item->entry == NULL) {
+ pr_err("diag: In %s, unable to search command\n",
+ __func__);
+ mutex_unlock(&driver->cmd_reg_mutex);
+ return;
+ }
if (item->proc == proc) {
list_del(&item->link);
kfree(item);
@@ -975,14 +1027,34 @@
else
hdlc_disabled = driver->hdlc_disabled;
if (hdlc_disabled) {
+ if (len < 4) {
+ pr_err("diag: In %s, invalid len: %d of non_hdlc pkt",
+ __func__, len);
+ return -EBADMSG;
+ }
payload = *(uint16_t *)(buf + 2);
+ if (payload > DIAG_MAX_HDLC_BUF_SIZE) {
+ pr_err("diag: Dropping packet, payload size is %d\n",
+ payload);
+ return -EBADMSG;
+ }
driver->hdlc_encode_buf_len = payload;
/*
- * Adding 4 bytes for start (1 byte), version (1 byte) and
- * payload (2 bytes)
+ * Adding 5 bytes for start (1 byte), version (1 byte),
+ * payload (2 bytes) and end (1 byte)
*/
- memcpy(driver->hdlc_encode_buf, buf + 4, payload);
- goto send_data;
+ if (len == (payload + 5)) {
+ /*
+ * Adding 4 bytes for start (1 byte), version (1 byte)
+ * and payload (2 bytes)
+ */
+ memcpy(driver->hdlc_encode_buf, buf + 4, payload);
+ goto send_data;
+ } else {
+ pr_err("diag: In %s, invalid len: %d of non_hdlc pkt",
+ __func__, len);
+ return -EBADMSG;
+ }
}
if (hdlc_flag) {
@@ -1543,17 +1615,20 @@
ret |= (1 << PERIPHERAL_WDSP);
if (peripheral_mask & DIAG_CON_CDSP)
ret |= (1 << PERIPHERAL_CDSP);
+ if (peripheral_mask & DIAG_CON_UPD_WLAN)
+ ret |= (1 << UPD_WLAN);
return ret;
}
static int diag_switch_logging(struct diag_logging_mode_param_t *param)
{
- int new_mode;
+ int new_mode, i = 0;
int curr_mode;
int err = 0;
uint8_t do_switch = 1;
uint32_t peripheral_mask = 0;
+ uint8_t peripheral, upd;
if (!param)
return -EINVAL;
@@ -1564,8 +1639,41 @@
return -EINVAL;
}
- peripheral_mask = diag_translate_mask(param->peripheral_mask);
- param->peripheral_mask = peripheral_mask;
+ if (param->pd_mask) {
+ switch (param->pd_mask) {
+ case DIAG_CON_UPD_WLAN:
+ peripheral = PERIPHERAL_MODEM;
+ upd = UPD_WLAN;
+ break;
+ default:
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "asking for mode switch with no pd mask set\n");
+ return -EINVAL;
+ }
+
+ if (driver->md_session_map[peripheral] &&
+ (MD_PERIPHERAL_MASK(peripheral) &
+ diag_mux->mux_mask)) {
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "diag_fr: User PD is already logging onto active peripheral logging\n");
+ i = upd - UPD_WLAN;
+ driver->pd_session_clear[i] = 0;
+ return -EINVAL;
+ }
+ peripheral_mask =
+ diag_translate_mask(param->pd_mask);
+ param->peripheral_mask = peripheral_mask;
+ i = upd - UPD_WLAN;
+ if (!driver->pd_session_clear[i]) {
+ driver->pd_logging_mode[i] = 1;
+ driver->num_pd_session += 1;
+ }
+ driver->pd_session_clear[i] = 0;
+ } else {
+ peripheral_mask =
+ diag_translate_mask(param->peripheral_mask);
+ param->peripheral_mask = peripheral_mask;
+ }
switch (param->req_mode) {
case CALLBACK_MODE:
@@ -1585,7 +1693,7 @@
curr_mode = driver->logging_mode;
DIAG_LOG(DIAG_DEBUG_USERSPACE,
- "request to switch logging from %d mask:%0x to %d mask:%0x\n",
+ "request to switch logging from %d mask:%0x to new_mode %d mask:%0x\n",
curr_mode, driver->md_session_mask, new_mode, peripheral_mask);
err = diag_md_session_check(curr_mode, new_mode, param, &do_switch);
@@ -1713,14 +1821,18 @@
{
int i;
+ mutex_lock(&driver->diagchar_mutex);
for (i = 0; i < driver->num_clients; i++)
if (driver->client_map[i].pid == current->tgid)
break;
- if (i == driver->num_clients)
+ if (i == driver->num_clients) {
+ mutex_unlock(&driver->diagchar_mutex);
return -EINVAL;
+ }
driver->data_ready[i] |= DEINIT_TYPE;
+ mutex_unlock(&driver->diagchar_mutex);
wake_up_interruptible(&driver->wait_q);
return 1;
@@ -1905,6 +2017,52 @@
return 0;
}
+static int diag_ioctl_query_pd_logging(struct diag_logging_mode_param_t *param)
+{
+ int ret = -EINVAL;
+ int peripheral;
+ char *p_str = NULL;
+
+ if (!param)
+ return -EINVAL;
+
+ if (!param->pd_mask) {
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "query with no pd mask set, returning error\n");
+ return -EINVAL;
+ }
+
+ switch (param->pd_mask) {
+ case DIAG_CON_UPD_WLAN:
+ peripheral = PERIPHERAL_MODEM;
+ p_str = "MODEM";
+ break;
+ default:
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "Invalid pd mask, returning EINVAL\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&driver->diag_cntl_mutex);
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "diag: %s: Untagging support on APPS is %s\n", __func__,
+ ((driver->supports_apps_header_untagging) ?
+ "present" : "absent"));
+
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "diag: %s: Tagging support on %s is %s\n",
+ __func__, p_str,
+ (driver->feature[peripheral].untag_header ?
+ "present" : "absent"));
+
+ if (driver->supports_apps_header_untagging &&
+ driver->feature[peripheral].untag_header)
+ ret = 0;
+
+ mutex_unlock(&driver->diag_cntl_mutex);
+ return ret;
+}
+
static int diag_ioctl_register_callback(unsigned long ioarg)
{
int err = 0;
@@ -2142,6 +2300,12 @@
case DIAG_IOCTL_HDLC_TOGGLE:
result = diag_ioctl_hdlc_toggle(ioarg);
break;
+ case DIAG_IOCTL_QUERY_PD_LOGGING:
+ if (copy_from_user((void *)&mode_param, (void __user *)ioarg,
+ sizeof(mode_param)))
+ return -EFAULT;
+ result = diag_ioctl_query_pd_logging(&mode_param);
+ break;
}
return result;
}
@@ -2265,6 +2429,12 @@
case DIAG_IOCTL_HDLC_TOGGLE:
result = diag_ioctl_hdlc_toggle(ioarg);
break;
+ case DIAG_IOCTL_QUERY_PD_LOGGING:
+ if (copy_from_user((void *)&mode_param, (void __user *)ioarg,
+ sizeof(mode_param)))
+ return -EFAULT;
+ result = diag_ioctl_query_pd_logging(&mode_param);
+ break;
}
return result;
}
@@ -3318,7 +3488,7 @@
* to be logged to IPC
*/
diag_debug_mask = DIAG_DEBUG_PERIPHERALS | DIAG_DEBUG_DCI |
- DIAG_DEBUG_BRIDGE;
+ DIAG_DEBUG_USERSPACE | DIAG_DEBUG_BRIDGE;
}
#else
static void diag_debug_init(void)
@@ -3448,6 +3618,11 @@
poolsize_usb_apps + 1 + (NUM_PERIPHERALS * 6));
driver->num_clients = max_clients;
driver->logging_mode = DIAG_USB_MODE;
+ for (i = 0; i < NUM_UPD; i++) {
+ driver->pd_logging_mode[i] = 0;
+ driver->pd_session_clear[i] = 0;
+ }
+ driver->num_pd_session = 0;
driver->mask_check = 0;
driver->in_busy_pktdata = 0;
driver->in_busy_dcipktdata = 0;
@@ -3463,8 +3638,12 @@
mutex_init(&driver->diag_file_mutex);
mutex_init(&driver->delayed_rsp_mutex);
mutex_init(&apps_data_mutex);
- for (i = 0; i < NUM_PERIPHERALS; i++)
+ mutex_init(&driver->msg_mask_lock);
+ mutex_init(&driver->hdlc_recovery_mutex);
+ for (i = 0; i < NUM_PERIPHERALS; i++) {
mutex_init(&driver->diagfwd_channel_mutex[i]);
+ driver->diag_id_sent[i] = 0;
+ }
init_waitqueue_head(&driver->wait_q);
INIT_WORK(&(driver->diag_drain_work), diag_drain_work_fn);
INIT_WORK(&(driver->update_user_clients),
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index cd49f00..b59f245 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -38,6 +38,7 @@
#include "diag_masks.h"
#include "diag_usb.h"
#include "diag_mux.h"
+#include "diag_ipc_logging.h"
#define STM_CMD_VERSION_OFFSET 4
#define STM_CMD_MASK_OFFSET 5
@@ -1443,7 +1444,9 @@
if (start_ptr) {
/* Discard any partial packet reads */
+ mutex_lock(&driver->hdlc_recovery_mutex);
driver->incoming_pkt.processing = 0;
+ mutex_unlock(&driver->hdlc_recovery_mutex);
diag_process_non_hdlc_pkt(start_ptr, len - i, info);
}
}
@@ -1457,18 +1460,24 @@
const uint32_t header_len = sizeof(struct diag_pkt_frame_t);
struct diag_pkt_frame_t *actual_pkt = NULL;
unsigned char *data_ptr = NULL;
- struct diag_partial_pkt_t *partial_pkt = &driver->incoming_pkt;
+ struct diag_partial_pkt_t *partial_pkt = NULL;
- if (!buf || len <= 0)
+ mutex_lock(&driver->hdlc_recovery_mutex);
+ if (!buf || len <= 0) {
+ mutex_unlock(&driver->hdlc_recovery_mutex);
return;
-
- if (!partial_pkt->processing)
+ }
+ partial_pkt = &driver->incoming_pkt;
+ if (!partial_pkt->processing) {
+ mutex_unlock(&driver->hdlc_recovery_mutex);
goto start;
+ }
if (partial_pkt->remaining > len) {
if ((partial_pkt->read_len + len) > partial_pkt->capacity) {
pr_err("diag: Invalid length %d, %d received in %s\n",
partial_pkt->read_len, len, __func__);
+ mutex_unlock(&driver->hdlc_recovery_mutex);
goto end;
}
memcpy(partial_pkt->data + partial_pkt->read_len, buf, len);
@@ -1482,6 +1491,7 @@
pr_err("diag: Invalid length during partial read %d, %d received in %s\n",
partial_pkt->read_len,
partial_pkt->remaining, __func__);
+ mutex_unlock(&driver->hdlc_recovery_mutex);
goto end;
}
memcpy(partial_pkt->data + partial_pkt->read_len, buf,
@@ -1495,20 +1505,27 @@
if (partial_pkt->remaining == 0) {
actual_pkt = (struct diag_pkt_frame_t *)(partial_pkt->data);
data_ptr = partial_pkt->data + header_len;
- if (*(uint8_t *)(data_ptr + actual_pkt->length) != CONTROL_CHAR)
+ if (*(uint8_t *)(data_ptr + actual_pkt->length) !=
+ CONTROL_CHAR) {
+ mutex_unlock(&driver->hdlc_recovery_mutex);
diag_hdlc_start_recovery(buf, len, info);
+ mutex_lock(&driver->hdlc_recovery_mutex);
+ }
err = diag_process_apps_pkt(data_ptr,
actual_pkt->length, info);
if (err) {
pr_err("diag: In %s, unable to process incoming data packet, err: %d\n",
__func__, err);
+ mutex_unlock(&driver->hdlc_recovery_mutex);
goto end;
}
partial_pkt->read_len = 0;
partial_pkt->total_len = 0;
partial_pkt->processing = 0;
+ mutex_unlock(&driver->hdlc_recovery_mutex);
goto start;
}
+ mutex_unlock(&driver->hdlc_recovery_mutex);
goto end;
start:
@@ -1521,14 +1538,14 @@
diag_send_error_rsp(buf, len);
goto end;
}
-
+ mutex_lock(&driver->hdlc_recovery_mutex);
if (pkt_len + header_len > partial_pkt->capacity) {
pr_err("diag: In %s, incoming data is too large for the request buffer %d\n",
__func__, pkt_len);
+ mutex_unlock(&driver->hdlc_recovery_mutex);
diag_hdlc_start_recovery(buf, len, info);
break;
}
-
if ((pkt_len + header_len) > (len - read_bytes)) {
partial_pkt->read_len = len - read_bytes;
partial_pkt->total_len = pkt_len + header_len;
@@ -1536,19 +1553,27 @@
partial_pkt->read_len;
partial_pkt->processing = 1;
memcpy(partial_pkt->data, buf, partial_pkt->read_len);
+ mutex_unlock(&driver->hdlc_recovery_mutex);
break;
}
data_ptr = buf + header_len;
- if (*(uint8_t *)(data_ptr + actual_pkt->length) != CONTROL_CHAR)
+ if (*(uint8_t *)(data_ptr + actual_pkt->length) !=
+ CONTROL_CHAR) {
+ mutex_unlock(&driver->hdlc_recovery_mutex);
diag_hdlc_start_recovery(buf, len, info);
+ mutex_lock(&driver->hdlc_recovery_mutex);
+ }
else
hdlc_reset = 0;
err = diag_process_apps_pkt(data_ptr,
actual_pkt->length, info);
- if (err)
+ if (err) {
+ mutex_unlock(&driver->hdlc_recovery_mutex);
break;
+ }
read_bytes += header_len + pkt_len + 1;
buf += header_len + pkt_len + 1; /* advance to next pkt */
+ mutex_unlock(&driver->hdlc_recovery_mutex);
}
end:
return;
@@ -1639,6 +1664,9 @@
driver->real_time_mode[i] = 1;
driver->supports_separate_cmdrsp = 1;
driver->supports_apps_hdlc_encoding = 1;
+ driver->supports_apps_header_untagging = 1;
+ for (i = 0; i < NUM_PERIPHERALS; i++)
+ driver->peripheral_untag[i] = 0;
mutex_init(&driver->diag_hdlc_mutex);
mutex_init(&driver->diag_cntl_mutex);
mutex_init(&driver->mode_lock);
@@ -1668,9 +1696,12 @@
driver->feature[i].rcvd_feature_mask = 0;
driver->feature[i].peripheral_buffering = 0;
driver->feature[i].encode_hdlc = 0;
+ driver->feature[i].untag_header =
+ DISABLE_PKT_HEADER_UNTAGGING;
driver->feature[i].mask_centralization = 0;
driver->feature[i].log_on_demand = 0;
driver->feature[i].sent_feature_mask = 0;
+ driver->feature[i].diag_id_support = 0;
driver->buffering_mode[i].peripheral = i;
driver->buffering_mode[i].mode = DIAG_BUFFERING_MODE_STREAMING;
driver->buffering_mode[i].high_wm_val = DEFAULT_HIGH_WM_VAL;
diff --git a/drivers/char/diag/diagfwd.h b/drivers/char/diag/diagfwd.h
index 47c8555..677099f 100644
--- a/drivers/char/diag/diagfwd.h
+++ b/drivers/char/diag/diagfwd.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -19,9 +19,11 @@
*/
#define SET_BUF_CTXT(p, d, n) \
(((p & 0xFF) << 16) | ((d & 0xFF) << 8) | (n & 0xFF))
+#define SET_PD_CTXT(u) ((u & 0xFF) << 24)
#define GET_BUF_PERIPHERAL(p) ((p & 0xFF0000) >> 16)
#define GET_BUF_TYPE(d) ((d & 0x00FF00) >> 8)
#define GET_BUF_NUM(n) ((n & 0x0000FF))
+#define GET_PD_CTXT(u) ((u & 0xFF000000) >> 24)
#define CHK_OVERFLOW(bufStart, start, end, length) \
((((bufStart) <= (start)) && ((end) - (start) >= (length))) ? 1 : 0)
diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c
index e13871e..d7e24fc 100644
--- a/drivers/char/diag/diagfwd_cntl.c
+++ b/drivers/char/diag/diagfwd_cntl.c
@@ -68,9 +68,9 @@
driver->feature[peripheral].sent_feature_mask = 0;
driver->feature[peripheral].rcvd_feature_mask = 0;
- flush_workqueue(driver->cntl_wq);
reg_dirty |= PERIPHERAL_MASK(peripheral);
diag_cmd_remove_reg_by_proc(peripheral);
+ driver->diag_id_sent[peripheral] = 0;
driver->feature[peripheral].stm_support = DISABLE_STM;
driver->feature[peripheral].log_on_demand = 0;
driver->stm_state[peripheral] = DISABLE_STM;
@@ -199,6 +199,20 @@
}
}
+static void process_upd_header_untagging_feature(uint8_t peripheral)
+{
+ if (peripheral >= NUM_PERIPHERALS)
+ return;
+
+ if (driver->supports_apps_header_untagging) {
+ driver->feature[peripheral].untag_header =
+ ENABLE_PKT_HEADER_UNTAGGING;
+ } else {
+ driver->feature[peripheral].untag_header =
+ DISABLE_PKT_HEADER_UNTAGGING;
+ }
+}
+
static void process_command_deregistration(uint8_t *buf, uint32_t len,
uint8_t peripheral)
{
@@ -375,6 +389,8 @@
driver->feature[peripheral].separate_cmd_rsp = 1;
if (FEATURE_SUPPORTED(F_DIAG_APPS_HDLC_ENCODE))
process_hdlc_encoding_feature(peripheral);
+ if (FEATURE_SUPPORTED(F_DIAG_PKT_HEADER_UNTAG))
+ process_upd_header_untagging_feature(peripheral);
if (FEATURE_SUPPORTED(F_DIAG_STM))
enable_stm_feature(peripheral);
if (FEATURE_SUPPORTED(F_DIAG_MASK_CENTRALIZATION))
@@ -383,6 +399,8 @@
driver->feature[peripheral].peripheral_buffering = 1;
if (FEATURE_SUPPORTED(F_DIAG_SOCKETS_ENABLED))
enable_socket_feature(peripheral);
+ if (FEATURE_SUPPORTED(F_DIAG_DIAGID_SUPPORT))
+ driver->feature[peripheral].diag_id_support = 1;
}
process_socket_feature(peripheral);
@@ -511,6 +529,7 @@
/* Don't account for pkt_id and length */
read_len += header_len - (2 * sizeof(uint32_t));
+ mutex_lock(&driver->msg_mask_lock);
driver->max_ssid_count[peripheral] = header->count;
for (i = 0; i < header->count && read_len < len; i++) {
ssid_range = (struct diag_ssid_range_t *)ptr;
@@ -554,6 +573,7 @@
}
driver->msg_mask_tbl_count += 1;
}
+ mutex_unlock(&driver->msg_mask_lock);
}
static void diag_build_time_mask_update(uint8_t *buf,
@@ -578,11 +598,11 @@
__func__, range->ssid_first, range->ssid_last);
return;
}
-
+ mutex_lock(&driver->msg_mask_lock);
build_mask = (struct diag_msg_mask_t *)(driver->build_time_mask->ptr);
num_items = range->ssid_last - range->ssid_first + 1;
- for (i = 0; i < driver->msg_mask_tbl_count; i++, build_mask++) {
+ for (i = 0; i < driver->bt_msg_mask_tbl_count; i++, build_mask++) {
if (build_mask->ssid_first != range->ssid_first)
continue;
found = 1;
@@ -601,7 +621,7 @@
if (found)
goto end;
- new_size = (driver->msg_mask_tbl_count + 1) *
+ new_size = (driver->bt_msg_mask_tbl_count + 1) *
sizeof(struct diag_msg_mask_t);
temp = krealloc(driver->build_time_mask->ptr, new_size, GFP_KERNEL);
if (!temp) {
@@ -616,8 +636,9 @@
__func__, err);
goto end;
}
- driver->msg_mask_tbl_count += 1;
+ driver->bt_msg_mask_tbl_count += 1;
end:
+ mutex_unlock(&driver->msg_mask_lock);
return;
}
@@ -704,12 +725,24 @@
{
struct diag_ctrl_diagid *header = NULL;
struct diag_ctrl_diagid ctrl_pkt;
+ struct diagfwd_info *fwd_info_data = NULL;
+ struct diagfwd_info *fwd_info_cmd = NULL;
char *process_name = NULL;
int err = 0;
uint8_t local_diag_id = 0;
+ uint8_t new_request = 0;
if (!buf || len == 0 || peripheral >= NUM_PERIPHERALS)
return;
+
+ fwd_info_data = &peripheral_info[TYPE_DATA][peripheral];
+ if (!fwd_info_data)
+ return;
+
+ fwd_info_cmd = &peripheral_info[TYPE_CMD][peripheral];
+ if (!fwd_info_cmd)
+ return;
+
header = (struct diag_ctrl_diagid *)buf;
process_name = (char *)&header->process_name;
if (diag_query_diag_id(process_name, &local_diag_id))
@@ -718,7 +751,27 @@
diag_id++;
diag_add_diag_id_to_list(diag_id, process_name);
ctrl_pkt.diag_id = diag_id;
+ new_request = 1;
}
+
+ if (new_request) {
+ fwd_info_data->num_pd++;
+ fwd_info_cmd->num_pd++;
+ }
+
+ if (strnstr(process_name, DIAG_ID_ROOT_STRING, strlen(process_name))) {
+ fwd_info_cmd->diagid_root = diag_id;
+ fwd_info_data->diagid_root = diag_id;
+ driver->diag_id_sent[peripheral] = 0;
+ } else {
+ fwd_info_cmd->diagid_user[fwd_info_cmd->num_pd - 2] = diag_id;
+ fwd_info_data->diagid_user[fwd_info_data->num_pd - 2] = diag_id;
+ }
+
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: peripheral = %d: diag_id string = %s,diag_id = %d\n",
+ peripheral, process_name, ctrl_pkt.diag_id);
+
ctrl_pkt.pkt_id = DIAG_CTRL_MSG_DIAGID;
ctrl_pkt.version = 1;
strlcpy((char *)&ctrl_pkt.process_name, process_name,
@@ -728,8 +781,21 @@
err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt, ctrl_pkt.len +
sizeof(ctrl_pkt.pkt_id) + sizeof(ctrl_pkt.len));
if (err && err != -ENODEV) {
- pr_err("diag: Unable to send diag id ctrl packet to peripheral %d, err: %d\n",
+ pr_err("diag: Unable to send diag id ctrl packet to peripheral %d, err: %d\n",
peripheral, err);
+ } else {
+ /*
+ * Masks (F3, logs and events) will be sent to
+ * peripheral immediately following feature mask update only
+ * if diag_id support is not present or
+ * diag_id support is present and diag_id has been sent to
+ * peripheral.
+ * With diag_id being sent now, mask will be updated
+ * to peripherals.
+ */
+ driver->diag_id_sent[peripheral] = 1;
+ diag_send_updates_peripheral(peripheral);
+ diagfwd_buffers_init(fwd_info_data);
}
}
diff --git a/drivers/char/diag/diagfwd_cntl.h b/drivers/char/diag/diagfwd_cntl.h
index 7823040..8b22d7e 100644
--- a/drivers/char/diag/diagfwd_cntl.h
+++ b/drivers/char/diag/diagfwd_cntl.h
@@ -68,6 +68,7 @@
#define F_DIAG_SOCKETS_ENABLED 13
#define F_DIAG_DCI_EXTENDED_HEADER_SUPPORT 14
#define F_DIAG_DIAGID_SUPPORT 15
+#define F_DIAG_PKT_HEADER_UNTAG 16
#define ENABLE_SEPARATE_CMDRSP 1
#define DISABLE_SEPARATE_CMDRSP 0
@@ -82,6 +83,9 @@
#define ENABLE_APPS_HDLC_ENCODING 1
#define DISABLE_APPS_HDLC_ENCODING 0
+#define ENABLE_PKT_HEADER_UNTAGGING 1
+#define DISABLE_PKT_HEADER_UNTAGGING 0
+
#define DIAG_MODE_PKT_LEN 36
struct diag_ctrl_pkt_header_t {
@@ -279,6 +283,7 @@
void diag_cntl_process_read_data(struct diagfwd_info *p_info, void *buf,
int len);
int diag_send_real_time_update(uint8_t peripheral, int real_time);
+void diag_map_pd_to_diagid(uint8_t pd, uint8_t *diag_id, int *peripheral);
int diag_send_peripheral_buffering_mode(struct diag_buffering_mode_t *params);
void diag_update_proc_vote(uint16_t proc, uint8_t vote, int index);
void diag_update_real_time_vote(uint16_t proc, uint8_t real_time, int index);
diff --git a/drivers/char/diag/diagfwd_glink.c b/drivers/char/diag/diagfwd_glink.c
index ebc37f6..e9683e0 100644
--- a/drivers/char/diag/diagfwd_glink.c
+++ b/drivers/char/diag/diagfwd_glink.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -361,13 +361,44 @@
diagfwd_channel_read(glink_info->fwd_ctxt);
}
+struct diag_glink_read_work {
+ struct diag_glink_info *glink_info;
+ const void *ptr_read_done;
+ const void *ptr_rx_done;
+ size_t ptr_read_size;
+ struct work_struct work;
+};
+
+static void diag_glink_notify_rx_work_fn(struct work_struct *work)
+{
+ struct diag_glink_read_work *read_work = container_of(work,
+ struct diag_glink_read_work, work);
+ struct diag_glink_info *glink_info = read_work->glink_info;
+
+ if (!glink_info || !glink_info->hdl) {
+ kfree(read_work);
+ return;
+ }
+
+ diagfwd_channel_read_done(glink_info->fwd_ctxt,
+ (unsigned char *)(read_work->ptr_read_done),
+ read_work->ptr_read_size);
+
+ glink_rx_done(glink_info->hdl, read_work->ptr_rx_done, false);
+
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Rx done for packet %pK of len: %d periph: %d ch: %d\n",
+ read_work->ptr_rx_done, (int)read_work->ptr_read_size,
+ glink_info->peripheral, glink_info->type);
+ kfree(read_work);
+}
static void diag_glink_notify_rx(void *hdl, const void *priv,
const void *pkt_priv, const void *ptr,
size_t size)
{
struct diag_glink_info *glink_info = (struct diag_glink_info *)priv;
- int err = 0;
+ struct diag_glink_read_work *read_work;
if (!glink_info || !glink_info->hdl || !ptr || !pkt_priv || !hdl)
return;
@@ -379,12 +410,25 @@
"diag: received a packet %pK of len:%d from periph:%d ch:%d\n",
ptr, (int)size, glink_info->peripheral, glink_info->type);
+ read_work = kmalloc(sizeof(*read_work), GFP_ATOMIC);
+ if (!read_work) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Could not allocate read_work\n");
+ glink_rx_done(glink_info->hdl, ptr, true);
+ return;
+ }
+
memcpy((void *)pkt_priv, ptr, size);
- err = diagfwd_channel_read_done(glink_info->fwd_ctxt,
- (unsigned char *)pkt_priv, size);
- glink_rx_done(glink_info->hdl, ptr, false);
+
+ read_work->glink_info = glink_info;
+ read_work->ptr_read_done = pkt_priv;
+ read_work->ptr_rx_done = ptr;
+ read_work->ptr_read_size = size;
+ INIT_WORK(&read_work->work, diag_glink_notify_rx_work_fn);
+ queue_work(glink_info->wq, &read_work->work);
+
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
- "diag: Rx done for packet %pK of len:%d periph:%d ch:%d\n",
+ "diag: Rx queued for packet %pK of len: %d periph: %d ch: %d\n",
ptr, (int)size, glink_info->peripheral, glink_info->type);
}
@@ -462,6 +506,45 @@
return err;
}
+
+static void diag_glink_connect_work_fn(struct work_struct *work)
+{
+ struct diag_glink_info *glink_info = container_of(work,
+ struct diag_glink_info,
+ connect_work);
+ if (!glink_info || !glink_info->hdl)
+ return;
+ atomic_set(&glink_info->opened, 1);
+ diagfwd_channel_open(glink_info->fwd_ctxt);
+ diagfwd_late_open(glink_info->fwd_ctxt);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "glink channel open: p: %d t: %d\n",
+ glink_info->peripheral, glink_info->type);
+}
+
+static void diag_glink_remote_disconnect_work_fn(struct work_struct *work)
+{
+ struct diag_glink_info *glink_info = container_of(work,
+ struct diag_glink_info,
+ remote_disconnect_work);
+ if (!glink_info || !glink_info->hdl)
+ return;
+ atomic_set(&glink_info->opened, 0);
+ diagfwd_channel_close(glink_info->fwd_ctxt);
+ atomic_set(&glink_info->tx_intent_ready, 0);
+}
+
+static void diag_glink_late_init_work_fn(struct work_struct *work)
+{
+ struct diag_glink_info *glink_info = container_of(work,
+ struct diag_glink_info,
+ late_init_work);
+ if (!glink_info || !glink_info->hdl)
+ return;
+ diagfwd_channel_open(glink_info->fwd_ctxt);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "glink late init p: %d t: %d\n",
+ glink_info->peripheral, glink_info->type);
+}
+
static void diag_glink_transport_notify_state(void *handle, const void *priv,
unsigned int event)
{
@@ -475,9 +558,7 @@
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
"%s received channel connect for periph:%d\n",
glink_info->name, glink_info->peripheral);
- atomic_set(&glink_info->opened, 1);
- diagfwd_channel_open(glink_info->fwd_ctxt);
- diagfwd_late_open(glink_info->fwd_ctxt);
+ queue_work(glink_info->wq, &glink_info->connect_work);
break;
case GLINK_LOCAL_DISCONNECTED:
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
@@ -489,9 +570,7 @@
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
"%s received channel remote disconnect for periph:%d\n",
glink_info->name, glink_info->peripheral);
- atomic_set(&glink_info->opened, 0);
- diagfwd_channel_close(glink_info->fwd_ctxt);
- atomic_set(&glink_info->tx_intent_ready, 0);
+ queue_work(glink_info->wq, &glink_info->remote_disconnect_work);
break;
default:
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
@@ -596,7 +675,7 @@
glink_info->inited = 1;
if (atomic_read(&glink_info->opened))
- diagfwd_channel_open(glink_info->fwd_ctxt);
+ queue_work(glink_info->wq, &(glink_info->late_init_work));
DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s exiting\n",
glink_info->name);
@@ -641,6 +720,10 @@
INIT_WORK(&(glink_info->open_work), diag_glink_open_work_fn);
INIT_WORK(&(glink_info->close_work), diag_glink_close_work_fn);
INIT_WORK(&(glink_info->read_work), diag_glink_read_work_fn);
+ INIT_WORK(&(glink_info->connect_work), diag_glink_connect_work_fn);
+ INIT_WORK(&(glink_info->remote_disconnect_work),
+ diag_glink_remote_disconnect_work_fn);
+ INIT_WORK(&(glink_info->late_init_work), diag_glink_late_init_work_fn);
link_info.glink_link_state_notif_cb = diag_glink_notify_cb;
link_info.transport = NULL;
link_info.edge = glink_info->edge;
@@ -681,6 +764,8 @@
struct diag_glink_info *glink_info = NULL;
for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ if (peripheral != PERIPHERAL_WDSP)
+ continue;
glink_info = &glink_cntl[peripheral];
__diag_glink_init(glink_info);
diagfwd_cntl_register(TRANSPORT_GLINK, glink_info->peripheral,
@@ -719,6 +804,8 @@
int peripheral = 0;
for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ if (peripheral != PERIPHERAL_WDSP)
+ continue;
__diag_glink_exit(&glink_cntl[peripheral]);
glink_unregister_link_state_cb(&glink_cntl[peripheral].hdl);
}
@@ -729,6 +816,8 @@
int peripheral = 0;
for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ if (peripheral != PERIPHERAL_WDSP)
+ continue;
__diag_glink_exit(&glink_data[peripheral]);
__diag_glink_exit(&glink_cmd[peripheral]);
__diag_glink_exit(&glink_dci[peripheral]);
diff --git a/drivers/char/diag/diagfwd_glink.h b/drivers/char/diag/diagfwd_glink.h
index 73f2fe8..6cad445 100644
--- a/drivers/char/diag/diagfwd_glink.h
+++ b/drivers/char/diag/diagfwd_glink.h
@@ -35,6 +35,9 @@
struct work_struct open_work;
struct work_struct close_work;
struct work_struct read_work;
+ struct work_struct connect_work;
+ struct work_struct remote_disconnect_work;
+ struct work_struct late_init_work;
struct diagfwd_info *fwd_ctxt;
};
diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c
index 5a8ef04..955d81f 100644
--- a/drivers/char/diag/diagfwd_peripheral.c
+++ b/drivers/char/diag/diagfwd_peripheral.c
@@ -45,6 +45,8 @@
static void diagfwd_cntl_close(struct diagfwd_info *fwd_info);
static void diagfwd_dci_open(struct diagfwd_info *fwd_info);
static void diagfwd_dci_close(struct diagfwd_info *fwd_info);
+static void diagfwd_data_read_untag_done(struct diagfwd_info *fwd_info,
+ unsigned char *buf, int len);
static void diagfwd_data_read_done(struct diagfwd_info *fwd_info,
unsigned char *buf, int len);
static void diagfwd_cntl_read_done(struct diagfwd_info *fwd_info,
@@ -58,7 +60,7 @@
static struct diag_channel_ops data_ch_ops = {
.open = NULL,
.close = NULL,
- .read_done = diagfwd_data_read_done
+ .read_done = diagfwd_data_read_untag_done
};
static struct diag_channel_ops cntl_ch_ops = {
@@ -213,6 +215,317 @@
return buf->len;
}
+int diag_md_get_peripheral(int ctxt)
+{
+ int pd = 0, i = 0;
+ int type = 0, peripheral = -EINVAL;
+ struct diagfwd_info *fwd_info = NULL;
+
+ peripheral = GET_BUF_PERIPHERAL(ctxt);
+ if (peripheral < 0 || peripheral > NUM_PERIPHERALS)
+ return -EINVAL;
+
+ if (peripheral == APPS_DATA)
+ return peripheral;
+
+ type = GET_BUF_TYPE(ctxt);
+ if (type < 0 || type >= NUM_TYPES)
+ return -EINVAL;
+
+ fwd_info = &peripheral_info[type][peripheral];
+ if (!fwd_info)
+ return -EINVAL;
+
+ pd = GET_PD_CTXT(ctxt);
+
+ if (driver->num_pd_session) {
+ if (pd == fwd_info->diagid_root) {
+ if (peripheral > NUM_PERIPHERALS)
+ peripheral = -EINVAL;
+ } else {
+ for (i = 0; i <= (fwd_info->num_pd - 2); i++) {
+ if (pd == fwd_info->diagid_user[i]) {
+ switch (peripheral) {
+ case PERIPHERAL_MODEM:
+ if (driver->pd_logging_mode[0])
+ peripheral = UPD_WLAN;
+ break;
+ default:
+ peripheral = -EINVAL;
+ break;
+ }
+ }
+ }
+ }
+ }
+ return peripheral;
+}
+
+static void diagfwd_data_process_done(struct diagfwd_info *fwd_info,
+ struct diagfwd_buf_t *buf, int len)
+{
+ int err = 0;
+ int write_len = 0, peripheral = 0;
+ unsigned char *write_buf = NULL;
+ struct diag_md_session_t *session_info = NULL;
+ uint8_t hdlc_disabled = 0;
+
+ if (!fwd_info || !buf || len <= 0) {
+ diag_ws_release();
+ return;
+ }
+
+ switch (fwd_info->type) {
+ case TYPE_DATA:
+ case TYPE_CMD:
+ break;
+ default:
+ pr_err_ratelimited("diag: In %s, invalid type %d for peripheral %d\n",
+ __func__, fwd_info->type,
+ fwd_info->peripheral);
+ diag_ws_release();
+ return;
+ }
+
+ mutex_lock(&driver->hdlc_disable_mutex);
+ mutex_lock(&fwd_info->data_mutex);
+
+ peripheral =
+ diag_md_get_peripheral(buf->ctxt);
+ if (peripheral < 0) {
+ pr_err("diag:%s:%d invalid peripheral = %d\n",
+ __func__, __LINE__, peripheral);
+ mutex_unlock(&fwd_info->data_mutex);
+ mutex_unlock(&driver->hdlc_disable_mutex);
+ diag_ws_release();
+ return;
+ }
+
+ session_info =
+ diag_md_session_get_peripheral(peripheral);
+ if (session_info)
+ hdlc_disabled = session_info->hdlc_disabled;
+ else
+ hdlc_disabled = driver->hdlc_disabled;
+
+ if (hdlc_disabled) {
+		/* The data is raw and on APPS side HDLC is disabled */
+ if (!buf) {
+ pr_err("diag: In %s, no match for non encode buffer %pK, peripheral %d, type: %d\n",
+ __func__, buf, fwd_info->peripheral,
+ fwd_info->type);
+ goto end;
+ }
+ if (len > PERIPHERAL_BUF_SZ) {
+ pr_err("diag: In %s, Incoming buffer too large %d, peripheral %d, type: %d\n",
+ __func__, len, fwd_info->peripheral,
+ fwd_info->type);
+ goto end;
+ }
+ write_len = len;
+ if (write_len <= 0)
+ goto end;
+ write_buf = buf->data_raw;
+ } else {
+ if (!buf) {
+ pr_err("diag: In %s, no match for non encode buffer %pK, peripheral %d, type: %d\n",
+ __func__, buf, fwd_info->peripheral,
+ fwd_info->type);
+ goto end;
+ }
+
+ write_len = check_bufsize_for_encoding(buf, len);
+ if (write_len <= 0) {
+ pr_err("diag: error in checking buf for encoding\n");
+ goto end;
+ }
+ write_buf = buf->data;
+ err = diag_add_hdlc_encoding(write_buf, &write_len,
+ buf->data_raw, len);
+ if (err) {
+ pr_err("diag: error in adding hdlc encoding\n");
+ goto end;
+ }
+ }
+
+ if (write_len > 0) {
+ err = diag_mux_write(DIAG_LOCAL_PROC, write_buf, write_len,
+ buf->ctxt);
+ if (err) {
+ pr_err_ratelimited("diag: In %s, unable to write to mux error: %d\n",
+ __func__, err);
+ goto end;
+ }
+ }
+ mutex_unlock(&fwd_info->data_mutex);
+ mutex_unlock(&driver->hdlc_disable_mutex);
+ diagfwd_queue_read(fwd_info);
+ return;
+
+end:
+ diag_ws_release();
+ mutex_unlock(&fwd_info->data_mutex);
+ mutex_unlock(&driver->hdlc_disable_mutex);
+ if (buf) {
+ diagfwd_write_done(fwd_info->peripheral, fwd_info->type,
+ GET_BUF_NUM(buf->ctxt));
+ }
+ diagfwd_queue_read(fwd_info);
+}
+
+static void diagfwd_data_read_untag_done(struct diagfwd_info *fwd_info,
+ unsigned char *buf, int len)
+{
+ int i = 0;
+ int len_cpd = 0;
+ int ctxt_cpd = 0;
+ int len_upd[MAX_PERIPHERAL_UPD] = {0};
+ int ctxt_upd[MAX_PERIPHERAL_UPD] = {0};
+ int packet_len = 0, processed = 0;
+ unsigned char *temp_buf_main = NULL;
+ unsigned char *temp_buf_cpd = NULL;
+ unsigned char *temp_buf_upd[MAX_PERIPHERAL_UPD] = {NULL};
+ struct diagfwd_buf_t *temp_fwdinfo_cpd = NULL;
+ struct diagfwd_buf_t *temp_fwdinfo_upd = NULL;
+ int flag_buf_1 = 0, flag_buf_2 = 0;
+ uint8_t peripheral;
+
+ if (!fwd_info || !buf || len <= 0) {
+ diag_ws_release();
+ return;
+ }
+
+ switch (fwd_info->type) {
+ case TYPE_DATA:
+ case TYPE_CMD:
+ break;
+ default:
+ pr_err_ratelimited("diag: In %s, invalid type %d for peripheral %d\n",
+ __func__, fwd_info->type,
+ fwd_info->peripheral);
+ diag_ws_release();
+ return;
+ }
+ peripheral = fwd_info->peripheral;
+ if (peripheral >= NUM_PERIPHERALS)
+ return;
+
+ if (driver->feature[peripheral].encode_hdlc &&
+ driver->feature[peripheral].untag_header &&
+ driver->peripheral_untag[peripheral]) {
+ temp_buf_cpd = buf;
+ temp_buf_main = buf;
+ if (fwd_info->buf_1 &&
+ fwd_info->buf_1->data_raw == buf) {
+ flag_buf_1 = 1;
+ temp_fwdinfo_cpd = fwd_info->buf_1;
+ if (fwd_info->type == TYPE_DATA) {
+ for (i = 0; i <= (fwd_info->num_pd - 2); i++)
+ temp_buf_upd[i] =
+ fwd_info->buf_upd[i][0]->data_raw;
+ }
+ } else if (fwd_info->buf_2 &&
+ fwd_info->buf_2->data_raw == buf) {
+ flag_buf_2 = 1;
+ temp_fwdinfo_cpd = fwd_info->buf_2;
+ if (fwd_info->type == TYPE_DATA) {
+ for (i = 0; i <= (fwd_info->num_pd - 2); i++)
+ temp_buf_upd[i] =
+ fwd_info->buf_upd[i][1]->data_raw;
+ }
+ } else {
+ pr_err("diag: In %s, no match for buffer %pK, peripheral %d, type: %d\n",
+ __func__, buf, peripheral,
+ fwd_info->type);
+ goto end;
+ }
+
+ while (processed < len) {
+ pr_debug("diag_fr:untagged packet buf contents: %02x %02x %02x %02x\n",
+ *temp_buf_main, *(temp_buf_main+1),
+ *(temp_buf_main+2), *(temp_buf_main+3));
+ packet_len =
+ *(uint16_t *) (temp_buf_main + 2);
+ if (packet_len > PERIPHERAL_BUF_SZ)
+ goto end;
+ if ((*temp_buf_main) == fwd_info->diagid_root) {
+ ctxt_cpd = fwd_info->diagid_root;
+ len_cpd += packet_len;
+ if (temp_buf_cpd) {
+ memcpy(temp_buf_cpd,
+ (temp_buf_main + 4), packet_len);
+ temp_buf_cpd += packet_len;
+ }
+ } else {
+ for (i = 0; i <= (fwd_info->num_pd - 2); i++)
+ if ((*temp_buf_main) ==
+ fwd_info->diagid_user[i])
+ break;
+ ctxt_upd[i] = fwd_info->diagid_user[i];
+ if (temp_buf_upd[i]) {
+ memcpy(temp_buf_upd[i],
+ (temp_buf_main + 4), packet_len);
+ temp_buf_upd[i] += packet_len;
+ }
+ len_upd[i] += packet_len;
+ }
+ len = len - 4;
+ temp_buf_main += (packet_len + 4);
+ processed += packet_len;
+ }
+ for (i = 0; i <= (fwd_info->num_pd - 2); i++) {
+ if (fwd_info->type == TYPE_DATA && len_upd[i]) {
+ if (flag_buf_1) {
+ fwd_info->upd_len[i][0] = len_upd[i];
+ temp_fwdinfo_upd =
+ fwd_info->buf_upd[i][0];
+ } else {
+ fwd_info->upd_len[i][1] = len_upd[i];
+ temp_fwdinfo_upd =
+ fwd_info->buf_upd[i][1];
+ }
+ temp_fwdinfo_upd->ctxt &= 0x00FFFFFF;
+ temp_fwdinfo_upd->ctxt |=
+ (SET_PD_CTXT(ctxt_upd[i]));
+ atomic_set(&temp_fwdinfo_upd->in_busy, 1);
+ diagfwd_data_process_done(fwd_info,
+ temp_fwdinfo_upd, len_upd[i]);
+ } else {
+ if (flag_buf_1)
+ fwd_info->upd_len[i][0] = 0;
+ if (flag_buf_2)
+ fwd_info->upd_len[i][1] = 0;
+ }
+ }
+ if (len_cpd) {
+ if (flag_buf_1)
+ fwd_info->cpd_len_1 = len_cpd;
+ else
+ fwd_info->cpd_len_2 = len_cpd;
+ temp_fwdinfo_cpd->ctxt &= 0x00FFFFFF;
+ temp_fwdinfo_cpd->ctxt |=
+ (SET_PD_CTXT(ctxt_cpd));
+ diagfwd_data_process_done(fwd_info,
+ temp_fwdinfo_cpd, len_cpd);
+ } else {
+ if (flag_buf_1)
+ fwd_info->cpd_len_1 = 0;
+ if (flag_buf_2)
+ fwd_info->cpd_len_2 = 0;
+ }
+ } else {
+ diagfwd_data_read_done(fwd_info, buf, len);
+ }
+ return;
+end:
+ diag_ws_release();
+ if (temp_fwdinfo_cpd) {
+ diagfwd_write_done(fwd_info->peripheral, fwd_info->type,
+ GET_BUF_NUM(temp_fwdinfo_cpd->ctxt));
+ }
+ diagfwd_queue_read(fwd_info);
+}
+
static void diagfwd_data_read_done(struct diagfwd_info *fwd_info,
unsigned char *buf, int len)
{
@@ -413,6 +726,7 @@
uint8_t peripheral;
uint8_t transport;
uint8_t type;
+ int i = 0;
struct diagfwd_info *fwd_info = NULL;
for (transport = 0; transport < NUM_TRANSPORT; transport++) {
@@ -436,9 +750,20 @@
fwd_info->inited = 1;
fwd_info->read_bytes = 0;
fwd_info->write_bytes = 0;
+ fwd_info->cpd_len_1 = 0;
+ fwd_info->cpd_len_2 = 0;
+ fwd_info->num_pd = 0;
mutex_init(&fwd_info->buf_mutex);
mutex_init(&fwd_info->data_mutex);
spin_lock_init(&fwd_info->write_buf_lock);
+
+ for (i = 0; i < MAX_PERIPHERAL_UPD; i++) {
+ fwd_info->diagid_user[i] = 0;
+ fwd_info->upd_len[i][0] = 0;
+ fwd_info->upd_len[i][1] = 0;
+ fwd_info->buf_upd[i][0] = NULL;
+ fwd_info->buf_upd[i][1] = NULL;
+ }
}
}
@@ -452,9 +777,20 @@
fwd_info->ch_open = 0;
fwd_info->read_bytes = 0;
fwd_info->write_bytes = 0;
+ fwd_info->num_pd = 0;
+ fwd_info->cpd_len_1 = 0;
+ fwd_info->cpd_len_2 = 0;
spin_lock_init(&fwd_info->write_buf_lock);
mutex_init(&fwd_info->buf_mutex);
mutex_init(&fwd_info->data_mutex);
+
+ for (i = 0; i < MAX_PERIPHERAL_UPD; i++) {
+ fwd_info->diagid_user[i] = 0;
+ fwd_info->upd_len[i][0] = 0;
+ fwd_info->upd_len[i][1] = 0;
+ fwd_info->buf_upd[i][0] = NULL;
+ fwd_info->buf_upd[i][1] = NULL;
+ }
/*
* This state shouldn't be set for Control channels
* during initialization. This is set when the feature
@@ -668,7 +1004,16 @@
dest_info->buf_ptr[i] = fwd_info->buf_ptr[i];
if (!check_channel_state(dest_info->ctxt))
diagfwd_late_open(dest_info);
- diagfwd_cntl_open(dest_info);
+
+ /*
+ * Open control channel to update masks after buffers are
+ * initialized for peripherals that have transport other than
+ * GLINK. GLINK supported peripheral mask update will
+ * happen after glink buffers are initialized.
+ */
+
+ if (dest_info->transport != TRANSPORT_GLINK)
+ diagfwd_cntl_open(dest_info);
init_fn(peripheral);
mutex_unlock(&driver->diagfwd_channel_mutex[peripheral]);
diagfwd_queue_read(&peripheral_info[TYPE_DATA][peripheral]);
@@ -721,6 +1066,16 @@
if (!fwd_info->inited || !atomic_read(&fwd_info->opened))
return -ENODEV;
+ if (type == TYPE_CMD) {
+ if (driver->feature[peripheral].untag_header)
+ if (!fwd_info->diagid_root ||
+ (!driver->diag_id_sent[peripheral])) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: diag_id is not assigned yet\n");
+ return 0;
+ }
+ }
+
if (!(fwd_info->p_ops && fwd_info->p_ops->write && fwd_info->ctxt))
return -EIO;
@@ -848,10 +1203,21 @@
__func__, fwd_info->peripheral, fwd_info->type);
return 0;
}
-
+ mutex_lock(&driver->diagfwd_channel_mutex[fwd_info->peripheral]);
fwd_info->ch_open = 1;
diagfwd_buffers_init(fwd_info);
- diagfwd_write_buffers_init(fwd_info);
+
+ /*
+ * Initialize buffers for glink supported
+ * peripherals only. Open control channel to update
+ * masks after buffers are initialized.
+ */
+ if (fwd_info->transport == TRANSPORT_GLINK) {
+ diagfwd_write_buffers_init(fwd_info);
+ if (fwd_info->type == TYPE_CNTL)
+ diagfwd_cntl_open(fwd_info);
+ }
+
if (fwd_info && fwd_info->c_ops && fwd_info->c_ops->open)
fwd_info->c_ops->open(fwd_info);
for (i = 0; i < NUM_WRITE_BUFFERS; i++) {
@@ -866,7 +1232,7 @@
if (fwd_info->p_ops && fwd_info->p_ops->open)
fwd_info->p_ops->open(fwd_info->ctxt);
}
-
+ mutex_unlock(&driver->diagfwd_channel_mutex[fwd_info->peripheral]);
return 0;
}
@@ -877,6 +1243,10 @@
if (!fwd_info)
return -EIO;
+ if (fwd_info->type == TYPE_CNTL)
+ flush_workqueue(driver->cntl_wq);
+
+ mutex_lock(&driver->diagfwd_channel_mutex[fwd_info->peripheral]);
fwd_info->ch_open = 0;
if (fwd_info && fwd_info->c_ops && fwd_info->c_ops->close)
fwd_info->c_ops->close(fwd_info);
@@ -892,7 +1262,7 @@
}
DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "p: %d t: %d considered closed\n",
fwd_info->peripheral, fwd_info->type);
-
+ mutex_unlock(&driver->diagfwd_channel_mutex[fwd_info->peripheral]);
return 0;
}
@@ -924,17 +1294,45 @@
void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt)
{
+ int i = 0;
struct diagfwd_info *fwd_info = NULL;
if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES)
return;
fwd_info = &peripheral_info[type][peripheral];
- if (ctxt == 1 && fwd_info->buf_1)
+ if (!fwd_info)
+ return;
+
+ if (ctxt == 1 && fwd_info->buf_1) {
+ /* Buffer 1 for core PD is freed */
atomic_set(&fwd_info->buf_1->in_busy, 0);
- else if (ctxt == 2 && fwd_info->buf_2)
+ fwd_info->cpd_len_1 = 0;
+ } else if (ctxt == 2 && fwd_info->buf_2) {
+ /* Buffer 2 for core PD is freed */
atomic_set(&fwd_info->buf_2->in_busy, 0);
- else
+ fwd_info->cpd_len_2 = 0;
+ } else if (ctxt >= 3 && (ctxt % 2)) {
+ for (i = 0; i <= (fwd_info->num_pd - 2); i++) {
+ if (fwd_info->buf_upd[i][0]) {
+ /* Buffer 1 for ith user PD is freed */
+ atomic_set(&fwd_info->buf_upd[i][0]->in_busy, 0);
+ fwd_info->upd_len[i][0] = 0;
+ }
+ if (!fwd_info->cpd_len_1)
+ atomic_set(&fwd_info->buf_1->in_busy, 0);
+ }
+ } else if (ctxt >= 4 && !(ctxt % 2)) {
+ for (i = 0; i <= (fwd_info->num_pd - 2); i++) {
+ if (fwd_info->buf_upd[i][1]) {
+ /* Buffer 2 for ith user PD is freed */
+				atomic_set(&fwd_info->buf_upd[i][1]->in_busy, 0);
+ fwd_info->upd_len[i][1] = 0;
+ }
+ if (!fwd_info->cpd_len_2)
+ atomic_set(&fwd_info->buf_2->in_busy, 0);
+ }
+ } else
pr_err("diag: In %s, invalid ctxt %d\n", __func__, ctxt);
diagfwd_queue_read(fwd_info);
@@ -1063,8 +1461,65 @@
fwd_info->p_ops->queue_read(fwd_info->ctxt);
}
+static int diagfwd_buffers_allocate(struct diagfwd_info *fwd_info)
+{
+ int i, j;
+
+ for (i = 0; ((fwd_info->num_pd > 1) &&
+ (i <= (fwd_info->num_pd - 2))); i++) {
+ for (j = 0; j < NUM_WRITE_BUFFERS; j++) {
+ if (!fwd_info->buf_upd[i][j]) {
+ fwd_info->buf_upd[i][j] =
+ kzalloc(sizeof(struct diagfwd_buf_t),
+ GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(fwd_info->buf_upd[i][j]))
+ return -ENOMEM;
+ kmemleak_not_leak(fwd_info->buf_upd[i][j]);
+ }
+
+ if (fwd_info->buf_upd[i][j] &&
+ !fwd_info->buf_upd[i][j]->data) {
+ fwd_info->buf_upd[i][j]->data =
+ kzalloc(PERIPHERAL_BUF_SZ +
+ APF_DIAG_PADDING,
+ GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(
+ fwd_info->buf_upd[i][j]->data))
+ return -ENOMEM;
+ fwd_info->buf_upd[i][j]->len =
+ PERIPHERAL_BUF_SZ;
+ kmemleak_not_leak(
+ fwd_info->buf_upd[i][j]->data);
+ fwd_info->buf_upd[i][j]->ctxt =
+ SET_BUF_CTXT(fwd_info->peripheral,
+ fwd_info->type, ((2 * i) + (j + 3)));
+ }
+
+ if (driver->supports_apps_hdlc_encoding) {
+ if (fwd_info->buf_upd[i][j] &&
+ !fwd_info->buf_upd[i][j]->data_raw) {
+ fwd_info->buf_upd[i][j]->data_raw =
+ kzalloc(PERIPHERAL_BUF_SZ +
+ APF_DIAG_PADDING,
+ GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(
+ fwd_info->buf_upd[i][j]->data_raw))
+ return -ENOMEM;
+ fwd_info->buf_upd[i][j]->len_raw =
+ PERIPHERAL_BUF_SZ;
+ kmemleak_not_leak(
+ fwd_info->buf_upd[i][j]->data_raw);
+ }
+ }
+ }
+ }
+ return 0;
+}
+
void diagfwd_buffers_init(struct diagfwd_info *fwd_info)
{
+ int ret = 0;
+ unsigned char *temp_char_buf;
if (!fwd_info)
return;
@@ -1076,18 +1531,20 @@
}
mutex_lock(&fwd_info->buf_mutex);
+
if (!fwd_info->buf_1) {
fwd_info->buf_1 = kzalloc(sizeof(struct diagfwd_buf_t),
GFP_KERNEL);
- if (!fwd_info->buf_1)
+ if (ZERO_OR_NULL_PTR(fwd_info->buf_1))
goto err;
kmemleak_not_leak(fwd_info->buf_1);
}
+
if (!fwd_info->buf_1->data) {
fwd_info->buf_1->data = kzalloc(PERIPHERAL_BUF_SZ +
APF_DIAG_PADDING,
GFP_KERNEL);
- if (!fwd_info->buf_1->data)
+ if (ZERO_OR_NULL_PTR(fwd_info->buf_1->data))
goto err;
fwd_info->buf_1->len = PERIPHERAL_BUF_SZ;
kmemleak_not_leak(fwd_info->buf_1->data);
@@ -1099,7 +1556,7 @@
if (!fwd_info->buf_2) {
fwd_info->buf_2 = kzalloc(sizeof(struct diagfwd_buf_t),
GFP_KERNEL);
- if (!fwd_info->buf_2)
+ if (ZERO_OR_NULL_PTR(fwd_info->buf_2))
goto err;
kmemleak_not_leak(fwd_info->buf_2);
}
@@ -1108,7 +1565,7 @@
fwd_info->buf_2->data = kzalloc(PERIPHERAL_BUF_SZ +
APF_DIAG_PADDING,
GFP_KERNEL);
- if (!fwd_info->buf_2->data)
+ if (ZERO_OR_NULL_PTR(fwd_info->buf_2->data))
goto err;
fwd_info->buf_2->len = PERIPHERAL_BUF_SZ;
kmemleak_not_leak(fwd_info->buf_2->data);
@@ -1117,6 +1574,11 @@
fwd_info->type, 2);
}
+ if (driver->feature[fwd_info->peripheral].untag_header)
+ ret = diagfwd_buffers_allocate(fwd_info);
+ if (ret)
+ goto err;
+
if (driver->supports_apps_hdlc_encoding) {
/* In support of hdlc encoding */
if (!fwd_info->buf_1->data_raw) {
@@ -1124,34 +1586,44 @@
kzalloc(PERIPHERAL_BUF_SZ +
APF_DIAG_PADDING,
GFP_KERNEL);
- if (!fwd_info->buf_1->data_raw)
+ temp_char_buf =
+ fwd_info->buf_1->data_raw;
+ if (ZERO_OR_NULL_PTR(temp_char_buf))
goto err;
- fwd_info->buf_1->len_raw = PERIPHERAL_BUF_SZ;
- kmemleak_not_leak(fwd_info->buf_1->data_raw);
+ fwd_info->buf_1->len_raw =
+ PERIPHERAL_BUF_SZ;
+ kmemleak_not_leak(temp_char_buf);
}
+
if (!fwd_info->buf_2->data_raw) {
fwd_info->buf_2->data_raw =
kzalloc(PERIPHERAL_BUF_SZ +
APF_DIAG_PADDING,
GFP_KERNEL);
- if (!fwd_info->buf_2->data_raw)
+ temp_char_buf =
+ fwd_info->buf_2->data_raw;
+ if (ZERO_OR_NULL_PTR(temp_char_buf))
goto err;
- fwd_info->buf_2->len_raw = PERIPHERAL_BUF_SZ;
- kmemleak_not_leak(fwd_info->buf_2->data_raw);
+ fwd_info->buf_2->len_raw =
+ PERIPHERAL_BUF_SZ;
+ kmemleak_not_leak(temp_char_buf);
}
}
}
- if (fwd_info->type == TYPE_CMD && driver->supports_apps_hdlc_encoding) {
+ if (fwd_info->type == TYPE_CMD &&
+ driver->supports_apps_hdlc_encoding) {
/* In support of hdlc encoding */
if (!fwd_info->buf_1->data_raw) {
fwd_info->buf_1->data_raw = kzalloc(PERIPHERAL_BUF_SZ +
APF_DIAG_PADDING,
GFP_KERNEL);
- if (!fwd_info->buf_1->data_raw)
+ temp_char_buf =
+ fwd_info->buf_1->data_raw;
+ if (ZERO_OR_NULL_PTR(temp_char_buf))
goto err;
fwd_info->buf_1->len_raw = PERIPHERAL_BUF_SZ;
- kmemleak_not_leak(fwd_info->buf_1->data_raw);
+ kmemleak_not_leak(temp_char_buf);
}
}
@@ -1161,10 +1633,12 @@
err:
mutex_unlock(&fwd_info->buf_mutex);
diagfwd_buffers_exit(fwd_info);
+ return;
}
static void diagfwd_buffers_exit(struct diagfwd_info *fwd_info)
{
+ int i = 0;
if (!fwd_info)
return;
@@ -1186,6 +1660,24 @@
kfree(fwd_info->buf_2);
fwd_info->buf_2 = NULL;
}
+ for (i = 0; i <= (fwd_info->num_pd - 2); i++) {
+ if (fwd_info->buf_upd[i][0]) {
+ kfree(fwd_info->buf_upd[i][0]->data);
+ fwd_info->buf_upd[i][0]->data = NULL;
+ kfree(fwd_info->buf_upd[i][0]->data_raw);
+ fwd_info->buf_upd[i][0]->data_raw = NULL;
+ kfree(fwd_info->buf_upd[i][0]);
+ fwd_info->buf_upd[i][0] = NULL;
+ }
+ if (fwd_info->buf_upd[i][1]) {
+ kfree(fwd_info->buf_upd[i][1]->data);
+ fwd_info->buf_upd[i][1]->data = NULL;
+ kfree(fwd_info->buf_upd[i][1]->data_raw);
+ fwd_info->buf_upd[i][1]->data_raw = NULL;
+ kfree(fwd_info->buf_upd[i][1]);
+ fwd_info->buf_upd[i][1] = NULL;
+ }
+ }
mutex_unlock(&fwd_info->buf_mutex);
}
diff --git a/drivers/char/diag/diagfwd_peripheral.h b/drivers/char/diag/diagfwd_peripheral.h
index 5884a12..b16670e 100644
--- a/drivers/char/diag/diagfwd_peripheral.h
+++ b/drivers/char/diag/diagfwd_peripheral.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -68,6 +68,12 @@
uint8_t transport;
uint8_t inited;
uint8_t ch_open;
+ uint8_t num_pd;
+ uint8_t diagid_root;
+ uint8_t diagid_user[MAX_PERIPHERAL_UPD];
+ int cpd_len_1;
+ int cpd_len_2;
+ int upd_len[MAX_PERIPHERAL_UPD][2];
atomic_t opened;
unsigned long read_bytes;
unsigned long write_bytes;
@@ -77,6 +83,7 @@
void *ctxt;
struct diagfwd_buf_t *buf_1;
struct diagfwd_buf_t *buf_2;
+ struct diagfwd_buf_t *buf_upd[MAX_PERIPHERAL_UPD][2];
struct diagfwd_buf_t *buf_ptr[NUM_WRITE_BUFFERS];
struct diag_peripheral_ops *p_ops;
struct diag_channel_ops *c_ops;
@@ -94,6 +101,9 @@
void diagfwd_late_open(struct diagfwd_info *fwd_info);
void diagfwd_close(uint8_t peripheral, uint8_t type);
+
+int diag_md_get_peripheral(int ctxt);
+
int diagfwd_register(uint8_t transport, uint8_t peripheral, uint8_t type,
void *ctxt, struct diag_peripheral_ops *ops,
struct diagfwd_info **fwd_ctxt);
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index fcdd886..172a9dc 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -3877,6 +3877,9 @@
* because the lower layer is allowed to hold locks while calling
* message delivery.
*/
+
+ rcu_read_lock();
+
if (!run_to_completion)
spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
if (intf->curr_msg == NULL && !intf->in_shutdown) {
@@ -3899,6 +3902,8 @@
if (newmsg)
intf->handlers->sender(intf->send_info, newmsg);
+ rcu_read_unlock();
+
handle_new_recv_msgs(intf);
}
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 6958b5c..510fc10 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -762,6 +762,11 @@
result, len, data[2]);
} else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
|| data[1] != IPMI_GET_MSG_FLAGS_CMD) {
+ /*
+ * Don't abort here, maybe it was a queued
+ * response to a previous command.
+ */
+ ipmi_ssif_unlock_cond(ssif_info, flags);
pr_warn(PFX "Invalid response getting flags: %x %x\n",
data[0], data[1]);
} else {
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index a7511a1..8069b36 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -59,7 +59,7 @@
/*
* Assigned numbers, used for dynamic minors
*/
-#define DYNAMIC_MINORS 75 /* like dynamic majors */
+#define DYNAMIC_MINORS 64 /* like dynamic majors */
static DECLARE_BITMAP(misc_minors, DYNAMIC_MINORS);
#ifdef CONFIG_PROC_FS
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 08d1dd5..ee737ef 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -2044,8 +2044,8 @@
struct batched_entropy {
union {
- unsigned long entropy_long[CHACHA20_BLOCK_SIZE / sizeof(unsigned long)];
- unsigned int entropy_int[CHACHA20_BLOCK_SIZE / sizeof(unsigned int)];
+ u64 entropy_u64[CHACHA20_BLOCK_SIZE / sizeof(u64)];
+ u32 entropy_u32[CHACHA20_BLOCK_SIZE / sizeof(u32)];
};
unsigned int position;
};
@@ -2055,52 +2055,51 @@
* number is either as good as RDRAND or as good as /dev/urandom, with the
* goal of being quite fast and not depleting entropy.
*/
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_long);
-unsigned long get_random_long(void)
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
+u64 get_random_u64(void)
{
- unsigned long ret;
+ u64 ret;
struct batched_entropy *batch;
- if (arch_get_random_long(&ret))
+#if BITS_PER_LONG == 64
+ if (arch_get_random_long((unsigned long *)&ret))
return ret;
+#else
+ if (arch_get_random_long((unsigned long *)&ret) &&
+ arch_get_random_long((unsigned long *)&ret + 1))
+ return ret;
+#endif
- batch = &get_cpu_var(batched_entropy_long);
- if (batch->position % ARRAY_SIZE(batch->entropy_long) == 0) {
- extract_crng((u8 *)batch->entropy_long);
+ batch = &get_cpu_var(batched_entropy_u64);
+ if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
+ extract_crng((u8 *)batch->entropy_u64);
batch->position = 0;
}
- ret = batch->entropy_long[batch->position++];
- put_cpu_var(batched_entropy_long);
+ ret = batch->entropy_u64[batch->position++];
+ put_cpu_var(batched_entropy_u64);
return ret;
}
-EXPORT_SYMBOL(get_random_long);
+EXPORT_SYMBOL(get_random_u64);
-#if BITS_PER_LONG == 32
-unsigned int get_random_int(void)
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
+u32 get_random_u32(void)
{
- return get_random_long();
-}
-#else
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_int);
-unsigned int get_random_int(void)
-{
- unsigned int ret;
+ u32 ret;
struct batched_entropy *batch;
if (arch_get_random_int(&ret))
return ret;
- batch = &get_cpu_var(batched_entropy_int);
- if (batch->position % ARRAY_SIZE(batch->entropy_int) == 0) {
- extract_crng((u8 *)batch->entropy_int);
+ batch = &get_cpu_var(batched_entropy_u32);
+ if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
+ extract_crng((u8 *)batch->entropy_u32);
batch->position = 0;
}
- ret = batch->entropy_int[batch->position++];
- put_cpu_var(batched_entropy_int);
+ ret = batch->entropy_u32[batch->position++];
+ put_cpu_var(batched_entropy_u32);
return ret;
}
-#endif
-EXPORT_SYMBOL(get_random_int);
+EXPORT_SYMBOL(get_random_u32);
/**
* randomize_page - Generate a random, page aligned address
diff --git a/drivers/char/rdbg.c b/drivers/char/rdbg.c
new file mode 100644
index 0000000..8612112
--- /dev/null
+++ b/drivers/char/rdbg.c
@@ -0,0 +1,1175 @@
+/*
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <soc/qcom/smem.h>
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+
+#define SMP2P_NUM_PROCS 16
+#define MAX_RETRIES 20
+
+#define SM_VERSION 1
+#define SM_BLOCKSIZE 128
+
+#define SMQ_MAGIC_INIT 0xFF00FF00
+#define SMQ_MAGIC_PRODUCER (SMQ_MAGIC_INIT | 0x1)
+#define SMQ_MAGIC_CONSUMER (SMQ_MAGIC_INIT | 0x2)
+
+enum SMQ_STATUS {
+ SMQ_SUCCESS = 0,
+ SMQ_ENOMEMORY = -1,
+ SMQ_EBADPARM = -2,
+ SMQ_UNDERFLOW = -3,
+ SMQ_OVERFLOW = -4
+};
+
+enum smq_type {
+ PRODUCER = 1,
+ CONSUMER = 2,
+ INVALID = 3
+};
+
+struct smq_block_map {
+ uint32_t index_read;
+ uint32_t num_blocks;
+ uint8_t *map;
+};
+
+struct smq_node {
+ uint16_t index_block;
+ uint16_t num_blocks;
+} __attribute__ ((__packed__));
+
+struct smq_hdr {
+ uint8_t producer_version;
+ uint8_t consumer_version;
+} __attribute__ ((__packed__));
+
+struct smq_out_state {
+ uint32_t init;
+ uint32_t index_check_queue_for_reset;
+ uint32_t index_sent_write;
+ uint32_t index_free_read;
+} __attribute__ ((__packed__));
+
+struct smq_out {
+ struct smq_out_state s;
+ struct smq_node sent[1];
+};
+
+struct smq_in_state {
+ uint32_t init;
+ uint32_t index_check_queue_for_reset_ack;
+ uint32_t index_sent_read;
+ uint32_t index_free_write;
+} __attribute__ ((__packed__));
+
+struct smq_in {
+ struct smq_in_state s;
+ struct smq_node free[1];
+};
+
+struct smq {
+ struct smq_hdr *hdr;
+ struct smq_out *out;
+ struct smq_in *in;
+ uint8_t *blocks;
+ uint32_t num_blocks;
+ struct mutex *lock;
+ uint32_t initialized;
+ struct smq_block_map block_map;
+ enum smq_type type;
+};
+
+struct gpio_info {
+ int gpio_base_id;
+ int irq_base_id;
+};
+
+struct rdbg_data {
+ struct device *device;
+ struct completion work;
+ struct gpio_info in;
+ struct gpio_info out;
+ bool device_initialized;
+ int gpio_out_offset;
+ bool device_opened;
+ void *smem_addr;
+ size_t smem_size;
+ struct smq producer_smrb;
+ struct smq consumer_smrb;
+ struct mutex write_mutex;
+};
+
+struct rdbg_device {
+ struct cdev cdev;
+ struct class *class;
+ dev_t dev_no;
+ int num_devices;
+ struct rdbg_data *rdbg_data;
+};
+
+static struct rdbg_device g_rdbg_instance = {
+ { {0} },
+ NULL,
+ 0,
+ SMP2P_NUM_PROCS,
+ NULL
+};
+
+struct processor_specific_info {
+ char *name;
+ unsigned int smem_buffer_addr;
+ size_t smem_buffer_size;
+};
+
+static struct processor_specific_info proc_info[SMP2P_NUM_PROCS] = {
+ {0}, /*APPS*/
+ {"rdbg_modem", 0, 0}, /*MODEM*/
+ {"rdbg_adsp", SMEM_LC_DEBUGGER, 16*1024}, /*ADSP*/
+ {0}, /*SMP2P_RESERVED_PROC_1*/
+ {"rdbg_wcnss", 0, 0}, /*WCNSS*/
+ {"rdbg_cdsp", SMEM_LC_DEBUGGER, 16*1024}, /*CDSP*/
+ {NULL}, /*SMP2P_POWER_PROC*/
+ {NULL}, /*SMP2P_TZ_PROC*/
+ {NULL}, /*EMPTY*/
+ {NULL}, /*EMPTY*/
+ {NULL}, /*EMPTY*/
+ {NULL}, /*EMPTY*/
+ {NULL}, /*EMPTY*/
+ {NULL}, /*EMPTY*/
+ {NULL}, /*EMPTY*/
+ {NULL} /*SMP2P_REMOTE_MOCK_PROC*/
+};
+
+static int smq_blockmap_get(struct smq_block_map *block_map,
+ uint32_t *block_index, uint32_t n)
+{
+ uint32_t start;
+ uint32_t mark = 0;
+ uint32_t found = 0;
+ uint32_t i = 0;
+
+ start = block_map->index_read;
+
+ if (n == 1) {
+ do {
+ if (!block_map->map[block_map->index_read]) {
+ *block_index = block_map->index_read;
+ block_map->map[block_map->index_read] = 1;
+ block_map->index_read++;
+ block_map->index_read %= block_map->num_blocks;
+ return SMQ_SUCCESS;
+ }
+ block_map->index_read++;
+ } while (start != (block_map->index_read %=
+ block_map->num_blocks));
+ } else {
+ mark = block_map->num_blocks;
+
+ do {
+ if (!block_map->map[block_map->index_read]) {
+ if (mark > block_map->index_read) {
+ mark = block_map->index_read;
+ start = block_map->index_read;
+ found = 0;
+ }
+
+ found++;
+ if (found == n) {
+ *block_index = mark;
+ for (i = 0; i < n; i++)
+ block_map->map[mark + i] =
+ (uint8_t)(n - i);
+ block_map->index_read += block_map->map
+ [block_map->index_read] - 1;
+ return SMQ_SUCCESS;
+ }
+ } else {
+ found = 0;
+ block_map->index_read += block_map->map
+ [block_map->index_read] - 1;
+ mark = block_map->num_blocks;
+ }
+ block_map->index_read++;
+ } while (start != (block_map->index_read %=
+ block_map->num_blocks));
+ }
+
+ return SMQ_ENOMEMORY;
+}
+
+static void smq_blockmap_put(struct smq_block_map *block_map, uint32_t i)
+{
+ uint32_t num_blocks = block_map->map[i];
+
+ while (num_blocks--) {
+ block_map->map[i] = 0;
+ i++;
+ }
+}
+
+static int smq_blockmap_reset(struct smq_block_map *block_map)
+{
+ if (!block_map->map)
+ return SMQ_ENOMEMORY;
+ memset(block_map->map, 0, block_map->num_blocks + 1);
+ block_map->index_read = 0;
+
+ return SMQ_SUCCESS;
+}
+
+static int smq_blockmap_ctor(struct smq_block_map *block_map,
+ uint32_t num_blocks)
+{
+ if (num_blocks <= 1)
+ return SMQ_ENOMEMORY;
+
+ block_map->map = kcalloc(num_blocks, sizeof(uint8_t), GFP_KERNEL);
+ if (!block_map->map)
+ return SMQ_ENOMEMORY;
+
+ block_map->num_blocks = num_blocks - 1;
+ smq_blockmap_reset(block_map);
+
+ return SMQ_SUCCESS;
+}
+
+static void smq_blockmap_dtor(struct smq_block_map *block_map)
+{
+ kfree(block_map->map);
+ block_map->map = NULL;
+}
+
+static int smq_free(struct smq *smq, void *data)
+{
+ struct smq_node node;
+ uint32_t index_block;
+ int err = SMQ_SUCCESS;
+
+ if (smq->lock)
+ mutex_lock(smq->lock);
+
+ if ((smq->hdr->producer_version != SM_VERSION) &&
+ (smq->out->s.init != SMQ_MAGIC_PRODUCER)) {
+ err = SMQ_UNDERFLOW;
+ goto bail;
+ }
+
+ index_block = ((uint8_t *)data - smq->blocks) / SM_BLOCKSIZE;
+ if (index_block >= smq->num_blocks) {
+ err = SMQ_EBADPARM;
+ goto bail;
+ }
+
+ node.index_block = (uint16_t)index_block;
+ node.num_blocks = 0;
+ *((struct smq_node *)(smq->in->free + smq->in->
+ s.index_free_write)) = node;
+
+ smq->in->s.index_free_write = (smq->in->s.index_free_write + 1)
+ % smq->num_blocks;
+
+bail:
+ if (smq->lock)
+ mutex_unlock(smq->lock);
+ return err;
+}
+
+static int smq_receive(struct smq *smq, void **pp, int *pnsize, int *pbmore)
+{
+ struct smq_node *node;
+ int err = SMQ_SUCCESS;
+ int more = 0;
+
+ if ((smq->hdr->producer_version != SM_VERSION) &&
+ (smq->out->s.init != SMQ_MAGIC_PRODUCER))
+ return SMQ_UNDERFLOW;
+
+ if (smq->in->s.index_sent_read == smq->out->s.index_sent_write) {
+ err = SMQ_UNDERFLOW;
+ goto bail;
+ }
+
+ node = (struct smq_node *)(smq->out->sent + smq->in->s.index_sent_read);
+ if (node->index_block >= smq->num_blocks) {
+ err = SMQ_EBADPARM;
+ goto bail;
+ }
+
+ smq->in->s.index_sent_read = (smq->in->s.index_sent_read + 1)
+ % smq->num_blocks;
+
+ *pp = smq->blocks + (node->index_block * SM_BLOCKSIZE);
+ *pnsize = SM_BLOCKSIZE * node->num_blocks;
+
+ /*
+ * Ensure that the reads and writes are updated in the memory
+ * when they are done and not cached. Also, ensure that the reads
+ * and writes are not reordered as they are shared between two cores.
+ */
+ rmb();
+ if (smq->in->s.index_sent_read != smq->out->s.index_sent_write)
+ more = 1;
+
+bail:
+ *pbmore = more;
+ return err;
+}
+
+static int smq_alloc_send(struct smq *smq, const uint8_t *pcb, int nsize)
+{
+ void *pv = 0;
+ int num_blocks;
+ uint32_t index_block = 0;
+ int err = SMQ_SUCCESS;
+ struct smq_node *node = NULL;
+
+ mutex_lock(smq->lock);
+
+ if ((smq->in->s.init == SMQ_MAGIC_CONSUMER) &&
+ (smq->hdr->consumer_version == SM_VERSION)) {
+ if (smq->out->s.index_check_queue_for_reset ==
+ smq->in->s.index_check_queue_for_reset_ack) {
+ while (smq->out->s.index_free_read !=
+ smq->in->s.index_free_write) {
+ node = (struct smq_node *)(
+ smq->in->free +
+ smq->out->s.index_free_read);
+ if (node->index_block >= smq->num_blocks) {
+ err = SMQ_EBADPARM;
+ goto bail;
+ }
+
+ smq->out->s.index_free_read =
+ (smq->out->s.index_free_read + 1)
+ % smq->num_blocks;
+
+ smq_blockmap_put(&smq->block_map,
+ node->index_block);
+ /*
+ * Ensure that the reads and writes are
+ * updated in the memory when they are done
+ * and not cached. Also, ensure that the reads
+ * and writes are not reordered as they are
+ * shared between two cores.
+ */
+ rmb();
+ }
+ }
+ }
+
+ num_blocks = ALIGN(nsize, SM_BLOCKSIZE)/SM_BLOCKSIZE;
+ err = smq_blockmap_get(&smq->block_map, &index_block, num_blocks);
+ if (err != SMQ_SUCCESS)
+ goto bail;
+
+ pv = smq->blocks + (SM_BLOCKSIZE * index_block);
+
+ err = copy_from_user((void *)pv, (void *)pcb, nsize);
+ if (err != 0)
+ goto bail;
+
+ ((struct smq_node *)(smq->out->sent +
+ smq->out->s.index_sent_write))->index_block
+ = (uint16_t)index_block;
+ ((struct smq_node *)(smq->out->sent +
+ smq->out->s.index_sent_write))->num_blocks
+ = (uint16_t)num_blocks;
+
+ smq->out->s.index_sent_write = (smq->out->s.index_sent_write + 1)
+ % smq->num_blocks;
+
+bail:
+ if (err != SMQ_SUCCESS) {
+ if (pv)
+ smq_blockmap_put(&smq->block_map, index_block);
+ }
+ mutex_unlock(smq->lock);
+ return err;
+}
+
+static int smq_reset_producer_queue_internal(struct smq *smq,
+ uint32_t reset_num)
+{
+ int retval = 0;
+ uint32_t i;
+
+ if (smq->type != PRODUCER)
+ goto bail;
+
+ mutex_lock(smq->lock);
+ if (smq->out->s.index_check_queue_for_reset != reset_num) {
+ smq->out->s.index_check_queue_for_reset = reset_num;
+ for (i = 0; i < smq->num_blocks; i++)
+ (smq->out->sent + i)->index_block = 0xFFFF;
+
+ smq_blockmap_reset(&smq->block_map);
+ smq->out->s.index_sent_write = 0;
+ smq->out->s.index_free_read = 0;
+ retval = 1;
+ }
+ mutex_unlock(smq->lock);
+
+bail:
+ return retval;
+}
+
+static int smq_check_queue_reset(struct smq *p_cons, struct smq *p_prod)
+{
+ int retval = 0;
+ uint32_t reset_num, i;
+
+ if ((p_cons->type != CONSUMER) ||
+ (p_cons->out->s.init != SMQ_MAGIC_PRODUCER) ||
+ (p_cons->hdr->producer_version != SM_VERSION))
+ goto bail;
+
+ reset_num = p_cons->out->s.index_check_queue_for_reset;
+ if (p_cons->in->s.index_check_queue_for_reset_ack != reset_num) {
+ p_cons->in->s.index_check_queue_for_reset_ack = reset_num;
+ for (i = 0; i < p_cons->num_blocks; i++)
+ (p_cons->in->free + i)->index_block = 0xFFFF;
+
+ p_cons->in->s.index_sent_read = 0;
+ p_cons->in->s.index_free_write = 0;
+
+ retval = smq_reset_producer_queue_internal(p_prod, reset_num);
+ }
+
+bail:
+ return retval;
+}
+
+static int check_subsystem_debug_enabled(void *base_addr, int size)
+{
+ int num_blocks;
+ uint8_t *pb_orig;
+ uint8_t *pb;
+ struct smq smq;
+ int err = 0;
+
+ pb = pb_orig = (uint8_t *)base_addr;
+ pb += sizeof(struct smq_hdr);
+ pb = PTR_ALIGN(pb, 8);
+ size -= pb - (uint8_t *)pb_orig;
+ num_blocks = (int)((size - sizeof(struct smq_out_state) -
+ sizeof(struct smq_in_state))/(SM_BLOCKSIZE +
+ sizeof(struct smq_node) * 2));
+ if (num_blocks <= 0) {
+ err = SMQ_EBADPARM;
+ goto bail;
+ }
+
+ pb += num_blocks * SM_BLOCKSIZE;
+ smq.out = (struct smq_out *)pb;
+ pb += sizeof(struct smq_out_state) + (num_blocks *
+ sizeof(struct smq_node));
+ smq.in = (struct smq_in *)pb;
+
+ if (smq.in->s.init != SMQ_MAGIC_CONSUMER) {
+ pr_err("%s, smq in consumer not initialized", __func__);
+ err = -ECOMM;
+ }
+
+bail:
+ return err;
+}
+
+static void smq_dtor(struct smq *smq)
+{
+ if (smq->initialized == SMQ_MAGIC_INIT) {
+ switch (smq->type) {
+ case PRODUCER:
+ smq->out->s.init = 0;
+ smq_blockmap_dtor(&smq->block_map);
+ break;
+ case CONSUMER:
+ smq->in->s.init = 0;
+ break;
+ default:
+ case INVALID:
+ break;
+ }
+
+ smq->initialized = 0;
+ }
+}
+
+/*
+ * The shared memory is used as a circular ring buffer in each direction.
+ * Thus we have a bi-directional shared memory channel between the AP
+ * and a subsystem. We call this SMQ. Each memory channel contains a header,
+ * data and a control mechanism that is used to synchronize read and write
+ * of data between the AP and the remote subsystem.
+ *
+ * Overall SMQ memory view:
+ *
+ * +------------------------------------------------+
+ * | SMEM buffer |
+ * |-----------------------+------------------------|
+ * |Producer: LA | Producer: Remote |
+ * |Consumer: Remote | subsystem |
+ * | subsystem | Consumer: LA |
+ * | | |
+ * | Producer| Consumer|
+ * +-----------------------+------------------------+
+ * | |
+ * | |
+ * | +--------------------------------------+
+ * | |
+ * | |
+ * v v
+ * +--------------------------------------------------------------+
+ * | Header | Data | Control |
+ * +-----------+---+---+---+-----+----+--+--+-----+---+--+--+-----+
+ * | | b | b | b | | S |n |n | | S |n |n | |
+ * | Producer | l | l | l | | M |o |o | | M |o |o | |
+ * | Ver | o | o | o | | Q |d |d | | Q |d |d | |
+ * |-----------| c | c | c | ... | |e |e | ... | |e |e | ... |
+ * | | k | k | k | | O | | | | I | | | |
+ * | Consumer | | | | | u |0 |1 | | n |0 |1 | |
+ * | Ver | 0 | 1 | 2 | | t | | | | | | | |
+ * +-----------+---+---+---+-----+----+--+--+-----+---+--+--+-----+
+ * | |
+ * + |
+ * |
+ * +------------------------+
+ * |
+ * v
+ * +----+----+----+----+
+ * | SMQ Nodes |
+ * |----|----|----|----|
+ * Node # | 0 | 1 | 2 | ...|
+ * |----|----|----|----|
+ * Starting Block Index # | 0 | 3 | 8 | ...|
+ * |----|----|----|----|
+ * # of blocks | 3 | 5 | 1 | ...|
+ * +----+----+----+----+
+ *
+ * Header: Contains version numbers for software compatibility to ensure
+ * that both producers and consumers on the AP and subsystems know how to
+ * read from and write to the queue.
+ * Both the producer and consumer versions are 1.
+ * +---------+-------------------+
+ * | Size | Field |
+ * +---------+-------------------+
+ * | 1 byte | Producer Version |
+ * +---------+-------------------+
+ * | 1 byte | Consumer Version |
+ * +---------+-------------------+
+ *
+ * Data: The data portion contains multiple blocks [0..N] of a fixed size.
+ * The block size SM_BLOCKSIZE is fixed to 128 bytes for header version #1.
+ * Payload sent from the debug agent app is split (if necessary) and placed
+ * in these blocks. The first data block is placed at the next 8 byte aligned
+ * address after the header.
+ *
+ * The number of blocks for a given SMEM allocation is derived as follows:
+ * Number of Blocks = ((Total Size - Alignment - Size of Header
+ * - Size of SMQIn - Size of SMQOut)/(SM_BLOCKSIZE))
+ *
+ * The producer maintains a private block map of each of these blocks to
+ * determine which of these blocks in the queue is available and which are free.
+ *
+ * Control:
+ * The control portion contains a list of nodes [0..N] where N is number
+ * of available data blocks. Each node identifies the data
+ * block indexes that contain a particular debug message to be transferred,
+ * and the number of blocks it took to hold the contents of the message.
+ *
+ * Each node has the following structure:
+ * +---------+-------------------+
+ * | Size | Field |
+ * +---------+-------------------+
+ * | 2 bytes |Starting Block Index|
+ * +---------+-------------------+
+ * | 2 bytes |Number of Blocks |
+ * +---------+-------------------+
+ *
+ * The producer and the consumer update different parts of the control channel
+ * (SMQOut / SMQIn) respectively. Each of these control data structures contains
+ * information about the last node that was written / read, and the actual nodes
+ * that were written/read.
+ *
+ * SMQOut Structure (R/W by producer, R by consumer):
+ * +---------+-------------------+
+ * | Size | Field |
+ * +---------+-------------------+
+ * | 4 bytes | Magic Init Number |
+ * +---------+-------------------+
+ * | 4 bytes | Reset |
+ * +---------+-------------------+
+ * | 4 bytes | Last Sent Index |
+ * +---------+-------------------+
+ * | 4 bytes | Index Free Read |
+ * +---------+-------------------+
+ *
+ * SMQIn Structure (R/W by consumer, R by producer):
+ * +---------+-------------------+
+ * | Size | Field |
+ * +---------+-------------------+
+ * | 4 bytes | Magic Init Number |
+ * +---------+-------------------+
+ * | 4 bytes | Reset ACK |
+ * +---------+-------------------+
+ * | 4 bytes | Last Read Index |
+ * +---------+-------------------+
+ * | 4 bytes | Index Free Write |
+ * +---------+-------------------+
+ *
+ * Magic Init Number:
+ * Both SMQ Out and SMQ In initialize this field with a predefined magic
+ * number so as to make sure that both the consumer and producer blocks
+ * have fully initialized and have valid data in the shared memory control area.
+ * Producer Magic #: 0xFF00FF01
+ * Consumer Magic #: 0xFF00FF02
+ */
+static int smq_ctor(struct smq *smq, void *base_addr, int size,
+ enum smq_type type, struct mutex *lock_ptr)
+{
+ int num_blocks;
+ uint8_t *pb_orig;
+ uint8_t *pb;
+ uint32_t i;
+ int err;
+
+ if (smq->initialized == SMQ_MAGIC_INIT) {
+ err = SMQ_EBADPARM;
+ goto bail;
+ }
+
+ if (!base_addr || !size) {
+ err = SMQ_EBADPARM;
+ goto bail;
+ }
+
+ if (type == PRODUCER)
+ smq->lock = lock_ptr;
+
+ pb_orig = (uint8_t *)base_addr;
+ smq->hdr = (struct smq_hdr *)pb_orig;
+ pb = pb_orig;
+ pb += sizeof(struct smq_hdr);
+ pb = PTR_ALIGN(pb, 8);
+ size -= pb - (uint8_t *)pb_orig;
+ num_blocks = (int)((size - sizeof(struct smq_out_state) -
+ sizeof(struct smq_in_state))/(SM_BLOCKSIZE +
+ sizeof(struct smq_node) * 2));
+ if (num_blocks <= 0) {
+ err = SMQ_ENOMEMORY;
+ goto bail;
+ }
+
+ smq->blocks = pb;
+ smq->num_blocks = num_blocks;
+ pb += num_blocks * SM_BLOCKSIZE;
+ smq->out = (struct smq_out *)pb;
+ pb += sizeof(struct smq_out_state) + (num_blocks *
+ sizeof(struct smq_node));
+ smq->in = (struct smq_in *)pb;
+ smq->type = type;
+ if (type == PRODUCER) {
+ smq->hdr->producer_version = SM_VERSION;
+ for (i = 0; i < smq->num_blocks; i++)
+ (smq->out->sent + i)->index_block = 0xFFFF;
+
+ err = smq_blockmap_ctor(&smq->block_map, smq->num_blocks);
+ if (err != SMQ_SUCCESS)
+ goto bail;
+
+ smq->out->s.index_sent_write = 0;
+ smq->out->s.index_free_read = 0;
+ if (smq->out->s.init == SMQ_MAGIC_PRODUCER) {
+ smq->out->s.index_check_queue_for_reset += 1;
+ } else {
+ smq->out->s.index_check_queue_for_reset = 1;
+ smq->out->s.init = SMQ_MAGIC_PRODUCER;
+ }
+ } else {
+ smq->hdr->consumer_version = SM_VERSION;
+ for (i = 0; i < smq->num_blocks; i++)
+ (smq->in->free + i)->index_block = 0xFFFF;
+
+ smq->in->s.index_sent_read = 0;
+ smq->in->s.index_free_write = 0;
+ if (smq->out->s.init == SMQ_MAGIC_PRODUCER) {
+ smq->in->s.index_check_queue_for_reset_ack =
+ smq->out->s.index_check_queue_for_reset;
+ } else {
+ smq->in->s.index_check_queue_for_reset_ack = 0;
+ }
+
+ smq->in->s.init = SMQ_MAGIC_CONSUMER;
+ }
+ smq->initialized = SMQ_MAGIC_INIT;
+ err = SMQ_SUCCESS;
+
+bail:
+ return err;
+}
+
+static void send_interrupt_to_subsystem(struct rdbg_data *rdbgdata)
+{
+ int offset = rdbgdata->gpio_out_offset;
+ int val = 1 ^ gpio_get_value(rdbgdata->out.gpio_base_id + offset);
+
+ gpio_set_value(rdbgdata->out.gpio_base_id + offset, val);
+ rdbgdata->gpio_out_offset = (offset + 1) % 32;
+
+ dev_dbg(rdbgdata->device, "%s: sent interrupt %d to subsystem",
+ __func__, val);
+}
+
+static irqreturn_t on_interrupt_from(int irq, void *ptr)
+{
+ struct rdbg_data *rdbgdata = (struct rdbg_data *) ptr;
+
+ dev_dbg(rdbgdata->device, "%s: Received interrupt %d from subsystem",
+ __func__, irq);
+
+ complete(&(rdbgdata->work));
+ return IRQ_HANDLED;
+}
+
+static int initialize_smq(struct rdbg_data *rdbgdata)
+{
+ int err = 0;
+ unsigned char *smem_consumer_buffer = rdbgdata->smem_addr;
+
+ smem_consumer_buffer += (rdbgdata->smem_size/2);
+
+ if (smq_ctor(&(rdbgdata->producer_smrb), (void *)(rdbgdata->smem_addr),
+ ((rdbgdata->smem_size)/2), PRODUCER, &rdbgdata->write_mutex)) {
+ dev_err(rdbgdata->device, "%s: smq producer allocation failed",
+ __func__);
+ err = -ENOMEM;
+ goto bail;
+ }
+
+ if (smq_ctor(&(rdbgdata->consumer_smrb), (void *)smem_consumer_buffer,
+ ((rdbgdata->smem_size)/2), CONSUMER, NULL)) {
+		dev_err(rdbgdata->device, "%s: smq consumer allocation failed",
+ __func__);
+ err = -ENOMEM;
+ }
+
+bail:
+ return err;
+
+}
+
+static int rdbg_open(struct inode *inode, struct file *filp)
+{
+ int device_id = -1;
+ struct rdbg_device *device = &g_rdbg_instance;
+ struct rdbg_data *rdbgdata = NULL;
+ int err = 0;
+
+ if (!inode || !device->rdbg_data) {
+ pr_err("Memory not allocated yet");
+ err = -ENODEV;
+ goto bail;
+ }
+
+ device_id = MINOR(inode->i_rdev);
+ rdbgdata = &device->rdbg_data[device_id];
+
+ if (rdbgdata->device_opened) {
+ dev_err(rdbgdata->device, "%s: Device already opened",
+ __func__);
+ err = -EEXIST;
+ goto bail;
+ }
+
+ rdbgdata->smem_size = proc_info[device_id].smem_buffer_size;
+ if (!rdbgdata->smem_size) {
+ dev_err(rdbgdata->device, "%s: smem not initialized", __func__);
+ err = -ENOMEM;
+ goto bail;
+ }
+
+ rdbgdata->smem_addr = smem_find(proc_info[device_id].smem_buffer_addr,
+ rdbgdata->smem_size, 0, SMEM_ANY_HOST_FLAG);
+ if (!rdbgdata->smem_addr) {
+ dev_err(rdbgdata->device, "%s: Could not allocate smem memory",
+ __func__);
+ err = -ENOMEM;
+ goto bail;
+ }
+ dev_dbg(rdbgdata->device, "%s: SMEM address=0x%lx smem_size=%d",
+ __func__, (unsigned long)rdbgdata->smem_addr,
+ (unsigned int)rdbgdata->smem_size);
+
+ if (check_subsystem_debug_enabled(rdbgdata->smem_addr,
+ rdbgdata->smem_size/2)) {
+ dev_err(rdbgdata->device, "%s: Subsystem %s is not debug enabled",
+ __func__, proc_info[device_id].name);
+ err = -ECOMM;
+ goto bail;
+ }
+
+ init_completion(&rdbgdata->work);
+
+ err = request_irq(rdbgdata->in.irq_base_id, on_interrupt_from,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ proc_info[device_id].name,
+ (void *)&device->rdbg_data[device_id]);
+ if (err) {
+ dev_err(rdbgdata->device,
+ "%s: Failed to register interrupt.Err=%d,irqid=%d.",
+ __func__, err, rdbgdata->in.irq_base_id);
+ goto irq_bail;
+ }
+
+ err = enable_irq_wake(rdbgdata->in.irq_base_id);
+ if (err < 0) {
+ dev_dbg(rdbgdata->device, "enable_irq_wake() failed with err=%d",
+ err);
+ err = 0;
+ }
+
+ mutex_init(&rdbgdata->write_mutex);
+
+ err = initialize_smq(rdbgdata);
+ if (err) {
+ dev_err(rdbgdata->device, "Error initializing smq. Err=%d",
+ err);
+ goto smq_bail;
+ }
+
+ rdbgdata->device_opened = 1;
+
+ filp->private_data = (void *)rdbgdata;
+
+ return 0;
+
+smq_bail:
+ smq_dtor(&(rdbgdata->producer_smrb));
+ smq_dtor(&(rdbgdata->consumer_smrb));
+ mutex_destroy(&rdbgdata->write_mutex);
+irq_bail:
+ free_irq(rdbgdata->in.irq_base_id, (void *)
+ &device->rdbg_data[device_id]);
+bail:
+ return err;
+}
+
+static int rdbg_release(struct inode *inode, struct file *filp)
+{
+ int device_id = -1;
+ struct rdbg_device *rdbgdevice = &g_rdbg_instance;
+ struct rdbg_data *rdbgdata = NULL;
+ int err = 0;
+
+ if (!inode || !rdbgdevice->rdbg_data) {
+ pr_err("Memory not allocated yet");
+ err = -ENODEV;
+ goto bail;
+ }
+
+ device_id = MINOR(inode->i_rdev);
+ rdbgdata = &rdbgdevice->rdbg_data[device_id];
+
+ if (rdbgdata->device_opened == 1) {
+ dev_dbg(rdbgdata->device, "%s: Destroying %s.", __func__,
+ proc_info[device_id].name);
+ rdbgdata->device_opened = 0;
+ complete(&(rdbgdata->work));
+ free_irq(rdbgdata->in.irq_base_id, (void *)
+ &rdbgdevice->rdbg_data[device_id]);
+ if (rdbgdevice->rdbg_data[device_id].producer_smrb.initialized)
+ smq_dtor(&(rdbgdevice->rdbg_data[device_id].
+ producer_smrb));
+ if (rdbgdevice->rdbg_data[device_id].consumer_smrb.initialized)
+ smq_dtor(&(rdbgdevice->rdbg_data[device_id].
+ consumer_smrb));
+ mutex_destroy(&rdbgdata->write_mutex);
+ }
+
+ filp->private_data = NULL;
+
+bail:
+ return err;
+}
+
+static ssize_t rdbg_read(struct file *filp, char __user *buf, size_t size,
+ loff_t *offset)
+{
+ int err = 0;
+ struct rdbg_data *rdbgdata = filp->private_data;
+ void *p_sent_buffer = NULL;
+ int nsize = 0;
+ int more = 0;
+
+ if (!rdbgdata) {
+ pr_err("Invalid argument");
+ err = -EINVAL;
+ goto bail;
+ }
+
+ dev_dbg(rdbgdata->device, "%s: In receive", __func__);
+ err = wait_for_completion_interruptible(&(rdbgdata->work));
+ if (err) {
+ dev_err(rdbgdata->device, "%s: Error in wait", __func__);
+ goto bail;
+ }
+
+ smq_check_queue_reset(&(rdbgdata->consumer_smrb),
+ &(rdbgdata->producer_smrb));
+ if (smq_receive(&(rdbgdata->consumer_smrb), &p_sent_buffer,
+ &nsize, &more) != SMQ_SUCCESS) {
+ dev_err(rdbgdata->device, "%s: Error in smq_recv(). Err code = %d",
+ __func__, err);
+ err = -ENODATA;
+ goto bail;
+ }
+
+ size = ((size < nsize) ? size : nsize);
+ err = copy_to_user(buf, p_sent_buffer, size);
+ if (err != 0) {
+ dev_err(rdbgdata->device, "%s: Error in copy_to_user(). Err code = %d",
+ __func__, err);
+ err = -ENODATA;
+ goto bail;
+ }
+
+ smq_free(&(rdbgdata->consumer_smrb), p_sent_buffer);
+ err = size;
+ dev_dbg(rdbgdata->device, "%s: Read data to buffer with address 0x%lx",
+ __func__, (unsigned long) buf);
+
+bail:
+ return err;
+}
+
+static ssize_t rdbg_write(struct file *filp, const char __user *buf,
+ size_t size, loff_t *offset)
+{
+ int err = 0;
+ int num_retries = 0;
+ struct rdbg_data *rdbgdata = filp->private_data;
+
+ if (!rdbgdata) {
+ pr_err("Invalid argument");
+ err = -EINVAL;
+ goto bail;
+ }
+
+ do {
+ err = smq_alloc_send(&(rdbgdata->producer_smrb), buf, size);
+ dev_dbg(rdbgdata->device, "%s, smq_alloc_send returned %d.",
+ __func__, err);
+ } while (err != 0 && num_retries++ < MAX_RETRIES);
+
+ if (err != 0) {
+ err = -ECOMM;
+ goto bail;
+ }
+
+ send_interrupt_to_subsystem(rdbgdata);
+
+ err = size;
+
+bail:
+ return err;
+}
+
+
+static const struct file_operations rdbg_fops = {
+ .open = rdbg_open,
+ .read = rdbg_read,
+ .write = rdbg_write,
+ .release = rdbg_release,
+};
+
+static int register_smp2p(char *node_name, struct gpio_info *gpio_info_ptr)
+{
+ struct device_node *node = NULL;
+ int cnt = 0;
+ int id = 0;
+
+ node = of_find_compatible_node(NULL, NULL, node_name);
+ if (node) {
+ cnt = of_gpio_count(node);
+ if (cnt && gpio_info_ptr) {
+ id = of_get_gpio(node, 0);
+ gpio_info_ptr->gpio_base_id = id;
+ gpio_info_ptr->irq_base_id = gpio_to_irq(id);
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+static int __init rdbg_init(void)
+{
+ int err = 0;
+ struct rdbg_device *rdbgdevice = &g_rdbg_instance;
+ int minor = 0;
+ int major = 0;
+ int minor_nodes_created = 0;
+
+ char *rdbg_compatible_string = "qcom,smp2pgpio_client_rdbg_";
+ int max_len = strlen(rdbg_compatible_string) + strlen("xx_out");
+
+ char *node_name = kcalloc(max_len, sizeof(char), GFP_KERNEL);
+
+ if (!node_name) {
+ err = -ENOMEM;
+ goto bail;
+ }
+
+ if (rdbgdevice->num_devices < 1 ||
+ rdbgdevice->num_devices > SMP2P_NUM_PROCS) {
+ pr_err("rgdb: invalid num_devices");
+ err = -EDOM;
+ goto name_bail;
+ }
+
+ rdbgdevice->rdbg_data = kcalloc(rdbgdevice->num_devices,
+ sizeof(struct rdbg_data), GFP_KERNEL);
+ if (!rdbgdevice->rdbg_data) {
+ err = -ENOMEM;
+ goto name_bail;
+ }
+
+ err = alloc_chrdev_region(&rdbgdevice->dev_no, 0,
+ rdbgdevice->num_devices, "rdbgctl");
+ if (err) {
+ pr_err("Error in alloc_chrdev_region.");
+ goto data_bail;
+ }
+ major = MAJOR(rdbgdevice->dev_no);
+
+ cdev_init(&rdbgdevice->cdev, &rdbg_fops);
+ rdbgdevice->cdev.owner = THIS_MODULE;
+ err = cdev_add(&rdbgdevice->cdev, MKDEV(major, 0),
+ rdbgdevice->num_devices);
+ if (err) {
+ pr_err("Error in cdev_add");
+ goto chrdev_bail;
+ }
+
+ rdbgdevice->class = class_create(THIS_MODULE, "rdbg");
+ if (IS_ERR(rdbgdevice->class)) {
+ err = PTR_ERR(rdbgdevice->class);
+ pr_err("Error in class_create");
+ goto cdev_bail;
+ }
+
+ for (minor = 0; minor < rdbgdevice->num_devices; minor++) {
+ if (!proc_info[minor].name)
+ continue;
+
+ if (snprintf(node_name, max_len, "%s%d_in",
+ rdbg_compatible_string, minor) <= 0) {
+ pr_err("Error in snprintf");
+ err = -ENOMEM;
+ goto device_bail;
+ }
+
+ if (register_smp2p(node_name,
+ &rdbgdevice->rdbg_data[minor].in)) {
+ pr_debug("No incoming device tree entry found for %s",
+ proc_info[minor].name);
+ continue;
+ }
+
+ if (snprintf(node_name, max_len, "%s%d_out",
+ rdbg_compatible_string, minor) <= 0) {
+ pr_err("Error in snprintf");
+ err = -ENOMEM;
+ goto device_bail;
+ }
+
+ if (register_smp2p(node_name,
+ &rdbgdevice->rdbg_data[minor].out)) {
+ pr_err("No outgoing device tree entry found for %s",
+ proc_info[minor].name);
+ err = -EINVAL;
+ goto device_bail;
+ }
+
+ rdbgdevice->rdbg_data[minor].device = device_create(
+ rdbgdevice->class, NULL, MKDEV(major, minor),
+ NULL, "%s", proc_info[minor].name);
+ if (IS_ERR(rdbgdevice->rdbg_data[minor].device)) {
+ err = PTR_ERR(rdbgdevice->rdbg_data[minor].device);
+ pr_err("Error in device_create");
+ goto device_bail;
+ }
+ rdbgdevice->rdbg_data[minor].device_initialized = 1;
+ minor_nodes_created++;
+ dev_dbg(rdbgdevice->rdbg_data[minor].device,
+ "%s: created /dev/%s c %d %d'", __func__,
+ proc_info[minor].name, major, minor);
+ }
+
+ if (!minor_nodes_created) {
+ pr_err("No device tree entries found");
+ err = -EINVAL;
+ goto class_bail;
+ }
+
+ goto name_bail;
+
+device_bail:
+ for (--minor; minor >= 0; minor--) {
+ if (rdbgdevice->rdbg_data[minor].device_initialized)
+ device_destroy(rdbgdevice->class,
+ MKDEV(MAJOR(rdbgdevice->dev_no), minor));
+ }
+class_bail:
+ class_destroy(rdbgdevice->class);
+cdev_bail:
+ cdev_del(&rdbgdevice->cdev);
+chrdev_bail:
+ unregister_chrdev_region(rdbgdevice->dev_no, rdbgdevice->num_devices);
+data_bail:
+ kfree(rdbgdevice->rdbg_data);
+name_bail:
+ kfree(node_name);
+bail:
+ return err;
+}
+
+static void __exit rdbg_exit(void)
+{
+ struct rdbg_device *rdbgdevice = &g_rdbg_instance;
+ int minor;
+
+ for (minor = 0; minor < rdbgdevice->num_devices; minor++) {
+ if (rdbgdevice->rdbg_data[minor].device_initialized) {
+ device_destroy(rdbgdevice->class,
+ MKDEV(MAJOR(rdbgdevice->dev_no), minor));
+ }
+ }
+ class_destroy(rdbgdevice->class);
+ cdev_del(&rdbgdevice->cdev);
+ unregister_chrdev_region(rdbgdevice->dev_no, 1);
+ kfree(rdbgdevice->rdbg_data);
+}
+
+module_init(rdbg_init);
+module_exit(rdbg_exit);
+
+MODULE_DESCRIPTION("rdbg module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index a017ccd..9ff8532 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -130,6 +130,41 @@
kfree(chip);
}
+
+/**
+ * tpm_class_shutdown() - prepare the TPM device for loss of power.
+ * @dev: device to which the chip is associated.
+ *
+ * Issues a TPM2_Shutdown command prior to loss of power, as required by the
+ * TPM 2.0 spec.
+ * Then, calls bus- and device- specific shutdown code.
+ *
+ * XXX: This codepath relies on the fact that sysfs is not enabled for
+ * TPM2: sysfs uses an implicit lock on chip->ops, so this could race if TPM2
+ * has sysfs support enabled before TPM sysfs's implicit locking is fixed.
+ */
+static int tpm_class_shutdown(struct device *dev)
+{
+ struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev);
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+ down_write(&chip->ops_sem);
+ tpm2_shutdown(chip, TPM2_SU_CLEAR);
+ chip->ops = NULL;
+ up_write(&chip->ops_sem);
+ }
+ /* Allow bus- and device-specific code to run. Note: since chip->ops
+ * is NULL, more-specific shutdown code will not be able to issue TPM
+ * commands.
+ */
+ if (dev->bus && dev->bus->shutdown)
+ dev->bus->shutdown(dev);
+ else if (dev->driver && dev->driver->shutdown)
+ dev->driver->shutdown(dev);
+ return 0;
+}
+
+
/**
* tpm_chip_alloc() - allocate a new struct tpm_chip instance
* @pdev: device to which the chip is associated
@@ -168,6 +203,7 @@
device_initialize(&chip->dev);
chip->dev.class = tpm_class;
+ chip->dev.class->shutdown = tpm_class_shutdown;
chip->dev.release = tpm_dev_release;
chip->dev.parent = pdev;
chip->dev.groups = chip->groups;
diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
index a76ab4a..edf8c59 100644
--- a/drivers/char/tpm/tpm-sysfs.c
+++ b/drivers/char/tpm/tpm-sysfs.c
@@ -35,9 +35,10 @@
ssize_t err;
int i, rc;
char *str = buf;
-
struct tpm_chip *chip = to_tpm_chip(dev);
+ memset(&tpm_cmd, 0, sizeof(tpm_cmd));
+
tpm_cmd.header.in = tpm_readpubek_header;
err = tpm_transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE, 0,
"attempting to read the PUBEK");
@@ -284,6 +285,11 @@
void tpm_sysfs_add_device(struct tpm_chip *chip)
{
+ /* XXX: If you wish to remove this restriction, you must first update
+ * tpm_sysfs to explicitly lock chip->ops.
+ */
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ return;
/* The sysfs routines rely on an implicit tpm_try_get_ops, device_del
* is called before ops is null'd and the sysfs core synchronizes this
* removal so that no callbacks are running or can run again
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
index 946025a..84eca4f 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -295,6 +295,8 @@
}
kfree(ibmvtpm);
+ /* For tpm_ibmvtpm_get_desired_dma */
+ dev_set_drvdata(&vdev->dev, NULL);
return 0;
}
@@ -309,13 +311,16 @@
static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev)
{
struct tpm_chip *chip = dev_get_drvdata(&vdev->dev);
- struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
+ struct ibmvtpm_dev *ibmvtpm;
- /* ibmvtpm initializes at probe time, so the data we are
- * asking for may not be set yet. Estimate that 4K required
- * for TCE-mapped buffer in addition to CRQ.
- */
- if (!ibmvtpm)
+ /*
+ * ibmvtpm initializes at probe time, so the data we are
+ * asking for may not be set yet. Estimate that 4K required
+ * for TCE-mapped buffer in addition to CRQ.
+ */
+ if (chip)
+ ibmvtpm = dev_get_drvdata(&chip->dev);
+ else
return CRQ_RES_BUF_SIZE + PAGE_SIZE;
return CRQ_RES_BUF_SIZE + ibmvtpm->rtce_size;
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 471a301..8f890c1 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1870,7 +1870,7 @@
{
struct ports_device *portdev;
- portdev = container_of(work, struct ports_device, control_work);
+ portdev = container_of(work, struct ports_device, config_work);
if (!use_multiport(portdev)) {
struct virtio_device *vdev;
struct port *port;
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index 3d6754e..3e13186 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -258,6 +258,9 @@
{
if (flags & CLK_DIVIDER_ROUND_CLOSEST)
return abs(rate - now) < abs(rate - best);
+ else if (flags & CLK_DIVIDER_ROUND_KHZ)
+ return (DIV_ROUND_CLOSEST(abs(rate - now), 1000)
+ < DIV_ROUND_CLOSEST(abs(rate - best), 1000));
return now <= rate && now > best;
}
@@ -280,10 +283,14 @@
const struct clk_div_table *table, u8 width,
unsigned long flags)
{
+ struct clk_hw *parent = clk_hw_get_parent(hw);
int i, bestdiv = 0;
unsigned long parent_rate, best = 0, now, maxdiv;
unsigned long parent_rate_saved = *best_parent_rate;
+ if (!parent)
+ return -EINVAL;
+
if (!rate)
rate = 1;
@@ -314,8 +321,7 @@
*best_parent_rate = parent_rate_saved;
return i;
}
- parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
- rate * i);
+ parent_rate = clk_hw_round_rate(parent, rate * i);
now = DIV_ROUND_UP_ULL((u64)parent_rate, i);
if (_is_best_div(rate, now, best, flags)) {
bestdiv = i;
@@ -326,7 +332,7 @@
if (!bestdiv) {
bestdiv = _get_maxdiv(table, width, flags);
- *best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), 1);
+ *best_parent_rate = clk_hw_round_rate(parent, 1);
}
return bestdiv;
diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c
index 2a3e9d8..96d37175 100644
--- a/drivers/clk/clk-scpi.c
+++ b/drivers/clk/clk-scpi.c
@@ -290,13 +290,15 @@
of_node_put(child);
return ret;
}
- }
- /* Add the virtual cpufreq device */
- cpufreq_dev = platform_device_register_simple("scpi-cpufreq",
- -1, NULL, 0);
- if (IS_ERR(cpufreq_dev))
- pr_warn("unable to register cpufreq device");
+ if (match->data != &scpi_dvfs_ops)
+ continue;
+ /* Add the virtual cpufreq device if it's DVFS clock provider */
+ cpufreq_dev = platform_device_register_simple("scpi-cpufreq",
+ -1, NULL, 0);
+ if (IS_ERR(cpufreq_dev))
+ pr_warn("unable to register cpufreq device");
+ }
return 0;
}
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 7cdf45b..5638333 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -2631,7 +2631,46 @@
pr_info(fmt, ##__VA_ARGS__); \
} while (0)
-int clock_debug_print_clock(struct clk_core *c, struct seq_file *s)
+/*
+ * clock_debug_print_enabled_debug_suspend() - Print names of enabled clocks
+ * during suspend.
+ */
+static void clock_debug_print_enabled_debug_suspend(struct seq_file *s)
+{
+ struct clk_core *core;
+ int cnt = 0;
+
+ if (!mutex_trylock(&clk_debug_lock))
+ return;
+
+ clock_debug_output(s, 0, "Enabled clocks:\n");
+
+ hlist_for_each_entry(core, &clk_debug_list, debug_node) {
+ if (!core || !core->prepare_count)
+ continue;
+
+ if (core->vdd_class)
+ clock_debug_output(s, 0, " %s:%u:%u [%ld, %d]",
+ core->name, core->prepare_count,
+ core->enable_count, core->rate,
+ clk_find_vdd_level(core, core->rate));
+
+ else
+ clock_debug_output(s, 0, " %s:%u:%u [%ld]",
+ core->name, core->prepare_count,
+ core->enable_count, core->rate);
+ cnt++;
+ }
+
+ mutex_unlock(&clk_debug_lock);
+
+ if (cnt)
+ clock_debug_output(s, 0, "Enabled clock count: %d\n", cnt);
+ else
+ clock_debug_output(s, 0, "No clocks enabled.\n");
+}
+
+static int clock_debug_print_clock(struct clk_core *c, struct seq_file *s)
{
char *start = "";
struct clk *clk;
@@ -3005,7 +3044,7 @@
if (likely(!debug_suspend))
return;
- clock_debug_print_enabled_clocks(NULL);
+ clock_debug_print_enabled_debug_suspend(NULL);
}
EXPORT_SYMBOL_GPL(clock_debug_print_enabled);
diff --git a/drivers/clk/clk.h b/drivers/clk/clk.h
index f0db049..b52aa25 100644
--- a/drivers/clk/clk.h
+++ b/drivers/clk/clk.h
@@ -10,6 +10,7 @@
*/
struct clk_hw;
+struct clk_core;
#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
diff --git a/drivers/clk/qcom/camcc-sdm845.c b/drivers/clk/qcom/camcc-sdm845.c
index 86e148d..1984d4a 100644
--- a/drivers/clk/qcom/camcc-sdm845.c
+++ b/drivers/clk/qcom/camcc-sdm845.c
@@ -2059,7 +2059,7 @@
{
return platform_driver_register(&cam_cc_sdm845_driver);
}
-core_initcall(cam_cc_sdm845_init);
+subsys_initcall(cam_cc_sdm845_init);
static void __exit cam_cc_sdm845_exit(void)
{
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index 4d3b427..e7d3ee4 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -815,7 +815,7 @@
return -EINVAL;
return divider_round_rate(hw, rate, prate, pll->post_div_table,
- pll->width, CLK_DIVIDER_ROUND_CLOSEST);
+ pll->width, CLK_DIVIDER_ROUND_KHZ);
}
static int clk_generic_pll_postdiv_set_rate(struct clk_hw *hw,
diff --git a/drivers/clk/qcom/clk-aop-qmp.c b/drivers/clk/qcom/clk-aop-qmp.c
index f6aeb19..ff229fb 100644
--- a/drivers/clk/qcom/clk-aop-qmp.c
+++ b/drivers/clk/qcom/clk-aop-qmp.c
@@ -30,7 +30,7 @@
void *data;
};
-#define DEFINE_CLK_AOP_QMP(_name, _class, _res, _estate, _dstate) \
+#define DEFINE_CLK_AOP_QMP(_name, _class, _res, _estate, _dstate, _flags) \
static struct clk_aop_qmp _name = { \
.msg.class = #_class, \
.msg.res = #_res, \
@@ -40,7 +40,7 @@
.ops = &aop_qmp_clk_ops, \
.name = #_name, \
.num_parents = 0, \
- .flags = CLK_ENABLE_HAND_OFF, \
+ .flags = _flags, \
}, \
}
@@ -214,13 +214,25 @@
.is_enabled = clk_aop_qmp_is_enabled,
};
-DEFINE_CLK_AOP_QMP(qdss_qmp_clk, clock, qdss,
- QDSS_CLK_LEVEL_DYNAMIC, QDSS_CLK_LEVEL_OFF);
+DEFINE_CLK_AOP_QMP(qdss_qmp_clk, clock, qdss, QDSS_CLK_LEVEL_DYNAMIC,
+ QDSS_CLK_LEVEL_OFF, CLK_ENABLE_HAND_OFF);
+DEFINE_CLK_AOP_QMP(qdss_ao_qmp_clk, clock, qdss_ao, QDSS_CLK_LEVEL_DYNAMIC,
+ QDSS_CLK_LEVEL_OFF, 0);
static struct clk_hw *aop_qmp_clk_hws[] = {
[QDSS_CLK] = &qdss_qmp_clk.hw,
+ [QDSS_AO_CLK] = &qdss_ao_qmp_clk.hw,
};
+/*
+ * Due to HW limitations on v1, the qdss_ao clock was not supported by the clock
+ * driver on AOP.
+ */
+static void aop_qmp_fixup_v1(void)
+{
+ aop_qmp_clk_hws[QDSS_AO_CLK] = NULL;
+}
+
static int qmp_update_client(struct clk_hw *hw, struct device *dev,
struct mbox_chan *mbox)
{
@@ -250,7 +262,7 @@
static int aop_qmp_clk_probe(struct platform_device *pdev)
{
- struct clk *clk;
+ struct clk *clk = NULL;
struct device_node *np = pdev->dev.of_node;
struct mbox_chan *mbox = NULL;
int num_clks = ARRAY_SIZE(aop_qmp_clk_hws);
@@ -264,7 +276,12 @@
if (ret < 0)
return ret;
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,aop-qmp-clk-v1"))
+ aop_qmp_fixup_v1();
+
for (i = 1; i < num_clks; i++) {
+ if (!aop_qmp_clk_hws[i])
+ continue;
ret = qmp_update_client(aop_qmp_clk_hws[i], &pdev->dev, mbox);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to update QMP client %d\n",
@@ -273,13 +290,17 @@
}
}
- for (i = 0; i < num_clks; i++) {
- ret = clk_aop_qmp_prepare(aop_qmp_clk_hws[i]);
- if (ret < 0)
- goto fail;
- }
+ /*
+ * Proxy vote on the QDSS clock. This is needed to avoid issues with
+ * excessive requests on the QMP layer during the QDSS driver probe.
+ */
+ ret = clk_aop_qmp_prepare(&qdss_qmp_clk.hw);
+ if (ret < 0)
+ goto fail;
for (i = 0; i < num_clks; i++) {
+ if (!aop_qmp_clk_hws[i])
+ continue;
clk = devm_clk_register(&pdev->dev, aop_qmp_clk_hws[i]);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
@@ -303,7 +324,8 @@
}
static const struct of_device_id aop_qmp_clk_of_match[] = {
- { .compatible = "qcom,aop-qmp-clk", },
+ { .compatible = "qcom,aop-qmp-clk-v1" },
+ { .compatible = "qcom,aop-qmp-clk-v2" },
{}
};
diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c
index 3ca8e1c..fa0ca36 100644
--- a/drivers/clk/qcom/clk-branch.c
+++ b/drivers/clk/qcom/clk-branch.c
@@ -92,7 +92,7 @@
} else if (br->halt_check == BRANCH_HALT_ENABLE ||
br->halt_check == BRANCH_HALT ||
(enabling && voted)) {
- int count = 200;
+ int count = 500;
while (count-- > 0) {
if (check_halt(br, enabling))
@@ -287,20 +287,30 @@
static int clk_branch2_prepare(struct clk_hw *hw)
{
- struct clk_branch *branch = to_clk_branch(hw);
- struct clk_hw *parent = clk_hw_get_parent(hw);
- unsigned long curr_rate, branch_rate = branch->rate;
+ struct clk_branch *branch;
+ struct clk_hw *parent;
+ unsigned long curr_rate;
int ret = 0;
+ if (!hw)
+ return -EINVAL;
+
+ branch = to_clk_branch(hw);
+ parent = clk_hw_get_parent(hw);
+ if (!branch)
+ return -EINVAL;
+
/*
* Do the rate aggregation and scaling of the RCG in the prepare/
* unprepare functions to avoid potential RPM(/h) communication due to
* votes on the voltage rails.
*/
if (branch->aggr_sibling_rates) {
+ if (!parent)
+ return -EINVAL;
curr_rate = clk_aggregate_rate(hw, parent->core);
- if (branch_rate > curr_rate) {
- ret = clk_set_rate(parent->clk, branch_rate);
+ if (branch->rate > curr_rate) {
+ ret = clk_set_rate(parent->clk, branch->rate);
if (ret)
goto exit;
}
@@ -316,13 +326,23 @@
static void clk_branch2_unprepare(struct clk_hw *hw)
{
- struct clk_branch *branch = to_clk_branch(hw);
- struct clk_hw *parent = clk_hw_get_parent(hw);
- unsigned long curr_rate, new_rate, branch_rate = branch->rate;
+ struct clk_branch *branch;
+ struct clk_hw *parent;
+ unsigned long curr_rate, new_rate;
+
+ if (!hw)
+ return;
+
+ branch = to_clk_branch(hw);
+ parent = clk_hw_get_parent(hw);
+ if (!branch)
+ return;
if (branch->aggr_sibling_rates) {
+ if (!parent)
+ return;
new_rate = clk_aggregate_rate(hw, parent->core);
- curr_rate = max(new_rate, branch_rate);
+ curr_rate = max(new_rate, branch->rate);
if (new_rate < curr_rate)
if (clk_set_rate(parent->clk, new_rate))
pr_err("Failed to scale %s to %lu\n",
diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c
index 7aef887..f93aba1 100644
--- a/drivers/clk/qcom/clk-cpu-osm.c
+++ b/drivers/clk/qcom/clk-cpu-osm.c
@@ -45,6 +45,7 @@
#include "clk-debug.h"
#define OSM_INIT_RATE 300000000UL
+#define XO_RATE 19200000UL
#define OSM_TABLE_SIZE 40
#define SINGLE_CORE 1
#define MAX_CLUSTER_CNT 3
@@ -65,6 +66,7 @@
#define PERFCL_EFUSE_MASK 0x7
#define ENABLE_REG 0x0
+#define ENABLE_OSM BIT(0)
#define FREQ_REG 0x110
#define VOLT_REG 0x114
#define OVERRIDE_REG 0x118
@@ -121,6 +123,7 @@
#define MIN_VCO_VAL 0x2b
#define MAX_VC 63
+#define MEM_ACC_LEVELS_LUT 2
#define MAX_MEM_ACC_LEVELS 3
#define MAX_MEM_ACC_VAL_PER_LEVEL 3
#define MAX_MEM_ACC_VALUES (MAX_MEM_ACC_LEVELS * \
@@ -150,12 +153,17 @@
#define DATA_MEM(n) (0x400 + (n) * 4)
-#define DCVS_PERF_STATE_DESIRED_REG_0 0x780
-#define DCVS_PERF_STATE_DESIRED_REG(n) (DCVS_PERF_STATE_DESIRED_REG_0 + \
- (4 * n))
-#define OSM_CYCLE_COUNTER_STATUS_REG_0 0x7d0
-#define OSM_CYCLE_COUNTER_STATUS_REG(n) (OSM_CYCLE_COUNTER_STATUS_REG_0 + \
- (4 * n))
+#define DCVS_PERF_STATE_DESIRED_REG_0_V1 0x780
+#define DCVS_PERF_STATE_DESIRED_REG_0_V2 0x920
+#define DCVS_PERF_STATE_DESIRED_REG(n, v2) \
+ (((v2) ? DCVS_PERF_STATE_DESIRED_REG_0_V2 \
+ : DCVS_PERF_STATE_DESIRED_REG_0_V1) + 4 * (n))
+
+#define OSM_CYCLE_COUNTER_STATUS_REG_0_V1 0x7d0
+#define OSM_CYCLE_COUNTER_STATUS_REG_0_V2 0x9c0
+#define OSM_CYCLE_COUNTER_STATUS_REG(n, v2) \
+ (((v2) ? OSM_CYCLE_COUNTER_STATUS_REG_0_V2 \
+ : OSM_CYCLE_COUNTER_STATUS_REG_0_V1) + 4 * (n))
/* ACD registers */
#define ACD_HW_VERSION 0x0
@@ -260,6 +268,7 @@
u32 speedbin;
u32 mem_acc_crossover_vc_addr;
u32 mem_acc_addr[MEM_ACC_ADDRS];
+ u32 mem_acc_level_vc[MEM_ACC_LEVELS_LUT];
u32 ramp_ctl_addr;
u32 apm_mode_ctl;
u32 apm_status_ctl;
@@ -444,6 +453,9 @@
return 0;
}
+static bool is_v2;
+static bool osm_tz_enabled;
+
static inline struct clk_osm *to_clk_osm(struct clk_hw *_hw)
{
return container_of(_hw, struct clk_osm, hw);
@@ -511,6 +523,9 @@
int i;
unsigned long rrate = 0;
+ if (!hw)
+ return -EINVAL;
+
/*
* If the rate passed in is 0, return the first frequency in the
* FMAX table.
@@ -534,23 +549,12 @@
static int clk_osm_search_table(struct osm_entry *table, int entries, long rate)
{
- int quad_core_index, single_core_index = 0;
- int core_count;
+ int index = 0;
- for (quad_core_index = 0; quad_core_index < entries;
- quad_core_index++) {
- core_count = CORE_COUNT_VAL(table[quad_core_index].freq_data);
- if (rate == table[quad_core_index].frequency &&
- core_count == SINGLE_CORE) {
- single_core_index = quad_core_index;
- continue;
- }
- if (rate == table[quad_core_index].frequency &&
- core_count == MAX_CORE_COUNT)
- return quad_core_index;
+ for (index = 0; index < entries; index++) {
+ if (rate == table[index].frequency)
+ return index;
}
- if (single_core_index)
- return single_core_index;
return -EINVAL;
}
@@ -573,55 +577,11 @@
}
const struct clk_ops clk_ops_cpu_osm = {
- .enable = clk_osm_enable,
.round_rate = clk_osm_round_rate,
.list_rate = clk_osm_list_rate,
.debug_init = clk_debug_measure_add,
};
-static struct clk_ops clk_ops_core;
-
-static int cpu_clk_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- struct clk_osm *cpuclk = to_clk_osm(hw);
- struct clk_hw *p_hw = clk_hw_get_parent(hw);
- struct clk_osm *parent = to_clk_osm(p_hw);
- int index = 0;
- unsigned long r_rate;
-
- if (!cpuclk || !parent)
- return -EINVAL;
-
- r_rate = clk_osm_round_rate(p_hw, rate, NULL);
-
- if (rate != r_rate) {
- pr_err("invalid requested rate=%ld\n", rate);
- return -EINVAL;
- }
-
- /* Convert rate to table index */
- index = clk_osm_search_table(parent->osm_table,
- parent->num_entries, r_rate);
- if (index < 0) {
- pr_err("cannot set %s to %lu\n", clk_hw_get_name(hw), rate);
- return -EINVAL;
- }
- pr_debug("rate: %lu --> index %d\n", rate, index);
- /*
- * Choose index and send request to OSM hardware.
- * TODO: Program INACTIVE_OS_REQUEST if needed.
- */
- clk_osm_write_reg(parent, index,
- DCVS_PERF_STATE_DESIRED_REG(cpuclk->core_num),
- OSM_BASE);
-
- /* Make sure the write goes through before proceeding */
- clk_osm_mb(parent, OSM_BASE);
-
- return 0;
-}
-
static int l3_clk_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
@@ -648,8 +608,8 @@
}
pr_debug("rate: %lu --> index %d\n", rate, index);
- clk_osm_write_reg(cpuclk, index, DCVS_PERF_STATE_DESIRED_REG_0,
- OSM_BASE);
+ clk_osm_write_reg(cpuclk, index, DCVS_PERF_STATE_DESIRED_REG(0, is_v2),
+ OSM_BASE);
/* Make sure the write goes through before proceeding */
clk_osm_mb(cpuclk, OSM_BASE);
@@ -657,38 +617,6 @@
return 0;
}
-static long cpu_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
-{
- struct clk_hw *parent_hw = clk_hw_get_parent(hw);
-
- if (!parent_hw)
- return -EINVAL;
-
- return clk_hw_round_rate(parent_hw, rate);
-}
-
-static unsigned long cpu_clk_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct clk_osm *cpuclk = to_clk_osm(hw);
- struct clk_hw *p_hw = clk_hw_get_parent(hw);
- struct clk_osm *parent = to_clk_osm(p_hw);
- int index = 0;
-
- if (!cpuclk || !parent)
- return -EINVAL;
-
- index = clk_osm_read_reg(parent,
- DCVS_PERF_STATE_DESIRED_REG(cpuclk->core_num));
-
- pr_debug("%s: Index %d, freq %ld\n", __func__, index,
- parent->osm_table[index].frequency);
-
- /* Convert index to frequency */
- return parent->osm_table[index].frequency;
-}
-
static unsigned long l3_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
@@ -698,7 +626,7 @@
if (!cpuclk)
return -EINVAL;
- index = clk_osm_read_reg(cpuclk, DCVS_PERF_STATE_DESIRED_REG_0);
+ index = clk_osm_read_reg(cpuclk, DCVS_PERF_STATE_DESIRED_REG(0, is_v2));
pr_debug("%s: Index %d, freq %ld\n", __func__, index,
cpuclk->osm_table[index].frequency);
@@ -708,7 +636,7 @@
}
-const struct clk_ops clk_ops_l3_osm = {
+static struct clk_ops clk_ops_l3_osm = {
.enable = clk_osm_enable,
.round_rate = clk_osm_round_rate,
.list_rate = clk_osm_list_rate,
@@ -759,7 +687,7 @@
.name = "cpu0_pwrcl_clk",
.parent_names = (const char *[]){ "pwrcl_clk" },
.num_parents = 1,
- .ops = &clk_ops_core,
+ .ops = &clk_dummy_ops,
},
};
@@ -771,8 +699,7 @@
.name = "cpu1_pwrcl_clk",
.parent_names = (const char *[]){ "pwrcl_clk" },
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_ops_core,
+ .ops = &clk_dummy_ops,
},
};
@@ -784,8 +711,7 @@
.name = "cpu2_pwrcl_clk",
.parent_names = (const char *[]){ "pwrcl_clk" },
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_ops_core,
+ .ops = &clk_dummy_ops,
},
};
@@ -797,8 +723,7 @@
.name = "cpu3_pwrcl_clk",
.parent_names = (const char *[]){ "pwrcl_clk" },
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_ops_core,
+ .ops = &clk_dummy_ops,
},
};
@@ -816,7 +741,7 @@
.name = "cpu4_perfcl_clk",
.parent_names = (const char *[]){ "perfcl_clk" },
.num_parents = 1,
- .ops = &clk_ops_core,
+ .ops = &clk_dummy_ops,
},
};
@@ -828,8 +753,7 @@
.name = "cpu5_perfcl_clk",
.parent_names = (const char *[]){ "perfcl_clk" },
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_ops_core,
+ .ops = &clk_dummy_ops,
},
};
@@ -841,8 +765,7 @@
.name = "cpu6_perfcl_clk",
.parent_names = (const char *[]){ "perfcl_clk" },
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_ops_core,
+ .ops = &clk_dummy_ops,
},
};
@@ -854,8 +777,7 @@
.name = "cpu7_perfcl_clk",
.parent_names = (const char *[]){ "perfcl_clk" },
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_ops_core,
+ .ops = &clk_dummy_ops,
},
};
@@ -972,7 +894,8 @@
static void
osm_set_index(struct clk_osm *c, unsigned int index, unsigned int num)
{
- clk_osm_write_reg(c, index, DCVS_PERF_STATE_DESIRED_REG(num), OSM_BASE);
+ clk_osm_write_reg(c, index, DCVS_PERF_STATE_DESIRED_REG(num, is_v2),
+ OSM_BASE);
/* Make sure the write goes through before proceeding */
clk_osm_mb(c, OSM_BASE);
@@ -997,8 +920,8 @@
return 0;
c = policy->driver_data;
- index = clk_osm_read_reg(c, DCVS_PERF_STATE_DESIRED_REG(c->core_num));
-
+ index = clk_osm_read_reg(c,
+ DCVS_PERF_STATE_DESIRED_REG(c->core_num, is_v2));
return policy->freq_table[index].frequency;
}
@@ -1125,21 +1048,6 @@
static void clk_osm_program_mem_acc_regs(struct clk_osm *c)
{
- int curr_level, i, j = 0;
- int mem_acc_level_map[MAX_MEM_ACC_LEVELS] = {MAX_VC, MAX_VC, MAX_VC};
-
- curr_level = c->osm_table[0].mem_acc_level;
- for (i = 0; i < c->num_entries; i++) {
- if (curr_level == MAX_MEM_ACC_LEVELS)
- break;
-
- if (c->osm_table[i].mem_acc_level != curr_level) {
- mem_acc_level_map[j++] =
- c->osm_table[i].virtual_corner;
- curr_level = c->osm_table[i].mem_acc_level;
- }
- }
-
if (c->secure_init) {
clk_osm_write_seq_reg(c,
c->pbases[OSM_BASE] + MEMACC_CROSSOVER_VC,
@@ -1149,13 +1057,8 @@
clk_osm_write_seq_reg(c, c->mem_acc_addr[2], DATA_MEM(50));
clk_osm_write_seq_reg(c, c->mem_acc_crossover_vc,
DATA_MEM(78));
- clk_osm_write_seq_reg(c, mem_acc_level_map[0], DATA_MEM(79));
- if (c == &perfcl_clk)
- clk_osm_write_seq_reg(c, c->mem_acc_threshold_vc,
- DATA_MEM(80));
- else
- clk_osm_write_seq_reg(c, mem_acc_level_map[1],
- DATA_MEM(80));
+ clk_osm_write_seq_reg(c, c->mem_acc_level_vc[0], DATA_MEM(79));
+ clk_osm_write_seq_reg(c, c->mem_acc_level_vc[1], DATA_MEM(80));
/*
* Note that DATA_MEM[81] -> DATA_MEM[89] values will be
* confirmed post-si. Use a value of 1 for DATA_MEM[89] and
@@ -1166,13 +1069,9 @@
scm_io_write(c->pbases[SEQ_BASE] + DATA_MEM(78),
c->mem_acc_crossover_vc);
scm_io_write(c->pbases[SEQ_BASE] + DATA_MEM(79),
- mem_acc_level_map[0]);
- if (c == &perfcl_clk)
- scm_io_write(c->pbases[SEQ_BASE] + DATA_MEM(80),
- c->mem_acc_threshold_vc);
- else
- scm_io_write(c->pbases[SEQ_BASE] + DATA_MEM(80),
- mem_acc_level_map[1]);
+ c->mem_acc_level_vc[0]);
+ scm_io_write(c->pbases[SEQ_BASE] + DATA_MEM(80),
+ c->mem_acc_level_vc[1]);
}
}
@@ -1749,7 +1648,8 @@
/* Program LVAL corresponding to first turbo VC */
for (i = 0; i < c->num_entries; i++) {
- if (c->osm_table[i].mem_acc_level == MAX_MEM_ACC_LEVELS) {
+ if (c->osm_table[i].virtual_corner ==
+ c->mem_acc_level_vc[1]) {
lval = c->osm_table[i].freq_data & GENMASK(7, 0);
break;
}
@@ -1954,7 +1854,9 @@
static u64 clk_osm_get_cpu_cycle_counter(int cpu)
{
u32 val;
+ int core_num;
unsigned long flags;
+ u64 cycle_counter_ret;
struct clk_osm *parent, *c = logical_cpu_to_clk(cpu);
if (IS_ERR_OR_NULL(c)) {
@@ -1969,12 +1871,9 @@
* Use core 0's copy as proxy for the whole cluster when per
* core DCVS is disabled.
*/
- if (parent->per_core_dcvs)
- val = clk_osm_read_reg_no_log(parent,
- OSM_CYCLE_COUNTER_STATUS_REG(c->core_num));
- else
- val = clk_osm_read_reg_no_log(parent,
- OSM_CYCLE_COUNTER_STATUS_REG(0));
+ core_num = parent->per_core_dcvs ? c->core_num : 0;
+ val = clk_osm_read_reg_no_log(parent,
+ OSM_CYCLE_COUNTER_STATUS_REG(core_num, is_v2));
if (val < c->prev_cycle_counter) {
/* Handle counter overflow */
@@ -1985,9 +1884,10 @@
c->total_cycle_counter += val - c->prev_cycle_counter;
c->prev_cycle_counter = val;
}
+ cycle_counter_ret = c->total_cycle_counter;
spin_unlock_irqrestore(&parent->lock, flags);
- return c->total_cycle_counter;
+ return cycle_counter_ret;
}
static void clk_osm_setup_cycle_counters(struct clk_osm *c)
@@ -2143,6 +2043,10 @@
c->osm_table[j].override_data,
c->osm_table[j].mem_acc_level);
+ data = (array[i + FREQ_DATA] & GENMASK(29, 28)) >> 28;
+ if (j && !c->min_cpr_vc && !data)
+ c->min_cpr_vc = c->osm_table[j].virtual_corner;
+
data = (array[i + FREQ_DATA] & GENMASK(18, 16)) >> 16;
if (!last_entry && data == MAX_CORE_COUNT) {
fmax_temp[k] = array[i];
@@ -2176,6 +2080,49 @@
return rc;
}
+static int clk_osm_read_lut(struct platform_device *pdev, struct clk_osm *c)
+{
+ u32 data, src, lval, i, j = OSM_TABLE_SIZE;
+
+ for (i = 0; i < OSM_TABLE_SIZE; i++) {
+ data = clk_osm_read_reg(c, FREQ_REG + i * OSM_REG_SIZE);
+ src = ((data & GENMASK(31, 30)) >> 30);
+ lval = (data & GENMASK(7, 0));
+
+ if (!src)
+ c->osm_table[i].frequency = OSM_INIT_RATE;
+ else
+ c->osm_table[i].frequency = XO_RATE * lval;
+
+ data = clk_osm_read_reg(c, VOLT_REG + i * OSM_REG_SIZE);
+ c->osm_table[i].virtual_corner =
+ ((data & GENMASK(21, 16)) >> 16);
+ c->osm_table[i].open_loop_volt = (data & GENMASK(11, 0));
+
+ pr_debug("index=%d freq=%ld virtual_corner=%d open_loop_voltage=%u\n",
+ i, c->osm_table[i].frequency,
+ c->osm_table[i].virtual_corner,
+ c->osm_table[i].open_loop_volt);
+
+ if (i > 0 && j == OSM_TABLE_SIZE && c->osm_table[i].frequency ==
+ c->osm_table[i - 1].frequency)
+ j = i;
+ }
+
+ osm_clks_init[c->cluster_num].rate_max = devm_kcalloc(&pdev->dev,
+ j, sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!osm_clks_init[c->cluster_num].rate_max)
+ return -ENOMEM;
+
+ for (i = 0; i < j; i++)
+ osm_clks_init[c->cluster_num].rate_max[i] =
+ c->osm_table[i].frequency;
+
+ c->num_entries = osm_clks_init[c->cluster_num].num_rate_max = j;
+ return 0;
+}
+
static int clk_osm_parse_acd_dt_configs(struct platform_device *pdev)
{
struct device_node *of = pdev->dev.of_node;
@@ -2323,11 +2270,9 @@
{
struct device_node *of = pdev->dev.of_node;
u32 *array;
+ char memacc_str[40];
int rc = 0;
struct resource *res;
- char l3_min_cpr_vc_str[] = "qcom,l3-min-cpr-vc-bin0";
- char pwrcl_min_cpr_vc_str[] = "qcom,pwrcl-min-cpr-vc-bin0";
- char perfcl_min_cpr_vc_str[] = "qcom,perfcl-min-cpr-vc-bin0";
array = devm_kzalloc(&pdev->dev, MAX_CLUSTER_CNT * sizeof(u32),
GFP_KERNEL);
@@ -2545,33 +2490,34 @@
return -ENOMEM;
}
- snprintf(l3_min_cpr_vc_str, ARRAY_SIZE(l3_min_cpr_vc_str),
- "qcom,l3-min-cpr-vc-bin%d", l3_clk.speedbin);
- rc = of_property_read_u32(of, l3_min_cpr_vc_str, &l3_clk.min_cpr_vc);
+ snprintf(memacc_str, ARRAY_SIZE(memacc_str),
+ "qcom,l3-memacc-level-vc-bin%d", l3_clk.speedbin);
+ rc = of_property_read_u32_array(of, memacc_str, l3_clk.mem_acc_level_vc,
+ MEM_ACC_LEVELS_LUT);
if (rc) {
dev_err(&pdev->dev, "unable to find %s property, rc=%d\n",
- l3_min_cpr_vc_str, rc);
- return -EINVAL;
+ memacc_str, rc);
+ return rc;
}
- snprintf(pwrcl_min_cpr_vc_str, ARRAY_SIZE(pwrcl_min_cpr_vc_str),
- "qcom,pwrcl-min-cpr-vc-bin%d", pwrcl_clk.speedbin);
- rc = of_property_read_u32(of, pwrcl_min_cpr_vc_str,
- &pwrcl_clk.min_cpr_vc);
+ snprintf(memacc_str, ARRAY_SIZE(memacc_str),
+ "qcom,pwrcl-memacc-level-vc-bin%d", pwrcl_clk.speedbin);
+ rc = of_property_read_u32_array(of, memacc_str,
+ pwrcl_clk.mem_acc_level_vc, MEM_ACC_LEVELS_LUT);
if (rc) {
dev_err(&pdev->dev, "unable to find %s property, rc=%d\n",
- pwrcl_min_cpr_vc_str, rc);
- return -EINVAL;
+ memacc_str, rc);
+ return rc;
}
- snprintf(perfcl_min_cpr_vc_str, ARRAY_SIZE(perfcl_min_cpr_vc_str),
- "qcom,perfcl-min-cpr-vc-bin%d", perfcl_clk.speedbin);
- rc = of_property_read_u32(of, perfcl_min_cpr_vc_str,
- &perfcl_clk.min_cpr_vc);
+ snprintf(memacc_str, ARRAY_SIZE(memacc_str),
+ "qcom,perfcl-memacc-level-vc-bin%d", pwrcl_clk.speedbin);
+ rc = of_property_read_u32_array(of, memacc_str,
+ perfcl_clk.mem_acc_level_vc, MEM_ACC_LEVELS_LUT);
if (rc) {
dev_err(&pdev->dev, "unable to find %s property, rc=%d\n",
- perfcl_min_cpr_vc_str, rc);
- return -EINVAL;
+ memacc_str, rc);
+ return rc;
}
l3_clk.secure_init = perfcl_clk.secure_init = pwrcl_clk.secure_init =
@@ -2683,6 +2629,12 @@
return -ENOMEM;
}
+ /* Check if OSM has been enabled already by trustzone. */
+ if (readl_relaxed(l3_clk.vbases[OSM_BASE] + ENABLE_REG) & ENABLE_OSM) {
+ dev_info(&pdev->dev, "OSM has been initialized and enabled by TZ software\n");
+ osm_tz_enabled = true;
+ }
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"osm_pwrcl_base");
if (!res) {
@@ -2716,6 +2668,9 @@
return -ENOMEM;
}
+ if (osm_tz_enabled)
+ return rc;
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "l3_pll");
if (!res) {
dev_err(&pdev->dev,
@@ -3107,6 +3062,9 @@
return PTR_ERR(ext_xo_clk);
}
+ is_v2 = of_device_is_compatible(pdev->dev.of_node,
+ "qcom,clk-cpu-osm-v2");
+
clk_data = devm_kzalloc(&pdev->dev, sizeof(struct clk_onecell_data),
GFP_KERNEL);
if (!clk_data)
@@ -3119,18 +3077,6 @@
clk_data->clk_num = num_clks;
- rc = clk_osm_parse_dt_configs(pdev);
- if (rc) {
- dev_err(&pdev->dev, "Unable to parse OSM device tree configurations\n");
- return rc;
- }
-
- rc = clk_osm_parse_acd_dt_configs(pdev);
- if (rc) {
- dev_err(&pdev->dev, "Unable to parse ACD device tree configurations\n");
- return rc;
- }
-
rc = clk_osm_resources_init(pdev);
if (rc) {
if (rc != -EPROBE_DEFER)
@@ -3139,234 +3085,282 @@
return rc;
}
- rc = clk_osm_acd_resources_init(pdev);
- if (rc) {
- dev_err(&pdev->dev, "ACD resources init failed, rc=%d\n",
- rc);
- return rc;
- }
-
- if (l3_clk.vbases[EFUSE_BASE]) {
- /* Multiple speed-bins are supported */
- pte_efuse = readl_relaxed(l3_clk.vbases[EFUSE_BASE]);
- l3_clk.speedbin = ((pte_efuse >> L3_EFUSE_SHIFT) &
- L3_EFUSE_MASK);
- snprintf(l3speedbinstr, ARRAY_SIZE(l3speedbinstr),
+ if (!osm_tz_enabled) {
+ if (l3_clk.vbases[EFUSE_BASE]) {
+ /* Multiple speed-bins are supported */
+ pte_efuse = readl_relaxed(l3_clk.vbases[EFUSE_BASE]);
+ l3_clk.speedbin = ((pte_efuse >> L3_EFUSE_SHIFT) &
+ L3_EFUSE_MASK);
+ snprintf(l3speedbinstr, ARRAY_SIZE(l3speedbinstr),
"qcom,l3-speedbin%d-v%d", l3_clk.speedbin, pvs_ver);
- }
+ }
- dev_info(&pdev->dev, "using L3 speed bin %u and pvs_ver %d\n",
- l3_clk.speedbin, pvs_ver);
+ dev_info(&pdev->dev, "using L3 speed bin %u and pvs_ver %d\n",
+ l3_clk.speedbin, pvs_ver);
- rc = clk_osm_get_lut(pdev, &l3_clk, l3speedbinstr);
- if (rc) {
- dev_err(&pdev->dev, "Unable to get OSM LUT for L3, rc=%d\n",
- rc);
- return rc;
- }
+ rc = clk_osm_get_lut(pdev, &l3_clk, l3speedbinstr);
+ if (rc) {
+ dev_err(&pdev->dev, "Unable to get OSM LUT for L3, rc=%d\n",
+ rc);
+ return rc;
+ }
- if (pwrcl_clk.vbases[EFUSE_BASE]) {
- /* Multiple speed-bins are supported */
- pte_efuse = readl_relaxed(pwrcl_clk.vbases[EFUSE_BASE]);
- pwrcl_clk.speedbin = ((pte_efuse >> PWRCL_EFUSE_SHIFT) &
- PWRCL_EFUSE_MASK);
- snprintf(pwrclspeedbinstr, ARRAY_SIZE(pwrclspeedbinstr),
+ if (pwrcl_clk.vbases[EFUSE_BASE]) {
+ /* Multiple speed-bins are supported */
+ pte_efuse = readl_relaxed(pwrcl_clk.vbases[EFUSE_BASE]);
+ pwrcl_clk.speedbin = ((pte_efuse >> PWRCL_EFUSE_SHIFT) &
+ PWRCL_EFUSE_MASK);
+ snprintf(pwrclspeedbinstr, ARRAY_SIZE(pwrclspeedbinstr),
"qcom,pwrcl-speedbin%d-v%d", pwrcl_clk.speedbin,
pvs_ver);
- }
+ }
- dev_info(&pdev->dev, "using pwrcl speed bin %u and pvs_ver %d\n",
- pwrcl_clk.speedbin, pvs_ver);
+ dev_info(&pdev->dev, "using pwrcl speed bin %u and pvs_ver %d\n",
+ pwrcl_clk.speedbin, pvs_ver);
- rc = clk_osm_get_lut(pdev, &pwrcl_clk, pwrclspeedbinstr);
- if (rc) {
- dev_err(&pdev->dev, "Unable to get OSM LUT for power cluster, rc=%d\n",
- rc);
- return rc;
- }
-
- if (perfcl_clk.vbases[EFUSE_BASE]) {
- /* Multiple speed-bins are supported */
- pte_efuse = readl_relaxed(perfcl_clk.vbases[EFUSE_BASE]);
- perfcl_clk.speedbin = ((pte_efuse >> PERFCL_EFUSE_SHIFT) &
- PERFCL_EFUSE_MASK);
- snprintf(perfclspeedbinstr, ARRAY_SIZE(perfclspeedbinstr),
- "qcom,perfcl-speedbin%d-v%d", perfcl_clk.speedbin,
- pvs_ver);
- }
-
- dev_info(&pdev->dev, "using perfcl speed bin %u and pvs_ver %d\n",
- perfcl_clk.speedbin, pvs_ver);
-
- rc = clk_osm_get_lut(pdev, &perfcl_clk, perfclspeedbinstr);
- if (rc) {
- dev_err(&pdev->dev, "Unable to get OSM LUT for perf cluster, rc=%d\n",
- rc);
- return rc;
- }
-
- rc = clk_osm_resolve_open_loop_voltages(&l3_clk);
- if (rc) {
- if (rc == -EPROBE_DEFER)
+ rc = clk_osm_get_lut(pdev, &pwrcl_clk, pwrclspeedbinstr);
+ if (rc) {
+ dev_err(&pdev->dev, "Unable to get OSM LUT for power cluster, rc=%d\n",
+ rc);
return rc;
- dev_err(&pdev->dev, "Unable to determine open-loop voltages for L3, rc=%d\n",
- rc);
- return rc;
- }
+ }
- rc = clk_osm_resolve_open_loop_voltages(&pwrcl_clk);
- if (rc) {
- if (rc == -EPROBE_DEFER)
+ if (perfcl_clk.vbases[EFUSE_BASE]) {
+ /* Multiple speed-bins are supported */
+ pte_efuse =
+ readl_relaxed(perfcl_clk.vbases[EFUSE_BASE]);
+ perfcl_clk.speedbin = ((pte_efuse >> PERFCL_EFUSE_SHIFT)
+ & PERFCL_EFUSE_MASK);
+ snprintf(perfclspeedbinstr,
+ ARRAY_SIZE(perfclspeedbinstr),
+ "qcom,perfcl-speedbin%d-v%d",
+ perfcl_clk.speedbin, pvs_ver);
+ }
+
+ dev_info(&pdev->dev, "using perfcl speed bin %u and pvs_ver %d\n",
+ perfcl_clk.speedbin, pvs_ver);
+
+ rc = clk_osm_get_lut(pdev, &perfcl_clk, perfclspeedbinstr);
+ if (rc) {
+ dev_err(&pdev->dev, "Unable to get OSM LUT for perf cluster, rc=%d\n",
+ rc);
return rc;
- dev_err(&pdev->dev, "Unable to determine open-loop voltages for power cluster, rc=%d\n",
- rc);
- return rc;
- }
+ }
- rc = clk_osm_resolve_open_loop_voltages(&perfcl_clk);
- if (rc) {
- if (rc == -EPROBE_DEFER)
+ rc = clk_osm_parse_dt_configs(pdev);
+ if (rc) {
+ dev_err(&pdev->dev, "Unable to parse OSM device tree configurations\n");
return rc;
- dev_err(&pdev->dev, "Unable to determine open-loop voltages for perf cluster, rc=%d\n",
- rc);
- return rc;
- }
+ }
- rc = clk_osm_resolve_crossover_corners(&l3_clk, pdev);
- if (rc)
- dev_info(&pdev->dev,
- "No APM crossover corner programmed for L3\n");
+ rc = clk_osm_parse_acd_dt_configs(pdev);
+ if (rc) {
+ dev_err(&pdev->dev, "Unable to parse ACD device tree configurations\n");
+ return rc;
+ }
- rc = clk_osm_resolve_crossover_corners(&pwrcl_clk, pdev);
- if (rc)
- dev_info(&pdev->dev,
- "No APM crossover corner programmed for pwrcl_clk\n");
+ rc = clk_osm_acd_resources_init(pdev);
+ if (rc) {
+ dev_err(&pdev->dev, "ACD resources init failed, rc=%d\n",
+ rc);
+ return rc;
+ }
- rc = clk_osm_resolve_crossover_corners(&perfcl_clk, pdev);
- if (rc)
- dev_info(&pdev->dev, "No MEM-ACC crossover corner programmed\n");
+ rc = clk_osm_resolve_open_loop_voltages(&l3_clk);
+ if (rc) {
+ if (rc == -EPROBE_DEFER)
+ return rc;
+ dev_err(&pdev->dev, "Unable to determine open-loop voltages for L3, rc=%d\n",
+ rc);
+ return rc;
+ }
+ rc = clk_osm_resolve_open_loop_voltages(&pwrcl_clk);
+ if (rc) {
+ if (rc == -EPROBE_DEFER)
+ return rc;
+ dev_err(&pdev->dev, "Unable to determine open-loop voltages for power cluster, rc=%d\n",
+ rc);
+ return rc;
+ }
+ rc = clk_osm_resolve_open_loop_voltages(&perfcl_clk);
+ if (rc) {
+ if (rc == -EPROBE_DEFER)
+ return rc;
+ dev_err(&pdev->dev, "Unable to determine open-loop voltages for perf cluster, rc=%d\n",
+ rc);
+ return rc;
+ }
- clk_osm_setup_cycle_counters(&l3_clk);
- clk_osm_setup_cycle_counters(&pwrcl_clk);
- clk_osm_setup_cycle_counters(&perfcl_clk);
+ rc = clk_osm_resolve_crossover_corners(&l3_clk, pdev);
+ if (rc)
+ dev_info(&pdev->dev,
+ "No APM crossover corner programmed for L3\n");
+ rc = clk_osm_resolve_crossover_corners(&pwrcl_clk, pdev);
+ if (rc)
+ dev_info(&pdev->dev,
+ "No APM crossover corner programmed for pwrcl_clk\n");
+ rc = clk_osm_resolve_crossover_corners(&perfcl_clk, pdev);
+ if (rc)
+ dev_info(&pdev->dev, "No MEM-ACC crossover corner programmed\n");
- clk_osm_print_osm_table(&l3_clk);
- clk_osm_print_osm_table(&pwrcl_clk);
- clk_osm_print_osm_table(&perfcl_clk);
+ clk_osm_setup_cycle_counters(&l3_clk);
+ clk_osm_setup_cycle_counters(&pwrcl_clk);
+ clk_osm_setup_cycle_counters(&perfcl_clk);
- rc = clk_osm_setup_hw_table(&l3_clk);
- if (rc) {
- dev_err(&pdev->dev, "failed to setup l3 hardware table\n");
- goto exit;
- }
- rc = clk_osm_setup_hw_table(&pwrcl_clk);
- if (rc) {
- dev_err(&pdev->dev, "failed to setup power cluster hardware table\n");
- goto exit;
- }
- rc = clk_osm_setup_hw_table(&perfcl_clk);
- if (rc) {
- dev_err(&pdev->dev, "failed to setup perf cluster hardware table\n");
- goto exit;
- }
+ clk_osm_print_osm_table(&l3_clk);
+ clk_osm_print_osm_table(&pwrcl_clk);
+ clk_osm_print_osm_table(&perfcl_clk);
- /* Policy tuning */
- rc = clk_osm_set_cc_policy(pdev);
- if (rc < 0) {
- dev_err(&pdev->dev, "cc policy setup failed");
- goto exit;
- }
+ rc = clk_osm_setup_hw_table(&l3_clk);
+ if (rc) {
+ dev_err(&pdev->dev, "failed to setup l3 hardware table\n");
+ goto exit;
+ }
+ rc = clk_osm_setup_hw_table(&pwrcl_clk);
+ if (rc) {
+ dev_err(&pdev->dev, "failed to setup power cluster hardware table\n");
+ goto exit;
+ }
+ rc = clk_osm_setup_hw_table(&perfcl_clk);
+ if (rc) {
+ dev_err(&pdev->dev, "failed to setup perf cluster hardware table\n");
+ goto exit;
+ }
- /* LLM Freq Policy Tuning */
- rc = clk_osm_set_llm_freq_policy(pdev);
- if (rc < 0) {
- dev_err(&pdev->dev, "LLM Frequency Policy setup failed");
- goto exit;
- }
+ /* Policy tuning */
+ rc = clk_osm_set_cc_policy(pdev);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "cc policy setup failed");
+ goto exit;
+ }
- /* LLM Voltage Policy Tuning */
- rc = clk_osm_set_llm_volt_policy(pdev);
- if (rc < 0) {
- dev_err(&pdev->dev, "Failed to set LLM voltage Policy");
- goto exit;
- }
+ /* LLM Freq Policy Tuning */
+ rc = clk_osm_set_llm_freq_policy(pdev);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "LLM Frequency Policy setup failed");
+ goto exit;
+ }
- clk_osm_setup_fsms(&l3_clk);
- clk_osm_setup_fsms(&pwrcl_clk);
- clk_osm_setup_fsms(&perfcl_clk);
+ /* LLM Voltage Policy Tuning */
+ rc = clk_osm_set_llm_volt_policy(pdev);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "Failed to set LLM voltage Policy");
+ goto exit;
+ }
- /* Program VC at which the array power supply needs to be switched */
- clk_osm_write_reg(&perfcl_clk, perfcl_clk.apm_threshold_vc,
+ clk_osm_setup_fsms(&l3_clk);
+ clk_osm_setup_fsms(&pwrcl_clk);
+ clk_osm_setup_fsms(&perfcl_clk);
+
+ /*
+ * Program the VC at which the array power supply
+ * needs to be switched.
+ */
+ clk_osm_write_reg(&perfcl_clk, perfcl_clk.apm_threshold_vc,
APM_CROSSOVER_VC, OSM_BASE);
- if (perfcl_clk.secure_init) {
- clk_osm_write_seq_reg(&perfcl_clk, perfcl_clk.apm_crossover_vc,
- DATA_MEM(77));
- clk_osm_write_seq_reg(&perfcl_clk,
+ if (perfcl_clk.secure_init) {
+ clk_osm_write_seq_reg(&perfcl_clk,
+ perfcl_clk.apm_crossover_vc, DATA_MEM(77));
+ clk_osm_write_seq_reg(&perfcl_clk,
(0x39 | (perfcl_clk.apm_threshold_vc << 6)),
DATA_MEM(111));
- } else {
- scm_io_write(perfcl_clk.pbases[SEQ_BASE] + DATA_MEM(77),
- perfcl_clk.apm_crossover_vc);
- scm_io_write(perfcl_clk.pbases[SEQ_BASE] + DATA_MEM(111),
+ } else {
+ scm_io_write(perfcl_clk.pbases[SEQ_BASE] + DATA_MEM(77),
+ perfcl_clk.apm_crossover_vc);
+ scm_io_write(perfcl_clk.pbases[SEQ_BASE] +
+ DATA_MEM(111),
(0x39 | (perfcl_clk.apm_threshold_vc << 6)));
- }
+ }
- /*
- * Perform typical secure-world HW initialization
- * as necessary.
- */
- clk_osm_do_additional_setup(&l3_clk, pdev);
- clk_osm_do_additional_setup(&pwrcl_clk, pdev);
- clk_osm_do_additional_setup(&perfcl_clk, pdev);
+ /*
+ * Perform typical secure-world HW initialization
+ * as necessary.
+ */
+ clk_osm_do_additional_setup(&l3_clk, pdev);
+ clk_osm_do_additional_setup(&pwrcl_clk, pdev);
+ clk_osm_do_additional_setup(&perfcl_clk, pdev);
- /* MEM-ACC Programming */
- clk_osm_program_mem_acc_regs(&l3_clk);
- clk_osm_program_mem_acc_regs(&pwrcl_clk);
- clk_osm_program_mem_acc_regs(&perfcl_clk);
+ /* MEM-ACC Programming */
+ clk_osm_program_mem_acc_regs(&l3_clk);
+ clk_osm_program_mem_acc_regs(&pwrcl_clk);
+ clk_osm_program_mem_acc_regs(&perfcl_clk);
- if (of_property_read_bool(pdev->dev.of_node, "qcom,osm-pll-setup")) {
- clk_osm_setup_cluster_pll(&l3_clk);
- clk_osm_setup_cluster_pll(&pwrcl_clk);
- clk_osm_setup_cluster_pll(&perfcl_clk);
- }
+ if (of_property_read_bool(pdev->dev.of_node,
+ "qcom,osm-pll-setup")) {
+ clk_osm_setup_cluster_pll(&l3_clk);
+ clk_osm_setup_cluster_pll(&pwrcl_clk);
+ clk_osm_setup_cluster_pll(&perfcl_clk);
+ }
- /* Misc programming */
- clk_osm_misc_programming(&l3_clk);
- clk_osm_misc_programming(&pwrcl_clk);
- clk_osm_misc_programming(&perfcl_clk);
+ /* Misc programming */
+ clk_osm_misc_programming(&l3_clk);
+ clk_osm_misc_programming(&pwrcl_clk);
+ clk_osm_misc_programming(&perfcl_clk);
- pwrcl_clk.per_core_dcvs = perfcl_clk.per_core_dcvs =
+ rc = clk_osm_acd_init(&l3_clk);
+ if (rc) {
+ pr_err("failed to initialize ACD for L3, rc=%d\n", rc);
+ goto exit;
+ }
+ rc = clk_osm_acd_init(&pwrcl_clk);
+ if (rc) {
+ pr_err("failed to initialize ACD for pwrcl, rc=%d\n",
+ rc);
+ goto exit;
+ }
+ rc = clk_osm_acd_init(&perfcl_clk);
+ if (rc) {
+ pr_err("failed to initialize ACD for perfcl, rc=%d\n",
+ rc);
+ goto exit;
+ }
+
+ pwrcl_clk.per_core_dcvs = perfcl_clk.per_core_dcvs =
of_property_read_bool(pdev->dev.of_node,
"qcom,enable-per-core-dcvs");
- if (pwrcl_clk.per_core_dcvs) {
+ if (pwrcl_clk.per_core_dcvs) {
+ val = clk_osm_read_reg(&pwrcl_clk, CORE_DCVS_CTRL);
+ val |= BIT(0);
+ clk_osm_write_reg(&pwrcl_clk, val, CORE_DCVS_CTRL,
+ OSM_BASE);
+ val = clk_osm_read_reg(&perfcl_clk, CORE_DCVS_CTRL);
+ val |= BIT(0);
+ clk_osm_write_reg(&perfcl_clk, val, CORE_DCVS_CTRL,
+ OSM_BASE);
+ }
+ } else {
+ /* OSM has been enabled already by trustzone */
+ rc = clk_osm_read_lut(pdev, &l3_clk);
+ if (rc) {
+ dev_err(&pdev->dev, "Unable to read OSM LUT for L3, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = clk_osm_read_lut(pdev, &pwrcl_clk);
+ if (rc) {
+ dev_err(&pdev->dev, "Unable to read OSM LUT for power cluster, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = clk_osm_read_lut(pdev, &perfcl_clk);
+ if (rc) {
+ dev_err(&pdev->dev, "Unable to read OSM LUT for perf cluster, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /* Check if per-core DCVS is enabled/not */
val = clk_osm_read_reg(&pwrcl_clk, CORE_DCVS_CTRL);
- val |= BIT(0);
- clk_osm_write_reg(&pwrcl_clk, val, CORE_DCVS_CTRL, OSM_BASE);
+ if (val && BIT(0))
+ pwrcl_clk.per_core_dcvs = true;
val = clk_osm_read_reg(&perfcl_clk, CORE_DCVS_CTRL);
- val |= BIT(0);
- clk_osm_write_reg(&perfcl_clk, val, CORE_DCVS_CTRL, OSM_BASE);
- }
+ if (val && BIT(0))
+ perfcl_clk.per_core_dcvs = true;
- clk_ops_core = clk_dummy_ops;
- clk_ops_core.set_rate = cpu_clk_set_rate;
- clk_ops_core.round_rate = cpu_clk_round_rate;
- clk_ops_core.recalc_rate = cpu_clk_recalc_rate;
-
- rc = clk_osm_acd_init(&l3_clk);
- if (rc) {
- pr_err("failed to initialize ACD for L3, rc=%d\n", rc);
- goto exit;
- }
- rc = clk_osm_acd_init(&pwrcl_clk);
- if (rc) {
- pr_err("failed to initialize ACD for pwrcl, rc=%d\n", rc);
- goto exit;
- }
- rc = clk_osm_acd_init(&perfcl_clk);
- if (rc) {
- pr_err("failed to initialize ACD for perfcl, rc=%d\n", rc);
- goto exit;
+ clk_ops_l3_osm.enable = NULL;
}
spin_lock_init(&l3_clk.lock);
@@ -3393,27 +3387,36 @@
get_online_cpus();
- /* Set the L3 clock to run off GPLL0 and enable OSM for the domain */
+ if (!osm_tz_enabled) {
+ populate_debugfs_dir(&l3_clk);
+ populate_debugfs_dir(&pwrcl_clk);
+ populate_debugfs_dir(&perfcl_clk);
+
+ /* Configure default rate to lowest frequency */
+ for (i = 0; i < MAX_CORE_COUNT; i++) {
+ osm_set_index(&pwrcl_clk, 0, i);
+ osm_set_index(&perfcl_clk, 0, i);
+ }
+ }
+ /*
+ * Set the L3 clock to run off GPLL0 and enable OSM for the domain.
+ * In the case that trustzone has already enabled OSM, bring the L3
+ * clock rate to a safe level until the devfreq driver comes up and
+ * votes for its desired frequency.
+ */
rc = clk_set_rate(l3_clk.hw.clk, OSM_INIT_RATE);
if (rc) {
dev_err(&pdev->dev, "Unable to set init rate on L3 cluster, rc=%d\n",
rc);
goto provider_err;
}
- WARN(clk_prepare_enable(l3_clk.hw.clk),
- "clk: Failed to enable clock for L3\n");
+ WARN(clk_prepare_enable(l3_cluster0_vote_clk.hw.clk),
+ "clk: Failed to enable cluster0 clock for L3\n");
+ WARN(clk_prepare_enable(l3_cluster1_vote_clk.hw.clk),
+ "clk: Failed to enable cluster1 clock for L3\n");
udelay(300);
- /* Configure default rate to lowest frequency */
- for (i = 0; i < MAX_CORE_COUNT; i++) {
- osm_set_index(&pwrcl_clk, 0, i);
- osm_set_index(&perfcl_clk, 0, i);
- }
-
populate_opp_table(pdev);
- populate_debugfs_dir(&l3_clk);
- populate_debugfs_dir(&pwrcl_clk);
- populate_debugfs_dir(&perfcl_clk);
of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
register_cpu_cycle_counter_cb(&cb);
@@ -3439,6 +3442,7 @@
static const struct of_device_id match_table[] = {
{ .compatible = "qcom,clk-cpu-osm" },
+ { .compatible = "qcom,clk-cpu-osm-v2" },
{}
};
@@ -3455,7 +3459,7 @@
{
return platform_driver_register(&clk_cpu_osm_driver);
}
-arch_initcall(clk_cpu_osm_init);
+subsys_initcall(clk_cpu_osm_init);
static void __exit clk_cpu_osm_exit(void)
{
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index b63c3c3..7382cfa 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -138,9 +138,6 @@
int ret;
u32 cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
- if (rcg->flags & DFS_ENABLE_RCG)
- return 0;
-
ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
CFG_SRC_SEL_MASK, cfg);
if (ret)
@@ -350,8 +347,9 @@
struct clk_hw *hw = &rcg->clkr.hw;
int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src);
+ /* Skip configuration if DFS control has been enabled for the RCG. */
if (rcg->flags & DFS_ENABLE_RCG)
- return -EPERM;
+ return 0;
if (index < 0)
return index;
@@ -481,7 +479,7 @@
}
ret = clk_rcg2_configure(rcg, f);
- if (ret && ret != -EPERM)
+ if (ret)
return ret;
if (rcg->flags & FORCE_ENABLE_RCG) {
@@ -934,10 +932,11 @@
EXPORT_SYMBOL_GPL(clk_byte2_ops);
static const struct frac_entry frac_table_pixel[] = {
+ { 1, 1 },
+ { 2, 3 },
+ { 4, 9 },
{ 3, 8 },
{ 2, 9 },
- { 4, 9 },
- { 1, 1 },
{ }
};
@@ -1028,6 +1027,7 @@
unsigned long parent_rate)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ struct clk_hw *parent = clk_hw_get_parent(hw);
struct freq_tbl f = { 0 };
unsigned long src_rate;
unsigned long num, den;
@@ -1035,7 +1035,12 @@
u32 hid_div, cfg;
int i, num_parents = clk_hw_get_num_parents(hw);
- src_rate = clk_get_rate(clk_hw_get_parent(hw)->clk);
+ if (!parent) {
+ pr_err("RCG parent isn't initialized\n");
+ return -EINVAL;
+ }
+
+ src_rate = clk_get_rate(parent->clk);
if (src_rate <= 0) {
pr_err("Invalid RCG parent rate\n");
return -EINVAL;
@@ -1196,13 +1201,15 @@
u32 *mode, u32 *pre_div)
{
struct clk_rcg2 *rcg;
- int num_parents = clk_hw_get_num_parents(hw);
+ int num_parents;
u32 cfg, mask;
int i, ret;
if (!hw)
return -EINVAL;
+ num_parents = clk_hw_get_num_parents(hw);
+
rcg = to_clk_rcg2(hw);
ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + offset, &cfg);
@@ -1348,7 +1355,9 @@
"RCG flags %x\n", i, dfs_freq_tbl[i].freq, dfs_freq_tbl[i].src,
dfs_freq_tbl[i].pre_div, dfs_freq_tbl[i].m,
dfs_freq_tbl[i].n, rcg_flags);
-
+ /* Skip the safe configuration if DFS has been enabled for the RCG. */
+ if (clk->enable_safe_config)
+ clk->enable_safe_config = false;
clk->flags |= rcg_flags;
clk->freq_tbl = dfs_freq_tbl;
err:
diff --git a/drivers/clk/qcom/clk-regmap-divider.c b/drivers/clk/qcom/clk-regmap-divider.c
index 5348491..c314d2c 100644
--- a/drivers/clk/qcom/clk-regmap-divider.c
+++ b/drivers/clk/qcom/clk-regmap-divider.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2017, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -28,8 +28,10 @@
{
struct clk_regmap_div *divider = to_clk_regmap_div(hw);
- return divider_round_rate(hw, rate, prate, NULL, divider->width,
- CLK_DIVIDER_ROUND_CLOSEST);
+ return divider_round_rate(hw, rate, prate, divider->table,
+ divider->width,
+ CLK_DIVIDER_ROUND_CLOSEST |
+ divider->flags);
}
static int div_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -39,8 +41,9 @@
struct clk_regmap *clkr = ÷r->clkr;
u32 div;
- div = divider_get_val(rate, parent_rate, NULL, divider->width,
- CLK_DIVIDER_ROUND_CLOSEST);
+ div = divider_get_val(rate, parent_rate, divider->table,
+ divider->width, CLK_DIVIDER_ROUND_CLOSEST |
+ divider->flags);
return regmap_update_bits(clkr->regmap, divider->reg,
(BIT(divider->width) - 1) << divider->shift,
@@ -58,8 +61,8 @@
div >>= divider->shift;
div &= BIT(divider->width) - 1;
- return divider_recalc_rate(hw, parent_rate, div, NULL,
- CLK_DIVIDER_ROUND_CLOSEST);
+ return divider_recalc_rate(hw, parent_rate, div, divider->table,
+ CLK_DIVIDER_ROUND_CLOSEST | divider->flags);
}
const struct clk_ops clk_regmap_div_ops = {
diff --git a/drivers/clk/qcom/clk-regmap-divider.h b/drivers/clk/qcom/clk-regmap-divider.h
index fc4492e..1c5e087 100644
--- a/drivers/clk/qcom/clk-regmap-divider.h
+++ b/drivers/clk/qcom/clk-regmap-divider.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2017, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -18,10 +18,12 @@
#include "clk-regmap.h"
struct clk_regmap_div {
- u32 reg;
- u32 shift;
- u32 width;
- struct clk_regmap clkr;
+ u32 reg;
+ u32 shift;
+ u32 width;
+ u32 flags;
+ const struct clk_div_table *table;
+ struct clk_regmap clkr;
};
extern const struct clk_ops clk_regmap_div_ops;
diff --git a/drivers/clk/qcom/clk-regmap.c b/drivers/clk/qcom/clk-regmap.c
index 1c856d3..aa024c2d 100644
--- a/drivers/clk/qcom/clk-regmap.c
+++ b/drivers/clk/qcom/clk-regmap.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2017, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -103,9 +103,12 @@
*/
int devm_clk_register_regmap(struct device *dev, struct clk_regmap *rclk)
{
- if (dev && dev_get_regmap(dev, NULL))
+ if (!dev || !rclk)
+ return -EINVAL;
+
+ if (dev_get_regmap(dev, NULL))
rclk->regmap = dev_get_regmap(dev, NULL);
- else if (dev && dev->parent)
+ else if (dev->parent)
rclk->regmap = dev_get_regmap(dev->parent, NULL);
return devm_clk_hw_register(dev, &rclk->hw);
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index 89bae2e..e1cda90 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -451,7 +451,7 @@
{
return platform_driver_register(&clk_rpmh_driver);
}
-core_initcall(clk_rpmh_init);
+subsys_initcall(clk_rpmh_init);
static void __exit clk_rpmh_exit(void)
{
diff --git a/drivers/clk/qcom/dispcc-sdm845.c b/drivers/clk/qcom/dispcc-sdm845.c
index d6ecf12..53bfe77 100644
--- a/drivers/clk/qcom/dispcc-sdm845.c
+++ b/drivers/clk/qcom/dispcc-sdm845.c
@@ -1130,7 +1130,7 @@
{
return platform_driver_register(&disp_cc_sdm845_driver);
}
-core_initcall(disp_cc_sdm845_init);
+subsys_initcall(disp_cc_sdm845_init);
static void __exit disp_cc_sdm845_exit(void)
{
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index cd47e14..17b2403 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -53,7 +53,9 @@
P_CORE_BI_PLL_TEST_SE,
P_GPLL0_OUT_EVEN,
P_GPLL0_OUT_MAIN,
+ P_GPLL1_OUT_MAIN,
P_GPLL4_OUT_MAIN,
+ P_GPLL6_OUT_MAIN,
P_SLEEP_CLK,
};
@@ -166,6 +168,36 @@
"core_bi_pll_test_se",
};
+static const struct parent_map gcc_parent_map_9[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL1_OUT_MAIN, 4 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_9[] = {
+ "bi_tcxo",
+ "gpll0",
+ "gpll1",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_10[] = {
+ "bi_tcxo",
+ "gpll0",
+ "gpll4",
+ "gpll0_out_even",
+ "core_bi_pll_test_se",
+};
+
static struct clk_dummy measure_only_snoc_clk = {
.rrate = 1000,
.hw.init = &(struct clk_init_data){
@@ -225,6 +257,28 @@
},
};
+static struct clk_alpha_pll gpll4 = {
+ .offset = 0x76000,
+ .vco_table = fabia_vco,
+ .num_vco = ARRAY_SIZE(fabia_vco),
+ .type = FABIA_PLL,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll4",
+ .parent_names = (const char *[]){ "bi_tcxo" },
+ .num_parents = 1,
+ .ops = &clk_fabia_fixed_pll_ops,
+ VDD_CX_FMAX_MAP4(
+ MIN, 615000000,
+ LOW, 1066000000,
+ LOW_L1, 1600000000,
+ NOMINAL, 2000000000),
+ },
+ },
+};
+
static const struct clk_div_table post_div_table_fabia_even[] = {
{ 0x0, 1 },
{ 0x1, 2 },
@@ -460,6 +514,7 @@
F(19200000, P_BI_TCXO, 1, 0, 0),
F(29491200, P_GPLL0_OUT_EVEN, 1, 1536, 15625),
F(32000000, P_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(38400000, P_GPLL0_OUT_EVEN, 1, 16, 125),
F(48000000, P_GPLL0_OUT_EVEN, 1, 4, 25),
F(64000000, P_GPLL0_OUT_EVEN, 1, 16, 75),
F(80000000, P_GPLL0_OUT_EVEN, 1, 4, 15),
@@ -814,7 +869,7 @@
F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
- F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(201500000, P_GPLL4_OUT_MAIN, 4, 0, 0),
{ }
};
@@ -822,12 +877,12 @@
.cmd_rcgr = 0x1400c,
.mnd_width = 8,
.hid_width = 5,
- .parent_map = gcc_parent_map_5,
+ .parent_map = gcc_parent_map_10,
.freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
.enable_safe_config = true,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc2_apps_clk_src",
- .parent_names = gcc_parent_names_5,
+ .parent_names = gcc_parent_names_10,
.num_parents = 5,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
@@ -835,7 +890,7 @@
MIN, 9600000,
LOWER, 19200000,
LOW, 100000000,
- LOW_L1, 200000000),
+ LOW_L1, 201500000),
},
};
@@ -1224,6 +1279,49 @@
},
};
+static struct clk_rcg2 gcc_vs_ctrl_clk_src = {
+ .cmd_rcgr = 0x7a030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_vs_ctrl_clk_src",
+ .parent_names = gcc_parent_names_3,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP1(
+ MIN, 19200000),
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_vsensor_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ F(600000000, P_GPLL0_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_vsensor_clk_src = {
+ .cmd_rcgr = 0x7a018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_9,
+ .freq_tbl = ftbl_gcc_vsensor_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_vsensor_clk_src",
+ .parent_names = gcc_parent_names_9,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP3(
+ MIN, 19200000,
+ LOW, 300000000,
+ LOW_L1, 600000000),
+ },
+};
+
static struct clk_branch gcc_aggre_noc_pcie_tbu_clk = {
.halt_reg = 0x90014,
.halt_check = BRANCH_HALT,
@@ -1347,6 +1445,24 @@
},
};
+static struct clk_branch gcc_apc_vs_clk = {
+ .halt_reg = 0x7a050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_apc_vs_clk",
+ .parent_names = (const char *[]){
+ "gcc_vsensor_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_boot_rom_ahb_clk = {
.halt_reg = 0x38004,
.halt_check = BRANCH_HALT_VOTED,
@@ -1372,6 +1488,7 @@
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camera_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -1398,6 +1515,7 @@
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camera_xo_clk",
+ .flags = CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -1492,7 +1610,7 @@
"gcc_cpuss_ahb_clk_src",
},
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -1506,6 +1624,7 @@
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_cpuss_dvm_bus_clk",
+ .flags = CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -1521,6 +1640,7 @@
.enable_mask = BIT(22),
.hw.init = &(struct clk_init_data){
.name = "gcc_cpuss_gnoc_clk",
+ .flags = CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -1567,6 +1687,7 @@
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_disp_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -1627,6 +1748,7 @@
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_disp_xo_clk",
+ .flags = CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -1696,6 +1818,7 @@
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gpu_cfg_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -1774,6 +1897,24 @@
},
};
+static struct clk_branch gcc_gpu_vs_clk = {
+ .halt_reg = 0x7a04c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a04c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_vs_clk",
+ .parent_names = (const char *[]){
+ "gcc_vsensor_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_mss_axis2_clk = {
.halt_reg = 0x8a008,
.halt_check = BRANCH_HALT,
@@ -1855,6 +1996,24 @@
},
};
+static struct clk_branch gcc_mss_vs_clk = {
+ .halt_reg = 0x7a048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_vs_clk",
+ .parent_names = (const char *[]){
+ "gcc_vsensor_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_pcie_0_aux_clk = {
.halt_reg = 0x6b01c,
.halt_check = BRANCH_HALT_VOTED,
@@ -1914,14 +2073,15 @@
},
};
-static struct clk_gate2 gcc_pcie_0_pipe_clk = {
- .udelay = 500,
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0x6b020,
+ .halt_check = BRANCH_VOTED,
.clkr = {
.enable_reg = 0x5200c,
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_pipe_clk",
- .ops = &clk_gate2_ops,
+ .ops = &clk_branch2_ops,
},
},
};
@@ -2013,14 +2173,15 @@
},
};
-static struct clk_gate2 gcc_pcie_1_pipe_clk = {
- .udelay = 500,
+static struct clk_branch gcc_pcie_1_pipe_clk = {
+ .halt_reg = 0x8d020,
+ .halt_check = BRANCH_VOTED,
.clkr = {
.enable_reg = 0x52004,
.enable_mask = BIT(30),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_pipe_clk",
- .ops = &clk_gate2_ops,
+ .ops = &clk_branch2_ops,
},
},
};
@@ -2613,7 +2774,7 @@
"gcc_cpuss_ahb_clk_src",
},
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -3322,6 +3483,60 @@
},
};
+static struct clk_branch gcc_vdda_vs_clk = {
+ .halt_reg = 0x7a00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vdda_vs_clk",
+ .parent_names = (const char *[]){
+ "gcc_vsensor_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vddcx_vs_clk = {
+ .halt_reg = 0x7a004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vddcx_vs_clk",
+ .parent_names = (const char *[]){
+ "gcc_vsensor_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vddmx_vs_clk = {
+ .halt_reg = 0x7a008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vddmx_vs_clk",
+ .parent_names = (const char *[]){
+ "gcc_vsensor_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_video_ahb_clk = {
.halt_reg = 0xb004,
.halt_check = BRANCH_HALT,
@@ -3332,6 +3547,7 @@
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_video_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -3358,6 +3574,40 @@
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_video_xo_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vs_ctrl_ahb_clk = {
+ .halt_reg = 0x7a014,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7a014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7a014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vs_ctrl_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vs_ctrl_clk = {
+ .halt_reg = 0x7a010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7a010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vs_ctrl_clk",
+ .parent_names = (const char *[]){
+ "gcc_vs_ctrl_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -3380,6 +3630,7 @@
&gcc_aggre_ufs_phy_axi_hw_ctl_clk.clkr,
[GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
[GCC_AGGRE_USB3_SEC_AXI_CLK] = &gcc_aggre_usb3_sec_axi_clk.clkr,
+ [GCC_APC_VS_CLK] = &gcc_apc_vs_clk.clkr,
[GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
[GCC_CAMERA_AHB_CLK] = &gcc_camera_ahb_clk.clkr,
[GCC_CAMERA_AXI_CLK] = &gcc_camera_axi_clk.clkr,
@@ -3413,12 +3664,14 @@
[GCC_GPU_IREF_CLK] = &gcc_gpu_iref_clk.clkr,
[GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
[GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_GPU_VS_CLK] = &gcc_gpu_vs_clk.clkr,
[GCC_MSS_AXIS2_CLK] = &gcc_mss_axis2_clk.clkr,
[GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr,
[GCC_MSS_GPLL0_DIV_CLK_SRC] = &gcc_mss_gpll0_div_clk_src.clkr,
[GCC_MSS_MFAB_AXIS_CLK] = &gcc_mss_mfab_axis_clk.clkr,
[GCC_MSS_Q6_MEMNOC_AXI_CLK] = &gcc_mss_q6_memnoc_axi_clk.clkr,
[GCC_MSS_SNOC_AXI_CLK] = &gcc_mss_snoc_axi_clk.clkr,
+ [GCC_MSS_VS_CLK] = &gcc_mss_vs_clk.clkr,
[GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
[GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr,
[GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
@@ -3558,11 +3811,19 @@
[GCC_USB3_SEC_PHY_COM_AUX_CLK] = &gcc_usb3_sec_phy_com_aux_clk.clkr,
[GCC_USB3_SEC_PHY_PIPE_CLK] = &gcc_usb3_sec_phy_pipe_clk.clkr,
[GCC_USB_PHY_CFG_AHB2PHY_CLK] = &gcc_usb_phy_cfg_ahb2phy_clk.clkr,
+ [GCC_VDDA_VS_CLK] = &gcc_vdda_vs_clk.clkr,
+ [GCC_VDDCX_VS_CLK] = &gcc_vddcx_vs_clk.clkr,
+ [GCC_VDDMX_VS_CLK] = &gcc_vddmx_vs_clk.clkr,
[GCC_VIDEO_AHB_CLK] = &gcc_video_ahb_clk.clkr,
[GCC_VIDEO_AXI_CLK] = &gcc_video_axi_clk.clkr,
[GCC_VIDEO_XO_CLK] = &gcc_video_xo_clk.clkr,
+ [GCC_VS_CTRL_AHB_CLK] = &gcc_vs_ctrl_ahb_clk.clkr,
+ [GCC_VS_CTRL_CLK] = &gcc_vs_ctrl_clk.clkr,
+ [GCC_VS_CTRL_CLK_SRC] = &gcc_vs_ctrl_clk_src.clkr,
+ [GCC_VSENSOR_CLK_SRC] = &gcc_vsensor_clk_src.clkr,
[GPLL0] = &gpll0.clkr,
[GPLL0_OUT_EVEN] = &gpll0_out_even.clkr,
+ [GPLL4] = &gpll4.clkr,
};
static const struct qcom_reset_map gcc_sdm845_resets[] = {
@@ -3818,22 +4079,9 @@
regmap_update_bits(regmap, GCC_MMSS_MISC, 0x3, 0x3);
regmap_update_bits(regmap, GCC_GPU_MISC, 0x3, 0x3);
- /* Keep these CPUSS clocks enabled always */
- clk_prepare_enable(gcc_cpuss_ahb_clk.clkr.hw.clk);
- clk_prepare_enable(gcc_sys_noc_cpuss_ahb_clk.clkr.hw.clk);
- clk_prepare_enable(gcc_cpuss_dvm_bus_clk.clkr.hw.clk);
- clk_prepare_enable(gcc_cpuss_gnoc_clk.clkr.hw.clk);
-
- /* Keep the core XO clock enabled always */
- clk_prepare_enable(gcc_camera_xo_clk.clkr.hw.clk);
- clk_prepare_enable(gcc_disp_xo_clk.clkr.hw.clk);
- clk_prepare_enable(gcc_video_xo_clk.clkr.hw.clk);
-
- /* Enable for core register access */
- clk_prepare_enable(gcc_gpu_cfg_ahb_clk.clkr.hw.clk);
- clk_prepare_enable(gcc_disp_ahb_clk.clkr.hw.clk);
- clk_prepare_enable(gcc_camera_ahb_clk.clkr.hw.clk);
- clk_prepare_enable(gcc_video_ahb_clk.clkr.hw.clk);
+ /* Keep this clock on all the time on SDM845 v1 */
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,gcc-sdm845"))
+ clk_prepare_enable(gcc_aggre_noc_pcie_tbu_clk.clkr.hw.clk);
/* DFS clock registration */
ret = qcom_cc_register_rcg_dfs(pdev, &gcc_sdm845_dfs_desc);
@@ -3856,7 +4104,7 @@
{
return platform_driver_register(&gcc_sdm845_driver);
}
-core_initcall(gcc_sdm845_init);
+subsys_initcall(gcc_sdm845_init);
static void __exit gcc_sdm845_exit(void)
{
diff --git a/drivers/clk/qcom/gpucc-sdm845.c b/drivers/clk/qcom/gpucc-sdm845.c
index 8442890..1e98e08 100644
--- a/drivers/clk/qcom/gpucc-sdm845.c
+++ b/drivers/clk/qcom/gpucc-sdm845.c
@@ -36,8 +36,12 @@
#include "clk-alpha-pll.h"
#include "vdd-level-sdm845.h"
+#define CX_GMU_CBCR_SLEEP_MASK 0xF
+#define CX_GMU_CBCR_SLEEP_SHIFT 4
+#define CX_GMU_CBCR_WAKE_MASK 0xF
+#define CX_GMU_CBCR_WAKE_SHIFT 8
+
#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-#define F_SLEW(f, s, h, m, n, sf) { (f), (s), (2 * (h) - 1), (m), (n), (sf) }
static int vdd_gx_corner[] = {
RPMH_REGULATOR_LEVEL_OFF, /* VDD_GX_NONE */
@@ -67,6 +71,7 @@
P_GPU_CC_PLL1_OUT_EVEN,
P_GPU_CC_PLL1_OUT_MAIN,
P_GPU_CC_PLL1_OUT_ODD,
+ P_CRC_DIV,
};
static const struct parent_map gpu_cc_parent_map_0[] = {
@@ -107,8 +112,28 @@
"core_bi_pll_test_se",
};
+static const struct parent_map gpu_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_CRC_DIV, 1 },
+ { P_GPU_CC_PLL0_OUT_ODD, 2 },
+ { P_GPU_CC_PLL1_OUT_EVEN, 3 },
+ { P_GPU_CC_PLL1_OUT_ODD, 4 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gpu_cc_parent_names_2[] = {
+ "bi_tcxo",
+ "crc_div",
+ "gpu_cc_pll0_out_odd",
+ "gpu_cc_pll1_out_even",
+ "gpu_cc_pll1_out_odd",
+ "gcc_gpu_gpll0_clk_src",
+ "core_bi_pll_test_se",
+};
+
static struct pll_vco fabia_vco[] = {
- { 250000000, 2000000000, 0 },
+ { 249600000, 2000000000, 0 },
{ 125000000, 1000000000, 1 },
};
@@ -117,6 +142,11 @@
.frac = 0x2aaa,
};
+static const struct pll_config gpu_cc_pll1_config = {
+ .l = 0x1a,
+ .frac = 0xaaaa,
+};
+
static struct clk_alpha_pll gpu_cc_pll0 = {
.offset = 0x0,
.vco_table = fabia_vco,
@@ -160,6 +190,26 @@
},
};
+static struct clk_alpha_pll gpu_cc_pll1 = {
+ .offset = 0x100,
+ .vco_table = fabia_vco,
+ .num_vco = ARRAY_SIZE(fabia_vco),
+ .type = FABIA_PLL,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_pll1",
+ .parent_names = (const char *[]){ "bi_tcxo" },
+ .num_parents = 1,
+ .ops = &clk_fabia_pll_ops,
+ VDD_MX_FMAX_MAP4(
+ MIN, 615000000,
+ LOW, 1066000000,
+ LOW_L1, 1600000000,
+ NOMINAL, 2000000000),
+ },
+ },
+};
+
static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
F(19200000, P_BI_TCXO, 1, 0, 0),
F(200000000, P_GPLL0_OUT_MAIN_DIV, 1.5, 0, 0),
@@ -167,6 +217,13 @@
{ }
};
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src_sdm845_v2[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN_DIV, 1.5, 0, 0),
+ F(500000000, P_GPU_CC_PLL1_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
static struct clk_rcg2 gpu_cc_gmu_clk_src = {
.cmd_rcgr = 0x1120,
.mnd_width = 0,
@@ -186,12 +243,39 @@
},
};
+static struct clk_fixed_factor crc_div = {
+ .mult = 1,
+ .div = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "crc_div",
+ .parent_names = (const char *[]){ "gpu_cc_pll0_out_even" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
static const struct freq_tbl ftbl_gpu_cc_gx_gfx3d_clk_src[] = {
- F_SLEW(147000000, P_GPU_CC_PLL0_OUT_EVEN, 1, 0, 0, 294000000),
- F_SLEW(210000000, P_GPU_CC_PLL0_OUT_EVEN, 1, 0, 0, 420000000),
- F_SLEW(338000000, P_GPU_CC_PLL0_OUT_EVEN, 1, 0, 0, 676000000),
- F_SLEW(425000000, P_GPU_CC_PLL0_OUT_EVEN, 1, 0, 0, 850000000),
- F_SLEW(600000000, P_GPU_CC_PLL0_OUT_EVEN, 1, 0, 0, 1200000000),
+ F(147000000, P_CRC_DIV, 1, 0, 0),
+ F(210000000, P_CRC_DIV, 1, 0, 0),
+ F(280000000, P_CRC_DIV, 1, 0, 0),
+ F(338000000, P_CRC_DIV, 1, 0, 0),
+ F(425000000, P_CRC_DIV, 1, 0, 0),
+ F(487000000, P_CRC_DIV, 1, 0, 0),
+ F(548000000, P_CRC_DIV, 1, 0, 0),
+ F(600000000, P_CRC_DIV, 1, 0, 0),
+ { }
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gx_gfx3d_clk_src_sdm845_v2[] = {
+ F(180000000, P_CRC_DIV, 1, 0, 0),
+ F(257000000, P_CRC_DIV, 1, 0, 0),
+ F(342000000, P_CRC_DIV, 1, 0, 0),
+ F(414000000, P_CRC_DIV, 1, 0, 0),
+ F(520000000, P_CRC_DIV, 1, 0, 0),
+ F(596000000, P_CRC_DIV, 1, 0, 0),
+ F(675000000, P_CRC_DIV, 1, 0, 0),
+ F(710000000, P_CRC_DIV, 1, 0, 0),
{ }
};
@@ -199,12 +283,12 @@
.cmd_rcgr = 0x101c,
.mnd_width = 0,
.hid_width = 5,
- .parent_map = gpu_cc_parent_map_1,
+ .parent_map = gpu_cc_parent_map_2,
.freq_tbl = ftbl_gpu_cc_gx_gfx3d_clk_src,
.flags = FORCE_ENABLE_RCG,
.clkr.hw.init = &(struct clk_init_data){
.name = "gpu_cc_gx_gfx3d_clk_src",
- .parent_names = gpu_cc_parent_names_1,
+ .parent_names = gpu_cc_parent_names_2,
.num_parents = 7,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
@@ -470,10 +554,11 @@
[GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr,
[GPU_CC_GX_VSENSE_CLK] = &gpu_cc_gx_vsense_clk.clkr,
[GPU_CC_PLL_TEST_CLK] = &gpu_cc_pll_test_clk.clkr,
+ [GPU_CC_PLL0] = &gpu_cc_pll0.clkr,
+ [GPU_CC_PLL1] = NULL,
};
static struct clk_regmap *gpu_cc_gfx_sdm845_clocks[] = {
- [GPU_CC_PLL0] = &gpu_cc_pll0.clkr,
[GPU_CC_PLL0_OUT_EVEN] = &gpu_cc_pll0_out_even.clkr,
[GPU_CC_GX_GFX3D_CLK_SRC] = &gpu_cc_gx_gfx3d_clk_src.clkr,
[GPU_CC_GX_GFX3D_CLK] = &gpu_cc_gx_gfx3d_clk.clkr,
@@ -513,16 +598,77 @@
static const struct of_device_id gpu_cc_sdm845_match_table[] = {
{ .compatible = "qcom,gpucc-sdm845" },
+ { .compatible = "qcom,gpucc-sdm845-v2" },
{ }
};
MODULE_DEVICE_TABLE(of, gpu_cc_sdm845_match_table);
static const struct of_device_id gpu_cc_gfx_sdm845_match_table[] = {
{ .compatible = "qcom,gfxcc-sdm845" },
+ { .compatible = "qcom,gfxcc-sdm845-v2" },
{},
};
MODULE_DEVICE_TABLE(of, gpu_cc_gfx_sdm845_match_table);
+static void gpu_cc_sdm845_fixup_sdm845v2(struct regmap *regmap)
+{
+ gpu_cc_sdm845_clocks[GPU_CC_PLL1] = &gpu_cc_pll1.clkr;
+ clk_fabia_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
+
+ gpu_cc_gmu_clk_src.freq_tbl = ftbl_gpu_cc_gmu_clk_src_sdm845_v2;
+ gpu_cc_gmu_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] = 500000000;
+}
+
+static void gpu_cc_gfx_sdm845_fixup_sdm845v2(void)
+{
+ gpu_cc_gx_gfx3d_clk_src.freq_tbl =
+ ftbl_gpu_cc_gx_gfx3d_clk_src_sdm845_v2;
+ gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_MIN] = 180000000;
+ gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_LOWER] =
+ 257000000;
+ gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_LOW] = 342000000;
+ gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_LOW_L1] =
+ 414000000;
+ gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_NOMINAL] =
+ 520000000;
+ gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_NOMINAL_L1] =
+ 596000000;
+ gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_HIGH] = 675000000;
+ gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_HIGH_L1] =
+ 710000000;
+}
+
+static int gpu_cc_gfx_sdm845_fixup(struct platform_device *pdev)
+{
+ const char *compat = NULL;
+ int compatlen = 0;
+
+ compat = of_get_property(pdev->dev.of_node, "compatible", &compatlen);
+ if (!compat || (compatlen <= 0))
+ return -EINVAL;
+
+ if (!strcmp(compat, "qcom,gfxcc-sdm845-v2"))
+ gpu_cc_gfx_sdm845_fixup_sdm845v2();
+
+ return 0;
+}
+
+static int gpu_cc_sdm845_fixup(struct platform_device *pdev,
+ struct regmap *regmap)
+{
+ const char *compat = NULL;
+ int compatlen = 0;
+
+ compat = of_get_property(pdev->dev.of_node, "compatible", &compatlen);
+ if (!compat || (compatlen <= 0))
+ return -EINVAL;
+
+ if (!strcmp(compat, "qcom,gpucc-sdm845-v2"))
+ gpu_cc_sdm845_fixup_sdm845v2(regmap);
+
+ return 0;
+}
+
static int gpu_cc_gfx_sdm845_probe(struct platform_device *pdev)
{
struct regmap *regmap;
@@ -532,16 +678,23 @@
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
- dev_err(&pdev->dev, "Failed to get resources for clock_gfxcc.\n");
+ dev_err(&pdev->dev, "Failed to get resources for clock_gfxcc\n");
return -EINVAL;
}
base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (IS_ERR(base)) {
- dev_err(&pdev->dev, "Failed to ioremap the GFX CC base.\n");
+ dev_err(&pdev->dev, "Failed to ioremap the GFX CC base\n");
return PTR_ERR(base);
}
+ /* Register clock fixed factor for CRC divide. */
+ ret = devm_clk_hw_register(&pdev->dev, &crc_div.hw);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register hardware clock\n");
+ return ret;
+ }
+
regmap = devm_regmap_init_mmio(&pdev->dev, base,
gpu_cc_gfx_sdm845_desc.config);
if (IS_ERR(regmap)) {
@@ -549,15 +702,6 @@
return PTR_ERR(regmap);
}
- /* Get MX voltage regulator for GPU PLL graphic clock. */
- vdd_mx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_mx");
- if (IS_ERR(vdd_mx.regulator[0])) {
- if (!(PTR_ERR(vdd_mx.regulator[0]) == -EPROBE_DEFER))
- dev_err(&pdev->dev,
- "Unable to get vdd_mx regulator\n");
- return PTR_ERR(vdd_mx.regulator[0]);
- }
-
/* GFX voltage regulators for GFX3D graphic clock. */
vdd_gfx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_gfx");
if (IS_ERR(vdd_gfx.regulator[0])) {
@@ -569,6 +713,12 @@
/* Avoid turning on the rail during clock registration */
vdd_gfx.skip_handoff = true;
+ ret = gpu_cc_gfx_sdm845_fixup(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to do GFX clock fixup\n");
+ return ret;
+ }
+
clk_fabia_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
ret = qcom_cc_really_probe(pdev, &gpu_cc_gfx_sdm845_desc, regmap);
@@ -577,7 +727,7 @@
return ret;
}
- dev_info(&pdev->dev, "Registered GFX CC clocks.\n");
+ dev_info(&pdev->dev, "Registered GFX CC clocks\n");
return ret;
}
@@ -594,7 +744,7 @@
{
return platform_driver_register(&gpu_cc_gfx_sdm845_driver);
}
-arch_initcall(gpu_cc_gfx_sdm845_init);
+subsys_initcall(gpu_cc_gfx_sdm845_init);
static void __exit gpu_cc_gfx_sdm845_exit(void)
{
@@ -606,6 +756,7 @@
{
struct regmap *regmap;
int ret = 0;
+ unsigned int value, mask;
regmap = qcom_cc_map(pdev, &gpu_cc_sdm845_desc);
if (IS_ERR(regmap))
@@ -620,13 +771,34 @@
return PTR_ERR(vdd_cx.regulator[0]);
}
+ /* Get MX voltage regulator for GPU PLL graphic clock. */
+ vdd_mx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_mx");
+ if (IS_ERR(vdd_mx.regulator[0])) {
+ if (!(PTR_ERR(vdd_mx.regulator[0]) == -EPROBE_DEFER))
+ dev_err(&pdev->dev,
+ "Unable to get vdd_mx regulator\n");
+ return PTR_ERR(vdd_mx.regulator[0]);
+ }
+
+ ret = gpu_cc_sdm845_fixup(pdev, regmap);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to do GPU CC clock fixup\n");
+ return ret;
+ }
+
ret = qcom_cc_really_probe(pdev, &gpu_cc_sdm845_desc, regmap);
if (ret) {
dev_err(&pdev->dev, "Failed to register GPU CC clocks\n");
return ret;
}
- dev_info(&pdev->dev, "Registered GPU CC clocks.\n");
+ mask = CX_GMU_CBCR_WAKE_MASK << CX_GMU_CBCR_WAKE_SHIFT;
+ mask |= CX_GMU_CBCR_SLEEP_MASK << CX_GMU_CBCR_SLEEP_SHIFT;
+ value = 0xF << CX_GMU_CBCR_WAKE_SHIFT | 0xF << CX_GMU_CBCR_SLEEP_SHIFT;
+ regmap_update_bits(regmap, gpu_cc_cx_gmu_clk.clkr.enable_reg,
+ mask, value);
+
+ dev_info(&pdev->dev, "Registered GPU CC clocks\n");
return ret;
}
@@ -643,7 +815,7 @@
{
return platform_driver_register(&gpu_cc_sdm845_driver);
}
-core_initcall(gpu_cc_sdm845_init);
+subsys_initcall(gpu_cc_sdm845_init);
static void __exit gpu_cc_sdm845_exit(void)
{
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
index 93ad1b0..eb6c658 100644
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
@@ -94,7 +94,6 @@
struct dsi_pll_regs {
u32 pll_prop_gain_rate;
- u32 pll_outdiv_rate;
u32 pll_lockdet_rate;
u32 decimal_div_start;
u32 frac_div_start_low;
@@ -134,6 +133,165 @@
struct dsi_pll_regs reg_setup;
};
+static inline int pll_reg_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ int rc = 0;
+ struct mdss_pll_resources *rsc = context;
+
+ rc = mdss_pll_resource_enable(rsc, true);
+ if (rc) {
+ pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+ return rc;
+ }
+
+ *val = MDSS_PLL_REG_R(rsc->pll_base, reg);
+ (void)mdss_pll_resource_enable(rsc, false);
+
+ return rc;
+}
+
+static inline int pll_reg_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ int rc = 0;
+ struct mdss_pll_resources *rsc = context;
+
+ rc = mdss_pll_resource_enable(rsc, true);
+ if (rc) {
+ pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+ return rc;
+ }
+
+ MDSS_PLL_REG_W(rsc->pll_base, reg, val);
+ (void)mdss_pll_resource_enable(rsc, false);
+
+ return rc;
+}
+
+static inline int phy_reg_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ int rc = 0;
+ struct mdss_pll_resources *rsc = context;
+
+ rc = mdss_pll_resource_enable(rsc, true);
+ if (rc) {
+ pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+ return rc;
+ }
+
+ *val = MDSS_PLL_REG_R(rsc->phy_base, reg);
+ (void)mdss_pll_resource_enable(rsc, false);
+
+ return rc;
+}
+
+static inline int phy_reg_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ int rc = 0;
+ struct mdss_pll_resources *rsc = context;
+
+ rc = mdss_pll_resource_enable(rsc, true);
+ if (rc) {
+ pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+ return rc;
+ }
+
+ MDSS_PLL_REG_W(rsc->phy_base, reg, val);
+ (void)mdss_pll_resource_enable(rsc, false);
+
+ return rc;
+}
+
+static inline int phy_reg_update_bits_sub(struct mdss_pll_resources *rsc,
+ unsigned int reg, unsigned int mask, unsigned int val)
+{
+ u32 reg_val;
+ int rc = 0;
+
+ reg_val = MDSS_PLL_REG_R(rsc->phy_base, reg);
+ reg_val &= ~mask;
+ reg_val |= (val & mask);
+ MDSS_PLL_REG_W(rsc->phy_base, reg, reg_val);
+
+ return rc;
+}
+
+static inline int phy_reg_update_bits(void *context, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ int rc = 0;
+ struct mdss_pll_resources *rsc = context;
+
+ rc = mdss_pll_resource_enable(rsc, true);
+ if (rc) {
+ pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = phy_reg_update_bits_sub(rsc, reg, mask, val);
+ if (!rc && rsc->slave)
+ rc = phy_reg_update_bits_sub(rsc->slave, reg, mask, val);
+ (void)mdss_pll_resource_enable(rsc, false);
+
+ return rc;
+}
+
+static inline int pclk_mux_read_sel(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ int rc = 0;
+ struct mdss_pll_resources *rsc = context;
+
+ rc = mdss_pll_resource_enable(rsc, true);
+ if (rc)
+ pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+ else
+ *val = (MDSS_PLL_REG_R(rsc->pll_base, reg) & 0x3);
+
+ (void)mdss_pll_resource_enable(rsc, false);
+ return rc;
+}
+
+
+static inline int pclk_mux_write_sel_sub(struct mdss_pll_resources *rsc,
+ unsigned int reg, unsigned int val)
+{
+ u32 reg_val;
+ int rc = 0;
+
+ reg_val = MDSS_PLL_REG_R(rsc->phy_base, reg);
+ reg_val &= ~0x03;
+ reg_val |= val;
+
+ MDSS_PLL_REG_W(rsc->phy_base, reg, reg_val);
+
+ return rc;
+}
+
+static inline int pclk_mux_write_sel(void *context, unsigned int reg,
+ unsigned int val)
+{
+ int rc = 0;
+ struct mdss_pll_resources *rsc = context;
+
+ rc = mdss_pll_resource_enable(rsc, true);
+ if (rc) {
+ pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = pclk_mux_write_sel_sub(rsc, reg, val);
+ if (!rc && rsc->slave)
+ rc = pclk_mux_write_sel_sub(rsc->slave, reg, val);
+
+ (void)mdss_pll_resource_enable(rsc, false);
+
+ return rc;
+}
+
static struct mdss_pll_resources *pll_rsc_db[DSI_PLL_MAX];
static struct dsi_pll_10nm plls[DSI_PLL_MAX];
@@ -203,54 +361,14 @@
{
struct dsi_pll_config *config = &pll->pll_configuration;
struct dsi_pll_regs *regs = &pll->reg_setup;
- u64 target_freq;
u64 fref = rsc->vco_ref_clk_rate;
- u32 computed_output_div, div_log = 0;
u64 pll_freq;
u64 divider;
u64 dec, dec_multiple;
u32 frac;
u64 multiplier;
- u32 i;
- target_freq = rsc->vco_current_rate;
- pr_debug("target_freq = %llu\n", target_freq);
-
- if (config->div_override) {
- computed_output_div = config->output_div;
-
- /*
- * Computed_output_div = 2 ^ div_log
- * To get div_log from output div just get the index of the
- * 1 bit in the value.
- * div_log ranges from 0-3. so check the 4 lsbs
- */
-
- for (i = 0; i < 4; i++) {
- if (computed_output_div & (1 << i)) {
- div_log = i;
- break;
- }
- }
-
- } else {
- if (target_freq < MHZ_250) {
- computed_output_div = 8;
- div_log = 3;
- } else if (target_freq < MHZ_500) {
- computed_output_div = 4;
- div_log = 2;
- } else if (target_freq < MHZ_1000) {
- computed_output_div = 2;
- div_log = 1;
- } else {
- computed_output_div = 1;
- div_log = 0;
- }
- }
- pr_debug("computed_output_div = %d\n", computed_output_div);
-
- pll_freq = target_freq * computed_output_div;
+ pll_freq = rsc->vco_current_rate;
if (config->disable_prescaler)
divider = fref;
@@ -274,7 +392,6 @@
else
regs->pll_clock_inverters = 0;
- regs->pll_outdiv_rate = div_log;
regs->pll_lockdet_rate = config->lock_timer;
regs->decimal_div_start = dec;
regs->frac_div_start_low = (frac & 0xff);
@@ -394,7 +511,6 @@
MDSS_PLL_REG_W(pll_base, PLL_FRAC_DIV_START_HIGH_1,
reg->frac_div_start_high);
MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCKDET_RATE_1, 0x40);
- MDSS_PLL_REG_W(pll_base, PLL_PLL_OUTDIV_RATE, reg->pll_outdiv_rate);
MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCK_DELAY, 0x06);
MDSS_PLL_REG_W(pll_base, PLL_CMODE, 0x10);
MDSS_PLL_REG_W(pll_base, PLL_CLOCK_INVERTERS, reg->pll_clock_inverters);
@@ -605,7 +721,9 @@
}
pll->cached_cfg0 = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
pll->cached_cfg1 = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG1);
- pr_debug("cfg0=%d,cfg1=%d\n", pll->cached_cfg0, pll->cached_cfg1);
+ pll->cached_outdiv = MDSS_PLL_REG_R(pll->pll_base, PLL_PLL_OUTDIV_RATE);
+ pr_debug("cfg0=%d,cfg1=%d, outdiv=%d\n", pll->cached_cfg0,
+ pll->cached_cfg1, pll->cached_outdiv);
pll->vco_cached_rate = clk_hw_get_rate(hw);
dsi_pll_disable(vco);
@@ -646,6 +764,8 @@
pll->cached_cfg0);
MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG1,
pll->cached_cfg1);
+ MDSS_PLL_REG_W(pll->pll_base, PLL_PLL_OUTDIV_RATE,
+ pll->cached_outdiv);
}
rc = dsi_pll_enable(vco);
@@ -855,176 +975,6 @@
return rc;
}
-static int post_vco_clk_get_div(void *context, unsigned int reg,
- unsigned int *div)
-{
- int rc;
- struct mdss_pll_resources *pll = context;
- u32 reg_val;
-
- rc = mdss_pll_resource_enable(pll, true);
- if (rc) {
- pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
- return rc;
- }
-
- reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG1);
- reg_val &= 0x3;
-
- if (reg_val == 2)
- *div = 1;
- else if (reg_val == 3)
- *div = 4;
- else
- *div = 1;
-
- /**
- *Common clock framework the divider value is interpreted as one less
- * hence we return one less for all dividers except when zero
- */
- if (*div != 0)
- *div -= 1;
-
- (void)mdss_pll_resource_enable(pll, false);
-
- return rc;
-}
-
-static int post_vco_clk_set_div_sub(struct mdss_pll_resources *pll, int div)
-{
- u32 reg_val;
- int rc = 0;
-
- reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG1);
- reg_val &= ~0x03;
- if (div == 1) {
- reg_val |= 0x2;
- } else if (div == 4) {
- reg_val |= 0x3;
- } else {
- rc = -EINVAL;
- pr_err("unsupported divider %d\n", div);
- goto error;
- }
-
- MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG1, reg_val);
-
-error:
- return rc;
-}
-
-static int post_vco_clk_set_div(void *context, unsigned int reg,
- unsigned int div)
-{
- int rc = 0;
- struct mdss_pll_resources *pll = context;
-
- rc = mdss_pll_resource_enable(pll, true);
- if (rc) {
- pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
- return rc;
- }
-
- /**
- * In common clock framework the divider value provided is one less and
- * and hence adjusting the divider value by one prior to writing it to
- * hardware
- */
- div++;
- rc = post_vco_clk_set_div_sub(pll, div);
- if (!rc && pll->slave)
- rc = post_vco_clk_set_div_sub(pll->slave, div);
-
- (void)mdss_pll_resource_enable(pll, false);
-
- return rc;
-}
-
-static int post_bit_clk_get_div(void *context, unsigned int reg,
- unsigned int *div)
-{
- int rc;
- struct mdss_pll_resources *pll = context;
- u32 reg_val;
-
- rc = mdss_pll_resource_enable(pll, true);
- if (rc) {
- pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
- return rc;
- }
-
- reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG1);
- reg_val &= 0x3;
-
- if (reg_val == 0)
- *div = 1;
- else if (reg_val == 1)
- *div = 2;
- else
- *div = 1;
-
- /**
- *Common clock framework the divider value is interpreted as one less
- * hence we return one less for all dividers except when zero
- */
- if (*div != 0)
- *div -= 1;
-
- (void)mdss_pll_resource_enable(pll, false);
-
- return rc;
-}
-
-static int post_bit_clk_set_div_sub(struct mdss_pll_resources *pll, int div)
-{
- int rc = 0;
- u32 reg_val;
-
- reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG1);
- reg_val &= ~0x03;
- if (div == 1) {
- reg_val |= 0x0;
- } else if (div == 2) {
- reg_val |= 0x1;
- } else {
- rc = -EINVAL;
- pr_err("unsupported divider %d\n", div);
- goto error;
- }
-
- MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG1, reg_val);
-
-error:
- return rc;
-}
-
-static int post_bit_clk_set_div(void *context, unsigned int reg,
- unsigned int div)
-{
- int rc = 0;
- struct mdss_pll_resources *pll = context;
-
- rc = mdss_pll_resource_enable(pll, true);
- if (rc) {
- pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
- return rc;
- }
-
- /**
- * In common clock framework the divider value provided is one less and
- * and hence adjusting the divider value by one prior to writing it to
- * hardware
- */
- div++;
- rc = post_bit_clk_set_div_sub(pll, div);
- if (!rc && pll->slave)
- rc = post_bit_clk_set_div_sub(pll->slave, div);
-
- (void)mdss_pll_resource_enable(pll, false);
-
- return rc;
-}
-
static struct regmap_config dsi_pll_10nm_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -1032,14 +982,14 @@
.max_register = 0x7c0,
};
-static struct regmap_bus post_vco_regmap_bus = {
- .reg_write = post_vco_clk_set_div,
- .reg_read = post_vco_clk_get_div,
+static struct regmap_bus pll_regmap_bus = {
+ .reg_write = pll_reg_write,
+ .reg_read = pll_reg_read,
};
-static struct regmap_bus post_bit_regmap_bus = {
- .reg_write = post_bit_clk_set_div,
- .reg_read = post_bit_clk_get_div,
+static struct regmap_bus pclk_mux_regmap_bus = {
+ .reg_read = phy_reg_read,
+ .reg_write = pclk_mux_write_sel,
};
static struct regmap_bus pclk_src_regmap_bus = {
@@ -1073,23 +1023,30 @@
* | vco_clk |
* +-------+-------+
* |
- * +--------------------------------------+
- * | |
- * +-------v-------+ |
- * | bitclk_src | |
- * | DIV(1..15) | |
- * +-------+-------+ |
- * | |
- * +--------------------+ |
- * Shadow Path | | |
- * + +-------v-------+ +------v------+ +------v-------+
- * | | byteclk_src | |post_bit_div | |post_vco_div |
- * | | DIV(8) | |DIV(1,2) | |DIV(1,4) |
- * | +-------+-------+ +------+------+ +------+-------+
- * | | | |
- * | | +------+ +----+
- * | +--------+ | |
- * | | +----v-----v------+
+ * |
+ * +---------------+
+ * | pll_out_div |
+ * | DIV(1,2,4,8) |
+ * +-------+-------+
+ * |
+ * +-----------------------------+--------+
+ * | | |
+ * +-------v-------+ | |
+ * | bitclk_src | | |
+ * | DIV(1..15) | | |
+ * +-------+-------+ | |
+ * | | |
+ * +----------+---------+ | |
+ * Shadow Path | | | | |
+ * + +-------v-------+ | +------v------+ | +------v-------+
+ * | | byteclk_src | | |post_bit_div | | |post_vco_div |
+ * | | DIV(8) | | |DIV (2) | | |DIV(4) |
+ * | +-------+-------+ | +------+------+ | +------+-------+
+ * | | | | | | |
+ * | | | +------+ | |
+ * | | +-------------+ | | +----+
+ * | +--------+ | | | |
+ * | | +-v--v-v---v------+
* +-v---------v----+ \ pclk_src_mux /
* \ byteclk_mux / \ /
* \ / +-----+-----+
@@ -1140,13 +1097,45 @@
},
};
+static struct clk_regmap_div dsi0pll_pll_out_div = {
+ .reg = PLL_PLL_OUTDIV_RATE,
+ .shift = 0,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi0pll_pll_out_div",
+ .parent_names = (const char *[]){"dsi0pll_vco_clk"},
+ .num_parents = 1,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .ops = &clk_regmap_div_ops,
+ },
+ },
+};
+
+static struct clk_regmap_div dsi1pll_pll_out_div = {
+ .reg = PLL_PLL_OUTDIV_RATE,
+ .shift = 0,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1pll_pll_out_div",
+ .parent_names = (const char *[]){"dsi1pll_vco_clk"},
+ .num_parents = 1,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .ops = &clk_regmap_div_ops,
+ },
+ },
+};
+
static struct clk_regmap_div dsi0pll_bitclk_src = {
.shift = 0,
.width = 4,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "dsi0pll_bitclk_src",
- .parent_names = (const char *[]){"dsi0pll_vco_clk"},
+ .parent_names = (const char *[]){"dsi0pll_pll_out_div"},
.num_parents = 1,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_regmap_div_ops,
@@ -1160,7 +1149,7 @@
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "dsi1pll_bitclk_src",
- .parent_names = (const char *[]){"dsi1pll_vco_clk"},
+ .parent_names = (const char *[]){"dsi1pll_pll_out_div"},
.num_parents = 1,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_regmap_div_ops,
@@ -1168,31 +1157,27 @@
},
};
-static struct clk_regmap_div dsi0pll_post_vco_div = {
- .shift = 0,
- .width = 2,
- .clkr = {
- .hw.init = &(struct clk_init_data){
- .name = "dsi0pll_post_vco_div",
- .parent_names = (const char *[]){"dsi0pll_vco_clk"},
- .num_parents = 1,
- .flags = CLK_GET_RATE_NOCACHE,
- .ops = &clk_regmap_div_ops,
- },
+static struct clk_fixed_factor dsi0pll_post_vco_div = {
+ .div = 4,
+ .mult = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi0pll_post_vco_div",
+ .parent_names = (const char *[]){"dsi0pll_pll_out_div"},
+ .num_parents = 1,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .ops = &clk_fixed_factor_ops,
},
};
-static struct clk_regmap_div dsi1pll_post_vco_div = {
- .shift = 0,
- .width = 2,
- .clkr = {
- .hw.init = &(struct clk_init_data){
- .name = "dsi1pll_post_vco_div",
- .parent_names = (const char *[]){"dsi1pll_vco_clk"},
- .num_parents = 1,
- .flags = CLK_GET_RATE_NOCACHE,
- .ops = &clk_regmap_div_ops,
- },
+static struct clk_fixed_factor dsi1pll_post_vco_div = {
+ .div = 4,
+ .mult = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1pll_post_vco_div",
+ .parent_names = (const char *[]){"dsi1pll_pll_out_div"},
+ .num_parents = 1,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .ops = &clk_fixed_factor_ops,
},
};
@@ -1220,31 +1205,27 @@
},
};
-static struct clk_regmap_div dsi0pll_post_bit_div = {
- .shift = 0,
- .width = 1,
- .clkr = {
- .hw.init = &(struct clk_init_data){
- .name = "dsi0pll_post_bit_div",
- .parent_names = (const char *[]){"dsi0pll_bitclk_src"},
- .num_parents = 1,
- .flags = CLK_GET_RATE_NOCACHE,
- .ops = &clk_regmap_div_ops,
- },
+static struct clk_fixed_factor dsi0pll_post_bit_div = {
+ .div = 2,
+ .mult = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi0pll_post_bit_div",
+ .parent_names = (const char *[]){"dsi0pll_bitclk_src"},
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE,
+ .ops = &clk_fixed_factor_ops,
},
};
-static struct clk_regmap_div dsi1pll_post_bit_div = {
- .shift = 0,
- .width = 1,
- .clkr = {
- .hw.init = &(struct clk_init_data){
- .name = "dsi1pll_post_bit_div",
- .parent_names = (const char *[]){"dsi1pll_bitclk_src"},
- .num_parents = 1,
- .flags = CLK_GET_RATE_NOCACHE,
- .ops = &clk_regmap_div_ops,
- },
+static struct clk_fixed_factor dsi1pll_post_bit_div = {
+ .div = 2,
+ .mult = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1pll_post_bit_div",
+ .parent_names = (const char *[]){"dsi1pll_bitclk_src"},
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE,
+ .ops = &clk_fixed_factor_ops,
},
};
@@ -1277,30 +1258,36 @@
};
static struct clk_regmap_mux dsi0pll_pclk_src_mux = {
+ .reg = PHY_CMN_CLK_CFG1,
.shift = 0,
- .width = 1,
+ .width = 2,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "dsi0pll_pclk_src_mux",
- .parent_names = (const char *[]){"dsi0pll_post_bit_div",
- "dsi0pll_post_vco_div"},
- .num_parents = 2,
- .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .parent_names = (const char *[]){"dsi0pll_bitclk_src",
+ "dsi0pll_post_bit_div",
+ "dsi0pll_pll_out_div",
+ "dsi0pll_post_vco_div"},
+ .num_parents = 4,
+ .flags = CLK_GET_RATE_NOCACHE,
.ops = &clk_regmap_mux_closest_ops,
},
},
};
static struct clk_regmap_mux dsi1pll_pclk_src_mux = {
+ .reg = PHY_CMN_CLK_CFG1,
.shift = 0,
- .width = 1,
+ .width = 2,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "dsi1pll_pclk_src_mux",
- .parent_names = (const char *[]){"dsi1pll_post_bit_div",
- "dsi1pll_post_vco_div"},
- .num_parents = 2,
- .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .parent_names = (const char *[]){"dsi1pll_bitclk_src",
+ "dsi1pll_post_bit_div",
+ "dsi1pll_pll_out_div",
+ "dsi1pll_post_vco_div"},
+ .num_parents = 4,
+ .flags = CLK_GET_RATE_NOCACHE,
.ops = &clk_regmap_mux_closest_ops,
},
},
@@ -1366,24 +1353,25 @@
static struct clk_hw *mdss_dsi_pllcc_10nm[] = {
[VCO_CLK_0] = &dsi0pll_vco_clk.hw,
+ [PLL_OUT_DIV_0_CLK] = &dsi0pll_pll_out_div.clkr.hw,
[BITCLK_SRC_0_CLK] = &dsi0pll_bitclk_src.clkr.hw,
[BYTECLK_SRC_0_CLK] = &dsi0pll_byteclk_src.hw,
- [POST_BIT_DIV_0_CLK] = &dsi0pll_post_bit_div.clkr.hw,
- [POST_VCO_DIV_0_CLK] = &dsi0pll_post_vco_div.clkr.hw,
+ [POST_BIT_DIV_0_CLK] = &dsi0pll_post_bit_div.hw,
+ [POST_VCO_DIV_0_CLK] = &dsi0pll_post_vco_div.hw,
[BYTECLK_MUX_0_CLK] = &dsi0pll_byteclk_mux.clkr.hw,
[PCLK_SRC_MUX_0_CLK] = &dsi0pll_pclk_src_mux.clkr.hw,
[PCLK_SRC_0_CLK] = &dsi0pll_pclk_src.clkr.hw,
[PCLK_MUX_0_CLK] = &dsi0pll_pclk_mux.clkr.hw,
[VCO_CLK_1] = &dsi1pll_vco_clk.hw,
+ [PLL_OUT_DIV_1_CLK] = &dsi1pll_pll_out_div.clkr.hw,
[BITCLK_SRC_1_CLK] = &dsi1pll_bitclk_src.clkr.hw,
[BYTECLK_SRC_1_CLK] = &dsi1pll_byteclk_src.hw,
- [POST_BIT_DIV_1_CLK] = &dsi1pll_post_bit_div.clkr.hw,
- [POST_VCO_DIV_1_CLK] = &dsi1pll_post_vco_div.clkr.hw,
+ [POST_BIT_DIV_1_CLK] = &dsi1pll_post_bit_div.hw,
+ [POST_VCO_DIV_1_CLK] = &dsi1pll_post_vco_div.hw,
[BYTECLK_MUX_1_CLK] = &dsi1pll_byteclk_mux.clkr.hw,
[PCLK_SRC_MUX_1_CLK] = &dsi1pll_pclk_src_mux.clkr.hw,
[PCLK_SRC_1_CLK] = &dsi1pll_pclk_src.clkr.hw,
[PCLK_MUX_1_CLK] = &dsi1pll_pclk_mux.clkr.hw,
-
};
int dsi_pll_clock_register_10nm(struct platform_device *pdev,
@@ -1428,13 +1416,10 @@
/* Establish client data */
if (ndx == 0) {
- rmap = devm_regmap_init(&pdev->dev, &post_vco_regmap_bus,
- pll_res, &dsi_pll_10nm_config);
- dsi0pll_post_vco_div.clkr.regmap = rmap;
- rmap = devm_regmap_init(&pdev->dev, &post_bit_regmap_bus,
+ rmap = devm_regmap_init(&pdev->dev, &pll_regmap_bus,
pll_res, &dsi_pll_10nm_config);
- dsi0pll_post_bit_div.clkr.regmap = rmap;
+ dsi0pll_pll_out_div.clkr.regmap = rmap;
rmap = devm_regmap_init(&pdev->dev, &bitclk_src_regmap_bus,
pll_res, &dsi_pll_10nm_config);
@@ -1448,10 +1433,9 @@
pll_res, &dsi_pll_10nm_config);
dsi0pll_pclk_mux.clkr.regmap = rmap;
- rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
+ rmap = devm_regmap_init(&pdev->dev, &pclk_mux_regmap_bus,
pll_res, &dsi_pll_10nm_config);
dsi0pll_pclk_src_mux.clkr.regmap = rmap;
-
rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
pll_res, &dsi_pll_10nm_config);
dsi0pll_byteclk_mux.clkr.regmap = rmap;
@@ -1475,13 +1459,9 @@
} else {
- rmap = devm_regmap_init(&pdev->dev, &post_vco_regmap_bus,
+ rmap = devm_regmap_init(&pdev->dev, &pll_regmap_bus,
pll_res, &dsi_pll_10nm_config);
- dsi1pll_post_vco_div.clkr.regmap = rmap;
-
- rmap = devm_regmap_init(&pdev->dev, &post_bit_regmap_bus,
- pll_res, &dsi_pll_10nm_config);
- dsi1pll_post_bit_div.clkr.regmap = rmap;
+ dsi1pll_pll_out_div.clkr.regmap = rmap;
rmap = devm_regmap_init(&pdev->dev, &bitclk_src_regmap_bus,
pll_res, &dsi_pll_10nm_config);
@@ -1491,14 +1471,13 @@
pll_res, &dsi_pll_10nm_config);
dsi1pll_pclk_src.clkr.regmap = rmap;
- rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
+ rmap = devm_regmap_init(&pdev->dev, &pclk_mux_regmap_bus,
pll_res, &dsi_pll_10nm_config);
dsi1pll_pclk_mux.clkr.regmap = rmap;
rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
pll_res, &dsi_pll_10nm_config);
dsi1pll_pclk_src_mux.clkr.regmap = rmap;
-
rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
pll_res, &dsi_pll_10nm_config);
dsi1pll_byteclk_mux.clkr.regmap = rmap;
diff --git a/drivers/clk/qcom/mdss/mdss-pll.h b/drivers/clk/qcom/mdss/mdss-pll.h
index 033462d..2f92270 100644
--- a/drivers/clk/qcom/mdss/mdss-pll.h
+++ b/drivers/clk/qcom/mdss/mdss-pll.h
@@ -99,6 +99,7 @@
unsigned long vco_cached_rate;
u32 cached_cfg0;
u32 cached_cfg1;
+ u32 cached_outdiv;
/* dsi/edp/hmdi pll interface type */
u32 pll_interface_type;
diff --git a/drivers/clk/qcom/videocc-sdm845.c b/drivers/clk/qcom/videocc-sdm845.c
index 362ea0b..ba4e591 100644
--- a/drivers/clk/qcom/videocc-sdm845.c
+++ b/drivers/clk/qcom/videocc-sdm845.c
@@ -402,7 +402,7 @@
{
return platform_driver_register(&video_cc_sdm845_driver);
}
-core_initcall(video_cc_sdm845_init);
+subsys_initcall(video_cc_sdm845_init);
static void __exit video_cc_sdm845_exit(void)
{
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index 8ca07fe..0cca360 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -556,7 +556,7 @@
0x12c, 0, 4, 24, 3, BIT(31),
CLK_SET_RATE_PARENT);
static SUNXI_CCU_M_WITH_MUX_GATE(lcd1_ch1_clk, "lcd1-ch1", lcd_ch1_parents,
- 0x12c, 0, 4, 24, 3, BIT(31),
+ 0x130, 0, 4, 24, 3, BIT(31),
CLK_SET_RATE_PARENT);
static const char * const csi_sclk_parents[] = { "pll-video0", "pll-video1",
diff --git a/drivers/cpufreq/cpu-boost.c b/drivers/cpufreq/cpu-boost.c
index 07603fe..e67f12b 100644
--- a/drivers/cpufreq/cpu-boost.c
+++ b/drivers/cpufreq/cpu-boost.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2015,2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -39,8 +39,8 @@
static unsigned int input_boost_ms = 40;
module_param(input_boost_ms, uint, 0644);
-static bool sched_boost_on_input;
-module_param(sched_boost_on_input, bool, 0644);
+static unsigned int sched_boost_on_input;
+module_param(sched_boost_on_input, uint, 0644);
static bool sched_boost_active;
@@ -209,8 +209,8 @@
update_policy_online();
/* Enable scheduler boost to migrate tasks to big cluster */
- if (sched_boost_on_input) {
- ret = sched_set_boost(1);
+ if (sched_boost_on_input > 0) {
+ ret = sched_set_boost(sched_boost_on_input);
if (ret)
pr_err("cpu-boost: HMP boost enable failed\n");
else
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index e8e16a5..0fe2518 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -169,8 +169,8 @@
int ret;
ret = sscanf(buf, "%u", &input);
- /* cannot be lower than 11 otherwise freq will not fall */
- if (ret != 1 || input < 11 || input > 100 ||
+ /* cannot be lower than 1 otherwise freq will not fall */
+ if (ret != 1 || input < 1 || input > 100 ||
input >= dbs_data->up_threshold)
return -EINVAL;
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
index 12eb6d8..a6edf2f 100644
--- a/drivers/cpufreq/cpufreq_interactive.c
+++ b/drivers/cpufreq/cpufreq_interactive.c
@@ -699,7 +699,8 @@
spin_lock_irqsave(&speedchange_cpumask_lock, flags);
cpumask_set_cpu(max_cpu, &speedchange_cpumask);
spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
- wake_up_process_no_notif(speedchange_task);
+
+ wake_up_process(speedchange_task);
rearm:
cpufreq_interactive_timer_resched(data, false);
@@ -814,7 +815,7 @@
spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);
if (anyboost)
- wake_up_process_no_notif(speedchange_task);
+ wake_up_process(speedchange_task);
}
static int load_change_callback(struct notifier_block *nb, unsigned long val,
@@ -1926,7 +1927,7 @@
get_task_struct(speedchange_task);
/* NB: wake up so the thread does not look hung to the freezer */
- wake_up_process_no_notif(speedchange_task);
+ wake_up_process(speedchange_task);
return cpufreq_register_governor(CPU_FREQ_GOV_INTERACTIVE);
}
diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
index d6d4257..5b2db3c 100644
--- a/drivers/cpufreq/s3c2416-cpufreq.c
+++ b/drivers/cpufreq/s3c2416-cpufreq.c
@@ -400,7 +400,6 @@
rate = clk_get_rate(s3c_freq->hclk);
if (rate < 133 * 1000 * 1000) {
pr_err("cpufreq: HCLK not at 133MHz\n");
- clk_put(s3c_freq->hclk);
ret = -EINVAL;
goto err_armclk;
}
diff --git a/drivers/cpuidle/lpm-levels-of.c b/drivers/cpuidle/lpm-levels-of.c
index ed239c4..39e0484 100644
--- a/drivers/cpuidle/lpm-levels-of.c
+++ b/drivers/cpuidle/lpm-levels-of.c
@@ -21,7 +21,6 @@
#include <linux/moduleparam.h>
#include "lpm-levels.h"
-bool use_psci;
enum lpm_type {
IDLE = 0,
SUSPEND,
@@ -306,6 +305,7 @@
struct lpm_level_avail *level_list = NULL;
char cpu_name[20] = {0};
int ret = 0;
+ struct list_head *pos;
cpu_kobj = devm_kzalloc(&lpm_pdev->dev, sizeof(*cpu_kobj) *
cpumask_weight(&p->child_cpus), GFP_KERNEL);
@@ -313,38 +313,45 @@
return -ENOMEM;
cpu_idx = 0;
- for_each_cpu(cpu, &p->child_cpus) {
- snprintf(cpu_name, sizeof(cpu_name), "cpu%d", cpu);
- cpu_kobj[cpu_idx] = kobject_create_and_add(cpu_name, parent);
- if (!cpu_kobj[cpu_idx]) {
- ret = -ENOMEM;
- goto release_kobj;
- }
+ list_for_each(pos, &p->cpu) {
+ struct lpm_cpu *lpm_cpu = list_entry(pos, struct lpm_cpu, list);
- level_list = devm_kzalloc(&lpm_pdev->dev,
- p->cpu->nlevels * sizeof(*level_list),
- GFP_KERNEL);
- if (!level_list) {
- ret = -ENOMEM;
- goto release_kobj;
- }
-
- /*
- * Skip enable/disable for WFI. cpuidle expects WFI to be
- * available at all times.
- */
- for (i = 1; i < p->cpu->nlevels; i++) {
-
- level_list[i].latency_us = p->levels[i].pwr.latency_us;
- ret = create_lvl_avail_nodes(p->cpu->levels[i].name,
- cpu_kobj[cpu_idx], &level_list[i],
- (void *)p->cpu, cpu, true);
- if (ret)
+ for_each_cpu(cpu, &lpm_cpu->related_cpus) {
+ snprintf(cpu_name, sizeof(cpu_name), "cpu%d", cpu);
+ cpu_kobj[cpu_idx] = kobject_create_and_add(cpu_name,
+ parent);
+ if (!cpu_kobj[cpu_idx]) {
+ ret = -ENOMEM;
goto release_kobj;
- }
+ }
- cpu_level_available[cpu] = level_list;
- cpu_idx++;
+ level_list = devm_kzalloc(&lpm_pdev->dev,
+ lpm_cpu->nlevels * sizeof(*level_list),
+ GFP_KERNEL);
+ if (!level_list) {
+ ret = -ENOMEM;
+ goto release_kobj;
+ }
+
+ /*
+ * Skip enable/disable for WFI. cpuidle expects WFI to
+ * be available at all times.
+ */
+ for (i = 1; i < lpm_cpu->nlevels; i++) {
+ level_list[i].latency_us =
+ p->levels[i].pwr.latency_us;
+ ret = create_lvl_avail_nodes(
+ lpm_cpu->levels[i].name,
+ cpu_kobj[cpu_idx],
+ &level_list[i],
+ (void *)lpm_cpu, cpu, true);
+ if (ret)
+ goto release_kobj;
+ }
+
+ cpu_level_available[cpu] = level_list;
+ cpu_idx++;
+ }
}
return ret;
@@ -385,7 +392,7 @@
return ret;
}
- if (p->cpu) {
+ if (!list_empty(&p->cpu)) {
ret = create_cpu_lvl_nodes(p, cluster_kobj);
if (ret)
return ret;
@@ -431,30 +438,27 @@
return ret;
}
- if (use_psci) {
- key = "qcom,psci-mode-shift";
- ret = of_property_read_u32(node, key,
- &c->psci_mode_shift);
- if (ret) {
- pr_err("%s(): Failed to read param: %s\n",
- __func__, key);
- return ret;
- }
+ key = "qcom,psci-mode-shift";
+ ret = of_property_read_u32(node, key,
+ &c->psci_mode_shift);
+ if (ret) {
+ pr_err("%s(): Failed to read param: %s\n",
+ __func__, key);
+ return ret;
+ }
- key = "qcom,psci-mode-mask";
- ret = of_property_read_u32(node, key,
- &c->psci_mode_mask);
- if (ret) {
- pr_err("%s(): Failed to read param: %s\n",
- __func__, key);
- return ret;
- }
+ key = "qcom,psci-mode-mask";
+ ret = of_property_read_u32(node, key,
+ &c->psci_mode_mask);
+ if (ret) {
+ pr_err("%s(): Failed to read param: %s\n",
+ __func__, key);
+ return ret;
+ }
- /* Set ndevice to 1 as default */
- c->ndevices = 1;
+ /* Set ndevice to 1 as default */
+ c->ndevices = 1;
- } else
- pr_warn("Target supports PSCI only\n");
return 0;
}
@@ -503,22 +507,14 @@
if (ret)
goto failed;
- if (use_psci) {
- char *k = "qcom,psci-mode";
+ key = "qcom,psci-mode";
- ret = of_property_read_u32(node, k, &level->psci_id);
- if (ret)
- goto failed;
-
- level->is_reset = of_property_read_bool(node, "qcom,is-reset");
- } else
- pr_warn("Build supports PSCI targets only");
-
- key = "label";
- ret = of_property_read_string(node, key, &level->level_name);
+ ret = of_property_read_u32(node, key, &level->psci_id);
if (ret)
goto failed;
+ level->is_reset = of_property_read_bool(node, "qcom,is-reset");
+
if (cluster->nlevels != cluster->default_level) {
key = "min child idx";
ret = of_property_read_u32(node, "qcom,min-child-idx",
@@ -531,10 +527,6 @@
}
level->notify_rpm = of_property_read_bool(node, "qcom,notify-rpm");
- level->disable_dynamic_routing = of_property_read_bool(node,
- "qcom,disable-dynamic-int-routing");
- level->last_core_only = of_property_read_bool(node,
- "qcom,last-core-only");
key = "parse_power_params";
ret = parse_power_params(node, &level->pwr);
@@ -569,20 +561,16 @@
return ret;
}
- if (use_psci) {
- key = "qcom,psci-cpu-mode";
+ key = "qcom,psci-cpu-mode";
+ ret = of_property_read_u32(n, key, &l->psci_id);
+ if (ret) {
+ pr_err("Failed reading %s on device %s\n", key,
+ n->name);
+ return ret;
+ }
+ key = "qcom,hyp-psci";
- ret = of_property_read_u32(n, key, &l->psci_id);
- if (ret) {
- pr_err("Failed reading %s on device %s\n", key,
- n->name);
- return ret;
- }
- key = "qcom,hyp-psci";
-
- l->hyp_psci = of_property_read_bool(n, key);
- } else
- pr_warn("Build supports PSCI targets only");
+ l->hyp_psci = of_property_read_bool(n, key);
return 0;
}
@@ -639,51 +627,26 @@
next_pwr->time_overhead_us : residency;
}
-static int parse_cpu_levels(struct device_node *node, struct lpm_cluster *c)
+static int parse_cpu(struct device_node *node, struct lpm_cpu *cpu)
{
+
struct device_node *n;
- int ret = -ENOMEM;
- int i, j;
- char *key;
-
- c->cpu = devm_kzalloc(&lpm_pdev->dev, sizeof(*c->cpu), GFP_KERNEL);
- if (!c->cpu)
- return ret;
-
- c->cpu->parent = c;
- if (use_psci) {
-
- key = "qcom,psci-mode-shift";
-
- ret = of_property_read_u32(node, key, &c->cpu->psci_mode_shift);
- if (ret) {
- pr_err("Failed reading %s on device %s\n", key,
- node->name);
- return ret;
- }
- key = "qcom,psci-mode-mask";
-
- ret = of_property_read_u32(node, key, &c->cpu->psci_mode_mask);
- if (ret) {
- pr_err("Failed reading %s on device %s\n", key,
- node->name);
- return ret;
- }
- }
+ int ret, i, j;
+ const char *key;
for_each_child_of_node(node, n) {
- struct lpm_cpu_level *l = &c->cpu->levels[c->cpu->nlevels];
+ struct lpm_cpu_level *l = &cpu->levels[cpu->nlevels];
- c->cpu->nlevels++;
+ cpu->nlevels++;
ret = parse_cpu_mode(n, l);
if (ret < 0) {
pr_info("Failed %s\n", l->name);
- goto failed;
+ return ret;
}
ret = parse_power_params(n, &l->pwr);
if (ret)
- goto failed;
+ return ret;
key = "qcom,use-broadcast-timer";
l->use_bc_timer = of_property_read_bool(n, key);
@@ -698,32 +661,83 @@
if (ret == -EINVAL)
l->reset_level = LPM_RESET_LVL_NONE;
else if (ret)
- goto failed;
+ return ret;
}
- for (i = 0; i < c->cpu->nlevels; i++) {
- for (j = 0; j < c->cpu->nlevels; j++) {
+ for (i = 0; i < cpu->nlevels; i++) {
+ for (j = 0; j < cpu->nlevels; j++) {
if (i >= j) {
- c->cpu->levels[i].pwr.residencies[j] = 0;
+ cpu->levels[i].pwr.residencies[j] = 0;
continue;
}
- c->cpu->levels[i].pwr.residencies[j] =
- calculate_residency(&c->cpu->levels[i].pwr,
- &c->cpu->levels[j].pwr);
+ cpu->levels[i].pwr.residencies[j] =
+ calculate_residency(&cpu->levels[i].pwr,
+ &cpu->levels[j].pwr);
pr_err("%s: idx %d %u\n", __func__, j,
- c->cpu->levels[i].pwr.residencies[j]);
+ cpu->levels[i].pwr.residencies[j]);
}
}
+ for_each_cpu(i, &cpu->related_cpus) {
+ per_cpu(max_residency, i) = devm_kzalloc(&lpm_pdev->dev,
+ sizeof(uint32_t) * cpu->nlevels,
+ GFP_KERNEL);
+ if (!per_cpu(max_residency, i))
+ return -ENOMEM;
+ per_cpu(min_residency, i) = devm_kzalloc(
+ &lpm_pdev->dev,
+ sizeof(uint32_t) * cpu->nlevels,
+ GFP_KERNEL);
+ if (!per_cpu(min_residency, i))
+ return -ENOMEM;
+ set_optimum_cpu_residency(cpu, i, true);
+ }
return 0;
-failed:
- for (i = 0; i < c->cpu->nlevels; i++) {
- kfree(c->cpu->levels[i].name);
- c->cpu->levels[i].name = NULL;
+}
+
+static int parse_cpu_levels(struct device_node *node, struct lpm_cluster *c)
+{
+ int ret = -ENOMEM, i;
+ char *key;
+ struct lpm_cpu *cpu;
+
+ cpu = devm_kzalloc(&lpm_pdev->dev, sizeof(*cpu), GFP_KERNEL);
+ if (!cpu)
+ return ret;
+
+ if (get_cpumask_for_node(node, &cpu->related_cpus))
+ return -EINVAL;
+
+ cpu->parent = c;
+
+ key = "qcom,psci-mode-shift";
+ ret = of_property_read_u32(node, key, &cpu->psci_mode_shift);
+ if (ret) {
+ pr_err("Failed reading %s on device %s\n", key,
+ node->name);
+ return ret;
}
- kfree(c->cpu);
- c->cpu = NULL;
+ key = "qcom,psci-mode-mask";
+
+ ret = of_property_read_u32(node, key, &cpu->psci_mode_mask);
+ if (ret) {
+ pr_err("Failed reading %s on device %s\n", key,
+ node->name);
+ return ret;
+ }
+
+ if (parse_cpu(node, cpu))
+ goto failed;
+ cpumask_or(&c->child_cpus, &c->child_cpus, &cpu->related_cpus);
+ list_add(&cpu->list, &c->cpu);
+ return 0;
+failed:
+ for (i = 0; i < cpu->nlevels; i++) {
+ kfree(cpu->levels[i].name);
+ cpu->levels[i].name = NULL;
+ }
+ kfree(cpu);
pr_err("%s(): Failed with error code:%d\n", __func__, ret);
return ret;
}
@@ -731,6 +745,7 @@
void free_cluster_node(struct lpm_cluster *cluster)
{
struct list_head *list;
+ struct lpm_cpu *cpu, *n;
int i;
list_for_each(list, &cluster->child) {
@@ -741,22 +756,21 @@
free_cluster_node(n);
};
- if (cluster->cpu) {
- for (i = 0; i < cluster->cpu->nlevels; i++) {
- kfree(cluster->cpu->levels[i].name);
- cluster->cpu->levels[i].name = NULL;
+ list_for_each_entry_safe(cpu, n, &cluster->cpu, list) {
+ struct lpm_cpu *cpu = list_entry(list, typeof(*cpu), list);
+
+ for (i = 0; i < cpu->nlevels; i++) {
+ kfree(cpu->levels[i].name);
+ cpu->levels[i].name = NULL;
}
+ list_del(list);
}
for (i = 0; i < cluster->nlevels; i++) {
kfree(cluster->levels[i].mode);
cluster->levels[i].mode = NULL;
}
- kfree(cluster->cpu);
kfree(cluster->name);
- kfree(cluster->lpm_dev);
- cluster->cpu = NULL;
cluster->name = NULL;
- cluster->lpm_dev = NULL;
cluster->ndevices = 0;
}
@@ -785,6 +799,7 @@
goto failed_parse_params;
INIT_LIST_HEAD(&c->child);
+ INIT_LIST_HEAD(&c->cpu);
c->parent = parent;
spin_lock_init(&c->sync_lock);
c->min_child_level = NR_LPM_LEVELS;
@@ -795,7 +810,6 @@
continue;
key = "qcom,pm-cluster-level";
if (!of_node_cmp(n->name, key)) {
- WARN_ON(!use_psci && c->no_saw_devices);
if (parse_cluster_level(n, c))
goto failed_parse_cluster;
continue;
@@ -805,7 +819,6 @@
if (!of_node_cmp(n->name, key)) {
struct lpm_cluster *child;
- WARN_ON(!use_psci && c->no_saw_devices);
child = parse_cluster(n, c);
if (!child)
goto failed_parse_cluster;
@@ -819,34 +832,11 @@
key = "qcom,pm-cpu";
if (!of_node_cmp(n->name, key)) {
- /*
- * Parse the the cpu node only if a pm-cpu node
- * is available, though the mask is defined @ the
- * cluster level
- */
- if (get_cpumask_for_node(node, &c->child_cpus))
- goto failed_parse_cluster;
-
if (parse_cpu_levels(n, c))
goto failed_parse_cluster;
c->aff_level = 1;
- for_each_cpu(i, &c->child_cpus) {
- per_cpu(max_residency, i) = devm_kzalloc(
- &lpm_pdev->dev,
- sizeof(uint32_t) * c->cpu->nlevels,
- GFP_KERNEL);
- if (!per_cpu(max_residency, i))
- return ERR_PTR(-ENOMEM);
- per_cpu(min_residency, i) = devm_kzalloc(
- &lpm_pdev->dev,
- sizeof(uint32_t) * c->cpu->nlevels,
- GFP_KERNEL);
- if (!per_cpu(min_residency, i))
- return ERR_PTR(-ENOMEM);
- set_optimum_cpu_residency(c->cpu, i, true);
- }
}
}
@@ -883,8 +873,6 @@
{
struct device_node *top = NULL;
- use_psci = of_property_read_bool(pdev->dev.of_node, "qcom,use-psci");
-
top = of_find_node_by_name(pdev->dev.of_node, "qcom,pm-cluster");
if (!top) {
pr_err("Failed to find root node\n");
@@ -898,6 +886,7 @@
void cluster_dt_walkthrough(struct lpm_cluster *cluster)
{
struct list_head *list;
+ struct lpm_cpu *cpu;
int i, j;
static int id;
char str[10] = {0};
@@ -918,12 +907,12 @@
&cluster->name[j], &l->mode[i]);
}
- if (cluster->cpu) {
+ list_for_each_entry(cpu, &cluster->cpu, list) {
pr_info("%d\n", __LINE__);
- for (j = 0; j < cluster->cpu->nlevels; j++)
+ for (j = 0; j < cpu->nlevels; j++)
pr_info("%s\tCPU mode: %s id:%d\n", str,
- cluster->cpu->levels[j].name,
- cluster->cpu->levels[j].mode);
+ cpu->levels[j].name,
+ cpu->levels[j].mode);
}
id++;
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index 8b59bee..5633a8f 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -53,10 +53,9 @@
#include <trace/events/trace_msm_low_power.h>
#define SCLK_HZ (32768)
-#define SCM_HANDOFF_LOCK_ID "S:7"
#define PSCI_POWER_STATE(reset) (reset << 30)
#define PSCI_AFFINITY_LEVEL(lvl) ((lvl & 0x3) << 24)
-static remote_spinlock_t scm_handoff_lock;
+#define BIAS_HYST (bias_hyst * NSEC_PER_MSEC)
enum {
MSM_LPM_LVL_DBG_SUSPEND_LIMITS = BIT(0),
@@ -68,7 +67,8 @@
CPU_EXIT,
CLUSTER_ENTER,
CLUSTER_EXIT,
- PRE_PC_CB,
+ CPU_HP_STARTING,
+ CPU_HP_DYING,
};
struct lpm_debug {
@@ -94,6 +94,9 @@
static uint32_t tmr_add = 100;
module_param_named(tmr_add, tmr_add, uint, 0664);
+static uint32_t bias_hyst;
+module_param_named(bias_hyst, bias_hyst, uint, 0664);
+
struct lpm_history {
uint32_t resi[MAXSAMPLES];
int mode[MAXSAMPLES];
@@ -106,7 +109,7 @@
static DEFINE_PER_CPU(struct lpm_history, hist);
-static DEFINE_PER_CPU(struct lpm_cluster*, cpu_cluster);
+static DEFINE_PER_CPU(struct lpm_cpu*, cpu_lpm);
static bool suspend_in_progress;
static struct hrtimer lpm_hrtimer;
static struct hrtimer histtimer;
@@ -135,10 +138,16 @@
static bool sleep_disabled;
module_param_named(sleep_disabled, sleep_disabled, bool, 0664);
+/**
+ * msm_cpuidle_get_deep_idle_latency - Get deep idle latency value
+ *
+ * Returns an s32 latency value
+ */
s32 msm_cpuidle_get_deep_idle_latency(void)
{
return 10;
}
+EXPORT_SYMBOL(msm_cpuidle_get_deep_idle_latency);
void lpm_suspend_wake_time(uint64_t wakeup_time)
{
@@ -209,7 +218,7 @@
struct power_params *pwr_params;
struct lpm_cpu *cpu;
struct lpm_cluster *n;
- uint32_t latency = 0;
+ uint32_t lat = 0;
int i;
list_for_each(list, child) {
@@ -218,19 +227,21 @@
if (strcmp(lat_level->level_name, n->cluster_name))
continue;
}
- cpu = n->cpu;
- for (i = 0; i < cpu->nlevels; i++) {
- level = &cpu->levels[i];
- pwr_params = &level->pwr;
- if (lat_level->reset_level == level->reset_level) {
- if ((latency > pwr_params->latency_us)
- || (!latency))
- latency = pwr_params->latency_us;
- break;
+ list_for_each_entry(cpu, &n->cpu, list) {
+ for (i = 0; i < cpu->nlevels; i++) {
+ level = &cpu->levels[i];
+ pwr_params = &level->pwr;
+ if (lat_level->reset_level
+ == level->reset_level) {
+ if ((lat > pwr_params->latency_us)
+ || (!lat))
+ lat = pwr_params->latency_us;
+ break;
+ }
}
}
}
- return latency;
+ return lat;
}
static struct lpm_cluster *cluster_aff_match(struct lpm_cluster *cluster,
@@ -239,9 +250,9 @@
struct lpm_cluster *n;
if ((cluster->aff_level == affinity_level)
- || ((cluster->cpu) && (affinity_level == 0)))
+ || ((!list_empty(&cluster->cpu)) && (affinity_level == 0)))
return cluster;
- else if (!cluster->cpu) {
+ else if (list_empty(&cluster->cpu)) {
n = list_entry(cluster->child.next, typeof(*n), list);
return cluster_aff_match(n, affinity_level);
} else
@@ -316,16 +327,22 @@
static int lpm_dying_cpu(unsigned int cpu)
{
- struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
+ struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent;
+ update_debug_pc_event(CPU_HP_DYING, cpu,
+ cluster->num_children_in_sync.bits[0],
+ cluster->child_cpus.bits[0], false);
cluster_prepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false, 0);
return 0;
}
static int lpm_starting_cpu(unsigned int cpu)
{
- struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
+ struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent;
+ update_debug_pc_event(CPU_HP_STARTING, cpu,
+ cluster->num_children_in_sync.bits[0],
+ cluster->child_cpus.bits[0], false);
cluster_unprepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false, 0);
return 0;
}
@@ -378,7 +395,7 @@
static void clusttimer_cancel(void)
{
int cpu = raw_smp_processor_id();
- struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
+ struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent;
hrtimer_try_to_cancel(&cluster->histtimer);
@@ -414,22 +431,6 @@
hrtimer_start(&lpm_hrtimer, modified_ktime, HRTIMER_MODE_REL_PINNED);
}
-static int set_device_mode(struct lpm_cluster *cluster, int ndevice,
- struct lpm_cluster_level *level)
-{
- struct low_power_ops *ops;
-
- if (use_psci)
- return 0;
-
- ops = &cluster->lpm_dev[ndevice];
- if (ops && ops->set_mode)
- return ops->set_mode(ops, level->mode[ndevice],
- level->notify_rpm);
- else
- return -EINVAL;
-}
-
static uint64_t lpm_cpuidle_predict(struct cpuidle_device *dev,
struct lpm_cpu *cpu, int *idx_restrict,
uint32_t *idx_restrict_time)
@@ -575,34 +576,46 @@
static void update_history(struct cpuidle_device *dev, int idx);
+static inline bool is_cpu_biased(int cpu)
+{
+ u64 now = sched_clock();
+ u64 last = sched_get_cpu_last_busy_time(cpu);
+
+ if (!last)
+ return false;
+
+ return (now - last) < BIAS_HYST;
+}
+
static int cpu_power_select(struct cpuidle_device *dev,
struct lpm_cpu *cpu)
{
- int best_level = -1;
+ int best_level = 0;
uint32_t latency_us = pm_qos_request_for_cpu(PM_QOS_CPU_DMA_LATENCY,
dev->cpu);
- uint32_t sleep_us =
- (uint32_t)(ktime_to_us(tick_nohz_get_sleep_length()));
+ s64 sleep_us = ktime_to_us(tick_nohz_get_sleep_length());
uint32_t modified_time_us = 0;
uint32_t next_event_us = 0;
int i, idx_restrict;
uint32_t lvl_latency_us = 0;
uint64_t predicted = 0;
uint32_t htime = 0, idx_restrict_time = 0;
- uint32_t next_wakeup_us = sleep_us;
+ uint32_t next_wakeup_us = (uint32_t)sleep_us;
uint32_t *min_residency = get_per_cpu_min_residency(dev->cpu);
uint32_t *max_residency = get_per_cpu_max_residency(dev->cpu);
- if (!cpu)
- return -EINVAL;
-
- if (sleep_disabled)
+ if ((sleep_disabled && !cpu_isolated(dev->cpu)) || sleep_us < 0)
return 0;
idx_restrict = cpu->nlevels + 1;
next_event_us = (uint32_t)(ktime_to_us(get_next_event_time(dev->cpu)));
+ if (is_cpu_biased(dev->cpu)) {
+ best_level = 0;
+ goto done_select;
+ }
+
for (i = 0; i < cpu->nlevels; i++) {
struct lpm_cpu_level *level = &cpu->levels[i];
struct power_params *pwr_params = &level->pwr;
@@ -637,8 +650,8 @@
if (next_wakeup_us > max_residency[i]) {
predicted = lpm_cpuidle_predict(dev, cpu,
&idx_restrict, &idx_restrict_time);
- if (predicted < min_residency[i])
- predicted = 0;
+ if (predicted && (predicted < min_residency[i]))
+ predicted = min_residency[i];
} else
invalidate_predict_history(dev);
}
@@ -681,6 +694,7 @@
histtimer_start(htime);
}
+done_select:
trace_cpu_power_select(best_level, sleep_us, latency_us, next_event_us);
trace_cpu_pred_select(idx_restrict_time ? 2 : (predicted ? 1 : 0),
@@ -953,10 +967,6 @@
if (!lpm_cluster_mode_allow(cluster, i, from_idle))
continue;
- if (level->last_core_only &&
- cpumask_weight(cpu_online_mask) > 1)
- continue;
-
if (!cpumask_equal(&cluster->num_children_in_sync,
&level->num_cpu_votes))
continue;
@@ -972,8 +982,9 @@
best_level = i;
- if (predicted ? (pred_us <= pwr_params->max_residency)
- : (sleep_us <= pwr_params->max_residency))
+ if (from_idle &&
+ (predicted ? (pred_us <= pwr_params->max_residency)
+ : (sleep_us <= pwr_params->max_residency)))
break;
}
@@ -1001,7 +1012,6 @@
bool from_idle, int predicted)
{
struct lpm_cluster_level *level = &cluster->levels[idx];
- int ret, i;
if (!cpumask_equal(&cluster->num_children_in_sync, &cluster->child_cpus)
|| is_IPI_pending(&cluster->num_children_in_sync)) {
@@ -1022,30 +1032,16 @@
ktime_to_us(ktime_get()));
}
- for (i = 0; i < cluster->ndevices; i++) {
- ret = set_device_mode(cluster, i, level);
- if (ret)
- goto failed_set_mode;
- }
if (level->notify_rpm) {
- struct cpumask nextcpu, *cpumask;
uint64_t us;
uint32_t pred_us;
- us = get_cluster_sleep_time(cluster, &nextcpu,
- from_idle, &pred_us);
- cpumask = level->disable_dynamic_routing ? NULL : &nextcpu;
-
- if (ret) {
- pr_info("Failed msm_rpm_enter_sleep() rc = %d\n", ret);
- goto failed_set_mode;
- }
-
+ us = get_cluster_sleep_time(cluster, NULL, from_idle,
+ &pred_us);
us = us + 1;
clear_predict_history();
clear_cl_predict_history();
- do_div(us, USEC_PER_SEC/SCLK_HZ);
system_sleep_enter(us);
}
/* Notify cluster enter event after successfully config completion */
@@ -1056,23 +1052,10 @@
if (predicted && (idx < (cluster->nlevels - 1))) {
struct power_params *pwr_params = &cluster->levels[idx].pwr;
- tick_broadcast_exit();
clusttimer_start(cluster, pwr_params->max_residency + tmr_add);
- tick_broadcast_enter();
}
return 0;
-failed_set_mode:
-
- for (i = 0; i < cluster->ndevices; i++) {
- int rc = 0;
-
- level = &cluster->levels[cluster->default_level];
- // rc = set_device_mode(cluster, i, level);
- WARN_ON(rc);
- }
-
- return ret;
}
static void cluster_prepare(struct lpm_cluster *cluster,
@@ -1122,10 +1105,8 @@
struct power_params *pwr_params =
&cluster->levels[0].pwr;
- tick_broadcast_exit();
clusttimer_start(cluster,
pwr_params->max_residency + tmr_add);
- tick_broadcast_enter();
}
}
@@ -1152,7 +1133,7 @@
{
struct lpm_cluster_level *level;
bool first_cpu;
- int last_level, i, ret;
+ int last_level, i;
if (!cluster)
return;
@@ -1202,13 +1183,8 @@
last_level = cluster->last_level;
cluster->last_level = cluster->default_level;
- for (i = 0; i < cluster->ndevices; i++) {
+ for (i = 0; i < cluster->ndevices; i++)
level = &cluster->levels[cluster->default_level];
- ret = set_device_mode(cluster, i, level);
-
- WARN_ON(ret);
-
- }
cluster_notify(cluster, &cluster->levels[last_level], false);
@@ -1221,12 +1197,11 @@
spin_unlock(&cluster->sync_lock);
}
-static inline void cpu_prepare(struct lpm_cluster *cluster, int cpu_index,
+static inline void cpu_prepare(struct lpm_cpu *cpu, int cpu_index,
bool from_idle)
{
- struct lpm_cpu_level *cpu_level = &cluster->cpu->levels[cpu_index];
- bool jtag_save_restore =
- cluster->cpu->levels[cpu_index].jtag_save_restore;
+ struct lpm_cpu_level *cpu_level = &cpu->levels[cpu_index];
+ bool jtag_save_restore = cpu->levels[cpu_index].jtag_save_restore;
/* Use broadcast timer for aggregating sleep mode within a cluster.
* A broadcast timer could be used in the following scenarios
@@ -1238,9 +1213,6 @@
* next wakeup within a cluster, in which case, CPU switches over to
* use broadcast timer.
*/
- if (from_idle && cpu_level->use_bc_timer)
- tick_broadcast_enter();
-
if (from_idle && ((cpu_level->mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
|| (cpu_level->mode ==
MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)
@@ -1254,15 +1226,11 @@
msm_jtag_save_state();
}
-static inline void cpu_unprepare(struct lpm_cluster *cluster, int cpu_index,
+static inline void cpu_unprepare(struct lpm_cpu *cpu, int cpu_index,
bool from_idle)
{
- struct lpm_cpu_level *cpu_level = &cluster->cpu->levels[cpu_index];
- bool jtag_save_restore =
- cluster->cpu->levels[cpu_index].jtag_save_restore;
-
- if (from_idle && cpu_level->use_bc_timer)
- tick_broadcast_exit();
+ struct lpm_cpu_level *cpu_level = &cpu->levels[cpu_index];
+ bool jtag_save_restore = cpu->levels[cpu_index].jtag_save_restore;
if (from_idle && ((cpu_level->mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
|| (cpu_level->mode ==
@@ -1305,17 +1273,21 @@
return state_id;
}
-#if !defined(CONFIG_CPU_V7)
-bool psci_enter_sleep(struct lpm_cluster *cluster, int idx, bool from_idle)
+static bool psci_enter_sleep(struct lpm_cpu *cpu, int idx, bool from_idle)
{
int affinity_level = 0;
- int state_id = get_cluster_id(cluster, &affinity_level);
+ int state_id = get_cluster_id(cpu->parent, &affinity_level);
int power_state =
- PSCI_POWER_STATE(cluster->cpu->levels[idx].is_reset);
+ PSCI_POWER_STATE(cpu->levels[idx].is_reset);
bool success = false;
/*
* idx = 0 is the default LPM state
*/
+ if (from_idle && cpu->levels[idx].use_bc_timer) {
+ if (tick_broadcast_enter())
+ return false;
+ }
+
if (!idx) {
stop_critical_timings();
wfi();
@@ -1325,7 +1297,7 @@
affinity_level = PSCI_AFFINITY_LEVEL(affinity_level);
state_id |= (power_state | affinity_level
- | cluster->cpu->levels[idx].psci_id);
+ | cpu->levels[idx].psci_id);
update_debug_pc_event(CPU_ENTER, state_id,
0xdeaffeed, 0xdeaffeed, true);
@@ -1334,59 +1306,22 @@
start_critical_timings();
update_debug_pc_event(CPU_EXIT, state_id,
success, 0xdeaffeed, true);
+
+ if (from_idle && cpu->levels[idx].use_bc_timer)
+ tick_broadcast_exit();
+
return success;
}
-#elif defined(CONFIG_ARM_PSCI)
-bool psci_enter_sleep(struct lpm_cluster *cluster, int idx, bool from_idle)
-{
- int affinity_level = 0;
- int state_id = get_cluster_id(cluster, &affinity_level);
- int power_state =
- PSCI_POWER_STATE(cluster->cpu->levels[idx].is_reset);
- bool success = false;
-
- if (!idx) {
- stop_critical_timings();
- wfi();
- start_critical_timings();
- return 1;
- }
-
- affinity_level = PSCI_AFFINITY_LEVEL(affinity_level);
- state_id |= (power_state | affinity_level
- | cluster->cpu->levels[idx].psci_id);
-
- update_debug_pc_event(CPU_ENTER, state_id,
- 0xdeaffeed, 0xdeaffeed, true);
- stop_critical_timings();
- success = !arm_cpuidle_suspend(state_id);
- start_critical_timings();
- update_debug_pc_event(CPU_EXIT, state_id,
- success, 0xdeaffeed, true);
-}
-#else
-bool psci_enter_sleep(struct lpm_cluster *cluster, int idx, bool from_idle)
-{
- WARN_ONCE(true, "PSCI cpu_suspend ops not supported\n");
- return false;
-}
-#endif
static int lpm_cpuidle_select(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{
- struct lpm_cluster *cluster = per_cpu(cpu_cluster, dev->cpu);
- int idx;
+ struct lpm_cpu *cpu = per_cpu(cpu_lpm, dev->cpu);
- if (!cluster)
+ if (!cpu)
return 0;
- idx = cpu_power_select(dev, cluster->cpu);
-
- if (idx < 0)
- return 0;
-
- return idx;
+ return cpu_power_select(dev, cpu);
}
static void update_history(struct cpuidle_device *dev, int idx)
@@ -1425,34 +1360,33 @@
static int lpm_cpuidle_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int idx)
{
- struct lpm_cluster *cluster = per_cpu(cpu_cluster, dev->cpu);
+ struct lpm_cpu *cpu = per_cpu(cpu_lpm, dev->cpu);
bool success = true;
const struct cpumask *cpumask = get_cpu_mask(dev->cpu);
int64_t start_time = ktime_to_ns(ktime_get()), end_time;
struct power_params *pwr_params;
- pwr_params = &cluster->cpu->levels[idx].pwr;
+ pwr_params = &cpu->levels[idx].pwr;
- pwr_params = &cluster->cpu->levels[idx].pwr;
+ pwr_params = &cpu->levels[idx].pwr;
- cpu_prepare(cluster, idx, true);
- cluster_prepare(cluster, cpumask, idx, true, ktime_to_ns(ktime_get()));
+ cpu_prepare(cpu, idx, true);
+ cluster_prepare(cpu->parent, cpumask, idx, true, start_time);
trace_cpu_idle_enter(idx);
lpm_stats_cpu_enter(idx, start_time);
- if (need_resched() || (idx < 0))
+ if (need_resched())
goto exit;
- WARN_ON(!use_psci);
- success = psci_enter_sleep(cluster, idx, true);
+ success = psci_enter_sleep(cpu, idx, true);
exit:
end_time = ktime_to_ns(ktime_get());
lpm_stats_cpu_exit(idx, end_time, success);
- cluster_unprepare(cluster, cpumask, idx, true, end_time);
- cpu_unprepare(cluster, idx, true);
+ cluster_unprepare(cpu->parent, cpumask, idx, true, end_time);
+ cpu_unprepare(cpu, idx, true);
sched_set_cpu_cstate(smp_processor_id(), 0, 0, 0);
end_time = ktime_to_ns(ktime_get()) - start_time;
do_div(end_time, 1000);
@@ -1522,8 +1456,9 @@
int i = 0, ret = 0;
unsigned int cpu;
struct lpm_cluster *p = NULL;
+ struct lpm_cpu *lpm_cpu;
- if (!cl->cpu) {
+ if (list_empty(&cl->cpu)) {
struct lpm_cluster *n;
list_for_each_entry(n, &cl->child, list) {
@@ -1534,51 +1469,56 @@
return ret;
}
- cl->drv = kcalloc(1, sizeof(*cl->drv), GFP_KERNEL);
- if (!cl->drv)
- return -ENOMEM;
+ list_for_each_entry(lpm_cpu, &cl->cpu, list) {
+ lpm_cpu->drv = kcalloc(1, sizeof(*lpm_cpu->drv), GFP_KERNEL);
+ if (!lpm_cpu->drv)
+ return -ENOMEM;
- cl->drv->name = "msm_idle";
+ lpm_cpu->drv->name = "msm_idle";
- for (i = 0; i < cl->cpu->nlevels; i++) {
- struct cpuidle_state *st = &cl->drv->states[i];
- struct lpm_cpu_level *cpu_level = &cl->cpu->levels[i];
+ for (i = 0; i < lpm_cpu->nlevels; i++) {
+ struct cpuidle_state *st = &lpm_cpu->drv->states[i];
+ struct lpm_cpu_level *cpu_level = &lpm_cpu->levels[i];
- snprintf(st->name, CPUIDLE_NAME_LEN, "C%u\n", i);
- snprintf(st->desc, CPUIDLE_DESC_LEN, cpu_level->name);
- st->flags = 0;
- st->exit_latency = cpu_level->pwr.latency_us;
- st->power_usage = cpu_level->pwr.ss_power;
- st->target_residency = 0;
- st->enter = lpm_cpuidle_enter;
- }
-
- cl->drv->state_count = cl->cpu->nlevels;
- cl->drv->safe_state_index = 0;
- for_each_cpu(cpu, &cl->child_cpus)
- per_cpu(cpu_cluster, cpu) = cl;
-
- for_each_possible_cpu(cpu) {
- if (cpu_online(cpu))
- continue;
- p = per_cpu(cpu_cluster, cpu);
- while (p) {
- int j;
-
- spin_lock(&p->sync_lock);
- cpumask_set_cpu(cpu, &p->num_children_in_sync);
- for (j = 0; j < p->nlevels; j++)
- cpumask_copy(&p->levels[j].num_cpu_votes,
- &p->num_children_in_sync);
- spin_unlock(&p->sync_lock);
- p = p->parent;
+ snprintf(st->name, CPUIDLE_NAME_LEN, "C%u\n", i);
+ snprintf(st->desc, CPUIDLE_DESC_LEN, cpu_level->name);
+ st->flags = 0;
+ st->exit_latency = cpu_level->pwr.latency_us;
+ st->power_usage = cpu_level->pwr.ss_power;
+ st->target_residency = 0;
+ st->enter = lpm_cpuidle_enter;
}
- }
- ret = cpuidle_register_cpu(cl->drv, &cl->child_cpus);
- if (ret) {
- kfree(cl->drv);
- return -ENOMEM;
+ lpm_cpu->drv->state_count = lpm_cpu->nlevels;
+ lpm_cpu->drv->safe_state_index = 0;
+ for_each_cpu(cpu, &lpm_cpu->related_cpus)
+ per_cpu(cpu_lpm, cpu) = lpm_cpu;
+
+ for_each_possible_cpu(cpu) {
+ if (cpu_online(cpu))
+ continue;
+ if (per_cpu(cpu_lpm, cpu))
+ p = per_cpu(cpu_lpm, cpu)->parent;
+ while (p) {
+ int j;
+
+ spin_lock(&p->sync_lock);
+ cpumask_set_cpu(cpu, &p->num_children_in_sync);
+ for (j = 0; j < p->nlevels; j++)
+ cpumask_copy(
+ &p->levels[j].num_cpu_votes,
+ &p->num_children_in_sync);
+ spin_unlock(&p->sync_lock);
+ p = p->parent;
+ }
+ }
+ ret = cpuidle_register_cpu(lpm_cpu->drv,
+ &lpm_cpu->related_cpus);
+
+ if (ret) {
+ kfree(lpm_cpu->drv);
+ return -ENOMEM;
+ }
}
return 0;
}
@@ -1608,7 +1548,7 @@
level_name[i] = cpu->levels[i].name;
lpm_stats_config_level("cpu", level_name, cpu->nlevels,
- parent->stats, &parent->child_cpus);
+ parent->stats, &cpu->related_cpus);
kfree(level_name);
}
@@ -1617,8 +1557,9 @@
struct lpm_cluster *parent)
{
const char **level_name;
- int i;
struct lpm_cluster *child;
+ struct lpm_cpu *cpu;
+ int i;
if (!cl)
return;
@@ -1636,10 +1577,12 @@
kfree(level_name);
- if (cl->cpu) {
- register_cpu_lpm_stats(cl->cpu, cl);
- return;
+ list_for_each_entry(cpu, &cl->cpu, list) {
+ pr_err("%s()\n", __func__);
+ register_cpu_lpm_stats(cpu, cl);
}
+ if (!list_empty(&cl->cpu))
+ return;
list_for_each_entry(child, &cl->child, list)
register_cluster_lpm_stats(child, cl);
@@ -1662,8 +1605,8 @@
static int lpm_suspend_enter(suspend_state_t state)
{
int cpu = raw_smp_processor_id();
- struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
- struct lpm_cpu *lpm_cpu = cluster->cpu;
+ struct lpm_cpu *lpm_cpu = per_cpu(cpu_lpm, cpu);
+ struct lpm_cluster *cluster = lpm_cpu->parent;
const struct cpumask *cpumask = get_cpu_mask(cpu);
int idx;
@@ -1676,7 +1619,7 @@
pr_err("Failed suspend\n");
return 0;
}
- cpu_prepare(cluster, idx, false);
+ cpu_prepare(lpm_cpu, idx, false);
cluster_prepare(cluster, cpumask, idx, false, 0);
if (idx > 0)
update_debug_pc_event(CPU_ENTER, idx, 0xdeaffeed,
@@ -1689,15 +1632,14 @@
* LPMs(XO and Vmin).
*/
- WARN_ON(!use_psci);
- psci_enter_sleep(cluster, idx, true);
+ psci_enter_sleep(lpm_cpu, idx, true);
if (idx > 0)
update_debug_pc_event(CPU_EXIT, idx, true, 0xdeaffeed,
false);
cluster_unprepare(cluster, cpumask, idx, false, 0);
- cpu_unprepare(cluster, idx, false);
+ cpu_unprepare(lpm_cpu, idx, false);
return 0;
}
@@ -1737,14 +1679,6 @@
hrtimer_init(&histtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
cluster_timer_init(lpm_root_node);
- ret = remote_spin_lock_init(&scm_handoff_lock, SCM_HANDOFF_LOCK_ID);
- if (ret) {
- pr_err("%s: Failed initializing scm_handoff_lock (%d)\n",
- __func__, ret);
- put_online_cpus();
- return ret;
- }
-
size = num_dbg_elements * sizeof(struct lpm_debug);
lpm_debug = dma_alloc_coherent(&pdev->dev, size,
&lpm_debug_phys, GFP_KERNEL);
@@ -1813,54 +1747,3 @@
return rc;
}
late_initcall(lpm_levels_module_init);
-
-enum msm_pm_l2_scm_flag lpm_cpu_pre_pc_cb(unsigned int cpu)
-{
- struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
- enum msm_pm_l2_scm_flag retflag = MSM_SCM_L2_ON;
-
- /*
- * No need to acquire the lock if probe isn't completed yet
- * In the event of the hotplug happening before lpm probe, we want to
- * flush the cache to make sure that L2 is flushed. In particular, this
- * could cause incoherencies for a cluster architecture. This wouldn't
- * affect the idle case as the idle driver wouldn't be registered
- * before the probe function
- */
- if (!cluster)
- return MSM_SCM_L2_OFF;
-
- /*
- * Assumes L2 only. What/How parameters gets passed into TZ will
- * determine how this function reports this info back in msm-pm.c
- */
- spin_lock(&cluster->sync_lock);
-
- if (!cluster->lpm_dev) {
- retflag = MSM_SCM_L2_OFF;
- goto unlock_and_return;
- }
-
- if (!cpumask_equal(&cluster->num_children_in_sync,
- &cluster->child_cpus))
- goto unlock_and_return;
-
- if (cluster->lpm_dev)
- retflag = cluster->lpm_dev->tz_flag;
- /*
- * The scm_handoff_lock will be release by the secure monitor.
- * It is used to serialize power-collapses from this point on,
- * so that both Linux and the secure context have a consistent
- * view regarding the number of running cpus (cpu_count).
- *
- * It must be acquired before releasing the cluster lock.
- */
-unlock_and_return:
- update_debug_pc_event(PRE_PC_CB, retflag, 0xdeadbeef, 0xdeadbeef,
- 0xdeadbeef);
- trace_pre_pc_cb(retflag);
- remote_spin_lock_rlock_id(&scm_handoff_lock,
- REMOTE_SPINLOCK_TID_START + cpu);
- spin_unlock(&cluster->sync_lock);
- return retflag;
-}
diff --git a/drivers/cpuidle/lpm-levels.h b/drivers/cpuidle/lpm-levels.h
index 3d35ae9..c9f272e 100644
--- a/drivers/cpuidle/lpm-levels.h
+++ b/drivers/cpuidle/lpm-levels.h
@@ -17,8 +17,6 @@
#define MAXSAMPLES 5
#define CLUST_SMPL_INVLD_TIME 40000
-extern bool use_psci;
-
struct lpm_lookup_table {
uint32_t modes;
const char *mode_name;
@@ -47,10 +45,13 @@
};
struct lpm_cpu {
+ struct list_head list;
+ struct cpumask related_cpus;
struct lpm_cpu_level levels[NR_LPM_LEVELS];
int nlevels;
unsigned int psci_mode_shift;
unsigned int psci_mode_mask;
+ struct cpuidle_driver *drv;
struct lpm_cluster *parent;
};
@@ -74,21 +75,13 @@
struct cpumask num_cpu_votes;
struct power_params pwr;
bool notify_rpm;
- bool disable_dynamic_routing;
bool sync_level;
- bool last_core_only;
struct lpm_level_avail available;
unsigned int psci_id;
bool is_reset;
int reset_level;
};
-struct low_power_ops {
- struct msm_spm_device *spm;
- int (*set_mode)(struct low_power_ops *ops, int mode, bool notify_rpm);
- enum msm_pm_l2_scm_flag tz_flag;
-};
-
struct cluster_history {
uint32_t resi[MAXSAMPLES];
int mode[MAXSAMPLES];
@@ -108,16 +101,13 @@
const char *cluster_name;
const char **name;
unsigned long aff_level; /* Affinity level of the node */
- struct low_power_ops *lpm_dev;
int ndevices;
struct lpm_cluster_level levels[NR_LPM_LEVELS];
int nlevels;
- enum msm_pm_l2_scm_flag l2_flag;
int min_child_level;
int default_level;
int last_level;
- struct lpm_cpu *cpu;
- struct cpuidle_driver *drv;
+ struct list_head cpu;
spinlock_t sync_lock;
struct cpumask child_cpus;
struct cpumask num_children_in_sync;
@@ -125,14 +115,10 @@
struct lpm_stats *stats;
unsigned int psci_mode_shift;
unsigned int psci_mode_mask;
- bool no_saw_devices;
struct cluster_history history;
struct hrtimer histtimer;
};
-int set_l2_mode(struct low_power_ops *ops, int mode, bool notify_rpm);
-int set_system_mode(struct low_power_ops *ops, int mode, bool notify_rpm);
-int set_l3_mode(struct low_power_ops *ops, int mode, bool notify_rpm);
void lpm_suspend_wake_time(uint64_t wakeup_time);
struct lpm_cluster *lpm_of_parse_cluster(struct platform_device *pdev);
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 97e3479..6fcf25f 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -1000,7 +1000,9 @@
ctx->flags |= SHA_FLAGS_FINUP;
err1 = atmel_sha_update(req);
- if (err1 == -EINPROGRESS || err1 == -EBUSY)
+ if (err1 == -EINPROGRESS ||
+ (err1 == -EBUSY && (ahash_request_flags(req) &
+ CRYPTO_TFM_REQ_MAY_BACKLOG)))
return err1;
/*
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index c310318..0d743c6 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -2014,10 +2014,10 @@
{
struct ablkcipher_request *req = context;
struct ablkcipher_edesc *edesc;
-#ifdef DEBUG
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+#ifdef DEBUG
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
@@ -2037,6 +2037,14 @@
#endif
ablkcipher_unmap(jrdev, edesc, req);
+
+ /*
+ * The crypto API expects us to set the IV (req->info) to the last
+ * ciphertext block. This is used e.g. by the CTS mode.
+ */
+ scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
+ ivsize, 0);
+
kfree(edesc);
ablkcipher_request_complete(req, err);
@@ -2047,10 +2055,10 @@
{
struct ablkcipher_request *req = context;
struct ablkcipher_edesc *edesc;
-#ifdef DEBUG
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+#ifdef DEBUG
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
@@ -2069,6 +2077,14 @@
#endif
ablkcipher_unmap(jrdev, edesc, req);
+
+ /*
+ * The crypto API expects us to set the IV (req->info) to the last
+ * ciphertext block.
+ */
+ scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
+ ivsize, 0);
+
kfree(edesc);
ablkcipher_request_complete(req, err);
@@ -2601,8 +2617,7 @@
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
struct device *jrdev = ctx->jrdev;
- gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP)) ?
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
int src_nents, dst_nents = 0, sec4_sg_bytes;
struct ablkcipher_edesc *edesc;
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 2474f14..631337c 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -491,7 +491,7 @@
ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
if (!ret) {
/* in progress */
- wait_for_completion_interruptible(&result.completion);
+ wait_for_completion(&result.completion);
ret = result.err;
#ifdef DEBUG
print_hex_dump(KERN_ERR,
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index e1eaf4f..3ce1d5c 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -103,7 +103,7 @@
ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
if (!ret) {
/* in progress */
- wait_for_completion_interruptible(&result.completion);
+ wait_for_completion(&result.completion);
ret = result.err;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
diff --git a/drivers/crypto/msm/compat_qcedev.c b/drivers/crypto/msm/compat_qcedev.c
index 0ca28be..d61b6f3 100644
--- a/drivers/crypto/msm/compat_qcedev.c
+++ b/drivers/crypto/msm/compat_qcedev.c
@@ -96,7 +96,6 @@
for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
err |= get_user(vaddr, &vbuf32->src[i].vaddr);
- vbuf->src[i].vaddr = NULL;
err |= put_user(vaddr, (compat_uptr_t *)&vbuf->src[i].vaddr);
err |= get_user(len, &vbuf32->src[i].len);
err |= put_user(len, &vbuf->src[i].len);
@@ -104,7 +103,6 @@
for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
err |= get_user(vaddr, &vbuf32->dst[i].vaddr);
- vbuf->dst[i].vaddr = NULL;
err |= put_user(vaddr, (compat_uptr_t *)&vbuf->dst[i].vaddr);
err |= get_user(len, &vbuf32->dst[i].len);
err |= put_user(len, &vbuf->dst[i].len);
@@ -122,7 +120,6 @@
for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
err |= get_user(vaddr, (compat_uptr_t *)&vbuf->src[i].vaddr);
- vbuf32->src[i].vaddr = 0;
err |= put_user(vaddr, &vbuf32->src[i].vaddr);
err |= get_user(len, &vbuf->src[i].len);
err |= put_user(len, &vbuf32->src[i].len);
@@ -130,7 +127,6 @@
for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
err |= get_user(vaddr, (compat_uptr_t *)&vbuf->dst[i].vaddr);
- vbuf32->dst[i].vaddr = 0;
err |= put_user(vaddr, &vbuf32->dst[i].vaddr);
err |= get_user(len, &vbuf->dst[i].len);
err |= put_user(len, &vbuf32->dst[i].len);
@@ -275,7 +271,6 @@
for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
err |= get_user(vaddr, &data32->data[i].vaddr);
- data->data[i].vaddr = 0;
err |= put_user(vaddr, (compat_uptr_t *)&data->data[i].vaddr);
err |= get_user(len, &data32->data[i].len);
err |= put_user(len, &data->data[i].len);
@@ -294,7 +289,6 @@
err |= get_user(diglen, &data32->diglen);
err |= put_user(diglen, &data->diglen);
err |= get_user(authkey, &data32->authkey);
- data->authkey = NULL;
err |= put_user(authkey, (compat_uptr_t *)&data->authkey);
err |= get_user(authklen, &data32->authklen);
err |= put_user(authklen, &data->authklen);
@@ -321,7 +315,6 @@
for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
err |= get_user(vaddr, (compat_uptr_t *)&data->data[i].vaddr);
- data32->data[i].vaddr = 0;
err |= put_user(vaddr, &data32->data[i].vaddr);
err |= get_user(len, &data->data[i].len);
err |= put_user(len, &data32->data[i].len);
@@ -340,7 +333,6 @@
err |= get_user(diglen, &data->diglen);
err |= put_user(diglen, &data32->diglen);
err |= get_user(authkey, (compat_uptr_t *)&data->authkey);
- data32->authkey = 0;
err |= put_user(authkey, &data32->authkey);
err |= get_user(authklen, &data->authklen);
err |= put_user(authklen, &data32->authklen);
diff --git a/drivers/crypto/msm/ice.c b/drivers/crypto/msm/ice.c
index b411726..6ed82ef 100644
--- a/drivers/crypto/msm/ice.c
+++ b/drivers/crypto/msm/ice.c
@@ -459,7 +459,7 @@
(ICE_REV(ice_dev->ice_hw_version, MINOR) >= 1))) {
reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_BYPASS_STATUS);
if ((reg & 0x80000000) != 0x0) {
- pr_err("%s: Bypass failed for ice = %p",
+ pr_err("%s: Bypass failed for ice = %pK",
__func__, (void *)ice_dev);
WARN_ON(1);
}
@@ -485,7 +485,7 @@
}
ice_dev->ice_hw_version = rev;
- dev_info(ice_dev->pdev, "QC ICE %d.%d.%d device found @0x%p\n",
+ dev_info(ice_dev->pdev, "QC ICE %d.%d.%d device found @0x%pK\n",
maj_rev, min_rev, step_rev,
ice_dev->mmio);
@@ -1275,7 +1275,7 @@
goto out;
}
- pr_err("%s: =========== REGISTER DUMP (%p)===========\n",
+ pr_err("%s: =========== REGISTER DUMP (%pK)===========\n",
ice_dev->ice_instance_type, ice_dev);
pr_err("%s: ICE Control: 0x%08x | ICE Reset: 0x%08x\n",
@@ -1589,7 +1589,7 @@
struct ice_device *ice_dev = NULL;
if (!node) {
- pr_err("%s: invalid node %p", __func__, node);
+ pr_err("%s: invalid node %pK", __func__, node);
goto out;
}
@@ -1606,13 +1606,14 @@
list_for_each_entry(ice_dev, &ice_devices, list) {
if (ice_dev->pdev->of_node == node) {
- pr_info("%s: found ice device %p\n", __func__, ice_dev);
+ pr_info("%s: found ice device %pK\n", __func__,
+ ice_dev);
break;
}
}
ice_pdev = to_platform_device(ice_dev->pdev);
- pr_info("%s: matching platform device %p\n", __func__, ice_pdev);
+ pr_info("%s: matching platform device %pK\n", __func__, ice_pdev);
out:
return ice_pdev;
}
@@ -1650,7 +1651,7 @@
}
ret = regulator_enable(ice_dev->reg);
if (ret) {
- pr_err("%s:%p: Could not enable regulator\n",
+ pr_err("%s:%pK: Could not enable regulator\n",
__func__, ice_dev);
goto out;
}
@@ -1658,7 +1659,7 @@
/* Setup Clocks */
if (qcom_ice_enable_clocks(ice_dev, true)) {
- pr_err("%s:%p:%s Could not enable clocks\n", __func__,
+ pr_err("%s:%pK:%s Could not enable clocks\n", __func__,
ice_dev, ice_dev->ice_instance_type);
goto out_reg;
}
@@ -1670,7 +1671,7 @@
ret = qcom_ice_set_bus_vote(ice_dev, vote);
if (ret) {
- pr_err("%s:%p: failed %d\n", __func__, ice_dev, ret);
+ pr_err("%s:%pK: failed %d\n", __func__, ice_dev, ret);
goto out_clocks;
}
@@ -1702,19 +1703,19 @@
/* Setup Bus Vote */
vote = qcom_ice_get_bus_vote(ice_dev, "MIN");
if (vote < 0) {
- pr_err("%s:%p: Unable to get bus vote\n", __func__, ice_dev);
+ pr_err("%s:%pK: Unable to get bus vote\n", __func__, ice_dev);
goto out_disable_clocks;
}
ret = qcom_ice_set_bus_vote(ice_dev, vote);
if (ret)
- pr_err("%s:%p: failed %d\n", __func__, ice_dev, ret);
+ pr_err("%s:%pK: failed %d\n", __func__, ice_dev, ret);
out_disable_clocks:
/* Setup Clocks */
if (qcom_ice_enable_clocks(ice_dev, false))
- pr_err("%s:%p:%s Could not disable clocks\n", __func__,
+ pr_err("%s:%pK:%s Could not disable clocks\n", __func__,
ice_dev, ice_dev->ice_instance_type);
/* Setup Regulator */
@@ -1725,7 +1726,7 @@
}
ret = regulator_disable(ice_dev->reg);
if (ret) {
- pr_err("%s:%p: Could not disable regulator\n",
+ pr_err("%s:%pK: Could not disable regulator\n",
__func__, ice_dev);
goto out;
}
diff --git a/drivers/crypto/msm/ota_crypto.c b/drivers/crypto/msm/ota_crypto.c
index 3a2a51d..d477815 100644
--- a/drivers/crypto/msm/ota_crypto.c
+++ b/drivers/crypto/msm/ota_crypto.c
@@ -172,7 +172,7 @@
podev = file->private_data;
if (podev != NULL && podev->magic != OTA_MAGIC) {
- pr_err("%s: invalid handle %p\n",
+ pr_err("%s: invalid handle %pK\n",
__func__, podev);
}
@@ -440,7 +440,7 @@
podev = file->private_data;
if (podev == NULL || podev->magic != OTA_MAGIC) {
- pr_err("%s: invalid handle %p\n",
+ pr_err("%s: invalid handle %pK\n",
__func__, podev);
return -ENOENT;
}
diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c
index d9ebe113..35d7542 100644
--- a/drivers/crypto/msm/qce50.c
+++ b/drivers/crypto/msm/qce50.c
@@ -33,12 +33,17 @@
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <soc/qcom/socinfo.h>
+#include <asm/dma-iommu.h>
+#include <linux/iommu.h>
#include "qce.h"
#include "qce50.h"
#include "qcryptohw_50.h"
#include "qce_ota.h"
+#define CRYPTO_SMMU_IOVA_START 0x10000000
+#define CRYPTO_SMMU_IOVA_SIZE 0x40000000
+
#define CRYPTO_CONFIG_RESET 0xE01EF
#define MAX_SPS_DESC_FIFO_SIZE 0xfff0
#define QCE_MAX_NUM_DSCR 0x200
@@ -156,6 +161,8 @@
atomic_t last_intr_seq;
bool cadence_flag;
uint8_t *dummyreq_in_buf;
+ struct dma_iommu_mapping *smmu_mapping;
+ bool bypass_s1_smmu;
};
static void print_notify_debug(struct sps_event_notify *notify);
@@ -1157,7 +1164,7 @@
#define QCE_WRITE_REG(val, addr) \
{ \
- pr_info(" [0x%p] 0x%x\n", addr, (uint32_t)val); \
+ pr_info(" [0x%pK] 0x%x\n", addr, (uint32_t)val); \
writel_relaxed(val, addr); \
}
@@ -2153,6 +2160,10 @@
pce_sps_data = &preq_info->ce_sps;
qce_callback = preq_info->qce_cb;
areq = (struct ahash_request *) preq_info->areq;
+ if (!areq) {
+ pr_err("sha operation error. areq is NULL\n");
+ return -ENXIO;
+ }
qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
DMA_TO_DEVICE);
memcpy(digest, (char *)(&pce_sps_data->result->auth_iv[0]),
@@ -2728,7 +2739,7 @@
sps_event->callback = NULL;
}
- pr_debug("success, %s : pipe_handle=0x%lx, desc fifo base (phy) = 0x%p\n",
+ pr_debug("success, %s : pipe_handle=0x%lx, desc fifo base (phy) = 0x%pK\n",
is_producer ? "PRODUCER(RX/OUT)" : "CONSUMER(TX/IN)",
(uintptr_t)sps_pipe_info, &sps_connect_info->desc.phys_base);
goto out;
@@ -2890,7 +2901,7 @@
bam.ipc_loglevel = QCE_BAM_DEFAULT_IPC_LOGLVL;
bam.options |= SPS_BAM_CACHED_WP;
pr_debug("bam physical base=0x%lx\n", (uintptr_t)bam.phys_addr);
- pr_debug("bam virtual base=0x%p\n", bam.virt_addr);
+ pr_debug("bam virtual base=0x%pK\n", bam.virt_addr);
/* Register CE Peripheral BAM device to SPS driver */
rc = sps_register_bam_device(&bam, &pbam->handle);
@@ -2965,7 +2976,7 @@
request_index++;
if (request_index >= MAX_QCE_BAM_REQ)
request_index = 0;
- if (xchg(&pce_dev->ce_request_info[request_index].
+ if (atomic_xchg(&pce_dev->ce_request_info[request_index].
in_use, true) == false) {
pce_dev->ce_request_index = request_index;
return request_index;
@@ -2981,7 +2992,8 @@
bool is_complete)
{
pce_dev->ce_request_info[req_info].xfer_type = QCE_XFER_TYPE_LAST;
- if (xchg(&pce_dev->ce_request_info[req_info].in_use, false) == true) {
+ if (atomic_xchg(&pce_dev->ce_request_info[req_info].in_use,
+ false) == true) {
if (req_info < MAX_QCE_BAM_REQ && is_complete)
atomic_dec(&pce_dev->no_of_queued_req);
} else
@@ -2993,7 +3005,7 @@
phys_addr_t addr =
DESC_FULL_ADDR((phys_addr_t) notify->data.transfer.iovec.flags,
notify->data.transfer.iovec.addr);
- pr_debug("sps ev_id=%d, addr=0x%pa, size=0x%x, flags=0x%x user=0x%p\n",
+ pr_debug("sps ev_id=%d, addr=0x%pa, size=0x%x, flags=0x%x user=0x%pK\n",
notify->event_id, &addr,
notify->data.transfer.iovec.size,
notify->data.transfer.iovec.flags,
@@ -4605,7 +4617,7 @@
{
int ret = 0;
- if (!(xchg(&pce_dev->ce_request_info[DUMMY_REQ_INDEX].
+ if (!(atomic_xchg(&pce_dev->ce_request_info[DUMMY_REQ_INDEX].
in_use, true) == false))
return -EBUSY;
ret = qce_process_sha_req(pce_dev, NULL);
@@ -5703,6 +5715,10 @@
pr_info("CE operating frequency is not defined, setting to default 100MHZ\n");
pce_dev->ce_opp_freq_hz = CE_CLK_100MHZ;
}
+
+ if (of_property_read_bool((&pdev->dev)->of_node, "qcom,smmu-s1-bypass"))
+ pce_dev->bypass_s1_smmu = true;
+
pce_dev->ce_bam_info.dest_pipe_index =
2 * pce_dev->ce_bam_info.pipe_pair_index;
pce_dev->ce_bam_info.src_pipe_index =
@@ -5936,6 +5952,48 @@
return 0;
}
+static void qce_iommu_release_iomapping(struct qce_device *pce_dev)
+{
+ if (pce_dev->smmu_mapping)
+ arm_iommu_release_mapping(pce_dev->smmu_mapping);
+
+ pce_dev->smmu_mapping = NULL;
+}
+
+static int qce_smmu_init(struct qce_device *pce_dev)
+{
+ struct dma_iommu_mapping *mapping;
+ int s1_bypass = 1;
+ int ret = 0;
+
+ mapping = arm_iommu_create_mapping(&platform_bus_type,
+ CRYPTO_SMMU_IOVA_START, CRYPTO_SMMU_IOVA_SIZE);
+ if (IS_ERR(mapping)) {
+ ret = PTR_ERR(mapping);
+ pr_err("Create mapping failed, err = %d\n", ret);
+ return ret;
+ }
+
+ ret = iommu_domain_set_attr(mapping->domain,
+ DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
+ if (ret < 0) {
+ pr_err("Set s1_bypass attribute failed, err = %d\n", ret);
+ goto ext_fail_set_attr;
+ }
+
+ ret = arm_iommu_attach_device(pce_dev->pdev, mapping);
+ if (ret < 0) {
+ pr_err("Attach device failed, err = %d\n", ret);
+ goto ext_fail_set_attr;
+ }
+ pce_dev->smmu_mapping = mapping;
+ return ret;
+
+ext_fail_set_attr:
+ qce_iommu_release_iomapping(pce_dev);
+ return ret;
+}
+
/* crypto engine open function. */
void *qce_open(struct platform_device *pdev, int *rc)
{
@@ -5963,7 +6021,7 @@
}
for (i = 0; i < MAX_QCE_ALLOC_BAM_REQ; i++)
- pce_dev->ce_request_info[i].in_use = false;
+ atomic_set(&pce_dev->ce_request_info[i].in_use, false);
pce_dev->ce_request_index = 0;
pce_dev->memsize = 10 * PAGE_SIZE * MAX_QCE_ALLOC_BAM_REQ;
@@ -5993,6 +6051,13 @@
if (*rc)
goto err_enable_clk;
+ if (pce_dev->bypass_s1_smmu) {
+ if (qce_smmu_init(pce_dev)) {
+ *rc = -EIO;
+ goto err_smmu;
+ }
+ }
+
if (_probe_ce_engine(pce_dev)) {
*rc = -ENXIO;
goto err;
@@ -6019,6 +6084,9 @@
mutex_unlock(&qce_iomap_mutex);
return pce_dev;
err:
+ if (pce_dev->bypass_s1_smmu)
+ qce_iommu_release_iomapping(pce_dev);
+err_smmu:
qce_disable_clk(pce_dev);
err_enable_clk:
@@ -6060,6 +6128,9 @@
kfree(pce_dev->dummyreq_in_buf);
kfree(pce_dev->iovec_vmem);
+ if (pce_dev->bypass_s1_smmu)
+ qce_iommu_release_iomapping(pce_dev);
+
qce_disable_clk(pce_dev);
__qce_deinit_clk(pce_dev);
mutex_unlock(&qce_iomap_mutex);
@@ -6128,12 +6199,13 @@
void qce_dump_req(void *handle)
{
int i;
+ bool req_in_use;
struct qce_device *pce_dev = (struct qce_device *)handle;
for (i = 0; i < MAX_QCE_BAM_REQ; i++) {
- pr_info("qce_dump_req %d %d\n", i,
- pce_dev->ce_request_info[i].in_use);
- if (pce_dev->ce_request_info[i].in_use == true)
+ req_in_use = atomic_read(&pce_dev->ce_request_info[i].in_use);
+ pr_info("qce_dump_req %d %d\n", i, req_in_use);
+ if (req_in_use == true)
_qce_dump_descr_fifos(pce_dev, i);
}
}
diff --git a/drivers/crypto/msm/qce50.h b/drivers/crypto/msm/qce50.h
index 0e60bd2..ab0d21d 100644
--- a/drivers/crypto/msm/qce50.h
+++ b/drivers/crypto/msm/qce50.h
@@ -214,7 +214,7 @@
};
struct ce_request_info {
- bool in_use;
+ atomic_t in_use;
bool in_prog;
enum qce_xfer_type_enum xfer_type;
struct ce_sps_data ce_sps;
diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c
index 5d6e0c2..9f126b3 100644
--- a/drivers/crypto/msm/qcedev.c
+++ b/drivers/crypto/msm/qcedev.c
@@ -57,6 +57,7 @@
static DEFINE_MUTEX(send_cmd_lock);
static DEFINE_MUTEX(qcedev_sent_bw_req);
+static DEFINE_MUTEX(hash_access_lock);
static int qcedev_control_clocks(struct qcedev_control *podev, bool enable)
{
@@ -269,7 +270,7 @@
handle = file->private_data;
podev = handle->cntl;
if (podev != NULL && podev->magic != QCEDEV_MAGIC) {
- pr_err("%s: invalid handle %p\n",
+ pr_err("%s: invalid handle %pK\n",
__func__, podev);
}
kzfree(handle);
@@ -1657,7 +1658,7 @@
podev = handle->cntl;
qcedev_areq.handle = handle;
if (podev == NULL || podev->magic != QCEDEV_MAGIC) {
- pr_err("%s: invalid handle %p\n",
+ pr_err("%s: invalid handle %pK\n",
__func__, podev);
return -ENOENT;
}
@@ -1699,12 +1700,18 @@
(void __user *)arg,
sizeof(struct qcedev_sha_op_req)))
return -EFAULT;
- if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
+ mutex_lock(&hash_access_lock);
+ if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev)) {
+ mutex_unlock(&hash_access_lock);
return -EINVAL;
+ }
qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
err = qcedev_hash_init(&qcedev_areq, handle, &sg_src);
- if (err)
+ if (err) {
+ mutex_unlock(&hash_access_lock);
return err;
+ }
+ mutex_unlock(&hash_access_lock);
if (copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
sizeof(struct qcedev_sha_op_req)))
return -EFAULT;
@@ -1722,32 +1729,42 @@
(void __user *)arg,
sizeof(struct qcedev_sha_op_req)))
return -EFAULT;
- if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
+ mutex_lock(&hash_access_lock);
+ if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev)) {
+ mutex_unlock(&hash_access_lock);
return -EINVAL;
+ }
qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
if (qcedev_areq.sha_op_req.alg == QCEDEV_ALG_AES_CMAC) {
err = qcedev_hash_cmac(&qcedev_areq, handle, &sg_src);
- if (err)
+ if (err) {
+ mutex_unlock(&hash_access_lock);
return err;
+ }
} else {
if (handle->sha_ctxt.init_done == false) {
pr_err("%s Init was not called\n", __func__);
+ mutex_unlock(&hash_access_lock);
return -EINVAL;
}
err = qcedev_hash_update(&qcedev_areq, handle, &sg_src);
- if (err)
+ if (err) {
+ mutex_unlock(&hash_access_lock);
return err;
+ }
}
if (handle->sha_ctxt.diglen > QCEDEV_MAX_SHA_DIGEST) {
pr_err("Invalid sha_ctxt.diglen %d\n",
handle->sha_ctxt.diglen);
+ mutex_unlock(&hash_access_lock);
return -EINVAL;
}
memcpy(&qcedev_areq.sha_op_req.digest[0],
&handle->sha_ctxt.digest[0],
handle->sha_ctxt.diglen);
+ mutex_unlock(&hash_access_lock);
if (copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
sizeof(struct qcedev_sha_op_req)))
return -EFAULT;
@@ -1764,16 +1781,22 @@
(void __user *)arg,
sizeof(struct qcedev_sha_op_req)))
return -EFAULT;
- if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
+ mutex_lock(&hash_access_lock);
+ if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev)) {
+ mutex_unlock(&hash_access_lock);
return -EINVAL;
+ }
qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
err = qcedev_hash_final(&qcedev_areq, handle);
- if (err)
+ if (err) {
+ mutex_unlock(&hash_access_lock);
return err;
+ }
qcedev_areq.sha_op_req.diglen = handle->sha_ctxt.diglen;
memcpy(&qcedev_areq.sha_op_req.digest[0],
&handle->sha_ctxt.digest[0],
handle->sha_ctxt.diglen);
+ mutex_unlock(&hash_access_lock);
if (copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
sizeof(struct qcedev_sha_op_req)))
return -EFAULT;
@@ -1788,20 +1811,28 @@
(void __user *)arg,
sizeof(struct qcedev_sha_op_req)))
return -EFAULT;
- if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
+ mutex_lock(&hash_access_lock);
+ if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev)) {
+ mutex_unlock(&hash_access_lock);
return -EINVAL;
+ }
qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
qcedev_hash_init(&qcedev_areq, handle, &sg_src);
err = qcedev_hash_update(&qcedev_areq, handle, &sg_src);
- if (err)
+ if (err) {
+ mutex_unlock(&hash_access_lock);
return err;
+ }
err = qcedev_hash_final(&qcedev_areq, handle);
- if (err)
+ if (err) {
+ mutex_unlock(&hash_access_lock);
return err;
+ }
qcedev_areq.sha_op_req.diglen = handle->sha_ctxt.diglen;
memcpy(&qcedev_areq.sha_op_req.digest[0],
&handle->sha_ctxt.digest[0],
handle->sha_ctxt.diglen);
+ mutex_unlock(&hash_access_lock);
if (copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
sizeof(struct qcedev_sha_op_req)))
return -EFAULT;
diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c
index b979fb9..b3269a6 100644
--- a/drivers/crypto/msm/qcrypto.c
+++ b/drivers/crypto/msm/qcrypto.c
@@ -265,7 +265,7 @@
preq->arsp = NULL;
/* free req */
if (xchg(&preq->in_use, false) == false)
- pr_warn("request info %p free already\n", preq);
+ pr_warn("request info %pK free already\n", preq);
else
atomic_dec(&pce->req_count);
}
@@ -1759,7 +1759,7 @@
}
#ifdef QCRYPTO_DEBUG
- dev_info(&pengine->pdev->dev, "_qce_ahash_complete: %p ret %d\n",
+ dev_info(&pengine->pdev->dev, "_qce_ahash_complete: %pK ret %d\n",
areq, ret);
#endif
if (digest) {
@@ -1818,7 +1818,7 @@
}
#ifdef QCRYPTO_DEBUG
- dev_info(&pengine->pdev->dev, "_qce_ablk_cipher_complete: %p ret %d\n",
+ dev_info(&pengine->pdev->dev, "_qce_ablk_cipher_complete: %pK ret %d\n",
areq, ret);
#endif
if (iv)
@@ -2520,7 +2520,7 @@
WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
CRYPTO_ALG_TYPE_ABLKCIPHER);
#ifdef QCRYPTO_DEBUG
- dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_ecb: %p\n", req);
+ dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_ecb: %pK\n", req);
#endif
if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
@@ -2550,7 +2550,7 @@
WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
CRYPTO_ALG_TYPE_ABLKCIPHER);
#ifdef QCRYPTO_DEBUG
- dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_cbc: %p\n", req);
+ dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_cbc: %pK\n", req);
#endif
if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
@@ -2580,7 +2580,7 @@
WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
CRYPTO_ALG_TYPE_ABLKCIPHER);
#ifdef QCRYPTO_DEBUG
- dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_ctr: %p\n", req);
+ dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_ctr: %pK\n", req);
#endif
if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
@@ -2768,7 +2768,7 @@
WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
CRYPTO_ALG_TYPE_ABLKCIPHER);
#ifdef QCRYPTO_DEBUG
- dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_ecb: %p\n", req);
+ dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_ecb: %pK\n", req);
#endif
if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
@@ -2798,7 +2798,7 @@
WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
CRYPTO_ALG_TYPE_ABLKCIPHER);
#ifdef QCRYPTO_DEBUG
- dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_cbc: %p\n", req);
+ dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_cbc: %pK\n", req);
#endif
if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
@@ -2828,7 +2828,7 @@
WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
CRYPTO_ALG_TYPE_ABLKCIPHER);
#ifdef QCRYPTO_DEBUG
- dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_ctr: %p\n", req);
+ dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_ctr: %pK\n", req);
#endif
if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
@@ -3394,7 +3394,7 @@
#ifdef QCRYPTO_DEBUG
dev_info(&ctx->pengine->pdev->dev,
- "_qcrypto_aead_encrypt_aes_cbc: %p\n", req);
+ "_qcrypto_aead_encrypt_aes_cbc: %pK\n", req);
#endif
rctx = aead_request_ctx(req);
@@ -3425,7 +3425,7 @@
#ifdef QCRYPTO_DEBUG
dev_info(&ctx->pengine->pdev->dev,
- "_qcrypto_aead_decrypt_aes_cbc: %p\n", req);
+ "_qcrypto_aead_decrypt_aes_cbc: %pK\n", req);
#endif
rctx = aead_request_ctx(req);
rctx->aead = 1;
@@ -4011,7 +4011,7 @@
unsigned int len)
{
struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
-
+ int ret = 0;
memset(&sha_ctx->authkey[0], 0, SHA1_BLOCK_SIZE);
if (len <= SHA1_BLOCK_SIZE) {
memcpy(&sha_ctx->authkey[0], key, len);
@@ -4019,16 +4019,19 @@
} else {
sha_ctx->alg = QCE_HASH_SHA1;
sha_ctx->diglen = SHA1_DIGEST_SIZE;
- _sha_hmac_setkey(tfm, key, len);
+ ret = _sha_hmac_setkey(tfm, key, len);
+ if (ret)
+ pr_err("SHA1 hmac setkey failed\n");
sha_ctx->authkey_in_len = SHA1_BLOCK_SIZE;
}
- return 0;
+ return ret;
}
static int _sha256_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int len)
{
struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
+ int ret = 0;
memset(&sha_ctx->authkey[0], 0, SHA256_BLOCK_SIZE);
if (len <= SHA256_BLOCK_SIZE) {
@@ -4037,11 +4040,13 @@
} else {
sha_ctx->alg = QCE_HASH_SHA256;
sha_ctx->diglen = SHA256_DIGEST_SIZE;
- _sha_hmac_setkey(tfm, key, len);
+ ret = _sha_hmac_setkey(tfm, key, len);
+ if (ret)
+ pr_err("SHA256 hmac setkey failed\n");
sha_ctx->authkey_in_len = SHA256_BLOCK_SIZE;
}
- return 0;
+ return ret;
}
static int _sha_hmac_init_ihash(struct ahash_request *req,
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 0418a2f..571de2f 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -816,7 +816,7 @@
* HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
*/
#define TALITOS_CRA_PRIORITY_AEAD_HSNA (TALITOS_CRA_PRIORITY - 1)
-#define TALITOS_MAX_KEY_SIZE 96
+#define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
struct talitos_ctx {
@@ -1495,6 +1495,11 @@
{
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ if (keylen > TALITOS_MAX_KEY_SIZE) {
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
memcpy(&ctx->key, key, keylen);
ctx->keylen = keylen;
diff --git a/drivers/devfreq/governor_msm_adreno_tz.c b/drivers/devfreq/governor_msm_adreno_tz.c
index e8bfff2..3c50c4e 100644
--- a/drivers/devfreq/governor_msm_adreno_tz.c
+++ b/drivers/devfreq/governor_msm_adreno_tz.c
@@ -236,7 +236,7 @@
{
unsigned int tz_ca_data[2];
struct scm_desc desc = {0};
- unsigned int *tz_buf;
+ u8 *tz_buf;
int ret;
/* Set data for TZ */
@@ -281,7 +281,7 @@
scm_is_call_available(SCM_SVC_DCVS, TZ_UPDATE_ID_64) &&
scm_is_call_available(SCM_SVC_DCVS, TZ_RESET_ID_64)) {
struct scm_desc desc = {0};
- unsigned int *tz_buf;
+ u8 *tz_buf;
if (!is_scm_armv8()) {
ret = scm_call(SCM_SVC_DCVS, TZ_INIT_ID_64,
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index e18dc59..6204cc3 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -251,8 +251,11 @@
*/
/* have we filled in period_length yet? */
- if (*total_len + control_block->length < period_len)
+ if (*total_len + control_block->length < period_len) {
+ /* update number of bytes in this period so far */
+ *total_len += control_block->length;
return;
+ }
/* calculate the length that remains to reach period_length */
control_block->length = period_len - *total_len;
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index 6e6f28f..3cc035c 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -2564,7 +2564,7 @@
static int gpi_smmu_init(struct gpi_dev *gpi_dev)
{
- u64 size = U64_MAX;
+ u64 size = PAGE_SIZE;
dma_addr_t base = 0x0;
struct dma_iommu_mapping *map;
int attr, ret;
diff --git a/drivers/edac/kryo3xx_arm64_edac.c b/drivers/edac/kryo3xx_arm64_edac.c
index 4ac880b..5ca93a6 100644
--- a/drivers/edac/kryo3xx_arm64_edac.c
+++ b/drivers/edac/kryo3xx_arm64_edac.c
@@ -16,6 +16,7 @@
#include <linux/platform_device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
@@ -62,7 +63,7 @@
static inline void set_errxmisc_overflow(void)
{
- u64 val = 0x7F7F00000000;
+ u64 val = 0x7F7F00000000ULL;
asm volatile("msr s3_0_c5_c5_0, %0" : : "r" (val));
}
@@ -118,12 +119,14 @@
#define DATA_BUF_ERR 0x2
#define CACHE_DATA_ERR 0x6
#define CACHE_TAG_DIRTY_ERR 0x7
-#define TLB_PARITY_ERR 0x8
-#define BUS_ERROR 0x18
+#define TLB_PARITY_ERR_DATA 0x8
+#define TLB_PARITY_ERR_TAG 0x9
+#define BUS_ERROR 0x12
struct erp_drvdata {
struct edac_device_ctl_info *edev_ctl;
struct erp_drvdata __percpu **erp_cpu_drvdata;
+ struct notifier_block nb_pm;
int ppi;
};
@@ -217,10 +220,13 @@
edac_printk(KERN_CRIT, EDAC_CPU, "ECC Error from cache tag or dirty RAM\n");
break;
- case TLB_PARITY_ERR:
+ case TLB_PARITY_ERR_DATA:
edac_printk(KERN_CRIT, EDAC_CPU, "Parity error on TLB RAM\n");
break;
+ case TLB_PARITY_ERR_TAG:
+ edac_printk(KERN_CRIT, EDAC_CPU, "Parity error on TLB DATA\n");
+
case BUS_ERROR:
edac_printk(KERN_CRIT, EDAC_CPU, "Bus Error\n");
break;
@@ -283,6 +289,16 @@
spin_unlock_irqrestore(&local_handler_lock, flags);
}
+static bool l3_is_bus_error(u64 errxstatus)
+{
+ if (KRYO3XX_ERRXSTATUS_SERR(errxstatus) == BUS_ERROR) {
+ edac_printk(KERN_CRIT, EDAC_CPU, "Bus Error\n");
+ return true;
+ }
+
+ return false;
+}
+
static void kryo3xx_check_l3_scu_error(struct edac_device_ctl_info *edev_ctl)
{
u64 errxstatus = 0;
@@ -296,6 +312,11 @@
if (KRYO3XX_ERRXSTATUS_VALID(errxstatus) &&
KRYO3XX_ERRXMISC_LVL(errxmisc) == L3) {
+ if (l3_is_bus_error(errxstatus)) {
+ if (edev_ctl->panic_on_ue)
+ panic("Causing panic due to Bus Error\n");
+ return;
+ }
if (KRYO3XX_ERRXSTATUS_UE(errxstatus)) {
edac_printk(KERN_CRIT, EDAC_CPU, "Detected L3 uncorrectable error\n");
dump_err_reg(KRYO3XX_L3_UE, L3, errxstatus, errxmisc,
@@ -339,6 +360,19 @@
return IRQ_HANDLED;
}
+static int kryo3xx_pmu_cpu_pm_notify(struct notifier_block *self,
+ unsigned long action, void *v)
+{
+ switch (action) {
+ case CPU_PM_EXIT:
+ kryo3xx_check_l3_scu_error(panic_handler_drvdata->edev_ctl);
+ kryo3xx_check_l1_l2_ecc(panic_handler_drvdata->edev_ctl);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
static void initialize_registers(void *info)
{
set_errxctlr_el1();
@@ -381,6 +415,7 @@
drv->edev_ctl->ctl_name = "cache";
drv->edev_ctl->panic_on_ce = ARM64_ERP_PANIC_ON_CE;
drv->edev_ctl->panic_on_ue = ARM64_ERP_PANIC_ON_UE;
+ drv->nb_pm.notifier_call = kryo3xx_pmu_cpu_pm_notify;
platform_set_drvdata(pdev, drv);
rc = edac_device_add_device(drv->edev_ctl);
@@ -405,6 +440,8 @@
goto out_dev;
}
+ cpu_pm_register_notifier(&(drv->nb_pm));
+
return 0;
out_dev:
diff --git a/drivers/edac/qcom_llcc_edac.c b/drivers/edac/qcom_llcc_edac.c
index a8ec359..4b89cbf 100644
--- a/drivers/edac/qcom_llcc_edac.c
+++ b/drivers/edac/qcom_llcc_edac.c
@@ -291,7 +291,7 @@
qcom_llcc_clear_errors(err_type, drv);
- errors[err_type].func(edev_ctl, 0, 0, errors[err_type].msg);
+ errors[err_type].func(edev_ctl, 0, bank, errors[err_type].msg);
}
static void qcom_llcc_check_cache_errors
@@ -353,10 +353,26 @@
struct erp_drvdata *drv;
struct edac_device_ctl_info *edev_ctl;
struct device *dev = &pdev->dev;
+ u32 num_banks;
+ struct regmap *llcc_map = NULL;
+
+ llcc_map = syscon_node_to_regmap(dev->parent->of_node);
+ if (IS_ERR(llcc_map)) {
+ dev_err(dev, "no regmap for syscon llcc parent\n");
+ return -ENOMEM;
+ }
+
+ /* Find the number of LLC banks supported */
+ regmap_read(llcc_map, LLCC_COMMON_STATUS0,
+ &num_banks);
+
+ num_banks &= LLCC_LB_CNT_MASK;
+ num_banks >>= LLCC_LB_CNT_SHIFT;
/* Allocate edac control info */
edev_ctl = edac_device_alloc_ctl_info(sizeof(*drv), "qcom-llcc", 1,
- NULL, 0, 1, NULL, 0, edac_device_alloc_index());
+ "bank", num_banks, 1, NULL, 0,
+ edac_device_alloc_index());
if (!edev_ctl)
return -ENOMEM;
@@ -374,64 +390,59 @@
edev_ctl->panic_on_ue = LLCC_ERP_PANIC_ON_UE;
drv = edev_ctl->pvt_info;
+ drv->num_banks = num_banks;
+ drv->llcc_map = llcc_map;
- drv->llcc_map = syscon_node_to_regmap(dev->parent->of_node);
- if (IS_ERR(drv->llcc_map)) {
- dev_err(dev, "no regmap for syscon llcc parent\n");
- rc = -ENOMEM;
- goto out;
- }
+ rc = edac_device_add_device(edev_ctl);
+ if (rc)
+ goto out_mem;
if (interrupt_mode) {
drv->ecc_irq = platform_get_irq_byname(pdev, "ecc_irq");
if (!drv->ecc_irq) {
rc = -ENODEV;
- goto out;
+ goto out_dev;
}
rc = devm_request_irq(dev, drv->ecc_irq, llcc_ecc_irq_handler,
IRQF_TRIGGER_HIGH, "llcc_ecc", edev_ctl);
if (rc) {
dev_err(dev, "failed to request ecc irq\n");
- goto out;
+ goto out_dev;
}
}
- /* Find the number of LLC banks supported */
- regmap_read(drv->llcc_map, LLCC_COMMON_STATUS0,
- &drv->num_banks);
-
- drv->num_banks &= LLCC_LB_CNT_MASK;
- drv->num_banks >>= LLCC_LB_CNT_SHIFT;
-
drv->llcc_banks = devm_kzalloc(&pdev->dev,
sizeof(u32) * drv->num_banks, GFP_KERNEL);
if (!drv->llcc_banks) {
dev_err(dev, "Cannot allocate memory for llcc_banks\n");
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto out_dev;
}
rc = of_property_read_u32_array(dev->parent->of_node,
"qcom,llcc-banks-off", drv->llcc_banks, drv->num_banks);
if (rc) {
dev_err(dev, "Cannot read llcc-banks-off property\n");
- return -EINVAL;
+ goto out_dev;
}
rc = of_property_read_u32(dev->parent->of_node,
"qcom,llcc-broadcast-off", &drv->b_off);
if (rc) {
dev_err(dev, "Cannot read llcc-broadcast-off property\n");
- return -EINVAL;
+ goto out_dev;
}
platform_set_drvdata(pdev, edev_ctl);
- rc = edac_device_add_device(edev_ctl);
-out:
- if (rc)
- edac_device_free_ctl_info(edev_ctl);
+ return 0;
+
+out_dev:
+ edac_device_del_device(edev_ctl->dev);
+out_mem:
+ edac_device_free_ctl_info(edev_ctl);
return rc;
}
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index 7c1e3a7..0e1d428 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -482,6 +482,21 @@
}
EXPORT_SYMBOL_GPL(extcon_sync);
+int extcon_blocking_sync(struct extcon_dev *edev, unsigned int id, bool val)
+{
+ int index;
+
+ if (!edev)
+ return -EINVAL;
+
+ index = find_cable_index_by_id(edev, id);
+ if (index < 0)
+ return index;
+
+ return blocking_notifier_call_chain(&edev->bnh[index], val, edev);
+}
+EXPORT_SYMBOL(extcon_blocking_sync);
+
/**
* extcon_get_state() - Get the state of a external connector.
* @edev: the extcon device that has the cable.
@@ -940,6 +955,38 @@
}
EXPORT_SYMBOL_GPL(extcon_register_notifier);
+int extcon_register_blocking_notifier(struct extcon_dev *edev, unsigned int id,
+ struct notifier_block *nb)
+{
+ int idx = -EINVAL;
+
+ if (!edev || !nb)
+ return -EINVAL;
+
+ idx = find_cable_index_by_id(edev, id);
+ if (idx < 0)
+ return idx;
+
+ return blocking_notifier_chain_register(&edev->bnh[idx], nb);
+}
+EXPORT_SYMBOL(extcon_register_blocking_notifier);
+
+int extcon_unregister_blocking_notifier(struct extcon_dev *edev,
+ unsigned int id, struct notifier_block *nb)
+{
+ int idx;
+
+ if (!edev || !nb)
+ return -EINVAL;
+
+ idx = find_cable_index_by_id(edev, id);
+ if (idx < 0)
+ return idx;
+
+ return blocking_notifier_chain_unregister(&edev->bnh[idx], nb);
+}
+EXPORT_SYMBOL(extcon_unregister_blocking_notifier);
+
/**
* extcon_unregister_notifier() - Unregister a notifiee from the extcon device.
* @edev: the extcon device that has the external connecotr.
@@ -1222,6 +1269,13 @@
goto err_dev;
}
+ edev->bnh = devm_kzalloc(&edev->dev,
+ sizeof(*edev->bnh) * edev->max_supported, GFP_KERNEL);
+ if (!edev->bnh) {
+ ret = -ENOMEM;
+ goto err_dev;
+ }
+
for (index = 0; index < edev->max_supported; index++)
RAW_INIT_NOTIFIER_HEAD(&edev->nh[index]);
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index 5e23e2d..f9a1e98 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -17,6 +17,7 @@
cflags-$(CONFIG_EFI_ARMSTUB) += -I$(srctree)/scripts/dtc/libfdt
KBUILD_CFLAGS := $(cflags-y) -DDISABLE_BRANCH_PROFILING \
+ -D__NO_FORTIFY \
$(call cc-option,-ffreestanding) \
$(call cc-option,-fno-stack-protector)
diff --git a/drivers/firmware/qcom/tz_log.c b/drivers/firmware/qcom/tz_log.c
index 1b51d08..9c1c81b 100644
--- a/drivers/firmware/qcom/tz_log.c
+++ b/drivers/firmware/qcom/tz_log.c
@@ -477,10 +477,10 @@
static int _disp_tz_interrupt_stats(void)
{
- int i, j, int_info_size;
+ int i, j;
int len = 0;
int *num_int;
- unsigned char *ptr;
+ void *ptr;
struct tzdbg_int_t *tzdbg_ptr;
struct tzdbg_int_t_tz40 *tzdbg_ptr_tz40;
@@ -488,14 +488,12 @@
(tzdbg.diag_buf->int_info_off - sizeof(uint32_t)));
ptr = ((unsigned char *)tzdbg.diag_buf +
tzdbg.diag_buf->int_info_off);
- int_info_size = ((tzdbg.diag_buf->ring_off -
- tzdbg.diag_buf->int_info_off)/(*num_int));
pr_info("qsee_version = 0x%x\n", tzdbg.tz_version);
if (tzdbg.tz_version < QSEE_VERSION_TZ_4_X) {
+ tzdbg_ptr = ptr;
for (i = 0; i < (*num_int); i++) {
- tzdbg_ptr = (struct tzdbg_int_t *)ptr;
len += snprintf(tzdbg.disp_buf + len,
(debug_rw_buf_size - 1) - len,
" Interrupt Number : 0x%x\n"
@@ -519,11 +517,11 @@
__func__);
break;
}
- ptr += int_info_size;
+ tzdbg_ptr++;
}
} else {
+ tzdbg_ptr_tz40 = ptr;
for (i = 0; i < (*num_int); i++) {
- tzdbg_ptr_tz40 = (struct tzdbg_int_t_tz40 *)ptr;
len += snprintf(tzdbg.disp_buf + len,
(debug_rw_buf_size - 1) - len,
" Interrupt Number : 0x%x\n"
@@ -547,7 +545,7 @@
__func__);
break;
}
- ptr += int_info_size;
+ tzdbg_ptr_tz40++;
}
}
@@ -962,7 +960,7 @@
for (i = 0; i < TZDBG_STATS_MAX; i++) {
tzdbg.debug_tz[i] = i;
- dent = debugfs_create_file(tzdbg.stat[i].name,
+ dent = debugfs_create_file_unsafe(tzdbg.stat[i].name,
0444, dent_dir,
&tzdbg.debug_tz[i], &tzdbg_fops);
if (dent == NULL) {
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 9215931..f2bb512 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -707,7 +707,8 @@
ge.timestamp = ktime_get_real_ns();
- if (le->eflags & GPIOEVENT_REQUEST_BOTH_EDGES) {
+ if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
+ && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
int level = gpiod_get_value_cansleep(le->desc);
if (level)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 8e6bf54..f8fdbd1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -693,6 +693,10 @@
DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",
adev->clock.default_dispclk / 100);
adev->clock.default_dispclk = 60000;
+ } else if (adev->clock.default_dispclk <= 60000) {
+ DRM_INFO("Changing default dispclk from %dMhz to 625Mhz\n",
+ adev->clock.default_dispclk / 100);
+ adev->clock.default_dispclk = 62500;
}
adev->clock.dp_extclk =
le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 82dc8d2..bfb4b91 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -83,6 +83,13 @@
}
break;
}
+
+ if (!(*out_ring && (*out_ring)->adev)) {
+ DRM_ERROR("Ring %d is not initialized on IP %d\n",
+ ring, ip_type);
+ return -EINVAL;
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index dcaf691..264899d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1419,6 +1419,9 @@
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
+ if (*pos >= adev->mc.mc_vram_size)
+ return -ENXIO;
+
while (size) {
unsigned long flags;
uint32_t value;
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
index f7d236f..57fbde1 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
@@ -164,7 +164,7 @@
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
- ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
+ ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;
memset(&args, 0, sizeof(args));
@@ -177,7 +177,7 @@
void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev)
{
int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
- ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
+ ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;
memset(&args, 0, sizeof(args));
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 42448c7..db9b79a 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1290,8 +1290,11 @@
u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
- line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+ active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+ (u32)mode->clock);
+ line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+ (u32)mode->clock);
+ line_time = min(line_time, (u32)65535);
/* watermark for high clocks */
if (adev->pm.dpm_enabled) {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 904dabd..36d5128 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -1257,8 +1257,11 @@
u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
- line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+ active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+ (u32)mode->clock);
+ line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+ (u32)mode->clock);
+ line_time = min(line_time, (u32)65535);
/* watermark for high clocks */
if (adev->pm.dpm_enabled) {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 6d02bdb..75689a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -1054,8 +1054,11 @@
fixed20_12 a, b, c;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
- line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+ active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+ (u32)mode->clock);
+ line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+ (u32)mode->clock);
+ line_time = min(line_time, (u32)65535);
priority_a_cnt = 0;
priority_b_cnt = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index b1fb601..ba2321e 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1211,8 +1211,11 @@
u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
- line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+ active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+ (u32)mode->clock);
+ line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+ (u32)mode->clock);
+ line_time = min(line_time, (u32)65535);
/* watermark for high clocks */
if (adev->pm.dpm_enabled) {
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 6f3c891..4cb347e 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -64,6 +64,7 @@
MODULE_FIRMWARE("radeon/oland_k_smc.bin");
MODULE_FIRMWARE("radeon/hainan_smc.bin");
MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
+MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
union power_info {
struct _ATOM_POWERPLAY_INFO info;
@@ -7721,10 +7722,11 @@
((adev->pdev->device == 0x6660) ||
(adev->pdev->device == 0x6663) ||
(adev->pdev->device == 0x6665) ||
- (adev->pdev->device == 0x6667))) ||
- ((adev->pdev->revision == 0xc3) &&
- (adev->pdev->device == 0x6665)))
+ (adev->pdev->device == 0x6667))))
chip_name = "hainan_k";
+ else if ((adev->pdev->revision == 0xc3) &&
+ (adev->pdev->device == 0x6665))
+ chip_name = "banks_k_2";
else
chip_name = "hainan";
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 6feed72..50f0cf2 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -43,9 +43,13 @@
#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04
#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10
+#define GRBM_GFX_INDEX__VCE_ALL_PIPE 0x07
+
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618
+#define mmGRBM_GFX_INDEX_DEFAULT 0xE0000000
+
#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02
#define VCE_V3_0_FW_SIZE (384 * 1024)
@@ -54,6 +58,9 @@
#define FW_52_8_3 ((52 << 24) | (8 << 16) | (3 << 8))
+#define GET_VCE_INSTANCE(i) ((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT \
+ | GRBM_GFX_INDEX__VCE_ALL_PIPE)
+
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -249,7 +256,7 @@
if (adev->vce.harvest_config & (1 << idx))
continue;
- WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
+ WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
vce_v3_0_mc_resume(adev, idx);
WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);
@@ -273,7 +280,7 @@
}
}
- WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
+ WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
mutex_unlock(&adev->grbm_idx_mutex);
return 0;
@@ -288,7 +295,7 @@
if (adev->vce.harvest_config & (1 << idx))
continue;
- WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
+ WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
if (adev->asic_type >= CHIP_STONEY)
WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
@@ -306,7 +313,7 @@
vce_v3_0_set_vce_sw_clock_gating(adev, false);
}
- WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
+ WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
mutex_unlock(&adev->grbm_idx_mutex);
return 0;
@@ -586,17 +593,17 @@
* VCE team suggest use bit 3--bit 6 for busy status check
*/
mutex_lock(&adev->grbm_idx_mutex);
- WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
+ WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
}
- WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10);
+ WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
}
- WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
+ WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
mutex_unlock(&adev->grbm_idx_mutex);
if (srbm_soft_reset) {
@@ -734,7 +741,7 @@
if (adev->vce.harvest_config & (1 << i))
continue;
- WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i);
+ WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(i));
if (enable) {
/* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
@@ -753,7 +760,7 @@
vce_v3_0_set_vce_sw_clock_gating(adev, enable);
}
- WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
+ WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
mutex_unlock(&adev->grbm_idx_mutex);
return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
index 2028980..5b261c1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
@@ -200,7 +200,7 @@
cgs_set_clockgating_state(
hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
- AMD_CG_STATE_UNGATE);
+ AMD_CG_STATE_GATE);
cgs_set_powergating_state(
hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
@@ -218,7 +218,7 @@
cgs_set_clockgating_state(
hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_GATE);
+ AMD_PG_STATE_UNGATE);
cz_dpm_update_vce_dpm(hwmgr);
cz_enable_disable_vce_dpm(hwmgr, true);
return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 9604249..189ec94 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -1402,14 +1402,22 @@
cz_hwmgr->vce_dpm.hard_min_clk,
PPSMC_MSG_SetEclkHardMin));
} else {
- /*EPR# 419220 -HW limitation to to */
- cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetEclkHardMin,
- cz_get_eclk_level(hwmgr,
- cz_hwmgr->vce_dpm.hard_min_clk,
- PPSMC_MSG_SetEclkHardMin));
-
+ /*Program HardMin based on the vce_arbiter.ecclk */
+ if (hwmgr->vce_arbiter.ecclk == 0) {
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetEclkHardMin, 0);
+ /* disable ECLK DPM 0. Otherwise VCE could hang if
+ * switching SCLK from DPM 0 to 6/7 */
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetEclkSoftMin, 1);
+ } else {
+ cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetEclkHardMin,
+ cz_get_eclk_level(hwmgr,
+ cz_hwmgr->vce_dpm.hard_min_clk,
+ PPSMC_MSG_SetEclkHardMin));
+ }
}
return 0;
}
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 908011d..3bedcf7 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -113,6 +113,11 @@
struct ttm_bo_kmap_obj cache_kmap;
int next_cursor;
bool support_wide_screen;
+ enum {
+ ast_use_p2a,
+ ast_use_dt,
+ ast_use_defaults
+ } config_mode;
enum ast_tx_chip tx_chip_type;
u8 dp501_maxclk;
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index f75c642..fb99762 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -62,13 +62,84 @@
return ret;
}
+static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev)
+{
+ struct device_node *np = dev->pdev->dev.of_node;
+ struct ast_private *ast = dev->dev_private;
+ uint32_t data, jregd0, jregd1;
+
+ /* Defaults */
+ ast->config_mode = ast_use_defaults;
+ *scu_rev = 0xffffffff;
+
+ /* Check if we have device-tree properties */
+ if (np && !of_property_read_u32(np, "aspeed,scu-revision-id",
+ scu_rev)) {
+ /* We do, disable P2A access */
+ ast->config_mode = ast_use_dt;
+ DRM_INFO("Using device-tree for configuration\n");
+ return;
+ }
+
+ /* Not all families have a P2A bridge */
+ if (dev->pdev->device != PCI_CHIP_AST2000)
+ return;
+
+ /*
+ * The BMC will set SCU 0x40 D[12] to 1 if the P2 bridge
+ * is disabled. We force using P2A if VGA only mode bit
+ * is set D[7]
+ */
+ jregd0 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+ jregd1 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
+ if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) {
+ /* Double check it's actually working */
+ data = ast_read32(ast, 0xf004);
+ if (data != 0xFFFFFFFF) {
+ /* P2A works, grab silicon revision */
+ ast->config_mode = ast_use_p2a;
+
+ DRM_INFO("Using P2A bridge for configuration\n");
+
+ /* Read SCU7c (silicon revision register) */
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ *scu_rev = ast_read32(ast, 0x1207c);
+ return;
+ }
+ }
+
+ /* We have a P2A bridge but it's disabled */
+ DRM_INFO("P2A bridge disabled, using default configuration\n");
+}
static int ast_detect_chip(struct drm_device *dev, bool *need_post)
{
struct ast_private *ast = dev->dev_private;
- uint32_t data, jreg;
+ uint32_t jreg, scu_rev;
+
+ /*
+ * If VGA isn't enabled, we need to enable now or subsequent
+ * access to the scratch registers will fail. We also inform
+ * our caller that it needs to POST the chip
+ * (Assumption: VGA not enabled -> need to POST)
+ */
+ if (!ast_is_vga_enabled(dev)) {
+ ast_enable_vga(dev);
+ DRM_INFO("VGA not enabled on entry, requesting chip POST\n");
+ *need_post = true;
+ } else
+ *need_post = false;
+
+
+ /* Enable extended register access */
+ ast_enable_mmio(dev);
ast_open_key(ast);
+ /* Find out whether P2A works or whether to use device-tree */
+ ast_detect_config_mode(dev, &scu_rev);
+
+ /* Identify chipset */
if (dev->pdev->device == PCI_CHIP_AST1180) {
ast->chip = AST1100;
DRM_INFO("AST 1180 detected\n");
@@ -80,12 +151,7 @@
ast->chip = AST2300;
DRM_INFO("AST 2300 detected\n");
} else if (dev->pdev->revision >= 0x10) {
- uint32_t data;
- ast_write32(ast, 0xf004, 0x1e6e0000);
- ast_write32(ast, 0xf000, 0x1);
-
- data = ast_read32(ast, 0x1207c);
- switch (data & 0x0300) {
+ switch (scu_rev & 0x0300) {
case 0x0200:
ast->chip = AST1100;
DRM_INFO("AST 1100 detected\n");
@@ -110,20 +176,6 @@
}
}
- /*
- * If VGA isn't enabled, we need to enable now or subsequent
- * access to the scratch registers will fail. We also inform
- * our caller that it needs to POST the chip
- * (Assumption: VGA not enabled -> need to POST)
- */
- if (!ast_is_vga_enabled(dev)) {
- ast_enable_vga(dev);
- ast_enable_mmio(dev);
- DRM_INFO("VGA not enabled on entry, requesting chip POST\n");
- *need_post = true;
- } else
- *need_post = false;
-
/* Check if we support wide screen */
switch (ast->chip) {
case AST1180:
@@ -140,14 +192,11 @@
ast->support_wide_screen = true;
else {
ast->support_wide_screen = false;
- /* Read SCU7c (silicon revision register) */
- ast_write32(ast, 0xf004, 0x1e6e0000);
- ast_write32(ast, 0xf000, 0x1);
- data = ast_read32(ast, 0x1207c);
- data &= 0x300;
- if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
+ if (ast->chip == AST2300 &&
+ (scu_rev & 0x300) == 0x0) /* ast1300 */
ast->support_wide_screen = true;
- if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
+ if (ast->chip == AST2400 &&
+ (scu_rev & 0x300) == 0x100) /* ast1400 */
ast->support_wide_screen = true;
}
break;
@@ -212,29 +261,49 @@
static int ast_get_dram_info(struct drm_device *dev)
{
+ struct device_node *np = dev->pdev->dev.of_node;
struct ast_private *ast = dev->dev_private;
- uint32_t data, data2;
- uint32_t denum, num, div, ref_pll;
+ uint32_t mcr_cfg, mcr_scu_mpll, mcr_scu_strap;
+ uint32_t denum, num, div, ref_pll, dsel;
- ast_write32(ast, 0xf004, 0x1e6e0000);
- ast_write32(ast, 0xf000, 0x1);
+ switch (ast->config_mode) {
+ case ast_use_dt:
+ /*
+ * If some properties are missing, use reasonable
+ * defaults for AST2400
+ */
+ if (of_property_read_u32(np, "aspeed,mcr-configuration",
+ &mcr_cfg))
+ mcr_cfg = 0x00000577;
+ if (of_property_read_u32(np, "aspeed,mcr-scu-mpll",
+ &mcr_scu_mpll))
+ mcr_scu_mpll = 0x000050C0;
+ if (of_property_read_u32(np, "aspeed,mcr-scu-strap",
+ &mcr_scu_strap))
+ mcr_scu_strap = 0;
+ break;
+ case ast_use_p2a:
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ mcr_cfg = ast_read32(ast, 0x10004);
+ mcr_scu_mpll = ast_read32(ast, 0x10120);
+ mcr_scu_strap = ast_read32(ast, 0x10170);
+ break;
+ case ast_use_defaults:
+ default:
+ ast->dram_bus_width = 16;
+ ast->dram_type = AST_DRAM_1Gx16;
+ ast->mclk = 396;
+ return 0;
+ }
-
- ast_write32(ast, 0x10000, 0xfc600309);
-
- do {
- if (pci_channel_offline(dev->pdev))
- return -EIO;
- } while (ast_read32(ast, 0x10000) != 0x01);
- data = ast_read32(ast, 0x10004);
-
- if (data & 0x40)
+ if (mcr_cfg & 0x40)
ast->dram_bus_width = 16;
else
ast->dram_bus_width = 32;
if (ast->chip == AST2300 || ast->chip == AST2400) {
- switch (data & 0x03) {
+ switch (mcr_cfg & 0x03) {
case 0:
ast->dram_type = AST_DRAM_512Mx16;
break;
@@ -250,13 +319,13 @@
break;
}
} else {
- switch (data & 0x0c) {
+ switch (mcr_cfg & 0x0c) {
case 0:
case 4:
ast->dram_type = AST_DRAM_512Mx16;
break;
case 8:
- if (data & 0x40)
+ if (mcr_cfg & 0x40)
ast->dram_type = AST_DRAM_1Gx16;
else
ast->dram_type = AST_DRAM_512Mx32;
@@ -267,17 +336,15 @@
}
}
- data = ast_read32(ast, 0x10120);
- data2 = ast_read32(ast, 0x10170);
- if (data2 & 0x2000)
+ if (mcr_scu_strap & 0x2000)
ref_pll = 14318;
else
ref_pll = 12000;
- denum = data & 0x1f;
- num = (data & 0x3fe0) >> 5;
- data = (data & 0xc000) >> 14;
- switch (data) {
+ denum = mcr_scu_mpll & 0x1f;
+ num = (mcr_scu_mpll & 0x3fe0) >> 5;
+ dsel = (mcr_scu_mpll & 0xc000) >> 14;
+ switch (dsel) {
case 3:
div = 0x4;
break;
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 30672a3d..c7c58be 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -375,12 +375,17 @@
ast_enable_mmio(dev);
ast_set_def_ext_reg(dev);
- if (ast->chip == AST2300 || ast->chip == AST2400)
- ast_init_dram_2300(dev);
- else
- ast_init_dram_reg(dev);
+ if (ast->config_mode == ast_use_p2a) {
+ if (ast->chip == AST2300 || ast->chip == AST2400)
+ ast_init_dram_2300(dev);
+ else
+ ast_init_dram_reg(dev);
- ast_init_3rdtx(dev);
+ ast_init_3rdtx(dev);
+ } else {
+ if (ast->tx_chip_type != AST_TX_NONE)
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */
+ }
}
/* AST 2300 DRAM settings */
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 2db7fb5..0e934a9 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -225,6 +225,7 @@
INIT_LIST_HEAD(&connector->probed_modes);
INIT_LIST_HEAD(&connector->modes);
+ mutex_init(&connector->mutex);
connector->edid_blob_ptr = NULL;
connector->status = connector_status_unknown;
@@ -359,6 +360,8 @@
connector->funcs->atomic_destroy_state(connector,
connector->state);
+ mutex_destroy(&connector->mutex);
+
memset(connector, 0, sizeof(*connector));
}
EXPORT_SYMBOL(drm_connector_cleanup);
@@ -374,14 +377,18 @@
*/
int drm_connector_register(struct drm_connector *connector)
{
- int ret;
+ int ret = 0;
- if (connector->registered)
+ if (!connector->dev->registered)
return 0;
+ mutex_lock(&connector->mutex);
+ if (connector->registered)
+ goto unlock;
+
ret = drm_sysfs_connector_add(connector);
if (ret)
- return ret;
+ goto unlock;
ret = drm_debugfs_connector_add(connector);
if (ret) {
@@ -397,12 +404,14 @@
drm_mode_object_register(connector->dev, &connector->base);
connector->registered = true;
- return 0;
+ goto unlock;
err_debugfs:
drm_debugfs_connector_remove(connector);
err_sysfs:
drm_sysfs_connector_remove(connector);
+unlock:
+ mutex_unlock(&connector->mutex);
return ret;
}
EXPORT_SYMBOL(drm_connector_register);
@@ -415,8 +424,11 @@
*/
void drm_connector_unregister(struct drm_connector *connector)
{
- if (!connector->registered)
+ mutex_lock(&connector->mutex);
+ if (!connector->registered) {
+ mutex_unlock(&connector->mutex);
return;
+ }
if (connector->funcs->early_unregister)
connector->funcs->early_unregister(connector);
@@ -425,6 +437,7 @@
drm_debugfs_connector_remove(connector);
connector->registered = false;
+ mutex_unlock(&connector->mutex);
}
EXPORT_SYMBOL(drm_connector_unregister);
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index f59771d..db7890c 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -330,6 +330,13 @@
return false;
}
+ /*
+ * ignore out-of-order messages or messages that are part of a
+ * failed transaction
+ */
+ if (!recv_hdr.somt && !msg->have_somt)
+ return false;
+
/* get length contained in this portion */
msg->curchunk_len = recv_hdr.msg_len;
msg->curchunk_hdrlen = hdrlen;
@@ -2168,7 +2175,7 @@
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
-static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
+static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
{
int len;
u8 replyblock[32];
@@ -2183,12 +2190,12 @@
replyblock, len);
if (ret != len) {
DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
- return;
+ return false;
}
ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
if (!ret) {
DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
- return;
+ return false;
}
replylen = msg->curchunk_len + msg->curchunk_hdrlen;
@@ -2200,21 +2207,32 @@
ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
replyblock, len);
if (ret != len) {
- DRM_DEBUG_KMS("failed to read a chunk\n");
+ DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
+ len, ret);
+ return false;
}
+
ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
- if (ret == false)
+ if (!ret) {
DRM_DEBUG_KMS("failed to build sideband msg\n");
+ return false;
+ }
+
curreply += len;
replylen -= len;
}
+ return true;
}
static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
{
int ret = 0;
- drm_dp_get_one_sb_msg(mgr, false);
+ if (!drm_dp_get_one_sb_msg(mgr, false)) {
+ memset(&mgr->down_rep_recv, 0,
+ sizeof(struct drm_dp_sideband_msg_rx));
+ return 0;
+ }
if (mgr->down_rep_recv.have_eomt) {
struct drm_dp_sideband_msg_tx *txmsg;
@@ -2270,7 +2288,12 @@
static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
{
int ret = 0;
- drm_dp_get_one_sb_msg(mgr, true);
+
+ if (!drm_dp_get_one_sb_msg(mgr, true)) {
+ memset(&mgr->up_req_recv, 0,
+ sizeof(struct drm_dp_sideband_msg_rx));
+ return 0;
+ }
if (mgr->up_req_recv.have_eomt) {
struct drm_dp_sideband_msg_req_body msg;
@@ -2322,7 +2345,9 @@
DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
}
- drm_dp_put_mst_branch_device(mstb);
+ if (mstb)
+ drm_dp_put_mst_branch_device(mstb);
+
memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
}
return ret;
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 0f2fa90..362b8cd 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -710,6 +710,8 @@
if (ret)
goto err_minors;
+ dev->registered = true;
+
if (dev->driver->load) {
ret = dev->driver->load(dev, flags);
if (ret)
@@ -749,6 +751,8 @@
drm_lastclose(dev);
+ dev->registered = false;
+
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_modeset_unregister_all(dev);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index 169ac96..fe0e85b 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -116,9 +116,14 @@
struct list_head list;
bool found;
+ /*
+ * XXX: The DRM_MM_SEARCH_BELOW is really a hack to trick
+ * drm_mm into giving out a low IOVA after address space
+ * rollover. This needs a proper fix.
+ */
ret = drm_mm_insert_node_in_range(&mmu->mm, node,
size, 0, mmu->last_iova, ~0UL,
- DRM_MM_SEARCH_DEFAULT);
+ mmu->last_iova ? DRM_MM_SEARCH_DEFAULT : DRM_MM_SEARCH_BELOW);
if (ret != -ENOSPC)
break;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ca6efb6..7513e76 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1199,6 +1199,15 @@
goto out_free_priv;
pci_set_drvdata(pdev, &dev_priv->drm);
+ /*
+ * Disable the system suspend direct complete optimization, which can
+ * leave the device suspended skipping the driver's suspend handlers
+ * if the device was already runtime suspended. This is needed due to
+ * the difference in our runtime and system suspend sequence and
+ * becaue the HDA driver may require us to enable the audio power
+ * domain during system suspend.
+ */
+ pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
ret = i915_driver_init_early(dev_priv, ent);
if (ret < 0)
diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h
index c0cb297..2cfe96d3 100644
--- a/drivers/gpu/drm/i915/i915_pvinfo.h
+++ b/drivers/gpu/drm/i915/i915_pvinfo.h
@@ -36,10 +36,6 @@
#define VGT_VERSION_MAJOR 1
#define VGT_VERSION_MINOR 0
-#define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor))
-#define INTEL_VGT_IF_VERSION \
- INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR)
-
/*
* notifications from guest to vgpu device model
*/
@@ -55,8 +51,8 @@
struct vgt_if {
u64 magic; /* VGT_MAGIC */
- uint16_t version_major;
- uint16_t version_minor;
+ u16 version_major;
+ u16 version_minor;
u32 vgt_id; /* ID of vGT instance */
u32 rsv1[12]; /* pad to offset 0x40 */
/*
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index dae340c..125adcc 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -60,8 +60,8 @@
*/
void i915_check_vgpu(struct drm_i915_private *dev_priv)
{
- uint64_t magic;
- uint32_t version;
+ u64 magic;
+ u16 version_major;
BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
@@ -69,10 +69,8 @@
if (magic != VGT_MAGIC)
return;
- version = INTEL_VGT_IF_VERSION_ENCODE(
- __raw_i915_read16(dev_priv, vgtif_reg(version_major)),
- __raw_i915_read16(dev_priv, vgtif_reg(version_minor)));
- if (version != INTEL_VGT_IF_VERSION) {
+ version_major = __raw_i915_read16(dev_priv, vgtif_reg(version_major));
+ if (version_major < VGT_VERSION_MAJOR) {
DRM_INFO("VGT interface version mismatch!\n");
return;
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 5dc6082..f8efd20 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2253,6 +2253,9 @@
intel_fill_fb_ggtt_view(&view, fb, rotation);
vma = i915_gem_object_to_ggtt(obj, &view);
+ if (WARN_ON_ONCE(!vma))
+ return;
+
i915_vma_unpin_fence(vma);
i915_gem_object_unpin_from_display_plane(vma);
}
@@ -13764,6 +13767,15 @@
* type. For DP ports it behaves like most other platforms, but on HDMI
* there's an extra 1 line difference. So we need to add two instead of
* one to the value.
+ *
+ * On VLV/CHV DSI the scanline counter would appear to increment
+ * approx. 1/3 of a scanline before start of vblank. Unfortunately
+ * that means we can't tell whether we're in vblank or not while
+ * we're on that particular line. We must still set scanline_offset
+ * to 1 so that the vblank timestamps come out correct when we query
+ * the scanline counter from within the vblank interrupt handler.
+ * However if queried just before the start of vblank we'll get an
+ * answer that's slightly in the future.
*/
if (IS_GEN2(dev)) {
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 2c6d59d..49de476 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4114,11 +4114,19 @@
struct drm_crtc_state *cstate;
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct skl_wm_values *results = &intel_state->wm_results;
+ struct drm_device *dev = state->dev;
struct skl_pipe_wm *pipe_wm;
bool changed = false;
int ret, i;
/*
+ * When we distrust bios wm we always need to recompute to set the
+ * expected DDB allocations for each CRTC.
+ */
+ if (to_i915(dev)->wm.distrust_bios_wm)
+ changed = true;
+
+ /*
* If this transaction isn't actually touching any CRTC's, don't
* bother with watermark calculation. Note that if we pass this
* test, we're guaranteed to hold at least one CRTC state mutex,
@@ -4128,6 +4136,7 @@
*/
for_each_crtc_in_state(state, crtc, cstate, i)
changed = true;
+
if (!changed)
return 0;
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index dbed12c..64f4e2e 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -81,10 +81,13 @@
*/
void intel_pipe_update_start(struct intel_crtc *crtc)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
long timeout = msecs_to_jiffies_timeout(1);
int scanline, min, max, vblank_start;
wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
+ bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI);
DEFINE_WAIT(wait);
vblank_start = adjusted_mode->crtc_vblank_start;
@@ -136,6 +139,24 @@
drm_crtc_vblank_put(&crtc->base);
+ /*
+ * On VLV/CHV DSI the scanline counter would appear to
+ * increment approx. 1/3 of a scanline before start of vblank.
+ * The registers still get latched at start of vblank however.
+ * This means we must not write any registers on the first
+ * line of vblank (since not the whole line is actually in
+ * vblank). And unfortunately we can't use the interrupt to
+ * wait here since it will fire too soon. We could use the
+ * frame start interrupt instead since it will fire after the
+ * critical scanline, but that would require more changes
+ * in the interrupt code. So for now we'll just do the nasty
+ * thing and poll for the bad scanline to pass us by.
+ *
+ * FIXME figure out if BXT+ DSI suffers from this as well
+ */
+ while (need_vlv_dsi_wa && scanline == vblank_start)
+ scanline = intel_get_crtc_scanline(crtc);
+
crtc->debug.scanline_start = scanline;
crtc->debug.start_vbl_time = ktime_get();
crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 0e8c4d9..e097780 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1061,7 +1061,7 @@
}
err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer));
- if (err) {
+ if (err < 0) {
dev_err(hdmi->dev, "Failed to pack vendor infoframe: %zd\n",
err);
return err;
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 70b47ca..55c484e 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -12,6 +12,7 @@
select QCOM_SCM
select SND_SOC_HDMI_CODEC if SND_SOC
select SYNC_FILE
+ select HDCP_QSEECOM
default y
help
DRM/KMS driver for MSM/snapdragon.
@@ -28,16 +29,28 @@
config DRM_MSM_HDMI_HDCP
bool "Enable HDMI HDCP support in MSM DRM driver"
depends on DRM_MSM && QCOM_SCM
- default y
+ default n
help
- Choose this option to enable HDCP state machine
+ Compile in support for logging register reads/writes in a format
+ that can be parsed by envytools demsm tool. If enabled, register
+ logging can be switched on via msm.reglog=y module param.
+
+config DRM_MSM_HDMI
+ bool "Enable HDMI support in MSM DRM driver"
+ depends on DRM_MSM
+ default n
+ help
+ Compile in support for HDMI driver in msm drm
+ driver. HDMI external display support is enabled
+ through this config option. It can be primary or
+ secondary display on device.
config DRM_MSM_DSI
bool "Enable DSI support in MSM DRM driver"
depends on DRM_MSM
select DRM_PANEL
select DRM_MIPI_DSI
- default y
+ default n
help
Choose this option if you have a need for MIPI DSI connector
support.
@@ -83,6 +96,17 @@
help
Choose this option if the 28nm DSI PHY 8960 variant is used on the
platform.
+
+config DRM_MSM_MDP5
+ tristate "MSM MDP5 DRM driver"
+ depends on DRM_MSM
+ default n
+ help
+ Choose this option if MSM MDP5 revision support is
+ needed in DRM/KMS. This is not required if sde/mdp4
+ only target enabled. MDP5 supports DSI and HDMI
+ displays.
+
config DRM_MSM_MDP4
tristate "MSM MDP4 DRM driver"
depends on DRM_MSM
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 9ded825..b625996 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -3,6 +3,7 @@
ccflags-$(CONFIG_DRM_MSM_DSI_PLL) += -Idrivers/gpu/drm/msm/dsi
ccflags-y += -Idrivers/gpu/drm/msm/sde
ccflags-y += -Idrivers/media/platform/msm/sde/rotator
+ccflags-y += -Idrivers/gpu/drm/msm/hdmi
msm_drm-y := \
dp/dp_usbpd.o \
@@ -15,32 +16,7 @@
dp/dp_ctrl.o \
dp/dp_display.o \
dp/dp_drm.o \
- hdmi/hdmi.o \
- hdmi/hdmi_audio.o \
- hdmi/hdmi_bridge.o \
- hdmi/hdmi_connector.o \
- hdmi/hdmi_i2c.o \
- hdmi/hdmi_phy.o \
- hdmi/hdmi_phy_8960.o \
- hdmi/hdmi_phy_8x60.o \
- hdmi/hdmi_phy_8x74.o \
- edp/edp.o \
- edp/edp_aux.o \
- edp/edp_bridge.o \
- edp/edp_connector.o \
- edp/edp_ctrl.o \
- edp/edp_phy.o \
- mdp/mdp_format.o \
- mdp/mdp_kms.o \
- mdp/mdp5/mdp5_cfg.o \
- mdp/mdp5/mdp5_ctl.o \
- mdp/mdp5/mdp5_crtc.o \
- mdp/mdp5/mdp5_encoder.o \
- mdp/mdp5/mdp5_irq.o \
- mdp/mdp5/mdp5_mdss.o \
- mdp/mdp5/mdp5_kms.o \
- mdp/mdp5/mdp5_plane.o \
- mdp/mdp5/mdp5_smp.o \
+ dp/dp_hdcp2p2.o \
sde/sde_crtc.o \
sde/sde_encoder.o \
sde/sde_encoder_phys_vid.o \
@@ -61,7 +37,37 @@
sde/sde_hw_reg_dma_v1_color_proc.o \
sde/sde_hw_color_proc_v4.o \
sde/sde_hw_ad4.o \
- sde_edid_parser.o
+ sde_edid_parser.o \
+ sde_hdcp_1x.o
+
+msm_drm-$(CONFIG_DRM_MSM_HDMI) += hdmi/hdmi.o \
+ hdmi/hdmi_audio.o \
+ hdmi/hdmi_bridge.o \
+ hdmi/hdmi_connector.o \
+ hdmi/hdmi_i2c.o \
+ hdmi/hdmi_phy.o \
+ hdmi/hdmi_phy_8960.o \
+ hdmi/hdmi_phy_8x60.o \
+ hdmi/hdmi_phy_8x74.o \
+
+msm_drm-$(CONFIG_DRM_MSM_EDP) += edp/edp.o \
+ edp/edp_aux.o \
+ edp/edp_bridge.o \
+ edp/edp_connector.o \
+ edp/edp_ctrl.o \
+ edp/edp_phy.o \
+
+msm_drm-$(CONFIG_DRM_MSM_MDP5) += mdp/mdp_format.o \
+ mdp/mdp_kms.o \
+ mdp/mdp5/mdp5_cfg.o \
+ mdp/mdp5/mdp5_ctl.o \
+ mdp/mdp5/mdp5_crtc.o \
+ mdp/mdp5/mdp5_encoder.o \
+ mdp/mdp5/mdp5_irq.o \
+ mdp/mdp5/mdp5_mdss.o \
+ mdp/mdp5/mdp5_kms.o \
+ mdp/mdp5/mdp5_plane.o \
+ mdp/mdp5/mdp5_smp.o \
msm_drm-$(CONFIG_DRM_SDE_RSC) += sde_rsc.o \
sde_rsc_hw.o \
@@ -85,9 +91,9 @@
msm_drm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
msm_drm-$(CONFIG_SYNC_FILE) += sde/sde_fence.o
-msm_drm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
-msm_drm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o
-msm_drm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_phy_8996.o
+msm_drm-$(CONFIG_DRM_MSM_MDP4) += mdp/mdp4/mdp4_lvds_pll.o
+msm_drm-$(CONFIG_DRM_MSM_HDMI) += hdmi/hdmi_pll_8960.o
+msm_drm-$(CONFIG_DRM_MSM_HDMI) += hdmi/hdmi_phy_8996.o
msm_drm-$(CONFIG_DRM_MSM_HDMI_HDCP) += hdmi/hdmi_hdcp.o
@@ -166,6 +172,7 @@
msm_gem_prime.o \
msm_gem_submit.o \
msm_gem_shrinker.o \
+ msm_gem_vma.o \
msm_gpu.o \
msm_iommu.o \
msm_smmu.o \
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index fd266ed..156abf0 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -583,7 +583,7 @@
#endif
}
- if (!gpu->mmu) {
+ if (!gpu->aspace) {
/* TODO we think it is possible to configure the GPU to
* restrict access to VRAM carveout. But the required
* registers are unknown. For now just bail out and
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index d0d3c7b..2dc9412 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -672,7 +672,7 @@
#endif
}
- if (!gpu->mmu) {
+ if (!gpu->aspace) {
/* TODO we think it is possible to configure the GPU to
* restrict access to VRAM carveout. But the required
* registers are unknown. For now just bail out and
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index f386f46..961d47f 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -64,7 +64,7 @@
DBG("%s", gpu->name);
- ret = msm_gem_get_iova(gpu->rb->bo, gpu->id, &gpu->rb_iova);
+ ret = msm_gem_get_iova(gpu->rb->bo, gpu->aspace, &gpu->rb_iova);
if (ret) {
gpu->rb_iova = 0;
dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
@@ -381,7 +381,7 @@
return ret;
}
- mmu = gpu->mmu;
+ mmu = gpu->aspace->mmu;
if (mmu) {
ret = mmu->funcs->attach(mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
@@ -406,7 +406,7 @@
return -ENOMEM;
}
- ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->id,
+ ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->aspace,
&adreno_gpu->memptrs_iova);
if (ret) {
dev_err(drm->dev, "could not map memptrs: %d\n", ret);
@@ -423,8 +423,7 @@
msm_gem_put_vaddr(gpu->memptrs_bo);
if (gpu->memptrs_iova)
- msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
-
+ msm_gem_put_iova(gpu->memptrs_bo, gpu->base.aspace);
drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
}
release_firmware(gpu->pm4);
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
index 70581e2..9106027 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.c
+++ b/drivers/gpu/drm/msm/dp/dp_aux.c
@@ -28,11 +28,13 @@
struct device *dev;
struct dp_aux dp_aux;
struct dp_catalog_aux *catalog;
+ struct dp_aux_cfg *cfg;
struct mutex mutex;
struct completion comp;
u32 aux_error_num;
+ u32 retry_cnt;
bool cmd_busy;
bool native;
bool read;
@@ -127,17 +129,18 @@
timeout = wait_for_completion_timeout(&aux->comp, aux_timeout_ms);
if (!timeout) {
- pr_err("aux write timeout\n");
+ pr_err("aux %s timeout\n", (aux->read ? "read" : "write"));
return -ETIMEDOUT;
}
- pr_debug("aux status %s\n",
- dp_aux_get_error(aux->aux_error_num));
-
- if (aux->aux_error_num == DP_AUX_ERR_NONE)
+ if (aux->aux_error_num == DP_AUX_ERR_NONE) {
ret = len;
- else
+ } else {
+ pr_err_ratelimited("aux err: %s\n",
+ dp_aux_get_error(aux->aux_error_num));
+
ret = -EINVAL;
+ }
return ret;
}
@@ -231,6 +234,22 @@
dp_aux_i2c_handler(aux);
}
+static void dp_aux_reconfig(struct dp_aux *dp_aux)
+{
+ struct dp_aux_private *aux;
+
+ if (!dp_aux) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+ aux->catalog->update_aux_cfg(aux->catalog,
+ aux->cfg, PHY_AUX_CFG1);
+ aux->catalog->reset(aux->catalog);
+}
+
/*
* This function does the real job to process an AUX transaction.
* It will call aux_reset() function to reset the AUX channel,
@@ -242,6 +261,7 @@
ssize_t ret;
int const aux_cmd_native_max = 16;
int const aux_cmd_i2c_max = 128;
+ int const retry_count = 5;
struct dp_aux_private *aux = container_of(drm_aux,
struct dp_aux_private, drm_aux);
@@ -269,8 +289,14 @@
}
ret = dp_aux_cmd_fifo_tx(aux, msg);
- if (ret < 0) {
- aux->catalog->reset(aux->catalog); /* reset aux */
+ if ((ret < 0) && aux->native) {
+ aux->retry_cnt++;
+ if (!(aux->retry_cnt % retry_count))
+ aux->catalog->update_aux_cfg(aux->catalog,
+ aux->cfg, PHY_AUX_CFG1);
+ aux->catalog->reset(aux->catalog);
+ goto unlock_exit;
+ } else if (ret < 0) {
goto unlock_exit;
}
@@ -288,6 +314,7 @@
/* Return requested size for success or retry */
ret = msg->size;
+ aux->retry_cnt = 0;
unlock_exit:
aux->cmd_busy = false;
@@ -295,11 +322,19 @@
return ret;
}
-static void dp_aux_init(struct dp_aux *dp_aux, u32 *aux_cfg)
+static void dp_aux_reset_phy_config_indices(struct dp_aux_cfg *aux_cfg)
+{
+ int i = 0;
+
+ for (i = 0; i < PHY_AUX_CFG_MAX; i++)
+ aux_cfg[i].current_index = 0;
+}
+
+static void dp_aux_init(struct dp_aux *dp_aux, struct dp_aux_cfg *aux_cfg)
{
struct dp_aux_private *aux;
- if (!dp_aux) {
+ if (!dp_aux || !aux_cfg) {
pr_err("invalid input\n");
return;
}
@@ -308,6 +343,8 @@
aux->catalog->reset(aux->catalog);
aux->catalog->enable(aux->catalog, true);
+ aux->retry_cnt = 0;
+ dp_aux_reset_phy_config_indices(aux_cfg);
aux->catalog->setup(aux->catalog, aux_cfg);
}
@@ -364,13 +401,14 @@
drm_dp_aux_unregister(&aux->drm_aux);
}
-struct dp_aux *dp_aux_get(struct device *dev, struct dp_catalog_aux *catalog)
+struct dp_aux *dp_aux_get(struct device *dev, struct dp_catalog_aux *catalog,
+ struct dp_aux_cfg *aux_cfg)
{
int rc = 0;
struct dp_aux_private *aux;
struct dp_aux *dp_aux;
- if (!catalog) {
+ if (!catalog || !aux_cfg) {
pr_err("invalid input\n");
rc = -ENODEV;
goto error;
@@ -388,13 +426,16 @@
aux->dev = dev;
aux->catalog = catalog;
+ aux->cfg = aux_cfg;
dp_aux = &aux->dp_aux;
+ aux->retry_cnt = 0;
dp_aux->isr = dp_aux_isr;
dp_aux->init = dp_aux_init;
dp_aux->deinit = dp_aux_deinit;
dp_aux->drm_aux_register = dp_aux_register;
dp_aux->drm_aux_deregister = dp_aux_deregister;
+ dp_aux->reconfig = dp_aux_reconfig;
return dp_aux;
error:
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.h b/drivers/gpu/drm/msm/dp/dp_aux.h
index f08c12b..5d96fd9 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.h
+++ b/drivers/gpu/drm/msm/dp/dp_aux.h
@@ -32,11 +32,13 @@
int (*drm_aux_register)(struct dp_aux *aux);
void (*drm_aux_deregister)(struct dp_aux *aux);
void (*isr)(struct dp_aux *aux);
- void (*init)(struct dp_aux *aux, u32 *aux_cfg);
+ void (*init)(struct dp_aux *aux, struct dp_aux_cfg *aux_cfg);
void (*deinit)(struct dp_aux *aux);
+ void (*reconfig)(struct dp_aux *aux);
};
-struct dp_aux *dp_aux_get(struct device *dev, struct dp_catalog_aux *catalog);
+struct dp_aux *dp_aux_get(struct device *dev, struct dp_catalog_aux *catalog,
+ struct dp_aux_cfg *aux_cfg);
void dp_aux_put(struct dp_aux *aux);
#endif /*__DP_AUX_H_*/
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
index 9361b52..95a7dc4 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -17,196 +17,7 @@
#include <linux/delay.h>
#include "dp_catalog.h"
-
-/* DP_TX Registers */
-#define DP_HW_VERSION (0x00000000)
-#define DP_SW_RESET (0x00000010)
-#define DP_PHY_CTRL (0x00000014)
-#define DP_CLK_CTRL (0x00000018)
-#define DP_CLK_ACTIVE (0x0000001C)
-#define DP_INTR_STATUS (0x00000020)
-#define DP_INTR_STATUS2 (0x00000024)
-#define DP_INTR_STATUS3 (0x00000028)
-
-#define DP_DP_HPD_CTRL (0x00000200)
-#define DP_DP_HPD_INT_STATUS (0x00000204)
-#define DP_DP_HPD_INT_ACK (0x00000208)
-#define DP_DP_HPD_INT_MASK (0x0000020C)
-#define DP_DP_HPD_REFTIMER (0x00000218)
-#define DP_DP_HPD_EVENT_TIME_0 (0x0000021C)
-#define DP_DP_HPD_EVENT_TIME_1 (0x00000220)
-#define DP_AUX_CTRL (0x00000230)
-#define DP_AUX_DATA (0x00000234)
-#define DP_AUX_TRANS_CTRL (0x00000238)
-#define DP_TIMEOUT_COUNT (0x0000023C)
-#define DP_AUX_LIMITS (0x00000240)
-#define DP_AUX_STATUS (0x00000244)
-
-#define DP_DPCD_CP_IRQ (0x201)
-#define DP_DPCD_RXSTATUS (0x69493)
-
-#define DP_INTERRUPT_TRANS_NUM (0x000002A0)
-
-#define DP_MAINLINK_CTRL (0x00000400)
-#define DP_STATE_CTRL (0x00000404)
-#define DP_CONFIGURATION_CTRL (0x00000408)
-#define DP_SOFTWARE_MVID (0x00000410)
-#define DP_SOFTWARE_NVID (0x00000418)
-#define DP_TOTAL_HOR_VER (0x0000041C)
-#define DP_START_HOR_VER_FROM_SYNC (0x00000420)
-#define DP_HSYNC_VSYNC_WIDTH_POLARITY (0x00000424)
-#define DP_ACTIVE_HOR_VER (0x00000428)
-#define DP_MISC1_MISC0 (0x0000042C)
-#define DP_VALID_BOUNDARY (0x00000430)
-#define DP_VALID_BOUNDARY_2 (0x00000434)
-#define DP_LOGICAL2PHYSCIAL_LANE_MAPPING (0x00000438)
-
-#define DP_MAINLINK_READY (0x00000440)
-#define DP_MAINLINK_LEVELS (0x00000444)
-#define DP_TU (0x0000044C)
-
-#define DP_HBR2_COMPLIANCE_SCRAMBLER_RESET (0x00000454)
-#define DP_TEST_80BIT_CUSTOM_PATTERN_REG0 (0x000004C0)
-#define DP_TEST_80BIT_CUSTOM_PATTERN_REG1 (0x000004C4)
-#define DP_TEST_80BIT_CUSTOM_PATTERN_REG2 (0x000004C8)
-
-#define MMSS_DP_MISC1_MISC0 (0x0000042C)
-#define MMSS_DP_AUDIO_TIMING_GEN (0x00000480)
-#define MMSS_DP_AUDIO_TIMING_RBR_32 (0x00000484)
-#define MMSS_DP_AUDIO_TIMING_HBR_32 (0x00000488)
-#define MMSS_DP_AUDIO_TIMING_RBR_44 (0x0000048C)
-#define MMSS_DP_AUDIO_TIMING_HBR_44 (0x00000490)
-#define MMSS_DP_AUDIO_TIMING_RBR_48 (0x00000494)
-#define MMSS_DP_AUDIO_TIMING_HBR_48 (0x00000498)
-
-#define MMSS_DP_PSR_CRC_RG (0x00000554)
-#define MMSS_DP_PSR_CRC_B (0x00000558)
-
-#define MMSS_DP_AUDIO_CFG (0x00000600)
-#define MMSS_DP_AUDIO_STATUS (0x00000604)
-#define MMSS_DP_AUDIO_PKT_CTRL (0x00000608)
-#define MMSS_DP_AUDIO_PKT_CTRL2 (0x0000060C)
-#define MMSS_DP_AUDIO_ACR_CTRL (0x00000610)
-#define MMSS_DP_AUDIO_CTRL_RESET (0x00000614)
-
-#define MMSS_DP_SDP_CFG (0x00000628)
-#define MMSS_DP_SDP_CFG2 (0x0000062C)
-#define MMSS_DP_AUDIO_TIMESTAMP_0 (0x00000630)
-#define MMSS_DP_AUDIO_TIMESTAMP_1 (0x00000634)
-
-#define MMSS_DP_AUDIO_STREAM_0 (0x00000640)
-#define MMSS_DP_AUDIO_STREAM_1 (0x00000644)
-
-#define MMSS_DP_EXTENSION_0 (0x00000650)
-#define MMSS_DP_EXTENSION_1 (0x00000654)
-#define MMSS_DP_EXTENSION_2 (0x00000658)
-#define MMSS_DP_EXTENSION_3 (0x0000065C)
-#define MMSS_DP_EXTENSION_4 (0x00000660)
-#define MMSS_DP_EXTENSION_5 (0x00000664)
-#define MMSS_DP_EXTENSION_6 (0x00000668)
-#define MMSS_DP_EXTENSION_7 (0x0000066C)
-#define MMSS_DP_EXTENSION_8 (0x00000670)
-#define MMSS_DP_EXTENSION_9 (0x00000674)
-#define MMSS_DP_AUDIO_COPYMANAGEMENT_0 (0x00000678)
-#define MMSS_DP_AUDIO_COPYMANAGEMENT_1 (0x0000067C)
-#define MMSS_DP_AUDIO_COPYMANAGEMENT_2 (0x00000680)
-#define MMSS_DP_AUDIO_COPYMANAGEMENT_3 (0x00000684)
-#define MMSS_DP_AUDIO_COPYMANAGEMENT_4 (0x00000688)
-#define MMSS_DP_AUDIO_COPYMANAGEMENT_5 (0x0000068C)
-#define MMSS_DP_AUDIO_ISRC_0 (0x00000690)
-#define MMSS_DP_AUDIO_ISRC_1 (0x00000694)
-#define MMSS_DP_AUDIO_ISRC_2 (0x00000698)
-#define MMSS_DP_AUDIO_ISRC_3 (0x0000069C)
-#define MMSS_DP_AUDIO_ISRC_4 (0x000006A0)
-#define MMSS_DP_AUDIO_ISRC_5 (0x000006A4)
-#define MMSS_DP_AUDIO_INFOFRAME_0 (0x000006A8)
-#define MMSS_DP_AUDIO_INFOFRAME_1 (0x000006AC)
-#define MMSS_DP_AUDIO_INFOFRAME_2 (0x000006B0)
-
-#define MMSS_DP_GENERIC0_0 (0x00000700)
-#define MMSS_DP_GENERIC0_1 (0x00000704)
-#define MMSS_DP_GENERIC0_2 (0x00000708)
-#define MMSS_DP_GENERIC0_3 (0x0000070C)
-#define MMSS_DP_GENERIC0_4 (0x00000710)
-#define MMSS_DP_GENERIC0_5 (0x00000714)
-#define MMSS_DP_GENERIC0_6 (0x00000718)
-#define MMSS_DP_GENERIC0_7 (0x0000071C)
-#define MMSS_DP_GENERIC0_8 (0x00000720)
-#define MMSS_DP_GENERIC0_9 (0x00000724)
-#define MMSS_DP_GENERIC1_0 (0x00000728)
-#define MMSS_DP_GENERIC1_1 (0x0000072C)
-#define MMSS_DP_GENERIC1_2 (0x00000730)
-#define MMSS_DP_GENERIC1_3 (0x00000734)
-#define MMSS_DP_GENERIC1_4 (0x00000738)
-#define MMSS_DP_GENERIC1_5 (0x0000073C)
-#define MMSS_DP_GENERIC1_6 (0x00000740)
-#define MMSS_DP_GENERIC1_7 (0x00000744)
-#define MMSS_DP_GENERIC1_8 (0x00000748)
-#define MMSS_DP_GENERIC1_9 (0x0000074C)
-
-#define MMSS_DP_TIMING_ENGINE_EN (0x00000A10)
-#define MMSS_DP_ASYNC_FIFO_CONFIG (0x00000A88)
-
-/*DP PHY Register offsets */
-#define DP_PHY_REVISION_ID0 (0x00000000)
-#define DP_PHY_REVISION_ID1 (0x00000004)
-#define DP_PHY_REVISION_ID2 (0x00000008)
-#define DP_PHY_REVISION_ID3 (0x0000000C)
-
-#define DP_PHY_CFG (0x00000010)
-#define DP_PHY_PD_CTL (0x00000018)
-#define DP_PHY_MODE (0x0000001C)
-
-#define DP_PHY_AUX_CFG0 (0x00000020)
-#define DP_PHY_AUX_CFG1 (0x00000024)
-#define DP_PHY_AUX_CFG2 (0x00000028)
-#define DP_PHY_AUX_CFG3 (0x0000002C)
-#define DP_PHY_AUX_CFG4 (0x00000030)
-#define DP_PHY_AUX_CFG5 (0x00000034)
-#define DP_PHY_AUX_CFG6 (0x00000038)
-#define DP_PHY_AUX_CFG7 (0x0000003C)
-#define DP_PHY_AUX_CFG8 (0x00000040)
-#define DP_PHY_AUX_CFG9 (0x00000044)
-#define DP_PHY_AUX_INTERRUPT_MASK (0x00000048)
-#define DP_PHY_AUX_INTERRUPT_CLEAR (0x0000004C)
-
-#define DP_PHY_SPARE0 (0x00AC)
-
-#define TXn_TX_EMP_POST1_LVL (0x000C)
-#define TXn_TX_DRV_LVL (0x001C)
-
-#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN (0x004)
-
-/* DP MMSS_CC registers */
-#define MMSS_DP_LINK_CMD_RCGR (0x0138)
-#define MMSS_DP_LINK_CFG_RCGR (0x013C)
-#define MMSS_DP_PIXEL_M (0x0174)
-#define MMSS_DP_PIXEL_N (0x0178)
-
-/* DP HDCP 1.3 registers */
-#define DP_HDCP_CTRL (0x0A0)
-#define DP_HDCP_STATUS (0x0A4)
-#define DP_HDCP_SW_UPPER_AKSV (0x298)
-#define DP_HDCP_SW_LOWER_AKSV (0x29C)
-#define DP_HDCP_ENTROPY_CTRL0 (0x750)
-#define DP_HDCP_ENTROPY_CTRL1 (0x75C)
-#define DP_HDCP_SHA_STATUS (0x0C8)
-#define DP_HDCP_RCVPORT_DATA2_0 (0x0B0)
-#define DP_HDCP_RCVPORT_DATA3 (0x2A4)
-#define DP_HDCP_RCVPORT_DATA4 (0x2A8)
-#define DP_HDCP_RCVPORT_DATA5 (0x0C0)
-#define DP_HDCP_RCVPORT_DATA6 (0x0C4)
-
-#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_SHA_CTRL (0x024)
-#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_SHA_DATA (0x028)
-#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA0 (0x004)
-#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA1 (0x008)
-#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA7 (0x00C)
-#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA8 (0x010)
-#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA9 (0x014)
-#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA10 (0x018)
-#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA11 (0x01C)
-#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA12 (0x020)
+#include "dp_reg.h"
#define dp_read(offset) readl_relaxed((offset))
#define dp_write(offset, data) writel_relaxed((data), (offset))
@@ -364,11 +175,37 @@
dp_write(base + DP_AUX_CTRL, aux_ctrl);
}
-static void dp_catalog_aux_setup(struct dp_catalog_aux *aux, u32 *aux_cfg)
+static void dp_catalog_aux_update_cfg(struct dp_catalog_aux *aux,
+ struct dp_aux_cfg *cfg, enum dp_phy_aux_config_type type)
{
struct dp_catalog_private *catalog;
+ u32 new_index = 0, current_index = 0;
- if (!aux || !aux_cfg) {
+ if (!aux || !cfg || (type >= PHY_AUX_CFG_MAX)) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ dp_catalog_get_priv(aux);
+
+ current_index = cfg[type].current_index;
+ new_index = (current_index + 1) % cfg[type].cfg_cnt;
+ pr_debug("Updating %s from 0x%08x to 0x%08x\n",
+ dp_phy_aux_config_type_to_string(type),
+ cfg[type].lut[current_index], cfg[type].lut[new_index]);
+
+ dp_write(catalog->io->phy_io.base + cfg[type].offset,
+ cfg[type].lut[new_index]);
+ cfg[type].current_index = new_index;
+}
+
+static void dp_catalog_aux_setup(struct dp_catalog_aux *aux,
+ struct dp_aux_cfg *cfg)
+{
+ struct dp_catalog_private *catalog;
+ int i = 0;
+
+ if (!aux || !cfg) {
pr_err("invalid input\n");
return;
}
@@ -384,16 +221,13 @@
QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x3f);
/* DP AUX CFG register programming */
- dp_write(catalog->io->phy_io.base + DP_PHY_AUX_CFG0, aux_cfg[0]);
- dp_write(catalog->io->phy_io.base + DP_PHY_AUX_CFG1, aux_cfg[1]);
- dp_write(catalog->io->phy_io.base + DP_PHY_AUX_CFG2, aux_cfg[2]);
- dp_write(catalog->io->phy_io.base + DP_PHY_AUX_CFG3, aux_cfg[3]);
- dp_write(catalog->io->phy_io.base + DP_PHY_AUX_CFG4, aux_cfg[4]);
- dp_write(catalog->io->phy_io.base + DP_PHY_AUX_CFG5, aux_cfg[5]);
- dp_write(catalog->io->phy_io.base + DP_PHY_AUX_CFG6, aux_cfg[6]);
- dp_write(catalog->io->phy_io.base + DP_PHY_AUX_CFG7, aux_cfg[7]);
- dp_write(catalog->io->phy_io.base + DP_PHY_AUX_CFG8, aux_cfg[8]);
- dp_write(catalog->io->phy_io.base + DP_PHY_AUX_CFG9, aux_cfg[9]);
+ for (i = 0; i < PHY_AUX_CFG_MAX; i++) {
+ pr_debug("%s: offset=0x%08x, value=0x%08x\n",
+ dp_phy_aux_config_type_to_string(i),
+ cfg[i].offset, cfg[i].lut[cfg[i].current_index]);
+ dp_write(catalog->io->phy_io.base + cfg[i].offset,
+ cfg[i].lut[cfg[i].current_index]);
+ }
dp_write(catalog->io->phy_io.base + DP_PHY_AUX_INTERRUPT_MASK, 0x1F);
}
@@ -424,6 +258,22 @@
}
/* controller related catalog functions */
+static u32 dp_catalog_ctrl_read_hdcp_status(struct dp_catalog_ctrl *ctrl)
+{
+ struct dp_catalog_private *catalog;
+ void __iomem *base;
+
+ if (!ctrl) {
+ pr_err("invalid input\n");
+ return -EINVAL;
+ }
+
+ dp_catalog_get_priv(ctrl);
+ base = catalog->io->ctrl_io.base;
+
+ return dp_read(base + DP_HDCP_STATUS);
+}
+
static void dp_catalog_ctrl_update_transfer_unit(struct dp_catalog_ctrl *ctrl)
{
struct dp_catalog_private *catalog;
@@ -471,6 +321,8 @@
dp_catalog_get_priv(ctrl);
base = catalog->io->ctrl_io.base;
+ pr_debug("DP_CONFIGURATION_CTRL=0x%x\n", cfg);
+
dp_write(base + DP_CONFIGURATION_CTRL, cfg);
dp_write(base + DP_MAINLINK_LEVELS, 0xa08);
dp_write(base + MMSS_DP_ASYNC_FIFO_CONFIG, 0x1);
@@ -507,10 +359,7 @@
dp_catalog_get_priv(ctrl);
base = catalog->io->ctrl_io.base;
- mainlink_ctrl = dp_read(base + DP_MAINLINK_CTRL);
-
if (enable) {
- mainlink_ctrl |= BIT(0);
dp_write(base + DP_MAINLINK_CTRL, 0x02000000);
wmb(); /* make sure mainlink is turned off before reset */
dp_write(base + DP_MAINLINK_CTRL, 0x02000002);
@@ -520,8 +369,9 @@
dp_write(base + DP_MAINLINK_CTRL, 0x02000001);
wmb(); /* make sure mainlink turned on */
} else {
+ mainlink_ctrl = dp_read(base + DP_MAINLINK_CTRL);
mainlink_ctrl &= ~BIT(0);
- dp_write(base + DP_MAINLINK_CTRL, 0x0);
+ dp_write(base + DP_MAINLINK_CTRL, mainlink_ctrl);
}
}
@@ -543,14 +393,16 @@
misc_val |= (tb << 5);
misc_val |= BIT(0); /* Configure clock to synchronous mode */
- pr_debug("isc settings = 0x%x\n", misc_val);
+ pr_debug("misc settings = 0x%x\n", misc_val);
dp_write(base + DP_MISC1_MISC0, misc_val);
}
-static void dp_catalog_ctrl_config_msa(struct dp_catalog_ctrl *ctrl)
+static void dp_catalog_ctrl_config_msa(struct dp_catalog_ctrl *ctrl,
+ u32 rate)
{
u32 pixel_m, pixel_n;
u32 mvid, nvid;
+ u32 const link_rate = 540000;
struct dp_catalog_private *catalog;
void __iomem *base_cc, *base_ctrl;
@@ -570,6 +422,11 @@
mvid = (pixel_m & 0xFFFF) * 5;
nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF);
+ pr_debug("rate = %d\n", rate);
+
+ if (link_rate == rate)
+ nvid *= 2;
+
pr_debug("mvid=0x%x, nvid=0x%x\n", mvid, nvid);
dp_write(base_ctrl + DP_SOFTWARE_MVID, mvid);
dp_write(base_ctrl + DP_SOFTWARE_NVID, nvid);
@@ -593,7 +450,7 @@
bit = 1;
bit <<= (pattern - 1);
- pr_debug("bit=%d train=%d\n", bit, pattern);
+ pr_debug("hw: bit=%d train=%d\n", bit, pattern);
dp_write(base + DP_STATE_CTRL, bit);
bit = 8;
@@ -792,7 +649,7 @@
base0 = catalog->io->ln_tx0_io.base;
base1 = catalog->io->ln_tx1_io.base;
- pr_debug("v=%d p=%d\n", v_level, p_level);
+ pr_debug("hw: v=%d p=%d\n", v_level, p_level);
value0 = vm_voltage_swing[v_level][p_level];
value1 = vm_pre_emphasis[v_level][p_level];
@@ -814,8 +671,11 @@
dp_write(base0 + TXn_TX_EMP_POST1_LVL, value1);
dp_write(base1 + TXn_TX_EMP_POST1_LVL, value1);
- pr_debug("host PHY settings: value0=0x%x value1=0x%x",
- value0, value1);
+ pr_debug("hw: vx_value=0x%x px_value=0x%x\n",
+ value0, value1);
+ } else {
+ pr_err("invalid vx (0x%x=0x%x), px (0x%x=0x%x\n",
+ v_level, value0, p_level, value1);
}
}
@@ -886,6 +746,7 @@
.write_data = dp_catalog_aux_write_data,
.write_trans = dp_catalog_aux_write_trans,
.reset = dp_catalog_aux_reset,
+ .update_aux_cfg = dp_catalog_aux_update_cfg,
.enable = dp_catalog_aux_enable,
.setup = dp_catalog_aux_setup,
.get_irq = dp_catalog_aux_get_irq,
@@ -907,6 +768,7 @@
.update_vx_px = dp_catalog_ctrl_update_vx_px,
.get_interrupt = dp_catalog_ctrl_get_interrupt,
.update_transfer_unit = dp_catalog_ctrl_update_transfer_unit,
+ .read_hdcp_status = dp_catalog_ctrl_read_hdcp_status,
};
struct dp_catalog_audio audio = {
.acr_ctrl = dp_catalog_audio_acr_ctrl,
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h
index ce88569..7fde025 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.h
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.h
@@ -43,7 +43,10 @@
int (*write_trans)(struct dp_catalog_aux *aux);
void (*reset)(struct dp_catalog_aux *aux);
void (*enable)(struct dp_catalog_aux *aux, bool enable);
- void (*setup)(struct dp_catalog_aux *aux, u32 *aux_cfg);
+ void (*update_aux_cfg)(struct dp_catalog_aux *aux,
+ struct dp_aux_cfg *cfg, enum dp_phy_aux_config_type type);
+ void (*setup)(struct dp_catalog_aux *aux,
+ struct dp_aux_cfg *aux_cfg);
void (*get_irq)(struct dp_catalog_aux *aux, bool cmd_busy);
};
@@ -58,7 +61,7 @@
void (*lane_mapping)(struct dp_catalog_ctrl *ctrl);
void (*mainlink_ctrl)(struct dp_catalog_ctrl *ctrl, bool enable);
void (*config_misc)(struct dp_catalog_ctrl *ctrl, u32 cc, u32 tb);
- void (*config_msa)(struct dp_catalog_ctrl *ctrl);
+ void (*config_msa)(struct dp_catalog_ctrl *ctrl, u32 rate);
void (*set_pattern)(struct dp_catalog_ctrl *ctrl, u32 pattern);
void (*reset)(struct dp_catalog_ctrl *ctrl);
bool (*mainlink_ready)(struct dp_catalog_ctrl *ctrl);
@@ -71,6 +74,7 @@
u8 p_level);
void (*get_interrupt)(struct dp_catalog_ctrl *ctrl);
void (*update_transfer_unit)(struct dp_catalog_ctrl *ctrl);
+ u32 (*read_hdcp_status)(struct dp_catalog_ctrl *ctrl);
};
struct dp_catalog_audio {
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index 706398db2..b78f0df 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -67,6 +67,7 @@
bool psm_enabled;
bool orientation;
+ atomic_t aborted;
u32 pixel_rate;
u32 vic;
@@ -92,6 +93,20 @@
complete(&ctrl->video_comp);
}
+static void dp_ctrl_abort(struct dp_ctrl *dp_ctrl)
+{
+ struct dp_ctrl_private *ctrl;
+
+ if (!dp_ctrl) {
+ pr_err("Invalid input data\n");
+ return;
+ }
+
+ ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+ atomic_set(&ctrl->aborted, 1);
+}
+
static void dp_ctrl_state_ctrl(struct dp_ctrl_private *ctrl, u32 state)
{
ctrl->catalog->state_ctrl(ctrl->catalog, state);
@@ -109,8 +124,6 @@
ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
- drm_dp_link_power_down(ctrl->aux->drm_aux, &ctrl->panel->dp_link);
-
reinit_completion(&ctrl->idle_comp);
dp_ctrl_state_ctrl(ctrl, ST_PUSH_IDLE);
@@ -135,6 +148,10 @@
tbd = ctrl->link->get_test_bits_depth(ctrl->link,
ctrl->panel->pinfo.bpp);
+
+ if (tbd == DP_TEST_BIT_DEPTH_UNKNOWN)
+ tbd = DP_TEST_BIT_DEPTH_8;
+
config |= tbd << 8;
/* Num of Lanes */
@@ -170,9 +187,6 @@
ctrl->panel->pinfo.bpp);
cc = ctrl->link->get_colorimetry_config(ctrl->link);
ctrl->catalog->config_misc(ctrl->catalog, cc, tb);
-
- ctrl->catalog->config_msa(ctrl->catalog);
-
ctrl->panel->timing_cfg(ctrl->panel);
}
@@ -226,7 +240,7 @@
{
u32 const multiplier = 1000000;
u64 pclk, lclk;
- u8 bpp, ln_cnt, link_rate;
+ u8 bpp, ln_cnt;
int run_idx = 0;
u32 lwidth, h_blank;
u32 fifo_empty = 0;
@@ -289,7 +303,6 @@
u64 brute_force_threshold = 10;
u64 diff_abs;
- link_rate = ctrl->link->link_rate;
ln_cnt = ctrl->link->lane_count;
bpp = pinfo->bpp;
@@ -309,7 +322,7 @@
even_distribution = 0;
min_hblank = 0;
- lclk = drm_dp_bw_code_to_link_rate(link_rate) * DP_KHZ_TO_HZ;
+ lclk = drm_dp_bw_code_to_link_rate(ctrl->link->bw_code) * DP_KHZ_TO_HZ;
pr_debug("pclk=%lld, active_width=%d, h_blank=%d\n",
pclk, lwidth, h_blank);
@@ -734,14 +747,12 @@
max_level_reached |= BIT(5);
}
- pr_debug("max_level_reached = 0x%x\n", max_level_reached);
-
pre_emphasis_level <<= 3;
for (i = 0; i < 4; i++)
buf[i] = voltage_level | pre_emphasis_level | max_level_reached;
- pr_debug("p|v=0x%x\n", voltage_level | pre_emphasis_level);
+ pr_debug("sink: p|v=0x%x\n", voltage_level | pre_emphasis_level);
return drm_dp_dpcd_write(ctrl->aux->drm_aux, 0x103, buf, 4);
}
@@ -749,8 +760,6 @@
{
struct dp_link *link = ctrl->link;
- pr_debug("v=%d p=%d\n", link->v_level, link->p_level);
-
ctrl->catalog->update_vx_px(ctrl->catalog,
link->v_level, link->p_level);
@@ -762,15 +771,38 @@
{
u8 buf[4];
- pr_debug("pattern=%x\n", pattern);
+ pr_debug("sink: pattern=%x\n", pattern);
buf[0] = pattern;
drm_dp_dpcd_write(ctrl->aux->drm_aux, DP_TRAINING_PATTERN_SET, buf, 1);
}
+static int dp_ctrl_read_link_status(struct dp_ctrl_private *ctrl,
+ u8 *link_status)
+{
+ int ret = 0, len;
+ u32 const offset = DP_LANE_ALIGN_STATUS_UPDATED - DP_LANE0_1_STATUS;
+ u32 link_status_read_max_retries = 100;
+
+ while (--link_status_read_max_retries) {
+ len = drm_dp_dpcd_read_link_status(ctrl->aux->drm_aux,
+ link_status);
+ if (len != DP_LINK_STATUS_SIZE) {
+ pr_err("DP link status read failed, err: %d\n", len);
+ ret = len;
+ break;
+ }
+
+ if (!(link_status[offset] & DP_LINK_STATUS_UPDATED))
+ break;
+ }
+
+ return ret;
+}
+
static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl)
{
- int tries, old_v_level, ret = 0, len = 0;
+ int tries, old_v_level, ret = 0;
u8 link_status[DP_LINK_STATUS_SIZE];
int const maximum_retries = 5;
@@ -780,7 +812,7 @@
ctrl->catalog->set_pattern(ctrl->catalog, 0x01);
dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_1 |
- DP_RECOVERED_CLOCK_OUT_EN); /* train_1 */
+ DP_LINK_SCRAMBLING_DISABLE); /* train_1 */
dp_ctrl_update_vx_px(ctrl);
tries = 0;
@@ -788,36 +820,35 @@
while (1) {
drm_dp_link_train_clock_recovery_delay(ctrl->panel->dpcd);
- len = drm_dp_dpcd_read_link_status(ctrl->aux->drm_aux,
- link_status);
- if (len < DP_LINK_STATUS_SIZE) {
- pr_err("[%s]: DP link status read failed\n", __func__);
- ret = -1;
+ ret = dp_ctrl_read_link_status(ctrl, link_status);
+ if (ret)
break;
- }
if (drm_dp_clock_recovery_ok(link_status,
ctrl->link->lane_count)) {
- ret = 0;
break;
}
if (ctrl->link->v_level == DP_LINK_VOLTAGE_MAX) {
- ret = -1;
- break; /* quit */
+ pr_err_ratelimited("max v_level reached\n");
+ ret = -EAGAIN;
+ break;
}
if (old_v_level == ctrl->link->v_level) {
tries++;
if (tries >= maximum_retries) {
- ret = -1;
- break; /* quit */
+ pr_err("max tries reached\n");
+ ret = -EAGAIN;
+ break;
}
} else {
tries = 0;
old_v_level = ctrl->link->v_level;
}
+ pr_debug("clock recovery not done, adjusting vx px\n");
+
ctrl->link->adjust_levels(ctrl->link, link_status);
dp_ctrl_update_vx_px(ctrl);
}
@@ -828,27 +859,41 @@
static int dp_ctrl_link_rate_down_shift(struct dp_ctrl_private *ctrl)
{
int ret = 0;
+ u32 min_req_link_rate_khz;
+ u32 new_proposed_link_bw_code;
+ u32 new_proposed_link_rate_khz;
if (!ctrl)
return -EINVAL;
- switch (ctrl->link->link_rate) {
+ min_req_link_rate_khz = ctrl->panel->get_min_req_link_rate(ctrl->panel);
+
+ switch (ctrl->link->bw_code) {
case DP_LINK_RATE_810:
- ctrl->link->link_rate = DP_LINK_BW_5_4;
+ new_proposed_link_bw_code = DP_LINK_BW_5_4;
break;
case DP_LINK_BW_5_4:
- ctrl->link->link_rate = DP_LINK_BW_2_7;
+ new_proposed_link_bw_code = DP_LINK_BW_2_7;
break;
case DP_LINK_BW_2_7:
- ctrl->link->link_rate = DP_LINK_BW_1_62;
- break;
case DP_LINK_BW_1_62:
default:
- ret = -EINVAL;
+ new_proposed_link_bw_code = DP_LINK_BW_1_62;
break;
};
- pr_debug("new rate=%d\n", ctrl->link->link_rate);
+ new_proposed_link_rate_khz = drm_dp_bw_code_to_link_rate(
+ new_proposed_link_bw_code);
+
+ pr_debug("new proposed link rate=%d khz\n", new_proposed_link_rate_khz);
+ pr_debug("min required link rate=%d khz\n", min_req_link_rate_khz);
+
+ if (new_proposed_link_rate_khz >= min_req_link_rate_khz)
+ ctrl->link->bw_code = new_proposed_link_bw_code;
+ else
+ pr_debug("can't go below min required link rate\n");
+
+ pr_debug("new bw code=0x%x\n", ctrl->link->bw_code);
return ret;
}
@@ -861,11 +906,15 @@
static int dp_ctrl_link_training_2(struct dp_ctrl_private *ctrl)
{
- int tries = 0, ret = 0, len = 0;
+ int tries = 0, ret = 0;
char pattern;
int const maximum_retries = 5;
u8 link_status[DP_LINK_STATUS_SIZE];
+ dp_ctrl_state_ctrl(ctrl, 0);
+ /* Make sure to clear the current pattern before starting a new one */
+ wmb();
+
if (drm_dp_tps3_supported(ctrl->panel->dpcd))
pattern = DP_TRAINING_PATTERN_3;
else
@@ -878,21 +927,15 @@
do {
drm_dp_link_train_channel_eq_delay(ctrl->panel->dpcd);
- len = drm_dp_dpcd_read_link_status(ctrl->aux->drm_aux,
- link_status);
- if (len < DP_LINK_STATUS_SIZE) {
- pr_err("[%s]: DP link status read failed\n", __func__);
- ret = -1;
+ ret = dp_ctrl_read_link_status(ctrl, link_status);
+ if (ret)
break;
- }
- if (drm_dp_channel_eq_ok(link_status, ctrl->link->lane_count)) {
- ret = 0;
+ if (drm_dp_channel_eq_ok(link_status, ctrl->link->lane_count))
break;
- }
if (tries > maximum_retries) {
- ret = -1;
+ ret = -EAGAIN;
break;
}
tries++;
@@ -907,61 +950,45 @@
static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl)
{
int ret = 0;
- struct drm_dp_link dp_link;
+ u8 encoding = 0x1;
+ struct drm_dp_link link_info = {0};
ctrl->link->p_level = 0;
ctrl->link->v_level = 0;
dp_ctrl_config_ctrl(ctrl);
- dp_ctrl_state_ctrl(ctrl, 0);
- dp_link.num_lanes = ctrl->link->lane_count;
- dp_link.rate = ctrl->link->link_rate;
- dp_link.capabilities = ctrl->panel->dp_link.capabilities;
- drm_dp_link_configure(ctrl->aux->drm_aux, &dp_link);
+ link_info.num_lanes = ctrl->link->lane_count;
+ link_info.rate = drm_dp_bw_code_to_link_rate(ctrl->link->bw_code);
+ link_info.capabilities = ctrl->panel->link_info.capabilities;
+
+ drm_dp_link_configure(ctrl->aux->drm_aux, &link_info);
+ drm_dp_dpcd_write(ctrl->aux->drm_aux, DP_MAIN_LINK_CHANNEL_CODING_SET,
+ &encoding, 1);
ret = dp_ctrl_link_train_1(ctrl);
- if (ret < 0) {
- if (!dp_ctrl_link_rate_down_shift(ctrl)) {
- pr_debug("retry with lower rate\n");
-
- dp_ctrl_clear_training_pattern(ctrl);
- return -EAGAIN;
- }
-
- pr_err("Training 1 failed\n");
- ret = -EINVAL;
- goto clear;
+ if (ret) {
+ pr_err("link training #1 failed\n");
+ goto end;
}
- pr_debug("Training 1 completed successfully\n");
-
- dp_ctrl_state_ctrl(ctrl, 0);
-
- /* Make sure to clear the current pattern before starting a new one */
- wmb();
+ /* print success info as this is a result of user initiated action */
+ pr_info("link training #1 successful\n");
ret = dp_ctrl_link_training_2(ctrl);
- if (ret < 0) {
- if (!dp_ctrl_link_rate_down_shift(ctrl)) {
- pr_debug("retry with lower rate\n");
-
- dp_ctrl_clear_training_pattern(ctrl);
- return -EAGAIN;
- }
-
- pr_err("Training 2 failed\n");
- ret = -EINVAL;
- goto clear;
+ if (ret) {
+ pr_err("link training #2 failed\n");
+ goto end;
}
- pr_debug("Training 2 completed successfully\n");
+ /* print success info as this is a result of user initiated action */
+ pr_debug("link training #2 successful\n");
+end:
dp_ctrl_state_ctrl(ctrl, 0);
/* Make sure to clear the current pattern before starting a new one */
wmb();
-clear:
dp_ctrl_clear_training_pattern(ctrl);
return ret;
}
@@ -973,7 +1000,7 @@
ctrl->catalog->mainlink_ctrl(ctrl->catalog, true);
- drm_dp_link_power_up(ctrl->aux->drm_aux, &ctrl->panel->dp_link);
+ drm_dp_link_power_up(ctrl->aux->drm_aux, &ctrl->panel->link_info);
if (ctrl->link->phy_pattern_requested(ctrl->link))
goto end;
@@ -1018,6 +1045,8 @@
cfg++;
}
+ pr_debug("setting rate=%d on clk=%s\n", rate, name);
+
if (num)
cfg->rate = rate;
else
@@ -1031,7 +1060,7 @@
ctrl->power->set_pixel_clk_parent(ctrl->power);
dp_ctrl_set_clock_rate(ctrl, "ctrl_link_clk",
- drm_dp_bw_code_to_link_rate(ctrl->link->link_rate));
+ drm_dp_bw_code_to_link_rate(ctrl->link->bw_code));
dp_ctrl_set_clock_rate(ctrl, "ctrl_pixel_clk", ctrl->pixel_rate);
@@ -1128,6 +1157,9 @@
dp_ctrl_configure_source_params(ctrl);
+ ctrl->catalog->config_msa(ctrl->catalog,
+ drm_dp_bw_code_to_link_rate(ctrl->link->bw_code));
+
reinit_completion(&ctrl->idle_comp);
if (ctrl->psm_enabled) {
@@ -1148,16 +1180,20 @@
static int dp_ctrl_on_hpd(struct dp_ctrl_private *ctrl)
{
int ret = 0;
+ u32 rate = ctrl->panel->link_info.rate;
+ u32 link_train_max_retries = 100;
+
+ atomic_set(&ctrl->aborted, 0);
ctrl->power->clk_enable(ctrl->power, DP_CORE_PM, true);
ctrl->catalog->hpd_config(ctrl->catalog, true);
- ctrl->link->link_rate = ctrl->panel->get_link_rate(ctrl->panel);
- ctrl->link->lane_count = ctrl->panel->dp_link.num_lanes;
+ ctrl->link->bw_code = drm_dp_link_rate_to_bw_code(rate);
+ ctrl->link->lane_count = ctrl->panel->link_info.num_lanes;
ctrl->pixel_rate = ctrl->panel->pinfo.pixel_clk_khz;
- pr_debug("link_rate=%d, lane_count=%d, pixel_rate=%d\n",
- ctrl->link->link_rate, ctrl->link->lane_count,
+ pr_debug("bw_code=%d, lane_count=%d, pixel_rate=%d\n",
+ ctrl->link->bw_code, ctrl->link->lane_count,
ctrl->pixel_rate);
ctrl->catalog->phy_lane_cfg(ctrl->catalog,
@@ -1174,8 +1210,25 @@
if (ctrl->psm_enabled)
ret = ctrl->link->send_psm_request(ctrl->link, false);
- while (-EAGAIN == dp_ctrl_setup_main_link(ctrl, true))
- pr_debug("MAIN LINK TRAINING RETRY\n");
+ while (--link_train_max_retries && !atomic_read(&ctrl->aborted)) {
+ ctrl->catalog->config_msa(ctrl->catalog,
+ drm_dp_bw_code_to_link_rate(ctrl->link->bw_code));
+
+ ret = dp_ctrl_setup_main_link(ctrl, true);
+ if (!ret)
+ break;
+
+ /* try with lower link rate */
+ dp_ctrl_link_rate_down_shift(ctrl);
+
+ ctrl->catalog->mainlink_ctrl(ctrl->catalog, false);
+
+ dp_ctrl_disable_mainlink_clocks(ctrl);
+ /* hw recommended delay before re-enabling clocks */
+ msleep(20);
+
+ dp_ctrl_enable_mainlink_clocks(ctrl);
+ }
pr_debug("End-\n");
@@ -1292,6 +1345,7 @@
dp_ctrl->on = dp_ctrl_on;
dp_ctrl->off = dp_ctrl_off;
dp_ctrl->push_idle = dp_ctrl_push_idle;
+ dp_ctrl->abort = dp_ctrl_abort;
dp_ctrl->isr = dp_ctrl_isr;
return dp_ctrl;
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h
index 474e0ad..2ecfa0d 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.h
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h
@@ -28,6 +28,7 @@
int (*on)(struct dp_ctrl *dp_ctrl, bool hpd_irq);
void (*off)(struct dp_ctrl *dp_ctrl, bool hpd_irq);
void (*push_idle)(struct dp_ctrl *dp_ctrl);
+ void (*abort)(struct dp_ctrl *dp_ctrl);
void (*isr)(struct dp_ctrl *dp_ctrl);
};
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index a3c6f58..b4dafe4 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -12,7 +12,7 @@
*
*/
-#define pr_fmt(fmt) "[drm-dp]: %s: " fmt, __func__
+#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
#include <linux/module.h>
#include <linux/slab.h>
@@ -20,6 +20,7 @@
#include <linux/debugfs.h>
#include <linux/component.h>
#include <linux/of_irq.h>
+#include <linux/hdcp_qseecom.h>
#include "msm_drv.h"
#include "dp_usbpd.h"
@@ -31,9 +32,25 @@
#include "dp_panel.h"
#include "dp_ctrl.h"
#include "dp_display.h"
+#include "sde_hdcp.h"
static struct dp_display *g_dp_display;
+struct dp_hdcp {
+ void *data;
+ struct sde_hdcp_ops *ops;
+
+ void *hdcp1;
+ void *hdcp2;
+
+ int enc_lvl;
+
+ bool auth_state;
+ bool hdcp1_present;
+ bool hdcp2_present;
+ bool feature_enabled;
+};
+
struct dp_display_private {
char *name;
int irq;
@@ -45,7 +62,7 @@
struct platform_device *pdev;
struct dentry *root;
- struct mutex lock;
+ struct completion notification_comp;
struct dp_usbpd *usbpd;
struct dp_parser *parser;
@@ -55,10 +72,16 @@
struct dp_link *link;
struct dp_panel *panel;
struct dp_ctrl *ctrl;
+ struct dp_hdcp hdcp;
struct dp_usbpd_cb usbpd_cb;
struct dp_display_mode mode;
struct dp_display dp_display;
+
+ struct workqueue_struct *hdcp_workqueue;
+ struct delayed_work hdcp_cb_work;
+ struct mutex hdcp_mutex;
+ int hdcp_status;
};
static const struct of_device_id dp_dt_match[] = {
@@ -66,6 +89,13 @@
{}
};
+static inline bool dp_display_is_hdcp_enabled(struct dp_display_private *dp)
+{
+ return dp->hdcp.feature_enabled &&
+ (dp->hdcp.hdcp1_present || dp->hdcp.hdcp2_present) &&
+ dp->hdcp.ops;
+}
+
static irqreturn_t dp_display_irq(int irq, void *dev_id)
{
struct dp_display_private *dp = dev_id;
@@ -81,6 +111,12 @@
/* DP aux isr */
dp->aux->isr(dp->aux);
+ /* HDCP isr */
+ if (dp_display_is_hdcp_enabled(dp) && dp->hdcp.ops->isr) {
+ if (dp->hdcp.ops->isr(dp->hdcp.data))
+ pr_err("dp_hdcp_isr failed\n");
+ }
+
return IRQ_HANDLED;
}
@@ -158,6 +194,213 @@
return 0;
}
+static void dp_display_hdcp_cb_work(struct work_struct *work)
+{
+ struct dp_display_private *dp;
+ struct delayed_work *dw = to_delayed_work(work);
+ struct sde_hdcp_ops *ops;
+ int rc = 0;
+ u32 hdcp_auth_state;
+
+ dp = container_of(dw, struct dp_display_private, hdcp_cb_work);
+
+ rc = dp->catalog->ctrl.read_hdcp_status(&dp->catalog->ctrl);
+ if (rc >= 0) {
+ hdcp_auth_state = (rc >> 20) & 0x3;
+ pr_debug("hdcp auth state %d\n", hdcp_auth_state);
+ }
+
+ ops = dp->hdcp.ops;
+
+ switch (dp->hdcp_status) {
+ case HDCP_STATE_AUTHENTICATING:
+ pr_debug("start authenticaton\n");
+
+ if (dp->hdcp.ops && dp->hdcp.ops->authenticate)
+ rc = dp->hdcp.ops->authenticate(dp->hdcp.data);
+
+ break;
+ case HDCP_STATE_AUTHENTICATED:
+ pr_debug("hdcp authenticated\n");
+ dp->hdcp.auth_state = true;
+ break;
+ case HDCP_STATE_AUTH_FAIL:
+ dp->hdcp.auth_state = false;
+
+ if (dp->power_on) {
+ pr_debug("Reauthenticating\n");
+ if (ops && ops->reauthenticate) {
+ rc = ops->reauthenticate(dp->hdcp.data);
+ if (rc)
+ pr_err("reauth failed rc=%d\n", rc);
+ }
+ } else {
+ pr_debug("not reauthenticating, cable disconnected\n");
+ }
+
+ break;
+ default:
+ break;
+ }
+}
+
+static void dp_display_notify_hdcp_status_cb(void *ptr,
+ enum sde_hdcp_states status)
+{
+ struct dp_display_private *dp = ptr;
+
+ if (!dp) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ dp->hdcp_status = status;
+
+ if (dp->dp_display.is_connected)
+ queue_delayed_work(dp->hdcp_workqueue, &dp->hdcp_cb_work, HZ/4);
+}
+
+static int dp_display_create_hdcp_workqueue(struct dp_display_private *dp)
+{
+ dp->hdcp_workqueue = create_workqueue("sdm_dp_hdcp");
+ if (IS_ERR_OR_NULL(dp->hdcp_workqueue)) {
+ pr_err("Error creating hdcp_workqueue\n");
+ return -EPERM;
+ }
+
+ INIT_DELAYED_WORK(&dp->hdcp_cb_work, dp_display_hdcp_cb_work);
+
+ return 0;
+}
+
+static void dp_display_destroy_hdcp_workqueue(struct dp_display_private *dp)
+{
+ if (dp->hdcp_workqueue)
+ destroy_workqueue(dp->hdcp_workqueue);
+}
+
+static void dp_display_update_hdcp_info(struct dp_display_private *dp)
+{
+ void *fd = NULL;
+ struct sde_hdcp_ops *ops = NULL;
+
+ if (!dp) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ if (!dp->hdcp.feature_enabled) {
+ pr_debug("feature not enabled\n");
+ return;
+ }
+
+ fd = dp->hdcp.hdcp2;
+ if (fd)
+ ops = sde_dp_hdcp2p2_start(fd);
+
+ if (ops && ops->feature_supported)
+ dp->hdcp.hdcp2_present = ops->feature_supported(fd);
+ else
+ dp->hdcp.hdcp2_present = false;
+
+ pr_debug("hdcp2p2: %s\n",
+ dp->hdcp.hdcp2_present ? "supported" : "not supported");
+
+ if (!dp->hdcp.hdcp2_present) {
+ dp->hdcp.hdcp1_present = hdcp1_check_if_supported_load_app();
+
+ if (dp->hdcp.hdcp1_present) {
+ fd = dp->hdcp.hdcp1;
+ ops = sde_hdcp_1x_start(fd);
+ }
+ }
+
+ pr_debug("hdcp1x: %s\n",
+ dp->hdcp.hdcp1_present ? "supported" : "not supported");
+
+ if (dp->hdcp.hdcp2_present || dp->hdcp.hdcp1_present) {
+ dp->hdcp.data = fd;
+ dp->hdcp.ops = ops;
+ } else {
+ dp->hdcp.data = NULL;
+ dp->hdcp.ops = NULL;
+ }
+}
+
+static void dp_display_deinitialize_hdcp(struct dp_display_private *dp)
+{
+ if (!dp) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ sde_dp_hdcp2p2_deinit(dp->hdcp.data);
+ dp_display_destroy_hdcp_workqueue(dp);
+ if (&dp->hdcp_mutex)
+ mutex_destroy(&dp->hdcp_mutex);
+}
+
+static int dp_display_initialize_hdcp(struct dp_display_private *dp)
+{
+ struct sde_hdcp_init_data hdcp_init_data;
+ struct resource *res;
+ int rc = 0;
+
+ if (!dp) {
+ pr_err("invalid input\n");
+ return -EINVAL;
+ }
+
+ mutex_init(&dp->hdcp_mutex);
+
+ rc = dp_display_create_hdcp_workqueue(dp);
+ if (rc) {
+ pr_err("Failed to create HDCP workqueue\n");
+ goto error;
+ }
+
+ res = platform_get_resource_byname(dp->pdev,
+ IORESOURCE_MEM, "dp_ctrl");
+ if (!res) {
+ pr_err("Error getting dp ctrl resource\n");
+ rc = -EINVAL;
+ goto error;
+ }
+
+ hdcp_init_data.phy_addr = res->start;
+ hdcp_init_data.client_id = HDCP_CLIENT_DP;
+ hdcp_init_data.drm_aux = dp->aux->drm_aux;
+ hdcp_init_data.cb_data = (void *)dp;
+ hdcp_init_data.workq = dp->hdcp_workqueue;
+ hdcp_init_data.mutex = &dp->hdcp_mutex;
+ hdcp_init_data.sec_access = true;
+ hdcp_init_data.notify_status = dp_display_notify_hdcp_status_cb;
+ hdcp_init_data.core_io = &dp->parser->io.ctrl_io;
+ hdcp_init_data.qfprom_io = &dp->parser->io.qfprom_io;
+ hdcp_init_data.hdcp_io = &dp->parser->io.hdcp_io;
+ hdcp_init_data.revision = &dp->panel->link_info.revision;
+
+ dp->hdcp.hdcp1 = sde_hdcp_1x_init(&hdcp_init_data);
+ if (IS_ERR_OR_NULL(dp->hdcp.hdcp1)) {
+ pr_err("Error initializing HDCP 1.x\n");
+ rc = -EINVAL;
+ goto error;
+ }
+
+ pr_debug("HDCP 1.3 initialized\n");
+
+ dp->hdcp.hdcp2 = sde_dp_hdcp2p2_init(&hdcp_init_data);
+ if (!IS_ERR_OR_NULL(dp->hdcp.hdcp2))
+ pr_debug("HDCP 2.2 initialized\n");
+
+ dp->hdcp.feature_enabled = true;
+
+ return 0;
+error:
+ dp_display_deinitialize_hdcp(dp);
+ return rc;
+}
+
static int dp_display_bind(struct device *dev, struct device *master,
void *data)
{
@@ -171,7 +414,7 @@
pr_err("invalid param(s), dev %pK, pdev %pK, master %pK\n",
dev, pdev, master);
rc = -EINVAL;
- goto error;
+ goto end;
}
drm = dev_get_drvdata(master);
@@ -180,14 +423,12 @@
pr_err("invalid param(s), drm %pK, dp %pK\n",
drm, dp);
rc = -EINVAL;
- goto error;
+ goto end;
}
dp->dp_display.drm_dev = drm;
priv = drm->dev_private;
- mutex_lock(&dp->lock);
-
rc = dp_display_debugfs_init(dp);
if (rc) {
pr_err("[%s]Debugfs init failed, rc=%d\n", dp->name, rc);
@@ -217,9 +458,13 @@
pr_err("Power client create failed\n");
goto end;
}
+
+ rc = dp_display_initialize_hdcp(dp);
+ if (rc) {
+ pr_err("HDCP initialization failed\n");
+ goto end;
+ }
end:
- mutex_unlock(&dp->lock);
-error:
return rc;
}
@@ -240,17 +485,11 @@
return;
}
- mutex_lock(&dp->lock);
-
(void)dp->power->power_client_deinit(dp->power);
-
- (void) dp->panel->sde_edid_deregister(dp->panel);
-
- (void) dp->aux->drm_aux_deregister(dp->aux);
-
+ (void)dp->panel->sde_edid_deregister(dp->panel);
+ (void)dp->aux->drm_aux_deregister(dp->aux);
(void)dp_display_debugfs_deinit(dp);
-
- mutex_unlock(&dp->lock);
+ dp_display_deinitialize_hdcp(dp);
}
static const struct component_ops dp_display_comp_ops = {
@@ -261,17 +500,25 @@
static int dp_display_process_hpd_high(struct dp_display_private *dp)
{
int rc = 0;
+ u32 max_pclk_from_edid = 0;
- rc = dp->panel->read_dpcd(dp->panel);
+ rc = dp->panel->read_sink_caps(dp->panel, dp->dp_display.connector);
if (rc)
return rc;
- sde_get_edid(dp->dp_display.connector, &dp->aux->drm_aux->ddc,
- (void **)&dp->panel->edid_ctrl);
+ max_pclk_from_edid = dp->panel->get_max_pclk(dp->panel);
+
+ dp->dp_display.max_pclk_khz = min(max_pclk_from_edid,
+ dp->parser->max_pclk_khz);
dp->dp_display.is_connected = true;
+
drm_helper_hpd_irq_event(dp->dp_display.connector->dev);
+ reinit_completion(&dp->notification_comp);
+ if (!wait_for_completion_timeout(&dp->notification_comp, HZ * 2))
+ pr_warn("timeout\n");
+
return rc;
}
@@ -310,8 +557,20 @@
static void dp_display_process_hpd_low(struct dp_display_private *dp)
{
+ /* cancel any pending request */
+ dp->ctrl->abort(dp->ctrl);
+
+ if (dp_display_is_hdcp_enabled(dp) && dp->hdcp.ops->off) {
+ cancel_delayed_work_sync(&dp->hdcp_cb_work);
+ dp->hdcp.ops->off(dp->hdcp.data);
+ }
+
dp->dp_display.is_connected = false;
drm_helper_hpd_irq_event(dp->dp_display.connector->dev);
+
+ reinit_completion(&dp->notification_comp);
+ if (!wait_for_completion_timeout(&dp->notification_comp, HZ * 2))
+ pr_warn("timeout\n");
}
static int dp_display_usbpd_configure_cb(struct device *dev)
@@ -332,11 +591,10 @@
goto end;
}
- mutex_lock(&dp->lock);
dp_display_host_init(dp);
+
if (dp->usbpd->hpd_high)
dp_display_process_hpd_high(dp);
- mutex_unlock(&dp->lock);
end:
return rc;
}
@@ -359,11 +617,16 @@
goto end;
}
- mutex_lock(&dp->lock);
+ /* cancel any pending request */
+ dp->ctrl->abort(dp->ctrl);
dp->dp_display.is_connected = false;
drm_helper_hpd_irq_event(dp->dp_display.connector->dev);
+ reinit_completion(&dp->notification_comp);
+ if (!wait_for_completion_timeout(&dp->notification_comp, HZ * 2))
+ pr_warn("timeout\n");
+
/*
* If a cable/dongle is connected to the TX device but
* no sink device is connected, we call host
@@ -375,8 +638,6 @@
*/
if (!dp->power_on && dp->core_initialized)
dp_display_host_deinit(dp);
-
- mutex_unlock(&dp->lock);
end:
return rc;
}
@@ -397,10 +658,14 @@
return -ENODEV;
}
- mutex_lock(&dp->lock);
-
if (dp->usbpd->hpd_irq) {
dp->hpd_irq_on = true;
+
+ if (dp_display_is_hdcp_enabled(dp) && dp->hdcp.ops->cp_irq) {
+ if (!dp->hdcp.ops->cp_irq(dp->hdcp.data))
+ goto end;
+ }
+
rc = dp->link->process_request(dp->link);
dp->hpd_irq_on = false;
if (!rc)
@@ -417,7 +682,6 @@
dp_display_process_hpd_high(dp);
}
end:
- mutex_unlock(&dp->lock);
return rc;
}
@@ -462,7 +726,7 @@
goto err;
}
- dp->aux = dp_aux_get(dev, &dp->catalog->aux);
+ dp->aux = dp_aux_get(dev, &dp->catalog->aux, dp->parser->aux_cfg);
if (IS_ERR(dp->aux)) {
rc = PTR_ERR(dp->aux);
pr_err("failed to initialize aux, rc = %d\n", rc);
@@ -537,18 +801,39 @@
dp = container_of(dp_display, struct dp_display_private, dp_display);
- mutex_lock(&dp->lock);
rc = dp->ctrl->on(dp->ctrl, dp->hpd_irq_on);
if (!rc)
dp->power_on = true;
- mutex_unlock(&dp->lock);
error:
return rc;
}
-static int dp_display_post_enable(struct dp_display *dp)
+static int dp_display_post_enable(struct dp_display *dp_display)
{
- return 0;
+ int rc = 0;
+ struct dp_display_private *dp;
+
+ if (!dp_display) {
+ pr_err("invalid input\n");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+ complete_all(&dp->notification_comp);
+
+ dp_display_update_hdcp_info(dp);
+
+ if (dp_display_is_hdcp_enabled(dp)) {
+ cancel_delayed_work_sync(&dp->hdcp_cb_work);
+
+ dp->hdcp_status = HDCP_STATE_AUTHENTICATING;
+ queue_delayed_work(dp->hdcp_workqueue,
+ &dp->hdcp_cb_work, HZ / 2);
+ }
+end:
+ return rc;
}
static int dp_display_pre_disable(struct dp_display *dp_display)
@@ -564,9 +849,15 @@
dp = container_of(dp_display, struct dp_display_private, dp_display);
- mutex_lock(&dp->lock);
+ if (dp_display_is_hdcp_enabled(dp)) {
+ dp->hdcp_status = HDCP_STATE_INACTIVE;
+
+ cancel_delayed_work_sync(&dp->hdcp_cb_work);
+ if (dp->hdcp.ops->off)
+ dp->hdcp.ops->off(dp->hdcp.data);
+ }
+
dp->ctrl->push_idle(dp->ctrl);
- mutex_unlock(&dp->lock);
error:
return rc;
}
@@ -584,11 +875,12 @@
dp = container_of(dp_display, struct dp_display_private, dp_display);
- mutex_lock(&dp->lock);
dp->ctrl->off(dp->ctrl, dp->hpd_irq_on);
dp_display_host_deinit(dp);
+
dp->power_on = false;
- mutex_unlock(&dp->lock);
+
+ complete_all(&dp->notification_comp);
error:
return rc;
}
@@ -662,7 +954,8 @@
if (!dp)
return -ENOMEM;
- mutex_init(&dp->lock);
+ init_completion(&dp->notification_comp);
+
dp->pdev = pdev;
dp->name = "drm_dp";
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
index 877287a..3caa277 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.h
+++ b/drivers/gpu/drm/msm/dp/dp_display.h
@@ -29,6 +29,7 @@
struct dp_bridge *bridge;
struct drm_connector *connector;
bool is_connected;
+ u32 max_pclk_khz;
int (*enable)(struct dp_display *dp_display);
int (*post_enable)(struct dp_display *dp_display);
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index 78c04c4..c388048 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -296,24 +296,30 @@
return 0;
}
-int dp_connector_get_topology(const struct drm_display_mode *drm_mode,
- struct msm_display_topology *topology, u32 max_mixer_width)
+int dp_connector_get_mode_info(const struct drm_display_mode *drm_mode,
+ struct msm_mode_info *mode_info, u32 max_mixer_width)
{
const u32 dual_lm = 2;
const u32 single_lm = 1;
const u32 single_intf = 1;
const u32 no_enc = 0;
+ struct msm_display_topology *topology;
- if (!drm_mode || !topology || !max_mixer_width) {
+ if (!drm_mode || !mode_info || !max_mixer_width) {
pr_err("invalid params\n");
return -EINVAL;
}
+ topology = &mode_info->topology;
topology->num_lm = (max_mixer_width <= drm_mode->hdisplay) ?
dual_lm : single_lm;
topology->num_enc = no_enc;
topology->num_intf = single_intf;
+ mode_info->frame_rate = drm_mode->vrefresh;
+ mode_info->vtotal = drm_mode->vtotal;
+ mode_info->comp_info.comp_type = MSM_DISPLAY_COMPRESSION_NONE;
+
return 0;
}
@@ -331,7 +337,6 @@
info->num_of_h_tiles = 1;
info->h_tile_instance[0] = 0;
info->is_connected = display->is_connected;
- info->comp_info.comp_type = MSM_DISPLAY_COMPRESSION_NONE;
info->capabilities = MSM_DISPLAY_CAP_VID_MODE | MSM_DISPLAY_CAP_EDID |
MSM_DISPLAY_CAP_HOT_PLUG;
@@ -450,5 +455,17 @@
struct drm_display_mode *mode,
void *display)
{
- return MODE_OK;
+ struct dp_display *dp_disp;
+
+ if (!mode || !display) {
+ pr_err("invalid params\n");
+ return MODE_ERROR;
+ }
+
+ dp_disp = display;
+
+ if (mode->clock > dp_disp->max_pclk_khz)
+ return MODE_BAD;
+ else
+ return MODE_OK;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.h b/drivers/gpu/drm/msm/dp/dp_drm.h
index bef3758..5918df1 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.h
+++ b/drivers/gpu/drm/msm/dp/dp_drm.h
@@ -74,14 +74,14 @@
void *display);
/**
- * dp_connector_get_topology - retrieve current topology for the mode selected
+ * dp_connector_get_mode_info - retrieve information of the mode selected
* @drm_mode: Display mode set for the display
- * @topology: Out parameter. Topology for the mode.
+ * @mode_info: Out parameter. Information of the mode
* @max_mixer_width: max width supported by HW layer mixer
* Returns: zero on success
*/
-int dp_connector_get_topology(const struct drm_display_mode *drm_mode,
- struct msm_display_topology *topology,
+int dp_connector_get_mode_info(const struct drm_display_mode *drm_mode,
+ struct msm_mode_info *mode_info,
u32 max_mixer_width);
int dp_connector_get_info(struct msm_display_info *info, void *display);
diff --git a/drivers/gpu/drm/msm/dp/dp_hdcp2p2.c b/drivers/gpu/drm/msm/dp/dp_hdcp2p2.c
new file mode 100644
index 0000000..061acee
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_hdcp2p2.c
@@ -0,0 +1,925 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[dp-hdcp2p2] %s: " fmt, __func__
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/types.h>
+#include <linux/kthread.h>
+#include <linux/hdcp_qseecom.h>
+#include <drm/drm_dp_helper.h>
+
+#include "sde_hdcp.h"
+
+#define DP_INTR_STATUS2 (0x00000024)
+#define DP_INTR_STATUS3 (0x00000028)
+#define dp_read(offset) readl_relaxed((offset))
+#define dp_write(offset, data) writel_relaxed((data), (offset))
+
+enum dp_hdcp2p2_sink_status {
+ SINK_DISCONNECTED,
+ SINK_CONNECTED
+};
+
+enum dp_auth_status {
+ DP_HDCP_AUTH_STATUS_FAILURE,
+ DP_HDCP_AUTH_STATUS_SUCCESS
+};
+
+struct dp_hdcp2p2_ctrl {
+ atomic_t auth_state;
+ enum dp_hdcp2p2_sink_status sink_status; /* Is sink connected */
+ struct dp_hdcp2p2_interrupts *intr;
+ struct sde_hdcp_init_data init_data;
+ struct mutex mutex; /* mutex to protect access to ctrl */
+ struct mutex msg_lock; /* mutex to protect access to msg buffer */
+ struct mutex wakeup_mutex; /* mutex to protect access to wakeup call*/
+ struct sde_hdcp_ops *ops;
+ void *lib_ctx; /* Handle to HDCP 2.2 Trustzone library */
+ struct hdcp_txmtr_ops *lib; /* Ops for driver to call into TZ */
+ enum hdcp_wakeup_cmd wakeup_cmd;
+ enum dp_auth_status auth_status;
+
+ struct task_struct *thread;
+ struct kthread_worker worker;
+ struct kthread_work status;
+ struct kthread_work auth;
+ struct kthread_work send_msg;
+ struct kthread_work recv_msg;
+ struct kthread_work link;
+ char *msg_buf;
+ uint32_t send_msg_len; /* length of all parameters in msg */
+ uint32_t timeout;
+ uint32_t num_messages;
+ struct hdcp_msg_part msg_part[HDCP_MAX_MESSAGE_PARTS];
+ u8 sink_rx_status;
+ u8 rx_status;
+ char abort_mask;
+
+ bool cp_irq_done;
+ bool polling;
+};
+
+struct dp_hdcp2p2_int_set {
+ u32 interrupt;
+ char *name;
+ void (*func)(struct dp_hdcp2p2_ctrl *ctrl);
+};
+
+struct dp_hdcp2p2_interrupts {
+ u32 reg;
+ struct dp_hdcp2p2_int_set *int_set;
+};
+
+static inline bool dp_hdcp2p2_is_valid_state(struct dp_hdcp2p2_ctrl *ctrl)
+{
+ if (ctrl->wakeup_cmd == HDCP_WKUP_CMD_AUTHENTICATE)
+ return true;
+
+ if (atomic_read(&ctrl->auth_state) != HDCP_STATE_INACTIVE)
+ return true;
+
+ return false;
+}
+
+static int dp_hdcp2p2_copy_buf(struct dp_hdcp2p2_ctrl *ctrl,
+ struct hdcp_wakeup_data *data)
+{
+ int i = 0;
+
+ if (!data || !data->message_data)
+ return 0;
+
+ mutex_lock(&ctrl->msg_lock);
+
+ ctrl->timeout = data->timeout;
+ ctrl->num_messages = data->message_data->num_messages;
+ ctrl->send_msg_len = 0; /* Total len of all messages */
+
+ for (i = 0; i < ctrl->num_messages ; i++)
+ ctrl->send_msg_len += data->message_data->messages[i].length;
+
+ memcpy(ctrl->msg_part, data->message_data->messages,
+ sizeof(data->message_data->messages));
+
+ ctrl->rx_status = data->message_data->rx_status;
+ ctrl->abort_mask = data->abort_mask;
+
+ if (!data->send_msg_len) {
+ mutex_unlock(&ctrl->msg_lock);
+ return 0;
+ }
+
+ kzfree(ctrl->msg_buf);
+
+ ctrl->msg_buf = kzalloc(ctrl->send_msg_len, GFP_KERNEL);
+
+ if (!ctrl->msg_buf) {
+ mutex_unlock(&ctrl->msg_lock);
+ return -ENOMEM;
+ }
+
+ /* ignore first byte as it contains message id */
+ memcpy(ctrl->msg_buf, data->send_msg_buf + 1, ctrl->send_msg_len);
+
+ mutex_unlock(&ctrl->msg_lock);
+
+ return 0;
+}
+
+static int dp_hdcp2p2_wakeup(struct hdcp_wakeup_data *data)
+{
+ struct dp_hdcp2p2_ctrl *ctrl;
+ u32 const default_timeout_us = 500;
+
+ if (!data) {
+ pr_err("invalid input\n");
+ return -EINVAL;
+ }
+
+ ctrl = data->context;
+ if (!ctrl) {
+ pr_err("invalid ctrl\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ctrl->wakeup_mutex);
+
+ ctrl->wakeup_cmd = data->cmd;
+
+ if (data->timeout)
+ ctrl->timeout = (data->timeout) * 2;
+ else
+ ctrl->timeout = default_timeout_us;
+
+ if (!dp_hdcp2p2_is_valid_state(ctrl)) {
+ pr_err("invalid state\n");
+ goto exit;
+ }
+
+ if (dp_hdcp2p2_copy_buf(ctrl, data))
+ goto exit;
+
+ if (ctrl->wakeup_cmd == HDCP_WKUP_CMD_STATUS_SUCCESS)
+ ctrl->auth_status = DP_HDCP_AUTH_STATUS_SUCCESS;
+ else if (ctrl->wakeup_cmd == HDCP_WKUP_CMD_STATUS_FAILED)
+ ctrl->auth_status = DP_HDCP_AUTH_STATUS_FAILURE;
+
+ switch (ctrl->wakeup_cmd) {
+ case HDCP_WKUP_CMD_SEND_MESSAGE:
+ kthread_queue_work(&ctrl->worker, &ctrl->send_msg);
+ break;
+ case HDCP_WKUP_CMD_RECV_MESSAGE:
+ kthread_queue_work(&ctrl->worker, &ctrl->recv_msg);
+ break;
+ case HDCP_WKUP_CMD_STATUS_SUCCESS:
+ case HDCP_WKUP_CMD_STATUS_FAILED:
+ kthread_queue_work(&ctrl->worker, &ctrl->status);
+ break;
+ case HDCP_WKUP_CMD_LINK_POLL:
+ if (ctrl->cp_irq_done)
+ kthread_queue_work(&ctrl->worker, &ctrl->recv_msg);
+ else
+ ctrl->polling = true;
+ break;
+ case HDCP_WKUP_CMD_AUTHENTICATE:
+ kthread_queue_work(&ctrl->worker, &ctrl->auth);
+ break;
+ default:
+ pr_err("invalid wakeup command %d\n", ctrl->wakeup_cmd);
+ }
+exit:
+ mutex_unlock(&ctrl->wakeup_mutex);
+
+ return 0;
+}
+
+static inline void dp_hdcp2p2_wakeup_lib(struct dp_hdcp2p2_ctrl *ctrl,
+ struct hdcp_lib_wakeup_data *data)
+{
+ int rc = 0;
+
+ if (ctrl && ctrl->lib && ctrl->lib->wakeup &&
+ data && (data->cmd != HDCP_LIB_WKUP_CMD_INVALID)) {
+ rc = ctrl->lib->wakeup(data);
+ if (rc)
+ pr_err("error sending %s to lib\n",
+ hdcp_lib_cmd_to_str(data->cmd));
+ }
+}
+
+static void dp_hdcp2p2_reset(struct dp_hdcp2p2_ctrl *ctrl)
+{
+ if (!ctrl) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ ctrl->sink_status = SINK_DISCONNECTED;
+ atomic_set(&ctrl->auth_state, HDCP_STATE_INACTIVE);
+}
+
+static void dp_hdcp2p2_set_interrupts(struct dp_hdcp2p2_ctrl *ctrl, bool enable)
+{
+ void __iomem *base = ctrl->init_data.core_io->base;
+ struct dp_hdcp2p2_interrupts *intr = ctrl->intr;
+
+ while (intr && intr->reg) {
+ struct dp_hdcp2p2_int_set *int_set = intr->int_set;
+ u32 interrupts = 0;
+
+ while (int_set && int_set->interrupt) {
+ interrupts |= int_set->interrupt;
+ int_set++;
+ }
+
+ if (enable)
+ dp_write(base + intr->reg,
+ dp_read(base + intr->reg) | interrupts);
+ else
+ dp_write(base + intr->reg,
+ dp_read(base + intr->reg) & ~interrupts);
+ intr++;
+ }
+}
+
+static void dp_hdcp2p2_off(void *input)
+{
+ struct dp_hdcp2p2_ctrl *ctrl = (struct dp_hdcp2p2_ctrl *)input;
+ struct hdcp_wakeup_data cdata = {HDCP_WKUP_CMD_AUTHENTICATE};
+
+ if (!ctrl) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
+ pr_err("hdcp is off\n");
+ return;
+ }
+
+ dp_hdcp2p2_set_interrupts(ctrl, false);
+
+ dp_hdcp2p2_reset(ctrl);
+
+ kthread_flush_worker(&ctrl->worker);
+
+ cdata.context = input;
+ dp_hdcp2p2_wakeup(&cdata);
+}
+
+static int dp_hdcp2p2_authenticate(void *input)
+{
+ struct dp_hdcp2p2_ctrl *ctrl = input;
+ struct hdcp_wakeup_data cdata = {HDCP_WKUP_CMD_AUTHENTICATE};
+ int rc = 0;
+
+ kthread_flush_worker(&ctrl->worker);
+
+ dp_hdcp2p2_set_interrupts(ctrl, true);
+
+ ctrl->sink_status = SINK_CONNECTED;
+ atomic_set(&ctrl->auth_state, HDCP_STATE_AUTHENTICATING);
+
+ cdata.context = input;
+ dp_hdcp2p2_wakeup(&cdata);
+
+ return rc;
+}
+
+static int dp_hdcp2p2_reauthenticate(void *input)
+{
+ struct dp_hdcp2p2_ctrl *ctrl = (struct dp_hdcp2p2_ctrl *)input;
+
+ if (!ctrl) {
+ pr_err("invalid input\n");
+ return -EINVAL;
+ }
+
+ dp_hdcp2p2_reset((struct dp_hdcp2p2_ctrl *)input);
+
+ return dp_hdcp2p2_authenticate(input);
+}
+
+static void dp_hdcp2p2_min_level_change(void *client_ctx,
+ int min_enc_level)
+{
+ struct dp_hdcp2p2_ctrl *ctrl = (struct dp_hdcp2p2_ctrl *)client_ctx;
+ struct hdcp_lib_wakeup_data cdata = {
+ HDCP_LIB_WKUP_CMD_QUERY_STREAM_TYPE};
+ bool enc_notify = true;
+ int enc_lvl;
+
+ if (!ctrl) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ switch (min_enc_level) {
+ case 0:
+ enc_lvl = HDCP_STATE_AUTH_ENC_NONE;
+ break;
+ case 1:
+ enc_lvl = HDCP_STATE_AUTH_ENC_1X;
+ break;
+ case 2:
+ enc_lvl = HDCP_STATE_AUTH_ENC_2P2;
+ break;
+ default:
+ enc_notify = false;
+ }
+
+ pr_debug("enc level changed %d\n", min_enc_level);
+
+ cdata.context = ctrl->lib_ctx;
+ dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
+
+ if (enc_notify && ctrl->init_data.notify_status)
+ ctrl->init_data.notify_status(ctrl->init_data.cb_data, enc_lvl);
+}
+
+static void dp_hdcp2p2_auth_failed(struct dp_hdcp2p2_ctrl *ctrl)
+{
+ if (!ctrl) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ dp_hdcp2p2_set_interrupts(ctrl, false);
+
+ atomic_set(&ctrl->auth_state, HDCP_STATE_AUTH_FAIL);
+
+ /* notify DP about HDCP failure */
+ ctrl->init_data.notify_status(ctrl->init_data.cb_data,
+ HDCP_STATE_AUTH_FAIL);
+}
+
+static int dp_hdcp2p2_aux_read_message(struct dp_hdcp2p2_ctrl *ctrl,
+ u8 *buf, int size, int offset, u32 timeout)
+{
+ int const max_size = 16;
+ int rc = 0, read_size = 0, bytes_read = 0;
+
+ if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
+ pr_err("hdcp is off\n");
+ return -EINVAL;
+ }
+
+ do {
+ read_size = min(size, max_size);
+
+ bytes_read = drm_dp_dpcd_read(ctrl->init_data.drm_aux,
+ offset, buf, read_size);
+ if (bytes_read != read_size) {
+ pr_err("fail: offset(0x%x), size(0x%x), rc(0x%x)\n",
+ offset, read_size, bytes_read);
+			rc = bytes_read < 0 ? bytes_read : -EINVAL; break;
+ }
+
+ buf += read_size;
+ offset += read_size;
+ size -= read_size;
+ } while (size > 0);
+
+ return rc;
+}
+
+static int dp_hdcp2p2_aux_write_message(struct dp_hdcp2p2_ctrl *ctrl,
+ u8 *buf, int size, uint offset, uint timeout)
+{
+ int const max_size = 16;
+ int rc = 0, write_size = 0, bytes_written = 0;
+
+ do {
+ write_size = min(size, max_size);
+
+ bytes_written = drm_dp_dpcd_write(ctrl->init_data.drm_aux,
+ offset, buf, write_size);
+ if (bytes_written != write_size) {
+ pr_err("fail: offset(0x%x), size(0x%x), rc(0x%x)\n",
+ offset, write_size, bytes_written);
+			rc = bytes_written < 0 ? bytes_written : -EINVAL; break;
+ }
+
+ buf += write_size;
+ offset += write_size;
+ size -= write_size;
+ } while (size > 0);
+
+ return rc;
+}
+
+static bool dp_hdcp2p2_feature_supported(void *input)
+{
+ struct dp_hdcp2p2_ctrl *ctrl = input;
+ struct hdcp_txmtr_ops *lib = NULL;
+ bool supported = false;
+
+ if (!ctrl) {
+ pr_err("invalid input\n");
+ goto end;
+ }
+
+ lib = ctrl->lib;
+ if (!lib) {
+ pr_err("invalid lib ops data\n");
+ goto end;
+ }
+
+ if (lib->feature_supported)
+ supported = lib->feature_supported(
+ ctrl->lib_ctx);
+end:
+ return supported;
+}
+
+static void dp_hdcp2p2_send_msg_work(struct kthread_work *work)
+{
+ int rc = 0;
+ struct dp_hdcp2p2_ctrl *ctrl = container_of(work,
+ struct dp_hdcp2p2_ctrl, send_msg);
+ struct hdcp_lib_wakeup_data cdata = {HDCP_LIB_WKUP_CMD_INVALID};
+
+ if (!ctrl) {
+ pr_err("invalid input\n");
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ cdata.context = ctrl->lib_ctx;
+
+ if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
+ pr_err("hdcp is off\n");
+ goto exit;
+ }
+
+ mutex_lock(&ctrl->msg_lock);
+
+ rc = dp_hdcp2p2_aux_write_message(ctrl, ctrl->msg_buf,
+ ctrl->send_msg_len, ctrl->msg_part->offset,
+ ctrl->timeout);
+ if (rc) {
+ pr_err("Error sending msg to sink %d\n", rc);
+ mutex_unlock(&ctrl->msg_lock);
+ goto exit;
+ }
+
+ cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_SEND_SUCCESS;
+ cdata.timeout = ctrl->timeout;
+ mutex_unlock(&ctrl->msg_lock);
+
+exit:
+ if (rc == -ETIMEDOUT)
+ cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_RECV_TIMEOUT;
+ else if (rc)
+ cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_RECV_FAILED;
+
+ dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
+}
+
+static int dp_hdcp2p2_get_msg_from_sink(struct dp_hdcp2p2_ctrl *ctrl)
+{
+ int rc = 0;
+ char *recvd_msg_buf = NULL;
+ struct hdcp_lib_wakeup_data cdata = { HDCP_LIB_WKUP_CMD_INVALID };
+
+ cdata.context = ctrl->lib_ctx;
+
+ recvd_msg_buf = kzalloc(ctrl->send_msg_len, GFP_KERNEL);
+ if (!recvd_msg_buf) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ rc = dp_hdcp2p2_aux_read_message(ctrl, recvd_msg_buf,
+ ctrl->send_msg_len, ctrl->msg_part->offset,
+ ctrl->timeout);
+ if (rc) {
+ pr_err("error reading message %d\n", rc);
+ goto exit;
+ }
+
+ cdata.recvd_msg_buf = recvd_msg_buf;
+ cdata.recvd_msg_len = ctrl->send_msg_len;
+ cdata.timeout = ctrl->timeout;
+exit:
+ if (rc == -ETIMEDOUT)
+ cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_RECV_TIMEOUT;
+ else if (rc)
+ cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_RECV_FAILED;
+ else
+ cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_RECV_SUCCESS;
+
+ dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
+ kfree(recvd_msg_buf);
+
+ return rc;
+}
+
+static void dp_hdcp2p2_recv_msg_work(struct kthread_work *work)
+{
+ struct hdcp_lib_wakeup_data cdata = { HDCP_LIB_WKUP_CMD_INVALID };
+ struct dp_hdcp2p2_ctrl *ctrl = container_of(work,
+ struct dp_hdcp2p2_ctrl, recv_msg);
+
+ cdata.context = ctrl->lib_ctx;
+
+ if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
+ pr_err("hdcp is off\n");
+ return;
+ }
+
+ if (ctrl->rx_status) {
+ if (!ctrl->cp_irq_done) {
+ pr_debug("waiting for CP_IRQ\n");
+ ctrl->polling = true;
+ return;
+ }
+
+ if (ctrl->rx_status & ctrl->sink_rx_status) {
+ ctrl->cp_irq_done = false;
+ ctrl->sink_rx_status = 0;
+ ctrl->rx_status = 0;
+ }
+ }
+
+ dp_hdcp2p2_get_msg_from_sink(ctrl);
+}
+
+static void dp_hdcp2p2_auth_status_work(struct kthread_work *work)
+{
+ struct dp_hdcp2p2_ctrl *ctrl = container_of(work,
+ struct dp_hdcp2p2_ctrl, status);
+
+ if (!ctrl) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
+ pr_err("hdcp is off\n");
+ return;
+ }
+
+ if (ctrl->auth_status == DP_HDCP_AUTH_STATUS_SUCCESS) {
+ ctrl->init_data.notify_status(ctrl->init_data.cb_data,
+ HDCP_STATE_AUTHENTICATED);
+
+ atomic_set(&ctrl->auth_state, HDCP_STATE_AUTHENTICATED);
+ } else {
+ dp_hdcp2p2_auth_failed(ctrl);
+ }
+}
+
+static void dp_hdcp2p2_link_work(struct kthread_work *work)
+{
+ int rc = 0;
+ struct dp_hdcp2p2_ctrl *ctrl = container_of(work,
+ struct dp_hdcp2p2_ctrl, link);
+ struct hdcp_lib_wakeup_data cdata = {HDCP_LIB_WKUP_CMD_INVALID};
+
+ if (!ctrl) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ if (atomic_read(&ctrl->auth_state) == HDCP_STATE_AUTH_FAIL ||
+ atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
+ pr_err("invalid hdcp state\n");
+ return;
+ }
+
+ cdata.context = ctrl->lib_ctx;
+
+ if (ctrl->sink_rx_status & ctrl->abort_mask) {
+ if (ctrl->sink_rx_status & BIT(3))
+ pr_err("reauth_req set by sink\n");
+
+ if (ctrl->sink_rx_status & BIT(4))
+ pr_err("link failure reported by sink\n");
+
+ ctrl->sink_rx_status = 0;
+ ctrl->rx_status = 0;
+
+ rc = -ENOLINK;
+
+ cdata.cmd = HDCP_LIB_WKUP_CMD_LINK_FAILED;
+ atomic_set(&ctrl->auth_state, HDCP_STATE_AUTH_FAIL);
+ goto exit;
+ }
+
+ if (ctrl->polling && (ctrl->sink_rx_status & ctrl->rx_status)) {
+ ctrl->sink_rx_status = 0;
+ ctrl->rx_status = 0;
+
+ dp_hdcp2p2_get_msg_from_sink(ctrl);
+
+ ctrl->polling = false;
+ } else {
+ ctrl->cp_irq_done = true;
+ }
+exit:
+ if (rc)
+ dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
+}
+
+static void dp_hdcp2p2_auth_work(struct kthread_work *work)
+{
+ struct hdcp_lib_wakeup_data cdata = {HDCP_LIB_WKUP_CMD_INVALID};
+ struct dp_hdcp2p2_ctrl *ctrl = container_of(work,
+ struct dp_hdcp2p2_ctrl, auth);
+
+ cdata.context = ctrl->lib_ctx;
+
+ if (atomic_read(&ctrl->auth_state) == HDCP_STATE_AUTHENTICATING)
+ cdata.cmd = HDCP_LIB_WKUP_CMD_START;
+ else
+ cdata.cmd = HDCP_LIB_WKUP_CMD_STOP;
+
+ dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
+}
+
+static int dp_hdcp2p2_read_rx_status(struct dp_hdcp2p2_ctrl *ctrl,
+ u8 *rx_status)
+{
+ u32 const cp_irq_dpcd_offset = 0x201;
+ u32 const rxstatus_dpcd_offset = 0x69493;
+ ssize_t const bytes_to_read = 1;
+ ssize_t bytes_read = 0;
+ u8 buf = 0;
+ int rc = 0;
+ bool cp_irq = 0;
+
+ *rx_status = 0;
+
+ bytes_read = drm_dp_dpcd_read(ctrl->init_data.drm_aux,
+ cp_irq_dpcd_offset, &buf, bytes_to_read);
+ if (bytes_read != bytes_to_read) {
+ pr_err("cp irq read failed\n");
+ rc = bytes_read;
+ goto error;
+ }
+
+ cp_irq = buf & BIT(2);
+ pr_debug("cp_irq=0x%x\n", cp_irq);
+ buf = 0;
+
+ if (cp_irq) {
+ bytes_read = drm_dp_dpcd_read(ctrl->init_data.drm_aux,
+ rxstatus_dpcd_offset, &buf, bytes_to_read);
+ if (bytes_read != bytes_to_read) {
+ pr_err("rxstatus read failed\n");
+ rc = bytes_read;
+ goto error;
+ }
+ *rx_status = buf;
+ pr_debug("rx_status=0x%x\n", *rx_status);
+ }
+
+error:
+ return rc;
+}
+
+static int dp_hdcp2p2_cp_irq(void *input)
+{
+ int rc = 0;
+ struct dp_hdcp2p2_ctrl *ctrl = input;
+
+ if (!ctrl) {
+ pr_err("invalid input\n");
+ return -EINVAL;
+ }
+
+ if (atomic_read(&ctrl->auth_state) == HDCP_STATE_AUTH_FAIL ||
+ atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
+ pr_err("invalid hdcp state\n");
+ rc = -EINVAL;
+ goto error;
+ }
+
+ ctrl->sink_rx_status = 0;
+ rc = dp_hdcp2p2_read_rx_status(ctrl, &ctrl->sink_rx_status);
+ if (rc) {
+ pr_err("failed to read rx status\n");
+ goto error;
+ }
+
+ pr_debug("sink_rx_status=0x%x\n", ctrl->sink_rx_status);
+
+ if (!ctrl->sink_rx_status) {
+ pr_debug("not a hdcp 2.2 irq\n");
+ rc = -EINVAL;
+ goto error;
+ }
+
+ kthread_queue_work(&ctrl->worker, &ctrl->link);
+
+ return 0;
+error:
+ return rc;
+}
+
+static int dp_hdcp2p2_isr(void *input)
+{
+ struct dp_hdcp2p2_ctrl *ctrl = (struct dp_hdcp2p2_ctrl *)input;
+ int rc = 0;
+ struct dss_io_data *io;
+ struct dp_hdcp2p2_interrupts *intr;
+ u32 hdcp_int_val = 0;
+
+ if (!ctrl || !ctrl->init_data.core_io) {
+ pr_err("invalid input\n");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ io = ctrl->init_data.core_io;
+ intr = ctrl->intr;
+
+ while (intr && intr->reg) {
+ struct dp_hdcp2p2_int_set *int_set = intr->int_set;
+
+ hdcp_int_val = dp_read(io->base + intr->reg);
+
+ while (int_set && int_set->interrupt) {
+ if (hdcp_int_val & (int_set->interrupt >> 2)) {
+ pr_debug("%s\n", int_set->name);
+
+ if (int_set->func)
+ int_set->func(ctrl);
+
+ dp_write(io->base + intr->reg, hdcp_int_val |
+ (int_set->interrupt >> 1));
+ }
+ int_set++;
+ }
+ intr++;
+ }
+end:
+ return rc;
+}
+
+void sde_dp_hdcp2p2_deinit(void *input)
+{
+ struct dp_hdcp2p2_ctrl *ctrl = (struct dp_hdcp2p2_ctrl *)input;
+ struct hdcp_lib_wakeup_data cdata = {HDCP_LIB_WKUP_CMD_INVALID};
+
+ if (!ctrl) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ cdata.cmd = HDCP_LIB_WKUP_CMD_STOP;
+ cdata.context = ctrl->lib_ctx;
+ dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
+
+ kthread_stop(ctrl->thread);
+
+ mutex_destroy(&ctrl->mutex);
+ mutex_destroy(&ctrl->msg_lock);
+ mutex_destroy(&ctrl->wakeup_mutex);
+ kzfree(ctrl->msg_buf);
+ kfree(ctrl);
+}
+
+void *sde_dp_hdcp2p2_init(struct sde_hdcp_init_data *init_data)
+{
+ int rc;
+ struct dp_hdcp2p2_ctrl *ctrl;
+ static struct hdcp_txmtr_ops txmtr_ops;
+ struct hdcp_register_data register_data;
+ static struct sde_hdcp_ops ops = {
+ .isr = dp_hdcp2p2_isr,
+ .reauthenticate = dp_hdcp2p2_reauthenticate,
+ .authenticate = dp_hdcp2p2_authenticate,
+ .feature_supported = dp_hdcp2p2_feature_supported,
+ .off = dp_hdcp2p2_off,
+ .cp_irq = dp_hdcp2p2_cp_irq,
+ };
+
+ static struct hdcp_client_ops client_ops = {
+ .wakeup = dp_hdcp2p2_wakeup,
+ .notify_lvl_change = dp_hdcp2p2_min_level_change,
+ };
+ static struct dp_hdcp2p2_int_set int_set1[] = {
+ {BIT(17), "authentication successful", NULL},
+ {BIT(20), "authentication failed", NULL},
+ {BIT(24), "encryption enabled", NULL},
+ {BIT(27), "encryption disabled", NULL},
+ {0},
+ };
+ static struct dp_hdcp2p2_int_set int_set2[] = {
+ {BIT(2), "key fifo underflow", NULL},
+ {0},
+ };
+ static struct dp_hdcp2p2_interrupts intr[] = {
+ {DP_INTR_STATUS2, int_set1},
+ {DP_INTR_STATUS3, int_set2},
+ {0}
+ };
+
+ if (!init_data || !init_data->cb_data ||
+ !init_data->notify_status || !init_data->drm_aux) {
+ pr_err("invalid input\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return ERR_PTR(-ENOMEM);
+
+ ctrl->init_data = *init_data;
+ ctrl->lib = &txmtr_ops;
+ ctrl->msg_buf = NULL;
+
+ ctrl->sink_status = SINK_DISCONNECTED;
+ ctrl->intr = intr;
+
+ atomic_set(&ctrl->auth_state, HDCP_STATE_INACTIVE);
+
+ ctrl->ops = &ops;
+ mutex_init(&ctrl->mutex);
+ mutex_init(&ctrl->msg_lock);
+ mutex_init(&ctrl->wakeup_mutex);
+
+ register_data.hdcp_ctx = &ctrl->lib_ctx;
+ register_data.client_ops = &client_ops;
+ register_data.txmtr_ops = &txmtr_ops;
+ register_data.device_type = HDCP_TXMTR_DP;
+ register_data.client_ctx = ctrl;
+
+	rc = hdcp_library_register(&register_data);
+ if (rc) {
+ pr_err("Unable to register with HDCP 2.2 library\n");
+ goto error;
+ }
+
+ kthread_init_worker(&ctrl->worker);
+
+ kthread_init_work(&ctrl->auth, dp_hdcp2p2_auth_work);
+ kthread_init_work(&ctrl->send_msg, dp_hdcp2p2_send_msg_work);
+ kthread_init_work(&ctrl->recv_msg, dp_hdcp2p2_recv_msg_work);
+ kthread_init_work(&ctrl->status, dp_hdcp2p2_auth_status_work);
+ kthread_init_work(&ctrl->link, dp_hdcp2p2_link_work);
+
+ ctrl->thread = kthread_run(kthread_worker_fn,
+ &ctrl->worker, "dp_hdcp2p2");
+
+ if (IS_ERR(ctrl->thread)) {
+ pr_err("unable to start DP hdcp2p2 thread\n");
+ rc = PTR_ERR(ctrl->thread);
+ ctrl->thread = NULL;
+ goto error;
+ }
+
+ return ctrl;
+error:
+ kfree(ctrl);
+ return ERR_PTR(rc);
+}
+
+static bool dp_hdcp2p2_supported(struct dp_hdcp2p2_ctrl *ctrl)
+{
+ u32 const rxcaps_dpcd_offset = 0x6921d;
+ ssize_t const bytes_to_read = 1;
+ ssize_t bytes_read = 0;
+ u8 buf = 0;
+
+ bytes_read = drm_dp_dpcd_read(ctrl->init_data.drm_aux,
+ rxcaps_dpcd_offset, &buf, bytes_to_read);
+ if (bytes_read != bytes_to_read) {
+ pr_err("RxCaps read failed\n");
+ goto error;
+ }
+
+ pr_debug("rxcaps 0x%x\n", buf);
+
+ if (buf & BIT(1))
+ return true;
+error:
+ return false;
+}
+
+struct sde_hdcp_ops *sde_dp_hdcp2p2_start(void *input)
+{
+ struct dp_hdcp2p2_ctrl *ctrl = input;
+
+ pr_debug("Checking sink capability\n");
+ if (dp_hdcp2p2_supported(ctrl))
+ return ctrl->ops;
+ else
+ return NULL;
+}
+
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
index 741acfca..7e3d81f 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.c
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -47,13 +47,6 @@
DP_TEST_VIDEO_PATTERN_COLOR_SQUARE = 0x03,
};
-enum test_bit_depth {
- DP_TEST_BIT_DEPTH_6 = 0x00,
- DP_TEST_BIT_DEPTH_8 = 0x01,
- DP_TEST_BIT_DEPTH_10 = 0x02,
- DP_TEST_BIT_DEPTH_UNKNOWN = 0xFFFFFFFF,
-};
-
enum dp_link_response {
TEST_ACK = 0x1,
TEST_NACK = 0x2,
@@ -218,7 +211,7 @@
static int dp_link_get_period(struct dp_link_private *link, int const addr)
{
int ret = 0;
- u8 *bp;
+ u8 bp;
u8 data;
u32 const param_len = 0x1;
u32 const max_audio_period = 0xA;
@@ -231,7 +224,7 @@
goto exit;
}
- data = *bp;
+ data = bp;
/* Period - Bits 3:0 */
data = data & 0xF;
@@ -329,7 +322,7 @@
static int dp_link_parse_audio_pattern_type(struct dp_link_private *link)
{
int ret = 0;
- u8 *bp;
+ u8 bp;
u8 data;
int rlen;
int const param_len = 0x1;
@@ -344,7 +337,7 @@
ret = -EINVAL;
goto exit;
}
- data = *bp;
+ data = bp;
/* Audio Pattern Type - Bits 7:0 */
if ((int)data > max_audio_pattern_type) {
@@ -363,7 +356,7 @@
static int dp_link_parse_audio_mode(struct dp_link_private *link)
{
int ret = 0;
- u8 *bp;
+ u8 bp;
u8 data;
int rlen;
int const param_len = 0x1;
@@ -381,7 +374,7 @@
ret = -EINVAL;
goto exit;
}
- data = *bp;
+ data = bp;
/* Sampling Rate - Bits 3:0 */
sampling_rate = data & 0xF;
@@ -535,14 +528,14 @@
static int dp_link_parse_timing_params1(struct dp_link_private *link,
int const addr, int const len, u32 *val)
{
- u8 *bp;
+ u8 bp[2];
int rlen;
if (len < 2)
return -EINVAL;
/* Read the requested video link pattern (Byte 0x221). */
- rlen = drm_dp_dpcd_read(link->aux->drm_aux, addr, &bp, len);
+ rlen = drm_dp_dpcd_read(link->aux->drm_aux, addr, bp, len);
if (rlen < len) {
pr_err("failed to read 0x%x\n", addr);
return -EINVAL;
@@ -556,14 +549,14 @@
static int dp_link_parse_timing_params2(struct dp_link_private *link,
int const addr, int const len, u32 *val1, u32 *val2)
{
- u8 *bp;
+ u8 bp[2];
int rlen;
if (len < 2)
return -EINVAL;
/* Read the requested video link pattern (Byte 0x221). */
- rlen = drm_dp_dpcd_read(link->aux->drm_aux, addr, &bp, len);
+ rlen = drm_dp_dpcd_read(link->aux->drm_aux, addr, bp, len);
if (rlen < len) {
pr_err("failed to read 0x%x\n", addr);
return -EINVAL;
@@ -578,7 +571,7 @@
static int dp_link_parse_timing_params3(struct dp_link_private *link,
int const addr, u32 *val)
{
- u8 *bp;
+ u8 bp;
u32 len = 1;
int rlen;
@@ -588,7 +581,7 @@
pr_err("failed to read 0x%x\n", addr);
return -EINVAL;
}
- *val = bp[0];
+ *val = bp;
return 0;
}
@@ -604,7 +597,7 @@
{
int ret = 0;
int rlen;
- u8 *bp;
+ u8 bp;
u8 data;
u32 dyn_range;
int const param_len = 0x1;
@@ -619,7 +612,7 @@
ret = -EINVAL;
goto exit;
}
- data = *bp;
+ data = bp;
if (!dp_link_is_video_pattern_valid(data)) {
pr_err("invalid link video pattern = 0x%x\n", data);
@@ -641,7 +634,7 @@
ret = -EINVAL;
goto exit;
}
- data = *bp;
+ data = bp;
/* Dynamic Range */
dyn_range = (data & BIT(3)) >> 3;
@@ -765,12 +758,12 @@
*
* Returns true if the requested link rate is supported.
*/
-static bool dp_link_is_link_rate_valid(u32 link_rate)
+static bool dp_link_is_link_rate_valid(u32 bw_code)
{
- return ((link_rate == DP_LINK_BW_1_62) ||
- (link_rate == DP_LINK_BW_2_7) ||
- (link_rate == DP_LINK_BW_5_4) ||
- (link_rate == DP_LINK_RATE_810));
+ return ((bw_code == DP_LINK_BW_1_62) ||
+ (bw_code == DP_LINK_BW_2_7) ||
+ (bw_code == DP_LINK_BW_5_4) ||
+ (bw_code == DP_LINK_RATE_810));
}
/**
@@ -796,7 +789,7 @@
*/
static int dp_link_parse_link_training_params(struct dp_link_private *link)
{
- u8 *bp;
+ u8 bp;
u8 data;
int ret = 0;
int rlen;
@@ -810,7 +803,7 @@
ret = -EINVAL;
goto exit;
}
- data = *bp;
+ data = bp;
if (!dp_link_is_link_rate_valid(data)) {
pr_err("invalid link rate = 0x%x\n", data);
@@ -829,7 +822,7 @@
ret = -EINVAL;
goto exit;
}
- data = *bp;
+ data = bp;
data &= 0x1F;
if (!dp_link_is_lane_count_valid(data)) {
@@ -868,7 +861,7 @@
*/
static int dp_link_parse_phy_test_params(struct dp_link_private *link)
{
- u8 *bp;
+ u8 bp;
u8 data;
int rlen;
int const param_len = 0x1;
@@ -883,7 +876,7 @@
goto end;
}
- data = *bp;
+ data = bp;
link->request.phy_test_pattern_sel = data;
@@ -946,7 +939,7 @@
static int dp_link_parse_request(struct dp_link_private *link)
{
int ret = 0;
- u8 *bp;
+ u8 bp;
u8 data;
int rlen;
u32 const param_len = 0x1;
@@ -964,12 +957,12 @@
goto end;
}
- data = *bp;
+ data = bp;
pr_debug("device service irq vector = 0x%x\n", data);
if (!(data & BIT(1))) {
- pr_debug("no link requested\n");
+ pr_debug("no test requested\n");
goto end;
}
@@ -985,7 +978,7 @@
goto end;
}
- data = *bp;
+ data = bp;
if (!dp_link_is_test_supported(data)) {
pr_debug("link 0x%x not supported\n", data);
@@ -1039,7 +1032,7 @@
*/
static void dp_link_parse_sink_count(struct dp_link_private *link)
{
- u8 *bp;
+ u8 bp;
u8 data;
int rlen;
int const param_len = 0x1;
@@ -1051,7 +1044,7 @@
return;
}
- data = *bp;
+ data = bp;
/* BIT 7, BIT 5:0 */
link->sink_count.count = (data & BIT(7)) << 6 | (data & 0x63);
@@ -1101,7 +1094,7 @@
link->request.test_lane_count);
link->dp_link.lane_count = link->request.test_lane_count;
- link->dp_link.link_rate = link->request.test_link_rate;
+ link->dp_link.bw_code = link->request.test_link_rate;
return 0;
}
@@ -1116,7 +1109,7 @@
static int dp_link_parse_vx_px(struct dp_link_private *link)
{
- u8 *bp;
+ u8 bp;
u8 data;
int const param_len = 0x1;
int const addr1 = 0x206;
@@ -1134,7 +1127,7 @@
goto end;
}
- data = *bp;
+ data = bp;
pr_debug("lanes 0/1 (Byte 0x206): 0x%x\n", data);
@@ -1155,7 +1148,7 @@
goto end;
}
- data = *bp;
+ data = bp;
pr_debug("lanes 2/3 (Byte 0x207): 0x%x\n", data);
@@ -1216,7 +1209,7 @@
pr_debug("start\n");
link->dp_link.lane_count = link->request.test_lane_count;
- link->dp_link.link_rate = link->request.test_link_rate;
+ link->dp_link.bw_code = link->request.test_link_rate;
dp_link_parse_vx_px(link);
@@ -1517,7 +1510,7 @@
dp_link->p_level = DP_LINK_PRE_EMPHASIS_LEVEL_1;
}
- pr_debug("v_level=%d, p_level=%d\n",
+ pr_debug("adjusted: v_level=%d, p_level=%d\n",
dp_link->v_level, dp_link->p_level);
return 0;
diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h
index 26249d6..8ea43da 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.h
+++ b/drivers/gpu/drm/msm/dp/dp_link.h
@@ -46,11 +46,18 @@
DS_PORT_STATUS_CHANGED = 0x200,
};
+enum test_bit_depth {
+ DP_TEST_BIT_DEPTH_6 = 0x00,
+ DP_TEST_BIT_DEPTH_8 = 0x01,
+ DP_TEST_BIT_DEPTH_10 = 0x02,
+ DP_TEST_BIT_DEPTH_UNKNOWN = 0xFFFFFFFF,
+};
+
struct dp_link {
u32 test_requested;
u32 lane_count;
- u32 link_rate;
+ u32 bw_code;
u32 v_level;
u32 p_level;
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index fed1dbb..2e21033 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -16,6 +16,8 @@
#include "dp_panel.h"
+#define DP_PANEL_DEFAULT_BPP 24
+
enum {
DP_LINK_RATE_MULTIPLIER = 27000000,
};
@@ -25,14 +27,17 @@
struct dp_panel dp_panel;
struct dp_aux *aux;
struct dp_catalog_panel *catalog;
+ bool lane_switch_supported;
+ bool aux_cfg_update_done;
};
static int dp_panel_read_dpcd(struct dp_panel *dp_panel)
{
int rlen, rc = 0;
struct dp_panel_private *panel;
- struct drm_dp_link *dp_link;
+ struct drm_dp_link *link_info;
u8 major = 0, minor = 0;
+ unsigned long caps = DP_LINK_CAP_ENHANCED_FRAMING;
if (!dp_panel) {
pr_err("invalid input\n");
@@ -41,7 +46,7 @@
}
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
- dp_link = &dp_panel->dp_link;
+ link_info = &dp_panel->link_info;
rlen = drm_dp_dpcd_read(panel->aux->drm_aux, DP_DPCD_REV,
dp_panel->dpcd, (DP_RECEIVER_CAP_SIZE + 1));
@@ -51,27 +56,124 @@
goto end;
}
- dp_link->revision = dp_panel->dpcd[DP_DPCD_REV];
+ link_info->revision = dp_panel->dpcd[DP_DPCD_REV];
- major = (dp_link->revision >> 4) & 0x0f;
- minor = dp_link->revision & 0x0f;
+ major = (link_info->revision >> 4) & 0x0f;
+ minor = link_info->revision & 0x0f;
pr_debug("version: %d.%d\n", major, minor);
- dp_link->rate =
+ link_info->rate =
drm_dp_bw_code_to_link_rate(dp_panel->dpcd[DP_MAX_LINK_RATE]);
- pr_debug("link_rate=%d\n", dp_link->rate);
+ pr_debug("link_rate=%d\n", link_info->rate);
- dp_link->num_lanes = dp_panel->dpcd[DP_MAX_LANE_COUNT] &
+ if (panel->lane_switch_supported)
+ link_info->num_lanes = dp_panel->dpcd[DP_MAX_LANE_COUNT] &
DP_MAX_LANE_COUNT_MASK;
- pr_debug("lane_count=%d\n", dp_link->num_lanes);
+ else
+ link_info->num_lanes = 2;
+
+ pr_debug("lane_count=%d\n", link_info->num_lanes);
if (dp_panel->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)
- dp_link->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
+ link_info->capabilities |= caps;
end:
return rc;
}
+
+static int dp_panel_read_edid(struct dp_panel *dp_panel,
+ struct drm_connector *connector)
+{
+ int retry_cnt = 0;
+ const int max_retry = 10;
+ struct dp_panel_private *panel;
+
+ if (!dp_panel) {
+ pr_err("invalid input\n");
+ return -EINVAL;
+ }
+
+ panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+ do {
+ sde_get_edid(connector, &panel->aux->drm_aux->ddc,
+ (void **)&dp_panel->edid_ctrl);
+ if (!dp_panel->edid_ctrl->edid) {
+ pr_err("EDID read failed\n");
+ retry_cnt++;
+ panel->aux->reconfig(panel->aux);
+ panel->aux_cfg_update_done = true;
+ } else {
+ return 0;
+ }
+ } while (retry_cnt < max_retry);
+
+ return -EINVAL;
+}
+
+static int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
+ struct drm_connector *connector)
+{
+ int rc = 0;
+ struct dp_panel_private *panel;
+
+ if (!dp_panel || !connector) {
+ pr_err("invalid input\n");
+ return -EINVAL;
+ }
+
+ panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+ rc = dp_panel_read_dpcd(dp_panel);
+ if (rc) {
+ pr_err("panel dpcd read failed\n");
+ return rc;
+ }
+
+ rc = dp_panel_read_edid(dp_panel, connector);
+ if (rc) {
+ pr_err("panel edid read failed\n");
+ return rc;
+ }
+
+ if (panel->aux_cfg_update_done) {
+ pr_debug("read DPCD with updated AUX config\n");
+ dp_panel_read_dpcd(dp_panel);
+ panel->aux_cfg_update_done = false;
+ }
+
+ return 0;
+}
+
+static u32 dp_panel_get_max_pclk(struct dp_panel *dp_panel)
+{
+ struct drm_dp_link *link_info;
+ const u8 num_components = 3;
+ u32 bpc = 0, bpp = 0, max_data_rate_khz = 0, max_pclk_rate_khz = 0;
+
+ if (!dp_panel) {
+ pr_err("invalid input\n");
+ return 0;
+ }
+
+ link_info = &dp_panel->link_info;
+
+ bpc = sde_get_sink_bpc(dp_panel->edid_ctrl);
+ bpp = bpc * num_components;
+ if (!bpp)
+ bpp = DP_PANEL_DEFAULT_BPP;
+
+ max_data_rate_khz = (link_info->num_lanes * link_info->rate * 8);
+ max_pclk_rate_khz = max_data_rate_khz / bpp;
+
+ pr_debug("bpp=%d, max_lane_cnt=%d\n", bpp, link_info->num_lanes);
+ pr_debug("max_data_rate=%dKHz, max_pclk_rate=%dKHz\n",
+ max_data_rate_khz, max_pclk_rate_khz);
+
+ return max_pclk_rate_khz;
+}
+
static int dp_panel_timing_cfg(struct dp_panel *dp_panel)
{
int rc = 0;
@@ -168,7 +270,7 @@
static int dp_panel_init_panel_info(struct dp_panel *dp_panel)
{
int rc = 0;
- struct dp_panel_private *panel;
+ struct dp_panel_info *pinfo;
if (!dp_panel) {
pr_err("invalid input\n");
@@ -176,18 +278,38 @@
goto end;
}
- panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+ pinfo = &dp_panel->pinfo;
+
+ /*
+ * print resolution info as this is a result
+ * of user initiated action of cable connection
+ */
+ pr_info("SET NEW RESOLUTION:\n");
+ pr_info("%dx%d@%dfps\n", pinfo->h_active,
+ pinfo->v_active, pinfo->refresh_rate);
+ pr_info("h_porches(back|front|width) = (%d|%d|%d)\n",
+ pinfo->h_back_porch,
+ pinfo->h_front_porch,
+ pinfo->h_sync_width);
+ pr_info("v_porches(back|front|width) = (%d|%d|%d)\n",
+ pinfo->v_back_porch,
+ pinfo->v_front_porch,
+ pinfo->v_sync_width);
+ pr_info("pixel clock (KHz)=(%d)\n", pinfo->pixel_clk_khz);
+ pr_info("bpp = %d\n", pinfo->bpp);
+ pr_info("active low (h|v)=(%d|%d)\n", pinfo->h_active_low,
+ pinfo->v_active_low);
+
+ pinfo->bpp = max_t(u32, 18, min_t(u32, pinfo->bpp, 30));
+ pr_info("updated bpp = %d\n", pinfo->bpp);
end:
return rc;
}
-static u32 dp_panel_get_link_rate(struct dp_panel *dp_panel)
+static u32 dp_panel_get_min_req_link_rate(struct dp_panel *dp_panel)
{
const u32 encoding_factx10 = 8;
- const u32 ln_to_link_ratio = 10;
- u32 min_link_rate, reminder = 0;
- u32 calc_link_rate = 0, lane_cnt, max_rate = 0;
- struct dp_panel_private *panel;
+ u32 min_link_rate_khz = 0, lane_cnt;
struct dp_panel_info *pinfo;
if (!dp_panel) {
@@ -195,54 +317,19 @@
goto end;
}
- panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
-
- lane_cnt = dp_panel->dp_link.num_lanes;
- max_rate = drm_dp_link_rate_to_bw_code(dp_panel->dp_link.rate);
+ lane_cnt = dp_panel->link_info.num_lanes;
pinfo = &dp_panel->pinfo;
- /*
- * The max pixel clock supported is 675Mhz. The
- * current calculations below will make sure
- * the min_link_rate is within 32 bit limits.
- * Any changes in the section of code should
- * consider this limitation.
- */
- min_link_rate = (u32)div_u64(pinfo->pixel_clk_khz * 1000,
- (lane_cnt * encoding_factx10));
- min_link_rate /= ln_to_link_ratio;
- min_link_rate = (min_link_rate * pinfo->bpp);
- min_link_rate = (u32)div_u64_rem(min_link_rate * 10,
- DP_LINK_RATE_MULTIPLIER, &reminder);
+ /* num_lanes * lane_count * 8 >= pclk * bpp * 10 */
+ min_link_rate_khz = pinfo->pixel_clk_khz /
+ (lane_cnt * encoding_factx10);
+ min_link_rate_khz *= pinfo->bpp;
- /*
- * To avoid any fractional values,
- * increment the min_link_rate
- */
- if (reminder)
- min_link_rate += 1;
- pr_debug("min_link_rate = %d\n", min_link_rate);
-
- if (min_link_rate <= DP_LINK_BW_1_62)
- calc_link_rate = DP_LINK_BW_1_62;
- else if (min_link_rate <= DP_LINK_BW_2_7)
- calc_link_rate = DP_LINK_BW_2_7;
- else if (min_link_rate <= DP_LINK_BW_5_4)
- calc_link_rate = DP_LINK_BW_5_4;
- else if (min_link_rate <= DP_LINK_RATE_810)
- calc_link_rate = DP_LINK_RATE_810;
- else {
- /* Cap the link rate to the max supported rate */
- pr_debug("link_rate = %d is unsupported\n", min_link_rate);
- calc_link_rate = DP_LINK_RATE_810;
- }
-
- if (calc_link_rate > max_rate)
- calc_link_rate = max_rate;
-
- pr_debug("calc_link_rate = 0x%x\n", calc_link_rate);
+ pr_debug("min lclk req=%d khz for pclk=%d khz, lanes=%d, bpp=%d\n",
+ min_link_rate_khz, pinfo->pixel_clk_khz, lane_cnt,
+ pinfo->bpp);
end:
- return calc_link_rate;
+ return min_link_rate_khz;
}
struct dp_panel *dp_panel_get(struct device *dev, struct dp_aux *aux,
@@ -269,13 +356,15 @@
panel->catalog = catalog;
dp_panel = &panel->dp_panel;
+ panel->aux_cfg_update_done = false;
dp_panel->sde_edid_register = dp_panel_edid_register;
dp_panel->sde_edid_deregister = dp_panel_edid_deregister;
dp_panel->init_info = dp_panel_init_panel_info;
dp_panel->timing_cfg = dp_panel_timing_cfg;
- dp_panel->read_dpcd = dp_panel_read_dpcd;
- dp_panel->get_link_rate = dp_panel_get_link_rate;
+ dp_panel->read_sink_caps = dp_panel_read_sink_caps;
+ dp_panel->get_min_req_link_rate = dp_panel_get_min_req_link_rate;
+ dp_panel->get_max_pclk = dp_panel_get_max_pclk;
return dp_panel;
error:
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
index 5852c70..ab9a451 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.h
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -40,19 +40,23 @@
struct dp_panel {
/* dpcd raw data */
u8 dpcd[DP_RECEIVER_CAP_SIZE];
- struct drm_dp_link dp_link;
+ struct drm_dp_link link_info;
struct sde_edid_ctrl *edid_ctrl;
+ struct drm_connector *connector;
struct dp_panel_info pinfo;
u32 vic;
+ u32 max_pclk_khz;
int (*sde_edid_register)(struct dp_panel *dp_panel);
void (*sde_edid_deregister)(struct dp_panel *dp_panel);
int (*init_info)(struct dp_panel *dp_panel);
int (*timing_cfg)(struct dp_panel *dp_panel);
- int (*read_dpcd)(struct dp_panel *dp_panel);
- u32 (*get_link_rate)(struct dp_panel *dp_panel);
+ int (*read_sink_caps)(struct dp_panel *dp_panel,
+ struct drm_connector *connector);
+ u32 (*get_min_req_link_rate)(struct dp_panel *dp_panel);
+ u32 (*get_max_pclk)(struct dp_panel *dp_panel);
};
struct dp_panel *dp_panel_get(struct device *dev, struct dp_aux *aux,
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.c b/drivers/gpu/drm/msm/dp/dp_parser.c
index e81bbb3..c85c2a2 100644
--- a/drivers/gpu/drm/msm/dp/dp_parser.c
+++ b/drivers/gpu/drm/msm/dp/dp_parser.c
@@ -101,23 +101,85 @@
return rc;
}
+static const char *dp_get_phy_aux_config_property(u32 cfg_type)
+{
+ switch (cfg_type) {
+ case PHY_AUX_CFG0:
+ return "qcom,aux-cfg0-settings";
+ case PHY_AUX_CFG1:
+ return "qcom,aux-cfg1-settings";
+ case PHY_AUX_CFG2:
+ return "qcom,aux-cfg2-settings";
+ case PHY_AUX_CFG3:
+ return "qcom,aux-cfg3-settings";
+ case PHY_AUX_CFG4:
+ return "qcom,aux-cfg4-settings";
+ case PHY_AUX_CFG5:
+ return "qcom,aux-cfg5-settings";
+ case PHY_AUX_CFG6:
+ return "qcom,aux-cfg6-settings";
+ case PHY_AUX_CFG7:
+ return "qcom,aux-cfg7-settings";
+ case PHY_AUX_CFG8:
+ return "qcom,aux-cfg8-settings";
+ case PHY_AUX_CFG9:
+ return "qcom,aux-cfg9-settings";
+ default:
+ return "unknown";
+ }
+}
+
+static void dp_parser_phy_aux_cfg_reset(struct dp_parser *parser)
+{
+ int i = 0;
+
+ for (i = 0; i < PHY_AUX_CFG_MAX; i++)
+ parser->aux_cfg[i] = (const struct dp_aux_cfg){ 0 };
+}
+
static int dp_parser_aux(struct dp_parser *parser)
{
- int len = 0, i = 0, rc = 0;
struct device_node *of_node = parser->pdev->dev.of_node;
+ int len = 0, i = 0, j = 0, config_count = 0;
const char *data;
+ int const minimum_config_count = 1;
- data = of_get_property(of_node, "qcom,aux-cfg-settings", &len);
- if (!data || (len != AUX_CFG_LEN)) {
- pr_err("Unable to read DP AUX CFG settings\n");
- rc = -EINVAL;
- goto end;
+ for (i = 0; i < PHY_AUX_CFG_MAX; i++) {
+ const char *property = dp_get_phy_aux_config_property(i);
+
+ data = of_get_property(of_node, property, &len);
+ if (!data) {
+ pr_err("Unable to read %s\n", property);
+ goto error;
+ }
+
+ config_count = len - 1;
+ if ((config_count < minimum_config_count) ||
+ (config_count > DP_AUX_CFG_MAX_VALUE_CNT)) {
+ pr_err("Invalid config count (%d) configs for %s\n",
+ config_count, property);
+ goto error;
+ }
+
+ parser->aux_cfg[i].offset = data[0];
+ parser->aux_cfg[i].cfg_cnt = config_count;
+ pr_debug("%s offset=0x%x, cfg_cnt=%d\n",
+ property,
+ parser->aux_cfg[i].offset,
+ parser->aux_cfg[i].cfg_cnt);
+ for (j = 1; j < len; j++) {
+ parser->aux_cfg[i].lut[j - 1] = data[j];
+ pr_debug("%s lut[%d]=0x%x\n",
+ property,
+ i,
+ parser->aux_cfg[i].lut[j - 1]);
+ }
}
+ return 0;
- for (i = 0; i < len; i++)
- parser->aux_cfg[i] = data[i];
-end:
- return rc;
+error:
+ dp_parser_phy_aux_cfg_reset(parser);
+ return -EINVAL;
}
static int dp_parser_misc(struct dp_parser *parser)
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.h b/drivers/gpu/drm/msm/dp/dp_parser.h
index fdcdd3a..7794da5 100644
--- a/drivers/gpu/drm/msm/dp/dp_parser.h
+++ b/drivers/gpu/drm/msm/dp/dp_parser.h
@@ -93,6 +93,66 @@
struct pinctrl_state *state_suspend;
};
+#define DP_ENUM_STR(x) #x
+#define DP_AUX_CFG_MAX_VALUE_CNT 3
+/**
+ * struct dp_aux_cfg - DP's AUX configuration settings
+ *
+ * @cfg_cnt: count of the configurable settings for the AUX register
+ * @current_index: current index of the AUX config lut
+ * @offset: register offset of the AUX config register
+ * @lut: look up table for the AUX config values for this register
+ */
+struct dp_aux_cfg {
+ u32 cfg_cnt;
+ u32 current_index;
+ u32 offset;
+ u32 lut[DP_AUX_CFG_MAX_VALUE_CNT];
+};
+
+/* PHY AUX config registers */
+enum dp_phy_aux_config_type {
+ PHY_AUX_CFG0,
+ PHY_AUX_CFG1,
+ PHY_AUX_CFG2,
+ PHY_AUX_CFG3,
+ PHY_AUX_CFG4,
+ PHY_AUX_CFG5,
+ PHY_AUX_CFG6,
+ PHY_AUX_CFG7,
+ PHY_AUX_CFG8,
+ PHY_AUX_CFG9,
+ PHY_AUX_CFG_MAX,
+};
+
+static inline char *dp_phy_aux_config_type_to_string(u32 cfg_type)
+{
+ switch (cfg_type) {
+ case PHY_AUX_CFG0:
+ return DP_ENUM_STR(PHY_AUX_CFG0);
+ case PHY_AUX_CFG1:
+ return DP_ENUM_STR(PHY_AUX_CFG1);
+ case PHY_AUX_CFG2:
+ return DP_ENUM_STR(PHY_AUX_CFG2);
+ case PHY_AUX_CFG3:
+ return DP_ENUM_STR(PHY_AUX_CFG3);
+ case PHY_AUX_CFG4:
+ return DP_ENUM_STR(PHY_AUX_CFG4);
+ case PHY_AUX_CFG5:
+ return DP_ENUM_STR(PHY_AUX_CFG5);
+ case PHY_AUX_CFG6:
+ return DP_ENUM_STR(PHY_AUX_CFG6);
+ case PHY_AUX_CFG7:
+ return DP_ENUM_STR(PHY_AUX_CFG7);
+ case PHY_AUX_CFG8:
+ return DP_ENUM_STR(PHY_AUX_CFG8);
+ case PHY_AUX_CFG9:
+ return DP_ENUM_STR(PHY_AUX_CFG9);
+ default:
+ return "unknown";
+ }
+}
+
/**
* struct dp_parser - DP parser's data exposed to clients
*
@@ -111,7 +171,7 @@
struct dp_display_data disp_data;
u8 l_map[4];
- u32 aux_cfg[AUX_CFG_LEN];
+ struct dp_aux_cfg aux_cfg[AUX_CFG_LEN];
u32 max_pclk_khz;
int (*parse)(struct dp_parser *parser);
diff --git a/drivers/gpu/drm/msm/dp/dp_reg.h b/drivers/gpu/drm/msm/dp/dp_reg.h
new file mode 100644
index 0000000..30377a0
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_reg.h
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DP_REG_H_
+#define _DP_REG_H_
+
+/* DP_TX Registers */
+#define DP_HW_VERSION (0x00000000)
+#define DP_SW_RESET (0x00000010)
+#define DP_PHY_CTRL (0x00000014)
+#define DP_CLK_CTRL (0x00000018)
+#define DP_CLK_ACTIVE (0x0000001C)
+#define DP_INTR_STATUS (0x00000020)
+#define DP_INTR_STATUS2 (0x00000024)
+#define DP_INTR_STATUS3 (0x00000028)
+
+#define DP_DP_HPD_CTRL (0x00000200)
+#define DP_DP_HPD_INT_STATUS (0x00000204)
+#define DP_DP_HPD_INT_ACK (0x00000208)
+#define DP_DP_HPD_INT_MASK (0x0000020C)
+#define DP_DP_HPD_REFTIMER (0x00000218)
+#define DP_DP_HPD_EVENT_TIME_0 (0x0000021C)
+#define DP_DP_HPD_EVENT_TIME_1 (0x00000220)
+#define DP_AUX_CTRL (0x00000230)
+#define DP_AUX_DATA (0x00000234)
+#define DP_AUX_TRANS_CTRL (0x00000238)
+#define DP_TIMEOUT_COUNT (0x0000023C)
+#define DP_AUX_LIMITS (0x00000240)
+#define DP_AUX_STATUS (0x00000244)
+
+#define DP_DPCD_CP_IRQ (0x201)
+#define DP_DPCD_RXSTATUS (0x69493)
+
+#define DP_INTERRUPT_TRANS_NUM (0x000002A0)
+
+#define DP_MAINLINK_CTRL (0x00000400)
+#define DP_STATE_CTRL (0x00000404)
+#define DP_CONFIGURATION_CTRL (0x00000408)
+#define DP_SOFTWARE_MVID (0x00000410)
+#define DP_SOFTWARE_NVID (0x00000418)
+#define DP_TOTAL_HOR_VER (0x0000041C)
+#define DP_START_HOR_VER_FROM_SYNC (0x00000420)
+#define DP_HSYNC_VSYNC_WIDTH_POLARITY (0x00000424)
+#define DP_ACTIVE_HOR_VER (0x00000428)
+#define DP_MISC1_MISC0 (0x0000042C)
+#define DP_VALID_BOUNDARY (0x00000430)
+#define DP_VALID_BOUNDARY_2 (0x00000434)
+#define DP_LOGICAL2PHYSCIAL_LANE_MAPPING (0x00000438)
+
+#define DP_MAINLINK_READY (0x00000440)
+#define DP_MAINLINK_LEVELS (0x00000444)
+#define DP_TU (0x0000044C)
+
+#define DP_HBR2_COMPLIANCE_SCRAMBLER_RESET (0x00000454)
+#define DP_TEST_80BIT_CUSTOM_PATTERN_REG0 (0x000004C0)
+#define DP_TEST_80BIT_CUSTOM_PATTERN_REG1 (0x000004C4)
+#define DP_TEST_80BIT_CUSTOM_PATTERN_REG2 (0x000004C8)
+
+#define MMSS_DP_MISC1_MISC0 (0x0000042C)
+#define MMSS_DP_AUDIO_TIMING_GEN (0x00000480)
+#define MMSS_DP_AUDIO_TIMING_RBR_32 (0x00000484)
+#define MMSS_DP_AUDIO_TIMING_HBR_32 (0x00000488)
+#define MMSS_DP_AUDIO_TIMING_RBR_44 (0x0000048C)
+#define MMSS_DP_AUDIO_TIMING_HBR_44 (0x00000490)
+#define MMSS_DP_AUDIO_TIMING_RBR_48 (0x00000494)
+#define MMSS_DP_AUDIO_TIMING_HBR_48 (0x00000498)
+
+#define MMSS_DP_PSR_CRC_RG (0x00000554)
+#define MMSS_DP_PSR_CRC_B (0x00000558)
+
+#define MMSS_DP_AUDIO_CFG (0x00000600)
+#define MMSS_DP_AUDIO_STATUS (0x00000604)
+#define MMSS_DP_AUDIO_PKT_CTRL (0x00000608)
+#define MMSS_DP_AUDIO_PKT_CTRL2 (0x0000060C)
+#define MMSS_DP_AUDIO_ACR_CTRL (0x00000610)
+#define MMSS_DP_AUDIO_CTRL_RESET (0x00000614)
+
+#define MMSS_DP_SDP_CFG (0x00000628)
+#define MMSS_DP_SDP_CFG2 (0x0000062C)
+#define MMSS_DP_AUDIO_TIMESTAMP_0 (0x00000630)
+#define MMSS_DP_AUDIO_TIMESTAMP_1 (0x00000634)
+
+#define MMSS_DP_AUDIO_STREAM_0 (0x00000640)
+#define MMSS_DP_AUDIO_STREAM_1 (0x00000644)
+
+#define MMSS_DP_EXTENSION_0 (0x00000650)
+#define MMSS_DP_EXTENSION_1 (0x00000654)
+#define MMSS_DP_EXTENSION_2 (0x00000658)
+#define MMSS_DP_EXTENSION_3 (0x0000065C)
+#define MMSS_DP_EXTENSION_4 (0x00000660)
+#define MMSS_DP_EXTENSION_5 (0x00000664)
+#define MMSS_DP_EXTENSION_6 (0x00000668)
+#define MMSS_DP_EXTENSION_7 (0x0000066C)
+#define MMSS_DP_EXTENSION_8 (0x00000670)
+#define MMSS_DP_EXTENSION_9 (0x00000674)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_0 (0x00000678)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_1 (0x0000067C)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_2 (0x00000680)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_3 (0x00000684)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_4 (0x00000688)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_5 (0x0000068C)
+#define MMSS_DP_AUDIO_ISRC_0 (0x00000690)
+#define MMSS_DP_AUDIO_ISRC_1 (0x00000694)
+#define MMSS_DP_AUDIO_ISRC_2 (0x00000698)
+#define MMSS_DP_AUDIO_ISRC_3 (0x0000069C)
+#define MMSS_DP_AUDIO_ISRC_4 (0x000006A0)
+#define MMSS_DP_AUDIO_ISRC_5 (0x000006A4)
+#define MMSS_DP_AUDIO_INFOFRAME_0 (0x000006A8)
+#define MMSS_DP_AUDIO_INFOFRAME_1 (0x000006AC)
+#define MMSS_DP_AUDIO_INFOFRAME_2 (0x000006B0)
+
+#define MMSS_DP_GENERIC0_0 (0x00000700)
+#define MMSS_DP_GENERIC0_1 (0x00000704)
+#define MMSS_DP_GENERIC0_2 (0x00000708)
+#define MMSS_DP_GENERIC0_3 (0x0000070C)
+#define MMSS_DP_GENERIC0_4 (0x00000710)
+#define MMSS_DP_GENERIC0_5 (0x00000714)
+#define MMSS_DP_GENERIC0_6 (0x00000718)
+#define MMSS_DP_GENERIC0_7 (0x0000071C)
+#define MMSS_DP_GENERIC0_8 (0x00000720)
+#define MMSS_DP_GENERIC0_9 (0x00000724)
+#define MMSS_DP_GENERIC1_0 (0x00000728)
+#define MMSS_DP_GENERIC1_1 (0x0000072C)
+#define MMSS_DP_GENERIC1_2 (0x00000730)
+#define MMSS_DP_GENERIC1_3 (0x00000734)
+#define MMSS_DP_GENERIC1_4 (0x00000738)
+#define MMSS_DP_GENERIC1_5 (0x0000073C)
+#define MMSS_DP_GENERIC1_6 (0x00000740)
+#define MMSS_DP_GENERIC1_7 (0x00000744)
+#define MMSS_DP_GENERIC1_8 (0x00000748)
+#define MMSS_DP_GENERIC1_9 (0x0000074C)
+
+#define MMSS_DP_TIMING_ENGINE_EN (0x00000A10)
+#define MMSS_DP_ASYNC_FIFO_CONFIG (0x00000A88)
+
+/*DP PHY Register offsets */
+#define DP_PHY_REVISION_ID0 (0x00000000)
+#define DP_PHY_REVISION_ID1 (0x00000004)
+#define DP_PHY_REVISION_ID2 (0x00000008)
+#define DP_PHY_REVISION_ID3 (0x0000000C)
+
+#define DP_PHY_CFG (0x00000010)
+#define DP_PHY_PD_CTL (0x00000018)
+#define DP_PHY_MODE (0x0000001C)
+
+#define DP_PHY_AUX_CFG0 (0x00000020)
+#define DP_PHY_AUX_CFG1 (0x00000024)
+#define DP_PHY_AUX_CFG2 (0x00000028)
+#define DP_PHY_AUX_CFG3 (0x0000002C)
+#define DP_PHY_AUX_CFG4 (0x00000030)
+#define DP_PHY_AUX_CFG5 (0x00000034)
+#define DP_PHY_AUX_CFG6 (0x00000038)
+#define DP_PHY_AUX_CFG7 (0x0000003C)
+#define DP_PHY_AUX_CFG8 (0x00000040)
+#define DP_PHY_AUX_CFG9 (0x00000044)
+#define DP_PHY_AUX_INTERRUPT_MASK (0x00000048)
+#define DP_PHY_AUX_INTERRUPT_CLEAR (0x0000004C)
+
+#define DP_PHY_SPARE0 (0x00AC)
+
+#define TXn_TX_EMP_POST1_LVL (0x000C)
+#define TXn_TX_DRV_LVL (0x001C)
+
+#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN (0x004)
+
+/* DP MMSS_CC registers */
+#define MMSS_DP_LINK_CMD_RCGR (0x0138)
+#define MMSS_DP_LINK_CFG_RCGR (0x013C)
+#define MMSS_DP_PIXEL_M (0x0174)
+#define MMSS_DP_PIXEL_N (0x0178)
+
+/* DP HDCP 1.3 registers */
+#define DP_HDCP_CTRL (0x0A0)
+#define DP_HDCP_STATUS (0x0A4)
+#define DP_HDCP_SW_UPPER_AKSV (0x298)
+#define DP_HDCP_SW_LOWER_AKSV (0x29C)
+#define DP_HDCP_ENTROPY_CTRL0 (0x750)
+#define DP_HDCP_ENTROPY_CTRL1 (0x75C)
+#define DP_HDCP_SHA_STATUS (0x0C8)
+#define DP_HDCP_RCVPORT_DATA2_0 (0x0B0)
+#define DP_HDCP_RCVPORT_DATA3 (0x2A4)
+#define DP_HDCP_RCVPORT_DATA4 (0x2A8)
+#define DP_HDCP_RCVPORT_DATA5 (0x0C0)
+#define DP_HDCP_RCVPORT_DATA6 (0x0C4)
+
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_SHA_CTRL (0x024)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_SHA_DATA (0x028)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA0 (0x004)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA1 (0x008)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA7 (0x00C)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA8 (0x010)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA9 (0x014)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA10 (0x018)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA11 (0x01C)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA12 (0x020)
+
+#endif /* _DP_REG_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_usbpd.c b/drivers/gpu/drm/msm/dp/dp_usbpd.c
index 6ef8266..7bc1433 100644
--- a/drivers/gpu/drm/msm/dp/dp_usbpd.c
+++ b/drivers/gpu/drm/msm/dp/dp_usbpd.c
@@ -345,12 +345,11 @@
pd->vdo = *vdos;
dp_usbpd_get_status(pd);
- if (pd->dp_cb && pd->dp_cb->attention) {
+ if (pd->dp_cb && pd->dp_cb->attention)
pd->dp_cb->attention(pd->dev);
- if (!pd->dp_usbpd.alt_mode_cfg_done)
- dp_usbpd_send_event(pd, DP_USBPD_EVT_CONFIGURE);
- }
+ if (!pd->dp_usbpd.alt_mode_cfg_done)
+ dp_usbpd_send_event(pd, DP_USBPD_EVT_CONFIGURE);
break;
case DP_USBPD_VDM_STATUS:
pd->vdo = *vdos;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
index 3625ed0..5e76ce7 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
@@ -56,6 +56,8 @@
dsi_ctrl_hw_cmn_trigger_cmd_test_pattern;
ctrl->ops.clear_phy0_ln_err = dsi_ctrl_hw_dln0_phy_err;
ctrl->ops.phy_reset_config = dsi_ctrl_hw_cmn_phy_reset_config;
+ ctrl->ops.setup_misr = dsi_ctrl_hw_cmn_setup_misr;
+ ctrl->ops.collect_misr = dsi_ctrl_hw_cmn_collect_misr;
switch (version) {
case DSI_CTRL_VERSION_1_4:
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
index 2d7b174..e8a6ab4 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
@@ -147,6 +147,12 @@
void dsi_ctrl_hw_cmn_phy_sw_reset(struct dsi_ctrl_hw *ctrl);
void dsi_ctrl_hw_cmn_soft_reset(struct dsi_ctrl_hw *ctrl);
+void dsi_ctrl_hw_cmn_setup_misr(struct dsi_ctrl_hw *ctrl,
+ enum dsi_op_mode panel_mode,
+ bool enable, u32 frame_count);
+u32 dsi_ctrl_hw_cmn_collect_misr(struct dsi_ctrl_hw *ctrl,
+ enum dsi_op_mode panel_mode);
+
void dsi_ctrl_hw_cmn_kickoff_command(struct dsi_ctrl_hw *ctrl,
struct dsi_ctrl_cmd_dma_info *cmd,
u32 flags);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index 4788f3b..96136ba 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -9,7 +9,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
*/
#define pr_fmt(fmt) "dsi-ctrl:[%s] " fmt, __func__
@@ -25,6 +24,7 @@
#include "msm_drv.h"
#include "msm_kms.h"
#include "msm_gpu.h"
+#include "msm_mmu.h"
#include "dsi_ctrl.h"
#include "dsi_ctrl_hw.h"
#include "dsi_clk.h"
@@ -253,6 +253,16 @@
return 0;
}
+static inline struct msm_gem_address_space*
+dsi_ctrl_get_aspace(struct dsi_ctrl *dsi_ctrl,
+ int domain)
+{
+ if (!dsi_ctrl || !dsi_ctrl->drm_dev)
+ return NULL;
+
+ return msm_gem_smmu_address_space_get(dsi_ctrl->drm_dev, domain);
+}
+
static int dsi_ctrl_check_state(struct dsi_ctrl *dsi_ctrl,
enum dsi_ctrl_driver_ops op,
u32 op_state)
@@ -884,7 +894,7 @@
const struct mipi_dsi_msg *msg,
u32 flags)
{
- int rc = 0;
+ int rc = 0, ret = 0;
struct mipi_dsi_packet packet;
struct dsi_ctrl_cmd_dma_fifo_info cmd;
struct dsi_ctrl_cmd_dma_info cmd_mem;
@@ -948,42 +958,59 @@
hw_flags |= (flags & DSI_CTRL_CMD_DEFER_TRIGGER) ?
DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER : 0;
- if (!(flags & DSI_CTRL_CMD_DEFER_TRIGGER))
- reinit_completion(&dsi_ctrl->int_info.cmd_dma_done);
-
- if (flags & DSI_CTRL_CMD_FETCH_MEMORY) {
- dsi_ctrl->hw.ops.kickoff_command(&dsi_ctrl->hw,
- &cmd_mem,
- hw_flags);
- } else if (flags & DSI_CTRL_CMD_FIFO_STORE) {
- dsi_ctrl->hw.ops.kickoff_fifo_command(&dsi_ctrl->hw,
- &cmd,
- hw_flags);
+ if (flags & DSI_CTRL_CMD_DEFER_TRIGGER) {
+ if (flags & DSI_CTRL_CMD_FETCH_MEMORY) {
+ dsi_ctrl->hw.ops.kickoff_command(&dsi_ctrl->hw,
+ &cmd_mem,
+ hw_flags);
+ } else if (flags & DSI_CTRL_CMD_FIFO_STORE) {
+ dsi_ctrl->hw.ops.kickoff_fifo_command(&dsi_ctrl->hw,
+ &cmd,
+ hw_flags);
+ }
}
if (!(flags & DSI_CTRL_CMD_DEFER_TRIGGER)) {
- u32 retry = 10;
- u32 status = 0;
- u64 error = 0;
- u32 mask = (DSI_CMD_MODE_DMA_DONE);
+ dsi_ctrl_enable_status_interrupt(dsi_ctrl,
+ DSI_SINT_CMD_MODE_DMA_DONE, NULL);
+ reinit_completion(&dsi_ctrl->irq_info.cmd_dma_done);
- while ((status == 0) && (retry > 0)) {
- udelay(1000);
- status = dsi_ctrl->hw.ops.get_interrupt_status(
- &dsi_ctrl->hw);
- error = dsi_ctrl->hw.ops.get_error_status(
- &dsi_ctrl->hw);
- status &= mask;
- retry--;
- dsi_ctrl->hw.ops.clear_interrupt_status(&dsi_ctrl->hw,
- status);
- dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
- error);
+ if (flags & DSI_CTRL_CMD_FETCH_MEMORY) {
+ dsi_ctrl->hw.ops.kickoff_command(&dsi_ctrl->hw,
+ &cmd_mem,
+ hw_flags);
+ } else if (flags & DSI_CTRL_CMD_FIFO_STORE) {
+ dsi_ctrl->hw.ops.kickoff_fifo_command(&dsi_ctrl->hw,
+ &cmd,
+ hw_flags);
}
- pr_debug("INT STATUS = %x, retry = %d\n", status, retry);
- if (retry == 0)
- pr_err("[DSI_%d]Command transfer failed\n",
- dsi_ctrl->cell_index);
+
+ ret = wait_for_completion_timeout(
+ &dsi_ctrl->irq_info.cmd_dma_done,
+ msecs_to_jiffies(DSI_CTRL_TX_TO_MS));
+
+ if (ret == 0) {
+ u32 status = 0;
+ u32 mask = DSI_CMD_MODE_DMA_DONE;
+
+ if (status & mask) {
+ status |= (DSI_CMD_MODE_DMA_DONE |
+ DSI_BTA_DONE);
+ dsi_ctrl->hw.ops.clear_interrupt_status(
+ &dsi_ctrl->hw,
+ status);
+ dsi_ctrl_disable_status_interrupt(dsi_ctrl,
+ DSI_SINT_CMD_MODE_DMA_DONE);
+ complete_all(&dsi_ctrl->irq_info.cmd_dma_done);
+ pr_warn("dma_tx done but irq not triggered\n");
+ } else {
+ rc = -ETIMEDOUT;
+ dsi_ctrl_disable_status_interrupt(dsi_ctrl,
+ DSI_SINT_CMD_MODE_DMA_DONE);
+ pr_err("[DSI_%d]Command transfer failed\n",
+ dsi_ctrl->cell_index);
+ }
+ }
dsi_ctrl->hw.ops.reset_cmd_fifo(&dsi_ctrl->hw);
}
@@ -1152,19 +1179,19 @@
return rc;
}
-int dsi_ctrl_intr_deinit(struct dsi_ctrl *dsi_ctrl)
-{
- struct dsi_ctrl_interrupts *ints = &dsi_ctrl->int_info;
-
- devm_free_irq(&dsi_ctrl->pdev->dev, ints->irq, dsi_ctrl);
-
- return 0;
-}
-
static int dsi_ctrl_buffer_deinit(struct dsi_ctrl *dsi_ctrl)
{
+ struct msm_gem_address_space *aspace = NULL;
+
if (dsi_ctrl->tx_cmd_buf) {
- msm_gem_put_iova(dsi_ctrl->tx_cmd_buf, 0);
+ aspace = dsi_ctrl_get_aspace(dsi_ctrl,
+ MSM_SMMU_DOMAIN_UNSECURE);
+ if (!aspace) {
+ pr_err("failed to get address space\n");
+ return -ENOMEM;
+ }
+
+ msm_gem_put_iova(dsi_ctrl->tx_cmd_buf, aspace);
msm_gem_free_object(dsi_ctrl->tx_cmd_buf);
dsi_ctrl->tx_cmd_buf = NULL;
@@ -1177,6 +1204,13 @@
{
int rc = 0;
u32 iova = 0;
+ struct msm_gem_address_space *aspace = NULL;
+
+ aspace = dsi_ctrl_get_aspace(dsi_ctrl, MSM_SMMU_DOMAIN_UNSECURE);
+ if (!aspace) {
+ pr_err("failed to get address space\n");
+ return -ENOMEM;
+ }
dsi_ctrl->tx_cmd_buf = msm_gem_new(dsi_ctrl->drm_dev,
SZ_4K,
@@ -1191,7 +1225,7 @@
dsi_ctrl->cmd_buffer_size = SZ_4K;
- rc = msm_gem_get_iova(dsi_ctrl->tx_cmd_buf, 0, &iova);
+ rc = msm_gem_get_iova(dsi_ctrl->tx_cmd_buf, aspace, &iova);
if (rc) {
pr_err("failed to get iova, rc=%d\n", rc);
(void)dsi_ctrl_buffer_deinit(dsi_ctrl);
@@ -1259,6 +1293,10 @@
dsi_ctrl->cell_index = index;
dsi_ctrl->version = version;
+ dsi_ctrl->irq_info.irq_num = -1;
+ dsi_ctrl->irq_info.irq_stat_mask = 0x0;
+
+ spin_lock_init(&dsi_ctrl->irq_info.irq_lock);
dsi_ctrl->name = of_get_property(pdev->dev.of_node, "label", NULL);
if (!dsi_ctrl->name)
@@ -1677,6 +1715,266 @@
return 0;
}
+static void dsi_ctrl_handle_error_status(struct dsi_ctrl *dsi_ctrl,
+ unsigned long int error)
+{
+ pr_err("%s: %lu\n", __func__, error);
+
+ /* DTLN PHY error */
+ if (error & 0x3000e00)
+ if (dsi_ctrl->hw.ops.clear_error_status)
+ dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
+ 0x3000e00);
+
+ /* DSI FIFO OVERFLOW error */
+ if (error & 0xf0000) {
+ if (dsi_ctrl->hw.ops.clear_error_status)
+ dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
+ 0xf0000);
+ }
+
+ /* DSI FIFO UNDERFLOW error */
+ if (error & 0xf00000) {
+ if (dsi_ctrl->hw.ops.clear_error_status)
+ dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
+ 0xf00000);
+ }
+
+ /* DSI PLL UNLOCK error */
+ if (error & BIT(8))
+ if (dsi_ctrl->hw.ops.clear_error_status)
+ dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
+ BIT(8));
+}
+
+/**
+ * dsi_ctrl_isr - interrupt service routine for DSI CTRL component
+ * @irq: Incoming IRQ number
+ * @ptr: Pointer to user data structure (struct dsi_ctrl)
+ * Returns: IRQ_HANDLED if no further action required
+ */
+static irqreturn_t dsi_ctrl_isr(int irq, void *ptr)
+{
+ struct dsi_ctrl *dsi_ctrl;
+ struct dsi_event_cb_info cb_info;
+ unsigned long flags;
+ uint32_t cell_index, status, i;
+ uint64_t errors;
+
+ if (!ptr)
+ return IRQ_NONE;
+ dsi_ctrl = ptr;
+
+ /* clear status interrupts */
+ if (dsi_ctrl->hw.ops.get_interrupt_status)
+ status = dsi_ctrl->hw.ops.get_interrupt_status(&dsi_ctrl->hw);
+ else
+ status = 0x0;
+
+ if (dsi_ctrl->hw.ops.clear_interrupt_status)
+ dsi_ctrl->hw.ops.clear_interrupt_status(&dsi_ctrl->hw, status);
+
+ spin_lock_irqsave(&dsi_ctrl->irq_info.irq_lock, flags);
+ cell_index = dsi_ctrl->cell_index;
+ spin_unlock_irqrestore(&dsi_ctrl->irq_info.irq_lock, flags);
+
+ /* clear error interrupts */
+ if (dsi_ctrl->hw.ops.get_error_status)
+ errors = dsi_ctrl->hw.ops.get_error_status(&dsi_ctrl->hw);
+ else
+ errors = 0x0;
+
+ if (errors) {
+ /* handle DSI error recovery */
+ dsi_ctrl_handle_error_status(dsi_ctrl, errors);
+ if (dsi_ctrl->hw.ops.clear_error_status)
+ dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
+ errors);
+ }
+
+ if (status & DSI_CMD_MODE_DMA_DONE) {
+ dsi_ctrl_disable_status_interrupt(dsi_ctrl,
+ DSI_SINT_CMD_MODE_DMA_DONE);
+ complete_all(&dsi_ctrl->irq_info.cmd_dma_done);
+ }
+
+ if (status & DSI_CMD_FRAME_DONE) {
+ dsi_ctrl_disable_status_interrupt(dsi_ctrl,
+ DSI_SINT_CMD_FRAME_DONE);
+ complete_all(&dsi_ctrl->irq_info.cmd_frame_done);
+ }
+
+ if (status & DSI_VIDEO_MODE_FRAME_DONE) {
+ dsi_ctrl_disable_status_interrupt(dsi_ctrl,
+ DSI_SINT_VIDEO_MODE_FRAME_DONE);
+ complete_all(&dsi_ctrl->irq_info.vid_frame_done);
+ }
+
+ if (status & DSI_BTA_DONE) {
+ dsi_ctrl_disable_status_interrupt(dsi_ctrl,
+ DSI_SINT_BTA_DONE);
+ complete_all(&dsi_ctrl->irq_info.bta_done);
+ }
+
+ for (i = 0; status && i < DSI_STATUS_INTERRUPT_COUNT; ++i) {
+ if (status & 0x1) {
+ spin_lock_irqsave(&dsi_ctrl->irq_info.irq_lock, flags);
+ cb_info = dsi_ctrl->irq_info.irq_stat_cb[i];
+ spin_unlock_irqrestore(
+ &dsi_ctrl->irq_info.irq_lock, flags);
+
+ if (cb_info.event_cb)
+ (void)cb_info.event_cb(cb_info.event_usr_ptr,
+ cb_info.event_idx,
+ cell_index, irq, 0, 0, 0);
+ }
+ status >>= 1;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * dsi_ctrl_setup_isr - register ISR handler
+ * @dsi_ctrl: Pointer to associated dsi_ctrl structure
+ * Returns: Zero on success
+ */
+static int dsi_ctrl_setup_isr(struct dsi_ctrl *dsi_ctrl)
+{
+ int irq_num, rc;
+
+ if (!dsi_ctrl)
+ return -EINVAL;
+ if (dsi_ctrl->irq_info.irq_num != -1)
+ return 0;
+
+ init_completion(&dsi_ctrl->irq_info.cmd_dma_done);
+ init_completion(&dsi_ctrl->irq_info.vid_frame_done);
+ init_completion(&dsi_ctrl->irq_info.cmd_frame_done);
+ init_completion(&dsi_ctrl->irq_info.bta_done);
+
+ irq_num = platform_get_irq(dsi_ctrl->pdev, 0);
+ if (irq_num < 0) {
+ pr_err("[DSI_%d] Failed to get IRQ number, %d\n",
+ dsi_ctrl->cell_index, irq_num);
+ rc = irq_num;
+ } else {
+ rc = devm_request_threaded_irq(&dsi_ctrl->pdev->dev, irq_num,
+ dsi_ctrl_isr, NULL, 0, "dsi_ctrl", dsi_ctrl);
+ if (rc) {
+ pr_err("[DSI_%d] Failed to request IRQ, %d\n",
+ dsi_ctrl->cell_index, rc);
+ } else {
+ dsi_ctrl->irq_info.irq_num = irq_num;
+ disable_irq_nosync(irq_num);
+
+ pr_info("[DSI_%d] IRQ %d registered\n",
+ dsi_ctrl->cell_index, irq_num);
+ }
+ }
+ return rc;
+}
+
+/**
+ * _dsi_ctrl_destroy_isr - unregister ISR handler
+ * @dsi_ctrl: Pointer to associated dsi_ctrl structure
+ */
+static void _dsi_ctrl_destroy_isr(struct dsi_ctrl *dsi_ctrl)
+{
+ if (!dsi_ctrl || !dsi_ctrl->pdev || dsi_ctrl->irq_info.irq_num < 0)
+ return;
+
+ if (dsi_ctrl->irq_info.irq_num != -1) {
+ devm_free_irq(&dsi_ctrl->pdev->dev,
+ dsi_ctrl->irq_info.irq_num, dsi_ctrl);
+ dsi_ctrl->irq_info.irq_num = -1;
+ }
+}
+
+void dsi_ctrl_enable_status_interrupt(struct dsi_ctrl *dsi_ctrl,
+ uint32_t intr_idx, struct dsi_event_cb_info *event_info)
+{
+ unsigned long flags;
+
+ if (!dsi_ctrl || dsi_ctrl->irq_info.irq_num == -1 ||
+ intr_idx >= DSI_STATUS_INTERRUPT_COUNT)
+ return;
+
+ spin_lock_irqsave(&dsi_ctrl->irq_info.irq_lock, flags);
+
+ if (dsi_ctrl->irq_info.irq_stat_refcount[intr_idx] == 0) {
+ /* enable irq on first request */
+ if (dsi_ctrl->irq_info.irq_stat_mask == 0)
+ enable_irq(dsi_ctrl->irq_info.irq_num);
+
+ /* update hardware mask */
+ dsi_ctrl->irq_info.irq_stat_mask |= BIT(intr_idx);
+ dsi_ctrl->hw.ops.enable_status_interrupts(&dsi_ctrl->hw,
+ dsi_ctrl->irq_info.irq_stat_mask);
+ }
+ ++(dsi_ctrl->irq_info.irq_stat_refcount[intr_idx]);
+
+ if (event_info)
+ dsi_ctrl->irq_info.irq_stat_cb[intr_idx] = *event_info;
+
+ spin_unlock_irqrestore(&dsi_ctrl->irq_info.irq_lock, flags);
+}
+
+void dsi_ctrl_disable_status_interrupt(struct dsi_ctrl *dsi_ctrl,
+ uint32_t intr_idx)
+{
+ unsigned long flags;
+
+ if (!dsi_ctrl || dsi_ctrl->irq_info.irq_num == -1 ||
+ intr_idx >= DSI_STATUS_INTERRUPT_COUNT)
+ return;
+
+ spin_lock_irqsave(&dsi_ctrl->irq_info.irq_lock, flags);
+
+ if (dsi_ctrl->irq_info.irq_stat_refcount[intr_idx])
+ if (--(dsi_ctrl->irq_info.irq_stat_refcount[intr_idx]) == 0) {
+ dsi_ctrl->irq_info.irq_stat_mask &= ~BIT(intr_idx);
+ dsi_ctrl->hw.ops.enable_status_interrupts(&dsi_ctrl->hw,
+ dsi_ctrl->irq_info.irq_stat_mask);
+
+ /* don't need irq if no lines are enabled */
+ if (dsi_ctrl->irq_info.irq_stat_mask == 0)
+ disable_irq_nosync(dsi_ctrl->irq_info.irq_num);
+ }
+
+ spin_unlock_irqrestore(&dsi_ctrl->irq_info.irq_lock, flags);
+}
+
+int dsi_ctrl_host_timing_update(struct dsi_ctrl *dsi_ctrl)
+{
+ if (!dsi_ctrl) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ if (dsi_ctrl->hw.ops.host_setup)
+ dsi_ctrl->hw.ops.host_setup(&dsi_ctrl->hw,
+ &dsi_ctrl->host_config.common_config);
+
+ if (dsi_ctrl->host_config.panel_mode == DSI_OP_CMD_MODE) {
+ if (dsi_ctrl->hw.ops.cmd_engine_setup)
+ dsi_ctrl->hw.ops.cmd_engine_setup(&dsi_ctrl->hw,
+ &dsi_ctrl->host_config.common_config,
+ &dsi_ctrl->host_config.u.cmd_engine);
+
+ if (dsi_ctrl->hw.ops.setup_cmd_stream)
+ dsi_ctrl->hw.ops.setup_cmd_stream(&dsi_ctrl->hw,
+ &dsi_ctrl->host_config.video_timing,
+ dsi_ctrl->host_config.video_timing.h_active * 3,
+ 0x0, NULL);
+ } else {
+ pr_err("invalid panel mode for resolution switch\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/**
* dsi_ctrl_host_init() - Initialize DSI host hardware.
* @dsi_ctrl: DSI controller handle.
@@ -1729,13 +2027,11 @@
&dsi_ctrl->host_config.video_timing);
}
-
+ dsi_ctrl_setup_isr(dsi_ctrl);
dsi_ctrl->hw.ops.enable_status_interrupts(&dsi_ctrl->hw, 0x0);
dsi_ctrl->hw.ops.enable_error_interrupts(&dsi_ctrl->hw, 0x0);
- /* Perform a soft reset before enabling dsi controller */
- dsi_ctrl->hw.ops.soft_reset(&dsi_ctrl->hw);
pr_debug("[DSI_%d]Host initialization complete\n",
dsi_ctrl->cell_index);
dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_HOST_INIT, 0x1);
@@ -1777,6 +2073,8 @@
mutex_lock(&dsi_ctrl->ctrl_lock);
+ _dsi_ctrl_destroy_isr(dsi_ctrl);
+
rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_HOST_INIT, 0x0);
if (rc) {
pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
@@ -1933,7 +2231,7 @@
*/
int dsi_ctrl_cmd_tx_trigger(struct dsi_ctrl *dsi_ctrl, u32 flags)
{
- int rc = 0;
+ int rc = 0, ret = 0;
u32 status = 0;
u32 mask = (DSI_CMD_MODE_DMA_DONE);
@@ -1944,27 +2242,43 @@
mutex_lock(&dsi_ctrl->ctrl_lock);
- reinit_completion(&dsi_ctrl->int_info.cmd_dma_done);
-
- dsi_ctrl->hw.ops.trigger_command_dma(&dsi_ctrl->hw);
+ if (!(flags & DSI_CTRL_CMD_BROADCAST_MASTER))
+ dsi_ctrl->hw.ops.trigger_command_dma(&dsi_ctrl->hw);
if ((flags & DSI_CTRL_CMD_BROADCAST) &&
- (flags & DSI_CTRL_CMD_BROADCAST_MASTER)) {
- u32 retry = 10;
+ (flags & DSI_CTRL_CMD_BROADCAST_MASTER)) {
+ dsi_ctrl_enable_status_interrupt(dsi_ctrl,
+ DSI_SINT_CMD_MODE_DMA_DONE, NULL);
+ reinit_completion(&dsi_ctrl->irq_info.cmd_dma_done);
- while ((status == 0) && (retry > 0)) {
- udelay(1000);
+ /* trigger command */
+ dsi_ctrl->hw.ops.trigger_command_dma(&dsi_ctrl->hw);
+
+ ret = wait_for_completion_timeout(
+ &dsi_ctrl->irq_info.cmd_dma_done,
+ msecs_to_jiffies(DSI_CTRL_TX_TO_MS));
+
+ if (ret == 0) {
status = dsi_ctrl->hw.ops.get_interrupt_status(
&dsi_ctrl->hw);
- status &= mask;
- retry--;
- dsi_ctrl->hw.ops.clear_interrupt_status(&dsi_ctrl->hw,
+ if (status & mask) {
+ status |= (DSI_CMD_MODE_DMA_DONE |
+ DSI_BTA_DONE);
+ dsi_ctrl->hw.ops.clear_interrupt_status(
+ &dsi_ctrl->hw,
status);
+ dsi_ctrl_disable_status_interrupt(dsi_ctrl,
+ DSI_SINT_CMD_MODE_DMA_DONE);
+ complete_all(&dsi_ctrl->irq_info.cmd_dma_done);
+ pr_warn("dma_tx done but irq not triggered\n");
+ } else {
+ rc = -ETIMEDOUT;
+ dsi_ctrl_disable_status_interrupt(dsi_ctrl,
+ DSI_SINT_CMD_MODE_DMA_DONE);
+ pr_err("[DSI_%d]Command transfer failed\n",
+ dsi_ctrl->cell_index);
+ }
}
- pr_debug("INT STATUS = %x, retry = %d\n", status, retry);
- if (retry == 0)
- pr_err("[DSI_%d]Command transfer failed\n",
- dsi_ctrl->cell_index);
}
mutex_unlock(&dsi_ctrl->ctrl_lock);
@@ -1972,6 +2286,28 @@
}
/**
+ * _dsi_ctrl_cache_misr - Cache frame MISR value
+ * @dsi_ctrl: Pointer to associated dsi_ctrl structure
+ */
+static void _dsi_ctrl_cache_misr(struct dsi_ctrl *dsi_ctrl)
+{
+ u32 misr;
+
+ if (!dsi_ctrl || !dsi_ctrl->hw.ops.collect_misr)
+ return;
+
+ misr = dsi_ctrl->hw.ops.collect_misr(&dsi_ctrl->hw,
+ dsi_ctrl->host_config.panel_mode);
+
+ if (misr)
+ dsi_ctrl->misr_cache = misr;
+
+ pr_debug("DSI_%d misr_cache = %x\n", dsi_ctrl->cell_index,
+ dsi_ctrl->misr_cache);
+
+}
+
+/**
* dsi_ctrl_set_power_state() - set power state for dsi controller
* @dsi_ctrl: DSI controller handle.
* @state: Power state.
@@ -2009,6 +2345,9 @@
goto error;
}
} else if (state == DSI_CTRL_POWER_VREG_OFF) {
+ if (dsi_ctrl->misr_enable)
+ _dsi_ctrl_cache_misr(dsi_ctrl);
+
rc = dsi_ctrl_enable_supplies(dsi_ctrl, false);
if (rc) {
pr_err("[%d]failed to disable vreg supplies, rc=%d\n",
@@ -2323,6 +2662,59 @@
}
/**
+ * dsi_ctrl_setup_misr() - Setup frame MISR
+ * @dsi_ctrl: DSI controller handle.
+ * @enable: enable/disable MISR.
+ * @frame_count: Number of frames to accumulate MISR.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_setup_misr(struct dsi_ctrl *dsi_ctrl,
+ bool enable,
+ u32 frame_count)
+{
+ if (!dsi_ctrl) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ if (!dsi_ctrl->hw.ops.setup_misr)
+ return 0;
+
+ mutex_lock(&dsi_ctrl->ctrl_lock);
+ dsi_ctrl->misr_enable = enable;
+ dsi_ctrl->hw.ops.setup_misr(&dsi_ctrl->hw,
+ dsi_ctrl->host_config.panel_mode,
+ enable, frame_count);
+ mutex_unlock(&dsi_ctrl->ctrl_lock);
+ return 0;
+}
+
+/**
+ * dsi_ctrl_collect_misr() - Read frame MISR
+ * @dsi_ctrl: DSI controller handle.
+ *
+ * Return: MISR value.
+ */
+u32 dsi_ctrl_collect_misr(struct dsi_ctrl *dsi_ctrl)
+{
+ u32 misr;
+
+ if (!dsi_ctrl || !dsi_ctrl->hw.ops.collect_misr)
+ return 0;
+
+ misr = dsi_ctrl->hw.ops.collect_misr(&dsi_ctrl->hw,
+ dsi_ctrl->host_config.panel_mode);
+ if (!misr)
+ misr = dsi_ctrl->misr_cache;
+
+ pr_debug("DSI_%d cached misr = %x, final = %x\n",
+ dsi_ctrl->cell_index, dsi_ctrl->misr_cache, misr);
+
+ return misr;
+}
+
+/**
* dsi_ctrl_drv_register() - register platform driver for dsi controller
*/
void dsi_ctrl_drv_register(void)
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
index 7f36fde..dff5b02 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
@@ -138,33 +138,26 @@
/**
* struct dsi_ctrl_interrupts - define interrupt information
- * @irq: IRQ id for the DSI controller.
- * @intr_lock: Spinlock to protect access to interrupt registers.
- * @interrupt_status: Status interrupts which need to be serviced.
- * @error_status: Error interurpts which need to be serviced.
- * @interrupts_enabled: Status interrupts which are enabled.
- * @errors_enabled: Error interrupts which are enabled.
+ * @irq_lock: Spinlock for ISR handler.
+ * @irq_num: Linux interrupt number associated with device.
+ * @irq_stat_mask: Hardware mask of currently enabled interrupts.
+ * @irq_stat_refcount: Number of times each interrupt has been requested.
+ * @irq_stat_cb: Status IRQ callback definitions.
* @cmd_dma_done: Completion signal for DSI_CMD_MODE_DMA_DONE interrupt
* @vid_frame_done: Completion signal for DSI_VIDEO_MODE_FRAME_DONE int.
* @cmd_frame_done: Completion signal for DSI_CMD_FRAME_DONE interrupt.
- * @interrupt_done_work: Work item for servicing status interrupts.
- * @error_status_work: Work item for servicing error interrupts.
*/
struct dsi_ctrl_interrupts {
- u32 irq;
- spinlock_t intr_lock; /* protects access to interrupt registers */
- u32 interrupt_status;
- u64 error_status;
-
- u32 interrupts_enabled;
- u64 errors_enabled;
+ spinlock_t irq_lock;
+ int irq_num;
+ uint32_t irq_stat_mask;
+ int irq_stat_refcount[DSI_STATUS_INTERRUPT_COUNT];
+ struct dsi_event_cb_info irq_stat_cb[DSI_STATUS_INTERRUPT_COUNT];
struct completion cmd_dma_done;
struct completion vid_frame_done;
struct completion cmd_frame_done;
-
- struct work_struct interrupt_done_work;
- struct work_struct error_status_work;
+ struct completion bta_done;
};
/**
@@ -180,7 +173,7 @@
* @hw: DSI controller hardware object.
* @current_state: Current driver and hardware state.
* @clk_cb: Callback for DSI clock control.
- * @int_info: Interrupt information.
+ * @irq_info: Interrupt information.
* @clk_info: Clock information.
* @clk_freq: DSi Link clock frequency information.
* @pwr_info: Power information.
@@ -195,6 +188,8 @@
* @vaddr: CPU virtual address of cmd buffer.
* @cmd_buffer_size: Size of command buffer.
* @debugfs_root: Root for debugfs entries.
+ * @misr_enable: Frame MISR enable/disable
+ * @misr_cache: Cached Frame MISR value
*/
struct dsi_ctrl {
struct platform_device *pdev;
@@ -212,7 +207,8 @@
struct dsi_ctrl_state_info current_state;
struct clk_ctrl_cb clk_cb;
- struct dsi_ctrl_interrupts int_info;
+ struct dsi_ctrl_interrupts irq_info;
+
/* Clock and power states */
struct dsi_ctrl_clk_info clk_info;
struct link_clk_freq clk_freq;
@@ -232,6 +228,10 @@
/* Debug Information */
struct dentry *debugfs_root;
+ /* MISR */
+ bool misr_enable;
+ u32 misr_cache;
+
};
/**
@@ -360,6 +360,17 @@
int dsi_ctrl_soft_reset(struct dsi_ctrl *dsi_ctrl);
/**
+ * dsi_ctrl_host_timing_update - reinitialize host with new timing values
+ * @dsi_ctrl: DSI controller handle.
+ *
+ * Reinitialize DSI controller hardware with new display timing values
+ * when resolution is switched dynamically.
+ *
+ * Return: error code
+ */
+int dsi_ctrl_host_timing_update(struct dsi_ctrl *dsi_ctrl);
+
+/**
* dsi_ctrl_host_init() - Initialize DSI host hardware.
* @dsi_ctrl: DSI controller handle.
*
@@ -560,6 +571,43 @@
struct dsi_clk_link_set *source_clks);
/**
+ * dsi_ctrl_enable_status_interrupt() - enable status interrupts
+ * @dsi_ctrl: DSI controller handle.
+ * @intr_idx: Index interrupt to disable.
+ * @event_info: Pointer to event callback definition
+ */
+void dsi_ctrl_enable_status_interrupt(struct dsi_ctrl *dsi_ctrl,
+ uint32_t intr_idx, struct dsi_event_cb_info *event_info);
+
+/**
+ * dsi_ctrl_disable_status_interrupt() - disable status interrupts
+ * @dsi_ctrl: DSI controller handle.
+ * @intr_idx: Index interrupt to disable.
+ */
+void dsi_ctrl_disable_status_interrupt(
+ struct dsi_ctrl *dsi_ctrl, uint32_t intr_idx);
+
+/**
+ * dsi_ctrl_setup_misr() - Setup frame MISR
+ * @dsi_ctrl: DSI controller handle.
+ * @enable: enable/disable MISR.
+ * @frame_count: Number of frames to accumulate MISR.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_setup_misr(struct dsi_ctrl *dsi_ctrl,
+ bool enable,
+ u32 frame_count);
+
+/**
+ * dsi_ctrl_collect_misr() - Read frame MISR
+ * @dsi_ctrl: DSI controller handle.
+ *
+ * Return: MISR value.
+ */
+u32 dsi_ctrl_collect_misr(struct dsi_ctrl *dsi_ctrl);
+
+/**
* dsi_ctrl_drv_register() - register platform driver for dsi controller
*/
void dsi_ctrl_drv_register(void);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
index bb72807..2130144 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
@@ -9,7 +9,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
*/
#ifndef _DSI_CTRL_HW_H_
@@ -84,6 +83,36 @@
};
/**
+ * enum dsi_status_int_index - index of interrupts generated by DSI controller
+ * @DSI_SINT_CMD_MODE_DMA_DONE: Command mode DMA packets are sent out.
+ * @DSI_SINT_CMD_STREAM0_FRAME_DONE: A frame of cmd mode stream0 is sent out.
+ * @DSI_SINT_CMD_STREAM1_FRAME_DONE: A frame of cmd mode stream1 is sent out.
+ * @DSI_SINT_CMD_STREAM2_FRAME_DONE: A frame of cmd mode stream2 is sent out.
+ * @DSI_SINT_VIDEO_MODE_FRAME_DONE: A frame of video mode stream is sent out.
+ * @DSI_SINT_BTA_DONE: A BTA is completed.
+ * @DSI_SINT_CMD_FRAME_DONE: A frame of selected cmd mode stream is
+ * sent out by MDP.
+ * @DSI_SINT_DYN_REFRESH_DONE: The dynamic refresh operation completed.
+ * @DSI_SINT_DESKEW_DONE: The deskew calibration operation done.
+ * @DSI_SINT_DYN_BLANK_DMA_DONE: The dynamic blanking DMA operation has
+ * completed.
+ */
+enum dsi_status_int_index {
+ DSI_SINT_CMD_MODE_DMA_DONE = 0,
+ DSI_SINT_CMD_STREAM0_FRAME_DONE = 1,
+ DSI_SINT_CMD_STREAM1_FRAME_DONE = 2,
+ DSI_SINT_CMD_STREAM2_FRAME_DONE = 3,
+ DSI_SINT_VIDEO_MODE_FRAME_DONE = 4,
+ DSI_SINT_BTA_DONE = 5,
+ DSI_SINT_CMD_FRAME_DONE = 6,
+ DSI_SINT_DYN_REFRESH_DONE = 7,
+ DSI_SINT_DESKEW_DONE = 8,
+ DSI_SINT_DYN_BLANK_DMA_DONE = 9,
+
+ DSI_STATUS_INTERRUPT_COUNT
+};
+
+/**
* enum dsi_status_int_type - status interrupts generated by DSI controller
* @DSI_CMD_MODE_DMA_DONE: Command mode DMA packets are sent out.
* @DSI_CMD_STREAM0_FRAME_DONE: A frame of command mode stream0 is sent out.
@@ -99,16 +128,89 @@
* completed.
*/
enum dsi_status_int_type {
- DSI_CMD_MODE_DMA_DONE = BIT(0),
- DSI_CMD_STREAM0_FRAME_DONE = BIT(1),
- DSI_CMD_STREAM1_FRAME_DONE = BIT(2),
- DSI_CMD_STREAM2_FRAME_DONE = BIT(3),
- DSI_VIDEO_MODE_FRAME_DONE = BIT(4),
- DSI_BTA_DONE = BIT(5),
- DSI_CMD_FRAME_DONE = BIT(6),
- DSI_DYN_REFRESH_DONE = BIT(7),
- DSI_DESKEW_DONE = BIT(8),
- DSI_DYN_BLANK_DMA_DONE = BIT(9)
+ DSI_CMD_MODE_DMA_DONE = BIT(DSI_SINT_CMD_MODE_DMA_DONE),
+ DSI_CMD_STREAM0_FRAME_DONE = BIT(DSI_SINT_CMD_STREAM0_FRAME_DONE),
+ DSI_CMD_STREAM1_FRAME_DONE = BIT(DSI_SINT_CMD_STREAM1_FRAME_DONE),
+ DSI_CMD_STREAM2_FRAME_DONE = BIT(DSI_SINT_CMD_STREAM2_FRAME_DONE),
+ DSI_VIDEO_MODE_FRAME_DONE = BIT(DSI_SINT_VIDEO_MODE_FRAME_DONE),
+ DSI_BTA_DONE = BIT(DSI_SINT_BTA_DONE),
+ DSI_CMD_FRAME_DONE = BIT(DSI_SINT_CMD_FRAME_DONE),
+ DSI_DYN_REFRESH_DONE = BIT(DSI_SINT_DYN_REFRESH_DONE),
+ DSI_DESKEW_DONE = BIT(DSI_SINT_DESKEW_DONE),
+ DSI_DYN_BLANK_DMA_DONE = BIT(DSI_SINT_DYN_BLANK_DMA_DONE)
+};
+
+/**
+ * enum dsi_error_int_index - index of error interrupts from DSI controller
+ * @DSI_EINT_RDBK_SINGLE_ECC_ERR: Single bit ECC error in read packet.
+ * @DSI_EINT_RDBK_MULTI_ECC_ERR: Multi bit ECC error in read packet.
+ * @DSI_EINT_RDBK_CRC_ERR: CRC error in read packet.
+ * @DSI_EINT_RDBK_INCOMPLETE_PKT: Incomplete read packet.
+ * @DSI_EINT_PERIPH_ERROR_PKT: Error packet returned from peripheral.
+ * @DSI_EINT_LP_RX_TIMEOUT: Low power reverse transmission timeout.
+ * @DSI_EINT_HS_TX_TIMEOUT: High speed fwd transmission timeout.
+ * @DSI_EINT_BTA_TIMEOUT: BTA timeout.
+ * @DSI_EINT_PLL_UNLOCK: PLL has unlocked.
+ * @DSI_EINT_DLN0_ESC_ENTRY_ERR: Incorrect LP Rx escape entry.
+ * @DSI_EINT_DLN0_ESC_SYNC_ERR: LP Rx data is not byte aligned.
+ * @DSI_EINT_DLN0_LP_CONTROL_ERR: Incorrect LP Rx state sequence.
+ * @DSI_EINT_PENDING_HS_TX_TIMEOUT: Pending High-speed transfer timeout.
+ * @DSI_EINT_INTERLEAVE_OP_CONTENTION: Interleave operation contention.
+ * @DSI_EINT_CMD_DMA_FIFO_UNDERFLOW: Command mode DMA FIFO underflow.
+ * @DSI_EINT_CMD_MDP_FIFO_UNDERFLOW: Command MDP FIFO underflow (failed to
+ * receive one complete line from MDP).
+ * @DSI_EINT_DLN0_HS_FIFO_OVERFLOW: High speed FIFO data lane 0 overflows.
+ * @DSI_EINT_DLN1_HS_FIFO_OVERFLOW: High speed FIFO data lane 1 overflows.
+ * @DSI_EINT_DLN2_HS_FIFO_OVERFLOW: High speed FIFO data lane 2 overflows.
+ * @DSI_EINT_DLN3_HS_FIFO_OVERFLOW: High speed FIFO data lane 3 overflows.
+ * @DSI_EINT_DLN0_HS_FIFO_UNDERFLOW: High speed FIFO data lane 0 underflows.
+ * @DSI_EINT_DLN1_HS_FIFO_UNDERFLOW: High speed FIFO data lane 1 underflows.
+ * @DSI_EINT_DLN2_HS_FIFO_UNDERFLOW: High speed FIFO data lane 2 underflows.
+ * @DSI_EINT_DLN3_HS_FIFO_UNDERFLOW: High speed FIFO data lane 3 underflows.
+ * @DSI_EINT_DLN0_LP0_CONTENTION: PHY level contention while lane 0 low.
+ * @DSI_EINT_DLN1_LP0_CONTENTION: PHY level contention while lane 1 low.
+ * @DSI_EINT_DLN2_LP0_CONTENTION: PHY level contention while lane 2 low.
+ * @DSI_EINT_DLN3_LP0_CONTENTION: PHY level contention while lane 3 low.
+ * @DSI_EINT_DLN0_LP1_CONTENTION: PHY level contention while lane 0 high.
+ * @DSI_EINT_DLN1_LP1_CONTENTION: PHY level contention while lane 1 high.
+ * @DSI_EINT_DLN2_LP1_CONTENTION: PHY level contention while lane 2 high.
+ * @DSI_EINT_DLN3_LP1_CONTENTION: PHY level contention while lane 3 high.
+ */
+enum dsi_error_int_index {
+ DSI_EINT_RDBK_SINGLE_ECC_ERR = 0,
+ DSI_EINT_RDBK_MULTI_ECC_ERR = 1,
+ DSI_EINT_RDBK_CRC_ERR = 2,
+ DSI_EINT_RDBK_INCOMPLETE_PKT = 3,
+ DSI_EINT_PERIPH_ERROR_PKT = 4,
+ DSI_EINT_LP_RX_TIMEOUT = 5,
+ DSI_EINT_HS_TX_TIMEOUT = 6,
+ DSI_EINT_BTA_TIMEOUT = 7,
+ DSI_EINT_PLL_UNLOCK = 8,
+ DSI_EINT_DLN0_ESC_ENTRY_ERR = 9,
+ DSI_EINT_DLN0_ESC_SYNC_ERR = 10,
+ DSI_EINT_DLN0_LP_CONTROL_ERR = 11,
+ DSI_EINT_PENDING_HS_TX_TIMEOUT = 12,
+ DSI_EINT_INTERLEAVE_OP_CONTENTION = 13,
+ DSI_EINT_CMD_DMA_FIFO_UNDERFLOW = 14,
+ DSI_EINT_CMD_MDP_FIFO_UNDERFLOW = 15,
+ DSI_EINT_DLN0_HS_FIFO_OVERFLOW = 16,
+ DSI_EINT_DLN1_HS_FIFO_OVERFLOW = 17,
+ DSI_EINT_DLN2_HS_FIFO_OVERFLOW = 18,
+ DSI_EINT_DLN3_HS_FIFO_OVERFLOW = 19,
+ DSI_EINT_DLN0_HS_FIFO_UNDERFLOW = 20,
+ DSI_EINT_DLN1_HS_FIFO_UNDERFLOW = 21,
+ DSI_EINT_DLN2_HS_FIFO_UNDERFLOW = 22,
+ DSI_EINT_DLN3_HS_FIFO_UNDERFLOW = 23,
+ DSI_EINT_DLN0_LP0_CONTENTION = 24,
+ DSI_EINT_DLN1_LP0_CONTENTION = 25,
+ DSI_EINT_DLN2_LP0_CONTENTION = 26,
+ DSI_EINT_DLN3_LP0_CONTENTION = 27,
+ DSI_EINT_DLN0_LP1_CONTENTION = 28,
+ DSI_EINT_DLN1_LP1_CONTENTION = 29,
+ DSI_EINT_DLN2_LP1_CONTENTION = 30,
+ DSI_EINT_DLN3_LP1_CONTENTION = 31,
+
+ DSI_ERROR_INTERRUPT_COUNT
};
/**
@@ -148,38 +250,38 @@
* @DSI_DLN3_LP1_CONTENTION: PHY level contention while lane 3 is high.
*/
enum dsi_error_int_type {
- DSI_RDBK_SINGLE_ECC_ERR = BIT(0),
- DSI_RDBK_MULTI_ECC_ERR = BIT(1),
- DSI_RDBK_CRC_ERR = BIT(2),
- DSI_RDBK_INCOMPLETE_PKT = BIT(3),
- DSI_PERIPH_ERROR_PKT = BIT(4),
- DSI_LP_RX_TIMEOUT = BIT(5),
- DSI_HS_TX_TIMEOUT = BIT(6),
- DSI_BTA_TIMEOUT = BIT(7),
- DSI_PLL_UNLOCK = BIT(8),
- DSI_DLN0_ESC_ENTRY_ERR = BIT(9),
- DSI_DLN0_ESC_SYNC_ERR = BIT(10),
- DSI_DLN0_LP_CONTROL_ERR = BIT(11),
- DSI_PENDING_HS_TX_TIMEOUT = BIT(12),
- DSI_INTERLEAVE_OP_CONTENTION = BIT(13),
- DSI_CMD_DMA_FIFO_UNDERFLOW = BIT(14),
- DSI_CMD_MDP_FIFO_UNDERFLOW = BIT(15),
- DSI_DLN0_HS_FIFO_OVERFLOW = BIT(16),
- DSI_DLN1_HS_FIFO_OVERFLOW = BIT(17),
- DSI_DLN2_HS_FIFO_OVERFLOW = BIT(18),
- DSI_DLN3_HS_FIFO_OVERFLOW = BIT(19),
- DSI_DLN0_HS_FIFO_UNDERFLOW = BIT(20),
- DSI_DLN1_HS_FIFO_UNDERFLOW = BIT(21),
- DSI_DLN2_HS_FIFO_UNDERFLOW = BIT(22),
- DSI_DLN3_HS_FIFO_UNDERFLOW = BIT(23),
- DSI_DLN0_LP0_CONTENTION = BIT(24),
- DSI_DLN1_LP0_CONTENTION = BIT(25),
- DSI_DLN2_LP0_CONTENTION = BIT(26),
- DSI_DLN3_LP0_CONTENTION = BIT(27),
- DSI_DLN0_LP1_CONTENTION = BIT(28),
- DSI_DLN1_LP1_CONTENTION = BIT(29),
- DSI_DLN2_LP1_CONTENTION = BIT(30),
- DSI_DLN3_LP1_CONTENTION = BIT(31),
+ DSI_RDBK_SINGLE_ECC_ERR = BIT(DSI_EINT_RDBK_SINGLE_ECC_ERR),
+ DSI_RDBK_MULTI_ECC_ERR = BIT(DSI_EINT_RDBK_MULTI_ECC_ERR),
+ DSI_RDBK_CRC_ERR = BIT(DSI_EINT_RDBK_CRC_ERR),
+ DSI_RDBK_INCOMPLETE_PKT = BIT(DSI_EINT_RDBK_INCOMPLETE_PKT),
+ DSI_PERIPH_ERROR_PKT = BIT(DSI_EINT_PERIPH_ERROR_PKT),
+ DSI_LP_RX_TIMEOUT = BIT(DSI_EINT_LP_RX_TIMEOUT),
+ DSI_HS_TX_TIMEOUT = BIT(DSI_EINT_HS_TX_TIMEOUT),
+ DSI_BTA_TIMEOUT = BIT(DSI_EINT_BTA_TIMEOUT),
+ DSI_PLL_UNLOCK = BIT(DSI_EINT_PLL_UNLOCK),
+ DSI_DLN0_ESC_ENTRY_ERR = BIT(DSI_EINT_DLN0_ESC_ENTRY_ERR),
+ DSI_DLN0_ESC_SYNC_ERR = BIT(DSI_EINT_DLN0_ESC_SYNC_ERR),
+ DSI_DLN0_LP_CONTROL_ERR = BIT(DSI_EINT_DLN0_LP_CONTROL_ERR),
+ DSI_PENDING_HS_TX_TIMEOUT = BIT(DSI_EINT_PENDING_HS_TX_TIMEOUT),
+ DSI_INTERLEAVE_OP_CONTENTION = BIT(DSI_EINT_INTERLEAVE_OP_CONTENTION),
+ DSI_CMD_DMA_FIFO_UNDERFLOW = BIT(DSI_EINT_CMD_DMA_FIFO_UNDERFLOW),
+ DSI_CMD_MDP_FIFO_UNDERFLOW = BIT(DSI_EINT_CMD_MDP_FIFO_UNDERFLOW),
+ DSI_DLN0_HS_FIFO_OVERFLOW = BIT(DSI_EINT_DLN0_HS_FIFO_OVERFLOW),
+ DSI_DLN1_HS_FIFO_OVERFLOW = BIT(DSI_EINT_DLN1_HS_FIFO_OVERFLOW),
+ DSI_DLN2_HS_FIFO_OVERFLOW = BIT(DSI_EINT_DLN2_HS_FIFO_OVERFLOW),
+ DSI_DLN3_HS_FIFO_OVERFLOW = BIT(DSI_EINT_DLN3_HS_FIFO_OVERFLOW),
+ DSI_DLN0_HS_FIFO_UNDERFLOW = BIT(DSI_EINT_DLN0_HS_FIFO_UNDERFLOW),
+ DSI_DLN1_HS_FIFO_UNDERFLOW = BIT(DSI_EINT_DLN1_HS_FIFO_UNDERFLOW),
+ DSI_DLN2_HS_FIFO_UNDERFLOW = BIT(DSI_EINT_DLN2_HS_FIFO_UNDERFLOW),
+ DSI_DLN3_HS_FIFO_UNDERFLOW = BIT(DSI_EINT_DLN3_HS_FIFO_UNDERFLOW),
+ DSI_DLN0_LP0_CONTENTION = BIT(DSI_EINT_DLN0_LP0_CONTENTION),
+ DSI_DLN1_LP0_CONTENTION = BIT(DSI_EINT_DLN1_LP0_CONTENTION),
+ DSI_DLN2_LP0_CONTENTION = BIT(DSI_EINT_DLN2_LP0_CONTENTION),
+ DSI_DLN3_LP0_CONTENTION = BIT(DSI_EINT_DLN3_LP0_CONTENTION),
+ DSI_DLN0_LP1_CONTENTION = BIT(DSI_EINT_DLN0_LP1_CONTENTION),
+ DSI_DLN1_LP1_CONTENTION = BIT(DSI_EINT_DLN1_LP1_CONTENTION),
+ DSI_DLN2_LP1_CONTENTION = BIT(DSI_EINT_DLN2_LP1_CONTENTION),
+ DSI_DLN3_LP1_CONTENTION = BIT(DSI_EINT_DLN3_LP1_CONTENTION),
};
/**
@@ -573,6 +675,26 @@
ssize_t (*reg_dump_to_buffer)(struct dsi_ctrl_hw *ctrl,
char *buf,
u32 size);
+
+ /**
+ * setup_misr() - Setup frame MISR
+ * @ctrl: Pointer to the controller host hardware.
+ * @panel_mode: CMD or VIDEO mode indicator
+ * @enable: Enable/disable MISR.
+ * @frame_count: Number of frames to accumulate MISR.
+ */
+ void (*setup_misr)(struct dsi_ctrl_hw *ctrl,
+ enum dsi_op_mode panel_mode,
+ bool enable, u32 frame_count);
+
+ /**
+ * collect_misr() - Read frame MISR
+ * @ctrl: Pointer to the controller host hardware.
+ * @panel_mode: CMD or VIDEO mode indicator
+ */
+ u32 (*collect_misr)(struct dsi_ctrl_hw *ctrl,
+ enum dsi_op_mode panel_mode);
+
};
/*
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_2_0.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_2_0.c
index c22849a..6421dc2 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_2_0.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_2_0.c
@@ -157,6 +157,10 @@
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_RESET_SW_TRIGGER));
len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_MISR_CMD_CTRL));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_MISR_VIDEO_CTRL));
+ len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_LANE_STATUS));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_LANE_CTRL));
@@ -193,6 +197,12 @@
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_AXI2AHB_CTRL));
len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_MISR_CMD_MDP0_32BIT));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_MISR_CMD_MDP1_32BIT));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_MISR_VIDEO_32BIT));
+ len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_CTRL2));
len += snprintf((buf + len), (size - len),
DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM2_CTRL));
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
index a024c43..c85d9f4 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
@@ -20,9 +20,12 @@
#include "dsi_ctrl_reg.h"
#include "dsi_hw.h"
#include "dsi_panel.h"
+#include "dsi_catalog.h"
#define MMSS_MISC_CLAMP_REG_OFF 0x0014
#define DSI_CTRL_DYNAMIC_FORCE_ON (0x23F|BIT(8)|BIT(9)|BIT(11)|BIT(21))
+#define DSI_CTRL_CMD_MISR_ENABLE BIT(28)
+#define DSI_CTRL_VIDEO_MISR_ENABLE BIT(16)
/* Unsupported formats default to RGB888 */
static const u8 cmd_mode_format_map[DSI_PIXEL_FORMAT_MAX] = {
@@ -76,7 +79,7 @@
DSI_W32(ctrl, DSI_CLK_CTRL, 0x23F);
/* Setup DSI control register */
- reg_value = 0;
+ reg_value = DSI_R32(ctrl, DSI_CTRL);
reg_value |= (cfg->en_crc_check ? BIT(24) : 0);
reg_value |= (cfg->en_ecc_check ? BIT(20) : 0);
reg_value |= BIT(8); /* Clock lane */
@@ -149,6 +152,70 @@
}
/**
+ * setup_misr() - Setup frame MISR
+ * @ctrl: Pointer to the controller host hardware.
+ * @panel_mode: CMD or VIDEO mode indicator
+ * @enable: Enable/disable MISR.
+ * @frame_count: Number of frames to accumulate MISR.
+ */
+void dsi_ctrl_hw_cmn_setup_misr(struct dsi_ctrl_hw *ctrl,
+ enum dsi_op_mode panel_mode,
+ bool enable,
+ u32 frame_count)
+{
+ u32 addr;
+ u32 config = 0;
+
+ if (panel_mode == DSI_OP_CMD_MODE) {
+ addr = DSI_MISR_CMD_CTRL;
+ if (enable)
+ config = DSI_CTRL_CMD_MISR_ENABLE;
+ } else {
+ addr = DSI_MISR_VIDEO_CTRL;
+ if (enable)
+ config = DSI_CTRL_VIDEO_MISR_ENABLE;
+ if (frame_count > 255)
+ frame_count = 255;
+ config |= frame_count << 8;
+ }
+
+ pr_debug("[DSI_%d] MISR ctrl: 0x%x\n", ctrl->index,
+ config);
+ DSI_W32(ctrl, addr, config);
+ wmb(); /* make sure MISR is configured */
+}
+
+/**
+ * collect_misr() - Read frame MISR
+ * @ctrl: Pointer to the controller host hardware.
+ * @panel_mode: CMD or VIDEO mode indicator
+ */
+u32 dsi_ctrl_hw_cmn_collect_misr(struct dsi_ctrl_hw *ctrl,
+ enum dsi_op_mode panel_mode)
+{
+ u32 addr;
+ u32 enabled;
+ u32 misr = 0;
+
+ if (panel_mode == DSI_OP_CMD_MODE) {
+ addr = DSI_MISR_CMD_MDP0_32BIT;
+ enabled = DSI_R32(ctrl, DSI_MISR_CMD_CTRL) &
+ DSI_CTRL_CMD_MISR_ENABLE;
+ } else {
+ addr = DSI_MISR_VIDEO_32BIT;
+ enabled = DSI_R32(ctrl, DSI_MISR_VIDEO_CTRL) &
+ DSI_CTRL_VIDEO_MISR_ENABLE;
+ }
+
+ if (enabled)
+ misr = DSI_R32(ctrl, addr);
+
+ pr_debug("[DSI_%d] MISR enabled %x value: 0x%x\n", ctrl->index,
+ enabled, misr);
+ return misr;
+}
+
+/**
* set_video_timing() - set up the timing for video frame
* @ctrl: Pointer to controller host hardware.
* @mode: Video mode information.
@@ -706,6 +773,8 @@
{
u32 reg = 0;
+ reg = DSI_R32(ctrl, DSI_INT_CTRL);
+
if (ints & DSI_CMD_MODE_DMA_DONE)
reg |= BIT(0);
if (ints & DSI_CMD_FRAME_DONE)
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
index 77da9b4..fcc59ef 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
@@ -9,13 +9,13 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
*/
#ifndef _DSI_DEFS_H_
#define _DSI_DEFS_H_
#include <linux/types.h>
+#include <drm/drm_mipi_dsi.h>
#include "msm_drv.h"
#define DSI_H_TOTAL(t) (((t)->h_active) + ((t)->h_back_porch) + \
@@ -76,11 +76,13 @@
* @DSI_MODE_FLAG_SEAMLESS: Seamless transition requested by user
* @DSI_MODE_FLAG_DFPS: Seamless transition is DynamicFPS
* @DSI_MODE_FLAG_VBLANK_PRE_MODESET: Transition needs VBLANK before Modeset
+ * @DSI_MODE_FLAG_DMS: Seamless transition is dynamic mode switch
*/
enum dsi_mode_flags {
DSI_MODE_FLAG_SEAMLESS = BIT(0),
DSI_MODE_FLAG_DFPS = BIT(1),
- DSI_MODE_FLAG_VBLANK_PRE_MODESET = BIT(2)
+ DSI_MODE_FLAG_VBLANK_PRE_MODESET = BIT(2),
+ DSI_MODE_FLAG_DMS = BIT(3),
};
/**
@@ -215,6 +217,68 @@
};
/**
+ * enum dsi_cmd_set_type - DSI command set type
+ * @DSI_CMD_SET_PRE_ON: Panel pre on
+ * @DSI_CMD_SET_ON: Panel on
+ * @DSI_CMD_SET_POST_ON: Panel post on
+ * @DSI_CMD_SET_PRE_OFF: Panel pre off
+ * @DSI_CMD_SET_OFF: Panel off
+ * @DSI_CMD_SET_POST_OFF: Panel post off
+ * @DSI_CMD_SET_PRE_RES_SWITCH: Pre resolution switch
+ * @DSI_CMD_SET_RES_SWITCH: Resolution switch
+ * @DSI_CMD_SET_POST_RES_SWITCH: Post resolution switch
+ * @DSI_CMD_SET_CMD_TO_VID_SWITCH: Cmd to video mode switch
+ * @DSI_CMD_SET_POST_CMD_TO_VID_SWITCH: Post cmd to vid switch
+ * @DSI_CMD_SET_VID_TO_CMD_SWITCH: Video to cmd mode switch
+ * @DSI_CMD_SET_POST_VID_TO_CMD_SWITCH: Post vid to cmd switch
+ * @DSI_CMD_SET_PANEL_STATUS: Panel status
+ * @DSI_CMD_SET_LP1: Low power mode 1
+ * @DSI_CMD_SET_LP2: Low power mode 2
+ * @DSI_CMD_SET_NOLP: Low power mode disable
+ * @DSI_CMD_SET_PPS: DSC PPS command
+ * @DSI_CMD_SET_ROI: Panel ROI update
+ * @DSI_CMD_SET_TIMING_SWITCH: Timing switch
+ * @DSI_CMD_SET_POST_TIMING_SWITCH: Post timing switch
+ * @DSI_CMD_SET_MAX
+ */
+enum dsi_cmd_set_type {
+ DSI_CMD_SET_PRE_ON = 0,
+ DSI_CMD_SET_ON,
+ DSI_CMD_SET_POST_ON,
+ DSI_CMD_SET_PRE_OFF,
+ DSI_CMD_SET_OFF,
+ DSI_CMD_SET_POST_OFF,
+ DSI_CMD_SET_PRE_RES_SWITCH,
+ DSI_CMD_SET_RES_SWITCH,
+ DSI_CMD_SET_POST_RES_SWITCH,
+ DSI_CMD_SET_CMD_TO_VID_SWITCH,
+ DSI_CMD_SET_POST_CMD_TO_VID_SWITCH,
+ DSI_CMD_SET_VID_TO_CMD_SWITCH,
+ DSI_CMD_SET_POST_VID_TO_CMD_SWITCH,
+ DSI_CMD_SET_PANEL_STATUS,
+ DSI_CMD_SET_LP1,
+ DSI_CMD_SET_LP2,
+ DSI_CMD_SET_NOLP,
+ DSI_CMD_SET_PPS,
+ DSI_CMD_SET_ROI,
+ DSI_CMD_SET_TIMING_SWITCH,
+ DSI_CMD_SET_POST_TIMING_SWITCH,
+ DSI_CMD_SET_MAX
+};
+
+/**
+ * enum dsi_cmd_set_state - command set state
+ * @DSI_CMD_SET_STATE_LP: dsi low power mode
+ * @DSI_CMD_SET_STATE_HS: dsi high speed mode
+ * @DSI_CMD_SET_STATE_MAX
+ */
+enum dsi_cmd_set_state {
+ DSI_CMD_SET_STATE_LP = 0,
+ DSI_CMD_SET_STATE_HS,
+ DSI_CMD_SET_STATE_MAX
+};
+
+/**
* enum dsi_phy_type - DSI phy types
* @DSI_PHY_TYPE_DPHY:
* @DSI_PHY_TYPE_CPHY:
@@ -247,6 +311,34 @@
};
/**
+ * struct dsi_cmd_desc - description of a dsi command
+ * @msg: dsi mipi msg packet
+ * @last_command: indicates whether the cmd is the last one to send
+ * @post_wait_ms: post wait duration
+ */
+struct dsi_cmd_desc {
+ struct mipi_dsi_msg msg;
+ bool last_command;
+ u32 post_wait_ms;
+};
+
+/**
+ * struct dsi_panel_cmd_set - command set of the panel
+ * @type: type of the command
+ * @state: state of the command
+ * @count: number of cmds
+ * @ctrl_idx: index of the dsi control
+ * @cmds: array of cmds
+ */
+struct dsi_panel_cmd_set {
+ enum dsi_cmd_set_type type;
+ enum dsi_cmd_set_state state;
+ u32 count;
+ u32 ctrl_idx;
+ struct dsi_cmd_desc *cmds;
+};
+
+/**
* struct dsi_mode_info - video mode information dsi frame
* @h_active: Active width of one frame in pixels.
* @h_back_porch: Horizontal back porch in pixels.
@@ -398,18 +490,44 @@
};
/**
+ * struct dsi_display_mode_priv_info - private mode info that will be attached
+ * with each drm mode
+ * @cmd_sets: Command sets of the mode
+ * @phy_timing_val: Phy timing values
+ * @phy_timing_len: Phy timing array length
+ * @panel_jitter: Panel jitter for RSC backoff
+ * @panel_prefill_lines: Panel prefill lines for RSC
+ * @topology: Topology selected for the panel
+ * @dsc: DSC compression info
+ * @dsc_enabled: DSC compression enabled
+ */
+struct dsi_display_mode_priv_info {
+ struct dsi_panel_cmd_set cmd_sets[DSI_CMD_SET_MAX];
+
+ u32 *phy_timing_val;
+ u32 phy_timing_len;
+
+ u32 panel_jitter_numer;
+ u32 panel_jitter_denom;
+ u32 panel_prefill_lines;
+
+ struct msm_display_topology topology;
+ struct msm_display_dsc_info dsc;
+ bool dsc_enabled;
+};
+
+/**
* struct dsi_display_mode - specifies mode for dsi display
* @timing: Timing parameters for the panel.
* @pixel_clk_khz: Pixel clock in Khz.
- * @panel_mode: Panel operation mode.
* @dsi_mode_flags: Flags to signal other drm components via private flags
+ * @priv_info: Mode private info
*/
struct dsi_display_mode {
struct dsi_mode_info timing;
u32 pixel_clk_khz;
- enum dsi_op_mode panel_mode;
u32 dsi_mode_flags;
- struct msm_mode_info *mode_info;
+ struct dsi_display_mode_priv_info *priv_info;
};
/**
@@ -447,5 +565,14 @@
r1->h == r2->h;
}
+struct dsi_event_cb_info {
+ uint32_t event_idx;
+ void *event_usr_ptr;
+
+ int (*event_cb)(void *event_usr_ptr,
+ uint32_t event_idx, uint32_t instance_idx,
+ uint32_t data0, uint32_t data1,
+ uint32_t data2, uint32_t data3);
+};
#endif /* _DSI_DEFS_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index 3dd4950..547a3e5 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -19,6 +19,8 @@
#include <linux/err.h>
#include "msm_drv.h"
+#include "sde_connector.h"
+#include "msm_mmu.h"
#include "dsi_display.h"
#include "dsi_panel.h"
#include "dsi_ctrl.h"
@@ -29,6 +31,9 @@
#define to_dsi_display(x) container_of(x, struct dsi_display, host)
#define INT_BASE_10 10
+#define NO_OVERRIDE -1
+
+#define MISR_BUFF_SIZE 256
static DEFINE_MUTEX(dsi_display_list_lock);
static LIST_HEAD(dsi_display_list);
@@ -109,9 +114,62 @@
return rc;
}
+
+enum dsi_pixel_format dsi_display_get_dst_format(void *display)
+{
+ enum dsi_pixel_format format = DSI_PIXEL_FORMAT_MAX;
+ struct dsi_display *dsi_display = (struct dsi_display *)display;
+
+ if (!dsi_display || !dsi_display->panel) {
+ pr_err("Invalid params(s) dsi_display %pK, panel %pK\n",
+ dsi_display,
+ ((dsi_display) ? dsi_display->panel : NULL));
+ return format;
+ }
+
+ format = dsi_display->panel->host_config.dst_format;
+ return format;
+}
+
+static void _dsi_display_setup_misr(struct dsi_display *display)
+{
+ int i;
+
+ for (i = 0; i < display->ctrl_count; i++) {
+ dsi_ctrl_setup_misr(display->ctrl[i].ctrl,
+ display->misr_enable,
+ display->misr_frame_count);
+ }
+}
+
+int dsi_display_set_power(struct drm_connector *connector,
+ int power_mode, void *disp)
+{
+ struct dsi_display *display = disp;
+ int rc = 0;
+
+ if (!display || !display->panel) {
+ pr_err("invalid display/panel\n");
+ return -EINVAL;
+ }
+
+ switch (power_mode) {
+ case SDE_MODE_DPMS_LP1:
+ rc = dsi_panel_set_lp1(display->panel);
+ break;
+ case SDE_MODE_DPMS_LP2:
+ rc = dsi_panel_set_lp2(display->panel);
+ break;
+ default:
+ rc = dsi_panel_set_nolp(display->panel);
+ break;
+ }
+ return rc;
+}
+
static ssize_t debugfs_dump_info_read(struct file *file,
- char __user *buff,
- size_t count,
+ char __user *user_buf,
+ size_t user_len,
loff_t *ppos)
{
struct dsi_display *display = file->private_data;
@@ -149,7 +207,7 @@
"\tClock master = %s\n",
display->ctrl[display->clk_master_idx].ctrl->name);
- if (copy_to_user(buff, buf, len)) {
+ if (copy_to_user(user_buf, buf, len)) {
kfree(buf);
return -EFAULT;
}
@@ -160,16 +218,151 @@
return len;
}
+static ssize_t debugfs_misr_setup(struct file *file,
+ const char __user *user_buf,
+ size_t user_len,
+ loff_t *ppos)
+{
+ struct dsi_display *display = file->private_data;
+ char *buf;
+ int rc = 0;
+ size_t len;
+ u32 enable, frame_count;
+
+ if (!display)
+ return -ENODEV;
+
+ if (*ppos)
+ return 0;
+
+ buf = kzalloc(MISR_BUFF_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* leave room for termination char */
+ len = min_t(size_t, user_len, MISR_BUFF_SIZE - 1);
+ if (copy_from_user(buf, user_buf, len)) {
+ rc = -EINVAL;
+ goto error;
+ }
+
+ buf[len] = '\0'; /* terminate the string */
+
+ if (sscanf(buf, "%u %u", &enable, &frame_count) != 2) {
+ rc = -EINVAL;
+ goto error;
+ }
+
+ display->misr_enable = enable;
+ display->misr_frame_count = frame_count;
+
+ mutex_lock(&display->display_lock);
+ rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
+ DSI_CORE_CLK, DSI_CLK_ON);
+ if (rc) {
+ pr_err("[%s] failed to enable DSI core clocks, rc=%d\n",
+ display->name, rc);
+ goto unlock;
+ }
+
+ _dsi_display_setup_misr(display);
+
+ rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
+ DSI_CORE_CLK, DSI_CLK_OFF);
+ if (rc) {
+ pr_err("[%s] failed to disable DSI core clocks, rc=%d\n",
+ display->name, rc);
+ goto unlock;
+ }
+
+ rc = user_len;
+unlock:
+ mutex_unlock(&display->display_lock);
+error:
+ kfree(buf);
+ return rc;
+}
+
+static ssize_t debugfs_misr_read(struct file *file,
+ char __user *user_buf,
+ size_t user_len,
+ loff_t *ppos)
+{
+ struct dsi_display *display = file->private_data;
+ char *buf;
+ u32 len = 0;
+ int rc = 0;
+ struct dsi_ctrl *dsi_ctrl;
+ int i;
+ u32 misr;
+ size_t max_len = min_t(size_t, user_len, MISR_BUFF_SIZE);
+
+ if (!display)
+ return -ENODEV;
+
+ if (*ppos)
+ return 0;
+
+ buf = kzalloc(max_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&display->display_lock);
+ rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
+ DSI_CORE_CLK, DSI_CLK_ON);
+ if (rc) {
+ pr_err("[%s] failed to enable DSI core clocks, rc=%d\n",
+ display->name, rc);
+ goto error;
+ }
+
+ for (i = 0; i < display->ctrl_count; i++) {
+ dsi_ctrl = display->ctrl[i].ctrl;
+ misr = dsi_ctrl_collect_misr(display->ctrl[i].ctrl);
+
+ len += snprintf((buf + len), max_len - len,
+ "DSI_%d MISR: 0x%x\n", dsi_ctrl->cell_index, misr);
+
+ if (len >= max_len)
+ break;
+ }
+
+ rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
+ DSI_CORE_CLK, DSI_CLK_OFF);
+ if (rc) {
+ pr_err("[%s] failed to disable DSI core clocks, rc=%d\n",
+ display->name, rc);
+ goto error;
+ }
+
+ if (copy_to_user(user_buf, buf, len)) {
+ rc = -EFAULT;
+ goto error;
+ }
+
+ *ppos += len;
+
+error:
+ mutex_unlock(&display->display_lock);
+ kfree(buf);
+ return len;
+}
static const struct file_operations dump_info_fops = {
.open = simple_open,
.read = debugfs_dump_info_read,
};
+static const struct file_operations misr_data_fops = {
+ .open = simple_open,
+ .read = debugfs_misr_read,
+ .write = debugfs_misr_setup,
+};
+
static int dsi_display_debugfs_init(struct dsi_display *display)
{
int rc = 0;
- struct dentry *dir, *dump_file;
+ struct dentry *dir, *dump_file, *misr_data;
dir = debugfs_create_dir(display->name, NULL);
if (IS_ERR_OR_NULL(dir)) {
@@ -180,13 +373,25 @@
}
dump_file = debugfs_create_file("dump_info",
- 0444,
+ 0400,
dir,
display,
&dump_info_fops);
if (IS_ERR_OR_NULL(dump_file)) {
rc = PTR_ERR(dump_file);
- pr_err("[%s] debugfs create file failed, rc=%d\n",
+ pr_err("[%s] debugfs create dump info file failed, rc=%d\n",
+ display->name, rc);
+ goto error_remove_dir;
+ }
+
+ misr_data = debugfs_create_file("misr_data",
+ 0600,
+ dir,
+ display,
+ &misr_data_fops);
+ if (IS_ERR_OR_NULL(misr_data)) {
+ rc = PTR_ERR(misr_data);
+ pr_err("[%s] debugfs create misr datafile failed, rc=%d\n",
display->name, rc);
goto error_remove_dir;
}
@@ -499,7 +704,45 @@
return 0;
}
+void dsi_display_enable_event(struct dsi_display *display,
+ uint32_t event_idx, struct dsi_event_cb_info *event_info,
+ bool enable)
+{
+ uint32_t irq_status_idx = DSI_STATUS_INTERRUPT_COUNT;
+ int i;
+ if (!display) {
+ pr_err("invalid display\n");
+ return;
+ }
+
+ if (event_info)
+ event_info->event_idx = event_idx;
+
+ switch (event_idx) {
+ case SDE_CONN_EVENT_VID_DONE:
+ irq_status_idx = DSI_SINT_VIDEO_MODE_FRAME_DONE;
+ break;
+ case SDE_CONN_EVENT_CMD_DONE:
+ irq_status_idx = DSI_SINT_CMD_FRAME_DONE;
+ break;
+ default:
+ /* nothing to do */
+ pr_debug("[%s] unhandled event %d\n", display->name, event_idx);
+ return;
+ }
+
+ if (enable) {
+ for (i = 0; i < display->ctrl_count; i++)
+ dsi_ctrl_enable_status_interrupt(
+ display->ctrl[i].ctrl, irq_status_idx,
+ event_info);
+ } else {
+ for (i = 0; i < display->ctrl_count; i++)
+ dsi_ctrl_disable_status_interrupt(
+ display->ctrl[i].ctrl, irq_status_idx);
+ }
+}
static int dsi_display_ctrl_power_on(struct dsi_display *display)
{
@@ -558,29 +801,46 @@
return rc;
}
-static int dsi_display_parse_cmdline_topology(unsigned int display_type)
+static void dsi_display_parse_cmdline_topology(struct dsi_display *display,
+ unsigned int display_type)
{
+ char *boot_str = NULL;
char *str = NULL;
- int top_index = -1;
+ unsigned long value;
if (display_type >= MAX_DSI_ACTIVE_DISPLAY) {
pr_err("display_type=%d not supported\n", display_type);
- return -EINVAL;
+ return;
}
+
if (display_type == DSI_PRIMARY)
- str = strnstr(dsi_display_primary,
- ":config", strlen(dsi_display_primary));
+ boot_str = dsi_display_primary;
else
- str = strnstr(dsi_display_secondary,
- ":config", strlen(dsi_display_secondary));
+ boot_str = dsi_display_secondary;
+
+ str = strnstr(boot_str, ":config", strlen(boot_str));
if (!str)
- return -EINVAL;
+ return;
if (kstrtol(str + strlen(":config"), INT_BASE_10,
- (unsigned long *)&top_index))
- return -EINVAL;
+ (unsigned long *)&value)) {
+ pr_err("invalid config index override: %s\n", boot_str);
+ return;
+ }
+ display->cmdline_topology = value;
- return top_index;
+ str = strnstr(boot_str, ":timing", strlen(boot_str));
+ if (!str)
+ return;
+
+ if (kstrtol(str + strlen(":timing"), INT_BASE_10,
+ (unsigned long *)&value)) {
+ pr_err("invalid timing index override: %s. resetting both timing and config\n",
+ boot_str);
+ display->cmdline_topology = NO_OVERRIDE;
+ return;
+ }
+ display->cmdline_timing = value;
}
/**
@@ -847,6 +1107,32 @@
return 0;
}
+static int dsi_display_ctrl_update(struct dsi_display *display)
+{
+ int rc = 0;
+ int i;
+ struct dsi_display_ctrl *ctrl;
+
+ for (i = 0 ; i < display->ctrl_count; i++) {
+ ctrl = &display->ctrl[i];
+ rc = dsi_ctrl_host_timing_update(ctrl->ctrl);
+ if (rc) {
+ pr_err("[%s] failed to update host_%d, rc=%d\n",
+ display->name, i, rc);
+ goto error_host_deinit;
+ }
+ }
+
+ return 0;
+error_host_deinit:
+ for (i = i - 1; i >= 0; i--) {
+ ctrl = &display->ctrl[i];
+ (void)dsi_ctrl_host_deinit(ctrl->ctrl);
+ }
+
+ return rc;
+}
+
static int dsi_display_ctrl_init(struct dsi_display *display)
{
int rc = 0;
@@ -1215,8 +1501,7 @@
goto error;
}
- rc = dsi_ctrl_cmd_tx_trigger(ctrl->ctrl,
- DSI_CTRL_CMD_BROADCAST);
+ rc = dsi_ctrl_cmd_tx_trigger(ctrl->ctrl, flags);
if (rc) {
pr_err("[%s] cmd trigger failed, rc=%d\n",
display->name, rc);
@@ -1224,9 +1509,7 @@
}
}
- rc = dsi_ctrl_cmd_tx_trigger(m_ctrl->ctrl,
- (DSI_CTRL_CMD_BROADCAST_MASTER |
- DSI_CTRL_CMD_BROADCAST));
+ rc = dsi_ctrl_cmd_tx_trigger(m_ctrl->ctrl, m_flags);
if (rc) {
pr_err("[%s] cmd trigger failed for master, rc=%d\n",
display->name, rc);
@@ -1285,6 +1568,7 @@
{
struct dsi_display *display = to_dsi_display(host);
struct dsi_display_ctrl *display_ctrl;
+ struct msm_gem_address_space *aspace = NULL;
int rc = 0, cnt = 0;
if (!host || !msg) {
@@ -1327,7 +1611,16 @@
pr_err("value of display->tx_cmd_buf is NULL");
goto error_disable_cmd_engine;
}
- rc = msm_gem_get_iova(display->tx_cmd_buf, 0,
+
+ aspace = msm_gem_smmu_address_space_get(display->drm_dev,
+ MSM_SMMU_DOMAIN_UNSECURE);
+ if (!aspace) {
+ pr_err("failed to get aspace\n");
+ rc = -EINVAL;
+ goto free_gem;
+ }
+
+ rc = msm_gem_get_iova(display->tx_cmd_buf, aspace,
&(display->cmd_buffer_iova));
if (rc) {
pr_err("failed to get the iova rc %d\n", rc);
@@ -1383,7 +1676,7 @@
}
return rc;
put_iova:
- msm_gem_put_iova(display->tx_cmd_buf, 0);
+ msm_gem_put_iova(display->tx_cmd_buf, aspace);
free_gem:
msm_gem_free_object(display->tx_cmd_buf);
error:
@@ -1990,17 +2283,6 @@
goto error_ctrl_put;
}
- if (display->panel->phy_timing_len) {
- for (i = 0; i < display->ctrl_count; i++) {
- ctrl = &display->ctrl[i];
- rc = dsi_phy_set_timing_params(ctrl->phy,
- display->panel->phy_timing_val,
- display->panel->phy_timing_len);
- if (rc)
- pr_err("failed to add DSI PHY timing params");
- }
- }
-
rc = dsi_display_parse_lane_map(display);
if (rc) {
pr_err("Lane map not found, rc=%d\n", rc);
@@ -2039,6 +2321,9 @@
dsi_ctrl_put(ctrl->ctrl);
}
+ if (display->panel)
+ dsi_panel_put(display->panel);
+
return rc;
}
@@ -2077,12 +2362,12 @@
{
struct dsi_display_mode *cur;
- if (!display || !tgt) {
+ if (!display || !tgt || !display->panel) {
pr_err("Invalid params\n");
return false;
}
- cur = &display->panel->mode;
+ cur = display->panel->cur_mode;
if (cur->timing.h_active != tgt->timing.h_active) {
pr_debug("timing.h_active differs %d %d\n",
@@ -2161,12 +2446,6 @@
pr_debug("pixel_clk_khz differs %d %d\n",
cur->pixel_clk_khz, tgt->pixel_clk_khz);
- if (cur->panel_mode != tgt->panel_mode) {
- pr_debug("panel_mode differs %d %d\n",
- cur->panel_mode, tgt->panel_mode);
- return false;
- }
-
if (cur->dsi_mode_flags != tgt->dsi_mode_flags)
pr_debug("flags differs %d %d\n",
cur->dsi_mode_flags, tgt->dsi_mode_flags);
@@ -2184,7 +2463,7 @@
int rc = 0;
int i = 0;
- if (!display || !dsi_mode) {
+ if (!display || !dsi_mode || !display->panel) {
pr_err("Invalid params\n");
return -EINVAL;
}
@@ -2227,7 +2506,7 @@
}
}
- panel_mode = &display->panel->mode;
+ panel_mode = display->panel->cur_mode;
memcpy(panel_mode, dsi_mode, sizeof(*panel_mode));
/*
* dsi_mode_flags flags are used to communicate with other drm driver
@@ -2382,6 +2661,14 @@
int rc = 0;
int i;
struct dsi_display_ctrl *ctrl;
+ struct dsi_display_mode_priv_info *priv_info;
+
+ priv_info = mode->priv_info;
+ if (!priv_info) {
+ pr_err("[%s] failed to get private info of the display mode",
+ display->name);
+ return -EINVAL;
+ }
rc = dsi_panel_get_host_cfg_for_mode(display->panel,
mode,
@@ -2414,6 +2701,17 @@
goto error;
}
}
+
+ if (priv_info->phy_timing_len) {
+ for (i = 0; i < display->ctrl_count; i++) {
+ ctrl = &display->ctrl[i];
+ rc = dsi_phy_set_timing_params(ctrl->phy,
+ priv_info->phy_timing_val,
+ priv_info->phy_timing_len);
+ if (rc)
+ pr_err("failed to add DSI PHY timing params");
+ }
+ }
error:
return rc;
}
@@ -2640,19 +2938,10 @@
goto error_host_deinit;
}
- rc = dsi_panel_get_mode_count(display->panel, &display->num_of_modes);
- if (rc) {
- pr_err("[%s] failed to get mode count, rc=%d\n",
- display->name, rc);
- goto error_panel_deinit;
- }
-
pr_info("Successfully bind display panel '%s'\n", display->name);
display->drm_dev = drm;
goto error;
-error_panel_deinit:
- (void)dsi_panel_drv_deinit(display->panel);
error_host_deinit:
(void)dsi_display_mipi_host_deinit(display);
error_clk_client_deinit:
@@ -2769,8 +3058,10 @@
boot_displays_parsed = true;
}
- /* Initialize cmdline_topology to use default topology */
- display->cmdline_topology = -1;
+ /* use default topology of every mode if not overridden */
+ display->cmdline_topology = NO_OVERRIDE;
+ display->cmdline_timing = 0;
+
if ((!display_from_cmdline) &&
(boot_displays[DSI_PRIMARY].boot_disp_en)) {
display->is_active = dsi_display_name_compare(pdev->dev.of_node,
@@ -2780,6 +3071,9 @@
(void)_dsi_display_dev_deinit(main_display);
component_del(&main_display->pdev->dev,
&dsi_display_comp_ops);
+ mutex_lock(&dsi_display_list_lock);
+ list_del(&main_display->list);
+ mutex_unlock(&dsi_display_list_lock);
comp_add_success = false;
default_active_node = NULL;
pr_debug("removed the existing comp ops\n");
@@ -2793,8 +3087,8 @@
pr_debug("cmdline primary dsi: %s\n",
display->name);
display_from_cmdline = true;
- display->cmdline_topology =
- dsi_display_parse_cmdline_topology(DSI_PRIMARY);
+ dsi_display_parse_cmdline_topology(display,
+ DSI_PRIMARY);
primary_np = pdev->dev.of_node;
}
}
@@ -2809,9 +3103,8 @@
if (primary_np) {
if (validate_dsi_display_selection()) {
display->is_active = true;
- display->cmdline_topology =
dsi_display_parse_cmdline_topology
- (DSI_SECONDARY);
+ (display, DSI_SECONDARY);
} else {
boot_displays[DSI_SECONDARY]
.boot_disp_en = false;
@@ -3012,7 +3305,6 @@
{
struct dsi_display *display;
struct dsi_panel_phy_props phy_props;
- struct dsi_mode_info *timing;
int i, rc;
if (!info || !disp) {
@@ -3036,31 +3328,18 @@
memset(info, 0, sizeof(struct msm_display_info));
info->intf_type = DRM_MODE_CONNECTOR_DSI;
- timing = &display->panel->mode.timing;
-
info->num_of_h_tiles = display->ctrl_count;
for (i = 0; i < info->num_of_h_tiles; i++)
info->h_tile_instance[i] = display->ctrl[i].ctrl->cell_index;
info->is_connected = true;
info->is_primary = true;
- info->frame_rate = timing->refresh_rate;
- info->vtotal = DSI_V_TOTAL(timing);
- info->prefill_lines = display->panel->panel_prefill_lines;
- info->jitter = display->panel->panel_jitter;
info->width_mm = phy_props.panel_width_mm;
info->height_mm = phy_props.panel_height_mm;
info->max_width = 1920;
info->max_height = 1080;
- info->comp_info.comp_type = MSM_DISPLAY_COMPRESSION_NONE;
- if (display->panel->dsc_enabled) {
- info->comp_info.comp_type = MSM_DISPLAY_COMPRESSION_DSC;
- memcpy(&info->comp_info.dsc_info, &display->panel->dsc,
- sizeof(struct msm_display_dsc_info));
- }
-
- switch (display->panel->mode.panel_mode) {
+ switch (display->panel->panel_mode) {
case DSI_OP_VIDEO_MODE:
info->capabilities |= MSM_DISPLAY_CAP_VID_MODE;
break;
@@ -3071,7 +3350,7 @@
break;
default:
pr_err("unknwown dsi panel mode %d\n",
- display->panel->mode.panel_mode);
+ display->panel->panel_mode);
break;
}
@@ -3083,16 +3362,57 @@
return rc;
}
-int dsi_display_get_modes(struct dsi_display *display,
- struct dsi_display_mode *modes,
- u32 *count)
+int dsi_display_get_mode_count(struct dsi_display *display,
+ u32 *count)
{
- int rc = 0;
- int i;
struct dsi_dfps_capabilities dfps_caps;
- int num_dfps_rates;
+ int num_dfps_rates, rc = 0;
- if (!display || !count) {
+ if (!display || !display->panel) {
+ pr_err("invalid display:%d panel:%d\n", display != NULL,
+ display ? display->panel != NULL : 0);
+ return -EINVAL;
+ }
+
+ mutex_lock(&display->display_lock);
+
+ *count = display->panel->num_timing_nodes;
+
+ rc = dsi_panel_get_dfps_caps(display->panel, &dfps_caps);
+ if (rc) {
+ pr_err("[%s] failed to get dfps caps from panel\n",
+ display->name);
+ goto done;
+ }
+
+ num_dfps_rates = !dfps_caps.dfps_support ? 1 :
+ dfps_caps.max_refresh_rate -
+ dfps_caps.min_refresh_rate + 1;
+
+ /* Inflate num_of_modes by fps in dfps */
+ *count = display->panel->num_timing_nodes * num_dfps_rates;
+
+done:
+ mutex_unlock(&display->display_lock);
+
+ return 0;
+}
+
+void dsi_display_put_mode(struct dsi_display *display,
+ struct dsi_display_mode *mode)
+{
+ dsi_panel_put_mode(mode);
+}
+
+int dsi_display_get_modes(struct dsi_display *display,
+ struct dsi_display_mode *modes)
+{
+ struct dsi_dfps_capabilities dfps_caps;
+ u32 num_dfps_rates, panel_mode_count;
+ u32 mode_idx, array_idx = 0;
+ int i, rc = 0;
+
+ if (!display || !modes) {
pr_err("Invalid params\n");
return -EINVAL;
}
@@ -3110,41 +3430,55 @@
dfps_caps.max_refresh_rate -
dfps_caps.min_refresh_rate + 1;
- if (!modes) {
- /* Inflate num_of_modes by fps in dfps */
- *count = display->num_of_modes * num_dfps_rates;
- goto error;
- }
+ panel_mode_count = display->panel->num_timing_nodes;
- for (i = 0; i < *count; i++) {
- /* Insert the dfps "sub-modes" between main panel modes */
- int panel_mode_idx = i / num_dfps_rates;
+ for (mode_idx = 0; mode_idx < panel_mode_count; mode_idx++) {
+ struct dsi_display_mode panel_mode;
+ int topology_override = NO_OVERRIDE;
- rc = dsi_panel_get_mode(display->panel, panel_mode_idx, modes);
+ if (display->cmdline_timing == mode_idx)
+ topology_override = display->cmdline_topology;
+
+ memset(&panel_mode, 0, sizeof(panel_mode));
+
+ rc = dsi_panel_get_mode(display->panel, mode_idx,
+ &panel_mode, topology_override);
if (rc) {
- pr_err("[%s] failed to get mode from panel\n",
- display->name);
+ pr_err("[%s] failed to get mode idx %d from panel\n",
+ display->name, mode_idx);
goto error;
}
- if (dfps_caps.dfps_support) {
- modes->timing.refresh_rate = dfps_caps.min_refresh_rate
- + (i % num_dfps_rates);
- modes->pixel_clk_khz = (DSI_H_TOTAL(&modes->timing) *
- DSI_V_TOTAL(&modes->timing) *
- modes->timing.refresh_rate) / 1000;
- }
-
if (display->ctrl_count > 1) { /* TODO: remove if */
- modes->timing.h_active *= display->ctrl_count;
- modes->timing.h_front_porch *= display->ctrl_count;
- modes->timing.h_sync_width *= display->ctrl_count;
- modes->timing.h_back_porch *= display->ctrl_count;
- modes->timing.h_skew *= display->ctrl_count;
- modes->pixel_clk_khz *= display->ctrl_count;
+ panel_mode.timing.h_active *= display->ctrl_count;
+ panel_mode.timing.h_front_porch *= display->ctrl_count;
+ panel_mode.timing.h_sync_width *= display->ctrl_count;
+ panel_mode.timing.h_back_porch *= display->ctrl_count;
+ panel_mode.timing.h_skew *= display->ctrl_count;
+ panel_mode.pixel_clk_khz *= display->ctrl_count;
}
- modes++;
+ for (i = 0; i < num_dfps_rates; i++) {
+ struct dsi_display_mode *sub_mode = &modes[array_idx];
+
+ if (!sub_mode) {
+ pr_err("invalid mode data\n");
+ return -EFAULT;
+ }
+
+ memcpy(sub_mode, &panel_mode, sizeof(panel_mode));
+
+ if (dfps_caps.dfps_support) {
+ sub_mode->timing.refresh_rate =
+ dfps_caps.min_refresh_rate +
+ (i % num_dfps_rates);
+ sub_mode->pixel_clk_khz =
+ (DSI_H_TOTAL(&sub_mode->timing) *
+ DSI_V_TOTAL(&sub_mode->timing) *
+ sub_mode->timing.refresh_rate) / 1000;
+ }
+ array_idx++;
+ }
}
error:
@@ -3217,7 +3551,7 @@
int rc = 0;
struct dsi_display_mode adj_mode;
- if (!display || !mode) {
+ if (!display || !mode || !display->panel) {
pr_err("Invalid params\n");
return -EINVAL;
}
@@ -3238,6 +3572,17 @@
pr_err("[%s] failed to set mode\n", display->name);
goto error;
}
+
+ if (!display->panel->cur_mode) {
+ display->panel->cur_mode =
+ kzalloc(sizeof(struct dsi_display_mode), GFP_KERNEL);
+ if (!display->panel->cur_mode) {
+ rc = -ENOMEM;
+ goto error;
+ }
+ }
+
+ memcpy(display->panel->cur_mode, &adj_mode, sizeof(adj_mode));
error:
mutex_unlock(&display->display_lock);
return rc;
@@ -3269,17 +3614,80 @@
return rc;
}
+static int dsi_display_pre_switch(struct dsi_display *display)
+{
+ int rc = 0;
+
+ rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
+ DSI_CORE_CLK, DSI_CLK_ON);
+ if (rc) {
+ pr_err("[%s] failed to enable DSI core clocks, rc=%d\n",
+ display->name, rc);
+ goto error;
+ }
+
+ rc = dsi_display_ctrl_update(display);
+ if (rc) {
+ pr_err("[%s] failed to update DSI controller, rc=%d\n",
+ display->name, rc);
+ goto error_ctrl_clk_off;
+ }
+
+ rc = dsi_display_set_clk_src(display);
+ if (rc) {
+ pr_err("[%s] failed to set DSI link clock source, rc=%d\n",
+ display->name, rc);
+ goto error_ctrl_deinit;
+ }
+
+ rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
+ DSI_LINK_CLK, DSI_CLK_ON);
+ if (rc) {
+ pr_err("[%s] failed to enable DSI link clocks, rc=%d\n",
+ display->name, rc);
+ goto error_ctrl_deinit;
+ }
+
+ goto error;
+
+error_ctrl_deinit:
+ (void)dsi_display_ctrl_deinit(display);
+error_ctrl_clk_off:
+ (void)dsi_display_clk_ctrl(display->dsi_clk_handle,
+ DSI_CORE_CLK, DSI_CLK_OFF);
+error:
+ return rc;
+}
+
int dsi_display_prepare(struct dsi_display *display)
{
int rc = 0;
+ struct dsi_display_mode *mode;
if (!display) {
pr_err("Invalid params\n");
return -EINVAL;
}
+ if (!display->panel->cur_mode) {
+ pr_err("no valid mode set for the display");
+ return -EINVAL;
+ }
+
mutex_lock(&display->display_lock);
+ mode = display->panel->cur_mode;
+
+ if (mode->dsi_mode_flags & DSI_MODE_FLAG_DMS) {
+ /* update dsi ctrl for new mode */
+ rc = dsi_display_pre_switch(display);
+ if (rc)
+ pr_err("[%s] panel pre-prepare-res-switch failed, rc=%d\n",
+ display->name, rc);
+
+ goto error;
+ }
+
rc = dsi_panel_pre_prepare(display->panel);
if (rc) {
pr_err("[%s] panel pre-prepare failed, rc=%d\n",
@@ -3308,6 +3716,13 @@
goto error_ctrl_clk_off;
}
+ rc = dsi_display_set_clk_src(display);
+ if (rc) {
+ pr_err("[%s] failed to set DSI link clock source, rc=%d\n",
+ display->name, rc);
+ goto error_phy_disable;
+ }
+
rc = dsi_display_ctrl_init(display);
if (rc) {
pr_err("[%s] failed to setup DSI controller, rc=%d\n",
@@ -3315,10 +3730,10 @@
goto error_phy_disable;
}
- rc = dsi_display_set_clk_src(display);
+ rc = dsi_display_ctrl_host_enable(display);
if (rc) {
- pr_err("[%s] failed to set DSI link clock source, rc=%d\n",
- display->name, rc);
+ pr_err("[%s] failed to enable DSI host, rc=%d\n",
+ display->name, rc);
goto error_ctrl_deinit;
}
@@ -3327,29 +3742,28 @@
if (rc) {
pr_err("[%s] failed to enable DSI link clocks, rc=%d\n",
display->name, rc);
- goto error_ctrl_deinit;
+ goto error_host_engine_off;
}
- rc = dsi_display_ctrl_host_enable(display);
+ rc = dsi_display_soft_reset(display);
if (rc) {
- pr_err("[%s] failed to enable DSI host, rc=%d\n",
- display->name, rc);
+ pr_err("[%s] failed soft reset, rc=%d\n", display->name, rc);
goto error_ctrl_link_off;
}
rc = dsi_panel_prepare(display->panel);
if (rc) {
pr_err("[%s] panel prepare failed, rc=%d\n", display->name, rc);
- goto error_host_engine_off;
+ goto error_ctrl_link_off;
}
goto error;
-error_host_engine_off:
- (void)dsi_display_ctrl_host_disable(display);
error_ctrl_link_off:
(void)dsi_display_clk_ctrl(display->dsi_clk_handle,
DSI_LINK_CLK, DSI_CLK_OFF);
+error_host_engine_off:
+ (void)dsi_display_ctrl_host_disable(display);
error_ctrl_deinit:
(void)dsi_display_ctrl_deinit(display);
error_phy_disable:
@@ -3465,6 +3879,10 @@
{
int rc = 0;
+ /* check and setup MISR */
+ if (display->misr_enable)
+ _dsi_display_setup_misr(display);
+
rc = dsi_display_set_roi(display, params->rois);
return rc;
@@ -3473,23 +3891,40 @@
int dsi_display_enable(struct dsi_display *display)
{
int rc = 0;
+ struct dsi_display_mode *mode;
- if (!display) {
+ if (!display || !display->panel) {
pr_err("Invalid params\n");
return -EINVAL;
}
+ if (!display->panel->cur_mode) {
+ pr_err("no valid mode set for the display");
+ return -EINVAL;
+ }
+
mutex_lock(&display->display_lock);
- rc = dsi_panel_enable(display->panel);
- if (rc) {
- pr_err("[%s] failed to enable DSI panel, rc=%d\n",
- display->name, rc);
- goto error;
+ mode = display->panel->cur_mode;
+
+ if (mode->dsi_mode_flags & DSI_MODE_FLAG_DMS) {
+ rc = dsi_panel_post_switch(display->panel);
+ if (rc) {
+ pr_err("[%s] failed to switch DSI panel mode, rc=%d\n",
+ display->name, rc);
+ goto error;
+ }
+ } else {
+ rc = dsi_panel_enable(display->panel);
+ if (rc) {
+ pr_err("[%s] failed to enable DSI panel, rc=%d\n",
+ display->name, rc);
+ goto error;
+ }
}
- if (display->panel->dsc_enabled) {
- display->panel->dsc.pic_width *= display->ctrl_count;
+ if (mode->priv_info->dsc_enabled) {
+ mode->priv_info->dsc.pic_width *= display->ctrl_count;
rc = dsi_panel_update_pps(display->panel);
if (rc) {
pr_err("[%s] panel pps cmd update failed, rc=%d\n",
@@ -3498,6 +3933,15 @@
}
}
+ if (mode->dsi_mode_flags & DSI_MODE_FLAG_DMS) {
+ rc = dsi_panel_switch(display->panel);
+ if (rc)
+ pr_err("[%s] failed to switch DSI panel mode, rc=%d\n",
+ display->name, rc);
+
+ goto error_disable_panel;
+ }
+
if (display->config.panel_mode == DSI_OP_VIDEO_MODE) {
rc = dsi_display_vid_engine_enable(display);
if (rc) {
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
index 9aa3113..1c30b9c 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
@@ -141,8 +141,8 @@
* @clock_info: Clock sourcing for DSI display.
* @config: DSI host configuration information.
* @lane_map: Lane mapping between DSI host and Panel.
- * @num_of_modes: Number of modes supported by display.
* @cmdline_topology: Display topology shared from kernel command line.
+ * @cmdline_timing: Display timing shared from kernel command line.
* @is_tpg_enabled: TPG state.
* @ulps_enabled: ulps state.
* @clamp_enabled: clamp state.
@@ -154,6 +154,8 @@
* @dsi_clk_handle: DSI clock handle.
* @mdp_clk_handle: MDP clock handle.
* @root: Debugfs root directory
+ * @misr_enable:      Frame MISR enable/disable
+ * @misr_frame_count: Number of frames to accumulate the MISR value
*/
struct dsi_display {
struct platform_device *pdev;
@@ -180,8 +182,8 @@
struct dsi_display_clk_info clock_info;
struct dsi_host_config config;
struct dsi_lane_map lane_map;
- u32 num_of_modes;
int cmdline_topology;
+ int cmdline_timing;
bool is_tpg_enabled;
bool ulps_enabled;
bool clamp_enabled;
@@ -201,6 +203,9 @@
/* DEBUG FS */
struct dentry *root;
+
+ bool misr_enable;
+ u32 misr_frame_count;
};
int dsi_display_dev_probe(struct platform_device *pdev);
@@ -275,21 +280,36 @@
int dsi_display_get_info(struct msm_display_info *info, void *disp);
/**
+ * dsi_display_get_mode_count() - get number of modes supported by the display
+ * @display: Handle to display.
+ * @count: Number of modes supported
+ *
+ * Return: error code.
+ */
+int dsi_display_get_mode_count(struct dsi_display *display, u32 *count);
+
+/**
* dsi_display_get_modes() - get modes supported by display
* @display: Handle to display.
* @modes; Pointer to array of modes. Memory allocated should be
* big enough to store (count * struct dsi_display_mode)
* elements. If modes pointer is NULL, number of modes will
* be stored in the memory pointed to by count.
- * @count: If modes is NULL, number of modes will be stored. If
- * not, mode information will be copied (number of modes
- * copied will be equal to *count).
*
* Return: error code.
*/
int dsi_display_get_modes(struct dsi_display *display,
- struct dsi_display_mode *modes,
- u32 *count);
+ struct dsi_display_mode *modes);
+
+/**
+ * dsi_display_put_mode() - free up mode created for the display
+ * @display: Handle to display.
+ * @mode: Display mode to be freed up
+ *
+ * Return: error code.
+ */
+void dsi_display_put_mode(struct dsi_display *display,
+ struct dsi_display_mode *mode);
/**
* dsi_display_validate_mode() - validates if mode is supported by display
@@ -444,6 +464,17 @@
int dsi_display_clock_gate(struct dsi_display *display, bool enable);
int dsi_dispaly_static_frame(struct dsi_display *display, bool enable);
+/**
+ * dsi_display_enable_event() - enable interrupt based connector event
+ * @display: Handle to display.
+ * @event_idx: Event index.
+ * @event_info: Event callback definition.
+ * @enable: Whether to enable/disable the event interrupt.
+ */
+void dsi_display_enable_event(struct dsi_display *display,
+ uint32_t event_idx, struct dsi_event_cb_info *event_info,
+ bool enable);
+
int dsi_display_set_backlight(void *display, u32 bl_lvl);
/**
@@ -461,6 +492,22 @@
*/
int dsi_display_soft_reset(void *display);
+/**
+ * dsi_display_set_power - update power/dpms setting
+ * @connector: Pointer to drm connector structure
+ * @power_mode: One of the following,
+ * SDE_MODE_DPMS_ON
+ * SDE_MODE_DPMS_LP1
+ * SDE_MODE_DPMS_LP2
+ * SDE_MODE_DPMS_STANDBY
+ * SDE_MODE_DPMS_SUSPEND
+ * SDE_MODE_DPMS_OFF
+ * @display: Pointer to private display structure
+ * Returns: Zero on success
+ */
+int dsi_display_set_power(struct drm_connector *connector,
+ int power_mode, void *display);
+
/*
* dsi_display_pre_kickoff - program kickoff-time features
* @display: Pointer to private display structure
@@ -469,5 +516,12 @@
*/
int dsi_display_pre_kickoff(struct dsi_display *display,
struct msm_display_kickoff_params *params);
+/**
+ * dsi_display_get_dst_format() - get dst_format from DSI display
+ * @display: Handle to display
+ *
+ * Return: enum dsi_pixel_format type
+ */
+enum dsi_pixel_format dsi_display_get_dst_format(void *display);
#endif /* _DSI_DISPLAY_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display_test.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display_test.c
index 93fb041..6e41f36 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display_test.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display_test.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -34,7 +34,7 @@
test = container_of(work, struct dsi_display_test, test_work);
display = test->display;
- rc = dsi_display_get_modes(display, NULL, &count);
+ rc = dsi_display_get_mode_count(display, &count);
if (rc) {
pr_err("failed to get modes count, rc=%d\n", rc);
goto test_fail;
@@ -47,7 +47,7 @@
goto test_fail;
}
- rc = dsi_display_get_modes(display, modes, &count);
+ rc = dsi_display_get_modes(display, modes);
if (rc) {
pr_err("failed to get modes, rc=%d\n", rc);
goto test_fail_free_modes;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
index 4e09cfb..30e5f02 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
@@ -49,9 +49,9 @@
dsi_mode->timing.refresh_rate = drm_mode->vrefresh;
dsi_mode->pixel_clk_khz = drm_mode->clock;
- dsi_mode->panel_mode = 0; /* TODO: Panel Mode */
- dsi_mode->mode_info = (struct msm_mode_info *)drm_mode->private;
+ dsi_mode->priv_info =
+ (struct dsi_display_mode_priv_info *)drm_mode->private;
if (msm_is_mode_seamless(drm_mode))
dsi_mode->dsi_mode_flags |= DSI_MODE_FLAG_SEAMLESS;
@@ -59,6 +59,8 @@
dsi_mode->dsi_mode_flags |= DSI_MODE_FLAG_DFPS;
if (msm_needs_vblank_pre_modeset(drm_mode))
dsi_mode->dsi_mode_flags |= DSI_MODE_FLAG_VBLANK_PRE_MODESET;
+ if (msm_is_mode_seamless_dms(drm_mode))
+ dsi_mode->dsi_mode_flags |= DSI_MODE_FLAG_DMS;
}
static void convert_to_drm_mode(const struct dsi_display_mode *dsi_mode,
@@ -84,7 +86,7 @@
drm_mode->vrefresh = dsi_mode->timing.refresh_rate;
drm_mode->clock = dsi_mode->pixel_clk_khz;
- drm_mode->private = (int *)dsi_mode->mode_info;
+ drm_mode->private = (int *)dsi_mode->priv_info;
if (dsi_mode->dsi_mode_flags & DSI_MODE_FLAG_SEAMLESS)
drm_mode->flags |= DRM_MODE_FLAG_SEAMLESS;
@@ -92,6 +94,8 @@
drm_mode->private_flags |= MSM_MODE_FLAG_SEAMLESS_DYNAMIC_FPS;
if (dsi_mode->dsi_mode_flags & DSI_MODE_FLAG_VBLANK_PRE_MODESET)
drm_mode->private_flags |= MSM_MODE_FLAG_VBLANK_PRE_MODESET;
+ if (dsi_mode->dsi_mode_flags & DSI_MODE_FLAG_DMS)
+ drm_mode->private_flags |= MSM_MODE_FLAG_SEAMLESS_DMS;
drm_mode_set_name(drm_mode);
}
@@ -237,10 +241,6 @@
memset(&(c_bridge->dsi_mode), 0x0, sizeof(struct dsi_display_mode));
convert_to_dsi_mode(adjusted_mode, &(c_bridge->dsi_mode));
-
- pr_debug("note: using panel cmd/vid mode instead of user val\n");
- c_bridge->dsi_mode.panel_mode =
- c_bridge->display->panel->mode.panel_mode;
}
static bool dsi_bridge_mode_fixup(struct drm_bridge *bridge,
@@ -248,9 +248,9 @@
struct drm_display_mode *adjusted_mode)
{
int rc = 0;
- bool ret = true;
struct dsi_bridge *c_bridge = to_dsi_bridge(bridge);
struct dsi_display_mode dsi_mode;
+ struct drm_display_mode cur_mode;
if (!bridge || !mode || !adjusted_mode) {
pr_err("Invalid params\n");
@@ -263,31 +263,55 @@
DSI_VALIDATE_FLAG_ALLOW_ADJUST);
if (rc) {
pr_err("[%d] mode is not valid, rc=%d\n", c_bridge->id, rc);
- ret = false;
- } else {
- convert_to_drm_mode(&dsi_mode, adjusted_mode);
+ return false;
}
- return ret;
+ if (bridge->encoder && bridge->encoder->crtc) {
+ cur_mode = bridge->encoder->crtc->mode;
+
+ if (!drm_mode_equal(&cur_mode, adjusted_mode))
+ dsi_mode.dsi_mode_flags |= DSI_MODE_FLAG_DMS;
+ }
+
+ convert_to_drm_mode(&dsi_mode, adjusted_mode);
+
+ return true;
}
-int dsi_conn_get_topology(const struct drm_display_mode *drm_mode,
- struct msm_display_topology *topology,
+int dsi_conn_get_mode_info(const struct drm_display_mode *drm_mode,
+ struct msm_mode_info *mode_info,
u32 max_mixer_width)
{
struct dsi_display_mode dsi_mode;
+ struct dsi_mode_info *timing;
- if (!drm_mode || !topology)
+ if (!drm_mode || !mode_info)
return -EINVAL;
convert_to_dsi_mode(drm_mode, &dsi_mode);
- if (!dsi_mode.mode_info)
+ if (!dsi_mode.priv_info)
return -EINVAL;
- memcpy(topology, &dsi_mode.mode_info->topology,
+ memset(mode_info, 0, sizeof(*mode_info));
+
+ timing = &dsi_mode.timing;
+ mode_info->frame_rate = dsi_mode.timing.refresh_rate;
+ mode_info->vtotal = DSI_V_TOTAL(timing);
+ mode_info->prefill_lines = dsi_mode.priv_info->panel_prefill_lines;
+ mode_info->jitter_numer = dsi_mode.priv_info->panel_jitter_numer;
+ mode_info->jitter_denom = dsi_mode.priv_info->panel_jitter_denom;
+
+ memcpy(&mode_info->topology, &dsi_mode.priv_info->topology,
sizeof(struct msm_display_topology));
+ mode_info->comp_info.comp_type = MSM_DISPLAY_COMPRESSION_NONE;
+ if (dsi_mode.priv_info->dsc_enabled) {
+ mode_info->comp_info.comp_type = MSM_DISPLAY_COMPRESSION_DSC;
+ memcpy(&mode_info->comp_info.dsc_info, &dsi_mode.priv_info->dsc,
+ sizeof(dsi_mode.priv_info->dsc));
+ }
+
return 0;
}
@@ -343,7 +367,7 @@
panel = dsi_display->panel;
sde_kms_info_add_keystr(info, "panel name", panel->name);
- switch (panel->mode.panel_mode) {
+ switch (panel->panel_mode) {
case DSI_OP_VIDEO_MODE:
sde_kms_info_add_keystr(info, "panel mode", "video");
break;
@@ -353,7 +377,7 @@
panel->cmd_config.mdp_transfer_time_us);
break;
default:
- pr_debug("invalid panel type:%d\n", panel->mode.panel_mode);
+ pr_debug("invalid panel type:%d\n", panel->panel_mode);
break;
}
sde_kms_info_add_keystr(info, "dfps support",
@@ -449,6 +473,21 @@
return status;
}
+void dsi_connector_put_modes(struct drm_connector *connector,
+ void *display)
+{
+ struct drm_display_mode *drm_mode;
+ struct dsi_display_mode dsi_mode;
+
+ if (!connector || !display)
+ return;
+
+ list_for_each_entry(drm_mode, &connector->modes, head) {
+ convert_to_dsi_mode(drm_mode, &dsi_mode);
+ dsi_display_put_mode(display, &dsi_mode);
+ }
+}
+
int dsi_connector_get_modes(struct drm_connector *connector,
void *display)
{
@@ -466,7 +505,7 @@
*/
goto end;
}
- rc = dsi_display_get_modes(display, NULL, &count);
+ rc = dsi_display_get_mode_count(display, &count);
if (rc) {
pr_err("failed to get num of modes, rc=%d\n", rc);
goto error;
@@ -479,7 +518,7 @@
goto end;
}
- rc = dsi_display_get_modes(display, modes, &count);
+ rc = dsi_display_get_modes(display, modes);
if (rc) {
pr_err("failed to get modes, rc=%d\n", rc);
count = 0;
@@ -546,6 +585,19 @@
return dsi_display_pre_kickoff(display, params);
}
+void dsi_conn_enable_event(struct drm_connector *connector,
+ uint32_t event_idx, bool enable, void *display)
+{
+ struct dsi_event_cb_info event_info;
+
+ memset(&event_info, 0, sizeof(event_info));
+
+ event_info.event_cb = sde_connector_trigger_event;
+ event_info.event_usr_ptr = connector;
+
+ dsi_display_enable_event(display, event_idx, &event_info, enable);
+}
+
struct dsi_bridge *dsi_drm_bridge_init(struct dsi_display *display,
struct drm_device *dev,
struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
index 68520a8..793f8f1 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
@@ -64,15 +64,22 @@
void *display);
/**
- * dsi_conn_get_topology - retrieve current topology for the mode selected
+ * dsi_connector_put_modes - callback to free up drm modes of the connector
+ * @connector: Pointer to drm connector structure
+ * @display: Pointer to private display handle
+ */
+void dsi_connector_put_modes(struct drm_connector *connector,
+ void *display);
+
+/**
+ * dsi_conn_get_mode_info - retrieve information on the mode selected
* @drm_mode: Display mode set for the display
- * @topology: Out parameter. Topology for the mode.
+ * @mode_info: Out parameter. Information on the mode.
* @max_mixer_width: max width supported by HW layer mixer
* Returns: Zero on success
*/
-int dsi_conn_get_topology(const struct drm_display_mode *drm_mode,
- struct msm_display_topology *topology,
- u32 max_mixer_width);
+int dsi_conn_get_mode_info(const struct drm_display_mode *drm_mode,
+ struct msm_mode_info *mode_info, u32 max_mixer_width);
/**
* dsi_conn_mode_valid - callback to determine if specified mode is valid
@@ -85,6 +92,16 @@
struct drm_display_mode *mode,
void *display);
+/**
+ * dsi_conn_enable_event - callback to notify DSI driver of event registration
+ * @connector: Pointer to drm connector structure
+ * @event_idx: Connector event index
+ * @enable: Whether or not the event is enabled
+ * @display: Pointer to private display handle
+ */
+void dsi_conn_enable_event(struct drm_connector *connector,
+ uint32_t event_idx, bool enable, void *display);
+
struct dsi_bridge *dsi_drm_bridge_init(struct dsi_display *display,
struct drm_device *dev,
struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
index b8bf7a8..f7b0d7f 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
@@ -35,20 +35,66 @@
#define DEFAULT_MDP_TRANSFER_TIME 14000
-#define DEFAULT_PANEL_JITTER 5
-#define MAX_PANEL_JITTER 25
-#define DEFAULT_PANEL_PREFILL_LINES 16
+#define DEFAULT_PANEL_JITTER_NUMERATOR 2
+#define DEFAULT_PANEL_JITTER_DENOMINATOR 1
+#define DEFAULT_PANEL_JITTER_ARRAY_SIZE 2
+#define MAX_PANEL_JITTER 10
+#define DEFAULT_PANEL_PREFILL_LINES 25
+
+enum dsi_dsc_ratio_type {
+ DSC_8BPC_8BPP,
+ DSC_10BPC_8BPP,
+ DSC_12BPC_8BPP,
+ DSC_RATIO_TYPE_MAX
+};
static u32 dsi_dsc_rc_buf_thresh[] = {0x0e, 0x1c, 0x2a, 0x38, 0x46, 0x54,
0x62, 0x69, 0x70, 0x77, 0x79, 0x7b, 0x7d, 0x7e};
-static char dsi_dsc_rc_range_min_qp_1_1[] = {0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5,
- 5, 5, 7, 13};
-static char dsi_dsc_rc_range_min_qp_1_1_scr1[] = {0, 0, 1, 1, 3, 3, 3, 3, 3, 3,
- 5, 5, 5, 9, 12};
-static char dsi_dsc_rc_range_max_qp_1_1[] = {4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 11,
- 12, 13, 13, 15};
-static char dsi_dsc_rc_range_max_qp_1_1_scr1[] = {4, 4, 5, 6, 7, 7, 7, 8, 9, 10,
- 11, 11, 12, 13};
+
+/*
+ * DSC 1.1
+ * Rate control - Min QP values for each ratio type in dsi_dsc_ratio_type
+ */
+static char dsi_dsc_rc_range_min_qp_1_1[][15] = {
+ {0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 13},
+ {0, 4, 5, 5, 7, 7, 7, 7, 7, 7, 9, 9, 9, 11, 17},
+ {0, 4, 9, 9, 11, 11, 11, 11, 11, 11, 13, 13, 13, 15, 21},
+ };
+
+/*
+ * DSC 1.1 SCR
+ * Rate control - Min QP values for each ratio type in dsi_dsc_ratio_type
+ */
+static char dsi_dsc_rc_range_min_qp_1_1_scr1[][15] = {
+ {0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 9, 12},
+ {0, 4, 5, 5, 7, 7, 7, 7, 7, 7, 9, 9, 9, 13, 16},
+ {0, 4, 9, 9, 11, 11, 11, 11, 11, 11, 13, 13, 13, 17, 20},
+ };
+
+/*
+ * DSC 1.1
+ * Rate control - Max QP values for each ratio type in dsi_dsc_ratio_type
+ */
+static char dsi_dsc_rc_range_max_qp_1_1[][15] = {
+ {4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 11, 12, 13, 13, 15},
+ {8, 8, 9, 10, 11, 11, 11, 12, 13, 14, 15, 16, 17, 17, 19},
+ {12, 12, 13, 14, 15, 15, 15, 16, 17, 18, 19, 20, 21, 21, 23},
+ };
+
+/*
+ * DSC 1.1 SCR
+ * Rate control - Max QP values for each ratio type in dsi_dsc_ratio_type
+ */
+static char dsi_dsc_rc_range_max_qp_1_1_scr1[][15] = {
+ {4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 10, 11, 11, 12, 13},
+ {8, 8, 9, 10, 11, 11, 11, 12, 13, 14, 14, 15, 15, 16, 17},
+ {12, 12, 13, 14, 15, 15, 15, 16, 17, 18, 18, 19, 19, 20, 21},
+ };
+
+/*
+ * DSC 1.1 and DSC 1.1 SCR
+ * Rate control - bpg offset values
+ */
static char dsi_dsc_rc_range_bpg_offset[] = {2, 0, 0, -2, -4, -6, -8, -8,
-8, -10, -10, -12, -12, -12, -12};
@@ -425,11 +471,21 @@
{
int rc = 0, i = 0;
ssize_t len;
- struct dsi_cmd_desc *cmds = panel->cmd_sets[type].cmds;
- u32 count = panel->cmd_sets[type].count;
- enum dsi_cmd_set_state state = panel->cmd_sets[type].state;
+ struct dsi_cmd_desc *cmds;
+ u32 count;
+ enum dsi_cmd_set_state state;
+ struct dsi_display_mode *mode;
const struct mipi_dsi_host_ops *ops = panel->host->ops;
+ if (!panel || !panel->cur_mode)
+ return -EINVAL;
+
+ mode = panel->cur_mode;
+
+ cmds = mode->priv_info->cmd_sets[type].cmds;
+ count = mode->priv_info->cmd_sets[type].count;
+ state = mode->priv_info->cmd_sets[type].state;
+
if (count == 0) {
pr_debug("[%s] No commands to be sent for state(%d)\n",
panel->name, type);
@@ -925,7 +981,7 @@
panel->name);
if (rc) {
pr_err("[%s] failed to get pixel format, rc=%d\n",
- panel->name, rc);
+ panel->name, rc);
goto error;
}
@@ -1220,7 +1276,7 @@
}
}
- panel->mode.panel_mode = panel_mode;
+ panel->panel_mode = panel_mode;
error:
return rc;
}
@@ -1287,8 +1343,13 @@
"qcom,video-to-cmd-mode-switch-commands",
"qcom,video-to-cmd-mode-post-switch-commands",
"qcom,mdss-dsi-panel-status-command",
+ "qcom,mdss-dsi-lp1-command",
+ "qcom,mdss-dsi-lp2-command",
+ "qcom,mdss-dsi-nolp-command",
"PPS not parsed from DTSI, generated dynamically",
"ROI not parsed from DTSI, generated dynamically",
+ "qcom,mdss-dsi-timing-switch-command",
+ "qcom,mdss-dsi-post-mode-switch-on-command",
};
const char *cmd_set_state_map[DSI_CMD_SET_MAX] = {
@@ -1306,8 +1367,13 @@
"qcom,video-to-cmd-mode-switch-commands-state",
"qcom,video-to-cmd-mode-post-switch-commands-state",
"qcom,mdss-dsi-panel-status-command-state",
+ "qcom,mdss-dsi-lp1-command-state",
+ "qcom,mdss-dsi-lp2-command-state",
+ "qcom,mdss-dsi-nolp-command-state",
"PPS not parsed from DTSI, generated dynamically",
"ROI not parsed from DTSI, generated dynamically",
+ "qcom,mdss-dsi-timing-switch-command-state",
+ "qcom,mdss-dsi-post-mode-switch-on-command-state",
};
static int dsi_panel_get_cmd_pkt_count(const char *data, u32 length, u32 *cnt)
@@ -1464,29 +1530,34 @@
}
-static int dsi_panel_parse_cmd_sets(struct dsi_panel *panel,
- struct device_node *of_node)
+static int dsi_panel_parse_cmd_sets(
+ struct dsi_display_mode_priv_info *priv_info,
+ struct device_node *of_node)
{
int rc = 0;
struct dsi_panel_cmd_set *set;
u32 i;
+ if (!priv_info) {
+ pr_err("invalid mode priv info\n");
+ return -EINVAL;
+ }
+
for (i = DSI_CMD_SET_PRE_ON; i < DSI_CMD_SET_MAX; i++) {
- set = &panel->cmd_sets[i];
+ set = &priv_info->cmd_sets[i];
set->type = i;
set->count = 0;
if (i == DSI_CMD_SET_PPS) {
rc = dsi_panel_alloc_cmd_packets(set, 1);
if (rc)
- pr_err("[%s] failed to allocate cmd set %d, rc = %d\n",
- panel->name, i, rc);
+ pr_err("failed to allocate cmd set %d, rc = %d\n",
+ i, rc);
set->state = DSI_CMD_SET_STATE_LP;
} else {
rc = dsi_panel_parse_cmd_sets_sub(set, i, of_node);
if (rc)
- pr_debug("[%s] failed to parse set %d\n",
- panel->name, i);
+ pr_debug("failed to parse set %d\n", i);
}
}
@@ -1575,32 +1646,46 @@
return 0;
}
-static int dsi_panel_parse_jitter_config(struct dsi_panel *panel,
- struct device_node *of_node)
+static int dsi_panel_parse_jitter_config(
+ struct dsi_display_mode *mode,
+ struct device_node *of_node)
{
int rc;
+ struct dsi_display_mode_priv_info *priv_info;
+ u32 jitter[DEFAULT_PANEL_JITTER_ARRAY_SIZE] = {0, 0};
+ u64 jitter_val = 0;
- rc = of_property_read_u32(of_node, "qcom,mdss-dsi-panel-jitter",
- &panel->panel_jitter);
+ priv_info = mode->priv_info;
+
+ rc = of_property_read_u32_array(of_node, "qcom,mdss-dsi-panel-jitter",
+ jitter, DEFAULT_PANEL_JITTER_ARRAY_SIZE);
if (rc) {
- pr_debug("panel jitter is not defined rc=%d\n", rc);
- panel->panel_jitter = DEFAULT_PANEL_JITTER;
- } else if (panel->panel_jitter > MAX_PANEL_JITTER) {
- pr_debug("invalid jitter config=%d setting to:%d\n",
- panel->panel_jitter, DEFAULT_PANEL_JITTER);
- panel->panel_jitter = DEFAULT_PANEL_JITTER;
+ pr_debug("panel jitter not defined rc=%d\n", rc);
+ } else {
+ jitter_val = jitter[0];
+ jitter_val = div_u64(jitter_val, jitter[1]);
+ }
+
+ if (rc || !jitter_val || (jitter_val > MAX_PANEL_JITTER)) {
+ priv_info->panel_jitter_numer = DEFAULT_PANEL_JITTER_NUMERATOR;
+ priv_info->panel_jitter_denom =
+ DEFAULT_PANEL_JITTER_DENOMINATOR;
+ } else {
+ priv_info->panel_jitter_numer = jitter[0];
+ priv_info->panel_jitter_denom = jitter[1];
}
rc = of_property_read_u32(of_node, "qcom,mdss-dsi-panel-prefill-lines",
- &panel->panel_prefill_lines);
+ &priv_info->panel_prefill_lines);
if (rc) {
pr_debug("panel prefill lines are not defined rc=%d\n", rc);
- panel->panel_prefill_lines = DEFAULT_PANEL_PREFILL_LINES;
- } else if (panel->panel_prefill_lines >=
- DSI_V_TOTAL(&panel->mode.timing)) {
+ priv_info->panel_prefill_lines = DEFAULT_PANEL_PREFILL_LINES;
+ } else if (priv_info->panel_prefill_lines >=
+ DSI_V_TOTAL(&mode->timing)) {
pr_debug("invalid prefill lines config=%d setting to:%d\n",
- panel->panel_prefill_lines, DEFAULT_PANEL_PREFILL_LINES);
- panel->panel_prefill_lines = DEFAULT_PANEL_PREFILL_LINES;
+ priv_info->panel_prefill_lines, DEFAULT_PANEL_PREFILL_LINES);
+
+ priv_info->panel_prefill_lines = DEFAULT_PANEL_PREFILL_LINES;
}
return 0;
@@ -1848,6 +1933,7 @@
int target_bpp_x16;
int data;
int final_value, final_scale;
+ int ratio_index;
dsc->version = 0x11;
dsc->scr_rev = 0;
@@ -1857,12 +1943,7 @@
else
dsc->first_line_bpg_offset = 12;
- dsc->min_qp_flatness = 3;
- dsc->max_qp_flatness = 12;
- dsc->line_buf_depth = 9;
dsc->edge_factor = 6;
- dsc->quant_incr_limit0 = 11;
- dsc->quant_incr_limit1 = 11;
dsc->tgt_offset_hi = 3;
dsc->tgt_offset_lo = 3;
dsc->enable_422 = 0;
@@ -1870,27 +1951,60 @@
dsc->vbr_enable = 0;
dsc->buf_thresh = dsi_dsc_rc_buf_thresh;
- if (dsc->version == 0x11 && dsc->scr_rev == 0x1) {
- dsc->range_min_qp = dsi_dsc_rc_range_min_qp_1_1_scr1;
- dsc->range_max_qp = dsi_dsc_rc_range_max_qp_1_1_scr1;
- } else {
- dsc->range_min_qp = dsi_dsc_rc_range_min_qp_1_1;
- dsc->range_max_qp = dsi_dsc_rc_range_max_qp_1_1;
- }
- dsc->range_bpg_offset = dsi_dsc_rc_range_bpg_offset;
bpp = dsc->bpp;
bpc = dsc->bpc;
+ if (bpc == 12)
+ ratio_index = DSC_12BPC_8BPP;
+ else if (bpc == 10)
+ ratio_index = DSC_10BPC_8BPP;
+ else
+ ratio_index = DSC_8BPC_8BPP;
+
+ if (dsc->version == 0x11 && dsc->scr_rev == 0x1) {
+ dsc->range_min_qp =
+ dsi_dsc_rc_range_min_qp_1_1_scr1[ratio_index];
+ dsc->range_max_qp =
+ dsi_dsc_rc_range_max_qp_1_1_scr1[ratio_index];
+ } else {
+ dsc->range_min_qp = dsi_dsc_rc_range_min_qp_1_1[ratio_index];
+ dsc->range_max_qp = dsi_dsc_rc_range_max_qp_1_1[ratio_index];
+ }
+ dsc->range_bpg_offset = dsi_dsc_rc_range_bpg_offset;
+
if (bpp == 8)
dsc->initial_offset = 6144;
else
dsc->initial_offset = 2048; /* bpp = 12 */
- if (bpc <= 8)
- mux_words_size = 48;
+ if (bpc == 12)
+ mux_words_size = 64;
else
- mux_words_size = 64; /* bpc == 12 */
+ mux_words_size = 48; /* bpc == 8/10 */
+
+ if (bpc == 8) {
+ dsc->line_buf_depth = 9;
+ dsc->input_10_bits = 0;
+ dsc->min_qp_flatness = 3;
+ dsc->max_qp_flatness = 12;
+ dsc->quant_incr_limit0 = 11;
+ dsc->quant_incr_limit1 = 11;
+ } else if (bpc == 10) { /* 10bpc */
+ dsc->line_buf_depth = 11;
+ dsc->input_10_bits = 1;
+ dsc->min_qp_flatness = 7;
+ dsc->max_qp_flatness = 16;
+ dsc->quant_incr_limit0 = 15;
+ dsc->quant_incr_limit1 = 15;
+ } else { /* 12 bpc */
+ dsc->line_buf_depth = 9;
+ dsc->input_10_bits = 0;
+ dsc->min_qp_flatness = 11;
+ dsc->max_qp_flatness = 20;
+ dsc->quant_incr_limit0 = 19;
+ dsc->quant_incr_limit1 = 19;
+ }
dsc->slice_last_group_size = 3 - (dsc->slice_width % 3);
@@ -1958,38 +2072,85 @@
return 0;
}
-int dsi_panel_parse_dsc_params(struct dsi_panel *panel,
+
+static int dsi_panel_parse_phy_timing(struct dsi_display_mode *mode,
+ struct device_node *of_node)
+{
+ const char *data;
+ u32 len, i;
+ int rc = 0;
+ struct dsi_display_mode_priv_info *priv_info;
+
+ priv_info = mode->priv_info;
+
+ data = of_get_property(of_node,
+ "qcom,mdss-dsi-panel-phy-timings", &len);
+ if (!data) {
+ pr_debug("Unable to read Phy timing settings");
+ } else {
+ priv_info->phy_timing_val =
+ kzalloc((sizeof(u32) * len), GFP_KERNEL);
+ if (!priv_info->phy_timing_val)
+ return -EINVAL;
+
+ for (i = 0; i < len; i++)
+ priv_info->phy_timing_val[i] = data[i];
+
+ priv_info->phy_timing_len = len;
+ };
+
+ mode->pixel_clk_khz = (DSI_H_TOTAL(&mode->timing) *
+ DSI_V_TOTAL(&mode->timing) *
+ mode->timing.refresh_rate) / 1000;
+ return rc;
+}
+
+static int dsi_panel_parse_dsc_params(struct dsi_display_mode *mode,
struct device_node *of_node)
{
u32 data;
int rc = -EINVAL;
int intf_width;
+ const char *compression;
+ struct dsi_display_mode_priv_info *priv_info;
- if (!panel->dsc_enabled)
+ if (!mode || !mode->priv_info)
+ return -EINVAL;
+
+ priv_info = mode->priv_info;
+
+ priv_info->dsc_enabled = false;
+ compression = of_get_property(of_node, "qcom,compression-mode", NULL);
+ if (compression && !strcmp(compression, "dsc"))
+ priv_info->dsc_enabled = true;
+
+ if (!priv_info->dsc_enabled) {
+ pr_debug("dsc compression is not enabled for the mode");
return 0;
+ }
rc = of_property_read_u32(of_node, "qcom,mdss-dsc-slice-height", &data);
if (rc) {
pr_err("failed to parse qcom,mdss-dsc-slice-height\n");
goto error;
}
- panel->dsc.slice_height = data;
+ priv_info->dsc.slice_height = data;
rc = of_property_read_u32(of_node, "qcom,mdss-dsc-slice-width", &data);
if (rc) {
pr_err("failed to parse qcom,mdss-dsc-slice-width\n");
goto error;
}
- panel->dsc.slice_width = data;
+ priv_info->dsc.slice_width = data;
- intf_width = panel->mode.timing.h_active;
- if (intf_width % panel->dsc.slice_width) {
+ intf_width = mode->timing.h_active;
+ if (intf_width % priv_info->dsc.slice_width) {
pr_err("invalid slice width for the panel\n");
goto error;
}
- panel->dsc.pic_width = panel->mode.timing.h_active;
- panel->dsc.pic_height = panel->mode.timing.v_active;
+ priv_info->dsc.pic_width = mode->timing.h_active;
+ priv_info->dsc.pic_height = mode->timing.v_active;
rc = of_property_read_u32(of_node, "qcom,mdss-dsc-slice-per-pkt",
&data);
@@ -1997,7 +2158,7 @@
pr_err("failed to parse qcom,mdss-dsc-slice-per-pkt\n");
goto error;
}
- panel->dsc.slice_per_pkt = data;
+ priv_info->dsc.slice_per_pkt = data;
rc = of_property_read_u32(of_node, "qcom,mdss-dsc-bit-per-component",
&data);
@@ -2005,7 +2166,7 @@
pr_err("failed to parse qcom,mdss-dsc-bit-per-component\n");
goto error;
}
- panel->dsc.bpc = data;
+ priv_info->dsc.bpc = data;
rc = of_property_read_u32(of_node, "qcom,mdss-dsc-bit-per-pixel",
&data);
@@ -2013,16 +2174,16 @@
pr_err("failed to parse qcom,mdss-dsc-bit-per-pixel\n");
goto error;
}
- panel->dsc.bpp = data;
+ priv_info->dsc.bpp = data;
- panel->dsc.block_pred_enable = of_property_read_bool(of_node,
+ priv_info->dsc.block_pred_enable = of_property_read_bool(of_node,
"qcom,mdss-dsc-block-prediction-enable");
- panel->dsc.full_frame_slices = DIV_ROUND_UP(intf_width,
- panel->dsc.slice_width);
+ priv_info->dsc.full_frame_slices = DIV_ROUND_UP(intf_width,
+ priv_info->dsc.slice_width);
- dsi_dsc_populate_static_param(&panel->dsc);
- dsi_dsc_pclk_param_calc(&panel->dsc, intf_width);
+ dsi_dsc_populate_static_param(&priv_info->dsc);
+ dsi_dsc_pclk_param_calc(&priv_info->dsc, intf_width);
error:
return rc;
@@ -2074,9 +2235,9 @@
return 0;
}
-static int dsi_panel_parse_topology(struct dsi_panel *panel,
- struct device_node *of_node,
- int topology_override)
+static int dsi_panel_parse_topology(
+ struct dsi_display_mode_priv_info *priv_info,
+ struct device_node *of_node, int topology_override)
{
struct msm_display_topology *topology;
u32 top_count, top_sel, *array = NULL;
@@ -2147,9 +2308,7 @@
topology[top_sel].num_intf);
parse_done:
- panel->mode.mode_info = kzalloc(sizeof(struct msm_mode_info),
- GFP_KERNEL);
- memcpy(&panel->mode.mode_info->topology, &topology[top_sel],
+ memcpy(&priv_info->topology, &topology[top_sel],
sizeof(struct msm_display_topology));
parse_fail:
kfree(topology);
@@ -2240,14 +2399,40 @@
return rc;
}
+static int dsi_panel_parse_dms_info(struct dsi_panel *panel,
+ struct device_node *of_node)
+{
+ int dms_enabled;
+ const char *data;
+
+ if (!of_node || !panel) {
+ pr_err("invalid params\n");
+ return -EINVAL;
+ }
+
+ panel->dms_mode = DSI_DMS_MODE_DISABLED;
+ dms_enabled = of_property_read_bool(of_node,
+ "qcom,dynamic-mode-switch-enabled");
+ if (!dms_enabled)
+ return 0;
+
+ data = of_get_property(of_node, "qcom,dynamic-mode-switch-type", NULL);
+ if (data && !strcmp(data, "dynamic-resolution-switch-immediate")) {
+ panel->dms_mode = DSI_DMS_MODE_RES_SWITCH_IMMEDIATE;
+ } else {
+ pr_err("[%s] unsupported dynamic switch mode: %s\n",
+ panel->name, data);
+ return -EINVAL;
+ }
+
+ return 0;
+};
+
struct dsi_panel *dsi_panel_get(struct device *parent,
struct device_node *of_node,
int topology_override)
{
struct dsi_panel *panel;
- const char *data;
- const char *compression;
- u32 len = 0;
int rc = 0;
panel = kzalloc(sizeof(*panel), GFP_KERNEL);
@@ -2259,52 +2444,6 @@
if (!panel->name)
panel->name = DSI_PANEL_DEFAULT_LABEL;
- panel->dsc_enabled = false;
- compression = of_get_property(of_node, "qcom,compression-mode", NULL);
- if (compression && !strcmp(compression, "dsc"))
- panel->dsc_enabled = true;
-
- rc = dsi_panel_parse_timing(&panel->mode.timing, of_node);
- if (rc) {
- pr_err("failed to parse panel timing, rc=%d\n", rc);
- goto error;
- }
-
- rc = dsi_panel_parse_dsc_params(panel, of_node);
- if (rc) {
- pr_err("failed to parse dsc params, rc=%d\n", rc);
- goto error;
- }
-
- data = of_get_property(of_node,
- "qcom,mdss-dsi-panel-phy-timings", &len);
- if (!data) {
- pr_debug("%s:%d, Unable to read Phy timing settings",
- __func__, __LINE__);
- } else {
- int i = 0;
-
- panel->phy_timing_val = kzalloc((sizeof(u32) * len),
- GFP_KERNEL);
- if (!panel->phy_timing_val) {
- kfree(panel);
- return ERR_PTR(-ENOMEM);
- }
- for (i = 0; i < len; i++)
- panel->phy_timing_val[i] = data[i];
- }
- panel->phy_timing_len = len;
-
- panel->mode.pixel_clk_khz = (DSI_H_TOTAL(&panel->mode.timing) *
- DSI_V_TOTAL(&panel->mode.timing) *
- panel->mode.timing.refresh_rate) / 1000;
-
- rc = dsi_panel_parse_topology(panel, of_node, topology_override);
- if (rc) {
- pr_err("failed to parse panel topology, rc=%d\n", rc);
- goto error;
- }
-
rc = dsi_panel_parse_host_config(panel, of_node);
if (rc) {
pr_err("failed to parse host configuration, rc=%d\n", rc);
@@ -2327,12 +2466,6 @@
goto error;
}
- rc = dsi_panel_parse_cmd_sets(panel, of_node);
- if (rc) {
- pr_err("failed to parse command sets, rc=%d\n", rc);
- goto error;
- }
-
rc = dsi_panel_parse_power_cfg(parent, panel, of_node);
if (rc)
pr_err("failed to parse power config, rc=%d\n", rc);
@@ -2345,13 +2478,10 @@
if (rc)
pr_err("failed to parse backlight config, rc=%d\n", rc);
- rc = dsi_panel_parse_jitter_config(panel, of_node);
- if (rc)
- pr_err("failed to parse panel jitter config, rc=%d\n", rc);
rc = dsi_panel_parse_misc_features(panel, of_node);
if (rc)
- pr_err("failed to parse panel features, rc=%d\n", rc);
+ pr_err("failed to parse misc features, rc=%d\n", rc);
rc = dsi_panel_parse_hdr_config(panel, of_node);
if (rc)
@@ -2361,6 +2491,16 @@
if (rc)
pr_debug("failed to partial update caps, rc=%d\n", rc);
+ rc = dsi_panel_get_mode_count(panel, of_node);
+ if (rc) {
+ pr_err("failed to get mode count, rc=%d\n", rc);
+ goto error;
+ }
+
+ rc = dsi_panel_parse_dms_info(panel, of_node);
+ if (rc)
+ pr_debug("failed to get dms info, rc=%d\n", rc);
+
panel->panel_of_node = of_node;
drm_panel_init(&panel->drm_panel);
mutex_init(&panel->panel_lock);
@@ -2373,14 +2513,6 @@
void dsi_panel_put(struct dsi_panel *panel)
{
- u32 i;
-
- for (i = 0; i < DSI_CMD_SET_MAX; i++)
- dsi_panel_destroy_cmd_packets(&panel->cmd_sets[i]);
-
- kfree(panel->mode.mode_info);
-
- /* TODO: more free */
kfree(panel);
}
@@ -2493,20 +2625,42 @@
return 0;
}
-int dsi_panel_get_mode_count(struct dsi_panel *panel, u32 *count)
+int dsi_panel_get_mode_count(struct dsi_panel *panel,
+ struct device_node *of_node)
{
- int rc = 0;
+ const u32 SINGLE_MODE_SUPPORT = 1;
+ struct device_node *timings_np;
+ int count, rc = 0;
- if (!panel || !count) {
+ if (!of_node || !panel) {
pr_err("invalid params\n");
return -EINVAL;
}
- mutex_lock(&panel->panel_lock);
- /* TODO: DT format has not been decided for multiple modes. */
- *count = 1;
+ panel->num_timing_nodes = 0;
- mutex_unlock(&panel->panel_lock);
+ timings_np = of_get_child_by_name(of_node,
+ "qcom,mdss-dsi-display-timings");
+ if (!timings_np) {
+ pr_err("no display timing nodes defined\n");
+ rc = -EINVAL;
+ goto error;
+ }
+
+ count = of_get_child_count(timings_np);
+ if (!count || count > DSI_MODE_MAX) {
+ pr_err("invalid count of timing nodes: %d\n", count);
+ rc = -EINVAL;
+ goto error;
+ }
+
+ /* No multiresolution support is available for video mode panels */
+ if (panel->panel_mode != DSI_OP_CMD_MODE)
+ count = SINGLE_MODE_SUPPORT;
+
+ panel->num_timing_nodes = count;
+
+error:
return rc;
}
@@ -2546,11 +2700,27 @@
return rc;
}
-int dsi_panel_get_mode(struct dsi_panel *panel,
- u32 index,
- struct dsi_display_mode *mode)
+void dsi_panel_put_mode(struct dsi_display_mode *mode)
{
- int rc = 0;
+ int i;
+
+ if (!mode->priv_info)
+ return;
+
+ for (i = 0; i < DSI_CMD_SET_MAX; i++)
+ dsi_panel_destroy_cmd_packets(&mode->priv_info->cmd_sets[i]);
+
+ kfree(mode->priv_info);
+}
+
+int dsi_panel_get_mode(struct dsi_panel *panel,
+ u32 index, struct dsi_display_mode *mode,
+ int topology_override)
+{
+ struct device_node *timings_np, *child_np;
+ struct dsi_display_mode_priv_info *prv_info;
+ u32 child_idx = 0;
+ int rc = 0, num_timings;
if (!panel || !mode) {
pr_err("invalid params\n");
@@ -2558,11 +2728,77 @@
}
mutex_lock(&panel->panel_lock);
- if (index != 0)
- rc = -ENOTSUPP; /* TODO: Support more than one mode */
- else
- memcpy(mode, &panel->mode, sizeof(*mode));
+ mode->priv_info = kzalloc(sizeof(*mode->priv_info), GFP_KERNEL);
+ if (!mode->priv_info) {
+ rc = -ENOMEM;
+ goto done;
+ }
+
+ prv_info = mode->priv_info;
+
+ timings_np = of_get_child_by_name(panel->panel_of_node,
+ "qcom,mdss-dsi-display-timings");
+ if (!timings_np) {
+ pr_err("no display timing nodes defined\n");
+ rc = -EINVAL;
+ goto parse_fail;
+ }
+
+ num_timings = of_get_child_count(timings_np);
+ if (!num_timings || num_timings > DSI_MODE_MAX) {
+ pr_err("invalid count of timing nodes: %d\n", num_timings);
+ rc = -EINVAL;
+ goto parse_fail;
+ }
+
+ for_each_child_of_node(timings_np, child_np) {
+ if (index != child_idx++)
+ continue;
+
+ rc = dsi_panel_parse_timing(&mode->timing, child_np);
+ if (rc) {
+ pr_err("failed to parse panel timing, rc=%d\n", rc);
+ goto parse_fail;
+ }
+
+ rc = dsi_panel_parse_dsc_params(mode, child_np);
+ if (rc) {
+ pr_err("failed to parse dsc params, rc=%d\n", rc);
+ goto parse_fail;
+ }
+
+ rc = dsi_panel_parse_topology(prv_info, child_np,
+ topology_override);
+ if (rc) {
+ pr_err("failed to parse panel topology, rc=%d\n", rc);
+ goto parse_fail;
+ }
+
+ rc = dsi_panel_parse_cmd_sets(prv_info, child_np);
+ if (rc) {
+ pr_err("failed to parse command sets, rc=%d\n", rc);
+ goto parse_fail;
+ }
+
+ rc = dsi_panel_parse_jitter_config(mode, child_np);
+ if (rc)
+ pr_err(
+ "failed to parse panel jitter config, rc=%d\n", rc);
+
+ rc = dsi_panel_parse_phy_timing(mode, child_np);
+ if (rc) {
+ pr_err(
+ "failed to parse panel phy timings, rc=%d\n", rc);
+ goto parse_fail;
+ }
+ }
+ goto done;
+
+parse_fail:
+ kfree(mode->priv_info);
+ mode->priv_info = NULL;
+done:
mutex_unlock(&panel->panel_lock);
return rc;
}
@@ -2580,11 +2816,11 @@
mutex_lock(&panel->panel_lock);
- config->panel_mode = panel->mode.panel_mode;
+ config->panel_mode = panel->panel_mode;
memcpy(&config->common_config, &panel->host_config,
sizeof(config->common_config));
- if (mode->panel_mode == DSI_OP_VIDEO_MODE) {
+ if (panel->panel_mode == DSI_OP_VIDEO_MODE) {
memcpy(&config->u.video_engine, &panel->video_config,
sizeof(config->u.video_engine));
} else {
@@ -2594,8 +2830,8 @@
memcpy(&config->video_timing, &mode->timing,
sizeof(config->video_timing));
- config->video_timing.dsc_enabled = panel->dsc_enabled;
- config->video_timing.dsc = &panel->dsc;
+ config->video_timing.dsc_enabled = mode->priv_info->dsc_enabled;
+ config->video_timing.dsc = &mode->priv_info->dsc;
config->esc_clk_rate_hz = 19200000;
mutex_unlock(&panel->panel_lock);
@@ -2632,17 +2868,20 @@
{
int rc = 0;
struct dsi_panel_cmd_set *set = NULL;
+ struct dsi_display_mode_priv_info *priv_info = NULL;
- if (!panel) {
+ if (!panel || !panel->cur_mode) {
pr_err("invalid params\n");
return -EINVAL;
}
mutex_lock(&panel->panel_lock);
- set = &panel->cmd_sets[DSI_CMD_SET_PPS];
+ priv_info = panel->cur_mode->priv_info;
- dsi_dsc_create_pps_buf_cmd(&panel->dsc, panel->dsc_pps_cmd, 0);
+ set = &priv_info->cmd_sets[DSI_CMD_SET_PPS];
+
+ dsi_dsc_create_pps_buf_cmd(&priv_info->dsc, panel->dsc_pps_cmd, 0);
rc = dsi_panel_create_cmd_packets(panel->dsc_pps_cmd,
DSI_CMD_PPS_SIZE, 1, set->cmds);
if (rc) {
@@ -2662,6 +2901,60 @@
return rc;
}
+int dsi_panel_set_lp1(struct dsi_panel *panel)
+{
+ int rc = 0;
+
+ if (!panel) {
+ pr_err("invalid params\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&panel->panel_lock);
+ rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_LP1);
+ if (rc)
+ pr_err("[%s] failed to send DSI_CMD_SET_LP1 cmd, rc=%d\n",
+ panel->name, rc);
+ mutex_unlock(&panel->panel_lock);
+ return rc;
+}
+
+int dsi_panel_set_lp2(struct dsi_panel *panel)
+{
+ int rc = 0;
+
+ if (!panel) {
+ pr_err("invalid params\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&panel->panel_lock);
+ rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_LP2);
+ if (rc)
+ pr_err("[%s] failed to send DSI_CMD_SET_LP2 cmd, rc=%d\n",
+ panel->name, rc);
+ mutex_unlock(&panel->panel_lock);
+ return rc;
+}
+
+int dsi_panel_set_nolp(struct dsi_panel *panel)
+{
+ int rc = 0;
+
+ if (!panel) {
+ pr_err("invalid params\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&panel->panel_lock);
+ rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_NOLP);
+ if (rc)
+ pr_err("[%s] failed to send DSI_CMD_SET_NOLP cmd, rc=%d\n",
+ panel->name, rc);
+ mutex_unlock(&panel->panel_lock);
+ return rc;
+}
+
int dsi_panel_prepare(struct dsi_panel *panel)
{
int rc = 0;
@@ -2694,11 +2987,11 @@
return rc;
}
-static int dsi_panel_roi_prepare_dcs_cmds(struct dsi_panel *panel,
+static int dsi_panel_roi_prepare_dcs_cmds(struct dsi_panel_cmd_set *set,
struct dsi_rect *roi, int ctrl_idx, int unicast)
{
static const int ROI_CMD_LEN = 5;
- struct dsi_panel_cmd_set *set = &panel->cmd_sets[DSI_CMD_SET_ROI];
+
int rc = 0;
/* DTYPE_DCS_LWRITE */
@@ -2773,13 +3066,18 @@
struct dsi_rect *roi)
{
int rc = 0;
+ struct dsi_panel_cmd_set *set;
+ struct dsi_display_mode_priv_info *priv_info;
- if (!panel) {
+ if (!panel || !panel->cur_mode) {
pr_err("Invalid params\n");
return -EINVAL;
}
- rc = dsi_panel_roi_prepare_dcs_cmds(panel, roi, ctrl_idx, true);
+ priv_info = panel->cur_mode->priv_info;
+ set = &priv_info->cmd_sets[DSI_CMD_SET_ROI];
+
+ rc = dsi_panel_roi_prepare_dcs_cmds(set, roi, ctrl_idx, true);
if (rc) {
pr_err("[%s] failed to prepare DSI_CMD_SET_ROI cmds, rc=%d\n",
panel->name, rc);
@@ -2797,11 +3095,51 @@
mutex_unlock(&panel->panel_lock);
- dsi_panel_destroy_cmd_packets(&panel->cmd_sets[DSI_CMD_SET_ROI]);
+ dsi_panel_destroy_cmd_packets(set);
return rc;
}
+int dsi_panel_switch(struct dsi_panel *panel)
+{
+ int rc = 0;
+
+ if (!panel) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&panel->panel_lock);
+
+ rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_TIMING_SWITCH);
+ if (rc)
+ pr_err("[%s] failed to send DSI_CMD_SET_TIMING_SWITCH cmds, rc=%d\n",
+ panel->name, rc);
+
+ mutex_unlock(&panel->panel_lock);
+ return rc;
+}
+
+int dsi_panel_post_switch(struct dsi_panel *panel)
+{
+ int rc = 0;
+
+ if (!panel) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&panel->panel_lock);
+
+ rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_POST_TIMING_SWITCH);
+ if (rc)
+ pr_err("[%s] failed to send DSI_CMD_SET_POST_TIMING_SWITCH cmds, rc=%d\n",
+ panel->name, rc);
+
+ mutex_unlock(&panel->panel_lock);
+ return rc;
+}
+
int dsi_panel_enable(struct dsi_panel *panel)
{
int rc = 0;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
index 3569b5b..0ee23f3 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
@@ -21,7 +21,6 @@
#include <linux/errno.h>
#include <linux/leds.h>
#include <drm/drm_panel.h>
-#include <drm/drm_mipi_dsi.h>
#include <drm/msm_drm.h>
#include "dsi_defs.h"
@@ -33,6 +32,8 @@
#define MAX_BL_LEVEL 4096
#define DSI_CMD_PPS_SIZE 135
+#define DSI_MODE_MAX 5
+
enum dsi_panel_rotation {
DSI_PANEL_ROTATE_NONE = 0,
DSI_PANEL_ROTATE_HV_FLIP,
@@ -40,32 +41,6 @@
DSI_PANEL_ROTATE_V_FLIP
};
-enum dsi_cmd_set_type {
- DSI_CMD_SET_PRE_ON = 0,
- DSI_CMD_SET_ON,
- DSI_CMD_SET_POST_ON,
- DSI_CMD_SET_PRE_OFF,
- DSI_CMD_SET_OFF,
- DSI_CMD_SET_POST_OFF,
- DSI_CMD_SET_PRE_RES_SWITCH,
- DSI_CMD_SET_RES_SWITCH,
- DSI_CMD_SET_POST_RES_SWITCH,
- DSI_CMD_SET_CMD_TO_VID_SWITCH,
- DSI_CMD_SET_POST_CMD_TO_VID_SWITCH,
- DSI_CMD_SET_VID_TO_CMD_SWITCH,
- DSI_CMD_SET_POST_VID_TO_CMD_SWITCH,
- DSI_CMD_SET_PANEL_STATUS,
- DSI_CMD_SET_PPS,
- DSI_CMD_SET_ROI,
- DSI_CMD_SET_MAX
-};
-
-enum dsi_cmd_set_state {
- DSI_CMD_SET_STATE_LP = 0,
- DSI_CMD_SET_STATE_HS,
- DSI_CMD_SET_STATE_MAX
-};
-
enum dsi_backlight_type {
DSI_BACKLIGHT_PWM = 0,
DSI_BACKLIGHT_WLED,
@@ -82,6 +57,11 @@
MODE_GPIO_LOW,
};
+enum dsi_dms_mode {
+ DSI_DMS_MODE_DISABLED = 0,
+ DSI_DMS_MODE_RES_SWITCH_IMMEDIATE,
+};
+
struct dsi_dfps_capabilities {
bool dfps_support;
enum dsi_dfps_type type;
@@ -101,20 +81,6 @@
enum dsi_panel_rotation rotation;
};
-struct dsi_cmd_desc {
- struct mipi_dsi_msg msg;
- bool last_command;
- u32 post_wait_ms;
-};
-
-struct dsi_panel_cmd_set {
- enum dsi_cmd_set_type type;
- enum dsi_cmd_set_state state;
- u32 count;
- int ctrl_idx;
- struct dsi_cmd_desc *cmds;
-};
-
struct dsi_backlight_config {
enum dsi_backlight_type type;
@@ -162,19 +128,17 @@
struct dsi_host_common_cfg host_config;
struct dsi_video_engine_cfg video_config;
struct dsi_cmd_engine_cfg cmd_config;
+ enum dsi_op_mode panel_mode;
struct dsi_dfps_capabilities dfps_caps;
struct msm_roi_caps roi_caps;
- struct dsi_panel_cmd_set cmd_sets[DSI_CMD_SET_MAX];
struct dsi_panel_phy_props phy_props;
- u32 *phy_timing_val;
- u32 phy_timing_len;
+ struct dsi_display_mode *cur_mode;
+ u32 num_timing_nodes;
struct dsi_regulator_info power_info;
- struct dsi_display_mode mode;
-
struct dsi_backlight_config bl_config;
struct dsi_panel_reset_config reset_config;
struct dsi_pinctrl_info pinctrl;
@@ -184,14 +148,11 @@
bool ulps_enabled;
bool allow_phy_power_off;
- u32 panel_jitter;
- u32 panel_prefill_lines;
bool panel_initialized;
bool te_using_watchdog_timer;
- bool dsc_enabled;
char dsc_pps_cmd[DSI_CMD_PPS_SIZE];
- struct msm_display_dsc_info dsc;
+ enum dsi_dms_mode dms_mode;
};
static inline bool dsi_panel_ulps_feature_enabled(struct dsi_panel *panel)
@@ -207,17 +168,26 @@
struct dsi_panel *dsi_panel_get(struct device *parent,
struct device_node *of_node,
int topology_override);
+
void dsi_panel_put(struct dsi_panel *panel);
int dsi_panel_drv_init(struct dsi_panel *panel, struct mipi_dsi_host *host);
+
int dsi_panel_drv_deinit(struct dsi_panel *panel);
-int dsi_panel_get_mode_count(struct dsi_panel *panel, u32 *count);
+int dsi_panel_get_mode_count(struct dsi_panel *panel,
+ struct device_node *of_node);
+
+void dsi_panel_put_mode(struct dsi_display_mode *mode);
+
int dsi_panel_get_mode(struct dsi_panel *panel,
u32 index,
- struct dsi_display_mode *mode);
+ struct dsi_display_mode *mode,
+ int topology_override);
+
int dsi_panel_validate_mode(struct dsi_panel *panel,
struct dsi_display_mode *mode);
+
int dsi_panel_get_host_cfg_for_mode(struct dsi_panel *panel,
struct dsi_display_mode *mode,
struct dsi_host_config *config);
@@ -229,6 +199,12 @@
int dsi_panel_pre_prepare(struct dsi_panel *panel);
+int dsi_panel_set_lp1(struct dsi_panel *panel);
+
+int dsi_panel_set_lp2(struct dsi_panel *panel);
+
+int dsi_panel_set_nolp(struct dsi_panel *panel);
+
int dsi_panel_prepare(struct dsi_panel *panel);
int dsi_panel_enable(struct dsi_panel *panel);
@@ -250,6 +226,10 @@
int dsi_panel_send_roi_dcs(struct dsi_panel *panel, int ctrl_idx,
struct dsi_rect *roi);
+int dsi_panel_switch(struct dsi_panel *panel);
+
+int dsi_panel_post_switch(struct dsi_panel *panel);
+
void dsi_dsc_pclk_param_calc(struct msm_display_dsc_info *dsc, int intf_width);
#endif /* _DSI_PANEL_H_ */
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 9527daf..75e98dc 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -133,7 +133,7 @@
container_of(work, struct mdp4_crtc, unref_cursor_work);
struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
- msm_gem_put_iova(val, mdp4_kms->id);
+ msm_gem_put_iova(val, mdp4_kms->aspace);
drm_gem_object_unreference_unlocked(val);
}
@@ -378,7 +378,8 @@
if (next_bo) {
/* take a obj ref + iova ref when we start scanning out: */
drm_gem_object_reference(next_bo);
- msm_gem_get_iova_locked(next_bo, mdp4_kms->id, &iova);
+ msm_gem_get_iova_locked(next_bo, mdp4_kms->aspace,
+ &iova);
/* enable cursor: */
mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
@@ -435,7 +436,7 @@
}
if (cursor_bo) {
- ret = msm_gem_get_iova(cursor_bo, mdp4_kms->id, &iova);
+ ret = msm_gem_get_iova(cursor_bo, mdp4_kms->aspace, &iova);
if (ret)
goto fail;
} else {
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 571a91e..acee5da 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -17,6 +17,7 @@
#include "msm_drv.h"
+#include "msm_gem.h"
#include "msm_mmu.h"
#include "mdp4_kms.h"
@@ -151,12 +152,28 @@
}
}
-static const char * const iommu_ports[] = {
- "mdp_port0_cb0", "mdp_port1_cb0",
-};
+static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ struct msm_drm_private *priv = mdp4_kms->dev->dev_private;
+ unsigned int i;
+ struct msm_gem_address_space *aspace = mdp4_kms->aspace;
+
+ for (i = 0; i < priv->num_crtcs; i++)
+ mdp4_crtc_cancel_pending_flip(priv->crtcs[i], file);
+
+ if (aspace) {
+ aspace->mmu->funcs->detach(aspace->mmu,
+ iommu_ports, ARRAY_SIZE(iommu_ports));
+ msm_gem_address_space_destroy(aspace);
+ }
+}
static void mdp4_destroy(struct msm_kms *kms)
{
+ struct device *dev = mdp4_kms->dev->dev;
+ struct msm_gem_address_space *aspace = mdp4_kms->aspace;
+
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
struct device *dev = mdp4_kms->dev->dev;
struct msm_mmu *mmu = mdp4_kms->mmu;
@@ -167,12 +184,18 @@
}
if (mdp4_kms->blank_cursor_iova)
- msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
+ msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->aspace);
drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
if (mdp4_kms->rpm_enabled)
pm_runtime_disable(dev);
+ if (aspace) {
+ aspace->mmu->funcs->detach(aspace->mmu,
+ iommu_ports, ARRAY_SIZE(iommu_ports));
+ msm_gem_address_space_destroy(aspace);
+ }
+
kfree(mdp4_kms);
}
@@ -440,8 +463,8 @@
struct mdp4_platform_config *config = mdp4_get_config(pdev);
struct mdp4_kms *mdp4_kms;
struct msm_kms *kms = NULL;
- struct msm_mmu *mmu;
int irq, ret;
+ struct msm_gem_address_space *aspace;
mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
if (!mdp4_kms) {
@@ -531,12 +554,23 @@
mdelay(16);
if (config->iommu) {
- mmu = msm_iommu_new(&pdev->dev, config->iommu);
+ struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, config->iommu);
+
if (IS_ERR(mmu)) {
ret = PTR_ERR(mmu);
goto fail;
}
- ret = mmu->funcs->attach(mmu, iommu_ports,
+
+ aspace = msm_gem_address_space_create(&pdev->dev,
+ mmu, "mdp4", 0x1000, 0xffffffff);
+ if (IS_ERR(aspace)) {
+ ret = PTR_ERR(aspace);
+ goto fail;
+ }
+
+ mdp4_kms->aspace = aspace;
+
+ ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
if (ret)
goto fail;
@@ -545,14 +579,7 @@
} else {
dev_info(dev->dev, "no iommu, fallback to phys "
"contig buffers for scanout\n");
- mmu = NULL;
- }
-
- mdp4_kms->id = msm_register_mmu(dev, mmu);
- if (mdp4_kms->id < 0) {
- ret = mdp4_kms->id;
- dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret);
- goto fail;
+ aspace = NULL;
}
ret = modeset_init(mdp4_kms);
@@ -571,7 +598,7 @@
goto fail;
}
- ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id,
+ ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->aspace,
&mdp4_kms->blank_cursor_iova);
if (ret) {
dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
@@ -599,5 +626,13 @@
config.max_clk = 266667000;
config.iommu = iommu_domain_alloc(&platform_bus_type);
+#else
+ if (cpu_is_apq8064())
+ config.max_clk = 266667000;
+ else
+ config.max_clk = 200000000;
+
+ config.iommu = msm_get_iommu_domain(DISPLAY_READ_DOMAIN);
+#endif
return &config;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index ddfabde..f9dcadf 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -33,8 +33,6 @@
int rev;
/* mapper-id used to request GEM buffer mapped for scanout: */
- int id;
-
void __iomem *mmio;
struct regulator *vdd;
@@ -43,7 +41,7 @@
struct clk *pclk;
struct clk *lut_clk;
struct clk *axi_clk;
- struct msm_mmu *mmu;
+ struct msm_gem_address_space *aspace;
struct mdp_irq error_handler;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 3903dbc..934992e 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -109,7 +109,7 @@
return 0;
DBG("%s: prepare: FB[%u]", mdp4_plane->name, fb->base.id);
- return msm_framebuffer_prepare(fb, mdp4_kms->id);
+ return msm_framebuffer_prepare(fb, mdp4_kms->aspace);
}
static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
@@ -123,7 +123,7 @@
return;
DBG("%s: cleanup: FB[%u]", mdp4_plane->name, fb->base.id);
- msm_framebuffer_cleanup(fb, mdp4_kms->id);
+ msm_framebuffer_cleanup(fb, mdp4_kms->aspace);
}
@@ -172,13 +172,13 @@
MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe),
- msm_framebuffer_iova(fb, mdp4_kms->id, 0));
+ msm_framebuffer_iova(fb, mdp4_kms->aspace, 0));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP1_BASE(pipe),
- msm_framebuffer_iova(fb, mdp4_kms->id, 1));
+ msm_framebuffer_iova(fb, mdp4_kms->aspace, 1));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP2_BASE(pipe),
- msm_framebuffer_iova(fb, mdp4_kms->id, 2));
+ msm_framebuffer_iova(fb, mdp4_kms->aspace, 2));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP3_BASE(pipe),
- msm_framebuffer_iova(fb, mdp4_kms->id, 3));
+ msm_framebuffer_iova(fb, mdp4_kms->aspace, 3));
plane->fb = fb;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index c205c36..15e7da2 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -171,7 +171,7 @@
container_of(work, struct mdp5_crtc, unref_cursor_work);
struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
- msm_gem_put_iova(val, mdp5_kms->id);
+ msm_gem_put_iova(val, mdp5_kms->aspace);
drm_gem_object_unreference_unlocked(val);
}
@@ -525,7 +525,7 @@
if (!cursor_bo)
return -ENOENT;
- ret = msm_gem_get_iova(cursor_bo, mdp5_kms->id, &cursor_addr);
+ ret = msm_gem_get_iova(cursor_bo, mdp5_kms->aspace, &cursor_addr);
if (ret)
return -EINVAL;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 4f204ff..f05d760 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2016-2017 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -19,6 +19,7 @@
#include <linux/of_irq.h>
#include "msm_drv.h"
+#include "msm_gem.h"
#include "msm_mmu.h"
#include "mdp5_kms.h"
@@ -117,11 +118,12 @@
static void mdp5_kms_destroy(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
- struct msm_mmu *mmu = mdp5_kms->mmu;
+ struct msm_gem_address_space *aspace = mdp5_kms->aspace;
- if (mmu) {
- mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
- mmu->funcs->destroy(mmu);
+ if (aspace) {
+ aspace->mmu->funcs->detach(aspace->mmu,
+ iommu_ports, ARRAY_SIZE(iommu_ports));
+ msm_gem_address_space_destroy(aspace);
}
}
@@ -564,8 +566,8 @@
struct mdp5_kms *mdp5_kms;
struct mdp5_cfg *config;
struct msm_kms *kms;
- struct msm_mmu *mmu;
int irq, i, ret;
+ struct msm_gem_address_space *aspace;
/* priv->kms would have been populated by the MDP5 driver */
kms = priv->kms;
@@ -606,7 +608,8 @@
mdelay(16);
if (config->platform.iommu) {
- mmu = msm_iommu_new(&pdev->dev, config->platform.iommu);
+ struct msm_mmu *mmu = msm_iommu_new(&pdev->dev,
+ config->platform.iommu);
if (IS_ERR(mmu)) {
ret = PTR_ERR(mmu);
dev_err(&pdev->dev, "failed to init iommu: %d\n", ret);
@@ -614,7 +617,16 @@
goto fail;
}
- ret = mmu->funcs->attach(mmu, iommu_ports,
+ aspace = msm_gem_smmu_address_space_create(dev,
+ mmu, "mdp5");
+ if (IS_ERR(aspace)) {
+ ret = PTR_ERR(aspace);
+ goto fail;
+ }
+
+ mdp5_kms->aspace = aspace;
+
+ ret = mmu->funcs->attach(aspace->mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
if (ret) {
dev_err(&pdev->dev, "failed to attach iommu: %d\n",
@@ -625,15 +637,7 @@
} else {
dev_info(&pdev->dev,
"no iommu, fallback to phys contig buffers for scanout\n");
- mmu = NULL;
- }
- mdp5_kms->mmu = mmu;
-
- mdp5_kms->id = msm_register_mmu(dev, mmu);
- if (mdp5_kms->id < 0) {
- ret = mdp5_kms->id;
- dev_err(&pdev->dev, "failed to register mdp5 iommu: %d\n", ret);
- goto fail;
+ aspace = NULL;
}
ret = modeset_init(mdp5_kms);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 0373892..f21e912 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -38,8 +38,7 @@
/* mapper-id used to request GEM buffer mapped for scanout: */
- int id;
- struct msm_mmu *mmu;
+ struct msm_gem_address_space *aspace;
struct mdp5_smp *smp;
struct mdp5_ctl_manager *ctlm;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 5e67e8b..88e5d06 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -261,7 +261,7 @@
return 0;
DBG("%s: prepare: FB[%u]", mdp5_plane->name, fb->base.id);
- return msm_framebuffer_prepare(fb, mdp5_kms->id);
+ return msm_framebuffer_prepare(fb, mdp5_kms->aspace);
}
static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
@@ -275,7 +275,7 @@
return;
DBG("%s: cleanup: FB[%u]", mdp5_plane->name, fb->base.id);
- msm_framebuffer_cleanup(fb, mdp5_kms->id);
+ msm_framebuffer_cleanup(fb, mdp5_kms->aspace);
}
static int mdp5_plane_atomic_check(struct drm_plane *plane,
@@ -398,13 +398,13 @@
MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe),
- msm_framebuffer_iova(fb, mdp5_kms->id, 0));
+ msm_framebuffer_iova(fb, mdp5_kms->aspace, 0));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe),
- msm_framebuffer_iova(fb, mdp5_kms->id, 1));
+ msm_framebuffer_iova(fb, mdp5_kms->aspace, 1));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe),
- msm_framebuffer_iova(fb, mdp5_kms->id, 2));
+ msm_framebuffer_iova(fb, mdp5_kms->aspace, 2));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
- msm_framebuffer_iova(fb, mdp5_kms->id, 3));
+ msm_framebuffer_iova(fb, mdp5_kms->aspace, 3));
plane->fb = fb;
}
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index efeea31..5a48aae 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -131,6 +131,10 @@
&connector->encoder->crtc->state->mode))
continue;
+ if (msm_is_mode_seamless_dms(
+ &connector->encoder->crtc->state->adjusted_mode))
+ continue;
+
funcs = encoder->helper_private;
DRM_DEBUG_ATOMIC("disabling [ENCODER:%d:%s]\n",
@@ -166,6 +170,9 @@
if (msm_is_mode_seamless(&crtc->state->mode))
continue;
+ if (msm_is_mode_seamless_dms(&crtc->state->adjusted_mode))
+ continue;
+
funcs = crtc->helper_private;
DRM_DEBUG_ATOMIC("disabling [CRTC:%d]\n",
@@ -303,6 +310,13 @@
if (msm_is_mode_seamless(&crtc->state->mode))
continue;
+ /**
+ * On DMS switch, wait for ping pong done to ensure the current
+ * frame transfer is complete.
+ */
+ if (msm_is_mode_seamless_dms(&crtc->state->adjusted_mode))
+ kms->funcs->wait_for_tx_complete(kms, crtc);
+
funcs = crtc->helper_private;
if (crtc->state->enable) {
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index f7d5d02..33ef04b 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -58,13 +58,14 @@
#define MSM_VERSION_PATCHLEVEL 0
#define TEARDOWN_DEADLOCK_RETRY_MAX 5
+#define HPD_STRING_SIZE 30
static void msm_drm_helper_hotplug_event(struct drm_device *dev)
{
struct drm_connector *connector;
- char *event_string;
+ char name[HPD_STRING_SIZE], status[HPD_STRING_SIZE];
char const *connector_name;
- char *envp[2];
+ char *envp[3];
if (!dev) {
DRM_ERROR("hotplug_event failed, invalid input\n");
@@ -74,12 +75,6 @@
if (!dev->mode_config.poll_enabled)
return;
- event_string = kzalloc(SZ_4K, GFP_KERNEL);
- if (!event_string) {
- DRM_ERROR("failed to allocate event string\n");
- return;
- }
-
mutex_lock(&dev->mode_config.mutex);
drm_for_each_connector(connector, dev) {
/* Only handle HPD capable connectors. */
@@ -93,17 +88,20 @@
else
connector_name = "unknown";
- snprintf(event_string, SZ_4K, "name=%s status=%s\n",
- connector_name,
+ snprintf(name, HPD_STRING_SIZE, "name=%s", connector_name);
+
+ snprintf(status, HPD_STRING_SIZE, "status=%s",
drm_get_connector_status_name(connector->status));
- DRM_DEBUG("generating hotplug event [%s]\n", event_string);
- envp[0] = event_string;
- envp[1] = NULL;
+
+ DRM_DEBUG("generating hotplug event [%s]: [%s]\n",
+ name, status);
+ envp[0] = name;
+ envp[1] = status;
+ envp[2] = NULL;
kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
envp);
}
mutex_unlock(&dev->mode_config.mutex);
- kfree(event_string);
}
static void msm_fb_output_poll_changed(struct drm_device *dev)
@@ -123,13 +121,37 @@
msm_drm_helper_hotplug_event(dev);
}
+/**
+ * msm_atomic_helper_check - validate state object
+ * @dev: DRM device
+ * @state: the driver state object
+ *
+ * This is a wrapper for the drm_atomic_helper_check to check the modeset
+ * and state checking for planes. Additionally it checks if any secure
+ * transition(moving CRTC and planes between secure and non-secure states and
+ * vice versa) is allowed or not. When going to secure state, planes
+ * with fb_mode as dir translated only can be staged on the CRTC, and only one
+ * CRTC should be active.
+ * Also mixing of secure and non-secure is not allowed.
+ *
+ * RETURNS
+ * Zero for success or -errorno.
+ */
int msm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
+ struct msm_drm_private *priv;
+
if (msm_is_suspend_blocked(dev)) {
DRM_DEBUG("rejecting commit during suspend\n");
return -EBUSY;
}
+
+ priv = dev->dev_private;
+ if (priv && priv->kms && priv->kms->funcs &&
+ priv->kms->funcs->atomic_check)
+ return priv->kms->funcs->atomic_check(priv->kms, state);
+
return drm_atomic_helper_check(dev, state);
}
@@ -140,42 +162,6 @@
.atomic_commit = msm_atomic_commit,
};
-int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu)
-{
- struct msm_drm_private *priv = dev->dev_private;
- int idx = priv->num_mmus++;
-
- if (WARN_ON(idx >= ARRAY_SIZE(priv->mmus)))
- return -EINVAL;
-
- priv->mmus[idx] = mmu;
-
- return idx;
-}
-
-void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu)
-{
- struct msm_drm_private *priv = dev->dev_private;
- int idx;
-
- if (priv->num_mmus <= 0) {
- dev_err(dev->dev, "invalid num mmus %d\n", priv->num_mmus);
- return;
- }
-
- idx = priv->num_mmus - 1;
-
- /* only support reverse-order deallocation */
- if (priv->mmus[idx] != mmu) {
- dev_err(dev->dev, "unexpected mmu at idx %d\n", idx);
- return;
- }
-
- --priv->num_mmus;
- priv->mmus[idx] = 0;
-}
-
-
#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
static bool reglog = false;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
@@ -322,7 +308,7 @@
list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
- kthread_queue_work(&priv->event_thread[crtc_id].worker,
+ kthread_queue_work(&priv->disp_thread[crtc_id].worker,
&vbl_ctrl->work);
return 0;
@@ -559,6 +545,7 @@
struct msm_kms *kms;
struct sde_dbg_power_ctrl dbg_power_ctrl = { 0 };
int ret, i;
+ struct sched_param param;
ddev = drm_dev_alloc(drv, dev);
if (!ddev) {
@@ -662,6 +649,12 @@
}
ddev->mode_config.funcs = &mode_config_funcs;
+ /**
+ * this priority was found during empiric testing to have appropriate
+ * realtime scheduling to process display updates and interact with
+ * other real time and normal priority task
+ */
+ param.sched_priority = 16;
for (i = 0; i < priv->num_crtcs; i++) {
/* initialize display thread */
@@ -672,6 +665,11 @@
kthread_run(kthread_worker_fn,
&priv->disp_thread[i].worker,
"crtc_commit:%d", priv->disp_thread[i].crtc_id);
+ ret = sched_setscheduler(priv->disp_thread[i].thread,
+ SCHED_FIFO, ¶m);
+ if (ret)
+ pr_warn("display thread priority update failed: %d\n",
+ ret);
if (IS_ERR(priv->disp_thread[i].thread)) {
dev_err(dev, "failed to create crtc_commit kthread\n");
@@ -686,6 +684,18 @@
kthread_run(kthread_worker_fn,
&priv->event_thread[i].worker,
"crtc_event:%d", priv->event_thread[i].crtc_id);
+ /**
+ * event thread should also run at same priority as disp_thread
+ * because it is handling frame_done events. A lower priority
+ * event thread and higher priority disp_thread can causes
+ * frame_pending counters beyond 2. This can lead to commit
+ * failure at crtc commit level.
+ */
+ ret = sched_setscheduler(priv->event_thread[i].thread,
+ SCHED_FIFO, ¶m);
+ if (ret)
+ pr_warn("display event thread priority update failed: %d\n",
+ ret);
if (IS_ERR(priv->event_thread[i].thread)) {
dev_err(dev, "failed to create crtc_event kthread\n");
@@ -1384,7 +1394,7 @@
return ret;
}
-void msm_mode_object_event_nofity(struct drm_mode_object *obj,
+void msm_mode_object_event_notify(struct drm_mode_object *obj,
struct drm_device *dev, struct drm_event *event, u8 *payload)
{
struct msm_drm_private *priv = NULL;
@@ -1944,6 +1954,30 @@
return ret;
}
+struct msm_gem_address_space *
+msm_gem_smmu_address_space_get(struct drm_device *dev,
+ unsigned int domain)
+{
+ struct msm_drm_private *priv = NULL;
+ struct msm_kms *kms;
+ const struct msm_kms_funcs *funcs;
+
+ if ((!dev) || (!dev->dev_private))
+ return NULL;
+
+ priv = dev->dev_private;
+ kms = priv->kms;
+ if (!kms)
+ return NULL;
+
+ funcs = kms->funcs;
+
+ if ((!funcs) || (!funcs->get_address_space))
+ return NULL;
+
+ return funcs->get_address_space(priv->kms, domain);
+}
+
/*
* We don't know what's the best binding to link the gpu with the drm device.
* Fow now, we just hunt for all the possible gpus that we support, and add them
@@ -2055,6 +2089,7 @@
static int __init msm_drm_register(void)
{
DBG("init");
+ msm_smmu_driver_init();
msm_dsi_register();
msm_edp_register();
msm_hdmi_register();
@@ -2070,6 +2105,7 @@
adreno_unregister();
msm_edp_unregister();
msm_dsi_unregister();
+ msm_smmu_driver_cleanup();
}
module_init(msm_drm_register);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 86fec8b..2f665a4 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -61,6 +61,8 @@
struct msm_gem_submit;
struct msm_fence_context;
struct msm_fence_cb;
+struct msm_gem_address_space;
+struct msm_gem_vma;
#define NUM_DOMAINS 4 /* one for KMS, then one per gpu core (?) */
#define MAX_CRTCS 8
@@ -117,6 +119,7 @@
PLANE_PROP_ROTATION,
PLANE_PROP_BLEND_OP,
PLANE_PROP_SRC_CONFIG,
+ PLANE_PROP_FB_TRANSLATION_MODE,
/* total # of properties */
PLANE_PROP_COUNT
@@ -143,6 +146,7 @@
CRTC_PROP_ROT_PREFILL_BW,
CRTC_PROP_ROT_CLK,
CRTC_PROP_ROI_V1,
+ CRTC_PROP_SECURITY_LEVEL,
/* total # of properties */
CRTC_PROP_COUNT
@@ -152,6 +156,7 @@
/* blob properties, always put these first */
CONNECTOR_PROP_SDE_INFO,
CONNECTOR_PROP_HDR_INFO,
+ CONNECTOR_PROP_PP_DITHER,
/* # of blob properties */
CONNECTOR_PROP_BLOBCOUNT,
@@ -382,10 +387,22 @@
/**
* struct msm_mode_info - defines all msm custom mode info
- * @topology - supported topology for the mode
+ * @frame_rate: frame_rate of the mode
+ * @vtotal: vtotal calculated for the mode
+ * @prefill_lines: prefill lines based on porches.
+ * @jitter_numer: display panel jitter numerator configuration
+ * @jitter_denom: display panel jitter denominator configuration
+ * @topology: supported topology for the mode
+ * @comp_info: compression info supported
*/
struct msm_mode_info {
+ uint32_t frame_rate;
+ uint32_t vtotal;
+ uint32_t prefill_lines;
+ uint32_t jitter_numer;
+ uint32_t jitter_denom;
struct msm_display_topology topology;
+ struct msm_compression_info comp_info;
};
/**
@@ -405,11 +422,6 @@
* @is_primary: Set to true if display is primary display
* @is_te_using_watchdog_timer: Boolean to indicate watchdog TE is
* used instead of panel TE in cmd mode panels
- * @frame_rate: Display frame rate
- * @prefill_lines: prefill lines based on porches.
- * @vtotal: display vertical total
- * @jitter: display jitter configuration
- * @comp_info: Compression supported by the display
* @roi_caps: Region of interest capability info
*/
struct msm_display_info {
@@ -429,12 +441,6 @@
bool is_primary;
bool is_te_using_watchdog_timer;
- uint32_t frame_rate;
- uint32_t prefill_lines;
- uint32_t vtotal;
- uint32_t jitter;
-
- struct msm_compression_info comp_info;
struct msm_roi_caps roi_caps;
};
@@ -528,9 +534,13 @@
uint32_t pending_crtcs;
wait_queue_head_t pending_crtcs_event;
- /* registered MMUs: */
- unsigned int num_mmus;
- struct msm_mmu *mmus[NUM_DOMAINS];
+ /* Registered address spaces.. currently this is fixed per # of
+ * iommu's. Ie. one for display block and one for gpu block.
+ * Eventually, to do per-process gpu pagetables, we'll want one
+ * of these per-process.
+ */
+ unsigned int num_aspaces;
+ struct msm_gem_address_space *aspace[NUM_DOMAINS];
unsigned int num_planes;
struct drm_plane *planes[MAX_PLANES];
@@ -635,10 +645,73 @@
int msm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool nonblock);
-int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
-void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
-
void msm_gem_submit_free(struct msm_gem_submit *submit);
+void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt,
+ void *priv);
+int msm_gem_map_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt,
+ void *priv, unsigned int flags);
+void msm_gem_address_space_destroy(struct msm_gem_address_space *aspace);
+
+/* For GPU and legacy display */
+struct msm_gem_address_space *
+msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
+ const char *name);
+
+/* For SDE display */
+struct msm_gem_address_space *
+msm_gem_smmu_address_space_create(struct drm_device *dev, struct msm_mmu *mmu,
+ const char *name);
+
+/**
+ * msm_gem_add_obj_to_aspace_active_list: adds obj to active obj list in aspace
+ */
+void msm_gem_add_obj_to_aspace_active_list(
+ struct msm_gem_address_space *aspace,
+ struct drm_gem_object *obj);
+
+/**
+ * msm_gem_remove_obj_from_aspace_active_list: removes obj from active obj
+ * list in aspace
+ */
+void msm_gem_remove_obj_from_aspace_active_list(
+ struct msm_gem_address_space *aspace,
+ struct drm_gem_object *obj);
+
+/**
+ * msm_gem_smmu_address_space_get: returns the aspace pointer for the requested
+ * domain
+ */
+struct msm_gem_address_space *
+msm_gem_smmu_address_space_get(struct drm_device *dev,
+ unsigned int domain);
+
+/**
+ * msm_gem_aspace_domain_attach_detach: function to inform the attach/detach
+ * of the domain for this aspace
+ */
+void msm_gem_aspace_domain_attach_detach_update(
+ struct msm_gem_address_space *aspace,
+ bool is_detach);
+
+/**
+ * msm_gem_address_space_register_cb: function to register callback for attach
+ * and detach of the domain
+ */
+int msm_gem_address_space_register_cb(
+ struct msm_gem_address_space *aspace,
+ void (*cb)(void *, bool),
+ void *cb_data);
+
+/**
+ * msm_gem_address_space_register_cb: function to unregister callback
+ */
+int msm_gem_address_space_unregister_cb(
+ struct msm_gem_address_space *aspace,
+ void (*cb)(void *, bool),
+ void *cb_data);
+
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file);
@@ -650,13 +723,16 @@
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
-int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
- uint32_t *iova);
-int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova);
-uint32_t msm_gem_iova(struct drm_gem_object *obj, int id);
+int msm_gem_get_iova_locked(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint32_t *iova);
+int msm_gem_get_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint32_t *iova);
+uint32_t msm_gem_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace);
struct page **msm_gem_get_pages(struct drm_gem_object *obj);
void msm_gem_put_pages(struct drm_gem_object *obj);
-void msm_gem_put_iova(struct drm_gem_object *obj, int id);
+void msm_gem_put_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace);
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
@@ -693,9 +769,12 @@
struct dma_buf *dmabuf, struct sg_table *sgt);
void msm_framebuffer_set_kmap(struct drm_framebuffer *fb, bool enable);
-int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id);
-void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id);
-uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane);
+int msm_framebuffer_prepare(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace);
+void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace);
+uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace, int plane);
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
@@ -707,16 +786,34 @@
void msm_fbdev_free(struct drm_device *dev);
struct hdmi;
+#ifdef CONFIG_DRM_MSM_HDMI
int msm_hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev,
struct drm_encoder *encoder);
void __init msm_hdmi_register(void);
void __exit msm_hdmi_unregister(void);
+#else
+static inline void __init msm_hdmi_register(void)
+{
+}
+static inline void __exit msm_hdmi_unregister(void)
+{
+}
+#endif
struct msm_edp;
+#ifdef CONFIG_DRM_MSM_EDP
void __init msm_edp_register(void);
void __exit msm_edp_unregister(void);
int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
struct drm_encoder *encoder);
+#else
+static inline void __init msm_edp_register(void)
+{
+}
+static inline void __exit msm_edp_unregister(void)
+{
+}
+#endif
struct msm_dsi;
enum msm_dsi_encoder_id {
@@ -732,7 +829,7 @@
* @event: event that needs to be notified.
* @payload: payload for the event.
*/
-void msm_mode_object_event_nofity(struct drm_mode_object *obj,
+void msm_mode_object_event_notify(struct drm_mode_object *obj,
struct drm_device *dev, struct drm_event *event, u8 *payload);
#ifdef CONFIG_DRM_MSM_DSI
void __init msm_dsi_register(void);
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 918427a..0a9f12d 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -148,14 +148,15 @@
* should be fine, since only the scanout (mdpN) side of things needs
* this, the gpu doesn't care about fb's.
*/
-int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id)
+int msm_framebuffer_prepare(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int ret, i, n = drm_format_num_planes(fb->pixel_format);
uint32_t iova;
for (i = 0; i < n; i++) {
- ret = msm_gem_get_iova(msm_fb->planes[i], id, &iova);
+ ret = msm_gem_get_iova(msm_fb->planes[i], aspace, &iova);
DBG("FB[%u]: iova[%d]: %08x (%d)", fb->base.id, i, iova, ret);
if (ret)
return ret;
@@ -167,7 +168,8 @@
return 0;
}
-void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id)
+void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int i, n = drm_format_num_planes(fb->pixel_format);
@@ -176,15 +178,16 @@
msm_framebuffer_kunmap(fb);
for (i = 0; i < n; i++)
- msm_gem_put_iova(msm_fb->planes[i], id);
+ msm_gem_put_iova(msm_fb->planes[i], aspace);
}
-uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane)
+uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace, int plane)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
if (!msm_fb->planes[plane])
return 0;
- return msm_gem_iova(msm_fb->planes[plane], id) + fb->offsets[plane];
+ return msm_gem_iova(msm_fb->planes[plane], aspace) + fb->offsets[plane];
}
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index acd7af5..d64dcc6 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -26,6 +26,11 @@
#include "msm_gpu.h"
#include "msm_mmu.h"
+static void *get_dmabuf_ptr(struct drm_gem_object *obj)
+{
+ return (obj && obj->import_attach) ? obj->import_attach->dmabuf : NULL;
+}
+
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -285,33 +290,67 @@
return offset;
}
+static void obj_remove_domain(struct msm_gem_vma *domain)
+{
+ if (domain) {
+ list_del(&domain->list);
+ kfree(domain);
+ }
+}
+
static void
put_iova(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct msm_drm_private *priv = obj->dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- int id;
+ struct msm_gem_vma *domain, *tmp;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
- struct msm_mmu *mmu = priv->mmus[id];
- if (mmu && msm_obj->domain[id].iova) {
- uint32_t offset = msm_obj->domain[id].iova;
+ list_for_each_entry_safe(domain, tmp, &msm_obj->domains, list) {
+ if (iommu_present(&platform_bus_type)) {
+ msm_gem_unmap_vma(domain->aspace, domain,
+ msm_obj->sgt, get_dmabuf_ptr(obj));
- if (obj->import_attach && mmu->funcs->unmap_dma_buf)
- mmu->funcs->unmap_dma_buf(mmu, msm_obj->sgt,
- obj->import_attach->dmabuf,
- DMA_BIDIRECTIONAL);
- else
- mmu->funcs->unmap(mmu, offset, msm_obj->sgt,
- obj->size);
- msm_obj->domain[id].iova = 0;
+ msm_gem_remove_obj_from_aspace_active_list(
+ domain->aspace,
+ obj);
}
+
+ obj_remove_domain(domain);
}
}
+static struct msm_gem_vma *obj_add_domain(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+
+ if (!domain)
+ return ERR_PTR(-ENOMEM);
+
+ domain->aspace = aspace;
+
+ list_add_tail(&domain->list, &msm_obj->domains);
+
+ return domain;
+}
+
+static struct msm_gem_vma *obj_get_domain(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *domain;
+
+ list_for_each_entry(domain, &msm_obj->domains, list) {
+ if (domain->aspace == aspace)
+ return domain;
+ }
+
+ return NULL;
+}
+
/* should be called under struct_mutex.. although it can be called
* from atomic context without struct_mutex to acquire an extra
* iova ref if you know one is already held.
@@ -319,65 +358,67 @@
* That means when I do eventually need to add support for unpinning
* the refcnt counter needs to be atomic_t.
*/
-int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
- uint32_t *iova)
+int msm_gem_get_iova_locked(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint32_t *iova)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct page **pages;
+ struct msm_gem_vma *domain;
int ret = 0;
- if (!msm_obj->domain[id].iova) {
- struct msm_drm_private *priv = obj->dev->dev_private;
- struct page **pages = get_pages(obj);
+ if (!iommu_present(&platform_bus_type)) {
+ pages = get_pages(obj);
if (IS_ERR(pages))
return PTR_ERR(pages);
- if (iommu_present(&platform_bus_type)) {
- struct msm_mmu *mmu = priv->mmus[id];
-
- if (WARN_ON(!mmu))
- return -EINVAL;
-
- if (obj->import_attach && mmu->funcs->map_dma_buf) {
- ret = mmu->funcs->map_dma_buf(mmu, msm_obj->sgt,
- obj->import_attach->dmabuf,
- DMA_BIDIRECTIONAL,
- msm_obj->flags);
- if (ret) {
- DRM_ERROR("Unable to map dma buf\n");
- return ret;
- }
- }
- msm_obj->domain[id].iova =
- sg_dma_address(msm_obj->sgt->sgl);
- } else {
- WARN_ONCE(1, "physical address being used\n");
- msm_obj->domain[id].iova = physaddr(obj);
- }
+ *iova = physaddr(obj);
+ return 0;
}
- if (!ret)
- *iova = msm_obj->domain[id].iova;
+ domain = obj_get_domain(obj, aspace);
+
+ if (!domain) {
+ domain = obj_add_domain(obj, aspace);
+ if (IS_ERR(domain))
+ return PTR_ERR(domain);
+
+ pages = get_pages(obj);
+ if (IS_ERR(pages)) {
+ obj_remove_domain(domain);
+ return PTR_ERR(pages);
+ }
+
+ ret = msm_gem_map_vma(aspace, domain, msm_obj->sgt,
+ get_dmabuf_ptr(obj),
+ msm_obj->flags);
+ }
+
+ if (!ret && domain) {
+ *iova = domain->iova;
+ msm_gem_add_obj_to_aspace_active_list(aspace, obj);
+ } else {
+ obj_remove_domain(domain);
+ }
return ret;
}
/* get iova, taking a reference. Should have a matching put */
-int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
+int msm_gem_get_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint32_t *iova)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *domain;
int ret;
- /* this is safe right now because we don't unmap until the
- * bo is deleted:
- */
- if (msm_obj->domain[id].iova) {
- *iova = msm_obj->domain[id].iova;
+ domain = obj_get_domain(obj, aspace);
+ if (domain) {
+ *iova = domain->iova;
return 0;
}
mutex_lock(&obj->dev->struct_mutex);
- ret = msm_gem_get_iova_locked(obj, id, iova);
+ ret = msm_gem_get_iova_locked(obj, aspace, iova);
mutex_unlock(&obj->dev->struct_mutex);
return ret;
}
@@ -385,14 +426,18 @@
/* get iova without taking a reference, used in places where you have
* already done a 'msm_gem_get_iova()'.
*/
-uint32_t msm_gem_iova(struct drm_gem_object *obj, int id)
+uint32_t msm_gem_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- WARN_ON(!msm_obj->domain[id].iova);
- return msm_obj->domain[id].iova;
+ struct msm_gem_vma *domain = obj_get_domain(obj, aspace);
+
+ WARN_ON(!domain);
+
+ return domain ? domain->iova : 0;
}
-void msm_gem_put_iova(struct drm_gem_object *obj, int id)
+void msm_gem_put_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
{
// XXX TODO ..
// NOTE: probably don't need a _locked() version.. we wouldn't
@@ -402,6 +447,63 @@
// things that are no longer needed..
}
+void msm_gem_aspace_domain_attach_detach_update(
+ struct msm_gem_address_space *aspace,
+ bool is_detach)
+{
+ struct msm_gem_object *msm_obj;
+ struct drm_gem_object *obj;
+ struct aspace_client *aclient;
+ int ret;
+ uint32_t iova;
+
+ if (!aspace)
+ return;
+
+ mutex_lock(&aspace->dev->struct_mutex);
+ if (is_detach) {
+ /* Indicate to clients domain is getting detached */
+ list_for_each_entry(aclient, &aspace->clients, list) {
+ if (aclient->cb)
+ aclient->cb(aclient->cb_data,
+ is_detach);
+ }
+
+ /**
+ * Unmap active buffers,
+ * typically clients should do this when the callback is called,
+ * but this needs to be done for the framebuffers which are not
+ * attached to any planes. (background apps)
+ */
+ list_for_each_entry(msm_obj, &aspace->active_list, iova_list) {
+ obj = &msm_obj->base;
+ if (obj->import_attach) {
+ put_iova(obj);
+ put_pages(obj);
+ }
+ }
+ } else {
+ /* map active buffers */
+ list_for_each_entry(msm_obj, &aspace->active_list,
+ iova_list) {
+ obj = &msm_obj->base;
+ ret = msm_gem_get_iova_locked(obj, aspace, &iova);
+ if (ret) {
+ mutex_unlock(&obj->dev->struct_mutex);
+ return;
+ }
+ }
+
+ /* Indicate to clients domain is attached */
+ list_for_each_entry(aclient, &aspace->clients, list) {
+ if (aclient->cb)
+ aclient->cb(aclient->cb_data,
+ is_detach);
+ }
+ }
+ mutex_unlock(&aspace->dev->struct_mutex);
+}
+
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
@@ -644,6 +746,7 @@
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *domain;
struct reservation_object *robj = msm_obj->resv;
struct reservation_object_list *fobj;
struct fence *fence;
@@ -666,6 +769,7 @@
}
seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu%s\n",
+
msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
obj->name, obj->refcount.refcount.counter,
off, msm_obj->vaddr, obj->size, madv);
@@ -685,6 +789,12 @@
if (fence)
describe_fence(fence, "Exclusive", m);
rcu_read_unlock();
+
+ /* FIXME: we need to print the address space here too */
+ list_for_each_entry(domain, &msm_obj->domains, list)
+ seq_printf(m, " %08llx", domain->iova);
+
+ seq_puts(m, "\n");
}
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
@@ -721,7 +831,8 @@
if (obj->import_attach) {
if (msm_obj->vaddr)
- dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
+ dma_buf_vunmap(obj->import_attach->dmabuf,
+ msm_obj->vaddr);
/* Don't drop the pages for imported dmabuf, as they are not
* ours, just free the array we allocated:
@@ -776,7 +887,6 @@
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
- unsigned sz;
bool use_vram = false;
switch (flags & MSM_BO_CACHE_MASK) {
@@ -798,16 +908,17 @@
if (WARN_ON(use_vram && !priv->vram.size))
return -EINVAL;
- sz = sizeof(*msm_obj);
- if (use_vram)
- sz += sizeof(struct drm_mm_node);
-
- msm_obj = kzalloc(sz, GFP_KERNEL);
+ msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
if (!msm_obj)
return -ENOMEM;
- if (use_vram)
- msm_obj->vram_node = (void *)&msm_obj[1];
+ if (use_vram) {
+ struct msm_gem_vma *domain = obj_add_domain(&msm_obj->base,
+ NULL);
+
+ if (!IS_ERR(domain))
+ msm_obj->vram_node = &domain->node;
+ }
msm_obj->flags = flags;
msm_obj->madv = MSM_MADV_WILLNEED;
@@ -820,6 +931,9 @@
}
INIT_LIST_HEAD(&msm_obj->submit_entry);
+ INIT_LIST_HEAD(&msm_obj->domains);
+ INIT_LIST_HEAD(&msm_obj->iova_list);
+
list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
*obj = &msm_obj->base;
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 19c7726..c50c453 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -25,6 +25,55 @@
#define MSM_BO_STOLEN 0x10000000 /* try to use stolen/splash memory */
#define MSM_BO_KEEPATTRS 0x20000000 /* keep h/w bus attributes */
struct msm_gem_object;

/*
 * Per-backend operations of an address space.  Two implementations exist:
 * the SMMU-based one for SDE display and the drm_mm-based one for the GPU.
 * map/unmap/destroy are implemented by both; the active-list and callback
 * hooks are provided by the SMMU backend only (NULL otherwise).
 */
struct msm_gem_aspace_ops {
	int (*map)(struct msm_gem_address_space *, struct msm_gem_vma *,
		struct sg_table *sgt, void *priv, unsigned int flags);

	void (*unmap)(struct msm_gem_address_space *, struct msm_gem_vma *,
		struct sg_table *sgt, void *priv);

	void (*destroy)(struct msm_gem_address_space *);
	void (*add_to_active)(struct msm_gem_address_space *,
		struct msm_gem_object *);
	void (*remove_from_active)(struct msm_gem_address_space *,
		struct msm_gem_object *);
	int (*register_cb)(struct msm_gem_address_space *,
		void (*cb)(void *, bool),
		void *);
	int (*unregister_cb)(struct msm_gem_address_space *,
		void (*cb)(void *, bool),
		void *);
};

/* One registered client callback for domain attach/detach notification */
struct aspace_client {
	void (*cb)(void *, bool);	/* invoked as cb(cb_data, is_detach) */
	void *cb_data;
	struct list_head list;	/* entry in msm_gem_address_space::clients */
};


struct msm_gem_address_space {
	const char *name;
	struct msm_mmu *mmu;
	const struct msm_gem_aspace_ops *ops;
	bool domain_attached;	/* is the SMMU domain currently attached? */
	struct drm_device *dev;
	/* list of mapped objects */
	struct list_head active_list;
	/* list of clients */
	struct list_head clients;
};

/* One mapping of a GEM object into one address space */
struct msm_gem_vma {
	/* Node used by the GPU address space, but not the SDE address space */
	struct drm_mm_node node;
	struct msm_gem_address_space *aspace;
	uint64_t iova;
	struct list_head list;	/* entry in msm_gem_object::domains */
};
+
struct msm_gem_object {
struct drm_gem_object base;
@@ -62,9 +111,7 @@
struct sg_table *sgt;
void *vaddr;
- struct {
- dma_addr_t iova;
- } domain[NUM_DOMAINS];
+ struct list_head domains;
/* normally (resv == &_resv) except for imported bo's */
struct reservation_object *resv;
@@ -74,6 +121,7 @@
* an IOMMU. Also used for stolen/splashscreen buffer.
*/
struct drm_mm_node *vram_node;
+ struct list_head iova_list;
};
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index b6a0f37..c695dda 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -31,11 +31,14 @@
#define BO_PINNED 0x2000
static struct msm_gem_submit *submit_create(struct drm_device *dev,
- struct msm_gpu *gpu, int nr_bos, int nr_cmds)
+ struct msm_gpu *gpu, uint32_t nr_bos, uint32_t nr_cmds)
{
struct msm_gem_submit *submit;
- int sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) +
- (nr_cmds * sizeof(*submit->cmd));
+ uint64_t sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) +
+ (nr_cmds * sizeof(submit->cmd[0]));
+
+ if (sz > SIZE_MAX)
+ return NULL;
submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
if (!submit)
@@ -157,7 +160,7 @@
struct msm_gem_object *msm_obj = submit->bos[i].obj;
if (submit->bos[i].flags & BO_PINNED)
- msm_gem_put_iova(&msm_obj->base, submit->gpu->id);
+ msm_gem_put_iova(&msm_obj->base, submit->gpu->aspace);
if (submit->bos[i].flags & BO_LOCKED)
ww_mutex_unlock(&msm_obj->resv->lock);
@@ -245,7 +248,7 @@
/* if locking succeeded, pin bo: */
ret = msm_gem_get_iova_locked(&msm_obj->base,
- submit->gpu->id, &iova);
+ submit->gpu->aspace, &iova);
if (ret)
break;
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
new file mode 100644
index 0000000..d02228a
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -0,0 +1,361 @@
+/*
+ * Copyright (C) 2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+#include "msm_gem.h"
+#include "msm_mmu.h"
+
+/* SDE address space operations */
+static void smmu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt,
+ void *priv)
+{
+ struct dma_buf *buf = priv;
+
+ if (buf)
+ aspace->mmu->funcs->unmap_dma_buf(aspace->mmu,
+ sgt, buf, DMA_BIDIRECTIONAL);
+ else
+ aspace->mmu->funcs->unmap_sg(aspace->mmu, sgt,
+ DMA_BIDIRECTIONAL);
+
+ vma->iova = 0;
+}
+
+
+static int smmu_aspace_map_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt,
+ void *priv, unsigned int flags)
+{
+ struct dma_buf *buf = priv;
+ int ret;
+
+ if (!aspace || !aspace->domain_attached)
+ return -EINVAL;
+
+ if (buf)
+ ret = aspace->mmu->funcs->map_dma_buf(aspace->mmu, sgt, buf,
+ DMA_BIDIRECTIONAL, flags);
+ else
+ ret = aspace->mmu->funcs->map_sg(aspace->mmu, sgt,
+ DMA_BIDIRECTIONAL);
+
+ if (!ret)
+ vma->iova = sg_dma_address(sgt->sgl);
+
+ return ret;
+}
+
+static void smmu_aspace_destroy(struct msm_gem_address_space *aspace)
+{
+ aspace->mmu->funcs->destroy(aspace->mmu);
+}
+
+static void smmu_aspace_add_to_active(
+ struct msm_gem_address_space *aspace,
+ struct msm_gem_object *msm_obj)
+{
+ WARN_ON(!mutex_is_locked(&aspace->dev->struct_mutex));
+ list_move_tail(&msm_obj->iova_list, &aspace->active_list);
+}
+
+static void smmu_aspace_remove_from_active(
+ struct msm_gem_address_space *aspace,
+ struct msm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj, *next;
+
+ WARN_ON(!mutex_is_locked(&aspace->dev->struct_mutex));
+
+ list_for_each_entry_safe(msm_obj, next, &aspace->active_list,
+ iova_list) {
+ if (msm_obj == obj) {
+ list_del(&msm_obj->iova_list);
+ break;
+ }
+ }
+}
+
+static int smmu_aspace_register_cb(
+ struct msm_gem_address_space *aspace,
+ void (*cb)(void *, bool),
+ void *cb_data)
+{
+ struct aspace_client *aclient = NULL;
+ struct aspace_client *temp;
+
+ if (!aspace)
+ return -EINVAL;
+
+ if (!aspace->domain_attached)
+ return -EACCES;
+
+ aclient = kzalloc(sizeof(*aclient), GFP_KERNEL);
+ if (!aclient)
+ return -ENOMEM;
+
+ aclient->cb = cb;
+ aclient->cb_data = cb_data;
+ INIT_LIST_HEAD(&aclient->list);
+
+ /* check if callback is already registered */
+ mutex_lock(&aspace->dev->struct_mutex);
+ list_for_each_entry(temp, &aspace->clients, list) {
+ if ((temp->cb == aclient->cb) &&
+ (temp->cb_data == aclient->cb_data)) {
+ kfree(aclient);
+ mutex_unlock(&aspace->dev->struct_mutex);
+ return -EEXIST;
+ }
+ }
+
+ list_move_tail(&aclient->list, &aspace->clients);
+ mutex_unlock(&aspace->dev->struct_mutex);
+
+ return 0;
+}
+
+static int smmu_aspace_unregister_cb(
+ struct msm_gem_address_space *aspace,
+ void (*cb)(void *, bool),
+ void *cb_data)
+{
+ struct aspace_client *aclient = NULL;
+ int rc = -ENOENT;
+
+ if (!aspace || !cb)
+ return -EINVAL;
+
+ mutex_lock(&aspace->dev->struct_mutex);
+ list_for_each_entry(aclient, &aspace->clients, list) {
+ if ((aclient->cb == cb) &&
+ (aclient->cb_data == cb_data)) {
+ list_del(&aclient->list);
+ kfree(aclient);
+ rc = 0;
+ break;
+ }
+ }
+ mutex_unlock(&aspace->dev->struct_mutex);
+
+ return rc;
+}
+
+
/* vtable wiring the SMMU (SDE display) backend into the generic aspace API */
static const struct msm_gem_aspace_ops smmu_aspace_ops = {
	.map = smmu_aspace_map_vma,
	.unmap = smmu_aspace_unmap_vma,
	.destroy = smmu_aspace_destroy,
	.add_to_active = smmu_aspace_add_to_active,
	.remove_from_active = smmu_aspace_remove_from_active,
	.register_cb = smmu_aspace_register_cb,
	.unregister_cb = smmu_aspace_unregister_cb,
};
+
+struct msm_gem_address_space *
+msm_gem_smmu_address_space_create(struct drm_device *dev, struct msm_mmu *mmu,
+ const char *name)
+{
+ struct msm_gem_address_space *aspace;
+
+ if (!mmu)
+ return ERR_PTR(-EINVAL);
+
+ aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
+ if (!aspace)
+ return ERR_PTR(-ENOMEM);
+
+ aspace->dev = dev;
+ aspace->name = name;
+ aspace->mmu = mmu;
+ aspace->ops = &smmu_aspace_ops;
+ INIT_LIST_HEAD(&aspace->active_list);
+ INIT_LIST_HEAD(&aspace->clients);
+
+ return aspace;
+}
+
/* GPU address space operations */
struct msm_iommu_aspace {
	struct msm_gem_address_space base;
	struct drm_mm mm;	/* iova range allocator for GPU mappings */
};

/* downcast from the embedded base aspace to the iommu-backed container */
#define to_iommu_aspace(aspace) \
	((struct msm_iommu_aspace *) \
	 container_of(aspace, struct msm_iommu_aspace, base))
+
+static void iommu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt, void *priv)
+{
+ if (!vma->iova)
+ return;
+
+ if (aspace->mmu)
+ aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt);
+
+ drm_mm_remove_node(&vma->node);
+
+ vma->iova = 0;
+}
+
+static int iommu_aspace_map_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt,
+ void *priv, unsigned int flags)
+{
+ struct msm_iommu_aspace *local = to_iommu_aspace(aspace);
+ size_t size = 0;
+ struct scatterlist *sg;
+ int ret = 0, i;
+
+ if (WARN_ON(drm_mm_node_allocated(&vma->node)))
+ return 0;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i)
+ size += sg->length + sg->offset;
+
+ ret = drm_mm_insert_node(&local->mm, &vma->node, size >> PAGE_SHIFT,
+ 0, DRM_MM_SEARCH_DEFAULT);
+ if (ret)
+ return ret;
+
+ vma->iova = vma->node.start << PAGE_SHIFT;
+
+ if (aspace->mmu)
+ ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova,
+ sgt, IOMMU_READ | IOMMU_WRITE);
+
+ return ret;
+}
+
+static void iommu_aspace_destroy(struct msm_gem_address_space *aspace)
+{
+ struct msm_iommu_aspace *local = to_iommu_aspace(aspace);
+
+ drm_mm_takedown(&local->mm);
+ aspace->mmu->funcs->destroy(aspace->mmu);
+}
+
/* vtable for the drm_mm-based GPU backend; no active-list/callback support */
static const struct msm_gem_aspace_ops msm_iommu_aspace_ops = {
	.map = iommu_aspace_map_vma,
	.unmap = iommu_aspace_unmap_vma,
	.destroy = iommu_aspace_destroy,
};
+
+static struct msm_gem_address_space *
+msm_gem_address_space_new(struct msm_mmu *mmu, const char *name,
+ uint64_t start, uint64_t end)
+{
+ struct msm_iommu_aspace *local;
+
+ if (!mmu)
+ return ERR_PTR(-EINVAL);
+
+ local = kzalloc(sizeof(*local), GFP_KERNEL);
+ if (!local)
+ return ERR_PTR(-ENOMEM);
+
+ drm_mm_init(&local->mm, (start >> PAGE_SHIFT),
+ (end >> PAGE_SHIFT) - 1);
+
+ local->base.name = name;
+ local->base.mmu = mmu;
+ local->base.ops = &msm_iommu_aspace_ops;
+
+ return &local->base;
+}
+
+int msm_gem_map_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt,
+ void *priv, unsigned int flags)
+{
+ if (aspace && aspace->ops->map)
+ return aspace->ops->map(aspace, vma, sgt, priv, flags);
+
+ return -EINVAL;
+}
+
+void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt, void *priv)
+{
+ if (aspace && aspace->ops->unmap)
+ aspace->ops->unmap(aspace, vma, sgt, priv);
+}
+
+struct msm_gem_address_space *
+msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
+ const char *name)
+{
+ struct msm_mmu *mmu = msm_iommu_new(dev, domain);
+
+ if (IS_ERR(mmu))
+ return (struct msm_gem_address_space *) mmu;
+
+ return msm_gem_address_space_new(mmu, name,
+ domain->geometry.aperture_start,
+ domain->geometry.aperture_end);
+}
+
+void
+msm_gem_address_space_destroy(struct msm_gem_address_space *aspace)
+{
+ if (aspace && aspace->ops->destroy)
+ aspace->ops->destroy(aspace);
+
+ kfree(aspace);
+}
+
+void msm_gem_add_obj_to_aspace_active_list(
+ struct msm_gem_address_space *aspace,
+ struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ if (aspace && aspace->ops && aspace->ops->add_to_active)
+ aspace->ops->add_to_active(aspace, msm_obj);
+}
+
+void msm_gem_remove_obj_from_aspace_active_list(
+ struct msm_gem_address_space *aspace,
+ struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ if (aspace && aspace->ops && aspace->ops->remove_from_active)
+ aspace->ops->remove_from_active(aspace, msm_obj);
+}
+
+int msm_gem_address_space_register_cb(struct msm_gem_address_space *aspace,
+ void (*cb)(void *, bool),
+ void *cb_data)
+{
+ if (aspace && aspace->ops && aspace->ops->register_cb)
+ return aspace->ops->register_cb(aspace, cb, cb_data);
+
+ return -EINVAL;
+}
+
+int msm_gem_address_space_unregister_cb(struct msm_gem_address_space *aspace,
+ void (*cb)(void *, bool),
+ void *cb_data)
+{
+ if (aspace && aspace->ops && aspace->ops->unregister_cb)
+ return aspace->ops->unregister_cb(aspace, cb, cb_data);
+
+ return -EINVAL;
+}
+
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 5bb0983..49d9e10 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -457,7 +457,7 @@
struct msm_gem_object *msm_obj = submit->bos[i].obj;
/* move to inactive: */
msm_gem_move_to_inactive(&msm_obj->base);
- msm_gem_put_iova(&msm_obj->base, gpu->id);
+ msm_gem_put_iova(&msm_obj->base, gpu->aspace);
drm_gem_object_unreference(&msm_obj->base);
}
@@ -494,6 +494,8 @@
mutex_lock(&dev->struct_mutex);
retire_submits(gpu);
+
+ retire_submits(gpu);
mutex_unlock(&dev->struct_mutex);
if (!msm_gpu_active(gpu))
@@ -538,8 +540,7 @@
/* submit takes a reference to the bo and iova until retired: */
drm_gem_object_reference(&msm_obj->base);
msm_gem_get_iova_locked(&msm_obj->base,
- submit->gpu->id, &iova);
-
+ submit->gpu->aspace, &iova);
if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
@@ -656,12 +657,17 @@
*/
iommu = iommu_domain_alloc(&platform_bus_type);
if (iommu) {
+ /* TODO 32b vs 64b address space.. */
+ iommu->geometry.aperture_start = 0x1000;
+ iommu->geometry.aperture_end = 0xffffffff;
+
dev_info(drm->dev, "%s: using IOMMU\n", name);
- gpu->mmu = msm_iommu_new(&pdev->dev, iommu);
- if (IS_ERR(gpu->mmu)) {
- ret = PTR_ERR(gpu->mmu);
+ gpu->aspace = msm_gem_address_space_create(&pdev->dev,
+ iommu, "gpu");
+ if (IS_ERR(gpu->aspace)) {
+ ret = PTR_ERR(gpu->aspace);
dev_err(drm->dev, "failed to init iommu: %d\n", ret);
- gpu->mmu = NULL;
+ gpu->aspace = NULL;
iommu_domain_free(iommu);
goto fail;
}
@@ -669,8 +675,6 @@
} else {
dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
}
- gpu->id = msm_register_mmu(drm, gpu->mmu);
-
/* Create ringbuffer: */
mutex_lock(&drm->struct_mutex);
@@ -701,13 +705,13 @@
if (gpu->rb) {
if (gpu->rb_iova)
- msm_gem_put_iova(gpu->rb->bo, gpu->id);
+ msm_gem_put_iova(gpu->rb->bo, gpu->aspace);
msm_ringbuffer_destroy(gpu->rb);
}
- if (gpu->mmu)
- gpu->mmu->funcs->destroy(gpu->mmu);
-
if (gpu->fctx)
msm_fence_context_free(gpu->fctx);
+
+ if (gpu->aspace)
+ msm_gem_address_space_destroy(gpu->aspace);
}
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index d61d98a..13ecd72 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -98,8 +98,7 @@
void __iomem *mmio;
int irq;
- struct msm_mmu *mmu;
- int id;
+ struct msm_gem_address_space *aspace;
/* Power Control: */
struct regulator *gpu_reg, *gpu_cx;
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 3a294d0..bc9877c 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -46,7 +46,7 @@
}
static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
- struct sg_table *sgt, unsigned len, int prot)
+ struct sg_table *sgt, int prot)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
struct iommu_domain *domain = iommu->domain;
@@ -85,7 +85,7 @@
}
static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
- struct sg_table *sgt, unsigned len)
+ struct sg_table *sgt)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
struct iommu_domain *domain = iommu->domain;
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index d8ac407..e7fae38 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -34,6 +34,8 @@
#define MSM_MODE_FLAG_SEAMLESS_DYNAMIC_FPS (1<<0)
/* Transition to new mode requires a wait-for-vblank before the modeset */
#define MSM_MODE_FLAG_VBLANK_PRE_MODESET (1<<1)
+/* Request to switch the connector mode */
+#define MSM_MODE_FLAG_SEAMLESS_DMS (1<<2)
/* As there are different display controller blocks depending on the
* snapdragon version, the kms support is split out and the appropriate
@@ -75,6 +77,9 @@
const struct msm_format *msm_fmt,
const struct drm_mode_fb_cmd2 *cmd,
struct drm_gem_object **bos);
+ /* perform complete atomic check of given atomic state */
+ int (*atomic_check)(struct msm_kms *kms,
+ struct drm_atomic_state *state);
/* misc: */
long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
struct drm_encoder *encoder);
@@ -90,6 +95,10 @@
struct drm_mode_object *obj, u32 event, bool en);
/* cleanup: */
void (*destroy)(struct msm_kms *kms);
+ /* get address space */
+ struct msm_gem_address_space *(*get_address_space)(
+ struct msm_kms *kms,
+ unsigned int domain);
};
struct msm_kms {
@@ -111,9 +120,24 @@
static inline
struct msm_kms *mdp4_kms_init(struct drm_device *dev) { return NULL; };
#endif
-struct msm_kms *mdp5_kms_init(struct drm_device *dev);
+
+#ifdef CONFIG_DRM_MSM_MDP5
int msm_mdss_init(struct drm_device *dev);
void msm_mdss_destroy(struct drm_device *dev);
+struct msm_kms *mdp5_kms_init(struct drm_device *dev);
+#else
+static inline int msm_mdss_init(struct drm_device *dev)
+{
+ return 0;
+}
+static inline void msm_mdss_destroy(struct drm_device *dev)
+{
+}
+static inline struct msm_kms *mdp5_kms_init(struct drm_device *dev)
+{
+ return NULL;
+}
+#endif
struct msm_kms *sde_kms_init(struct drm_device *dev);
/**
@@ -124,6 +148,12 @@
return (mode->flags & DRM_MODE_FLAG_SEAMLESS);
}
+static inline bool msm_is_mode_seamless_dms(const struct drm_display_mode *mode)
+{
+ return mode ? (mode->private_flags & MSM_MODE_FLAG_SEAMLESS_DMS)
+ : false;
+}
+
static inline bool msm_is_mode_dynamic_fps(const struct drm_display_mode *mode)
{
return ((mode->flags & DRM_MODE_FLAG_SEAMLESS) &&
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index fbf7e7b..08e6f79 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -21,7 +21,6 @@
#include <linux/iommu.h>
struct msm_mmu;
-struct msm_gpu;
enum msm_mmu_domain_type {
MSM_SMMU_DOMAIN_UNSECURE,
@@ -35,9 +34,8 @@
int (*attach)(struct msm_mmu *mmu, const char * const *names, int cnt);
void (*detach)(struct msm_mmu *mmu, const char * const *names, int cnt);
int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
- unsigned int len, int prot);
- int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
- unsigned int len);
+ int prot);
+ int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt);
int (*map_sg)(struct msm_mmu *mmu, struct sg_table *sgt,
enum dma_data_direction dir);
void (*unmap_sg)(struct msm_mmu *mmu, struct sg_table *sgt,
@@ -47,6 +45,7 @@
void (*unmap_dma_buf)(struct msm_mmu *mmu, struct sg_table *sgt,
struct dma_buf *dma_buf, int dir);
void (*destroy)(struct msm_mmu *mmu);
+ bool (*is_domain_secure)(struct msm_mmu *mmu);
};
struct msm_mmu {
@@ -62,8 +61,11 @@
}
struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain);
-struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu);
struct msm_mmu *msm_smmu_new(struct device *dev,
enum msm_mmu_domain_type domain);
+/* SDE smmu driver initialize and cleanup functions */
+int __init msm_smmu_driver_init(void);
+void __exit msm_smmu_driver_cleanup(void);
+
#endif /* __MSM_MMU_H__ */
diff --git a/drivers/gpu/drm/msm/msm_prop.c b/drivers/gpu/drm/msm/msm_prop.c
index f2996dd..d1991a4 100644
--- a/drivers/gpu/drm/msm/msm_prop.c
+++ b/drivers/gpu/drm/msm/msm_prop.c
@@ -21,8 +21,6 @@
uint32_t blob_count,
uint32_t state_size)
{
- int i;
-
/* prevent access if any of these are NULL */
if (!base || !dev || !property_array || !property_data) {
property_count = 0;
@@ -60,10 +58,6 @@
0,
sizeof(struct msm_property_data) *
property_count);
- INIT_LIST_HEAD(&info->dirty_list);
-
- for (i = 0; i < property_count; ++i)
- INIT_LIST_HEAD(&property_data[i].dirty_node);
}
}
@@ -72,9 +66,6 @@
if (!info)
return;
- /* reset dirty list */
- INIT_LIST_HEAD(&info->dirty_list);
-
/* free state cache */
while (info->state_cache_size > 0)
kfree(info->state_cache[--(info->state_cache_size)]);
@@ -82,24 +73,25 @@
mutex_destroy(&info->property_lock);
}
-int msm_property_pop_dirty(struct msm_property_info *info)
+int msm_property_pop_dirty(struct msm_property_info *info,
+ struct msm_property_state *property_state)
{
struct list_head *item;
int rc = 0;
- if (!info) {
- DRM_ERROR("invalid info\n");
+ if (!info || !property_state || !property_state->values) {
+ DRM_ERROR("invalid argument(s)\n");
return -EINVAL;
}
mutex_lock(&info->property_lock);
- if (list_empty(&info->dirty_list)) {
+ if (list_empty(&property_state->dirty_list)) {
rc = -EAGAIN;
} else {
- item = info->dirty_list.next;
+ item = property_state->dirty_list.next;
list_del_init(item);
- rc = container_of(item, struct msm_property_data, dirty_node)
- - info->property_data;
+ rc = container_of(item, struct msm_property_value, dirty_node)
+ - property_state->values;
DRM_DEBUG_KMS("property %d dirty\n", rc);
}
mutex_unlock(&info->property_lock);
@@ -112,26 +104,28 @@
* This function doesn't mutex protect the
* dirty linked list.
* @info: Pointer to property info container struct
+ * @property_state: Pointer to property state container struct
* @property_idx: Property index
*/
static void _msm_property_set_dirty_no_lock(
struct msm_property_info *info,
+ struct msm_property_state *property_state,
uint32_t property_idx)
{
- if (!info || property_idx >= info->property_count) {
- DRM_ERROR("invalid argument(s), info %pK, idx %u\n",
- info, property_idx);
+ if (!info || !property_state || !property_state->values ||
+ property_idx >= info->property_count) {
+ DRM_ERROR("invalid argument(s), idx %u\n", property_idx);
return;
}
/* avoid re-inserting if already dirty */
- if (!list_empty(&info->property_data[property_idx].dirty_node)) {
+ if (!list_empty(&property_state->values[property_idx].dirty_node)) {
DRM_DEBUG_KMS("property %u already dirty\n", property_idx);
return;
}
- list_add_tail(&info->property_data[property_idx].dirty_node,
- &info->dirty_list);
+ list_add_tail(&property_state->values[property_idx].dirty_node,
+ &property_state->dirty_list);
}
/**
@@ -371,35 +365,36 @@
return rc;
}
-int msm_property_set_dirty(struct msm_property_info *info, int property_idx)
+int msm_property_set_dirty(struct msm_property_info *info,
+ struct msm_property_state *property_state,
+ int property_idx)
{
- if (!info) {
- DRM_ERROR("invalid property info\n");
+ if (!info || !property_state || !property_state->values) {
+ DRM_ERROR("invalid argument(s)\n");
return -EINVAL;
}
mutex_lock(&info->property_lock);
- _msm_property_set_dirty_no_lock(info, property_idx);
+ _msm_property_set_dirty_no_lock(info, property_state, property_idx);
mutex_unlock(&info->property_lock);
return 0;
}
int msm_property_atomic_set(struct msm_property_info *info,
- uint64_t *property_values,
- struct drm_property_blob **property_blobs,
+ struct msm_property_state *property_state,
struct drm_property *property, uint64_t val)
{
struct drm_property_blob *blob;
int property_idx, rc = -EINVAL;
property_idx = msm_property_index(info, property);
- if (!info || (property_idx == -EINVAL) || !property_values) {
- DRM_DEBUG("Invalid argument(s)\n");
+ if (!info || !property_state ||
+ (property_idx == -EINVAL) || !property_state->values) {
+ DRM_DEBUG("invalid argument(s)\n");
} else {
/* extra handling for incoming properties */
mutex_lock(&info->property_lock);
if ((property->flags & DRM_MODE_PROP_BLOB) &&
- (property_idx < info->blob_count) &&
- property_blobs) {
+ (property_idx < info->blob_count)) {
/* DRM lookup also takes a reference */
blob = drm_property_lookup_blob(info->dev,
(uint32_t)val);
@@ -411,18 +406,21 @@
val = blob->base.id;
/* save blob - need to clear previous ref */
- if (property_blobs[property_idx])
+ if (property_state->values[property_idx].blob)
drm_property_unreference_blob(
- property_blobs[property_idx]);
- property_blobs[property_idx] = blob;
+ property_state->values[
+ property_idx].blob);
+ property_state->values[property_idx].blob =
+ blob;
}
}
/* update value and flag as dirty */
- if (property_values[property_idx] != val ||
+ if (property_state->values[property_idx].value != val ||
info->property_data[property_idx].force_dirty) {
- property_values[property_idx] = val;
- _msm_property_set_dirty_no_lock(info, property_idx);
+ property_state->values[property_idx].value = val;
+ _msm_property_set_dirty_no_lock(info, property_state,
+ property_idx);
DBG("%s - %lld", property->name, val);
}
@@ -434,18 +432,18 @@
}
int msm_property_atomic_get(struct msm_property_info *info,
- uint64_t *property_values,
- struct drm_property_blob **property_blobs,
+ struct msm_property_state *property_state,
struct drm_property *property, uint64_t *val)
{
int property_idx, rc = -EINVAL;
property_idx = msm_property_index(info, property);
- if (!info || (property_idx == -EINVAL) || !property_values || !val) {
+ if (!info || (property_idx == -EINVAL) ||
+ !property_state->values || !val) {
DRM_DEBUG("Invalid argument(s)\n");
} else {
mutex_lock(&info->property_lock);
- *val = property_values[property_idx];
+ *val = property_state->values[property_idx].value;
mutex_unlock(&info->property_lock);
rc = 0;
}
@@ -495,8 +493,8 @@
}
void msm_property_reset_state(struct msm_property_info *info, void *state,
- uint64_t *property_values,
- struct drm_property_blob **property_blobs)
+ struct msm_property_state *property_state,
+ struct msm_property_value *property_values)
{
uint32_t i;
@@ -508,24 +506,29 @@
if (state)
memset(state, 0, info->state_size);
+ if (property_state) {
+ property_state->property_count = info->property_count;
+ property_state->values = property_values;
+ INIT_LIST_HEAD(&property_state->dirty_list);
+ }
+
/*
* Assign default property values. This helper is mostly used
* to initialize newly created state objects.
*/
if (property_values)
- for (i = 0; i < info->property_count; ++i)
- property_values[i] =
+ for (i = 0; i < info->property_count; ++i) {
+ property_values[i].value =
info->property_data[i].default_value;
-
- if (property_blobs)
- for (i = 0; i < info->blob_count; ++i)
- property_blobs[i] = 0;
+ property_values[i].blob = NULL;
+ INIT_LIST_HEAD(&property_values[i].dirty_node);
+ }
}
void msm_property_duplicate_state(struct msm_property_info *info,
void *old_state, void *state,
- uint64_t *property_values,
- struct drm_property_blob **property_blobs)
+ struct msm_property_state *property_state,
+ struct msm_property_value *property_values)
{
uint32_t i;
@@ -536,17 +539,24 @@
memcpy(state, old_state, info->state_size);
- if (property_blobs) {
- /* add ref count for blobs */
- for (i = 0; i < info->blob_count; ++i)
- if (property_blobs[i])
- drm_property_reference_blob(property_blobs[i]);
- }
+ if (!property_state)
+ return;
+
+ INIT_LIST_HEAD(&property_state->dirty_list);
+ property_state->values = property_values;
+
+ if (property_state->values)
+ /* add ref count for blobs and initialize dirty nodes */
+ for (i = 0; i < info->property_count; ++i) {
+ if (property_state->values[i].blob)
+ drm_property_reference_blob(
+ property_state->values[i].blob);
+ INIT_LIST_HEAD(&property_state->values[i].dirty_node);
+ }
}
void msm_property_destroy_state(struct msm_property_info *info, void *state,
- uint64_t *property_values,
- struct drm_property_blob **property_blobs)
+ struct msm_property_state *property_state)
{
uint32_t i;
@@ -554,19 +564,21 @@
DRM_ERROR("invalid argument(s)\n");
return;
}
- if (property_blobs) {
+ if (property_state && property_state->values) {
/* remove ref count for blobs */
- for (i = 0; i < info->blob_count; ++i)
- if (property_blobs[i])
+ for (i = 0; i < info->property_count; ++i)
+ if (property_state->values[i].blob) {
drm_property_unreference_blob(
- property_blobs[i]);
+ property_state->values[i].blob);
+ property_state->values[i].blob = NULL;
+ }
}
_msm_property_free_state(info, state);
}
void *msm_property_get_blob(struct msm_property_info *info,
- struct drm_property_blob **property_blobs,
+ struct msm_property_state *property_state,
size_t *byte_len,
uint32_t property_idx)
{
@@ -574,10 +586,11 @@
size_t len = 0;
void *rc = 0;
- if (!info || !property_blobs || (property_idx >= info->blob_count)) {
+ if (!info || !property_state || !property_state->values ||
+ (property_idx >= info->blob_count)) {
DRM_ERROR("invalid argument(s)\n");
} else {
- blob = property_blobs[property_idx];
+ blob = property_state->values[property_idx].blob;
if (blob) {
len = blob->length;
rc = &blob->data;
@@ -636,14 +649,15 @@
}
int msm_property_set_property(struct msm_property_info *info,
- uint64_t *property_values,
+ struct msm_property_state *property_state,
uint32_t property_idx,
uint64_t val)
{
int rc = -EINVAL;
if (!info || (property_idx >= info->property_count) ||
- property_idx < info->blob_count || !property_values) {
+ property_idx < info->blob_count ||
+ !property_state || !property_state->values) {
DRM_ERROR("invalid argument(s)\n");
} else {
struct drm_property *drm_prop;
@@ -651,8 +665,7 @@
mutex_lock(&info->property_lock);
/* update cached value */
- if (property_values)
- property_values[property_idx] = val;
+ property_state->values[property_idx].value = val;
/* update the new default value for immutables */
drm_prop = info->property_array[property_idx];
diff --git a/drivers/gpu/drm/msm/msm_prop.h b/drivers/gpu/drm/msm/msm_prop.h
index e54c796..9a53e56 100644
--- a/drivers/gpu/drm/msm/msm_prop.h
+++ b/drivers/gpu/drm/msm/msm_prop.h
@@ -22,17 +22,28 @@
* struct msm_property_data - opaque structure for tracking per
* drm-object per property stuff
* @default_value: Default property value for this drm object
- * @dirty_node: Linked list node to track if property is dirty or not
* @force_dirty: Always dirty property on incoming sets, rather than checking
* for modified values
*/
struct msm_property_data {
uint64_t default_value;
- struct list_head dirty_node;
bool force_dirty;
};
/**
+ * struct msm_property_value - opaque structure for tracking per
+ * drm-object per property stuff
+ * @value: Current property value for this drm object
+ * @blob: Pointer to associated blob data, if available
+ * @dirty_node: Linked list node to track if property is dirty or not
+ */
+struct msm_property_value {
+ uint64_t value;
+ struct drm_property_blob *blob;
+ struct list_head dirty_node;
+};
+
+/**
* struct msm_property_info: Structure for property/state helper functions
* @base: Pointer to base drm object (plane/crtc/etc.)
* @dev: Pointer to drm device object
@@ -43,8 +54,6 @@
* @install_request: Total number of property 'install' requests
* @install_count: Total number of successful 'install' requests
* @recent_idx: Index of property most recently accessed by set/get
- * @dirty_list: List of all properties that have been 'atomic_set' but not
- * yet cleared with 'msm_property_pop_dirty'
* @is_active: Whether or not drm component properties are 'active'
* @state_cache: Cache of local states, to prevent alloc/free thrashing
* @state_size: Size of local state structures
@@ -64,7 +73,6 @@
int32_t recent_idx;
- struct list_head dirty_list;
bool is_active;
void *state_cache[MSM_PROP_STATE_CACHE_SIZE];
@@ -74,6 +82,19 @@
};
/**
+ * struct msm_property_state - Structure for local property state information
+ * @property_count: Total number of properties
+ * @values: Pointer to array of msm_property_value objects
+ * @dirty_list: List of all properties that have been 'atomic_set' but not
+ * yet cleared with 'msm_property_pop_dirty'
+ */
+struct msm_property_state {
+ uint32_t property_count;
+ struct msm_property_value *values;
+ struct list_head dirty_list;
+};
+
+/**
* msm_property_get_default - query default value of a property
* @info: Pointer to property info container struct
* @property_idx: Property index
@@ -134,12 +155,14 @@
* msm_property_pop_dirty - determine next dirty property and clear
* its dirty flag
* @info: Pointer to property info container struct
+ * @property_state: Pointer to property state container struct
* Returns: Valid msm property index on success,
* -EAGAIN if no dirty properties are available
* Property indicies returned from this function are similar
* to those returned by the msm_property_index function.
*/
-int msm_property_pop_dirty(struct msm_property_info *info);
+int msm_property_pop_dirty(struct msm_property_info *info,
+ struct msm_property_state *property_state);
/**
* msm_property_init - initialize property info structure
@@ -268,38 +291,37 @@
/**
* msm_property_set_dirty - forcibly flag a property as dirty
* @info: Pointer to property info container struct
+ * @property_state: Pointer to property state container struct
* @property_idx: Property index
* Returns: Zero on success
*/
-int msm_property_set_dirty(struct msm_property_info *info, int property_idx);
+int msm_property_set_dirty(struct msm_property_info *info,
+ struct msm_property_state *property_state,
+ int property_idx);
/**
* msm_property_atomic_set - helper function for atomic property set callback
* @info: Pointer to property info container struct
- * @property_values: Pointer to property values cache array
- * @property_blobs: Pointer to property blobs cache array
+ * @property_state: Pointer to local state structure
* @property: Incoming property pointer
* @val: Incoming property value
* Returns: Zero on success
*/
int msm_property_atomic_set(struct msm_property_info *info,
- uint64_t *property_values,
- struct drm_property_blob **property_blobs,
+ struct msm_property_state *property_state,
struct drm_property *property,
uint64_t val);
/**
* msm_property_atomic_get - helper function for atomic property get callback
* @info: Pointer to property info container struct
- * @property_values: Pointer to property values cache array
- * @property_blobs: Pointer to property blobs cache array
+ * @property_state: Pointer to local state structure
* @property: Incoming property pointer
* @val: Pointer to variable for receiving property value
* Returns: Zero on success
*/
int msm_property_atomic_get(struct msm_property_info *info,
- uint64_t *property_values,
- struct drm_property_blob **property_blobs,
+ struct msm_property_state *property_state,
struct drm_property *property,
uint64_t *val);
@@ -313,50 +335,47 @@
* msm_property_reset_state - helper function for state reset callback
* @info: Pointer to property info container struct
* @state: Pointer to local state structure
+ * @property_state: Pointer to property state container struct
* @property_values: Pointer to property values cache array
- * @property_blobs: Pointer to property blobs cache array
*/
-void msm_property_reset_state(struct msm_property_info *info,
- void *state,
- uint64_t *property_values,
- struct drm_property_blob **property_blobs);
+void msm_property_reset_state(struct msm_property_info *info, void *state,
+ struct msm_property_state *property_state,
+ struct msm_property_value *property_values);
/**
* msm_property_duplicate_state - helper function for duplicate state cb
* @info: Pointer to property info container struct
* @old_state: Pointer to original state structure
* @state: Pointer to newly created state structure
+ * @property_state: Pointer to destination property state container struct
* @property_values: Pointer to property values cache array
- * @property_blobs: Pointer to property blobs cache array
*/
void msm_property_duplicate_state(struct msm_property_info *info,
void *old_state,
void *state,
- uint64_t *property_values,
- struct drm_property_blob **property_blobs);
+ struct msm_property_state *property_state,
+ struct msm_property_value *property_values);
/**
* msm_property_destroy_state - helper function for destroy state cb
* @info: Pointer to property info container struct
* @state: Pointer to local state structure
- * @property_values: Pointer to property values cache array
- * @property_blobs: Pointer to property blobs cache array
+ * @property_state: Pointer to property state container struct
*/
void msm_property_destroy_state(struct msm_property_info *info,
void *state,
- uint64_t *property_values,
- struct drm_property_blob **property_blobs);
+ struct msm_property_state *property_state);
/**
* msm_property_get_blob - obtain cached data pointer for drm blob property
* @info: Pointer to property info container struct
- * @property_blobs: Pointer to property blobs cache array
+ * @property_state: Pointer to property state container struct
* @byte_len: Optional pointer to variable for accepting blob size
* @property_idx: Property index
* Returns: Pointer to blob data
*/
void *msm_property_get_blob(struct msm_property_info *info,
- struct drm_property_blob **property_blobs,
+ struct msm_property_state *property_state,
size_t *byte_len,
uint32_t property_idx);
@@ -385,13 +404,13 @@
* DRM_MODE_PROP_IMMUTABLE flag set.
* Note: This function cannot be called on a blob.
* @info: Pointer to property info container struct
- * @property_values: Pointer to property values cache array
+ * @property_state: Pointer to property state container struct
* @property_idx: Property index
* @val: value of the property to set
* Returns: Zero on success
*/
int msm_property_set_property(struct msm_property_info *info,
- uint64_t *property_values,
+ struct msm_property_state *property_state,
uint32_t property_idx,
uint64_t val);
diff --git a/drivers/gpu/drm/msm/msm_smmu.c b/drivers/gpu/drm/msm/msm_smmu.c
index 7fbcff4..730fc06 100644
--- a/drivers/gpu/drm/msm/msm_smmu.c
+++ b/drivers/gpu/drm/msm/msm_smmu.c
@@ -37,6 +37,7 @@
struct device *dev;
struct dma_iommu_mapping *mmu_mapping;
bool domain_attached;
+ bool secure;
};
struct msm_smmu {
@@ -109,7 +110,7 @@
}
static int msm_smmu_map(struct msm_mmu *mmu, uint32_t iova,
- struct sg_table *sgt, unsigned int len, int prot)
+ struct sg_table *sgt, int prot)
{
struct msm_smmu *smmu = to_msm_smmu(mmu);
struct msm_smmu_client *client = msm_smmu_to_client(smmu);
@@ -177,7 +178,7 @@
}
static int msm_smmu_unmap(struct msm_mmu *mmu, uint32_t iova,
- struct sg_table *sgt, unsigned int len)
+ struct sg_table *sgt)
{
struct msm_smmu *smmu = to_msm_smmu(mmu);
struct msm_smmu_client *client = msm_smmu_to_client(smmu);
@@ -229,6 +230,11 @@
unsigned long attrs = 0x0;
int ret;
+ if (!sgt || !client) {
+ DRM_ERROR("sg table is invalid\n");
+ return -ENOMEM;
+ }
+
if (flags & MSM_BO_KEEPATTRS)
attrs |= DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;
@@ -256,6 +262,11 @@
struct msm_smmu *smmu = to_msm_smmu(mmu);
struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+ if (!sgt || !client) {
+ DRM_ERROR("sg table is invalid\n");
+ return;
+ }
+
if (sgt && sgt->sgl) {
DRM_DEBUG("%pad/0x%x/0x%x\n", &sgt->sgl->dma_address,
sgt->sgl->dma_length, dir);
@@ -265,6 +276,14 @@
msm_dma_unmap_sg(client->dev, sgt->sgl, sgt->nents, dir, dma_buf);
}
+static bool msm_smmu_is_domain_secure(struct msm_mmu *mmu)
+{
+ struct msm_smmu *smmu = to_msm_smmu(mmu);
+ struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+
+ return client->secure;
+}
+
static const struct msm_mmu_funcs funcs = {
.attach = msm_smmu_attach,
.detach = msm_smmu_detach,
@@ -275,6 +294,7 @@
.map_dma_buf = msm_smmu_map_dma_buf,
.unmap_dma_buf = msm_smmu_unmap_dma_buf,
.destroy = msm_smmu_destroy,
+ .is_domain_secure = msm_smmu_is_domain_secure,
};
static struct msm_smmu_domain msm_smmu_domains[MSM_SMMU_DOMAIN_MAX] = {
@@ -286,8 +306,8 @@
},
[MSM_SMMU_DOMAIN_SECURE] = {
.label = "mdp_s",
- .va_start = 0,
- .va_size = SZ_4G,
+ .va_start = SZ_128K,
+ .va_size = SZ_4G - SZ_128K,
.secure = true,
},
[MSM_SMMU_DOMAIN_NRT_UNSECURE] = {
@@ -298,20 +318,20 @@
},
[MSM_SMMU_DOMAIN_NRT_SECURE] = {
.label = "rot_s",
- .va_start = 0,
- .va_size = SZ_4G,
+ .va_start = SZ_128K,
+ .va_size = SZ_4G - SZ_128K,
.secure = true,
},
};
static const struct of_device_id msm_smmu_dt_match[] = {
- { .compatible = "qcom,smmu-mdp-unsec",
+ { .compatible = "qcom,smmu_sde_unsec",
.data = &msm_smmu_domains[MSM_SMMU_DOMAIN_UNSECURE] },
- { .compatible = "qcom,smmu-mdp-sec",
+ { .compatible = "qcom,smmu_sde_sec",
.data = &msm_smmu_domains[MSM_SMMU_DOMAIN_SECURE] },
- { .compatible = "qcom,smmu-rot-unsec",
+ { .compatible = "qcom,smmu_sde_nrt_unsec",
.data = &msm_smmu_domains[MSM_SMMU_DOMAIN_NRT_UNSECURE] },
- { .compatible = "qcom,smmu-rot-sec",
+ { .compatible = "qcom,smmu_sde_nrt_sec",
.data = &msm_smmu_domains[MSM_SMMU_DOMAIN_NRT_SECURE] },
{}
};
@@ -448,6 +468,7 @@
if (domain->secure) {
int secure_vmid = VMID_CP_PIXEL;
+ client->secure = true;
rc = iommu_domain_set_attr(client->mmu_mapping->domain,
DOMAIN_ATTR_SECURE_VMID, &secure_vmid);
if (rc) {
@@ -535,7 +556,7 @@
},
};
-static int __init msm_smmu_driver_init(void)
+int __init msm_smmu_driver_init(void)
{
int ret;
@@ -545,13 +566,11 @@
return ret;
}
-module_init(msm_smmu_driver_init);
-static void __exit msm_smmu_driver_cleanup(void)
+void __exit msm_smmu_driver_cleanup(void)
{
platform_driver_unregister(&msm_smmu_driver);
}
-module_exit(msm_smmu_driver_cleanup);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MSM SMMU driver");
diff --git a/drivers/gpu/drm/msm/sde/sde_ad4.h b/drivers/gpu/drm/msm/sde/sde_ad4.h
index 4a664a8..5a646e9 100644
--- a/drivers/gpu/drm/msm/sde/sde_ad4.h
+++ b/drivers/gpu/drm/msm/sde/sde_ad4.h
@@ -48,6 +48,9 @@
AD_SUSPEND,
AD_ASSERTIVE,
AD_BACKLIGHT,
+ AD_IPC_SUSPEND,
+ AD_IPC_RESUME,
+ AD_IPC_RESET,
AD_PROPMAX,
};
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.c b/drivers/gpu/drm/msm/sde/sde_color_processing.c
index b410302..9409066 100644
--- a/drivers/gpu/drm/msm/sde/sde_color_processing.c
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.c
@@ -60,6 +60,8 @@
static void dspp_gc_install_property(struct drm_crtc *crtc);
+static void dspp_igc_install_property(struct drm_crtc *crtc);
+
typedef void (*dspp_prop_install_func_t)(struct drm_crtc *crtc);
static dspp_prop_install_func_t dspp_prop_install_func[SDE_DSPP_MAX];
@@ -72,6 +74,9 @@
static void sde_cp_notify_ad_event(struct drm_crtc *crtc_drm, void *arg);
+static void sde_cp_ad_set_prop(struct sde_crtc *sde_crtc,
+ enum ad_property ad_prop);
+
#define setup_dspp_prop_install_funcs(func) \
do { \
func[SDE_DSPP_PCC] = dspp_pcc_install_property; \
@@ -80,6 +85,7 @@
func[SDE_DSPP_VLUT] = dspp_vlut_install_property; \
func[SDE_DSPP_GAMUT] = dspp_gamut_install_property; \
func[SDE_DSPP_GC] = dspp_gc_install_property; \
+ func[SDE_DSPP_IGC] = dspp_igc_install_property; \
} while (0)
typedef void (*lm_prop_install_func_t)(struct drm_crtc *crtc);
@@ -526,7 +532,7 @@
}
static void sde_cp_crtc_setfeature(struct sde_cp_node *prop_node,
- struct sde_crtc *sde_crtc, u32 last_feature)
+ struct sde_crtc *sde_crtc)
{
struct sde_hw_cp_cfg hw_cfg;
struct sde_hw_mixer *hw_lm;
@@ -541,16 +547,13 @@
hw_cfg.num_of_mixers = sde_crtc->num_mixers;
hw_cfg.displayh = sde_crtc->base.mode.hdisplay;
hw_cfg.displayv = sde_crtc->base.mode.vdisplay;
+ hw_cfg.last_feature = 0;
for (i = 0; i < num_mixers && !ret; i++) {
hw_lm = sde_crtc->mixers[i].hw_lm;
hw_dspp = sde_crtc->mixers[i].hw_dspp;
hw_cfg.ctl = sde_crtc->mixers[i].hw_ctl;
hw_cfg.mixer_info = hw_lm;
- if (i == num_mixers - 1)
- hw_cfg.last_feature = last_feature;
- else
- hw_cfg.last_feature = 0;
switch (prop_node->feature) {
case SDE_CP_CRTC_DSPP_VLUT:
if (!hw_dspp || !hw_dspp->ops.setup_vlut) {
@@ -724,7 +727,6 @@
struct sde_hw_ctl *ctl;
uint32_t flush_mask = 0;
u32 num_mixers = 0, i = 0;
- u32 num_of_features;
if (!crtc || !crtc->dev) {
DRM_ERROR("invalid crtc %pK dev %pK\n", crtc,
@@ -754,18 +756,13 @@
DRM_DEBUG_DRIVER("Dirty list is empty\n");
return;
}
+ sde_cp_ad_set_prop(sde_crtc, AD_IPC_RESET);
set_dspp_flush = true;
}
- num_of_features = 0;
- list_for_each_entry(prop_node, &sde_crtc->dirty_list, dirty_list)
- num_of_features++;
-
list_for_each_entry_safe(prop_node, n, &sde_crtc->dirty_list,
dirty_list) {
- num_of_features--;
- sde_cp_crtc_setfeature(prop_node, sde_crtc,
- (num_of_features == 0));
+ sde_cp_crtc_setfeature(prop_node, sde_crtc);
/* Set the flush flag to true */
if (prop_node->is_dspp_feature)
set_dspp_flush = true;
@@ -773,16 +770,10 @@
set_lm_flush = true;
}
- num_of_features = 0;
- list_for_each_entry(prop_node, &sde_crtc->ad_dirty, dirty_list)
- num_of_features++;
-
list_for_each_entry_safe(prop_node, n, &sde_crtc->ad_dirty,
dirty_list) {
- num_of_features--;
set_dspp_flush = true;
- sde_cp_crtc_setfeature(prop_node, sde_crtc,
- (num_of_features == 0));
+ sde_cp_crtc_setfeature(prop_node, sde_crtc);
}
for (i = 0; i < num_mixers; i++) {
@@ -1049,6 +1040,12 @@
sde_cp_update_list(prop_node, sde_crtc, true);
list_del_init(&prop_node->active_list);
}
+
+ list_for_each_entry_safe(prop_node, n, &sde_crtc->ad_active,
+ active_list) {
+ sde_cp_update_list(prop_node, sde_crtc, true);
+ list_del_init(&prop_node->active_list);
+ }
}
void sde_cp_crtc_resume(struct drm_crtc *crtc)
@@ -1071,6 +1068,7 @@
"SDE_DSPP_PCC_V", version);
switch (version) {
case 1:
+ case 4:
sde_cp_crtc_install_blob_property(crtc, feature_name,
SDE_CP_CRTC_DSPP_PCC, sizeof(struct drm_msm_pcc));
break;
@@ -1251,6 +1249,30 @@
}
}
+static void dspp_igc_install_property(struct drm_crtc *crtc)
+{
+ char feature_name[256];
+ struct sde_kms *kms = NULL;
+ struct sde_mdss_cfg *catalog = NULL;
+ u32 version;
+
+ kms = get_kms(crtc);
+ catalog = kms->catalog;
+
+ version = catalog->dspp[0].sblk->igc.version >> 16;
+ snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
+ "SDE_DSPP_IGC_V", version);
+ switch (version) {
+ case 3:
+ sde_cp_crtc_install_blob_property(crtc, feature_name,
+ SDE_CP_CRTC_DSPP_IGC, sizeof(struct drm_msm_igc_lut));
+ break;
+ default:
+ DRM_ERROR("version %d not supported\n", version);
+ break;
+ }
+}
+
static void sde_cp_update_list(struct sde_cp_node *prop_node,
struct sde_crtc *crtc, bool dirty_list)
{
@@ -1357,7 +1379,7 @@
hw_dspp->ops.ad_read_intr_resp(hw_dspp, AD4_BACKLIGHT, &bl);
event.length = sizeof(u32);
event.type = DRM_EVENT_AD_BACKLIGHT;
- msm_mode_object_event_nofity(&crtc_drm->base, crtc_drm->dev,
+ msm_mode_object_event_notify(&crtc_drm->base, crtc_drm->dev,
&event, (u8 *)&bl);
}
@@ -1384,7 +1406,6 @@
return -EINVAL;
}
- mutex_lock(&crtc->crtc_lock);
kms = get_kms(crtc_drm);
num_mixers = crtc->num_mixers;
@@ -1438,6 +1459,63 @@
sde_core_irq_unregister_callback(kms, irq_idx, ad_irq);
}
exit:
- mutex_unlock(&crtc->crtc_lock);
return ret;
}
+
+static void sde_cp_ad_set_prop(struct sde_crtc *sde_crtc,
+		enum ad_property ad_prop)
+{
+	struct sde_ad_hw_cfg ad_cfg;
+	/* zero-init: only display/mixer fields are filled below, but the
+	 * whole struct is handed to setup_ad(); avoid passing stack garbage
+	 */
+	struct sde_hw_cp_cfg hw_cfg = {0};
+	struct sde_hw_dspp *hw_dspp = NULL;
+	struct sde_hw_mixer *hw_lm = NULL;
+	u32 num_mixers = sde_crtc->num_mixers;
+	int i = 0, ret = 0;
+
+	hw_cfg.num_of_mixers = sde_crtc->num_mixers;
+	hw_cfg.displayh = sde_crtc->base.mode.hdisplay;
+	hw_cfg.displayv = sde_crtc->base.mode.vdisplay;
+
+	for (i = 0; i < num_mixers && !ret; i++) {
+		hw_lm = sde_crtc->mixers[i].hw_lm;
+		hw_dspp = sde_crtc->mixers[i].hw_dspp;
+		if (!hw_lm || !hw_dspp || !hw_dspp->ops.validate_ad ||
+				!hw_dspp->ops.setup_ad) {
+			ret = -EINVAL;
+			continue;
+		}
+
+		hw_cfg.mixer_info = hw_lm;
+		ad_cfg.prop = ad_prop;
+		ad_cfg.hw_cfg = &hw_cfg;
+		ret = hw_dspp->ops.validate_ad(hw_dspp, (u32 *)&ad_prop);
+		if (!ret)
+			hw_dspp->ops.setup_ad(hw_dspp, &ad_cfg);
+	}
+}
+
+/* Flush AD state to hardware before the display enters inter-processor
+ * communication (IPC) power collapse. NULL-check the raw drm_crtc before
+ * converting: to_sde_crtc() is container_of-based, so checking its result
+ * only works by coincidence of member layout.
+ */
+void sde_cp_crtc_pre_ipc(struct drm_crtc *drm_crtc)
+{
+	if (!drm_crtc) {
+		DRM_ERROR("invalid drm_crtc %pK\n", drm_crtc);
+		return;
+	}
+
+	sde_cp_ad_set_prop(to_sde_crtc(drm_crtc), AD_IPC_SUSPEND);
+}
+
+/* Restore AD state to hardware after the display exits IPC power collapse.
+ * Validate the raw drm_crtc argument rather than the container_of()
+ * result, mirroring sde_cp_crtc_pre_ipc.
+ */
+void sde_cp_crtc_post_ipc(struct drm_crtc *drm_crtc)
+{
+	if (!drm_crtc) {
+		DRM_ERROR("invalid drm_crtc %pK\n", drm_crtc);
+		return;
+	}
+
+	sde_cp_ad_set_prop(to_sde_crtc(drm_crtc), AD_IPC_RESUME);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.h b/drivers/gpu/drm/msm/sde/sde_color_processing.h
index e78f690..08e345d 100644
--- a/drivers/gpu/drm/msm/sde/sde_color_processing.h
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.h
@@ -103,4 +103,18 @@
*/
int sde_cp_ad_interrupt(struct drm_crtc *crtc, bool en,
struct sde_irq_callback *irq);
+
+/**
+ * sde_cp_crtc_pre_ipc: Handle color processing features
+ * before entering IPC
+ * @crtc: Pointer to crtc.
+ */
+void sde_cp_crtc_pre_ipc(struct drm_crtc *crtc);
+
+/**
+ * sde_cp_crtc_post_ipc: Handle color processing features
+ * after exiting IPC
+ * @crtc: Pointer to crtc.
+ */
+void sde_cp_crtc_post_ipc(struct drm_crtc *crtc);
#endif /*_SDE_COLOR_PROCESSING_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index 6551257..3af8278 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -29,6 +29,9 @@
#define SDE_ERROR_CONN(c, fmt, ...) SDE_ERROR("conn%d " fmt,\
(c) ? (c)->base.base.id : -1, ##__VA_ARGS__)
+static u32 dither_matrix[DITHER_MATRIX_SZ] = {
+ 15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10
+};
static const struct drm_prop_enum_list e_topology_name[] = {
{SDE_RM_TOPOLOGY_NONE, "sde_none"},
@@ -83,7 +86,7 @@
if (c_conn->ops.set_backlight) {
event.type = DRM_EVENT_SYS_BACKLIGHT;
event.length = sizeof(u32);
- msm_mode_object_event_nofity(&c_conn->base.base,
+ msm_mode_object_event_notify(&c_conn->base.base,
c_conn->base.dev, &event, (u8 *)&brightness);
c_conn->ops.set_backlight(c_conn->display, bl_lvl);
}
@@ -217,6 +220,129 @@
(void)sde_connector_register_event(connector, event_idx, 0, 0);
}
+static int _sde_connector_get_default_dither_cfg_v1(
+ struct sde_connector *c_conn, void *cfg)
+{
+ struct drm_msm_dither *dither_cfg = (struct drm_msm_dither *)cfg;
+ enum dsi_pixel_format dst_format = DSI_PIXEL_FORMAT_MAX;
+
+ if (!c_conn || !cfg) {
+ SDE_ERROR("invalid argument(s), c_conn %pK, cfg %pK\n",
+ c_conn, cfg);
+ return -EINVAL;
+ }
+
+ if (!c_conn->ops.get_dst_format) {
+ SDE_DEBUG("get_dst_format is unavailable\n");
+ return 0;
+ }
+
+ dst_format = c_conn->ops.get_dst_format(c_conn->display);
+ switch (dst_format) {
+ case DSI_PIXEL_FORMAT_RGB888:
+ dither_cfg->c0_bitdepth = 8;
+ dither_cfg->c1_bitdepth = 8;
+ dither_cfg->c2_bitdepth = 8;
+ dither_cfg->c3_bitdepth = 8;
+ break;
+ case DSI_PIXEL_FORMAT_RGB666:
+ case DSI_PIXEL_FORMAT_RGB666_LOOSE:
+ dither_cfg->c0_bitdepth = 6;
+ dither_cfg->c1_bitdepth = 6;
+ dither_cfg->c2_bitdepth = 6;
+ dither_cfg->c3_bitdepth = 6;
+ break;
+ default:
+ SDE_DEBUG("no default dither config for dst_format %d\n",
+ dst_format);
+ return -ENODATA;
+ }
+
+ memcpy(&dither_cfg->matrix, dither_matrix,
+ sizeof(u32) * DITHER_MATRIX_SZ);
+ dither_cfg->temporal_en = 0;
+ return 0;
+}
+
+static void _sde_connector_install_dither_property(struct drm_device *dev,
+		struct sde_kms *sde_kms, struct sde_connector *c_conn)
+{
+	char prop_name[DRM_PROP_NAME_LEN];
+	struct sde_mdss_cfg *catalog = NULL;
+	struct drm_property_blob *blob_ptr;
+	void *cfg;
+	int ret = 0;
+	u32 version = 0, len = 0;
+	bool default_dither_needed = false;
+
+	if (!dev || !sde_kms || !c_conn) {
+		SDE_ERROR("invld args (s), dev %pK, sde_kms %pK, c_conn %pK\n",
+				dev, sde_kms, c_conn);
+		return;
+	}
+
+	catalog = sde_kms->catalog;
+	version = SDE_COLOR_PROCESS_MAJOR(
+			catalog->pingpong[0].sblk->dither.version);
+	snprintf(prop_name, ARRAY_SIZE(prop_name), "%s%d",
+			"SDE_PP_DITHER_V", version);
+	switch (version) {
+	case 1:
+		msm_property_install_blob(&c_conn->property_info, prop_name,
+			DRM_MODE_PROP_BLOB,
+			CONNECTOR_PROP_PP_DITHER);
+		len = sizeof(struct drm_msm_dither);
+		cfg = kzalloc(len, GFP_KERNEL);
+		if (!cfg)
+			return;
+
+		ret = _sde_connector_get_default_dither_cfg_v1(c_conn, cfg);
+		if (!ret)
+			default_dither_needed = true;
+		break;
+	default:
+		SDE_ERROR("unsupported dither version %d\n", version);
+		return;
+	}
+
+	if (default_dither_needed) {
+		blob_ptr = drm_property_create_blob(dev, len, cfg);
+		if (IS_ERR_OR_NULL(blob_ptr))
+			goto exit;
+		c_conn->blob_dither = blob_ptr;
+	}
+exit:
+	kfree(cfg);
+}
+
+int sde_connector_get_dither_cfg(struct drm_connector *conn,
+ struct drm_connector_state *state, void **cfg,
+ size_t *len)
+{
+ struct sde_connector *c_conn = NULL;
+ struct sde_connector_state *c_state = NULL;
+ size_t dither_sz = 0;
+
+ if (!conn || !state || !(*cfg))
+ return -EINVAL;
+
+ c_conn = to_sde_connector(conn);
+ c_state = to_sde_connector_state(state);
+
+ /* try to get user config data first */
+ *cfg = msm_property_get_blob(&c_conn->property_info,
+ &c_state->property_state,
+ &dither_sz,
+ CONNECTOR_PROP_PP_DITHER);
+ /* if user config data doesn't exist, use default dither blob */
+ if (*cfg == NULL && c_conn->blob_dither) {
+ *cfg = &c_conn->blob_dither->data;
+ dither_sz = c_conn->blob_dither->length;
+ }
+ *len = dither_sz;
+ return 0;
+}
+
int sde_connector_get_info(struct drm_connector *connector,
struct msm_display_info *info)
{
@@ -301,10 +427,15 @@
c_conn = to_sde_connector(connector);
+ if (c_conn->ops.put_modes)
+ c_conn->ops.put_modes(connector, c_conn->display);
+
if (c_conn->blob_caps)
drm_property_unreference_blob(c_conn->blob_caps);
if (c_conn->blob_hdr)
drm_property_unreference_blob(c_conn->blob_hdr);
+ if (c_conn->blob_dither)
+ drm_property_unreference_blob(c_conn->blob_dither);
msm_property_destroy(&c_conn->property_info);
drm_connector_unregister(connector);
@@ -327,18 +458,16 @@
return;
}
- msm_framebuffer_cleanup(c_state->out_fb,
- c_state->mmu_id);
+ msm_framebuffer_cleanup(c_state->out_fb, c_state->aspace);
drm_framebuffer_unreference(c_state->out_fb);
c_state->out_fb = NULL;
- if (c_conn) {
- c_state->property_values[CONNECTOR_PROP_OUT_FB] =
+ if (c_conn)
+ c_state->property_values[CONNECTOR_PROP_OUT_FB].value =
msm_property_get_default(&c_conn->property_info,
CONNECTOR_PROP_OUT_FB);
- } else {
- c_state->property_values[CONNECTOR_PROP_OUT_FB] = ~0;
- }
+ else
+ c_state->property_values[CONNECTOR_PROP_OUT_FB].value = ~0;
}
static void sde_connector_atomic_destroy_state(struct drm_connector *connector,
@@ -369,7 +498,7 @@
} else {
/* destroy value helper */
msm_property_destroy_state(&c_conn->property_info, c_state,
- c_state->property_values, 0);
+ &c_state->property_state);
}
}
@@ -398,7 +527,8 @@
/* reset value helper, zero out state structure and reset properties */
msm_property_reset_state(&c_conn->property_info, c_state,
- c_state->property_values, 0);
+ &c_state->property_state,
+ c_state->property_values);
c_state->base.connector = connector;
connector->state = &c_state->base;
@@ -426,13 +556,14 @@
/* duplicate value helper */
msm_property_duplicate_state(&c_conn->property_info,
- c_oldstate, c_state, c_state->property_values, 0);
+ c_oldstate, c_state,
+ &c_state->property_state, c_state->property_values);
/* additional handling for drm framebuffer objects */
if (c_state->out_fb) {
drm_framebuffer_reference(c_state->out_fb);
rc = msm_framebuffer_prepare(c_state->out_fb,
- c_state->mmu_id);
+ c_state->aspace);
if (rc)
SDE_ERROR("failed to prepare fb, %d\n", rc);
}
@@ -546,7 +677,7 @@
return rc;
c_state->rois.roi[i] = roi_v1.roi[i];
- SDE_DEBUG_CONN(c_conn, "roi%d: roi 0x%x 0x%x 0x%x 0x%x\n", i,
+ SDE_DEBUG_CONN(c_conn, "roi%d: roi (%d,%d) (%d,%d)\n", i,
c_state->rois.roi[i].x1,
c_state->rois.roi[i].y1,
c_state->rois.roi[i].x2,
@@ -626,7 +757,7 @@
/* generic property handling */
rc = msm_property_atomic_set(&c_conn->property_info,
- c_state->property_values, 0, property, val);
+ &c_state->property_state, property, val);
if (rc)
goto end;
@@ -652,14 +783,14 @@
c_conn->fb_kmap);
if (c_state->out_fb->flags & DRM_MODE_FB_SECURE)
- c_state->mmu_id =
- c_conn->mmu_id[SDE_IOMMU_DOMAIN_SECURE];
+ c_state->aspace =
+ c_conn->aspace[SDE_IOMMU_DOMAIN_SECURE];
else
- c_state->mmu_id =
- c_conn->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE];
+ c_state->aspace =
+ c_conn->aspace[SDE_IOMMU_DOMAIN_UNSECURE];
rc = msm_framebuffer_prepare(c_state->out_fb,
- c_state->mmu_id);
+ c_state->aspace);
if (rc)
SDE_ERROR("prep fb failed, %d\n", rc);
}
@@ -733,7 +864,7 @@
else
/* get cached property value */
rc = msm_property_atomic_get(&c_conn->property_info,
- c_state->property_values, 0, property, val);
+ &c_state->property_state, property, val);
/* allow for custom override */
if (c_conn->ops.get_property)
@@ -755,7 +886,8 @@
sde_fence_prepare(&to_sde_connector(connector)->retire_fence);
}
-void sde_connector_complete_commit(struct drm_connector *connector)
+void sde_connector_complete_commit(struct drm_connector *connector,
+ ktime_t ts)
{
if (!connector) {
SDE_ERROR("invalid connector\n");
@@ -763,7 +895,7 @@
}
/* signal connector's retire fence */
- sde_fence_signal(&to_sde_connector(connector)->retire_fence, 0);
+ sde_fence_signal(&to_sde_connector(connector)->retire_fence, ts, 0);
}
static enum drm_connector_status
@@ -1010,18 +1142,17 @@
c_conn->lp_mode = 0;
c_conn->last_panel_power_mode = SDE_MODE_DPMS_ON;
- /* cache mmu_id's for later */
sde_kms = to_sde_kms(priv->kms);
if (sde_kms->vbif[VBIF_NRT]) {
- c_conn->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] =
- sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_UNSECURE];
- c_conn->mmu_id[SDE_IOMMU_DOMAIN_SECURE] =
- sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_SECURE];
+ c_conn->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
+ sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_UNSECURE];
+ c_conn->aspace[SDE_IOMMU_DOMAIN_SECURE] =
+ sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_SECURE];
} else {
- c_conn->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] =
- sde_kms->mmu_id[MSM_SMMU_DOMAIN_UNSECURE];
- c_conn->mmu_id[SDE_IOMMU_DOMAIN_SECURE] =
- sde_kms->mmu_id[MSM_SMMU_DOMAIN_SECURE];
+ c_conn->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
+ sde_kms->aspace[MSM_SMMU_DOMAIN_UNSECURE];
+ c_conn->aspace[SDE_IOMMU_DOMAIN_SECURE] =
+ sde_kms->aspace[MSM_SMMU_DOMAIN_SECURE];
}
if (ops)
@@ -1116,6 +1247,8 @@
&c_conn->property_info, "sde_drm_roi_v1", 0x0,
0, ~0, 0, CONNECTOR_PROP_ROI_V1);
}
+ /* install PP_DITHER properties */
+ _sde_connector_install_dither_property(dev, sde_kms, c_conn);
msm_property_install_range(&c_conn->property_info, "RETIRE_FENCE",
0x0, 0, INR_OPEN_MAX, 0, CONNECTOR_PROP_RETIRE_FENCE);
@@ -1156,6 +1289,9 @@
drm_property_unreference_blob(c_conn->blob_caps);
if (c_conn->blob_hdr)
drm_property_unreference_blob(c_conn->blob_hdr);
+ if (c_conn->blob_dither)
+ drm_property_unreference_blob(c_conn->blob_dither);
+
msm_property_destroy(&c_conn->property_info);
error_cleanup_fence:
mutex_destroy(&c_conn->lock);
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index 497d0db..4747d3a 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -13,6 +13,7 @@
#ifndef _SDE_CONNECTOR_H_
#define _SDE_CONNECTOR_H_
+#include <uapi/drm/msm_drm_pp.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_panel.h>
@@ -64,6 +65,14 @@
void *display);
/**
+ * put_modes - free up drm modes of the connector
+ * @connector: Pointer to drm connector structure
+ * @display: Pointer to private display handle
+ */
+ void (*put_modes)(struct drm_connector *connector,
+ void *display);
+
+ /**
* update_pps - update pps command for the display panel
* @pps_cmd: Pointer to pps command
* @display: Pointer to private display handle
@@ -121,14 +130,14 @@
int (*get_info)(struct msm_display_info *info, void *display);
/**
- * get_topology - retrieve current topology for the mode selected
+ * get_mode_info - retrieve mode information
* @drm_mode: Display mode set for the display
- * @topology: Out parameter. Topology for the mode.
+ * @mode_info: Out parameter. information of the display mode
* @max_mixer_width: max width supported by HW layer mixer
* Returns: Zero on success
*/
- int (*get_topology)(const struct drm_display_mode *drm_mode,
- struct msm_display_topology *topology,
+ int (*get_mode_info)(const struct drm_display_mode *drm_mode,
+ struct msm_mode_info *mode_info,
u32 max_mixer_width);
/**
@@ -183,6 +192,13 @@
*/
int (*set_power)(struct drm_connector *connector,
int power_mode, void *display);
+
+ /**
+ * get_dst_format - get dst_format from display
+ * @display: Pointer to private display handle
+ * Returns: dst_format of display
+ */
+ enum dsi_pixel_format (*get_dst_format)(void *display);
};
/**
@@ -227,6 +243,7 @@
* @property_data: Array of private data for generic property handling
* @blob_caps: Pointer to blob structure for 'capabilities' property
* @blob_hdr: Pointer to blob structure for 'hdr_properties' property
+ * @blob_dither: Pointer to blob structure for default dither config
* @fb_kmap: true if kernel mapping of framebuffer is requested
* @event_table: Array of registered events
* @event_lock: Lock object for event_table
@@ -240,7 +257,7 @@
struct drm_panel *panel;
void *display;
- int mmu_id[SDE_IOMMU_DOMAIN_MAX];
+ struct msm_gem_address_space *aspace[SDE_IOMMU_DOMAIN_MAX];
char name[SDE_CONNECTOR_NAME_SIZE];
@@ -255,6 +272,7 @@
struct msm_property_data property_data[CONNECTOR_PROP_COUNT];
struct drm_property_blob *blob_caps;
struct drm_property_blob *blob_hdr;
+ struct drm_property_blob *blob_dither;
bool fb_kmap;
struct sde_connector_evt event_table[SDE_CONN_EVENT_COUNT];
@@ -304,17 +322,21 @@
* struct sde_connector_state - private connector status structure
* @base: Base drm connector structure
* @out_fb: Pointer to output frame buffer, if applicable
- * @mmu_id: MMU ID for accessing frame buffer objects, if applicable
+ * @aspace: Address space for accessing frame buffer objects, if applicable
+ * @property_state: Local storage for msm_prop properties
* @property_values: Local cache of current connector property values
* @rois: Regions of interest structure for mapping CRTC to Connector output
+ * @property_blobs: blob properties
*/
struct sde_connector_state {
struct drm_connector_state base;
struct drm_framebuffer *out_fb;
- int mmu_id;
- uint64_t property_values[CONNECTOR_PROP_COUNT];
+ struct msm_gem_address_space *aspace;
+ struct msm_property_state property_state;
+ struct msm_property_value property_values[CONNECTOR_PROP_COUNT];
struct msm_roi_list rois;
+ struct drm_property_blob *property_blobs[CONNECTOR_PROP_BLOBCOUNT];
};
/**
@@ -334,15 +356,15 @@
*/
#define sde_connector_get_property(S, X) \
((S) && ((X) < CONNECTOR_PROP_COUNT) ? \
- (to_sde_connector_state((S))->property_values[(X)]) : 0)
+ (to_sde_connector_state((S))->property_values[(X)].value) : 0)
/**
- * sde_connector_get_property_values - retrieve property values cache
+ * sde_connector_get_property_state - retrieve property state cache
* @S: Pointer to drm connector state
- * Returns: Integer value of requested property
+ * Returns: Pointer to local property state structure
*/
-#define sde_connector_get_property_values(S) \
- ((S) ? (to_sde_connector_state((S))->property_values) : 0)
+#define sde_connector_get_property_state(S) \
+ ((S) ? (&to_sde_connector_state((S))->property_state) : NULL)
/**
* sde_connector_get_out_fb - query out_fb value from sde connector state
@@ -367,6 +389,20 @@
}
/**
+ * sde_connector_get_lp - helper accessor to retrieve LP state
+ * @connector: pointer to drm connector
+ * Returns: value of the CONNECTOR_PROP_LP property or 0
+ */
+static inline uint64_t sde_connector_get_lp(
+ struct drm_connector *connector)
+{
+ if (!connector || !connector->state)
+ return 0;
+ return sde_connector_get_property(connector->state,
+ CONNECTOR_PROP_LP);
+}
+
+/**
* sde_connector_init - create drm connector object for a given display
* @dev: Pointer to drm device struct
* @encoder: Pointer to associated encoder
@@ -394,8 +430,9 @@
/**
* sde_connector_complete_commit - signal completion of current commit
* @connector: Pointer to drm connector object
+ * @ts: timestamp to be updated in the fence signalling
*/
-void sde_connector_complete_commit(struct drm_connector *connector);
+void sde_connector_complete_commit(struct drm_connector *connector, ktime_t ts);
/**
* sde_connector_get_info - query display specific information
@@ -497,5 +534,15 @@
return (c_conn->connector_type != DRM_MODE_CONNECTOR_VIRTUAL);
}
-#endif /* _SDE_CONNECTOR_H_ */
+/**
+ * sde_connector_get_dither_cfg - get dither property data
+ * @conn: Pointer to drm_connector struct
+ * @state: Pointer to drm_connector_state struct
+ * @cfg: Pointer to pointer to dither cfg
+ * @len: length of the dither data
+ * Returns: Zero on success
+ */
+int sde_connector_get_dither_cfg(struct drm_connector *conn,
+ struct drm_connector_state *state, void **cfg, size_t *len);
+#endif /* _SDE_CONNECTOR_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_core_irq.c b/drivers/gpu/drm/msm/sde/sde_core_irq.c
index cec2b5f..dfdfc1a 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_irq.c
+++ b/drivers/gpu/drm/msm/sde/sde_core_irq.c
@@ -34,8 +34,12 @@
pr_debug("irq_idx=%d\n", irq_idx);
- if (list_empty(&irq_obj->irq_cb_tbl[irq_idx]))
+ if (list_empty(&irq_obj->irq_cb_tbl[irq_idx])) {
SDE_ERROR("irq_idx=%d has no registered callback\n", irq_idx);
+ SDE_EVT32_IRQ(irq_idx, atomic_read(
+ &sde_kms->irq_obj.enable_counts[irq_idx]),
+ SDE_EVTLOG_ERROR);
+ }
atomic_inc(&irq_obj->irq_counts[irq_idx]);
@@ -53,7 +57,7 @@
* NOTE: sde_core_irq_callback_handler is protected by top-level
* spinlock, so it is safe to clear any interrupt status here.
*/
- sde_kms->hw_intr->ops.clear_interrupt_status(
+ sde_kms->hw_intr->ops.clear_intr_status_nolock(
sde_kms->hw_intr,
irq_idx);
}
@@ -94,7 +98,6 @@
SDE_DEBUG("irq_idx=%d enable_count=%d\n", irq_idx,
atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]));
- spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
SDE_EVT32(irq_idx,
atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]));
if (atomic_inc_return(&sde_kms->irq_obj.enable_counts[irq_idx]) == 1) {
@@ -107,26 +110,33 @@
SDE_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
+ spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
/* empty callback list but interrupt is enabled */
if (list_empty(&sde_kms->irq_obj.irq_cb_tbl[irq_idx]))
SDE_ERROR("irq_idx=%d enabled with no callback\n",
irq_idx);
+ spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
}
- spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
return ret;
}
int sde_core_irq_enable(struct sde_kms *sde_kms, int *irq_idxs, u32 irq_count)
{
- int i;
- int ret = 0;
+ int i, ret = 0, counts;
if (!sde_kms || !irq_idxs || !irq_count) {
SDE_ERROR("invalid params\n");
return -EINVAL;
}
+ counts = atomic_read(&sde_kms->irq_obj.enable_counts[irq_idxs[0]]);
+ if (counts) {
+ SDE_ERROR("%pS: irq_idx=%d enable_count=%d\n",
+ __builtin_return_address(0), irq_idxs[0], counts);
+ SDE_EVT32(irq_idxs[0], counts, SDE_EVTLOG_ERROR);
+ }
+
for (i = 0; (i < irq_count) && !ret; i++)
ret = _sde_core_irq_enable(sde_kms, irq_idxs[i]);
@@ -140,7 +150,6 @@
*/
static int _sde_core_irq_disable(struct sde_kms *sde_kms, int irq_idx)
{
- unsigned long irq_flags;
int ret = 0;
if (!sde_kms || !sde_kms->hw_intr || !sde_kms->irq_obj.enable_counts) {
@@ -156,7 +165,6 @@
SDE_DEBUG("irq_idx=%d enable_count=%d\n", irq_idx,
atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]));
- spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
SDE_EVT32(irq_idx,
atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]));
if (atomic_dec_return(&sde_kms->irq_obj.enable_counts[irq_idx]) == 0) {
@@ -168,27 +176,48 @@
irq_idx);
SDE_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
}
- spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
return ret;
}
int sde_core_irq_disable(struct sde_kms *sde_kms, int *irq_idxs, u32 irq_count)
{
- int i;
- int ret = 0;
+ int i, ret = 0, counts;
if (!sde_kms || !irq_idxs || !irq_count) {
SDE_ERROR("invalid params\n");
return -EINVAL;
}
+ counts = atomic_read(&sde_kms->irq_obj.enable_counts[irq_idxs[0]]);
+ if (counts == 2) {
+ SDE_ERROR("%pS: irq_idx=%d enable_count=%d\n",
+ __builtin_return_address(0), irq_idxs[0], counts);
+ SDE_EVT32(irq_idxs[0], counts, SDE_EVTLOG_ERROR);
+ }
+
for (i = 0; (i < irq_count) && !ret; i++)
ret = _sde_core_irq_disable(sde_kms, irq_idxs[i]);
return ret;
}
+u32 sde_core_irq_read_nolock(struct sde_kms *sde_kms, int irq_idx, bool clear)
+{
+	/* validate the op actually invoked below (get_intr_status_nolock),
+	 * not get_interrupt_status, to avoid a NULL callback dereference
+	 */
+	if (!sde_kms || !sde_kms->hw_intr ||
+			!sde_kms->hw_intr->ops.get_intr_status_nolock)
+		return 0;
+
+	if (irq_idx < 0) {
+		SDE_ERROR("[%pS] invalid irq_idx=%d\n",
+				__builtin_return_address(0), irq_idx);
+		return 0;
+	}
+
+	return sde_kms->hw_intr->ops.get_intr_status_nolock(sde_kms->hw_intr,
+			irq_idx, clear);
+}
+
u32 sde_core_irq_read(struct sde_kms *sde_kms, int irq_idx, bool clear)
{
if (!sde_kms || !sde_kms->hw_intr ||
@@ -210,12 +239,19 @@
{
unsigned long irq_flags;
- if (!sde_kms || !register_irq_cb || !register_irq_cb->func ||
- !sde_kms->irq_obj.irq_cb_tbl) {
+ if (!sde_kms || !sde_kms->irq_obj.irq_cb_tbl) {
SDE_ERROR("invalid params\n");
return -EINVAL;
}
+ if (!register_irq_cb || !register_irq_cb->func) {
+ SDE_ERROR("invalid irq_cb:%d func:%d\n",
+ register_irq_cb != NULL,
+ register_irq_cb ?
+ register_irq_cb->func != NULL : -1);
+ return -EINVAL;
+ }
+
if (irq_idx < 0 || irq_idx >= sde_kms->hw_intr->irq_idx_tbl_size) {
SDE_ERROR("invalid IRQ index: [%d]\n", irq_idx);
return -EINVAL;
@@ -238,12 +274,19 @@
{
unsigned long irq_flags;
- if (!sde_kms || !register_irq_cb || !register_irq_cb->func ||
- !sde_kms->irq_obj.irq_cb_tbl) {
+ if (!sde_kms || !sde_kms->irq_obj.irq_cb_tbl) {
SDE_ERROR("invalid params\n");
return -EINVAL;
}
+ if (!register_irq_cb || !register_irq_cb->func) {
+ SDE_ERROR("invalid irq_cb:%d func:%d\n",
+ register_irq_cb != NULL,
+ register_irq_cb ?
+ register_irq_cb->func != NULL : -1);
+ return -EINVAL;
+ }
+
if (irq_idx < 0 || irq_idx >= sde_kms->hw_intr->irq_idx_tbl_size) {
SDE_ERROR("invalid IRQ index: [%d]\n", irq_idx);
return -EINVAL;
diff --git a/drivers/gpu/drm/msm/sde/sde_core_irq.h b/drivers/gpu/drm/msm/sde/sde_core_irq.h
index c775f8c..c32c19c 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_irq.h
+++ b/drivers/gpu/drm/msm/sde/sde_core_irq.h
@@ -114,6 +114,18 @@
bool clear);
/**
+ * sde_core_irq_read_nolock - no lock version of sde_core_irq_read
+ * @sde_kms: SDE handle
+ * @irq_idx: irq index
+ * @clear: True to clear the irq after read
+ * @return: non-zero if irq detected; otherwise no irq detected
+ */
+u32 sde_core_irq_read_nolock(
+ struct sde_kms *sde_kms,
+ int irq_idx,
+ bool clear);
+
+/**
* sde_core_irq_register_callback - For registering callback function on IRQ
* interrupt
* @sde_kms: SDE handle
diff --git a/drivers/gpu/drm/msm/sde/sde_core_perf.c b/drivers/gpu/drm/msm/sde/sde_core_perf.c
index 71dfc12..7243fe2 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_perf.c
+++ b/drivers/gpu/drm/msm/sde/sde_core_perf.c
@@ -225,8 +225,17 @@
struct sde_crtc_state *tmp_cstate =
to_sde_crtc_state(tmp_crtc->state);
- bw_sum_of_intfs +=
- tmp_cstate->new_perf.bw_ctl[i];
+ SDE_DEBUG("crtc:%d bw:%llu ctrl:%d\n",
+ tmp_crtc->base.id,
+ tmp_cstate->new_perf.bw_ctl[i],
+ tmp_cstate->bw_control);
+ /*
+ * For bw check only use the bw if the
+ * atomic property has been already set
+ */
+ if (tmp_cstate->bw_control)
+ bw_sum_of_intfs +=
+ tmp_cstate->new_perf.bw_ctl[i];
}
}
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 30bb72b..935dc12 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -55,9 +55,13 @@
static int sde_crtc_power_interrupt_handler(struct drm_crtc *crtc_drm,
bool en, struct sde_irq_callback *ad_irq);
+static int sde_crtc_pm_event_handler(struct drm_crtc *crtc_drm,
+ bool en, struct sde_irq_callback *noirq);
+
static struct sde_crtc_custom_events custom_events[] = {
{DRM_EVENT_AD_BACKLIGHT, sde_cp_ad_interrupt},
- {DRM_EVENT_CRTC_POWER, sde_crtc_power_interrupt_handler}
+ {DRM_EVENT_CRTC_POWER, sde_crtc_power_interrupt_handler},
+ {DRM_EVENT_SDE_POWER, sde_crtc_pm_event_handler},
};
/* default input fence timeout, in ms */
@@ -979,7 +983,7 @@
* On certain HW, if using 2 LM, ROIs must be split evenly between the
* LMs and be of equal width.
*/
- if (sde_crtc->num_mixers == 1)
+ if (sde_crtc->num_mixers < 2)
return 0;
roi[0] = &crtc_state->lm_roi[0];
@@ -1202,6 +1206,11 @@
state->fb ? state->fb->base.id : -1);
format = to_sde_format(msm_framebuffer_format(pstate->base.fb));
+ if (!format) {
+ SDE_ERROR("invalid format\n");
+ return;
+ }
+
if (pstate->stage == SDE_STAGE_BASE && format->alpha_enable)
bg_alpha_enable = true;
@@ -1463,7 +1472,7 @@
sde_crtc->event = NULL;
DRM_DEBUG_VBL("%s: send event: %pK\n",
sde_crtc->name, event);
- SDE_EVT32(DRMID(crtc));
+ SDE_EVT32_VERBOSE(DRMID(crtc));
drm_crtc_send_vblank_event(crtc, event);
}
}
@@ -1510,8 +1519,9 @@
struct sde_crtc *sde_crtc;
struct sde_crtc_state *cstate;
struct sde_kms *sde_kms;
- struct drm_encoder *encoder;
unsigned long flags;
+ bool frame_done = false;
+ int i;
if (!work) {
SDE_ERROR("invalid work handle\n");
@@ -1534,13 +1544,16 @@
return;
}
priv = sde_kms->dev->dev_private;
+ SDE_ATRACE_BEGIN("crtc_frame_event");
SDE_DEBUG("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
ktime_to_ns(fevent->ts));
- if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE ||
- (fevent->event & SDE_ENCODER_FRAME_EVENT_ERROR) ||
- (fevent->event & SDE_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
+ SDE_EVT32_VERBOSE(DRMID(crtc), fevent->event, SDE_EVTLOG_FUNC_ENTRY);
+
+ if (fevent->event & (SDE_ENCODER_FRAME_EVENT_DONE
+ | SDE_ENCODER_FRAME_EVENT_ERROR
+ | SDE_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
if (atomic_read(&sde_crtc->frame_pending) < 1) {
/* this should not happen */
@@ -1563,44 +1576,39 @@
SDE_EVTLOG_FUNC_CASE3);
}
- if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE ||
- (fevent->event & SDE_ENCODER_FRAME_EVENT_ERROR)) {
- bool signal_fence = true;
-
- drm_for_each_encoder(encoder, crtc->dev) {
- if (encoder->crtc != crtc)
- continue;
-
- signal_fence &=
- sde_encoder_is_cmd_mode(encoder);
- }
-
- /* signal release fence only for cmd mode panels here */
- if (signal_fence) {
- sde_fence_signal(&sde_crtc->output_fence, 0);
- SDE_EVT32_VERBOSE(DRMID(crtc), fevent->event,
- SDE_EVTLOG_FUNC_CASE4);
- }
-
- complete_all(&sde_crtc->frame_done_comp);
- }
-
- if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE)
+ if (fevent->event & SDE_ENCODER_FRAME_EVENT_DONE)
sde_core_perf_crtc_update(crtc, 0, false);
- } else {
- SDE_ERROR("crtc%d ts:%lld unknown event %u\n", crtc->base.id,
- ktime_to_ns(fevent->ts),
- fevent->event);
- SDE_EVT32(DRMID(crtc), fevent->event, SDE_EVTLOG_FUNC_CASE4);
+
+ if (fevent->event & (SDE_ENCODER_FRAME_EVENT_DONE
+ | SDE_ENCODER_FRAME_EVENT_ERROR))
+ frame_done = true;
+ }
+
+ if (fevent->event & SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE) {
+ SDE_ATRACE_BEGIN("signal_release_fence");
+ sde_fence_signal(&sde_crtc->output_fence, fevent->ts, 0);
+ SDE_ATRACE_END("signal_release_fence");
+ }
+
+ if (fevent->event & SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE) {
+ SDE_ATRACE_BEGIN("signal_retire_fence");
+ for (i = 0; i < cstate->num_connectors; ++i)
+ sde_connector_complete_commit(cstate->connectors[i],
+ fevent->ts);
+ SDE_ATRACE_END("signal_retire_fence");
}
if (fevent->event & SDE_ENCODER_FRAME_EVENT_PANEL_DEAD)
SDE_ERROR("crtc%d ts:%lld received panel dead event\n",
crtc->base.id, ktime_to_ns(fevent->ts));
+ if (frame_done)
+ complete_all(&sde_crtc->frame_done_comp);
+
spin_lock_irqsave(&sde_crtc->spin_lock, flags);
list_add_tail(&fevent->list, &sde_crtc->frame_event_list);
spin_unlock_irqrestore(&sde_crtc->spin_lock, flags);
+ SDE_ATRACE_END("crtc_frame_event");
}
static void sde_crtc_frame_event_cb(void *data, u32 event)
@@ -1643,40 +1651,6 @@
kthread_queue_work(&priv->event_thread[crtc_id].worker, &fevent->work);
}
-void sde_crtc_complete_commit(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
-{
- struct sde_crtc *sde_crtc;
- struct sde_crtc_state *cstate;
- struct drm_encoder *encoder;
- int i;
- bool signal_fence = true;
-
- if (!crtc || !crtc->state) {
- SDE_ERROR("invalid crtc\n");
- return;
- }
-
- sde_crtc = to_sde_crtc(crtc);
- cstate = to_sde_crtc_state(crtc->state);
- SDE_EVT32_VERBOSE(DRMID(crtc));
-
- drm_for_each_encoder(encoder, crtc->dev) {
- if (encoder->crtc != crtc)
- continue;
-
- signal_fence &= !sde_encoder_is_cmd_mode(encoder);
- }
-
- /* signal release fence for non-cmd mode panels */
- if (signal_fence)
- sde_fence_signal(&sde_crtc->output_fence, 0);
-
- /* signal retire fence */
- for (i = 0; i < cstate->num_connectors; ++i)
- sde_connector_complete_commit(cstate->connectors[i]);
-}
-
/**
* _sde_crtc_set_input_fence_timeout - update ns version of in fence timeout
* @cstate: Pointer to sde crtc state
@@ -1925,7 +1899,7 @@
cstate->lm_bounds[i].h = adj_mode->vdisplay;
memcpy(&cstate->lm_roi[i], &cstate->lm_bounds[i],
sizeof(cstate->lm_roi[i]));
- SDE_EVT32(DRMID(crtc), i,
+ SDE_EVT32_VERBOSE(DRMID(crtc), i,
cstate->lm_bounds[i].x, cstate->lm_bounds[i].y,
cstate->lm_bounds[i].w, cstate->lm_bounds[i].h);
SDE_DEBUG("%s: lm%d bnd&roi (%d,%d,%d,%d)\n", sde_crtc->name, i,
@@ -2045,6 +2019,16 @@
if (unlikely(!sde_crtc->num_mixers))
return;
+ /*
+ * For planes without commit update, drm framework will not add
+ * those planes to current state since hardware update is not
+ * required. However, if those planes were power collapsed since
+ * last commit cycle, driver has to restore the hardware state
+ * of those planes explicitly here prior to plane flush.
+ */
+ drm_atomic_crtc_for_each_plane(plane, crtc)
+ sde_plane_restore(plane);
+
/* wait for acquire fences before anything else is done */
_sde_crtc_wait_for_fences(crtc);
@@ -2100,7 +2084,7 @@
/* destroy value helper */
msm_property_destroy_state(&sde_crtc->property_info, cstate,
- cstate->property_values, cstate->property_blobs);
+ &cstate->property_state);
}
static int _sde_crtc_wait_for_frame_done(struct drm_crtc *crtc)
@@ -2119,7 +2103,7 @@
return 0;
}
- SDE_EVT32(DRMID(crtc), SDE_EVTLOG_FUNC_ENTRY);
+ SDE_EVT32_VERBOSE(DRMID(crtc), SDE_EVTLOG_FUNC_ENTRY);
ret = wait_for_completion_timeout(&sde_crtc->frame_done_comp,
msecs_to_jiffies(SDE_FRAME_DONE_TIMEOUT));
if (!ret) {
@@ -2128,7 +2112,7 @@
SDE_EVT32(DRMID(crtc), SDE_EVTLOG_FATAL);
rc = -ETIMEDOUT;
}
- SDE_EVT32(DRMID(crtc), SDE_EVTLOG_FUNC_EXIT);
+ SDE_EVT32_VERBOSE(DRMID(crtc), SDE_EVTLOG_FUNC_EXIT);
return rc;
}
@@ -2141,6 +2125,7 @@
struct msm_drm_private *priv;
struct sde_kms *sde_kms;
struct sde_crtc_state *cstate;
+ int ret;
if (!crtc) {
SDE_ERROR("invalid argument\n");
@@ -2149,6 +2134,12 @@
dev = crtc->dev;
sde_crtc = to_sde_crtc(crtc);
sde_kms = _sde_crtc_get_kms(crtc);
+
+ if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private) {
+ SDE_ERROR("invalid argument\n");
+ return;
+ }
+
priv = sde_kms->dev->dev_private;
cstate = to_sde_crtc_state(crtc->state);
@@ -2178,7 +2169,10 @@
}
/* wait for frame_event_done completion */
- if (_sde_crtc_wait_for_frame_done(crtc)) {
+ SDE_ATRACE_BEGIN("wait_for_frame_done_event");
+ ret = _sde_crtc_wait_for_frame_done(crtc);
+ SDE_ATRACE_END("wait_for_frame_done_event");
+ if (ret) {
SDE_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
crtc->base.id,
atomic_read(&sde_crtc->frame_pending));
@@ -2309,17 +2303,17 @@
}
/*
- * If the vblank refcount != 0, release a power reference on suspend
- * and take it back during resume (if it is still != 0).
+ * If the vblank is enabled, release a power reference on suspend
+ * and take it back during resume (if it is still enabled).
*/
if (sde_crtc->suspend == enable)
SDE_DEBUG("crtc%d suspend already set to %d, ignoring update\n",
crtc->base.id, enable);
- else if (atomic_read(&sde_crtc->vblank_refcount) != 0)
+ else if (sde_crtc->vblank_enable)
_sde_crtc_vblank_enable_nolock(sde_crtc, !enable);
sde_crtc->suspend = enable;
- msm_mode_object_event_nofity(&crtc->base, crtc->dev, &event,
+ msm_mode_object_event_notify(&crtc->base, crtc->dev, &event,
(u8 *)&power_on);
mutex_unlock(&sde_crtc->crtc_lock);
}
@@ -2350,7 +2344,7 @@
/* duplicate value helper */
msm_property_duplicate_state(&sde_crtc->property_info,
old_cstate, cstate,
- cstate->property_values, cstate->property_blobs);
+ &cstate->property_state, cstate->property_values);
/* duplicate base helper */
__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);
@@ -2395,7 +2389,8 @@
/* reset value helper */
msm_property_reset_state(&sde_crtc->property_info, cstate,
- cstate->property_values, cstate->property_blobs);
+ &cstate->property_state,
+ cstate->property_values);
_sde_crtc_set_input_fence_timeout(cstate);
@@ -2410,25 +2405,15 @@
if (!sde_crtc) {
SDE_ERROR("invalid crtc\n");
return -EINVAL;
- } else if (en && atomic_inc_return(&sde_crtc->vblank_refcount) == 1) {
- SDE_DEBUG("crtc%d vblank enable\n", sde_crtc->base.base.id);
- if (!sde_crtc->suspend)
- _sde_crtc_vblank_enable_nolock(sde_crtc, true);
- } else if (!en && atomic_read(&sde_crtc->vblank_refcount) < 1) {
- SDE_ERROR("crtc%d invalid vblank disable\n",
- sde_crtc->base.base.id);
- return -EINVAL;
- } else if (!en && atomic_dec_return(&sde_crtc->vblank_refcount) == 0) {
- SDE_DEBUG("crtc%d vblank disable\n", sde_crtc->base.base.id);
- if (!sde_crtc->suspend)
- _sde_crtc_vblank_enable_nolock(sde_crtc, false);
- } else {
- SDE_DEBUG("crtc%d vblank %s refcount:%d\n",
- sde_crtc->base.base.id,
- en ? "enable" : "disable",
- atomic_read(&sde_crtc->vblank_refcount));
}
+ if (!sde_crtc->base.enabled || sde_crtc->suspend)
+ SDE_EVT32(DRMID(&sde_crtc->base), sde_crtc->base.enabled, en,
+ sde_crtc->vblank_enable, sde_crtc->suspend);
+ else if (sde_crtc->vblank_enable != en)
+ _sde_crtc_vblank_enable_nolock(sde_crtc, en);
+ sde_crtc->vblank_enable = en;
+
return 0;
}
@@ -2437,6 +2422,8 @@
struct drm_crtc *crtc = arg;
struct sde_crtc *sde_crtc;
struct drm_encoder *encoder;
+ struct drm_event event;
+ u32 power_on = 0;
if (!crtc) {
SDE_ERROR("invalid crtc\n");
@@ -2456,7 +2443,13 @@
sde_encoder_virt_restore(encoder);
}
+ sde_cp_crtc_post_ipc(crtc);
+ event.type = DRM_EVENT_SDE_POWER;
+ event.length = sizeof(power_on);
+ power_on = 1;
+ msm_mode_object_event_notify(&crtc->base, crtc->dev, &event,
+ (u8 *)&power_on);
} else if (event_type == SDE_POWER_EVENT_POST_DISABLE) {
struct drm_plane *plane;
@@ -2466,6 +2459,16 @@
*/
drm_atomic_crtc_for_each_plane(plane, crtc)
sde_plane_set_revalidate(plane, true);
+
+ sde_cp_crtc_suspend(crtc);
+
+ event.type = DRM_EVENT_SDE_POWER;
+ event.length = sizeof(power_on);
+ power_on = 0;
+ msm_mode_object_event_notify(&crtc->base, crtc->dev, &event,
+ (u8 *)&power_on);
+ } else if (event_type == SDE_POWER_EVENT_PRE_DISABLE) {
+ sde_cp_crtc_pre_ipc(crtc);
}
mutex_unlock(&sde_crtc->crtc_lock);
@@ -2503,14 +2506,12 @@
crtc->base.id,
atomic_read(&sde_crtc->frame_pending));
- if (atomic_read(&sde_crtc->vblank_refcount) && !sde_crtc->suspend) {
- SDE_ERROR("crtc%d invalid vblank refcount\n",
+ if (sde_crtc->vblank_enable && !sde_crtc->suspend) {
+ SDE_DEBUG("crtc%d vblank left enabled at disable time\n",
crtc->base.id);
- SDE_EVT32(DRMID(crtc), atomic_read(&sde_crtc->vblank_refcount),
- SDE_EVTLOG_FUNC_CASE1);
- while (atomic_read(&sde_crtc->vblank_refcount))
- if (_sde_crtc_vblank_no_lock(sde_crtc, false))
- break;
+ SDE_EVT32(DRMID(crtc), sde_crtc->vblank_enable,
+ SDE_EVTLOG_FUNC_CASE1);
+ _sde_crtc_vblank_enable_nolock(sde_crtc, false);
}
if (atomic_read(&sde_crtc->frame_pending)) {
@@ -2520,6 +2521,17 @@
atomic_set(&sde_crtc->frame_pending, 0);
}
+ spin_lock_irqsave(&sde_crtc->spin_lock, flags);
+ list_for_each_entry(node, &sde_crtc->user_event_list, list) {
+ ret = 0;
+ if (node->func)
+ ret = node->func(crtc, false, &node->irq);
+ if (ret)
+ SDE_ERROR("%s failed to disable event %x\n",
+ sde_crtc->name, node->event);
+ }
+ spin_unlock_irqrestore(&sde_crtc->spin_lock, flags);
+
sde_core_perf_crtc_update(crtc, 0, true);
drm_for_each_encoder(encoder, crtc->dev) {
@@ -2541,17 +2553,6 @@
cstate->bw_control = false;
cstate->bw_split_vote = false;
- spin_lock_irqsave(&sde_crtc->spin_lock, flags);
- list_for_each_entry(node, &sde_crtc->user_event_list, list) {
- ret = 0;
- if (node->func)
- ret = node->func(crtc, false, &node->irq);
- if (ret)
- SDE_ERROR("%s failed to disable event %x\n",
- sde_crtc->name, node->event);
- }
- spin_unlock_irqrestore(&sde_crtc->spin_lock, flags);
-
mutex_unlock(&sde_crtc->crtc_lock);
}
@@ -2581,6 +2582,16 @@
sde_crtc_frame_event_cb, (void *)crtc);
}
+ mutex_lock(&sde_crtc->crtc_lock);
+ if (sde_crtc->vblank_enable) {
+ /* honor user vblank request on crtc while it was disabled */
+ SDE_DEBUG("%s vblank found enabled at crtc enable time\n",
+ sde_crtc->name);
+ SDE_EVT32(DRMID(crtc), sde_crtc->vblank_enable);
+ _sde_crtc_vblank_enable_nolock(sde_crtc, true);
+ }
+ mutex_unlock(&sde_crtc->crtc_lock);
+
spin_lock_irqsave(&sde_crtc->spin_lock, flags);
list_for_each_entry(node, &sde_crtc->user_event_list, list) {
ret = 0;
@@ -2594,7 +2605,8 @@
sde_crtc->power_event = sde_power_handle_register_event(
&priv->phandle,
- SDE_POWER_EVENT_POST_ENABLE | SDE_POWER_EVENT_POST_DISABLE,
+ SDE_POWER_EVENT_POST_ENABLE | SDE_POWER_EVENT_POST_DISABLE |
+ SDE_POWER_EVENT_PRE_DISABLE,
sde_crtc_handle_power_event, crtc, sde_crtc->name);
}
@@ -2634,7 +2646,7 @@
for (i = curr_cnt; i < cnt; i++) {
pstate = pstates[i].drm_pstate;
POPULATE_RECT(&dst_rect, pstate->crtc_x, pstate->crtc_y,
- pstate->crtc_w, pstate->crtc_h, true);
+ pstate->crtc_w, pstate->crtc_h, false);
sde_kms_rect_intersect(&dst_rect, excl_rect, &intersect);
if (intersect.w == excl_rect->w && intersect.h == excl_rect->h
@@ -2699,6 +2711,130 @@
return rc;
}
+static int _sde_crtc_find_plane_fb_modes(struct drm_crtc_state *state,
+ uint32_t *fb_ns,
+ uint32_t *fb_sec,
+ uint32_t *fb_ns_dir,
+ uint32_t *fb_sec_dir)
+{
+ struct drm_plane *plane;
+ const struct drm_plane_state *pstate;
+ struct sde_plane_state *sde_pstate;
+ uint32_t mode = 0;
+ int rc;
+
+ if (!state) {
+ SDE_ERROR("invalid state\n");
+ return -EINVAL;
+ }
+
+ *fb_ns = 0;
+ *fb_sec = 0;
+ *fb_ns_dir = 0;
+ *fb_sec_dir = 0;
+ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
+ if (IS_ERR_OR_NULL(pstate)) {
+ rc = PTR_ERR(pstate);
+ SDE_ERROR("crtc%d failed to get plane%d state%d\n",
+ state->crtc->base.id,
+ plane->base.id, rc);
+ return rc;
+ }
+ sde_pstate = to_sde_plane_state(pstate);
+ mode = sde_plane_get_property(sde_pstate,
+ PLANE_PROP_FB_TRANSLATION_MODE);
+ switch (mode) {
+ case SDE_DRM_FB_NON_SEC:
+ (*fb_ns)++;
+ break;
+ case SDE_DRM_FB_SEC:
+ (*fb_sec)++;
+ break;
+ case SDE_DRM_FB_NON_SEC_DIR_TRANS:
+ (*fb_ns_dir)++;
+ break;
+ case SDE_DRM_FB_SEC_DIR_TRANS:
+ (*fb_sec_dir)++;
+ break;
+ default:
+ SDE_ERROR("Error: Plane[%d], fb_trans_mode:%d",
+ plane->base.id,
+ mode);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int _sde_crtc_check_secure_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct drm_encoder *encoder;
+ struct sde_crtc_state *cstate;
+ uint32_t secure;
+ uint32_t fb_ns = 0, fb_sec = 0, fb_ns_dir = 0, fb_sec_dir = 0;
+ int encoder_cnt = 0;
+ int rc;
+
+ if (!crtc || !state) {
+ SDE_ERROR("invalid arguments\n");
+ return -EINVAL;
+ }
+
+ cstate = to_sde_crtc_state(state);
+
+ secure = sde_crtc_get_property(cstate,
+ CRTC_PROP_SECURITY_LEVEL);
+
+ rc = _sde_crtc_find_plane_fb_modes(state,
+ &fb_ns,
+ &fb_sec,
+ &fb_ns_dir,
+ &fb_sec_dir);
+ if (rc)
+ return rc;
+
+ /**
+ * validate planes
+ * fb_ns_dir is for secure display use case,
+ * fb_sec_dir is for secure camera preview use case,
+ * fb_sec is for secure video playback,
+ * fb_ns is for normal non secure use cases.
+ */
+ if (((secure == SDE_DRM_SEC_ONLY) &&
+ (fb_ns || fb_sec || fb_sec_dir)) ||
+ (fb_sec || fb_sec_dir)) {
+ SDE_ERROR(
+ "crtc%d: invalid planes fb_modes Sec:%d, NS:%d, Sec_Dir:%d, NS_Dir%d\n",
+ crtc->base.id,
+ fb_sec, fb_ns, fb_sec_dir,
+ fb_ns_dir);
+ return -EINVAL;
+ }
+
+	/**
+	 * secure_crtc is not allowed in a shared topology
+	 * across different encoders.
+	 */
+ if (fb_ns_dir || fb_sec_dir) {
+ drm_for_each_encoder(encoder, crtc->dev)
+ if (encoder->crtc == crtc)
+ encoder_cnt++;
+
+ if (encoder_cnt >
+ MAX_ALLOWED_ENCODER_CNT_PER_SECURE_CRTC) {
+ SDE_ERROR(
+ "crtc%d, invalid virtual encoder crtc%d\n",
+ crtc->base.id,
+ encoder_cnt);
+ return -EINVAL;
+
+ }
+ }
+ SDE_DEBUG("crtc:%d Secure validation successful\n", crtc->base.id);
+ return 0;
+}
+
static int sde_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
@@ -2745,6 +2881,10 @@
_sde_crtc_setup_is_ppsplit(state);
_sde_crtc_setup_lm_bounds(crtc, state);
+ rc = _sde_crtc_check_secure_state(crtc, state);
+ if (rc)
+ return rc;
+
/* get plane state for all drm planes associated with crtc state */
drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
if (IS_ERR_OR_NULL(pstate)) {
@@ -2764,7 +2904,8 @@
/* check dim layer stage with every plane */
for (i = 0; i < cstate->num_dim_layers; i++) {
- if (pstates[cnt].stage == cstate->dim_layer[i].stage) {
+ if (cstate->dim_layer[i].stage
+ == (pstates[cnt].stage + SDE_STAGE_0)) {
SDE_ERROR(
"plane:%d/dim_layer:%i-same stage:%d\n",
plane->base.id, i,
@@ -2805,8 +2946,10 @@
sde_plane_clear_multirect(pipe_staged[i]);
if (is_sde_plane_virtual(pipe_staged[i]->plane)) {
- SDE_ERROR("invalid use of virtual plane: %d\n",
+ SDE_ERROR(
+ "r1 only virt plane:%d not supported\n",
pipe_staged[i]->plane->base.id);
+ rc = -EINVAL;
goto end;
}
}
@@ -3001,6 +3144,10 @@
struct drm_device *dev;
struct sde_kms_info *info;
struct sde_kms *sde_kms;
+ static const struct drm_prop_enum_list e_secure_level[] = {
+ {SDE_DRM_SEC_NON_SEC, "sec_and_non_sec"},
+ {SDE_DRM_SEC_ONLY, "sec_only"},
+ };
SDE_DEBUG("\n");
@@ -3013,6 +3160,11 @@
dev = crtc->dev;
sde_kms = _sde_crtc_get_kms(crtc);
+ if (!sde_kms) {
+ SDE_ERROR("invalid argument\n");
+ return;
+ }
+
info = kzalloc(sizeof(struct sde_kms_info), GFP_KERNEL);
if (!info) {
SDE_ERROR("failed to allocate info memory\n");
@@ -3074,6 +3226,11 @@
msm_property_install_volatile_range(&sde_crtc->property_info,
"sde_drm_roi_v1", 0x0, 0, ~0, 0, CRTC_PROP_ROI_V1);
+ msm_property_install_enum(&sde_crtc->property_info, "security_level",
+ 0x0, 0, e_secure_level,
+ ARRAY_SIZE(e_secure_level),
+ CRTC_PROP_SECURITY_LEVEL);
+
sde_kms_info_reset(info);
if (catalog->has_dim_layer) {
@@ -3168,8 +3325,7 @@
sde_crtc = to_sde_crtc(crtc);
cstate = to_sde_crtc_state(state);
ret = msm_property_atomic_set(&sde_crtc->property_info,
- cstate->property_values, cstate->property_blobs,
- property, val);
+ &cstate->property_state, property, val);
if (!ret) {
idx = msm_property_index(&sde_crtc->property_info,
property);
@@ -3243,8 +3399,10 @@
{
struct sde_crtc *sde_crtc;
struct sde_crtc_state *cstate;
+ struct drm_encoder *encoder;
int i, ret = -EINVAL;
bool conn_offset = 0;
+ bool is_cmd = true;
if (!crtc || !state) {
SDE_ERROR("invalid argument(s)\n");
@@ -3259,19 +3417,36 @@
break;
}
+ /**
+ * set the cmd flag only when all the encoders attached
+ * to the crtc are in cmd mode. Consider all other cases
+ * as video mode.
+ */
+ drm_for_each_encoder(encoder, crtc->dev) {
+ if (encoder->crtc == crtc)
+ is_cmd &= sde_encoder_is_cmd_mode(encoder);
+ }
+
i = msm_property_index(&sde_crtc->property_info, property);
if (i == CRTC_PROP_OUTPUT_FENCE) {
uint32_t offset = sde_crtc_get_property(cstate,
CRTC_PROP_OUTPUT_FENCE_OFFSET);
+ /**
+ * set the offset to 0 only for cmd mode panels, so
+ * the release fence for the current frame can be
+ * triggered right after PP_DONE interrupt.
+ */
+ offset = is_cmd ? 0 : (offset + conn_offset);
+
ret = sde_fence_create(&sde_crtc->output_fence, val,
- offset + conn_offset);
+ offset);
if (ret)
SDE_ERROR("fence create failed\n");
} else {
ret = msm_property_atomic_get(&sde_crtc->property_info,
- cstate->property_values,
- cstate->property_blobs, property, val);
+ &cstate->property_state,
+ property, val);
if (ret)
ret = sde_cp_crtc_get_property(crtc,
property, val);
@@ -3413,8 +3588,7 @@
sde_crtc->vblank_cb_time = ktime_set(0, 0);
}
- seq_printf(s, "vblank_refcount:%d\n",
- atomic_read(&sde_crtc->vblank_refcount));
+ seq_printf(s, "vblank_enable:%d\n", sde_crtc->vblank_enable);
mutex_unlock(&sde_crtc->crtc_lock);
@@ -3780,7 +3954,6 @@
crtc = &sde_crtc->base;
crtc->dev = dev;
- atomic_set(&sde_crtc->vblank_refcount, 0);
mutex_init(&sde_crtc->crtc_lock);
spin_lock_init(&sde_crtc->spin_lock);
@@ -3965,3 +4138,13 @@
{
return 0;
}
+
+static int sde_crtc_pm_event_handler(struct drm_crtc *crtc, bool en,
+ struct sde_irq_callback *noirq)
+{
+ /*
+ * IRQ object noirq is not being used here since there is
+ * no crtc irq from pm event.
+ */
+ return 0;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index 0d72ff1..439aeac 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -30,7 +30,7 @@
#define SDE_CRTC_NAME_SIZE 12
/* define the maximum number of in-flight frame events */
-#define SDE_CRTC_FRAME_EVENT_SIZE 2
+#define SDE_CRTC_FRAME_EVENT_SIZE 4
/**
* enum sde_crtc_client_type: crtc client type
@@ -123,7 +123,7 @@
* @vblank_cb_count : count of vblank callback since last reset
* @play_count : frame count between crtc enable and disable
* @vblank_cb_time : ktime at vblank count reset
- * @vblank_refcount : reference count for vblank enable request
+ * @vblank_enable : whether the user has requested vblank events
* @suspend : whether or not a suspend operation is in progress
* @feature_list : list of color processing features supported on a crtc
* @active_list : list of color processing features are active
@@ -171,7 +171,7 @@
u32 vblank_cb_count;
u64 play_count;
ktime_t vblank_cb_time;
- atomic_t vblank_refcount;
+ bool vblank_enable;
bool suspend;
struct list_head feature_list;
@@ -269,9 +269,9 @@
* @lm_roi : Current LM ROI, possibly sub-rectangle of mode.
* Origin top left of CRTC.
* @user_roi_list : List of user's requested ROIs as from set property
+ * @property_state: Local storage for msm_prop properties
* @property_values: Current crtc property values
* @input_fence_timeout_ns : Cached input fence timeout, in ns
- * @property_blobs: Reference pointers for blob properties
* @num_dim_layers: Number of dim layers
* @dim_layer: Dim layer configs
* @new_perf: new performance state being requested
@@ -296,9 +296,9 @@
struct sde_rect lm_roi[CRTC_DUAL_MIXERS];
struct msm_roi_list user_roi_list;
- uint64_t property_values[CRTC_PROP_COUNT];
+ struct msm_property_state property_state;
+ struct msm_property_value property_values[CRTC_PROP_COUNT];
uint64_t input_fence_timeout_ns;
- struct drm_property_blob *property_blobs[CRTC_PROP_COUNT];
uint32_t num_dim_layers;
struct sde_hw_dim_layer dim_layer[SDE_MAX_DIM_LAYERS];
@@ -320,7 +320,7 @@
* Returns: Integer value of requested property
*/
#define sde_crtc_get_property(S, X) \
- ((S) && ((X) < CRTC_PROP_COUNT) ? ((S)->property_values[(X)]) : 0)
+ ((S) && ((X) < CRTC_PROP_COUNT) ? ((S)->property_values[(X)].value) : 0)
static inline int sde_crtc_mixer_width(struct sde_crtc *sde_crtc,
struct drm_display_mode *mode)
@@ -369,14 +369,6 @@
struct drm_crtc_state *old_state);
/**
- * sde_crtc_complete_commit - callback signalling completion of current commit
- * @crtc: Pointer to drm crtc object
- * @old_state: Pointer to drm crtc old state object
- */
-void sde_crtc_complete_commit(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state);
-
-/**
* sde_crtc_init - create a new crtc object
* @dev: sde device
* @plane: base plane
@@ -500,4 +492,21 @@
void sde_crtc_get_crtc_roi(struct drm_crtc_state *state,
const struct sde_rect **crtc_roi);
+/** sde_crtc_get_secure_level - retrieve the secure level from the given state
+ *	object, this is used to determine the secure state of the crtc
+ * @crtc : Pointer to drm crtc structure
+ * @state: Pointer to drm crtc state
+ * return: secure_level
+ */
+static inline int sde_crtc_get_secure_level(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ if (!crtc || !state)
+ return -EINVAL;
+
+ return sde_crtc_get_property(to_sde_crtc_state(state),
+ CRTC_PROP_SECURITY_LEVEL);
+}
+
+
#endif /* _SDE_CRTC_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 09882cd..4ba2b75 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -17,6 +17,7 @@
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/sde_rsc.h>
@@ -71,6 +72,7 @@
#define MISR_BUFF_SIZE 256
#define IDLE_TIMEOUT 64
+#define IDLE_SHORT_TIMEOUT 1
/**
* enum sde_enc_rc_events - events for resource control state machine
@@ -158,6 +160,7 @@
* @rsc_client: rsc client pointer
* @rsc_state_init: boolean to indicate rsc config init
* @disp_info: local copy of msm_display_info struct
+ * @mode_info: local copy of msm_mode_info struct
* @misr_enable: misr enable/disable status
* @idle_pc_supported: indicate if idle power collaps is supported
* @rc_lock: resource control mutex lock to protect
@@ -201,12 +204,13 @@
struct sde_rsc_client *rsc_client;
bool rsc_state_init;
struct msm_display_info disp_info;
+ struct msm_mode_info mode_info;
bool misr_enable;
bool idle_pc_supported;
struct mutex rc_lock;
enum sde_enc_rc_states rc_state;
- struct delayed_work delayed_off_work;
+ struct kthread_delayed_work delayed_off_work;
struct msm_display_topology topology;
bool mode_set_complete;
@@ -227,7 +231,7 @@
return false;
sde_enc = to_sde_encoder_virt(drm_enc);
- comp_info = &sde_enc->disp_info.comp_info;
+ comp_info = &sde_enc->mode_info.comp_info;
return (comp_info->comp_type == MSM_DISPLAY_COMPRESSION_DSC);
}
@@ -333,9 +337,9 @@
SDE_DEBUG_PHYS(phys_enc, "pending_cnt %d\n",
atomic_read(wait_info->atomic_cnt));
- SDE_EVT32(DRMID(phys_enc->parent), irq->hw_idx,
- atomic_read(wait_info->atomic_cnt),
- SDE_EVTLOG_FUNC_ENTRY);
+ SDE_EVT32_VERBOSE(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx, phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(wait_info->atomic_cnt), SDE_EVTLOG_FUNC_ENTRY);
ret = sde_encoder_helper_wait_event_timeout(
DRMID(phys_enc->parent),
@@ -348,9 +352,10 @@
if (irq_status) {
unsigned long flags;
- SDE_EVT32(DRMID(phys_enc->parent),
- irq->hw_idx,
- atomic_read(wait_info->atomic_cnt));
+ SDE_EVT32(DRMID(phys_enc->parent), intr_idx,
+ irq->hw_idx, irq->irq_idx,
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(wait_info->atomic_cnt));
SDE_DEBUG_PHYS(phys_enc,
"done but irq %d not triggered\n",
irq->irq_idx);
@@ -360,13 +365,22 @@
ret = 0;
} else {
ret = -ETIMEDOUT;
+ SDE_EVT32(DRMID(phys_enc->parent), intr_idx,
+ irq->hw_idx, irq->irq_idx,
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(wait_info->atomic_cnt), irq_status,
+ SDE_EVTLOG_ERROR);
}
} else {
ret = 0;
+ SDE_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx, phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(wait_info->atomic_cnt));
}
- SDE_EVT32(DRMID(phys_enc->parent), irq->hw_idx, ret,
- SDE_EVTLOG_FUNC_EXIT);
+ SDE_EVT32_VERBOSE(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx, ret, phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(wait_info->atomic_cnt), SDE_EVTLOG_FUNC_EXIT);
return ret;
}
@@ -417,6 +431,9 @@
sde_core_irq_unregister_callback(phys_enc->sde_kms,
irq->irq_idx, &irq->cb);
+
+ SDE_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx, SDE_EVTLOG_ERROR);
irq->irq_idx = -EINVAL;
return ret;
}
@@ -432,6 +449,7 @@
enum sde_intr_idx intr_idx)
{
struct sde_encoder_irq *irq;
+ int ret;
if (!phys_enc) {
SDE_ERROR("invalid encoder\n");
@@ -440,17 +458,32 @@
irq = &phys_enc->irq[intr_idx];
/* silently skip irqs that weren't registered */
- if (irq->irq_idx < 0)
+ if (irq->irq_idx < 0) {
+ SDE_ERROR(
+ "extra unregister irq, enc%d intr_idx:0x%x hw_idx:0x%x irq_idx:0x%x\n",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx);
+ SDE_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx, SDE_EVTLOG_ERROR);
return 0;
+ }
- sde_core_irq_disable(phys_enc->sde_kms, &irq->irq_idx, 1);
- sde_core_irq_unregister_callback(phys_enc->sde_kms, irq->irq_idx,
+ ret = sde_core_irq_disable(phys_enc->sde_kms, &irq->irq_idx, 1);
+ if (ret)
+ SDE_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx, ret, SDE_EVTLOG_ERROR);
+
+ ret = sde_core_irq_unregister_callback(phys_enc->sde_kms, irq->irq_idx,
&irq->cb);
- irq->irq_idx = -EINVAL;
+ if (ret)
+ SDE_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx, ret, SDE_EVTLOG_ERROR);
SDE_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx, irq->irq_idx);
SDE_DEBUG_PHYS(phys_enc, "unregistered %d\n", irq->irq_idx);
+ irq->irq_idx = -EINVAL;
+
return 0;
}
@@ -481,7 +514,7 @@
phys->ops.get_hw_resources(phys, hw_res, conn_state);
}
- hw_res->topology = sde_enc->topology;
+ hw_res->topology = sde_enc->mode_info.topology;
}
void sde_encoder_destroy(struct drm_encoder *drm_enc)
@@ -605,7 +638,7 @@
cur_mode->hdisplay == adj_mode->hdisplay &&
cur_mode->vrefresh == adj_mode->vrefresh) {
adj_mode->private = cur_mode->private;
- adj_mode->private_flags = cur_mode->private_flags;
+ adj_mode->private_flags |= cur_mode->private_flags;
}
}
}
@@ -843,7 +876,7 @@
struct sde_encoder_phys *enc_master = sde_enc->cur_master;
const struct sde_rect *roi = &sde_enc->cur_conn_roi;
struct msm_display_dsc_info *dsc =
- &sde_enc->disp_info.comp_info.dsc_info;
+ &sde_enc->mode_info.comp_info.dsc_info;
if (dsc == NULL || hw_dsc == NULL || hw_pp == NULL || !enc_master) {
SDE_ERROR_ENC(sde_enc, "invalid params for DSC\n");
@@ -908,8 +941,8 @@
if (enc_master->intf_mode == INTF_MODE_VIDEO)
dsc_common_mode |= DSC_MODE_VIDEO;
- memcpy(&dsc[0], &sde_enc->disp_info.comp_info.dsc_info, sizeof(dsc[0]));
- memcpy(&dsc[1], &sde_enc->disp_info.comp_info.dsc_info, sizeof(dsc[1]));
+ memcpy(&dsc[0], &sde_enc->mode_info.comp_info.dsc_info, sizeof(dsc[0]));
+ memcpy(&dsc[1], &sde_enc->mode_info.comp_info.dsc_info, sizeof(dsc[1]));
/*
* Since both DSC use same pic dimension, set same pic dimension
@@ -973,7 +1006,7 @@
struct sde_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
struct sde_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
struct msm_display_dsc_info *dsc =
- &sde_enc->disp_info.comp_info.dsc_info;
+ &sde_enc->mode_info.comp_info.dsc_info;
bool half_panel_partial_update;
int i;
@@ -1101,6 +1134,69 @@
return ret;
}
+static void _sde_encoder_update_vsync_source(struct sde_encoder_virt *sde_enc,
+ struct msm_display_info *disp_info, bool is_dummy)
+{
+ struct sde_vsync_source_cfg vsync_cfg = { 0 };
+ struct msm_drm_private *priv;
+ struct sde_kms *sde_kms;
+ struct sde_hw_mdp *hw_mdptop;
+ struct drm_encoder *drm_enc;
+ struct msm_mode_info *mode_info;
+ int i;
+
+ if (!sde_enc || !disp_info) {
+ SDE_ERROR("invalid param sde_enc:%d or disp_info:%d\n",
+ sde_enc != NULL, disp_info != NULL);
+ return;
+ } else if (sde_enc->num_phys_encs > ARRAY_SIZE(sde_enc->hw_pp)) {
+ SDE_ERROR("invalid num phys enc %d/%d\n",
+ sde_enc->num_phys_encs,
+ (int) ARRAY_SIZE(sde_enc->hw_pp));
+ return;
+ }
+
+ drm_enc = &sde_enc->base;
+ /* this pointers are checked in virt_enable_helper */
+ priv = drm_enc->dev->dev_private;
+
+ sde_kms = to_sde_kms(priv->kms);
+ if (!sde_kms) {
+ SDE_ERROR("invalid sde_kms\n");
+ return;
+ }
+
+ hw_mdptop = sde_kms->hw_mdp;
+ if (!hw_mdptop) {
+ SDE_ERROR("invalid mdptop\n");
+ return;
+ }
+
+ mode_info = &sde_enc->mode_info;
+ if (!mode_info) {
+ SDE_ERROR("invalid mode info\n");
+ return;
+ }
+
+ if (hw_mdptop->ops.setup_vsync_source &&
+ disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) {
+ for (i = 0; i < sde_enc->num_phys_encs; i++)
+ vsync_cfg.ppnumber[i] = sde_enc->hw_pp[i]->idx;
+
+ vsync_cfg.pp_count = sde_enc->num_phys_encs;
+ vsync_cfg.frame_rate = mode_info->frame_rate;
+ if (is_dummy)
+ vsync_cfg.vsync_source = SDE_VSYNC_SOURCE_WD_TIMER_1;
+ else if (disp_info->is_te_using_watchdog_timer)
+ vsync_cfg.vsync_source = SDE_VSYNC_SOURCE_WD_TIMER_0;
+ else
+ vsync_cfg.vsync_source = SDE_VSYNC0_SOURCE_GPIO;
+ vsync_cfg.is_dummy = is_dummy;
+
+ hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg);
+ }
+}
+
static int sde_encoder_update_rsc_client(
struct drm_encoder *drm_enc,
struct sde_encoder_rsc_config *config, bool enable)
@@ -1110,6 +1206,7 @@
struct sde_rsc_cmd_config rsc_config;
int ret;
struct msm_display_info *disp_info;
+ struct msm_mode_info *mode_info;
if (!drm_enc) {
SDE_ERROR("invalid encoder\n");
@@ -1118,6 +1215,7 @@
sde_enc = to_sde_encoder_virt(drm_enc);
disp_info = &sde_enc->disp_info;
+ mode_info = &sde_enc->mode_info;
if (!sde_enc->rsc_client) {
SDE_DEBUG("rsc client not created\n");
@@ -1140,10 +1238,11 @@
if (rsc_state != SDE_RSC_IDLE_STATE && !sde_enc->rsc_state_init
&& disp_info->is_primary) {
- rsc_config.fps = disp_info->frame_rate;
- rsc_config.vtotal = disp_info->vtotal;
- rsc_config.prefill_lines = disp_info->prefill_lines;
- rsc_config.jitter = disp_info->jitter;
+ rsc_config.fps = mode_info->frame_rate;
+ rsc_config.vtotal = mode_info->vtotal;
+ rsc_config.prefill_lines = mode_info->prefill_lines;
+ rsc_config.jitter_numer = mode_info->jitter_numer;
+ rsc_config.jitter_denom = mode_info->jitter_denom;
rsc_config.prefill_lines += config ?
config->inline_rotate_prefill : 0;
/* update it only once */
@@ -1216,6 +1315,9 @@
rsc_cfg.inline_rotate_prefill =
sde_crtc_get_inline_prefill(drm_enc->crtc);
+ _sde_encoder_update_vsync_source(sde_enc, &sde_enc->disp_info,
+ false);
+
/* enable RSC */
sde_encoder_update_rsc_client(drm_enc, &rsc_cfg, true);
@@ -1224,6 +1326,14 @@
/* disable RSC */
sde_encoder_update_rsc_client(drm_enc, NULL, false);
+ /**
+ * this call is for hardware workaround on sdm845 and should
+ * not be removed without considering the design changes for
+ * sde rsc + command mode concurrency. It may lead to pp
+ * timeout due to vsync from panel for command mode panel.
+ */
+ _sde_encoder_update_vsync_source(sde_enc, &sde_enc->disp_info,
+ true);
/* disable all the irq */
for (i = 0; i < sde_enc->num_phys_encs; i++) {
struct sde_encoder_phys *phys =
@@ -1247,13 +1357,25 @@
u32 sw_event)
{
bool schedule_off = false;
+ bool autorefresh_enabled = false;
+ unsigned int lp, idle_timeout;
struct sde_encoder_virt *sde_enc;
+ struct msm_drm_private *priv;
+ struct msm_drm_thread *disp_thread;
- if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
+ if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private ||
+ !drm_enc->crtc) {
SDE_ERROR("invalid parameters\n");
return -EINVAL;
}
sde_enc = to_sde_encoder_virt(drm_enc);
+ priv = drm_enc->dev->dev_private;
+
+ if (drm_enc->crtc->index >= ARRAY_SIZE(priv->disp_thread)) {
+ SDE_ERROR("invalid crtc index\n");
+ return -EINVAL;
+ }
+ disp_thread = &priv->disp_thread[drm_enc->crtc->index];
/*
* when idle_pc is not supported, process only KICKOFF and STOP
@@ -1266,13 +1388,14 @@
SDE_DEBUG_ENC(sde_enc, "sw_event:%d, idle_pc_supported:%d\n", sw_event,
sde_enc->idle_pc_supported);
- SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->idle_pc_supported,
+ SDE_EVT32_VERBOSE(DRMID(drm_enc), sw_event, sde_enc->idle_pc_supported,
sde_enc->rc_state, SDE_EVTLOG_FUNC_ENTRY);
switch (sw_event) {
case SDE_ENC_RC_EVENT_KICKOFF:
/* cancel delayed off work, if any */
- if (cancel_delayed_work_sync(&sde_enc->delayed_off_work))
+ if (kthread_cancel_delayed_work_sync(
+ &sde_enc->delayed_off_work))
SDE_DEBUG_ENC(sde_enc, "sw_event:%d, work cancelled\n",
sw_event);
@@ -1318,18 +1441,41 @@
return 0;
}
- /* schedule delayed off work */
- schedule_delayed_work(&sde_enc->delayed_off_work,
- msecs_to_jiffies(IDLE_TIMEOUT));
+ /* schedule delayed off work if autorefresh is disabled */
+ if (sde_enc->cur_master &&
+ sde_enc->cur_master->ops.is_autorefresh_enabled)
+ autorefresh_enabled =
+ sde_enc->cur_master->ops.is_autorefresh_enabled(
+ sde_enc->cur_master);
+
+ /* set idle timeout based on master connector's lp value */
+ if (sde_enc->cur_master)
+ lp = sde_connector_get_lp(
+ sde_enc->cur_master->connector);
+ else
+ lp = SDE_MODE_DPMS_ON;
+
+ if (lp == SDE_MODE_DPMS_LP2)
+ idle_timeout = IDLE_SHORT_TIMEOUT;
+ else
+ idle_timeout = IDLE_TIMEOUT;
+
+ if (!autorefresh_enabled)
+ kthread_queue_delayed_work(
+ &disp_thread->worker,
+ &sde_enc->delayed_off_work,
+ msecs_to_jiffies(idle_timeout));
SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
- SDE_EVTLOG_FUNC_CASE2);
+ autorefresh_enabled,
+ idle_timeout, SDE_EVTLOG_FUNC_CASE2);
SDE_DEBUG_ENC(sde_enc, "sw_event:%d, work scheduled\n",
sw_event);
break;
case SDE_ENC_RC_EVENT_STOP:
/* cancel delayed off work, if any */
- if (cancel_delayed_work_sync(&sde_enc->delayed_off_work))
+ if (kthread_cancel_delayed_work_sync(
+ &sde_enc->delayed_off_work))
SDE_DEBUG_ENC(sde_enc, "sw_event:%d, work cancelled\n",
sw_event);
@@ -1353,6 +1499,7 @@
SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
SDE_ENC_RC_STATE_OFF, SDE_EVTLOG_FUNC_CASE3);
+
sde_enc->rc_state = SDE_ENC_RC_STATE_OFF;
mutex_unlock(&sde_enc->rc_lock);
@@ -1360,7 +1507,8 @@
case SDE_ENC_RC_EVENT_EARLY_WAKE_UP:
/* cancel delayed off work, if any */
- if (cancel_delayed_work_sync(&sde_enc->delayed_off_work)) {
+ if (kthread_cancel_delayed_work_sync(
+ &sde_enc->delayed_off_work)) {
SDE_DEBUG_ENC(sde_enc, "sw_event:%d, work cancelled\n",
sw_event);
schedule_off = true;
@@ -1398,7 +1546,9 @@
*/
if (schedule_off && !sde_crtc_frame_pending(drm_enc->crtc)) {
/* schedule delayed off work */
- schedule_delayed_work(&sde_enc->delayed_off_work,
+ kthread_queue_delayed_work(
+ &disp_thread->worker,
+ &sde_enc->delayed_off_work,
msecs_to_jiffies(IDLE_TIMEOUT));
SDE_DEBUG_ENC(sde_enc, "sw_event:%d, work scheduled\n",
sw_event);
@@ -1413,6 +1563,22 @@
if (sde_enc->rc_state != SDE_ENC_RC_STATE_ON) {
SDE_DEBUG_ENC(sde_enc, "sw_event:%d, rc:%d !ON state\n",
sw_event, sde_enc->rc_state);
+ SDE_EVT32_VERBOSE(DRMID(drm_enc), sw_event,
+ sde_enc->rc_state);
+ mutex_unlock(&sde_enc->rc_lock);
+ return 0;
+ }
+
+ /*
+ * if we are in ON but a frame was just kicked off,
+ * ignore the IDLE event, it's probably a stale timer event
+ */
+ if (sde_enc->frame_busy_mask[0]) {
+ SDE_DEBUG_ENC(sde_enc,
+ "sw_event:%d, rc:%d frame pending\n",
+ sw_event, sde_enc->rc_state);
+ SDE_EVT32_VERBOSE(DRMID(drm_enc), sw_event,
+ sde_enc->rc_state);
mutex_unlock(&sde_enc->rc_lock);
return 0;
}
@@ -1427,20 +1593,20 @@
break;
default:
+ SDE_EVT32(DRMID(drm_enc), sw_event, SDE_EVTLOG_ERROR);
SDE_ERROR("unexpected sw_event: %d\n", sw_event);
break;
}
- SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->idle_pc_supported,
+ SDE_EVT32_VERBOSE(DRMID(drm_enc), sw_event, sde_enc->idle_pc_supported,
sde_enc->rc_state, SDE_EVTLOG_FUNC_EXIT);
return 0;
}
-static void sde_encoder_off_work(struct work_struct *work)
+static void sde_encoder_off_work(struct kthread_work *work)
{
- struct delayed_work *dw = to_delayed_work(work);
- struct sde_encoder_virt *sde_enc = container_of(dw,
- struct sde_encoder_virt, delayed_off_work);
+ struct sde_encoder_virt *sde_enc = container_of(work,
+ struct sde_encoder_virt, delayed_off_work.work);
if (!sde_enc) {
SDE_ERROR("invalid sde encoder\n");
@@ -1492,7 +1658,7 @@
sde_conn = to_sde_connector(conn);
if (sde_conn) {
- ret = sde_conn->ops.get_topology(adj_mode, &sde_enc->topology,
+ ret = sde_conn->ops.get_mode_info(adj_mode, &sde_enc->mode_info,
sde_kms->catalog->max_mixer_width);
if (ret) {
SDE_ERROR_ENC(sde_enc,
@@ -1550,15 +1716,18 @@
struct sde_encoder_virt *sde_enc = NULL;
struct msm_drm_private *priv;
struct sde_kms *sde_kms;
- struct sde_hw_mdp *hw_mdptop;
- int i = 0;
- struct sde_watchdog_te_status te_cfg = { 0 };
if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
SDE_ERROR("invalid parameters\n");
return;
}
+
priv = drm_enc->dev->dev_private;
+ sde_kms = to_sde_kms(priv->kms);
+ if (!sde_kms) {
+ SDE_ERROR("invalid sde_kms\n");
+ return;
+ }
sde_enc = to_sde_encoder_virt(drm_enc);
if (!sde_enc || !sde_enc->cur_master) {
@@ -1566,35 +1735,13 @@
return;
}
- sde_kms = to_sde_kms(priv->kms);
- hw_mdptop = sde_kms->hw_mdp;
-
- if (!hw_mdptop) {
- SDE_ERROR("invalid mdptop\n");
- return;
- }
-
- sde_kms = to_sde_kms(priv->kms);
- if (!sde_kms) {
- SDE_ERROR("invalid sde_kms\n");
- return;
- }
-
if (sde_enc->cur_master->hw_mdptop &&
sde_enc->cur_master->hw_mdptop->ops.reset_ubwc)
sde_enc->cur_master->hw_mdptop->ops.reset_ubwc(
sde_enc->cur_master->hw_mdptop,
sde_kms->catalog);
- if (hw_mdptop->ops.setup_vsync_sel) {
- for (i = 0; i < sde_enc->num_phys_encs; i++)
- te_cfg.ppnumber[i] = sde_enc->hw_pp[i]->idx;
-
- te_cfg.pp_count = sde_enc->num_phys_encs;
- te_cfg.frame_rate = sde_enc->disp_info.frame_rate;
- hw_mdptop->ops.setup_vsync_sel(hw_mdptop, &te_cfg,
- sde_enc->disp_info.is_te_using_watchdog_timer);
- }
+ _sde_encoder_update_vsync_source(sde_enc, &sde_enc->disp_info, false);
memset(&sde_enc->prv_conn_roi, 0, sizeof(sde_enc->prv_conn_roi));
memset(&sde_enc->cur_conn_roi, 0, sizeof(sde_enc->cur_conn_roi));
@@ -1628,16 +1775,21 @@
{
struct sde_encoder_virt *sde_enc = NULL;
int i, ret = 0;
+ struct msm_compression_info *comp_info = NULL;
+ struct drm_display_mode *cur_mode = NULL;
if (!drm_enc) {
SDE_ERROR("invalid encoder\n");
return;
}
sde_enc = to_sde_encoder_virt(drm_enc);
+ comp_info = &sde_enc->mode_info.comp_info;
SDE_DEBUG_ENC(sde_enc, "\n");
SDE_EVT32(DRMID(drm_enc));
+ cur_mode = &sde_enc->base.crtc->state->adjusted_mode;
+
sde_enc->cur_master = NULL;
for (i = 0; i < sde_enc->num_phys_encs; i++) {
struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
@@ -1664,11 +1816,28 @@
for (i = 0; i < sde_enc->num_phys_encs; i++) {
struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
- if (phys && (phys != sde_enc->cur_master) && phys->ops.enable)
- phys->ops.enable(phys);
+ if (!phys)
+ continue;
+
+ phys->comp_type = comp_info->comp_type;
+ if (phys != sde_enc->cur_master) {
+ /**
+ * on DMS request, the encoder will be enabled
+ * already. Invoke restore to reconfigure the
+ * new mode.
+ */
+ if (msm_is_mode_seamless_dms(cur_mode) &&
+ phys->ops.restore)
+ phys->ops.restore(phys);
+ else if (phys->ops.enable)
+ phys->ops.enable(phys);
+ }
}
- if (sde_enc->cur_master->ops.enable)
+ if (msm_is_mode_seamless_dms(cur_mode) &&
+ sde_enc->cur_master->ops.restore)
+ sde_enc->cur_master->ops.restore(sde_enc->cur_master);
+ else if (sde_enc->cur_master->ops.enable)
sde_enc->cur_master->ops.enable(sde_enc->cur_master);
_sde_encoder_virt_enable_helper(drm_enc);
@@ -1709,15 +1878,15 @@
}
}
+ if (sde_enc->cur_master && sde_enc->cur_master->ops.disable)
+ sde_enc->cur_master->ops.disable(sde_enc->cur_master);
+
/* after phys waits for frame-done, should be no more frames pending */
if (atomic_xchg(&sde_enc->frame_done_timeout, 0)) {
SDE_ERROR("enc%d timeout pending\n", drm_enc->base.id);
del_timer_sync(&sde_enc->frame_done_timer);
}
- if (sde_enc->cur_master && sde_enc->cur_master->ops.disable)
- sde_enc->cur_master->ops.disable(sde_enc->cur_master);
-
sde_encoder_resource_control(drm_enc, SDE_ENC_RC_EVENT_STOP);
if (sde_enc->cur_master) {
@@ -1847,21 +2016,41 @@
struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
unsigned int i;
- /* One of the physical encoders has become idle */
- for (i = 0; i < sde_enc->num_phys_encs; i++)
- if (sde_enc->phys_encs[i] == ready_phys) {
- clear_bit(i, sde_enc->frame_busy_mask);
- SDE_EVT32_VERBOSE(DRMID(drm_enc), i,
- sde_enc->frame_busy_mask[0]);
+ if (event & (SDE_ENCODER_FRAME_EVENT_DONE
+ | SDE_ENCODER_FRAME_EVENT_ERROR
+ | SDE_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
+
+ if (!sde_enc->frame_busy_mask[0]) {
+ /**
+ * suppress frame_done without waiter,
+ * likely autorefresh
+ */
+ SDE_EVT32(DRMID(drm_enc), event, ready_phys->intf_idx);
+ return;
}
- if (!sde_enc->frame_busy_mask[0]) {
- atomic_set(&sde_enc->frame_done_timeout, 0);
- del_timer(&sde_enc->frame_done_timer);
+ /* One of the physical encoders has become idle */
+ for (i = 0; i < sde_enc->num_phys_encs; i++) {
+ if (sde_enc->phys_encs[i] == ready_phys) {
+ clear_bit(i, sde_enc->frame_busy_mask);
+ SDE_EVT32_VERBOSE(DRMID(drm_enc), i,
+ sde_enc->frame_busy_mask[0]);
+ }
+ }
- sde_encoder_resource_control(drm_enc,
- SDE_ENC_RC_EVENT_FRAME_DONE);
+ if (!sde_enc->frame_busy_mask[0]) {
+ atomic_set(&sde_enc->frame_done_timeout, 0);
+ del_timer(&sde_enc->frame_done_timer);
+ sde_encoder_resource_control(drm_enc,
+ SDE_ENC_RC_EVENT_FRAME_DONE);
+
+ if (sde_enc->crtc_frame_event_cb)
+ sde_enc->crtc_frame_event_cb(
+ sde_enc->crtc_frame_event_cb_data,
+ event);
+ }
+ } else {
if (sde_enc->crtc_frame_event_cb)
sde_enc->crtc_frame_event_cb(
sde_enc->crtc_frame_event_cb_data, event);
@@ -1902,6 +2091,9 @@
pending_kickoff_cnt = sde_encoder_phys_inc_pending(phys);
+ if (phys->ops.is_master && phys->ops.is_master(phys))
+ atomic_inc(&phys->pending_retire_fence_cnt);
+
if (extra_flush_bits && ctl->ops.update_pending_flush)
ctl->ops.update_pending_flush(ctl, extra_flush_bits);
@@ -1971,7 +2163,7 @@
atomic_read(info->atomic_cnt) == 0, jiffies);
time = ktime_to_ms(ktime_get());
- SDE_EVT32(drm_id, hw_id, rc, time, expected_time,
+ SDE_EVT32_VERBOSE(drm_id, hw_id, rc, time, expected_time,
atomic_read(info->atomic_cnt));
/* If we timed out, counter is valid and time is less, wait again */
} while (atomic_read(info->atomic_cnt) && (rc == 0) &&
@@ -2011,7 +2203,8 @@
if (rc) {
SDE_ERROR_ENC(sde_enc,
"connector soft reset failure\n");
- SDE_DBG_DUMP("panic");
+ SDE_DBG_DUMP("all", "dbg_bus", "vbif_dbg_bus",
+ "panic");
}
}
}
@@ -2019,7 +2212,7 @@
rc = ctl->ops.reset(ctl);
if (rc) {
SDE_ERROR_ENC(sde_enc, "ctl %d reset failure\n", ctl->idx);
- SDE_DBG_DUMP("panic");
+ SDE_DBG_DUMP("all", "dbg_bus", "vbif_dbg_bus", "panic");
}
phys_enc->enable_state = SDE_ENC_ENABLED;
@@ -2263,6 +2456,27 @@
}
}
+static void _sde_encoder_setup_dither(struct sde_encoder_phys *phys)
+{
+ void *dither_cfg;
+ int ret = 0;
+ size_t len = 0;
+ enum sde_rm_topology_name topology;
+
+ if (!phys || !phys->connector || !phys->hw_pp ||
+ !phys->hw_pp->ops.setup_dither)
+ return;
+ topology = sde_connector_get_topology_name(phys->connector);
+ if ((topology == SDE_RM_TOPOLOGY_PPSPLIT) &&
+ (phys->split_role == ENC_ROLE_SLAVE))
+ return;
+
+ ret = sde_connector_get_dither_cfg(phys->connector,
+ phys->connector->state, &dither_cfg, &len);
+ if (!ret)
+ phys->hw_pp->ops.setup_dither(phys->hw_pp, dither_cfg, len);
+}
+
void sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
struct sde_encoder_kickoff_params *params)
{
@@ -2282,6 +2496,7 @@
SDE_EVT32(DRMID(drm_enc));
/* prepare for next kickoff, may include waiting on previous kickoff */
+ SDE_ATRACE_BEGIN("enc_prepare_for_kickoff");
for (i = 0; i < sde_enc->num_phys_encs; i++) {
phys = sde_enc->phys_encs[i];
if (phys) {
@@ -2289,8 +2504,10 @@
phys->ops.prepare_for_kickoff(phys, params);
if (phys->enable_state == SDE_ENC_ERR_NEEDS_HW_RESET)
needs_hw_reset = true;
+ _sde_encoder_setup_dither(phys);
}
}
+ SDE_ATRACE_END("enc_prepare_for_kickoff");
sde_encoder_resource_control(drm_enc, SDE_ENC_RC_EVENT_KICKOFF);
@@ -2816,8 +3033,6 @@
SDE_DEBUG("dsi_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles);
- phys_params.comp_type = disp_info->comp_info.comp_type;
-
if (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
sde_enc->idle_pc_supported = sde_kms->catalog->has_idle_pc;
@@ -2981,7 +3196,8 @@
}
mutex_init(&sde_enc->rc_lock);
- INIT_DELAYED_WORK(&sde_enc->delayed_off_work, sde_encoder_off_work);
+ kthread_init_delayed_work(&sde_enc->delayed_off_work,
+ sde_encoder_off_work);
memcpy(&sde_enc->disp_info, disp_info, sizeof(*disp_info));
@@ -3024,7 +3240,9 @@
};
if (phys && fn_wait) {
+ SDE_ATRACE_BEGIN("wait_for_completion_event");
ret = fn_wait(phys);
+ SDE_ATRACE_END("wait_for_completion_event");
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h
index 9c2d3e9..3dae994 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.h
@@ -24,9 +24,11 @@
#include "msm_prop.h"
#include "sde_hw_mdss.h"
-#define SDE_ENCODER_FRAME_EVENT_DONE BIT(0)
-#define SDE_ENCODER_FRAME_EVENT_ERROR BIT(1)
-#define SDE_ENCODER_FRAME_EVENT_PANEL_DEAD BIT(2)
+#define SDE_ENCODER_FRAME_EVENT_DONE BIT(0)
+#define SDE_ENCODER_FRAME_EVENT_ERROR BIT(1)
+#define SDE_ENCODER_FRAME_EVENT_PANEL_DEAD BIT(2)
+#define SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE BIT(3)
+#define SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE BIT(4)
/**
* Encoder functions and data types
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
index 6e6960a..7170d55 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -125,6 +125,8 @@
* @irq_control: Handler to enable/disable all the encoder IRQs
* @update_split_role: Update the split role of the phys enc
* @restore: Restore all the encoder configs.
+ * @is_autorefresh_enabled: provides the autorefresh current
+ * enable/disable state.
*/
struct sde_encoder_phys_ops {
@@ -164,6 +166,7 @@
void (*update_split_role)(struct sde_encoder_phys *phys_enc,
enum sde_enc_split_role role);
void (*restore)(struct sde_encoder_phys *phys);
+ bool (*is_autorefresh_enabled)(struct sde_encoder_phys *phys);
};
/**
@@ -172,6 +175,8 @@
* @INTR_IDX_PINGPONG: Pingpong done unterrupt for cmd mode panel
* @INTR_IDX_UNDERRUN: Underrun unterrupt for video and cmd mode panel
* @INTR_IDX_RDPTR: Readpointer done unterrupt for cmd mode panel
+ * @INTR_IDX_AUTOREFRESH_DONE: Autorefresh done for cmd mode panel meaning
+ * autorefresh has triggered a double buffer flip
*/
enum sde_intr_idx {
INTR_IDX_VSYNC,
@@ -179,6 +184,7 @@
INTR_IDX_UNDERRUN,
INTR_IDX_CTL_START,
INTR_IDX_RDPTR,
+ INTR_IDX_AUTOREFRESH_DONE,
INTR_IDX_MAX,
};
@@ -232,6 +238,8 @@
* scheduled. Decremented in irq handler
* @pending_ctlstart_cnt: Atomic counter tracking the number of ctl start
* pending.
+ * @pending_retire_fence_cnt: Atomic counter tracking the pending retire
+ * fences that have to be signalled.
* @pending_kickoff_wq: Wait queue for blocking until kickoff completes
* @irq: IRQ tracking structures
*/
@@ -258,6 +266,7 @@
atomic_t underrun_cnt;
atomic_t pending_ctlstart_cnt;
atomic_t pending_kickoff_cnt;
+ atomic_t pending_retire_fence_cnt;
wait_queue_head_t pending_kickoff_wq;
struct sde_encoder_irq irq[INTR_IDX_MAX];
};
@@ -284,6 +293,18 @@
};
/**
+ * struct sde_encoder_phys_cmd_autorefresh - autorefresh state tracking
+ * @cfg: current active autorefresh configuration
+ * @kickoff_cnt: atomic count tracking autorefresh done irq kickoffs pending
+ * @kickoff_wq: wait queue for waiting on autorefresh done irq
+ */
+struct sde_encoder_phys_cmd_autorefresh {
+ struct sde_hw_autorefresh cfg;
+ atomic_t kickoff_cnt;
+ wait_queue_head_t kickoff_wq;
+};
+
+/**
* struct sde_encoder_phys_cmd - sub-class of sde_encoder_phys to handle command
* mode specific operations
* @base: Baseclass physical encoder structure
@@ -292,12 +313,19 @@
* @serialize_wait4pp: serialize wait4pp feature waits for pp_done interrupt
* after ctl_start instead of before next frame kickoff
* @pp_timeout_report_cnt: number of pingpong done irq timeout errors
+ * @pending_rd_ptr_cnt: atomic counter to indicate if retire fence can be
+ * signaled at the next rd_ptr_irq
+ * @rd_ptr_timestamp: last rd_ptr_irq timestamp
+ * @autorefresh: autorefresh feature state
*/
struct sde_encoder_phys_cmd {
struct sde_encoder_phys base;
int stream_sel;
bool serialize_wait4pp;
int pp_timeout_report_cnt;
+ struct sde_encoder_phys_cmd_autorefresh autorefresh;
+ atomic_t pending_rd_ptr_cnt;
+ ktime_t rd_ptr_timestamp;
};
/**
@@ -316,7 +344,7 @@
* @wb_fmt: Writeback pixel format
* @frame_count: Counter of completed writeback operations
* @kickoff_count: Counter of issued writeback operations
- * @mmu_id: mmu identifier for non-secure/secure domain
+ * @aspace: address space identifier for non-secure/secure domain
* @wb_dev: Pointer to writeback device
* @start_time: Start time of writeback latest request
* @end_time: End time of writeback latest request
@@ -338,7 +366,7 @@
const struct sde_format *wb_fmt;
u32 frame_count;
u32 kickoff_count;
- int mmu_id[SDE_IOMMU_DOMAIN_MAX];
+ struct msm_gem_address_space *aspace[SDE_IOMMU_DOMAIN_MAX];
struct sde_wb_device *wb_dev;
ktime_t start_time;
ktime_t end_time;
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
index 447fdcc..fc723a6 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
@@ -17,6 +17,7 @@
#include "sde_hw_interrupts.h"
#include "sde_core_irq.h"
#include "sde_formats.h"
+#include "sde_trace.h"
#define SDE_DEBUG_CMDENC(e, fmt, ...) SDE_DEBUG("enc%d intf%d " fmt, \
(e) && (e)->base.parent ? \
@@ -44,6 +45,22 @@
#define DEFAULT_TEARCHECK_SYNC_THRESH_START 4
#define DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE 4
+#define SDE_ENC_WR_PTR_START_TIMEOUT_US 20000
+
+/*
+ * Threshold for signalling retire fences in cases where
+ * CTL_START_IRQ is received just after RD_PTR_IRQ
+ */
+#define SDE_ENC_CTL_START_THRESHOLD_US 500
+
+static inline int _sde_encoder_phys_cmd_get_idle_timeout(
+ struct sde_encoder_phys_cmd *cmd_enc)
+{
+ return cmd_enc->autorefresh.cfg.frame_count ?
+ cmd_enc->autorefresh.cfg.frame_count *
+ KICKOFF_TIMEOUT_MS : KICKOFF_TIMEOUT_MS;
+}
+
static inline bool sde_encoder_phys_cmd_is_master(
struct sde_encoder_phys *phys_enc)
{
@@ -60,6 +77,52 @@
return true;
}
+static uint64_t _sde_encoder_phys_cmd_get_autorefresh_property(
+ struct sde_encoder_phys *phys_enc)
+{
+ struct drm_connector *conn = phys_enc->connector;
+
+ if (!conn || !conn->state)
+ return 0;
+
+ return sde_connector_get_property(conn->state,
+ CONNECTOR_PROP_AUTOREFRESH);
+}
+
+static void _sde_encoder_phys_cmd_config_autorefresh(
+ struct sde_encoder_phys *phys_enc,
+ u32 new_frame_count)
+{
+ struct sde_encoder_phys_cmd *cmd_enc =
+ to_sde_encoder_phys_cmd(phys_enc);
+ struct sde_hw_pingpong *hw_pp = phys_enc->hw_pp;
+ struct drm_connector *conn = phys_enc->connector;
+ struct sde_hw_autorefresh *cfg_cur, cfg_nxt;
+
+ if (!conn || !conn->state || !hw_pp)
+ return;
+
+ cfg_cur = &cmd_enc->autorefresh.cfg;
+
+ /* autorefresh property value should be validated already */
+ memset(&cfg_nxt, 0, sizeof(cfg_nxt));
+ cfg_nxt.frame_count = new_frame_count;
+ cfg_nxt.enable = (cfg_nxt.frame_count != 0);
+
+ SDE_DEBUG_CMDENC(cmd_enc, "autorefresh state %d->%d framecount %d\n",
+ cfg_cur->enable, cfg_nxt.enable, cfg_nxt.frame_count);
+ SDE_EVT32(DRMID(phys_enc->parent), hw_pp->idx, cfg_cur->enable,
+ cfg_nxt.enable, cfg_nxt.frame_count);
+
+ /* only proceed on state changes */
+ if (cfg_nxt.enable == cfg_cur->enable)
+ return;
+
+ memcpy(cfg_cur, &cfg_nxt, sizeof(*cfg_cur));
+ if (hw_pp->ops.setup_autorefresh)
+ hw_pp->ops.setup_autorefresh(hw_pp, cfg_cur);
+}
+
static void _sde_encoder_phys_cmd_update_flush_mask(
struct sde_encoder_phys *phys_enc)
{
@@ -104,58 +167,146 @@
struct sde_encoder_phys *phys_enc = arg;
unsigned long lock_flags;
int new_cnt;
+ u32 event = SDE_ENCODER_FRAME_EVENT_DONE |
+ SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE;
- if (!phys_enc)
+ if (!phys_enc || !phys_enc->hw_pp)
return;
+ SDE_ATRACE_BEGIN("pp_done_irq");
/* notify all synchronous clients first, then asynchronous clients */
if (phys_enc->parent_ops.handle_frame_done)
phys_enc->parent_ops.handle_frame_done(phys_enc->parent,
- phys_enc, SDE_ENCODER_FRAME_EVENT_DONE);
+ phys_enc, event);
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
SDE_EVT32_IRQ(DRMID(phys_enc->parent),
- phys_enc->hw_pp->idx - PINGPONG_0, new_cnt);
+ phys_enc->hw_pp->idx - PINGPONG_0, new_cnt, event);
/* Signal any waiting atomic commit thread */
wake_up_all(&phys_enc->pending_kickoff_wq);
+ SDE_ATRACE_END("pp_done_irq");
+}
+
+static void sde_encoder_phys_cmd_autorefresh_done_irq(void *arg, int irq_idx)
+{
+ struct sde_encoder_phys *phys_enc = arg;
+ struct sde_encoder_phys_cmd *cmd_enc =
+ to_sde_encoder_phys_cmd(phys_enc);
+ unsigned long lock_flags;
+ int new_cnt;
+
+ if (!cmd_enc)
+ return;
+
+ phys_enc = &cmd_enc->base;
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ new_cnt = atomic_add_unless(&cmd_enc->autorefresh.kickoff_cnt, -1, 0);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+ SDE_EVT32_IRQ(DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0, new_cnt);
+
+ /* Signal any waiting atomic commit thread */
+ wake_up_all(&cmd_enc->autorefresh.kickoff_wq);
}
static void sde_encoder_phys_cmd_pp_rd_ptr_irq(void *arg, int irq_idx)
{
struct sde_encoder_phys *phys_enc = arg;
+ struct sde_encoder_phys_cmd *cmd_enc;
+ u32 event = 0;
- if (!phys_enc)
+ if (!phys_enc || !phys_enc->hw_pp)
return;
+ SDE_ATRACE_BEGIN("rd_ptr_irq");
+ cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
+
+ /**
+ * signal only for master, when the ctl_start irq is
+ * done and incremented the pending_rd_ptr_cnt.
+ */
+ if (sde_encoder_phys_cmd_is_master(phys_enc)
+ && atomic_add_unless(&cmd_enc->pending_rd_ptr_cnt, -1, 0)
+ && atomic_add_unless(
+ &phys_enc->pending_retire_fence_cnt, -1, 0)) {
+
+ event = SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE;
+ if (phys_enc->parent_ops.handle_frame_done)
+ phys_enc->parent_ops.handle_frame_done(
+ phys_enc->parent, phys_enc, event);
+ }
+
SDE_EVT32_IRQ(DRMID(phys_enc->parent),
- phys_enc->hw_pp->idx - PINGPONG_0, 0xfff);
+ phys_enc->hw_pp->idx - PINGPONG_0, event, 0xfff);
if (phys_enc->parent_ops.handle_vblank_virt)
phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
phys_enc);
+
+ cmd_enc->rd_ptr_timestamp = ktime_get();
+
+ SDE_ATRACE_END("rd_ptr_irq");
}
static void sde_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)
{
struct sde_encoder_phys *phys_enc = arg;
+ struct sde_encoder_phys_cmd *cmd_enc;
struct sde_hw_ctl *ctl;
+ u32 event = 0;
+ s64 time_diff_us;
- if (!phys_enc)
+ if (!phys_enc || !phys_enc->hw_ctl)
return;
- if (!phys_enc->hw_ctl)
- return;
+ SDE_ATRACE_BEGIN("ctl_start_irq");
+ cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
ctl = phys_enc->hw_ctl;
- SDE_EVT32_IRQ(DRMID(phys_enc->parent), ctl->idx - CTL_0, 0xfff);
atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);
+ time_diff_us = ktime_us_delta(ktime_get(), cmd_enc->rd_ptr_timestamp);
+
+ /* handle retire fence based on only master */
+ if (sde_encoder_phys_cmd_is_master(phys_enc)
+ && atomic_read(&phys_enc->pending_retire_fence_cnt)) {
+ /**
+ * Handle rare cases where the ctl_start_irq is received
+ * after rd_ptr_irq. If it falls within a threshold, it is
+ * guaranteed the frame would be picked up in the current TE.
+ * Signal retire fence immediately in such case.
+ */
+ if ((time_diff_us <= SDE_ENC_CTL_START_THRESHOLD_US)
+ && atomic_add_unless(
+ &phys_enc->pending_retire_fence_cnt, -1, 0)) {
+
+ event = SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE;
+
+ if (phys_enc->parent_ops.handle_frame_done)
+ phys_enc->parent_ops.handle_frame_done(
+ phys_enc->parent, phys_enc, event);
+
+ /**
+ * In ideal cases, ctl_start_irq is received before the
+ * rd_ptr_irq, so set the atomic flag to indicate the event
+ * and rd_ptr_irq will handle signalling the retire fence
+ */
+ } else {
+ atomic_inc(&cmd_enc->pending_rd_ptr_cnt);
+ }
+ }
+
+ SDE_EVT32_IRQ(DRMID(phys_enc->parent), ctl->idx - CTL_0,
+ time_diff_us, event, 0xfff);
+
/* Signal any waiting ctl start interrupt */
wake_up_all(&phys_enc->pending_kickoff_wq);
+ SDE_ATRACE_END("ctl_start_irq");
}
static void sde_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
@@ -190,6 +341,10 @@
irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
irq->hw_idx = phys_enc->intf_idx;
irq->irq_idx = -EINVAL;
+
+ irq = &phys_enc->irq[INTR_IDX_AUTOREFRESH_DONE];
+ irq->hw_idx = phys_enc->hw_pp->idx;
+ irq->irq_idx = -EINVAL;
}
static void sde_encoder_phys_cmd_mode_set(
@@ -249,7 +404,8 @@
{
struct sde_encoder_phys_cmd *cmd_enc =
to_sde_encoder_phys_cmd(phys_enc);
- u32 frame_event = SDE_ENCODER_FRAME_EVENT_ERROR;
+ u32 frame_event = SDE_ENCODER_FRAME_EVENT_ERROR
+ | SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE;
bool do_log = false;
cmd_enc->pp_timeout_report_cnt++;
@@ -262,7 +418,8 @@
SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0,
cmd_enc->pp_timeout_report_cnt,
- atomic_read(&phys_enc->pending_kickoff_cnt));
+ atomic_read(&phys_enc->pending_kickoff_cnt),
+ frame_event);
/* to avoid flooding, only log first time, and "dead" time */
if (do_log) {
@@ -275,9 +432,8 @@
SDE_EVT32(DRMID(phys_enc->parent), SDE_EVTLOG_FATAL);
- SDE_DBG_DUMP("sde", "dsi0_ctrl", "dsi0_phy", "dsi1_ctrl",
- "dsi1_phy", "vbif", "dbg_bus",
- "vbif_dbg_bus", "panic");
+ sde_encoder_helper_unregister_irq(phys_enc, INTR_IDX_RDPTR);
+ SDE_DBG_DUMP("all", "dbg_bus", "vbif_dbg_bus", "panic");
}
atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
@@ -302,6 +458,73 @@
phys_enc->split_role == ENC_ROLE_SLAVE;
}
+static int _sde_encoder_phys_cmd_poll_write_pointer_started(
+ struct sde_encoder_phys *phys_enc)
+{
+ struct sde_encoder_phys_cmd *cmd_enc =
+ to_sde_encoder_phys_cmd(phys_enc);
+ struct sde_hw_pingpong *hw_pp = phys_enc->hw_pp;
+ struct sde_hw_pp_vsync_info info;
+ u32 timeout_us = SDE_ENC_WR_PTR_START_TIMEOUT_US;
+ int ret;
+
+ if (!hw_pp || !hw_pp->ops.get_vsync_info ||
+ !hw_pp->ops.poll_timeout_wr_ptr)
+ return 0;
+
+ ret = hw_pp->ops.get_vsync_info(hw_pp, &info);
+ if (ret)
+ return ret;
+
+ SDE_DEBUG_CMDENC(cmd_enc,
+ "pp:%d rd_ptr %d wr_ptr %d\n",
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ info.rd_ptr_line_count,
+ info.wr_ptr_line_count);
+ SDE_EVT32_VERBOSE(DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ info.wr_ptr_line_count);
+
+ ret = hw_pp->ops.poll_timeout_wr_ptr(hw_pp, timeout_us);
+ if (ret) {
+ SDE_EVT32(DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ timeout_us,
+ ret);
+ SDE_DBG_DUMP("all", "dbg_bus", "vbif_dbg_bus", "panic");
+ }
+
+ return ret;
+}
+
+static bool _sde_encoder_phys_cmd_is_ongoing_pptx(
+ struct sde_encoder_phys *phys_enc)
+{
+ struct sde_hw_pingpong *hw_pp;
+ struct sde_hw_pp_vsync_info info;
+
+ if (!phys_enc)
+ return false;
+
+ hw_pp = phys_enc->hw_pp;
+ if (!hw_pp || !hw_pp->ops.get_vsync_info)
+ return false;
+
+ hw_pp->ops.get_vsync_info(hw_pp, &info);
+
+ SDE_EVT32(DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(&phys_enc->pending_kickoff_cnt),
+ info.wr_ptr_line_count,
+ phys_enc->cached_mode.vdisplay);
+
+ if (info.wr_ptr_line_count > 0 && info.wr_ptr_line_count <
+ phys_enc->cached_mode.vdisplay)
+ return true;
+
+ return false;
+}
+
static int _sde_encoder_phys_cmd_wait_for_idle(
struct sde_encoder_phys *phys_enc)
{
@@ -333,6 +556,42 @@
return ret;
}
+static int _sde_encoder_phys_cmd_wait_for_autorefresh_done(
+ struct sde_encoder_phys *phys_enc)
+{
+ struct sde_encoder_phys_cmd *cmd_enc =
+ to_sde_encoder_phys_cmd(phys_enc);
+ struct sde_encoder_wait_info wait_info;
+ int ret = 0;
+
+ if (!phys_enc) {
+ SDE_ERROR("invalid encoder\n");
+ return -EINVAL;
+ }
+
+ /* only master deals with autorefresh */
+ if (!sde_encoder_phys_cmd_is_master(phys_enc))
+ return 0;
+
+ wait_info.wq = &cmd_enc->autorefresh.kickoff_wq;
+ wait_info.atomic_cnt = &cmd_enc->autorefresh.kickoff_cnt;
+ wait_info.timeout_ms = _sde_encoder_phys_cmd_get_idle_timeout(cmd_enc);
+
+ /* wait for autorefresh kickoff to start */
+ ret = sde_encoder_helper_wait_for_irq(phys_enc,
+ INTR_IDX_AUTOREFRESH_DONE, &wait_info);
+
+ /* double check that kickoff has started by reading write ptr reg */
+ if (!ret)
+ ret = _sde_encoder_phys_cmd_poll_write_pointer_started(
+ phys_enc);
+ else
+ sde_encoder_helper_report_irq_timeout(phys_enc,
+ INTR_IDX_AUTOREFRESH_DONE);
+
+ return ret;
+}
+
static int sde_encoder_phys_cmd_control_vblank_irq(
struct sde_encoder_phys *phys_enc,
bool enable)
@@ -382,19 +641,28 @@
cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
+ SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0,
+ enable, atomic_read(&phys_enc->vblank_refcount));
+
if (enable) {
sde_encoder_helper_register_irq(phys_enc, INTR_IDX_PINGPONG);
sde_encoder_helper_register_irq(phys_enc, INTR_IDX_UNDERRUN);
sde_encoder_phys_cmd_control_vblank_irq(phys_enc, true);
- if (sde_encoder_phys_cmd_is_master(phys_enc))
+ if (sde_encoder_phys_cmd_is_master(phys_enc)) {
sde_encoder_helper_register_irq(phys_enc,
INTR_IDX_CTL_START);
- } else {
+ sde_encoder_helper_register_irq(phys_enc,
+ INTR_IDX_AUTOREFRESH_DONE);
+ }
- if (sde_encoder_phys_cmd_is_master(phys_enc))
+ } else {
+ if (sde_encoder_phys_cmd_is_master(phys_enc)) {
sde_encoder_helper_unregister_irq(phys_enc,
INTR_IDX_CTL_START);
+ sde_encoder_helper_unregister_irq(phys_enc,
+ INTR_IDX_AUTOREFRESH_DONE);
+ }
sde_encoder_helper_unregister_irq(phys_enc, INTR_IDX_UNDERRUN);
sde_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
@@ -408,7 +676,7 @@
struct sde_encoder_phys_cmd *cmd_enc =
to_sde_encoder_phys_cmd(phys_enc);
struct sde_hw_tear_check tc_cfg = { 0 };
- struct drm_display_mode *mode = &phys_enc->cached_mode;
+ struct drm_display_mode *mode;
bool tc_enable = true;
u32 vsync_hz;
struct msm_drm_private *priv;
@@ -418,6 +686,7 @@
SDE_ERROR("invalid encoder\n");
return;
}
+ mode = &phys_enc->cached_mode;
SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
@@ -428,7 +697,12 @@
}
sde_kms = phys_enc->sde_kms;
+ if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private) {
+ SDE_ERROR("invalid device\n");
+ return;
+ }
priv = sde_kms->dev->dev_private;
+
/*
* TE default: dsi byte clock calculated base on 70 fps;
* around 14 ms to complete a kickoff cycle if te disabled;
@@ -439,13 +713,17 @@
* frequency divided by the no. of rows (lines) in the LCDpanel.
*/
vsync_hz = sde_power_clk_get_rate(&priv->phandle, "vsync_clk");
- if (!vsync_hz) {
- SDE_DEBUG_CMDENC(cmd_enc, "invalid vsync clock rate\n");
+ if (!vsync_hz || !mode->vtotal || !mode->vrefresh) {
+ SDE_DEBUG_CMDENC(cmd_enc,
+ "invalid params - vsync_hz %u vtot %u vrefresh %u\n",
+ vsync_hz, mode->vtotal, mode->vrefresh);
return;
}
tc_cfg.vsync_count = vsync_hz / (mode->vtotal * mode->vrefresh);
- tc_cfg.hw_vsync_mode = 1;
+
+ /* enable external TE after kickoff to avoid premature autorefresh */
+ tc_cfg.hw_vsync_mode = 0;
/*
* By setting sync_cfg_height to near max register value, we essentially
@@ -486,8 +764,8 @@
struct sde_encoder_phys_cmd *cmd_enc =
to_sde_encoder_phys_cmd(phys_enc);
- if (!phys_enc || !phys_enc->hw_ctl ||
- !phys_enc->hw_ctl->ops.setup_intf_cfg) {
+ if (!phys_enc || !phys_enc->hw_ctl || !phys_enc->hw_pp
+ || !phys_enc->hw_ctl->ops.setup_intf_cfg) {
SDE_ERROR("invalid arg(s), enc %d\n", phys_enc != 0);
return;
}
@@ -516,7 +794,7 @@
struct sde_hw_ctl *ctl;
u32 flush_mask = 0;
- if (!phys_enc || !phys_enc->hw_ctl) {
+ if (!phys_enc || !phys_enc->hw_ctl || !phys_enc->hw_pp) {
SDE_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
return;
}
@@ -546,10 +824,11 @@
struct sde_encoder_phys_cmd *cmd_enc =
to_sde_encoder_phys_cmd(phys_enc);
- if (!phys_enc) {
+ if (!phys_enc || !phys_enc->hw_pp) {
SDE_ERROR("invalid phys encoder\n");
return;
}
+
SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
if (phys_enc->enable_state == SDE_ENC_ENABLED) {
@@ -561,13 +840,48 @@
phys_enc->enable_state = SDE_ENC_ENABLED;
}
+static bool sde_encoder_phys_cmd_is_autorefresh_enabled(
+ struct sde_encoder_phys *phys_enc)
+{
+ struct sde_hw_pingpong *hw_pp;
+ struct sde_hw_autorefresh cfg;
+ int ret;
+
+ if (!phys_enc || !phys_enc->hw_pp)
+ return 0;
+
+ if (!sde_encoder_phys_cmd_is_master(phys_enc))
+ return 0;
+
+ hw_pp = phys_enc->hw_pp;
+ if (!hw_pp->ops.get_autorefresh)
+ return 0;
+
+ ret = hw_pp->ops.get_autorefresh(hw_pp, &cfg);
+ if (ret)
+ return 0;
+
+ return cfg.enable;
+}
+
+static void _sde_encoder_phys_cmd_connect_te(
+ struct sde_encoder_phys *phys_enc, bool enable)
+{
+ if (!phys_enc || !phys_enc->hw_pp ||
+ !phys_enc->hw_pp->ops.connect_external_te)
+ return;
+
+ SDE_EVT32(DRMID(phys_enc->parent), enable);
+ phys_enc->hw_pp->ops.connect_external_te(phys_enc->hw_pp, enable);
+}
+
static void sde_encoder_phys_cmd_disable(struct sde_encoder_phys *phys_enc)
{
struct sde_encoder_phys_cmd *cmd_enc =
to_sde_encoder_phys_cmd(phys_enc);
int ret;
- if (!phys_enc) {
+ if (!phys_enc || !phys_enc->hw_pp) {
SDE_ERROR("invalid encoder\n");
return;
}
@@ -621,6 +935,12 @@
SDE_ERROR("invalid encoder\n");
return;
}
+
+ if ((phys_enc->intf_idx - INTF_0) >= INTF_MAX) {
+ SDE_ERROR("invalid intf idx:%d\n", phys_enc->intf_idx);
+ return;
+ }
+
SDE_DEBUG_CMDENC(cmd_enc, "\n");
hw_res->intfs[phys_enc->intf_idx - INTF_0] = INTF_MODE_CMD;
}
@@ -633,12 +953,15 @@
to_sde_encoder_phys_cmd(phys_enc);
int ret;
- if (!phys_enc) {
+ if (!phys_enc || !phys_enc->hw_pp) {
SDE_ERROR("invalid encoder\n");
return;
}
SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
- SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0);
+
+ SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(&phys_enc->pending_kickoff_cnt),
+ atomic_read(&cmd_enc->autorefresh.kickoff_cnt));
/*
* Mark kickoff request as outstanding. If there are more than one,
@@ -652,6 +975,10 @@
phys_enc->hw_pp->idx - PINGPONG_0);
SDE_ERROR("failed wait_for_idle: %d\n", ret);
}
+
+ SDE_DEBUG_CMDENC(cmd_enc, "pp:%d pending_cnt %d\n",
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(&phys_enc->pending_kickoff_cnt));
}
static int _sde_encoder_phys_cmd_wait_for_ctl_start(
@@ -722,6 +1049,10 @@
if (sde_encoder_phys_cmd_is_master(phys_enc))
rc = _sde_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
+ if (!rc && sde_encoder_phys_cmd_is_master(phys_enc) &&
+ cmd_enc->autorefresh.cfg.enable)
+ rc = _sde_encoder_phys_cmd_wait_for_autorefresh_done(phys_enc);
+
/* required for both controllers */
if (!rc && cmd_enc->serialize_wait4pp)
sde_encoder_phys_cmd_prepare_for_kickoff(phys_enc, NULL);
@@ -765,6 +1096,86 @@
static void sde_encoder_phys_cmd_prepare_commit(
struct sde_encoder_phys *phys_enc)
{
+ struct sde_encoder_phys_cmd *cmd_enc =
+ to_sde_encoder_phys_cmd(phys_enc);
+ unsigned long lock_flags;
+
+ if (!phys_enc)
+ return;
+
+ if (!sde_encoder_phys_cmd_is_master(phys_enc))
+ return;
+
+ SDE_EVT32(DRMID(phys_enc->parent), phys_enc->intf_idx - INTF_0,
+ cmd_enc->autorefresh.cfg.enable);
+
+ if (!sde_encoder_phys_cmd_is_autorefresh_enabled(phys_enc))
+ return;
+
+	/*
+ * Autorefresh must be disabled carefully:
+ * - Autorefresh must be disabled between pp_done and te
+ * signal prior to sdm845 targets. All targets after sdm845
+	 * support autorefresh disable without turning off the
+ * hardware TE and pp_done wait.
+ *
+ * - Wait for TX to Complete
+ * Wait for PPDone confirms the last frame transfer is complete.
+ *
+ * - Leave Autorefresh Disabled
+ * - Assume disable of Autorefresh since it is now safe
+ * - Can now safely Disable Encoder, do debug printing, etc.
+ * without worrying that Autorefresh will kickoff
+ */
+
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+
+ _sde_encoder_phys_cmd_config_autorefresh(phys_enc, 0);
+
+ /* check for outstanding TX */
+ if (_sde_encoder_phys_cmd_is_ongoing_pptx(phys_enc))
+ atomic_add_unless(&phys_enc->pending_kickoff_cnt, 1, 1);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+ /* wait for ppdone if necessary due to catching ongoing TX */
+ if (_sde_encoder_phys_cmd_wait_for_idle(phys_enc))
+ SDE_ERROR_CMDENC(cmd_enc, "pp:%d kickoff timed out\n",
+ phys_enc->hw_pp->idx - PINGPONG_0);
+
+ SDE_DEBUG_CMDENC(cmd_enc, "disabled autorefresh\n");
+}
+
+static void sde_encoder_phys_cmd_handle_post_kickoff(
+ struct sde_encoder_phys *phys_enc)
+{
+ if (!phys_enc)
+ return;
+
+	/*
+ * re-enable external TE, either for the first time after enabling
+ * or if disabled for Autorefresh
+ */
+ _sde_encoder_phys_cmd_connect_te(phys_enc, true);
+}
+
+static void sde_encoder_phys_cmd_trigger_start(
+ struct sde_encoder_phys *phys_enc)
+{
+ struct sde_encoder_phys_cmd *cmd_enc =
+ to_sde_encoder_phys_cmd(phys_enc);
+ u32 frame_cnt;
+
+ if (!phys_enc)
+ return;
+
+ /* we don't issue CTL_START when using autorefresh */
+ frame_cnt = _sde_encoder_phys_cmd_get_autorefresh_property(phys_enc);
+ if (frame_cnt) {
+ _sde_encoder_phys_cmd_config_autorefresh(phys_enc, frame_cnt);
+ atomic_inc(&cmd_enc->autorefresh.kickoff_cnt);
+ } else {
+ sde_encoder_helper_trigger_start(phys_enc);
+ }
}
static void sde_encoder_phys_cmd_init_ops(
@@ -782,12 +1193,15 @@
ops->wait_for_commit_done = sde_encoder_phys_cmd_wait_for_commit_done;
ops->prepare_for_kickoff = sde_encoder_phys_cmd_prepare_for_kickoff;
ops->wait_for_tx_complete = sde_encoder_phys_cmd_wait_for_tx_complete;
- ops->trigger_start = sde_encoder_helper_trigger_start;
+ ops->trigger_start = sde_encoder_phys_cmd_trigger_start;
ops->needs_single_flush = sde_encoder_phys_cmd_needs_single_flush;
ops->hw_reset = sde_encoder_helper_hw_reset;
ops->irq_control = sde_encoder_phys_cmd_irq_control;
ops->update_split_role = sde_encoder_phys_cmd_update_split_role;
ops->restore = sde_encoder_phys_cmd_enable_helper;
+ ops->is_autorefresh_enabled =
+ sde_encoder_phys_cmd_is_autorefresh_enabled;
+ ops->handle_post_kickoff = sde_encoder_phys_cmd_handle_post_kickoff;
}
struct sde_encoder_phys *sde_encoder_phys_cmd_init(
@@ -860,10 +1274,20 @@
irq->intr_idx = INTR_IDX_UNDERRUN;
irq->cb.func = sde_encoder_phys_cmd_underrun_irq;
+ irq = &phys_enc->irq[INTR_IDX_AUTOREFRESH_DONE];
+ irq->name = "autorefresh_done";
+ irq->intr_type = SDE_IRQ_TYPE_PING_PONG_AUTO_REF;
+ irq->intr_idx = INTR_IDX_AUTOREFRESH_DONE;
+ irq->cb.func = sde_encoder_phys_cmd_autorefresh_done_irq;
+
atomic_set(&phys_enc->vblank_refcount, 0);
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
atomic_set(&phys_enc->pending_ctlstart_cnt, 0);
+ atomic_set(&phys_enc->pending_retire_fence_cnt, 0);
+ atomic_set(&cmd_enc->pending_rd_ptr_cnt, 0);
init_waitqueue_head(&phys_enc->pending_kickoff_wq);
+ atomic_set(&cmd_enc->autorefresh.kickoff_cnt, 0);
+ init_waitqueue_head(&cmd_enc->autorefresh.kickoff_wq);
SDE_DEBUG_CMDENC(cmd_enc, "created\n");
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
index 007738a6..933e4812 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
@@ -16,6 +16,7 @@
#include "sde_core_irq.h"
#include "sde_formats.h"
#include "dsi_display.h"
+#include "sde_trace.h"
#define SDE_DEBUG_VIDENC(e, fmt, ...) SDE_DEBUG("enc%d intf%d " fmt, \
(e) && (e)->base.parent ? \
@@ -49,6 +50,23 @@
struct intf_timing_params *timing)
{
memset(timing, 0, sizeof(*timing));
+
+ if ((mode->htotal < mode->hsync_end)
+ || (mode->hsync_start < mode->hdisplay)
+ || (mode->vtotal < mode->vsync_end)
+ || (mode->vsync_start < mode->vdisplay)
+ || (mode->hsync_end < mode->hsync_start)
+ || (mode->vsync_end < mode->vsync_start)) {
+ SDE_ERROR(
+ "invalid params - hstart:%d,hend:%d,htot:%d,hdisplay:%d\n",
+ mode->hsync_start, mode->hsync_end,
+ mode->htotal, mode->hdisplay);
+ SDE_ERROR("vstart:%d,vend:%d,vtot:%d,vdisplay:%d\n",
+ mode->vsync_start, mode->vsync_end,
+ mode->vtotal, mode->vdisplay);
+ return;
+ }
+
/*
* https://www.kernel.org/doc/htmldocs/drm/ch02s05.html
* Active Region Front Porch Sync Back Porch
@@ -139,6 +157,15 @@
u32 needed_vfp_lines = worst_case_needed_lines - start_of_frame_lines;
u32 actual_vfp_lines = 0;
+ if (worst_case_needed_lines < start_of_frame_lines) {
+ needed_vfp_lines = 0;
+ SDE_ERROR("invalid params - needed_lines:%d, frame_lines:%d\n",
+ worst_case_needed_lines, start_of_frame_lines);
+ } else {
+ needed_vfp_lines = worst_case_needed_lines
+ - start_of_frame_lines;
+ }
+
/* Fetch must be outside active lines, otherwise undefined. */
if (start_of_frame_lines >= worst_case_needed_lines) {
SDE_DEBUG_VIDENC(vid_enc,
@@ -352,11 +379,25 @@
unsigned long lock_flags;
u32 flush_register = 0;
int new_cnt = -1, old_cnt = -1;
+ u32 event = 0;
if (!phys_enc)
return;
hw_ctl = phys_enc->hw_ctl;
+ SDE_ATRACE_BEGIN("vblank_irq");
+
+ /* signal only for master, where there is a pending kickoff */
+ if (sde_encoder_phys_vid_is_master(phys_enc)
+ && atomic_add_unless(
+ &phys_enc->pending_retire_fence_cnt, -1, 0)) {
+ event = SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE
+ | SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE;
+
+ if (phys_enc->parent_ops.handle_frame_done)
+ phys_enc->parent_ops.handle_frame_done(phys_enc->parent,
+ phys_enc, event);
+ }
if (phys_enc->parent_ops.handle_vblank_virt)
phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
@@ -379,10 +420,11 @@
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
SDE_EVT32_IRQ(DRMID(phys_enc->parent), vid_enc->hw_intf->idx - INTF_0,
- old_cnt, new_cnt, flush_register);
+ old_cnt, new_cnt, flush_register, event);
/* Signal any waiting atomic commit thread */
wake_up_all(&phys_enc->pending_kickoff_wq);
+ SDE_ATRACE_END("vblank_irq");
}
static void sde_encoder_phys_vid_underrun_irq(void *arg, int irq_idx)
@@ -442,15 +484,18 @@
struct sde_encoder_phys_vid *vid_enc;
if (!phys_enc || !phys_enc->sde_kms) {
- SDE_ERROR("invalid encoder\n");
+ SDE_ERROR("invalid encoder/kms\n");
return;
}
rm = &phys_enc->sde_kms->rm;
vid_enc = to_sde_encoder_phys_vid(phys_enc);
- phys_enc->cached_mode = *adj_mode;
- SDE_DEBUG_VIDENC(vid_enc, "caching mode:\n");
- drm_mode_debug_printmodeline(adj_mode);
+
+ if (adj_mode) {
+ phys_enc->cached_mode = *adj_mode;
+ drm_mode_debug_printmodeline(adj_mode);
+ SDE_DEBUG_VIDENC(vid_enc, "caching mode:\n");
+ }
instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
@@ -615,13 +660,18 @@
static int sde_encoder_phys_vid_wait_for_vblank(
struct sde_encoder_phys *phys_enc, bool notify)
{
- struct sde_encoder_wait_info wait_info = {
- .wq = &phys_enc->pending_kickoff_wq,
- .atomic_cnt = &phys_enc->pending_kickoff_cnt,
- .timeout_ms = KICKOFF_TIMEOUT_MS,
- };
+ struct sde_encoder_wait_info wait_info;
int ret;
+ if (!phys_enc) {
+ pr_err("invalid encoder\n");
+ return -EINVAL;
+ }
+
+ wait_info.wq = &phys_enc->pending_kickoff_wq;
+ wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
+ wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
if (!sde_encoder_phys_vid_is_master(phys_enc)) {
/* signal done for slave video encoder, unless it is pp-split */
if (!_sde_encoder_phys_is_ppsplit(phys_enc) &&
@@ -649,11 +699,7 @@
static int sde_encoder_phys_vid_wait_for_commit_done(
struct sde_encoder_phys *phys_enc)
{
- int ret;
-
- ret = sde_encoder_phys_vid_wait_for_vblank(phys_enc, true);
-
- return ret;
+ return sde_encoder_phys_vid_wait_for_vblank(phys_enc, true);
}
static void sde_encoder_phys_vid_prepare_for_kickoff(
@@ -682,7 +728,8 @@
if (rc) {
SDE_ERROR_VIDENC(vid_enc, "ctl %d reset failure: %d\n",
ctl->idx, rc);
- SDE_DBG_DUMP("panic");
+ sde_encoder_helper_unregister_irq(phys_enc, INTR_IDX_VSYNC);
+ SDE_DBG_DUMP("all", "dbg_bus", "vbif_dbg_bus", "panic");
}
programmable_rot_fetch_config(phys_enc, params->inline_rotate_prefill);
@@ -912,6 +959,7 @@
atomic_set(&phys_enc->vblank_refcount, 0);
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+ atomic_set(&phys_enc->pending_retire_fence_cnt, 0);
init_waitqueue_head(&phys_enc->pending_kickoff_wq);
phys_enc->enable_state = SDE_ENC_DISABLED;
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
index 54c1397..2b736e5 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
@@ -250,7 +250,8 @@
struct sde_hw_wb_cfg *wb_cfg;
struct sde_hw_wb_cdp_cfg *cdp_cfg;
const struct msm_format *format;
- int ret, mmu_id;
+ int ret;
+ struct msm_gem_address_space *aspace;
if (!phys_enc || !phys_enc->sde_kms || !phys_enc->sde_kms->catalog) {
SDE_ERROR("invalid encoder\n");
@@ -264,9 +265,9 @@
wb_cfg->intf_mode = phys_enc->intf_mode;
wb_cfg->is_secure = (fb->flags & DRM_MODE_FB_SECURE) ? true : false;
- mmu_id = (wb_cfg->is_secure) ?
- wb_enc->mmu_id[SDE_IOMMU_DOMAIN_SECURE] :
- wb_enc->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE];
+ aspace = (wb_cfg->is_secure) ?
+ wb_enc->aspace[SDE_IOMMU_DOMAIN_SECURE] :
+ wb_enc->aspace[SDE_IOMMU_DOMAIN_UNSECURE];
SDE_DEBUG("[fb_secure:%d]\n", wb_cfg->is_secure);
@@ -288,7 +289,7 @@
wb_cfg->roi = *wb_roi;
if (hw_wb->caps->features & BIT(SDE_WB_XY_ROI_OFFSET)) {
- ret = sde_format_populate_layout(mmu_id, fb, &wb_cfg->dest);
+ ret = sde_format_populate_layout(aspace, fb, &wb_cfg->dest);
if (ret) {
SDE_DEBUG("failed to populate layout %d\n", ret);
return;
@@ -297,7 +298,7 @@
wb_cfg->dest.height = fb->height;
wb_cfg->dest.num_planes = wb_cfg->dest.format->num_planes;
} else {
- ret = sde_format_populate_layout_with_roi(mmu_id, fb, wb_roi,
+ ret = sde_format_populate_layout_with_roi(aspace, fb, wb_roi,
&wb_cfg->dest);
if (ret) {
/* this error should be detected during atomic_check */
@@ -643,6 +644,7 @@
struct sde_encoder_phys_wb *wb_enc = arg;
struct sde_encoder_phys *phys_enc = &wb_enc->base;
struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
+ u32 event = 0;
SDE_DEBUG("[wb:%d,%u]\n", hw_wb->idx - WB_0,
wb_enc->frame_count);
@@ -651,12 +653,20 @@
if (phys_enc->enable_state == SDE_ENC_DISABLING)
goto complete;
+ event = SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE
+ | SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE
+ | SDE_ENCODER_FRAME_EVENT_DONE;
+
+ atomic_add_unless(&phys_enc->pending_retire_fence_cnt, -1, 0);
if (phys_enc->parent_ops.handle_frame_done)
phys_enc->parent_ops.handle_frame_done(phys_enc->parent,
- phys_enc, SDE_ENCODER_FRAME_EVENT_DONE);
+ phys_enc, event);
- phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
- phys_enc);
+ if (phys_enc->parent_ops.handle_vblank_virt)
+ phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
+ phys_enc);
+
+ SDE_EVT32_IRQ(DRMID(phys_enc->parent), hw_wb->idx - WB_0, event);
complete:
complete_all(&wb_enc->wbdone_complete);
@@ -782,7 +792,7 @@
{
unsigned long ret;
struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
- u32 irq_status;
+ u32 irq_status, event = 0;
u64 wb_time = 0;
int rc = 0;
u32 timeout = max_t(u32, wb_enc->wbdone_timeout, KICKOFF_TIMEOUT_MS);
@@ -801,7 +811,6 @@
if (!ret) {
SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc),
wb_enc->frame_count);
-
irq_status = sde_core_irq_read(phys_enc->sde_kms,
wb_enc->irq_idx, true);
if (irq_status) {
@@ -811,10 +820,15 @@
} else {
SDE_ERROR("wb:%d kickoff timed out\n",
wb_enc->wb_dev->wb_idx - WB_0);
+ atomic_add_unless(
+ &phys_enc->pending_retire_fence_cnt, -1, 0);
+
+ event = SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE
+ | SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE
+ | SDE_ENCODER_FRAME_EVENT_ERROR;
if (phys_enc->parent_ops.handle_frame_done)
phys_enc->parent_ops.handle_frame_done(
- phys_enc->parent, phys_enc,
- SDE_ENCODER_FRAME_EVENT_ERROR);
+ phys_enc->parent, phys_enc, event);
rc = -ETIMEDOUT;
}
}
@@ -843,7 +857,7 @@
}
SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc), wb_enc->frame_count,
- wb_time);
+ wb_time, event, rc);
return rc;
}
@@ -904,22 +918,31 @@
* @pixel_format: DRM pixel format
* @width: Desired fb width
* @height: Desired fb height
+ * @pitch: Desired fb pitch
*/
static int _sde_encoder_phys_wb_init_internal_fb(
struct sde_encoder_phys_wb *wb_enc,
- uint32_t pixel_format, uint32_t width, uint32_t height)
+ uint32_t pixel_format, uint32_t width,
+ uint32_t height, uint32_t pitch)
{
struct drm_device *dev;
struct drm_framebuffer *fb;
struct drm_mode_fb_cmd2 mode_cmd;
uint32_t size;
int nplanes, i, ret;
+ struct msm_gem_address_space *aspace;
if (!wb_enc || !wb_enc->base.parent || !wb_enc->base.sde_kms) {
SDE_ERROR("invalid params\n");
return -EINVAL;
}
+ aspace = wb_enc->base.sde_kms->aspace[SDE_IOMMU_DOMAIN_UNSECURE];
+ if (!aspace) {
+ SDE_ERROR("invalid address space\n");
+ return -EINVAL;
+ }
+
dev = wb_enc->base.sde_kms->dev;
if (!dev) {
SDE_ERROR("invalid dev\n");
@@ -930,9 +953,11 @@
mode_cmd.pixel_format = pixel_format;
mode_cmd.width = width;
mode_cmd.height = height;
+ mode_cmd.pitches[0] = pitch;
size = sde_format_get_framebuffer_size(pixel_format,
- mode_cmd.width, mode_cmd.height, 0, 0);
+ mode_cmd.width, mode_cmd.height,
+ mode_cmd.pitches, NULL, 0);
if (!size) {
SDE_DEBUG("not creating zero size buffer\n");
return -EINVAL;
@@ -974,8 +999,7 @@
}
/* prepare the backing buffer now so that it's available later */
- ret = msm_framebuffer_prepare(fb,
- wb_enc->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE]);
+ ret = msm_framebuffer_prepare(fb, aspace);
if (!ret)
wb_enc->fb_disable = fb;
return ret;
@@ -1234,15 +1258,15 @@
phys_enc = &wb_enc->base;
if (p->sde_kms->vbif[VBIF_NRT]) {
- wb_enc->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] =
- p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_UNSECURE];
- wb_enc->mmu_id[SDE_IOMMU_DOMAIN_SECURE] =
- p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_SECURE];
+ wb_enc->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
+ p->sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_UNSECURE];
+ wb_enc->aspace[SDE_IOMMU_DOMAIN_SECURE] =
+ p->sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_SECURE];
} else {
- wb_enc->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] =
- p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_UNSECURE];
- wb_enc->mmu_id[SDE_IOMMU_DOMAIN_SECURE] =
- p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_SECURE];
+ wb_enc->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
+ p->sde_kms->aspace[MSM_SMMU_DOMAIN_UNSECURE];
+ wb_enc->aspace[SDE_IOMMU_DOMAIN_SECURE] =
+ p->sde_kms->aspace[MSM_SMMU_DOMAIN_SECURE];
}
hw_mdp = sde_rm_get_mdp(&p->sde_kms->rm);
@@ -1289,11 +1313,12 @@
phys_enc->intf_mode = INTF_MODE_WB_LINE;
phys_enc->intf_idx = p->intf_idx;
phys_enc->enc_spinlock = p->enc_spinlock;
+ atomic_set(&phys_enc->pending_retire_fence_cnt, 0);
INIT_LIST_HEAD(&wb_enc->irq_cb.list);
/* create internal buffer for disable logic */
if (_sde_encoder_phys_wb_init_internal_fb(wb_enc,
- DRM_FORMAT_RGB888, 2, 1)) {
+ DRM_FORMAT_RGB888, 2, 1, 6)) {
SDE_ERROR("failed to init internal fb\n");
goto fail_wb_init;
}
diff --git a/drivers/gpu/drm/msm/sde/sde_fence.c b/drivers/gpu/drm/msm/sde/sde_fence.c
index bd9fdac..b654e5a 100644
--- a/drivers/gpu/drm/msm/sde/sde_fence.c
+++ b/drivers/gpu/drm/msm/sde/sde_fence.c
@@ -338,7 +338,7 @@
return rc;
}
-void sde_fence_signal(struct sde_fence_context *ctx, bool is_error)
+void sde_fence_signal(struct sde_fence_context *ctx, ktime_t ts, bool is_error)
{
unsigned long flags;
struct sde_fence *fc, *next;
@@ -358,16 +358,19 @@
if ((int)(ctx->done_count - ctx->commit_count) < 0) {
++ctx->done_count;
SDE_DEBUG("fence_signal:done count:%d commit count:%d\n",
- ctx->commit_count, ctx->done_count);
+ ctx->done_count, ctx->commit_count);
} else {
SDE_ERROR("extra signal attempt! done count:%d commit:%d\n",
ctx->done_count, ctx->commit_count);
+ SDE_EVT32(ctx->drm_id, ctx->done_count, ctx->commit_count,
+ ktime_to_us(ts), SDE_EVTLOG_FATAL);
spin_unlock_irqrestore(&ctx->lock, flags);
return;
}
spin_unlock_irqrestore(&ctx->lock, flags);
- SDE_EVT32(ctx->drm_id, ctx->done_count);
+ SDE_EVT32(ctx->drm_id, ctx->done_count, ctx->commit_count,
+ ktime_to_us(ts));
spin_lock(&ctx->list_lock);
if (list_empty(&ctx->fence_list_head)) {
@@ -382,6 +385,7 @@
list_for_each_entry_safe(fc, next, &local_list_head, fence_list) {
spin_lock_irqsave(&ctx->lock, flags);
+ fc->base.timestamp = ts;
is_signaled = fence_is_signaled_locked(&fc->base);
spin_unlock_irqrestore(&ctx->lock, flags);
diff --git a/drivers/gpu/drm/msm/sde/sde_fence.h b/drivers/gpu/drm/msm/sde/sde_fence.h
index 207f29c..51afdae 100644
--- a/drivers/gpu/drm/msm/sde/sde_fence.h
+++ b/drivers/gpu/drm/msm/sde/sde_fence.h
@@ -127,9 +127,11 @@
/**
* sde_fence_signal - advance fence timeline to signal outstanding fences
* @fence: Pointer fence container
+ * @ts: fence timestamp
* @is_error: Set to non-zero if the commit didn't complete successfully
*/
-void sde_fence_signal(struct sde_fence_context *fence, bool is_error);
+void sde_fence_signal(struct sde_fence_context *fence, ktime_t ts,
+ bool is_error);
#else
static inline void *sde_sync_get(uint64_t fd)
{
@@ -168,7 +170,7 @@
}
static inline void sde_fence_signal(struct sde_fence_context *fence,
- bool is_error)
+ ktime_t ts, bool is_error)
{
/* do nothing */
}
diff --git a/drivers/gpu/drm/msm/sde/sde_formats.c b/drivers/gpu/drm/msm/sde/sde_formats.c
index c3477b5..3acf4c9 100644
--- a/drivers/gpu/drm/msm/sde/sde_formats.c
+++ b/drivers/gpu/drm/msm/sde/sde_formats.c
@@ -701,7 +701,8 @@
const struct sde_format *fmt,
const uint32_t width,
const uint32_t height,
- struct sde_hw_fmt_layout *layout)
+ struct sde_hw_fmt_layout *layout,
+ const uint32_t *pitches)
{
int i;
@@ -751,6 +752,17 @@
}
}
+ /*
+ * linear format: allow user allocated pitches if they are greater than
+ * the requirement.
+ * ubwc format: pitch values are computed uniformly across
+ * all the components based on ubwc specifications.
+ */
+ for (i = 0; i < layout->num_planes && i < SDE_MAX_PLANES; ++i) {
+ if (pitches && layout->plane_pitch[i] < pitches[i])
+ layout->plane_pitch[i] = pitches[i];
+ }
+
for (i = 0; i < SDE_MAX_PLANES; i++)
layout->total_size += layout->plane_size[i];
@@ -761,7 +773,8 @@
const struct sde_format *fmt,
const uint32_t w,
const uint32_t h,
- struct sde_hw_fmt_layout *layout)
+ struct sde_hw_fmt_layout *layout,
+ const uint32_t *pitches)
{
if (!layout || !fmt) {
DRM_ERROR("invalid pointer\n");
@@ -776,7 +789,7 @@
if (SDE_FORMAT_IS_UBWC(fmt) || SDE_FORMAT_IS_TILE(fmt))
return _sde_format_get_plane_sizes_ubwc(fmt, w, h, layout);
- return _sde_format_get_plane_sizes_linear(fmt, w, h, layout);
+ return _sde_format_get_plane_sizes_linear(fmt, w, h, layout, pitches);
}
int sde_format_get_block_size(const struct sde_format *fmt,
@@ -801,6 +814,7 @@
const uint32_t format,
const uint32_t width,
const uint32_t height,
+ const uint32_t *pitches,
const uint64_t *modifiers,
const uint32_t modifiers_len)
{
@@ -811,14 +825,17 @@
if (!fmt)
return 0;
- if (sde_format_get_plane_sizes(fmt, width, height, &layout))
+ if (!pitches)
+ return -EINVAL;
+
+ if (sde_format_get_plane_sizes(fmt, width, height, &layout, pitches))
layout.total_size = 0;
return layout.total_size;
}
static int _sde_format_populate_addrs_ubwc(
- int mmu_id,
+ struct msm_gem_address_space *aspace,
struct drm_framebuffer *fb,
struct sde_hw_fmt_layout *layout)
{
@@ -830,7 +847,7 @@
return -EINVAL;
}
- base_addr = msm_framebuffer_iova(fb, mmu_id, 0);
+ base_addr = msm_framebuffer_iova(fb, aspace, 0);
if (!base_addr) {
DRM_ERROR("failed to retrieve base addr\n");
return -EFAULT;
@@ -909,7 +926,7 @@
}
static int _sde_format_populate_addrs_linear(
- int mmu_id,
+ struct msm_gem_address_space *aspace,
struct drm_framebuffer *fb,
struct sde_hw_fmt_layout *layout)
{
@@ -917,7 +934,7 @@
/* Can now check the pitches given vs pitches expected */
for (i = 0; i < layout->num_planes; ++i) {
- if (layout->plane_pitch[i] != fb->pitches[i]) {
+ if (layout->plane_pitch[i] > fb->pitches[i]) {
DRM_ERROR("plane %u expected pitch %u, fb %u\n",
i, layout->plane_pitch[i], fb->pitches[i]);
return -EINVAL;
@@ -926,7 +943,7 @@
/* Populate addresses for simple formats here */
for (i = 0; i < layout->num_planes; ++i) {
- layout->plane_addr[i] = msm_framebuffer_iova(fb, mmu_id, i);
+ layout->plane_addr[i] = msm_framebuffer_iova(fb, aspace, i);
if (!layout->plane_addr[i]) {
DRM_ERROR("failed to retrieve base addr\n");
return -EFAULT;
@@ -937,7 +954,7 @@
}
int sde_format_populate_layout(
- int mmu_id,
+ struct msm_gem_address_space *aspace,
struct drm_framebuffer *fb,
struct sde_hw_fmt_layout *layout)
{
@@ -959,7 +976,7 @@
/* Populate the plane sizes etc via get_format */
ret = sde_format_get_plane_sizes(layout->format, fb->width, fb->height,
- layout);
+ layout, fb->pitches);
if (ret)
return ret;
@@ -969,9 +986,9 @@
/* Populate the addresses given the fb */
if (SDE_FORMAT_IS_UBWC(layout->format) ||
SDE_FORMAT_IS_TILE(layout->format))
- ret = _sde_format_populate_addrs_ubwc(mmu_id, fb, layout);
+ ret = _sde_format_populate_addrs_ubwc(aspace, fb, layout);
else
- ret = _sde_format_populate_addrs_linear(mmu_id, fb, layout);
+ ret = _sde_format_populate_addrs_linear(aspace, fb, layout);
/* check if anything changed */
if (!ret && !memcmp(plane_addr, layout->plane_addr, sizeof(plane_addr)))
@@ -1013,14 +1030,14 @@
}
int sde_format_populate_layout_with_roi(
- int mmu_id,
+ struct msm_gem_address_space *aspace,
struct drm_framebuffer *fb,
struct sde_rect *roi,
struct sde_hw_fmt_layout *layout)
{
int ret;
- ret = sde_format_populate_layout(mmu_id, fb, layout);
+ ret = sde_format_populate_layout(aspace, fb, layout);
if (ret || !roi)
return ret;
@@ -1063,7 +1080,7 @@
num_base_fmt_planes = drm_format_num_planes(fmt->base.pixel_format);
ret = sde_format_get_plane_sizes(fmt, cmd->width, cmd->height,
- &layout);
+ &layout, cmd->pitches);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/msm/sde/sde_formats.h b/drivers/gpu/drm/msm/sde/sde_formats.h
index 40aab22..58065ab 100644
--- a/drivers/gpu/drm/msm/sde/sde_formats.h
+++ b/drivers/gpu/drm/msm/sde/sde_formats.h
@@ -14,6 +14,7 @@
#define _SDE_FORMATS_H
#include <drm/drm_fourcc.h>
+#include "msm_gem.h"
#include "sde_hw_mdss.h"
/**
@@ -63,6 +64,8 @@
* @w: width of the buffer
* @h: height of the buffer
* @layout: layout of the buffer
+ * @pitches: array of size [SDE_MAX_PLANES] to populate
+ * pitch for each plane
*
* Return: size of the buffer
*/
@@ -70,7 +73,8 @@
const struct sde_format *fmt,
const uint32_t w,
const uint32_t h,
- struct sde_hw_fmt_layout *layout);
+ struct sde_hw_fmt_layout *layout,
+ const uint32_t *pitches);
/**
* sde_format_get_block_size - get block size of given format when
@@ -103,7 +107,7 @@
/**
* sde_format_populate_layout - populate the given format layout based on
* mmu, fb, and format found in the fb
- * @mmu_id: mmu id handle
+ * @aspace: address space pointer
* @fb: framebuffer pointer
* @fmtl: format layout structure to populate
*
@@ -111,14 +115,14 @@
* are the same as before or 0 if new addresses were populated
*/
int sde_format_populate_layout(
- int mmu_id,
+ struct msm_gem_address_space *aspace,
struct drm_framebuffer *fb,
struct sde_hw_fmt_layout *fmtl);
/**
* sde_format_populate_layout_with_roi - populate the given format layout
* based on mmu, fb, roi, and format found in the fb
- * @mmu_id: mmu id handle
+ * @aspace: address space pointer
* @fb: framebuffer pointer
* @roi: region of interest (optional)
* @fmtl: format layout structure to populate
@@ -126,7 +130,7 @@
* Return: error code on failure, 0 on success
*/
int sde_format_populate_layout_with_roi(
- int mmu_id,
+ struct msm_gem_address_space *aspace,
struct drm_framebuffer *fb,
struct sde_rect *roi,
struct sde_hw_fmt_layout *fmtl);
@@ -136,6 +140,8 @@
* @format: DRM pixel format
* @width: pixel width
* @height: pixel height
+ * @pitches: array of size [SDE_MAX_PLANES] to populate
+ * pitch for each plane
* @modifiers: array to populate with drm modifiers, can be NULL
* @modifiers_len: length of modifers array
*
@@ -145,6 +151,7 @@
const uint32_t format,
const uint32_t width,
const uint32_t height,
+ const uint32_t *pitches,
const uint64_t *modifiers,
const uint32_t modifiers_len);
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ad4.c b/drivers/gpu/drm/msm/sde/sde_hw_ad4.c
index b02cc06..5307464 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ad4.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ad4.c
@@ -19,6 +19,7 @@
#define IDLE_2_RUN(x) ((x) == (ad4_init | ad4_cfg | ad4_mode | ad4_input))
#define MERGE_WIDTH_RIGHT 6
#define MERGE_WIDTH_LEFT 5
+#define AD_IPC_FRAME_COUNT 2
enum ad4_ops_bitmask {
ad4_init = BIT(AD_INIT),
@@ -31,34 +32,66 @@
enum ad4_state {
ad4_state_idle,
ad4_state_run,
+ /* idle power collapse resume state */
+ ad4_state_ipcr,
ad4_state_max,
};
typedef int (*ad4_prop_setup)(struct sde_hw_dspp *dspp,
struct sde_ad_hw_cfg *ad);
+static int ad4_params_check(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg);
+
+static int ad4_no_op_setup(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg);
+static int ad4_mode_setup(struct sde_hw_dspp *dspp, enum ad4_modes mode);
static int ad4_mode_setup_common(struct sde_hw_dspp *dspp,
struct sde_ad_hw_cfg *cfg);
+static int ad4_init_setup(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg);
static int ad4_init_setup_idle(struct sde_hw_dspp *dspp,
struct sde_ad_hw_cfg *cfg);
+static int ad4_init_setup_run(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg);
+static int ad4_init_setup_ipcr(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg);
+static int ad4_cfg_setup(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg);
static int ad4_cfg_setup_idle(struct sde_hw_dspp *dspp,
struct sde_ad_hw_cfg *cfg);
+static int ad4_cfg_setup_run(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg);
+static int ad4_cfg_setup_ipcr(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg);
+static int ad4_input_setup(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg);
static int ad4_input_setup_idle(struct sde_hw_dspp *dspp,
struct sde_ad_hw_cfg *cfg);
-static int ad4_mode_setup(struct sde_hw_dspp *dspp, enum ad4_modes mode);
-static int ad4_init_setup(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg);
-static int ad4_cfg_setup(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg);
-static int ad4_input_setup(struct sde_hw_dspp *dspp,
+static int ad4_input_setup_ipcr(struct sde_hw_dspp *dspp,
struct sde_ad_hw_cfg *cfg);
static int ad4_suspend_setup(struct sde_hw_dspp *dspp,
struct sde_ad_hw_cfg *cfg);
-static int ad4_params_check(struct sde_hw_dspp *dspp,
- struct sde_ad_hw_cfg *cfg);
static int ad4_assertive_setup(struct sde_hw_dspp *dspp,
struct sde_ad_hw_cfg *cfg);
static int ad4_backlight_setup(struct sde_hw_dspp *dspp,
struct sde_ad_hw_cfg *cfg);
+static int ad4_ipc_suspend_setup_run(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg);
+static int ad4_ipc_resume_setup_run(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg);
+static int ad4_ipc_resume_setup_ipcr(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg);
+static int ad4_ipc_reset_setup_ipcr(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg);
+static int ad4_mem_init_enable(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg);
+static int ad4_mem_init_disable(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg);
+static int ad4_cfg_ipc_resume(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg);
+static int ad4_cfg_ipc_reset(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg);
+
static ad4_prop_setup prop_set_func[ad4_state_max][AD_PROPMAX] = {
[ad4_state_idle][AD_MODE] = ad4_mode_setup_common,
[ad4_state_idle][AD_INIT] = ad4_init_setup_idle,
@@ -67,13 +100,29 @@
[ad4_state_idle][AD_SUSPEND] = ad4_suspend_setup,
[ad4_state_idle][AD_ASSERTIVE] = ad4_assertive_setup,
[ad4_state_idle][AD_BACKLIGHT] = ad4_backlight_setup,
+ [ad4_state_idle][AD_IPC_SUSPEND] = ad4_no_op_setup,
+ [ad4_state_idle][AD_IPC_RESUME] = ad4_no_op_setup,
+ [ad4_state_idle][AD_IPC_RESET] = ad4_no_op_setup,
[ad4_state_run][AD_MODE] = ad4_mode_setup_common,
- [ad4_state_run][AD_INIT] = ad4_init_setup,
- [ad4_state_run][AD_CFG] = ad4_cfg_setup,
+ [ad4_state_run][AD_INIT] = ad4_init_setup_run,
+ [ad4_state_run][AD_CFG] = ad4_cfg_setup_run,
[ad4_state_run][AD_INPUT] = ad4_input_setup,
[ad4_state_run][AD_SUSPEND] = ad4_suspend_setup,
[ad4_state_run][AD_ASSERTIVE] = ad4_assertive_setup,
[ad4_state_run][AD_BACKLIGHT] = ad4_backlight_setup,
+ [ad4_state_run][AD_IPC_SUSPEND] = ad4_ipc_suspend_setup_run,
+ [ad4_state_run][AD_IPC_RESUME] = ad4_ipc_resume_setup_run,
+ [ad4_state_run][AD_IPC_RESET] = ad4_no_op_setup,
+ [ad4_state_ipcr][AD_MODE] = ad4_mode_setup_common,
+ [ad4_state_ipcr][AD_INIT] = ad4_init_setup_ipcr,
+ [ad4_state_ipcr][AD_CFG] = ad4_cfg_setup_ipcr,
+ [ad4_state_ipcr][AD_INPUT] = ad4_input_setup_ipcr,
+ [ad4_state_ipcr][AD_SUSPEND] = ad4_suspend_setup,
+ [ad4_state_ipcr][AD_ASSERTIVE] = ad4_assertive_setup,
+ [ad4_state_ipcr][AD_BACKLIGHT] = ad4_backlight_setup,
+ [ad4_state_ipcr][AD_IPC_SUSPEND] = ad4_no_op_setup,
+ [ad4_state_ipcr][AD_IPC_RESUME] = ad4_ipc_resume_setup_ipcr,
+ [ad4_state_ipcr][AD_IPC_RESET] = ad4_ipc_reset_setup_ipcr,
};
struct ad4_info {
@@ -81,14 +130,19 @@
u32 completed_ops_mask;
bool ad4_support;
enum ad4_modes cached_mode;
+ bool is_master;
+ u32 frame_count;
+ u32 tf_ctrl;
+ u32 vc_control_0;
+ u32 last_str;
u32 cached_als;
};
static struct ad4_info info[DSPP_MAX] = {
- [DSPP_0] = {ad4_state_idle, 0, true, AD4_OFF},
- [DSPP_1] = {ad4_state_idle, 0, true, AD4_OFF},
- [DSPP_2] = {ad4_state_max, 0, false, AD4_OFF},
- [DSPP_3] = {ad4_state_max, 0, false, AD4_OFF},
+ [DSPP_0] = {ad4_state_idle, 0, true, AD4_OFF, false},
+ [DSPP_1] = {ad4_state_idle, 0, true, AD4_OFF, false},
+ [DSPP_2] = {ad4_state_max, 0, false, AD4_OFF, false},
+ [DSPP_3] = {ad4_state_max, 0, false, AD4_OFF, false},
};
void sde_setup_dspp_ad4(struct sde_hw_dspp *dspp, void *ad_cfg)
@@ -118,7 +172,7 @@
return -EINVAL;
}
- if (dspp->idx > DSPP_MAX || !info[dspp->idx].ad4_support) {
+ if (dspp->idx >= DSPP_MAX || !info[dspp->idx].ad4_support) {
DRM_ERROR("ad4 not supported for dspp idx %d\n", dspp->idx);
return -EINVAL;
}
@@ -142,7 +196,7 @@
return -EINVAL;
}
- if (dspp->idx > DSPP_MAX || !info[dspp->idx].ad4_support) {
+ if (dspp->idx >= DSPP_MAX || !info[dspp->idx].ad4_support) {
DRM_ERROR("ad4 not supported for dspp idx %d\n", dspp->idx);
return -EINVAL;
}
@@ -170,6 +224,10 @@
return -EINVAL;
}
hw_lm = cfg->hw_cfg->mixer_info;
+ if (!hw_lm) {
+ DRM_ERROR("invalid mixer info\n");
+ return -EINVAL;
+ }
if (cfg->hw_cfg->num_of_mixers == 1 &&
hw_lm->cfg.out_height != cfg->hw_cfg->displayv &&
@@ -179,7 +237,7 @@
cfg->hw_cfg->displayh, cfg->hw_cfg->displayv);
return -EINVAL;
} else if (hw_lm->cfg.out_height != cfg->hw_cfg->displayv &&
- hw_lm->cfg.out_width != (cfg->hw_cfg->displayh >> 1)) {
+ hw_lm->cfg.out_width != (cfg->hw_cfg->displayh >> 1)) {
DRM_ERROR("dual_lm lmh %d lmw %d displayh %d displayw %d\n",
hw_lm->cfg.out_height, hw_lm->cfg.out_width,
cfg->hw_cfg->displayh, cfg->hw_cfg->displayv);
@@ -189,6 +247,11 @@
return 0;
}
+static int ad4_no_op_setup(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg)
+{
+ return 0;
+}
+
static int ad4_mode_setup(struct sde_hw_dspp *dspp, enum ad4_modes mode)
{
u32 blk_offset;
@@ -200,9 +263,10 @@
info[dspp->idx].state = ad4_state_idle;
info[dspp->idx].completed_ops_mask = 0;
} else {
- info[dspp->idx].state = ad4_state_run;
+ if (info[dspp->idx].state == ad4_state_idle)
+ info[dspp->idx].state = ad4_state_run;
SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
- 0);
+ 0x100);
}
return 0;
@@ -235,6 +299,7 @@
proc_start = 0;
proc_end = 0xffff;
tile_ctl = 0;
+ info[dspp->idx].is_master = true;
} else {
tile_ctl = 0x5;
if (hw_lm->cfg.right_mixer) {
@@ -244,6 +309,7 @@
proc_start = (cfg->hw_cfg->displayh >> 1);
proc_end = frame_end;
tile_ctl |= 0x10;
+ info[dspp->idx].is_master = false;
} else {
frame_start = 0;
frame_end = (cfg->hw_cfg->displayh >> 1) +
@@ -251,23 +317,21 @@
proc_start = 0;
proc_end = (cfg->hw_cfg->displayh >> 1) - 1;
tile_ctl |= 0x10;
+ info[dspp->idx].is_master = true;
}
}
init = cfg->hw_cfg->payload;
- blk_offset = 8;
- SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
- init->init_param_009);
blk_offset = 0xc;
SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
- init->init_param_010);
+ init->init_param_010);
init->init_param_012 = cfg->hw_cfg->displayv & (BIT(17) - 1);
init->init_param_011 = cfg->hw_cfg->displayh & (BIT(17) - 1);
blk_offset = 0x10;
SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
- ((init->init_param_011 << 16) | init->init_param_012));
+ ((init->init_param_011 << 16) | init->init_param_012));
blk_offset = 0x14;
SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
@@ -275,8 +339,8 @@
blk_offset = 0x44;
SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
- ((((init->init_param_013) & (BIT(17) - 1)) << 16) |
- (init->init_param_014 & (BIT(17) - 1))));
+ ((((init->init_param_013) & (BIT(17) - 1)) << 16) |
+ (init->init_param_014 & (BIT(17) - 1))));
blk_offset = 0x5c;
SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
@@ -583,23 +647,25 @@
val = (ad_cfg->cfg_param_004 & (BIT(16) - 1));
val |= ((ad_cfg->cfg_param_003 & (BIT(16) - 1)) << 16);
SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
- blk_offset += 4;
+
+ blk_offset = 0x20;
val = (ad_cfg->cfg_param_005 & (BIT(8) - 1));
SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
- blk_offset += 4;
+ blk_offset = 0x24;
val = (ad_cfg->cfg_param_006 & (BIT(7) - 1));
SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
blk_offset = 0x30;
val = (ad_cfg->cfg_param_007 & (BIT(8) - 1));
SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
- blk_offset += 4;
- val = (ad_cfg->cfg_param_008 & (BIT(8) - 1));
- SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
- blk_offset += 4;
+
+ info[dspp->idx].tf_ctrl = (ad_cfg->cfg_param_008 & (BIT(8) - 1));
+
+ blk_offset = 0x38;
val = (ad_cfg->cfg_param_009 & (BIT(10) - 1));
SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
- blk_offset += 4;
+
+ blk_offset = 0x3c;
val = (ad_cfg->cfg_param_010 & (BIT(12) - 1));
SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
blk_offset += 4;
@@ -607,7 +673,6 @@
val |= (ad_cfg->cfg_param_012 & (BIT(16) - 1));
SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-
blk_offset = 0x88;
val = (ad_cfg->cfg_param_013 & (BIT(8) - 1));
SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
@@ -697,14 +762,10 @@
blk_offset = 0x134;
val = (ad_cfg->cfg_param_040 & (BIT(12) - 1));
SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
- blk_offset += 4;
- val = (ad_cfg->cfg_param_041 & (BIT(7) - 1));
- SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
- blk_offset = 0x15c;
- val = (ad_cfg->cfg_param_042 & (BIT(10) - 1));
- SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
- blk_offset += 4;
+ info[dspp->idx].vc_control_0 = (ad_cfg->cfg_param_041 & (BIT(7) - 1));
+
+ blk_offset += 160;
val = (ad_cfg->cfg_param_043 & (BIT(10) - 1));
SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
@@ -791,6 +852,52 @@
if (ret)
return ret;
+ ret = ad4_mem_init_enable(dspp, cfg);
+ if (ret)
+ return ret;
+
+ info[dspp->idx].completed_ops_mask |= ad4_init;
+
+ if (IDLE_2_RUN(info[dspp->idx].completed_ops_mask))
+ ad4_mode_setup(dspp, info[dspp->idx].cached_mode);
+
+ return 0;
+}
+
+static int ad4_init_setup_run(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg)
+{
+ int ret;
+
+ if (!cfg->hw_cfg->payload) {
+ info[dspp->idx].completed_ops_mask &= ~ad4_init;
+ return 0;
+ }
+
+ ret = ad4_init_setup(dspp, cfg);
+ if (ret)
+ return ret;
+ ret = ad4_mem_init_disable(dspp, cfg);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ad4_init_setup_ipcr(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg)
+{
+ int ret;
+
+ if (!cfg->hw_cfg->payload) {
+ info[dspp->idx].completed_ops_mask &= ~ad4_init;
+ return 0;
+ }
+
+ ret = ad4_init_setup(dspp, cfg);
+ if (ret)
+ return ret;
+
info[dspp->idx].completed_ops_mask |= ad4_init;
if (IDLE_2_RUN(info[dspp->idx].completed_ops_mask))
@@ -812,6 +919,52 @@
ret = ad4_cfg_setup(dspp, cfg);
if (ret)
return ret;
+ ret = ad4_cfg_ipc_reset(dspp, cfg);
+ if (ret)
+ return ret;
+
+ info[dspp->idx].completed_ops_mask |= ad4_cfg;
+ if (IDLE_2_RUN(info[dspp->idx].completed_ops_mask))
+ ad4_mode_setup(dspp, info[dspp->idx].cached_mode);
+ return 0;
+}
+
+static int ad4_cfg_setup_run(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg)
+{
+ int ret;
+
+ if (!cfg->hw_cfg->payload) {
+ info[dspp->idx].completed_ops_mask &= ~ad4_cfg;
+ return 0;
+ }
+
+ ret = ad4_cfg_setup(dspp, cfg);
+ if (ret)
+ return ret;
+ ret = ad4_cfg_ipc_reset(dspp, cfg);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ad4_cfg_setup_ipcr(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg)
+{
+ int ret;
+
+ if (!cfg->hw_cfg->payload) {
+ info[dspp->idx].completed_ops_mask &= ~ad4_cfg;
+ return 0;
+ }
+
+ ret = ad4_cfg_setup(dspp, cfg);
+ if (ret)
+ return ret;
+ ret = ad4_cfg_ipc_resume(dspp, cfg);
+ if (ret)
+ return ret;
info[dspp->idx].completed_ops_mask |= ad4_cfg;
if (IDLE_2_RUN(info[dspp->idx].completed_ops_mask))
@@ -835,6 +988,22 @@
return 0;
}
+static int ad4_input_setup_ipcr(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg)
+{
+ int ret;
+
+ ret = ad4_input_setup(dspp, cfg);
+ if (ret)
+ return ret;
+
+ info[dspp->idx].completed_ops_mask |= ad4_input;
+ if (IDLE_2_RUN(info[dspp->idx].completed_ops_mask))
+ ad4_mode_setup(dspp, info[dspp->idx].cached_mode);
+
+ return 0;
+}
+
static int ad4_assertive_setup(struct sde_hw_dspp *dspp,
struct sde_ad_hw_cfg *cfg)
{
@@ -900,3 +1069,182 @@
break;
}
}
+
+static int ad4_ipc_suspend_setup_run(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg)
+{
+ u32 strength = 0, i = 0;
+ struct sde_hw_mixer *hw_lm;
+
+ hw_lm = cfg->hw_cfg->mixer_info;
+ if ((cfg->hw_cfg->num_of_mixers == 2) && hw_lm->cfg.right_mixer) {
+		/* this AD core is the slave core */
+ for (i = DSPP_0; i < DSPP_MAX; i++) {
+ if (info[i].is_master) {
+ strength = info[i].last_str;
+ break;
+ }
+ }
+ } else {
+ strength = SDE_REG_READ(&dspp->hw,
+ dspp->cap->sblk->ad.base + 0x4c);
+ }
+ info[dspp->idx].last_str = strength;
+
+ return 0;
+}
+
+static int ad4_ipc_resume_setup_run(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg)
+{
+ int ret;
+
+ info[dspp->idx].state = ad4_state_ipcr;
+
+ info[dspp->idx].frame_count = 0;
+ ret = ad4_cfg_ipc_resume(dspp, cfg);
+
+ return ret;
+}
+
+static int ad4_ipc_resume_setup_ipcr(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg)
+{
+ info[dspp->idx].frame_count = 0;
+ return 0;
+}
+
+static int ad4_ipc_reset_setup_ipcr(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg)
+{
+ int ret;
+ u32 strength = 0, i = 0;
+ struct sde_hw_mixer *hw_lm;
+
+	/* Read AD calculator strength output during the 2 frames of manual
+	 * strength mode, and assign the strength output to last_str
+	 * when frame count reaches AD_IPC_FRAME_COUNT to avoid flickers
+	 * caused by the strength not having converged before entering IPC mode
+	 */
+ hw_lm = cfg->hw_cfg->mixer_info;
+ if ((cfg->hw_cfg->num_of_mixers == 2) && hw_lm->cfg.right_mixer) {
+		/* this AD core is the slave core */
+ for (i = DSPP_0; i < DSPP_MAX; i++) {
+ if (info[i].is_master) {
+ strength = info[i].last_str;
+ break;
+ }
+ }
+ } else {
+ strength = SDE_REG_READ(&dspp->hw,
+ dspp->cap->sblk->ad.base + 0x4c);
+ }
+
+ if (info[dspp->idx].frame_count == AD_IPC_FRAME_COUNT) {
+ info[dspp->idx].state = ad4_state_run;
+ info[dspp->idx].last_str = strength;
+ ret = ad4_cfg_ipc_reset(dspp, cfg);
+ if (ret)
+ return ret;
+ } else {
+ info[dspp->idx].frame_count++;
+ }
+
+ return 0;
+}
+
+static int ad4_mem_init_enable(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg)
+{
+ u32 blk_offset;
+ struct drm_msm_ad4_init *init;
+
+ if (!cfg->hw_cfg->payload) {
+ info[dspp->idx].completed_ops_mask &= ~ad4_init;
+ return 0;
+ }
+
+ if (cfg->hw_cfg->len != sizeof(struct drm_msm_ad4_init)) {
+ DRM_ERROR("invalid sz param exp %zd given %d cfg %pK\n",
+ sizeof(struct drm_msm_ad4_init), cfg->hw_cfg->len,
+ cfg->hw_cfg->payload);
+ return -EINVAL;
+ }
+
+ init = cfg->hw_cfg->payload;
+ blk_offset = 0x8;
+ SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+ (init->init_param_009 & 0xdfff));
+ blk_offset = 0x450;
+ SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, 1);
+
+ return 0;
+}
+
+static int ad4_mem_init_disable(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg)
+{
+ u32 blk_offset;
+ struct drm_msm_ad4_init *init;
+
+ if (!cfg->hw_cfg->payload) {
+ info[dspp->idx].completed_ops_mask &= ~ad4_init;
+ return 0;
+ }
+
+ if (cfg->hw_cfg->len != sizeof(struct drm_msm_ad4_init)) {
+ DRM_ERROR("invalid sz param exp %zd given %d cfg %pK\n",
+ sizeof(struct drm_msm_ad4_init), cfg->hw_cfg->len,
+ cfg->hw_cfg->payload);
+ return -EINVAL;
+ }
+
+ init = cfg->hw_cfg->payload;
+ blk_offset = 0x8;
+ SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+ (init->init_param_009 | 0x2000));
+ blk_offset = 0x450;
+ SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, 0);
+
+ return 0;
+}
+
+static int ad4_cfg_ipc_resume(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg)
+{
+ u32 blk_offset, val;
+
+ /* disable temporal filters */
+ blk_offset = 0x34;
+ val = (0x55 & (BIT(8) - 1));
+ SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+ /* set manual strength */
+ blk_offset = 0x15c;
+ val = (info[dspp->idx].last_str & (BIT(10) - 1));
+ SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	/* enable manual mode */
+ blk_offset = 0x138;
+ SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, 0);
+
+ return 0;
+}
+
+static int ad4_cfg_ipc_reset(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg)
+{
+ u32 blk_offset;
+
+ /* enable temporal filters */
+ blk_offset = 0x34;
+ SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+ info[dspp->idx].tf_ctrl);
+
+	/* disable manual mode */
+ blk_offset = 0x138;
+ SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+ info[dspp->idx].vc_control_0);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index 1cbbe1e..b1772ed 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -27,11 +27,11 @@
/**
* Max hardware block in certain hardware. For ex: sspp pipes
- * can have QSEED, pcc, igc, pa, csc, etc. This count is max
- * 12 based on software design. It should be increased if any of the
+ * can have QSEED, pcc, igc, pa, csc, qos entries, etc. This count is
+ * 64 based on software design. It should be increased if any of the
* hardware block has more subblocks.
*/
-#define MAX_SDE_HW_BLK 12
+#define MAX_SDE_HW_BLK 64
/* each entry will have register address and bit offset in that register */
#define MAX_BIT_OFFSET 2
@@ -217,6 +217,9 @@
TE2_OFF,
TE2_LEN,
PP_SLAVE,
+ DITHER_OFF,
+ DITHER_LEN,
+ DITHER_VER,
PP_PROP_MAX,
};
@@ -227,6 +230,12 @@
};
enum {
+ DSPP_TOP_OFF,
+ DSPP_TOP_SIZE,
+ DSPP_TOP_PROP_MAX,
+};
+
+enum {
DSPP_OFF,
DSPP_SIZE,
DSPP_BLOCKS,
@@ -460,6 +469,11 @@
{MIXER_GC_PROP, "qcom,sde-mixer-gc", false, PROP_TYPE_U32_ARRAY},
};
+static struct sde_prop_type dspp_top_prop[] = {
+ {DSPP_TOP_OFF, "qcom,sde-dspp-top-off", true, PROP_TYPE_U32},
+ {DSPP_TOP_SIZE, "qcom,sde-dspp-top-size", false, PROP_TYPE_U32},
+};
+
static struct sde_prop_type dspp_prop[] = {
{DSPP_OFF, "qcom,sde-dspp-off", true, PROP_TYPE_U32_ARRAY},
{DSPP_SIZE, "qcom,sde-dspp-size", false, PROP_TYPE_U32},
@@ -494,6 +508,9 @@
{TE2_OFF, "qcom,sde-te2-off", false, PROP_TYPE_U32_ARRAY},
{TE2_LEN, "qcom,sde-te2-size", false, PROP_TYPE_U32},
{PP_SLAVE, "qcom,sde-pp-slave", false, PROP_TYPE_U32_ARRAY},
+ {DITHER_OFF, "qcom,sde-dither-off", false, PROP_TYPE_U32_ARRAY},
+ {DITHER_LEN, "qcom,sde-dither-size", false, PROP_TYPE_U32},
+ {DITHER_VER, "qcom,sde-dither-version", false, PROP_TYPE_U32},
};
static struct sde_prop_type dsc_prop[] = {
@@ -569,8 +586,16 @@
static int _parse_dt_u32_handler(struct device_node *np,
char *prop_name, u32 *offsets, int len, bool mandatory)
{
- int rc = of_property_read_u32_array(np, prop_name, offsets, len);
+ int rc = -EINVAL;
+ if (len > MAX_SDE_HW_BLK) {
+ SDE_ERROR(
+ "prop: %s tries out of bound access for u32 array read len: %d\n",
+ prop_name, len);
+ return -E2BIG;
+ }
+
+ rc = of_property_read_u32_array(np, prop_name, offsets, len);
if (rc && mandatory)
SDE_ERROR("mandatory prop: %s u32 array read len:%d\n",
prop_name, len);
@@ -592,6 +617,14 @@
if (arr) {
len /= sizeof(u32);
len &= ~0x1;
+
+ if (len > (MAX_SDE_HW_BLK * MAX_BIT_OFFSET)) {
+ SDE_ERROR(
+ "prop: %s len: %d will lead to out of bound access\n",
+ prop_name, len / MAX_BIT_OFFSET);
+ return -E2BIG;
+ }
+
for (i = 0, j = 0; i < len; j++) {
PROP_BITVALUE_ACCESS(prop_value, prop_index, j, 0) =
be32_to_cpu(arr[i]);
@@ -626,8 +659,8 @@
sde_prop[0].prop_name);
if ((*off_count > MAX_BLOCKS) || (*off_count < 0)) {
if (sde_prop[0].is_mandatory) {
- SDE_ERROR("invalid hw offset prop name:%s\"\
- count: %d\n",
+ SDE_ERROR(
+ "invalid hw offset prop name:%s count: %d\n",
sde_prop[0].prop_name, *off_count);
rc = -EINVAL;
}
@@ -670,8 +703,9 @@
sde_prop[i].type);
break;
}
- SDE_DEBUG("prop id:%d prop name:%s prop type:%d \"\
- prop_count:%d\n", i, sde_prop[i].prop_name,
+ SDE_DEBUG(
+ "prop id:%d prop name:%s prop type:%d prop_count:%d\n",
+ i, sde_prop[i].prop_name,
sde_prop[i].type, prop_count[i]);
if (rc && sde_prop[i].is_mandatory &&
@@ -689,14 +723,16 @@
if (off_count && (prop_count[i] != *off_count) &&
sde_prop[i].is_mandatory) {
- SDE_ERROR("prop:%s count:%d is different compared to \"\
- offset array:%d\n", sde_prop[i].prop_name,
+ SDE_ERROR(
+ "prop:%s count:%d is different compared to offset array:%d\n",
+ sde_prop[i].prop_name,
prop_count[i], *off_count);
rc = -EINVAL;
goto end;
} else if (off_count && prop_count[i] != *off_count) {
- SDE_DEBUG("prop:%s count:%d is different compared to \"\
- offset array:%d\n", sde_prop[i].prop_name,
+ SDE_DEBUG(
+ "prop:%s count:%d is different compared to offset array:%d\n",
+ sde_prop[i].prop_name,
prop_count[i], *off_count);
rc = 0;
prop_count[i] = 0;
@@ -732,8 +768,9 @@
case PROP_TYPE_U32:
rc = of_property_read_u32(np, sde_prop[i].prop_name,
&PROP_VALUE_ACCESS(prop_value, i, 0));
- SDE_DEBUG("prop id:%d prop name:%s prop type:%d \"\
- value:0x%x\n", i, sde_prop[i].prop_name,
+ SDE_DEBUG(
+ "prop id:%d prop name:%s prop type:%d value:0x%x\n",
+ i, sde_prop[i].prop_name,
sde_prop[i].type,
PROP_VALUE_ACCESS(prop_value, i, 0));
if (rc)
@@ -743,8 +780,9 @@
PROP_VALUE_ACCESS(prop_value, i, 0) =
of_property_read_bool(np,
sde_prop[i].prop_name);
- SDE_DEBUG("prop id:%d prop name:%s prop type:%d \"\
- value:0x%x\n", i, sde_prop[i].prop_name,
+ SDE_DEBUG(
+ "prop id:%d prop name:%s prop type:%d value:0x%x\n",
+ i, sde_prop[i].prop_name,
sde_prop[i].type,
PROP_VALUE_ACCESS(prop_value, i, 0));
break;
@@ -753,8 +791,9 @@
&PROP_VALUE_ACCESS(prop_value, i, 0),
prop_count[i], sde_prop[i].is_mandatory);
if (rc && sde_prop[i].is_mandatory) {
- SDE_ERROR("%s prop validation success but \"\
- read failed\n", sde_prop[i].prop_name);
+ SDE_ERROR(
+ "%s prop validation success but read failed\n",
+ sde_prop[i].prop_name);
prop_exists[i] = false;
goto end;
} else {
@@ -776,19 +815,21 @@
prop_value, i, prop_count[i],
sde_prop[i].is_mandatory);
if (rc && sde_prop[i].is_mandatory) {
- SDE_ERROR("%s prop validation success but \"\
- read failed\n", sde_prop[i].prop_name);
+ SDE_ERROR(
+ "%s prop validation success but read failed\n",
+ sde_prop[i].prop_name);
prop_exists[i] = false;
goto end;
} else {
if (rc)
prop_exists[i] = false;
- SDE_DEBUG("prop id:%d prop name:%s prop \"\
- type:%d", i, sde_prop[i].prop_name,
+ SDE_DEBUG(
+ "prop id:%d prop name:%s prop type:%d",
+ i, sde_prop[i].prop_name,
sde_prop[i].type);
for (j = 0; j < prop_count[i]; j++)
- SDE_DEBUG(" count[%d]: bit:0x%x \"\
- off:0x%x \n", j,
+ SDE_DEBUG(
+ "count[%d]: bit:0x%x off:0x%x\n", j,
PROP_BITVALUE_ACCESS(prop_value,
i, j, 0),
PROP_BITVALUE_ACCESS(prop_value,
@@ -1125,6 +1166,13 @@
snprintf(sblk->src_blk.name, SDE_HW_BLK_NAME_LEN, "sspp_src_%u",
sspp->id - SSPP_VIG0);
+ if (sspp->clk_ctrl >= SDE_CLK_CTRL_MAX) {
+ SDE_ERROR("%s: invalid clk ctrl: %d\n",
+ sblk->src_blk.name, sspp->clk_ctrl);
+ rc = -EINVAL;
+ goto end;
+ }
+
sblk->maxhdeciexp = MAX_HORZ_DECIMATION;
sblk->maxvdeciexp = MAX_VERT_DECIMATION;
@@ -1508,6 +1556,13 @@
PROP_VALUE_ACCESS(prop_value, WB_ID, i);
wb->xin_id = PROP_VALUE_ACCESS(prop_value, WB_XIN_ID, i);
+ if (wb->clk_ctrl >= SDE_CLK_CTRL_MAX) {
+ SDE_ERROR("%s: invalid clk ctrl: %d\n",
+ wb->name, wb->clk_ctrl);
+ rc = -EINVAL;
+ goto end;
+ }
+
if (IS_SDE_MAJOR_MINOR_SAME((sde_cfg->hwversion),
SDE_HW_VER_170))
wb->vbif_idx = VBIF_NRT;
@@ -1815,6 +1870,54 @@
return rc;
}
+static int sde_dspp_top_parse_dt(struct device_node *np,
+ struct sde_mdss_cfg *sde_cfg)
+{
+ int rc, prop_count[DSPP_TOP_PROP_MAX];
+ bool prop_exists[DSPP_TOP_PROP_MAX];
+ struct sde_prop_value *prop_value = NULL;
+ u32 off_count;
+
+ if (!sde_cfg) {
+ SDE_ERROR("invalid argument\n");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ prop_value = kzalloc(DSPP_TOP_PROP_MAX *
+ sizeof(struct sde_prop_value), GFP_KERNEL);
+ if (!prop_value) {
+ rc = -ENOMEM;
+ goto end;
+ }
+
+ rc = _validate_dt_entry(np, dspp_top_prop, ARRAY_SIZE(dspp_top_prop),
+ prop_count, &off_count);
+ if (rc)
+ goto end;
+
+ rc = _read_dt_entry(np, dspp_top_prop, ARRAY_SIZE(dspp_top_prop),
+ prop_count, prop_exists, prop_value);
+ if (rc)
+ goto end;
+
+ if (off_count != 1) {
+ SDE_ERROR("invalid dspp_top off_count:%d\n", off_count);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ sde_cfg->dspp_top.base =
+ PROP_VALUE_ACCESS(prop_value, DSPP_TOP_OFF, 0);
+ sde_cfg->dspp_top.len =
+ PROP_VALUE_ACCESS(prop_value, DSPP_TOP_SIZE, 0);
+ snprintf(sde_cfg->dspp_top.name, SDE_HW_BLK_NAME_LEN, "dspp_top");
+
+end:
+ kfree(prop_value);
+ return rc;
+}
+
static int sde_dspp_parse_dt(struct device_node *np,
struct sde_mdss_cfg *sde_cfg)
{
@@ -2334,6 +2437,18 @@
pp->id - PINGPONG_0);
set_bit(SDE_PINGPONG_DSC, &pp->features);
}
+
+ sblk->dither.base = PROP_VALUE_ACCESS(prop_value, DITHER_OFF,
+ i);
+ if (sblk->dither.base) {
+ sblk->dither.id = SDE_PINGPONG_DITHER;
+ snprintf(sblk->dither.name, SDE_HW_BLK_NAME_LEN,
+ "dither_%u", pp->id);
+ set_bit(SDE_PINGPONG_DITHER, &pp->features);
+ }
+ sblk->dither.len = PROP_VALUE_ACCESS(prop_value, DITHER_LEN, 0);
+ sblk->dither.version = PROP_VALUE_ACCESS(prop_value, DITHER_VER,
+ 0);
}
end:
@@ -2512,7 +2627,7 @@
goto end;
}
- prop_value = kzalloc(SDE_PROP_MAX *
+ prop_value = kzalloc(PERF_PROP_MAX *
sizeof(struct sde_prop_value), GFP_KERNEL);
if (!prop_value) {
rc = -ENOMEM;
@@ -2678,7 +2793,7 @@
sizeof(struct sde_qos_lut_entry), GFP_KERNEL);
if (!cfg->perf.qos_lut_tbl[j].entries) {
rc = -ENOMEM;
- goto end;
+ goto freeprop;
}
for (k = 0, m = 0; k < count; k++, m += entry_size) {
@@ -2936,6 +3051,10 @@
if (rc)
goto end;
+ rc = sde_dspp_top_parse_dt(np, sde_cfg);
+ if (rc)
+ goto end;
+
rc = sde_dspp_parse_dt(np, sde_cfg);
if (rc)
goto end;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
index 74fa8f9..db5a6b4 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
@@ -194,6 +194,7 @@
* @SDE_PINGPONG_SPLIT PP block supports split fifo
* @SDE_PINGPONG_SLAVE PP block is a suitable slave for split fifo
* @SDE_PINGPONG_DSC, Display stream compression blocks
+ * @SDE_PINGPONG_DITHER, Dither blocks
* @SDE_PINGPONG_MAX
*/
enum {
@@ -202,6 +203,7 @@
SDE_PINGPONG_SPLIT,
SDE_PINGPONG_SLAVE,
SDE_PINGPONG_DSC,
+ SDE_PINGPONG_DITHER,
SDE_PINGPONG_MAX
};
@@ -457,6 +459,7 @@
struct sde_pp_blk te;
struct sde_pp_blk te2;
struct sde_pp_blk dsc;
+ struct sde_pp_blk dither;
};
struct sde_wb_sub_blocks {
@@ -565,6 +568,17 @@
};
/**
+ * struct sde_dspp_cfg - information of DSPP top block
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * supported by this block
+ */
+struct sde_dspp_top_cfg {
+ SDE_HW_BLK_INFO;
+};
+
+/**
* struct sde_dspp_cfg - information of DSPP blocks
* @id enum identifying this block
* @base register offset of this block
@@ -888,6 +902,8 @@
u32 mixer_count;
struct sde_lm_cfg mixer[MAX_BLOCKS];
+ struct sde_dspp_top_cfg dspp_top;
+
u32 dspp_count;
struct sde_dspp_cfg dspp[MAX_BLOCKS];
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_color_proc_common_v4.h b/drivers/gpu/drm/msm/sde/sde_hw_color_proc_common_v4.h
index 8f7764d..5cbfe8e 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_color_proc_common_v4.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_color_proc_common_v4.h
@@ -37,4 +37,33 @@
#define GC_TBL_NUM 3
#define GC_LUT_SWAP_OFF 0x1c
+#define IGC_TBL_NUM 3
+#define IGC_DITHER_OFF 0x7e0
+#define IGC_OPMODE_OFF 0x0
+#define IGC_C0_OFF 0x0
+#define IGC_DATA_MASK (BIT(12) - 1)
+#define IGC_DSPP_SEL_MASK_MAX (BIT(4) - 1)
+#define IGC_DSPP_SEL_MASK(n) \
+ ((IGC_DSPP_SEL_MASK_MAX & ~(1 << (n))) << 28)
+#define IGC_INDEX_UPDATE BIT(25)
+#define IGC_EN BIT(0)
+#define IGC_DIS 0
+#define IGC_DITHER_DATA_MASK (BIT(4) - 1)
+
+#define PCC_NUM_PLANES 3
+#define PCC_NUM_COEFF 11
+#define PCC_EN BIT(0)
+#define PCC_DIS 0
+#define PCC_C_OFF 0x4
+#define PCC_R_OFF 0x10
+#define PCC_G_OFF 0x1c
+#define PCC_B_OFF 0x28
+#define PCC_RG_OFF 0x34
+#define PCC_RB_OFF 0x40
+#define PCC_GB_OFF 0x4c
+#define PCC_RGB_OFF 0x58
+#define PCC_RR_OFF 0x64
+#define PCC_GG_OFF 0x70
+#define PCC_BB_OFF 0x7c
+
#endif /* _SDE_HW_COLOR_PROC_COMMON_V4_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_color_proc_v4.c b/drivers/gpu/drm/msm/sde/sde_hw_color_proc_v4.c
index 42d1480..4da0456 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_color_proc_v4.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_color_proc_v4.c
@@ -109,3 +109,127 @@
&op_mode);
}
+
+void sde_setup_dspp_igcv3(struct sde_hw_dspp *ctx, void *cfg)
+{
+ struct drm_msm_igc_lut *lut_cfg;
+ struct sde_hw_cp_cfg *hw_cfg = cfg;
+ int i = 0, j = 0;
+ u32 *addr = NULL;
+ u32 offset = 0;
+
+ if (!ctx || !cfg) {
+ DRM_ERROR("invalid param ctx %pK cfg %pK\n", ctx, cfg);
+ return;
+ }
+
+ if (!hw_cfg->payload) {
+ DRM_DEBUG_DRIVER("disable igc feature\n");
+ SDE_REG_WRITE(&ctx->hw, IGC_OPMODE_OFF, 0);
+ return;
+ }
+
+ if (hw_cfg->len != sizeof(struct drm_msm_igc_lut)) {
+ DRM_ERROR("invalid size of payload len %d exp %zd\n",
+ hw_cfg->len, sizeof(struct drm_msm_igc_lut));
+ return;
+ }
+
+ lut_cfg = hw_cfg->payload;
+
+ for (i = 0; i < IGC_TBL_NUM; i++) {
+ addr = lut_cfg->c0 + (i * ARRAY_SIZE(lut_cfg->c0));
+ offset = IGC_C0_OFF + (i * sizeof(u32));
+
+ for (j = 0; j < IGC_TBL_LEN; j++) {
+ addr[j] &= IGC_DATA_MASK;
+ addr[j] |= IGC_DSPP_SEL_MASK(ctx->idx - 1);
+ if (j == 0)
+ addr[j] |= IGC_INDEX_UPDATE;
+ /* IGC lut registers are part of DSPP Top HW block */
+ SDE_REG_WRITE(&ctx->hw_top, offset, addr[j]);
+ }
+ }
+
+ if (lut_cfg->flags & IGC_DITHER_ENABLE) {
+ SDE_REG_WRITE(&ctx->hw, IGC_DITHER_OFF,
+ lut_cfg->strength & IGC_DITHER_DATA_MASK);
+ }
+
+ SDE_REG_WRITE(&ctx->hw, IGC_OPMODE_OFF, IGC_EN);
+}
+
+void sde_setup_dspp_pccv4(struct sde_hw_dspp *ctx, void *cfg)
+{
+ struct sde_hw_cp_cfg *hw_cfg = cfg;
+ struct drm_msm_pcc *pcc_cfg;
+ struct drm_msm_pcc_coeff *coeffs = NULL;
+ int i = 0;
+ u32 base = 0;
+
+ if (!ctx || !cfg) {
+ DRM_ERROR("invalid param ctx %pK cfg %pK\n", ctx, cfg);
+ return;
+ }
+
+ if (!hw_cfg->payload) {
+ DRM_DEBUG_DRIVER("disable pcc feature\n");
+ SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base, 0);
+ return;
+ }
+
+ if (hw_cfg->len != sizeof(struct drm_msm_pcc)) {
+ DRM_ERROR("invalid size of payload len %d exp %zd\n",
+ hw_cfg->len, sizeof(struct drm_msm_pcc));
+ return;
+ }
+
+ pcc_cfg = hw_cfg->payload;
+
+ for (i = 0; i < PCC_NUM_PLANES; i++) {
+ base = ctx->cap->sblk->pcc.base + (i * sizeof(u32));
+ switch (i) {
+ case 0:
+ coeffs = &pcc_cfg->r;
+ SDE_REG_WRITE(&ctx->hw,
+ base + PCC_RR_OFF, pcc_cfg->r_rr);
+ SDE_REG_WRITE(&ctx->hw,
+ base + PCC_GG_OFF, pcc_cfg->r_gg);
+ SDE_REG_WRITE(&ctx->hw,
+ base + PCC_BB_OFF, pcc_cfg->r_bb);
+ break;
+ case 1:
+ coeffs = &pcc_cfg->g;
+ SDE_REG_WRITE(&ctx->hw,
+ base + PCC_RR_OFF, pcc_cfg->g_rr);
+ SDE_REG_WRITE(&ctx->hw,
+ base + PCC_GG_OFF, pcc_cfg->g_gg);
+ SDE_REG_WRITE(&ctx->hw,
+ base + PCC_BB_OFF, pcc_cfg->g_bb);
+ break;
+ case 2:
+ coeffs = &pcc_cfg->b;
+ SDE_REG_WRITE(&ctx->hw,
+ base + PCC_RR_OFF, pcc_cfg->b_rr);
+ SDE_REG_WRITE(&ctx->hw,
+ base + PCC_GG_OFF, pcc_cfg->b_gg);
+ SDE_REG_WRITE(&ctx->hw,
+ base + PCC_BB_OFF, pcc_cfg->b_bb);
+ break;
+ default:
+ DRM_ERROR("invalid pcc plane: %d\n", i);
+ return;
+ }
+
+ SDE_REG_WRITE(&ctx->hw, base + PCC_C_OFF, coeffs->c);
+ SDE_REG_WRITE(&ctx->hw, base + PCC_R_OFF, coeffs->r);
+ SDE_REG_WRITE(&ctx->hw, base + PCC_G_OFF, coeffs->g);
+ SDE_REG_WRITE(&ctx->hw, base + PCC_B_OFF, coeffs->b);
+ SDE_REG_WRITE(&ctx->hw, base + PCC_RG_OFF, coeffs->rg);
+ SDE_REG_WRITE(&ctx->hw, base + PCC_RB_OFF, coeffs->rb);
+ SDE_REG_WRITE(&ctx->hw, base + PCC_GB_OFF, coeffs->gb);
+ SDE_REG_WRITE(&ctx->hw, base + PCC_RGB_OFF, coeffs->rgb);
+ }
+
+ SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base, PCC_EN);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_color_proc_v4.h b/drivers/gpu/drm/msm/sde/sde_hw_color_proc_v4.h
index 250830e..ad4f556 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_color_proc_v4.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_color_proc_v4.h
@@ -22,5 +22,19 @@
* @cfg: pointer to sde_hw_cp_cfg
*/
void sde_setup_dspp_3d_gamutv4(struct sde_hw_dspp *ctx, void *cfg);
+/**
+ * sde_setup_dspp_igcv3 - Function for igc v3 version feature
+ * programming.
+ * @ctx: dspp ctx pointer
+ * @cfg: pointer to sde_hw_cp_cfg
+ */
+void sde_setup_dspp_igcv3(struct sde_hw_dspp *ctx, void *cfg);
+/**
+ * sde_setup_dspp_pccv4 - Function for pcc v4 version feature
+ * programming.
+ * @ctx: dspp ctx pointer
+ * @cfg: pointer to sde_hw_cp_cfg
+ */
+void sde_setup_dspp_pccv4(struct sde_hw_dspp *ctx, void *cfg);
#endif /* _SDE_HW_COLOR_PROC_V4_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c
index ab2c473..4191367 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c
@@ -70,8 +70,6 @@
#define DSPP_OP_PA_FOL_EN BIT(6)
#define DSPP_OP_PA_SKY_EN BIT(7)
-#define REG_MASK(n) ((BIT(n)) - 1)
-
#define PA_VIG_DISABLE_REQUIRED(x) \
!((x) & (VIG_OP_PA_SKIN_EN | VIG_OP_PA_SKY_EN | \
VIG_OP_PA_FOL_EN | VIG_OP_PA_HUE_EN | \
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
index ba55086..0b3432b 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
@@ -15,6 +15,7 @@
#include "sde_hw_ctl.h"
#include "sde_dbg.h"
#include "sde_kms.h"
+#include "sde_reg_dma.h"
#define CTL_LAYER(lm) \
(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
@@ -111,6 +112,11 @@
static inline void sde_hw_ctl_trigger_flush(struct sde_hw_ctl *ctx)
{
+ struct sde_hw_reg_dma_ops *ops = sde_reg_dma_get_ops();
+
+ if (ops && ops->last_command)
+ ops->last_command(ctx, DMA_CTL_QUEUE0);
+
SDE_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dsc.c b/drivers/gpu/drm/msm/sde/sde_hw_dsc.c
index 1a346f0..9fd3c25 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_dsc.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dsc.c
@@ -120,7 +120,6 @@
data |= dsc->max_qp_flatness << 5;
data |= dsc->min_qp_flatness;
SDE_REG_WRITE(dsc_c, DSC_FLATNESS, data);
- SDE_REG_WRITE(dsc_c, DSC_FLATNESS, 0x983);
data = dsc->rc_model_size;
SDE_REG_WRITE(dsc_c, DSC_RC_MODEL_SIZE, data);
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.c b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
index e766cdb..5b3f51e 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
@@ -58,6 +58,16 @@
if (c->cap->sblk->pcc.version ==
(SDE_COLOR_PROCESS_VER(0x1, 0x7)))
c->ops.setup_pcc = sde_setup_dspp_pcc_v1_7;
+ else if (c->cap->sblk->pcc.version ==
+ (SDE_COLOR_PROCESS_VER(0x4, 0x0))) {
+ ret = reg_dmav1_init_dspp_op_v4(i, c->idx);
+ if (!ret)
+ c->ops.setup_pcc =
+ reg_dmav1_setup_dspp_pccv4;
+ else
+ c->ops.setup_pcc =
+ sde_setup_dspp_pccv4;
+ }
break;
case SDE_DSPP_HSIC:
if (c->cap->sblk->hsic.version ==
@@ -104,6 +114,18 @@
sde_setup_dspp_gc_v1_7;
}
break;
+ case SDE_DSPP_IGC:
+ if (c->cap->sblk->igc.version ==
+ SDE_COLOR_PROCESS_VER(0x3, 0x1)) {
+ ret = reg_dmav1_init_dspp_op_v4(i, c->idx);
+ if (!ret)
+ c->ops.setup_igc =
+ reg_dmav1_setup_dspp_igcv31;
+ else
+ c->ops.setup_igc =
+ sde_setup_dspp_igcv3;
+ }
+ break;
case SDE_DSPP_AD:
if (c->cap->sblk->ad.version ==
SDE_COLOR_PROCESS_VER(4, 0)) {
@@ -145,6 +167,13 @@
return ERR_PTR(-EINVAL);
}
+ /* Populate DSPP Top HW block */
+ c->hw_top.base_off = addr;
+ c->hw_top.blk_off = m->dspp_top.base;
+ c->hw_top.length = m->dspp_top.len;
+ c->hw_top.hwversion = m->hwversion;
+ c->hw_top.log_mask = SDE_DBG_MASK_DSPP;
+
/* Assign ops */
c->idx = idx;
c->cap = cfg;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.h b/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
index 0baa970..44b3831 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
@@ -170,13 +170,17 @@
* struct sde_hw_dspp - dspp description
* @base: Hardware block base structure
* @hw: Block hardware details
+ * @hw_top: Block hardware top details
* @idx: DSPP index
* @cap: Pointer to layer_cfg
* @ops: Pointer to operations possible for this DSPP
*/
struct sde_hw_dspp {
struct sde_hw_blk base;
- struct sde_hw_blk_reg_map hw;
+ struct sde_hw_blk_reg_map hw;
+
+ /* dspp top */
+ struct sde_hw_blk_reg_map hw_top;
/* dspp */
enum sde_dspp idx;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
index 8c3d4fc..8eebf89fc 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
@@ -710,6 +710,9 @@
return;
SDE_REG_WRITE(&intr->hw, reg_off, mask);
+
+ /* ensure register writes go through */
+ wmb();
}
static void sde_hw_intr_dispatch_irq(struct sde_hw_intr *intr,
@@ -731,7 +734,7 @@
* Now need to go through each IRQ status and find matching
* irq lookup index.
*/
- spin_lock_irqsave(&intr->status_lock, irq_flags);
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
for (reg_idx = 0; reg_idx < ARRAY_SIZE(sde_intr_set); reg_idx++) {
irq_status = intr->save_irq_status[reg_idx];
@@ -766,7 +769,7 @@
if (cbfunc)
cbfunc(arg, irq_idx);
else
- intr->ops.clear_interrupt_status(
+ intr->ops.clear_intr_status_nolock(
intr, irq_idx);
/*
@@ -777,7 +780,7 @@
irq_status &= ~sde_irq_map[irq_idx].irq_mask;
}
}
- spin_unlock_irqrestore(&intr->status_lock, irq_flags);
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
}
static int sde_hw_intr_enable_irq(struct sde_hw_intr *intr, int irq_idx)
@@ -801,7 +804,7 @@
reg_idx = irq->reg_idx;
reg = &sde_intr_set[reg_idx];
- spin_lock_irqsave(&intr->mask_lock, irq_flags);
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
cache_irq_mask = intr->cache_irq_mask[reg_idx];
if (cache_irq_mask & irq->irq_mask) {
dbgstr = "SDE IRQ already set:";
@@ -814,9 +817,12 @@
/* Enabling interrupts with the new mask */
SDE_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
+ /* ensure register write goes through */
+ wmb();
+
intr->cache_irq_mask[reg_idx] = cache_irq_mask;
}
- spin_unlock_irqrestore(&intr->mask_lock, irq_flags);
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
irq->irq_mask, cache_irq_mask);
@@ -845,7 +851,7 @@
reg_idx = irq->reg_idx;
reg = &sde_intr_set[reg_idx];
- spin_lock_irqsave(&intr->mask_lock, irq_flags);
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
cache_irq_mask = intr->cache_irq_mask[reg_idx];
if ((cache_irq_mask & irq->irq_mask) == 0) {
dbgstr = "SDE IRQ is already cleared:";
@@ -858,9 +864,12 @@
/* Cleaning any pending interrupt */
SDE_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
+ /* ensure register write goes through */
+ wmb();
+
intr->cache_irq_mask[reg_idx] = cache_irq_mask;
}
- spin_unlock_irqrestore(&intr->mask_lock, irq_flags);
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
irq->irq_mask, cache_irq_mask);
@@ -878,6 +887,9 @@
for (i = 0; i < ARRAY_SIZE(sde_intr_set); i++)
SDE_REG_WRITE(&intr->hw, sde_intr_set[i].clr_off, 0xffffffff);
+ /* ensure register writes go through */
+ wmb();
+
return 0;
}
@@ -891,6 +903,9 @@
for (i = 0; i < ARRAY_SIZE(sde_intr_set); i++)
SDE_REG_WRITE(&intr->hw, sde_intr_set[i].en_off, 0x00000000);
+ /* ensure register writes go through */
+ wmb();
+
return 0;
}
@@ -926,7 +941,7 @@
if (!intr)
return;
- spin_lock_irqsave(&intr->status_lock, irq_flags);
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
for (i = 0; i < ARRAY_SIZE(sde_intr_set); i++) {
/* Read interrupt status */
intr->save_irq_status[i] = SDE_REG_READ(&intr->hw,
@@ -943,25 +958,68 @@
/* Finally update IRQ status based on enable mask */
intr->save_irq_status[i] &= enable_mask;
}
- spin_unlock_irqrestore(&intr->status_lock, irq_flags);
+
+ /* ensure register writes go through */
+ wmb();
+
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
}
-static void sde_hw_intr_clear_interrupt_status(struct sde_hw_intr *intr,
+static void sde_hw_intr_clear_intr_status_nolock(struct sde_hw_intr *intr,
int irq_idx)
{
int reg_idx;
- unsigned long irq_flags;
if (!intr)
return;
- spin_lock_irqsave(&intr->mask_lock, irq_flags);
-
reg_idx = sde_irq_map[irq_idx].reg_idx;
SDE_REG_WRITE(&intr->hw, sde_intr_set[reg_idx].clr_off,
sde_irq_map[irq_idx].irq_mask);
- spin_unlock_irqrestore(&intr->mask_lock, irq_flags);
+ /* ensure register writes go through */
+ wmb();
+}
+
+static void sde_hw_intr_clear_interrupt_status(struct sde_hw_intr *intr,
+ int irq_idx)
+{
+ unsigned long irq_flags;
+
+ if (!intr)
+ return;
+
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
+ sde_hw_intr_clear_intr_status_nolock(intr, irq_idx);
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+}
+
+static u32 sde_hw_intr_get_intr_status_nolock(struct sde_hw_intr *intr,
+ int irq_idx, bool clear)
+{
+ int reg_idx;
+ u32 intr_status;
+
+ if (!intr)
+ return 0;
+
+ if (irq_idx >= ARRAY_SIZE(sde_irq_map) || irq_idx < 0) {
+ pr_err("invalid IRQ index: [%d]\n", irq_idx);
+ return 0;
+ }
+
+ reg_idx = sde_irq_map[irq_idx].reg_idx;
+ intr_status = SDE_REG_READ(&intr->hw,
+ sde_intr_set[reg_idx].status_off) &
+ sde_irq_map[irq_idx].irq_mask;
+ if (intr_status && clear)
+ SDE_REG_WRITE(&intr->hw, sde_intr_set[reg_idx].clr_off,
+ intr_status);
+
+ /* ensure register writes go through */
+ wmb();
+
+ return intr_status;
}
static u32 sde_hw_intr_get_interrupt_status(struct sde_hw_intr *intr,
@@ -979,7 +1037,7 @@
return 0;
}
- spin_lock_irqsave(&intr->mask_lock, irq_flags);
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
reg_idx = sde_irq_map[irq_idx].reg_idx;
intr_status = SDE_REG_READ(&intr->hw,
@@ -989,7 +1047,10 @@
SDE_REG_WRITE(&intr->hw, sde_intr_set[reg_idx].clr_off,
intr_status);
- spin_unlock_irqrestore(&intr->mask_lock, irq_flags);
+ /* ensure register writes go through */
+ wmb();
+
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
return intr_status;
}
@@ -1007,7 +1068,9 @@
ops->get_interrupt_sources = sde_hw_intr_get_interrupt_sources;
ops->get_interrupt_statuses = sde_hw_intr_get_interrupt_statuses;
ops->clear_interrupt_status = sde_hw_intr_clear_interrupt_status;
+ ops->clear_intr_status_nolock = sde_hw_intr_clear_intr_status_nolock;
ops->get_interrupt_status = sde_hw_intr_get_interrupt_status;
+ ops->get_intr_status_nolock = sde_hw_intr_get_intr_status_nolock;
}
static struct sde_mdss_base_cfg *__intr_offset(struct sde_mdss_cfg *m,
@@ -1059,8 +1122,7 @@
return ERR_PTR(-ENOMEM);
}
- spin_lock_init(&intr->mask_lock);
- spin_lock_init(&intr->status_lock);
+ spin_lock_init(&intr->irq_lock);
return intr;
}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h
index aaba1be..ced4077 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h
@@ -187,6 +187,15 @@
int irq_idx);
/**
+ * clear_intr_status_nolock() - clears the HW interrupts without lock
+ * @intr: HW interrupt handle
+ * @irq_idx: Lookup irq index return from irq_idx_lookup
+ */
+ void (*clear_intr_status_nolock)(
+ struct sde_hw_intr *intr,
+ int irq_idx);
+
+ /**
* get_interrupt_status - Gets HW interrupt status, and clear if set,
* based on given lookup IRQ index.
* @intr: HW interrupt handle
@@ -199,6 +208,17 @@
bool clear);
/**
+ * get_intr_status_nolock - nolock version of get_interrupt_status
+ * @intr: HW interrupt handle
+ * @irq_idx: Lookup irq index return from irq_idx_lookup
+ * @clear: True to clear irq after read
+ */
+ u32 (*get_intr_status_nolock)(
+ struct sde_hw_intr *intr,
+ int irq_idx,
+ bool clear);
+
+ /**
* get_valid_interrupts - Gets a mask of all valid interrupt sources
* within SDE. These are actually status bits
* within interrupt registers that specify the
@@ -232,8 +252,7 @@
* @cache_irq_mask: array of IRQ enable masks reg storage created during init
* @save_irq_status: array of IRQ status reg storage created during init
* @irq_idx_tbl_size: total number of irq_idx mapped in the hw_interrupts
- * @mask_lock: spinlock for accessing IRQ mask
- * @status_lock: spinlock for accessing IRQ status
+ * @irq_lock: spinlock for accessing IRQ resources
*/
struct sde_hw_intr {
struct sde_hw_blk_reg_map hw;
@@ -241,8 +260,7 @@
u32 *cache_irq_mask;
u32 *save_irq_status;
u32 irq_idx_tbl_size;
- spinlock_t mask_lock;
- spinlock_t status_lock;
+ spinlock_t irq_lock;
};
/**
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
index 582ab5a..f07f5ed 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
@@ -82,6 +82,19 @@
#define SDE_BLEND_BG_INV_MOD_ALPHA (1 << 12)
#define SDE_BLEND_BG_TRANSP_EN (1 << 13)
+#define SDE_VSYNC0_SOURCE_GPIO 0
+#define SDE_VSYNC1_SOURCE_GPIO 1
+#define SDE_VSYNC2_SOURCE_GPIO 2
+#define SDE_VSYNC_SOURCE_INTF_0 3
+#define SDE_VSYNC_SOURCE_INTF_1 4
+#define SDE_VSYNC_SOURCE_INTF_2 5
+#define SDE_VSYNC_SOURCE_INTF_3 6
+#define SDE_VSYNC_SOURCE_WD_TIMER_4 11
+#define SDE_VSYNC_SOURCE_WD_TIMER_3 12
+#define SDE_VSYNC_SOURCE_WD_TIMER_2 13
+#define SDE_VSYNC_SOURCE_WD_TIMER_1 14
+#define SDE_VSYNC_SOURCE_WD_TIMER_0 15
+
enum sde_hw_blk_type {
SDE_HW_BLK_TOP = 0,
SDE_HW_BLK_SSPP,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c
index 37b74df..e88f40f 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c
@@ -40,6 +40,11 @@
#define PP_DCE_DATA_IN_SWAP 0x0ac
#define PP_DCE_DATA_OUT_SWAP 0x0c8
+#define DITHER_DEPTH_MAP_INDEX 9
+static u32 dither_depth_map[DITHER_DEPTH_MAP_INDEX] = {
+ 0, 0, 0, 0, 0, 1, 2, 3, 3
+};
+
static struct sde_pingpong_cfg *_pingpong_offset(enum sde_pingpong pp,
struct sde_mdss_cfg *m,
void __iomem *addr,
@@ -167,6 +172,57 @@
return 0;
}
+static int sde_hw_pp_setup_dither_v1(struct sde_hw_pingpong *pp,
+ void *cfg, size_t len)
+{
+ struct sde_hw_blk_reg_map *c;
+ struct drm_msm_dither *dither = (struct drm_msm_dither *)cfg;
+ u32 base = 0, offset = 0, data = 0, i = 0;
+
+ if (!pp)
+ return -EINVAL;
+
+ c = &pp->hw;
+ base = pp->caps->sblk->dither.base;
+ if (!dither) {
+ /* dither property disable case */
+ SDE_REG_WRITE(c, base, 0);
+ return 0;
+ }
+
+ if (len != sizeof(struct drm_msm_dither)) {
+ DRM_ERROR("input len %zu, expected len %zu\n", len,
+ sizeof(struct drm_msm_dither));
+ return -EINVAL;
+ }
+
+ if (dither->c0_bitdepth >= DITHER_DEPTH_MAP_INDEX ||
+ dither->c1_bitdepth >= DITHER_DEPTH_MAP_INDEX ||
+ dither->c2_bitdepth >= DITHER_DEPTH_MAP_INDEX ||
+ dither->c3_bitdepth >= DITHER_DEPTH_MAP_INDEX)
+ return -EINVAL;
+
+ offset += 4;
+ data = dither_depth_map[dither->c0_bitdepth] & REG_MASK(2);
+ data |= (dither_depth_map[dither->c1_bitdepth] & REG_MASK(2)) << 2;
+ data |= (dither_depth_map[dither->c2_bitdepth] & REG_MASK(2)) << 4;
+ data |= (dither_depth_map[dither->c3_bitdepth] & REG_MASK(2)) << 6;
+ data |= (dither->temporal_en) ? (1 << 8) : 0;
+ SDE_REG_WRITE(c, base + offset, data);
+
+ for (i = 0; i < DITHER_MATRIX_SZ - 3; i += 4) {
+ offset += 4;
+ data = (dither->matrix[i] & REG_MASK(4)) |
+ ((dither->matrix[i + 1] & REG_MASK(4)) << 4) |
+ ((dither->matrix[i + 2] & REG_MASK(4)) << 8) |
+ ((dither->matrix[i + 3] & REG_MASK(4)) << 12);
+ SDE_REG_WRITE(c, base + offset, data);
+ }
+ SDE_REG_WRITE(c, base, 1);
+
+ return 0;
+}
+
static int sde_hw_pp_enable_te(struct sde_hw_pingpong *pp, bool enable)
{
struct sde_hw_blk_reg_map *c = &pp->hw;
@@ -218,8 +274,10 @@
}
static void _setup_pingpong_ops(struct sde_hw_pingpong_ops *ops,
- unsigned long cap)
+ const struct sde_pingpong_cfg *hw_cap)
{
+ u32 version = 0;
+
ops->setup_tearcheck = sde_hw_pp_setup_te_config;
ops->enable_tearcheck = sde_hw_pp_enable_te;
ops->connect_external_te = sde_hw_pp_connect_external_te;
@@ -230,6 +288,16 @@
ops->disable_dsc = sde_hw_pp_dsc_disable;
ops->get_autorefresh = sde_hw_pp_get_autorefresh_config;
ops->poll_timeout_wr_ptr = sde_hw_pp_poll_timeout_wr_ptr;
+
+ version = SDE_COLOR_PROCESS_MAJOR(hw_cap->sblk->dither.version);
+ switch (version) {
+ case 1:
+ ops->setup_dither = sde_hw_pp_setup_dither_v1;
+ break;
+ default:
+ ops->setup_dither = NULL;
+ break;
+ }
};
static struct sde_hw_blk_ops sde_hw_ops = {
@@ -257,7 +325,7 @@
c->idx = idx;
c->caps = cfg;
- _setup_pingpong_ops(&c->ops, c->caps->features);
+ _setup_pingpong_ops(&c->ops, c->caps);
rc = sde_hw_blk_init(&c->base, SDE_HW_BLK_PINGPONG, idx, &sde_hw_ops);
if (rc) {
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h
index 6dbf4aa..f0a2054 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h
@@ -17,6 +17,7 @@
#include "sde_hw_mdss.h"
#include "sde_hw_util.h"
#include "sde_hw_blk.h"
+#include <uapi/drm/msm_drm_pp.h>
struct sde_hw_pingpong;
@@ -62,6 +63,7 @@
* @setup_dsc : program DSC block with encoding details
* @enable_dsc : enables DSC encoder
* @disable_dsc : disables DSC encoder
+ * @setup_dither : function to program the dither hw block
*/
struct sde_hw_pingpong_ops {
/**
@@ -123,6 +125,11 @@
* Disables DSC encoder
*/
void (*disable_dsc)(struct sde_hw_pingpong *pp);
+
+ /**
+ * Program the dither hw block
+ */
+ int (*setup_dither)(struct sde_hw_pingpong *pp, void *cfg, size_t len);
};
struct sde_hw_pingpong {
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c
index 678c84a..9a5035a 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c
@@ -13,6 +13,7 @@
#include "sde_hw_ctl.h"
#include "sde_hw_reg_dma_v1.h"
#include "msm_drv.h"
+#include "msm_mmu.h"
#define GUARD_BYTES (BIT(8) - 1)
#define ALIGNED_OFFSET (U32_MAX & ~(GUARD_BYTES))
@@ -49,6 +50,7 @@
(cfg)->dma_buf->index)
#define REG_DMA_DECODE_SEL 0x180AC060
+#define REG_DMA_LAST_CMD 0x180AC004
#define SINGLE_REG_WRITE_OPCODE (BIT(28))
#define REL_ADDR_OPCODE (BIT(27))
#define HW_INDEX_REG_WRITE_OPCODE (BIT(28) | BIT(29))
@@ -58,6 +60,7 @@
#define WRAP_MIN_SIZE 2
#define WRAP_MAX_SIZE (BIT(4) - 1)
#define MAX_DWORDS_SZ (BIT(14) - 1)
+#define REG_DMA_HEADERS_BUFFER_SZ (sizeof(u32) * 128)
typedef int (*reg_dma_internal_ops) (struct sde_reg_dma_setup_ops_cfg *cfg);
@@ -87,23 +90,28 @@
[GAMUT] = GRP_VIG_HW_BLK_SELECT | GRP_DSPP_HW_BLK_SELECT,
[VLUT] = GRP_DSPP_HW_BLK_SELECT,
[GC] = GRP_DSPP_HW_BLK_SELECT,
+ [IGC] = DSPP_IGC | GRP_DSPP_HW_BLK_SELECT,
+ [PCC] = GRP_DSPP_HW_BLK_SELECT,
};
static int validate_dma_cfg(struct sde_reg_dma_setup_ops_cfg *cfg);
static int validate_write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg);
static int validate_write_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
static int validate_write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
+static int validate_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg);
static int write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg);
static int write_single_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
static int write_multi_reg_index(struct sde_reg_dma_setup_ops_cfg *cfg);
static int write_multi_reg_inc(struct sde_reg_dma_setup_ops_cfg *cfg);
static int write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
+static int write_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg);
static int reset_reg_dma_buffer_v1(struct sde_reg_dma_buffer *lut_buf);
static int check_support_v1(enum sde_reg_dma_features feature,
enum sde_reg_dma_blk blk, bool *is_supported);
static int setup_payload_v1(struct sde_reg_dma_setup_ops_cfg *cfg);
static int kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg);
static int reset_v1(struct sde_hw_ctl *ctl);
+static int last_cmd_v1(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q);
static struct sde_reg_dma_buffer *alloc_reg_dma_buf_v1(u32 size);
static int dealloc_reg_dma_v1(struct sde_reg_dma_buffer *lut_buf);
@@ -123,6 +131,8 @@
[REG_BLK_WRITE_MULTIPLE] = validate_write_multi_lut_reg,
};
+static struct sde_reg_dma_buffer *last_cmd_buf;
+
static void get_decode_sel(unsigned long blk, u32 *decode_sel)
{
int i = 0;
@@ -174,7 +184,7 @@
loc = (u8 *)cfg->dma_buf->vaddr + cfg->dma_buf->index;
memcpy(loc, cfg->data, cfg->data_size);
cfg->dma_buf->index += cfg->data_size;
- cfg->dma_buf->next_op_allowed = REG_WRITE_OP;
+ cfg->dma_buf->next_op_allowed = REG_WRITE_OP | DECODE_SEL_OP;
cfg->dma_buf->ops_completed |= REG_WRITE_OP;
return 0;
@@ -236,7 +246,7 @@
loc[1] = *cfg->data;
cfg->dma_buf->index += ops_mem_size[cfg->ops];
cfg->dma_buf->ops_completed |= REG_WRITE_OP;
- cfg->dma_buf->next_op_allowed = REG_WRITE_OP;
+ cfg->dma_buf->next_op_allowed = REG_WRITE_OP | DECODE_SEL_OP;
return 0;
}
@@ -447,6 +457,7 @@
u32 cmd1;
struct sde_hw_blk_reg_map hw;
+ memset(&hw, 0, sizeof(hw));
cmd1 = (cfg->op == REG_DMA_READ) ?
(dspp_read_sel[cfg->block_select] << 30) : 0;
cmd1 |= (cfg->last_command) ? BIT(24) : 0;
@@ -474,6 +485,11 @@
return -EINVAL;
reg_dma = cfg;
+ if (!last_cmd_buf) {
+ last_cmd_buf = alloc_reg_dma_buf_v1(REG_DMA_HEADERS_BUFFER_SZ);
+ if (IS_ERR_OR_NULL(last_cmd_buf))
+ return -EINVAL;
+ }
reg_dma->ops.check_support = check_support_v1;
reg_dma->ops.setup_payload = setup_payload_v1;
reg_dma->ops.kick_off = kick_off_v1;
@@ -481,6 +497,7 @@
reg_dma->ops.alloc_reg_dma_buf = alloc_reg_dma_buf_v1;
reg_dma->ops.dealloc_reg_dma = dealloc_reg_dma_v1;
reg_dma->ops.reset_reg_dma_buf = reset_reg_dma_buffer_v1;
+ reg_dma->ops.last_command = last_cmd_v1;
reg_dma_ctl_queue_off[CTL_0] = REG_DMA_CTL0_QUEUE_0_CMD0_OFF;
for (i = CTL_1; i < ARRAY_SIZE(reg_dma_ctl_queue_off); i++)
@@ -547,6 +564,7 @@
return -EINVAL;
}
+ memset(&hw, 0, sizeof(hw));
index = ctl->idx - CTL_0;
SET_UP_REG_DMA_REG(hw, reg_dma);
SDE_REG_WRITE(&hw, REG_DMA_OP_MODE_OFF, BIT(0));
@@ -569,6 +587,7 @@
struct sde_reg_dma_buffer *dma_buf = NULL;
u32 iova_aligned, offset;
u32 rsize = size + GUARD_BYTES;
+ struct msm_gem_address_space *aspace = NULL;
int rc = 0;
if (!size || SIZE_DWORD(size) > MAX_DWORDS_SZ) {
@@ -589,7 +608,15 @@
goto fail;
}
- rc = msm_gem_get_iova(dma_buf->buf, 0, &dma_buf->iova);
+ aspace = msm_gem_smmu_address_space_get(reg_dma->drm_dev,
+ MSM_SMMU_DOMAIN_UNSECURE);
+ if (!aspace) {
+ DRM_ERROR("failed to get aspace\n");
+ rc = -EINVAL;
+ goto free_gem;
+ }
+
+ rc = msm_gem_get_iova(dma_buf->buf, aspace, &dma_buf->iova);
if (rc) {
DRM_ERROR("failed to get the iova rc %d\n", rc);
goto free_gem;
@@ -612,7 +639,7 @@
return dma_buf;
put_iova:
- msm_gem_put_iova(dma_buf->buf, 0);
+ msm_gem_put_iova(dma_buf->buf, aspace);
free_gem:
msm_gem_free_object(dma_buf->buf);
fail:
@@ -648,3 +675,76 @@
lut_buf->next_op_allowed = DECODE_SEL_OP;
return 0;
}
+
+static int validate_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg)
+{
+ u32 remain_len, write_len;
+
+ remain_len = BUFFER_SPACE_LEFT(cfg);
+ write_len = sizeof(u32);
+ if (remain_len < write_len) {
+ DRM_ERROR("buffer is full sz %d needs %d bytes\n",
+ remain_len, write_len);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int write_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg)
+{
+ u32 *loc = NULL;
+
+ loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
+ cfg->dma_buf->index);
+ loc[0] = REG_DMA_LAST_CMD;
+ loc[1] = BIT(0);
+ cfg->dma_buf->index = sizeof(u32) * 2;
+ cfg->dma_buf->ops_completed = REG_WRITE_OP | DECODE_SEL_OP;
+ cfg->dma_buf->next_op_allowed = REG_WRITE_OP;
+
+ return 0;
+}
+
+static int last_cmd_v1(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q)
+{
+ struct sde_reg_dma_setup_ops_cfg cfg;
+ struct sde_reg_dma_kickoff_cfg kick_off;
+
+ if (!last_cmd_buf || !ctl || q >= DMA_CTL_QUEUE_MAX) {
+ DRM_ERROR("invalid param buf %pK ctl %pK q %d\n", last_cmd_buf,
+ ctl, q);
+ return -EINVAL;
+ }
+
+ cfg.dma_buf = last_cmd_buf;
+ reset_reg_dma_buffer_v1(last_cmd_buf);
+ if (validate_last_cmd(&cfg)) {
+ DRM_ERROR("validate buf failed\n");
+ return -EINVAL;
+ }
+
+ if (write_last_cmd(&cfg)) {
+ DRM_ERROR("write buf failed\n");
+ return -EINVAL;
+ }
+
+ kick_off.ctl = ctl;
+ kick_off.queue_select = q;
+ kick_off.trigger_mode = WRITE_IMMEDIATE;
+ kick_off.last_command = 1;
+ kick_off.op = REG_DMA_WRITE;
+ kick_off.dma_buf = last_cmd_buf;
+ if (kick_off_v1(&kick_off)) {
+ DRM_ERROR("kick off last cmd failed\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void deinit_v1(void)
+{
+ if (last_cmd_buf)
+ dealloc_reg_dma_v1(last_cmd_buf);
+ last_cmd_buf = NULL;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.h b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.h
index 8e37d38..4f9ab4e 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.h
@@ -20,4 +20,8 @@
*/
int init_v1(struct sde_hw_reg_dma *reg_dma);
+/**
+ * deinit_v1() - free up any resources allocated during the v1 reg dma init
+ */
+void deinit_v1(void);
#endif /* _SDE_HW_REG_DMA_V1_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.c b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.c
index 0dcbb7e..70427ab 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.c
@@ -30,6 +30,14 @@
#define GC_LUT_MEM_SIZE ((sizeof(struct drm_msm_pgc_lut)) + \
REG_DMA_HEADERS_BUFFER_SZ)
+#define IGC_LUT_MEM_SIZE ((sizeof(struct drm_msm_igc_lut)) + \
+ REG_DMA_HEADERS_BUFFER_SZ)
+
+#define PCC_LUT_ENTRIES (PCC_NUM_PLANES * PCC_NUM_COEFF)
+#define PCC_LEN (PCC_LUT_ENTRIES * sizeof(u32))
+#define PCC_MEM_SIZE (PCC_LEN + \
+ REG_DMA_HEADERS_BUFFER_SZ)
+
#define REG_MASK(n) ((BIT(n)) - 1)
static struct sde_reg_dma_buffer *dspp_buf[REG_DMA_FEATURES_MAX][DSPP_MAX];
@@ -37,8 +45,8 @@
static u32 feature_map[SDE_DSPP_MAX] = {
[SDE_DSPP_VLUT] = VLUT,
[SDE_DSPP_GAMUT] = GAMUT,
- [SDE_DSPP_IGC] = REG_DMA_FEATURES_MAX,
- [SDE_DSPP_PCC] = REG_DMA_FEATURES_MAX,
+ [SDE_DSPP_IGC] = IGC,
+ [SDE_DSPP_PCC] = PCC,
[SDE_DSPP_GC] = GC,
[SDE_DSPP_HSIC] = REG_DMA_FEATURES_MAX,
[SDE_DSPP_MEMCOLOR] = REG_DMA_FEATURES_MAX,
@@ -52,6 +60,8 @@
[SDE_DSPP_VLUT] = VLUT_MEM_SIZE,
[SDE_DSPP_GAMUT] = GAMUT_LUT_MEM_SIZE,
[SDE_DSPP_GC] = GC_LUT_MEM_SIZE,
+ [SDE_DSPP_IGC] = IGC_LUT_MEM_SIZE,
+ [SDE_DSPP_PCC] = PCC_MEM_SIZE,
};
static u32 dspp_mapping[DSPP_MAX] = {
@@ -233,6 +243,7 @@
int rc = -ENOTSUPP;
struct sde_hw_reg_dma_ops *dma_ops;
bool is_supported = false;
+ u32 blk;
if (feature >= SDE_DSPP_MAX || idx >= DSPP_MAX) {
DRM_ERROR("invalid feature %x max %x dspp idx %x max %xd\n",
@@ -250,8 +261,8 @@
if (IS_ERR_OR_NULL(dma_ops))
return -ENOTSUPP;
- rc = dma_ops->check_support(feature_map[feature], dspp_mapping[idx],
- &is_supported);
+ blk = (feature_map[feature] == IGC) ? DSPP_IGC : dspp_mapping[idx];
+ rc = dma_ops->check_support(feature_map[feature], blk, &is_supported);
if (!rc)
rc = (is_supported) ? 0 : -ENOTSUPP;
@@ -379,7 +390,7 @@
{
struct sde_reg_dma_kickoff_cfg kick_off;
struct sde_hw_cp_cfg *hw_cfg = cfg;
- u32 op_mode;
+ u32 op_mode = 0;
struct sde_hw_reg_dma_ops *dma_ops;
struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
int rc;
@@ -408,7 +419,6 @@
REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, dspp_buf[GAMUT][ctx->idx],
REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
- kick_off.last_command = hw_cfg->last_feature;
rc = dma_ops->kick_off(&kick_off);
if (rc)
DRM_ERROR("failed to kick off ret %d\n", rc);
@@ -505,7 +515,6 @@
REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, dspp_buf[GAMUT][ctx->idx],
REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
- kick_off.last_command = hw_cfg->last_feature;
rc = dma_ops->kick_off(&kick_off);
if (rc)
DRM_ERROR("failed to kick off ret %d\n", rc);
@@ -521,7 +530,7 @@
int rc, i = 0;
u32 reg;
- rc = reg_dma_dspp_check(ctx, cfg, GAMUT);
+ rc = reg_dma_dspp_check(ctx, cfg, GC);
if (rc)
return;
@@ -598,7 +607,6 @@
REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, dspp_buf[GC][ctx->idx],
REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
- kick_off.last_command = hw_cfg->last_feature;
rc = dma_ops->kick_off(&kick_off);
if (rc) {
DRM_ERROR("failed to kick off ret %d\n", rc);
@@ -606,6 +614,298 @@
}
}
+static void _dspp_igcv31_off(struct sde_hw_dspp *ctx, void *cfg)
+{
+ struct sde_reg_dma_kickoff_cfg kick_off;
+ struct sde_hw_cp_cfg *hw_cfg = cfg;
+ struct sde_hw_reg_dma_ops *dma_ops;
+ struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
+ int rc;
+ u32 reg;
+
+ dma_ops = sde_reg_dma_get_ops();
+ dma_ops->reset_reg_dma_buf(dspp_buf[IGC][ctx->idx]);
+
+ REG_DMA_INIT_OPS(dma_write_cfg, dspp_mapping[ctx->idx], IGC,
+ dspp_buf[IGC][ctx->idx]);
+
+ REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0);
+ rc = dma_ops->setup_payload(&dma_write_cfg);
+ if (rc) {
+ DRM_ERROR("write decode select failed ret %d\n", rc);
+ return;
+ }
+
+ reg = IGC_DIS;
+ REG_DMA_SETUP_OPS(dma_write_cfg,
+ ctx->cap->sblk->igc.base + IGC_OPMODE_OFF,
+ ®, sizeof(reg), REG_SINGLE_WRITE, 0, 0);
+ rc = dma_ops->setup_payload(&dma_write_cfg);
+ if (rc) {
+ DRM_ERROR("setting opcode failed ret %d\n", rc);
+ return;
+ }
+
+ REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, dspp_buf[IGC][ctx->idx],
+ REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+ rc = dma_ops->kick_off(&kick_off);
+ if (rc)
+ DRM_ERROR("failed to kick off ret %d\n", rc);
+}
+
+void reg_dmav1_setup_dspp_igcv31(struct sde_hw_dspp *ctx, void *cfg)
+{
+ struct drm_msm_igc_lut *lut_cfg;
+ struct sde_hw_reg_dma_ops *dma_ops;
+ struct sde_reg_dma_kickoff_cfg kick_off;
+ struct sde_hw_cp_cfg *hw_cfg = cfg;
+ struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
+ int rc, i = 0, j = 0;
+ u32 *addr = NULL;
+ u32 offset = 0;
+ u32 reg;
+
+ rc = reg_dma_dspp_check(ctx, cfg, IGC);
+ if (rc)
+ return;
+
+ if (!hw_cfg->payload) {
+ DRM_DEBUG_DRIVER("disable igc feature\n");
+ _dspp_igcv31_off(ctx, cfg);
+ return;
+ }
+
+ if (hw_cfg->len != sizeof(struct drm_msm_igc_lut)) {
+ DRM_ERROR("invalid size of payload len %d exp %zd\n",
+ hw_cfg->len, sizeof(struct drm_msm_igc_lut));
+ return;
+ }
+
+ lut_cfg = hw_cfg->payload;
+
+ dma_ops = sde_reg_dma_get_ops();
+ dma_ops->reset_reg_dma_buf(dspp_buf[IGC][ctx->idx]);
+
+ REG_DMA_INIT_OPS(dma_write_cfg, DSPP_IGC, IGC, dspp_buf[IGC][ctx->idx]);
+
+ REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0);
+ rc = dma_ops->setup_payload(&dma_write_cfg);
+ if (rc) {
+ DRM_ERROR("write decode select failed ret %d\n", rc);
+ return;
+ }
+
+ for (i = 0; i < IGC_TBL_NUM; i++) {
+ addr = lut_cfg->c0 + (i * ARRAY_SIZE(lut_cfg->c0));
+ offset = IGC_C0_OFF + (i * sizeof(u32));
+
+ for (j = 0; j < IGC_TBL_LEN; j++) {
+ addr[j] &= IGC_DATA_MASK;
+ addr[j] |= IGC_DSPP_SEL_MASK(ctx->idx - 1);
+ if (j == 0)
+ addr[j] |= IGC_INDEX_UPDATE;
+ }
+
+ REG_DMA_SETUP_OPS(dma_write_cfg, offset, addr,
+ IGC_TBL_LEN * sizeof(u32),
+ REG_BLK_WRITE_INC, 0, 0);
+ rc = dma_ops->setup_payload(&dma_write_cfg);
+ if (rc) {
+ DRM_ERROR("lut write failed ret %d\n", rc);
+ return;
+ }
+ }
+
+ REG_DMA_INIT_OPS(dma_write_cfg, dspp_mapping[ctx->idx], IGC,
+ dspp_buf[IGC][ctx->idx]);
+
+ REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0);
+ rc = dma_ops->setup_payload(&dma_write_cfg);
+ if (rc) {
+ DRM_ERROR("write decode select failed ret %d\n", rc);
+ return;
+ }
+
+ if (lut_cfg->flags & IGC_DITHER_ENABLE) {
+ reg = lut_cfg->strength & IGC_DITHER_DATA_MASK;
+ REG_DMA_SETUP_OPS(dma_write_cfg,
+ ctx->cap->sblk->igc.base + IGC_DITHER_OFF,
+ ®, sizeof(reg), REG_SINGLE_WRITE, 0, 0);
+ rc = dma_ops->setup_payload(&dma_write_cfg);
+ if (rc) {
+ DRM_ERROR("dither strength failed ret %d\n", rc);
+ return;
+ }
+ }
+
+ reg = IGC_EN;
+ REG_DMA_SETUP_OPS(dma_write_cfg,
+ ctx->cap->sblk->igc.base + IGC_OPMODE_OFF,
+ ®, sizeof(reg), REG_SINGLE_WRITE, 0, 0);
+ rc = dma_ops->setup_payload(&dma_write_cfg);
+ if (rc) {
+ DRM_ERROR("setting opcode failed ret %d\n", rc);
+ return;
+ }
+
+ REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, dspp_buf[IGC][ctx->idx],
+ REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+ rc = dma_ops->kick_off(&kick_off);
+ if (rc)
+ DRM_ERROR("failed to kick off ret %d\n", rc);
+}
+
+static void _dspp_pccv4_off(struct sde_hw_dspp *ctx, void *cfg)
+{
+ struct sde_reg_dma_kickoff_cfg kick_off;
+ struct sde_hw_cp_cfg *hw_cfg = cfg;
+ struct sde_hw_reg_dma_ops *dma_ops;
+ struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
+ int rc;
+ u32 reg;
+
+ dma_ops = sde_reg_dma_get_ops();
+ dma_ops->reset_reg_dma_buf(dspp_buf[PCC][ctx->idx]);
+
+ REG_DMA_INIT_OPS(dma_write_cfg, dspp_mapping[ctx->idx], PCC,
+ dspp_buf[PCC][ctx->idx]);
+
+ REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0);
+ rc = dma_ops->setup_payload(&dma_write_cfg);
+ if (rc) {
+ DRM_ERROR("write decode select failed ret %d\n", rc);
+ return;
+ }
+
+ reg = PCC_DIS;
+ REG_DMA_SETUP_OPS(dma_write_cfg,
+ ctx->cap->sblk->pcc.base,
+ ®, sizeof(reg), REG_SINGLE_WRITE, 0, 0);
+ rc = dma_ops->setup_payload(&dma_write_cfg);
+ if (rc) {
+ DRM_ERROR("setting opcode failed ret %d\n", rc);
+ return;
+ }
+
+ REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, dspp_buf[PCC][ctx->idx],
+ REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+ rc = dma_ops->kick_off(&kick_off);
+ if (rc)
+ DRM_ERROR("failed to kick off ret %d\n", rc);
+}
+
+void reg_dmav1_setup_dspp_pccv4(struct sde_hw_dspp *ctx, void *cfg)
+{
+ struct sde_hw_reg_dma_ops *dma_ops;
+ struct sde_reg_dma_kickoff_cfg kick_off;
+ struct sde_hw_cp_cfg *hw_cfg = cfg;
+ struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
+ struct drm_msm_pcc *pcc_cfg;
+ struct drm_msm_pcc_coeff *coeffs = NULL;
+ u32 *data = NULL;
+ int rc, i = 0;
+ u32 reg = 0;
+
+ rc = reg_dma_dspp_check(ctx, cfg, PCC);
+ if (rc)
+ return;
+
+ if (!hw_cfg->payload) {
+ DRM_DEBUG_DRIVER("disable pcc feature\n");
+ _dspp_pccv4_off(ctx, cfg);
+ return;
+ }
+
+ if (hw_cfg->len != sizeof(struct drm_msm_pcc)) {
+ DRM_ERROR("invalid size of payload len %d exp %zd\n",
+ hw_cfg->len, sizeof(struct drm_msm_pcc));
+ return;
+ }
+
+ pcc_cfg = hw_cfg->payload;
+
+ dma_ops = sde_reg_dma_get_ops();
+ dma_ops->reset_reg_dma_buf(dspp_buf[PCC][ctx->idx]);
+
+ REG_DMA_INIT_OPS(dma_write_cfg, dspp_mapping[ctx->idx],
+ PCC, dspp_buf[PCC][ctx->idx]);
+
+ REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0);
+ rc = dma_ops->setup_payload(&dma_write_cfg);
+ if (rc) {
+ DRM_ERROR("write decode select failed ret %d\n", rc);
+ return;
+ }
+
+ data = kzalloc(PCC_LEN, GFP_KERNEL);
+ if (!data)
+ return;
+
+ for (i = 0; i < PCC_NUM_PLANES; i++) {
+ switch (i) {
+ case 0:
+ coeffs = &pcc_cfg->r;
+ data[i + 24] = pcc_cfg->r_rr;
+ data[i + 27] = pcc_cfg->r_gg;
+ data[i + 30] = pcc_cfg->r_bb;
+ break;
+ case 1:
+ coeffs = &pcc_cfg->g;
+ data[i + 24] = pcc_cfg->g_rr;
+ data[i + 27] = pcc_cfg->g_gg;
+ data[i + 30] = pcc_cfg->g_bb;
+ break;
+ case 2:
+ coeffs = &pcc_cfg->b;
+ data[i + 24] = pcc_cfg->b_rr;
+ data[i + 27] = pcc_cfg->b_gg;
+ data[i + 30] = pcc_cfg->b_bb;
+ break;
+ default:
+ DRM_ERROR("invalid pcc plane: %d\n", i);
+ goto exit;
+ }
+
+ data[i] = coeffs->c;
+ data[i + 3] = coeffs->r;
+ data[i + 6] = coeffs->g;
+ data[i + 9] = coeffs->b;
+ data[i + 12] = coeffs->rg;
+ data[i + 15] = coeffs->rb;
+ data[i + 18] = coeffs->gb;
+ data[i + 21] = coeffs->rgb;
+ }
+
+ REG_DMA_SETUP_OPS(dma_write_cfg,
+ ctx->cap->sblk->pcc.base + PCC_C_OFF,
+ data, PCC_LEN,
+ REG_BLK_WRITE_SINGLE, 0, 0);
+ rc = dma_ops->setup_payload(&dma_write_cfg);
+ if (rc) {
+ DRM_ERROR("write pcc lut failed ret %d\n", rc);
+ goto exit;
+ }
+
+ reg = PCC_EN;
+ REG_DMA_SETUP_OPS(dma_write_cfg,
+ ctx->cap->sblk->pcc.base,
+ ®, sizeof(reg), REG_SINGLE_WRITE, 0, 0);
+ rc = dma_ops->setup_payload(&dma_write_cfg);
+ if (rc) {
+ DRM_ERROR("setting opcode failed ret %d\n", rc);
+ goto exit;
+ }
+
+ REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, dspp_buf[PCC][ctx->idx],
+ REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+ rc = dma_ops->kick_off(&kick_off);
+ if (rc)
+ DRM_ERROR("failed to kick off ret %d\n", rc);
+
+exit:
+ kfree(data);
+}
+
int reg_dmav1_deinit_dspp_ops(enum sde_dspp idx)
{
int i;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.h b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.h
index 94e1a5c..bb72c8f 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.h
@@ -46,13 +46,27 @@
void reg_dmav1_setup_dspp_3d_gamutv4(struct sde_hw_dspp *ctx, void *cfg);
/**
- * reg_dmav1_setup_dspp_gc_v18() - gc v18 implementation using reg dma v1.
+ * reg_dmav1_setup_dspp_gcv18() - gc v18 implementation using reg dma v1.
* @ctx: dspp ctx info
* @cfg: pointer to struct sde_hw_cp_cfg
*/
void reg_dmav1_setup_dspp_gcv18(struct sde_hw_dspp *ctx, void *cfg);
/**
+ * reg_dmav1_setup_dspp_igcv31() - igc v31 implementation using reg dma v1.
+ * @ctx: dspp ctx info
+ * @cfg: pointer to struct sde_hw_cp_cfg
+ */
+void reg_dmav1_setup_dspp_igcv31(struct sde_hw_dspp *ctx, void *cfg);
+
+/**
+ * reg_dmav1_setup_dspp_pccv4() - pcc v4 implementation using reg dma v1.
+ * @ctx: dspp ctx info
+ * @cfg: pointer to struct sde_hw_cp_cfg
+ */
+void reg_dmav1_setup_dspp_pccv4(struct sde_hw_dspp *ctx, void *cfg);
+
+/**
* reg_dmav1_deinit_dspp_ops() - deinitialize the dspp feature op for sde v4
* which were initialized.
* @idx: dspp idx
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
index 9fd5992..d8cd75a 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
@@ -257,7 +257,10 @@
} else {
mode_mask = SDE_REG_READ(&ctx->hw, SSPP_MULTIRECT_OPMODE + idx);
mode_mask |= index;
- mode_mask |= (mode == SDE_SSPP_MULTIRECT_TIME_MX) ? 0x4 : 0x0;
+ if (mode == SDE_SSPP_MULTIRECT_TIME_MX)
+ mode_mask |= BIT(2);
+ else
+ mode_mask &= ~BIT(2);
}
SDE_REG_WRITE(&ctx->hw, SSPP_MULTIRECT_OPMODE + idx, mode_mask);
@@ -313,6 +316,7 @@
u32 chroma_samp, unpack, src_format;
u32 secure = 0, secure_bit_mask;
u32 opmode = 0;
+ u32 fast_clear = 0;
u32 op_mode_off, unpack_pat_off, format_off;
u32 idx;
@@ -382,10 +386,12 @@
SDE_REG_WRITE(c, SSPP_FETCH_CONFIG,
SDE_FETCH_CONFIG_RESET_VALUE |
ctx->mdp->highest_bank_bit << 18);
- if (IS_UBWC_20_SUPPORTED(ctx->catalog->ubwc_version))
+ if (IS_UBWC_20_SUPPORTED(ctx->catalog->ubwc_version)) {
+ fast_clear = fmt->alpha_enable ? BIT(31) : 0;
SDE_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
- BIT(31) | (ctx->mdp->ubwc_swizzle) |
+ fast_clear | (ctx->mdp->ubwc_swizzle) |
(ctx->mdp->highest_bank_bit << 4));
+ }
}
opmode |= MDSS_MDP_OP_PE_OVERRIDE;
@@ -765,6 +771,16 @@
SDE_REG_WRITE(&ctx->hw, QSEED3_OP_MODE + idx, op_mode);
}
+static u32 _sde_hw_sspp_get_scaler3_ver(struct sde_hw_pipe *ctx)
+{
+ u32 idx;
+
+ if (!ctx || _sspp_subblk_offset(ctx, SDE_SSPP_SCALER_QSEED3, &idx))
+ return 0;
+
+ return SDE_REG_READ(&ctx->hw, QSEED3_HW_VERSION + idx);
+}
+
/**
* sde_hw_sspp_setup_rects()
*/
@@ -1164,8 +1180,10 @@
if (sde_hw_sspp_multirect_enabled(c->cap))
c->ops.setup_multirect = sde_hw_sspp_setup_multirect;
- if (test_bit(SDE_SSPP_SCALER_QSEED3, &features))
+ if (test_bit(SDE_SSPP_SCALER_QSEED3, &features)) {
c->ops.setup_scaler = _sde_hw_sspp_setup_scaler3;
+ c->ops.get_scaler_ver = _sde_hw_sspp_get_scaler3_ver;
+ }
if (test_bit(SDE_SSPP_HSIC, &features)) {
/* TODO: add version based assignment here as inline or macro */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
index 8d14715..c19eb5c 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
@@ -575,6 +575,12 @@
void *scaler_cfg);
/**
+ * get_scaler_ver - get scaler h/w version
+ * @ctx: Pointer to pipe context
+ */
+ u32 (*get_scaler_ver)(struct sde_hw_pipe *ctx);
+
+ /**
* setup_sys_cache - setup system cache configuration
* @ctx: Pointer to pipe context
* @cfg: Pointer to system cache configuration
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.c b/drivers/gpu/drm/msm/sde/sde_hw_top.c
index b773187..613ac53 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_top.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_top.c
@@ -38,6 +38,18 @@
#define MDP_WD_TIMER_0_CTL 0x380
#define MDP_WD_TIMER_0_CTL2 0x384
#define MDP_WD_TIMER_0_LOAD_VALUE 0x388
+#define MDP_WD_TIMER_1_CTL 0x390
+#define MDP_WD_TIMER_1_CTL2 0x394
+#define MDP_WD_TIMER_1_LOAD_VALUE 0x398
+#define MDP_WD_TIMER_2_CTL 0x420
+#define MDP_WD_TIMER_2_CTL2 0x424
+#define MDP_WD_TIMER_2_LOAD_VALUE 0x428
+#define MDP_WD_TIMER_3_CTL 0x430
+#define MDP_WD_TIMER_3_CTL2 0x434
+#define MDP_WD_TIMER_3_LOAD_VALUE 0x438
+#define MDP_WD_TIMER_4_CTL 0x440
+#define MDP_WD_TIMER_4_CTL2 0x444
+#define MDP_WD_TIMER_4_LOAD_VALUE 0x448
#define MDP_TICK_COUNT 16
#define XO_CLK_RATE 19200
@@ -204,38 +216,74 @@
status->wb[WB_3] = 0;
}
-static void sde_hw_setup_vsync_sel(struct sde_hw_mdp *mdp,
- struct sde_watchdog_te_status *cfg, bool watchdog_te)
+static void sde_hw_setup_vsync_source(struct sde_hw_mdp *mdp,
+ struct sde_vsync_source_cfg *cfg)
{
- struct sde_hw_blk_reg_map *c = &mdp->hw;
- u32 reg = 0;
- int i = 0;
- u32 pp_offset[] = {0xC, 0x8, 0x4, 0x13};
+ struct sde_hw_blk_reg_map *c;
+ u32 reg, wd_load_value, wd_ctl, wd_ctl2, i;
+ static const u32 pp_offset[PINGPONG_MAX] = {0xC, 0x8, 0x4, 0x13, 0x18};
- if (!mdp)
+ if (!mdp || !cfg || (cfg->pp_count > ARRAY_SIZE(cfg->ppnumber)))
return;
+ c = &mdp->hw;
reg = SDE_REG_READ(c, MDP_VSYNC_SEL);
for (i = 0; i < cfg->pp_count; i++) {
int pp_idx = cfg->ppnumber[i] - PINGPONG_0;
+ if (pp_idx >= ARRAY_SIZE(pp_offset))
+ continue;
- if (watchdog_te)
- reg |= 0xF << pp_offset[pp_idx];
- else
- reg &= ~(0xF << pp_offset[pp_idx]);
+ reg &= ~(0xf << pp_offset[pp_idx]);
+ reg |= (cfg->vsync_source & 0xf) << pp_offset[pp_idx];
}
-
SDE_REG_WRITE(c, MDP_VSYNC_SEL, reg);
- if (watchdog_te) {
- SDE_REG_WRITE(c, MDP_WD_TIMER_0_LOAD_VALUE,
+ if (cfg->vsync_source >= SDE_VSYNC_SOURCE_WD_TIMER_4 &&
+ cfg->vsync_source <= SDE_VSYNC_SOURCE_WD_TIMER_0) {
+ switch (cfg->vsync_source) {
+ case SDE_VSYNC_SOURCE_WD_TIMER_4:
+ wd_load_value = MDP_WD_TIMER_4_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_4_CTL;
+ wd_ctl2 = MDP_WD_TIMER_4_CTL2;
+ break;
+ case SDE_VSYNC_SOURCE_WD_TIMER_3:
+ wd_load_value = MDP_WD_TIMER_3_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_3_CTL;
+ wd_ctl2 = MDP_WD_TIMER_3_CTL2;
+ break;
+ case SDE_VSYNC_SOURCE_WD_TIMER_2:
+ wd_load_value = MDP_WD_TIMER_2_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_2_CTL;
+ wd_ctl2 = MDP_WD_TIMER_2_CTL2;
+ break;
+ case SDE_VSYNC_SOURCE_WD_TIMER_1:
+ wd_load_value = MDP_WD_TIMER_1_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_1_CTL;
+ wd_ctl2 = MDP_WD_TIMER_1_CTL2;
+ break;
+ case SDE_VSYNC_SOURCE_WD_TIMER_0:
+ default:
+ wd_load_value = MDP_WD_TIMER_0_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_0_CTL;
+ wd_ctl2 = MDP_WD_TIMER_0_CTL2;
+ break;
+ }
+
+ if (cfg->is_dummy) {
+ SDE_REG_WRITE(c, wd_ctl2, 0x0);
+ } else {
+ SDE_REG_WRITE(c, wd_load_value,
CALCULATE_WD_LOAD_VALUE(cfg->frame_rate));
- SDE_REG_WRITE(c, MDP_WD_TIMER_0_CTL, BIT(0)); /* clear timer */
- reg = SDE_REG_READ(c, MDP_WD_TIMER_0_CTL2);
- reg |= BIT(8); /* enable heartbeat timer */
- reg |= BIT(0); /* enable WD timer */
- SDE_REG_WRITE(c, MDP_WD_TIMER_0_CTL2, reg);
+ SDE_REG_WRITE(c, wd_ctl, BIT(0)); /* clear timer */
+ reg = SDE_REG_READ(c, wd_ctl2);
+ reg |= BIT(8); /* enable heartbeat timer */
+ reg |= BIT(0); /* enable WD timer */
+ SDE_REG_WRITE(c, wd_ctl2, reg);
+ }
+
+ /* make sure that timers are enabled/disabled for vsync state */
+ wmb();
}
}
@@ -308,7 +356,7 @@
ops->setup_cdm_output = sde_hw_setup_cdm_output;
ops->setup_clk_force_ctrl = sde_hw_setup_clk_force_ctrl;
ops->get_danger_status = sde_hw_get_danger_status;
- ops->setup_vsync_sel = sde_hw_setup_vsync_sel;
+ ops->setup_vsync_source = sde_hw_setup_vsync_source;
ops->get_safe_status = sde_hw_get_safe_status;
ops->setup_dce = sde_hw_setup_dce;
ops->reset_ubwc = sde_hw_reset_ubwc;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.h b/drivers/gpu/drm/msm/sde/sde_hw_top.h
index 573780e..86c4219 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_top.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_top.h
@@ -78,15 +78,21 @@
};
/**
- * struct sde_watchdog_te_status - configure watchdog timer to generate TE
+ * struct sde_vsync_source_cfg - configure vsync source and configure the
+ * watchdog timers if required.
* @pp_count: number of ping pongs active
* @frame_rate: Display frame rate
* @ppnumber: ping pong index array
+ * @vsync_source: vsync source selection
+ * @is_dummy: a dummy source of vsync selection. It must not be selected for
+ * any case other than sde rsc idle request.
*/
-struct sde_watchdog_te_status {
+struct sde_vsync_source_cfg {
u32 pp_count;
u32 frame_rate;
u32 ppnumber[PINGPONG_MAX];
+ u32 vsync_source;
+ bool is_dummy;
};
/**
@@ -155,13 +161,12 @@
struct sde_danger_safe_status *status);
/**
- * setup_vsync_sel - get vsync configuration details
+ * setup_vsync_source - setup vsync source configuration details
* @mdp: mdp top context driver
- * @cfg: watchdog timer configuration
- * @watchdog_te: watchdog timer enable
+ * @cfg: vsync source selection configuration
*/
- void (*setup_vsync_sel)(struct sde_hw_mdp *mdp,
- struct sde_watchdog_te_status *cfg, bool watchdog_te);
+ void (*setup_vsync_source)(struct sde_hw_mdp *mdp,
+ struct sde_vsync_source_cfg *cfg);
/**
* get_safe_status - get safe status
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_util.h b/drivers/gpu/drm/msm/sde/sde_hw_util.h
index 8f469b2..aa3d5b9 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_util.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_util.h
@@ -17,6 +17,7 @@
#include <linux/slab.h>
#include "sde_hw_mdss.h"
+#define REG_MASK(n) ((BIT(n)) - 1)
struct sde_format_extended;
/*
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_vbif.c b/drivers/gpu/drm/msm/sde/sde_hw_vbif.c
index b5c273a..9e6a246 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_vbif.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_vbif.c
@@ -22,7 +22,7 @@
#define VBIF_QOS_REMAP_01 0x0024
#define VBIF_QOS_REMAP_10 0x0028
#define VBIF_QOS_REMAP_11 0x002C
-#define VBIF_WRITE_GATHTER_EN 0x00AC
+#define VBIF_WRITE_GATHER_EN 0x00AC
#define VBIF_IN_RD_LIM_CONF0 0x00B0
#define VBIF_IN_RD_LIM_CONF1 0x00B4
#define VBIF_IN_RD_LIM_CONF2 0x00B8
@@ -167,6 +167,21 @@
SDE_REG_WRITE(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high, reg_val_lvl);
}
+static void sde_hw_set_write_gather_en(struct sde_hw_vbif *vbif, u32 xin_id)
+{
+ struct sde_hw_blk_reg_map *c;
+ u32 reg_val;
+
+ if (!vbif || xin_id >= MAX_XIN_COUNT)
+ return;
+
+ c = &vbif->hw;
+
+ reg_val = SDE_REG_READ(c, VBIF_WRITE_GATHER_EN);
+ reg_val |= BIT(xin_id);
+ SDE_REG_WRITE(c, VBIF_WRITE_GATHER_EN, reg_val);
+}
+
static void _setup_vbif_ops(struct sde_hw_vbif_ops *ops,
unsigned long cap)
{
@@ -177,6 +192,7 @@
if (test_bit(SDE_VBIF_QOS_REMAP, &cap))
ops->set_qos_remap = sde_hw_set_qos_remap;
ops->set_mem_type = sde_hw_set_mem_type;
+ ops->set_write_gather_en = sde_hw_set_write_gather_en;
}
static const struct sde_vbif_cfg *_top_offset(enum sde_vbif vbif,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_vbif.h b/drivers/gpu/drm/msm/sde/sde_hw_vbif.h
index 80a9e5a..81cb9d6 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_vbif.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_vbif.h
@@ -80,6 +80,13 @@
*/
void (*set_mem_type)(struct sde_hw_vbif *vbif,
u32 xin_id, u32 value);
+
+ /**
+ * set_write_gather_en - set write_gather enable
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ */
+ void (*set_write_gather_en)(struct sde_hw_vbif *vbif, u32 xin_id);
};
struct sde_hw_vbif {
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index 26125d8..8dd6448 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -393,9 +393,6 @@
{
struct sde_kms *sde_kms;
struct msm_drm_private *priv;
- struct drm_crtc *crtc;
- struct drm_crtc_state *old_crtc_state;
- int i;
if (!kms || !old_state)
return;
@@ -405,11 +402,9 @@
return;
priv = sde_kms->dev->dev_private;
- for_each_crtc_in_state(old_state, crtc, old_crtc_state, i)
- sde_crtc_complete_commit(crtc, old_crtc_state);
sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
- SDE_EVT32(SDE_EVTLOG_FUNC_EXIT);
+ SDE_EVT32_VERBOSE(SDE_EVTLOG_FUNC_EXIT);
}
static void sde_kms_wait_for_tx_complete(struct msm_kms *kms,
@@ -459,7 +454,7 @@
struct drm_crtc *crtc)
{
struct drm_encoder *encoder;
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev;
int ret;
if (!kms || !crtc || !crtc->state) {
@@ -467,6 +462,8 @@
return;
}
+ dev = crtc->dev;
+
if (!crtc->state->enable) {
SDE_DEBUG("[crtc:%d] not enable\n", crtc->base.id);
return;
@@ -517,8 +514,10 @@
}
/* old_state actually contains updated crtc pointers */
- for_each_crtc_in_state(old_state, crtc, old_crtc_state, i)
- sde_crtc_prepare_commit(crtc, old_crtc_state);
+ for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ if (crtc->state->active)
+ sde_crtc_prepare_commit(crtc, old_crtc_state);
+ }
}
/**
@@ -636,13 +635,16 @@
.post_init = dsi_conn_post_init,
.detect = dsi_conn_detect,
.get_modes = dsi_connector_get_modes,
+ .put_modes = dsi_connector_put_modes,
.mode_valid = dsi_conn_mode_valid,
.get_info = dsi_display_get_info,
.set_backlight = dsi_display_set_backlight,
.soft_reset = dsi_display_soft_reset,
.pre_kickoff = dsi_conn_pre_kickoff,
.clk_ctrl = dsi_display_clk_ctrl,
- .get_topology = dsi_conn_get_topology
+ .set_power = dsi_display_set_power,
+ .get_mode_info = dsi_conn_get_mode_info,
+ .get_dst_format = dsi_display_get_dst_format,
};
static const struct sde_connector_ops wb_ops = {
.post_init = sde_wb_connector_post_init,
@@ -651,7 +653,8 @@
.set_property = sde_wb_connector_set_property,
.get_info = sde_wb_get_info,
.soft_reset = NULL,
- .get_topology = sde_wb_get_topology
+ .get_mode_info = sde_wb_get_mode_info,
+ .get_dst_format = NULL
};
static const struct sde_connector_ops dp_ops = {
.post_init = dp_connector_post_init,
@@ -659,7 +662,7 @@
.get_modes = dp_connector_get_modes,
.mode_valid = dp_connector_mode_valid,
.get_info = dp_connector_get_info,
- .get_topology = dp_connector_get_topology,
+ .get_mode_info = dp_connector_get_mode_info,
};
struct msm_display_info info;
struct drm_encoder *encoder;
@@ -1134,7 +1137,7 @@
}
ret = sde_format_get_plane_sizes(fbo->fmt, fbo->width, fbo->height,
- &fbo->layout);
+ &fbo->layout, fbo->layout.plane_pitch);
if (ret) {
SDE_ERROR("failed to get plane sizes\n");
goto done;
@@ -1336,6 +1339,72 @@
if (sde_kms->mmio)
msm_iounmap(pdev, sde_kms->mmio);
sde_kms->mmio = NULL;
+
+ sde_reg_dma_deinit();
+}
+
+int sde_kms_mmu_detach(struct sde_kms *sde_kms, bool secure_only)
+{
+ int i;
+
+ if (!sde_kms)
+ return -EINVAL;
+
+ for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
+ struct msm_mmu *mmu;
+ struct msm_gem_address_space *aspace = sde_kms->aspace[i];
+
+ if (!aspace)
+ continue;
+
+ mmu = sde_kms->aspace[i]->mmu;
+
+ if (secure_only &&
+ !aspace->mmu->funcs->is_domain_secure(mmu))
+ continue;
+
+ /* cleanup aspace before detaching */
+ msm_gem_aspace_domain_attach_detach_update(aspace, true);
+
+ SDE_DEBUG("Detaching domain:%d\n", i);
+ aspace->mmu->funcs->detach(mmu, (const char **)iommu_ports,
+ ARRAY_SIZE(iommu_ports));
+
+ aspace->domain_attached = false;
+ }
+
+ return 0;
+}
+
+int sde_kms_mmu_attach(struct sde_kms *sde_kms, bool secure_only)
+{
+ int i;
+
+ if (!sde_kms)
+ return -EINVAL;
+
+ for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
+ struct msm_mmu *mmu;
+ struct msm_gem_address_space *aspace = sde_kms->aspace[i];
+
+ if (!aspace)
+ continue;
+
+ mmu = sde_kms->aspace[i]->mmu;
+
+ if (secure_only &&
+ !aspace->mmu->funcs->is_domain_secure(mmu))
+ continue;
+
+ SDE_DEBUG("Attaching domain:%d\n", i);
+ aspace->mmu->funcs->attach(mmu, (const char **)iommu_ports,
+ ARRAY_SIZE(iommu_ports));
+
+ msm_gem_aspace_domain_attach_detach_update(aspace, false);
+ aspace->domain_attached = true;
+ }
+
+ return 0;
}
static void sde_kms_destroy(struct msm_kms *kms)
@@ -1370,6 +1439,128 @@
sde_crtc_cancel_pending_flip(priv->crtcs[i], file);
}
+static int sde_kms_check_secure_transition(struct msm_kms *kms,
+ struct drm_atomic_state *state)
+{
+ struct sde_kms *sde_kms;
+ struct drm_device *dev;
+ struct drm_crtc *crtc;
+ struct drm_crtc *sec_crtc = NULL, *temp_crtc = NULL;
+ struct drm_crtc_state *crtc_state;
+ int secure_crtc_cnt = 0, active_crtc_cnt = 0;
+ int secure_global_crtc_cnt = 0, active_mode_crtc_cnt = 0;
+ int i;
+
+ if (!kms || !state) {
+ return -EINVAL;
+ SDE_ERROR("invalid arguments\n");
+ }
+
+ /* iterate state object for active and secure crtc */
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ if (!crtc_state->active)
+ continue;
+ active_crtc_cnt++;
+ if (sde_crtc_get_secure_level(crtc, crtc_state) ==
+ SDE_DRM_SEC_ONLY) {
+ sec_crtc = crtc;
+ secure_crtc_cnt++;
+ }
+ }
+
+ /* bail out from further validation if no secure ctrc */
+ if (!secure_crtc_cnt)
+ return 0;
+
+ if ((secure_crtc_cnt > MAX_ALLOWED_SECURE_CLIENT_CNT) ||
+ (secure_crtc_cnt &&
+ (active_crtc_cnt > MAX_ALLOWED_CRTC_CNT_DURING_SECURE))) {
+ SDE_ERROR("Secure check failed active:%d, secure:%d\n",
+ active_crtc_cnt, secure_crtc_cnt);
+ return -EPERM;
+ }
+
+ sde_kms = to_sde_kms(kms);
+ dev = sde_kms->dev;
+ /* iterate global list for active and secure crtc */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+
+ if (!crtc->state->active)
+ continue;
+
+ active_mode_crtc_cnt++;
+
+ if (sde_crtc_get_secure_level(crtc, crtc->state) ==
+ SDE_DRM_SEC_ONLY) {
+ secure_global_crtc_cnt++;
+ temp_crtc = crtc;
+ }
+ }
+
+ /**
+ * if more than one crtc is active fail
+ * check if the previous and current commit secure
+ * are same
+ */
+ if (secure_crtc_cnt && ((active_mode_crtc_cnt > 1) ||
+ (secure_global_crtc_cnt && (temp_crtc != sec_crtc))))
+ SDE_ERROR("Secure check failed active:%d crtc_id:%d\n",
+ active_mode_crtc_cnt, temp_crtc->base.id);
+
+ return 0;
+}
+
+static int sde_kms_atomic_check(struct msm_kms *kms,
+ struct drm_atomic_state *state)
+{
+ struct sde_kms *sde_kms;
+ struct drm_device *dev;
+ int ret;
+
+ if (!kms || !state)
+ return -EINVAL;
+
+ sde_kms = to_sde_kms(kms);
+ dev = sde_kms->dev;
+
+ ret = drm_atomic_helper_check(dev, state);
+ if (ret)
+ return ret;
+ /*
+ * Check if any secure transition(moving CRTC between secure and
+ * non-secure state and vice-versa) is allowed or not. when moving
+ * to secure state, planes with fb_mode set to dir_translated only can
+ * be staged on the CRTC, and only one CRTC can be active during
+ * Secure state
+ */
+ return sde_kms_check_secure_transition(kms, state);
+}
+
+static struct msm_gem_address_space*
+_sde_kms_get_address_space(struct msm_kms *kms,
+ unsigned int domain)
+{
+ struct sde_kms *sde_kms;
+
+ if (!kms) {
+ SDE_ERROR("invalid kms\n");
+ return NULL;
+ }
+
+ sde_kms = to_sde_kms(kms);
+ if (!sde_kms) {
+ SDE_ERROR("invalid sde_kms\n");
+ return NULL;
+ }
+
+ if (domain >= MSM_SMMU_DOMAIN_MAX)
+ return NULL;
+
+ return (sde_kms->aspace[domain] &&
+ sde_kms->aspace[domain]->domain_attached) ?
+ sde_kms->aspace[domain] : NULL;
+}
+
static const struct msm_kms_funcs kms_funcs = {
.hw_init = sde_kms_hw_init,
.postinit = sde_kms_postinit,
@@ -1387,10 +1578,12 @@
.enable_vblank = sde_kms_enable_vblank,
.disable_vblank = sde_kms_disable_vblank,
.check_modified_format = sde_format_check_modified_format,
+ .atomic_check = sde_kms_atomic_check,
.get_format = sde_get_msm_format,
.round_pixclk = sde_kms_round_pixclk,
.destroy = sde_kms_destroy,
.register_events = _sde_kms_register_events,
+ .get_address_space = _sde_kms_get_address_space,
};
/* the caller api needs to turn on clock before calling it */
@@ -1404,17 +1597,17 @@
struct msm_mmu *mmu;
int i;
- for (i = ARRAY_SIZE(sde_kms->mmu_id) - 1; i >= 0; i--) {
- if (!sde_kms->mmu[i])
+ for (i = ARRAY_SIZE(sde_kms->aspace) - 1; i >= 0; i--) {
+ if (!sde_kms->aspace[i])
continue;
- mmu = sde_kms->mmu[i];
- msm_unregister_mmu(sde_kms->dev, mmu);
+ mmu = sde_kms->aspace[i]->mmu;
+
mmu->funcs->detach(mmu, (const char **)iommu_ports,
ARRAY_SIZE(iommu_ports));
- mmu->funcs->destroy(mmu);
- sde_kms->mmu[i] = 0;
- sde_kms->mmu_id[i] = 0;
+ msm_gem_address_space_destroy(sde_kms->aspace[i]);
+
+ sde_kms->aspace[i] = NULL;
}
return 0;
@@ -1426,6 +1619,8 @@
int i, ret;
for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
+ struct msm_gem_address_space *aspace;
+
mmu = msm_smmu_new(sde_kms->dev->dev, i);
if (IS_ERR(mmu)) {
ret = PTR_ERR(mmu);
@@ -1434,25 +1629,24 @@
continue;
}
+ aspace = msm_gem_smmu_address_space_create(sde_kms->dev,
+ mmu, "sde");
+ if (IS_ERR(aspace)) {
+ ret = PTR_ERR(aspace);
+ mmu->funcs->destroy(mmu);
+ goto fail;
+ }
+
+ sde_kms->aspace[i] = aspace;
+
ret = mmu->funcs->attach(mmu, (const char **)iommu_ports,
ARRAY_SIZE(iommu_ports));
if (ret) {
SDE_ERROR("failed to attach iommu %d: %d\n", i, ret);
- mmu->funcs->destroy(mmu);
- continue;
- }
-
- sde_kms->mmu_id[i] = msm_register_mmu(sde_kms->dev, mmu);
- if (sde_kms->mmu_id[i] < 0) {
- ret = sde_kms->mmu_id[i];
- SDE_ERROR("failed to register sde iommu %d: %d\n",
- i, ret);
- mmu->funcs->detach(mmu, (const char **)iommu_ports,
- ARRAY_SIZE(iommu_ports));
+ msm_gem_address_space_destroy(aspace);
goto fail;
}
-
- sde_kms->mmu[i] = mmu;
+ aspace->domain_attached = true;
}
return 0;
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
index 5894fe2..4c0699e 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.h
+++ b/drivers/gpu/drm/msm/sde/sde_kms.h
@@ -24,6 +24,7 @@
#include "msm_drv.h"
#include "msm_kms.h"
#include "msm_mmu.h"
+#include "msm_gem.h"
#include "sde_dbg.h"
#include "sde_hw_catalog.h"
#include "sde_hw_ctl.h"
@@ -92,6 +93,15 @@
/* timeout in frames waiting for frame done */
#define SDE_FRAME_DONE_TIMEOUT 60
+/* max active secure client counts allowed */
+#define MAX_ALLOWED_SECURE_CLIENT_CNT 1
+
+/* max active crtc when secure client is active */
+#define MAX_ALLOWED_CRTC_CNT_DURING_SECURE 1
+
+/* max virtual encoders per secure crtc */
+#define MAX_ALLOWED_ENCODER_CNT_PER_SECURE_CRTC 1
+
/*
* struct sde_irq_callback - IRQ callback handlers
* @list: list to callback
@@ -158,8 +168,7 @@
int core_rev;
struct sde_mdss_cfg *catalog;
- struct msm_mmu *mmu[MSM_SMMU_DOMAIN_MAX];
- int mmu_id[MSM_SMMU_DOMAIN_MAX];
+ struct msm_gem_address_space *aspace[MSM_SMMU_DOMAIN_MAX];
struct sde_power_client *core_client;
struct ion_client *iclient;
@@ -500,4 +509,13 @@
*/
void sde_kms_fbo_unreference(struct sde_kms_fbo *fbo);
+/**
+ * smmu attach/detach functions
+ * @sde_kms: poiner to sde_kms structure
+ * @secure_only: if true only secure contexts are attached/detached, else
+ * all contexts are attached/detached/
+ */
+int sde_kms_mmu_attach(struct sde_kms *sde_kms, bool secure_only);
+int sde_kms_mmu_detach(struct sde_kms *sde_kms, bool secure_only);
+
#endif /* __sde_kms_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index 2a98af4..1affa9c 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -94,27 +94,9 @@
SDE_PLANE_QOS_PANIC_CTRL = BIT(2),
};
-/**
- * enum sde_plane_sclcheck_state - User scaler data status
- *
- * @SDE_PLANE_SCLCHECK_NONE: No user data provided
- * @SDE_PLANE_SCLCHECK_INVALID: Invalid user data provided
- * @SDE_PLANE_SCLCHECK_SCALER_V1: Valid scaler v1 data
- * @SDE_PLANE_SCLCHECK_SCALER_V1_CHECK: Unchecked scaler v1 data
- * @SDE_PLANE_SCLCHECK_SCALER_V2: Valid scaler v2 data
- * @SDE_PLANE_SCLCHECK_SCALER_V2_CHECK: Unchecked scaler v2 data
- */
-enum sde_plane_sclcheck_state {
- SDE_PLANE_SCLCHECK_NONE,
- SDE_PLANE_SCLCHECK_INVALID,
- SDE_PLANE_SCLCHECK_SCALER_V1,
- SDE_PLANE_SCLCHECK_SCALER_V1_CHECK,
- SDE_PLANE_SCLCHECK_SCALER_V2,
- SDE_PLANE_SCLCHECK_SCALER_V2_CHECK,
-};
-
/*
* struct sde_plane - local sde plane structure
+ * @aspace: address space pointer
* @csc_cfg: Decoded user configuration for csc
* @csc_usr_ptr: Points to csc_cfg if valid user config available
* @csc_ptr: Points to sde_csc_cfg structure to use for current
@@ -123,14 +105,11 @@
* @sbuf_mode: force stream buffer mode if set
* @sbuf_writeback: force stream buffer writeback if set
* @revalidate: force revalidation of all the plane properties
- * @scaler_check_state: Indicates status of user provided pixle extension data
* @blob_rot_caps: Pointer to rotator capability blob
*/
struct sde_plane {
struct drm_plane base;
- int mmu_id;
-
struct mutex lock;
enum sde_sspp pipe;
@@ -141,7 +120,6 @@
struct sde_hw_pipe *pipe_hw;
struct sde_hw_pipe_cfg pipe_cfg;
struct sde_hw_sharp_cfg sharp_cfg;
- struct sde_hw_scaler3_cfg *scaler3_cfg;
struct sde_hw_pipe_qos_cfg pipe_qos_cfg;
uint32_t color_fill;
bool is_error;
@@ -153,9 +131,6 @@
u32 sbuf_writeback;
bool revalidate;
- struct sde_hw_pixel_ext pixel_ext;
- enum sde_plane_sclcheck_state scaler_check_state;
-
struct sde_csc_cfg csc_cfg;
struct sde_csc_cfg *csc_usr_ptr;
struct sde_csc_cfg *csc_ptr;
@@ -283,16 +258,16 @@
((src_width + 32) * fmt->bpp);
} else {
/* non NV12 */
- total_fl = (fixed_buff_size / 2 - hflip_bytes) /
- ((src_width + 32) * fmt->bpp * 2);
+ total_fl = (fixed_buff_size / 2 - hflip_bytes) * 2 /
+ ((src_width + 32) * fmt->bpp);
}
} else {
if (pstate->multirect_mode == SDE_SSPP_MULTIRECT_PARALLEL) {
- total_fl = (fixed_buff_size / 2 - hflip_bytes) /
- ((src_width + 32) * fmt->bpp * 2);
+ total_fl = (fixed_buff_size / 2 - hflip_bytes) * 2 /
+ ((src_width + 32) * fmt->bpp);
} else {
- total_fl = (fixed_buff_size - hflip_bytes) /
- ((src_width + 32) * fmt->bpp * 2);
+ total_fl = (fixed_buff_size - hflip_bytes) * 2 /
+ ((src_width + 32) * fmt->bpp);
}
}
@@ -367,7 +342,7 @@
total_fl = _sde_plane_calc_fill_level(plane, fmt,
psde->pipe_cfg.src_rect.w);
- if (SDE_FORMAT_IS_LINEAR(fmt))
+ if (fmt && SDE_FORMAT_IS_LINEAR(fmt))
lut_usage = SDE_QOS_LUT_USAGE_LINEAR;
else
lut_usage = SDE_QOS_LUT_USAGE_MACROTILE;
@@ -428,7 +403,7 @@
fb->modifier,
drm_format_num_planes(fb->pixel_format));
- if (SDE_FORMAT_IS_LINEAR(fmt)) {
+ if (fmt && SDE_FORMAT_IS_LINEAR(fmt)) {
danger_lut = psde->catalog->perf.danger_lut_tbl
[SDE_QOS_LUT_USAGE_LINEAR];
safe_lut = psde->catalog->perf.safe_lut_tbl
@@ -700,7 +675,7 @@
SDE_DEBUG("plane%d size:%llu time:%llu\n",
plane->base.id, cfg.size, cfg.time);
- SDE_EVT32(DRMID(plane), cfg.size, cfg.time);
+ SDE_EVT32_VERBOSE(DRMID(plane), cfg.size, cfg.time);
psde->pipe_hw->ops.setup_ts_prefill(psde->pipe_hw, &cfg,
pstate->multirect_index);
}
@@ -867,12 +842,62 @@
return ret;
}
+/**
+ * _sde_plane_get_aspace: gets the address space based on the
+ * fb_translation mode property
+ */
+static int _sde_plane_get_aspace(
+ struct sde_plane *psde,
+ struct sde_plane_state *pstate,
+ struct msm_gem_address_space **aspace)
+{
+ struct sde_kms *kms;
+ int mode;
+
+ if (!psde || !pstate || !aspace) {
+ SDE_ERROR("invalid parameters\n");
+ return -EINVAL;
+ }
+
+ kms = _sde_plane_get_kms(&psde->base);
+ if (!kms) {
+ SDE_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+
+ mode = sde_plane_get_property(pstate,
+ PLANE_PROP_FB_TRANSLATION_MODE);
+
+ switch (mode) {
+ case SDE_DRM_FB_NON_SEC:
+ *aspace = kms->aspace[MSM_SMMU_DOMAIN_UNSECURE];
+ if (!aspace)
+ return -EINVAL;
+ break;
+ case SDE_DRM_FB_SEC:
+ *aspace = kms->aspace[MSM_SMMU_DOMAIN_SECURE];
+ if (!aspace)
+ return -EINVAL;
+ break;
+ case SDE_DRM_FB_SEC_DIR_TRANS:
+ case SDE_DRM_FB_NON_SEC_DIR_TRANS:
+ *aspace = NULL;
+ break;
+ default:
+ SDE_ERROR("invalid fb_translation mode:%d\n", mode);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
static inline void _sde_plane_set_scanout(struct drm_plane *plane,
struct sde_plane_state *pstate,
struct sde_hw_pipe_cfg *pipe_cfg,
struct drm_framebuffer *fb)
{
struct sde_plane *psde;
+ struct msm_gem_address_space *aspace = NULL;
int ret;
if (!plane || !pstate || !pipe_cfg || !fb) {
@@ -888,13 +913,19 @@
return;
}
- ret = sde_format_populate_layout(psde->mmu_id, fb, &pipe_cfg->layout);
+ ret = _sde_plane_get_aspace(psde, pstate, &aspace);
+ if (ret) {
+ SDE_ERROR_PLANE(psde, "Failed to get aspace %d\n", ret);
+ return;
+ }
+
+ ret = sde_format_populate_layout(aspace, fb, &pipe_cfg->layout);
if (ret == -EAGAIN)
SDE_DEBUG_PLANE(psde, "not updating same src addrs\n");
else if (ret)
SDE_ERROR_PLANE(psde, "failed to get format layout, %d\n", ret);
else if (psde->pipe_hw->ops.setup_sourceaddress) {
- SDE_EVT32(psde->pipe_hw->idx,
+ SDE_EVT32_VERBOSE(psde->pipe_hw->idx,
pipe_cfg->layout.width,
pipe_cfg->layout.height,
pipe_cfg->layout.plane_addr[0],
@@ -917,28 +948,24 @@
struct sde_hw_scaler3_cfg *cfg;
int ret = 0;
- if (!psde || !psde->scaler3_cfg) {
+ if (!psde || !pstate) {
SDE_ERROR("invalid args\n");
return -EINVAL;
- } else if (!pstate) {
- /* pstate is expected to be null on forced color fill */
- SDE_DEBUG("null pstate\n");
- return -EINVAL;
}
- cfg = psde->scaler3_cfg;
+ cfg = &pstate->scaler3_cfg;
cfg->dir_lut = msm_property_get_blob(
&psde->property_info,
- pstate->property_blobs, &cfg->dir_len,
+ &pstate->property_state, &cfg->dir_len,
PLANE_PROP_SCALER_LUT_ED);
cfg->cir_lut = msm_property_get_blob(
&psde->property_info,
- pstate->property_blobs, &cfg->cir_len,
+ &pstate->property_state, &cfg->cir_len,
PLANE_PROP_SCALER_LUT_CIR);
cfg->sep_lut = msm_property_get_blob(
&psde->property_info,
- pstate->property_blobs, &cfg->sep_len,
+ &pstate->property_state, &cfg->sep_len,
PLANE_PROP_SCALER_LUT_SEP);
if (!cfg->dir_lut || !cfg->cir_lut || !cfg->sep_lut)
ret = -ENODATA;
@@ -946,6 +973,7 @@
}
static void _sde_plane_setup_scaler3(struct sde_plane *psde,
+ struct sde_plane_state *pstate,
uint32_t src_w, uint32_t src_h, uint32_t dst_w, uint32_t dst_h,
struct sde_hw_scaler3_cfg *scale_cfg,
const struct sde_format *fmt,
@@ -953,16 +981,17 @@
{
uint32_t decimated, i;
- if (!psde || !scale_cfg || !fmt || !chroma_subsmpl_h ||
+ if (!psde || !pstate || !scale_cfg || !fmt || !chroma_subsmpl_h ||
!chroma_subsmpl_v) {
- SDE_ERROR("psde %pK scale_cfg %pK fmt %pK smp_h %d smp_v %d\n"
- , psde, scale_cfg, fmt, chroma_subsmpl_h,
+ SDE_ERROR(
+ "psde %d pstate %d scale_cfg %d fmt %d smp_h %d smp_v %d\n",
+ !!psde, !!pstate, !!scale_cfg, !!fmt, chroma_subsmpl_h,
chroma_subsmpl_v);
return;
}
memset(scale_cfg, 0, sizeof(*scale_cfg));
- memset(&psde->pixel_ext, 0, sizeof(struct sde_hw_pixel_ext));
+ memset(&pstate->pixel_ext, 0, sizeof(struct sde_hw_pixel_ext));
decimated = DECIMATED_DIMENSION(src_w,
psde->pipe_cfg.horz_decimation);
@@ -1000,9 +1029,9 @@
}
scale_cfg->preload_x[i] = SDE_QSEED3_DEFAULT_PRELOAD_H;
scale_cfg->preload_y[i] = SDE_QSEED3_DEFAULT_PRELOAD_V;
- psde->pixel_ext.num_ext_pxls_top[i] =
+ pstate->pixel_ext.num_ext_pxls_top[i] =
scale_cfg->src_height[i];
- psde->pixel_ext.num_ext_pxls_left[i] =
+ pstate->pixel_ext.num_ext_pxls_left[i] =
scale_cfg->src_width[i];
}
if (!(SDE_FORMAT_IS_YUV(fmt)) && (src_h == dst_h)
@@ -1247,7 +1276,7 @@
if (psde->pipe_hw->ops.setup_pa_memcolor) {
/* Skin memory color setup */
memcol = msm_property_get_blob(&psde->property_info,
- pstate->property_blobs,
+ &pstate->property_state,
&memcol_sz,
PLANE_PROP_SKIN_COLOR);
psde->pipe_hw->ops.setup_pa_memcolor(psde->pipe_hw,
@@ -1255,7 +1284,7 @@
/* Sky memory color setup */
memcol = msm_property_get_blob(&psde->property_info,
- pstate->property_blobs,
+ &pstate->property_state,
&memcol_sz,
PLANE_PROP_SKY_COLOR);
psde->pipe_hw->ops.setup_pa_memcolor(psde->pipe_hw,
@@ -1263,7 +1292,7 @@
/* Foliage memory color setup */
memcol = msm_property_get_blob(&psde->property_info,
- pstate->property_blobs,
+ &pstate->property_state,
&memcol_sz,
PLANE_PROP_FOLIAGE_COLOR);
psde->pipe_hw->ops.setup_pa_memcolor(psde->pipe_hw,
@@ -1272,19 +1301,19 @@
}
static void _sde_plane_setup_scaler(struct sde_plane *psde,
- const struct sde_format *fmt,
- struct sde_plane_state *pstate)
+ struct sde_plane_state *pstate,
+ const struct sde_format *fmt, bool color_fill)
{
struct sde_hw_pixel_ext *pe;
uint32_t chroma_subsmpl_h, chroma_subsmpl_v;
- if (!psde || !fmt) {
+ if (!psde || !fmt || !pstate) {
SDE_ERROR("invalid arg(s), plane %d fmt %d state %d\n",
psde != 0, fmt != 0, pstate != 0);
return;
}
- pe = &(psde->pixel_ext);
+ pe = &pstate->pixel_ext;
psde->pipe_cfg.horz_decimation =
sde_plane_get_property(pstate, PLANE_PROP_H_DECIMATE);
@@ -1299,23 +1328,25 @@
/* update scaler */
if (psde->features & BIT(SDE_SSPP_SCALER_QSEED3)) {
- int error;
+ int rc;
- error = _sde_plane_setup_scaler3_lut(psde, pstate);
- if (error || psde->debugfs_default_scale ||
- psde->scaler_check_state !=
- SDE_PLANE_SCLCHECK_SCALER_V2) {
+ if (!color_fill && !psde->debugfs_default_scale)
+ rc = _sde_plane_setup_scaler3_lut(psde, pstate);
+ else
+ rc = -EINVAL;
+ if (rc || pstate->scaler_check_state !=
+ SDE_PLANE_SCLCHECK_SCALER_V2) {
/* calculate default config for QSEED3 */
- _sde_plane_setup_scaler3(psde,
+ _sde_plane_setup_scaler3(psde, pstate,
psde->pipe_cfg.src_rect.w,
psde->pipe_cfg.src_rect.h,
psde->pipe_cfg.dst_rect.w,
psde->pipe_cfg.dst_rect.h,
- psde->scaler3_cfg, fmt,
+ &pstate->scaler3_cfg, fmt,
chroma_subsmpl_h, chroma_subsmpl_v);
}
- } else if (psde->scaler_check_state != SDE_PLANE_SCLCHECK_SCALER_V1 ||
- !pstate || psde->debugfs_default_scale) {
+ } else if (pstate->scaler_check_state != SDE_PLANE_SCLCHECK_SCALER_V1 ||
+ color_fill || psde->debugfs_default_scale) {
uint32_t deci_dim, i;
/* calculate default configuration for QSEED2 */
@@ -1391,9 +1422,9 @@
{
const struct sde_format *fmt;
const struct drm_plane *plane;
- const struct sde_plane_state *pstate;
+ struct sde_plane_state *pstate;
- if (!psde) {
+ if (!psde || !psde->base.state) {
SDE_ERROR("invalid plane\n");
return -EINVAL;
}
@@ -1425,7 +1456,7 @@
psde->pipe_cfg.src_rect.y = 0;
psde->pipe_cfg.src_rect.w = psde->pipe_cfg.dst_rect.w;
psde->pipe_cfg.src_rect.h = psde->pipe_cfg.dst_rect.h;
- _sde_plane_setup_scaler(psde, fmt, 0);
+ _sde_plane_setup_scaler(psde, pstate, fmt, true);
if (psde->pipe_hw->ops.setup_format)
psde->pipe_hw->ops.setup_format(psde->pipe_hw,
@@ -1439,7 +1470,7 @@
if (psde->pipe_hw->ops.setup_pe)
psde->pipe_hw->ops.setup_pe(psde->pipe_hw,
- &psde->pixel_ext);
+ &pstate->pixel_ext);
}
return 0;
@@ -1722,6 +1753,15 @@
drm_rect_height(&rstate->out_rot_rect) >> 16,
rstate->out_rot_rect.x1 >> 16,
rstate->out_rot_rect.y1 >> 16);
+ SDE_EVT32_VERBOSE(DRMID(plane), rstate->sequence_id,
+ rstate->out_xpos, rstate->nplane,
+ in_rot->x1 >> 16, in_rot->y1 >> 16,
+ drm_rect_width(in_rot) >> 16,
+ drm_rect_height(in_rot) >> 16,
+ rstate->out_rot_rect.x1 >> 16,
+ rstate->out_rot_rect.y1 >> 16,
+ drm_rect_width(&rstate->out_rot_rect) >> 16,
+ drm_rect_height(&rstate->out_rot_rect) >> 16);
}
/**
@@ -1741,6 +1781,7 @@
struct drm_crtc_state *cstate;
struct sde_crtc_state *sde_cstate;
int ret, i;
+ int fb_mode;
if (!plane || !state || !state->fb || !rstate->rot_hw) {
SDE_ERROR("invalid parameters\n");
@@ -1764,7 +1805,14 @@
rot_cmd->rot90 = rstate->rot90;
rot_cmd->hflip = rstate->hflip;
rot_cmd->vflip = rstate->vflip;
- rot_cmd->secure = state->fb->flags & DRM_MODE_FB_SECURE ? true : false;
+ fb_mode = sde_plane_get_property(pstate,
+ PLANE_PROP_FB_TRANSLATION_MODE);
+ if ((fb_mode == SDE_DRM_FB_SEC) ||
+ (fb_mode == SDE_DRM_FB_SEC_DIR_TRANS))
+ rot_cmd->secure = true;
+ else
+ rot_cmd->secure = false;
+
rot_cmd->prefill_bw = sde_crtc_get_property(sde_cstate,
CRTC_PROP_ROT_PREFILL_BW);
rot_cmd->clkrate = sde_crtc_get_property(sde_cstate,
@@ -1801,7 +1849,7 @@
struct sde_hw_fmt_layout layout;
memset(&layout, 0, sizeof(struct sde_hw_fmt_layout));
- sde_format_populate_layout(rstate->mmu_id, state->fb,
+ sde_format_populate_layout(pstate->aspace, state->fb,
&layout);
for (i = 0; i < ARRAY_SIZE(rot_cmd->src_iova); i++) {
rot_cmd->src_iova[i] = layout.plane_addr[i];
@@ -1810,7 +1858,7 @@
rot_cmd->src_planes = layout.num_planes;
memset(&layout, 0, sizeof(struct sde_hw_fmt_layout));
- sde_format_populate_layout(rstate->mmu_id, rstate->out_fb,
+ sde_format_populate_layout(pstate->aspace, rstate->out_fb,
&layout);
for (i = 0; i < ARRAY_SIZE(rot_cmd->dst_iova); i++) {
rot_cmd->dst_iova[i] = layout.plane_addr[i];
@@ -1819,7 +1867,7 @@
rot_cmd->dst_planes = layout.num_planes;
/* VBIF remapper settings */
- for (i = 0; rstate->rot_hw->caps->xin_count; i++) {
+ for (i = 0; i < rstate->rot_hw->caps->xin_count; i++) {
const struct sde_rot_vbif_cfg *cfg =
&rstate->rot_hw->caps->vbif_cfg[i];
@@ -1912,7 +1960,7 @@
struct sde_kms_fbo *fbo;
struct drm_framebuffer *fb;
- if (!plane || !cstate || !rstate)
+ if (!plane || !cstate || !rstate || !rstate->rot_hw)
return;
fbo = sde_crtc_res_get(cstate, SDE_CRTC_RES_ROT_OUT_FBO,
@@ -1975,27 +2023,6 @@
if (sde_plane_enabled(new_state) && !new_rstate->out_fb)
_sde_plane_rot_get_fb(plane, cstate, new_rstate);
- /* release buffer if output format configuration changes */
- if (new_rstate->out_fb &&
- ((new_rstate->out_fb_height != new_rstate->out_fb->height) ||
- (new_rstate->out_fb_width != new_rstate->out_fb->width) ||
- (new_rstate->out_fb_pixel_format !=
- new_rstate->out_fb->pixel_format) ||
- (new_rstate->out_fb_modifier[0] !=
- new_rstate->out_fb->modifier[0]) ||
- (new_rstate->out_fb_flags != new_rstate->out_fb->flags))) {
-
- SDE_DEBUG("plane%d.%d release fb/fbo\n", plane->base.id,
- new_rstate->sequence_id);
-
- sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FB,
- (u64) &new_rstate->rot_hw->base);
- new_rstate->out_fb = NULL;
- sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FBO,
- (u64) &new_rstate->rot_hw->base);
- new_rstate->out_fbo = NULL;
- }
-
/* create new stream buffer if it is not available */
if (sde_plane_enabled(new_state) && !new_rstate->out_fb) {
u32 fb_w = drm_rect_width(&new_rstate->out_rot_rect) >> 16;
@@ -2004,11 +2031,6 @@
SDE_DEBUG("plane%d.%d allocate fb/fbo\n", plane->base.id,
new_rstate->sequence_id);
- if (new_state->fb->flags & DRM_MODE_FB_SECURE)
- new_rstate->mmu_id = MSM_SMMU_DOMAIN_SECURE;
- else
- new_rstate->mmu_id = MSM_SMMU_DOMAIN_UNSECURE;
-
/* check if out_fb is already attached to rotator */
new_rstate->out_fbo = sde_kms_fbo_alloc(plane->dev, fb_w, fb_h,
new_rstate->out_fb_pixel_format,
@@ -2035,6 +2057,8 @@
ret = -EINVAL;
goto error_create_fb;
}
+ SDE_EVT32_VERBOSE(DRMID(plane), new_rstate->sequence_id,
+ new_rstate->out_fb->base.id);
ret = sde_crtc_res_add(cstate, SDE_CRTC_RES_ROT_OUT_FB,
(u64) &new_rstate->rot_hw->base,
@@ -2046,9 +2070,9 @@
}
/* prepare rotator input buffer */
- ret = msm_framebuffer_prepare(new_state->fb, new_rstate->mmu_id);
+ ret = msm_framebuffer_prepare(new_state->fb, new_pstate->aspace);
if (ret) {
- SDE_ERROR("failed to prepare input framebuffer\n");
+ SDE_ERROR("failed to prepare input framebuffer, %d\n", ret);
goto error_prepare_input_buffer;
}
@@ -2058,9 +2082,10 @@
new_rstate->sequence_id);
ret = msm_framebuffer_prepare(new_rstate->out_fb,
- new_rstate->mmu_id);
+ new_pstate->aspace);
if (ret) {
- SDE_ERROR("failed to prepare inline framebuffer\n");
+ SDE_ERROR("failed to prepare inline framebuffer, %d\n",
+ ret);
goto error_prepare_output_buffer;
}
}
@@ -2068,7 +2093,7 @@
return 0;
error_prepare_output_buffer:
- msm_framebuffer_cleanup(new_state->fb, new_rstate->mmu_id);
+ msm_framebuffer_cleanup(new_state->fb, new_pstate->aspace);
error_prepare_input_buffer:
sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FB,
(u64) &new_rstate->rot_hw->base);
@@ -2124,7 +2149,7 @@
if (sde_plane_enabled(old_state)) {
if (old_rstate->out_fb) {
msm_framebuffer_cleanup(old_rstate->out_fb,
- old_rstate->mmu_id);
+ old_pstate->aspace);
sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FB,
(u64) &old_rstate->rot_hw->base);
old_rstate->out_fb = NULL;
@@ -2133,7 +2158,7 @@
old_rstate->out_fbo = NULL;
}
- msm_framebuffer_cleanup(old_state->fb, old_rstate->mmu_id);
+ msm_framebuffer_cleanup(old_state->fb, old_pstate->aspace);
}
}
@@ -2187,22 +2212,24 @@
rstate->out_sbuf = psde->sbuf_mode || rstate->rot90;
if (sde_plane_enabled(state) && rstate->out_sbuf) {
- SDE_DEBUG("plane%d.%d acquire rotator\n",
- plane->base.id, rstate->sequence_id);
+ SDE_DEBUG("plane%d.%d acquire rotator, fb %d\n",
+ plane->base.id, rstate->sequence_id,
+ state->fb ? state->fb->base.id : -1);
hw_blk = sde_crtc_res_get(cstate, SDE_HW_BLK_ROT,
(u64) state->fb);
if (!hw_blk) {
- SDE_ERROR("plane%d no available rotator\n",
- plane->base.id);
+ SDE_ERROR("plane%d.%d no available rotator, fb %d\n",
+ plane->base.id, rstate->sequence_id,
+ state->fb ? state->fb->base.id : -1);
return -EINVAL;
}
rstate->rot_hw = to_sde_hw_rot(hw_blk);
if (!rstate->rot_hw->ops.commit) {
- SDE_ERROR("plane%d invalid rotator ops\n",
- plane->base.id);
+ SDE_ERROR("plane%d.%d invalid rotator ops\n",
+ plane->base.id, rstate->sequence_id);
sde_crtc_res_put(cstate,
SDE_HW_BLK_ROT, (u64) state->fb);
rstate->rot_hw = NULL;
@@ -2216,19 +2243,44 @@
}
if (sde_plane_enabled(state) && rstate->out_sbuf && rstate->rot_hw) {
+ uint32_t fb_id;
- SDE_DEBUG("plane%d.%d use rotator\n",
- plane->base.id, rstate->sequence_id);
+ fb_id = state->fb ? state->fb->base.id : -1;
+ SDE_DEBUG("plane%d.%d use rotator, fb %d\n",
+ plane->base.id, rstate->sequence_id, fb_id);
sde_plane_rot_calc_cfg(plane, state);
- /* attempt to reuse stream buffer if already available */
- if (sde_plane_enabled(state))
- _sde_plane_rot_get_fb(plane, cstate, rstate);
-
ret = sde_plane_rot_submit_command(plane, state,
SDE_HW_ROT_CMD_VALIDATE);
+ if (ret)
+ return ret;
+ /* check if stream buffer is already attached to rotator */
+ _sde_plane_rot_get_fb(plane, cstate, rstate);
+
+ /* release buffer if output format configuration changes */
+ if (rstate->out_fb &&
+ ((rstate->out_fb_height != rstate->out_fb->height) ||
+ (rstate->out_fb_width != rstate->out_fb->width) ||
+ (rstate->out_fb_pixel_format !=
+ rstate->out_fb->pixel_format) ||
+ (rstate->out_fb_modifier[0] !=
+ rstate->out_fb->modifier[0]) ||
+ (rstate->out_fb_flags != rstate->out_fb->flags))) {
+
+ SDE_DEBUG("plane%d.%d release fb/fbo\n", plane->base.id,
+ rstate->sequence_id);
+ SDE_EVT32_VERBOSE(DRMID(plane),
+ rstate->sequence_id, fb_id);
+
+ sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FB,
+ (u64) &rstate->rot_hw->base);
+ rstate->out_fb = NULL;
+ sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FBO,
+ (u64) &rstate->rot_hw->base);
+ rstate->out_fbo = NULL;
+ }
} else {
SDE_DEBUG("plane%d.%d bypass rotator\n", plane->base.id,
@@ -2347,8 +2399,6 @@
{
struct sde_plane_state *pstate = to_sde_plane_state(new_state);
struct sde_plane_rot_state *rstate = &pstate->rot;
- struct drm_crtc_state *cstate;
- int ret;
rstate->sequence_id++;
@@ -2356,19 +2406,7 @@
rstate->sequence_id,
!!rstate->out_sbuf, !!rstate->rot_hw);
- cstate = _sde_plane_get_crtc_state(new_state);
- if (IS_ERR(cstate)) {
- ret = PTR_ERR(cstate);
- SDE_ERROR("invalid crtc state %d\n", ret);
- return -EINVAL;
- }
-
- if (rstate->rot_hw && cstate)
- sde_crtc_res_get(cstate, SDE_HW_BLK_ROT, (u64) rstate->in_fb);
- else if (rstate->rot_hw && !cstate)
- SDE_ERROR("plane%d.%d zombie rotator hw\n",
- plane->base.id, rstate->sequence_id);
-
+ rstate->rot_hw = NULL;
rstate->out_fb = NULL;
rstate->out_fbo = NULL;
@@ -2640,8 +2678,10 @@
{
struct drm_framebuffer *fb = new_state->fb;
struct sde_plane *psde = to_sde_plane(plane);
+ struct sde_plane_state *pstate = to_sde_plane_state(new_state);
struct sde_plane_rot_state *new_rstate;
struct sde_hw_fmt_layout layout;
+ struct msm_gem_address_space *aspace;
int ret;
if (!new_state->fb)
@@ -2649,6 +2689,14 @@
SDE_DEBUG_PLANE(psde, "FB[%u]\n", fb->base.id);
+ ret = _sde_plane_get_aspace(psde, pstate, &aspace);
+ if (ret) {
+ SDE_ERROR_PLANE(psde, "Failed to get aspace\n");
+ return ret;
+ }
+
+ /*cache aspace */
+ pstate->aspace = aspace;
ret = sde_plane_rot_prepare_fb(plane, new_state);
if (ret) {
SDE_ERROR("failed to prepare rot framebuffer\n");
@@ -2657,14 +2705,14 @@
new_rstate = &to_sde_plane_state(new_state)->rot;
- ret = msm_framebuffer_prepare(new_rstate->out_fb, new_rstate->mmu_id);
+ ret = msm_framebuffer_prepare(new_rstate->out_fb, pstate->aspace);
if (ret) {
SDE_ERROR("failed to prepare framebuffer\n");
return ret;
}
/* validate framebuffer layout before commit */
- ret = sde_format_populate_layout(new_rstate->mmu_id,
+ ret = sde_format_populate_layout(pstate->aspace,
new_rstate->out_fb, &layout);
if (ret) {
SDE_ERROR_PLANE(psde, "failed to get format layout, %d\n", ret);
@@ -2678,16 +2726,19 @@
struct drm_plane_state *old_state)
{
struct sde_plane *psde = to_sde_plane(plane);
+ struct sde_plane_state *old_pstate;
struct sde_plane_rot_state *old_rstate;
- if (!old_state->fb)
+ if (!old_state || !old_state->fb)
return;
+ old_pstate = to_sde_plane_state(old_state);
+
SDE_DEBUG_PLANE(psde, "FB[%u]\n", old_state->fb->base.id);
- old_rstate = &to_sde_plane_state(old_state)->rot;
+ old_rstate = &old_pstate->rot;
- msm_framebuffer_cleanup(old_rstate->out_fb, old_rstate->mmu_id);
+ msm_framebuffer_cleanup(old_rstate->out_fb, old_pstate->aspace);
sde_plane_rot_cleanup_fb(plane, old_state);
}
@@ -2791,6 +2842,7 @@
}
static int _sde_plane_validate_scaler_v2(struct sde_plane *psde,
+ struct sde_plane_state *pstate,
const struct sde_format *fmt,
uint32_t img_w, uint32_t img_h,
uint32_t src_w, uint32_t src_h,
@@ -2798,17 +2850,16 @@
{
int i;
- if (!psde || !fmt) {
+ if (!psde || !pstate || !fmt) {
SDE_ERROR_PLANE(psde, "invalid arguments\n");
return -EINVAL;
}
/* don't run checks unless scaler data was changed */
- if (psde->scaler_check_state != SDE_PLANE_SCLCHECK_SCALER_V2_CHECK ||
- !psde->scaler3_cfg)
+ if (pstate->scaler_check_state != SDE_PLANE_SCLCHECK_SCALER_V2_CHECK)
return 0;
- psde->scaler_check_state = SDE_PLANE_SCLCHECK_INVALID;
+ pstate->scaler_check_state = SDE_PLANE_SCLCHECK_INVALID;
for (i = 0; i < SDE_MAX_PLANES; i++) {
uint32_t hor_req_pixels, hor_fetch_pixels;
@@ -2838,17 +2889,17 @@
src_h_tmp >>= 1;
}
- hor_req_pixels = psde->pixel_ext.roi_w[i];
- vert_req_pixels = psde->pixel_ext.roi_h[i];
+ hor_req_pixels = pstate->pixel_ext.roi_w[i];
+ vert_req_pixels = pstate->pixel_ext.roi_h[i];
hor_fetch_pixels = DECIMATED_DIMENSION(src_w_tmp +
- (int8_t)(psde->pixel_ext.left_ftch[i] & 0xFF) +
- (int8_t)(psde->pixel_ext.right_ftch[i] & 0xFF),
- deci_w);
+ (int8_t)(pstate->pixel_ext.left_ftch[i] & 0xFF) +
+ (int8_t)(pstate->pixel_ext.right_ftch[i] & 0xFF),
+ deci_w);
vert_fetch_pixels = DECIMATED_DIMENSION(src_h_tmp +
- (int8_t)(psde->pixel_ext.top_ftch[i] & 0xFF) +
- (int8_t)(psde->pixel_ext.btm_ftch[i] & 0xFF),
- deci_h);
+ (int8_t)(pstate->pixel_ext.top_ftch[i] & 0xFF) +
+ (int8_t)(pstate->pixel_ext.btm_ftch[i] & 0xFF),
+ deci_h);
if ((hor_req_pixels != hor_fetch_pixels) ||
(hor_fetch_pixels > img_w) ||
@@ -2868,20 +2919,20 @@
* for Y and UV plane
*/
if (i != 3 &&
- (hor_req_pixels != psde->scaler3_cfg->src_width[i] ||
- vert_req_pixels != psde->scaler3_cfg->src_height[i])) {
+ (hor_req_pixels != pstate->scaler3_cfg.src_width[i] ||
+ vert_req_pixels != pstate->scaler3_cfg.src_height[i])) {
SDE_ERROR_PLANE(psde,
"roi[%d] %d/%d, scaler src %dx%d, src %dx%d\n",
- i, psde->pixel_ext.roi_w[i],
- psde->pixel_ext.roi_h[i],
- psde->scaler3_cfg->src_width[i],
- psde->scaler3_cfg->src_height[i],
+ i, pstate->pixel_ext.roi_w[i],
+ pstate->pixel_ext.roi_h[i],
+ pstate->scaler3_cfg.src_width[i],
+ pstate->scaler3_cfg.src_height[i],
src_w, src_h);
return -EINVAL;
}
}
- psde->scaler_check_state = SDE_PLANE_SCLCHECK_SCALER_V2;
+ pstate->scaler_check_state = SDE_PLANE_SCLCHECK_SCALER_V2;
return 0;
}
@@ -3020,7 +3071,7 @@
"too much scaling requested %ux%u->%ux%u\n",
src_deci_w, src_deci_h, dst.w, dst.h);
ret = -E2BIG;
- } else if (_sde_plane_validate_scaler_v2(psde, fmt,
+ } else if (_sde_plane_validate_scaler_v2(psde, pstate, fmt,
rstate->out_fb_width,
rstate->out_fb_height,
src.w, src.h, deci_w, deci_h)) {
@@ -3134,7 +3185,7 @@
static int sde_plane_sspp_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- uint32_t nplanes, src_flags;
+ uint32_t nplanes, src_flags = 0x0;
struct sde_plane *psde;
struct drm_plane_state *state;
struct sde_plane_state *pstate;
@@ -3147,6 +3198,7 @@
const struct sde_rect *crtc_roi;
bool q16_data = true;
int idx;
+ int mode;
if (!plane) {
SDE_ERROR("invalid plane\n");
@@ -3198,7 +3250,8 @@
}
/* determine what needs to be refreshed */
- while ((idx = msm_property_pop_dirty(&psde->property_info)) >= 0) {
+ while ((idx = msm_property_pop_dirty(&psde->property_info,
+ &pstate->property_state)) >= 0) {
switch (idx) {
case PLANE_PROP_SCALER_V1:
case PLANE_PROP_SCALER_V2:
@@ -3228,6 +3281,9 @@
case PLANE_PROP_BLEND_OP:
/* no special action required */
break;
+ case PLANE_PROP_FB_TRANSLATION_MODE:
+ pstate->dirty |= SDE_PLANE_DIRTY_FB_TRANSLATION_MODE;
+ break;
case PLANE_PROP_PREFILL_SIZE:
case PLANE_PROP_PREFILL_TIME:
pstate->dirty |= SDE_PLANE_DIRTY_PERF;
@@ -3275,6 +3331,12 @@
psde->is_rt_pipe = (sde_crtc_get_client_type(crtc) != NRT_CLIENT);
_sde_plane_set_qos_ctrl(plane, false, SDE_PLANE_QOS_PANIC_CTRL);
+ /* update secure session flag */
+ mode = sde_plane_get_property(pstate, PLANE_PROP_FB_TRANSLATION_MODE);
+ if ((mode == SDE_DRM_FB_SEC) ||
+ (mode == SDE_DRM_FB_SEC_DIR_TRANS))
+ src_flags |= SDE_SSPP_SECURE_OVERLAY_SESSION;
+
/* update roi config */
if (pstate->dirty & SDE_PLANE_DIRTY_RECTS) {
POPULATE_RECT(&src, rstate->out_src_x, rstate->out_src_y,
@@ -3309,7 +3371,7 @@
psde->pipe_cfg.src_rect = src;
psde->pipe_cfg.dst_rect = dst;
- _sde_plane_setup_scaler(psde, fmt, pstate);
+ _sde_plane_setup_scaler(psde, pstate, fmt, false);
/* check for color fill */
psde->color_fill = (uint32_t)sde_plane_get_property(pstate,
@@ -3326,7 +3388,7 @@
if (psde->pipe_hw->ops.setup_pe &&
(pstate->multirect_index != SDE_SSPP_RECT_1))
psde->pipe_hw->ops.setup_pe(psde->pipe_hw,
- &psde->pixel_ext);
+ &pstate->pixel_ext);
/**
* when programmed in multirect mode, scalar block will be
@@ -3336,8 +3398,8 @@
if (psde->pipe_hw->ops.setup_scaler &&
pstate->multirect_index != SDE_SSPP_RECT_1)
psde->pipe_hw->ops.setup_scaler(psde->pipe_hw,
- &psde->pipe_cfg, &psde->pixel_ext,
- psde->scaler3_cfg);
+ &psde->pipe_cfg, &pstate->pixel_ext,
+ &pstate->scaler3_cfg);
/* update excl rect */
if (psde->pipe_hw->ops.setup_excl_rect)
@@ -3352,9 +3414,9 @@
pstate->multirect_mode);
}
- if ((pstate->dirty & SDE_PLANE_DIRTY_FORMAT) &&
+ if (((pstate->dirty & SDE_PLANE_DIRTY_FORMAT) ||
+ (src_flags & SDE_SSPP_SECURE_OVERLAY_SESSION)) &&
psde->pipe_hw->ops.setup_format) {
- src_flags = 0x0;
SDE_DEBUG_PLANE(psde, "rotation 0x%X\n", rstate->out_rotation);
if (rstate->out_rotation & DRM_REFLECT_X)
src_flags |= SDE_SSPP_FLIP_LR;
@@ -3383,7 +3445,7 @@
}
if (psde->pipe_hw->ops.setup_sys_cache) {
- if (rstate->out_sbuf) {
+ if (rstate->out_sbuf && rstate->rot_hw) {
if (rstate->nplane < 2)
pstate->sc_cfg.op_mode =
SDE_PIPE_SC_OP_MODE_INLINE_SINGLE;
@@ -3453,13 +3515,44 @@
return 0;
}
-static void sde_plane_atomic_update(struct drm_plane *plane,
+static void _sde_plane_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct sde_plane *psde;
struct drm_plane_state *state;
struct sde_plane_state *pstate;
- struct sde_plane_state *old_pstate;
+
+ if (!plane) {
+ SDE_ERROR("invalid plane\n");
+ return;
+ } else if (!plane->state) {
+ SDE_ERROR("invalid plane state\n");
+ return;
+ } else if (!old_state) {
+ SDE_ERROR("invalid old state\n");
+ return;
+ }
+
+ psde = to_sde_plane(plane);
+ state = plane->state;
+ pstate = to_sde_plane_state(state);
+
+ SDE_EVT32(DRMID(plane), is_sde_plane_virtual(plane),
+ pstate->multirect_mode);
+
+ pstate->pending = true;
+
+ if (is_sde_plane_virtual(plane) &&
+ psde->pipe_hw && psde->pipe_hw->ops.setup_multirect)
+ psde->pipe_hw->ops.setup_multirect(psde->pipe_hw,
+ SDE_SSPP_RECT_SOLO, SDE_SSPP_MULTIRECT_NONE);
+}
+
+static void sde_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct sde_plane *psde;
+ struct drm_plane_state *state;
if (!plane) {
SDE_ERROR("invalid plane\n");
@@ -3472,15 +3565,13 @@
psde = to_sde_plane(plane);
psde->is_error = false;
state = plane->state;
- pstate = to_sde_plane_state(state);
- old_pstate = to_sde_plane_state(old_state);
SDE_DEBUG_PLANE(psde, "\n");
sde_plane_rot_atomic_update(plane, old_state);
if (!sde_plane_sspp_enabled(state)) {
- pstate->pending = true;
+ _sde_plane_atomic_disable(plane, old_state);
} else {
int ret;
@@ -3490,6 +3581,29 @@
}
}
+void sde_plane_restore(struct drm_plane *plane)
+{
+ struct sde_plane *psde;
+
+ if (!plane || !plane->state) {
+ SDE_ERROR("invalid plane\n");
+ return;
+ }
+
+ psde = to_sde_plane(plane);
+
+ /*
+ * Revalidate is only true here if idle PC occurred and
+ * there is no plane state update in current commit cycle.
+ */
+ if (!psde->revalidate)
+ return;
+
+ SDE_DEBUG_PLANE(psde, "\n");
+
+ /* last plane state is same as current state */
+ sde_plane_atomic_update(plane, plane->state);
+}
/* helper to install properties which are common to planes and crtcs */
static void _sde_plane_install_properties(struct drm_plane *plane,
@@ -3504,6 +3618,12 @@
static const struct drm_prop_enum_list e_src_config[] = {
{SDE_DRM_DEINTERLACE, "deinterlace"}
};
+ static const struct drm_prop_enum_list e_fb_translation_mode[] = {
+ {SDE_DRM_FB_NON_SEC, "non_sec"},
+ {SDE_DRM_FB_SEC, "sec"},
+ {SDE_DRM_FB_NON_SEC_DIR_TRANS, "non_sec_direct_translation"},
+ {SDE_DRM_FB_SEC_DIR_TRANS, "sec_direct_translation"},
+ };
const struct sde_format_extended *format_list;
struct sde_format_extended *virt_format_list = NULL;
struct sde_kms_info *info;
@@ -3686,6 +3806,10 @@
sde_kms_info_stop(info);
}
+ if (psde->pipe_hw && psde->pipe_hw->ops.get_scaler_ver)
+ sde_kms_info_add_keyint(info, "scaler_step_ver",
+ psde->pipe_hw->ops.get_scaler_ver(psde->pipe_hw));
+
sde_kms_info_add_keyint(info, "max_linewidth",
psde->pipe_sblk->maxlinewidth);
sde_kms_info_add_keyint(info, "max_upscale",
@@ -3722,6 +3846,12 @@
msm_property_install_blob(&psde->property_info, feature_name, 0,
PLANE_PROP_FOLIAGE_COLOR);
}
+
+ msm_property_install_enum(&psde->property_info, "fb_translation_mode",
+ 0x0,
+ 0, e_fb_translation_mode,
+ ARRAY_SIZE(e_fb_translation_mode),
+ PLANE_PROP_FB_TRANSLATION_MODE);
}
static inline void _sde_plane_set_csc_v1(struct sde_plane *psde, void *usr_ptr)
@@ -3759,18 +3889,19 @@
psde->csc_usr_ptr = &psde->csc_cfg;
}
-static inline void _sde_plane_set_scaler_v1(struct sde_plane *psde, void *usr)
+static inline void _sde_plane_set_scaler_v1(struct sde_plane *psde,
+ struct sde_plane_state *pstate, void *usr)
{
struct sde_drm_scaler_v1 scale_v1;
struct sde_hw_pixel_ext *pe;
int i;
- if (!psde) {
- SDE_ERROR("invalid plane\n");
+ if (!psde || !pstate) {
+ SDE_ERROR("invalid argument(s)\n");
return;
}
- psde->scaler_check_state = SDE_PLANE_SCLCHECK_NONE;
+ pstate->scaler_check_state = SDE_PLANE_SCLCHECK_NONE;
if (!usr) {
SDE_DEBUG_PLANE(psde, "scale data removed\n");
return;
@@ -3782,10 +3913,11 @@
}
/* force property to be dirty, even if the pointer didn't change */
- msm_property_set_dirty(&psde->property_info, PLANE_PROP_SCALER_V1);
+ msm_property_set_dirty(&psde->property_info,
+ &pstate->property_state, PLANE_PROP_SCALER_V1);
/* populate from user space */
- pe = &(psde->pixel_ext);
+ pe = &pstate->pixel_ext;
memset(pe, 0, sizeof(struct sde_hw_pixel_ext));
for (i = 0; i < SDE_MAX_PLANES; i++) {
pe->init_phase_x[i] = scale_v1.init_phase_x[i];
@@ -3810,7 +3942,7 @@
pe->roi_h[i] = scale_v1.pe.num_ext_pxls_tb[i];
}
- psde->scaler_check_state = SDE_PLANE_SCLCHECK_SCALER_V1;
+ pstate->scaler_check_state = SDE_PLANE_SCLCHECK_SCALER_V1;
SDE_EVT32_VERBOSE(DRMID(&psde->base));
SDE_DEBUG_PLANE(psde, "user property data copied\n");
@@ -3824,13 +3956,13 @@
int i;
struct sde_hw_scaler3_cfg *cfg;
- if (!psde || !psde->scaler3_cfg) {
- SDE_ERROR("invalid plane\n");
+ if (!psde || !pstate) {
+ SDE_ERROR("invalid argument(s)\n");
return;
}
- cfg = psde->scaler3_cfg;
- psde->scaler_check_state = SDE_PLANE_SCLCHECK_NONE;
+ cfg = &pstate->scaler3_cfg;
+ pstate->scaler_check_state = SDE_PLANE_SCLCHECK_NONE;
if (!usr) {
SDE_DEBUG_PLANE(psde, "scale data removed\n");
return;
@@ -3848,10 +3980,11 @@
}
/* force property to be dirty, even if the pointer didn't change */
- msm_property_set_dirty(&psde->property_info, PLANE_PROP_SCALER_V2);
+ msm_property_set_dirty(&psde->property_info,
+ &pstate->property_state, PLANE_PROP_SCALER_V2);
/* populate from user space */
- pe = &(psde->pixel_ext);
+ pe = &pstate->pixel_ext;
memset(pe, 0, sizeof(struct sde_hw_pixel_ext));
cfg->enable = scale_v2.enable;
cfg->dir_en = scale_v2.dir_en;
@@ -3909,7 +4042,7 @@
pe->btm_rpt[i] = scale_v2.pe.btm_rpt[i];
pe->roi_h[i] = scale_v2.pe.num_ext_pxls_tb[i];
}
- psde->scaler_check_state = SDE_PLANE_SCLCHECK_SCALER_V2_CHECK;
+ pstate->scaler_check_state = SDE_PLANE_SCLCHECK_SCALER_V2_CHECK;
SDE_EVT32_VERBOSE(DRMID(&psde->base), cfg->enable, cfg->de.enable,
cfg->src_width[0], cfg->src_height[0],
@@ -3965,8 +4098,7 @@
} else {
pstate = to_sde_plane_state(state);
ret = msm_property_atomic_set(&psde->property_info,
- pstate->property_values, pstate->property_blobs,
- property, val);
+ &pstate->property_state, property, val);
if (!ret) {
idx = msm_property_index(&psde->property_info,
property);
@@ -3978,11 +4110,12 @@
_sde_plane_set_csc_v1(psde, (void *)val);
break;
case PLANE_PROP_SCALER_V1:
- _sde_plane_set_scaler_v1(psde, (void *)val);
+ _sde_plane_set_scaler_v1(psde, pstate,
+ (void *)val);
break;
case PLANE_PROP_SCALER_V2:
_sde_plane_set_scaler_v2(psde, pstate,
- (void *)val);
+ (void *)val);
break;
case PLANE_PROP_EXCL_RECT_V1:
_sde_plane_set_excl_rect_v1(psde, pstate,
@@ -4027,8 +4160,7 @@
pstate = to_sde_plane_state(state);
sde_plane_rot_install_caps(plane);
ret = msm_property_atomic_get(&psde->property_info,
- pstate->property_values, pstate->property_blobs,
- property, val);
+ &pstate->property_state, property, val);
}
return ret;
@@ -4089,7 +4221,7 @@
/* destroy value helper */
msm_property_destroy_state(&psde->property_info, pstate,
- pstate->property_values, pstate->property_blobs);
+ &pstate->property_state);
}
static struct drm_plane_state *
@@ -4120,13 +4252,14 @@
/* duplicate value helper */
msm_property_duplicate_state(&psde->property_info, old_state, pstate,
- pstate->property_values, pstate->property_blobs);
+ &pstate->property_state, pstate->property_values);
/* clear out any input fence */
pstate->input_fence = 0;
input_fence_default = msm_property_get_default(
&psde->property_info, PLANE_PROP_INPUT_FENCE);
- msm_property_set_property(&psde->property_info, pstate->property_values,
+ msm_property_set_property(&psde->property_info,
+ &pstate->property_state,
PLANE_PROP_INPUT_FENCE, input_fence_default);
pstate->dirty = 0x0;
@@ -4166,7 +4299,8 @@
/* reset value helper */
msm_property_reset_state(&psde->property_info, pstate,
- pstate->property_values, pstate->property_blobs);
+ &pstate->property_state,
+ pstate->property_values);
pstate->base.plane = plane;
@@ -4481,9 +4615,7 @@
/* cache local stuff for later */
plane = &psde->base;
psde->pipe = pipe;
- psde->mmu_id = kms->mmu_id[MSM_SMMU_DOMAIN_UNSECURE];
psde->is_virtual = (master_plane_id != 0);
- psde->scaler_check_state = SDE_PLANE_SCLCHECK_NONE;
INIT_LIST_HEAD(&psde->mplane_list);
master_plane = drm_plane_find(dev, master_plane_id);
if (master_plane) {
@@ -4512,17 +4644,6 @@
goto clean_sspp;
}
- if (psde->features & BIT(SDE_SSPP_SCALER_QSEED3)) {
- psde->scaler3_cfg = kzalloc(sizeof(struct sde_hw_scaler3_cfg),
- GFP_KERNEL);
- if (!psde->scaler3_cfg) {
- SDE_ERROR("[%u]failed to allocate scale struct\n",
- pipe);
- ret = -ENOMEM;
- goto clean_sspp;
- }
- }
-
format_list = psde->pipe_sblk->format_list;
if (master_plane_id) {
@@ -4592,9 +4713,6 @@
clean_sspp:
if (psde && psde->pipe_hw)
sde_hw_sspp_destroy(psde->pipe_hw);
-
- if (psde && psde->scaler3_cfg)
- kfree(psde->scaler3_cfg);
clean_plane:
kfree(psde);
exit:
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.h b/drivers/gpu/drm/msm/sde/sde_plane.h
index f83a891..a5599a5 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.h
+++ b/drivers/gpu/drm/msm/sde/sde_plane.h
@@ -34,7 +34,6 @@
* @rot90: true if rotation of 90 degree is required
* @hflip: true if horizontal flip is required
* @vflip: true if vertical flip is required
- * @mmu_id: iommu identifier for input/output buffers
* @rot_cmd: rotator configuration command
* @nplane: total number of drm plane attached to rotator
* @in_fb: input fb attached to rotator
@@ -64,7 +63,6 @@
bool rot90;
bool hflip;
bool vflip;
- u32 mmu_id;
struct sde_hw_rot_cmd rot_cmd;
int nplane;
/* input */
@@ -96,13 +94,34 @@
#define SDE_PLANE_DIRTY_FORMAT 0x2
#define SDE_PLANE_DIRTY_SHARPEN 0x4
#define SDE_PLANE_DIRTY_PERF 0x8
+#define SDE_PLANE_DIRTY_FB_TRANSLATION_MODE 0x10
#define SDE_PLANE_DIRTY_ALL 0xFFFFFFFF
/**
+ * enum sde_plane_sclcheck_state - User scaler data status
+ *
+ * @SDE_PLANE_SCLCHECK_NONE: No user data provided
+ * @SDE_PLANE_SCLCHECK_INVALID: Invalid user data provided
+ * @SDE_PLANE_SCLCHECK_SCALER_V1: Valid scaler v1 data
+ * @SDE_PLANE_SCLCHECK_SCALER_V1_CHECK: Unchecked scaler v1 data
+ * @SDE_PLANE_SCLCHECK_SCALER_V2: Valid scaler v2 data
+ * @SDE_PLANE_SCLCHECK_SCALER_V2_CHECK: Unchecked scaler v2 data
+ */
+enum sde_plane_sclcheck_state {
+ SDE_PLANE_SCLCHECK_NONE,
+ SDE_PLANE_SCLCHECK_INVALID,
+ SDE_PLANE_SCLCHECK_SCALER_V1,
+ SDE_PLANE_SCLCHECK_SCALER_V1_CHECK,
+ SDE_PLANE_SCLCHECK_SCALER_V2,
+ SDE_PLANE_SCLCHECK_SCALER_V2_CHECK,
+};
+
+/**
* struct sde_plane_state: Define sde extension of drm plane state object
* @base: base drm plane state object
+ * @property_state: Local storage for msm_prop properties
* @property_values: cached plane property values
- * @property_blobs: blob properties
+ * @aspace: pointer to address space for input/output buffers
* @input_fence: dereferenced input fence pointer
* @stage: assigned by crtc blender
* @excl_rect: exclusion rect values
@@ -110,12 +129,16 @@
* @multirect_index: index of the rectangle of SSPP
* @multirect_mode: parallel or time multiplex multirect mode
* @pending: whether the current update is still pending
+ * @scaler3_cfg: configuration data for scaler3
+ * @pixel_ext: configuration data for pixel extensions
+ * @scaler_check_state: indicates status of user provided pixel extension data
* @cdp_cfg: CDP configuration
*/
struct sde_plane_state {
struct drm_plane_state base;
- uint64_t property_values[PLANE_PROP_COUNT];
- struct drm_property_blob *property_blobs[PLANE_PROP_BLOBCOUNT];
+ struct msm_property_state property_state;
+ struct msm_property_value property_values[PLANE_PROP_COUNT];
+ struct msm_gem_address_space *aspace;
void *input_fence;
enum sde_stage stage;
struct sde_rect excl_rect;
@@ -124,6 +147,11 @@
uint32_t multirect_mode;
bool pending;
+ /* scaler configuration */
+ struct sde_hw_scaler3_cfg scaler3_cfg;
+ struct sde_hw_pixel_ext pixel_ext;
+ enum sde_plane_sclcheck_state scaler_check_state;
+
/* @sc_cfg: system_cache configuration */
struct sde_hw_pipe_sc_cfg sc_cfg;
struct sde_plane_rot_state rot;
@@ -150,8 +178,8 @@
* @X: Property index, from enum msm_mdp_plane_property
* Returns: Integer value of requested property
*/
-#define sde_plane_get_property(S, X) \
- ((S) && ((X) < PLANE_PROP_COUNT) ? ((S)->property_values[(X)]) : 0)
+#define sde_plane_get_property(S, X) ((S) && ((X) < PLANE_PROP_COUNT) ? \
+ ((S)->property_values[(X)].value) : 0)
/**
* sde_plane_pipe - return sspp identifier for the given plane
@@ -187,6 +215,12 @@
bool sde_plane_is_sbuf_mode(struct drm_plane *plane, u32 *prefill);
/**
+ * sde_plane_restore - restore hw state if previously power collapsed
+ * @plane: Pointer to drm plane structure
+ */
+void sde_plane_restore(struct drm_plane *plane);
+
+/**
* sde_plane_flush - final plane operations before commit flush
* @plane: Pointer to drm plane structure
*/
diff --git a/drivers/gpu/drm/msm/sde/sde_reg_dma.c b/drivers/gpu/drm/msm/sde/sde_reg_dma.c
index cc115c5..cc87aeb 100644
--- a/drivers/gpu/drm/msm/sde/sde_reg_dma.c
+++ b/drivers/gpu/drm/msm/sde/sde_reg_dma.c
@@ -62,10 +62,17 @@
return -EINVAL;
}
+static int default_last_command(struct sde_hw_ctl *ctl,
+ enum sde_reg_dma_queue q)
+{
+ return 0;
+}
+
static struct sde_hw_reg_dma reg_dma = {
.ops = {default_check_support, default_setup_payload,
default_kick_off, default_reset, default_alloc_reg_dma_buf,
- default_dealloc_reg_dma, default_buf_reset_reg_dma},
+ default_dealloc_reg_dma, default_buf_reset_reg_dma,
+ default_last_command},
};
int sde_reg_dma_init(void __iomem *addr, struct sde_mdss_cfg *m,
@@ -103,3 +110,26 @@
{
return ®_dma.ops;
}
+
+void sde_reg_dma_deinit(void)
+{
+ struct sde_hw_reg_dma op = {
+ .ops = {default_check_support, default_setup_payload,
+ default_kick_off, default_reset, default_alloc_reg_dma_buf,
+ default_dealloc_reg_dma, default_buf_reset_reg_dma,
+ default_last_command},
+ };
+
+ if (!reg_dma.drm_dev || !reg_dma.caps)
+ return;
+
+ switch (reg_dma.caps->version) {
+ case 1:
+ deinit_v1();
+ break;
+ default:
+ break;
+ }
+ memset(®_dma, 0, sizeof(reg_dma));
+ memcpy(®_dma.ops, &op.ops, sizeof(op.ops));
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_reg_dma.h b/drivers/gpu/drm/msm/sde/sde_reg_dma.h
index c8e464d..70d995a 100644
--- a/drivers/gpu/drm/msm/sde/sde_reg_dma.h
+++ b/drivers/gpu/drm/msm/sde/sde_reg_dma.h
@@ -251,6 +251,7 @@
* @alloc_reg_dma_buf: allocate reg dma buffer
* @dealloc_reg_dma: de-allocate reg dma buffer
* @reset_reg_dma_buf: reset the buffer to init state
+ * @last_command: notify control that last command is queued
*/
struct sde_hw_reg_dma_ops {
int (*check_support)(enum sde_reg_dma_features feature,
@@ -262,6 +263,7 @@
struct sde_reg_dma_buffer* (*alloc_reg_dma_buf)(u32 size);
int (*dealloc_reg_dma)(struct sde_reg_dma_buffer *lut_buf);
int (*reset_reg_dma_buf)(struct sde_reg_dma_buffer *buf);
+ int (*last_command)(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q);
};
/**
@@ -298,4 +300,9 @@
* who call this api.
*/
struct sde_hw_reg_dma_ops *sde_reg_dma_get_ops(void);
+
+/**
+ * sde_reg_dma_deinit() - de-initialize the reg dma
+ */
+void sde_reg_dma_deinit(void);
#endif /* _SDE_REG_DMA_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.c b/drivers/gpu/drm/msm/sde/sde_rm.c
index 0382ed0..be3a8af 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.c
+++ b/drivers/gpu/drm/msm/sde/sde_rm.c
@@ -1150,7 +1150,7 @@
(void) msm_property_set_property(
sde_connector_get_propinfo(conn),
- sde_connector_get_property_values(conn->state),
+ sde_connector_get_property_state(conn->state),
CONNECTOR_PROP_TOPOLOGY_NAME,
SDE_RM_TOPOLOGY_NONE);
}
@@ -1170,7 +1170,7 @@
ret = msm_property_set_property(
sde_connector_get_propinfo(conn_state->connector),
- sde_connector_get_property_values(conn_state),
+ sde_connector_get_property_state(conn_state),
CONNECTOR_PROP_TOPOLOGY_NAME,
rsvp->topology);
if (ret) {
@@ -1267,7 +1267,7 @@
(void) msm_property_set_property(
sde_connector_get_propinfo(
conn_state->connector),
- sde_connector_get_property_values(conn_state),
+ sde_connector_get_property_state(conn_state),
CONNECTOR_PROP_TOPOLOGY_NAME,
SDE_RM_TOPOLOGY_NONE);
}
diff --git a/drivers/gpu/drm/msm/sde/sde_trace.h b/drivers/gpu/drm/msm/sde/sde_trace.h
index e233fc7..47fc39b 100644
--- a/drivers/gpu/drm/msm/sde/sde_trace.h
+++ b/drivers/gpu/drm/msm/sde/sde_trace.h
@@ -125,7 +125,7 @@
TP_printk("crtc:%d", __entry->crtc_id)
);
-TRACE_EVENT(sde_mark_write,
+TRACE_EVENT(tracing_mark_write,
TP_PROTO(int pid, const char *name, bool trace_begin),
TP_ARGS(pid, name, trace_begin),
TP_STRUCT__entry(
@@ -230,8 +230,8 @@
__entry->update_clk)
);
-#define SDE_ATRACE_END(name) trace_sde_mark_write(current->tgid, name, 0)
-#define SDE_ATRACE_BEGIN(name) trace_sde_mark_write(current->tgid, name, 1)
+#define SDE_ATRACE_END(name) trace_tracing_mark_write(current->tgid, name, 0)
+#define SDE_ATRACE_BEGIN(name) trace_tracing_mark_write(current->tgid, name, 1)
#define SDE_ATRACE_FUNC() SDE_ATRACE_BEGIN(__func__)
#define SDE_ATRACE_INT(name, value) \
diff --git a/drivers/gpu/drm/msm/sde/sde_vbif.c b/drivers/gpu/drm/msm/sde/sde_vbif.c
index 847572b..d31f828 100644
--- a/drivers/gpu/drm/msm/sde/sde_vbif.c
+++ b/drivers/gpu/drm/msm/sde/sde_vbif.c
@@ -184,6 +184,10 @@
!vbif->ops.set_halt_ctrl)
return;
+ /* set write_gather_en for all write clients */
+ if (vbif->ops.set_write_gather_en && !params->rd)
+ vbif->ops.set_write_gather_en(vbif, params->xin_id);
+
ot_lim = _sde_vbif_get_ot_limit(vbif, params) & 0xFF;
if (ot_lim == 0)
diff --git a/drivers/gpu/drm/msm/sde/sde_wb.c b/drivers/gpu/drm/msm/sde/sde_wb.c
index ceda16e..145acea 100644
--- a/drivers/gpu/drm/msm/sde/sde_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_wb.c
@@ -283,28 +283,31 @@
wb_dev->wb_cfg->sblk->maxlinewidth :
SDE_WB_MODE_MAX_WIDTH;
info->max_height = SDE_WB_MODE_MAX_HEIGHT;
- info->comp_info.comp_type = MSM_DISPLAY_COMPRESSION_NONE;
return 0;
}
-int sde_wb_get_topology(const struct drm_display_mode *drm_mode,
- struct msm_display_topology *topology, u32 max_mixer_width)
+int sde_wb_get_mode_info(const struct drm_display_mode *drm_mode,
+ struct msm_mode_info *mode_info, u32 max_mixer_width)
{
const u32 dual_lm = 2;
const u32 single_lm = 1;
const u32 single_intf = 1;
const u32 no_enc = 0;
+ struct msm_display_topology *topology;
- if (!drm_mode || !topology || !max_mixer_width) {
+ if (!drm_mode || !mode_info || !max_mixer_width) {
pr_err("invalid params\n");
return -EINVAL;
}
+ topology = &mode_info->topology;
topology->num_lm = (max_mixer_width <= drm_mode->hdisplay) ?
dual_lm : single_lm;
topology->num_enc = no_enc;
topology->num_intf = single_intf;
+ mode_info->comp_info.comp_type = MSM_DISPLAY_COMPRESSION_NONE;
+
return 0;
}
diff --git a/drivers/gpu/drm/msm/sde/sde_wb.h b/drivers/gpu/drm/msm/sde/sde_wb.h
index 205ff24..aa57d3e 100644
--- a/drivers/gpu/drm/msm/sde/sde_wb.h
+++ b/drivers/gpu/drm/msm/sde/sde_wb.h
@@ -186,15 +186,14 @@
int sde_wb_get_info(struct msm_display_info *info, void *display);
/**
- * sde_wb_get_topology - retrieve current topology for the mode selected
+ * sde_wb_get_mode_info - retrieve information of the mode selected
* @drm_mode: Display mode set for the display
- * @topology: Out parameter. Topology for the mode.
+ * @mode_info: Out parameter. information of the mode.
* @max_mixer_width: max width supported by HW layer mixer
* Returns: zero on success
*/
-int sde_wb_get_topology(const struct drm_display_mode *drm_mode,
- struct msm_display_topology *topology,
- u32 max_mixer_width);
+int sde_wb_get_mode_info(const struct drm_display_mode *drm_mode,
+ struct msm_mode_info *mode_info, u32 max_mixer_width);
/**
* sde_wb_connector_get_wb - retrieve writeback device of the given connector
diff --git a/drivers/gpu/drm/msm/sde_dbg.c b/drivers/gpu/drm/msm/sde_dbg.c
index bcd3eaa..58069f2 100644
--- a/drivers/gpu/drm/msm/sde_dbg.c
+++ b/drivers/gpu/drm/msm/sde_dbg.c
@@ -55,6 +55,14 @@
#define MMSS_VBIF_TEST_BUS_OUT_CTRL 0x210
#define MMSS_VBIF_TEST_BUS_OUT 0x230
+/* Vbif error info */
+#define MMSS_VBIF_PND_ERR 0x190
+#define MMSS_VBIF_SRC_ERR 0x194
+#define MMSS_VBIF_XIN_HALT_CTRL1 0x204
+#define MMSS_VBIF_ERR_INFO 0X1a0
+#define MMSS_VBIF_ERR_INFO_1 0x1a4
+#define MMSS_VBIF_CLIENT_NUM 14
+
/* print debug ranges in groups of 4 u32s */
#define REG_DUMP_ALIGN 16
@@ -116,6 +124,7 @@
u32 wr_addr;
u32 block_id;
u32 test_id;
+ void (*analyzer)(struct sde_debug_bus_entry *entry, u32 val);
};
struct vbif_debug_bus_entry {
@@ -183,6 +192,43 @@
/* sde_dbg_base_evtlog - global pointer to main sde event log for macro use */
struct sde_dbg_evtlog *sde_dbg_base_evtlog;
+static void _sde_debug_bus_xbar_dump(struct sde_debug_bus_entry *entry,
+ u32 val)
+{
+ dev_err(sde_dbg_base.dev, "xbar 0x%x %d %d 0x%x\n",
+ entry->wr_addr, entry->block_id, entry->test_id, val);
+}
+
+static void _sde_debug_bus_lm_dump(struct sde_debug_bus_entry *entry,
+ u32 val)
+{
+ if (!(val & 0xFFF000))
+ return;
+
+ dev_err(sde_dbg_base.dev, "lm 0x%x %d %d 0x%x\n",
+ entry->wr_addr, entry->block_id, entry->test_id, val);
+}
+
+static void _sde_debug_bus_ppb0_dump(struct sde_debug_bus_entry *entry,
+ u32 val)
+{
+ if (!(val & BIT(15)))
+ return;
+
+ dev_err(sde_dbg_base.dev, "ppb0 0x%x %d %d 0x%x\n",
+ entry->wr_addr, entry->block_id, entry->test_id, val);
+}
+
+static void _sde_debug_bus_ppb1_dump(struct sde_debug_bus_entry *entry,
+ u32 val)
+{
+ if (!(val & BIT(15)))
+ return;
+
+ dev_err(sde_dbg_base.dev, "ppb1 0x%x %d %d 0x%x\n",
+ entry->wr_addr, entry->block_id, entry->test_id, val);
+}
+
static struct sde_debug_bus_entry dbg_bus_sde_8998[] = {
/* Unpack 0 sspp 0*/
@@ -662,16 +708,16 @@
{ DBGBUS_DSPP, 20, 3 },
/* ppb_0 */
- { DBGBUS_DSPP, 31, 0 },
- { DBGBUS_DSPP, 33, 0 },
- { DBGBUS_DSPP, 35, 0 },
- { DBGBUS_DSPP, 42, 0 },
+ { DBGBUS_DSPP, 31, 0, _sde_debug_bus_ppb0_dump },
+ { DBGBUS_DSPP, 33, 0, _sde_debug_bus_ppb0_dump },
+ { DBGBUS_DSPP, 35, 0, _sde_debug_bus_ppb0_dump },
+ { DBGBUS_DSPP, 42, 0, _sde_debug_bus_ppb0_dump },
/* ppb_1 */
- { DBGBUS_DSPP, 32, 0 },
- { DBGBUS_DSPP, 34, 0 },
- { DBGBUS_DSPP, 36, 0 },
- { DBGBUS_DSPP, 43, 0 },
+ { DBGBUS_DSPP, 32, 0, _sde_debug_bus_ppb1_dump },
+ { DBGBUS_DSPP, 34, 0, _sde_debug_bus_ppb1_dump },
+ { DBGBUS_DSPP, 36, 0, _sde_debug_bus_ppb1_dump },
+ { DBGBUS_DSPP, 43, 0, _sde_debug_bus_ppb1_dump },
/* lm_lut */
{ DBGBUS_DSPP, 109, 0 },
@@ -686,7 +732,7 @@
{ DBGBUS_PERIPH, 74, 0 },
/* crossbar */
- { DBGBUS_DSPP, 0, 0},
+ { DBGBUS_DSPP, 0, 0, _sde_debug_bus_xbar_dump },
/* rotator */
{ DBGBUS_DSPP, 9, 0},
@@ -700,7 +746,7 @@
{ DBGBUS_DSPP, 63, 4},
{ DBGBUS_DSPP, 63, 5},
{ DBGBUS_DSPP, 63, 6},
- { DBGBUS_DSPP, 63, 7},
+ { DBGBUS_DSPP, 63, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 64, 0},
{ DBGBUS_DSPP, 64, 1},
@@ -709,7 +755,7 @@
{ DBGBUS_DSPP, 64, 4},
{ DBGBUS_DSPP, 64, 5},
{ DBGBUS_DSPP, 64, 6},
- { DBGBUS_DSPP, 64, 7},
+ { DBGBUS_DSPP, 64, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 65, 0},
{ DBGBUS_DSPP, 65, 1},
@@ -718,7 +764,7 @@
{ DBGBUS_DSPP, 65, 4},
{ DBGBUS_DSPP, 65, 5},
{ DBGBUS_DSPP, 65, 6},
- { DBGBUS_DSPP, 65, 7},
+ { DBGBUS_DSPP, 65, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 66, 0},
{ DBGBUS_DSPP, 66, 1},
@@ -727,7 +773,7 @@
{ DBGBUS_DSPP, 66, 4},
{ DBGBUS_DSPP, 66, 5},
{ DBGBUS_DSPP, 66, 6},
- { DBGBUS_DSPP, 66, 7},
+ { DBGBUS_DSPP, 66, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 67, 0},
{ DBGBUS_DSPP, 67, 1},
@@ -736,7 +782,7 @@
{ DBGBUS_DSPP, 67, 4},
{ DBGBUS_DSPP, 67, 5},
{ DBGBUS_DSPP, 67, 6},
- { DBGBUS_DSPP, 67, 7},
+ { DBGBUS_DSPP, 67, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 68, 0},
{ DBGBUS_DSPP, 68, 1},
@@ -745,7 +791,7 @@
{ DBGBUS_DSPP, 68, 4},
{ DBGBUS_DSPP, 68, 5},
{ DBGBUS_DSPP, 68, 6},
- { DBGBUS_DSPP, 68, 7},
+ { DBGBUS_DSPP, 68, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 69, 0},
{ DBGBUS_DSPP, 69, 1},
@@ -754,7 +800,7 @@
{ DBGBUS_DSPP, 69, 4},
{ DBGBUS_DSPP, 69, 5},
{ DBGBUS_DSPP, 69, 6},
- { DBGBUS_DSPP, 69, 7},
+ { DBGBUS_DSPP, 69, 7, _sde_debug_bus_lm_dump },
/* LM1 */
{ DBGBUS_DSPP, 70, 0},
@@ -764,7 +810,7 @@
{ DBGBUS_DSPP, 70, 4},
{ DBGBUS_DSPP, 70, 5},
{ DBGBUS_DSPP, 70, 6},
- { DBGBUS_DSPP, 70, 7},
+ { DBGBUS_DSPP, 70, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 71, 0},
{ DBGBUS_DSPP, 71, 1},
@@ -773,7 +819,7 @@
{ DBGBUS_DSPP, 71, 4},
{ DBGBUS_DSPP, 71, 5},
{ DBGBUS_DSPP, 71, 6},
- { DBGBUS_DSPP, 71, 7},
+ { DBGBUS_DSPP, 71, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 72, 0},
{ DBGBUS_DSPP, 72, 1},
@@ -782,7 +828,7 @@
{ DBGBUS_DSPP, 72, 4},
{ DBGBUS_DSPP, 72, 5},
{ DBGBUS_DSPP, 72, 6},
- { DBGBUS_DSPP, 72, 7},
+ { DBGBUS_DSPP, 72, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 73, 0},
{ DBGBUS_DSPP, 73, 1},
@@ -791,7 +837,7 @@
{ DBGBUS_DSPP, 73, 4},
{ DBGBUS_DSPP, 73, 5},
{ DBGBUS_DSPP, 73, 6},
- { DBGBUS_DSPP, 73, 7},
+ { DBGBUS_DSPP, 73, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 74, 0},
{ DBGBUS_DSPP, 74, 1},
@@ -800,7 +846,7 @@
{ DBGBUS_DSPP, 74, 4},
{ DBGBUS_DSPP, 74, 5},
{ DBGBUS_DSPP, 74, 6},
- { DBGBUS_DSPP, 74, 7},
+ { DBGBUS_DSPP, 74, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 75, 0},
{ DBGBUS_DSPP, 75, 1},
@@ -809,7 +855,7 @@
{ DBGBUS_DSPP, 75, 4},
{ DBGBUS_DSPP, 75, 5},
{ DBGBUS_DSPP, 75, 6},
- { DBGBUS_DSPP, 75, 7},
+ { DBGBUS_DSPP, 75, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 76, 0},
{ DBGBUS_DSPP, 76, 1},
@@ -818,7 +864,7 @@
{ DBGBUS_DSPP, 76, 4},
{ DBGBUS_DSPP, 76, 5},
{ DBGBUS_DSPP, 76, 6},
- { DBGBUS_DSPP, 76, 7},
+ { DBGBUS_DSPP, 76, 7, _sde_debug_bus_lm_dump },
/* LM2 */
{ DBGBUS_DSPP, 77, 0},
@@ -828,7 +874,7 @@
{ DBGBUS_DSPP, 77, 4},
{ DBGBUS_DSPP, 77, 5},
{ DBGBUS_DSPP, 77, 6},
- { DBGBUS_DSPP, 77, 7},
+ { DBGBUS_DSPP, 77, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 78, 0},
{ DBGBUS_DSPP, 78, 1},
@@ -837,7 +883,7 @@
{ DBGBUS_DSPP, 78, 4},
{ DBGBUS_DSPP, 78, 5},
{ DBGBUS_DSPP, 78, 6},
- { DBGBUS_DSPP, 78, 7},
+ { DBGBUS_DSPP, 78, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 79, 0},
{ DBGBUS_DSPP, 79, 1},
@@ -846,7 +892,7 @@
{ DBGBUS_DSPP, 79, 4},
{ DBGBUS_DSPP, 79, 5},
{ DBGBUS_DSPP, 79, 6},
- { DBGBUS_DSPP, 79, 7},
+ { DBGBUS_DSPP, 79, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 80, 0},
{ DBGBUS_DSPP, 80, 1},
@@ -855,7 +901,7 @@
{ DBGBUS_DSPP, 80, 4},
{ DBGBUS_DSPP, 80, 5},
{ DBGBUS_DSPP, 80, 6},
- { DBGBUS_DSPP, 80, 7},
+ { DBGBUS_DSPP, 80, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 81, 0},
{ DBGBUS_DSPP, 81, 1},
@@ -864,7 +910,7 @@
{ DBGBUS_DSPP, 81, 4},
{ DBGBUS_DSPP, 81, 5},
{ DBGBUS_DSPP, 81, 6},
- { DBGBUS_DSPP, 81, 7},
+ { DBGBUS_DSPP, 81, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 82, 0},
{ DBGBUS_DSPP, 82, 1},
@@ -873,7 +919,7 @@
{ DBGBUS_DSPP, 82, 4},
{ DBGBUS_DSPP, 82, 5},
{ DBGBUS_DSPP, 82, 6},
- { DBGBUS_DSPP, 82, 7},
+ { DBGBUS_DSPP, 82, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 83, 0},
{ DBGBUS_DSPP, 83, 1},
@@ -882,7 +928,7 @@
{ DBGBUS_DSPP, 83, 4},
{ DBGBUS_DSPP, 83, 5},
{ DBGBUS_DSPP, 83, 6},
- { DBGBUS_DSPP, 83, 7},
+ { DBGBUS_DSPP, 83, 7, _sde_debug_bus_lm_dump },
/* csc */
{ DBGBUS_SSPP0, 7, 0},
@@ -1386,16 +1432,16 @@
{ DBGBUS_DSPP, 20, 3 },
/* ppb_0 */
- { DBGBUS_DSPP, 31, 0 },
- { DBGBUS_DSPP, 33, 0 },
- { DBGBUS_DSPP, 35, 0 },
- { DBGBUS_DSPP, 42, 0 },
+ { DBGBUS_DSPP, 31, 0, _sde_debug_bus_ppb0_dump },
+ { DBGBUS_DSPP, 33, 0, _sde_debug_bus_ppb0_dump },
+ { DBGBUS_DSPP, 35, 0, _sde_debug_bus_ppb0_dump },
+ { DBGBUS_DSPP, 42, 0, _sde_debug_bus_ppb0_dump },
/* ppb_1 */
- { DBGBUS_DSPP, 32, 0 },
- { DBGBUS_DSPP, 34, 0 },
- { DBGBUS_DSPP, 36, 0 },
- { DBGBUS_DSPP, 43, 0 },
+ { DBGBUS_DSPP, 32, 0, _sde_debug_bus_ppb1_dump },
+ { DBGBUS_DSPP, 34, 0, _sde_debug_bus_ppb1_dump },
+ { DBGBUS_DSPP, 36, 0, _sde_debug_bus_ppb1_dump },
+ { DBGBUS_DSPP, 43, 0, _sde_debug_bus_ppb1_dump },
/* lm_lut */
{ DBGBUS_DSPP, 109, 0 },
@@ -1403,7 +1449,7 @@
{ DBGBUS_DSPP, 103, 0 },
/* crossbar */
- { DBGBUS_DSPP, 0, 0},
+ { DBGBUS_DSPP, 0, 0, _sde_debug_bus_xbar_dump },
/* rotator */
{ DBGBUS_DSPP, 9, 0},
@@ -1416,7 +1462,7 @@
{ DBGBUS_DSPP, 63, 4},
{ DBGBUS_DSPP, 63, 5},
{ DBGBUS_DSPP, 63, 6},
- { DBGBUS_DSPP, 63, 7},
+ { DBGBUS_DSPP, 63, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 64, 1},
{ DBGBUS_DSPP, 64, 2},
@@ -1424,7 +1470,7 @@
{ DBGBUS_DSPP, 64, 4},
{ DBGBUS_DSPP, 64, 5},
{ DBGBUS_DSPP, 64, 6},
- { DBGBUS_DSPP, 64, 7},
+ { DBGBUS_DSPP, 64, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 65, 1},
{ DBGBUS_DSPP, 65, 2},
@@ -1432,7 +1478,7 @@
{ DBGBUS_DSPP, 65, 4},
{ DBGBUS_DSPP, 65, 5},
{ DBGBUS_DSPP, 65, 6},
- { DBGBUS_DSPP, 65, 7},
+ { DBGBUS_DSPP, 65, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 66, 1},
{ DBGBUS_DSPP, 66, 2},
@@ -1440,7 +1486,7 @@
{ DBGBUS_DSPP, 66, 4},
{ DBGBUS_DSPP, 66, 5},
{ DBGBUS_DSPP, 66, 6},
- { DBGBUS_DSPP, 66, 7},
+ { DBGBUS_DSPP, 66, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 67, 1},
{ DBGBUS_DSPP, 67, 2},
@@ -1448,7 +1494,7 @@
{ DBGBUS_DSPP, 67, 4},
{ DBGBUS_DSPP, 67, 5},
{ DBGBUS_DSPP, 67, 6},
- { DBGBUS_DSPP, 67, 7},
+ { DBGBUS_DSPP, 67, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 68, 1},
{ DBGBUS_DSPP, 68, 2},
@@ -1456,7 +1502,7 @@
{ DBGBUS_DSPP, 68, 4},
{ DBGBUS_DSPP, 68, 5},
{ DBGBUS_DSPP, 68, 6},
- { DBGBUS_DSPP, 68, 7},
+ { DBGBUS_DSPP, 68, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 69, 1},
{ DBGBUS_DSPP, 69, 2},
@@ -1464,7 +1510,7 @@
{ DBGBUS_DSPP, 69, 4},
{ DBGBUS_DSPP, 69, 5},
{ DBGBUS_DSPP, 69, 6},
- { DBGBUS_DSPP, 69, 7},
+ { DBGBUS_DSPP, 69, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 84, 1},
{ DBGBUS_DSPP, 84, 2},
@@ -1472,7 +1518,7 @@
{ DBGBUS_DSPP, 84, 4},
{ DBGBUS_DSPP, 84, 5},
{ DBGBUS_DSPP, 84, 6},
- { DBGBUS_DSPP, 84, 7},
+ { DBGBUS_DSPP, 84, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 85, 1},
@@ -1481,7 +1527,7 @@
{ DBGBUS_DSPP, 85, 4},
{ DBGBUS_DSPP, 85, 5},
{ DBGBUS_DSPP, 85, 6},
- { DBGBUS_DSPP, 85, 7},
+ { DBGBUS_DSPP, 85, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 86, 1},
@@ -1490,7 +1536,7 @@
{ DBGBUS_DSPP, 86, 4},
{ DBGBUS_DSPP, 86, 5},
{ DBGBUS_DSPP, 86, 6},
- { DBGBUS_DSPP, 86, 7},
+ { DBGBUS_DSPP, 86, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 87, 1},
@@ -1499,7 +1545,7 @@
{ DBGBUS_DSPP, 87, 4},
{ DBGBUS_DSPP, 87, 5},
{ DBGBUS_DSPP, 87, 6},
- { DBGBUS_DSPP, 87, 7},
+ { DBGBUS_DSPP, 87, 7, _sde_debug_bus_lm_dump },
/* LM1 */
{ DBGBUS_DSPP, 70, 1},
@@ -1508,7 +1554,7 @@
{ DBGBUS_DSPP, 70, 4},
{ DBGBUS_DSPP, 70, 5},
{ DBGBUS_DSPP, 70, 6},
- { DBGBUS_DSPP, 70, 7},
+ { DBGBUS_DSPP, 70, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 71, 1},
{ DBGBUS_DSPP, 71, 2},
@@ -1516,7 +1562,7 @@
{ DBGBUS_DSPP, 71, 4},
{ DBGBUS_DSPP, 71, 5},
{ DBGBUS_DSPP, 71, 6},
- { DBGBUS_DSPP, 71, 7},
+ { DBGBUS_DSPP, 71, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 72, 1},
{ DBGBUS_DSPP, 72, 2},
@@ -1524,7 +1570,7 @@
{ DBGBUS_DSPP, 72, 4},
{ DBGBUS_DSPP, 72, 5},
{ DBGBUS_DSPP, 72, 6},
- { DBGBUS_DSPP, 72, 7},
+ { DBGBUS_DSPP, 72, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 73, 1},
{ DBGBUS_DSPP, 73, 2},
@@ -1532,7 +1578,7 @@
{ DBGBUS_DSPP, 73, 4},
{ DBGBUS_DSPP, 73, 5},
{ DBGBUS_DSPP, 73, 6},
- { DBGBUS_DSPP, 73, 7},
+ { DBGBUS_DSPP, 73, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 74, 1},
{ DBGBUS_DSPP, 74, 2},
@@ -1540,7 +1586,7 @@
{ DBGBUS_DSPP, 74, 4},
{ DBGBUS_DSPP, 74, 5},
{ DBGBUS_DSPP, 74, 6},
- { DBGBUS_DSPP, 74, 7},
+ { DBGBUS_DSPP, 74, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 75, 1},
{ DBGBUS_DSPP, 75, 2},
@@ -1548,7 +1594,7 @@
{ DBGBUS_DSPP, 75, 4},
{ DBGBUS_DSPP, 75, 5},
{ DBGBUS_DSPP, 75, 6},
- { DBGBUS_DSPP, 75, 7},
+ { DBGBUS_DSPP, 75, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 76, 1},
{ DBGBUS_DSPP, 76, 2},
@@ -1556,7 +1602,7 @@
{ DBGBUS_DSPP, 76, 4},
{ DBGBUS_DSPP, 76, 5},
{ DBGBUS_DSPP, 76, 6},
- { DBGBUS_DSPP, 76, 7},
+ { DBGBUS_DSPP, 76, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 88, 1},
{ DBGBUS_DSPP, 88, 2},
@@ -1564,7 +1610,7 @@
{ DBGBUS_DSPP, 88, 4},
{ DBGBUS_DSPP, 88, 5},
{ DBGBUS_DSPP, 88, 6},
- { DBGBUS_DSPP, 88, 7},
+ { DBGBUS_DSPP, 88, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 89, 1},
{ DBGBUS_DSPP, 89, 2},
@@ -1572,7 +1618,7 @@
{ DBGBUS_DSPP, 89, 4},
{ DBGBUS_DSPP, 89, 5},
{ DBGBUS_DSPP, 89, 6},
- { DBGBUS_DSPP, 89, 7},
+ { DBGBUS_DSPP, 89, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 90, 1},
{ DBGBUS_DSPP, 90, 2},
@@ -1580,7 +1626,7 @@
{ DBGBUS_DSPP, 90, 4},
{ DBGBUS_DSPP, 90, 5},
{ DBGBUS_DSPP, 90, 6},
- { DBGBUS_DSPP, 90, 7},
+ { DBGBUS_DSPP, 90, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 91, 1},
{ DBGBUS_DSPP, 91, 2},
@@ -1588,7 +1634,7 @@
{ DBGBUS_DSPP, 91, 4},
{ DBGBUS_DSPP, 91, 5},
{ DBGBUS_DSPP, 91, 6},
- { DBGBUS_DSPP, 91, 7},
+ { DBGBUS_DSPP, 91, 7, _sde_debug_bus_lm_dump },
/* LM2 */
{ DBGBUS_DSPP, 77, 0},
@@ -1598,7 +1644,7 @@
{ DBGBUS_DSPP, 77, 4},
{ DBGBUS_DSPP, 77, 5},
{ DBGBUS_DSPP, 77, 6},
- { DBGBUS_DSPP, 77, 7},
+ { DBGBUS_DSPP, 77, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 78, 0},
{ DBGBUS_DSPP, 78, 1},
@@ -1607,7 +1653,7 @@
{ DBGBUS_DSPP, 78, 4},
{ DBGBUS_DSPP, 78, 5},
{ DBGBUS_DSPP, 78, 6},
- { DBGBUS_DSPP, 78, 7},
+ { DBGBUS_DSPP, 78, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 79, 0},
{ DBGBUS_DSPP, 79, 1},
@@ -1616,7 +1662,7 @@
{ DBGBUS_DSPP, 79, 4},
{ DBGBUS_DSPP, 79, 5},
{ DBGBUS_DSPP, 79, 6},
- { DBGBUS_DSPP, 79, 7},
+ { DBGBUS_DSPP, 79, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 80, 0},
{ DBGBUS_DSPP, 80, 1},
@@ -1625,7 +1671,7 @@
{ DBGBUS_DSPP, 80, 4},
{ DBGBUS_DSPP, 80, 5},
{ DBGBUS_DSPP, 80, 6},
- { DBGBUS_DSPP, 80, 7},
+ { DBGBUS_DSPP, 80, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 81, 0},
{ DBGBUS_DSPP, 81, 1},
@@ -1634,7 +1680,7 @@
{ DBGBUS_DSPP, 81, 4},
{ DBGBUS_DSPP, 81, 5},
{ DBGBUS_DSPP, 81, 6},
- { DBGBUS_DSPP, 81, 7},
+ { DBGBUS_DSPP, 81, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 82, 0},
{ DBGBUS_DSPP, 82, 1},
@@ -1643,7 +1689,7 @@
{ DBGBUS_DSPP, 82, 4},
{ DBGBUS_DSPP, 82, 5},
{ DBGBUS_DSPP, 82, 6},
- { DBGBUS_DSPP, 82, 7},
+ { DBGBUS_DSPP, 82, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 83, 0},
{ DBGBUS_DSPP, 83, 1},
@@ -1652,7 +1698,7 @@
{ DBGBUS_DSPP, 83, 4},
{ DBGBUS_DSPP, 83, 5},
{ DBGBUS_DSPP, 83, 6},
- { DBGBUS_DSPP, 83, 7},
+ { DBGBUS_DSPP, 83, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 92, 1},
{ DBGBUS_DSPP, 92, 2},
@@ -1660,7 +1706,7 @@
{ DBGBUS_DSPP, 92, 4},
{ DBGBUS_DSPP, 92, 5},
{ DBGBUS_DSPP, 92, 6},
- { DBGBUS_DSPP, 92, 7},
+ { DBGBUS_DSPP, 92, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 93, 1},
{ DBGBUS_DSPP, 93, 2},
@@ -1668,7 +1714,7 @@
{ DBGBUS_DSPP, 93, 4},
{ DBGBUS_DSPP, 93, 5},
{ DBGBUS_DSPP, 93, 6},
- { DBGBUS_DSPP, 93, 7},
+ { DBGBUS_DSPP, 93, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 94, 1},
{ DBGBUS_DSPP, 94, 2},
@@ -1676,7 +1722,7 @@
{ DBGBUS_DSPP, 94, 4},
{ DBGBUS_DSPP, 94, 5},
{ DBGBUS_DSPP, 94, 6},
- { DBGBUS_DSPP, 94, 7},
+ { DBGBUS_DSPP, 94, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 95, 1},
{ DBGBUS_DSPP, 95, 2},
@@ -1684,7 +1730,7 @@
{ DBGBUS_DSPP, 95, 4},
{ DBGBUS_DSPP, 95, 5},
{ DBGBUS_DSPP, 95, 6},
- { DBGBUS_DSPP, 95, 7},
+ { DBGBUS_DSPP, 95, 7, _sde_debug_bus_lm_dump },
/* LM5 */
{ DBGBUS_DSPP, 110, 1},
@@ -1693,7 +1739,7 @@
{ DBGBUS_DSPP, 110, 4},
{ DBGBUS_DSPP, 110, 5},
{ DBGBUS_DSPP, 110, 6},
- { DBGBUS_DSPP, 110, 7},
+ { DBGBUS_DSPP, 110, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 111, 1},
{ DBGBUS_DSPP, 111, 2},
@@ -1701,7 +1747,7 @@
{ DBGBUS_DSPP, 111, 4},
{ DBGBUS_DSPP, 111, 5},
{ DBGBUS_DSPP, 111, 6},
- { DBGBUS_DSPP, 111, 7},
+ { DBGBUS_DSPP, 111, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 112, 1},
{ DBGBUS_DSPP, 112, 2},
@@ -1709,7 +1755,7 @@
{ DBGBUS_DSPP, 112, 4},
{ DBGBUS_DSPP, 112, 5},
{ DBGBUS_DSPP, 112, 6},
- { DBGBUS_DSPP, 112, 7},
+ { DBGBUS_DSPP, 112, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 113, 1},
{ DBGBUS_DSPP, 113, 2},
@@ -1717,7 +1763,7 @@
{ DBGBUS_DSPP, 113, 4},
{ DBGBUS_DSPP, 113, 5},
{ DBGBUS_DSPP, 113, 6},
- { DBGBUS_DSPP, 113, 7},
+ { DBGBUS_DSPP, 113, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 114, 1},
{ DBGBUS_DSPP, 114, 2},
@@ -1725,7 +1771,7 @@
{ DBGBUS_DSPP, 114, 4},
{ DBGBUS_DSPP, 114, 5},
{ DBGBUS_DSPP, 114, 6},
- { DBGBUS_DSPP, 114, 7},
+ { DBGBUS_DSPP, 114, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 115, 1},
{ DBGBUS_DSPP, 115, 2},
@@ -1733,7 +1779,7 @@
{ DBGBUS_DSPP, 115, 4},
{ DBGBUS_DSPP, 115, 5},
{ DBGBUS_DSPP, 115, 6},
- { DBGBUS_DSPP, 115, 7},
+ { DBGBUS_DSPP, 115, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 116, 1},
{ DBGBUS_DSPP, 116, 2},
@@ -1741,7 +1787,7 @@
{ DBGBUS_DSPP, 116, 4},
{ DBGBUS_DSPP, 116, 5},
{ DBGBUS_DSPP, 116, 6},
- { DBGBUS_DSPP, 116, 7},
+ { DBGBUS_DSPP, 116, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 117, 1},
{ DBGBUS_DSPP, 117, 2},
@@ -1749,7 +1795,7 @@
{ DBGBUS_DSPP, 117, 4},
{ DBGBUS_DSPP, 117, 5},
{ DBGBUS_DSPP, 117, 6},
- { DBGBUS_DSPP, 117, 7},
+ { DBGBUS_DSPP, 117, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 118, 1},
{ DBGBUS_DSPP, 118, 2},
@@ -1757,7 +1803,7 @@
{ DBGBUS_DSPP, 118, 4},
{ DBGBUS_DSPP, 118, 5},
{ DBGBUS_DSPP, 118, 6},
- { DBGBUS_DSPP, 118, 7},
+ { DBGBUS_DSPP, 118, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 119, 1},
{ DBGBUS_DSPP, 119, 2},
@@ -1765,7 +1811,7 @@
{ DBGBUS_DSPP, 119, 4},
{ DBGBUS_DSPP, 119, 5},
{ DBGBUS_DSPP, 119, 6},
- { DBGBUS_DSPP, 119, 7},
+ { DBGBUS_DSPP, 119, 7, _sde_debug_bus_lm_dump },
{ DBGBUS_DSPP, 120, 1},
{ DBGBUS_DSPP, 120, 2},
@@ -1773,7 +1819,7 @@
{ DBGBUS_DSPP, 120, 4},
{ DBGBUS_DSPP, 120, 5},
{ DBGBUS_DSPP, 120, 6},
- { DBGBUS_DSPP, 120, 7},
+ { DBGBUS_DSPP, 120, 7, _sde_debug_bus_lm_dump },
/* csc */
{ DBGBUS_SSPP0, 7, 0},
@@ -2276,6 +2322,9 @@
dump_addr[i*4 + 3] = status;
}
+ if (head->analyzer)
+ head->analyzer(head, status);
+
/* Disable debug bus once we are done */
writel_relaxed(0, mem_base + head->wr_addr);
@@ -2325,7 +2374,8 @@
bool in_log, in_mem;
u32 **dump_mem = NULL;
u32 *dump_addr = NULL;
- u32 value;
+ u32 value, d0, d1;
+ unsigned long reg, reg1, reg2;
struct vbif_debug_bus_entry *head;
phys_addr_t phys = 0;
int i, list_size = 0;
@@ -2398,6 +2448,35 @@
/* make sure that vbif core is on */
wmb();
+ /**
+ * Extract VBIF error info based on XIN halt and error status.
+ * If the XIN client is not in HALT state, or an error is detected,
+ * then retrieve the VBIF error info for it.
+ */
+ reg = readl_relaxed(mem_base + MMSS_VBIF_XIN_HALT_CTRL1);
+ reg1 = readl_relaxed(mem_base + MMSS_VBIF_PND_ERR);
+ reg2 = readl_relaxed(mem_base + MMSS_VBIF_SRC_ERR);
+ dev_err(sde_dbg_base.dev,
+ "XIN HALT:0x%lX, PND ERR:0x%lX, SRC ERR:0x%lX\n",
+ reg, reg1, reg2);
+ reg >>= 16;
+ reg &= ~(reg1 | reg2);
+ for (i = 0; i < MMSS_VBIF_CLIENT_NUM; i++) {
+ if (!test_bit(0, &reg)) {
+ writel_relaxed(i, mem_base + MMSS_VBIF_ERR_INFO);
+ /* make sure reg write goes through */
+ wmb();
+
+ d0 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO);
+ d1 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO_1);
+
+ dev_err(sde_dbg_base.dev,
+ "Client:%d, errinfo=0x%X, errinfo1=0x%X\n",
+ i, d0, d1);
+ }
+ reg >>= 1;
+ }
+
for (i = 0; i < bus_size; i++) {
head = dbg_bus + i;
diff --git a/drivers/gpu/drm/msm/sde_dbg.h b/drivers/gpu/drm/msm/sde_dbg.h
index 02d46c7..e14d60e 100644
--- a/drivers/gpu/drm/msm/sde_dbg.h
+++ b/drivers/gpu/drm/msm/sde_dbg.h
@@ -27,6 +27,7 @@
#define SDE_EVTLOG_FUNC_CASE5 0x7777
#define SDE_EVTLOG_PANIC 0xdead
#define SDE_EVTLOG_FATAL 0xbad
+#define SDE_EVTLOG_ERROR 0xebad
#define SDE_DBG_DUMP_DATA_LIMITER (NULL)
diff --git a/drivers/gpu/drm/msm/sde_edid_parser.c b/drivers/gpu/drm/msm/sde_edid_parser.c
index 12165e8..3d6c2ea 100644
--- a/drivers/gpu/drm/msm/sde_edid_parser.c
+++ b/drivers/gpu/drm/msm/sde_edid_parser.c
@@ -92,6 +92,21 @@
(i) < (end) && (i) + sde_cea_db_payload_len(&(cea)[(i)]) < (end); \
(i) += sde_cea_db_payload_len(&(cea)[(i)]) + 1)
+static bool sde_cea_db_is_hdmi_hf_vsdb(const u8 *db)
+{
+ int hdmi_id;
+
+ if (sde_cea_db_tag(db) != VENDOR_SPECIFIC_DATA_BLOCK)
+ return false;
+
+ if (sde_cea_db_payload_len(db) < 7)
+ return false;
+
+ hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);
+
+ return hdmi_id == HDMI_IEEE_OUI_HF;
+}
+
static u8 *sde_edid_find_extended_tag_block(struct edid *edid, int blk_id)
{
u8 *db = NULL;
@@ -213,10 +228,17 @@
{
u8 cea_mode = 0;
struct drm_display_mode *mode;
+ u32 mode_fmt_flags = 0;
/* Need to add Y420 support flag to the modes */
list_for_each_entry(mode, &connector->probed_modes, head) {
+ /* Cache the format flags before clearing */
+ mode_fmt_flags = mode->flags;
+ /* Clear the RGB/YUV format flags before calling upstream API */
+ mode->flags &= ~SDE_DRM_MODE_FLAG_FMT_MASK;
cea_mode = drm_match_cea_mode(mode);
+ /* Restore the format flags */
+ mode->flags = mode_fmt_flags;
if ((cea_mode != 0) && (cea_mode == video_format)) {
SDE_EDID_DEBUG("%s found match for %d ", __func__,
video_format);
@@ -230,7 +252,7 @@
const u8 *db)
{
u32 offset = 0;
- u8 len = 0;
+ u8 cmdb_len = 0;
u8 svd_len = 0;
const u8 *svd = NULL;
u32 i = 0, j = 0;
@@ -246,10 +268,8 @@
return;
}
SDE_EDID_DEBUG("%s +\n", __func__);
- len = db[0] & 0x1f;
+ cmdb_len = db[0] & 0x1f;
- if (len < 7)
- return;
/* Byte 3 to L+1 contain SVDs */
offset += 2;
@@ -257,20 +277,24 @@
if (svd) {
/*moving to the next byte as vic info begins there*/
- ++svd;
svd_len = svd[0] & 0x1f;
+ ++svd;
}
for (i = 0; i < svd_len; i++, j++) {
- video_format = *svd & 0x7F;
- if (db[offset] & (1 << j))
+ video_format = *(svd + i) & 0x7F;
+ if (cmdb_len == 1) {
+ /* If cmdb_len is 1, it means all SVDs support YUV */
+ sde_edid_set_y420_support(connector, video_format);
+ } else if (db[offset] & (1 << j)) {
sde_edid_set_y420_support(connector, video_format);
- if (j & 0x80) {
- j = j/8;
- offset++;
- if (offset >= len)
- break;
+ if (j & 0x80) {
+ j = j/8;
+ offset++;
+ if (offset >= cmdb_len)
+ break;
+ }
}
}
@@ -338,6 +362,63 @@
SDE_EDID_DEBUG("%s -\n", __func__);
}
+static void _sde_edid_update_dc_modes(
+struct drm_connector *connector, struct sde_edid_ctrl *edid_ctrl)
+{
+ int i, start, end;
+ u8 *edid_ext, *hdmi;
+ struct drm_display_info *disp_info;
+ u32 hdmi_dc_yuv_modes = 0;
+
+ SDE_EDID_DEBUG("%s +\n", __func__);
+
+ if (!connector || !edid_ctrl) {
+ SDE_ERROR("invalid input\n");
+ return;
+ }
+
+ disp_info = &connector->display_info;
+
+ edid_ext = sde_find_cea_extension(edid_ctrl->edid);
+
+ if (!edid_ext) {
+ SDE_ERROR("no cea extension\n");
+ return;
+ }
+
+ if (sde_cea_db_offsets(edid_ext, &start, &end))
+ return;
+
+ sde_for_each_cea_db(edid_ext, i, start, end) {
+ if (sde_cea_db_is_hdmi_hf_vsdb(&edid_ext[i])) {
+
+ hdmi = &edid_ext[i];
+
+ if (sde_cea_db_payload_len(hdmi) < 7)
+ continue;
+
+ if (hdmi[7] & DRM_EDID_YCBCR420_DC_30) {
+ hdmi_dc_yuv_modes |= DRM_EDID_YCBCR420_DC_30;
+ SDE_EDID_DEBUG("Y420 30-bit supported\n");
+ }
+
+ if (hdmi[7] & DRM_EDID_YCBCR420_DC_36) {
+ hdmi_dc_yuv_modes |= DRM_EDID_YCBCR420_DC_36;
+ SDE_EDID_DEBUG("Y420 36-bit supported\n");
+ }
+
+ if (hdmi[7] & DRM_EDID_YCBCR420_DC_48) {
+ hdmi_dc_yuv_modes |= DRM_EDID_YCBCR420_DC_36;
+ SDE_EDID_DEBUG("Y420 48-bit supported\n");
+ }
+ }
+ }
+
+ disp_info->edid_hdmi_dc_modes |= hdmi_dc_yuv_modes;
+
+ SDE_EDID_DEBUG("%s -\n", __func__);
+}
+
static void _sde_edid_extract_audio_data_blocks(
struct sde_edid_ctrl *edid_ctrl)
{
@@ -475,6 +556,7 @@
rc = drm_add_edid_modes(connector, edid_ctrl->edid);
sde_edid_set_mode_format(connector, edid_ctrl);
+ _sde_edid_update_dc_modes(connector, edid_ctrl);
SDE_EDID_DEBUG("%s -", __func__);
return rc;
}
@@ -484,6 +566,45 @@
return rc;
}
+u32 sde_get_sink_bpc(void *input)
+{
+ struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(input);
+ struct edid *edid = edid_ctrl->edid;
+
+ if (!edid) {
+ SDE_ERROR("invalid edid input\n");
+ return 0;
+ }
+
+ if ((edid->revision < 3) || !(edid->input & DRM_EDID_INPUT_DIGITAL))
+ return 0;
+
+ if (edid->revision < 4) {
+ if (edid->input & DRM_EDID_DIGITAL_TYPE_DVI)
+ return 8;
+ else
+ return 0;
+ }
+
+ switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) {
+ case DRM_EDID_DIGITAL_DEPTH_6:
+ return 6;
+ case DRM_EDID_DIGITAL_DEPTH_8:
+ return 8;
+ case DRM_EDID_DIGITAL_DEPTH_10:
+ return 10;
+ case DRM_EDID_DIGITAL_DEPTH_12:
+ return 12;
+ case DRM_EDID_DIGITAL_DEPTH_14:
+ return 14;
+ case DRM_EDID_DIGITAL_DEPTH_16:
+ return 16;
+ case DRM_EDID_DIGITAL_DEPTH_UNDEF:
+ default:
+ return 0;
+ }
+}
+
bool sde_detect_hdmi_monitor(void *input)
{
struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(input);
diff --git a/drivers/gpu/drm/msm/sde_edid_parser.h b/drivers/gpu/drm/msm/sde_edid_parser.h
index 1143dc2..b58b322 100644
--- a/drivers/gpu/drm/msm/sde_edid_parser.h
+++ b/drivers/gpu/drm/msm/sde_edid_parser.h
@@ -33,6 +33,8 @@
#define SDE_CEA_EXT 0x02
#define SDE_EXTENDED_TAG 0x07
+#define SDE_DRM_MODE_FLAG_FMT_MASK (0x3 << 20)
+
enum extended_data_block_types {
VIDEO_CAPABILITY_DATA_BLOCK = 0x0,
VENDOR_SPECIFIC_VIDEO_DATA_BLOCK = 0x01,
@@ -136,6 +138,14 @@
bool sde_detect_hdmi_monitor(void *edid_ctrl);
/**
+ * sde_get_sink_bpc() - return the bpc of sink device.
+ * @edid_ctrl: Handle to the edid_ctrl structure.
+ *
+ * Return: bpc supported by the sink.
+ */
+u32 sde_get_sink_bpc(void *edid_ctrl);
+
+/**
* _sde_edid_update_modes() - populate EDID modes.
* @edid_ctrl: Handle to the edid_ctrl structure.
*
diff --git a/drivers/gpu/drm/msm/sde_hdcp.h b/drivers/gpu/drm/msm/sde_hdcp.h
new file mode 100644
index 0000000..05d290b
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde_hdcp.h
@@ -0,0 +1,75 @@
+/* Copyright (c) 2012, 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SDE_HDCP_H__
+#define __SDE_HDCP_H__
+
+#include <soc/qcom/scm.h>
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/of_device.h>
+#include <linux/i2c.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include "sde_kms.h"
+
+enum sde_hdcp_client_id {
+ HDCP_CLIENT_HDMI,
+ HDCP_CLIENT_DP,
+};
+
+enum sde_hdcp_states {
+ HDCP_STATE_INACTIVE,
+ HDCP_STATE_AUTHENTICATING,
+ HDCP_STATE_AUTHENTICATED,
+ HDCP_STATE_AUTH_FAIL,
+ HDCP_STATE_AUTH_ENC_NONE,
+ HDCP_STATE_AUTH_ENC_1X,
+ HDCP_STATE_AUTH_ENC_2P2
+};
+
+struct sde_hdcp_init_data {
+ struct dss_io_data *core_io;
+ struct dss_io_data *qfprom_io;
+ struct dss_io_data *hdcp_io;
+ struct drm_dp_aux *drm_aux;
+ struct mutex *mutex;
+ struct workqueue_struct *workq;
+ void *cb_data;
+ void (*notify_status)(void *cb_data, enum sde_hdcp_states status);
+ u8 sink_rx_status;
+ unsigned char *revision;
+ u32 phy_addr;
+ bool sec_access;
+ enum sde_hdcp_client_id client_id;
+};
+
+struct sde_hdcp_ops {
+ int (*isr)(void *ptr);
+ int (*cp_irq)(void *ptr);
+ int (*reauthenticate)(void *input);
+ int (*authenticate)(void *hdcp_ctrl);
+ bool (*feature_supported)(void *input);
+ void (*off)(void *hdcp_ctrl);
+};
+
+void *sde_hdcp_1x_init(struct sde_hdcp_init_data *init_data);
+void sde_hdcp_1x_deinit(void *input);
+struct sde_hdcp_ops *sde_hdcp_1x_start(void *input);
+void *sde_dp_hdcp2p2_init(struct sde_hdcp_init_data *init_data);
+void sde_dp_hdcp2p2_deinit(void *input);
+const char *sde_hdcp_state_name(enum sde_hdcp_states hdcp_state);
+struct sde_hdcp_ops *sde_dp_hdcp2p2_start(void *input);
+#endif /* __SDE_HDCP_H__ */
diff --git a/drivers/gpu/drm/msm/sde_hdcp_1x.c b/drivers/gpu/drm/msm/sde_hdcp_1x.c
new file mode 100644
index 0000000..7951c23
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde_hdcp_1x.c
@@ -0,0 +1,1579 @@
+/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[sde-hdcp1x] %s: " fmt, __func__
+
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/iopoll.h>
+#include <linux/hdcp_qseecom.h>
+#include <drm/drm_dp_helper.h>
+#include "sde_hdcp.h"
+#include "hdmi.xml.h"
+#include "video/msm_hdmi_hdcp_mgr.h"
+#include "dp/dp_reg.h"
+
+/* Printable name of the current state; requires a local 'hdcp' pointer. */
+#define SDE_HDCP_STATE_NAME (sde_hdcp_state_name(hdcp->hdcp_state))
+
+/* QFPROM Registers for HDMI/HDCP */
+#define QFPROM_RAW_FEAT_CONFIG_ROW0_LSB (0x000000F8)
+#define QFPROM_RAW_FEAT_CONFIG_ROW0_MSB (0x000000FC)
+#define QFPROM_RAW_VERSION_4 (0x000000A8)
+#define SEC_CTRL_HW_VERSION (0x00006000)
+#define HDCP_KSV_LSB (0x000060D8)
+#define HDCP_KSV_MSB (0x000060DC)
+#define HDCP_KSV_VERSION_4_OFFSET (0x00000014)
+
+/* SEC_CTRL version that supports HDCP SEL */
+#define HDCP_SEL_MIN_SEC_VERSION (0x50010000)
+
+/* HDCP Keys state based on HDMI_HDCP_LINK0_STATUS:KEYS_STATE */
+#define HDCP_KEYS_STATE_NO_KEYS 0
+#define HDCP_KEYS_STATE_NOT_CHECKED 1
+#define HDCP_KEYS_STATE_CHECKING 2
+#define HDCP_KEYS_STATE_VALID 3
+#define HDCP_KEYS_STATE_AKSV_NOT_VALID 4
+#define HDCP_KEYS_STATE_CHKSUM_MISMATCH 5
+#define HDCP_KEYS_STATE_PROD_AKSV 6
+#define HDCP_KEYS_STATE_RESERVED 7
+
+#define TZ_HDCP_CMD_ID 0x00004401
+
+/* Both macros below expect a local 'isr' pointer to sde_hdcp_int_set. */
+#define HDCP_INT_CLR (isr->auth_success_ack | isr->auth_fail_ack | \
+ isr->auth_fail_info_ack | isr->tx_req_ack | \
+ isr->encryption_ready_ack | \
+ isr->encryption_not_ready_ack | isr->tx_req_done_ack)
+
+#define HDCP_INT_EN (isr->auth_success_mask | isr->auth_fail_mask | \
+ isr->encryption_ready_mask | \
+ isr->encryption_not_ready_mask)
+
+/* 20ms poll interval, 2s total timeout for HW status polling */
+#define HDCP_POLL_SLEEP_US (20 * 1000)
+#define HDCP_POLL_TIMEOUT_US (HDCP_POLL_SLEEP_US * 100)
+
+/* True when the session is in state x; requires a local 'hdcp' pointer. */
+#define sde_hdcp_1x_state(x) (hdcp->hdcp_state == x)
+
+/* A named sink-side register: DDC/DPCD address plus transfer length. */
+struct sde_hdcp_sink_addr {
+ char *name;
+ u32 addr;
+ u32 len;
+};
+
+/* Pairs a local HW register with the sink address its value comes from. */
+struct sde_hdcp_1x_reg_data {
+ u32 reg_id;
+ struct sde_hdcp_sink_addr *sink;
+};
+
+/*
+ * Full sink register map for one client type (HDMI or DP); initialized
+ * from HDCP_HDMI_SINK_ADDR_MAP / HDCP_DP_SINK_ADDR_MAP below.
+ */
+struct sde_hdcp_sink_addr_map {
+ /* addresses to read from sink */
+ struct sde_hdcp_sink_addr bcaps;
+ struct sde_hdcp_sink_addr bksv;
+ struct sde_hdcp_sink_addr r0;
+ struct sde_hdcp_sink_addr bstatus;
+ struct sde_hdcp_sink_addr cp_irq_status;
+ struct sde_hdcp_sink_addr ksv_fifo;
+ struct sde_hdcp_sink_addr v_h0;
+ struct sde_hdcp_sink_addr v_h1;
+ struct sde_hdcp_sink_addr v_h2;
+ struct sde_hdcp_sink_addr v_h3;
+ struct sde_hdcp_sink_addr v_h4;
+
+ /* addresses to write to sink */
+ struct sde_hdcp_sink_addr an;
+ struct sde_hdcp_sink_addr aksv;
+ struct sde_hdcp_sink_addr ainfo;
+};
+
+/*
+ * Per-client interrupt register layout: one status register plus the
+ * mask/ack/status bit positions for each HDCP interrupt source.
+ */
+struct sde_hdcp_int_set {
+ /* interrupt register */
+ u32 int_reg;
+
+ /* interrupt enable/disable masks */
+ u32 auth_success_mask;
+ u32 auth_fail_mask;
+ u32 encryption_ready_mask;
+ u32 encryption_not_ready_mask;
+ u32 tx_req_mask;
+ u32 tx_req_done_mask;
+
+ /* interrupt acknowledgment */
+ u32 auth_success_ack;
+ u32 auth_fail_ack;
+ u32 auth_fail_info_ack;
+ u32 encryption_ready_ack;
+ u32 encryption_not_ready_ack;
+ u32 tx_req_ack;
+ u32 tx_req_done_ack;
+
+ /* interrupt status */
+ u32 auth_success_int;
+ u32 auth_fail_int;
+ u32 encryption_ready;
+ u32 encryption_not_ready;
+ u32 tx_req_int;
+ u32 tx_req_done_int;
+};
+
+/*
+ * Per-client HDCP register map. The *_offset fields are bit positions
+ * within the status register; sec_* registers live in the secure
+ * (hdcp_io) address space, the rest in core_io.
+ */
+struct sde_hdcp_reg_set {
+ u32 status;
+ u32 keys_offset;
+ u32 r0_offset;
+ u32 v_offset;
+ u32 ctrl;
+ u32 aksv_lsb;
+ u32 aksv_msb;
+ u32 entropy_ctrl0;
+ u32 entropy_ctrl1;
+ u32 sec_sha_ctrl;
+ u32 sec_sha_data;
+ u32 sha_status;
+
+ u32 data2_0;
+ u32 data3;
+ u32 data4;
+ u32 data5;
+ u32 data6;
+
+ u32 sec_data0;
+ u32 sec_data1;
+ u32 sec_data7;
+ u32 sec_data8;
+ u32 sec_data9;
+ u32 sec_data10;
+ u32 sec_data11;
+ u32 sec_data12;
+
+ u32 reset;
+ u32 reset_bit;
+
+ u32 repeater;
+};
+
+/* HDMI register set is not populated in this file; zero-initialized. */
+#define HDCP_REG_SET_CLIENT_HDMI \
+	{0}
+
+/* Positional initializer for struct sde_hdcp_reg_set (DP client). */
+#define HDCP_REG_SET_CLIENT_DP \
+{DP_HDCP_STATUS, 16, 14, 13, DP_HDCP_CTRL, \
+ DP_HDCP_SW_LOWER_AKSV, DP_HDCP_SW_UPPER_AKSV, \
+ DP_HDCP_ENTROPY_CTRL0, DP_HDCP_ENTROPY_CTRL1, \
+ HDCP_SEC_DP_TZ_HV_HLOS_HDCP_SHA_CTRL, \
+ HDCP_SEC_DP_TZ_HV_HLOS_HDCP_SHA_DATA, \
+ DP_HDCP_SHA_STATUS, DP_HDCP_RCVPORT_DATA2_0, \
+ DP_HDCP_RCVPORT_DATA3, DP_HDCP_RCVPORT_DATA4, \
+ DP_HDCP_RCVPORT_DATA5, DP_HDCP_RCVPORT_DATA6, \
+ HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA0, \
+ HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA1, \
+ HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA7, \
+ HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA8, \
+ HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA9, \
+ HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA10, \
+ HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA11, \
+ HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA12, \
+ DP_SW_RESET, BIT(1), BIT(1)}
+
+/* Sink DDC offsets for HDMI, in struct sde_hdcp_sink_addr_map order. */
+#define HDCP_HDMI_SINK_ADDR_MAP \
+	{{"bcaps", 0x40, 1}, {"bksv", 0x00, 5}, {"r0'", 0x08, 2}, \
+	 {"bstatus", 0x41, 2}, {"??", 0x0, 0}, {"ksv-fifo", 0x43, 0}, \
+	 {"v_h0", 0x20, 4}, {"v_h1", 0x24, 4}, {"v_h2", 0x28, 4}, \
+	 {"v_h3", 0x2c, 4}, {"v_h4", 0x30, 4}, {"an", 0x18, 8}, \
+	 {"aksv", 0x10, 5}, {"ainfo", 0x00, 0},}
+
+/* Sink DPCD offsets for DP, in struct sde_hdcp_sink_addr_map order. */
+#define HDCP_DP_SINK_ADDR_MAP \
+	{{"bcaps", 0x68028, 1}, {"bksv", 0x68000, 5}, {"r0'", 0x68005, 2}, \
+	 {"binfo", 0x6802A, 2}, {"cp_irq_status", 0x68029, 1}, \
+	 {"ksv-fifo", 0x6802C, 0}, {"v_h0", 0x68014, 4}, {"v_h1", 0x68018, 4}, \
+	 {"v_h2", 0x6801C, 4}, {"v_h3", 0x68020, 4}, {"v_h4", 0x68024, 4}, \
+	 {"an", 0x6800C, 8}, {"aksv", 0x68007, 5}, {"ainfo", 0x6803B, 1} }
+
+/* HDMI interrupt set is not populated in this file; zero-initialized. */
+#define HDCP_HDMI_INT_SET \
+	{0}
+
+/* Positional initializer for struct sde_hdcp_int_set (DP client). */
+#define HDCP_DP_INT_SET \
+	{DP_INTR_STATUS2, \
+	 BIT(17), BIT(20), BIT(24), BIT(27), 0, 0, \
+	 BIT(16), BIT(19), BIT(21), BIT(23), BIT(26), 0, 0, \
+	 BIT(15), BIT(18), BIT(22), BIT(25), 0, 0}
+
+/*
+ * Per-session HDCP 1.x context. Allocated by sde_hdcp_1x_init() and
+ * passed as the opaque handle to all sde_hdcp_ops callbacks.
+ */
+struct sde_hdcp_1x {
+ u8 bcaps;
+ u32 tp_msgid;
+ /* An (session nonce) and AKSV read back from local HW */
+ u32 an_0, an_1, aksv_0, aksv_1;
+ bool sink_r0_ready;
+ bool reauth;
+ bool ksv_ready;
+ enum sde_hdcp_states hdcp_state;
+ /* last topology reported to userspace vs. the one being built */
+ struct HDCP_V2V1_MSG_TOPOLOGY cached_tp;
+ struct HDCP_V2V1_MSG_TOPOLOGY current_tp;
+ struct delayed_work hdcp_auth_work;
+ struct completion r0_checked;
+ struct completion sink_r0_available;
+ struct sde_hdcp_init_data init_data;
+ struct sde_hdcp_ops *ops;
+ struct sde_hdcp_reg_set reg_set;
+ struct sde_hdcp_int_set int_set;
+ struct sde_hdcp_sink_addr_map sink_addr;
+ struct workqueue_struct *workq;
+};
+
+/*
+ * sde_hdcp_state_name() - map an HDCP state to a printable name.
+ *
+ * Covers every enum sde_hdcp_states value (the original switch was
+ * missing the three AUTH_ENC_* states, which logged as "???");
+ * out-of-range values still return "???".
+ */
+const char *sde_hdcp_state_name(enum sde_hdcp_states hdcp_state)
+{
+ switch (hdcp_state) {
+ case HDCP_STATE_INACTIVE: return "HDCP_STATE_INACTIVE";
+ case HDCP_STATE_AUTHENTICATING: return "HDCP_STATE_AUTHENTICATING";
+ case HDCP_STATE_AUTHENTICATED: return "HDCP_STATE_AUTHENTICATED";
+ case HDCP_STATE_AUTH_FAIL: return "HDCP_STATE_AUTH_FAIL";
+ case HDCP_STATE_AUTH_ENC_NONE: return "HDCP_STATE_AUTH_ENC_NONE";
+ case HDCP_STATE_AUTH_ENC_1X: return "HDCP_STATE_AUTH_ENC_1X";
+ case HDCP_STATE_AUTH_ENC_2P2: return "HDCP_STATE_AUTH_ENC_2P2";
+ default: return "???";
+ }
+}
+
+/*
+ * sde_hdcp_1x_count_one() - count set bits across a byte array.
+ *
+ * Used to validate that a KSV contains exactly twenty 1s as required
+ * by the HDCP 1.x specification.
+ */
+static int sde_hdcp_1x_count_one(u8 *array, u8 len)
+{
+ int idx, count = 0;
+
+ for (idx = 0; idx < len; idx++) {
+ u8 byte = array[idx];
+
+ while (byte) {
+ count += byte & 0x1;
+ byte >>= 1;
+ }
+ }
+
+ return count;
+}
+
+/*
+ * sde_hdcp_1x_load_keys() - program the AKSV and enable the HDCP engine.
+ *
+ * Reads the transmitter AKSV either via the TZ (SW keys) or directly
+ * from the QFPROM fuses, validates it (20 ones), writes it to the HDCP
+ * block along with the entropy seeds, and enables the engine. On
+ * success moves the session to HDCP_STATE_AUTHENTICATING.
+ *
+ * @input: struct sde_hdcp_1x pointer (opaque callback signature).
+ * Return: 0 on success, -EINVAL on bad input/state or invalid AKSV.
+ */
+static int sde_hdcp_1x_load_keys(void *input)
+{
+ int rc = 0;
+ bool use_sw_keys = false;
+ u32 reg_val;
+ u32 ksv_lsb_addr, ksv_msb_addr;
+ u32 aksv_lsb, aksv_msb;
+ u8 aksv[5];
+ struct dss_io_data *io;
+ struct dss_io_data *qfprom_io;
+ struct sde_hdcp_1x *hdcp = input;
+ struct sde_hdcp_reg_set *reg_set;
+
+ if (!hdcp || !hdcp->init_data.core_io ||
+ !hdcp->init_data.qfprom_io) {
+ pr_err("invalid input\n");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ /* keys may only be (re)loaded from idle or after a failure */
+ if (!sde_hdcp_1x_state(HDCP_STATE_INACTIVE) &&
+ !sde_hdcp_1x_state(HDCP_STATE_AUTH_FAIL)) {
+ pr_err("%s: invalid state. returning\n",
+ SDE_HDCP_STATE_NAME);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ io = hdcp->init_data.core_io;
+ qfprom_io = hdcp->init_data.qfprom_io;
+ reg_set = &hdcp->reg_set;
+
+ /* On compatible hardware, use SW keys */
+ reg_val = DSS_REG_R(qfprom_io, SEC_CTRL_HW_VERSION);
+ if (reg_val >= HDCP_SEL_MIN_SEC_VERSION) {
+ reg_val = DSS_REG_R(qfprom_io,
+ QFPROM_RAW_FEAT_CONFIG_ROW0_MSB +
+ QFPROM_RAW_VERSION_4);
+
+ /* fuse bit 23 clear means SW keys are permitted */
+ if (!(reg_val & BIT(23)))
+ use_sw_keys = true;
+ }
+
+ if (use_sw_keys) {
+ /* TZ provides the AKSV when SW keys are in use */
+ if (hdcp1_set_keys(&aksv_msb, &aksv_lsb)) {
+ pr_err("setting hdcp SW keys failed\n");
+ rc = -EINVAL;
+ goto end;
+ }
+ } else {
+ /* Fetch aksv from QFPROM, this info should be public. */
+ ksv_lsb_addr = HDCP_KSV_LSB;
+ ksv_msb_addr = HDCP_KSV_MSB;
+
+ if (hdcp->init_data.sec_access) {
+ ksv_lsb_addr += HDCP_KSV_VERSION_4_OFFSET;
+ ksv_msb_addr += HDCP_KSV_VERSION_4_OFFSET;
+ }
+
+ aksv_lsb = DSS_REG_R(qfprom_io, ksv_lsb_addr);
+ aksv_msb = DSS_REG_R(qfprom_io, ksv_msb_addr);
+ }
+
+ pr_debug("%s: AKSV=%02x%08x\n", SDE_HDCP_STATE_NAME,
+ aksv_msb, aksv_lsb);
+
+ /* unpack the 40-bit AKSV into bytes, LSB first */
+ aksv[0] = aksv_lsb & 0xFF;
+ aksv[1] = (aksv_lsb >> 8) & 0xFF;
+ aksv[2] = (aksv_lsb >> 16) & 0xFF;
+ aksv[3] = (aksv_lsb >> 24) & 0xFF;
+ aksv[4] = aksv_msb & 0xFF;
+
+ /* check there are 20 ones in AKSV */
+ if (sde_hdcp_1x_count_one(aksv, 5) != 20) {
+ pr_err("AKSV bit count failed\n");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ DSS_REG_W(io, reg_set->aksv_lsb, aksv_lsb);
+ DSS_REG_W(io, reg_set->aksv_msb, aksv_msb);
+
+ /* Setup seed values for random number An */
+ DSS_REG_W(io, reg_set->entropy_ctrl0, 0xB1FFB0FF);
+ DSS_REG_W(io, reg_set->entropy_ctrl1, 0xF00DFACE);
+
+ /* make sure hw is programmed */
+ wmb();
+
+ /* enable hdcp engine */
+ DSS_REG_W(io, reg_set->ctrl, 0x1);
+
+ hdcp->hdcp_state = HDCP_STATE_AUTHENTICATING;
+end:
+ return rc;
+}
+
+/*
+ * sde_hdcp_1x_read() - read @sink->len bytes from the sink.
+ *
+ * For DP clients the read goes over DPCD in chunks of at most 15 bytes.
+ * For other clients this is currently a no-op returning 0.
+ * The @realign flag is accepted but unused here -- NOTE(review):
+ * presumably reserved for KSV-fifo reads; confirm against callers.
+ *
+ * Return: 0 on success, negative error code on a short or failed
+ * transfer. (The original left rc at 0 on failure, silently hiding
+ * DPCD errors from every caller.)
+ */
+static int sde_hdcp_1x_read(struct sde_hdcp_1x *hdcp,
+ struct sde_hdcp_sink_addr *sink,
+ u8 *buf, bool realign)
+{
+ int const max_size = 15;
+ int rc = 0, read_size = 0, bytes_read = 0;
+
+ if (hdcp->init_data.client_id == HDCP_CLIENT_DP) {
+ int size = sink->len, offset = sink->addr;
+
+ do {
+ read_size = min(size, max_size);
+
+ bytes_read = drm_dp_dpcd_read(hdcp->init_data.drm_aux,
+ offset, buf, read_size);
+ if (bytes_read != read_size) {
+ pr_err("fail: offset(0x%x), size(0x%x), rc(0x%x)\n",
+ offset, read_size, bytes_read);
+ /* propagate the AUX failure to the caller */
+ rc = bytes_read < 0 ? bytes_read : -EIO;
+ break;
+ }
+
+ buf += read_size;
+ offset += read_size;
+ size -= read_size;
+ } while (size > 0);
+ }
+
+ return rc;
+}
+
+/*
+ * sde_hdcp_1x_write() - write @sink->len bytes from @buf to the sink.
+ *
+ * For DP clients the write goes over DPCD in chunks of at most 16
+ * bytes. For other clients this is currently a no-op returning 0.
+ *
+ * Return: 0 on success, negative error code on a short or failed
+ * transfer. (The original left rc at 0 on failure, silently hiding
+ * DPCD errors from every caller.)
+ */
+static int sde_hdcp_1x_write(struct sde_hdcp_1x *hdcp,
+ struct sde_hdcp_sink_addr *sink, u8 *buf)
+{
+ int const max_size = 16;
+ int rc = 0, write_size = 0, bytes_written = 0;
+
+ if (hdcp->init_data.client_id == HDCP_CLIENT_DP) {
+ int size = sink->len, offset = sink->addr;
+
+ do {
+ write_size = min(size, max_size);
+
+ bytes_written =
+ drm_dp_dpcd_write(hdcp->init_data.drm_aux,
+ offset, buf, write_size);
+ if (bytes_written != write_size) {
+ pr_err("fail: offset(0x%x), size(0x%x), rc(0x%x)\n",
+ offset, write_size, bytes_written);
+ /* propagate the AUX failure to the caller */
+ rc = bytes_written < 0 ? bytes_written : -EIO;
+ break;
+ }
+
+ buf += write_size;
+ offset += write_size;
+ size -= write_size;
+ } while (size > 0);
+ }
+
+ return rc;
+}
+
+/*
+ * sde_hdcp_1x_enable_interrupts() - arm the HDCP interrupt sources.
+ *
+ * Acknowledges any stale interrupts and unmasks the auth and
+ * encryption status interrupts in a single read-modify-write of the
+ * client's interrupt register.
+ */
+static void sde_hdcp_1x_enable_interrupts(struct sde_hdcp_1x *hdcp)
+{
+ struct dss_io_data *io = hdcp->init_data.core_io;
+ struct sde_hdcp_int_set *isr = &hdcp->int_set;
+ u32 reg_val;
+
+ reg_val = DSS_REG_R(io, isr->int_reg);
+ reg_val |= HDCP_INT_CLR | HDCP_INT_EN;
+ DSS_REG_W(io, isr->int_reg, reg_val);
+}
+
+/*
+ * sde_hdcp_1x_read_bcaps() - read the sink's BCAPS and classify it.
+ *
+ * Caches BCAPS in hdcp->bcaps, records whether the downstream device
+ * is a repeater or a plain receiver, and mirrors BCAPS into the secure
+ * HDCP block. Only valid while authenticating.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int sde_hdcp_1x_read_bcaps(struct sde_hdcp_1x *hdcp)
+{
+ int rc;
+ struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set;
+ struct dss_io_data *hdcp_io = hdcp->init_data.hdcp_io;
+
+ if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+ pr_err("invalid state\n");
+ return -EINVAL;
+ }
+
+ rc = sde_hdcp_1x_read(hdcp, &hdcp->sink_addr.bcaps,
+ &hdcp->bcaps, false);
+ if (rc) {
+ pr_err("error reading bcaps\n");
+ goto error;
+ }
+
+ pr_debug("bcaps read: 0x%x\n", hdcp->bcaps);
+
+ /* the repeater bit position is client specific (reg_set->repeater) */
+ hdcp->current_tp.ds_type = hdcp->bcaps & reg_set->repeater ?
+ DS_REPEATER : DS_RECEIVER;
+
+ pr_debug("ds: %s\n", hdcp->current_tp.ds_type == DS_REPEATER ?
+ "repeater" : "receiver");
+
+ /* Write BCAPS to the hardware */
+ DSS_REG_W(hdcp_io, reg_set->sec_data12, hdcp->bcaps);
+error:
+ return rc;
+}
+
+/*
+ * sde_hdcp_1x_wait_for_hw_ready() - wait for keys and An generation.
+ *
+ * Polls the status register until the key state machine reports
+ * HDCP_KEYS_STATE_VALID, clears AInfo, then polls for the An-ready
+ * bits. Each poll also bails out early if the session leaves the
+ * AUTHENTICATING state (e.g. an off/reauth raced with us).
+ *
+ * Return: 0 on success, negative error code on timeout or state change.
+ */
+static int sde_hdcp_1x_wait_for_hw_ready(struct sde_hdcp_1x *hdcp)
+{
+ int rc;
+ u32 link0_status;
+ struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set;
+ struct dss_io_data *io = hdcp->init_data.core_io;
+
+ if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+ pr_err("invalid state\n");
+ return -EINVAL;
+ }
+
+ /* Wait for HDCP keys to be checked and validated */
+ rc = readl_poll_timeout(io->base + reg_set->status, link0_status,
+ ((link0_status >> reg_set->keys_offset) & 0x7)
+ == HDCP_KEYS_STATE_VALID ||
+ !sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING),
+ HDCP_POLL_SLEEP_US, HDCP_POLL_TIMEOUT_US);
+ if (rc) {
+ pr_err("key not ready\n");
+ goto error;
+ }
+
+ /*
+ * 1.1_Features turned off by default.
+ * No need to write AInfo since 1.1_Features is disabled.
+ */
+ DSS_REG_W(io, reg_set->data4, 0);
+
+ /* Wait for An0 and An1 bit to be ready */
+ rc = readl_poll_timeout(io->base + reg_set->status, link0_status,
+ (link0_status & (BIT(8) | BIT(9))) ||
+ !sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING),
+ HDCP_POLL_SLEEP_US, HDCP_POLL_TIMEOUT_US);
+ if (rc) {
+ pr_err("An not ready\n");
+ goto error;
+ }
+
+ /* As per hardware recommendations, wait before reading An */
+ msleep(20);
+error:
+ /* a concurrent state change invalidates whatever we polled */
+ if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING))
+ rc = -EINVAL;
+
+ return rc;
+}
+
+/*
+ * sde_hdcp_1x_send_an_aksv_to_sink() - transmit An and AKSV downstream.
+ *
+ * Serializes the 64-bit An and 40-bit AKSV (previously captured from
+ * local HW into hdcp->an_*/aksv_*) into little-endian byte arrays and
+ * writes them to the sink, An first as required by the protocol.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int sde_hdcp_1x_send_an_aksv_to_sink(struct sde_hdcp_1x *hdcp)
+{
+ int rc;
+ u8 an[8], aksv[5];
+
+ if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+ pr_err("invalid state\n");
+ return -EINVAL;
+ }
+
+ /* unpack An into bytes, LSB first */
+ an[0] = hdcp->an_0 & 0xFF;
+ an[1] = (hdcp->an_0 >> 8) & 0xFF;
+ an[2] = (hdcp->an_0 >> 16) & 0xFF;
+ an[3] = (hdcp->an_0 >> 24) & 0xFF;
+ an[4] = hdcp->an_1 & 0xFF;
+ an[5] = (hdcp->an_1 >> 8) & 0xFF;
+ an[6] = (hdcp->an_1 >> 16) & 0xFF;
+ an[7] = (hdcp->an_1 >> 24) & 0xFF;
+
+ pr_debug("an read: 0x%2x%2x%2x%2x%2x%2x%2x%2x\n",
+ an[7], an[6], an[5], an[4], an[3], an[2], an[1], an[0]);
+
+ rc = sde_hdcp_1x_write(hdcp, &hdcp->sink_addr.an, an);
+ if (rc) {
+ pr_err("error writing an to sink\n");
+ goto error;
+ }
+
+ /* Copy An and AKSV to byte arrays for transmission */
+ aksv[0] = hdcp->aksv_0 & 0xFF;
+ aksv[1] = (hdcp->aksv_0 >> 8) & 0xFF;
+ aksv[2] = (hdcp->aksv_0 >> 16) & 0xFF;
+ aksv[3] = (hdcp->aksv_0 >> 24) & 0xFF;
+ aksv[4] = hdcp->aksv_1 & 0xFF;
+
+ pr_debug("aksv read: 0x%2x%2x%2x%2x%2x\n",
+ aksv[4], aksv[3], aksv[2], aksv[1], aksv[0]);
+
+ rc = sde_hdcp_1x_write(hdcp, &hdcp->sink_addr.aksv, aksv);
+ if (rc) {
+ pr_err("error writing aksv to sink\n");
+ goto error;
+ }
+error:
+ return rc;
+}
+
+/*
+ * sde_hdcp_1x_read_an_aksv_from_hw() - capture An and AKSV registers.
+ *
+ * Latches the HW-generated An (data5/data6) and the programmed AKSV
+ * (data3/data4) into the session context for later transmission.
+ *
+ * For DP the An registers are read twice with a 1us delay --
+ * NOTE(review): presumably a HW latching quirk; confirm against the
+ * hardware programming guide.
+ *
+ * Return: 0 on success, -EINVAL if not authenticating.
+ */
+static int sde_hdcp_1x_read_an_aksv_from_hw(struct sde_hdcp_1x *hdcp)
+{
+ struct dss_io_data *io = hdcp->init_data.core_io;
+ struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set;
+
+ if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+ pr_err("invalid state\n");
+ return -EINVAL;
+ }
+
+ hdcp->an_0 = DSS_REG_R(io, reg_set->data5);
+ if (hdcp->init_data.client_id == HDCP_CLIENT_DP) {
+ udelay(1);
+ hdcp->an_0 = DSS_REG_R(io, reg_set->data5);
+ }
+
+ hdcp->an_1 = DSS_REG_R(io, reg_set->data6);
+ if (hdcp->init_data.client_id == HDCP_CLIENT_DP) {
+ udelay(1);
+ hdcp->an_1 = DSS_REG_R(io, reg_set->data6);
+ }
+
+ /* Read AKSV */
+ hdcp->aksv_0 = DSS_REG_R(io, reg_set->data3);
+ hdcp->aksv_1 = DSS_REG_R(io, reg_set->data4);
+
+ return 0;
+}
+
+/*
+ * sde_hdcp_1x_get_bksv_from_sink() - read and validate the sink's BKSV.
+ *
+ * Reads the 40-bit BKSV into current_tp.bksv, verifies it contains
+ * exactly 20 ones (HDCP 1.x requirement), then writes it to the secure
+ * HDCP block as two words (lower 32 bits, upper 8 bits).
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int sde_hdcp_1x_get_bksv_from_sink(struct sde_hdcp_1x *hdcp)
+{
+ int rc;
+ u8 *bksv = hdcp->current_tp.bksv;
+ u32 link0_bksv_0, link0_bksv_1;
+ struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set;
+ struct dss_io_data *hdcp_io = hdcp->init_data.hdcp_io;
+
+ rc = sde_hdcp_1x_read(hdcp, &hdcp->sink_addr.bksv, bksv, false);
+ if (rc) {
+ pr_err("error reading bksv from sink\n");
+ goto error;
+ }
+
+ pr_debug("bksv read: 0x%2x%2x%2x%2x%2x\n",
+ bksv[4], bksv[3], bksv[2], bksv[1], bksv[0]);
+
+ /* check there are 20 ones in BKSV */
+ if (sde_hdcp_1x_count_one(bksv, 5) != 20) {
+ pr_err("%s: BKSV doesn't have 20 1's and 20 0's\n",
+ SDE_HDCP_STATE_NAME);
+ rc = -EINVAL;
+ goto error;
+ }
+
+ /* pack bytes 0..3 into the lower word, byte 4 into the upper */
+ link0_bksv_0 = bksv[3];
+ link0_bksv_0 = (link0_bksv_0 << 8) | bksv[2];
+ link0_bksv_0 = (link0_bksv_0 << 8) | bksv[1];
+ link0_bksv_0 = (link0_bksv_0 << 8) | bksv[0];
+ link0_bksv_1 = bksv[4];
+
+ DSS_REG_W(hdcp_io, reg_set->sec_data0, link0_bksv_0);
+ DSS_REG_W(hdcp_io, reg_set->sec_data1, link0_bksv_1);
+error:
+ return rc;
+}
+
+/*
+ * sde_hdcp_1x_enable_sink_irq_hpd() - enable IRQ_HPD on capable sinks.
+ *
+ * Writes AInfo to request IRQ_HPD signalling, but only when the sink
+ * reports DPCD revision >= 1.2 and the downstream device is a
+ * repeater. A write failure is logged and otherwise ignored
+ * (best-effort; authentication proceeds without IRQ_HPD).
+ */
+static void sde_hdcp_1x_enable_sink_irq_hpd(struct sde_hdcp_1x *hdcp)
+{
+ u8 const required_major = 1, required_minor = 2;
+ u8 sink_major = 0, sink_minor = 0;
+ u8 enable_hpd_irq = 0x1;
+ int rc;
+ unsigned char revision = *hdcp->init_data.revision;
+
+ /* revision byte: major in the high nibble, minor in the low */
+ sink_major = (revision >> 4) & 0x0f;
+ sink_minor = revision & 0x0f;
+ pr_debug("revision: %d.%d\n", sink_major, sink_minor);
+
+ if ((sink_minor < required_minor) || (sink_major < required_major) ||
+ (hdcp->current_tp.ds_type != DS_REPEATER)) {
+ pr_debug("sink irq hpd not enabled\n");
+ return;
+ }
+
+ rc = sde_hdcp_1x_write(hdcp, &hdcp->sink_addr.ainfo, &enable_hpd_irq);
+ if (rc)
+ pr_debug("error writing ainfo to sink\n");
+}
+
+/*
+ * sde_hdcp_1x_verify_r0() - compare local R0 against the sink's R0'.
+ *
+ * Waits for the local R0 computation, delays per spec before reading
+ * R0' (fixed 100ms for HDMI; for DP waits on the CP_IRQ-driven
+ * sink_r0_available completion), then feeds R0' to the HW comparator
+ * and polls for the match bit, retrying the read up to 3 times.
+ *
+ * Return: 0 when R0 == R0', negative error code otherwise.
+ */
+static int sde_hdcp_1x_verify_r0(struct sde_hdcp_1x *hdcp)
+{
+ int rc, r0_retry = 3;
+ u8 buf[2];
+ u32 link0_status, timeout_count;
+ u32 const r0_read_delay_us = 1;
+ u32 const r0_read_timeout_us = r0_read_delay_us * 10;
+ struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set;
+ struct dss_io_data *io = hdcp->init_data.core_io;
+
+ if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+ pr_err("invalid state\n");
+ return -EINVAL;
+ }
+
+ /* Wait for HDCP R0 computation to be completed */
+ rc = readl_poll_timeout(io->base + reg_set->status, link0_status,
+ (link0_status & BIT(reg_set->r0_offset)) ||
+ !sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING),
+ HDCP_POLL_SLEEP_US, HDCP_POLL_TIMEOUT_US);
+ if (rc) {
+ pr_err("R0 not ready\n");
+ goto error;
+ }
+
+ /*
+ * HDCP Compliance Test case 1A-01:
+ * Wait here at least 100ms before reading R0'
+ */
+ if (hdcp->init_data.client_id == HDCP_CLIENT_HDMI) {
+ msleep(100);
+ } else {
+ if (!hdcp->sink_r0_ready) {
+ reinit_completion(&hdcp->sink_r0_available);
+ /* completed by the CP_IRQ path or by _off() */
+ timeout_count = wait_for_completion_timeout(
+ &hdcp->sink_r0_available, HZ / 2);
+
+ /* reauth request while waiting means R0' is stale */
+ if (hdcp->reauth) {
+ pr_err("sink R0 not ready\n");
+ rc = -EINVAL;
+ goto error;
+ }
+ }
+ }
+
+ do {
+ memset(buf, 0, sizeof(buf));
+
+ rc = sde_hdcp_1x_read(hdcp, &hdcp->sink_addr.r0,
+ buf, false);
+ if (rc) {
+ pr_err("error reading R0' from sink\n");
+ goto error;
+ }
+
+ pr_debug("sink R0'read: %2x%2x\n", buf[1], buf[0]);
+
+ /* hand R0' to the HW comparator, then poll the match bit */
+ DSS_REG_W(io, reg_set->data2_0, (((u32)buf[1]) << 8) | buf[0]);
+
+ rc = readl_poll_timeout(io->base + reg_set->status,
+ link0_status, (link0_status & BIT(12)) ||
+ !sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING),
+ r0_read_delay_us, r0_read_timeout_us);
+ } while (rc && --r0_retry);
+error:
+ /* a concurrent state change invalidates the result */
+ if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING))
+ rc = -EINVAL;
+
+ return rc;
+}
+
+/*
+ * sde_hdcp_1x_authentication_part1() - HDCP 1.x first authentication
+ * phase: exchange An/AKSV/BKSV with the sink and verify R0 == R0'.
+ *
+ * Runs the protocol steps in order, aborting on the first failure:
+ * enable interrupts, read BCAPS, wait for keys/An, capture An and
+ * AKSV, exchange KSVs, optionally enable sink IRQ_HPD, verify R0.
+ *
+ * Return: 0 on success, negative error code on the first failing step.
+ */
+static int sde_hdcp_1x_authentication_part1(struct sde_hdcp_1x *hdcp)
+{
+ int rc;
+
+ if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+ pr_err("invalid state\n");
+ return -EINVAL;
+ }
+
+ sde_hdcp_1x_enable_interrupts(hdcp);
+
+ rc = sde_hdcp_1x_read_bcaps(hdcp);
+ if (rc)
+ goto error;
+
+ rc = sde_hdcp_1x_wait_for_hw_ready(hdcp);
+ if (rc)
+ goto error;
+
+ rc = sde_hdcp_1x_read_an_aksv_from_hw(hdcp);
+ if (rc)
+ goto error;
+
+ rc = sde_hdcp_1x_get_bksv_from_sink(hdcp);
+ if (rc)
+ goto error;
+
+ rc = sde_hdcp_1x_send_an_aksv_to_sink(hdcp);
+ if (rc)
+ goto error;
+
+ sde_hdcp_1x_enable_sink_irq_hpd(hdcp);
+
+ rc = sde_hdcp_1x_verify_r0(hdcp);
+ if (rc)
+ goto error;
+
+ pr_info("SUCCESSFUL\n");
+
+ return 0;
+error:
+ pr_err("%s: FAILED\n", SDE_HDCP_STATE_NAME);
+
+ return rc;
+}
+
+/*
+ * sde_hdcp_1x_transfer_v_h() - copy the sink's V' hash to secure HW.
+ *
+ * Reads the five 32-bit words of V' (SHA-1 of the KSV list) from the
+ * sink in one contiguous transfer starting at v_h0 and writes each
+ * word to its secure HDCP register.
+ *
+ * The inner loop variable was renamed from reg_data to reg_val: the
+ * original 'u32 reg_data' shadowed the reg_data[] array declared
+ * above it, which is legal C but error-prone and a -Wshadow warning.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int sde_hdcp_1x_transfer_v_h(struct sde_hdcp_1x *hdcp)
+{
+ int rc = 0;
+ struct dss_io_data *io = hdcp->init_data.hdcp_io;
+ struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set;
+ struct sde_hdcp_1x_reg_data reg_data[] = {
+ {reg_set->sec_data7, &hdcp->sink_addr.v_h0},
+ {reg_set->sec_data8, &hdcp->sink_addr.v_h1},
+ {reg_set->sec_data9, &hdcp->sink_addr.v_h2},
+ {reg_set->sec_data10, &hdcp->sink_addr.v_h3},
+ {reg_set->sec_data11, &hdcp->sink_addr.v_h4},
+ };
+ struct sde_hdcp_sink_addr sink = {"V", reg_data->sink->addr};
+ u32 size = ARRAY_SIZE(reg_data);
+ u8 buf[0xFF] = {0};
+ u32 i = 0, len = 0;
+
+ if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+ pr_err("invalid state\n");
+ return -EINVAL;
+ }
+
+ /* total transfer length covers all five V' words */
+ for (i = 0; i < size; i++) {
+ struct sde_hdcp_1x_reg_data *rd = reg_data + i;
+
+ len += rd->sink->len;
+ }
+
+ sink.len = len;
+
+ rc = sde_hdcp_1x_read(hdcp, &sink, buf, false);
+ if (rc) {
+ pr_err("error reading %s\n", sink.name);
+ goto end;
+ }
+
+ for (i = 0; i < size; i++) {
+ struct sde_hdcp_1x_reg_data *rd = reg_data + i;
+ u32 reg_val;
+
+ memcpy(&reg_val, buf + (sizeof(u32) * i), sizeof(u32));
+ DSS_REG_W(io, rd->reg_id, reg_val);
+ }
+end:
+ return rc;
+}
+
+/*
+ * sde_hdcp_1x_validate_downstream() - parse and validate BSTATUS.
+ *
+ * Reads the 16-bit BSTATUS, extracts device count and cascade depth,
+ * rejects topologies where MAX_DEVS_EXCEEDED (bit 7) or
+ * MAX_CASCADE_EXCEEDED (bit 11) is set, records the topology in
+ * current_tp, and mirrors BCAPS|BSTATUS into the secure HDCP block.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int sde_hdcp_1x_validate_downstream(struct sde_hdcp_1x *hdcp)
+{
+ int rc;
+ u8 buf[2] = {0, 0};
+ u8 device_count, depth;
+ u8 max_cascade_exceeded, max_devs_exceeded;
+ u16 bstatus;
+ struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set;
+
+ if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+ pr_err("invalid state\n");
+ return -EINVAL;
+ }
+
+ rc = sde_hdcp_1x_read(hdcp, &hdcp->sink_addr.bstatus,
+ buf, false);
+ if (rc) {
+ pr_err("error reading bstatus\n");
+ goto end;
+ }
+
+ /* little-endian on the wire: buf[0] is the low byte */
+ bstatus = buf[1];
+ bstatus = (bstatus << 8) | buf[0];
+
+ device_count = bstatus & 0x7F;
+
+ pr_debug("device count %d\n", device_count);
+
+ /* Cascaded repeater depth */
+ depth = (bstatus >> 8) & 0x7;
+ pr_debug("depth %d\n", depth);
+
+ /*
+ * HDCP Compliance 1B-05:
+ * Check if no. of devices connected to repeater
+ * exceed max_devices_connected from bit 7 of Bstatus.
+ */
+ max_devs_exceeded = (bstatus & BIT(7)) >> 7;
+ if (max_devs_exceeded == 0x01) {
+ pr_err("no. of devs connected exceed max allowed\n");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ /*
+ * HDCP Compliance 1B-06:
+ * Check if no. of cascade connected to repeater
+ * exceed max_cascade_connected from bit 11 of Bstatus.
+ */
+ max_cascade_exceeded = (bstatus & BIT(11)) >> 11;
+ if (max_cascade_exceeded == 0x01) {
+ pr_err("no. of cascade connections exceed max allowed\n");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ /* Update topology information */
+ hdcp->current_tp.dev_count = device_count;
+ hdcp->current_tp.max_cascade_exceeded = max_cascade_exceeded;
+ hdcp->current_tp.max_dev_exceeded = max_devs_exceeded;
+ hdcp->current_tp.depth = depth;
+
+ DSS_REG_W(hdcp->init_data.hdcp_io,
+ reg_set->sec_data12, hdcp->bcaps | (bstatus << 8));
+end:
+ return rc;
+}
+
+/*
+ * sde_hdcp_1x_read_ksv_fifo() - read the repeater's KSV list.
+ *
+ * Sizes the transfer from the validated device count (5 bytes per
+ * KSV) and reads the fifo into current_tp.ksv_list, retrying up to
+ * 20 times on transient failures.
+ *
+ * rc is declared int (the original used u32): sde_hdcp_1x_read()
+ * returns negative error codes and this function returns int, so an
+ * unsigned rc would silently convert errors to large positive values.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int sde_hdcp_1x_read_ksv_fifo(struct sde_hdcp_1x *hdcp)
+{
+ u32 ksv_read_retry = 20, ksv_bytes;
+ int rc = 0;
+ u8 *ksv_fifo = hdcp->current_tp.ksv_list;
+
+ if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+ pr_err("invalid state\n");
+ return -EINVAL;
+ }
+
+ memset(ksv_fifo, 0, sizeof(hdcp->current_tp.ksv_list));
+
+ /* each KSV is 5 bytes long */
+ ksv_bytes = 5 * hdcp->current_tp.dev_count;
+ hdcp->sink_addr.ksv_fifo.len = ksv_bytes;
+
+ while (ksv_bytes && --ksv_read_retry) {
+ rc = sde_hdcp_1x_read(hdcp, &hdcp->sink_addr.ksv_fifo,
+ ksv_fifo, true);
+ if (rc)
+ pr_err("could not read ksv fifo (%d)\n",
+ ksv_read_retry);
+ else
+ break;
+ }
+
+ if (rc)
+ pr_err("error reading ksv_fifo\n");
+
+ return rc;
+}
+
+/*
+ * sde_hdcp_1x_write_ksv_fifo() - feed the KSV list to the SHA engine
+ * and wait for the V == V' comparison.
+ *
+ * Streams the KSV bytes into the secure SHA data register (polling
+ * for BLOCK_DONE after each 64 bytes), marks the final byte with the
+ * DONE bit, then waits for SHA completion and the V-match status bit.
+ *
+ * A zero-length fifo is rejected up front: with unsigned ksv_bytes,
+ * the original loop bound (i < ksv_bytes - 1) would underflow to
+ * 0xFFFFFFFF and the tail write would index ksv_fifo[-1].
+ *
+ * Return: 0 when V matches, negative error code otherwise.
+ */
+static int sde_hdcp_1x_write_ksv_fifo(struct sde_hdcp_1x *hdcp)
+{
+ int i, rc = 0;
+ u8 *ksv_fifo = hdcp->current_tp.ksv_list;
+ u32 ksv_bytes = hdcp->sink_addr.ksv_fifo.len;
+ struct dss_io_data *io = hdcp->init_data.core_io;
+ struct dss_io_data *sec_io = hdcp->init_data.hdcp_io;
+ struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set;
+ u32 sha_status = 0, status;
+
+ if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+ pr_err("invalid state\n");
+ return -EINVAL;
+ }
+
+ /* guard the unsigned loop bound and the ksv_fifo[ksv_bytes - 1] use */
+ if (!ksv_bytes) {
+ pr_err("no ksv data to write\n");
+ return -EINVAL;
+ }
+
+ /* reset SHA Controller */
+ DSS_REG_W(sec_io, reg_set->sec_sha_ctrl, 0x1);
+ DSS_REG_W(sec_io, reg_set->sec_sha_ctrl, 0x0);
+
+ for (i = 0; i < ksv_bytes - 1; i++) {
+ /* Write KSV byte and do not set DONE bit[0] */
+ DSS_REG_W_ND(sec_io, reg_set->sec_sha_data, ksv_fifo[i] << 16);
+
+ /*
+ * Once 64 bytes have been written, we need to poll for
+ * HDCP_SHA_BLOCK_DONE before writing any further
+ */
+ if (i && !((i + 1) % 64)) {
+ rc = readl_poll_timeout(io->base + reg_set->sha_status,
+ sha_status, (sha_status & BIT(0)) ||
+ !sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING),
+ HDCP_POLL_SLEEP_US, HDCP_POLL_TIMEOUT_US);
+ if (rc) {
+ pr_err("block not done\n");
+ goto error;
+ }
+ }
+ }
+
+ /* Write 1 to DONE bit[0] along with the final KSV byte */
+ DSS_REG_W_ND(sec_io, reg_set->sec_sha_data,
+ (ksv_fifo[ksv_bytes - 1] << 16) | 0x1);
+
+ /* Now wait for HDCP_SHA_COMP_DONE */
+ rc = readl_poll_timeout(io->base + reg_set->sha_status, sha_status,
+ (sha_status & BIT(4)) ||
+ !sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING),
+ HDCP_POLL_SLEEP_US, HDCP_POLL_TIMEOUT_US);
+ if (rc) {
+ pr_err("V computation not done\n");
+ goto error;
+ }
+
+ /* Wait for V_MATCHES */
+ rc = readl_poll_timeout(io->base + reg_set->status, status,
+ (status & BIT(reg_set->v_offset)) ||
+ !sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING),
+ HDCP_POLL_SLEEP_US, HDCP_POLL_TIMEOUT_US);
+ if (rc) {
+ pr_err("V mismatch\n");
+ rc = -EINVAL;
+ }
+error:
+ /* a concurrent state change invalidates the result */
+ if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING))
+ rc = -EINVAL;
+
+ return rc;
+}
+
+/*
+ * sde_hdcp_1x_wait_for_ksv_ready() - wait for the repeater's KSV list.
+ *
+ * Per the HDCP 1.x spec the READY indication must arrive within five
+ * seconds. HDMI polls the READY bit (BCAPS bit 5) at 100ms intervals;
+ * DP polls the CP_IRQ status register at 20ms intervals, also exiting
+ * early on ksv_ready/reauth or a state change.
+ *
+ * Return: 0 with hdcp->ksv_ready set on success, negative error code
+ * on timeout, read failure, or concurrent reauth/state change.
+ */
+static int sde_hdcp_1x_wait_for_ksv_ready(struct sde_hdcp_1x *hdcp)
+{
+ int rc, timeout;
+
+ if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+ pr_err("invalid state\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Wait until READY bit is set in BCAPS, as per HDCP specifications
+ * maximum permitted time to check for READY bit is five seconds.
+ */
+ rc = sde_hdcp_1x_read(hdcp, &hdcp->sink_addr.bcaps,
+ &hdcp->bcaps, false);
+ if (rc) {
+ pr_err("error reading bcaps\n");
+ goto error;
+ }
+
+ if (hdcp->init_data.client_id == HDCP_CLIENT_HDMI) {
+ /* 50 polls x 100ms = 5s budget */
+ timeout = 50;
+
+ while (!(hdcp->bcaps & BIT(5)) && --timeout) {
+ rc = sde_hdcp_1x_read(hdcp,
+ &hdcp->sink_addr.bcaps,
+ &hdcp->bcaps, false);
+ if (rc ||
+ !sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+ pr_err("error reading bcaps\n");
+ goto error;
+ }
+ msleep(100);
+ }
+ } else {
+ u8 cp_buf = 0;
+ struct sde_hdcp_sink_addr *sink =
+ &hdcp->sink_addr.cp_irq_status;
+
+ /* timeout doubles as the start timestamp, 0 marks expiry */
+ timeout = jiffies_to_msecs(jiffies);
+
+ while (1) {
+ rc = sde_hdcp_1x_read(hdcp, sink, &cp_buf, false);
+ if (rc)
+ goto error;
+
+ if (cp_buf & BIT(0))
+ break;
+
+ /* max timeout of 5 sec as per hdcp 1.x spec */
+ if (abs(timeout - jiffies_to_msecs(jiffies)) > 5000) {
+ timeout = 0;
+ break;
+ }
+
+ if (hdcp->ksv_ready || hdcp->reauth ||
+ !sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING))
+ break;
+
+ /* re-read after a minimum delay */
+ msleep(20);
+ }
+ }
+
+ if (!timeout || hdcp->reauth ||
+ !sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+ pr_err("DS KSV not ready\n");
+ rc = -EINVAL;
+ } else {
+ hdcp->ksv_ready = true;
+ }
+error:
+ return rc;
+}
+
+/*
+ * sde_hdcp_1x_authentication_part2() - repeater authentication phase.
+ *
+ * Validates the downstream topology (BSTATUS), reads the KSV fifo,
+ * then transfers V' and streams the KSV list through the SHA engine,
+ * retrying the V verification up to 3 times. On success the session
+ * moves to HDCP_STATE_AUTHENTICATED.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int sde_hdcp_1x_authentication_part2(struct sde_hdcp_1x *hdcp)
+{
+ int rc;
+ int v_retry = 3;
+
+ rc = sde_hdcp_1x_validate_downstream(hdcp);
+ if (rc)
+ goto error;
+
+ rc = sde_hdcp_1x_read_ksv_fifo(hdcp);
+ if (rc)
+ goto error;
+
+ do {
+ rc = sde_hdcp_1x_transfer_v_h(hdcp);
+ if (rc)
+ goto error;
+
+ /* do not proceed further if no device connected */
+ if (!hdcp->current_tp.dev_count)
+ goto error;
+
+ rc = sde_hdcp_1x_write_ksv_fifo(hdcp);
+ } while (--v_retry && rc);
+error:
+ /* note: dev_count == 0 exits with rc == 0 and counts as success */
+ if (rc) {
+ pr_err("%s: FAILED\n", SDE_HDCP_STATE_NAME);
+ } else {
+ hdcp->hdcp_state = HDCP_STATE_AUTHENTICATED;
+
+ pr_info("SUCCESSFUL\n");
+ }
+
+ return rc;
+}
+
+/*
+ * sde_hdcp_1x_cache_topology() - snapshot the authenticated topology.
+ *
+ * Copies current_tp into cached_tp and hands the cached copy to the
+ * hdcp_qseecom layer so it can be reported to userspace later.
+ */
+static void sde_hdcp_1x_cache_topology(struct sde_hdcp_1x *hdcp)
+{
+ if (!hdcp || !hdcp->init_data.core_io) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ /* struct assignment replaces the original explicit memcpy */
+ hdcp->cached_tp = hdcp->current_tp;
+ hdcp1_cache_repeater_topology((void *)&hdcp->cached_tp);
+}
+
+/* Forward the topology-change notification to the hdcp_qseecom layer. */
+static void sde_hdcp_1x_notify_topology(void)
+{
+ hdcp1_notify_topology();
+}
+
+/*
+ * sde_hdcp_1x_update_auth_status() - publish the current HDCP state.
+ *
+ * On successful authentication, caches and reports the repeater
+ * topology first. Then invokes the client's notify_status callback
+ * with the new state, unless the session is already inactive.
+ */
+static void sde_hdcp_1x_update_auth_status(struct sde_hdcp_1x *hdcp)
+{
+ void (*notify)(void *cb_data, enum sde_hdcp_states status) =
+ hdcp->init_data.notify_status;
+
+ if (sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATED)) {
+ sde_hdcp_1x_cache_topology(hdcp);
+ sde_hdcp_1x_notify_topology();
+ }
+
+ if (notify && !sde_hdcp_1x_state(HDCP_STATE_INACTIVE))
+ notify(hdcp->init_data.cb_data, hdcp->hdcp_state);
+}
+
+/*
+ * sde_hdcp_1x_auth_work() - delayed-work handler that runs the full
+ * HDCP 1.x authentication sequence.
+ *
+ * Prepares the link (SW DDC for HDMI, HPD REF timer for DP), arms
+ * encryption-on-success, then runs part 1 and -- for repeaters --
+ * waits for the KSV list and runs part 2. Any failure moves the
+ * session to HDCP_STATE_AUTH_FAIL; the final state is published via
+ * sde_hdcp_1x_update_auth_status().
+ */
+static void sde_hdcp_1x_auth_work(struct work_struct *work)
+{
+ int rc;
+ struct delayed_work *dw = to_delayed_work(work);
+ struct sde_hdcp_1x *hdcp = container_of(dw,
+ struct sde_hdcp_1x, hdcp_auth_work);
+ struct dss_io_data *io;
+
+ if (!hdcp) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+ pr_err("invalid state\n");
+ return;
+ }
+
+ /* reset per-attempt flags */
+ hdcp->sink_r0_ready = false;
+ hdcp->reauth = false;
+ hdcp->ksv_ready = false;
+
+ io = hdcp->init_data.core_io;
+ /* Enabling Software DDC for HDMI and REF timer for DP */
+ if (hdcp->init_data.client_id == HDCP_CLIENT_HDMI)
+ DSS_REG_W_ND(io, REG_HDMI_DDC_ARBITRATION, DSS_REG_R(io,
+ REG_HDMI_DDC_ARBITRATION) & ~(BIT(4)));
+ else if (hdcp->init_data.client_id == HDCP_CLIENT_DP) {
+ DSS_REG_W(io, DP_DP_HPD_REFTIMER, 0x10013);
+ }
+
+ /*
+ * program hw to enable encryption as soon as
+ * authentication is successful.
+ */
+ hdcp1_set_enc(true);
+
+ rc = sde_hdcp_1x_authentication_part1(hdcp);
+ if (rc)
+ goto end;
+
+ if (hdcp->current_tp.ds_type == DS_REPEATER) {
+ rc = sde_hdcp_1x_wait_for_ksv_ready(hdcp);
+ if (rc)
+ goto end;
+ } else {
+ /* plain receivers are done after part 1 */
+ hdcp->hdcp_state = HDCP_STATE_AUTHENTICATED;
+ goto end;
+ }
+
+ hdcp->ksv_ready = false;
+
+ rc = sde_hdcp_1x_authentication_part2(hdcp);
+ if (rc)
+ goto end;
+
+ /*
+ * Disabling software DDC before going into part3 to make sure
+ * there is no Arbitration between software and hardware for DDC
+ */
+ if (hdcp->init_data.client_id == HDCP_CLIENT_HDMI)
+ DSS_REG_W_ND(io, REG_HDMI_DDC_ARBITRATION, DSS_REG_R(io,
+ REG_HDMI_DDC_ARBITRATION) | (BIT(4)));
+end:
+ /* do not overwrite INACTIVE if _off() raced with this work */
+ if (rc && !sde_hdcp_1x_state(HDCP_STATE_INACTIVE))
+ hdcp->hdcp_state = HDCP_STATE_AUTH_FAIL;
+
+ sde_hdcp_1x_update_auth_status(hdcp);
+}
+
+/*
+ * sde_hdcp_1x_authenticate() - kick off HDCP 1.x authentication.
+ *
+ * Flushes any in-flight auth work, loads the keys, and schedules the
+ * authentication work with a 500ms delay. A key-load failure is
+ * reported through the status callback rather than the return value.
+ *
+ * @input: struct sde_hdcp_1x pointer (sde_hdcp_ops callback signature).
+ * Return: 0 when scheduled or failure was reported, -EINVAL on bad
+ * input or wrong state.
+ */
+static int sde_hdcp_1x_authenticate(void *input)
+{
+ struct sde_hdcp_1x *hdcp = (struct sde_hdcp_1x *)input;
+
+ if (!hdcp) {
+ pr_err("invalid input\n");
+ return -EINVAL;
+ }
+
+ /* let any previous attempt finish before checking the state */
+ flush_delayed_work(&hdcp->hdcp_auth_work);
+
+ if (!sde_hdcp_1x_state(HDCP_STATE_INACTIVE)) {
+ pr_err("invalid state\n");
+ return -EINVAL;
+ }
+
+ if (!sde_hdcp_1x_load_keys(input)) {
+
+ queue_delayed_work(hdcp->workq,
+ &hdcp->hdcp_auth_work, HZ/2);
+ } else {
+ hdcp->hdcp_state = HDCP_STATE_AUTH_FAIL;
+ sde_hdcp_1x_update_auth_status(hdcp);
+ }
+
+ return 0;
+} /* hdcp_1x_authenticate */
+
+/*
+ * sde_hdcp_1x_reauthenticate() - restart authentication after failure.
+ *
+ * Masks HDCP interrupts, pulses the HDCP block through reset with
+ * encryption disabled, returns the session to INACTIVE, and then
+ * re-enters sde_hdcp_1x_authenticate(). Only legal from
+ * HDCP_STATE_AUTH_FAIL.
+ *
+ * ret is declared int (the original used u32): the sde_hdcp_ops
+ * reauthenticate callback returns int and -EINVAL is negative.
+ *
+ * @input: struct sde_hdcp_1x pointer (sde_hdcp_ops callback signature).
+ * Return: 0 on success, -EINVAL on bad input or wrong state.
+ */
+static int sde_hdcp_1x_reauthenticate(void *input)
+{
+ struct sde_hdcp_1x *hdcp = (struct sde_hdcp_1x *)input;
+ struct dss_io_data *io;
+ struct sde_hdcp_reg_set *reg_set;
+ struct sde_hdcp_int_set *isr;
+ u32 reg;
+ int ret = 0;
+
+ if (!hdcp || !hdcp->init_data.core_io) {
+ pr_err("invalid input\n");
+ return -EINVAL;
+ }
+
+ io = hdcp->init_data.core_io;
+ reg_set = &hdcp->reg_set;
+ isr = &hdcp->int_set;
+
+ if (!sde_hdcp_1x_state(HDCP_STATE_AUTH_FAIL)) {
+ pr_err("invalid state\n");
+ return -EINVAL;
+ }
+
+ /* Disable HDCP interrupts */
+ DSS_REG_W(io, isr->int_reg, DSS_REG_R(io, isr->int_reg) & ~HDCP_INT_EN);
+
+ /* hold the block in reset while disabling it */
+ reg = DSS_REG_R(io, reg_set->reset);
+ DSS_REG_W(io, reg_set->reset, reg | reg_set->reset_bit);
+
+ /* Disable encryption and disable the HDCP block */
+ DSS_REG_W(io, reg_set->ctrl, 0);
+
+ DSS_REG_W(io, reg_set->reset, reg & ~reg_set->reset_bit);
+
+ hdcp->hdcp_state = HDCP_STATE_INACTIVE;
+ sde_hdcp_1x_authenticate(hdcp);
+
+ return ret;
+} /* hdcp_1x_reauthenticate */
+
+static void sde_hdcp_1x_off(void *input)
+{
+ struct sde_hdcp_1x *hdcp = (struct sde_hdcp_1x *)input;
+ struct dss_io_data *io;
+ struct sde_hdcp_reg_set *reg_set;
+ struct sde_hdcp_int_set *isr;
+ int rc = 0;
+ u32 reg;
+
+ if (!hdcp || !hdcp->init_data.core_io) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ io = hdcp->init_data.core_io;
+ reg_set = &hdcp->reg_set;
+ isr = &hdcp->int_set;
+
+ if (sde_hdcp_1x_state(HDCP_STATE_INACTIVE)) {
+ pr_err("invalid state\n");
+ return;
+ }
+
+ /*
+ * Disable HDCP interrupts.
+ * Also, need to set the state to inactive here so that any ongoing
+ * reauth works will know that the HDCP session has been turned off.
+ */
+ mutex_lock(hdcp->init_data.mutex);
+ DSS_REG_W(io, isr->int_reg,
+ DSS_REG_R(io, isr->int_reg) & ~HDCP_INT_EN);
+ hdcp->hdcp_state = HDCP_STATE_INACTIVE;
+ mutex_unlock(hdcp->init_data.mutex);
+
+ /* complete any wait pending */
+ complete_all(&hdcp->sink_r0_available);
+ complete_all(&hdcp->r0_checked);
+ /*
+ * Cancel any pending auth/reauth attempts.
+ * If one is ongoing, this will wait for it to finish.
+	 * No more reauthentication attempts will be scheduled since we
+	 * set the current state to inactive.
+ */
+ rc = cancel_delayed_work_sync(&hdcp->hdcp_auth_work);
+ if (rc)
+ pr_debug("%s: Deleted hdcp auth work\n",
+ SDE_HDCP_STATE_NAME);
+
+ hdcp1_set_enc(false);
+
+ reg = DSS_REG_R(io, reg_set->reset);
+ DSS_REG_W(io, reg_set->reset, reg | reg_set->reset_bit);
+
+ /* Disable encryption and disable the HDCP block */
+ DSS_REG_W(io, reg_set->ctrl, 0);
+
+ DSS_REG_W(io, reg_set->reset, reg & ~reg_set->reset_bit);
+
+ hdcp->sink_r0_ready = false;
+
+ pr_debug("%s: HDCP: Off\n", SDE_HDCP_STATE_NAME);
+} /* hdcp_1x_off */
+
+static int sde_hdcp_1x_isr(void *input)
+{
+ struct sde_hdcp_1x *hdcp = (struct sde_hdcp_1x *)input;
+ int rc = 0;
+ struct dss_io_data *io;
+ u32 hdcp_int_val;
+ struct sde_hdcp_reg_set *reg_set;
+ struct sde_hdcp_int_set *isr;
+
+ if (!hdcp || !hdcp->init_data.core_io) {
+ pr_err("invalid input\n");
+ rc = -EINVAL;
+ goto error;
+ }
+
+ io = hdcp->init_data.core_io;
+ reg_set = &hdcp->reg_set;
+ isr = &hdcp->int_set;
+
+ hdcp_int_val = DSS_REG_R(io, isr->int_reg);
+
+ /* Ignore HDCP interrupts if HDCP is disabled */
+ if (sde_hdcp_1x_state(HDCP_STATE_INACTIVE)) {
+ DSS_REG_W(io, isr->int_reg, hdcp_int_val | HDCP_INT_CLR);
+ return 0;
+ }
+
+ if (hdcp_int_val & isr->auth_success_int) {
+ /* AUTH_SUCCESS_INT */
+ DSS_REG_W(io, isr->int_reg,
+ (hdcp_int_val | isr->auth_success_ack));
+ pr_debug("%s: AUTH SUCCESS\n", SDE_HDCP_STATE_NAME);
+
+ if (sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING))
+ complete_all(&hdcp->r0_checked);
+ }
+
+ if (hdcp_int_val & isr->auth_fail_int) {
+ /* AUTH_FAIL_INT */
+ u32 link_status = DSS_REG_R(io, reg_set->status);
+
+ DSS_REG_W(io, isr->int_reg,
+ (hdcp_int_val | isr->auth_fail_ack));
+
+ pr_debug("%s: AUTH FAIL, LINK0_STATUS=0x%08x\n",
+ SDE_HDCP_STATE_NAME, link_status);
+
+ if (sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATED)) {
+ hdcp->hdcp_state = HDCP_STATE_AUTH_FAIL;
+ sde_hdcp_1x_update_auth_status(hdcp);
+ } else if (sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+ complete_all(&hdcp->r0_checked);
+ }
+
+ /* Clear AUTH_FAIL_INFO as well */
+ DSS_REG_W(io, isr->int_reg,
+ (hdcp_int_val | isr->auth_fail_info_ack));
+ }
+
+ if (hdcp_int_val & isr->tx_req_int) {
+ /* DDC_XFER_REQ_INT */
+ DSS_REG_W(io, isr->int_reg,
+ (hdcp_int_val | isr->tx_req_ack));
+ pr_debug("%s: DDC_XFER_REQ_INT received\n",
+ SDE_HDCP_STATE_NAME);
+ }
+
+ if (hdcp_int_val & isr->tx_req_done_int) {
+ /* DDC_XFER_DONE_INT */
+ DSS_REG_W(io, isr->int_reg,
+ (hdcp_int_val | isr->tx_req_done_ack));
+ pr_debug("%s: DDC_XFER_DONE received\n",
+ SDE_HDCP_STATE_NAME);
+ }
+
+ if (hdcp_int_val & isr->encryption_ready) {
+ /* Encryption enabled */
+ DSS_REG_W(io, isr->int_reg,
+ (hdcp_int_val | isr->encryption_ready_ack));
+ pr_debug("%s: encryption ready received\n",
+ SDE_HDCP_STATE_NAME);
+ }
+
+ if (hdcp_int_val & isr->encryption_not_ready) {
+		/* Encryption not ready */
+ DSS_REG_W(io, isr->int_reg,
+ (hdcp_int_val | isr->encryption_not_ready_ack));
+ pr_debug("%s: encryption not ready received\n",
+ SDE_HDCP_STATE_NAME);
+ }
+
+error:
+ return rc;
+}
+
+void sde_hdcp_1x_deinit(void *input)
+{
+ struct sde_hdcp_1x *hdcp = (struct sde_hdcp_1x *)input;
+
+ if (!hdcp) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ if (hdcp->workq)
+ destroy_workqueue(hdcp->workq);
+
+ kfree(hdcp);
+} /* hdcp_1x_deinit */
+
+static void sde_hdcp_1x_update_client_reg_set(struct sde_hdcp_1x *hdcp)
+{
+ if (hdcp->init_data.client_id == HDCP_CLIENT_DP) {
+ struct sde_hdcp_reg_set reg_set = HDCP_REG_SET_CLIENT_DP;
+ struct sde_hdcp_sink_addr_map sink_addr = HDCP_DP_SINK_ADDR_MAP;
+ struct sde_hdcp_int_set isr = HDCP_DP_INT_SET;
+
+ hdcp->reg_set = reg_set;
+ hdcp->sink_addr = sink_addr;
+ hdcp->int_set = isr;
+ }
+}
+
+static bool sde_hdcp_1x_is_cp_irq_raised(struct sde_hdcp_1x *hdcp)
+{
+ int ret;
+ u8 buf = 0;
+ struct sde_hdcp_sink_addr sink = {"irq", 0x201, 1};
+
+ ret = sde_hdcp_1x_read(hdcp, &sink, &buf, false);
+ if (ret)
+ pr_err("error reading irq_vector\n");
+
+ return buf & BIT(2) ? true : false;
+}
+
+static void sde_hdcp_1x_clear_cp_irq(struct sde_hdcp_1x *hdcp)
+{
+ int ret;
+ u8 buf = BIT(2);
+ struct sde_hdcp_sink_addr sink = {"irq", 0x201, 1};
+
+ ret = sde_hdcp_1x_write(hdcp, &sink, &buf);
+ if (ret)
+ pr_err("error clearing irq_vector\n");
+}
+
+static int sde_hdcp_1x_cp_irq(void *input)
+{
+ struct sde_hdcp_1x *hdcp = (struct sde_hdcp_1x *)input;
+ u8 buf = 0;
+ int ret;
+
+ if (!hdcp) {
+ pr_err("invalid input\n");
+ goto irq_not_handled;
+ }
+
+ if (!sde_hdcp_1x_is_cp_irq_raised(hdcp)) {
+ pr_debug("cp_irq not raised\n");
+ goto irq_not_handled;
+ }
+
+ ret = sde_hdcp_1x_read(hdcp, &hdcp->sink_addr.cp_irq_status,
+ &buf, false);
+ if (ret) {
+ pr_err("error reading cp_irq_status\n");
+ goto irq_not_handled;
+ }
+
+ if ((buf & BIT(2)) || (buf & BIT(3))) {
+ pr_err("%s\n",
+ buf & BIT(2) ? "LINK_INTEGRITY_FAILURE" :
+ "REAUTHENTICATION_REQUEST");
+
+ hdcp->reauth = true;
+
+ if (!sde_hdcp_1x_state(HDCP_STATE_INACTIVE))
+ hdcp->hdcp_state = HDCP_STATE_AUTH_FAIL;
+
+ complete_all(&hdcp->sink_r0_available);
+ sde_hdcp_1x_update_auth_status(hdcp);
+ } else if (buf & BIT(1)) {
+ pr_debug("R0' AVAILABLE\n");
+ hdcp->sink_r0_ready = true;
+ complete_all(&hdcp->sink_r0_available);
+ } else if ((buf & BIT(0))) {
+ pr_debug("KSVs READY\n");
+
+ hdcp->ksv_ready = true;
+ } else {
+ pr_debug("spurious interrupt\n");
+ }
+
+ sde_hdcp_1x_clear_cp_irq(hdcp);
+ return 0;
+
+irq_not_handled:
+ return -EINVAL;
+}
+
+void *sde_hdcp_1x_init(struct sde_hdcp_init_data *init_data)
+{
+ struct sde_hdcp_1x *hdcp = NULL;
+ char name[20];
+ static struct sde_hdcp_ops ops = {
+ .isr = sde_hdcp_1x_isr,
+ .cp_irq = sde_hdcp_1x_cp_irq,
+ .reauthenticate = sde_hdcp_1x_reauthenticate,
+ .authenticate = sde_hdcp_1x_authenticate,
+ .off = sde_hdcp_1x_off
+ };
+
+ if (!init_data || !init_data->core_io || !init_data->qfprom_io ||
+ !init_data->mutex || !init_data->notify_status ||
+ !init_data->workq || !init_data->cb_data) {
+ pr_err("invalid input\n");
+ goto error;
+ }
+
+ if (init_data->sec_access && !init_data->hdcp_io) {
+ pr_err("hdcp_io required\n");
+ goto error;
+ }
+
+ hdcp = kzalloc(sizeof(*hdcp), GFP_KERNEL);
+ if (!hdcp)
+ goto error;
+
+ hdcp->init_data = *init_data;
+ hdcp->ops = &ops;
+
+ snprintf(name, sizeof(name), "hdcp_1x_%d",
+ hdcp->init_data.client_id);
+
+ hdcp->workq = create_workqueue(name);
+ if (!hdcp->workq) {
+ pr_err("Error creating workqueue\n");
+ kfree(hdcp);
+ goto error;
+ }
+
+ sde_hdcp_1x_update_client_reg_set(hdcp);
+
+ INIT_DELAYED_WORK(&hdcp->hdcp_auth_work, sde_hdcp_1x_auth_work);
+
+ hdcp->hdcp_state = HDCP_STATE_INACTIVE;
+ init_completion(&hdcp->r0_checked);
+ init_completion(&hdcp->sink_r0_available);
+
+ pr_debug("HDCP module initialized. HDCP_STATE=%s\n",
+ SDE_HDCP_STATE_NAME);
+
+ return (void *)hdcp;
+
+error:
+ return NULL;
+} /* hdcp_1x_init */
+
+struct sde_hdcp_ops *sde_hdcp_1x_start(void *input)
+{
+ return ((struct sde_hdcp_1x *)input)->ops;
+}
diff --git a/drivers/gpu/drm/msm/sde_power_handle.c b/drivers/gpu/drm/msm/sde_power_handle.c
index 242cd64..28a2d4d 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.c
+++ b/drivers/gpu/drm/msm/sde_power_handle.c
@@ -630,7 +630,7 @@
}
#else
static int sde_power_data_bus_parse(struct platform_device *pdev,
- struct sde_power_data_bus_handle *pdbus)
+ struct sde_power_data_bus_handle *pdbus, const char *name)
{
return 0;
}
diff --git a/drivers/gpu/drm/msm/sde_power_handle.h b/drivers/gpu/drm/msm/sde_power_handle.h
index 78c325d..9cbffa5 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.h
+++ b/drivers/gpu/drm/msm/sde_power_handle.h
@@ -16,7 +16,7 @@
#define MAX_CLIENT_NAME_LEN 128
-#define SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA 1600000000
+#define SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA 0
#define SDE_POWER_HANDLE_DISABLE_BUS_AB_QUOTA 0
#define SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA 1600000000
#define SDE_POWER_HANDLE_DISABLE_BUS_IB_QUOTA 0
diff --git a/drivers/gpu/drm/msm/sde_rsc.c b/drivers/gpu/drm/msm/sde_rsc.c
index 9730f0b..4fc40d9 100644
--- a/drivers/gpu/drm/msm/sde_rsc.c
+++ b/drivers/gpu/drm/msm/sde_rsc.c
@@ -46,8 +46,9 @@
#define RSC_TIME_SLOT_0_NS ((SINGLE_TCS_EXECUTION_TIME * 2) + 100)
#define DEFAULT_PANEL_FPS 60
-#define DEFAULT_PANEL_JITTER 5
-#define DEFAULT_PANEL_PREFILL_LINES 16
+#define DEFAULT_PANEL_JITTER_NUMERATOR 2
+#define DEFAULT_PANEL_JITTER_DENOMINATOR 1
+#define DEFAULT_PANEL_PREFILL_LINES 25
#define DEFAULT_PANEL_VTOTAL (480 + DEFAULT_PANEL_PREFILL_LINES)
#define TICKS_IN_NANO_SECOND 1000000000
@@ -57,6 +58,13 @@
#define TRY_CLK_MODE_SWITCH 0xFFFE
#define STATE_UPDATE_NOT_ALLOWED 0xFFFD
+/**
+ * Expected primary command mode panel vsync ranges
+ * Note: update if a primary panel is expected to run lower than 60fps
+ */
+#define PRIMARY_VBLANK_MIN_US (18 * 1000)
+#define PRIMARY_VBLANK_MAX_US (20 * 1000)
+
static struct sde_rsc_priv *rsc_prv_list[MAX_RSC_COUNT];
/**
@@ -320,21 +328,25 @@
/* calculate for 640x480 60 fps resolution by default */
if (!rsc->cmd_config.fps)
rsc->cmd_config.fps = DEFAULT_PANEL_FPS;
- if (!rsc->cmd_config.jitter)
- rsc->cmd_config.jitter = DEFAULT_PANEL_JITTER;
+ if (!rsc->cmd_config.jitter_numer)
+ rsc->cmd_config.jitter_numer = DEFAULT_PANEL_JITTER_NUMERATOR;
+ if (!rsc->cmd_config.jitter_denom)
+ rsc->cmd_config.jitter_denom = DEFAULT_PANEL_JITTER_DENOMINATOR;
if (!rsc->cmd_config.vtotal)
rsc->cmd_config.vtotal = DEFAULT_PANEL_VTOTAL;
if (!rsc->cmd_config.prefill_lines)
rsc->cmd_config.prefill_lines = DEFAULT_PANEL_PREFILL_LINES;
- pr_debug("frame fps:%d jitter:%d vtotal:%d prefill lines:%d\n",
- rsc->cmd_config.fps, rsc->cmd_config.jitter,
- rsc->cmd_config.vtotal, rsc->cmd_config.prefill_lines);
+ pr_debug("frame fps:%d jitter_numer:%d jitter_denom:%d vtotal:%d prefill lines:%d\n",
+ rsc->cmd_config.fps, rsc->cmd_config.jitter_numer,
+ rsc->cmd_config.jitter_denom, rsc->cmd_config.vtotal,
+ rsc->cmd_config.prefill_lines);
/* 1 nano second */
frame_time_ns = TICKS_IN_NANO_SECOND;
frame_time_ns = div_u64(frame_time_ns, rsc->cmd_config.fps);
- frame_jitter = frame_time_ns * rsc->cmd_config.jitter;
+ frame_jitter = frame_time_ns * rsc->cmd_config.jitter_numer;
+ frame_jitter = div_u64(frame_jitter, rsc->cmd_config.jitter_denom);
/* convert it to percentage */
frame_jitter = div_u64(frame_jitter, 100);
@@ -477,8 +489,7 @@
/* wait for vsync for vid to cmd state switch and config update */
if (!rc && (rsc->current_state == SDE_RSC_VID_STATE ||
rsc->current_state == SDE_RSC_CMD_STATE))
- drm_wait_one_vblank(rsc->master_drm,
- rsc->primary_client->crtc_id);
+ usleep_range(PRIMARY_VBLANK_MIN_US, PRIMARY_VBLANK_MAX_US);
end:
return rc;
}
@@ -502,8 +513,7 @@
/* wait for vsync for cmd to clk state switch */
if (!rc && rsc->primary_client &&
(rsc->current_state == SDE_RSC_CMD_STATE))
- drm_wait_one_vblank(rsc->master_drm,
- rsc->primary_client->crtc_id);
+ usleep_range(PRIMARY_VBLANK_MIN_US, PRIMARY_VBLANK_MAX_US);
end:
return rc;
}
@@ -532,8 +542,7 @@
/* wait for vsync for cmd to vid state switch */
if (!rc && rsc->primary_client &&
(rsc->current_state == SDE_RSC_CMD_STATE))
- drm_wait_one_vblank(rsc->master_drm,
- rsc->primary_client->crtc_id);
+ usleep_range(PRIMARY_VBLANK_MIN_US, PRIMARY_VBLANK_MAX_US);
end:
return rc;
@@ -573,7 +582,7 @@
return -EINVAL;
mutex_lock(&rsc->client_lock);
- SDE_EVT32(caller_client->id, caller_client->current_state,
+ SDE_EVT32_VERBOSE(caller_client->id, caller_client->current_state,
state, rsc->current_state, SDE_EVTLOG_FUNC_ENTRY);
caller_client->crtc_id = crtc_id;
caller_client->current_state = state;
@@ -749,8 +758,9 @@
rsc->timer_config.rsc_time_slot_0_ns);
seq_printf(s, "rsc time slot 1(ns):%d\n",
rsc->timer_config.rsc_time_slot_1_ns);
- seq_printf(s, "frame fps:%d jitter:%d vtotal:%d prefill lines:%d\n",
- rsc->cmd_config.fps, rsc->cmd_config.jitter,
+ seq_printf(s, "frame fps:%d jitter_numer:%d jitter_denom:%d vtotal:%d prefill lines:%d\n",
+ rsc->cmd_config.fps, rsc->cmd_config.jitter_numer,
+ rsc->cmd_config.jitter_denom,
rsc->cmd_config.vtotal, rsc->cmd_config.prefill_lines);
seq_puts(s, "\n");
diff --git a/drivers/gpu/drm/msm/sde_rsc_hw.c b/drivers/gpu/drm/msm/sde_rsc_hw.c
index e5ae0ad..aa8fa01 100644
--- a/drivers/gpu/drm/msm/sde_rsc_hw.c
+++ b/drivers/gpu/drm/msm/sde_rsc_hw.c
@@ -18,12 +18,14 @@
#include <linux/delay.h>
#include "sde_rsc_priv.h"
+#include "sde_dbg.h"
/* display rsc offset */
#define SDE_RSCC_PDC_SEQ_START_ADDR_REG_OFFSET_DRV0 0x020
#define SDE_RSCC_PDC_MATCH_VALUE_LO_REG_OFFSET_DRV0 0x024
#define SDE_RSCC_PDC_MATCH_VALUE_HI_REG_OFFSET_DRV0 0x028
#define SDE_RSCC_PDC_SLAVE_ID_DRV0 0x02c
+#define SDE_RSCC_SEQ_PROGRAM_COUNTER 0x408
#define SDE_RSCC_SEQ_CFG_BR_ADDR_0_DRV0 0x410
#define SDE_RSCC_SEQ_CFG_BR_ADDR_1_DRV0 0x414
#define SDE_RSCC_SEQ_MEM_0_DRV0 0x600
@@ -294,104 +296,12 @@
return 0;
}
-int sde_rsc_mode2_entry(struct sde_rsc_priv *rsc)
-{
- int rc;
- int count, wrapper_status;
- unsigned long reg;
-
- if (rsc->power_collapse_block)
- return -EINVAL;
-
- rc = regulator_set_mode(rsc->fs, REGULATOR_MODE_FAST);
- if (rc) {
- pr_err("vdd reg fast mode set failed rc:%d\n", rc);
- return rc;
- }
-
- rc = -EBUSY;
-
- rsc_event_trigger(rsc, SDE_RSC_EVENT_PRE_CORE_PC);
-
- /* update qtimers to high during clk & video mode state */
- if ((rsc->current_state == SDE_RSC_VID_STATE) ||
- (rsc->current_state == SDE_RSC_CLK_STATE)) {
- dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F0_QTMR_V1_CNTP_CVAL_HI,
- 0xffffffff, rsc->debug_mode);
- dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F0_QTMR_V1_CNTP_CVAL_LO,
- 0xffffffff, rsc->debug_mode);
- }
-
- wrapper_status = dss_reg_r(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
- rsc->debug_mode);
- wrapper_status |= BIT(3);
- wrapper_status |= BIT(0);
- dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
- wrapper_status, rsc->debug_mode);
-
- /**
- * force busy and idle during clk & video mode state because it
- * is trying to entry in mode-2 without turning on the vysnc.
- */
- if ((rsc->current_state == SDE_RSC_VID_STATE) ||
- (rsc->current_state == SDE_RSC_CLK_STATE)) {
- dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
- BIT(0) | BIT(1), rsc->debug_mode);
- wmb(); /* force busy gurantee */
- dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
- BIT(0) | BIT(9), rsc->debug_mode);
- }
-
- /* make sure that mode-2 is triggered before wait*/
- wmb();
-
- /* check for sequence running status before exiting */
- for (count = MAX_CHECK_LOOPS; count > 0; count--) {
- if (!regulator_is_enabled(rsc->fs)) {
- rc = 0;
- break;
- }
- usleep_range(1, 2);
- }
-
- if (rc) {
- pr_err("vdd fs is still enabled\n");
- goto end;
- } else {
- rc = -EINVAL;
- /* this wait is required to turn off the rscc clocks */
- for (count = MAX_CHECK_LOOPS; count > 0; count--) {
- reg = dss_reg_r(&rsc->wrapper_io,
- SDE_RSCC_PWR_CTRL, rsc->debug_mode);
-			if (test_bit(POWER_CTRL_BIT_12, &reg)) {
- rc = 0;
- break;
- }
- usleep_range(1, 2);
- }
- }
-
- if ((rsc->current_state == SDE_RSC_VID_STATE) ||
- (rsc->current_state == SDE_RSC_CLK_STATE)) {
- dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
- BIT(0) | BIT(8), rsc->debug_mode);
- wmb(); /* force busy on vsync */
- }
-
- rsc_event_trigger(rsc, SDE_RSC_EVENT_POST_CORE_PC);
-
- return 0;
-
-end:
- rsc_event_trigger(rsc, SDE_RSC_EVENT_POST_CORE_RESTORE);
-
- return rc;
-}
-
-int sde_rsc_mode2_exit(struct sde_rsc_priv *rsc, enum sde_rsc_state state)
+static int sde_rsc_mode2_exit(struct sde_rsc_priv *rsc,
+ enum sde_rsc_state state)
{
int rc = -EBUSY;
int count, reg;
+ unsigned long power_status;
rsc_event_trigger(rsc, SDE_RSC_EVENT_PRE_CORE_RESTORE);
@@ -428,9 +338,14 @@
/* make sure that mode-2 exit before wait*/
wmb();
- /* check for sequence running status before exiting */
+ /* this wait is required to make sure that gdsc is powered on */
for (count = MAX_CHECK_LOOPS; count > 0; count--) {
- if (regulator_is_enabled(rsc->fs)) {
+ power_status = dss_reg_r(&rsc->wrapper_io,
+ SDE_RSCC_PWR_CTRL, rsc->debug_mode);
+ if (!test_bit(POWER_CTRL_BIT_12, &power_status)) {
+ reg = dss_reg_r(&rsc->drv_io,
+ SDE_RSCC_SEQ_PROGRAM_COUNTER, rsc->debug_mode);
+ SDE_EVT32_VERBOSE(count, reg, power_status);
rc = 0;
break;
}
@@ -450,6 +365,89 @@
return rc;
}
+static int sde_rsc_mode2_entry(struct sde_rsc_priv *rsc)
+{
+ int rc;
+ int count, wrapper_status;
+ unsigned long reg;
+
+ if (rsc->power_collapse_block)
+ return -EINVAL;
+
+ rc = regulator_set_mode(rsc->fs, REGULATOR_MODE_FAST);
+ if (rc) {
+ pr_err("vdd reg fast mode set failed rc:%d\n", rc);
+ return rc;
+ }
+
+ rsc_event_trigger(rsc, SDE_RSC_EVENT_PRE_CORE_PC);
+
+ /* update qtimers to high during clk & video mode state */
+ if ((rsc->current_state == SDE_RSC_VID_STATE) ||
+ (rsc->current_state == SDE_RSC_CLK_STATE)) {
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F0_QTMR_V1_CNTP_CVAL_HI,
+ 0xffffffff, rsc->debug_mode);
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F0_QTMR_V1_CNTP_CVAL_LO,
+ 0xffffffff, rsc->debug_mode);
+ }
+
+ wrapper_status = dss_reg_r(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
+ rsc->debug_mode);
+ wrapper_status |= BIT(3);
+ wrapper_status |= BIT(0);
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
+ wrapper_status, rsc->debug_mode);
+
+ /**
+ * force busy and idle during clk & video mode state because it
+	 * is trying to enter mode-2 without turning on the vsync.
+ */
+ if ((rsc->current_state == SDE_RSC_VID_STATE) ||
+ (rsc->current_state == SDE_RSC_CLK_STATE)) {
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
+ BIT(0) | BIT(1), rsc->debug_mode);
+		wmb(); /* force busy guarantee */
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
+ BIT(0) | BIT(9), rsc->debug_mode);
+ }
+
+ /* make sure that mode-2 is triggered before wait*/
+ wmb();
+
+ rc = -EBUSY;
+ /* this wait is required to turn off the rscc clocks */
+ for (count = MAX_CHECK_LOOPS; count > 0; count--) {
+ reg = dss_reg_r(&rsc->wrapper_io,
+ SDE_RSCC_PWR_CTRL, rsc->debug_mode);
+		if (test_bit(POWER_CTRL_BIT_12, &reg)) {
+ rc = 0;
+ break;
+ }
+ usleep_range(10, 100);
+ }
+
+ if (rc) {
+ pr_err("mdss gdsc power down failed rc:%d\n", rc);
+ goto end;
+ }
+
+ if ((rsc->current_state == SDE_RSC_VID_STATE) ||
+ (rsc->current_state == SDE_RSC_CLK_STATE)) {
+ dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
+ BIT(0) | BIT(8), rsc->debug_mode);
+ wmb(); /* force busy on vsync */
+ }
+
+ rsc_event_trigger(rsc, SDE_RSC_EVENT_POST_CORE_PC);
+
+ return 0;
+
+end:
+ sde_rsc_mode2_exit(rsc, rsc->current_state);
+
+ return rc;
+}
+
static int sde_rsc_state_update(struct sde_rsc_priv *rsc,
enum sde_rsc_state state)
{
@@ -678,7 +676,7 @@
break;
case VSYNC_ENABLE:
- reg = BIT(8) | ((mode & 0x7) < 10);
+ reg = BIT(8) | ((mode & 0x7) << 10);
dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_DEBUG_BUS,
reg, rsc->debug_mode);
break;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index afbf557..2c2b86d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -24,6 +24,7 @@
*
*/
+#include <acpi/video.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
@@ -358,6 +359,57 @@
} \
} while(0)
+static void
+nouveau_display_hpd_work(struct work_struct *work)
+{
+ struct nouveau_drm *drm = container_of(work, typeof(*drm), hpd_work);
+
+ pm_runtime_get_sync(drm->dev->dev);
+
+ drm_helper_hpd_irq_event(drm->dev);
+ /* enable polling for external displays */
+ drm_kms_helper_poll_enable(drm->dev);
+
+ pm_runtime_mark_last_busy(drm->dev->dev);
+ pm_runtime_put_sync(drm->dev->dev);
+}
+
+#ifdef CONFIG_ACPI
+
+/*
+ * Hans de Goede: This define belongs in acpi/video.h, I've submitted a patch
+ * to the acpi subsys to move it there from drivers/acpi/acpi_video.c .
+ * This should be dropped once that is merged.
+ */
+#ifndef ACPI_VIDEO_NOTIFY_PROBE
+#define ACPI_VIDEO_NOTIFY_PROBE 0x81
+#endif
+
+static int
+nouveau_display_acpi_ntfy(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct nouveau_drm *drm = container_of(nb, typeof(*drm), acpi_nb);
+ struct acpi_bus_event *info = data;
+
+ if (!strcmp(info->device_class, ACPI_VIDEO_CLASS)) {
+ if (info->type == ACPI_VIDEO_NOTIFY_PROBE) {
+ /*
+ * This may be the only indication we receive of a
+ * connector hotplug on a runtime suspended GPU,
+ * schedule hpd_work to check.
+ */
+ schedule_work(&drm->hpd_work);
+
+ /* acpi-video should not generate keypresses for this */
+ return NOTIFY_BAD;
+ }
+ }
+
+ return NOTIFY_DONE;
+}
+#endif
+
int
nouveau_display_init(struct drm_device *dev)
{
@@ -370,9 +422,6 @@
if (ret)
return ret;
- /* enable polling for external displays */
- drm_kms_helper_poll_enable(dev);
-
/* enable hotplug interrupts */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct nouveau_connector *conn = nouveau_connector(connector);
@@ -537,6 +586,12 @@
}
nouveau_backlight_init(dev);
+ INIT_WORK(&drm->hpd_work, nouveau_display_hpd_work);
+#ifdef CONFIG_ACPI
+ drm->acpi_nb.notifier_call = nouveau_display_acpi_ntfy;
+ register_acpi_notifier(&drm->acpi_nb);
+#endif
+
return 0;
vblank_err:
@@ -552,6 +607,9 @@
{
struct nouveau_display *disp = nouveau_display(dev);
+#ifdef CONFIG_ACPI
+ unregister_acpi_notifier(&nouveau_drm(dev)->acpi_nb);
+#endif
nouveau_backlight_exit(dev);
nouveau_display_vblank_fini(dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 3100fd88..42829a9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -483,6 +483,9 @@
pm_runtime_allow(dev->dev);
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put(dev->dev);
+ } else {
+ /* enable polling for external displays */
+ drm_kms_helper_poll_enable(dev);
}
return 0;
@@ -761,7 +764,7 @@
pci_set_master(pdev);
ret = nouveau_do_resume(drm_dev, true);
- drm_kms_helper_poll_enable(drm_dev);
+
/* do magic */
nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 822a021..1e7f1e3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -37,6 +37,8 @@
* - implemented limited ABI16/NVIF interop
*/
+#include <linux/notifier.h>
+
#include <nvif/client.h>
#include <nvif/device.h>
#include <nvif/ioctl.h>
@@ -161,6 +163,12 @@
struct nvbios vbios;
struct nouveau_display *display;
struct backlight_device *backlight;
+ struct work_struct hpd_work;
+ struct work_struct fbcon_work;
+ int fbcon_new_state;
+#ifdef CONFIG_ACPI
+ struct notifier_block acpi_nb;
+#endif
/* power management */
struct nouveau_hwmon *hwmon;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 9f56927..2b79e27 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -491,19 +491,43 @@
.fb_probe = nouveau_fbcon_create,
};
+static void
+nouveau_fbcon_set_suspend_work(struct work_struct *work)
+{
+ struct nouveau_drm *drm = container_of(work, typeof(*drm), fbcon_work);
+ int state = READ_ONCE(drm->fbcon_new_state);
+
+ if (state == FBINFO_STATE_RUNNING)
+ pm_runtime_get_sync(drm->dev->dev);
+
+ console_lock();
+ if (state == FBINFO_STATE_RUNNING)
+ nouveau_fbcon_accel_restore(drm->dev);
+ drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
+ if (state != FBINFO_STATE_RUNNING)
+ nouveau_fbcon_accel_save_disable(drm->dev);
+ console_unlock();
+
+ if (state == FBINFO_STATE_RUNNING) {
+ pm_runtime_mark_last_busy(drm->dev->dev);
+ pm_runtime_put_sync(drm->dev->dev);
+ }
+}
+
void
nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- if (drm->fbcon) {
- console_lock();
- if (state == FBINFO_STATE_RUNNING)
- nouveau_fbcon_accel_restore(dev);
- drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
- if (state != FBINFO_STATE_RUNNING)
- nouveau_fbcon_accel_save_disable(dev);
- console_unlock();
- }
+
+ if (!drm->fbcon)
+ return;
+
+ drm->fbcon_new_state = state;
+ /* Since runtime resume can happen as a result of a sysfs operation,
+ * it's possible we already have the console locked. So handle fbcon
+	 * init/deinit from a separate work thread
+ */
+ schedule_work(&drm->fbcon_work);
}
int
@@ -524,6 +548,7 @@
fbcon->dev = dev;
drm->fbcon = fbcon;
+ INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work);
drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 64c4ce7..75e1f09 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -100,6 +100,7 @@
struct nouveau_bo *bo;
struct nouveau_bo *bo_gart;
u32 *suspend;
+ struct mutex mutex;
};
u64 nv84_fence_crtc(struct nouveau_channel *, int);
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c
index 08f9c6f..1fba386 100644
--- a/drivers/gpu/drm/nouveau/nouveau_usif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_usif.c
@@ -313,7 +313,8 @@
if (!(ret = nvif_unpack(-ENOSYS, &data, &size, argv->v0, 0, 0, true))) {
/* block access to objects not created via this interface */
owner = argv->v0.owner;
- if (argv->v0.object == 0ULL)
+ if (argv->v0.object == 0ULL &&
+ argv->v0.type != NVIF_IOCTL_V0_DEL)
argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */
else
argv->v0.owner = NVDRM_OBJECT_USIF;
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 18bde9d..90a5dd6 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -121,8 +121,10 @@
}
nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence);
+ mutex_lock(&priv->mutex);
nouveau_bo_vma_del(priv->bo, &fctx->vma_gart);
nouveau_bo_vma_del(priv->bo, &fctx->vma);
+ mutex_unlock(&priv->mutex);
nouveau_fence_context_del(&fctx->base);
chan->fence = NULL;
nouveau_fence_context_free(&fctx->base);
@@ -148,11 +150,13 @@
fctx->base.sync32 = nv84_fence_sync32;
fctx->base.sequence = nv84_fence_read(chan);
+ mutex_lock(&priv->mutex);
ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma);
if (ret == 0) {
ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm,
&fctx->vma_gart);
}
+ mutex_unlock(&priv->mutex);
/* map display semaphore buffers into channel's vm */
for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
@@ -232,6 +236,8 @@
priv->base.context_base = fence_context_alloc(priv->base.contexts);
priv->base.uevent = true;
+ mutex_init(&priv->mutex);
+
/* Use VRAM if there is any ; otherwise fallback to system memory */
domain = drm->device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM :
/*
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 56bb758..7bb1e53 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -30,6 +30,7 @@
#include "radeon_audio.h"
#include "atom.h"
#include <linux/backlight.h>
+#include <linux/dmi.h>
extern int atom_debug;
@@ -2183,9 +2184,17 @@
goto assigned;
}
- /* on DCE32 and encoder can driver any block so just crtc id */
+ /*
+ * On DCE32 any encoder can drive any block so usually just use crtc id,
+ * but Apple thinks different at least on iMac10,1, so there use linkb,
+ * otherwise the internal eDP panel will stay dark.
+ */
if (ASIC_IS_DCE32(rdev)) {
- enc_idx = radeon_crtc->crtc_id;
+ if (dmi_match(DMI_PRODUCT_NAME, "iMac10,1"))
+ enc_idx = (dig->linkb) ? 1 : 0;
+ else
+ enc_idx = radeon_crtc->crtc_id;
+
goto assigned;
}
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index ea36dc4..2481049 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -782,6 +782,12 @@
if (r600_dpm_get_vrefresh(rdev) > 120)
return true;
+ /* disable mclk switching if the refresh is >120Hz, even if the
+ * blanking period would allow it
+ */
+ if (r600_dpm_get_vrefresh(rdev) > 120)
+ return true;
+
if (vblank_time < switch_limit)
return true;
else
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 432480f..3178ba0 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -3393,6 +3393,13 @@
rdev->pdev->subsystem_vendor == 0x103c &&
rdev->pdev->subsystem_device == 0x280a)
return;
+	/* quirk for rs4xx Toshiba Satellite L20-183 laptop to make it resume
+ * - it hangs on resume inside the dynclk 1 table.
+ */
+ if (rdev->family == CHIP_RS400 &&
+ rdev->pdev->subsystem_vendor == 0x1179 &&
+ rdev->pdev->subsystem_device == 0xff31)
+ return;
/* DYN CLK 1 */
table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 621af06..3b21ca5 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -136,6 +136,10 @@
* https://bugzilla.kernel.org/show_bug.cgi?id=51381
*/
{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
+ /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
+ * https://bugs.freedesktop.org/show_bug.cgi?id=101491
+ */
+ { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
/* macbook pro 8.2 */
{ PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
{ 0, 0, 0, 0, 0 },
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 3333e8a..b75d809 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -115,6 +115,8 @@
MODULE_FIRMWARE("radeon/hainan_smc.bin");
MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
+MODULE_FIRMWARE("radeon/si58_mc.bin");
+
static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
static void si_pcie_gen3_enable(struct radeon_device *rdev);
static void si_program_aspm(struct radeon_device *rdev);
@@ -1650,6 +1652,7 @@
int err;
int new_fw = 0;
bool new_smc = false;
+ bool si58_fw = false;
DRM_DEBUG("\n");
@@ -1742,6 +1745,10 @@
default: BUG();
}
+ /* this memory configuration requires special firmware */
+ if (((RREG32(MC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
+ si58_fw = true;
+
DRM_INFO("Loading %s Microcode\n", new_chip_name);
snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name);
@@ -1845,7 +1852,10 @@
}
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
+ if (si58_fw)
+ snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
+ else
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
if (err) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index c18fc31..94983e8 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1343,7 +1343,6 @@
mem_type);
return ret;
}
- fence_put(man->move);
man->use_type = false;
man->has_type = false;
@@ -1355,6 +1354,9 @@
ret = (*man->func->takedown)(man);
}
+ fence_put(man->move);
+ man->move = NULL;
+
return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 3f6704c..ec9023b 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -313,6 +313,14 @@
goto out;
}
+ /* If this object was partially constructed but CMA allocation
+ * had failed, just free it.
+ */
+ if (!bo->base.vaddr) {
+ vc4_bo_destroy(bo);
+ goto out;
+ }
+
cache_list = vc4_get_cache_list_for_size(dev, gem_bo->size);
if (!cache_list) {
vc4_bo_destroy(bo);
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 1483dae..6f66b73 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -81,8 +81,10 @@
return -ENOMEM;
size = roundup(size, PAGE_SIZE);
ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, size);
- if (ret != 0)
+ if (ret != 0) {
+ kfree(bo);
return ret;
+ }
bo->dumb = false;
virtio_gpu_init_ttm_placement(bo, pinned);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
index 13db8a2..1f013d4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -321,6 +321,7 @@
list_for_each_entry_safe(entry, next, &man->list, head)
vmw_cmdbuf_res_free(man, entry);
+ drm_ht_remove(&man->resources);
kfree(man);
}
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index f4552b6..431a67e 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -720,10 +720,25 @@
/* VBIF registers */
#define A6XX_VBIF_VERSION 0x3000
+#define A6XX_VBIF_CLKON 0x3001
+#define A6XX_VBIF_CLKON_FORCE_ON_TESTBUS_MASK 0x1
+#define A6XX_VBIF_CLKON_FORCE_ON_TESTBUS_SHIFT 0x1
#define A6XX_VBIF_GATE_OFF_WRREQ_EN 0x302A
#define A6XX_VBIF_XIN_HALT_CTRL0 0x3080
#define A6XX_VBIF_XIN_HALT_CTRL0_MASK 0xF
#define A6XX_VBIF_XIN_HALT_CTRL1 0x3081
+#define A6XX_VBIF_TEST_BUS_OUT_CTRL 0x3084
+#define A6XX_VBIF_TEST_BUS_OUT_CTRL_EN_MASK 0x1
+#define A6XX_VBIF_TEST_BUS_OUT_CTRL_EN_SHIFT 0x0
+#define A6XX_VBIF_TEST_BUS1_CTRL0 0x3085
+#define A6XX_VBIF_TEST_BUS1_CTRL1 0x3086
+#define A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL_MASK 0xF
+#define A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL_SHIFT 0x0
+#define A6XX_VBIF_TEST_BUS2_CTRL0 0x3087
+#define A6XX_VBIF_TEST_BUS2_CTRL1 0x3088
+#define A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_MASK 0x1FF
+#define A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_SHIFT 0x0
+#define A6XX_VBIF_TEST_BUS_OUT 0x308C
#define A6XX_VBIF_PERF_CNT_SEL0 0x30d0
#define A6XX_VBIF_PERF_CNT_SEL1 0x30d1
#define A6XX_VBIF_PERF_CNT_SEL2 0x30d2
@@ -801,6 +816,7 @@
#define A6XX_GMU_DCVS_PERF_SETTING 0x1CBFD
#define A6XX_GMU_DCVS_BW_SETTING 0x1CBFE
#define A6XX_GMU_DCVS_RETURN 0x1CBFF
+#define A6XX_GMU_SYS_BUS_CONFIG 0x1F40F
#define A6XX_GMU_CM3_SYSRESET 0x1F800
#define A6XX_GMU_CM3_BOOT_CONFIG 0x1F801
#define A6XX_GMU_CM3_FW_BUSY 0x1F81A
@@ -859,11 +875,46 @@
#define A6XX_GMU_HOST2GMU_INTR_INFO_1 0x1F99C
#define A6XX_GMU_HOST2GMU_INTR_INFO_2 0x1F99D
#define A6XX_GMU_HOST2GMU_INTR_INFO_3 0x1F99E
+#define A6XX_GMU_GENERAL_1 0x1F9C6
#define A6XX_GMU_GENERAL_7 0x1F9CC
/* ISENSE registers */
#define A6XX_GMU_ISENSE_CTRL 0x1F95D
#define A6XX_GPU_CS_ENABLE_REG 0x23120
+#define A6XX_GPU_GMU_CX_GMU_ISENSE_CTRL 0x1f95d
+#define A6XX_GPU_CS_AMP_CALIBRATION_CONTROL3 0x22d78
+#define A6XX_GPU_CS_AMP_CALIBRATION_CONTROL2 0x22d58
+#define A6XX_GPU_CS_A_SENSOR_CTRL_0 0x22d80
+#define A6XX_GPU_CS_A_SENSOR_CTRL_2 0x422da
+#define A6XX_GPU_CS_SENSOR_GENERAL_STATUS 0x2301a
+#define A6XX_GPU_CS_AMP_CALIBRATION_CONTROL1 0x23157
+#define A6XX_GPU_CS_SENSOR_GENERAL_STATUS 0x2301a
+#define A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_0 0x2301d
+#define A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_2 0x2301f
+#define A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_4 0x23021
+#define A6XX_GPU_CS_AMP_CALIBRATION_DONE 0x23165
+#define A6XX_GPU_CS_AMP_PERIOD_CTRL 0x2316d
+#define A6XX_GPU_CS_AMP_CALIBRATION_DONE 0x23165
+
+#define CS_PWR_ON_STATUS (10)
+#define AMP_SW_WRM_TRIM_START (24)
+#define AMP_TRIM_TIMER (6)
+#define AMP_SW_TRIM_START (0)
+#define SS_AMPTRIM_DONE (11)
+#define AMP_OFFSET_CHECK_MIN_ERR (1)
+#define AMP_OFFSET_CHECK_MAX_ERR (2)
+#define AMP_OUT_OF_RANGE_ERR (4)
+#define TRIM_CNT_VALUE (1)
+#define RUNTIME_CNT_VALUE (16)
+#define TRIM_ENABLE (0)
+
+#define AMP_ERR (BIT(AMP_OFFSET_CHECK_MIN_ERR) || \
+ BIT(AMP_OFFSET_CHECK_MAX_ERR) || \
+ BIT(AMP_OUT_OF_RANGE_ERR))
+
+/* LM registers */
+#define A6XX_GPU_GMU_CX_GMU_PWR_THRESHOLD 0x1F94D
+
#define A6XX_GMU_AO_INTERRUPT_EN 0x23B03
#define A6XX_GMU_AO_HOST_INTERRUPT_CLR 0x23B04
@@ -875,6 +926,7 @@
#define A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS 0x23B0C
#define A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2 0x23B0D
#define A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK 0x23B0E
+#define A6XX_GMU_AO_AHB_FENCE_CTRL 0x23B10
#define A6XX_GMU_AHB_FENCE_STATUS 0x23B13
#define A6XX_GMU_RBBM_INT_UNMASKED_STATUS 0x23B15
#define A6XX_GMU_AO_SPARE_CNTL 0x23B16
@@ -889,6 +941,7 @@
/* GPUCC registers */
#define A6XX_GPU_CC_GX_GDSCR 0x24403
+#define A6XX_GPU_CC_GX_DOMAIN_MISC 0x24542
/* GPU RSC sequencer registers */
#define A6XX_RSCC_PDC_SEQ_START_ADDR 0x23408
diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h
index 86a1e88..7dda62a 100644
--- a/drivers/gpu/msm/adreno-gpulist.h
+++ b/drivers/gpu/msm/adreno-gpulist.h
@@ -327,8 +327,7 @@
.minor = 0,
.patchid = ANY_ID,
.features = ADRENO_64BIT | ADRENO_RPMH |
- ADRENO_GPMU | ADRENO_CONTENT_PROTECTION |
- ADRENO_SPTP_PC,
+ ADRENO_GPMU | ADRENO_CONTENT_PROTECTION | ADRENO_LM,
.sqefw_name = "a630_sqe.fw",
.zap_name = "a630_zap",
.gpudev = &adreno_a6xx_gpudev,
@@ -341,4 +340,21 @@
.gpmu_tsens = 0x000C000D,
.max_power = 5448,
},
+ {
+ .gpurev = ADRENO_REV_A615,
+ .core = 6,
+ .major = 1,
+ .minor = 5,
+ .patchid = ANY_ID,
+ .features = ADRENO_64BIT | ADRENO_RPMH,
+ .sqefw_name = "a630_sqe.fw",
+ .zap_name = "a615_zap",
+ .gpudev = &adreno_a6xx_gpudev,
+ .gmem_size = SZ_512K,
+ .num_protected_regs = 0x20,
+ .busy_mask = 0xFFFFFFFE,
+ .gpmufw_name = "a630_gmu.bin",
+ .gpmu_major = 0x0,
+ .gpmu_minor = 0x005,
+ },
};
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 3672273..6426363 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -2342,6 +2342,11 @@
return ret;
}
+ /* Clear the busy_data stats - we're starting over from scratch */
+ adreno_dev->busy_data.gpu_busy = 0;
+ adreno_dev->busy_data.vbif_ram_cycles = 0;
+ adreno_dev->busy_data.vbif_starved_ram = 0;
+
/* Set the page table back to the default page table */
adreno_ringbuffer_set_global(adreno_dev, 0);
kgsl_mmu_set_pt(&device->mmu, device->mmu.defaultpagetable);
@@ -2645,10 +2650,9 @@
void __iomem *reg;
struct gmu_device *gmu = &device->gmu;
- offsetwords -= gmu->gmu2gpu_offset;
-
trace_kgsl_regwrite(device, offsetwords, value);
+ offsetwords -= gmu->gmu2gpu_offset;
reg = gmu->reg_virt + (offsetwords << 2);
/*
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index c7e3ad7..01b877f 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -195,6 +195,7 @@
ADRENO_REV_A512 = 512,
ADRENO_REV_A530 = 530,
ADRENO_REV_A540 = 540,
+ ADRENO_REV_A615 = 615,
ADRENO_REV_A630 = 630,
};
@@ -641,6 +642,8 @@
ADRENO_REG_GMU_HOST2GMU_INTR_SET,
ADRENO_REG_GMU_HOST2GMU_INTR_CLR,
ADRENO_REG_GMU_HOST2GMU_INTR_RAW_INFO,
+ ADRENO_REG_GMU_NMI_CONTROL_STATUS,
+ ADRENO_REG_GMU_CM3_CFG,
ADRENO_REG_GPMU_POWER_COUNTER_ENABLE,
ADRENO_REG_REGISTER_MAX,
};
@@ -875,6 +878,8 @@
unsigned int fsynr1);
int (*reset)(struct kgsl_device *, int fault);
int (*soft_reset)(struct adreno_device *);
+ bool (*gx_is_on)(struct adreno_device *);
+ bool (*sptprac_is_on)(struct adreno_device *);
};
/**
@@ -1144,6 +1149,7 @@
ADRENO_GPUREV(adreno_dev) < 700;
}
+ADRENO_TARGET(a615, ADRENO_REV_A615)
ADRENO_TARGET(a630, ADRENO_REV_A630)
static inline int adreno_is_a630v1(struct adreno_device *adreno_dev)
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 13c36e6..742da91 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -55,7 +55,7 @@
{ adreno_is_a530, a530_vbif },
{ adreno_is_a512, a540_vbif },
{ adreno_is_a510, a530_vbif },
- { adreno_is_a508, a540_vbif },
+ { adreno_is_a508, a530_vbif },
{ adreno_is_a505, a530_vbif },
{ adreno_is_a506, a530_vbif },
};
diff --git a/drivers/gpu/msm/adreno_a5xx.h b/drivers/gpu/msm/adreno_a5xx.h
index 08fd16a..3d89d73 100644
--- a/drivers/gpu/msm/adreno_a5xx.h
+++ b/drivers/gpu/msm/adreno_a5xx.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -151,24 +151,6 @@
#define GPMU_ISENSE_STATUS GENMASK(3, 0)
#define GPMU_ISENSE_END_POINT_CAL_ERR BIT(0)
-/* A5XX_GPU_CS_AMP_CALIBRATION_CONTROL1 */
-#define AMP_SW_TRIM_START BIT(0)
-
-/* A5XX_GPU_CS_SENSOR_GENERAL_STATUS */
-#define SS_AMPTRIM_DONE BIT(11)
-#define CS_PWR_ON_STATUS BIT(10)
-
-/* A5XX_GPU_CS_AMP_CALIBRATION_STATUS*_* */
-#define AMP_OUT_OF_RANGE_ERR BIT(4)
-#define AMP_OFFSET_CHECK_MAX_ERR BIT(2)
-#define AMP_OFFSET_CHECK_MIN_ERR BIT(1)
-
-/* A5XX_GPU_CS_AMP_CALIBRATION_DONE */
-#define SW_OPAMP_CAL_DONE BIT(0)
-
-#define AMP_CALIBRATION_ERR (AMP_OFFSET_CHECK_MIN_ERR | \
- AMP_OFFSET_CHECK_MAX_ERR | AMP_OUT_OF_RANGE_ERR)
-
#define AMP_CALIBRATION_RETRY_CNT 3
#define AMP_CALIBRATION_TIMEOUT 6
diff --git a/drivers/gpu/msm/adreno_a5xx_preempt.c b/drivers/gpu/msm/adreno_a5xx_preempt.c
index 43302a0..4036530 100644
--- a/drivers/gpu/msm/adreno_a5xx_preempt.c
+++ b/drivers/gpu/msm/adreno_a5xx_preempt.c
@@ -547,13 +547,42 @@
KGSL_MEMFLAGS_GPUREADONLY, KGSL_MEMDESC_PRIVILEGED,
"smmu_info");
}
+
+static void a5xx_preemption_iommu_close(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
+
+ kgsl_free_global(device, &iommu->smmu_info);
+}
+
#else
static int a5xx_preemption_iommu_init(struct adreno_device *adreno_dev)
{
return -ENODEV;
}
+
+static void a5xx_preemption_iommu_close(struct adreno_device *adreno_dev)
+{
+}
#endif
+static void a5xx_preemption_close(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_preemption *preempt = &adreno_dev->preempt;
+ struct adreno_ringbuffer *rb;
+ unsigned int i;
+
+ del_timer(&preempt->timer);
+ kgsl_free_global(device, &preempt->counters);
+ a5xx_preemption_iommu_close(adreno_dev);
+
+ FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
+ kgsl_free_global(device, &rb->preemption_desc);
+ }
+}
+
int a5xx_preemption_init(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -578,7 +607,7 @@
A5XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE, 0, 0,
"preemption_counters");
if (ret)
- return ret;
+ goto err;
addr = preempt->counters.gpuaddr;
@@ -586,10 +615,16 @@
FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
ret = a5xx_preemption_ringbuffer_init(adreno_dev, rb, addr);
if (ret)
- return ret;
+ goto err;
addr += A5XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE;
}
- return a5xx_preemption_iommu_init(adreno_dev);
+ ret = a5xx_preemption_iommu_init(adreno_dev);
+
+err:
+ if (ret)
+ a5xx_preemption_close(device);
+
+ return ret;
}
diff --git a/drivers/gpu/msm/adreno_a5xx_snapshot.c b/drivers/gpu/msm/adreno_a5xx_snapshot.c
index 2e5913d..78b56bc 100644
--- a/drivers/gpu/msm/adreno_a5xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a5xx_snapshot.c
@@ -360,8 +360,8 @@
0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B,
0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3,
- 0x04E0, 0x04F4, 0X04F6, 0x0533, 0x0540, 0x0555, 0xF400, 0xF400,
- 0xF800, 0xF807,
+ 0x04E0, 0x04F4, 0X04F8, 0x0529, 0x0531, 0x0533, 0x0540, 0x0555,
+ 0xF400, 0xF400, 0xF800, 0xF807,
/* CP */
0x0800, 0x081A, 0x081F, 0x0841, 0x0860, 0x0860, 0x0880, 0x08A0,
0x0B00, 0x0B12, 0x0B15, 0X0B1C, 0X0B1E, 0x0B28, 0x0B78, 0x0B7F,
@@ -422,8 +422,8 @@
* is the stop offset (inclusive)
*/
static const unsigned int a5xx_pre_crashdumper_registers[] = {
- /* RBBM: RBBM_STATUS */
- 0x04F5, 0x04F5,
+ /* RBBM: RBBM_STATUS - RBBM_STATUS3 */
+ 0x04F5, 0x04F7, 0x0530, 0x0530,
/* CP: CP_STATUS_1 */
0x0B1D, 0x0B1D,
};
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index fb745ad..5551cea 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -64,10 +64,10 @@
{A6XX_RBBM_CLOCK_CNTL_SP1, 0x22222222},
{A6XX_RBBM_CLOCK_CNTL_SP2, 0x22222222},
{A6XX_RBBM_CLOCK_CNTL_SP3, 0x22222222},
- {A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
- {A6XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
- {A6XX_RBBM_CLOCK_CNTL2_SP2, 0x02222220},
- {A6XX_RBBM_CLOCK_CNTL2_SP3, 0x02222220},
+ {A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02022220},
+ {A6XX_RBBM_CLOCK_CNTL2_SP1, 0x02022220},
+ {A6XX_RBBM_CLOCK_CNTL2_SP2, 0x02022220},
+ {A6XX_RBBM_CLOCK_CNTL2_SP3, 0x02022220},
{A6XX_RBBM_CLOCK_DELAY_SP0, 0x0000F3CF},
{A6XX_RBBM_CLOCK_DELAY_SP1, 0x0000F3CF},
{A6XX_RBBM_CLOCK_DELAY_SP2, 0x0000F3CF},
@@ -172,7 +172,8 @@
const struct kgsl_hwcg_reg *regs;
unsigned int count;
} a6xx_hwcg_registers[] = {
- {adreno_is_a630, a630_hwcg_regs, ARRAY_SIZE(a630_hwcg_regs)}
+ {adreno_is_a630, a630_hwcg_regs, ARRAY_SIZE(a630_hwcg_regs)},
+ {adreno_is_a615, a630_hwcg_regs, ARRAY_SIZE(a630_hwcg_regs)},
};
static struct a6xx_protected_regs {
@@ -338,7 +339,7 @@
regs = a6xx_hwcg_registers[i].regs;
/* Disable SP clock before programming HWCG registers */
- kgsl_gmu_regrmw(device, A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 0);
+ kgsl_gmu_regrmw(device, A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);
for (j = 0; j < a6xx_hwcg_registers[i].count; j++)
kgsl_regwrite(device, regs[j].off, on ? regs[j].val : 0);
@@ -356,7 +357,22 @@
/* enable top level HWCG */
kgsl_regwrite(device, A6XX_RBBM_CLOCK_CNTL, on ? 0x8AA8AA02 : 0);
- kgsl_regwrite(device, A5XX_RBBM_ISDB_CNT, on ? 0x00000182 : 0x00000180);
+}
+
+#define LM_DEFAULT_LIMIT 6000
+
+static uint32_t lm_limit(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+
+ if (adreno_dev->lm_limit)
+ return adreno_dev->lm_limit;
+
+ if (of_property_read_u32(device->pdev->dev.of_node, "qcom,lm-limit",
+ &adreno_dev->lm_limit))
+ adreno_dev->lm_limit = LM_DEFAULT_LIMIT;
+
+ return adreno_dev->lm_limit;
}
/*
@@ -378,6 +394,9 @@
/* enable hardware clockgating */
a6xx_hwcg_set(adreno_dev, true);
+ if (ADRENO_FEATURE(adreno_dev, ADRENO_LM))
+ adreno_dev->lm_threshold_count = A6XX_GMU_GENERAL_1;
+
adreno_vbif_start(adreno_dev, a6xx_vbif_platforms,
ARRAY_SIZE(a6xx_vbif_platforms));
@@ -903,6 +922,9 @@
/* Configure registers for idle setting. The setting is cumulative */
+ /* Disable GMU WB/RB buffer */
+ kgsl_gmu_regwrite(device, A6XX_GMU_SYS_BUS_CONFIG, 0x1);
+
kgsl_gmu_regwrite(device,
A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9C40400);
@@ -934,7 +956,8 @@
}
/* ACD feature enablement */
- if (ADRENO_FEATURE(adreno_dev, ADRENO_LM))
+ if (ADRENO_FEATURE(adreno_dev, ADRENO_LM) &&
+ test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
kgsl_gmu_regrmw(device, A6XX_GMU_BOOT_KMD_LM_HANDSHAKE, 0,
BIT(10));
@@ -942,9 +965,6 @@
if (ADRENO_FEATURE(adreno_dev, ADRENO_RPMH))
kgsl_gmu_regrmw(device, A6XX_GMU_RPMH_CTRL, 0,
RPMH_ENABLE_MASK);
-
- /* Disable reference bandgap voltage */
- kgsl_gmu_regwrite(device, A6XX_GMU_AO_SPARE_CNTL, 1);
}
/*
@@ -1131,116 +1151,54 @@
dev_err(&gmu->pdev->dev, "power off SPTPRAC fail\n");
}
-/*
- * a6xx_hm_enable() - Power on HM and turn on clock
- * @adreno_dev: Pointer to Adreno device
- */
-static int a6xx_hm_enable(struct adreno_device *adreno_dev)
-{
- int ret;
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- struct kgsl_pwrctrl *pwr = &device->pwrctrl;
- struct gmu_device *gmu = &device->gmu;
-
- if (regulator_is_enabled(gmu->gx_gdsc))
- return 0;
-
- ret = regulator_enable(gmu->gx_gdsc);
- if (ret) {
- dev_err(&gmu->pdev->dev,
- "Failed to turn on GPU HM HS\n");
- return ret;
- }
-
- ret = clk_set_rate(pwr->grp_clks[0],
- pwr->pwrlevels[pwr->default_pwrlevel].
- gpu_freq);
- if (ret)
- return ret;
-
- return clk_prepare_enable(pwr->grp_clks[0]);
-}
+#define SPTPRAC_POWER_OFF BIT(2)
+#define SP_CLK_OFF BIT(4)
+#define GX_GDSC_POWER_OFF BIT(6)
+#define GX_CLK_OFF BIT(7)
/*
- * a6xx_hm_disable() - Turn off HM clock and power off
- * @adreno_dev: Pointer to Adreno device
+ * a6xx_gx_is_on() - Check if GX is on using pwr status register
+ * @adreno_dev - Pointer to adreno_device
+ * This check should only be performed if the keepalive bit is set or it
+ * can be guaranteed that the power state of the GPU will remain unchanged
*/
-static int a6xx_hm_disable(struct adreno_device *adreno_dev)
+static bool a6xx_gx_is_on(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- struct kgsl_pwrctrl *pwr = &device->pwrctrl;
- struct gmu_device *gmu = &device->gmu;
+ unsigned int val;
+ bool state;
- if (!regulator_is_enabled(gmu->gx_gdsc))
- return 0;
+ if (!kgsl_gmu_isenabled(device))
+ return true;
- /* Ensure that retention is on */
- kgsl_gmu_regrmw(device, A6XX_GPU_CC_GX_GDSCR, 0,
- A6XX_RETAIN_FF_ENABLE_ENABLE_MASK);
+ kgsl_gmu_regread(device, A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, &val);
+ state = !(val & (GX_GDSC_POWER_OFF | GX_CLK_OFF));
- clk_disable_unprepare(pwr->grp_clks[0]);
+ /* If GMU is holding on to the fence then we cannot dump any GX stuff */
+ kgsl_gmu_regread(device, A6XX_GMU_AO_AHB_FENCE_CTRL, &val);
+ if (val)
+ return false;
- clk_set_rate(pwr->grp_clks[0],
- pwr->pwrlevels[pwr->num_pwrlevels - 1].
- gpu_freq);
+ return state;
- return regulator_disable(gmu->gx_gdsc);
}
/*
- * a6xx_hm_sptprac_enable() - Turn on HM and SPTPRAC
- * @device: Pointer to KGSL device
+ * a6xx_sptprac_is_on() - Check if SPTP is on using pwr status register
+ * @adreno_dev - Pointer to adreno_device
+ * This check should only be performed if the keepalive bit is set or it
+ * can be guaranteed that the power state of the GPU will remain unchanged
*/
-static int a6xx_hm_sptprac_enable(struct kgsl_device *device)
+static bool a6xx_sptprac_is_on(struct adreno_device *adreno_dev)
{
- int ret = 0;
- struct gmu_device *gmu = &device->gmu;
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ unsigned int val;
- /* If GMU does not control HM we must */
- if (gmu->idle_level < GPU_HW_IFPC) {
+ if (!kgsl_gmu_isenabled(device))
+ return true;
- ret = a6xx_hm_enable(ADRENO_DEVICE(device));
- if (ret) {
- dev_err(&gmu->pdev->dev, "Failed to power on GPU HM\n");
- return ret;
- }
-
-
- }
-
- /* If GMU does not control SPTPRAC we must */
- if (gmu->idle_level < GPU_HW_SPTP_PC) {
- ret = a6xx_sptprac_enable(ADRENO_DEVICE(device));
- if (ret) {
- a6xx_hm_disable(ADRENO_DEVICE(device));
- return ret;
- }
- }
-
- return ret;
-}
-
-/*
- * a6xx_hm_sptprac_disable() - Turn off SPTPRAC and HM
- * @device: Pointer to KGSL device
- */
-static int a6xx_hm_sptprac_disable(struct kgsl_device *device)
-{
- int ret = 0;
- struct gmu_device *gmu = &device->gmu;
-
- /* If GMU does not control SPTPRAC we must */
- if (gmu->idle_level < GPU_HW_SPTP_PC)
- a6xx_sptprac_disable(ADRENO_DEVICE(device));
-
- /* If GMU does not control HM we must */
- if (gmu->idle_level < GPU_HW_IFPC) {
- ret = a6xx_hm_disable(ADRENO_DEVICE(device));
- if (ret)
- dev_err(&gmu->pdev->dev, "Failed to power off GPU HM\n");
- }
-
- return ret;
+ kgsl_gmu_regread(device, A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, &val);
+ return !(val & (SPTPRAC_POWER_OFF | SP_CLK_OFF));
}
/*
@@ -1293,6 +1251,9 @@
/* Disable the power counter so that the GMU is not busy */
kgsl_gmu_regwrite(device, A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);
+ /* Turn off SPTPRAC before GMU turns off GX */
+ a6xx_sptprac_disable(adreno_dev);
+
if (!ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) {
ret = hfi_notify_slumber(gmu, perf_idx, bus_level);
return ret;
@@ -1328,7 +1289,10 @@
{
struct gmu_device *gmu = &device->gmu;
struct device *dev = &gmu->pdev->dev;
- int ret = 0;
+ int val;
+
+ kgsl_gmu_regread(device, A6XX_GPU_CC_GX_DOMAIN_MISC, &val);
+ WARN_ON(!(val & 0x1));
/* RSC wake sequence */
kgsl_gmu_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, BIT(1));
@@ -1354,25 +1318,20 @@
kgsl_gmu_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, 0);
- /* Turn on the HM and SPTP head switches */
- ret = a6xx_hm_sptprac_enable(device);
-
/* Enable the power counter because it was disabled before slumber */
kgsl_gmu_regwrite(device, A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
- return ret;
+ return 0;
error_rsc:
dev_err(dev, "GPU RSC sequence stuck in waking up GPU\n");
- return -EINVAL;
+ return -EINVAL;
}
static int a6xx_rpmh_power_off_gpu(struct kgsl_device *device)
{
struct gmu_device *gmu = &device->gmu;
- int val, ret = 0;
-
- /* Turn off the SPTP and HM head switches */
- ret = a6xx_hm_sptprac_disable(device);
+ const struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ int val;
/* RSC sleep sequence */
kgsl_gmu_regwrite(device, A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0, 1);
@@ -1395,13 +1354,87 @@
&val);
kgsl_gmu_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, 0);
- kgsl_gmu_regwrite(device, A6XX_GMU_AO_SPARE_CNTL, 0);
+ if (ADRENO_FEATURE(adreno_dev, ADRENO_LM) &&
+ test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
+ kgsl_gmu_regwrite(device, A6XX_GMU_AO_SPARE_CNTL, 0);
/* FIXME: v2 has different procedure to trigger sequence */
- return ret;
+ return 0;
}
+#define KMASK(start, n) (GENMASK((start + n), (start)))
+
+static void isense_cold_trimm(struct kgsl_device *device)
+{
+ unsigned int reg;
+ struct gmu_device *gmu = &device->gmu;
+
+ kgsl_gmu_regwrite(device, A6XX_GMU_AO_SPARE_CNTL, 1);
+ kgsl_gmu_regwrite(device, A6XX_GPU_CS_AMP_CALIBRATION_DONE, 0);
+
+ kgsl_gmu_regwrite(device, A6XX_GPU_GMU_CX_GMU_ISENSE_CTRL, 0x1);
+ kgsl_gmu_regwrite(device, A6XX_GPU_CS_AMP_CALIBRATION_CONTROL3,
+ 0x00000F8F);
+ kgsl_gmu_regwrite(device, A6XX_GPU_CS_AMP_CALIBRATION_CONTROL2,
+ 0x00705161);
+ udelay(10);
+ kgsl_gmu_regwrite(device, A6XX_GPU_CS_ENABLE_REG, 0x3);
+ kgsl_gmu_regwrite(device, A6XX_GPU_CS_A_SENSOR_CTRL_0, 0x10040a);
+ kgsl_gmu_regwrite(device, A6XX_GPU_CS_A_SENSOR_CTRL_2, 0x10040a);
+
+ kgsl_gmu_regread(device, A6XX_GPU_CS_SENSOR_GENERAL_STATUS, ®);
+ if ((reg & BIT(CS_PWR_ON_STATUS)) != (1 << CS_PWR_ON_STATUS)) {
+ dev_err(&gmu->pdev->dev, "ERROR - ISENSE power-up\n");
+ return;
+ }
+
+ kgsl_gmu_regrmw(device, A6XX_GPU_CS_AMP_CALIBRATION_CONTROL1,
+ KMASK(AMP_TRIM_TIMER, 15), 70 << AMP_TRIM_TIMER);
+ kgsl_gmu_regrmw(device, A6XX_GPU_CS_AMP_CALIBRATION_CONTROL1,
+ KMASK(AMP_SW_TRIM_START, 1), 0 << AMP_SW_TRIM_START);
+ kgsl_gmu_regrmw(device, A6XX_GPU_CS_AMP_CALIBRATION_CONTROL1,
+ KMASK(AMP_SW_TRIM_START, 1), 1 << AMP_SW_TRIM_START);
+
+ if (timed_poll_check(device, A6XX_GPU_CS_SENSOR_GENERAL_STATUS,
+ BIT(SS_AMPTRIM_DONE), GMU_START_TIMEOUT,
+ BIT(SS_AMPTRIM_DONE))) {
+ dev_err(&gmu->pdev->dev, "ISENSE SS_AMPTRIM failure\n");
+ return;
+ }
+
+ kgsl_gmu_regread(device, A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_0, ®);
+ if (reg & AMP_ERR) {
+ kgsl_gmu_regread(device, A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_0,
+ ®);
+ dev_err(&gmu->pdev->dev,
+ "ISENSE ERROR:trimming GX 0x%08x\n", reg);
+ return;
+ }
+
+ kgsl_gmu_regread(device, A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_2, ®);
+ if (reg & AMP_ERR) {
+ kgsl_gmu_regread(device, A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_2,
+ ®);
+ dev_err(&gmu->pdev->dev,
+ "ISENSE ERROR:trimming SPTPRAC 0x%08x\n", reg);
+ return;
+ }
+
+ kgsl_gmu_regwrite(device, A6XX_GPU_CS_AMP_CALIBRATION_DONE, 1);
+ kgsl_gmu_regrmw(device, A6XX_GPU_CS_AMP_PERIOD_CTRL,
+ KMASK(TRIM_CNT_VALUE, 13), 20 << TRIM_CNT_VALUE);
+ kgsl_gmu_regrmw(device, A6XX_GPU_CS_AMP_PERIOD_CTRL,
+ KMASK(RUNTIME_CNT_VALUE, 9), 50 << RUNTIME_CNT_VALUE);
+
+ kgsl_gmu_regrmw(device, A6XX_GPU_CS_AMP_PERIOD_CTRL,
+ KMASK(TRIM_ENABLE, 1), 1 << TRIM_ENABLE);
+ udelay(4);
+ kgsl_gmu_regrmw(device, A6XX_GPU_CS_AMP_PERIOD_CTRL,
+ KMASK(TRIM_ENABLE, 1), 0 << TRIM_ENABLE);
+ kgsl_gmu_regwrite(device, A6XX_GPU_CS_AMP_CALIBRATION_DONE, 1);
+
+}
/*
* a6xx_gmu_fw_start() - set up GMU and start FW
* @device: Pointer to KGSL device
@@ -1419,25 +1452,12 @@
case GMU_RESET:
/* fall through */
case GMU_COLD_BOOT:
- /* Turn on the HM and SPTP head switches */
- ret = a6xx_hm_sptprac_enable(device);
- if (ret)
- return ret;
-
/* Turn on TCM retention */
kgsl_gmu_regwrite(device, A6XX_GMU_GENERAL_7, 1);
- if (!test_and_set_bit(GMU_BOOT_INIT_DONE, &gmu->flags)) {
+ if (!test_and_set_bit(GMU_BOOT_INIT_DONE, &gmu->flags))
_load_gmu_rpmh_ucode(device);
- /* Turn on the HM and SPTP head switches */
- ret = a6xx_hm_sptprac_enable(device);
- if (ret)
- return ret;
- } else if (boot_state == GMU_RESET) {
- ret = a6xx_hm_sptprac_enable(device);
- if (ret)
- return ret;
- } else {
+ else if (boot_state != GMU_RESET) {
ret = a6xx_rpmh_power_on_gpu(device);
if (ret)
return ret;
@@ -1479,6 +1499,13 @@
kgsl_gmu_regwrite(device, A6XX_GMU_AHB_FENCE_RANGE_0,
FENCE_RANGE_MASK);
+ if (ADRENO_FEATURE(adreno_dev, ADRENO_LM) &&
+ test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag)) {
+ kgsl_gmu_regwrite(device, A6XX_GPU_GMU_CX_GMU_PWR_THRESHOLD,
+ lm_limit(adreno_dev));
+ isense_cold_trimm(device);
+ }
+
/* Configure power control and bring the GMU out of reset */
a6xx_gmu_power_config(device);
ret = a6xx_gmu_start(device);
@@ -1494,6 +1521,12 @@
}
}
+ if (gmu->idle_level < GPU_HW_SPTP_PC) {
+ ret = a6xx_sptprac_enable(adreno_dev);
+ if (ret)
+ return ret;
+ }
+
ret = a6xx_gmu_hfi_start(device);
if (ret)
return ret;
@@ -1708,7 +1741,8 @@
unsigned int val;
const struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM))
+ if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM) ||
+ !test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
return;
kgsl_gmu_regread(device, A6XX_GPU_CS_ENABLE_REG, &val);
@@ -1724,7 +1758,8 @@
const struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct gmu_device *gmu = &device->gmu;
- if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM))
+ if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM) ||
+ !test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
return 0;
kgsl_gmu_regread(device, A6XX_GMU_LLM_GLM_SLEEP_CTRL, &val);
@@ -1743,6 +1778,19 @@
return 0;
}
+
+static void a6xx_count_throttles(struct adreno_device *adreno_dev,
+ uint64_t adj)
+{
+ if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM) ||
+ !test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
+ return;
+
+ kgsl_gmu_regread(KGSL_DEVICE(adreno_dev),
+ adreno_dev->lm_threshold_count,
+ &adreno_dev->lm_threshold_cross);
+}
+
static int a6xx_complete_rpmh_votes(struct kgsl_device *device)
{
int ret = 0;
@@ -1786,13 +1834,7 @@
/* Check no outstanding RPMh voting */
a6xx_complete_rpmh_votes(device);
- if (gmu->idle_level < GPU_HW_IFPC) {
- /* HM GDSC is controlled by KGSL */
- ret = a6xx_hm_disable(ADRENO_DEVICE(device));
- if (ret)
- dev_err(&gmu->pdev->dev,
- "suspend: fail: power off GPU HM\n");
- } else if (gmu->gx_gdsc) {
+ if (gmu->gx_gdsc) {
if (regulator_is_enabled(gmu->gx_gdsc)) {
/* Switch gx gdsc control from GMU to CPU
* force non-zero reference count in clk driver
@@ -2133,6 +2175,14 @@
return uche_client[uche_client_id & A6XX_UCHE_CLIENT_PF_CLIENT_ID_MASK];
}
+static void a6xx_cp_callback(struct adreno_device *adreno_dev, int bit)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+
+ a6xx_preemption_trigger(adreno_dev);
+ adreno_dispatcher_schedule(device);
+}
+
#define A6XX_INT_MASK \
((1 << A6XX_INT_CP_AHB_ERROR) | \
(1 << A6XX_INT_ATB_ASYNCFIFO_OVERFLOW) | \
@@ -2170,7 +2220,7 @@
ADRENO_IRQ_CALLBACK(NULL), /* 17 - CP_RB_DONE_TS */
ADRENO_IRQ_CALLBACK(NULL), /* 18 - CP_WT_DONE_TS */
ADRENO_IRQ_CALLBACK(NULL), /* 19 - UNUSED */
- ADRENO_IRQ_CALLBACK(adreno_cp_callback), /* 20 - CP_CACHE_FLUSH_TS */
+ ADRENO_IRQ_CALLBACK(a6xx_cp_callback), /* 20 - CP_CACHE_FLUSH_TS */
ADRENO_IRQ_CALLBACK(NULL), /* 21 - UNUSED */
ADRENO_IRQ_CALLBACK(a6xx_err_callback), /* 22 - RBBM_ATB_BUS_OVERFLOW */
/* 23 - MISC_HANG_DETECT */
@@ -2621,9 +2671,9 @@
if (!device->gmu.pdev)
return -ENODEV;
- kgsl_regwrite(device, A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0);
+ kgsl_regwrite(device, A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xFF000000);
kgsl_regrmw(device,
- A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xFF, 0x20);
+ A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xFF, 0x20);
kgsl_regwrite(device, A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0x1);
return 0;
@@ -2724,6 +2774,10 @@
A6XX_GMU_HOST2GMU_INTR_CLR),
ADRENO_REG_DEFINE(ADRENO_REG_GMU_HOST2GMU_INTR_RAW_INFO,
A6XX_GMU_HOST2GMU_INTR_RAW_INFO),
+ ADRENO_REG_DEFINE(ADRENO_REG_GMU_NMI_CONTROL_STATUS,
+ A6XX_GMU_NMI_CONTROL_STATUS),
+ ADRENO_REG_DEFINE(ADRENO_REG_GMU_CM3_CFG,
+ A6XX_GMU_CM3_CFG),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TRUST_CONTROL,
A6XX_RBBM_SECVID_TRUST_CNTL),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE,
@@ -2756,6 +2810,7 @@
.regulator_disable = a6xx_sptprac_disable,
.perfcounters = &a6xx_perfcounters,
.enable_pwr_counters = a6xx_enable_pwr_counters,
+ .count_throttles = a6xx_count_throttles,
.microcode_read = a6xx_microcode_read,
.enable_64bit = a6xx_enable_64bit,
.llc_configure_gpu_scid = a6xx_llc_configure_gpu_scid,
@@ -2777,4 +2832,6 @@
.preemption_set_marker = a6xx_preemption_set_marker,
.preemption_context_init = a6xx_preemption_context_init,
.preemption_context_destroy = a6xx_preemption_context_destroy,
+ .gx_is_on = a6xx_gx_is_on,
+ .sptprac_is_on = a6xx_sptprac_is_on,
};
diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c
index 54acd73..e1f1595 100644
--- a/drivers/gpu/msm/adreno_a6xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c
@@ -21,6 +21,9 @@
#include "kgsl_gmu.h"
#define A6XX_NUM_CTXTS 2
+#define A6XX_NUM_AXI_ARB_BLOCKS 2
+#define A6XX_NUM_XIN_AXI_BLOCKS 5
+#define A6XX_NUM_XIN_CORE_BLOCKS 4
static const unsigned int a6xx_gras_cluster[] = {
0x8000, 0x8006, 0x8010, 0x8092, 0x8094, 0x809D, 0x80A0, 0x80A6,
@@ -205,13 +208,16 @@
0x3410, 0x3410, 0x3800, 0x3801,
};
-static const unsigned int a6xx_gmu_registers[] = {
+static const unsigned int a6xx_gmu_gx_registers[] = {
/* GMU GX */
0x1A800, 0x1A800, 0x1A810, 0x1A813, 0x1A816, 0x1A816, 0x1A818, 0x1A81B,
0x1A81E, 0x1A81E, 0x1A820, 0x1A823, 0x1A826, 0x1A826, 0x1A828, 0x1A82B,
0x1A82E, 0x1A82E, 0x1A830, 0x1A833, 0x1A836, 0x1A836, 0x1A838, 0x1A83B,
0x1A83E, 0x1A83E, 0x1A840, 0x1A843, 0x1A846, 0x1A846, 0x1A880, 0x1A884,
0x1A900, 0x1A92B, 0x1A940, 0x1A940,
+};
+
+static const unsigned int a6xx_gmu_registers[] = {
/* GMU TCM */
0x1B400, 0x1C3FF, 0x1C400, 0x1D3FF,
/* GMU CX */
@@ -225,9 +231,9 @@
0x1F980, 0x1F986, 0x1F990, 0x1F99E, 0x1F9C0, 0x1F9C0, 0x1F9C5, 0x1F9CC,
0x1F9E0, 0x1F9E2, 0x1F9F0, 0x1F9F0, 0x1FA00, 0x1FA03,
/* GPU RSCC */
- 0x23740, 0x23742, 0x23744, 0x23747, 0x2374C, 0x23787, 0x237EC, 0x237EF,
- 0x237F4, 0x2382F, 0x23894, 0x23897, 0x2389C, 0x238D7, 0x2393C, 0x2393F,
- 0x23944, 0x2397F,
+ 0x2348C, 0x2348C, 0x23501, 0x23502, 0x23740, 0x23742, 0x23744, 0x23747,
+ 0x2374C, 0x23787, 0x237EC, 0x237EF, 0x237F4, 0x2382F, 0x23894, 0x23897,
+ 0x2389C, 0x238D7, 0x2393C, 0x2393F, 0x23944, 0x2397F,
/* GMU AO */
0x23B00, 0x23B16, 0x23C00, 0x23C00,
/* GPU CC */
@@ -257,16 +263,17 @@
0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0018, 0x001B,
0x001e, 0x0032, 0x0038, 0x003C, 0x0042, 0x0042, 0x0044, 0x0044,
0x0047, 0x0047, 0x0056, 0x0056, 0x00AD, 0x00AE, 0x00B0, 0x00FB,
- 0x0100, 0x011D, 0x0200, 0x020D, 0x0210, 0x0213, 0x0218, 0x023D,
- 0x0400, 0x04F9, 0x0500, 0x0500, 0x0505, 0x050B, 0x050E, 0x0511,
- 0x0533, 0x0533, 0x0540, 0x0555,
+ 0x0100, 0x011D, 0x0200, 0x020D, 0x0218, 0x023D, 0x0400, 0x04F9,
+ 0x0500, 0x0500, 0x0505, 0x050B, 0x050E, 0x0511, 0x0533, 0x0533,
+ 0x0540, 0x0555,
/* CP */
- 0x0800, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821, 0x0823, 0x0827,
- 0x0830, 0x0833, 0x0840, 0x0843, 0x084F, 0x086F, 0x0880, 0x088A,
- 0x08A0, 0x08AB, 0x08C0, 0x08C4, 0x08D0, 0x08DD, 0x08F0, 0x08F3,
- 0x0900, 0x0903, 0x0908, 0x0911, 0x0928, 0x093E, 0x0942, 0x094D,
- 0x0980, 0x0984, 0x098D, 0x0996, 0x0998, 0x099E, 0x09A0, 0x09A6,
- 0x09A8, 0x09AE, 0x09B0, 0x09B1, 0x09C2, 0x09C8, 0x0A00, 0x0A03,
+ 0x0800, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821, 0x0823, 0x0824,
+ 0x0826, 0x0827, 0x0830, 0x0833, 0x0840, 0x0843, 0x084F, 0x086F,
+ 0x0880, 0x088A, 0x08A0, 0x08AB, 0x08C0, 0x08C4, 0x08D0, 0x08DD,
+ 0x08F0, 0x08F3, 0x0900, 0x0903, 0x0908, 0x0911, 0x0928, 0x093E,
+ 0x0942, 0x094D, 0x0980, 0x0984, 0x098D, 0x0996, 0x0998, 0x099E,
+ 0x09A0, 0x09A6, 0x09A8, 0x09AE, 0x09B0, 0x09B1, 0x09C2, 0x09C8,
+ 0x0A00, 0x0A03,
/* VSC */
0x0C00, 0x0C04, 0x0C06, 0x0C06, 0x0C10, 0x0CD9, 0x0E00, 0x0E0E,
/* UCHE */
@@ -290,6 +297,18 @@
0xA630, 0xA630,
};
+/*
+ * Set of registers to dump for A6XX before actually triggering crash dumper.
+ * Registers in pairs - first value is the start offset, second
+ * is the stop offset (inclusive)
+ */
+static const unsigned int a6xx_pre_crashdumper_registers[] = {
+ /* RBBM: RBBM_STATUS - RBBM_STATUS3 */
+ 0x210, 0x213,
+ /* CP: CP_STATUS_1 */
+ 0x825, 0x825,
+};
+
enum a6xx_debugbus_id {
A6XX_DBGBUS_CP = 0x1,
A6XX_DBGBUS_RBBM = 0x2,
@@ -377,9 +396,12 @@
{ A6XX_DBGBUS_TPL1_3, 0x100, },
};
+static const struct adreno_debugbus_block a6xx_vbif_debugbus_blocks = {
+ A6XX_DBGBUS_VBIF, 0x100,
+};
+
static void __iomem *a6xx_cx_dbgc;
static const struct adreno_debugbus_block a6xx_cx_dbgc_debugbus_blocks[] = {
- { A6XX_DBGBUS_VBIF, 0x100, },
{ A6XX_DBGBUS_GMU_CX, 0x100, },
{ A6XX_DBGBUS_CX, 0x100, },
};
@@ -562,6 +584,17 @@
return (count * 8) + sizeof(*header);
}
+static size_t a6xx_snapshot_pre_crashdump_regs(struct kgsl_device *device,
+ u8 *buf, size_t remain, void *priv)
+{
+ struct kgsl_snapshot_registers pre_cdregs = {
+ .regs = a6xx_pre_crashdumper_registers,
+ .count = ARRAY_SIZE(a6xx_pre_crashdumper_registers)/2,
+ };
+
+ return kgsl_snapshot_dump_registers(device, buf, remain, &pre_cdregs);
+}
+
static size_t a6xx_snapshot_shader_memory(struct kgsl_device *device,
u8 *buf, size_t remain, void *priv)
{
@@ -1049,7 +1082,7 @@
kgsl_regread(device, A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
}
-/* a6xx_snapshot_cbgc_debugbus_block() - Capture debug data for a gpu block */
+/* a6xx_snapshot_dbgc_debugbus_block() - Capture debug data for a gpu block */
static size_t a6xx_snapshot_dbgc_debugbus_block(struct kgsl_device *device,
u8 *buf, size_t remain, void *priv)
{
@@ -1088,6 +1121,89 @@
return size;
}
+/* a6xx_snapshot_vbif_debugbus_block() - Capture debug data for VBIF block */
+static size_t a6xx_snapshot_vbif_debugbus_block(struct kgsl_device *device,
+ u8 *buf, size_t remain, void *priv)
+{
+ struct kgsl_snapshot_debugbus *header =
+ (struct kgsl_snapshot_debugbus *)buf;
+ struct adreno_debugbus_block *block = priv;
+ int i, j;
+ /*
+ * Total number of VBIF data words considering 3 sections:
+ * 2 arbiter blocks of 16 words
+ * 5 AXI XIN blocks of 18 dwords each
+ * 4 core clock side XIN blocks of 12 dwords each
+ */
+ unsigned int dwords = (16 * A6XX_NUM_AXI_ARB_BLOCKS) +
+ (18 * A6XX_NUM_XIN_AXI_BLOCKS) +
+ (12 * A6XX_NUM_XIN_CORE_BLOCKS);
+ unsigned int *data = (unsigned int *)(buf + sizeof(*header));
+ size_t size;
+ unsigned int reg_clk;
+
+ size = (dwords * sizeof(unsigned int)) + sizeof(*header);
+
+ if (remain < size) {
+ SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
+ return 0;
+ }
+ header->id = block->block_id;
+ header->count = dwords;
+
+ kgsl_regread(device, A6XX_VBIF_CLKON, &reg_clk);
+ kgsl_regwrite(device, A6XX_VBIF_CLKON, reg_clk |
+ (A6XX_VBIF_CLKON_FORCE_ON_TESTBUS_MASK <<
+ A6XX_VBIF_CLKON_FORCE_ON_TESTBUS_SHIFT));
+ kgsl_regwrite(device, A6XX_VBIF_TEST_BUS1_CTRL0, 0);
+ kgsl_regwrite(device, A6XX_VBIF_TEST_BUS_OUT_CTRL,
+ (A6XX_VBIF_TEST_BUS_OUT_CTRL_EN_MASK <<
+ A6XX_VBIF_TEST_BUS_OUT_CTRL_EN_SHIFT));
+
+ for (i = 0; i < A6XX_NUM_AXI_ARB_BLOCKS; i++) {
+ kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL0,
+ (1 << (i + 16)));
+ for (j = 0; j < 16; j++) {
+ kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL1,
+ ((j & A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_MASK)
+ << A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_SHIFT));
+ kgsl_regread(device, A6XX_VBIF_TEST_BUS_OUT,
+ data);
+ data++;
+ }
+ }
+
+ /* XIN blocks AXI side */
+ for (i = 0; i < A6XX_NUM_XIN_AXI_BLOCKS; i++) {
+ kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL0, 1 << i);
+ for (j = 0; j < 18; j++) {
+ kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL1,
+ ((j & A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_MASK)
+ << A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_SHIFT));
+ kgsl_regread(device, A6XX_VBIF_TEST_BUS_OUT,
+ data);
+ data++;
+ }
+ }
+ kgsl_regwrite(device, A6XX_VBIF_TEST_BUS2_CTRL0, 0);
+
+ /* XIN blocks core clock side */
+ for (i = 0; i < A6XX_NUM_XIN_CORE_BLOCKS; i++) {
+ kgsl_regwrite(device, A6XX_VBIF_TEST_BUS1_CTRL0, 1 << i);
+ for (j = 0; j < 12; j++) {
+ kgsl_regwrite(device, A6XX_VBIF_TEST_BUS1_CTRL1,
+ ((j & A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL_MASK)
+ << A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL_SHIFT));
+ kgsl_regread(device, A6XX_VBIF_TEST_BUS_OUT,
+ data);
+ data++;
+ }
+ }
+ /* restore the clock of VBIF */
+ kgsl_regwrite(device, A6XX_VBIF_CLKON, reg_clk);
+ return size;
+}
+
static void _cx_dbgc_regread(unsigned int offsetwords, unsigned int *value)
{
void __iomem *reg;
@@ -1283,6 +1399,10 @@
(void *) &a6xx_dbgc_debugbus_blocks[i]);
}
+ kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUGBUS,
+ snapshot, a6xx_snapshot_vbif_debugbus_block,
+ (void *) &a6xx_vbif_debugbus_blocks);
+
if (a6xx_cx_dbgc) {
for (i = 0; i < ARRAY_SIZE(a6xx_cx_dbgc_debugbus_blocks); i++) {
kgsl_snapshot_add_section(device,
@@ -1297,11 +1417,19 @@
static void a6xx_snapshot_gmu(struct kgsl_device *device,
struct kgsl_snapshot *snapshot)
{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+
if (!kgsl_gmu_isenabled(device))
return;
adreno_snapshot_registers(device, snapshot, a6xx_gmu_registers,
ARRAY_SIZE(a6xx_gmu_registers) / 2);
+
+ if (gpudev->gx_is_on(adreno_dev))
+ adreno_snapshot_registers(device, snapshot,
+ a6xx_gmu_gx_registers,
+ ARRAY_SIZE(a6xx_gmu_gx_registers) / 2);
}
/* a6xx_snapshot_sqe() - Dump SQE data in snapshot */
@@ -1384,17 +1512,35 @@
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
struct adreno_snapshot_data *snap_data = gpudev->snapshot_data;
+ bool sptprac_on;
- /* Try to run the crash dumper */
- _a6xx_do_crashdump(device);
+ /* GMU TCM data dumped through AHB */
+ a6xx_snapshot_gmu(device, snapshot);
+ sptprac_on = gpudev->sptprac_is_on(adreno_dev);
+
+ /* Return if the GX is off */
+ if (!gpudev->gx_is_on(adreno_dev)) {
+ pr_err("GX is off. Only dumping GMU data in snapshot\n");
+ return;
+ }
+
+ /* Dump the registers which get affected by crash dumper trigger */
kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
- snapshot, a6xx_snapshot_registers, NULL);
+ snapshot, a6xx_snapshot_pre_crashdump_regs, NULL);
+ /* Dump vbif registers as well which get affected by crash dumper */
adreno_snapshot_vbif_registers(device, snapshot,
a6xx_vbif_snapshot_registers,
ARRAY_SIZE(a6xx_vbif_snapshot_registers));
+ /* Try to run the crash dumper */
+ if (sptprac_on)
+ _a6xx_do_crashdump(device);
+
+ kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
+ snapshot, a6xx_snapshot_registers, NULL);
+
/* CP_SQE indexed registers */
kgsl_snapshot_indexed_registers(device, snapshot,
A6XX_CP_SQE_STAT_ADDR, A6XX_CP_SQE_STAT_DATA,
@@ -1422,19 +1568,19 @@
/* Mempool debug data */
a6xx_snapshot_mempool(device, snapshot);
- /* Shader memory */
- a6xx_snapshot_shader(device, snapshot);
+ if (sptprac_on) {
+ /* Shader memory */
+ a6xx_snapshot_shader(device, snapshot);
- /* MVC register section */
- a6xx_snapshot_mvc_regs(device, snapshot);
+ /* MVC register section */
+ a6xx_snapshot_mvc_regs(device, snapshot);
- /* registers dumped through DBG AHB */
- a6xx_snapshot_dbgahb_regs(device, snapshot);
+ /* registers dumped through DBG AHB */
+ a6xx_snapshot_dbgahb_regs(device, snapshot);
+ }
a6xx_snapshot_debugbus(device, snapshot);
- /* GMU TCM data dumped through AHB */
- a6xx_snapshot_gmu(device, snapshot);
}
static int _a6xx_crashdump_init_mvc(uint64_t *ptr, uint64_t *offset)
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index e8b1c67..0a45d27 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -983,6 +983,13 @@
spin_unlock(&dispatcher->plist_lock);
}
+static inline void _decrement_submit_now(struct kgsl_device *device)
+{
+ spin_lock(&device->submit_lock);
+ device->submit_now--;
+ spin_unlock(&device->submit_lock);
+}
+
/**
* adreno_dispatcher_issuecmds() - Issue commmands from pending contexts
* @adreno_dev: Pointer to the adreno device struct
@@ -992,15 +999,29 @@
static void adreno_dispatcher_issuecmds(struct adreno_device *adreno_dev)
{
struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+
+ spin_lock(&device->submit_lock);
+ /* If state transition to SLUMBER, schedule the work for later */
+ if (device->slumber == true) {
+ spin_unlock(&device->submit_lock);
+ goto done;
+ }
+ device->submit_now++;
+ spin_unlock(&device->submit_lock);
/* If the dispatcher is busy then schedule the work for later */
if (!mutex_trylock(&dispatcher->mutex)) {
- adreno_dispatcher_schedule(KGSL_DEVICE(adreno_dev));
- return;
+ _decrement_submit_now(device);
+ goto done;
}
_adreno_dispatcher_issuecmds(adreno_dev);
mutex_unlock(&dispatcher->mutex);
+ _decrement_submit_now(device);
+ return;
+done:
+ adreno_dispatcher_schedule(device);
}
/**
@@ -2060,11 +2081,25 @@
int ret, i;
int fault;
int halt;
+ bool gx_on = true;
fault = atomic_xchg(&dispatcher->fault, 0);
if (fault == 0)
return 0;
+ /* Mask all GMU interrupts */
+ if (kgsl_gmu_isenabled(device)) {
+ adreno_write_gmureg(adreno_dev,
+ ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK,
+ 0xFFFFFFFF);
+ adreno_write_gmureg(adreno_dev,
+ ADRENO_REG_GMU_GMU2HOST_INTR_MASK,
+ 0xFFFFFFFF);
+ }
+
+ if (gpudev->gx_is_on)
+ gx_on = gpudev->gx_is_on(adreno_dev);
+
/*
* In the very unlikely case that the power is off, do nothing - the
* state will be reset on power up and everybody will be happy
@@ -2084,7 +2119,8 @@
* else return early to give the fault handler a chance to run.
*/
if (!(fault & ADRENO_IOMMU_PAGE_FAULT) &&
- (adreno_is_a5xx(adreno_dev) || adreno_is_a6xx(adreno_dev))) {
+ (adreno_is_a5xx(adreno_dev) || adreno_is_a6xx(adreno_dev)) &&
+ gx_on) {
unsigned int val;
mutex_lock(&device->mutex);
@@ -2106,14 +2142,15 @@
mutex_lock(&device->mutex);
- adreno_readreg64(adreno_dev, ADRENO_REG_CP_RB_BASE,
- ADRENO_REG_CP_RB_BASE_HI, &base);
+ if (gx_on)
+ adreno_readreg64(adreno_dev, ADRENO_REG_CP_RB_BASE,
+ ADRENO_REG_CP_RB_BASE_HI, &base);
/*
* Force the CP off for anything but a hard fault to make sure it is
* good and stopped
*/
- if (!(fault & ADRENO_HARD_FAULT)) {
+ if (!(fault & ADRENO_HARD_FAULT) && gx_on) {
adreno_readreg(adreno_dev, ADRENO_REG_CP_ME_CNTL, &reg);
if (adreno_is_a5xx(adreno_dev) || adreno_is_a6xx(adreno_dev))
reg |= 1 | (1 << 1);
@@ -2149,8 +2186,9 @@
trace_adreno_cmdbatch_fault(cmdobj, fault);
}
- adreno_readreg64(adreno_dev, ADRENO_REG_CP_IB1_BASE,
- ADRENO_REG_CP_IB1_BASE_HI, &base);
+ if (gx_on)
+ adreno_readreg64(adreno_dev, ADRENO_REG_CP_IB1_BASE,
+ ADRENO_REG_CP_IB1_BASE_HI, &base);
do_header_and_snapshot(device, hung_rb, cmdobj);
@@ -2435,7 +2473,7 @@
mutex_unlock(&device->mutex);
}
-static void adreno_dispatcher_work(struct work_struct *work)
+static void adreno_dispatcher_work(struct kthread_work *work)
{
struct adreno_dispatcher *dispatcher =
container_of(work, struct adreno_dispatcher, work);
@@ -2495,7 +2533,7 @@
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- kgsl_schedule_work(&dispatcher->work);
+ kthread_queue_work(&kgsl_driver.worker, &dispatcher->work);
}
/**
@@ -2791,7 +2829,7 @@
setup_timer(&dispatcher->fault_timer, adreno_dispatcher_fault_timer,
(unsigned long) adreno_dev);
- INIT_WORK(&dispatcher->work, adreno_dispatcher_work);
+ kthread_init_work(&dispatcher->work, adreno_dispatcher_work);
init_completion(&dispatcher->idle_gate);
complete_all(&dispatcher->idle_gate);
diff --git a/drivers/gpu/msm/adreno_dispatch.h b/drivers/gpu/msm/adreno_dispatch.h
index 72545db..48f0cdc 100644
--- a/drivers/gpu/msm/adreno_dispatch.h
+++ b/drivers/gpu/msm/adreno_dispatch.h
@@ -91,7 +91,7 @@
atomic_t fault;
struct plist_head pending;
spinlock_t plist_lock;
- struct work_struct work;
+ struct kthread_work work;
struct kobject kobj;
struct completion idle_gate;
unsigned int disp_preempt_fair_sched;
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index c6df7bb..0882447 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -548,6 +548,8 @@
mutex_unlock(&device->mutex);
+ debugfs_remove_recursive(drawctxt->debug_root);
+
/* wake threads waiting to submit commands from this context */
wake_up_all(&drawctxt->waiting);
wake_up_all(&drawctxt->wq);
@@ -569,7 +571,6 @@
gpudev->preemption_context_destroy(context);
drawctxt = ADRENO_CONTEXT(context);
- debugfs_remove_recursive(drawctxt->debug_root);
kfree(drawctxt);
}
diff --git a/drivers/gpu/msm/adreno_snapshot.c b/drivers/gpu/msm/adreno_snapshot.c
index 92b541d..0840aba 100644
--- a/drivers/gpu/msm/adreno_snapshot.c
+++ b/drivers/gpu/msm/adreno_snapshot.c
@@ -840,6 +840,15 @@
setup_fault_process(device, snapshot,
context ? context->proc_priv : NULL);
+ /* Add GPU specific sections - registers mainly, but other stuff too */
+ if (gpudev->snapshot)
+ gpudev->snapshot(adreno_dev, snapshot);
+
+ /* Dumping these buffers is useless if the GX is not on */
+ if (gpudev->gx_is_on)
+ if (!gpudev->gx_is_on(adreno_dev))
+ return;
+
adreno_readreg64(adreno_dev, ADRENO_REG_CP_IB1_BASE,
ADRENO_REG_CP_IB1_BASE_HI, &snapshot->ib1base);
adreno_readreg(adreno_dev, ADRENO_REG_CP_IB1_BUFSZ, &snapshot->ib1size);
@@ -862,10 +871,6 @@
adreno_snapshot_ringbuffer(device, snapshot,
adreno_dev->next_rb);
- /* Add GPU specific sections - registers mainly, but other stuff too */
- if (gpudev->snapshot)
- gpudev->snapshot(adreno_dev, snapshot);
-
/* Dump selected global buffers */
kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_GPU_OBJECT_V2,
snapshot, snapshot_global, &device->memstore);
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 7b8cdc2..9968d8c 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -254,13 +254,6 @@
kgsl_mem_entry_put(entry);
}
-static inline void
-kgsl_mem_entry_put_deferred(struct kgsl_mem_entry *entry)
-{
- if (entry)
- queue_work(kgsl_driver.mem_workqueue, &entry->work);
-}
-
static inline struct kgsl_mem_entry *
kgsl_mem_entry_create(void)
{
@@ -270,7 +263,6 @@
kref_init(&entry->refcount);
/* put this ref in userspace memory alloc and map ioctls */
kref_get(&entry->refcount);
- INIT_WORK(&entry->work, _deferred_put);
}
return entry;
@@ -577,8 +569,10 @@
context->tid = task_pid_nr(current);
ret = kgsl_sync_timeline_create(context);
- if (ret)
+ if (ret) {
+ kgsl_process_private_put(dev_priv->process_priv);
goto out;
+ }
snprintf(name, sizeof(name), "context-%d", id);
kgsl_add_event_group(&context->events, context, name,
@@ -995,7 +989,6 @@
*/
kgsl_process_uninit_sysfs(private);
- debugfs_remove_recursive(private->debug_root);
process_release_sync_sources(private);
@@ -1007,12 +1000,14 @@
list_del(&private->list);
/*
- * Unlock the mutex before releasing the memory - this prevents a
- * deadlock with the IOMMU mutex if a page fault occurs
+ * Unlock the mutex before releasing the memory and the debugfs
+ * nodes - this prevents deadlocks with the IOMMU and debugfs
+ * locks.
*/
mutex_unlock(&kgsl_driver.process_mutex);
process_release_memory(private);
+ debugfs_remove_recursive(private->debug_root);
kgsl_process_private_put(private);
}
@@ -1880,7 +1875,7 @@
return -EINVAL;
ret = gpumem_free_entry(entry);
- kgsl_mem_entry_put_deferred(entry);
+ kgsl_mem_entry_put(entry);
return ret;
}
@@ -1898,7 +1893,7 @@
return -EINVAL;
ret = gpumem_free_entry(entry);
- kgsl_mem_entry_put_deferred(entry);
+ kgsl_mem_entry_put(entry);
return ret;
}
@@ -1931,11 +1926,13 @@
return ret;
}
-static void gpuobj_free_fence_func(void *priv)
+static bool gpuobj_free_fence_func(void *priv)
{
struct kgsl_mem_entry *entry = priv;
- kgsl_mem_entry_put_deferred(entry);
+ INIT_WORK(&entry->work, _deferred_put);
+ queue_work(kgsl_driver.mem_workqueue, &entry->work);
+ return true;
}
static long gpuobj_free_on_fence(struct kgsl_device_private *dev_priv,
@@ -1999,7 +1996,7 @@
else
ret = -EINVAL;
- kgsl_mem_entry_put_deferred(entry);
+ kgsl_mem_entry_put(entry);
return ret;
}
@@ -2230,21 +2227,23 @@
if (fd != 0)
dmabuf = dma_buf_get(fd - 1);
}
- up_read(&current->mm->mmap_sem);
- if (IS_ERR_OR_NULL(dmabuf))
+ if (IS_ERR_OR_NULL(dmabuf)) {
+ up_read(&current->mm->mmap_sem);
return dmabuf ? PTR_ERR(dmabuf) : -ENODEV;
+ }
ret = kgsl_setup_dma_buf(device, pagetable, entry, dmabuf);
if (ret) {
dma_buf_put(dmabuf);
+ up_read(&current->mm->mmap_sem);
return ret;
}
/* Setup the user addr/cache mode for cache operations */
entry->memdesc.useraddr = hostptr;
_setup_cache_mode(entry, vma);
-
+ up_read(&current->mm->mmap_sem);
return 0;
}
#else
@@ -3379,13 +3378,7 @@
if (entry == NULL)
return -EINVAL;
- if (!kgsl_mem_entry_set_pend(entry)) {
- kgsl_mem_entry_put(entry);
- return -EBUSY;
- }
-
if (entry->memdesc.cur_bindings != 0) {
- kgsl_mem_entry_unset_pend(entry);
kgsl_mem_entry_put(entry);
return -EINVAL;
}
@@ -3394,7 +3387,7 @@
/* One put for find_id(), one put for the kgsl_mem_entry_create() */
kgsl_mem_entry_put(entry);
- kgsl_mem_entry_put_deferred(entry);
+ kgsl_mem_entry_put(entry);
return 0;
}
@@ -3454,13 +3447,7 @@
if (entry == NULL)
return -EINVAL;
- if (!kgsl_mem_entry_set_pend(entry)) {
- kgsl_mem_entry_put(entry);
- return -EBUSY;
- }
-
if (entry->bind_tree.rb_node != NULL) {
- kgsl_mem_entry_unset_pend(entry);
kgsl_mem_entry_put(entry);
return -EINVAL;
}
@@ -3469,7 +3456,7 @@
/* One put for find_id(), one put for the kgsl_mem_entry_create() */
kgsl_mem_entry_put(entry);
- kgsl_mem_entry_put_deferred(entry);
+ kgsl_mem_entry_put(entry);
return 0;
}
@@ -4660,6 +4647,7 @@
device->id, device->reg_phys, device->reg_len);
rwlock_init(&device->context_lock);
+ spin_lock_init(&device->submit_lock);
setup_timer(&device->idle_timer, kgsl_timer, (unsigned long) device);
@@ -4803,6 +4791,8 @@
static int __init kgsl_core_init(void)
{
int result = 0;
+ struct sched_param param = { .sched_priority = 2 };
+
/* alloc major and minor device numbers */
result = alloc_chrdev_region(&kgsl_driver.major, 0, KGSL_DEVICE_MAX,
"kgsl");
@@ -4867,7 +4857,19 @@
WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
kgsl_driver.mem_workqueue = alloc_workqueue("kgsl-mementry",
- WQ_MEM_RECLAIM, 0);
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
+
+ kthread_init_worker(&kgsl_driver.worker);
+
+ kgsl_driver.worker_thread = kthread_run(kthread_worker_fn,
+ &kgsl_driver.worker, "kgsl_worker_thread");
+
+ if (IS_ERR(kgsl_driver.worker_thread)) {
+ pr_err("unable to start kgsl thread\n");
+ goto err;
+ }
+
+ sched_setscheduler(kgsl_driver.worker_thread, SCHED_FIFO, &param);
kgsl_events_init();
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index c54e51e..f80da79 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -25,6 +25,7 @@
#include <linux/regulator/consumer.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
+#include <linux/kthread.h>
#include <asm/cacheflush.h>
/*
@@ -151,6 +152,8 @@
unsigned int full_cache_threshold;
struct workqueue_struct *workqueue;
struct workqueue_struct *mem_workqueue;
+ struct kthread_worker worker;
+ struct task_struct *worker_thread;
};
extern struct kgsl_driver kgsl_driver;
@@ -300,7 +303,7 @@
void *priv;
struct list_head node;
unsigned int created;
- struct work_struct work;
+ struct kthread_work work;
int result;
struct kgsl_event_group *group;
};
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index b621ada..6bad70b 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -269,6 +269,11 @@
struct kgsl_pwrctrl pwrctrl;
int open_count;
+ /* For GPU inline submission */
+ uint32_t submit_now;
+ spinlock_t submit_lock;
+ bool slumber;
+
struct mutex mutex;
uint32_t state;
uint32_t requested_state;
@@ -664,9 +669,14 @@
static inline int kgsl_state_is_awake(struct kgsl_device *device)
{
+ struct gmu_device *gmu = &device->gmu;
+
if (device->state == KGSL_STATE_ACTIVE ||
device->state == KGSL_STATE_AWARE)
return true;
+ else if (kgsl_gmu_isenabled(device) &&
+ test_bit(GMU_CLK_ON, &gmu->flags))
+ return true;
else
return false;
}
diff --git a/drivers/gpu/msm/kgsl_drawobj.c b/drivers/gpu/msm/kgsl_drawobj.c
index bca3d57..3dbaea4 100644
--- a/drivers/gpu/msm/kgsl_drawobj.c
+++ b/drivers/gpu/msm/kgsl_drawobj.c
@@ -158,10 +158,13 @@
}
/*
- * a generic function to retire a pending sync event and (possibly)
- * kick the dispatcher
+ * a generic function to retire a pending sync event and (possibly) kick the
+ * dispatcher.
+ * Returns false if the event was already marked for cancellation in another
+ * thread. This function should return true if this thread is responsible for
+ * freeing up the memory, and the event will not be cancelled.
*/
-static void drawobj_sync_expire(struct kgsl_device *device,
+static bool drawobj_sync_expire(struct kgsl_device *device,
struct kgsl_drawobj_sync_event *event)
{
struct kgsl_drawobj_sync *syncobj = event->syncobj;
@@ -170,7 +173,7 @@
* leave without doing anything useful
*/
if (!test_and_clear_bit(event->id, &syncobj->pending))
- return;
+ return false;
/*
* If no more pending events, delete the timer and schedule the command
@@ -183,6 +186,7 @@
device->ftbl->drawctxt_sched(device,
event->syncobj->base.context);
}
+ return true;
}
/*
@@ -228,18 +232,23 @@
static void drawobj_destroy_sync(struct kgsl_drawobj *drawobj)
{
struct kgsl_drawobj_sync *syncobj = SYNCOBJ(drawobj);
- unsigned long pending;
+ unsigned long pending = 0;
unsigned int i;
/* Zap the canary timer */
del_timer_sync(&syncobj->timer);
/*
- * Copy off the pending list and clear all pending events - this will
- * render any subsequent asynchronous callback harmless
+ * Copy off the pending list and clear each pending event atomically -
+ * this will render any subsequent asynchronous callback harmless.
+ * This marks each event for deletion. If any pending fence callbacks
+ * run between now and the actual cancel, the associated structures
+ * are kfreed only in the cancel call.
*/
- bitmap_copy(&pending, &syncobj->pending, KGSL_MAX_SYNCPOINTS);
- bitmap_zero(&syncobj->pending, KGSL_MAX_SYNCPOINTS);
+ for_each_set_bit(i, &syncobj->pending, KGSL_MAX_SYNCPOINTS) {
+ if (test_and_clear_bit(i, &syncobj->pending))
+ __set_bit(i, &pending);
+ }
/*
* Clear all pending events - this will render any subsequent async
@@ -259,8 +268,8 @@
drawobj_sync_func, event);
break;
case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
- if (kgsl_sync_fence_async_cancel(event->handle))
- kgsl_drawobj_put(drawobj);
+ kgsl_sync_fence_async_cancel(event->handle);
+ kgsl_drawobj_put(drawobj);
break;
}
}
@@ -320,15 +329,21 @@
}
EXPORT_SYMBOL(kgsl_drawobj_destroy);
-static void drawobj_sync_fence_func(void *priv)
+static bool drawobj_sync_fence_func(void *priv)
{
struct kgsl_drawobj_sync_event *event = priv;
trace_syncpoint_fence_expire(event->syncobj, event->fence_name);
- drawobj_sync_expire(event->device, event);
-
- kgsl_drawobj_put(&event->syncobj->base);
+ /*
+ * Only call kgsl_drawobj_put() if it's not marked for cancellation
+ * in another thread.
+ */
+ if (drawobj_sync_expire(event->device, event)) {
+ kgsl_drawobj_put(&event->syncobj->base);
+ return true;
+ }
+ return false;
}
/* drawobj_add_sync_fence() - Add a new sync fence syncpoint
diff --git a/drivers/gpu/msm/kgsl_events.c b/drivers/gpu/msm/kgsl_events.c
index d042f05..759a966 100644
--- a/drivers/gpu/msm/kgsl_events.c
+++ b/drivers/gpu/msm/kgsl_events.c
@@ -32,7 +32,7 @@
{
list_del(&event->node);
event->result = result;
- queue_work(device->events_wq, &event->work);
+ kthread_queue_work(&kgsl_driver.worker, &event->work);
}
/**
@@ -42,7 +42,7 @@
* Each event callback has its own work struct and is run on a event specific
* workqeuue. This is the worker that queues up the event callback function.
*/
-static void _kgsl_event_worker(struct work_struct *work)
+static void _kgsl_event_worker(struct kthread_work *work)
{
struct kgsl_event *event = container_of(work, struct kgsl_event, work);
int id = KGSL_CONTEXT_ID(event->context);
@@ -286,7 +286,7 @@
event->created = jiffies;
event->group = group;
- INIT_WORK(&event->work, _kgsl_event_worker);
+ kthread_init_work(&event->work, _kgsl_event_worker);
trace_kgsl_register_event(KGSL_CONTEXT_ID(context), timestamp, func);
@@ -301,7 +301,7 @@
if (timestamp_cmp(retired, timestamp) >= 0) {
event->result = KGSL_EVENT_RETIRED;
- queue_work(device->events_wq, &event->work);
+ kthread_queue_work(&kgsl_driver.worker, &event->work);
spin_unlock(&group->lock);
return 0;
}
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index 324840d..36fdd51 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -436,6 +436,7 @@
struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
int perf_idx = INVALID_DCVS_IDX, bw_idx = INVALID_DCVS_IDX;
if (gpu_pwrlevel < gmu->num_gpupwrlevels)
@@ -448,6 +449,10 @@
(bw_idx == INVALID_DCVS_IDX))
return -EINVAL;
+ if (bw_idx == INVALID_DCVS_IDX)
+ /* Use default BW, algorithm changes on V2 */
+ bw_idx = pwr->pwrlevels[gpu_pwrlevel].bus_freq;
+
if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
return gpudev->rpmh_gpu_pwrctrl(adreno_dev,
GMU_DCVS_NOHFI, perf_idx, bw_idx);
@@ -500,7 +505,11 @@
}
cmd_db_get_aux_data(res_id, (uint8_t *)arc->val, len);
- arc->num = len >> 1;
+ for (arc->num = 1; arc->num <= MAX_GX_LEVELS; arc->num++) {
+ if (arc->num == MAX_GX_LEVELS ||
+ arc->val[arc->num - 1] >= arc->val[arc->num])
+ break;
+ }
return 0;
}
@@ -523,35 +532,42 @@
{
int i, j, k;
uint16_t cur_vlvl;
+ bool found_match;
/* i tracks current KGSL GPU frequency table entry
* j tracks second rail voltage table entry
* k tracks primary rail voltage table entry
*/
- for (i = 0, k = 0; i < num_entries; k++) {
- if (pri_rail->val[k] != vlvl[i]) {
- if (k >= pri_rail->num)
- return -EINVAL;
- continue;
- }
- votes[i].pri_idx = k;
- votes[i].vlvl = vlvl[i];
- cur_vlvl = vlvl[i];
+ for (i = 0; i < num_entries; i++) {
+ found_match = false;
- /* find index of second rail vlvl array element that
- * its vlvl >= current vlvl of primary rail
- */
- for (j = 0; j < sec_rail->num; j++) {
- if (sec_rail->val[j] >= cur_vlvl) {
- votes[i].sec_idx = j;
+ /* Look for a primary rail voltage that matches a VLVL level */
+ for (k = 0; k < pri_rail->num; k++) {
+ if (pri_rail->val[k] == vlvl[i]) {
+ votes[i].pri_idx = k;
+ votes[i].vlvl = vlvl[i];
+ cur_vlvl = vlvl[i];
+ found_match = true;
break;
}
}
- if (j == sec_rail->num)
- votes[i].sec_idx = j;
+ /* If we did not find a matching VLVL level then abort */
+ if (!found_match)
+ return -EINVAL;
- i++;
+ /*
+ * Look for a secondary rail index whose VLVL value
+ * is greater than or equal to the VLVL value of the
+ * corresponding index of the primary rail
+ */
+ for (j = 0; j < sec_rail->num; j++) {
+ if (sec_rail->val[j] >= cur_vlvl ||
+ j + 1 == sec_rail->num) {
+ votes[i].sec_idx = j;
+ break;
+ }
+ }
}
return 0;
}
@@ -570,26 +586,25 @@
struct rpmh_arc_vals *sec_rail,
unsigned int type)
{
+ struct device *dev;
+ struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
unsigned int num_freqs;
struct arc_vote_desc *votes;
unsigned int vlvl_tbl[MAX_GX_LEVELS];
unsigned int *freq_tbl;
int i, ret;
- /*
- * FIXME: remove below two arrays after OPP VLVL query API ready
- * struct dev_pm_opp *opp;
- */
- uint16_t gpu_vlvl[] = {0, 128, 256, 384};
- uint16_t cx_vlvl[] = {0, 48, 256};
+ struct dev_pm_opp *opp;
if (type == GPU_ARC_VOTE) {
num_freqs = gmu->num_gpupwrlevels;
votes = gmu->rpmh_votes.gx_votes;
- freq_tbl = gmu->gmu_freqs;
+ freq_tbl = gmu->gpu_freqs;
+ dev = &device->pdev->dev;
} else if (type == GMU_ARC_VOTE) {
num_freqs = gmu->num_gmupwrlevels;
votes = gmu->rpmh_votes.cx_votes;
- freq_tbl = gmu->gpu_freqs;
+ freq_tbl = gmu->gmu_freqs;
+ dev = &gmu->pdev->dev;
} else {
return -EINVAL;
}
@@ -601,26 +616,25 @@
return -EINVAL;
}
- /*
- * FIXME: Find a core's voltage VLVL value based on its frequency
- * using OPP framework, waiting for David Colin, ETA Jan.
- */
+ memset(vlvl_tbl, 0, sizeof(vlvl_tbl));
for (i = 0; i < num_freqs; i++) {
- /*
- * opp = dev_pm_opp_find_freq_exact(&gmu->pdev->dev,
- * freq_tbl[i], true);
- * if (IS_ERR(opp)) {
- * dev_err(&gmu->pdev->dev,
- * "Failed to find opp freq %d of %s\n",
- * freq_tbl[i], debug_strs[type]);
- * return PTR_ERR(opp);
- * }
- * vlvl_tbl[i] = dev_pm_opp_get_voltage(opp);
- */
- if (type == GPU_ARC_VOTE)
- vlvl_tbl[i] = gpu_vlvl[i];
- else
- vlvl_tbl[i] = cx_vlvl[i];
+ /* Hardcode VLVL for 0 because it is not registered in OPP */
+ if (freq_tbl[i] == 0) {
+ vlvl_tbl[i] = 0;
+ continue;
+ }
+
+ /* Otherwise get the value from the OPP API */
+ opp = dev_pm_opp_find_freq_exact(dev, freq_tbl[i], true);
+ if (IS_ERR(opp)) {
+ dev_err(&gmu->pdev->dev,
+ "Failed to find opp freq %d of %s\n",
+ freq_tbl[i], debug_strs[type]);
+ return PTR_ERR(opp);
+ }
+
+ /* Values from OPP framework are offset by 1 */
+ vlvl_tbl[i] = dev_pm_opp_get_voltage(opp) - 1;
}
ret = setup_volt_dependency_tbl(votes,
@@ -1125,6 +1139,7 @@
goto error;
gmu->num_gpupwrlevels = pwr->num_pwrlevels;
+ gmu->wakeup_pwrlevel = pwr->default_pwrlevel;
for (i = 0; i < gmu->num_gpupwrlevels; i++) {
int j = gmu->num_gpupwrlevels - 1 - i;
@@ -1161,6 +1176,8 @@
else
gmu->idle_level = GPU_HW_ACTIVE;
+ /* disable LM during boot time */
+ clear_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag);
return 0;
error:
@@ -1201,18 +1218,11 @@
static int gmu_disable_clks(struct gmu_device *gmu)
{
- int ret, j = 0;
+ int j = 0;
if (IS_ERR_OR_NULL(gmu->clks[0]))
return 0;
- ret = clk_set_rate(gmu->clks[0], gmu->gmu_freqs[0]);
- if (ret) {
- dev_err(&gmu->pdev->dev, "fail to reset GMU clk freq %d\n",
- gmu->gmu_freqs[0]);
- return ret;
- }
-
while ((j < MAX_GMU_CLKS) && gmu->clks[j]) {
clk_disable_unprepare(gmu->clks[j]);
j++;
@@ -1325,15 +1335,44 @@
return 0;
}
+static void gmu_snapshot(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct gmu_device *gmu = &device->gmu;
+
+ if (!test_and_set_bit(GMU_FAULT, &gmu->flags)) {
+ /* Mask so there's no interrupt caused by NMI */
+ adreno_write_gmureg(adreno_dev,
+ ADRENO_REG_GMU_GMU2HOST_INTR_MASK, 0xFFFFFFFF);
+
+ /* Make sure the interrupt is masked before causing it */
+ wmb();
+ adreno_write_gmureg(adreno_dev,
+ ADRENO_REG_GMU_NMI_CONTROL_STATUS, 0);
+ adreno_write_gmureg(adreno_dev,
+ ADRENO_REG_GMU_CM3_CFG, (1 << 9));
+
+ /* Wait for the NMI to be handled */
+ wmb();
+ udelay(100);
+ kgsl_device_snapshot(device, NULL);
+
+ adreno_write_gmureg(adreno_dev,
+ ADRENO_REG_GMU_GMU2HOST_INTR_CLR, 0xFFFFFFFF);
+ adreno_write_gmureg(adreno_dev,
+ ADRENO_REG_GMU_GMU2HOST_INTR_MASK,
+ (unsigned int) ~HFI_IRQ_MASK);
+ }
+}
+
/* To be called to power on both GPU and GMU */
int gmu_start(struct kgsl_device *device)
{
- int ret = 0, perf_idx;
+ int ret = 0;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
struct gmu_device *gmu = &device->gmu;
- int bus_level = pwr->pwrlevels[pwr->default_pwrlevel].bus_freq;
switch (device->state) {
case KGSL_STATE_INIT:
@@ -1342,13 +1381,9 @@
gmu_enable_gdsc(gmu);
gmu_enable_clks(gmu);
- /* Convert to RPMh frequency index */
- perf_idx = gmu->num_gpupwrlevels -
- pwr->default_pwrlevel - 1;
-
/* Vote for 300MHz DDR for GMU to init */
ret = msm_bus_scale_client_update_request(gmu->pcl,
- bus_level);
+ pwr->pwrlevels[pwr->default_pwrlevel].bus_freq);
if (ret) {
dev_err(&gmu->pdev->dev,
"Failed to allocate gmu b/w\n");
@@ -1367,7 +1402,8 @@
goto error_gpu;
/* Send default DCVS level */
- ret = gmu_dcvs_set(gmu, perf_idx, bus_level);
+ ret = gmu_dcvs_set(gmu, pwr->default_pwrlevel,
+ pwr->pwrlevels[pwr->default_pwrlevel].bus_freq);
if (ret)
goto error_gpu;
@@ -1379,8 +1415,6 @@
gmu_enable_gdsc(gmu);
gmu_enable_clks(gmu);
- perf_idx = gmu->num_gpupwrlevels - gmu->wakeup_pwrlevel - 1;
-
ret = gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_START,
GMU_WARM_BOOT, 0);
if (ret)
@@ -1392,12 +1426,12 @@
if (ret)
goto error_gpu;
- if (gmu->wakeup_pwrlevel != pwr->default_pwrlevel) {
- ret = gmu_dcvs_set(gmu, perf_idx, bus_level);
- if (ret)
- goto error_gpu;
- gmu->wakeup_pwrlevel = pwr->default_pwrlevel;
- }
+ ret = gmu_dcvs_set(gmu, gmu->wakeup_pwrlevel,
+ pwr->pwrlevels[gmu->wakeup_pwrlevel].bus_freq);
+ if (ret)
+ goto error_gpu;
+
+ gmu->wakeup_pwrlevel = pwr->default_pwrlevel;
break;
case KGSL_STATE_RESET:
@@ -1406,11 +1440,6 @@
gmu_enable_gdsc(gmu);
gmu_enable_clks(gmu);
- perf_idx = gmu->num_gpupwrlevels -
- pwr->active_pwrlevel - 1;
-
- bus_level =
- pwr->pwrlevels[pwr->active_pwrlevel].bus_freq;
ret = gpudev->rpmh_gpu_pwrctrl(
adreno_dev, GMU_FW_START, GMU_RESET, 0);
if (ret)
@@ -1423,7 +1452,9 @@
goto error_gpu;
/* Send DCVS level prior to reset*/
- ret = gmu_dcvs_set(gmu, perf_idx, bus_level);
+ ret = gmu_dcvs_set(gmu, pwr->active_pwrlevel,
+ pwr->pwrlevels[pwr->active_pwrlevel]
+ .bus_freq);
if (ret)
goto error_gpu;
@@ -1432,19 +1463,13 @@
OOB_CPINIT_CHECK_MASK,
OOB_CPINIT_CLEAR_MASK);
- } else {
+ } else
gmu_fast_boot(device);
- }
break;
default:
break;
}
- /*
- * OOB to enable power management of GMU.
- * In v2, this function call shall move ahead
- * of hfi_start() to save power.
- */
if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
gpudev->oob_clear(adreno_dev,
OOB_BOOT_SLUMBER_CLEAR_MASK);
@@ -1452,15 +1477,17 @@
return ret;
error_gpu:
+ gmu_snapshot(device);
hfi_stop(gmu);
gmu_irq_disable(device);
- if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
- gpudev->oob_clear(adreno_dev,
- OOB_BOOT_SLUMBER_CLEAR_MASK);
+ if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
+ gpudev->oob_clear(adreno_dev,
+ OOB_BOOT_SLUMBER_CLEAR_MASK);
gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_STOP, 0, 0);
error_bus:
- msm_bus_scale_client_update_request(gmu->pcl, 0);
+ msm_bus_scale_client_update_request(gmu->pcl, 0);
error_clks:
+ gmu_snapshot(device);
gmu_disable_clks(gmu);
gmu_disable_gdsc(gmu);
return ret;
diff --git a/drivers/gpu/msm/kgsl_gmu.h b/drivers/gpu/msm/kgsl_gmu.h
index a741beb..63ca028 100644
--- a/drivers/gpu/msm/kgsl_gmu.h
+++ b/drivers/gpu/msm/kgsl_gmu.h
@@ -82,6 +82,7 @@
GMU_BOOT_INIT_DONE = 0,
GMU_CLK_ON = 1,
GMU_HFI_ON = 2,
+ GMU_FAULT = 3
};
/**
diff --git a/drivers/gpu/msm/kgsl_hfi.c b/drivers/gpu/msm/kgsl_hfi.c
index 067b276..68e0f3a 100644
--- a/drivers/gpu/msm/kgsl_hfi.c
+++ b/drivers/gpu/msm/kgsl_hfi.c
@@ -177,6 +177,7 @@
{
struct kgsl_hfi *hfi = &gmu->hfi;
struct pending_msg *msg = NULL, *next;
+ bool in_queue = false;
trace_kgsl_hfi_receive(rsp->ret_hdr.id,
rsp->ret_hdr.size,
@@ -185,11 +186,13 @@
spin_lock(&hfi->msglock);
list_for_each_entry_safe(msg, next, &hfi->msglist, node) {
if (msg->msg_id == rsp->ret_hdr.id &&
- msg->seqnum == rsp->ret_hdr.seqnum)
+ msg->seqnum == rsp->ret_hdr.seqnum) {
+ in_queue = true;
break;
+ }
}
- if (msg == NULL) {
+ if (in_queue == false) {
spin_unlock(&hfi->msglock);
dev_err(&gmu->pdev->dev,
"Cannot find receiver of ack msg with id=%d\n",
@@ -599,15 +602,23 @@
if (result)
return result;
- /*
- * FW is not ready for LM configuration
- * without powering on GPU.
- */
- /*
- * result = hfi_send_lmconfig(gmu);
- * if (result)
- * return result;
- */
+ if (ADRENO_FEATURE(adreno_dev, ADRENO_LM) &&
+ test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag)) {
+ gmu->lm_config.lm_type = 1;
+ gmu->lm_config.lm_sensor_type = 1;
+ gmu->lm_config.throttle_config = 1;
+ gmu->lm_config.idle_throttle_en = 0;
+ gmu->lm_config.acd_en = 0;
+ gmu->bcl_config = 0;
+ gmu->lm_dcvs_level = 0;
+
+ result = hfi_send_lmconfig(gmu);
+ if (result) {
+			dev_err(dev, "Failure enabling limits management (%d)\n",
+ result);
+ return result;
+ }
+ }
set_bit(GMU_HFI_ON, &gmu->flags);
return 0;
diff --git a/drivers/gpu/msm/kgsl_hfi.h b/drivers/gpu/msm/kgsl_hfi.h
index 8eedbfa2..47d07d9 100644
--- a/drivers/gpu/msm/kgsl_hfi.h
+++ b/drivers/gpu/msm/kgsl_hfi.h
@@ -115,7 +115,7 @@
HFI_F2H_QPRI_DEBUG = 40,
};
-#define HFI_RSP_TIMEOUT 100 /* msec */
+#define HFI_RSP_TIMEOUT 500 /* msec */
#define HFI_H2F_CMD_IRQ_MASK BIT(0)
enum hfi_msg_type {
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 73c0d71..c02046a 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -34,6 +34,8 @@
#include "kgsl_trace.h"
#include "kgsl_pwrctrl.h"
+#define CP_APERTURE_REG 0
+
#define _IOMMU_PRIV(_mmu) (&((_mmu)->priv.iommu))
#define ADDR_IN_GLOBAL(_a) \
@@ -1220,6 +1222,19 @@
"System cache not enabled for GPU pagetable walks: %d\n", ret);
}
+static int program_smmu_aperture(unsigned int cb, unsigned int aperture_reg)
+{
+ struct scm_desc desc = {0};
+
+ desc.args[0] = 0xFFFF0000 | ((aperture_reg & 0xff) << 8) | (cb & 0xff);
+ desc.args[1] = 0xFFFFFFFF;
+ desc.args[2] = 0xFFFFFFFF;
+ desc.args[3] = 0xFFFFFFFF;
+ desc.arginfo = SCM_ARGS(4);
+
+ return scm_call2(SCM_SIP_FNID(SCM_SVC_MP, 0x1B), &desc);
+}
+
static int _init_global_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
{
int ret = 0;
@@ -1260,6 +1275,15 @@
goto done;
}
+ if (!MMU_FEATURE(mmu, KGSL_MMU_GLOBAL_PAGETABLE)) {
+ ret = program_smmu_aperture(cb_num, CP_APERTURE_REG);
+ if (ret) {
+ pr_err("SMMU aperture programming call failed with error %d\n",
+ ret);
+ return ret;
+ }
+ }
+
ctx->cb_num = cb_num;
ctx->regbase = iommu->regbase + KGSL_IOMMU_CB0_OFFSET
+ (cb_num << KGSL_IOMMU_CB_SHIFT);
diff --git a/drivers/gpu/msm/kgsl_pool.c b/drivers/gpu/msm/kgsl_pool.c
index bb92b8b..685ce3e 100644
--- a/drivers/gpu/msm/kgsl_pool.c
+++ b/drivers/gpu/msm/kgsl_pool.c
@@ -65,26 +65,19 @@
/* Map the page into kernel and zero it out */
static void
-_kgsl_pool_zero_page(struct page *p, unsigned int pool_order)
+_kgsl_pool_zero_page(struct page *p)
{
- int i;
+ void *addr = kmap_atomic(p);
- for (i = 0; i < (1 << pool_order); i++) {
- struct page *page = nth_page(p, i);
- void *addr = kmap_atomic(page);
-
- memset(addr, 0, PAGE_SIZE);
- dmac_flush_range(addr, addr + PAGE_SIZE);
- kunmap_atomic(addr);
- }
+ memset(addr, 0, PAGE_SIZE);
+ dmac_flush_range(addr, addr + PAGE_SIZE);
+ kunmap_atomic(addr);
}
/* Add a page to specified pool */
static void
_kgsl_pool_add_page(struct kgsl_page_pool *pool, struct page *p)
{
- _kgsl_pool_zero_page(p, pool->pool_order);
-
spin_lock(&pool->list_lock);
list_add_tail(&p->lru, &pool->page_list);
pool->page_count++;
@@ -280,6 +273,17 @@
return -ENOMEM;
}
+static int kgsl_pool_get_retry_order(unsigned int order)
+{
+ int i;
+
+ for (i = kgsl_num_pools-1; i > 0; i--)
+ if (order >= kgsl_pools[i].pool_order)
+ return kgsl_pools[i].pool_order;
+
+ return 0;
+}
+
/**
* kgsl_pool_alloc_page() - Allocate a page of requested size
* @page_size: Size of the page to be allocated
@@ -318,7 +322,6 @@
} else
return -ENOMEM;
}
- _kgsl_pool_zero_page(page, order);
goto done;
}
@@ -326,7 +329,7 @@
if (pool == NULL) {
/* Retry with lower order pages */
if (order > 0) {
- size = PAGE_SIZE << --order;
+ size = PAGE_SIZE << kgsl_pool_get_retry_order(order);
goto eagain;
} else {
/*
@@ -338,7 +341,6 @@
page = alloc_pages(gfp_mask, order);
if (page == NULL)
return -ENOMEM;
- _kgsl_pool_zero_page(page, order);
goto done;
}
}
@@ -368,13 +370,12 @@
} else
return -ENOMEM;
}
-
- _kgsl_pool_zero_page(page, order);
}
done:
for (j = 0; j < (*page_size >> PAGE_SHIFT); j++) {
p = nth_page(page, j);
+ _kgsl_pool_zero_page(p);
pages[pcount] = p;
pcount++;
}
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 5c53a05c..6fd6a05 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -224,7 +224,7 @@
{
struct gmu_device *gmu = &device->gmu;
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
- int ret;
+ int ret = 0;
/* GMU scales BW */
if (kgsl_gmu_isenabled(device)) {
@@ -232,7 +232,7 @@
return 0;
ret = gmu_dcvs_set(gmu, INVALID_DCVS_IDX, buslevel);
- } else {
+ } else if (pwr->pcl) {
/* Linux bus driver scales BW */
ret = msm_bus_scale_client_update_request(pwr->pcl, buslevel);
}
@@ -253,23 +253,30 @@
{
struct gmu_device *gmu = &device->gmu;
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+ struct kgsl_pwrlevel *pl = &pwr->pwrlevels[pwrlevel];
int ret = 0;
/* GMU scales GPU freq */
if (kgsl_gmu_isenabled(device)) {
/* If GMU has not been started, save it */
if (!(gmu->flags & GMU_HFI_ON)) {
+ /* In slumber the clock is off so we are done */
+ if (pwrlevel == (gmu->num_gpupwrlevels - 1))
+ return 0;
+
gmu->wakeup_pwrlevel = pwrlevel;
return 0;
}
+ /* If the GMU is on we cannot vote for the lowest level */
+ if (pwrlevel == (gmu->num_gpupwrlevels - 1)) {
+ WARN(1, "Cannot set 0 GPU frequency with GMU\n");
+ return -EINVAL;
+ }
ret = gmu_dcvs_set(gmu, pwrlevel, INVALID_DCVS_IDX);
- } else {
+ } else
/* Linux clock driver scales GPU freq */
- struct kgsl_pwrlevel *Pl = &pwr->pwrlevels[pwrlevel];
-
- ret = clk_set_rate(pwr->grp_clks[0], Pl->gpu_freq);
- }
+ ret = clk_set_rate(pwr->grp_clks[0], pl->gpu_freq);
if (ret)
KGSL_PWR_ERR(device, "GPU clk freq set failure\n");
@@ -291,7 +298,8 @@
unsigned long ab;
/* the bus should be ON to update the active frequency */
- if (on && !(test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)))
+ if (!(kgsl_gmu_isenabled(device)) && on &&
+ !(test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)))
return;
/*
* If the bus should remain on calculate our request and submit it,
@@ -321,9 +329,7 @@
msm_bus_scale_client_update_request(pwr->ocmem_pcl,
on ? pwr->active_pwrlevel : pwr->num_pwrlevels - 1);
- /* vote for bus if gpubw-dev support is not enabled */
- if (pwr->pcl)
- kgsl_bus_scale_request(device, buslevel);
+ kgsl_bus_scale_request(device, buslevel);
kgsl_pwrctrl_vbif_update(ab);
}
@@ -1049,6 +1055,8 @@
if (on) {
switch (flag) {
case KGSL_PWRFLAGS_CLK_ON:
+ /* make sure pwrrail is ON before enabling clocks */
+ kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON,
KGSL_STATE_ACTIVE);
break;
@@ -1784,8 +1792,6 @@
{
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
- if (kgsl_gmu_isenabled(device))
- return;
if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->ctrl_flags))
return;
@@ -1854,7 +1860,12 @@
if (kgsl_gmu_isenabled(device))
return 0;
- if (test_bit(KGSL_PWRFLAGS_POWER_ON, &pwr->ctrl_flags))
+ /*
+ * Disabling the regulator means also disabling dependent clocks.
+ * Hence don't disable it if force clock ON is set.
+ */
+ if (test_bit(KGSL_PWRFLAGS_POWER_ON, &pwr->ctrl_flags) ||
+ test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->ctrl_flags))
return 0;
if (state == KGSL_PWRFLAGS_OFF) {
@@ -2362,9 +2373,24 @@
|| device->state == KGSL_STATE_NAP) {
if (!atomic_read(&device->active_cnt)) {
+ spin_lock(&device->submit_lock);
+ if (device->submit_now) {
+ spin_unlock(&device->submit_lock);
+ goto done;
+ }
+ /* Don't allow GPU inline submission in SLUMBER */
+ if (requested_state == KGSL_STATE_SLUMBER)
+ device->slumber = true;
+ spin_unlock(&device->submit_lock);
+
ret = kgsl_pwrctrl_change_state(device,
device->requested_state);
if (ret == -EBUSY) {
+ if (requested_state == KGSL_STATE_SLUMBER) {
+ spin_lock(&device->submit_lock);
+ device->slumber = false;
+ spin_unlock(&device->submit_lock);
+ }
/*
* If the GPU is currently busy, restore
* the requested state and reschedule
@@ -2375,7 +2401,7 @@
kgsl_schedule_work(&device->idle_check_ws);
}
}
-
+done:
if (!ret)
kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
@@ -2449,8 +2475,13 @@
kgsl_pwrctrl_pwrlevel_change(device, level);
- if (kgsl_gmu_isenabled(device))
- return gmu_start(device);
+ if (kgsl_gmu_isenabled(device)) {
+ int ret = gmu_start(device);
+
+ if (!ret)
+ kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
+ return ret;
+ }
/* Order pwrrail/clk sequence based upon platform */
status = kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
@@ -2463,8 +2494,10 @@
static void kgsl_pwrctrl_disable(struct kgsl_device *device)
{
- if (kgsl_gmu_isenabled(device))
+ if (kgsl_gmu_isenabled(device)) {
+ kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
return gmu_stop(device);
+ }
/* Order pwrrail/clk sequence based upon platform */
device->ftbl->regulator_disable(device);
@@ -2835,6 +2868,13 @@
trace_kgsl_pwr_set_state(device, state);
device->state = state;
device->requested_state = KGSL_STATE_NONE;
+
+ spin_lock(&device->submit_lock);
+ if (state == KGSL_STATE_SLUMBER || state == KGSL_STATE_SUSPEND)
+ device->slumber = true;
+ else
+ device->slumber = false;
+ spin_unlock(&device->submit_lock);
}
static void kgsl_pwrctrl_request_state(struct kgsl_device *device,
diff --git a/drivers/gpu/msm/kgsl_pwrscale.c b/drivers/gpu/msm/kgsl_pwrscale.c
index 7636a42..6fb81ee 100644
--- a/drivers/gpu/msm/kgsl_pwrscale.c
+++ b/drivers/gpu/msm/kgsl_pwrscale.c
@@ -530,7 +530,8 @@
struct kgsl_device *device = dev_get_drvdata(dev);
struct kgsl_pwrctrl *pwr;
struct kgsl_pwrlevel *pwr_level;
- int level, i;
+ int level;
+ unsigned int i;
unsigned long cur_freq, rec_freq;
struct dev_pm_opp *opp;
@@ -574,7 +575,12 @@
/* If the governor recommends a new frequency, update it here */
if (rec_freq != cur_freq) {
level = pwr->max_pwrlevel;
- for (i = pwr->min_pwrlevel; i >= pwr->max_pwrlevel; i--)
+ /*
+ * Array index of pwrlevels[] should be within the permitted
+ * power levels, i.e., from max_pwrlevel to min_pwrlevel.
+ */
+ for (i = pwr->min_pwrlevel; (i >= pwr->max_pwrlevel
+ && i <= pwr->min_pwrlevel); i--)
if (rec_freq <= pwr->pwrlevels[i].gpu_freq) {
if (pwr->thermal_cycle == CYCLE_ACTIVE)
level = _thermal_adjust(pwr, i);
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index eb67657..5061f6a 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -20,6 +20,7 @@
#include <linux/scatterlist.h>
#include <soc/qcom/scm.h>
#include <soc/qcom/secure_buffer.h>
+#include <linux/ratelimit.h>
#include "kgsl.h"
#include "kgsl_sharedmem.h"
@@ -700,6 +701,10 @@
size_t len;
unsigned int align;
+ static DEFINE_RATELIMIT_STATE(_rs,
+ DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+
size = PAGE_ALIGN(size);
if (size == 0 || size > UINT_MAX)
return -EINVAL;
@@ -736,6 +741,8 @@
*/
memdesc->pages = kgsl_malloc(len_alloc * sizeof(struct page *));
+ memdesc->page_count = 0;
+ memdesc->size = 0;
if (memdesc->pages == NULL) {
ret = -ENOMEM;
@@ -762,7 +769,8 @@
*/
memdesc->size = (size - len);
- if (sharedmem_noretry_flag != true)
+ if (sharedmem_noretry_flag != true &&
+ __ratelimit(&_rs))
KGSL_CORE_ERR(
"Out of memory: only allocated %lldKB of %lldKB requested\n",
(size - len) >> 10, size >> 10);
diff --git a/drivers/gpu/msm/kgsl_sync.c b/drivers/gpu/msm/kgsl_sync.c
index 96873c4..8f8e3e9 100644
--- a/drivers/gpu/msm/kgsl_sync.c
+++ b/drivers/gpu/msm/kgsl_sync.c
@@ -52,6 +52,10 @@
fence_init(&kfence->fence, &kgsl_sync_fence_ops, &ktimeline->lock,
ktimeline->fence_context, timestamp);
+ /*
+ * sync_file_create() takes a refcount to the fence. This refcount is
+ * put when the fence is signaled.
+ */
kfence->sync_file = sync_file_create(&kfence->fence);
if (kfence->sync_file == NULL) {
@@ -61,9 +65,6 @@
return NULL;
}
- /* Get a refcount to the fence. Put when signaled */
- fence_get(&kfence->fence);
-
spin_lock_irqsave(&ktimeline->lock, flags);
list_add_tail(&kfence->child_list, &ktimeline->child_list_head);
spin_unlock_irqrestore(&ktimeline->lock, flags);
@@ -426,9 +427,14 @@
{
struct kgsl_sync_fence_cb *kcb = (struct kgsl_sync_fence_cb *)cb;
- kcb->func(kcb->priv);
- fence_put(kcb->fence);
- kfree(kcb);
+ /*
+ * If the callback is marked for cancellation in a separate thread,
+ * let the other thread do the cleanup.
+ */
+ if (kcb->func(kcb->priv)) {
+ fence_put(kcb->fence);
+ kfree(kcb);
+ }
}
static void kgsl_get_fence_name(struct fence *fence,
@@ -451,7 +457,7 @@
}
struct kgsl_sync_fence_cb *kgsl_sync_fence_async_wait(int fd,
- void (*func)(void *priv), void *priv, char *fence_name, int name_len)
+ bool (*func)(void *priv), void *priv, char *fence_name, int name_len)
{
struct kgsl_sync_fence_cb *kcb;
struct fence *fence;
@@ -491,17 +497,24 @@
return kcb;
}
-int kgsl_sync_fence_async_cancel(struct kgsl_sync_fence_cb *kcb)
+/*
+ * Cancel the fence async callback and do the cleanup. The caller must make
+ * sure that the callback (if run before cancelling) returns false, so that
+ * no other thread frees the pointer.
+ */
+void kgsl_sync_fence_async_cancel(struct kgsl_sync_fence_cb *kcb)
{
if (kcb == NULL)
- return 0;
+ return;
- if (fence_remove_callback(kcb->fence, &kcb->fence_cb)) {
- fence_put(kcb->fence);
- kfree(kcb);
- return 1;
- }
- return 0;
+ /*
+ * After fence_remove_callback() returns, the fence callback is
+ * either not called at all, or completed without freeing kcb.
+ * This thread can then put the fence refcount and free kcb.
+ */
+ fence_remove_callback(kcb->fence, &kcb->fence_cb);
+ fence_put(kcb->fence);
+ kfree(kcb);
}
struct kgsl_syncsource {
@@ -707,6 +720,14 @@
list_add_tail(&sfence->child_list, &syncsource->child_list_head);
spin_unlock(&syncsource->lock);
out:
+ /*
+ * We're transferring ownership of the fence to the sync file.
+ * The sync file takes an extra refcount when it is created, so put
+ * our refcount.
+ */
+ if (sync_file)
+ fence_put(&sfence->fence);
+
if (ret) {
if (sync_file)
fput(sync_file->file);
diff --git a/drivers/gpu/msm/kgsl_sync.h b/drivers/gpu/msm/kgsl_sync.h
index 99fe0e1..d58859d 100644
--- a/drivers/gpu/msm/kgsl_sync.h
+++ b/drivers/gpu/msm/kgsl_sync.h
@@ -68,13 +68,14 @@
* fence_cb: Fence callback struct
* fence: Pointer to the fence for which the callback is done
* priv: Private data for the callback
- * func: Pointer to the kgsl function to call
+ * func: Pointer to the kgsl function to call. This function should return
+ * false if the sync callback is marked for cancellation in a separate thread.
*/
struct kgsl_sync_fence_cb {
struct fence_cb fence_cb;
struct fence *fence;
void *priv;
- void (*func)(void *priv);
+ bool (*func)(void *priv);
};
struct kgsl_syncsource;
@@ -91,10 +92,10 @@
void kgsl_sync_timeline_put(struct kgsl_sync_timeline *ktimeline);
struct kgsl_sync_fence_cb *kgsl_sync_fence_async_wait(int fd,
- void (*func)(void *priv), void *priv,
+ bool (*func)(void *priv), void *priv,
char *fence_name, int name_len);
-int kgsl_sync_fence_async_cancel(struct kgsl_sync_fence_cb *kcb);
+void kgsl_sync_fence_async_cancel(struct kgsl_sync_fence_cb *kcb);
long kgsl_ioctl_syncsource_create(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data);
@@ -143,10 +144,9 @@
return NULL;
}
-static inline int
+static inline void
kgsl_sync_fence_async_cancel(struct kgsl_sync_fence_cb *kcb)
{
- return 1;
}
static inline long
diff --git a/drivers/gpu/msm/kgsl_trace.h b/drivers/gpu/msm/kgsl_trace.h
index 1767810..c7690a1 100644
--- a/drivers/gpu/msm/kgsl_trace.h
+++ b/drivers/gpu/msm/kgsl_trace.h
@@ -820,14 +820,14 @@
TRACE_EVENT(kgsl_mmu_pagefault,
- TP_PROTO(struct kgsl_device *device, unsigned int page,
+ TP_PROTO(struct kgsl_device *device, unsigned long page,
unsigned int pt, const char *op),
TP_ARGS(device, page, pt, op),
TP_STRUCT__entry(
__string(device_name, device->name)
- __field(unsigned int, page)
+ __field(unsigned long, page)
__field(unsigned int, pt)
__string(op, op)
),
@@ -840,7 +840,7 @@
),
TP_printk(
- "d_name=%s page=0x%08x pt=%u op=%s",
+ "d_name=%s page=0x%lx pt=%u op=%s",
__get_str(device_name), __entry->page, __entry->pt,
__get_str(op)
)
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 2e04608..cb2e85c 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -440,6 +440,9 @@
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ANSI),
.driver_data = APPLE_HAS_FN },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_ALU_ANSI),
+ .driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ISO),
.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_JIS),
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index a5dd7e6..d7f6cf0 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1791,6 +1791,8 @@
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ANSI) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_ALU_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) },
@@ -2056,7 +2058,6 @@
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index da93077..cfca43f 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -311,6 +311,9 @@
#define USB_VENDOR_ID_DELCOM 0x0fc5
#define USB_DEVICE_ID_DELCOM_VISUAL_IND 0xb080
+#define USB_VENDOR_ID_DELL 0x413c
+#define USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE 0x301a
+
#define USB_VENDOR_ID_DELORME 0x1163
#define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100
#define USB_DEVICE_ID_DELORME_EM_LT20 0x0200
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index b1bce80..8008e06 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -427,6 +427,15 @@
if (ret)
goto out_unlock;
+ /*
+ * The HID over I2C specification states that if a DEVICE needs time
+ * after the PWR_ON request, it should utilise CLOCK stretching.
+	 * However, it has been observed that the Windows driver provides a
+ * 1ms sleep between the PWR_ON and RESET requests and that some devices
+ * rely on this.
+ */
+ usleep_range(1000, 5000);
+
i2c_hid_dbg(ihid, "resetting...\n");
ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0);
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 97dbb25..2b16207 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -81,6 +81,7 @@
{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_STRAFE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hwtracing/coresight/coresight-ost.c b/drivers/hwtracing/coresight/coresight-ost.c
index 63fea00..3399c27 100644
--- a/drivers/hwtracing/coresight/coresight-ost.c
+++ b/drivers/hwtracing/coresight/coresight-ost.c
@@ -12,6 +12,7 @@
#include <linux/device.h>
#include <linux/bitmap.h>
+#include <linux/io.h>
#include "coresight-ost.h"
#define STM_USERSPACE_HEADER_SIZE (8)
@@ -54,19 +55,40 @@
return ch;
}
-static int stm_ost_send(void *addr, const void *data, uint32_t count)
+static int stm_ost_send(void __iomem *addr, const void *data, uint32_t size)
{
- struct stm_drvdata *drvdata = stmdrvdata;
- const unsigned char *p = data;
- size_t pos;
- ssize_t sz;
+ uint32_t len = size;
- for (pos = 0, p = data; count > pos; pos += sz, p += sz) {
- sz = min_t(unsigned int, count - pos, drvdata->write_bytes);
- stm_send(addr, p, sz, drvdata->write_bytes);
+ if (((unsigned long)data & 0x1) && (size >= 1)) {
+ writeb_relaxed_no_log(*(uint8_t *)data, addr);
+ data++;
+ size--;
+ }
+ if (((unsigned long)data & 0x2) && (size >= 2)) {
+ writew_relaxed_no_log(*(uint16_t *)data, addr);
+ data += 2;
+ size -= 2;
}
- return count;
+ /* now we are 32bit aligned */
+ while (size >= 4) {
+ writel_relaxed_no_log(*(uint32_t *)data, addr);
+ data += 4;
+ size -= 4;
+ }
+
+ if (size >= 2) {
+ writew_relaxed_no_log(*(uint16_t *)data, addr);
+ data += 2;
+ size -= 2;
+ }
+ if (size >= 1) {
+ writeb_relaxed_no_log(*(uint8_t *)data, addr);
+ data++;
+ size--;
+ }
+
+ return len;
}
static void stm_channel_free(uint32_t ch)
@@ -76,10 +98,10 @@
clear_bit(ch, drvdata->chs.bitmap);
}
-static int stm_trace_ost_header(unsigned long ch_addr, uint32_t flags,
+static int stm_trace_ost_header(void __iomem *ch_addr, uint32_t flags,
uint8_t entity_id, uint8_t proto_id)
{
- void *addr;
+ void __iomem *addr;
uint32_t header;
char *hdr;
@@ -93,12 +115,13 @@
/* header is expected to be D32M type */
flags |= STM_FLAG_MARKED;
flags &= ~STM_FLAG_TIMESTAMPED;
- addr = (void *)(ch_addr | stm_channel_off(STM_PKT_TYPE_DATA, flags));
+ addr = (void __iomem *)(ch_addr +
+ stm_channel_off(STM_PKT_TYPE_DATA, flags));
return stm_ost_send(addr, &header, sizeof(header));
}
-static int stm_trace_data_header(void *addr)
+static int stm_trace_data_header(void __iomem *addr)
{
char hdr[16];
int len = 0;
@@ -114,14 +137,15 @@
return len;
}
-static int stm_trace_data(unsigned long ch_addr, uint32_t flags,
+static int stm_trace_data(void __iomem *ch_addr, uint32_t flags,
const void *data, uint32_t size)
{
- void *addr;
+ void __iomem *addr;
int len = 0;
flags &= ~STM_FLAG_TIMESTAMPED;
- addr = (void *)(ch_addr | stm_channel_off(STM_PKT_TYPE_DATA, flags));
+ addr = (void __iomem *)(ch_addr +
+ stm_channel_off(STM_PKT_TYPE_DATA, flags));
/* send the data header */
len += stm_trace_data_header(addr);
@@ -131,12 +155,13 @@
return len;
}
-static int stm_trace_ost_tail(unsigned long ch_addr, uint32_t flags)
+static int stm_trace_ost_tail(void __iomem *ch_addr, uint32_t flags)
{
- void *addr;
+ void __iomem *addr;
uint32_t tail = 0x0;
- addr = (void *)(ch_addr | stm_channel_off(STM_PKT_TYPE_FLAG, flags));
+ addr = (void __iomem *)(ch_addr +
+ stm_channel_off(STM_PKT_TYPE_FLAG, flags));
return stm_ost_send(addr, &tail, sizeof(tail));
}
@@ -147,7 +172,7 @@
struct stm_drvdata *drvdata = stmdrvdata;
int len = 0;
uint32_t ch;
- unsigned long ch_addr;
+ void __iomem *ch_addr;
/* allocate channel and get the channel address */
ch = stm_channel_alloc();
@@ -159,7 +184,7 @@
return 0;
}
- ch_addr = (unsigned long)stm_channel_addr(drvdata, ch);
+ ch_addr = (void __iomem *)stm_channel_addr(drvdata, ch);
/* send the ost header */
len += stm_trace_ost_header(ch_addr, flags, entity_id,
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index c8f2702e..159512c 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -211,6 +211,7 @@
if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
spin_lock(&drvdata->spinlock);
stm_disable_hw(drvdata);
+ drvdata->enable = false;
spin_unlock(&drvdata->spinlock);
/* Wait until the engine has completely stopped */
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index d6941ea..85fe87f 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -171,8 +171,11 @@
if (!used)
kfree(buf);
- if (!ret)
+ if (!ret) {
+ coresight_cti_map_trigin(drvdata->cti_reset, 2, 0);
+ coresight_cti_map_trigout(drvdata->cti_flush, 1, 0);
dev_info(drvdata->dev, "TMC-ETB/ETF enabled\n");
+ }
return ret;
}
@@ -244,6 +247,9 @@
spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ coresight_cti_unmap_trigin(drvdata->cti_reset, 2, 0);
+ coresight_cti_unmap_trigout(drvdata->cti_flush, 1, 0);
+
dev_info(drvdata->dev, "TMC-ETB/ETF disabled\n");
}
@@ -521,11 +527,13 @@
goto out;
}
- /* There is no point in reading a TMC in HW FIFO mode */
- mode = readl_relaxed(drvdata->base + TMC_MODE);
- if (mode != TMC_MODE_CIRCULAR_BUFFER) {
- ret = -EINVAL;
- goto out;
+ if (drvdata->enable) {
+ /* There is no point in reading a TMC in HW FIFO mode */
+ mode = readl_relaxed(drvdata->base + TMC_MODE);
+ if (mode != TMC_MODE_CIRCULAR_BUFFER) {
+ ret = -EINVAL;
+ goto out;
+ }
}
val = local_read(&drvdata->mode);
@@ -565,11 +573,13 @@
spin_lock_irqsave(&drvdata->spinlock, flags);
- /* There is no point in reading a TMC in HW FIFO mode */
- mode = readl_relaxed(drvdata->base + TMC_MODE);
- if (mode != TMC_MODE_CIRCULAR_BUFFER) {
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
- return -EINVAL;
+ if (drvdata->enable) {
+ /* There is no point in reading a TMC in HW FIFO mode */
+ mode = readl_relaxed(drvdata->base + TMC_MODE);
+ if (mode != TMC_MODE_CIRCULAR_BUFFER) {
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ return -EINVAL;
+ }
}
/* Re-enable the TMC if need be */
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 966a988..eb355f4 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -771,33 +771,49 @@
mutex_lock(&drvdata->mem_lock);
- /*
- * ETR DDR memory is not allocated until user enables
- * tmc at least once. If user specifies different ETR
- * DDR size than the default size or switches between
- * contiguous or scatter-gather memory type after
- * enabling tmc; the new selection will be honored from
- * next tmc enable session.
- */
- if (drvdata->size != drvdata->mem_size ||
- drvdata->memtype != drvdata->mem_type) {
- tmc_etr_free_mem(drvdata);
- drvdata->size = drvdata->mem_size;
- drvdata->memtype = drvdata->mem_type;
- }
- ret = tmc_etr_alloc_mem(drvdata);
- if (ret) {
- pm_runtime_put(drvdata->dev);
- mutex_unlock(&drvdata->mem_lock);
- return ret;
- }
- mutex_unlock(&drvdata->mem_lock);
-
spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading) {
ret = -EBUSY;
- goto out;
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ mutex_unlock(&drvdata->mem_lock);
+ return ret;
}
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM) {
+ /*
+ * ETR DDR memory is not allocated until user enables
+ * tmc at least once. If user specifies different ETR
+ * DDR size than the default size or switches between
+ * contiguous or scatter-gather memory type after
+ * enabling tmc; the new selection will be honored from
+ * next tmc enable session.
+ */
+ if (drvdata->size != drvdata->mem_size ||
+ drvdata->memtype != drvdata->mem_type) {
+ tmc_etr_free_mem(drvdata);
+ drvdata->size = drvdata->mem_size;
+ drvdata->memtype = drvdata->mem_type;
+ }
+ ret = tmc_etr_alloc_mem(drvdata);
+ if (ret) {
+ mutex_unlock(&drvdata->mem_lock);
+ return ret;
+ }
+ coresight_cti_map_trigout(drvdata->cti_flush, 3, 0);
+ coresight_cti_map_trigin(drvdata->cti_reset, 2, 0);
+ } else {
+ drvdata->usbch = usb_qdss_open("qdss", drvdata,
+ usb_notifier);
+ if (IS_ERR_OR_NULL(drvdata->usbch)) {
+ dev_err(drvdata->dev, "usb_qdss_open failed\n");
+ ret = PTR_ERR(drvdata->usbch);
+ mutex_unlock(&drvdata->mem_lock);
+ return ret;
+ }
+ }
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
val = local_xchg(&drvdata->mode, mode);
/*
@@ -808,9 +824,14 @@
if (val == CS_MODE_SYSFS)
goto out;
- tmc_etr_enable_hw(drvdata);
+ if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM)
+ tmc_etr_enable_hw(drvdata);
+
+ drvdata->enable = true;
+ drvdata->sticky_enable = true;
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ mutex_unlock(&drvdata->mem_lock);
if (!ret)
dev_info(drvdata->dev, "TMC-ETR enabled\n");
@@ -872,6 +893,7 @@
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ mutex_lock(&drvdata->mem_lock);
spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -880,11 +902,26 @@
val = local_xchg(&drvdata->mode, CS_MODE_DISABLED);
/* Disable the TMC only if it needs to */
- if (val != CS_MODE_DISABLED)
- tmc_etr_disable_hw(drvdata);
+ if (val != CS_MODE_DISABLED) {
+ if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB) {
+ __tmc_etr_disable_to_bam(drvdata);
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ tmc_etr_bam_disable(drvdata);
+ usb_qdss_close(drvdata->usbch);
+ goto out;
+ } else {
+ tmc_etr_disable_hw(drvdata);
+ }
+ }
spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM) {
+ coresight_cti_unmap_trigin(drvdata->cti_reset, 2, 0);
+ coresight_cti_unmap_trigout(drvdata->cti_flush, 3, 0);
+ }
+out:
+ mutex_unlock(&drvdata->mem_lock);
dev_info(drvdata->dev, "TMC-ETR disabled\n");
}
@@ -907,12 +944,18 @@
if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
return -EINVAL;
+ mutex_lock(&drvdata->mem_lock);
spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading) {
ret = -EBUSY;
goto out;
}
+ if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB) {
+ ret = -EINVAL;
+ goto out;
+ }
+
val = local_read(&drvdata->mode);
/* Don't interfere if operated from Perf */
if (val == CS_MODE_PERF) {
@@ -933,6 +976,7 @@
drvdata->reading = true;
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ mutex_unlock(&drvdata->mem_lock);
return ret;
}
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 077cb45..b97ebb8 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -142,7 +142,11 @@
{
struct tmc_drvdata *drvdata = container_of(file->private_data,
struct tmc_drvdata, miscdev);
- char *bufp = drvdata->buf + *ppos;
+ char *bufp;
+
+ mutex_lock(&drvdata->mem_lock);
+
+ bufp = drvdata->buf + *ppos;
if (*ppos + len > drvdata->len)
len = drvdata->len - *ppos;
@@ -165,6 +169,7 @@
if (copy_to_user(data, bufp, len)) {
dev_dbg(drvdata->dev, "%s: copy_to_user failed\n", __func__);
+ mutex_unlock(&drvdata->mem_lock);
return -EFAULT;
}
@@ -172,6 +177,8 @@
dev_dbg(drvdata->dev, "%s: %zu bytes copied, %d bytes left\n",
__func__, len, (int)(drvdata->len - *ppos));
+
+ mutex_unlock(&drvdata->mem_lock);
return len;
}
@@ -354,6 +361,9 @@
drvdata->out_mode = TMC_ETR_OUT_MODE_MEM;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ coresight_cti_map_trigout(drvdata->cti_flush, 3, 0);
+ coresight_cti_map_trigin(drvdata->cti_reset, 2, 0);
+
tmc_etr_bam_disable(drvdata);
usb_qdss_close(drvdata->usbch);
} else if (!strcmp(str, str_tmc_etr_out_mode[TMC_ETR_OUT_MODE_USB])) {
@@ -374,6 +384,9 @@
drvdata->out_mode = TMC_ETR_OUT_MODE_USB;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ coresight_cti_unmap_trigout(drvdata->cti_flush, 3, 0);
+ coresight_cti_unmap_trigin(drvdata->cti_reset, 2, 0);
+
drvdata->usbch = usb_qdss_open("qdss", drvdata,
usb_notifier);
if (IS_ERR(drvdata->usbch)) {
@@ -496,6 +509,7 @@
struct resource *res = &adev->res;
struct coresight_desc desc = { 0 };
struct device_node *np = adev->dev.of_node;
+ struct coresight_cti_data *ctidata;
pdata = of_get_coresight_platform_data(dev, np);
if (IS_ERR(pdata)) {
@@ -540,12 +554,26 @@
drvdata->memtype = TMC_ETR_MEM_TYPE_CONTIG;
drvdata->mem_size = drvdata->size;
drvdata->mem_type = drvdata->memtype;
+ drvdata->out_mode = TMC_ETR_OUT_MODE_MEM;
} else {
drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
}
pm_runtime_put(&adev->dev);
+ ctidata = of_get_coresight_cti_data(dev, adev->dev.of_node);
+ if (IS_ERR(ctidata)) {
+ dev_err(dev, "invalid cti data\n");
+ } else if (ctidata && ctidata->nr_ctis == 2) {
+ drvdata->cti_flush = coresight_cti_get(ctidata->names[0]);
+ if (IS_ERR(drvdata->cti_flush))
+ dev_err(dev, "failed to get flush cti\n");
+
+ drvdata->cti_reset = coresight_cti_get(ctidata->names[1]);
+ if (IS_ERR(drvdata->cti_reset))
+ dev_err(dev, "failed to get reset cti\n");
+ }
+
desc.pdata = pdata;
desc.dev = dev;
if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index a9de0e8..6643adc 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -25,6 +25,7 @@
#include <linux/amba/bus.h>
#include <linux/usb_bam.h>
#include <linux/usb/usb_qdss.h>
+#include <linux/coresight-cti.h>
#define TMC_RSZ 0x004
#define TMC_STS 0x00c
@@ -184,7 +185,8 @@
struct tmc_etr_bam_data *bamdata;
bool enable_to_bam;
bool sticky_enable;
-
+ struct coresight_cti *cti_flush;
+ struct coresight_cti *cti_reset;
};
/* Generic functions */
diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c
index 2492f90..81bbd78 100644
--- a/drivers/hwtracing/coresight/of_coresight.c
+++ b/drivers/hwtracing/coresight/of_coresight.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, 2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -23,7 +23,7 @@
#include <linux/coresight.h>
#include <linux/cpumask.h>
#include <asm/smp_plat.h>
-
+#include <linux/coresight-cti.h>
static int of_dev_node_match(struct device *dev, void *data)
{
@@ -196,3 +196,45 @@
return pdata;
}
EXPORT_SYMBOL_GPL(of_get_coresight_platform_data);
+
+struct coresight_cti_data *of_get_coresight_cti_data(
+ struct device *dev, struct device_node *node)
+{
+ int i, ret;
+ uint32_t ctis_len;
+ struct device_node *child_node;
+ struct coresight_cti_data *ctidata;
+
+ ctidata = devm_kzalloc(dev, sizeof(*ctidata), GFP_KERNEL);
+ if (!ctidata)
+ return ERR_PTR(-ENOMEM);
+
+ if (of_get_property(node, "coresight-ctis", &ctis_len))
+ ctidata->nr_ctis = ctis_len/sizeof(uint32_t);
+ else
+ return ERR_PTR(-EINVAL);
+
+ if (ctidata->nr_ctis) {
+ ctidata->names = devm_kzalloc(dev, ctidata->nr_ctis *
+ sizeof(*ctidata->names),
+ GFP_KERNEL);
+ if (!ctidata->names)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < ctidata->nr_ctis; i++) {
+ child_node = of_parse_phandle(node, "coresight-ctis",
+ i);
+ if (!child_node)
+ return ERR_PTR(-EINVAL);
+
+ ret = of_property_read_string(child_node,
+ "coresight-name",
+ &ctidata->names[i]);
+ of_node_put(child_node);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+ }
+ return ctidata;
+}
+EXPORT_SYMBOL(of_get_coresight_cti_data);
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
index 0652281..78792b4 100644
--- a/drivers/i2c/busses/i2c-brcmstb.c
+++ b/drivers/i2c/busses/i2c-brcmstb.c
@@ -465,6 +465,7 @@
u8 *tmp_buf;
int len = 0;
int xfersz = brcmstb_i2c_get_xfersz(dev);
+ u32 cond, cond_per_msg;
if (dev->is_suspended)
return -EBUSY;
@@ -481,10 +482,11 @@
pmsg->buf ? pmsg->buf[0] : '0', pmsg->len);
if (i < (num - 1) && (msgs[i + 1].flags & I2C_M_NOSTART))
- brcmstb_set_i2c_start_stop(dev, ~(COND_START_STOP));
+ cond = ~COND_START_STOP;
else
- brcmstb_set_i2c_start_stop(dev,
- COND_RESTART | COND_NOSTOP);
+ cond = COND_RESTART | COND_NOSTOP;
+
+ brcmstb_set_i2c_start_stop(dev, cond);
/* Send slave address */
if (!(pmsg->flags & I2C_M_NOSTART)) {
@@ -497,13 +499,24 @@
}
}
+ cond_per_msg = cond;
+
/* Perform data transfer */
while (len) {
bytes_to_xfer = min(len, xfersz);
- if (len <= xfersz && i == (num - 1))
- brcmstb_set_i2c_start_stop(dev,
- ~(COND_START_STOP));
+ if (len <= xfersz) {
+ if (i == (num - 1))
+ cond_per_msg = cond_per_msg &
+ ~(COND_RESTART | COND_NOSTOP);
+ else
+ cond_per_msg = cond;
+ } else {
+ cond_per_msg = (cond_per_msg & ~COND_RESTART) |
+ COND_NOSTOP;
+ }
+
+ brcmstb_set_i2c_start_stop(dev, cond_per_msg);
rc = brcmstb_i2c_xfer_bsc_data(dev, tmp_buf,
bytes_to_xfer, pmsg);
@@ -512,6 +525,8 @@
len -= bytes_to_xfer;
tmp_buf += bytes_to_xfer;
+
+ cond_per_msg = COND_NOSTART | COND_NOSTOP;
}
}
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index e34d82e..c21ca7b 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -58,7 +58,7 @@
#define SMBSLVDAT (0xC + piix4_smba)
/* count for request_region */
-#define SMBIOSIZE 8
+#define SMBIOSIZE 9
/* PCI Address Constants */
#define SMBBA 0x090
@@ -592,6 +592,8 @@
u8 port;
int retval;
+ mutex_lock(&piix4_mutex_sb800);
+
/* Request the SMBUS semaphore, avoid conflicts with the IMC */
smbslvcnt = inb_p(SMBSLVCNT);
do {
@@ -605,10 +607,10 @@
usleep_range(1000, 2000);
} while (--retries);
/* SMBus is still owned by the IMC, we give up */
- if (!retries)
+ if (!retries) {
+ mutex_unlock(&piix4_mutex_sb800);
return -EBUSY;
-
- mutex_lock(&piix4_mutex_sb800);
+ }
outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX);
smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
@@ -623,11 +625,11 @@
outb_p(smba_en_lo, SB800_PIIX4_SMB_IDX + 1);
- mutex_unlock(&piix4_mutex_sb800);
-
/* Release the semaphore */
outb_p(smbslvcnt | 0x20, SMBSLVCNT);
+ mutex_unlock(&piix4_mutex_sb800);
+
return retval;
}
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 051ab8e..946e0ba 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -85,6 +85,7 @@
int cur_rd;
struct device *wrapper_dev;
void *ipcl;
+ int clk_fld_idx;
};
struct geni_i2c_err_log {
@@ -109,46 +110,94 @@
[GENI_TIMEOUT] = {-ETIMEDOUT, "I2C TXN timed out"},
};
-static inline void qcom_geni_i2c_conf(void __iomem *base, int dfs, int div)
+struct geni_i2c_clk_fld {
+ u32 clk_freq_out;
+ u8 clk_div;
+ u8 t_high;
+ u8 t_low;
+ u8 t_cycle;
+};
+
+static struct geni_i2c_clk_fld geni_i2c_clk_map[] = {
+ {KHz(100), 7, 10, 11, 26},
+ {KHz(400), 2, 5, 12, 24},
+ {KHz(1000), 1, 3, 9, 18},
+};
+
+static int geni_i2c_clk_map_idx(struct geni_i2c_dev *gi2c)
{
- geni_write_reg(dfs, base, SE_GENI_CLK_SEL);
- geni_write_reg((div << 4) | 1, base, GENI_SER_M_CLK_CFG);
- geni_write_reg(((5 << 20) | (0xC << 10) | 0x18),
- base, SE_I2C_SCL_COUNTERS);
+ int i;
+ int ret = 0;
+ bool clk_map_present = false;
+ struct geni_i2c_clk_fld *itr = geni_i2c_clk_map;
+
+ for (i = 0; i < ARRAY_SIZE(geni_i2c_clk_map); i++, itr++) {
+ if (itr->clk_freq_out == gi2c->i2c_rsc.clk_freq_out) {
+ clk_map_present = true;
+ break;
+ }
+ }
+
+ if (clk_map_present)
+ gi2c->clk_fld_idx = i;
+ else
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static inline void qcom_geni_i2c_conf(struct geni_i2c_dev *gi2c, int dfs)
+{
+ struct geni_i2c_clk_fld *itr = geni_i2c_clk_map + gi2c->clk_fld_idx;
+
+ geni_write_reg(dfs, gi2c->base, SE_GENI_CLK_SEL);
+
+ geni_write_reg((itr->clk_div << 4) | 1, gi2c->base, GENI_SER_M_CLK_CFG);
+ geni_write_reg(((itr->t_high << 20) | (itr->t_low << 10) |
+ itr->t_cycle), gi2c->base, SE_I2C_SCL_COUNTERS);
+
/*
- * Ensure Clk config completes before return.
- */
+ * Ensure Clk config completes before return.
+ */
mb();
}
static void geni_i2c_err(struct geni_i2c_dev *gi2c, int err)
{
- u32 m_stat = readl_relaxed(gi2c->base + SE_GENI_M_IRQ_STATUS);
- u32 rx_st = readl_relaxed(gi2c->base + SE_GENI_RX_FIFO_STATUS);
- u32 tx_st = readl_relaxed(gi2c->base + SE_GENI_TX_FIFO_STATUS);
u32 m_cmd = readl_relaxed(gi2c->base + SE_GENI_M_CMD0);
+ u32 m_stat = readl_relaxed(gi2c->base + SE_GENI_M_IRQ_STATUS);
u32 geni_s = readl_relaxed(gi2c->base + SE_GENI_STATUS);
u32 geni_ios = readl_relaxed(gi2c->base + SE_GENI_IOS);
+ u32 dma = readl_relaxed(gi2c->base + SE_GENI_DMA_MODE_EN);
+ u32 rx_st, tx_st;
+
+ if (gi2c->cur)
+ GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
+ "len:%d, slv-addr:0x%x, RD/WR:%d\n", gi2c->cur->len,
+ gi2c->cur->addr, gi2c->cur->flags);
if (err == I2C_NACK || err == GENI_ABORT_DONE) {
GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev, "%s\n",
- gi2c_log[err].msg);
- GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
- "m_stat:0x%x, tx_stat:0x%x, rx_stat:0x%x, ",
- m_stat, tx_st, rx_st);
- GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
- "m_cmd:0x%x, geni_status:0x%x, geni_ios:0x%x\n",
- m_cmd, geni_s, geni_ios);
+ gi2c_log[err].msg);
+ goto err_ret;
} else {
GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev, "%s\n",
gi2c_log[err].msg);
- GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
- "m_stat:0x%x, tx_stat:0x%x, rx_stat:0x%x, ",
- m_stat, tx_st, rx_st);
- GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
+ }
+ if (dma) {
+ rx_st = readl_relaxed(gi2c->base + SE_DMA_RX_IRQ_STAT);
+ tx_st = readl_relaxed(gi2c->base + SE_DMA_TX_IRQ_STAT);
+ } else {
+ rx_st = readl_relaxed(gi2c->base + SE_GENI_RX_FIFO_STATUS);
+ tx_st = readl_relaxed(gi2c->base + SE_GENI_TX_FIFO_STATUS);
+ }
+ GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
+ "DMA:%d tx_stat:0x%x, rx_stat:0x%x, irq-stat:0x%x\n",
+ dma, tx_st, rx_st, m_stat);
+ GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
"m_cmd:0x%x, geni_status:0x%x, geni_ios:0x%x\n",
m_cmd, geni_s, geni_ios);
- }
+err_ret:
gi2c->err = gi2c_log[err].err;
}
@@ -185,7 +234,6 @@
if (!dma)
writel_relaxed(0, (gi2c->base +
SE_GENI_TX_WATERMARK_REG));
- gi2c->err = -EIO;
goto irqret;
}
@@ -276,7 +324,7 @@
pm_runtime_set_suspended(gi2c->dev);
return ret;
}
- qcom_geni_i2c_conf(gi2c->base, 0, 2);
+ qcom_geni_i2c_conf(gi2c, 0);
dev_dbg(gi2c->dev, "i2c xfer:num:%d, msgs:len:%d,flg:%d\n",
num, msgs[0].len, msgs[0].flags);
for (i = 0; i < num; i++) {
@@ -310,11 +358,12 @@
ret = geni_se_rx_dma_prep(gi2c->wrapper_dev,
gi2c->base, msgs[i].buf,
msgs[i].len, &rx_dma);
- if (ret)
+ if (ret) {
mode = FIFO_MODE;
+ ret = geni_se_select_mode(gi2c->base,
+ mode);
+ }
}
- if (mode == FIFO_MODE)
- geni_se_select_mode(gi2c->base, mode);
} else {
dev_dbg(gi2c->dev,
"WRITE:n:%d,i:%d len:%d, stretch:%d, m_param:0x%x\n",
@@ -327,15 +376,15 @@
ret = geni_se_tx_dma_prep(gi2c->wrapper_dev,
gi2c->base, msgs[i].buf,
msgs[i].len, &tx_dma);
- if (ret)
+ if (ret) {
mode = FIFO_MODE;
+ ret = geni_se_select_mode(gi2c->base,
+ mode);
+ }
}
- if (mode == FIFO_MODE) {
- geni_se_select_mode(gi2c->base, mode);
- /* Get FIFO IRQ */
+ if (mode == FIFO_MODE) /* Get FIFO IRQ */
geni_write_reg(1, gi2c->base,
SE_GENI_TX_WATERMARK_REG);
- }
}
/* Ensure FIFO write go through before waiting for Done evet */
mb();
@@ -477,12 +526,26 @@
return ret;
}
+ if (of_property_read_u32(pdev->dev.of_node, "qcom,clk-freq-out",
+ &gi2c->i2c_rsc.clk_freq_out)) {
+ dev_info(&pdev->dev,
+ "Bus frequency not specified, default to 400KHz.\n");
+ gi2c->i2c_rsc.clk_freq_out = KHz(400);
+ }
+
gi2c->irq = platform_get_irq(pdev, 0);
if (gi2c->irq < 0) {
dev_err(gi2c->dev, "IRQ error for i2c-geni\n");
return gi2c->irq;
}
+ ret = geni_i2c_clk_map_idx(gi2c);
+ if (ret) {
+ dev_err(gi2c->dev, "Invalid clk frequency %d KHz: %d\n",
+ gi2c->i2c_rsc.clk_freq_out, ret);
+ return ret;
+ }
+
gi2c->adap.algo = &geni_i2c_algo;
init_completion(&gi2c->xfer);
platform_set_drvdata(pdev, gi2c);
diff --git a/drivers/iio/adc/qcom-rradc.c b/drivers/iio/adc/qcom-rradc.c
index b521df6..b055ff6 100644
--- a/drivers/iio/adc/qcom-rradc.c
+++ b/drivers/iio/adc/qcom-rradc.c
@@ -331,8 +331,8 @@
int64_t temp;
/* K = code/4 */
- temp = div64_s64(adc_code, FG_ADC_RR_BATT_THERM_LSB_K);
- temp *= FG_ADC_SCALE_MILLI_FACTOR;
+ temp = ((int64_t)adc_code * FG_ADC_SCALE_MILLI_FACTOR);
+ temp = div64_s64(temp, FG_ADC_RR_BATT_THERM_LSB_K);
*result_millidegc = temp - FG_ADC_KELVINMIL_CELSIUSMIL;
return 0;
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index 2de1f52..62b0dec 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -484,7 +484,7 @@
return -EINVAL;
}
- indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*indio_dev));
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc_dev));
if (indio_dev == NULL) {
dev_err(&pdev->dev, "failed to allocate iio device\n");
return -ENOMEM;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index b9fcbf1..5faea37 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -41,6 +41,7 @@
static const struct inv_mpu6050_reg_map reg_set_6500 = {
.sample_rate_div = INV_MPU6050_REG_SAMPLE_RATE_DIV,
.lpf = INV_MPU6050_REG_CONFIG,
+ .accel_lpf = INV_MPU6500_REG_ACCEL_CONFIG_2,
.user_ctrl = INV_MPU6050_REG_USER_CTRL,
.fifo_en = INV_MPU6050_REG_FIFO_EN,
.gyro_config = INV_MPU6050_REG_GYRO_CONFIG,
@@ -205,6 +206,37 @@
EXPORT_SYMBOL_GPL(inv_mpu6050_set_power_itg);
/**
+ * inv_mpu6050_set_lpf_regs() - set low pass filter registers, chip dependent
+ *
+ * MPU60xx/MPU9150 use only 1 register for accelerometer + gyroscope
+ * MPU6500 and above have a dedicated register for accelerometer
+ */
+static int inv_mpu6050_set_lpf_regs(struct inv_mpu6050_state *st,
+ enum inv_mpu6050_filter_e val)
+{
+ int result;
+
+ result = regmap_write(st->map, st->reg->lpf, val);
+ if (result)
+ return result;
+
+ switch (st->chip_type) {
+ case INV_MPU6050:
+ case INV_MPU6000:
+ case INV_MPU9150:
+ /* old chips, nothing to do */
+ result = 0;
+ break;
+ default:
+ /* set accel lpf */
+ result = regmap_write(st->map, st->reg->accel_lpf, val);
+ break;
+ }
+
+ return result;
+}
+
+/**
* inv_mpu6050_init_config() - Initialize hardware, disable FIFO.
*
* Initial configuration:
@@ -227,8 +259,7 @@
if (result)
return result;
- d = INV_MPU6050_FILTER_20HZ;
- result = regmap_write(st->map, st->reg->lpf, d);
+ result = inv_mpu6050_set_lpf_regs(st, INV_MPU6050_FILTER_20HZ);
if (result)
return result;
@@ -531,6 +562,8 @@
* would be alising. This function basically search for the
* correct low pass parameters based on the fifo rate, e.g,
* sampling frequency.
+ *
+ * lpf is set automatically when setting sampling rate to avoid any aliases.
*/
static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate)
{
@@ -546,7 +579,7 @@
while ((h < hz[i]) && (i < ARRAY_SIZE(d) - 1))
i++;
data = d[i];
- result = regmap_write(st->map, st->reg->lpf, data);
+ result = inv_mpu6050_set_lpf_regs(st, data);
if (result)
return result;
st->chip_config.lpf = data;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index f0e8c5d..d851581 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -28,6 +28,7 @@
* struct inv_mpu6050_reg_map - Notable registers.
* @sample_rate_div: Divider applied to gyro output rate.
* @lpf: Configures internal low pass filter.
+ * @accel_lpf: Configures accelerometer low pass filter.
* @user_ctrl: Enables/resets the FIFO.
* @fifo_en: Determines which data will appear in FIFO.
* @gyro_config: gyro config register.
@@ -47,6 +48,7 @@
struct inv_mpu6050_reg_map {
u8 sample_rate_div;
u8 lpf;
+ u8 accel_lpf;
u8 user_ctrl;
u8 fifo_en;
u8 gyro_config;
@@ -187,6 +189,7 @@
#define INV_MPU6050_FIFO_THRESHOLD 500
/* mpu6500 registers */
+#define INV_MPU6500_REG_ACCEL_CONFIG_2 0x1D
#define INV_MPU6500_REG_ACCEL_OFFSET 0x77
/* delay time in milliseconds */
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index 55df9a7..44e46c1 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -227,7 +227,7 @@
.address = ST_PRESS_1_OUT_XL_ADDR,
.scan_index = 0,
.scan_type = {
- .sign = 'u',
+ .sign = 's',
.realbits = 24,
.storagebits = 32,
.endianness = IIO_LE,
@@ -240,7 +240,7 @@
.address = ST_TEMP_1_OUT_L_ADDR,
.scan_index = 1,
.scan_type = {
- .sign = 'u',
+ .sign = 's',
.realbits = 16,
.storagebits = 16,
.endianness = IIO_LE,
@@ -259,7 +259,7 @@
.address = ST_PRESS_LPS001WP_OUT_L_ADDR,
.scan_index = 0,
.scan_type = {
- .sign = 'u',
+ .sign = 's',
.realbits = 16,
.storagebits = 16,
.endianness = IIO_LE,
@@ -273,7 +273,7 @@
.address = ST_TEMP_LPS001WP_OUT_L_ADDR,
.scan_index = 1,
.scan_type = {
- .sign = 'u',
+ .sign = 's',
.realbits = 16,
.storagebits = 16,
.endianness = IIO_LE,
@@ -291,7 +291,7 @@
.address = ST_PRESS_1_OUT_XL_ADDR,
.scan_index = 0,
.scan_type = {
- .sign = 'u',
+ .sign = 's',
.realbits = 24,
.storagebits = 32,
.endianness = IIO_LE,
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index 268210e..24fb543 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -269,8 +269,6 @@
static void calibrate_as3935(struct as3935_state *st)
{
- mutex_lock(&st->lock);
-
/* mask disturber interrupt bit */
as3935_write(st, AS3935_INT, BIT(5));
@@ -280,8 +278,6 @@
mdelay(2);
as3935_write(st, AS3935_TUNE_CAP, (st->tune_cap / TUNE_CAP_DIV));
-
- mutex_unlock(&st->lock);
}
#ifdef CONFIG_PM_SLEEP
@@ -318,6 +314,8 @@
val &= ~AS3935_AFE_PWR_BIT;
ret = as3935_write(st, AS3935_AFE_GAIN, val);
+ calibrate_as3935(st);
+
err_resume:
mutex_unlock(&st->lock);
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 8fd108d..63e82f8 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -518,6 +518,11 @@
struct dst_entry *dst;
int ret;
+ if (!addr->net) {
+ pr_warn_ratelimited("%s: missing namespace\n", __func__);
+ return -EINVAL;
+ }
+
if (src_in->sa_family == AF_INET) {
struct rtable *rt = NULL;
const struct sockaddr_in *dst_in4 =
@@ -555,7 +560,6 @@
}
addr->bound_dev_if = ndev->ifindex;
- addr->net = dev_net(ndev);
dev_put(ndev);
return ret;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index f2d40c0..809a028 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -976,6 +976,8 @@
} else
ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
qp_attr_mask);
+ qp_attr->port_num = id_priv->id.port_num;
+ *qp_attr_mask |= IB_QP_PORT;
} else
ret = -ENOSYS;
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index cb3f515a..01e3a37 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -2342,6 +2342,10 @@
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
+ if (cmd.port_num < rdma_start_port(ib_dev) ||
+ cmd.port_num > rdma_end_port(ib_dev))
+ return -EINVAL;
+
INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
out_len);
@@ -2882,6 +2886,10 @@
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
+ if (cmd.attr.port_num < rdma_start_port(ib_dev) ||
+ cmd.attr.port_num > rdma_end_port(ib_dev))
+ return -EINVAL;
+
uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
if (!uobj)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 71232e5..20ec347 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -1267,6 +1267,7 @@
u32 doorbell[2];
doorbell[0] = cons_index & ((hr_cq->cq_depth << 1) - 1);
+ doorbell[1] = 0;
roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 11bfa27..282c9fb 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1105,13 +1105,13 @@
* pretend we don't support reading the HCA's core clock. This is also
* forced by mmap function.
*/
- if (PAGE_SIZE <= 4096 &&
- field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
- resp.comp_mask |=
- MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
- resp.hca_core_clock_offset =
- offsetof(struct mlx5_init_seg, internal_timer_h) %
- PAGE_SIZE;
+ if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
+ if (PAGE_SIZE <= 4096) {
+ resp.comp_mask |=
+ MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
+ resp.hca_core_clock_offset =
+ offsetof(struct mlx5_init_seg, internal_timer_h) % PAGE_SIZE;
+ }
resp.response_length += sizeof(resp.hca_core_clock_offset) +
sizeof(resp.reserved2);
}
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 1fb31a4..0a260a0 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1823,7 +1823,7 @@
mr->ndescs = sg_nents;
for_each_sg(sgl, sg, sg_nents, i) {
- if (unlikely(i > mr->max_descs))
+ if (unlikely(i >= mr->max_descs))
break;
klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 7b74d09..58e92bc 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -792,6 +792,9 @@
if (device_create_file(&dev->ibdev.dev, qedr_attributes[i]))
goto sysfs_err;
+ if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+ qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
+
DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
return dev;
@@ -824,11 +827,10 @@
ib_dealloc_device(&dev->ibdev);
}
-static int qedr_close(struct qedr_dev *dev)
+static void qedr_close(struct qedr_dev *dev)
{
- qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
-
- return 0;
+ if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+ qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR);
}
static void qedr_shutdown(struct qedr_dev *dev)
@@ -837,6 +839,12 @@
qedr_remove(dev);
}
+static void qedr_open(struct qedr_dev *dev)
+{
+ if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+ qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
+}
+
static void qedr_mac_address_change(struct qedr_dev *dev)
{
union ib_gid *sgid = &dev->sgid_tbl[0];
@@ -863,7 +871,7 @@
ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);
- qedr_ib_dispatch_event(dev, 1, IB_EVENT_GID_CHANGE);
+ qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE);
if (rc)
DP_ERR(dev, "Error updating mac filter\n");
@@ -877,7 +885,7 @@
{
switch (event) {
case QEDE_UP:
- qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
+ qedr_open(dev);
break;
case QEDE_DOWN:
qedr_close(dev);
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index 620badd..f669d0b 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -113,6 +113,8 @@
struct qed_rdma_events events;
};
+#define QEDR_ENET_STATE_BIT (0)
+
struct qedr_dev {
struct ib_device ibdev;
struct qed_dev *cdev;
@@ -153,6 +155,8 @@
struct qedr_cq *gsi_sqcq;
struct qedr_cq *gsi_rqcq;
struct qedr_qp *gsi_qp;
+
+ unsigned long enet_state;
};
#define QEDR_MAX_SQ_PBL (0x8000)
@@ -188,6 +192,7 @@
#define QEDR_ROCE_MAX_CNQ_SIZE (0x4000)
#define QEDR_MAX_PORT (1)
+#define QEDR_PORT (1)
#define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index a615142..4ba019e 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -471,8 +471,6 @@
struct ib_ucontext *context, struct ib_udata *udata)
{
struct qedr_dev *dev = get_qedr_dev(ibdev);
- struct qedr_ucontext *uctx = NULL;
- struct qedr_alloc_pd_uresp uresp;
struct qedr_pd *pd;
u16 pd_id;
int rc;
@@ -489,21 +487,33 @@
if (!pd)
return ERR_PTR(-ENOMEM);
- dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
+ rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
+ if (rc)
+ goto err;
- uresp.pd_id = pd_id;
pd->pd_id = pd_id;
if (udata && context) {
+ struct qedr_alloc_pd_uresp uresp;
+
+ uresp.pd_id = pd_id;
+
rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
- if (rc)
+ if (rc) {
DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
- uctx = get_qedr_ucontext(context);
- uctx->pd = pd;
- pd->uctx = uctx;
+ dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
+ goto err;
+ }
+
+ pd->uctx = get_qedr_ucontext(context);
+ pd->uctx->pd = pd;
}
return &pd->ibpd;
+
+err:
+ kfree(pd);
+ return ERR_PTR(rc);
}
int qedr_dealloc_pd(struct ib_pd *ibpd)
@@ -1719,6 +1729,14 @@
/* ERR->XXX */
switch (new_state) {
case QED_ROCE_QP_STATE_RESET:
+ if ((qp->rq.prod != qp->rq.cons) ||
+ (qp->sq.prod != qp->sq.cons)) {
+ DP_NOTICE(dev,
+ "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
+ qp->rq.prod, qp->rq.cons, qp->sq.prod,
+ qp->sq.cons);
+ status = -EINVAL;
+ }
break;
default:
status = -EINVAL;
@@ -2014,7 +2032,7 @@
qp_attr->cap.max_recv_wr = qp->rq.max_wr;
qp_attr->cap.max_send_sge = qp->sq.max_sges;
qp_attr->cap.max_recv_sge = qp->rq.max_sges;
- qp_attr->cap.max_inline_data = qp->max_inline_data;
+ qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
qp_init_attr->cap = qp_attr->cap;
memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], ¶ms.dgid.bytes[0],
@@ -3220,9 +3238,10 @@
IB_WC_SUCCESS, 0);
break;
case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
- DP_ERR(dev,
- "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
- cq->icid, qp->icid);
+ if (qp->state != QED_ROCE_QP_STATE_ERR)
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
IB_WC_WR_FLUSH_ERR, 0);
break;
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index ccf6247..4d2a346 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -980,7 +980,9 @@
free_rd_atomic_resource(qp, res);
rxe_advance_resp_resource(qp);
- memcpy(SKB_TO_PKT(skb), &ack_pkt, sizeof(skb->cb));
+ memcpy(SKB_TO_PKT(skb), &ack_pkt, sizeof(ack_pkt));
+ memset((unsigned char *)SKB_TO_PKT(skb) + sizeof(ack_pkt), 0,
+ sizeof(skb->cb) - sizeof(ack_pkt));
res->type = RXE_ATOMIC_MASK;
res->atomic.skb = skb;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 140f3f3..e46e2b0 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -83,6 +83,7 @@
static struct iscsi_transport iscsi_iser_transport;
static struct scsi_transport_template *iscsi_iser_scsi_transport;
static struct workqueue_struct *release_wq;
+static DEFINE_MUTEX(unbind_iser_conn_mutex);
struct iser_global ig;
int iser_debug_level = 0;
@@ -550,12 +551,14 @@
*/
if (iser_conn) {
mutex_lock(&iser_conn->state_mutex);
+ mutex_lock(&unbind_iser_conn_mutex);
iser_conn_terminate(iser_conn);
iscsi_conn_stop(cls_conn, flag);
/* unbind */
iser_conn->iscsi_conn = NULL;
conn->dd_data = NULL;
+ mutex_unlock(&unbind_iser_conn_mutex);
complete(&iser_conn->stop_completion);
mutex_unlock(&iser_conn->state_mutex);
@@ -973,13 +976,21 @@
struct iser_conn *iser_conn;
struct ib_device *ib_dev;
+ mutex_lock(&unbind_iser_conn_mutex);
+
session = starget_to_session(scsi_target(sdev))->dd_data;
iser_conn = session->leadconn->dd_data;
+ if (!iser_conn) {
+ mutex_unlock(&unbind_iser_conn_mutex);
+ return -ENOTCONN;
+ }
ib_dev = iser_conn->ib_conn.device->ib_device;
if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
blk_queue_virt_boundary(sdev->request_queue, ~MASK_4K);
+ mutex_unlock(&unbind_iser_conn_mutex);
+
return 0;
}
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 6dd43f6..39d2837 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -1447,7 +1447,7 @@
isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct isert_conn *isert_conn = wc->qp->qp_context;
- struct ib_device *ib_dev = isert_conn->cm_id->device;
+ struct ib_device *ib_dev = isert_conn->device->ib_device;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
isert_print_wc(wc, "login recv");
diff --git a/drivers/input/misc/hbtp_input.c b/drivers/input/misc/hbtp_input.c
index fe7cc70..0dea590 100644
--- a/drivers/input/misc/hbtp_input.c
+++ b/drivers/input/misc/hbtp_input.c
@@ -87,6 +87,7 @@
u32 power_on_delay;
u32 power_off_delay;
bool manage_pin_ctrl;
+ struct kobject *sysfs_kobject;
};
static struct hbtp_data *hbtp;
@@ -1350,6 +1351,41 @@
},
};
+static ssize_t hbtp_display_pwr_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ u32 status;
+ ssize_t ret;
+ char *envp[2] = {HBTP_EVENT_TYPE_DISPLAY, NULL};
+
+ mutex_lock(&hbtp->mutex);
+ ret = kstrtou32(buf, 10, &status);
+ if (ret) {
+ pr_err("hbtp: ret error: %zd\n", ret);
+ mutex_unlock(&hbtp->mutex);
+ return ret;
+ }
+ if (!hbtp || !hbtp->input_dev) {
+ pr_err("hbtp: hbtp or hbtp->input_dev not ready!\n");
+ mutex_unlock(&hbtp->mutex);
+ return ret;
+ }
+ if (status) {
+ pr_debug("hbtp: display power on!\n");
+ kobject_uevent_env(&hbtp->input_dev->dev.kobj,
+ KOBJ_ONLINE, envp);
+ } else {
+ pr_debug("hbtp: display power off!\n");
+ kobject_uevent_env(&hbtp->input_dev->dev.kobj,
+ KOBJ_OFFLINE, envp);
+ }
+ mutex_unlock(&hbtp->mutex);
+ return count;
+}
+
+static struct kobj_attribute hbtp_display_attribute =
+ __ATTR(display_pwr, 0660, NULL, hbtp_display_pwr_store);
+
static int __init hbtp_init(void)
{
int error;
@@ -1382,6 +1418,16 @@
goto err_platform_drv_reg;
}
+ hbtp->sysfs_kobject = kobject_create_and_add("hbtp", kernel_kobj);
+ if (!hbtp->sysfs_kobject)
+ pr_err("%s: Could not create sysfs kobject\n", __func__);
+ else {
+ error = sysfs_create_file(hbtp->sysfs_kobject,
+ &hbtp_display_attribute.attr);
+ if (error)
+ pr_err("failed to create the display_pwr sysfs\n");
+ }
+
return 0;
err_platform_drv_reg:
diff --git a/drivers/input/misc/keychord.c b/drivers/input/misc/keychord.c
index a5ea27a..c5ab3dd 100644
--- a/drivers/input/misc/keychord.c
+++ b/drivers/input/misc/keychord.c
@@ -232,9 +232,11 @@
{
struct keychord_device *kdev = file->private_data;
struct input_keychord *keychords = 0;
- struct input_keychord *keychord, *next, *end;
+ struct input_keychord *keychord;
int ret, i, key;
unsigned long flags;
+ size_t resid = count;
+ size_t key_bytes;
if (count < sizeof(struct input_keychord))
return -EINVAL;
@@ -265,15 +267,29 @@
kdev->head = kdev->tail = 0;
keychord = keychords;
- end = (struct input_keychord *)((char *)keychord + count);
- while (keychord < end) {
- next = NEXT_KEYCHORD(keychord);
- if (keychord->count <= 0 || next > end) {
+ while (resid > 0) {
+ /* Is the entire keychord entry header present ? */
+ if (resid < sizeof(struct input_keychord)) {
+ pr_err("keychord: Insufficient bytes present for header %zu\n",
+ resid);
+ goto err_unlock_return;
+ }
+ resid -= sizeof(struct input_keychord);
+ if (keychord->count <= 0) {
pr_err("keychord: invalid keycode count %d\n",
keychord->count);
goto err_unlock_return;
}
+ key_bytes = keychord->count * sizeof(keychord->keycodes[0]);
+ /* Do we have all the expected keycodes ? */
+ if (resid < key_bytes) {
+ pr_err("keychord: Insufficient bytes present for keycount %zu\n",
+ resid);
+ goto err_unlock_return;
+ }
+ resid -= key_bytes;
+
if (keychord->version != KEYCHORD_VERSION) {
pr_err("keychord: unsupported version %d\n",
keychord->version);
@@ -292,7 +308,7 @@
}
kdev->keychord_count++;
- keychord = next;
+ keychord = NEXT_KEYCHORD(keychord);
}
kdev->keychords = keychords;
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index e7b96f1..5be14ad 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -788,6 +788,13 @@
DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"),
},
},
+ {
+ /* Fujitsu UH554 laptop */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK UH544"),
+ },
+ },
{ }
};
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 89abfdb..c84c685 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -434,8 +434,10 @@
{
struct i8042_port *port = serio->port_data;
+ spin_lock_irq(&i8042_lock);
port->exists = true;
- mb();
+ spin_unlock_irq(&i8042_lock);
+
return 0;
}
@@ -448,16 +450,20 @@
{
struct i8042_port *port = serio->port_data;
+ spin_lock_irq(&i8042_lock);
port->exists = false;
+ port->serio = NULL;
+ spin_unlock_irq(&i8042_lock);
/*
+ * We need to make sure that interrupt handler finishes using
+ * our serio port before we return from this function.
* We synchronize with both AUX and KBD IRQs because there is
* a (very unlikely) chance that AUX IRQ is raised for KBD port
* and vice versa.
*/
synchronize_irq(I8042_AUX_IRQ);
synchronize_irq(I8042_KBD_IRQ);
- port->serio = NULL;
}
/*
@@ -574,7 +580,7 @@
spin_unlock_irqrestore(&i8042_lock, flags);
- if (likely(port->exists && !filtered))
+ if (likely(serio && !filtered))
serio_interrupt(serio, data, dfl);
out:
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 11a13b5..41800b6 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3857,11 +3857,9 @@
u8 vector, u32 dest_apicid, int devid)
{
struct irte_ga *irte = (struct irte_ga *) entry;
- struct iommu_dev_data *dev_data = search_dev_data(devid);
irte->lo.val = 0;
irte->hi.val = 0;
- irte->lo.fields_remap.guest_mode = dev_data ? dev_data->use_vapic : 0;
irte->lo.fields_remap.int_type = delivery_mode;
irte->lo.fields_remap.dm = dest_mode;
irte->hi.fields.vector = vector;
@@ -3917,10 +3915,10 @@
struct irte_ga *irte = (struct irte_ga *) entry;
struct iommu_dev_data *dev_data = search_dev_data(devid);
- if (!dev_data || !dev_data->use_vapic) {
+ if (!dev_data || !dev_data->use_vapic ||
+ !irte->lo.fields_remap.guest_mode) {
irte->hi.fields.vector = vector;
irte->lo.fields_remap.destination = dest_apicid;
- irte->lo.fields_remap.guest_mode = 0;
modify_irte_ga(devid, index, irte, NULL);
}
}
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index f8ed8c9..a0b4ac6 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -695,9 +695,9 @@
out_unregister:
mmu_notifier_unregister(&pasid_state->mn, mm);
+ mmput(mm);
out_free:
- mmput(mm);
free_pasid_state(pasid_state);
out:
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 1d5c514..bf93b91 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -238,6 +238,7 @@
#define ARM_SMMU_CB_FSYNR0 0x68
#define ARM_SMMU_CB_S1_TLBIVA 0x600
#define ARM_SMMU_CB_S1_TLBIASID 0x610
+#define ARM_SMMU_CB_S1_TLBIALL 0x618
#define ARM_SMMU_CB_S1_TLBIVAL 0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2 0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638
@@ -249,6 +250,7 @@
#define SCTLR_S1_ASIDPNE (1 << 12)
#define SCTLR_CFCFG (1 << 7)
+#define SCTLR_HUPCF (1 << 8)
#define SCTLR_CFIE (1 << 6)
#define SCTLR_CFRE (1 << 5)
#define SCTLR_E (1 << 4)
@@ -387,6 +389,7 @@
/* Protects clock_refs_count */
spinlock_t clock_refs_lock;
int clock_refs_count;
+ int regulator_defer;
};
struct arm_smmu_device {
@@ -415,6 +418,7 @@
#define ARM_SMMU_OPT_SKIP_INIT (1 << 2)
#define ARM_SMMU_OPT_DYNAMIC (1 << 3)
#define ARM_SMMU_OPT_3LVL_TABLES (1 << 4)
+#define ARM_SMMU_OPT_NO_ASID_RETENTION (1 << 5)
u32 options;
enum arm_smmu_arch_version version;
enum arm_smmu_implementation model;
@@ -534,6 +538,7 @@
{ ARM_SMMU_OPT_SKIP_INIT, "qcom,skip-init" },
{ ARM_SMMU_OPT_DYNAMIC, "qcom,dynamic" },
{ ARM_SMMU_OPT_3LVL_TABLES, "qcom,use-3-lvl-tables" },
+ { ARM_SMMU_OPT_NO_ASID_RETENTION, "qcom,no-asid-retention" },
{ 0, NULL},
};
@@ -785,6 +790,35 @@
WARN_ON(msm_bus_scale_client_update_request(pwr->bus_client, 0));
}
+static int arm_smmu_disable_regulators(struct arm_smmu_power_resources *pwr)
+{
+ struct regulator_bulk_data *consumers;
+ int i;
+ int num_consumers, ret, r;
+
+ num_consumers = pwr->num_gdscs;
+ consumers = pwr->gdscs;
+ for (i = num_consumers - 1; i >= 0; --i) {
+ ret = regulator_disable_deferred(consumers[i].consumer,
+ pwr->regulator_defer);
+ if (ret != 0)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ pr_err("Failed to disable %s: %d\n", consumers[i].supply, ret);
+ for (++i; i < num_consumers; ++i) {
+ r = regulator_enable(consumers[i].consumer);
+ if (r != 0)
+ pr_err("Failed to reenable %s: %d\n",
+ consumers[i].supply, r);
+ }
+
+ return ret;
+}
+
/* Clocks must be prepared before this (arm_smmu_prepare_clocks) */
static int arm_smmu_power_on_atomic(struct arm_smmu_power_resources *pwr)
{
@@ -880,7 +914,7 @@
}
arm_smmu_unprepare_clocks(pwr);
- regulator_bulk_disable(pwr->num_gdscs, pwr->gdscs);
+ arm_smmu_disable_regulators(pwr);
arm_smmu_unrequest_bus(pwr);
pwr->power_count = 0;
mutex_unlock(&pwr->power_lock);
@@ -991,12 +1025,17 @@
struct arm_smmu_device *smmu = smmu_domain->smmu;
bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
void __iomem *base;
+ bool use_tlbiall = smmu->options & ARM_SMMU_OPT_NO_ASID_RETENTION;
- if (stage1) {
+ if (stage1 && !use_tlbiall) {
base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
base + ARM_SMMU_CB_S1_TLBIASID);
arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
+ } else if (stage1 && use_tlbiall) {
+ base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+ writel_relaxed(0, base + ARM_SMMU_CB_S1_TLBIALL);
+ arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
} else {
base = ARM_SMMU_GR0(smmu);
writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
@@ -1013,8 +1052,9 @@
struct arm_smmu_device *smmu = smmu_domain->smmu;
bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
void __iomem *reg;
+ bool use_tlbiall = smmu->options & ARM_SMMU_OPT_NO_ASID_RETENTION;
- if (stage1) {
+ if (stage1 && !use_tlbiall) {
reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
@@ -1033,6 +1073,10 @@
iova += granule >> 12;
} while (size -= granule);
}
+ } else if (stage1 && use_tlbiall) {
+ reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+ reg += ARM_SMMU_CB_S1_TLBIALL;
+ writel_relaxed(0, reg);
} else if (smmu->version == ARM_SMMU_V2) {
reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
@@ -1440,6 +1484,11 @@
/* SCTLR */
reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE;
+ if (smmu_domain->attributes & (1 << DOMAIN_ATTR_CB_STALL_DISABLE)) {
+ reg &= ~SCTLR_CFCFG;
+ reg |= SCTLR_HUPCF;
+ }
+
if ((!(smmu_domain->attributes & (1 << DOMAIN_ATTR_S1_BYPASS)) &&
!(smmu_domain->attributes & (1 << DOMAIN_ATTR_EARLY_MAP))) ||
!stage1)
@@ -2567,19 +2616,23 @@
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
int ret = 0;
+ mutex_lock(&smmu_domain->init_mutex);
switch (attr) {
case DOMAIN_ATTR_NESTING:
*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
- return 0;
+ ret = 0;
+ break;
case DOMAIN_ATTR_PT_BASE_ADDR:
*((phys_addr_t *)data) =
smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
- return 0;
+ ret = 0;
+ break;
case DOMAIN_ATTR_CONTEXT_BANK:
/* context bank index isn't valid until we are attached */
- if (smmu_domain->smmu == NULL)
- return -ENODEV;
-
+ if (smmu_domain->smmu == NULL) {
+ ret = -ENODEV;
+ break;
+ }
*((unsigned int *) data) = smmu_domain->cfg.cbndx;
ret = 0;
break;
@@ -2587,9 +2640,10 @@
u64 val;
struct arm_smmu_device *smmu = smmu_domain->smmu;
/* not valid until we are attached */
- if (smmu == NULL)
- return -ENODEV;
-
+ if (smmu == NULL) {
+ ret = -ENODEV;
+ break;
+ }
val = smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
if (smmu_domain->cfg.cbar != CBAR_TYPE_S2_TRANS)
val |= (u64)ARM_SMMU_CB_ASID(smmu, &smmu_domain->cfg)
@@ -2600,8 +2654,10 @@
}
case DOMAIN_ATTR_CONTEXTIDR:
/* not valid until attached */
- if (smmu_domain->smmu == NULL)
- return -ENODEV;
+ if (smmu_domain->smmu == NULL) {
+ ret = -ENODEV;
+ break;
+ }
*((u32 *)data) = smmu_domain->cfg.procid;
ret = 0;
break;
@@ -2655,8 +2711,10 @@
ret = 0;
break;
case DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT:
- if (!smmu_domain->smmu)
- return -ENODEV;
+ if (!smmu_domain->smmu) {
+ ret = -ENODEV;
+ break;
+ }
*((int *)data) = is_iommu_pt_coherent(smmu_domain);
ret = 0;
break;
@@ -2665,9 +2723,16 @@
& (1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT));
ret = 0;
break;
+ case DOMAIN_ATTR_CB_STALL_DISABLE:
+ *((int *)data) = !!(smmu_domain->attributes
+ & (1 << DOMAIN_ATTR_CB_STALL_DISABLE));
+ ret = 0;
+ break;
default:
- return -ENODEV;
+ ret = -ENODEV;
+ break;
}
+ mutex_unlock(&smmu_domain->init_mutex);
return ret;
}
@@ -2842,6 +2907,12 @@
break;
}
+ case DOMAIN_ATTR_CB_STALL_DISABLE:
+ if (*((int *)data))
+ smmu_domain->attributes |=
+ 1 << DOMAIN_ATTR_CB_STALL_DISABLE;
+ ret = 0;
+ break;
default:
ret = -ENODEV;
}
@@ -3134,6 +3205,11 @@
u32 sctlr, sctlr_orig, fsr;
void __iomem *cb_base;
+ if (smmu->model == QCOM_SMMUV2) {
+ dev_err(smmu->dev, "ATOS support is disabled\n");
+ return 0;
+ }
+
ret = arm_smmu_power_on(smmu_domain->smmu->pwr);
if (ret)
return ret;
@@ -3298,14 +3374,23 @@
struct device *dev)
{
struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
u32 i, idx;
int cb = -EINVAL;
bool dynamic;
- /* Dynamic domains must set cbndx through domain attribute */
+ /*
+ * Dynamic domains have already set cbndx through domain attribute.
+ * Verify that they picked a valid value.
+ */
dynamic = is_dynamic_domain(domain);
- if (dynamic)
- return INVALID_CBNDX;
+ if (dynamic) {
+ cb = smmu_domain->cfg.cbndx;
+ if (cb < smmu->num_context_banks)
+ return cb;
+ else
+ return -EINVAL;
+ }
mutex_lock(&smmu->stream_map_mutex);
for_each_cfg_sme(fwspec, i, idx) {
@@ -3484,6 +3569,12 @@
if (!pwr->gdscs)
return -ENOMEM;
+ if (!of_property_read_u32(dev->of_node,
+ "qcom,deferred-regulator-disable-delay",
+ &(pwr->regulator_defer)))
+ dev_info(dev, "regulator defer delay %d\n",
+ pwr->regulator_defer);
+
i = 0;
of_property_for_each_string(dev->of_node, "qcom,regulator-names",
prop, cname)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 2db0d64..d92a352 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -112,8 +112,7 @@
unsigned long lo, hi;
resource_list_for_each_entry(window, &bridge->windows) {
- if (resource_type(window->res) != IORESOURCE_MEM &&
- resource_type(window->res) != IORESOURCE_IO)
+ if (resource_type(window->res) != IORESOURCE_MEM)
continue;
lo = iova_pfn(iovad, window->res->start - window->offset);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 87fcbf7..002f8a4 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1144,7 +1144,7 @@
if (!dma_pte_present(pte) || dma_pte_superpage(pte))
goto next;
- level_pfn = pfn & level_mask(level - 1);
+ level_pfn = pfn & level_mask(level);
level_pte = phys_to_virt(dma_pte_addr(pte));
if (level > 2)
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 2ef496d..dde2876 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -760,16 +760,7 @@
if (!iopte_tblcnt(*ptep)) {
/* no valid mappings left under this table. free it. */
__arm_lpae_set_pte(ptep, 0, &iop->cfg);
- io_pgtable_tlb_add_flush(iop, iova,
- entries * entry_size,
- ARM_LPAE_GRANULE(data),
- false);
__arm_lpae_free_pgtable(data, lvl + 1, table_base);
- } else {
- io_pgtable_tlb_add_flush(iop, iova,
- entries * entry_size,
- ARM_LPAE_GRANULE(data),
- true);
}
return entries * entry_size;
diff --git a/drivers/iommu/iommu-debug.c b/drivers/iommu/iommu-debug.c
index c98d8c2..56eff61b 100644
--- a/drivers/iommu/iommu-debug.c
+++ b/drivers/iommu/iommu-debug.c
@@ -71,6 +71,8 @@
return "DOMAIN_ATTR_FAST";
case DOMAIN_ATTR_EARLY_MAP:
return "DOMAIN_ATTR_EARLY_MAP";
+ case DOMAIN_ATTR_CB_STALL_DISABLE:
+ return "DOMAIN_ATTR_CB_STALL_DISABLE";
default:
return "Unknown attr!";
}
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 6c3f8a2..83cbf20 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -386,36 +386,30 @@
device->dev = dev;
ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
- if (ret) {
- kfree(device);
- return ret;
- }
+ if (ret)
+ goto err_free_device;
device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
if (!device->name) {
- sysfs_remove_link(&dev->kobj, "iommu_group");
- kfree(device);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_remove_link;
}
ret = sysfs_create_link_nowarn(group->devices_kobj,
&dev->kobj, device->name);
if (ret) {
- kfree(device->name);
if (ret == -EEXIST && i >= 0) {
/*
* Account for the slim chance of collision
* and append an instance to the name.
*/
+ kfree(device->name);
device->name = kasprintf(GFP_KERNEL, "%s.%d",
kobject_name(&dev->kobj), i++);
goto rename;
}
-
- sysfs_remove_link(&dev->kobj, "iommu_group");
- kfree(device);
- return ret;
+ goto err_free_name;
}
kobject_get(group->devices_kobj);
@@ -427,8 +421,10 @@
mutex_lock(&group->mutex);
list_add_tail(&device->list, &group->devices);
if (group->domain)
- __iommu_attach_device(group->domain, dev);
+ ret = __iommu_attach_device(group->domain, dev);
mutex_unlock(&group->mutex);
+ if (ret)
+ goto err_put_group;
/* Notify any listeners about change to group. */
blocking_notifier_call_chain(&group->notifier,
@@ -439,6 +435,21 @@
pr_info("Adding device %s to group %d\n", dev_name(dev), group->id);
return 0;
+
+err_put_group:
+ mutex_lock(&group->mutex);
+ list_del(&device->list);
+ mutex_unlock(&group->mutex);
+ dev->iommu_group = NULL;
+ kobject_put(group->devices_kobj);
+err_free_name:
+ kfree(device->name);
+err_remove_link:
+ sysfs_remove_link(&dev->kobj, "iommu_group");
+err_free_device:
+ kfree(device);
+ pr_err("Failed to add device %s to group %d: %d\n", dev_name(dev), group->id, ret);
+ return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index daccf64..779001e 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -647,6 +647,9 @@
int enabled;
u64 val;
+ if (cpu >= nr_cpu_ids)
+ return -EINVAL;
+
if (gic_irq_in_rdist(d))
return -EINVAL;
diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c
index bb3ac5f..72a391e 100644
--- a/drivers/irqchip/irq-xtensa-mx.c
+++ b/drivers/irqchip/irq-xtensa-mx.c
@@ -142,7 +142,7 @@
int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent)
{
struct irq_domain *root_domain =
- irq_domain_add_legacy(NULL, NR_IRQS, 0, 0,
+ irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
&xtensa_mx_irq_domain_ops,
&xtensa_mx_irq_chip);
irq_set_default_host(root_domain);
diff --git a/drivers/irqchip/irq-xtensa-pic.c b/drivers/irqchip/irq-xtensa-pic.c
index 472ae17..f728755 100644
--- a/drivers/irqchip/irq-xtensa-pic.c
+++ b/drivers/irqchip/irq-xtensa-pic.c
@@ -89,7 +89,7 @@
int __init xtensa_pic_init_legacy(struct device_node *interrupt_parent)
{
struct irq_domain *root_domain =
- irq_domain_add_legacy(NULL, NR_IRQS, 0, 0,
+ irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
&xtensa_irq_domain_ops, &xtensa_irq_chip);
irq_set_default_host(root_domain);
return 0;
diff --git a/drivers/irqchip/qcom/Kconfig b/drivers/irqchip/qcom/Kconfig
index e4a7a88..b892109 100644
--- a/drivers/irqchip/qcom/Kconfig
+++ b/drivers/irqchip/qcom/Kconfig
@@ -13,3 +13,10 @@
default y if ARCH_SDM845
help
QTI Power Domain Controller for SDM845
+
+config QTI_PDC_SDM670
+ bool "QTI PDC SDM670"
+ select QTI_PDC
+ default y if ARCH_SDM670
+ help
+ QTI Power Domain Controller for SDM670
diff --git a/drivers/irqchip/qcom/Makefile b/drivers/irqchip/qcom/Makefile
index 1b7856d..5e99040 100644
--- a/drivers/irqchip/qcom/Makefile
+++ b/drivers/irqchip/qcom/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_QTI_PDC) += pdc.o
obj-$(CONFIG_QTI_PDC_SDM845) += pdc-sdm845.o
+obj-$(CONFIG_QTI_PDC_SDM670) += pdc-sdm670.o
diff --git a/drivers/irqchip/qcom/pdc-sdm670.c b/drivers/irqchip/qcom/pdc-sdm670.c
new file mode 100644
index 0000000..7bd6333
--- /dev/null
+++ b/drivers/irqchip/qcom/pdc-sdm670.c
@@ -0,0 +1,139 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/irqchip.h>
+#include "pdc.h"
+
+static struct pdc_pin sdm670_data[] = {
+ {0, 512}, /* rpmh_wake */
+ {1, 513}, /* ee0_apps_hlos_spmi_periph_irq */
+ {2, 514}, /* ee1_apps_trustzone_spmi_periph_irq */
+ {3, 515}, /* secure_wdog_expired */
+ {4, 516}, /* secure_wdog_bark_irq */
+ {5, 517}, /* aop_wdog_expired_irq */
+ {6, 518}, /* qmp_usb3_lfps_rxterm_irq */
+ {7, 519}, /* not-connected */
+ {8, 520}, /* eud_p0_dmse_int_mx */
+ {9, 521}, /* eud_p0_dpse_int_mx */
+ {10, 522}, /* not-connected */
+ {11, 523}, /* not-connected */
+ {12, 524}, /* eud_int_mx[1] */
+ {13, 525}, /* ssc_xpu_irq_summary */
+ {14, 526}, /* wd_bite_apps */
+ {15, 527}, /* ssc_vmidmt_irq_summary */
+ {16, 528}, /* q6ss_irq_out_apps_ipc[4] */
+ {17, 529}, /* not-connected */
+ {18, 530}, /* aoss_pmic_arb_mpu_xpu_summary_irq */
+ {19, 531}, /* apps_pdc_irq_in_19 */
+ {20, 532}, /* apps_pdc_irq_in_20 */
+ {21, 533}, /* apps_pdc_irq_in_21 */
+ {22, 534}, /* pdc_apps_epcb_timeout_summary_irq */
+ {23, 535}, /* spmi_protocol_irq */
+ {24, 536}, /* tsense0_tsense_max_min_int */
+ {25, 537}, /* tsense1_tsense_max_min_int */
+ {26, 538}, /* tsense0_upper_lower_intr */
+ {27, 539}, /* tsense1_upper_lower_intr */
+ {28, 540}, /* tsense0_critical_intr */
+ {29, 541}, /* tsense1_critical_intr */
+ {30, 542}, /* core_bi_px_gpio_1 */
+ {31, 543}, /* core_bi_px_gpio_3 */
+ {32, 544}, /* core_bi_px_gpio_5 */
+ {33, 545}, /* core_bi_px_gpio_10 */
+ {34, 546}, /* core_bi_px_gpio_11 */
+ {35, 547}, /* core_bi_px_gpio_20 */
+ {36, 548}, /* core_bi_px_gpio_22 */
+ {37, 549}, /* core_bi_px_gpio_24 */
+ {38, 550}, /* core_bi_px_gpio_26 */
+ {39, 551}, /* core_bi_px_gpio_30 */
+ {41, 553}, /* core_bi_px_gpio_32 */
+ {42, 554}, /* core_bi_px_gpio_34 */
+ {43, 555}, /* core_bi_px_gpio_36 */
+ {44, 556}, /* core_bi_px_gpio_37 */
+ {45, 557}, /* core_bi_px_gpio_38 */
+ {46, 558}, /* core_bi_px_gpio_39 */
+ {47, 559}, /* core_bi_px_gpio_40 */
+ {49, 561}, /* core_bi_px_gpio_43 */
+ {50, 562}, /* core_bi_px_gpio_44 */
+ {51, 563}, /* core_bi_px_gpio_46 */
+ {52, 564}, /* core_bi_px_gpio_48 */
+ {54, 566}, /* core_bi_px_gpio_52 */
+ {55, 567}, /* core_bi_px_gpio_53 */
+ {56, 568}, /* core_bi_px_gpio_54 */
+ {57, 569}, /* core_bi_px_gpio_56 */
+ {58, 570}, /* core_bi_px_gpio_57 */
+ {59, 571}, /* bi_px_ssc_23 */
+ {60, 572}, /* bi_px_ssc_24 */
+ {61, 573}, /* bi_px_ssc_25 */
+ {62, 574}, /* bi_px_ssc_26 */
+ {63, 575}, /* bi_px_ssc_27 */
+ {64, 576}, /* bi_px_ssc_28 */
+ {65, 577}, /* bi_px_ssc_29 */
+ {66, 578}, /* core_bi_px_gpio_66 */
+ {67, 579}, /* core_bi_px_gpio_68 */
+ {68, 580}, /* bi_px_ssc_20 */
+ {69, 581}, /* bi_px_ssc_30 */
+ {70, 582}, /* core_bi_px_gpio_77 */
+ {71, 583}, /* core_bi_px_gpio_78 */
+ {72, 584}, /* core_bi_px_gpio_79 */
+ {73, 585}, /* core_bi_px_gpio_80 */
+ {74, 586}, /* core_bi_px_gpio_84 */
+ {75, 587}, /* core_bi_px_gpio_85 */
+ {76, 588}, /* core_bi_px_gpio_86 */
+ {77, 589}, /* core_bi_px_gpio_88 */
+ {79, 591}, /* core_bi_px_gpio_91 */
+ {80, 592}, /* core_bi_px_gpio_92 */
+ {81, 593}, /* core_bi_px_gpio_95 */
+ {82, 594}, /* core_bi_px_gpio_96 */
+ {83, 595}, /* core_bi_px_gpio_97 */
+ {84, 596}, /* core_bi_px_gpio_101 */
+ {85, 597}, /* core_bi_px_gpio_103 */
+ {86, 598}, /* bi_px_ssc_22 */
+ {87, 599}, /* core_bi_px_to_mpm[6] */
+ {88, 600}, /* core_bi_px_to_mpm[0] */
+ {89, 601}, /* core_bi_px_to_mpm[1] */
+ {90, 602}, /* core_bi_px_gpio_115 */
+ {91, 603}, /* core_bi_px_gpio_116 */
+ {92, 604}, /* core_bi_px_gpio_117 */
+ {93, 605}, /* core_bi_px_gpio_118 */
+ {94, 641}, /* core_bi_px_gpio_119 */
+ {95, 642}, /* core_bi_px_gpio_120 */
+ {96, 643}, /* core_bi_px_gpio_121 */
+ {97, 644}, /* core_bi_px_gpio_122 */
+ {98, 645}, /* core_bi_px_gpio_123 */
+ {99, 646}, /* core_bi_px_gpio_124 */
+ {100, 647}, /* core_bi_px_gpio_125 */
+ {101, 648}, /* core_bi_px_to_mpm[5] */
+ {102, 649}, /* core_bi_px_gpio_127 */
+ {103, 650}, /* core_bi_px_gpio_128 */
+ {104, 651}, /* core_bi_px_gpio_129 */
+ {105, 652}, /* core_bi_px_gpio_130 */
+ {106, 653}, /* core_bi_px_gpio_132 */
+ {107, 654}, /* core_bi_px_gpio_133 */
+ {108, 655}, /* core_bi_px_gpio_145 */
+ {119, 666}, /* core_bi_px_to_mpm[2] */
+ {120, 667}, /* core_bi_px_to_mpm[3] */
+ {121, 668}, /* core_bi_px_to_mpm[4] */
+ {122, 669}, /* core_bi_px_gpio_41 */
+ {123, 670}, /* core_bi_px_gpio_89 */
+ {124, 671}, /* core_bi_px_gpio_31 */
+ {125, 95}, /* core_bi_px_gpio_49 */
+ {-1}
+};
+
+static int __init qcom_pdc_gic_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return qcom_pdc_init(node, parent, sdm670_data);
+}
+
+IRQCHIP_DECLARE(pdc_sdm670, "qcom,pdc-sdm670", qcom_pdc_gic_init);
diff --git a/drivers/leds/leds-qpnp-flash-v2.c b/drivers/leds/leds-qpnp-flash-v2.c
index 2678a00..5bd52e4 100644
--- a/drivers/leds/leds-qpnp-flash-v2.c
+++ b/drivers/leds/leds-qpnp-flash-v2.c
@@ -158,6 +158,11 @@
#define FLASH_LED_DISABLE 0x00
#define FLASH_LED_SAFETY_TMR_DISABLED 0x13
#define FLASH_LED_MAX_TOTAL_CURRENT_MA 3750
+#define FLASH_LED_IRES5P0_MAX_CURR_MA 640
+#define FLASH_LED_IRES7P5_MAX_CURR_MA 960
+#define FLASH_LED_IRES10P0_MAX_CURR_MA 1280
+#define FLASH_LED_IRES12P5_MAX_CURR_MA 1600
+#define MAX_IRES_LEVELS 4
/* notifier call chain for flash-led irqs */
static ATOMIC_NOTIFIER_HEAD(irq_notifier_list);
@@ -196,13 +201,15 @@
struct pinctrl_state *hw_strobe_state_suspend;
int hw_strobe_gpio;
int ires_ua;
+ int default_ires_ua;
int max_current;
int current_ma;
int prev_current_ma;
u8 duration;
u8 id;
u8 type;
- u8 ires;
+ u8 ires_idx;
+ u8 default_ires_idx;
u8 hdrm_val;
u8 current_reg_val;
u8 strobe_ctrl;
@@ -305,6 +312,11 @@
125, 119, 113, 107, 149, 143, 137, 131,
};
+static int max_ires_curr_ma_table[MAX_IRES_LEVELS] = {
+ FLASH_LED_IRES12P5_MAX_CURR_MA, FLASH_LED_IRES10P0_MAX_CURR_MA,
+ FLASH_LED_IRES7P5_MAX_CURR_MA, FLASH_LED_IRES5P0_MAX_CURR_MA
+};
+
static int qpnp_flash_led_read(struct qpnp_flash_led *led, u16 addr, u8 *data)
{
int rc;
@@ -935,6 +947,7 @@
static void qpnp_flash_led_node_set(struct flash_node_data *fnode, int value)
{
+ int i = 0;
int prgm_current_ma = value;
int min_ma = fnode->ires_ua / 1000;
struct qpnp_flash_led *led = dev_get_drvdata(&fnode->pdev->dev);
@@ -944,7 +957,22 @@
else if (value < min_ma)
prgm_current_ma = min_ma;
+ fnode->ires_idx = fnode->default_ires_idx;
+ fnode->ires_ua = fnode->default_ires_ua;
+
prgm_current_ma = min(prgm_current_ma, fnode->max_current);
+ if (prgm_current_ma > max_ires_curr_ma_table[fnode->ires_idx]) {
+ /* find the matching ires */
+ for (i = MAX_IRES_LEVELS - 1; i >= 0; i--) {
+ if (prgm_current_ma <= max_ires_curr_ma_table[i]) {
+ fnode->ires_idx = i;
+ fnode->ires_ua = FLASH_LED_IRES_MIN_UA +
+ (FLASH_LED_IRES_BASE - fnode->ires_idx) *
+ FLASH_LED_IRES_DIVISOR;
+ break;
+ }
+ }
+ }
fnode->current_ma = prgm_current_ma;
fnode->cdev.brightness = prgm_current_ma;
fnode->current_reg_val = CURRENT_MA_TO_REG_VAL(prgm_current_ma,
@@ -1062,7 +1090,7 @@
val = 0;
for (i = 0; i < led->num_fnodes; i++)
if (snode->led_mask & BIT(led->fnode[i].id))
- val |= led->fnode[i].ires << (led->fnode[i].id * 2);
+ val |= led->fnode[i].ires_idx << (led->fnode[i].id * 2);
rc = qpnp_flash_led_masked_write(led, FLASH_LED_REG_IRES(led->base),
FLASH_LED_CURRENT_MASK, val);
@@ -1434,13 +1462,14 @@
return rc;
}
- fnode->ires_ua = FLASH_LED_IRES_DEFAULT_UA;
- fnode->ires = FLASH_LED_IRES_DEFAULT_VAL;
+ fnode->default_ires_ua = fnode->ires_ua = FLASH_LED_IRES_DEFAULT_UA;
+ fnode->default_ires_idx = fnode->ires_idx = FLASH_LED_IRES_DEFAULT_VAL;
rc = of_property_read_u32(node, "qcom,ires-ua", &val);
if (!rc) {
- fnode->ires_ua = val;
- fnode->ires = FLASH_LED_IRES_BASE -
- (val - FLASH_LED_IRES_MIN_UA) / FLASH_LED_IRES_DIVISOR;
+ fnode->default_ires_ua = fnode->ires_ua = val;
+ fnode->default_ires_idx = fnode->ires_idx =
+ FLASH_LED_IRES_BASE - (val - FLASH_LED_IRES_MIN_UA) /
+ FLASH_LED_IRES_DIVISOR;
} else if (rc != -EINVAL) {
pr_err("Unable to read current resolution rc=%d\n", rc);
return rc;
diff --git a/drivers/leds/leds-qpnp-wled.c b/drivers/leds/leds-qpnp-wled.c
index cb19cef..f18022b 100644
--- a/drivers/leds/leds-qpnp-wled.c
+++ b/drivers/leds/leds-qpnp-wled.c
@@ -160,18 +160,19 @@
#define QPNP_WLED_MOD_EN_SHFT 7
#define QPNP_WLED_MOD_EN 1
#define QPNP_WLED_GATE_DRV_MASK 0xFE
-#define QPNP_WLED_SYNC_DLY_MASK 0xF8
+#define QPNP_WLED_SYNC_DLY_MASK GENMASK(2, 0)
#define QPNP_WLED_SYNC_DLY_MIN_US 0
#define QPNP_WLED_SYNC_DLY_MAX_US 1400
#define QPNP_WLED_SYNC_DLY_STEP_US 200
#define QPNP_WLED_DEF_SYNC_DLY_US 400
-#define QPNP_WLED_FS_CURR_MASK 0xF0
+#define QPNP_WLED_FS_CURR_MASK GENMASK(3, 0)
#define QPNP_WLED_FS_CURR_MIN_UA 0
#define QPNP_WLED_FS_CURR_MAX_UA 30000
#define QPNP_WLED_FS_CURR_STEP_UA 2500
-#define QPNP_WLED_CABC_MASK 0x7F
+#define QPNP_WLED_CABC_MASK 0x80
#define QPNP_WLED_CABC_SHIFT 7
#define QPNP_WLED_CURR_SINK_SHIFT 4
+#define QPNP_WLED_CURR_SINK_MASK GENMASK(7, 4)
#define QPNP_WLED_BRIGHT_LSB_MASK 0xFF
#define QPNP_WLED_BRIGHT_MSB_SHIFT 8
#define QPNP_WLED_BRIGHT_MSB_MASK 0x0F
@@ -208,12 +209,14 @@
#define QPNP_WLED_SEC_UNLOCK 0xA5
#define QPNP_WLED_MAX_STRINGS 4
+#define QPNP_PM660_WLED_MAX_STRINGS 3
#define WLED_MAX_LEVEL_4095 4095
#define QPNP_WLED_RAMP_DLY_MS 20
#define QPNP_WLED_TRIGGER_NONE "none"
#define QPNP_WLED_STR_SIZE 20
#define QPNP_WLED_MIN_MSLEEP 20
#define QPNP_WLED_SC_DLY_MS 20
+#define QPNP_WLED_SOFT_START_DLY_US 10000
#define NUM_SUPPORTED_AVDD_VOLTAGES 6
#define QPNP_WLED_DFLT_AVDD_MV 7600
@@ -381,6 +384,8 @@
u16 ramp_ms;
u16 ramp_step;
u16 cons_sync_write_delay_us;
+ u16 auto_calibration_ovp_count;
+ u16 max_strings;
u8 strings[QPNP_WLED_MAX_STRINGS];
u8 num_strings;
u8 loop_auto_gm_thresh;
@@ -396,6 +401,9 @@
bool en_ext_pfet_sc_pro;
bool prev_state;
bool ovp_irq_disabled;
+ bool auto_calib_enabled;
+ bool auto_calib_done;
+ ktime_t start_ovp_fault_time;
};
/* helper to read a pmic register */
@@ -531,7 +539,7 @@
u8 reg;
/* set brightness registers */
- for (i = 0; i < wled->num_strings; i++) {
+ for (i = 0; i < wled->max_strings; i++) {
reg = level & QPNP_WLED_BRIGHT_LSB_MASK;
rc = qpnp_wled_write_reg(wled,
QPNP_WLED_BRIGHT_LSB_REG(wled->sink_base,
@@ -600,7 +608,8 @@
* OVP interrupt disabled when the module is disabled.
*/
if (state) {
- usleep_range(10000, 11000);
+ usleep_range(QPNP_WLED_SOFT_START_DLY_US,
+ QPNP_WLED_SOFT_START_DLY_US + 1000);
rc = qpnp_wled_psm_config(wled, false);
if (rc < 0)
return rc;
@@ -873,32 +882,25 @@
struct device_attribute *attr, const char *buf, size_t count)
{
struct qpnp_wled *wled = dev_get_drvdata(dev);
- int data, i, rc, temp;
+ int data, i, rc;
u8 reg;
rc = kstrtoint(buf, 10, &data);
if (rc)
return rc;
- for (i = 0; i < wled->num_strings; i++) {
+ for (i = 0; i < wled->max_strings; i++) {
if (data < QPNP_WLED_FS_CURR_MIN_UA)
data = QPNP_WLED_FS_CURR_MIN_UA;
else if (data > QPNP_WLED_FS_CURR_MAX_UA)
data = QPNP_WLED_FS_CURR_MAX_UA;
- rc = qpnp_wled_read_reg(wled,
- QPNP_WLED_FS_CURR_REG(wled->sink_base,
- wled->strings[i]), ®);
+ reg = data / QPNP_WLED_FS_CURR_STEP_UA;
+ rc = qpnp_wled_masked_write_reg(wled,
+ QPNP_WLED_FS_CURR_REG(wled->sink_base, i),
+ QPNP_WLED_FS_CURR_MASK, reg);
if (rc < 0)
return rc;
- reg &= QPNP_WLED_FS_CURR_MASK;
- temp = data / QPNP_WLED_FS_CURR_STEP_UA;
- reg |= temp;
- rc = qpnp_wled_write_reg(wled,
- QPNP_WLED_FS_CURR_REG(wled->sink_base,
- wled->strings[i]), reg);
- if (rc)
- return rc;
}
wled->fs_curr_ua = data;
@@ -1090,6 +1092,229 @@
return 0;
}
+#define AUTO_CALIB_BRIGHTNESS 16
+static int wled_auto_calibrate(struct qpnp_wled *wled)
+{
+ int rc = 0, i;
+ u8 reg = 0, sink_config = 0, sink_test = 0, sink_valid = 0, int_sts;
+
+ mutex_lock(&wled->lock);
+
+ /* disable OVP IRQ */
+ if (wled->ovp_irq > 0 && !wled->ovp_irq_disabled) {
+ disable_irq_nosync(wled->ovp_irq);
+ wled->ovp_irq_disabled = true;
+ }
+
+ /* read configured sink configuration */
+ rc = qpnp_wled_read_reg(wled,
+ QPNP_WLED_CURR_SINK_REG(wled->sink_base), &sink_config);
+ if (rc < 0) {
+ pr_err("Failed to read SINK configuration rc=%d\n", rc);
+ goto failed_calib;
+ }
+
+ /* disable the module before starting calibration */
+ rc = qpnp_wled_masked_write_reg(wled,
+ QPNP_WLED_MODULE_EN_REG(wled->ctrl_base),
+ QPNP_WLED_MODULE_EN_MASK, 0);
+ if (rc < 0) {
+ pr_err("Failed to disable WLED module rc=%d\n", rc);
+ goto failed_calib;
+ }
+
+ /* set low brightness across all sinks */
+ rc = qpnp_wled_set_level(wled, AUTO_CALIB_BRIGHTNESS);
+ if (rc < 0) {
+ pr_err("Failed to set brightness for calibration rc=%d\n", rc);
+ goto failed_calib;
+ }
+
+ /* disable all sinks */
+ rc = qpnp_wled_write_reg(wled,
+ QPNP_WLED_CURR_SINK_REG(wled->sink_base), 0);
+ if (rc < 0) {
+ pr_err("Failed to disable all sinks rc=%d\n", rc);
+ goto failed_calib;
+ }
+
+ rc = qpnp_wled_masked_write_reg(wled,
+ QPNP_WLED_MODULE_EN_REG(wled->ctrl_base),
+ QPNP_WLED_MODULE_EN_MASK,
+ QPNP_WLED_MODULE_EN_MASK);
+ if (rc < 0) {
+ pr_err("Failed to enable WLED module rc=%d\n", rc);
+ goto failed_calib;
+ }
+ /*
+ * Delay for the WLED soft-start, check the OVP status
+ * only after soft-start is complete
+ */
+ usleep_range(QPNP_WLED_SOFT_START_DLY_US,
+ QPNP_WLED_SOFT_START_DLY_US + 1000);
+
+ /* iterate through the strings one by one */
+ for (i = 0; i < wled->max_strings; i++) {
+ sink_test = 1 << (QPNP_WLED_CURR_SINK_SHIFT + i);
+
+ /* Enable feedback control */
+ rc = qpnp_wled_write_reg(wled,
+ QPNP_WLED_FDBK_OP_REG(wled->ctrl_base),
+ i + 1);
+ if (rc < 0) {
+ pr_err("Failed to enable feedback for SINK %d rc = %d\n",
+ i + 1, rc);
+ goto failed_calib;
+ }
+
+ /* enable the sink */
+ rc = qpnp_wled_write_reg(wled,
+ QPNP_WLED_CURR_SINK_REG(wled->sink_base), sink_test);
+ if (rc < 0) {
+ pr_err("Failed to configure SINK %d rc=%d\n",
+ i + 1, rc);
+ goto failed_calib;
+ }
+
+ /* delay for WLED soft-start */
+ usleep_range(QPNP_WLED_SOFT_START_DLY_US,
+ QPNP_WLED_SOFT_START_DLY_US + 1000);
+
+ rc = qpnp_wled_read_reg(wled,
+ QPNP_WLED_INT_RT_STS(wled->ctrl_base), &int_sts);
+ if (rc < 0) {
+ pr_err("Error in reading WLED_INT_RT_STS rc=%d\n", rc);
+ goto failed_calib;
+ }
+
+ if (int_sts & QPNP_WLED_OVP_FAULT_BIT)
+ pr_debug("WLED OVP fault detected with SINK %d\n",
+ i + 1);
+ else
+ sink_valid |= sink_test;
+ }
+
+ if (sink_valid == sink_config) {
+ pr_debug("WLED auto-calibration complete, default sink-config=%x OK!\n",
+ sink_config);
+ } else {
+ pr_warn("Invalid WLED default sink config=%x changing it to=%x\n",
+ sink_config, sink_valid);
+ sink_config = sink_valid;
+ }
+
+ if (!sink_config) {
+ pr_warn("No valid WLED sinks found\n");
+ goto failed_calib;
+ }
+
+ rc = qpnp_wled_masked_write_reg(wled,
+ QPNP_WLED_MODULE_EN_REG(wled->ctrl_base),
+ QPNP_WLED_MODULE_EN_MASK, 0);
+ if (rc < 0) {
+ pr_err("Failed to disable WLED module rc=%d\n", rc);
+ goto failed_calib;
+ }
+
+ /* write the new sink configuration */
+ rc = qpnp_wled_write_reg(wled,
+ QPNP_WLED_CURR_SINK_REG(wled->sink_base), sink_config);
+ if (rc < 0) {
+ pr_err("Failed to reconfigure the default sink rc=%d\n", rc);
+ goto failed_calib;
+ }
+
+ /* MODULATOR_EN setting for valid sinks */
+ for (i = 0; i < wled->max_strings; i++) {
+ if (sink_config & (1 << (QPNP_WLED_CURR_SINK_SHIFT + i)))
+ reg = (QPNP_WLED_MOD_EN << QPNP_WLED_MOD_EN_SHFT);
+ else
+ reg = 0x0; /* disable modulator_en for unused sink */
+
+ if (wled->dim_mode == QPNP_WLED_DIM_HYBRID)
+ reg &= QPNP_WLED_GATE_DRV_MASK;
+ else
+ reg |= ~QPNP_WLED_GATE_DRV_MASK;
+
+ rc = qpnp_wled_write_reg(wled,
+ QPNP_WLED_MOD_EN_REG(wled->sink_base, i), reg);
+ if (rc < 0) {
+ pr_err("Failed to configure MODULATOR_EN rc=%d\n", rc);
+ goto failed_calib;
+ }
+ }
+
+ /* restore the feedback setting */
+ rc = qpnp_wled_write_reg(wled,
+ QPNP_WLED_FDBK_OP_REG(wled->ctrl_base),
+ wled->fdbk_op);
+ if (rc < 0) {
+ pr_err("Failed to restore feedback setting rc=%d\n", rc);
+ goto failed_calib;
+ }
+
+ /* restore brightness */
+ rc = qpnp_wled_set_level(wled, wled->cdev.brightness);
+ if (rc < 0) {
+ pr_err("Failed to set brightness after calibration rc=%d\n",
+ rc);
+ goto failed_calib;
+ }
+
+ rc = qpnp_wled_masked_write_reg(wled,
+ QPNP_WLED_MODULE_EN_REG(wled->ctrl_base),
+ QPNP_WLED_MODULE_EN_MASK,
+ QPNP_WLED_MODULE_EN_MASK);
+ if (rc < 0) {
+ pr_err("Failed to enable WLED module rc=%d\n", rc);
+ goto failed_calib;
+ }
+
+ /* delay for WLED soft-start */
+ usleep_range(QPNP_WLED_SOFT_START_DLY_US,
+ QPNP_WLED_SOFT_START_DLY_US + 1000);
+
+failed_calib:
+ if (wled->ovp_irq > 0 && wled->ovp_irq_disabled) {
+ enable_irq(wled->ovp_irq);
+ wled->ovp_irq_disabled = false;
+ }
+ mutex_unlock(&wled->lock);
+ return rc;
+}
+
+#define WLED_AUTO_CAL_OVP_COUNT 5
+#define WLED_AUTO_CAL_CNT_DLY_US 1000000 /* 1 second */
+static bool qpnp_wled_auto_cal_required(struct qpnp_wled *wled)
+{
+ s64 elapsed_time_us;
+
+ /*
+ * Check if the OVP fault was an occasional one
+ * or if its firing continuously, the latter qualifies
+ * for an auto-calibration check.
+ */
+ if (!wled->auto_calibration_ovp_count) {
+ wled->start_ovp_fault_time = ktime_get();
+ wled->auto_calibration_ovp_count++;
+ } else {
+ elapsed_time_us = ktime_us_delta(ktime_get(),
+ wled->start_ovp_fault_time);
+ if (elapsed_time_us > WLED_AUTO_CAL_CNT_DLY_US)
+ wled->auto_calibration_ovp_count = 0;
+ else
+ wled->auto_calibration_ovp_count++;
+
+ if (wled->auto_calibration_ovp_count >=
+ WLED_AUTO_CAL_OVP_COUNT) {
+ wled->auto_calibration_ovp_count = 0;
+ return true;
+ }
+ }
+
+ return false;
+}
+
/* ovp irq handler */
static irqreturn_t qpnp_wled_ovp_irq_handler(int irq, void *_wled)
{
@@ -1114,6 +1339,21 @@
if (fault_sts & (QPNP_WLED_OVP_FAULT_BIT | QPNP_WLED_ILIM_FAULT_BIT))
pr_err("WLED OVP fault detected, int_sts=%x fault_sts= %x\n",
int_sts, fault_sts);
+
+ if (fault_sts & QPNP_WLED_OVP_FAULT_BIT) {
+ if (wled->auto_calib_enabled && !wled->auto_calib_done) {
+ if (qpnp_wled_auto_cal_required(wled)) {
+ rc = wled_auto_calibrate(wled);
+ if (rc < 0) {
+ pr_err("Failed auto-calibration rc=%d\n",
+ rc);
+ return IRQ_HANDLED;
+ }
+ wled->auto_calib_done = true;
+ }
+ }
+ }
+
return IRQ_HANDLED;
}
@@ -1423,7 +1663,7 @@
static int qpnp_wled_config(struct qpnp_wled *wled)
{
int rc, i, temp;
- u8 reg = 0;
+ u8 reg = 0, sink_en = 0, mask;
/* Configure display type */
rc = qpnp_wled_set_disp(wled, wled->ctrl_base);
@@ -1622,16 +1862,50 @@
rc = qpnp_wled_write_reg(wled, QPNP_WLED_CURR_SINK_REG(wled->sink_base),
reg);
+ for (i = 0; i < wled->max_strings; i++) {
+ /* SYNC DELAY */
+ if (wled->sync_dly_us > QPNP_WLED_SYNC_DLY_MAX_US)
+ wled->sync_dly_us = QPNP_WLED_SYNC_DLY_MAX_US;
+
+ reg = wled->sync_dly_us / QPNP_WLED_SYNC_DLY_STEP_US;
+ mask = QPNP_WLED_SYNC_DLY_MASK;
+ rc = qpnp_wled_masked_write_reg(wled,
+ QPNP_WLED_SYNC_DLY_REG(wled->sink_base, i),
+ mask, reg);
+ if (rc < 0)
+ return rc;
+
+ /* FULL SCALE CURRENT */
+ if (wled->fs_curr_ua > QPNP_WLED_FS_CURR_MAX_UA)
+ wled->fs_curr_ua = QPNP_WLED_FS_CURR_MAX_UA;
+
+ reg = wled->fs_curr_ua / QPNP_WLED_FS_CURR_STEP_UA;
+ mask = QPNP_WLED_FS_CURR_MASK;
+ rc = qpnp_wled_masked_write_reg(wled,
+ QPNP_WLED_FS_CURR_REG(wled->sink_base, i),
+ mask, reg);
+ if (rc < 0)
+ return rc;
+
+ /* CABC */
+ reg = wled->en_cabc ? (1 << QPNP_WLED_CABC_SHIFT) : 0;
+ mask = QPNP_WLED_CABC_MASK;
+ rc = qpnp_wled_masked_write_reg(wled,
+ QPNP_WLED_CABC_REG(wled->sink_base, i),
+ mask, reg);
+ if (rc < 0)
+ return rc;
+ }
+
+ /* Settings specific to valid sinks */
for (i = 0; i < wled->num_strings; i++) {
- if (wled->strings[i] >= QPNP_WLED_MAX_STRINGS) {
+ if (wled->strings[i] >= wled->max_strings) {
dev_err(&wled->pdev->dev, "Invalid string number\n");
return -EINVAL;
}
-
/* MODULATOR */
rc = qpnp_wled_read_reg(wled,
- QPNP_WLED_MOD_EN_REG(wled->sink_base,
- wled->strings[i]), ®);
+ QPNP_WLED_MOD_EN_REG(wled->sink_base, i), ®);
if (rc < 0)
return rc;
reg &= QPNP_WLED_MOD_EN_MASK;
@@ -1643,72 +1917,22 @@
reg |= ~QPNP_WLED_GATE_DRV_MASK;
rc = qpnp_wled_write_reg(wled,
- QPNP_WLED_MOD_EN_REG(wled->sink_base,
- wled->strings[i]), reg);
+ QPNP_WLED_MOD_EN_REG(wled->sink_base, i), reg);
if (rc)
return rc;
- /* SYNC DELAY */
- if (wled->sync_dly_us > QPNP_WLED_SYNC_DLY_MAX_US)
- wled->sync_dly_us = QPNP_WLED_SYNC_DLY_MAX_US;
-
- rc = qpnp_wled_read_reg(wled,
- QPNP_WLED_SYNC_DLY_REG(wled->sink_base,
- wled->strings[i]), ®);
- if (rc < 0)
- return rc;
- reg &= QPNP_WLED_SYNC_DLY_MASK;
- temp = wled->sync_dly_us / QPNP_WLED_SYNC_DLY_STEP_US;
- reg |= temp;
- rc = qpnp_wled_write_reg(wled,
- QPNP_WLED_SYNC_DLY_REG(wled->sink_base,
- wled->strings[i]), reg);
- if (rc)
- return rc;
-
- /* FULL SCALE CURRENT */
- if (wled->fs_curr_ua > QPNP_WLED_FS_CURR_MAX_UA)
- wled->fs_curr_ua = QPNP_WLED_FS_CURR_MAX_UA;
-
- rc = qpnp_wled_read_reg(wled,
- QPNP_WLED_FS_CURR_REG(wled->sink_base,
- wled->strings[i]), ®);
- if (rc < 0)
- return rc;
- reg &= QPNP_WLED_FS_CURR_MASK;
- temp = wled->fs_curr_ua / QPNP_WLED_FS_CURR_STEP_UA;
- reg |= temp;
- rc = qpnp_wled_write_reg(wled,
- QPNP_WLED_FS_CURR_REG(wled->sink_base,
- wled->strings[i]), reg);
- if (rc)
- return rc;
-
- /* CABC */
- rc = qpnp_wled_read_reg(wled,
- QPNP_WLED_CABC_REG(wled->sink_base,
- wled->strings[i]), ®);
- if (rc < 0)
- return rc;
- reg &= QPNP_WLED_CABC_MASK;
- reg |= (wled->en_cabc << QPNP_WLED_CABC_SHIFT);
- rc = qpnp_wled_write_reg(wled,
- QPNP_WLED_CABC_REG(wled->sink_base,
- wled->strings[i]), reg);
- if (rc)
- return rc;
-
- /* Enable CURRENT SINK */
- rc = qpnp_wled_read_reg(wled,
- QPNP_WLED_CURR_SINK_REG(wled->sink_base), ®);
- if (rc < 0)
- return rc;
+ /* SINK EN */
temp = wled->strings[i] + QPNP_WLED_CURR_SINK_SHIFT;
- reg |= (1 << temp);
- rc = qpnp_wled_write_reg(wled,
- QPNP_WLED_CURR_SINK_REG(wled->sink_base), reg);
- if (rc)
- return rc;
+ sink_en |= (1 << temp);
+ }
+ mask = QPNP_WLED_CURR_SINK_MASK;
+ rc = qpnp_wled_masked_write_reg(wled,
+ QPNP_WLED_CURR_SINK_REG(wled->sink_base),
+ mask, sink_en);
+ if (rc < 0) {
+ dev_err(&wled->pdev->dev,
+ "Failed to enable WLED sink config rc = %d\n", rc);
+ return rc;
}
rc = qpnp_wled_sync_reg_toggle(wled);
@@ -1728,8 +1952,13 @@
wled->ovp_irq, rc);
return rc;
}
- disable_irq(wled->ovp_irq);
- wled->ovp_irq_disabled = true;
+ rc = qpnp_wled_read_reg(wled,
+ QPNP_WLED_MODULE_EN_REG(wled->ctrl_base), ®);
+ /* disable the OVP irq only if the module is not enabled */
+ if (!rc && !(reg & QPNP_WLED_MODULE_EN_MASK)) {
+ disable_irq(wled->ovp_irq);
+ wled->ovp_irq_disabled = true;
+ }
}
if (wled->sc_irq >= 0) {
@@ -2091,11 +2320,16 @@
wled->en_cabc = of_property_read_bool(pdev->dev.of_node,
"qcom,en-cabc");
+ if (wled->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE)
+ wled->max_strings = QPNP_PM660_WLED_MAX_STRINGS;
+ else
+ wled->max_strings = QPNP_WLED_MAX_STRINGS;
+
prop = of_find_property(pdev->dev.of_node,
"qcom,led-strings-list", &temp_val);
if (!prop || !temp_val || temp_val > QPNP_WLED_MAX_STRINGS) {
dev_err(&pdev->dev, "Invalid strings info, use default");
- wled->num_strings = QPNP_WLED_MAX_STRINGS;
+ wled->num_strings = wled->max_strings;
for (i = 0; i < wled->num_strings; i++)
wled->strings[i] = i;
} else {
@@ -2118,6 +2352,9 @@
wled->lcd_psm_ctrl = of_property_read_bool(pdev->dev.of_node,
"qcom,lcd-psm-ctrl");
+
+ wled->auto_calib_enabled = of_property_read_bool(pdev->dev.of_node,
+ "qcom,auto-calibration-enable");
return 0;
}
@@ -2186,13 +2423,13 @@
}
mutex_init(&wled->bus_lock);
+ mutex_init(&wled->lock);
rc = qpnp_wled_config(wled);
if (rc) {
dev_err(&pdev->dev, "wled config failed\n");
return rc;
}
- mutex_init(&wled->lock);
INIT_WORK(&wled->work, qpnp_wled_work);
wled->ramp_ms = QPNP_WLED_RAMP_DLY_MS;
wled->ramp_step = 1;
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index d2cb1e8..3989bc6 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -330,6 +330,16 @@
}
EXPORT_SYMBOL(mbox_controller_is_idle);
+
+void mbox_chan_debug(struct mbox_chan *chan)
+{
+ if (!chan || !chan->cl || !chan->mbox->debug)
+ return;
+
+ return chan->mbox->debug(chan);
+}
+EXPORT_SYMBOL(mbox_chan_debug);
+
/**
* mbox_request_channel - Request a mailbox channel.
* @cl: Identity of the client requesting the channel.
diff --git a/drivers/mailbox/msm_qmp.c b/drivers/mailbox/msm_qmp.c
index dd022d3..f0bb0bc 100644
--- a/drivers/mailbox/msm_qmp.c
+++ b/drivers/mailbox/msm_qmp.c
@@ -26,7 +26,6 @@
#define QMP_MAGIC 0x4d41494c /* MAIL */
#define QMP_VERSION 0x1
#define QMP_FEATURES 0x0
-#define QMP_NUM_CHANS 0x1
#define QMP_TOUT_MS 5000
#define QMP_TX_TOUT_MS 2000
@@ -107,63 +106,89 @@
};
/**
- * struct qmp_device - local information for managing a single mailbox
- * @dev: The device that corresponds to this mailbox
- * @mbox: The mbox controller for this mailbox
- * @name: The name of this mailbox
- * @local_state: Current state of the mailbox protocol
- * @link_complete: Use to block until link negotiation with remote proc
- * is complete
- * @ch_complete: Use to block until the channel is fully opened
+ * struct qmp_mbox - local information for managing a single mailbox
+ * @list: List head for adding mbox to linked list
+ * @ctrl: Controller for this mailbox
+ * @priority: Priority of mailbox in the linked list
+ * @num_assigned: Number of channels assigned for allocated pool
+ * @num_shutdown: Number of channels that have shutdown
+ * @desc: Reference to the mailbox descriptor in SMEM
+ * @rx_disabled: Disable rx if multiple client are sending from this mbox
* @tx_sent: True if tx is sent and remote proc has not sent ack
- * @ch_in_use: True if this mailbox's channel owned by a client
- * @rx_buf: buffer to pass to client, holds copied data from mailbox
- * @version: Version and features received during link negotiation
+ * @idx_in_flight: current channel idx whos tx is in flight
* @mcore_mbox_offset: Offset of mcore mbox from the msgram start
* @mcore_mbox_size: Size of the mcore mbox
- * @desc: Reference to the mailbox descriptor in SMEM
+ * @rx_pkt: buffer to pass to client, holds copied data from mailbox
+ * @version: Version and features received during link negotiation
+ * @local_state: Current state of the mailbox protocol
+ * @state_lock: Serialize mailbox state changes
+ * @tx_lock: Serialize access for writes to mailbox
+ * @link_complete: Use to block until link negotiation with remote proc
+ * @ch_complete: Use to block until the channel is fully opened
+ * @ch_in_use: True if this mailbox's channel owned by a client
+ * @dwork: Delayed work to detect timed out tx
+ */
+struct qmp_mbox {
+ struct list_head list;
+ struct mbox_controller ctrl;
+ int priority;
+ u32 num_assigned;
+ u32 num_shutdown;
+
+ void __iomem *desc;
+ bool rx_disabled;
+ bool tx_sent;
+ u32 idx_in_flight;
+ u32 mcore_mbox_offset;
+ u32 mcore_mbox_size;
+ struct qmp_pkt rx_pkt;
+
+ struct qmp_core_version version;
+ enum qmp_local_state local_state;
+ struct mutex state_lock;
+ spinlock_t tx_lock;
+
+ struct completion link_complete;
+ struct completion ch_complete;
+ struct delayed_work dwork;
+ struct qmp_device *mdev;
+};
+
+/**
+ * struct qmp_device - local information for managing a single qmp edge
+ * @dev: The device that corresponds to this edge
+ * @name: The name of this mailbox
+ * @mboxes: The mbox controller for this mailbox
* @msgram: Reference to the start of msgram
- * @irq_mask: Mask written to @tx_irq_reg to trigger irq
* @tx_irq_reg: Reference to the register to send an irq to remote proc
* @rx_reset_reg: Reference to the register to reset the rx irq, if
* applicable
- * @rx_irq_line: The incoming interrupt line
- * @tx_irq_count: Number of tx interrupts triggered
- * @rx_irq_count: Number of rx interrupts received
- * @kwork: Work to be executed when an irq is received
+ * @kwork: kwork for rx handling
* @kworker: Handle to entitiy to process incoming data
* @task: Handle to task context used to run @kworker
- * @state_lock: Serialize mailbox state changes
- * @dwork: Delayed work to detect timed out tx
- * @tx_lock: Serialize access for writes to mailbox
+ * @irq_mask: Mask written to @tx_irq_reg to trigger irq
+ * @rx_irq_line: The incoming interrupt line
+ * @rx_work: Work to be executed when an irq is received
+ * @tx_irq_count: Number of tx interrupts triggered
+ * @rx_irq_count: Number of rx interrupts received
*/
struct qmp_device {
struct device *dev;
- struct mbox_controller *mbox;
const char *name;
- enum qmp_local_state local_state;
- struct completion link_complete;
- struct completion ch_complete;
- bool tx_sent;
- bool ch_in_use;
- struct qmp_pkt rx_pkt;
- struct qmp_core_version version;
- u32 mcore_mbox_offset;
- u32 mcore_mbox_size;
- void __iomem *desc;
+ struct list_head mboxes;
+
void __iomem *msgram;
- u32 irq_mask;
void __iomem *tx_irq_reg;
void __iomem *rx_reset_reg;
- u32 rx_irq_line;
- u32 tx_irq_count;
- u32 rx_irq_count;
+
struct kthread_work kwork;
struct kthread_worker kworker;
struct task_struct *task;
- struct mutex state_lock;
- struct delayed_work dwork;
- spinlock_t tx_lock;
+
+ u32 irq_mask;
+ u32 rx_irq_line;
+ u32 tx_irq_count;
+ u32 rx_irq_count;
};
/**
@@ -181,25 +206,7 @@
mdev->tx_irq_count++;
}
-/**
- * qmp_irq_handler() - handle irq from remote entitity.
- * @irq: irq number for the trggered interrupt.
- * @priv: private pointer to qmp mbox device.
- */
-irqreturn_t qmp_irq_handler(int irq, void *priv)
-{
- struct qmp_device *mdev = (struct qmp_device *)priv;
-
- if (mdev->rx_reset_reg)
- writel_relaxed(mdev->irq_mask, mdev->rx_reset_reg);
-
- kthread_queue_work(&mdev->kworker, &mdev->kwork);
- mdev->rx_irq_count++;
-
- return IRQ_HANDLED;
-}
-
-static void memcpy32_toio(void *dest, void *src, size_t size)
+static void memcpy32_toio(void __iomem *dest, void *src, size_t size)
{
u32 *dest_local = (u32 *)dest;
u32 *src_local = (u32 *)src;
@@ -210,7 +217,7 @@
iowrite32(*src_local++, dest_local++);
}
-static void memcpy32_fromio(void *dest, void *src, size_t size)
+static void memcpy32_fromio(void *dest, void __iomem *src, size_t size)
{
u32 *dest_local = (u32 *)dest;
u32 *src_local = (u32 *)src;
@@ -222,62 +229,75 @@
}
/**
- * set_ucore_link_ack() - set the link ack in the ucore channel desc.
- * @mdev: the mailbox for the field that is being set.
- * @state: the value to set the ack field to.
- */
-static void set_ucore_link_ack(struct qmp_device *mdev, u32 state)
-{
- u32 offset;
-
- offset = offsetof(struct mbox_desc, ucore);
- offset += offsetof(struct channel_desc, link_state_ack);
- iowrite32(state, mdev->desc + offset);
-}
-
-/**
- * set_ucore_ch_ack() - set the channel ack in the ucore channel desc.
- * @mdev: the mailbox for the field that is being set.
- * @state: the value to set the ack field to.
- */
-static void set_ucore_ch_ack(struct qmp_device *mdev, u32 state)
-{
- u32 offset;
-
- offset = offsetof(struct mbox_desc, ucore);
- offset += offsetof(struct channel_desc, ch_state_ack);
- iowrite32(state, mdev->desc + offset);
-}
-
-/**
- * set_mcore_ch() - set the channel state in the mcore channel desc.
- * @mdev: the mailbox for the field that is being set.
- * @state: the value to set the channel field to.
- */
-static void set_mcore_ch(struct qmp_device *mdev, u32 state)
-{
- u32 offset;
-
- offset = offsetof(struct mbox_desc, mcore);
- offset += offsetof(struct channel_desc, ch_state);
- iowrite32(state, mdev->desc + offset);
-}
-
-/**
* qmp_notify_timeout() - Notify client of tx timeout with -EIO
* @work: Structure for work that was scheduled.
*/
static void qmp_notify_timeout(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
- struct qmp_device *mdev = container_of(dwork, struct qmp_device, dwork);
- struct mbox_chan *chan = &mdev->mbox->chans[0];
+ struct qmp_mbox *mbox = container_of(dwork, struct qmp_mbox, dwork);
+ struct mbox_chan *chan = &mbox->ctrl.chans[mbox->idx_in_flight];
int err = -EIO;
+ unsigned long flags;
- pr_err("%s: qmp tx timeout for %s\n", __func__, mdev->name);
+ spin_lock_irqsave(&mbox->tx_lock, flags);
+ if (!mbox->tx_sent) {
+ spin_unlock_irqrestore(&mbox->tx_lock, flags);
+ return;
+ }
+ pr_err("%s: qmp tx timeout for %d\n", __func__, mbox->idx_in_flight);
+ mbox->tx_sent = false;
+ spin_unlock_irqrestore(&mbox->tx_lock, flags);
mbox_chan_txdone(chan, err);
}
+static inline void qmp_schedule_tx_timeout(struct qmp_mbox *mbox)
+{
+ schedule_delayed_work(&mbox->dwork, msecs_to_jiffies(QMP_TX_TOUT_MS));
+}
+
+/**
+ * set_ucore_link_ack() - set the link ack in the ucore channel desc.
+ * @mbox: the mailbox for the field that is being set.
+ * @state: the value to set the ack field to.
+ */
+static void set_ucore_link_ack(struct qmp_mbox *mbox, u32 state)
+{
+ u32 offset;
+
+ offset = offsetof(struct mbox_desc, ucore);
+ offset += offsetof(struct channel_desc, link_state_ack);
+ iowrite32(state, mbox->desc + offset);
+}
+
+/**
+ * set_ucore_ch_ack() - set the channel ack in the ucore channel desc.
+ * @mbox: the mailbox for the field that is being set.
+ * @state: the value to set the ack field to.
+ */
+static void set_ucore_ch_ack(struct qmp_mbox *mbox, u32 state)
+{
+ u32 offset;
+
+ offset = offsetof(struct mbox_desc, ucore);
+ offset += offsetof(struct channel_desc, ch_state_ack);
+ iowrite32(state, mbox->desc + offset);
+}
+
+/**
+ * set_mcore_ch() - set the channel state in the mcore channel desc.
+ * @mbox: the mailbox for the field that is being set.
+ * @state: the value to set the channel field to.
+ */
+static void set_mcore_ch(struct qmp_mbox *mbox, u32 state)
+{
+ u32 offset;
+
+ offset = offsetof(struct mbox_desc, mcore);
+ offset += offsetof(struct channel_desc, ch_state);
+ iowrite32(state, mbox->desc + offset);
+}
+
/**
* qmp_startup() - Start qmp mailbox channel for communication. Waits for
* remote subsystem to open channel if link is not
@@ -288,35 +308,27 @@
*/
static int qmp_startup(struct mbox_chan *chan)
{
- struct qmp_device *mdev = chan->con_priv;
+ struct qmp_mbox *mbox = chan->con_priv;
- if (!mdev)
+ if (!mbox)
return -EINVAL;
- mutex_lock(&mdev->state_lock);
- if (mdev->local_state == CHANNEL_CONNECTED) {
- mutex_unlock(&mdev->state_lock);
- return -EINVAL;
- }
- if (!completion_done(&mdev->link_complete)) {
- mutex_unlock(&mdev->state_lock);
+ mutex_lock(&mbox->state_lock);
+ if (!completion_done(&mbox->link_complete)) {
+ mutex_unlock(&mbox->state_lock);
return -EAGAIN;
}
- set_mcore_ch(mdev, QMP_MBOX_CH_CONNECTED);
- mdev->local_state = LOCAL_CONNECTING;
- mutex_unlock(&mdev->state_lock);
+ set_mcore_ch(mbox, QMP_MBOX_CH_CONNECTED);
+ mbox->local_state = LOCAL_CONNECTING;
+ mutex_unlock(&mbox->state_lock);
- send_irq(mdev);
- wait_for_completion_interruptible_timeout(&mdev->ch_complete,
+ send_irq(mbox->mdev);
+ wait_for_completion_interruptible_timeout(&mbox->ch_complete,
msecs_to_jiffies(QMP_TOUT_MS));
return 0;
}
-static inline void qmp_schedule_tx_timeout(struct qmp_device *mdev)
-{
- schedule_delayed_work(&mdev->dwork, msecs_to_jiffies(QMP_TX_TOUT_MS));
-}
/**
* qmp_send_data() - Copy the data to the channel's mailbox and notify
@@ -331,31 +343,39 @@
*/
static int qmp_send_data(struct mbox_chan *chan, void *data)
{
- struct qmp_device *mdev = chan->con_priv;
+ struct qmp_mbox *mbox = chan->con_priv;
+ struct qmp_device *mdev;
struct qmp_pkt *pkt = (struct qmp_pkt *)data;
void __iomem *addr;
unsigned long flags;
+ int i;
- if (!mdev || !data || mdev->local_state != CHANNEL_CONNECTED)
+ if (!mbox || !data || mbox->local_state != CHANNEL_CONNECTED)
return -EINVAL;
+ mdev = mbox->mdev;
- spin_lock_irqsave(&mdev->tx_lock, flags);
- addr = mdev->msgram + mdev->mcore_mbox_offset;
- if (ioread32(addr)) {
- spin_unlock_irqrestore(&mdev->tx_lock, flags);
+ spin_lock_irqsave(&mbox->tx_lock, flags);
+ addr = mdev->msgram + mbox->mcore_mbox_offset;
+ if (mbox->tx_sent) {
+ spin_unlock_irqrestore(&mbox->tx_lock, flags);
return -EBUSY;
}
- if (pkt->size + sizeof(pkt->size) > mdev->mcore_mbox_size) {
- spin_unlock_irqrestore(&mdev->tx_lock, flags);
+ if (pkt->size + sizeof(pkt->size) > mbox->mcore_mbox_size) {
+ spin_unlock_irqrestore(&mbox->tx_lock, flags);
return -EINVAL;
}
+
memcpy32_toio(addr + sizeof(pkt->size), pkt->data, pkt->size);
iowrite32(pkt->size, addr);
- mdev->tx_sent = true;
+ mbox->tx_sent = true;
+ for (i = 0; i < mbox->ctrl.num_chans; i++) {
+ if (chan == &mbox->ctrl.chans[i])
+ mbox->idx_in_flight = i;
+ }
send_irq(mdev);
- qmp_schedule_tx_timeout(mdev);
- spin_unlock_irqrestore(&mdev->tx_lock, flags);
+ qmp_schedule_tx_timeout(mbox);
+ spin_unlock_irqrestore(&mbox->tx_lock, flags);
return 0;
}
@@ -367,16 +387,23 @@
*/
static void qmp_shutdown(struct mbox_chan *chan)
{
- struct qmp_device *mdev = chan->con_priv;
+ struct qmp_mbox *mbox = chan->con_priv;
- mutex_lock(&mdev->state_lock);
- if (mdev->local_state != LINK_DISCONNECTED) {
- mdev->local_state = LOCAL_DISCONNECTING;
- set_mcore_ch(mdev, QMP_MBOX_CH_DISCONNECTED);
- send_irq(mdev);
+ mutex_lock(&mbox->state_lock);
+ mbox->num_shutdown++;
+ if (mbox->num_shutdown < mbox->num_assigned) {
+ mutex_unlock(&mbox->state_lock);
+ return;
}
- mdev->ch_in_use = false;
- mutex_unlock(&mdev->state_lock);
+
+ if (mbox->local_state != LINK_DISCONNECTED) {
+ mbox->local_state = LOCAL_DISCONNECTING;
+ set_mcore_ch(mbox, QMP_MBOX_CH_DISCONNECTED);
+ send_irq(mbox->mdev);
+ }
+ mbox->num_shutdown = 0;
+ mbox->num_assigned = 0;
+ mutex_unlock(&mbox->state_lock);
}
/**
@@ -396,33 +423,34 @@
/**
* qmp_recv_data() - received notification that data is available in the
* mailbox. Copy data from mailbox and pass to client.
- * @mdev: mailbox device that received the notification.
+ * @mbox: mailbox device that received the notification.
* @mbox_of: offset of mailbox from msgram start.
*/
-static void qmp_recv_data(struct qmp_device *mdev, u32 mbox_of)
+static void qmp_recv_data(struct qmp_mbox *mbox, u32 mbox_of)
{
void __iomem *addr;
struct qmp_pkt *pkt;
- addr = mdev->msgram + mbox_of;
- pkt = &mdev->rx_pkt;
+ addr = mbox->mdev->msgram + mbox_of;
+ pkt = &mbox->rx_pkt;
pkt->size = ioread32(addr);
- if (pkt->size > mdev->mcore_mbox_size)
+ if (pkt->size > mbox->mcore_mbox_size)
pr_err("%s: Invalid mailbox packet\n", __func__);
else {
memcpy32_fromio(pkt->data, addr + sizeof(pkt->size), pkt->size);
- mbox_chan_received_data(&mdev->mbox->chans[0], &pkt);
+ mbox_chan_received_data(&mbox->ctrl.chans[mbox->idx_in_flight],
+ pkt);
}
iowrite32(0, addr);
- send_irq(mdev);
+ send_irq(mbox->mdev);
}
/**
* init_mcore_state() - initialize the mcore state of a mailbox.
* @mdev: mailbox device to be initialized.
*/
-static void init_mcore_state(struct qmp_device *mdev)
+static void init_mcore_state(struct qmp_mbox *mbox)
{
struct channel_desc mcore;
u32 offset = offsetof(struct mbox_desc, mcore);
@@ -431,40 +459,60 @@
mcore.link_state_ack = QMP_MBOX_LINK_DOWN;
mcore.ch_state = QMP_MBOX_CH_DISCONNECTED;
mcore.ch_state_ack = QMP_MBOX_CH_DISCONNECTED;
- mcore.mailbox_size = mdev->mcore_mbox_size;
- mcore.mailbox_offset = mdev->mcore_mbox_offset;
- memcpy32_toio(mdev->desc + offset, &mcore, sizeof(mcore));
+ mcore.mailbox_size = mbox->mcore_mbox_size;
+ mcore.mailbox_offset = mbox->mcore_mbox_offset;
+ memcpy32_toio(mbox->desc + offset, &mcore, sizeof(mcore));
+}
+
+/**
+ * qmp_irq_handler() - handle irq from remote entitity.
+ * @irq: irq number for the trggered interrupt.
+ * @priv: private pointer to qmp mbox device.
+ */
+static irqreturn_t qmp_irq_handler(int irq, void *priv)
+{
+ struct qmp_device *mdev = (struct qmp_device *)priv;
+
+ if (mdev->rx_reset_reg)
+ writel_relaxed(mdev->irq_mask, mdev->rx_reset_reg);
+
+ kthread_queue_work(&mdev->kworker, &mdev->kwork);
+ mdev->rx_irq_count++;
+
+ return IRQ_HANDLED;
}
/**
* __qmp_rx_worker() - Handle incoming messages from remote processor.
- * @mdev: mailbox device that received notification.
+ * @mbox: mailbox device that received notification.
*/
-static void __qmp_rx_worker(struct qmp_device *mdev)
+static void __qmp_rx_worker(struct qmp_mbox *mbox)
{
- u32 msg_len;
+ u32 msg_len, idx;
struct mbox_desc desc;
+ struct qmp_device *mdev = mbox->mdev;
+ unsigned long flags;
- memcpy_fromio(&desc, mdev->desc, sizeof(desc));
+ memcpy_fromio(&desc, mbox->desc, sizeof(desc));
if (desc.magic != QMP_MAGIC)
return;
- mutex_lock(&mdev->state_lock);
- switch (mdev->local_state) {
+ mutex_lock(&mbox->state_lock);
+ switch (mbox->local_state) {
case LINK_DISCONNECTED:
- mdev->version.version = desc.version;
- mdev->version.features = desc.features;
- set_ucore_link_ack(mdev, desc.ucore.link_state);
+ mbox->version.version = desc.version;
+ mbox->version.features = desc.features;
+ set_ucore_link_ack(mbox, desc.ucore.link_state);
if (desc.mcore.mailbox_size) {
- mdev->mcore_mbox_size = desc.mcore.mailbox_size;
- mdev->mcore_mbox_offset = desc.mcore.mailbox_offset;
+ mbox->mcore_mbox_size = desc.mcore.mailbox_size;
+ mbox->mcore_mbox_offset = desc.mcore.mailbox_offset;
}
- init_mcore_state(mdev);
- mdev->local_state = LINK_NEGOTIATION;
- mdev->rx_pkt.data = devm_kzalloc(mdev->dev,
+ init_mcore_state(mbox);
+ mbox->local_state = LINK_NEGOTIATION;
+ mbox->rx_pkt.data = devm_kzalloc(mdev->dev,
desc.ucore.mailbox_size,
GFP_KERNEL);
- if (!mdev->rx_pkt.data) {
+ if (!mbox->rx_pkt.data) {
pr_err("In %s: failed to allocate rx pkt\n", __func__);
break;
}
@@ -477,8 +525,8 @@
__func__);
break;
}
- mdev->local_state = LINK_CONNECTED;
- complete_all(&mdev->link_complete);
+ mbox->local_state = LINK_CONNECTED;
+ complete_all(&mbox->link_complete);
break;
case LINK_CONNECTED:
if (desc.ucore.ch_state == desc.ucore.ch_state_ack) {
@@ -486,23 +534,23 @@
__func__);
break;
}
- set_ucore_ch_ack(mdev, desc.ucore.ch_state);
+ set_ucore_ch_ack(mbox, desc.ucore.ch_state);
send_irq(mdev);
break;
case LOCAL_CONNECTING:
if (desc.mcore.ch_state_ack == QMP_MBOX_CH_CONNECTED &&
desc.mcore.ch_state == QMP_MBOX_CH_CONNECTED)
- mdev->local_state = LOCAL_CONNECTED;
+ mbox->local_state = LOCAL_CONNECTED;
if (desc.ucore.ch_state != desc.ucore.ch_state_ack) {
- set_ucore_ch_ack(mdev, desc.ucore.ch_state);
+ set_ucore_ch_ack(mbox, desc.ucore.ch_state);
send_irq(mdev);
}
- if (mdev->local_state == LOCAL_CONNECTED &&
+ if (mbox->local_state == LOCAL_CONNECTED &&
desc.mcore.ch_state == QMP_MBOX_CH_CONNECTED &&
desc.ucore.ch_state == QMP_MBOX_CH_CONNECTED) {
- mdev->local_state = CHANNEL_CONNECTED;
- complete_all(&mdev->ch_complete);
+ mbox->local_state = CHANNEL_CONNECTED;
+ complete_all(&mbox->ch_complete);
}
break;
case LOCAL_CONNECTED:
@@ -511,50 +559,58 @@
__func__);
break;
}
- set_ucore_ch_ack(mdev, desc.ucore.ch_state);
- mdev->local_state = CHANNEL_CONNECTED;
+ set_ucore_ch_ack(mbox, desc.ucore.ch_state);
+ mbox->local_state = CHANNEL_CONNECTED;
send_irq(mdev);
- complete_all(&mdev->ch_complete);
+ complete_all(&mbox->ch_complete);
break;
case CHANNEL_CONNECTED:
if (desc.ucore.ch_state == QMP_MBOX_CH_DISCONNECTED) {
- set_ucore_ch_ack(mdev, desc.ucore.ch_state);
- mdev->local_state = LOCAL_CONNECTED;
+ set_ucore_ch_ack(mbox, desc.ucore.ch_state);
+ mbox->local_state = LOCAL_CONNECTED;
send_irq(mdev);
}
msg_len = ioread32(mdev->msgram + desc.ucore.mailbox_offset);
- if (msg_len)
- qmp_recv_data(mdev, desc.ucore.mailbox_offset);
+ if (msg_len && !mbox->rx_disabled)
+ qmp_recv_data(mbox, desc.ucore.mailbox_offset);
- if (mdev->tx_sent) {
+ spin_lock_irqsave(&mbox->tx_lock, flags);
+ idx = mbox->idx_in_flight;
+ if (mbox->tx_sent) {
msg_len = ioread32(mdev->msgram +
- mdev->mcore_mbox_offset);
+ mbox->mcore_mbox_offset);
if (msg_len == 0) {
- mdev->tx_sent = false;
- cancel_delayed_work(&mdev->dwork);
- mbox_chan_txdone(&mdev->mbox->chans[0], 0);
+ mbox->tx_sent = false;
+ cancel_delayed_work(&mbox->dwork);
+ spin_unlock_irqrestore(&mbox->tx_lock, flags);
+ mbox_chan_txdone(&mbox->ctrl.chans[idx], 0);
+ spin_lock_irqsave(&mbox->tx_lock, flags);
}
}
+ spin_unlock_irqrestore(&mbox->tx_lock, flags);
break;
case LOCAL_DISCONNECTING:
if (desc.mcore.ch_state_ack == QMP_MBOX_CH_DISCONNECTED &&
desc.mcore.ch_state == desc.mcore.ch_state_ack)
- mdev->local_state = LINK_CONNECTED;
- reinit_completion(&mdev->ch_complete);
+ mbox->local_state = LINK_CONNECTED;
+ reinit_completion(&mbox->ch_complete);
break;
default:
pr_err("In %s: Local Channel State corrupted\n", __func__);
}
- mutex_unlock(&mdev->state_lock);
+ mutex_unlock(&mbox->state_lock);
}
static void rx_worker(struct kthread_work *work)
{
struct qmp_device *mdev;
+ struct qmp_mbox *mbox;
mdev = container_of(work, struct qmp_device, kwork);
- __qmp_rx_worker(mdev);
+ list_for_each_entry(mbox, &mdev->mboxes, list) {
+ __qmp_rx_worker(mbox);
+ }
}
/**
@@ -566,48 +622,207 @@
static struct mbox_chan *qmp_mbox_of_xlate(struct mbox_controller *mbox,
const struct of_phandle_args *spec)
{
- struct qmp_device *mdev = dev_get_drvdata(mbox->dev);
- unsigned int channel = spec->args[0];
+ struct qmp_mbox *dev = container_of(mbox, struct qmp_mbox, ctrl);
+ struct mbox_chan *chan;
- if (!mdev || channel >= mbox->num_chans)
- return ERR_PTR(-EINVAL);
-
- mutex_lock(&mdev->state_lock);
- if (mdev->ch_in_use) {
- pr_err("%s, mbox channel already in use %s\n", __func__,
- mdev->name);
- mutex_unlock(&mdev->state_lock);
- return ERR_PTR(-EBUSY);
+ if (dev->num_assigned >= mbox->num_chans || !dev->ctrl.chans) {
+ pr_err("%s: QMP out of channels\n", __func__);
+ return ERR_PTR(-ENOMEM);
}
- mdev->ch_in_use = true;
- mutex_unlock(&mdev->state_lock);
- return &mbox->chans[0];
+
+ mutex_lock(&dev->state_lock);
+ chan = &dev->ctrl.chans[dev->num_assigned++];
+ mutex_unlock(&dev->state_lock);
+
+ return chan;
}
/**
- * parse_devicetree() - Parse the device tree information for QMP, map io
- * memory and register for needed interrupts
- * @pdev: platform device for this driver.
- * @mdev: mailbox device to hold the device tree configuration.
+ * cleanup_workqueue() - Flush all work and stop the thread for this mailbox.
+ * @mdev: mailbox device to cleanup.
+ */
+static void cleanup_workqueue(struct qmp_device *mdev)
+{
+ kthread_flush_worker(&mdev->kworker);
+ kthread_stop(mdev->task);
+ mdev->task = NULL;
+}
+
+static int qmp_mbox_remove(struct platform_device *pdev)
+{
+ struct qmp_device *mdev = platform_get_drvdata(pdev);
+ struct qmp_mbox *mbox = NULL;
+
+ disable_irq(mdev->rx_irq_line);
+ cleanup_workqueue(mdev);
+
+ list_for_each_entry(mbox, &mdev->mboxes, list) {
+ mbox_controller_unregister(&mbox->ctrl);
+ }
+ return 0;
+}
+
+/**
+ * get_mbox_num_chans() - Find how many mbox channels need to be allocated
+ *
+ * @node: device node for this mailbox.
+ *
+ * Return: the number of phandles referring to this device node
+ */
+static u32 get_mbox_num_chans(struct device_node *node)
+{
+ int i, j, ret;
+ u32 num_chans = 0;
+ struct device_node *np;
+ struct of_phandle_args p;
+
+ for_each_node_with_property(np, "mboxes") {
+ if (!of_device_is_available(np))
+ continue;
+ i = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
+ for (j = 0; j < i; j++) {
+ ret = of_parse_phandle_with_args(np, "mboxes",
+ "#mbox-cells", j, &p);
+ if (!ret && p.np == node) {
+ num_chans++;
+ break;
+ }
+ }
+ }
+ if (num_chans)
+ return num_chans;
+
+ return 1;
+}
+
+/**
+ * mdev_add_mbox() - Add a mailbox to qmp device based on priority
+ *
+ * @mdev: qmp device to add mailbox to.
+ * @new: new mailbox to add to qmp device.
+ */
+static void mdev_add_mbox(struct qmp_device *mdev, struct qmp_mbox *new)
+{
+ struct qmp_mbox *mbox;
+
+ list_for_each_entry(mbox, &mdev->mboxes, list) {
+ if (mbox->priority > new->priority)
+ continue;
+ list_add_tail(&new->list, &mbox->list);
+ return;
+ }
+ list_add_tail(&new->list, &mdev->mboxes);
+}
+
+static struct mbox_chan_ops qmp_mbox_ops = {
+ .startup = qmp_startup,
+ .shutdown = qmp_shutdown,
+ .send_data = qmp_send_data,
+ .last_tx_done = qmp_last_tx_done,
+};
+
+static const struct of_device_id qmp_mbox_match_table[] = {
+ { .compatible = "qcom,qmp-mbox" },
+ {},
+};
+
+/**
+ * qmp_mbox_init() - Parse the device tree for qmp mailbox and init structure
+ *
+ * @n: child device node representing a mailbox.
+ * @mbox: device structure for this edge.
*
* Return: 0 on succes or standard Linux error code.
*/
-static int qmp_parse_devicetree(struct platform_device *pdev,
- struct qmp_device *mdev)
+static int qmp_mbox_init(struct device_node *n, struct qmp_device *mdev)
{
+ int rc, i;
+ char *key;
+ struct qmp_mbox *mbox;
+ struct mbox_chan *chans;
+ u32 mbox_of, mbox_size, desc_of, priority, num_chans;
+
+ key = "mbox-desc-offset";
+ rc = of_property_read_u32(n, key, &desc_of);
+ if (rc) {
+ pr_err("%s: missing key %s\n", __func__, key);
+ return 0;
+ }
+ key = "priority";
+ rc = of_property_read_u32(n, key, &priority);
+ if (rc) {
+ pr_err("%s: missing key %s\n", __func__, key);
+ return 0;
+ }
+ mbox = devm_kzalloc(mdev->dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ rc = of_property_read_u32(n, "mbox-offset", &mbox_of);
+ if (!rc)
+ mbox->mcore_mbox_offset = mbox_of;
+ rc = of_property_read_u32(n, "mbox-size", &mbox_size);
+ if (!rc)
+ mbox->mcore_mbox_size = mbox_size;
+
+ mbox->mdev = mdev;
+ mbox->priority = priority;
+ mbox->desc = mdev->msgram + desc_of;
+ num_chans = get_mbox_num_chans(n);
+ mbox->rx_disabled = (num_chans > 1) ? true : false;
+ chans = devm_kzalloc(mdev->dev, sizeof(*chans) * num_chans, GFP_KERNEL);
+ if (!chans)
+ return -ENOMEM;
+
+ for (i = 0; i < num_chans; i++)
+ chans[i].con_priv = mbox;
+
+ mbox->ctrl.dev = mdev->dev;
+ mbox->ctrl.ops = &qmp_mbox_ops;
+ mbox->ctrl.chans = chans;
+ mbox->ctrl.num_chans = num_chans;
+ mbox->ctrl.txdone_irq = true;
+ mbox->ctrl.txdone_poll = false;
+ mbox->ctrl.of_xlate = qmp_mbox_of_xlate;
+
+ rc = mbox_controller_register(&mbox->ctrl);
+ if (rc) {
+ pr_err("%s: failed to register mbox controller %d\n", __func__,
+ rc);
+ return rc;
+ }
+ spin_lock_init(&mbox->tx_lock);
+ mutex_init(&mbox->state_lock);
+ mbox->local_state = LINK_DISCONNECTED;
+ init_completion(&mbox->link_complete);
+ init_completion(&mbox->ch_complete);
+ mbox->tx_sent = false;
+ mbox->num_assigned = 0;
+ INIT_DELAYED_WORK(&mbox->dwork, qmp_notify_timeout);
+
+ mdev_add_mbox(mdev, mbox);
+ return 0;
+}
+
+
+/**
+ * qmp_edge_init() - Parse the device tree information for QMP, map io
+ * memory and register for needed interrupts
+ * @pdev: platform device for this driver.
+ *
+ * Return: 0 on succes or standard Linux error code.
+ */
+static int qmp_edge_init(struct platform_device *pdev)
+{
+ struct qmp_device *mdev = platform_get_drvdata(pdev);
struct device_node *node = pdev->dev.of_node;
+ struct resource *msgram_r, *tx_irq_reg_r;
char *key;
int rc;
- const char *subsys_name;
- u32 rx_irq_line, tx_irq_mask;
- u32 desc_of = 0;
- u32 mbox_of = 0;
- u32 mbox_size = 0;
- struct resource *msgram_r, *tx_irq_reg_r;
key = "label";
- subsys_name = of_get_property(node, key, NULL);
- if (!subsys_name) {
+ mdev->name = of_get_property(node, key, NULL);
+ if (!mdev->name) {
pr_err("%s: missing key %s\n", __func__, key);
return -ENODEV;
}
@@ -627,143 +842,60 @@
}
key = "qcom,irq-mask";
- rc = of_property_read_u32(node, key, &tx_irq_mask);
+ rc = of_property_read_u32(node, key, &mdev->irq_mask);
if (rc) {
pr_err("%s: missing key %s\n", __func__, key);
return -ENODEV;
}
key = "interrupts";
- rx_irq_line = irq_of_parse_and_map(node, 0);
- if (!rx_irq_line) {
+ mdev->rx_irq_line = irq_of_parse_and_map(node, 0);
+ if (!mdev->rx_irq_line) {
pr_err("%s: missing key %s\n", __func__, key);
return -ENODEV;
}
- key = "mbox-desc-offset";
- rc = of_property_read_u32(node, key, &desc_of);
- if (rc) {
- pr_err("%s: missing key %s\n", __func__, key);
- return -ENODEV;
- }
-
- key = "mbox-offset";
- rc = of_property_read_u32(node, key, &mbox_of);
- if (!rc)
- mdev->mcore_mbox_offset = mbox_of;
-
- key = "mbox-size";
- rc = of_property_read_u32(node, key, &mbox_size);
- if (!rc)
- mdev->mcore_mbox_size = mbox_size;
-
- mdev->name = subsys_name;
- mdev->msgram = devm_ioremap_nocache(&pdev->dev, msgram_r->start,
- resource_size(msgram_r));
- if (!mdev->msgram)
- return -ENOMEM;
-
- mdev->desc = mdev->msgram + desc_of;
- if (!mdev->desc)
- return -ENOMEM;
-
- mdev->irq_mask = tx_irq_mask;
+ mdev->dev = &pdev->dev;
mdev->tx_irq_reg = devm_ioremap_nocache(&pdev->dev, tx_irq_reg_r->start,
resource_size(tx_irq_reg_r));
- if (!mdev->tx_irq_reg)
- return -ENOMEM;
+ mdev->msgram = devm_ioremap_nocache(&pdev->dev, msgram_r->start,
+ resource_size(msgram_r));
+ if (!mdev->msgram || !mdev->tx_irq_reg)
+ return -EIO;
- mdev->rx_irq_line = rx_irq_line;
+ INIT_LIST_HEAD(&mdev->mboxes);
return 0;
}
-/**
- * cleanup_workqueue() - Flush all work and stop the thread for this mailbox.
- * @mdev: mailbox device to cleanup.
- */
-static void cleanup_workqueue(struct qmp_device *mdev)
-{
- kthread_flush_worker(&mdev->kworker);
- kthread_stop(mdev->task);
- mdev->task = NULL;
-}
-
-static struct mbox_chan_ops qmp_mbox_ops = {
- .startup = qmp_startup,
- .shutdown = qmp_shutdown,
- .send_data = qmp_send_data,
- .last_tx_done = qmp_last_tx_done,
-};
-
-static const struct of_device_id qmp_mbox_match_table[] = {
- { .compatible = "qcom,qmp-mbox" },
- {},
-};
-
static int qmp_mbox_probe(struct platform_device *pdev)
{
- struct device_node *node = pdev->dev.of_node;
- struct mbox_controller *mbox;
+ struct device_node *edge_node = pdev->dev.of_node;
struct qmp_device *mdev;
- struct mbox_chan *chans;
int ret = 0;
mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
if (!mdev)
return -ENOMEM;
- platform_set_drvdata(pdev, mdev);
- ret = qmp_parse_devicetree(pdev, mdev);
+ platform_set_drvdata(pdev, mdev);
+ ret = qmp_edge_init(pdev);
if (ret)
return ret;
- mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox), GFP_KERNEL);
- if (!mbox)
- return -ENOMEM;
+ ret = qmp_mbox_init(edge_node, mdev);
+ if (ret)
+ return ret;
- chans = devm_kzalloc(&pdev->dev, sizeof(*chans) * QMP_NUM_CHANS,
- GFP_KERNEL);
- if (!chans)
- return -ENOMEM;
-
- mbox->dev = &pdev->dev;
- mbox->ops = &qmp_mbox_ops;
- mbox->chans = chans;
- mbox->chans[0].con_priv = mdev;
- mbox->num_chans = QMP_NUM_CHANS;
- mbox->txdone_irq = true;
- mbox->txdone_poll = false;
- mbox->of_xlate = qmp_mbox_of_xlate;
-
- mdev->dev = &pdev->dev;
- mdev->mbox = mbox;
- spin_lock_init(&mdev->tx_lock);
- mutex_init(&mdev->state_lock);
- mdev->local_state = LINK_DISCONNECTED;
kthread_init_work(&mdev->kwork, rx_worker);
kthread_init_worker(&mdev->kworker);
mdev->task = kthread_run(kthread_worker_fn, &mdev->kworker, "qmp_%s",
mdev->name);
- init_completion(&mdev->link_complete);
- init_completion(&mdev->ch_complete);
- mdev->tx_sent = false;
- mdev->ch_in_use = false;
- INIT_DELAYED_WORK(&mdev->dwork, qmp_notify_timeout);
-
- ret = mbox_controller_register(mbox);
- if (ret) {
- cleanup_workqueue(mdev);
- pr_err("%s: failed to register mbox controller %d\n", __func__,
- ret);
- return ret;
- }
ret = devm_request_irq(&pdev->dev, mdev->rx_irq_line, qmp_irq_handler,
IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND | IRQF_SHARED,
- node->name, mdev);
+ edge_node->name, mdev);
if (ret < 0) {
- cleanup_workqueue(mdev);
- mbox_controller_unregister(mdev->mbox);
+ qmp_mbox_remove(pdev);
pr_err("%s: request irq on %d failed: %d\n", __func__,
mdev->rx_irq_line, ret);
return ret;
@@ -773,19 +905,11 @@
pr_err("%s: enable_irq_wake on %d failed: %d\n", __func__,
mdev->rx_irq_line, ret);
+ /* Trigger RX */
qmp_irq_handler(0, mdev);
return 0;
}
-static int qmp_mbox_remove(struct platform_device *pdev)
-{
- struct qmp_device *mdev = platform_get_drvdata(pdev);
-
- cleanup_workqueue(mdev);
- mbox_controller_unregister(mdev->mbox);
- return 0;
-}
-
static struct platform_driver qmp_mbox_driver = {
.probe = qmp_mbox_probe,
.remove = qmp_mbox_remove,
diff --git a/drivers/mailbox/qti-tcs.c b/drivers/mailbox/qti-tcs.c
index bde20b4..f923db5 100644
--- a/drivers/mailbox/qti-tcs.c
+++ b/drivers/mailbox/qti-tcs.c
@@ -11,12 +11,13 @@
*
*/
-#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__
+#define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME
#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/ipc_logging.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
@@ -40,6 +41,8 @@
#define CREATE_TRACE_POINTS
#include <trace/events/rpmh.h>
+#define TCS_DRV_IPC_LOG_SIZE 2
+
#define MAX_CMDS_PER_TCS 16
#define MAX_TCS_PER_TYPE 3
#define MAX_TCS_SLOTS (MAX_CMDS_PER_TCS * MAX_TCS_PER_TYPE)
@@ -140,8 +143,35 @@
u64 tcs_last_recv_ts[MAX_POOL_SIZE];
atomic_t tcs_send_count[MAX_POOL_SIZE];
atomic_t tcs_irq_count[MAX_POOL_SIZE];
+ void *ipc_log_ctx;
};
+/* Log to IPC and Ftrace */
+#define log_send_msg(drv, m, n, i, a, d, c, t) do { \
+ trace_rpmh_send_msg(drv->name, m, n, i, a, d, c, t); \
+ ipc_log_string(drv->ipc_log_ctx, \
+ "send msg: m=%d n=%d msgid=0x%x addr=0x%x data=0x%x cmpl=%d trigger=%d", \
+ m, n, i, a, d, c, t); \
+ } while (0)
+
+#define log_rpmh_notify_irq(drv, m, a, e) do { \
+ trace_rpmh_notify_irq(drv->name, m, a, e); \
+ ipc_log_string(drv->ipc_log_ctx, \
+ "irq response: m=%d addr=0x%x err=%d", m, a, e); \
+ } while (0)
+
+#define log_rpmh_control_msg(drv, d) do { \
+ trace_rpmh_control_msg(drv->name, d); \
+ ipc_log_string(drv->ipc_log_ctx, "ctrlr msg: data=0x%x", d); \
+ } while (0)
+
+#define log_rpmh_notify(drv, m, a, e) do { \
+ trace_rpmh_notify(drv->name, m, a, e); \
+ ipc_log_string(drv->ipc_log_ctx, \
+ "tx done: m=%d addr=0x%x err=%d", m, a, e); \
+ } while (0)
+
+
static int tcs_response_pool_init(struct tcs_drv *drv)
{
struct tcs_response_pool *pool;
@@ -223,7 +253,6 @@
break;
}
pos++;
- udelay(1);
} while (1);
spin_unlock_irqrestore(&pool->lock, flags);
@@ -241,11 +270,11 @@
return;
msg = resp->msg;
- pr_debug("Response object idx=%d:\n\tfor-tcs=%d\tin-use=%d\n",
+ pr_warn("Response object [idx=%d for-tcs=%d in-use=%d]\n",
resp->idx, resp->m, resp->in_use);
- pr_debug("Msg: state=%d\n", msg->state);
+ pr_warn("Msg: state=%d\n", msg->state);
for (i = 0; i < msg->num_payload; i++)
- pr_debug("addr=0x%x data=0x%x complete=0x%x\n",
+ pr_warn("addr=0x%x data=0x%x complete=0x%x\n",
msg->payload[i].addr,
msg->payload[i].data,
msg->payload[i].complete);
@@ -425,8 +454,10 @@
sts = read_tcs_reg(base, TCS_DRV_CMD_STATUS, m, i);
if ((!(sts & CMD_STATUS_ISSUED)) ||
((resp->msg->is_complete || cmd->complete) &&
- (!(sts & CMD_STATUS_COMPL))))
+ (!(sts & CMD_STATUS_COMPL)))) {
resp->err = -EIO;
+ break;
+ }
}
/* Check for response if this was a read request */
@@ -437,13 +468,15 @@
mbox_chan_received_data(resp->chan, resp->msg);
}
- trace_rpmh_notify_irq(drv->name, m, resp->msg->payload[0].addr,
+ log_rpmh_notify_irq(drv, m, resp->msg->payload[0].addr,
resp->err);
/* Clear the AMC mode for non-ACTIVE TCSes */
tcs = get_tcs_from_index(drv, m);
if (tcs && tcs->type != ACTIVE_TCS) {
data = read_tcs_reg(base, TCS_DRV_CONTROL, m, 0);
+ data &= ~TCS_AMC_MODE_TRIGGER;
+ write_tcs_reg_sync(base, TCS_DRV_CONTROL, m, 0, data);
data &= ~TCS_AMC_MODE_ENABLE;
write_tcs_reg(base, TCS_DRV_CONTROL, m, 0, data);
/*
@@ -480,7 +513,7 @@
{
struct tcs_drv *drv = container_of(chan->mbox, struct tcs_drv, mbox);
- trace_rpmh_notify(drv->name, m, msg->payload[0].addr, err);
+ log_rpmh_notify(drv, m, msg->payload[0].addr, err);
mbox_chan_txdone(chan, err);
}
@@ -524,7 +557,7 @@
u32 msgid, cmd_msgid = 0;
u32 cmd_enable = 0;
u32 cmd_complete;
- u32 enable = TCS_AMC_MODE_ENABLE;
+ u32 enable;
struct tcs_cmd *cmd;
int i;
void __iomem *base = drv->reg_base;
@@ -546,7 +579,7 @@
write_tcs_reg(base, TCS_DRV_CMD_MSGID, m, n + i, msgid);
write_tcs_reg(base, TCS_DRV_CMD_ADDR, m, n + i, cmd->addr);
write_tcs_reg(base, TCS_DRV_CMD_DATA, m, n + i, cmd->data);
- trace_rpmh_send_msg(drv->name, m, n + i, msgid, cmd->addr,
+ log_send_msg(drv, m, n + i, msgid, cmd->addr,
cmd->data, cmd->complete, trigger);
}
@@ -558,12 +591,22 @@
write_tcs_reg(base, TCS_DRV_CMD_ENABLE, m, 0, cmd_enable);
if (trigger) {
- /* HW req: Clear the DRV_CONTROL and enable TCS again */
- write_tcs_reg_sync(base, TCS_DRV_CONTROL, m, 0, 0);
+ /*
+ * HW req: Clear the DRV_CONTROL and enable TCS again
+ * While clearing ensure that the AMC mode trigger is cleared
+ * and then the mode enable is cleared.
+ */
+ enable = read_tcs_reg(base, TCS_DRV_CONTROL, m, 0);
+ enable &= ~TCS_AMC_MODE_TRIGGER;
write_tcs_reg_sync(base, TCS_DRV_CONTROL, m, 0, enable);
- /* Enable the AMC mode on the TCS */
+ enable &= ~TCS_AMC_MODE_ENABLE;
+ write_tcs_reg_sync(base, TCS_DRV_CONTROL, m, 0, enable);
+
+ /* Enable the AMC mode on the TCS and then trigger the TCS */
+ enable = TCS_AMC_MODE_ENABLE;
+ write_tcs_reg_sync(base, TCS_DRV_CONTROL, m, 0, enable);
enable |= TCS_AMC_MODE_TRIGGER;
- write_tcs_reg_sync(base, TCS_DRV_CONTROL, m, 0, enable);
+ write_tcs_reg(base, TCS_DRV_CONTROL, m, 0, enable);
}
}
@@ -667,7 +710,8 @@
int n = 0;
/* For active requests find the first free AMC. */
- if (tcs->type == ACTIVE_TCS)
+ if (msg->state == RPMH_ACTIVE_ONLY_STATE ||
+ msg->state == RPMH_AWAKE_STATE)
return find_free_tcs(tcs);
/* Find if we already have the msg in our TCS */
@@ -780,6 +824,10 @@
spin_lock_irqsave(&tcs->tcs_lock, flags);
for (i = 0; i < tcs->num_tcs; i++) {
m = i + tcs->tcs_offset;
+ if (!tcs_is_free(drv, m)) {
+ spin_unlock_irqrestore(&tcs->tcs_lock, flags);
+ return -EBUSY;
+ }
__tcs_buffer_invalidate(drv->reg_base, m);
}
/* Mark the TCS as free */
@@ -795,7 +843,7 @@
int n;
struct tcs_mbox *tcs = get_tcs_from_index(drv, m);
void __iomem *base = drv->reg_base;
- u32 enable, addr, data, msgid;
+ u32 enable, addr, data, msgid, sts, irq_sts;
if (!tcs || tcs_is_free(drv, m))
return;
@@ -804,15 +852,24 @@
if (!enable)
return;
- pr_debug("TCS-%d contents:\n", m);
+ pr_warn("RSC:%s\n", drv->name);
+
+ sts = read_tcs_reg(base, TCS_DRV_STATUS, m, 0);
+ data = read_tcs_reg(base, TCS_DRV_CONTROL, m, 0);
+ irq_sts = read_tcs_reg(base, TCS_DRV_IRQ_STATUS, 0, 0);
+ pr_warn("TCS=%d [ctrlr-sts:%s amc-mode:0x%x irq-sts:%s]\n",
+ m, sts ? "IDLE" : "BUSY", data,
+ (irq_sts & BIT(m)) ? "COMPLETED" : "PENDING");
+
for (n = 0; n < tcs->ncpt; n++) {
if (!(enable & BIT(n)))
continue;
addr = read_tcs_reg(base, TCS_DRV_CMD_ADDR, m, n);
data = read_tcs_reg(base, TCS_DRV_CMD_DATA, m, n);
msgid = read_tcs_reg(base, TCS_DRV_CMD_MSGID, m, n);
- pr_debug("\tn=%d addr=0x%x data=0x%x hdr=0x%x\n",
- n, addr, data, msgid);
+ sts = read_tcs_reg(base, TCS_DRV_CMD_STATUS, m, n);
+ pr_warn("\tCMD=%d [addr=0x%x data=0x%x hdr=0x%x sts=0x%x]\n",
+ n, addr, data, msgid, sts);
}
}
@@ -824,7 +881,7 @@
for (i = 0; i < drv->num_tcs; i++) {
if (!atomic_read(&drv->tcs_in_use[i]))
continue;
- pr_debug("Time: %llu: TCS-%d:\n\tReq Sent:%d Last Sent:%llu\n\tResp Recv:%d Last Recvd:%llu\n",
+ pr_warn("Time: %llu: TCS-%d:\n\tReq Sent:%d Last Sent:%llu\n\tResp Recv:%d Last Recvd:%llu\n",
curr, i,
atomic_read(&drv->tcs_send_count[i]),
drv->tcs_last_sent_ts[i],
@@ -835,6 +892,13 @@
}
}
+static void chan_debug(struct mbox_chan *chan)
+{
+ struct tcs_drv *drv = container_of(chan->mbox, struct tcs_drv, mbox);
+
+ dump_tcs_stats(drv);
+}
+
/**
* chan_tcs_write: Validate the incoming message and write to the
* appropriate TCS block.
@@ -859,7 +923,8 @@
goto tx_fail;
}
- if (!msg->payload || msg->num_payload > MAX_RPMH_PAYLOAD) {
+ if (!msg->payload || !msg->num_payload ||
+ msg->num_payload > MAX_RPMH_PAYLOAD) {
dev_err(dev, "Payload error\n");
ret = -EINVAL;
goto tx_fail;
@@ -889,8 +954,11 @@
* Since we are re-purposing the wake TCS, invalidate previous
* contents to avoid confusion.
*/
- if (msg->state == RPMH_AWAKE_STATE)
- tcs_mbox_invalidate(chan);
+ if (msg->state == RPMH_AWAKE_STATE) {
+ ret = tcs_mbox_invalidate(chan);
+ if (ret)
+ goto tx_fail;
+ }
/* Post the message to the TCS and trigger */
ret = tcs_mbox_write(chan, msg, true);
@@ -902,15 +970,16 @@
drv, msg, chan, TCS_M_INIT, ret);
dev_err(dev, "Error sending RPMH message %d\n", ret);
- if (resp)
+ if (!IS_ERR(resp))
send_tcs_response(resp);
+ else
+ dev_err(dev, "No response object %ld\n", PTR_ERR(resp));
ret = 0;
}
/* If we were just busy waiting for TCS, dump the state and return */
if (ret == -EBUSY) {
- pr_info("TCS Busy, retrying RPMH message send\n");
- dump_tcs_stats(drv);
+ pr_info_ratelimited("TCS Busy, retrying RPMH message send\n");
ret = -EAGAIN;
}
@@ -926,7 +995,7 @@
for (i = 0; i < msg->num_payload; i++) {
/* Only data is write capable */
writel_relaxed(cpu_to_le32(msg->payload[i].data), addr);
- trace_rpmh_control_msg(drv->name, msg->payload[i].data);
+ log_rpmh_control_msg(drv, msg->payload[i].data);
addr += TCS_HIDDEN_CMD_SHIFT;
}
}
@@ -971,7 +1040,8 @@
goto tx_done;
}
- if (msg->num_payload > MAX_RPMH_PAYLOAD) {
+ if (!msg->payload || (!msg->num_payload && !msg->invalidate) ||
+ msg->num_payload > MAX_RPMH_PAYLOAD) {
dev_err(dev, "Payload error\n");
goto tx_done;
}
@@ -1111,7 +1181,8 @@
if (tcs->num_tcs > MAX_TCS_PER_TYPE)
return -EINVAL;
- if (st > max_tcs)
+ if (st + tcs->num_tcs > max_tcs &&
+ st + tcs->num_tcs >= sizeof(tcs->tcs_mask))
return -EINVAL;
tcs->tcs_mask = ((1 << tcs->num_tcs) - 1) << st;
@@ -1133,10 +1204,12 @@
for (j = 0; j < i; j++) {
ret = of_parse_phandle_with_args(np, "mboxes",
"#mbox-cells", j, &p);
- if (!ret && p.np == pdev->dev.of_node)
+ of_node_put(p.np);
+ if (!ret && p.np == pdev->dev.of_node) {
+ num_chans++;
break;
+ }
}
- num_chans++;
}
if (!num_chans) {
@@ -1162,6 +1235,7 @@
drv->mbox.txdone_irq = true;
drv->mbox.of_xlate = of_tcs_mbox_xlate;
drv->mbox.is_idle = tcs_drv_is_idle;
+ drv->mbox.debug = chan_debug;
drv->num_tcs = st;
drv->pdev = pdev;
INIT_LIST_HEAD(&drv->response_pending);
@@ -1182,7 +1256,7 @@
ret = devm_request_irq(&pdev->dev, irq, tcs_irq_handler,
IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND,
- "tcs_irq", drv);
+ drv->name, drv);
if (ret)
return ret;
@@ -1193,6 +1267,9 @@
for (i = 0; i < ARRAY_SIZE(drv->tcs_in_use); i++)
atomic_set(&drv->tcs_in_use[i], 0);
+ drv->ipc_log_ctx = ipc_log_context_create(TCS_DRV_IPC_LOG_SIZE,
+ drv->name, 0);
+
ret = mbox_controller_register(&drv->mbox);
if (ret)
return ret;
diff --git a/drivers/md/dm-android-verity.c b/drivers/md/dm-android-verity.c
index 3b19017..eb4bdf6 100644
--- a/drivers/md/dm-android-verity.c
+++ b/drivers/md/dm-android-verity.c
@@ -646,6 +646,8 @@
android_verity_target.direct_access = dm_linear_direct_access,
android_verity_target.io_hints = NULL;
+ set_disk_ro(dm_disk(dm_table_get_md(ti->table)), 0);
+
err = dm_linear_ctr(ti, DM_LINEAR_ARGS, linear_table_args);
if (!err) {
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index ac8235b..0d437c9 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -431,7 +431,7 @@
unsigned long flags;
struct priority_group *pg;
struct pgpath *pgpath;
- bool bypassed = true;
+ unsigned bypassed = 1;
if (!atomic_read(&m->nr_valid_paths)) {
clear_bit(MPATHF_QUEUE_IO, &m->flags);
@@ -470,7 +470,7 @@
*/
do {
list_for_each_entry(pg, &m->priority_groups, list) {
- if (pg->bypassed == bypassed)
+ if (pg->bypassed == !!bypassed)
continue;
pgpath = choose_path_in_pg(m, pg, nr_bytes);
if (!IS_ERR_OR_NULL(pgpath)) {
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index be869a9..0b678b5 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -1095,6 +1095,19 @@
return;
}
+ /*
+ * Increment the unmapped blocks. This prevents a race between the
+ * passdown io and reallocation of freed blocks.
+ */
+ r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end);
+ if (r) {
+ metadata_operation_failed(pool, "dm_pool_inc_data_range", r);
+ bio_io_error(m->bio);
+ cell_defer_no_holder(tc, m->cell);
+ mempool_free(m, pool->mapping_pool);
+ return;
+ }
+
discard_parent = bio_alloc(GFP_NOIO, 1);
if (!discard_parent) {
DMWARN("%s: unable to allocate top level discard bio for passdown. Skipping passdown.",
@@ -1115,19 +1128,6 @@
end_discard(&op, r);
}
}
-
- /*
- * Increment the unmapped blocks. This prevents a race between the
- * passdown io and reallocation of freed blocks.
- */
- r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end);
- if (r) {
- metadata_operation_failed(pool, "dm_pool_inc_data_range", r);
- bio_io_error(m->bio);
- cell_defer_no_holder(tc, m->cell);
- mempool_free(m, pool->mapping_pool);
- return;
- }
}
static void process_prepared_discard_passdown_pt2(struct dm_thin_new_mapping *m)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index eddd360..8ebf1b9 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1861,7 +1861,7 @@
}
sb = page_address(rdev->sb_page);
sb->data_size = cpu_to_le64(num_sectors);
- sb->super_offset = rdev->sb_start;
+ sb->super_offset = cpu_to_le64(rdev->sb_start);
sb->sb_csum = calc_sb_1_csum(sb);
md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
rdev->sb_page);
@@ -2270,7 +2270,7 @@
/* Check if any mddev parameters have changed */
if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
(mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
- (mddev->layout != le64_to_cpu(sb->layout)) ||
+ (mddev->layout != le32_to_cpu(sb->layout)) ||
(mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
(mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
return true;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 29e2df5..81a7875 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1073,7 +1073,7 @@
*/
DEFINE_WAIT(w);
for (;;) {
- flush_signals(current);
+ sigset_t full, old;
prepare_to_wait(&conf->wait_barrier,
&w, TASK_INTERRUPTIBLE);
if (bio_end_sector(bio) <= mddev->suspend_lo ||
@@ -1082,7 +1082,10 @@
!md_cluster_ops->area_resyncing(mddev, WRITE,
bio->bi_iter.bi_sector, bio_end_sector(bio))))
break;
+ sigfillset(&full);
+ sigprocmask(SIG_BLOCK, &full, &old);
schedule();
+ sigprocmask(SIG_SETMASK, &old, NULL);
}
finish_wait(&conf->wait_barrier, &w);
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f34ad2b..8f117d6 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5300,12 +5300,15 @@
* userspace, we want an interruptible
* wait.
*/
- flush_signals(current);
prepare_to_wait(&conf->wait_for_overlap,
&w, TASK_INTERRUPTIBLE);
if (logical_sector >= mddev->suspend_lo &&
logical_sector < mddev->suspend_hi) {
+ sigset_t full, old;
+ sigfillset(&full);
+ sigprocmask(SIG_BLOCK, &full, &old);
schedule();
+ sigprocmask(SIG_SETMASK, &old, NULL);
do_prepare = true;
}
goto retry;
@@ -7557,12 +7560,10 @@
{
if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
- struct md_rdev *rdev;
spin_lock_irq(&conf->device_lock);
conf->previous_raid_disks = conf->raid_disks;
- rdev_for_each(rdev, conf->mddev)
- rdev->data_offset = rdev->new_data_offset;
+ md_finish_reshape(conf->mddev);
smp_wmb();
conf->reshape_progress = MaxSector;
conf->mddev->reshape_position = MaxSector;
diff --git a/drivers/media/dvb-core/demux.h b/drivers/media/dvb-core/demux.h
index aeda2b6..fbe0165 100644
--- a/drivers/media/dvb-core/demux.h
+++ b/drivers/media/dvb-core/demux.h
@@ -40,6 +40,8 @@
* Common definitions
*/
+#define DMX_EVENT_QUEUE_SIZE 500 /* number of events */
+
/*
* DMX_MAX_FILTER_SIZE: Maximum length (in bytes) of a section/PES filter.
*/
@@ -61,6 +63,104 @@
#endif
/*
+ * enum dmx_success: Success codes for the Demux Callback API.
+ */
+enum dmx_success {
+ DMX_OK = 0, /* Received Ok */
+ DMX_OK_PES_END, /* Received OK, data reached end of PES packet */
+ DMX_OK_PCR, /* Received OK, data with new PCR/STC pair */
+ DMX_OK_EOS, /* Received OK, reached End-of-Stream (EOS) */
+ DMX_OK_MARKER, /* Received OK, reached a data Marker */
+ DMX_LENGTH_ERROR, /* Incorrect length */
+ DMX_OVERRUN_ERROR, /* Receiver ring buffer overrun */
+ DMX_CRC_ERROR, /* Incorrect CRC */
+ DMX_FRAME_ERROR, /* Frame alignment error */
+ DMX_FIFO_ERROR, /* Receiver FIFO overrun */
+ DMX_MISSED_ERROR, /* Receiver missed packet */
+ DMX_OK_DECODER_BUF, /* Received OK, new ES data in decoder buffer */
+ DMX_OK_IDX, /* Received OK, new index event */
+ DMX_OK_SCRAMBLING_STATUS, /* Received OK, new scrambling status */
+};
+
+
+/*
+ * struct dmx_data_ready: Parameters for event notification callback.
+ * Event notification notifies demux device that data is written
+ * and available in the device's output buffer or provides
+ * notification on errors and other events. In the latter case
+ * data_length is zero.
+ */
+struct dmx_data_ready {
+ enum dmx_success status;
+
+ /*
+ * data_length may be 0 in case of DMX_OK_PES_END or DMX_OK_EOS
+ * and in non-DMX_OK_XXX events. In DMX_OK_PES_END,
+ * data_length is for data coming after the end of PES.
+ */
+ int data_length;
+
+ union {
+ struct {
+ int start_gap;
+ int actual_length;
+ int disc_indicator_set;
+ int pes_length_mismatch;
+ u64 stc;
+ u32 tei_counter;
+ u32 cont_err_counter;
+ u32 ts_packets_num;
+ } pes_end;
+
+ struct {
+ u64 pcr;
+ u64 stc;
+ int disc_indicator_set;
+ } pcr;
+
+ struct {
+ int handle;
+ int cookie;
+ u32 offset;
+ u32 len;
+ int pts_exists;
+ u64 pts;
+ int dts_exists;
+ u64 dts;
+ u32 tei_counter;
+ u32 cont_err_counter;
+ u32 ts_packets_num;
+ u32 ts_dropped_bytes;
+ u64 stc;
+ } buf;
+
+ struct {
+ u64 id;
+ } marker;
+
+ struct dmx_index_event_info idx_event;
+ struct dmx_scrambling_status_event_info scrambling_bits;
+ };
+};
+
+/*
+ * struct data_buffer: Parameters of buffer allocated by
+ * demux device for input/output. Can be used to directly map the
+ * demux-device buffer to HW output if HW supports it.
+ */
+struct data_buffer {
+ /* dvb_ringbuffer managed by demux-device */
+ const struct dvb_ringbuffer *ringbuff;
+
+
+ /*
+ * Private handle returned by kernel demux when
+ * map_buffer is called in case external buffer
+ * is used. NULL if buffer is allocated internally.
+ */
+ void *priv_handle;
+};
+/*
* TS packet reception
*/
@@ -95,10 +195,18 @@
* Using this API, the client can set the filtering properties to start/stop
* filtering TS packets on a particular TS feed.
*/
+struct dmx_ts_feed;
+
+typedef int (*dmx_ts_data_ready_cb)(
+ struct dmx_ts_feed *source,
+ struct dmx_data_ready *dmx_data_ready);
+
struct dmx_ts_feed {
int is_filtering;
struct dmx_demux *parent;
+ struct data_buffer buffer;
void *priv;
+ struct dmx_decoder_buffers *decoder_buffers;
int (*set)(struct dmx_ts_feed *feed,
u16 pid,
int type,
@@ -107,6 +215,34 @@
ktime_t timeout);
int (*start_filtering)(struct dmx_ts_feed *feed);
int (*stop_filtering)(struct dmx_ts_feed *feed);
+ int (*set_video_codec)(struct dmx_ts_feed *feed,
+ enum dmx_video_codec video_codec);
+ int (*set_idx_params)(struct dmx_ts_feed *feed,
+ struct dmx_indexing_params *idx_params);
+ int (*get_decoder_buff_status)(
+ struct dmx_ts_feed *feed,
+ struct dmx_buffer_status *dmx_buffer_status);
+ int (*reuse_decoder_buffer)(
+ struct dmx_ts_feed *feed,
+ int cookie);
+ int (*data_ready_cb)(struct dmx_ts_feed *feed,
+ dmx_ts_data_ready_cb callback);
+ int (*notify_data_read)(struct dmx_ts_feed *feed,
+ u32 bytes_num);
+ int (*set_tsp_out_format)(struct dmx_ts_feed *feed,
+ enum dmx_tsp_format_t tsp_format);
+ int (*set_secure_mode)(struct dmx_ts_feed *feed,
+ struct dmx_secure_mode *sec_mode);
+ int (*set_cipher_ops)(struct dmx_ts_feed *feed,
+ struct dmx_cipher_operations *cipher_ops);
+ int (*oob_command)(struct dmx_ts_feed *feed,
+ struct dmx_oob_command *cmd);
+ int (*ts_insertion_init)(struct dmx_ts_feed *feed);
+ int (*ts_insertion_terminate)(struct dmx_ts_feed *feed);
+ int (*ts_insertion_insert_buffer)(struct dmx_ts_feed *feed,
+ char *data, size_t size);
+ int (*get_scrambling_bits)(struct dmx_ts_feed *feed, u8 *value);
+ int (*flush_buffer)(struct dmx_ts_feed *feed, size_t length);
};
/*
@@ -131,14 +267,21 @@
* corresponding bits are compared. The filter only accepts sections that are
* equal to filter_value in all the tested bit positions.
*/
+
+struct dmx_section_feed;
struct dmx_section_filter {
u8 filter_value[DMX_MAX_FILTER_SIZE];
u8 filter_mask[DMX_MAX_FILTER_SIZE];
u8 filter_mode[DMX_MAX_FILTER_SIZE];
struct dmx_section_feed *parent; /* Back-pointer */
+ struct data_buffer buffer;
void *priv; /* Pointer to private data of the API client */
};
+typedef int (*dmx_section_data_ready_cb)(
+ struct dmx_section_filter *source,
+ struct dmx_data_ready *dmx_data_ready);
+
/**
* struct dmx_section_feed - Structure that contains a section feed filter
*
@@ -189,8 +332,24 @@
struct dmx_section_filter *filter);
int (*start_filtering)(struct dmx_section_feed *feed);
int (*stop_filtering)(struct dmx_section_feed *feed);
+ int (*data_ready_cb)(struct dmx_section_feed *feed,
+ dmx_section_data_ready_cb callback);
+ int (*notify_data_read)(struct dmx_section_filter *filter,
+ u32 bytes_num);
+ int (*set_secure_mode)(struct dmx_section_feed *feed,
+ struct dmx_secure_mode *sec_mode);
+ int (*set_cipher_ops)(struct dmx_section_feed *feed,
+ struct dmx_cipher_operations *cipher_ops);
+ int (*oob_command)(struct dmx_section_feed *feed,
+ struct dmx_oob_command *cmd);
+ int (*get_scrambling_bits)(struct dmx_section_feed *feed, u8 *value);
+ int (*flush_buffer)(struct dmx_section_feed *feed, size_t length);
};
+/*
+ * Callback functions
+ */
+
/**
* typedef dmx_ts_cb - DVB demux TS filter callback function prototype
*
@@ -295,9 +454,19 @@
size_t buffer2_len,
struct dmx_section_filter *source);
-/*
- * DVB Front-End
- */
+typedef int (*dmx_ts_fullness) (
+ struct dmx_ts_feed *source,
+ int required_space,
+ int wait);
+
+typedef int (*dmx_section_fullness) (
+ struct dmx_section_filter *source,
+ int required_space,
+ int wait);
+
+/*--------------------------------------------------------------------------*/
+/* DVB Front-End */
+/*--------------------------------------------------------------------------*/
/**
* enum dmx_frontend_source - Used to identify the type of frontend
@@ -312,6 +481,13 @@
enum dmx_frontend_source {
DMX_MEMORY_FE,
DMX_FRONTEND_0,
+ DMX_FRONTEND_1,
+ DMX_FRONTEND_2,
+ DMX_FRONTEND_3,
+ DMX_STREAM_0, /* external stream input, e.g. LVDS */
+ DMX_STREAM_1,
+ DMX_STREAM_2,
+ DMX_STREAM_3
};
/**
@@ -345,14 +521,24 @@
*/
enum dmx_demux_caps {
DMX_TS_FILTERING = 1,
+ DMX_PES_FILTERING = 2,
DMX_SECTION_FILTERING = 4,
DMX_MEMORY_BASED_FILTERING = 8,
+ DMX_CRC_CHECKING = 16,
+ DMX_TS_DESCRAMBLING = 32
};
/*
* Demux resource type identifier.
*/
+/*
+ * DMX_FE_ENTRY(): Casts elements in the list of registered
+ * front-ends from the generic type struct list_head
+ * to the type struct dmx_frontend *.
+ *
+ */
+
/**
* DMX_FE_ENTRY - Casts elements in the list of registered
* front-ends from the generic type struct list_head
@@ -557,6 +743,10 @@
enum dmx_demux_caps capabilities;
struct dmx_frontend *frontend;
void *priv;
+ struct data_buffer dvr_input; /* DVR input buffer */
+ int dvr_input_protected;
+ struct dentry *debugfs_demux_dir; /* debugfs dir */
+
int (*open)(struct dmx_demux *demux);
int (*close)(struct dmx_demux *demux);
int (*write)(struct dmx_demux *demux, const char __user *buf,
@@ -582,15 +772,31 @@
int (*get_pes_pids)(struct dmx_demux *demux, u16 *pids);
- /* private: */
+ int (*get_caps)(struct dmx_demux *demux, struct dmx_caps *caps);
- /*
- * Only used at av7110, to read some data from firmware.
- * As this was never documented, we have no clue about what's
- * there, and its usage on other drivers aren't encouraged.
- */
+ int (*set_source)(struct dmx_demux *demux, const dmx_source_t *src);
+
+ int (*set_tsp_format)(struct dmx_demux *demux,
+ enum dmx_tsp_format_t tsp_format);
+
+ int (*set_playback_mode)(struct dmx_demux *demux,
+ enum dmx_playback_mode_t mode,
+ dmx_ts_fullness ts_fullness_callback,
+ dmx_section_fullness sec_fullness_callback);
+
+ int (*write_cancel)(struct dmx_demux *demux);
+
int (*get_stc)(struct dmx_demux *demux, unsigned int num,
u64 *stc, unsigned int *base);
+
+ int (*map_buffer)(struct dmx_demux *demux,
+ struct dmx_buffer *dmx_buffer,
+ void **priv_handle, void **mem);
+
+ int (*unmap_buffer)(struct dmx_demux *demux,
+ void *priv_handle);
+
+ int (*get_tsp_size)(struct dmx_demux *demux);
};
#endif /* #ifndef __DEMUX_H */
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
index 7b67e1d..f2d39a9 100644
--- a/drivers/media/dvb-core/dmxdev.c
+++ b/drivers/media/dvb-core/dmxdev.c
@@ -28,15 +28,74 @@
#include <linux/poll.h>
#include <linux/ioctl.h>
#include <linux/wait.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/compat.h>
+#include <linux/mm.h>
#include "dmxdev.h"
-static int debug;
+static int overflow_auto_flush = 1;
+module_param(overflow_auto_flush, int, 0644);
+MODULE_PARM_DESC(overflow_auto_flush,
+ "Automatically flush buffer on overflow (default: on)");
-module_param(debug, int, 0644);
-MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
+#define DMX_DEFAULT_DECODER_BUFFER_SIZE (32768)
-#define dprintk if (debug) printk
+static inline int dvb_dmxdev_verify_buffer_size(u32 size, u32 max_size,
+ u32 size_align)
+{
+ if (size_align)
+ return size <= max_size && !(size % size_align);
+ else
+ return size <= max_size;
+}
+
+static int dvb_filter_verify_buffer_size(struct dmxdev_filter *filter)
+{
+ struct dmx_caps caps;
+ size_t size = filter->buffer.size;
+
+ /*
+ * For backward compatibility, if no demux capabilities can
+ * be retrieved assume size is ok.
+ * Decoder filter buffer size is verified when decoder buffer is set.
+ */
+ if (filter->dev->demux->get_caps) {
+ filter->dev->demux->get_caps(filter->dev->demux, &caps);
+
+ if (filter->type == DMXDEV_TYPE_SEC)
+ return dvb_dmxdev_verify_buffer_size(
+ size,
+ caps.section.max_size,
+ caps.section.size_alignment);
+
+ if (filter->params.pes.output == DMX_OUT_TAP)
+ return dvb_dmxdev_verify_buffer_size(
+ size,
+ caps.pes.max_size,
+ caps.pes.size_alignment);
+
+ size = (filter->params.pes.output == DMX_OUT_TS_TAP) ?
+ filter->dev->dvr_buffer.size : size;
+
+ if (filter->params.pes.output == DMX_OUT_TSDEMUX_TAP ||
+ filter->params.pes.output == DMX_OUT_TS_TAP) {
+ if (filter->dmx_tsp_format == DMX_TSP_FORMAT_188)
+ return dvb_dmxdev_verify_buffer_size(
+ size,
+ caps.recording_188_tsp.max_size,
+ caps.recording_188_tsp.size_alignment);
+
+ return dvb_dmxdev_verify_buffer_size(
+ size,
+ caps.recording_192_tsp.max_size,
+ caps.recording_192_tsp.size_alignment);
+ }
+ }
+
+ return 1;
+}
static int dvb_dmxdev_buffer_write(struct dvb_ringbuffer *buf,
const u8 *src, size_t len)
@@ -50,16 +109,401 @@
free = dvb_ringbuffer_free(buf);
if (len > free) {
- dprintk("dmxdev: buffer overflow\n");
+ pr_debug("dmxdev: buffer overflow\n");
return -EOVERFLOW;
}
return dvb_ringbuffer_write(buf, src, len);
}
-static ssize_t dvb_dmxdev_buffer_read(struct dvb_ringbuffer *src,
- int non_blocking, char __user *buf,
- size_t count, loff_t *ppos)
+static inline void dvb_dmxdev_notify_data_read(struct dmxdev_filter *filter,
+ int bytes_read)
+{
+ if (!filter)
+ return;
+
+ if (filter->type == DMXDEV_TYPE_SEC) {
+ if (filter->feed.sec.feed->notify_data_read)
+ filter->feed.sec.feed->notify_data_read(
+ filter->filter.sec,
+ bytes_read);
+ } else {
+ struct dmxdev_feed *feed;
+
+ /*
+ * All feeds of same demux-handle share the same output
+ * buffer, it is enough to notify on the buffer status
+ * on one of the feeds
+ */
+ feed = list_first_entry(&filter->feed.ts,
+ struct dmxdev_feed, next);
+
+ if (feed->ts->notify_data_read)
+ feed->ts->notify_data_read(
+ feed->ts,
+ bytes_read);
+ }
+}
+
+static inline u32 dvb_dmxdev_advance_event_idx(u32 index)
+{
+ index++;
+ if (index >= DMX_EVENT_QUEUE_SIZE)
+ index = 0;
+
+ return index;
+}
+
+static inline int dvb_dmxdev_events_is_full(struct dmxdev_events_queue *events)
+{
+ int new_write_index;
+
+ new_write_index = dvb_dmxdev_advance_event_idx(events->write_index);
+ if (new_write_index == events->read_index)
+ return 1;
+
+ return 0;
+
+}
+
+static inline void dvb_dmxdev_flush_events(struct dmxdev_events_queue *events)
+{
+ events->read_index = 0;
+ events->write_index = 0;
+ events->notified_index = 0;
+ events->bytes_read_no_event = 0;
+ events->current_event_data_size = 0;
+ events->wakeup_events_counter = 0;
+}
+
+static inline void dvb_dmxdev_flush_output(struct dvb_ringbuffer *buffer,
+ struct dmxdev_events_queue *events)
+{
+ dvb_dmxdev_flush_events(events);
+ dvb_ringbuffer_flush(buffer);
+}
+
+static int dvb_dmxdev_update_pes_event(struct dmx_filter_event *event,
+ int bytes_read)
+{
+ int start_delta;
+
+ if (event->params.pes.total_length <= bytes_read)
+ return event->params.pes.total_length;
+
+ /*
+ * only part of the data relevant to this event was read.
+ * Update the event's information to reflect the new state.
+ */
+ event->params.pes.total_length -= bytes_read;
+
+ start_delta = event->params.pes.start_offset -
+ event->params.pes.base_offset;
+
+ if (bytes_read <= start_delta) {
+ event->params.pes.base_offset +=
+ bytes_read;
+ } else {
+ start_delta =
+ bytes_read - start_delta;
+
+ event->params.pes.start_offset += start_delta;
+ event->params.pes.actual_length -= start_delta;
+
+ event->params.pes.base_offset =
+ event->params.pes.start_offset;
+ }
+
+ return 0;
+}
+
+static int dvb_dmxdev_update_section_event(struct dmx_filter_event *event,
+ int bytes_read)
+{
+ int start_delta;
+
+ if (event->params.section.total_length <= bytes_read)
+ return event->params.section.total_length;
+
+ /*
+ * only part of the data relevant to this event was read.
+ * Update the event's information to reflect the new state.
+ */
+
+ event->params.section.total_length -= bytes_read;
+
+ start_delta = event->params.section.start_offset -
+ event->params.section.base_offset;
+
+ if (bytes_read <= start_delta) {
+ event->params.section.base_offset +=
+ bytes_read;
+ } else {
+ start_delta =
+ bytes_read - start_delta;
+
+ event->params.section.start_offset += start_delta;
+ event->params.section.actual_length -= start_delta;
+
+ event->params.section.base_offset =
+ event->params.section.start_offset;
+ }
+
+ return 0;
+}
+
+static int dvb_dmxdev_update_rec_event(struct dmx_filter_event *event,
+ int bytes_read)
+{
+ if (event->params.recording_chunk.size <= bytes_read)
+ return event->params.recording_chunk.size;
+
+ /*
+ * only part of the data relevant to this event was read.
+ * Update the event's information to reflect the new state.
+ */
+ event->params.recording_chunk.size -= bytes_read;
+ event->params.recording_chunk.offset += bytes_read;
+
+ return 0;
+}
+
+static int dvb_dmxdev_add_event(struct dmxdev_events_queue *events,
+ struct dmx_filter_event *event)
+{
+ int res;
+ int new_write_index;
+ int data_event;
+
+ /* Check if the event is disabled */
+ if (events->event_mask.disable_mask & event->type)
+ return 0;
+
+ /* Check if we are adding an event that user already read its data */
+ if (events->bytes_read_no_event) {
+ data_event = 1;
+
+ if (event->type == DMX_EVENT_NEW_PES)
+ res = dvb_dmxdev_update_pes_event(event,
+ events->bytes_read_no_event);
+ else if (event->type == DMX_EVENT_NEW_SECTION)
+ res = dvb_dmxdev_update_section_event(event,
+ events->bytes_read_no_event);
+ else if (event->type == DMX_EVENT_NEW_REC_CHUNK)
+ res = dvb_dmxdev_update_rec_event(event,
+ events->bytes_read_no_event);
+ else
+ data_event = 0;
+
+ if (data_event) {
+ if (res) {
+ /*
+ * Data relevant to this event was fully
+ * consumed already, discard event.
+ */
+ events->bytes_read_no_event -= res;
+ return 0;
+ }
+ events->bytes_read_no_event = 0;
+ } else {
+ /*
+ * data was read beyond the non-data event,
+ * making it not relevant anymore
+ */
+ return 0;
+ }
+ }
+
+ new_write_index = dvb_dmxdev_advance_event_idx(events->write_index);
+ if (new_write_index == events->read_index) {
+ pr_err("dmxdev: events overflow\n");
+ return -EOVERFLOW;
+ }
+
+ events->queue[events->write_index] = *event;
+ events->write_index = new_write_index;
+
+ if (!(events->event_mask.no_wakeup_mask & event->type))
+ events->wakeup_events_counter++;
+
+ return 0;
+}
+
+static int dvb_dmxdev_remove_event(struct dmxdev_events_queue *events,
+ struct dmx_filter_event *event)
+{
+ if (events->notified_index == events->write_index)
+ return -ENODATA;
+
+ *event = events->queue[events->notified_index];
+
+ events->notified_index =
+ dvb_dmxdev_advance_event_idx(events->notified_index);
+
+ if (!(events->event_mask.no_wakeup_mask & event->type))
+ events->wakeup_events_counter--;
+
+ return 0;
+}
+
+static int dvb_dmxdev_update_events(struct dmxdev_events_queue *events,
+ int bytes_read)
+{
+ struct dmx_filter_event *event;
+ int res;
+ int data_event;
+
+ /*
+ * If data events are not enabled on this filter,
+ * there's nothing to update.
+ */
+ if (events->data_read_event_masked)
+ return 0;
+
+ /*
+ * Go through all events that were notified and
+ * remove them from the events queue if their respective
+ * data was read.
+ */
+ while ((events->read_index != events->notified_index) &&
+ (bytes_read)) {
+ event = events->queue + events->read_index;
+
+ data_event = 1;
+
+ if (event->type == DMX_EVENT_NEW_PES)
+ res = dvb_dmxdev_update_pes_event(event, bytes_read);
+ else if (event->type == DMX_EVENT_NEW_SECTION)
+ res = dvb_dmxdev_update_section_event(event,
+ bytes_read);
+ else if (event->type == DMX_EVENT_NEW_REC_CHUNK)
+ res = dvb_dmxdev_update_rec_event(event, bytes_read);
+ else
+ data_event = 0;
+
+ if (data_event) {
+ if (res) {
+ /*
+ * Data relevant to this event was
+ * fully consumed, remove it from the queue.
+ */
+ bytes_read -= res;
+ events->read_index =
+ dvb_dmxdev_advance_event_idx(
+ events->read_index);
+ } else {
+ bytes_read = 0;
+ }
+ } else {
+ /*
+ * non-data event was already notified,
+ * no need to keep it
+ */
+ events->read_index = dvb_dmxdev_advance_event_idx(
+ events->read_index);
+ }
+ }
+
+ if (!bytes_read)
+ return 0;
+
+ /*
+ * If we reached here it means:
+ * bytes_read != 0
+ * events->read_index == events->notified_index
+ * Check if there are pending events in the queue
+ * which the user didn't read while their relevant data
+ * was read.
+ */
+ while ((events->notified_index != events->write_index) &&
+ (bytes_read)) {
+ event = events->queue + events->notified_index;
+
+ data_event = 1;
+
+ if (event->type == DMX_EVENT_NEW_PES)
+ res = dvb_dmxdev_update_pes_event(event, bytes_read);
+ else if (event->type == DMX_EVENT_NEW_SECTION)
+ res = dvb_dmxdev_update_section_event(event,
+ bytes_read);
+ else if (event->type == DMX_EVENT_NEW_REC_CHUNK)
+ res = dvb_dmxdev_update_rec_event(event, bytes_read);
+ else
+ data_event = 0;
+
+ if (data_event) {
+ if (res) {
+ /*
+ * Data relevant to this event was
+ * fully consumed, remove it from the queue.
+ */
+ bytes_read -= res;
+ events->notified_index =
+ dvb_dmxdev_advance_event_idx(
+ events->notified_index);
+ if (!(events->event_mask.no_wakeup_mask &
+ event->type))
+ events->wakeup_events_counter--;
+ } else {
+ bytes_read = 0;
+ }
+ } else {
+ if (bytes_read) {
+ /*
+ * data was read beyond the non-data event,
+ * making it not relevant anymore
+ */
+ events->notified_index =
+ dvb_dmxdev_advance_event_idx(
+ events->notified_index);
+ if (!(events->event_mask.no_wakeup_mask &
+ event->type))
+ events->wakeup_events_counter--;
+ }
+ }
+
+ events->read_index = events->notified_index;
+ }
+
+ /*
+ * Check if data was read without having a respective
+ * event in the events-queue
+ */
+ if (bytes_read)
+ events->bytes_read_no_event += bytes_read;
+
+ return 0;
+}
+
+static inline int dvb_dmxdev_check_data(struct dmxdev_filter *filter,
+ struct dvb_ringbuffer *src)
+{
+ int data_status_change;
+
+ if (filter)
+ if (mutex_lock_interruptible(&filter->mutex))
+ return -ERESTARTSYS;
+
+ if (!src->data ||
+ !dvb_ringbuffer_empty(src) ||
+ src->error ||
+ (filter &&
+ (filter->state != DMXDEV_STATE_GO) &&
+ (filter->state != DMXDEV_STATE_DONE)))
+ data_status_change = 1;
+ else
+ data_status_change = 0;
+
+ if (filter)
+ mutex_unlock(&filter->mutex);
+
+ return data_status_change;
+}
+
+static ssize_t dvb_dmxdev_buffer_read(struct dmxdev_filter *filter,
+ struct dvb_ringbuffer *src,
+ int non_blocking, char __user *buf,
+ size_t count, loff_t *ppos)
{
size_t todo;
ssize_t avail;
@@ -70,7 +514,7 @@
if (src->error) {
ret = src->error;
- dvb_ringbuffer_flush(src);
+ src->error = 0;
return ret;
}
@@ -80,15 +524,35 @@
break;
}
+ if (filter) {
+ if ((filter->state == DMXDEV_STATE_DONE) &&
+ dvb_ringbuffer_empty(src))
+ break;
+
+ mutex_unlock(&filter->mutex);
+ }
+
ret = wait_event_interruptible(src->queue,
- !dvb_ringbuffer_empty(src) ||
- (src->error != 0));
+ dvb_dmxdev_check_data(filter, src));
+
+ if (filter) {
+ if (mutex_lock_interruptible(&filter->mutex))
+ return -ERESTARTSYS;
+
+ if ((filter->state != DMXDEV_STATE_GO) &&
+ (filter->state != DMXDEV_STATE_DONE))
+ return -ENODEV;
+ }
+
if (ret < 0)
break;
+ if (!src->data)
+ return 0;
+
if (src->error) {
ret = src->error;
- dvb_ringbuffer_flush(src);
+ src->error = 0;
break;
}
@@ -103,6 +567,9 @@
buf += ret;
}
+ if (count - todo) /* some data was read? */
+ wake_up_all(&src->queue);
+
return (count - todo) ? (count - todo) : ret;
}
@@ -120,13 +587,238 @@
return NULL;
}
+static void dvb_dvr_oob_cmd(struct dmxdev *dmxdev, struct dmx_oob_command *cmd)
+{
+ int i;
+ struct dmxdev_filter *filter;
+ struct dmxdev_feed *feed;
+
+ for (i = 0; i < dmxdev->filternum; i++) {
+ filter = &dmxdev->filter[i];
+ if (!filter || filter->state != DMXDEV_STATE_GO)
+ continue;
+
+ switch (filter->type) {
+ case DMXDEV_TYPE_SEC:
+ filter->feed.sec.feed->oob_command(
+ filter->feed.sec.feed, cmd);
+ break;
+ case DMXDEV_TYPE_PES:
+ feed = list_first_entry(&filter->feed.ts,
+ struct dmxdev_feed, next);
+ feed->ts->oob_command(feed->ts, cmd);
+ break;
+ case DMXDEV_TYPE_NONE:
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static int dvb_dvr_feed_cmd(struct dmxdev *dmxdev, struct dvr_command *dvr_cmd)
+{
+ int ret = 0;
+ size_t todo;
+ int bytes_written = 0;
+ size_t split;
+ size_t tsp_size;
+ u8 *data_start;
+ struct dvb_ringbuffer *src = &dmxdev->dvr_input_buffer;
+
+ todo = dvr_cmd->cmd.data_feed_count;
+
+ if (dmxdev->demux->get_tsp_size)
+ tsp_size = dmxdev->demux->get_tsp_size(dmxdev->demux);
+ else
+ tsp_size = 188;
+
+ while (todo >= tsp_size) {
+ /* wait for input */
+ ret = wait_event_interruptible(
+ src->queue,
+ (dvb_ringbuffer_avail(src) >= tsp_size) ||
+ dmxdev->dvr_in_exit || src->error);
+
+ if (ret < 0)
+ break;
+
+ spin_lock(&dmxdev->dvr_in_lock);
+
+ if (dmxdev->exit || dmxdev->dvr_in_exit) {
+ spin_unlock(&dmxdev->dvr_in_lock);
+ ret = -ENODEV;
+ break;
+ }
+
+ if (src->error) {
+ spin_unlock(&dmxdev->dvr_in_lock);
+ wake_up_all(&src->queue);
+ ret = -EINVAL;
+ break;
+ }
+
+ dmxdev->dvr_processing_input = 1;
+
+ split = (src->pread + todo > src->size) ?
+ src->size - src->pread : 0;
+
+ /*
+ * In DVR PULL mode, write might block.
+ * Lock on DVR buffer is released before calling to
+	 * write, if DVR was released meanwhile, dvr_in_exit is
+	 * set. Lock is acquired when updating the read pointer
+ * again to preserve read/write pointers consistency.
+ *
+ * In protected input mode, DVR input buffer is not mapped
+ * to kernel memory. Underlying demux implementation
+ * should trigger HW to read from DVR input buffer
+ * based on current read offset.
+ */
+ if (split > 0) {
+ data_start = (dmxdev->demux->dvr_input_protected) ?
+ NULL : (src->data + src->pread);
+
+ spin_unlock(&dmxdev->dvr_in_lock);
+ ret = dmxdev->demux->write(dmxdev->demux,
+ data_start,
+ split);
+
+ if (ret < 0) {
+ pr_err("dmxdev: dvr write error %d\n", ret);
+ continue;
+ }
+
+ if (dmxdev->dvr_in_exit) {
+ ret = -ENODEV;
+ break;
+ }
+
+ spin_lock(&dmxdev->dvr_in_lock);
+
+ todo -= ret;
+ bytes_written += ret;
+ DVB_RINGBUFFER_SKIP(src, ret);
+ if (ret < split) {
+ dmxdev->dvr_processing_input = 0;
+ spin_unlock(&dmxdev->dvr_in_lock);
+ wake_up_all(&src->queue);
+ continue;
+ }
+ }
+
+ data_start = (dmxdev->demux->dvr_input_protected) ?
+ NULL : (src->data + src->pread);
+
+ spin_unlock(&dmxdev->dvr_in_lock);
+ ret = dmxdev->demux->write(dmxdev->demux,
+ data_start, todo);
+
+ if (ret < 0) {
+ pr_err("dmxdev: dvr write error %d\n", ret);
+ continue;
+ }
+
+ if (dmxdev->dvr_in_exit) {
+ ret = -ENODEV;
+ break;
+ }
+
+ spin_lock(&dmxdev->dvr_in_lock);
+
+ todo -= ret;
+ bytes_written += ret;
+ DVB_RINGBUFFER_SKIP(src, ret);
+ dmxdev->dvr_processing_input = 0;
+ spin_unlock(&dmxdev->dvr_in_lock);
+
+ wake_up_all(&src->queue);
+ }
+
+ if (ret < 0)
+ return ret;
+
+ return bytes_written;
+}
+
+static int dvr_input_thread_entry(void *arg)
+{
+ struct dmxdev *dmxdev = arg;
+ struct dvb_ringbuffer *cmdbuf = &dmxdev->dvr_cmd_buffer;
+ struct dvr_command dvr_cmd;
+ int leftover = 0;
+ int ret;
+
+ while (1) {
+ /* wait for input */
+ ret = wait_event_interruptible(
+ cmdbuf->queue,
+ (!cmdbuf->data) ||
+ (dvb_ringbuffer_avail(cmdbuf) >= sizeof(dvr_cmd)) ||
+ (dmxdev->dvr_in_exit));
+
+ if (ret < 0)
+ break;
+
+ spin_lock(&dmxdev->dvr_in_lock);
+
+ if (!cmdbuf->data || dmxdev->exit || dmxdev->dvr_in_exit) {
+ spin_unlock(&dmxdev->dvr_in_lock);
+ break;
+ }
+
+ dvb_ringbuffer_read(cmdbuf, (u8 *)&dvr_cmd, sizeof(dvr_cmd));
+
+ spin_unlock(&dmxdev->dvr_in_lock);
+
+ if (dvr_cmd.type == DVR_DATA_FEED_CMD) {
+ dvr_cmd.cmd.data_feed_count += leftover;
+
+ ret = dvb_dvr_feed_cmd(dmxdev, &dvr_cmd);
+ if (ret < 0) {
+ pr_debug("%s: DVR data feed failed, ret=%d\n",
+ __func__, ret);
+ continue;
+ }
+
+ leftover = dvr_cmd.cmd.data_feed_count - ret;
+ } else {
+ /*
+ * For EOS, try to process leftover data in the input
+ * buffer.
+ */
+ if (dvr_cmd.cmd.oobcmd.type == DMX_OOB_CMD_EOS) {
+ struct dvr_command feed_cmd;
+
+ feed_cmd.type = DVR_DATA_FEED_CMD;
+ feed_cmd.cmd.data_feed_count =
+ dvb_ringbuffer_avail(
+ &dmxdev->dvr_input_buffer);
+ dvb_dvr_feed_cmd(dmxdev, &feed_cmd);
+ }
+
+ dvb_dvr_oob_cmd(dmxdev, &dvr_cmd.cmd.oobcmd);
+ }
+ }
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+ schedule();
+ set_current_state(TASK_INTERRUPTIBLE);
+ }
+ set_current_state(TASK_RUNNING);
+
+ return 0;
+}
+
static int dvb_dvr_open(struct inode *inode, struct file *file)
{
struct dvb_device *dvbdev = file->private_data;
struct dmxdev *dmxdev = dvbdev->priv;
struct dmx_frontend *front;
+ void *mem;
- dprintk("function : %s\n", __func__);
+ pr_debug("function : %s(%X)\n", __func__, (file->f_flags & O_ACCMODE));
if (mutex_lock_interruptible(&dmxdev->mutex))
return -ERESTARTSYS;
@@ -144,21 +836,28 @@
}
if ((file->f_flags & O_ACCMODE) == O_RDONLY) {
- void *mem;
if (!dvbdev->readers) {
mutex_unlock(&dmxdev->mutex);
return -EBUSY;
}
- mem = vmalloc(DVR_BUFFER_SIZE);
+ mem = vmalloc_user(DVR_BUFFER_SIZE);
if (!mem) {
mutex_unlock(&dmxdev->mutex);
return -ENOMEM;
}
dvb_ringbuffer_init(&dmxdev->dvr_buffer, mem, DVR_BUFFER_SIZE);
- dvbdev->readers--;
- }
+ dvb_dmxdev_flush_events(&dmxdev->dvr_output_events);
+ dmxdev->dvr_output_events.event_mask.disable_mask = 0;
+ dmxdev->dvr_output_events.event_mask.no_wakeup_mask = 0;
+ dmxdev->dvr_output_events.event_mask.wakeup_threshold = 1;
+ dmxdev->dvr_feeds_count = 0;
+ dmxdev->dvr_buffer_mode = DMX_BUFFER_MODE_INTERNAL;
+ dmxdev->dvr_priv_buff_handle = NULL;
- if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
+ dvbdev->readers--;
+ } else if (!dvbdev->writers) {
+ dmxdev->dvr_in_exit = 0;
+ dmxdev->dvr_processing_input = 0;
dmxdev->dvr_orig_fe = dmxdev->demux->frontend;
if (!dmxdev->demux->write) {
@@ -172,9 +871,51 @@
mutex_unlock(&dmxdev->mutex);
return -EINVAL;
}
+
+ mem = vmalloc_user(DVR_BUFFER_SIZE);
+ if (!mem) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ENOMEM;
+ }
+
dmxdev->demux->disconnect_frontend(dmxdev->demux);
dmxdev->demux->connect_frontend(dmxdev->demux, front);
+ dmxdev->dvr_input_buffer_mode = DMX_BUFFER_MODE_INTERNAL;
+
+ dvb_ringbuffer_init(&dmxdev->dvr_input_buffer,
+ mem,
+ DVR_BUFFER_SIZE);
+
+ dmxdev->demux->dvr_input.priv_handle = NULL;
+ dmxdev->demux->dvr_input.ringbuff = &dmxdev->dvr_input_buffer;
+ dmxdev->demux->dvr_input_protected = 0;
+ mem = vmalloc(DVR_CMDS_BUFFER_SIZE);
+ if (!mem) {
+ vfree(dmxdev->dvr_input_buffer.data);
+ dmxdev->dvr_input_buffer.data = NULL;
+ mutex_unlock(&dmxdev->mutex);
+ return -ENOMEM;
+ }
+ dvb_ringbuffer_init(&dmxdev->dvr_cmd_buffer, mem,
+ DVR_CMDS_BUFFER_SIZE);
+ dvbdev->writers--;
+
+ dmxdev->dvr_input_thread =
+ kthread_run(
+ dvr_input_thread_entry,
+ (void *)dmxdev,
+ "dvr_input");
+
+ if (IS_ERR(dmxdev->dvr_input_thread)) {
+ vfree(dmxdev->dvr_input_buffer.data);
+ vfree(dmxdev->dvr_cmd_buffer.data);
+ dmxdev->dvr_input_buffer.data = NULL;
+ dmxdev->dvr_cmd_buffer.data = NULL;
+ mutex_unlock(&dmxdev->mutex);
+ return -ENOMEM;
+ }
}
+
dvbdev->users++;
mutex_unlock(&dmxdev->mutex);
return 0;
@@ -187,11 +928,6 @@
mutex_lock(&dmxdev->mutex);
- if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
- dmxdev->demux->disconnect_frontend(dmxdev->demux);
- dmxdev->demux->connect_frontend(dmxdev->demux,
- dmxdev->dvr_orig_fe);
- }
if ((file->f_flags & O_ACCMODE) == O_RDONLY) {
dvbdev->readers++;
if (dmxdev->dvr_buffer.data) {
@@ -200,12 +936,100 @@
spin_lock_irq(&dmxdev->lock);
dmxdev->dvr_buffer.data = NULL;
spin_unlock_irq(&dmxdev->lock);
+ wake_up_all(&dmxdev->dvr_buffer.queue);
+
+ if (dmxdev->dvr_buffer_mode == DMX_BUFFER_MODE_INTERNAL)
+ vfree(mem);
+ }
+
+ if ((dmxdev->dvr_buffer_mode == DMX_BUFFER_MODE_EXTERNAL) &&
+ dmxdev->dvr_priv_buff_handle) {
+ dmxdev->demux->unmap_buffer(dmxdev->demux,
+ dmxdev->dvr_priv_buff_handle);
+ dmxdev->dvr_priv_buff_handle = NULL;
+ }
+ } else {
+ int i;
+
+ spin_lock(&dmxdev->dvr_in_lock);
+ dmxdev->dvr_in_exit = 1;
+ spin_unlock(&dmxdev->dvr_in_lock);
+
+ wake_up_all(&dmxdev->dvr_cmd_buffer.queue);
+
+ /*
+ * There might be dmx filters reading now from DVR
+ * device, in PULL mode, they might be also stalled
+ * on output, signal to them that DVR is exiting.
+ */
+ if (dmxdev->playback_mode == DMX_PB_MODE_PULL) {
+ wake_up_all(&dmxdev->dvr_buffer.queue);
+
+ for (i = 0; i < dmxdev->filternum; i++)
+ if (dmxdev->filter[i].state == DMXDEV_STATE_GO)
+ wake_up_all(
+ &dmxdev->filter[i].buffer.queue);
+ }
+
+ /* notify kernel demux that we are canceling */
+ if (dmxdev->demux->write_cancel)
+ dmxdev->demux->write_cancel(dmxdev->demux);
+
+ /*
+ * Now stop dvr-input thread so that no one
+ * would process data from dvr input buffer any more
+ * before it gets freed.
+ */
+ kthread_stop(dmxdev->dvr_input_thread);
+
+ dvbdev->writers++;
+ dmxdev->demux->disconnect_frontend(dmxdev->demux);
+ dmxdev->demux->connect_frontend(dmxdev->demux,
+ dmxdev->dvr_orig_fe);
+
+ if (dmxdev->dvr_input_buffer.data) {
+ void *mem = dmxdev->dvr_input_buffer.data;
+ /*
+ * Ensure all the operations on the DVR input buffer
+ * are completed before it gets freed.
+ */
+ mb();
+ spin_lock_irq(&dmxdev->dvr_in_lock);
+ dmxdev->dvr_input_buffer.data = NULL;
+ spin_unlock_irq(&dmxdev->dvr_in_lock);
+
+ if (dmxdev->dvr_input_buffer_mode ==
+ DMX_BUFFER_MODE_INTERNAL)
+ vfree(mem);
+ }
+
+ if ((dmxdev->dvr_input_buffer_mode ==
+ DMX_BUFFER_MODE_EXTERNAL) &&
+ (dmxdev->demux->dvr_input.priv_handle)) {
+ if (!dmxdev->demux->dvr_input_protected)
+ dmxdev->demux->unmap_buffer(dmxdev->demux,
+ dmxdev->demux->dvr_input.priv_handle);
+ dmxdev->demux->dvr_input.priv_handle = NULL;
+ }
+
+ if (dmxdev->dvr_cmd_buffer.data) {
+ void *mem = dmxdev->dvr_cmd_buffer.data;
+ /*
+ * Ensure all the operations on the DVR command buffer
+ * are completed before it gets freed.
+ */
+ mb();
+ spin_lock_irq(&dmxdev->dvr_in_lock);
+ dmxdev->dvr_cmd_buffer.data = NULL;
+ spin_unlock_irq(&dmxdev->dvr_in_lock);
vfree(mem);
}
}
/* TODO */
dvbdev->users--;
if (dvbdev->users == 1 && dmxdev->exit == 1) {
+ fops_put(file->f_op);
+ file->f_op = NULL;
mutex_unlock(&dmxdev->mutex);
wake_up(&dvbdev->wait_queue);
} else
@@ -214,17 +1038,21 @@
return 0;
}
-static ssize_t dvb_dvr_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+
+static int dvb_dvr_mmap(struct file *filp, struct vm_area_struct *vma)
{
- struct dvb_device *dvbdev = file->private_data;
+ struct dvb_device *dvbdev = filp->private_data;
struct dmxdev *dmxdev = dvbdev->priv;
+ struct dvb_ringbuffer *buffer;
+ enum dmx_buffer_mode buffer_mode;
+ int vma_size;
+ int buffer_size;
int ret;
- if (!dmxdev->demux->write)
- return -EOPNOTSUPP;
- if ((file->f_flags & O_ACCMODE) != O_WRONLY)
+ if (((filp->f_flags & O_ACCMODE) == O_RDONLY) &&
+ (vma->vm_flags & VM_WRITE))
return -EINVAL;
+
if (mutex_lock_interruptible(&dmxdev->mutex))
return -ERESTARTSYS;
@@ -232,58 +1060,693 @@
mutex_unlock(&dmxdev->mutex);
return -ENODEV;
}
- ret = dmxdev->demux->write(dmxdev->demux, buf, count);
+
+ if ((filp->f_flags & O_ACCMODE) == O_RDONLY) {
+ buffer = &dmxdev->dvr_buffer;
+ buffer_mode = dmxdev->dvr_buffer_mode;
+ } else {
+ buffer = &dmxdev->dvr_input_buffer;
+ buffer_mode = dmxdev->dvr_input_buffer_mode;
+ }
+
+ if (buffer_mode == DMX_BUFFER_MODE_EXTERNAL) {
+ mutex_unlock(&dmxdev->mutex);
+ return -EINVAL;
+ }
+
+ vma_size = vma->vm_end - vma->vm_start;
+
+ /* Make sure requested mapping is not larger than buffer size */
+ buffer_size = buffer->size + (PAGE_SIZE-1);
+ buffer_size = buffer_size & ~(PAGE_SIZE-1);
+
+ if (vma_size != buffer_size) {
+ mutex_unlock(&dmxdev->mutex);
+ return -EINVAL;
+ }
+
+ ret = remap_vmalloc_range(vma, buffer->data, 0);
+ if (ret) {
+ mutex_unlock(&dmxdev->mutex);
+ return ret;
+ }
+
+ vma->vm_flags |= VM_DONTDUMP;
+ vma->vm_flags |= VM_DONTEXPAND;
+
mutex_unlock(&dmxdev->mutex);
return ret;
}
+static void dvb_dvr_queue_data_feed(struct dmxdev *dmxdev, size_t count)
+{
+ struct dvb_ringbuffer *cmdbuf = &dmxdev->dvr_cmd_buffer;
+ struct dvr_command *dvr_cmd;
+ int last_dvr_cmd;
+
+ spin_lock(&dmxdev->dvr_in_lock);
+
+ /* Peek at the last DVR command queued, try to coalesce FEED commands */
+ if (dvb_ringbuffer_avail(cmdbuf) >= sizeof(*dvr_cmd)) {
+ last_dvr_cmd = cmdbuf->pwrite - sizeof(*dvr_cmd);
+ if (last_dvr_cmd < 0)
+ last_dvr_cmd += cmdbuf->size;
+
+ dvr_cmd = (struct dvr_command *)&cmdbuf->data[last_dvr_cmd];
+ if (dvr_cmd->type == DVR_DATA_FEED_CMD) {
+ dvr_cmd->cmd.data_feed_count += count;
+ spin_unlock(&dmxdev->dvr_in_lock);
+ return;
+ }
+ }
+
+ /*
+ * We assume command buffer is large enough so that overflow should not
+ * happen. Overflow to the command buffer means data previously written
+ * to the input buffer is 'orphan' - does not have a matching FEED
+ * command. Issue a warning if this ever happens.
+ * Orphan data might still be processed if EOS is issued.
+ */
+ if (dvb_ringbuffer_free(cmdbuf) < sizeof(*dvr_cmd)) {
+ pr_err("%s: DVR command buffer overflow\n", __func__);
+ spin_unlock(&dmxdev->dvr_in_lock);
+ return;
+ }
+
+ dvr_cmd = (struct dvr_command *)&cmdbuf->data[cmdbuf->pwrite];
+ dvr_cmd->type = DVR_DATA_FEED_CMD;
+ dvr_cmd->cmd.data_feed_count = count;
+ DVB_RINGBUFFER_PUSH(cmdbuf, sizeof(*dvr_cmd));
+ spin_unlock(&dmxdev->dvr_in_lock);
+
+ wake_up_all(&cmdbuf->queue);
+}
+
+static int dvb_dvr_external_input_only(struct dmxdev *dmxdev)
+{
+ struct dmx_caps caps;
+ int is_external_only;
+ int flags;
+ size_t tsp_size;
+
+ if (dmxdev->demux->get_tsp_size)
+ tsp_size = dmxdev->demux->get_tsp_size(dmxdev->demux);
+ else
+ tsp_size = 188;
+
+ /*
+ * For backward compatibility, default assumes that
+ * external only buffers are not supported.
+ */
+ flags = 0;
+ if (dmxdev->demux->get_caps) {
+ dmxdev->demux->get_caps(dmxdev->demux, &caps);
+
+ if (tsp_size == 188)
+ flags = caps.playback_188_tsp.flags;
+ else
+ flags = caps.playback_192_tsp.flags;
+ }
+
+ if (!(flags & DMX_BUFFER_INTERNAL_SUPPORT) &&
+ (flags & DMX_BUFFER_EXTERNAL_SUPPORT))
+ is_external_only = 1;
+ else
+ is_external_only = 0;
+
+ return is_external_only;
+}
+
+static int dvb_dvr_verify_buffer_size(struct dmxdev *dmxdev,
+ unsigned int f_flags,
+ unsigned long size)
+{
+ struct dmx_caps caps;
+ int tsp_size;
+
+ if (!dmxdev->demux->get_caps)
+ return 1;
+
+ if (dmxdev->demux->get_tsp_size)
+ tsp_size = dmxdev->demux->get_tsp_size(dmxdev->demux);
+ else
+ tsp_size = 188;
+
+ dmxdev->demux->get_caps(dmxdev->demux, &caps);
+ if ((f_flags & O_ACCMODE) == O_RDONLY)
+ return (tsp_size == 188 && dvb_dmxdev_verify_buffer_size(size,
+ caps.recording_188_tsp.max_size,
+ caps.recording_188_tsp.size_alignment)) ||
+ (tsp_size == 192 && dvb_dmxdev_verify_buffer_size(size,
+ caps.recording_192_tsp.max_size,
+ caps.recording_192_tsp.size_alignment));
+
+ return (tsp_size == 188 && dvb_dmxdev_verify_buffer_size(size,
+ caps.playback_188_tsp.max_size,
+ caps.playback_188_tsp.size_alignment)) ||
+ (tsp_size == 192 && dvb_dmxdev_verify_buffer_size(size,
+ caps.playback_192_tsp.max_size,
+ caps.playback_192_tsp.size_alignment));
+}
+
+static ssize_t dvb_dvr_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dvb_device *dvbdev = file->private_data;
+ struct dmxdev *dmxdev = dvbdev->priv;
+ struct dvb_ringbuffer *src = &dmxdev->dvr_input_buffer;
+ struct dvb_ringbuffer *cmdbuf = &dmxdev->dvr_cmd_buffer;
+ int ret;
+ size_t todo;
+ ssize_t free_space;
+
+ if (!dmxdev->demux->write)
+ return -EOPNOTSUPP;
+
+ if (!dvb_dvr_verify_buffer_size(dmxdev, file->f_flags, src->size) ||
+ ((file->f_flags & O_ACCMODE) == O_RDONLY) ||
+ !src->data || !cmdbuf->data ||
+ (dvb_dvr_external_input_only(dmxdev) &&
+ (dmxdev->dvr_input_buffer_mode == DMX_BUFFER_MODE_INTERNAL)))
+ return -EINVAL;
+
+ if ((file->f_flags & O_NONBLOCK) &&
+ (dvb_ringbuffer_free(src) == 0))
+ return -EWOULDBLOCK;
+
+ ret = 0;
+ for (todo = count; todo > 0; todo -= ret) {
+ ret = wait_event_interruptible(src->queue,
+ (dvb_ringbuffer_free(src)) ||
+ !src->data || !cmdbuf->data ||
+ (src->error != 0) || dmxdev->dvr_in_exit);
+
+ if (ret < 0)
+ return ret;
+
+ if (mutex_lock_interruptible(&dmxdev->mutex))
+ return -ERESTARTSYS;
+
+ if ((!src->data) || (!cmdbuf->data)) {
+ mutex_unlock(&dmxdev->mutex);
+ return 0;
+ }
+
+ if (dmxdev->exit || dmxdev->dvr_in_exit) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ENODEV;
+ }
+
+ if (src->error) {
+ ret = src->error;
+ dvb_ringbuffer_flush(src);
+ mutex_unlock(&dmxdev->mutex);
+ wake_up_all(&src->queue);
+ return ret;
+ }
+
+ free_space = dvb_ringbuffer_free(src);
+
+ if (free_space > todo)
+ free_space = todo;
+
+ ret = dvb_ringbuffer_write_user(src, buf, free_space);
+
+ if (ret < 0) {
+ mutex_unlock(&dmxdev->mutex);
+ return ret;
+ }
+
+ buf += ret;
+
+ dvb_dvr_queue_data_feed(dmxdev, ret);
+
+ mutex_unlock(&dmxdev->mutex);
+ }
+
+ return (count - todo) ? (count - todo) : ret;
+}
+
+static int dvb_dmxdev_flush_data(struct dmxdev_filter *filter, size_t length)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ struct dvb_ringbuffer *buffer = &filter->buffer;
+ struct dmxdev_events_queue *events = &filter->events;
+
+ if (filter->type == DMXDEV_TYPE_PES &&
+ filter->params.pes.output == DMX_OUT_TS_TAP) {
+ buffer = &filter->dev->dvr_buffer;
+ events = &filter->dev->dvr_output_events;
+ }
+
+ /*
+ * Drop 'length' pending data bytes from the ringbuffer and update
+ * event queue accordingly, similarly to dvb_dmxdev_release_data().
+ */
+ spin_lock_irqsave(&filter->dev->lock, flags);
+ DVB_RINGBUFFER_SKIP(buffer, length);
+ buffer->error = 0;
+ dvb_dmxdev_flush_events(events);
+ events->current_event_start_offset = buffer->pwrite;
+ spin_unlock_irqrestore(&filter->dev->lock, flags);
+
+ if (filter->type == DMXDEV_TYPE_PES) {
+ struct dmxdev_feed *feed;
+
+ feed = list_first_entry(&filter->feed.ts,
+ struct dmxdev_feed, next);
+
+ if (feed->ts->flush_buffer)
+ return feed->ts->flush_buffer(feed->ts, length);
+ } else if (filter->type == DMXDEV_TYPE_SEC &&
+ filter->feed.sec.feed->flush_buffer) {
+ return filter->feed.sec.feed->flush_buffer(
+ filter->feed.sec.feed, length);
+ }
+
+ return ret;
+}
+
+static inline void dvb_dmxdev_auto_flush_buffer(struct dmxdev_filter *filter,
+ struct dvb_ringbuffer *buf)
+{
+ size_t flush_len;
+
+ /*
+ * When buffer overflowed, demux-dev marked the buffer in
+ * error state. If auto-flush is enabled discard current
+ * pending data in buffer.
+ */
+ if (overflow_auto_flush) {
+ flush_len = dvb_ringbuffer_avail(buf);
+ dvb_dmxdev_flush_data(filter, flush_len);
+ }
+}
+
static ssize_t dvb_dvr_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
+ ssize_t res;
struct dvb_device *dvbdev = file->private_data;
struct dmxdev *dmxdev = dvbdev->priv;
+ unsigned long flags;
if (dmxdev->exit)
return -ENODEV;
- return dvb_dmxdev_buffer_read(&dmxdev->dvr_buffer,
- file->f_flags & O_NONBLOCK,
- buf, count, ppos);
+ if (!dvb_dvr_verify_buffer_size(dmxdev, file->f_flags,
+ dmxdev->dvr_buffer.size))
+ return -EINVAL;
+
+ res = dvb_dmxdev_buffer_read(NULL, &dmxdev->dvr_buffer,
+ file->f_flags & O_NONBLOCK,
+ buf, count, ppos);
+
+ if (res > 0) {
+ dvb_dmxdev_notify_data_read(dmxdev->dvr_feed, res);
+ spin_lock_irqsave(&dmxdev->lock, flags);
+ dvb_dmxdev_update_events(&dmxdev->dvr_output_events, res);
+ spin_unlock_irqrestore(&dmxdev->lock, flags);
+
+ /*
+ * in PULL mode, we might be stalling on
+ * event queue, so need to wake-up waiters
+ */
+ if (dmxdev->playback_mode == DMX_PB_MODE_PULL)
+ wake_up_all(&dmxdev->dvr_buffer.queue);
+ } else if (res == -EOVERFLOW) {
+ dvb_dmxdev_auto_flush_buffer(dmxdev->dvr_feed,
+ &dmxdev->dvr_buffer);
+ }
+
+ return res;
+}
+
+/*
+ * dvb_dvr_push_oob_cmd
+ *
+ * Note: this function assumes dmxdev->mutex was taken, so command buffer cannot
+ * be released during its operation.
+ */
+static int dvb_dvr_push_oob_cmd(struct dmxdev *dmxdev, unsigned int f_flags,
+ struct dmx_oob_command *cmd)
+{
+ struct dvb_ringbuffer *cmdbuf = &dmxdev->dvr_cmd_buffer;
+ struct dvr_command *dvr_cmd;
+
+ if ((f_flags & O_ACCMODE) == O_RDONLY ||
+ dmxdev->source < DMX_SOURCE_DVR0)
+ return -EPERM;
+
+ if (dvb_ringbuffer_free(cmdbuf) < sizeof(*dvr_cmd))
+ return -ENOMEM;
+
+ dvr_cmd = (struct dvr_command *)&cmdbuf->data[cmdbuf->pwrite];
+ dvr_cmd->type = DVR_OOB_CMD;
+ dvr_cmd->cmd.oobcmd = *cmd;
+ DVB_RINGBUFFER_PUSH(cmdbuf, sizeof(*dvr_cmd));
+ wake_up_all(&cmdbuf->queue);
+
+ return 0;
+}
+
+static int dvb_dvr_flush_buffer(struct dmxdev *dmxdev, unsigned int f_flags)
+{
+ size_t flush_len;
+ int ret;
+
+ if ((f_flags & O_ACCMODE) != O_RDONLY)
+ return -EINVAL;
+
+ flush_len = dvb_ringbuffer_avail(&dmxdev->dvr_buffer);
+ ret = dvb_dmxdev_flush_data(dmxdev->dvr_feed, flush_len);
+
+ return ret;
}
static int dvb_dvr_set_buffer_size(struct dmxdev *dmxdev,
- unsigned long size)
+ unsigned int f_flags,
+ unsigned long size)
{
- struct dvb_ringbuffer *buf = &dmxdev->dvr_buffer;
+ struct dvb_ringbuffer *buf;
void *newmem;
void *oldmem;
+ spinlock_t *lock;
+ enum dmx_buffer_mode buffer_mode;
- dprintk("function : %s\n", __func__);
+ pr_debug("function : %s\n", __func__);
+
+ if ((f_flags & O_ACCMODE) == O_RDONLY) {
+ buf = &dmxdev->dvr_buffer;
+ lock = &dmxdev->lock;
+ buffer_mode = dmxdev->dvr_buffer_mode;
+ } else {
+ buf = &dmxdev->dvr_input_buffer;
+ lock = &dmxdev->dvr_in_lock;
+ buffer_mode = dmxdev->dvr_input_buffer_mode;
+ }
if (buf->size == size)
return 0;
- if (!size)
+ if (!size || (buffer_mode == DMX_BUFFER_MODE_EXTERNAL))
return -EINVAL;
- newmem = vmalloc(size);
+ newmem = vmalloc_user(size);
if (!newmem)
return -ENOMEM;
oldmem = buf->data;
- spin_lock_irq(&dmxdev->lock);
+ spin_lock_irq(lock);
+
+ if (((f_flags & O_ACCMODE) != O_RDONLY) &&
+ (dmxdev->dvr_processing_input)) {
+ spin_unlock_irq(lock);
+ vfree(oldmem);
+ return -EBUSY;
+ }
+
buf->data = newmem;
buf->size = size;
/* reset and not flush in case the buffer shrinks */
dvb_ringbuffer_reset(buf);
- spin_unlock_irq(&dmxdev->lock);
+
+ spin_unlock_irq(lock);
vfree(oldmem);
return 0;
}
+static int dvb_dvr_set_buffer_mode(struct dmxdev *dmxdev,
+ unsigned int f_flags, enum dmx_buffer_mode mode)
+{
+ struct dvb_ringbuffer *buf;
+ spinlock_t *lock;
+ enum dmx_buffer_mode *buffer_mode;
+ void **buff_handle;
+ void *oldmem;
+ int *is_protected;
+
+ if ((mode != DMX_BUFFER_MODE_INTERNAL) &&
+ (mode != DMX_BUFFER_MODE_EXTERNAL))
+ return -EINVAL;
+
+ if ((mode == DMX_BUFFER_MODE_EXTERNAL) &&
+ (!dmxdev->demux->map_buffer || !dmxdev->demux->unmap_buffer))
+ return -EINVAL;
+
+ if ((f_flags & O_ACCMODE) == O_RDONLY) {
+ buf = &dmxdev->dvr_buffer;
+ lock = &dmxdev->lock;
+ buffer_mode = &dmxdev->dvr_buffer_mode;
+ buff_handle = &dmxdev->dvr_priv_buff_handle;
+ is_protected = NULL;
+ } else {
+ buf = &dmxdev->dvr_input_buffer;
+ lock = &dmxdev->dvr_in_lock;
+ buffer_mode = &dmxdev->dvr_input_buffer_mode;
+ buff_handle = &dmxdev->demux->dvr_input.priv_handle;
+ is_protected = &dmxdev->demux->dvr_input_protected;
+ }
+
+ if (mode == *buffer_mode)
+ return 0;
+
+ oldmem = buf->data;
+ spin_lock_irq(lock);
+ buf->data = NULL;
+ spin_unlock_irq(lock);
+
+ *buffer_mode = mode;
+
+ if (mode == DMX_BUFFER_MODE_INTERNAL) {
+ /* switched from external to internal */
+ if (*buff_handle) {
+ dmxdev->demux->unmap_buffer(dmxdev->demux,
+ *buff_handle);
+ *buff_handle = NULL;
+ }
+
+ if (is_protected)
+ *is_protected = 0;
+
+ /* set default internal buffer */
+ dvb_dvr_set_buffer_size(dmxdev, f_flags, DVR_BUFFER_SIZE);
+ } else if (oldmem) {
+ /* switched from internal to external */
+ vfree(oldmem);
+ }
+
+ return 0;
+}
+
+static int dvb_dvr_set_buffer(struct dmxdev *dmxdev,
+ unsigned int f_flags, struct dmx_buffer *dmx_buffer)
+{
+ struct dvb_ringbuffer *buf;
+ spinlock_t *lock;
+ enum dmx_buffer_mode buffer_mode;
+ void **buff_handle;
+ void *newmem;
+ void *oldmem;
+ int *is_protected;
+ struct dmx_caps caps;
+
+ if (dmxdev->demux->get_caps)
+ dmxdev->demux->get_caps(dmxdev->demux, &caps);
+ else
+ caps.caps = 0;
+
+ if ((f_flags & O_ACCMODE) == O_RDONLY) {
+ buf = &dmxdev->dvr_buffer;
+ lock = &dmxdev->lock;
+ buffer_mode = dmxdev->dvr_buffer_mode;
+ buff_handle = &dmxdev->dvr_priv_buff_handle;
+ is_protected = NULL;
+ } else {
+ buf = &dmxdev->dvr_input_buffer;
+ lock = &dmxdev->dvr_in_lock;
+ buffer_mode = dmxdev->dvr_input_buffer_mode;
+ buff_handle = &dmxdev->demux->dvr_input.priv_handle;
+ is_protected = &dmxdev->demux->dvr_input_protected;
+ if (!(caps.caps & DMX_CAP_SECURED_INPUT_PLAYBACK) &&
+ dmx_buffer->is_protected)
+ return -EINVAL;
+ }
+
+ if (!dmx_buffer->size ||
+ (buffer_mode == DMX_BUFFER_MODE_INTERNAL))
+ return -EINVAL;
+
+ oldmem = *buff_handle;
+
+ /*
+ * Protected buffer is relevant only for DVR input buffer
+ * when DVR device is opened for write. In such case,
+ * buffer is mapped only if the buffer is not protected one.
+ */
+ if (!is_protected || !dmx_buffer->is_protected) {
+ if (dmxdev->demux->map_buffer(dmxdev->demux, dmx_buffer,
+ buff_handle, &newmem))
+ return -ENOMEM;
+ } else {
+ newmem = NULL;
+ *buff_handle = NULL;
+ }
+
+ spin_lock_irq(lock);
+ buf->data = newmem;
+ buf->size = dmx_buffer->size;
+ if (is_protected)
+ *is_protected = dmx_buffer->is_protected;
+ dvb_ringbuffer_reset(buf);
+ spin_unlock_irq(lock);
+
+ if (oldmem)
+ dmxdev->demux->unmap_buffer(dmxdev->demux, oldmem);
+
+ return 0;
+}
+
+static int dvb_dvr_get_event(struct dmxdev *dmxdev,
+ unsigned int f_flags,
+ struct dmx_filter_event *event)
+{
+ int res = 0;
+
+ if (!((f_flags & O_ACCMODE) == O_RDONLY))
+ return -EINVAL;
+
+ spin_lock_irq(&dmxdev->lock);
+
+ if (dmxdev->dvr_buffer.error == -EOVERFLOW) {
+ event->type = DMX_EVENT_BUFFER_OVERFLOW;
+ dmxdev->dvr_buffer.error = 0;
+ } else {
+ res = dvb_dmxdev_remove_event(&dmxdev->dvr_output_events,
+ event);
+ if (res) {
+ spin_unlock_irq(&dmxdev->lock);
+ return res;
+ }
+ }
+
+ spin_unlock_irq(&dmxdev->lock);
+
+ if (event->type == DMX_EVENT_BUFFER_OVERFLOW)
+ dvb_dmxdev_auto_flush_buffer(dmxdev->dvr_feed,
+ &dmxdev->dvr_buffer);
+
+ /*
+ * in PULL mode, we might be stalling on
+ * event queue, so need to wake-up waiters
+ */
+ if (dmxdev->playback_mode == DMX_PB_MODE_PULL)
+ wake_up_all(&dmxdev->dvr_buffer.queue);
+
+ return res;
+}
+
+static int dvb_dvr_get_buffer_status(struct dmxdev *dmxdev,
+ unsigned int f_flags,
+ struct dmx_buffer_status *dmx_buffer_status)
+{
+ struct dvb_ringbuffer *buf;
+ spinlock_t *lock;
+
+ if ((f_flags & O_ACCMODE) == O_RDONLY) {
+ buf = &dmxdev->dvr_buffer;
+ lock = &dmxdev->lock;
+ } else {
+ buf = &dmxdev->dvr_input_buffer;
+ lock = &dmxdev->dvr_in_lock;
+ }
+
+ spin_lock_irq(lock);
+
+ dmx_buffer_status->error = buf->error;
+ dmx_buffer_status->fullness = dvb_ringbuffer_avail(buf);
+ dmx_buffer_status->free_bytes = dvb_ringbuffer_free(buf);
+ dmx_buffer_status->read_offset = buf->pread;
+ dmx_buffer_status->write_offset = buf->pwrite;
+ dmx_buffer_status->size = buf->size;
+ buf->error = 0;
+
+ spin_unlock_irq(lock);
+
+ if (dmx_buffer_status->error == -EOVERFLOW)
+ dvb_dmxdev_auto_flush_buffer(dmxdev->dvr_feed, buf);
+
+ return 0;
+}
+
+static int dvb_dvr_release_data(struct dmxdev *dmxdev,
+ unsigned int f_flags,
+ u32 bytes_count)
+{
+ ssize_t buff_fullness;
+
+ if (!((f_flags & O_ACCMODE) == O_RDONLY))
+ return -EINVAL;
+
+ if (!bytes_count)
+ return 0;
+
+ buff_fullness = dvb_ringbuffer_avail(&dmxdev->dvr_buffer);
+
+ if (bytes_count > buff_fullness)
+ return -EINVAL;
+
+ DVB_RINGBUFFER_SKIP(&dmxdev->dvr_buffer, bytes_count);
+
+ dvb_dmxdev_notify_data_read(dmxdev->dvr_feed, bytes_count);
+ spin_lock_irq(&dmxdev->lock);
+ dvb_dmxdev_update_events(&dmxdev->dvr_output_events, bytes_count);
+ spin_unlock_irq(&dmxdev->lock);
+
+ wake_up_all(&dmxdev->dvr_buffer.queue);
+ return 0;
+}
+
+/*
+ * dvb_dvr_feed_data - Notify new data in DVR input buffer
+ *
+ * @dmxdev - demux device instance
+ * @f_flags - demux device file flag (access mode)
+ * @bytes_count - how many bytes were written to the input buffer
+ *
+ * Note: this function assumes dmxdev->mutex was taken, so buffer cannot
+ * be released during its operation.
+ */
+static int dvb_dvr_feed_data(struct dmxdev *dmxdev,
+ unsigned int f_flags,
+ u32 bytes_count)
+{
+ ssize_t free_space;
+ struct dvb_ringbuffer *buffer = &dmxdev->dvr_input_buffer;
+
+ if ((f_flags & O_ACCMODE) == O_RDONLY)
+ return -EINVAL;
+
+ if (!bytes_count)
+ return 0;
+
+ free_space = dvb_ringbuffer_free(buffer);
+
+ if (bytes_count > free_space)
+ return -EINVAL;
+
+ DVB_RINGBUFFER_PUSH(buffer, bytes_count);
+
+ dvb_dvr_queue_data_feed(dmxdev, bytes_count);
+
+ return 0;
+}
+
static inline void dvb_dmxdev_filter_state_set(struct dmxdev_filter
*dmxdevfilter, int state)
{
@@ -301,12 +1764,13 @@
if (buf->size == size)
return 0;
- if (!size)
+ if (!size ||
+ (dmxdevfilter->buffer_mode == DMX_BUFFER_MODE_EXTERNAL))
return -EINVAL;
if (dmxdevfilter->state >= DMXDEV_STATE_GO)
return -EBUSY;
- newmem = vmalloc(size);
+ newmem = vmalloc_user(size);
if (!newmem)
return -ENOMEM;
@@ -325,15 +1789,805 @@
return 0;
}
+static int dvb_dmxdev_set_buffer_mode(struct dmxdev_filter *dmxdevfilter,
+ enum dmx_buffer_mode mode)
+{
+ struct dvb_ringbuffer *buf = &dmxdevfilter->buffer;
+ struct dmxdev *dmxdev = dmxdevfilter->dev;
+ void *oldmem;
+
+ if (dmxdevfilter->state >= DMXDEV_STATE_GO)
+ return -EBUSY;
+
+ if ((mode != DMX_BUFFER_MODE_INTERNAL) &&
+ (mode != DMX_BUFFER_MODE_EXTERNAL))
+ return -EINVAL;
+
+ if ((mode == DMX_BUFFER_MODE_EXTERNAL) &&
+ (!dmxdev->demux->map_buffer || !dmxdev->demux->unmap_buffer))
+ return -EINVAL;
+
+ if (mode == dmxdevfilter->buffer_mode)
+ return 0;
+
+ oldmem = buf->data;
+ spin_lock_irq(&dmxdevfilter->dev->lock);
+ buf->data = NULL;
+ spin_unlock_irq(&dmxdevfilter->dev->lock);
+
+ dmxdevfilter->buffer_mode = mode;
+
+ if (mode == DMX_BUFFER_MODE_INTERNAL) {
+ /* switched from external to internal */
+ if (dmxdevfilter->priv_buff_handle) {
+ dmxdev->demux->unmap_buffer(dmxdev->demux,
+ dmxdevfilter->priv_buff_handle);
+ dmxdevfilter->priv_buff_handle = NULL;
+ }
+ } else if (oldmem) {
+ /* switched from internal to external */
+ vfree(oldmem);
+ }
+
+ return 0;
+}
+
+static int dvb_dmxdev_set_buffer(struct dmxdev_filter *dmxdevfilter,
+ struct dmx_buffer *buffer)
+{
+ struct dvb_ringbuffer *buf = &dmxdevfilter->buffer;
+ struct dmxdev *dmxdev = dmxdevfilter->dev;
+ void *newmem;
+ void *oldmem;
+
+ if (dmxdevfilter->state >= DMXDEV_STATE_GO)
+ return -EBUSY;
+
+ if ((!buffer->size) ||
+ (dmxdevfilter->buffer_mode == DMX_BUFFER_MODE_INTERNAL))
+ return -EINVAL;
+
+ oldmem = dmxdevfilter->priv_buff_handle;
+ if (dmxdev->demux->map_buffer(dmxdev->demux, buffer,
+ &dmxdevfilter->priv_buff_handle, &newmem))
+ return -ENOMEM;
+
+ spin_lock_irq(&dmxdevfilter->dev->lock);
+ buf->data = newmem;
+ buf->size = buffer->size;
+ dvb_ringbuffer_reset(buf);
+ spin_unlock_irq(&dmxdevfilter->dev->lock);
+
+ if (oldmem)
+ dmxdev->demux->unmap_buffer(dmxdev->demux, oldmem);
+
+ return 0;
+}
+
+static int dvb_dmxdev_set_tsp_out_format(struct dmxdev_filter *dmxdevfilter,
+ enum dmx_tsp_format_t dmx_tsp_format)
+{
+ if (dmxdevfilter->state >= DMXDEV_STATE_GO)
+ return -EBUSY;
+
+ if ((dmx_tsp_format > DMX_TSP_FORMAT_192_HEAD) ||
+ (dmx_tsp_format < DMX_TSP_FORMAT_188))
+ return -EINVAL;
+
+ dmxdevfilter->dmx_tsp_format = dmx_tsp_format;
+
+ return 0;
+}
+
+static int dvb_dmxdev_set_decoder_buffer_size(
+ struct dmxdev_filter *dmxdevfilter,
+ unsigned long size)
+{
+ struct dmx_caps caps;
+ struct dmx_demux *demux = dmxdevfilter->dev->demux;
+
+ if (demux->get_caps) {
+ demux->get_caps(demux, &caps);
+ if (!dvb_dmxdev_verify_buffer_size(size, caps.decoder.max_size,
+ caps.decoder.size_alignment))
+ return -EINVAL;
+ }
+
+ if (size == 0)
+ return -EINVAL;
+
+ if (dmxdevfilter->decoder_buffers.buffers_size == size)
+ return 0;
+
+ if (dmxdevfilter->state >= DMXDEV_STATE_GO)
+ return -EBUSY;
+
+ /*
+ * In case decoder buffers were already set before to some external
+ * buffers, setting the decoder buffer size alone implies transition
+ * to internal buffer mode.
+ */
+ dmxdevfilter->decoder_buffers.buffers_size = size;
+ dmxdevfilter->decoder_buffers.buffers_num = 0;
+ dmxdevfilter->decoder_buffers.is_linear = 0;
+ return 0;
+}
+
+static int dvb_dmxdev_set_source(struct dmxdev_filter *dmxdevfilter,
+ dmx_source_t *source)
+{
+ int ret = 0;
+ struct dmxdev *dev;
+
+ if (dmxdevfilter->state == DMXDEV_STATE_GO)
+ return -EBUSY;
+
+ dev = dmxdevfilter->dev;
+ if (dev->demux->set_source)
+ ret = dev->demux->set_source(dev->demux, source);
+
+ if (!ret)
+ dev->source = *source;
+
+ return ret;
+}
+
+static int dvb_dmxdev_reuse_decoder_buf(struct dmxdev_filter *dmxdevfilter,
+ int cookie)
+{
+ struct dmxdev_feed *feed;
+
+ if (dmxdevfilter->state != DMXDEV_STATE_GO ||
+ (dmxdevfilter->type != DMXDEV_TYPE_PES) ||
+ (dmxdevfilter->params.pes.output != DMX_OUT_DECODER) ||
+ (dmxdevfilter->events.event_mask.disable_mask &
+ DMX_EVENT_NEW_ES_DATA))
+ return -EPERM;
+
+ /* Only one feed should be in the list in case of decoder */
+ feed = list_first_entry(&dmxdevfilter->feed.ts,
+ struct dmxdev_feed, next);
+ if (feed && feed->ts && feed->ts->reuse_decoder_buffer)
+ return feed->ts->reuse_decoder_buffer(feed->ts, cookie);
+
+ return -ENODEV;
+}
+
+static int dvb_dmxdev_set_event_mask(struct dmxdev_filter *dmxdevfilter,
+ struct dmx_events_mask *event_mask)
+{
+ if (!event_mask ||
+ (event_mask->wakeup_threshold >= DMX_EVENT_QUEUE_SIZE))
+ return -EINVAL;
+
+ if (dmxdevfilter->state == DMXDEV_STATE_GO)
+ return -EBUSY;
+
+ /*
+ * Overflow event is not allowed to be masked.
+ * This is because if overflow occurs, demux stops outputting data
+ * until user is notified. If user is using events to read the data,
+	 * the overflow event must always be enabled; otherwise we would
+ * never recover from overflow state.
+ */
+ event_mask->disable_mask &= ~(u32)DMX_EVENT_BUFFER_OVERFLOW;
+ event_mask->no_wakeup_mask &= ~(u32)DMX_EVENT_BUFFER_OVERFLOW;
+
+ dmxdevfilter->events.event_mask = *event_mask;
+
+ return 0;
+}
+
+static int dvb_dmxdev_get_event_mask(struct dmxdev_filter *dmxdevfilter,
+ struct dmx_events_mask *event_mask)
+{
+ if (!event_mask)
+ return -EINVAL;
+
+ *event_mask = dmxdevfilter->events.event_mask;
+
+ return 0;
+}
+
+static int dvb_dmxdev_set_indexing_params(struct dmxdev_filter *dmxdevfilter,
+ struct dmx_indexing_params *idx_params)
+{
+ int found_pid;
+ struct dmxdev_feed *feed;
+ struct dmxdev_feed *ts_feed = NULL;
+ struct dmx_caps caps;
+ int ret = 0;
+
+ if (!dmxdevfilter->dev->demux->get_caps)
+ return -EINVAL;
+
+ dmxdevfilter->dev->demux->get_caps(dmxdevfilter->dev->demux, &caps);
+
+ if (!idx_params ||
+ !(caps.caps & DMX_CAP_VIDEO_INDEXING) ||
+ (dmxdevfilter->state < DMXDEV_STATE_SET) ||
+ (dmxdevfilter->type != DMXDEV_TYPE_PES) ||
+ ((dmxdevfilter->params.pes.output != DMX_OUT_TS_TAP) &&
+ (dmxdevfilter->params.pes.output != DMX_OUT_TSDEMUX_TAP)))
+ return -EINVAL;
+
+ if (idx_params->enable && !idx_params->types)
+ return -EINVAL;
+
+ found_pid = 0;
+ list_for_each_entry(feed, &dmxdevfilter->feed.ts, next) {
+ if (feed->pid == idx_params->pid) {
+ found_pid = 1;
+ ts_feed = feed;
+ ts_feed->idx_params = *idx_params;
+ if ((dmxdevfilter->state == DMXDEV_STATE_GO) &&
+ ts_feed->ts->set_idx_params)
+ ret = ts_feed->ts->set_idx_params(
+ ts_feed->ts, idx_params);
+ break;
+ }
+ }
+
+ if (!found_pid)
+ return -EINVAL;
+
+ return ret;
+}
+
+static int dvb_dmxdev_get_scrambling_bits(struct dmxdev_filter *filter,
+ struct dmx_scrambling_bits *scrambling_bits)
+{
+ struct dmxdev_feed *feed;
+
+ if (!scrambling_bits ||
+ (filter->state != DMXDEV_STATE_GO))
+ return -EINVAL;
+
+ if (filter->type == DMXDEV_TYPE_SEC) {
+ if (filter->feed.sec.feed->get_scrambling_bits)
+ return filter->feed.sec.feed->get_scrambling_bits(
+ filter->feed.sec.feed,
+ &scrambling_bits->value);
+ return -EINVAL;
+ }
+
+ list_for_each_entry(feed, &filter->feed.ts, next) {
+ if (feed->pid == scrambling_bits->pid) {
+ if (feed->ts->get_scrambling_bits)
+ return feed->ts->get_scrambling_bits(feed->ts,
+ &scrambling_bits->value);
+ return -EINVAL;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static void dvb_dmxdev_ts_insertion_work(struct work_struct *worker)
+{
+ struct ts_insertion_buffer *ts_buffer =
+ container_of(to_delayed_work(worker),
+ struct ts_insertion_buffer, dwork);
+ struct dmxdev_feed *feed;
+ size_t free_bytes;
+ struct dmx_ts_feed *ts;
+
+ mutex_lock(&ts_buffer->dmxdevfilter->mutex);
+
+ if (ts_buffer->abort ||
+ (ts_buffer->dmxdevfilter->state != DMXDEV_STATE_GO)) {
+ mutex_unlock(&ts_buffer->dmxdevfilter->mutex);
+ return;
+ }
+
+ feed = list_first_entry(&ts_buffer->dmxdevfilter->feed.ts,
+ struct dmxdev_feed, next);
+ ts = feed->ts;
+ free_bytes = dvb_ringbuffer_free(&ts_buffer->dmxdevfilter->buffer);
+
+ mutex_unlock(&ts_buffer->dmxdevfilter->mutex);
+
+ if (ts_buffer->size < free_bytes)
+ ts->ts_insertion_insert_buffer(ts,
+ ts_buffer->buffer, ts_buffer->size);
+
+ if (ts_buffer->repetition_time && !ts_buffer->abort)
+ schedule_delayed_work(&ts_buffer->dwork,
+ msecs_to_jiffies(ts_buffer->repetition_time));
+}
+
+static void dvb_dmxdev_queue_ts_insertion(
+ struct ts_insertion_buffer *ts_buffer)
+{
+ size_t tsp_size;
+
+ if (ts_buffer->dmxdevfilter->dmx_tsp_format == DMX_TSP_FORMAT_188)
+ tsp_size = 188;
+ else
+ tsp_size = 192;
+
+ if (ts_buffer->size % tsp_size) {
+ pr_err("%s: Wrong buffer alignment, size=%zu, tsp_size=%zu\n",
+ __func__, ts_buffer->size, tsp_size);
+ return;
+ }
+
+ ts_buffer->abort = 0;
+ schedule_delayed_work(&ts_buffer->dwork, 0);
+}
+
+static void dvb_dmxdev_cancel_ts_insertion(
+ struct ts_insertion_buffer *ts_buffer)
+{
+ /*
+ * This function assumes it is called while mutex
+ * of demux filter is taken. Since work in workqueue
+ * captures the filter's mutex to protect against the DB,
+ * mutex needs to be released before waiting for the work
+ * to get finished otherwise work in workqueue will
+ * never be finished.
+ */
+ if (!mutex_is_locked(&ts_buffer->dmxdevfilter->mutex)) {
+ pr_err("%s: mutex is not locked!\n", __func__);
+ return;
+ }
+
+ ts_buffer->abort = 1;
+
+ mutex_unlock(&ts_buffer->dmxdevfilter->mutex);
+ cancel_delayed_work_sync(&ts_buffer->dwork);
+ mutex_lock(&ts_buffer->dmxdevfilter->mutex);
+}
+
+static int dvb_dmxdev_set_ts_insertion(struct dmxdev_filter *dmxdevfilter,
+ struct dmx_set_ts_insertion *params)
+{
+ int ret = 0;
+ int first_buffer;
+ struct dmxdev_feed *feed;
+ struct ts_insertion_buffer *ts_buffer;
+ struct dmx_caps caps;
+
+ if (!dmxdevfilter->dev->demux->get_caps)
+ return -EINVAL;
+
+ dmxdevfilter->dev->demux->get_caps(dmxdevfilter->dev->demux, &caps);
+
+ if (!params ||
+ !params->size ||
+ !(caps.caps & DMX_CAP_TS_INSERTION) ||
+ (dmxdevfilter->state < DMXDEV_STATE_SET) ||
+ (dmxdevfilter->type != DMXDEV_TYPE_PES) ||
+ ((dmxdevfilter->params.pes.output != DMX_OUT_TS_TAP) &&
+ (dmxdevfilter->params.pes.output != DMX_OUT_TSDEMUX_TAP)))
+ return -EINVAL;
+
+ ts_buffer = vmalloc(sizeof(struct ts_insertion_buffer));
+ if (!ts_buffer)
+ return -ENOMEM;
+
+ ts_buffer->buffer = vmalloc(params->size);
+ if (!ts_buffer->buffer) {
+ vfree(ts_buffer);
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(ts_buffer->buffer,
+ params->ts_packets, params->size)) {
+ vfree(ts_buffer->buffer);
+ vfree(ts_buffer);
+ return -EFAULT;
+ }
+
+ if (params->repetition_time &&
+ params->repetition_time < DMX_MIN_INSERTION_REPETITION_TIME)
+ params->repetition_time = DMX_MIN_INSERTION_REPETITION_TIME;
+
+ ts_buffer->size = params->size;
+ ts_buffer->identifier = params->identifier;
+ ts_buffer->repetition_time = params->repetition_time;
+ ts_buffer->dmxdevfilter = dmxdevfilter;
+ INIT_DELAYED_WORK(&ts_buffer->dwork, dvb_dmxdev_ts_insertion_work);
+
+ first_buffer = list_empty(&dmxdevfilter->insertion_buffers);
+ list_add_tail(&ts_buffer->next, &dmxdevfilter->insertion_buffers);
+
+ if (dmxdevfilter->state != DMXDEV_STATE_GO)
+ return 0;
+
+ feed = list_first_entry(&dmxdevfilter->feed.ts,
+ struct dmxdev_feed, next);
+
+ if (first_buffer && feed->ts->ts_insertion_init)
+ ret = feed->ts->ts_insertion_init(feed->ts);
+
+ if (!ret) {
+ dvb_dmxdev_queue_ts_insertion(ts_buffer);
+ } else {
+ list_del(&ts_buffer->next);
+ vfree(ts_buffer->buffer);
+ vfree(ts_buffer);
+ }
+
+ return ret;
+}
+
+static int dvb_dmxdev_abort_ts_insertion(struct dmxdev_filter *dmxdevfilter,
+ struct dmx_abort_ts_insertion *params)
+{
+ int ret = 0;
+ int found_buffer;
+ struct dmxdev_feed *feed;
+ struct ts_insertion_buffer *ts_buffer, *tmp;
+ struct dmx_caps caps;
+
+ if (!dmxdevfilter->dev->demux->get_caps)
+ return -EINVAL;
+
+ dmxdevfilter->dev->demux->get_caps(dmxdevfilter->dev->demux, &caps);
+
+ if (!params ||
+ !(caps.caps & DMX_CAP_TS_INSERTION) ||
+ (dmxdevfilter->state < DMXDEV_STATE_SET) ||
+ (dmxdevfilter->type != DMXDEV_TYPE_PES) ||
+ ((dmxdevfilter->params.pes.output != DMX_OUT_TS_TAP) &&
+ (dmxdevfilter->params.pes.output != DMX_OUT_TSDEMUX_TAP)))
+ return -EINVAL;
+
+ found_buffer = 0;
+ list_for_each_entry_safe(ts_buffer, tmp,
+ &dmxdevfilter->insertion_buffers, next) {
+ if (ts_buffer->identifier == params->identifier) {
+ list_del(&ts_buffer->next);
+ found_buffer = 1;
+ break;
+ }
+ }
+
+ if (!found_buffer)
+ return -EINVAL;
+
+ if (dmxdevfilter->state == DMXDEV_STATE_GO) {
+ dvb_dmxdev_cancel_ts_insertion(ts_buffer);
+ if (list_empty(&dmxdevfilter->insertion_buffers)) {
+ feed = list_first_entry(&dmxdevfilter->feed.ts,
+ struct dmxdev_feed, next);
+ if (feed->ts->ts_insertion_terminate)
+ ret = feed->ts->ts_insertion_terminate(
+ feed->ts);
+ }
+ }
+
+ vfree(ts_buffer->buffer);
+ vfree(ts_buffer);
+
+ return ret;
+}
+
+static int dvb_dmxdev_ts_fullness_callback(struct dmx_ts_feed *filter,
+ int required_space, int wait)
+{
+ struct dmxdev_filter *dmxdevfilter = filter->priv;
+ struct dvb_ringbuffer *src;
+ struct dmxdev_events_queue *events;
+ int ret;
+
+ if (!dmxdevfilter) {
+ pr_err("%s: NULL demux filter object!\n", __func__);
+ return -ENODEV;
+ }
+
+ if (dmxdevfilter->params.pes.output != DMX_OUT_TS_TAP) {
+ src = &dmxdevfilter->buffer;
+ events = &dmxdevfilter->events;
+ } else {
+ src = &dmxdevfilter->dev->dvr_buffer;
+ events = &dmxdevfilter->dev->dvr_output_events;
+ }
+
+ do {
+ ret = 0;
+
+ if (dmxdevfilter->dev->dvr_in_exit)
+ return -ENODEV;
+
+ spin_lock(&dmxdevfilter->dev->lock);
+
+ if ((!src->data) ||
+ (dmxdevfilter->state != DMXDEV_STATE_GO))
+ ret = -EINVAL;
+ else if (src->error)
+ ret = src->error;
+
+ if (ret) {
+ spin_unlock(&dmxdevfilter->dev->lock);
+ return ret;
+ }
+
+ if ((required_space <= dvb_ringbuffer_free(src)) &&
+ (!dvb_dmxdev_events_is_full(events))) {
+ spin_unlock(&dmxdevfilter->dev->lock);
+ return 0;
+ }
+
+ spin_unlock(&dmxdevfilter->dev->lock);
+
+ if (!wait)
+ return -ENOSPC;
+
+ ret = wait_event_interruptible(src->queue,
+ (!src->data) ||
+ ((dvb_ringbuffer_free(src) >= required_space) &&
+ (!dvb_dmxdev_events_is_full(events))) ||
+ (src->error != 0) ||
+ (dmxdevfilter->state != DMXDEV_STATE_GO) ||
+ dmxdevfilter->dev->dvr_in_exit);
+
+ if (ret < 0)
+ return ret;
+ } while (1);
+}
+
+static int dvb_dmxdev_sec_fullness_callback(
+ struct dmx_section_filter *filter,
+ int required_space, int wait)
+{
+ struct dmxdev_filter *dmxdevfilter = filter->priv;
+ struct dvb_ringbuffer *src;
+ struct dmxdev_events_queue *events;
+ int ret;
+
+ if (!dmxdevfilter) {
+ pr_err("%s: NULL demux filter object!\n", __func__);
+ return -ENODEV;
+ }
+
+ src = &dmxdevfilter->buffer;
+ events = &dmxdevfilter->events;
+
+ do {
+ ret = 0;
+
+ if (dmxdevfilter->dev->dvr_in_exit)
+ return -ENODEV;
+
+ spin_lock(&dmxdevfilter->dev->lock);
+
+ if ((!src->data) ||
+ (dmxdevfilter->state != DMXDEV_STATE_GO))
+ ret = -EINVAL;
+ else if (src->error)
+ ret = src->error;
+
+ if (ret) {
+ spin_unlock(&dmxdevfilter->dev->lock);
+ return ret;
+ }
+
+ if ((required_space <= dvb_ringbuffer_free(src)) &&
+ (!dvb_dmxdev_events_is_full(events))) {
+ spin_unlock(&dmxdevfilter->dev->lock);
+ return 0;
+ }
+
+ spin_unlock(&dmxdevfilter->dev->lock);
+
+ if (!wait)
+ return -ENOSPC;
+
+ ret = wait_event_interruptible(src->queue,
+ (!src->data) ||
+ ((dvb_ringbuffer_free(src) >= required_space) &&
+ (!dvb_dmxdev_events_is_full(events))) ||
+ (src->error != 0) ||
+ (dmxdevfilter->state != DMXDEV_STATE_GO) ||
+ dmxdevfilter->dev->dvr_in_exit);
+
+ if (ret < 0)
+ return ret;
+ } while (1);
+}
+
+static int dvb_dmxdev_set_playback_mode(struct dmxdev_filter *dmxdevfilter,
+ enum dmx_playback_mode_t playback_mode)
+{
+ struct dmxdev *dmxdev = dmxdevfilter->dev;
+ struct dmx_caps caps;
+
+ if (dmxdev->demux->get_caps)
+ dmxdev->demux->get_caps(dmxdev->demux, &caps);
+ else
+ caps.caps = 0;
+
+ if ((playback_mode != DMX_PB_MODE_PUSH) &&
+ (playback_mode != DMX_PB_MODE_PULL))
+ return -EINVAL;
+
+ if (dmxdev->demux->set_playback_mode == NULL)
+ return -EINVAL;
+
+ if (((dmxdev->source < DMX_SOURCE_DVR0) ||
+ !(caps.caps & DMX_CAP_PULL_MODE)) &&
+ (playback_mode == DMX_PB_MODE_PULL))
+ return -EPERM;
+
+ if (dmxdevfilter->state == DMXDEV_STATE_GO)
+ return -EBUSY;
+
+ dmxdev->playback_mode = playback_mode;
+
+ return dmxdev->demux->set_playback_mode(
+ dmxdev->demux,
+ dmxdev->playback_mode,
+ dvb_dmxdev_ts_fullness_callback,
+ dvb_dmxdev_sec_fullness_callback);
+}
+
+static int dvb_dmxdev_flush_buffer(struct dmxdev_filter *filter)
+{
+ size_t flush_len;
+ int ret;
+
+ if (filter->state != DMXDEV_STATE_GO)
+ return -EINVAL;
+
+ flush_len = dvb_ringbuffer_avail(&filter->buffer);
+ ret = dvb_dmxdev_flush_data(filter, flush_len);
+
+ return ret;
+}
+
+static int dvb_dmxdev_get_buffer_status(
+ struct dmxdev_filter *dmxdevfilter,
+ struct dmx_buffer_status *dmx_buffer_status)
+{
+ struct dvb_ringbuffer *buf = &dmxdevfilter->buffer;
+
+ /*
+ * Note: Taking the dmxdevfilter->dev->lock spinlock is required only
+	 * when getting the status of the Demux-userspace data ringbuffer.
+ * In case we are getting the status of a decoder buffer, taking this
+ * spinlock is not required and in fact might lead to a deadlock.
+ */
+ if ((dmxdevfilter->type == DMXDEV_TYPE_PES) &&
+ (dmxdevfilter->params.pes.output == DMX_OUT_DECODER)) {
+ struct dmxdev_feed *feed;
+ int ret;
+
+ /* Only one feed should be in the list in case of decoder */
+ feed = list_first_entry(&dmxdevfilter->feed.ts,
+ struct dmxdev_feed, next);
+
+ /* Ask for status of decoder's buffer from underlying HW */
+ if (feed->ts->get_decoder_buff_status)
+ ret = feed->ts->get_decoder_buff_status(
+ feed->ts,
+ dmx_buffer_status);
+ else
+ ret = -ENODEV;
+
+ return ret;
+ }
+
+ spin_lock_irq(&dmxdevfilter->dev->lock);
+
+ if (!buf->data) {
+ spin_unlock_irq(&dmxdevfilter->dev->lock);
+ return -EINVAL;
+ }
+
+ dmx_buffer_status->error = buf->error;
+ dmx_buffer_status->fullness = dvb_ringbuffer_avail(buf);
+ dmx_buffer_status->free_bytes = dvb_ringbuffer_free(buf);
+ dmx_buffer_status->read_offset = buf->pread;
+ dmx_buffer_status->write_offset = buf->pwrite;
+ dmx_buffer_status->size = buf->size;
+ buf->error = 0;
+
+ spin_unlock_irq(&dmxdevfilter->dev->lock);
+
+ if (dmx_buffer_status->error == -EOVERFLOW)
+ dvb_dmxdev_auto_flush_buffer(dmxdevfilter, buf);
+
+ return 0;
+}
+
+static int dvb_dmxdev_release_data(struct dmxdev_filter *dmxdevfilter,
+ u32 bytes_count)
+{
+ ssize_t buff_fullness;
+
+ if (!dmxdevfilter->buffer.data)
+ return -EINVAL;
+
+ if (!bytes_count)
+ return 0;
+
+ buff_fullness = dvb_ringbuffer_avail(&dmxdevfilter->buffer);
+
+ if (bytes_count > buff_fullness)
+ return -EINVAL;
+
+ DVB_RINGBUFFER_SKIP(&dmxdevfilter->buffer, bytes_count);
+
+ dvb_dmxdev_notify_data_read(dmxdevfilter, bytes_count);
+ spin_lock_irq(&dmxdevfilter->dev->lock);
+ dvb_dmxdev_update_events(&dmxdevfilter->events, bytes_count);
+ spin_unlock_irq(&dmxdevfilter->dev->lock);
+
+ wake_up_all(&dmxdevfilter->buffer.queue);
+
+ return 0;
+}
+
+static int dvb_dmxdev_get_event(struct dmxdev_filter *dmxdevfilter,
+ struct dmx_filter_event *event)
+{
+ int res = 0;
+
+ spin_lock_irq(&dmxdevfilter->dev->lock);
+
+ /* Check first for filter overflow */
+ if (dmxdevfilter->buffer.error == -EOVERFLOW) {
+ event->type = DMX_EVENT_BUFFER_OVERFLOW;
+ } else {
+ res = dvb_dmxdev_remove_event(&dmxdevfilter->events, event);
+ if (res) {
+ spin_unlock_irq(&dmxdevfilter->dev->lock);
+ return res;
+ }
+ }
+
+ /* clear buffer error now that user was notified */
+ if (event->type == DMX_EVENT_BUFFER_OVERFLOW ||
+ event->type == DMX_EVENT_SECTION_TIMEOUT)
+ dmxdevfilter->buffer.error = 0;
+
+ spin_unlock_irq(&dmxdevfilter->dev->lock);
+
+ if (event->type == DMX_EVENT_BUFFER_OVERFLOW)
+ dvb_dmxdev_auto_flush_buffer(dmxdevfilter,
+ &dmxdevfilter->buffer);
+
+ spin_lock_irq(&dmxdevfilter->dev->lock);
+
+ /*
+ * If no-data events are enabled on this filter,
+ * the events can be removed from the queue when
+ * user gets them.
+ * For filters with data events enabled, the event is removed
+ * from the queue only when the respective data is read.
+ */
+ if (event->type != DMX_EVENT_BUFFER_OVERFLOW &&
+ dmxdevfilter->events.data_read_event_masked)
+ dmxdevfilter->events.read_index =
+ dvb_dmxdev_advance_event_idx(
+ dmxdevfilter->events.read_index);
+
+ spin_unlock_irq(&dmxdevfilter->dev->lock);
+
+ /*
+ * in PULL mode, we might be stalling on
+ * event queue, so need to wake-up waiters
+ */
+ if (dmxdevfilter->dev->playback_mode == DMX_PB_MODE_PULL)
+ wake_up_all(&dmxdevfilter->buffer.queue);
+
+ return res;
+}
+
static void dvb_dmxdev_filter_timeout(unsigned long data)
{
struct dmxdev_filter *dmxdevfilter = (struct dmxdev_filter *)data;
+ struct dmx_filter_event event;
dmxdevfilter->buffer.error = -ETIMEDOUT;
spin_lock_irq(&dmxdevfilter->dev->lock);
dmxdevfilter->state = DMXDEV_STATE_TIMEDOUT;
+ event.type = DMX_EVENT_SECTION_TIMEOUT;
+ dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
spin_unlock_irq(&dmxdevfilter->dev->lock);
- wake_up(&dmxdevfilter->buffer.queue);
+ wake_up_all(&dmxdevfilter->buffer.queue);
}
static void dvb_dmxdev_filter_timer(struct dmxdev_filter *dmxdevfilter)
@@ -355,31 +2609,77 @@
struct dmx_section_filter *filter)
{
struct dmxdev_filter *dmxdevfilter = filter->priv;
- int ret;
+ struct dmx_filter_event event;
+ ssize_t free;
- if (dmxdevfilter->buffer.error) {
- wake_up(&dmxdevfilter->buffer.queue);
- return 0;
+
+ if (!dmxdevfilter) {
+ pr_err("%s: null filter.\n", __func__);
+ return -EINVAL;
}
+
spin_lock(&dmxdevfilter->dev->lock);
- if (dmxdevfilter->state != DMXDEV_STATE_GO) {
+
+ if (dmxdevfilter->buffer.error ||
+ dmxdevfilter->state != DMXDEV_STATE_GO ||
+ dmxdevfilter->eos_state) {
spin_unlock(&dmxdevfilter->dev->lock);
return 0;
}
- del_timer(&dmxdevfilter->timer);
- dprintk("dmxdev: section callback %*ph\n", 6, buffer1);
- ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer1,
- buffer1_len);
- if (ret == buffer1_len) {
- ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer2,
- buffer2_len);
+
+ /* Discard section data if event cannot be notified */
+ if (!(dmxdevfilter->events.event_mask.disable_mask &
+ DMX_EVENT_NEW_SECTION) &&
+ dvb_dmxdev_events_is_full(&dmxdevfilter->events)) {
+ spin_unlock(&dmxdevfilter->dev->lock);
+ return 0;
}
- if (ret < 0)
- dmxdevfilter->buffer.error = ret;
+
+ if ((buffer1_len + buffer2_len) == 0) {
+ if (buffer1 == NULL && buffer2 == NULL) {
+ /* Section was dropped due to CRC error */
+ event.type = DMX_EVENT_SECTION_CRC_ERROR;
+ dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
+
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&dmxdevfilter->buffer.queue);
+ } else {
+ spin_unlock(&dmxdevfilter->dev->lock);
+ }
+
+ return 0;
+ }
+
+ event.params.section.base_offset = dmxdevfilter->buffer.pwrite;
+ event.params.section.start_offset = dmxdevfilter->buffer.pwrite;
+
+ del_timer(&dmxdevfilter->timer);
+
+ /* Verify output buffer has sufficient space, or report overflow */
+ free = dvb_ringbuffer_free(&dmxdevfilter->buffer);
+ if (free < (buffer1_len + buffer2_len)) {
+ pr_debug("%s: section filter overflow (pid=%u)\n",
+ __func__, dmxdevfilter->params.sec.pid);
+ dmxdevfilter->buffer.error = -EOVERFLOW;
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&dmxdevfilter->buffer.queue);
+ return 0;
+ }
+
+ dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer1, buffer1_len);
+ dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer2, buffer2_len);
+
+ event.type = DMX_EVENT_NEW_SECTION;
+ event.params.section.total_length = buffer1_len + buffer2_len;
+ event.params.section.actual_length =
+ event.params.section.total_length;
+
+ dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
+
if (dmxdevfilter->params.sec.flags & DMX_ONESHOT)
dmxdevfilter->state = DMXDEV_STATE_DONE;
spin_unlock(&dmxdevfilter->dev->lock);
- wake_up(&dmxdevfilter->buffer.queue);
+ wake_up_all(&dmxdevfilter->buffer.queue);
return 0;
}
@@ -389,31 +2689,401 @@
{
struct dmxdev_filter *dmxdevfilter = feed->priv;
struct dvb_ringbuffer *buffer;
- int ret;
+ struct dmxdev_events_queue *events;
+ struct dmx_filter_event event;
+ ssize_t free;
+ if (!dmxdevfilter) {
+ pr_err("%s: null filter (feed->is_filtering=%d)\n",
+ __func__, feed->is_filtering);
+ return -EINVAL;
+ }
spin_lock(&dmxdevfilter->dev->lock);
- if (dmxdevfilter->params.pes.output == DMX_OUT_DECODER) {
+
+ if (dmxdevfilter->params.pes.output == DMX_OUT_DECODER ||
+ dmxdevfilter->state != DMXDEV_STATE_GO ||
+ dmxdevfilter->eos_state) {
spin_unlock(&dmxdevfilter->dev->lock);
return 0;
}
- if (dmxdevfilter->params.pes.output == DMX_OUT_TAP
- || dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP)
+ if (dmxdevfilter->params.pes.output != DMX_OUT_TS_TAP) {
buffer = &dmxdevfilter->buffer;
- else
+ events = &dmxdevfilter->events;
+ } else {
buffer = &dmxdevfilter->dev->dvr_buffer;
+ events = &dmxdevfilter->dev->dvr_output_events;
+ }
+
if (buffer->error) {
spin_unlock(&dmxdevfilter->dev->lock);
- wake_up(&buffer->queue);
+ wake_up_all(&buffer->queue);
+ return buffer->error;
+ }
+
+ if (!events->current_event_data_size)
+ events->current_event_start_offset = buffer->pwrite;
+
+ /* Verify output buffer has sufficient space, or report overflow */
+ free = dvb_ringbuffer_free(buffer);
+ if (free < (buffer1_len + buffer2_len)) {
+ pr_debug("%s: buffer overflow error, pid=%u\n",
+ __func__, dmxdevfilter->params.pes.pid);
+ buffer->error = -EOVERFLOW;
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&buffer->queue);
+
+ return -EOVERFLOW;
+ }
+
+ if (buffer1_len + buffer2_len) {
+ dvb_dmxdev_buffer_write(buffer, buffer1, buffer1_len);
+ dvb_dmxdev_buffer_write(buffer, buffer2, buffer2_len);
+
+ events->current_event_data_size += (buffer1_len + buffer2_len);
+
+ if ((dmxdevfilter->params.pes.output == DMX_OUT_TS_TAP ||
+ dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP)
+ && events->current_event_data_size >=
+ dmxdevfilter->params.pes.rec_chunk_size) {
+ event.type = DMX_EVENT_NEW_REC_CHUNK;
+ event.params.recording_chunk.offset =
+ events->current_event_start_offset;
+ event.params.recording_chunk.size =
+ events->current_event_data_size;
+
+ dvb_dmxdev_add_event(events, &event);
+ events->current_event_data_size = 0;
+ }
+ }
+
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&buffer->queue);
+ return 0;
+}
+
+static int dvb_dmxdev_section_event_cb(struct dmx_section_filter *filter,
+ struct dmx_data_ready *dmx_data_ready)
+{
+ int res = 0;
+ struct dmxdev_filter *dmxdevfilter = filter->priv;
+ struct dmx_filter_event event;
+ ssize_t free;
+
+ if (!dmxdevfilter) {
+ pr_err("%s: null filter. event type=%d (length=%d) will be discarded\n",
+ __func__, dmx_data_ready->status,
+ dmx_data_ready->data_length);
+ return -EINVAL;
+ }
+
+ spin_lock(&dmxdevfilter->dev->lock);
+
+ if (dmxdevfilter->buffer.error == -ETIMEDOUT ||
+ dmxdevfilter->state != DMXDEV_STATE_GO ||
+ dmxdevfilter->eos_state) {
+ spin_unlock(&dmxdevfilter->dev->lock);
return 0;
}
- ret = dvb_dmxdev_buffer_write(buffer, buffer1, buffer1_len);
- if (ret == buffer1_len)
- ret = dvb_dmxdev_buffer_write(buffer, buffer2, buffer2_len);
- if (ret < 0)
- buffer->error = ret;
+
+ if (dmx_data_ready->data_length == 0) {
+ if (dmx_data_ready->status == DMX_CRC_ERROR) {
+ /* Section was dropped due to CRC error */
+ event.type = DMX_EVENT_SECTION_CRC_ERROR;
+ dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
+
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&dmxdevfilter->buffer.queue);
+ } else if (dmx_data_ready->status == DMX_OK_EOS) {
+ event.type = DMX_EVENT_EOS;
+ dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&dmxdevfilter->buffer.queue);
+ } else if (dmx_data_ready->status == DMX_OK_MARKER) {
+ event.type = DMX_EVENT_MARKER;
+ event.params.marker.id = dmx_data_ready->marker.id;
+ dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&dmxdevfilter->buffer.queue);
+ } else if (dmx_data_ready->status == DMX_OK_SCRAMBLING_STATUS) {
+ event.type = DMX_EVENT_SCRAMBLING_STATUS_CHANGE;
+ event.params.scrambling_status =
+ dmx_data_ready->scrambling_bits;
+ dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&dmxdevfilter->buffer.queue);
+ } else if (dmx_data_ready->status == DMX_OVERRUN_ERROR) {
+ pr_debug("dmxdev: section filter overflow (pid=%u)\n",
+ dmxdevfilter->params.sec.pid);
+ /* Set buffer error to notify user overflow occurred */
+ dmxdevfilter->buffer.error = -EOVERFLOW;
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&dmxdevfilter->buffer.queue);
+ } else {
+ spin_unlock(&dmxdevfilter->dev->lock);
+ }
+ return 0;
+ }
+
+ event.type = DMX_EVENT_NEW_SECTION;
+ event.params.section.base_offset = dmxdevfilter->buffer.pwrite;
+ event.params.section.start_offset = dmxdevfilter->buffer.pwrite;
+ event.params.section.total_length = dmx_data_ready->data_length;
+ event.params.section.actual_length = dmx_data_ready->data_length;
+
+ if (dmx_data_ready->status == DMX_MISSED_ERROR)
+ event.params.section.flags = DMX_FILTER_CC_ERROR;
+ else
+ event.params.section.flags = 0;
+
+ free = dvb_ringbuffer_free(&dmxdevfilter->buffer);
+ if (free < dmx_data_ready->data_length) {
+ pr_err("%s: invalid data length: data_length=%d > free=%zd\n",
+ __func__, dmx_data_ready->data_length, free);
+ } else {
+ res = dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
+ DVB_RINGBUFFER_PUSH(&dmxdevfilter->buffer,
+ dmx_data_ready->data_length);
+ }
+
spin_unlock(&dmxdevfilter->dev->lock);
- wake_up(&buffer->queue);
+ wake_up_all(&dmxdevfilter->buffer.queue);
+
+ return res;
+}
+
+static int dvb_dmxdev_ts_event_cb(struct dmx_ts_feed *feed,
+ struct dmx_data_ready *dmx_data_ready)
+{
+ struct dmxdev_filter *dmxdevfilter = feed->priv;
+ struct dvb_ringbuffer *buffer;
+ struct dmxdev_events_queue *events;
+ struct dmx_filter_event event;
+ ssize_t free;
+
+ if (!dmxdevfilter) {
+ pr_err("%s: null filter (feed->is_filtering=%d) event type=%d (length=%d) will be discarded\n",
+ __func__, feed->is_filtering,
+ dmx_data_ready->status,
+ dmx_data_ready->data_length);
+ return -EINVAL;
+ }
+
+ spin_lock(&dmxdevfilter->dev->lock);
+
+ if (dmxdevfilter->state != DMXDEV_STATE_GO ||
+ dmxdevfilter->eos_state) {
+ spin_unlock(&dmxdevfilter->dev->lock);
+ return 0;
+ }
+
+ if (dmxdevfilter->params.pes.output != DMX_OUT_TS_TAP) {
+ buffer = &dmxdevfilter->buffer;
+ events = &dmxdevfilter->events;
+ } else {
+ buffer = &dmxdevfilter->dev->dvr_buffer;
+ events = &dmxdevfilter->dev->dvr_output_events;
+ }
+
+ if (!buffer->error && dmx_data_ready->status == DMX_OVERRUN_ERROR) {
+ pr_debug("dmxdev: %s filter buffer overflow (pid=%u)\n",
+ dmxdevfilter->params.pes.output == DMX_OUT_DECODER ?
+ "decoder" : "",
+ dmxdevfilter->params.pes.pid);
+ /* Set buffer error to notify user overflow occurred */
+ buffer->error = -EOVERFLOW;
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&buffer->queue);
+ return 0;
+ }
+
+ if (dmx_data_ready->status == DMX_OK_EOS) {
+ /* Report partial recording chunk */
+ if ((dmxdevfilter->params.pes.output == DMX_OUT_TS_TAP ||
+ dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP)
+ && events->current_event_data_size) {
+ event.type = DMX_EVENT_NEW_REC_CHUNK;
+ event.params.recording_chunk.offset =
+ events->current_event_start_offset;
+ event.params.recording_chunk.size =
+ events->current_event_data_size;
+ events->current_event_start_offset =
+ (events->current_event_start_offset +
+ events->current_event_data_size) %
+ buffer->size;
+ events->current_event_data_size = 0;
+ dvb_dmxdev_add_event(events, &event);
+ }
+
+ dmxdevfilter->eos_state = 1;
+ pr_debug("dmxdev: DMX_OK_EOS - entering EOS state\n");
+ event.type = DMX_EVENT_EOS;
+ dvb_dmxdev_add_event(events, &event);
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&buffer->queue);
+ return 0;
+ }
+
+ if (dmx_data_ready->status == DMX_OK_MARKER) {
+ pr_debug("dmxdev: DMX_OK_MARKER - id=%llu\n",
+ dmx_data_ready->marker.id);
+ event.type = DMX_EVENT_MARKER;
+ event.params.marker.id = dmx_data_ready->marker.id;
+ dvb_dmxdev_add_event(events, &event);
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&buffer->queue);
+ return 0;
+ }
+
+ if (dmx_data_ready->status == DMX_OK_PCR) {
+ pr_debug("dmxdev: event callback DMX_OK_PCR\n");
+ event.type = DMX_EVENT_NEW_PCR;
+ event.params.pcr.pcr = dmx_data_ready->pcr.pcr;
+ event.params.pcr.stc = dmx_data_ready->pcr.stc;
+ if (dmx_data_ready->pcr.disc_indicator_set)
+ event.params.pcr.flags =
+ DMX_FILTER_DISCONTINUITY_INDICATOR;
+ else
+ event.params.pcr.flags = 0;
+
+ dvb_dmxdev_add_event(events, &event);
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&buffer->queue);
+ return 0;
+ }
+
+ if (dmx_data_ready->status == DMX_OK_IDX) {
+ pr_debug("dmxdev: event callback DMX_OK_IDX\n");
+ event.type = DMX_EVENT_NEW_INDEX_ENTRY;
+ event.params.index = dmx_data_ready->idx_event;
+
+ dvb_dmxdev_add_event(events, &event);
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&buffer->queue);
+ return 0;
+ }
+
+ if (dmx_data_ready->status == DMX_OK_SCRAMBLING_STATUS) {
+ event.type = DMX_EVENT_SCRAMBLING_STATUS_CHANGE;
+ event.params.scrambling_status =
+ dmx_data_ready->scrambling_bits;
+ dvb_dmxdev_add_event(events, &event);
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&buffer->queue);
+ return 0;
+ }
+
+ if (dmx_data_ready->status == DMX_OK_DECODER_BUF) {
+ event.type = DMX_EVENT_NEW_ES_DATA;
+ event.params.es_data.buf_handle = dmx_data_ready->buf.handle;
+ event.params.es_data.cookie = dmx_data_ready->buf.cookie;
+ event.params.es_data.offset = dmx_data_ready->buf.offset;
+ event.params.es_data.data_len = dmx_data_ready->buf.len;
+ event.params.es_data.pts_valid = dmx_data_ready->buf.pts_exists;
+ event.params.es_data.pts = dmx_data_ready->buf.pts;
+ event.params.es_data.dts_valid = dmx_data_ready->buf.dts_exists;
+ event.params.es_data.dts = dmx_data_ready->buf.dts;
+ event.params.es_data.stc = dmx_data_ready->buf.stc;
+ event.params.es_data.transport_error_indicator_counter =
+ dmx_data_ready->buf.tei_counter;
+ event.params.es_data.continuity_error_counter =
+ dmx_data_ready->buf.cont_err_counter;
+ event.params.es_data.ts_packets_num =
+ dmx_data_ready->buf.ts_packets_num;
+ event.params.es_data.ts_dropped_bytes =
+ dmx_data_ready->buf.ts_dropped_bytes;
+ dvb_dmxdev_add_event(events, &event);
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&buffer->queue);
+ return 0;
+ }
+
+ if (dmxdevfilter->params.pes.output == DMX_OUT_DECODER) {
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&buffer->queue);
+ return 0;
+ }
+
+ free = dvb_ringbuffer_free(buffer);
+ if (free < dmx_data_ready->data_length) {
+ pr_err("%s: invalid data length: data_length=%d > free=%zd\n",
+ __func__, dmx_data_ready->data_length, free);
+
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&buffer->queue);
+ return 0;
+ }
+
+ if (dmxdevfilter->params.pes.output == DMX_OUT_TAP) {
+ if (dmx_data_ready->status == DMX_OK &&
+ !events->current_event_data_size) {
+ events->current_event_start_offset = buffer->pwrite;
+ } else if (dmx_data_ready->status == DMX_OK_PES_END) {
+ event.type = DMX_EVENT_NEW_PES;
+
+ event.params.pes.base_offset =
+ events->current_event_start_offset;
+ event.params.pes.start_offset =
+ (events->current_event_start_offset +
+ dmx_data_ready->pes_end.start_gap) %
+ buffer->size;
+
+ event.params.pes.actual_length =
+ dmx_data_ready->pes_end.actual_length;
+ event.params.pes.total_length =
+ events->current_event_data_size;
+
+ event.params.pes.flags = 0;
+ if (dmx_data_ready->pes_end.disc_indicator_set)
+ event.params.pes.flags |=
+ DMX_FILTER_DISCONTINUITY_INDICATOR;
+ if (dmx_data_ready->pes_end.pes_length_mismatch)
+ event.params.pes.flags |=
+ DMX_FILTER_PES_LENGTH_ERROR;
+
+ event.params.pes.stc = dmx_data_ready->pes_end.stc;
+ event.params.pes.transport_error_indicator_counter =
+ dmx_data_ready->pes_end.tei_counter;
+ event.params.pes.continuity_error_counter =
+ dmx_data_ready->pes_end.cont_err_counter;
+ event.params.pes.ts_packets_num =
+ dmx_data_ready->pes_end.ts_packets_num;
+
+ /* Do not report zero length PES */
+ if (event.params.pes.total_length)
+ dvb_dmxdev_add_event(events, &event);
+
+ events->current_event_data_size = 0;
+ }
+ } else if (!events->current_event_data_size) {
+ events->current_event_start_offset = buffer->pwrite;
+ }
+
+ events->current_event_data_size += dmx_data_ready->data_length;
+ DVB_RINGBUFFER_PUSH(buffer, dmx_data_ready->data_length);
+
+ if ((dmxdevfilter->params.pes.output == DMX_OUT_TS_TAP) ||
+ (dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP)) {
+ while (events->current_event_data_size >=
+ dmxdevfilter->params.pes.rec_chunk_size) {
+ event.type = DMX_EVENT_NEW_REC_CHUNK;
+ event.params.recording_chunk.offset =
+ events->current_event_start_offset;
+ event.params.recording_chunk.size =
+ dmxdevfilter->params.pes.rec_chunk_size;
+ events->current_event_data_size =
+ events->current_event_data_size -
+ dmxdevfilter->params.pes.rec_chunk_size;
+ events->current_event_start_offset =
+ (events->current_event_start_offset +
+ dmxdevfilter->params.pes.rec_chunk_size) %
+ buffer->size;
+
+ dvb_dmxdev_add_event(events, &event);
+ }
+ }
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up_all(&buffer->queue);
return 0;
}
@@ -427,11 +3097,18 @@
switch (dmxdevfilter->type) {
case DMXDEV_TYPE_SEC:
del_timer(&dmxdevfilter->timer);
- dmxdevfilter->feed.sec->stop_filtering(dmxdevfilter->feed.sec);
+ dmxdevfilter->feed.sec.feed->stop_filtering(
+ dmxdevfilter->feed.sec.feed);
break;
case DMXDEV_TYPE_PES:
- list_for_each_entry(feed, &dmxdevfilter->feed.ts, next)
+ list_for_each_entry(feed, &dmxdevfilter->feed.ts, next) {
+ if (dmxdevfilter->params.pes.output == DMX_OUT_TS_TAP) {
+ dmxdevfilter->dev->dvr_feeds_count--;
+ if (!dmxdevfilter->dev->dvr_feeds_count)
+ dmxdevfilter->dev->dvr_feed = NULL;
+ }
feed->ts->stop_filtering(feed->ts);
+ }
break;
default:
return -EINVAL;
@@ -449,7 +3126,8 @@
switch (filter->type) {
case DMXDEV_TYPE_SEC:
- return filter->feed.sec->start_filtering(filter->feed.sec);
+ return filter->feed.sec.feed->start_filtering(
+ filter->feed.sec.feed);
case DMXDEV_TYPE_PES:
list_for_each_entry(feed, &filter->feed.ts, next) {
ret = feed->ts->start_filtering(feed->ts);
@@ -483,7 +3161,7 @@
}
filter->dev->demux->release_section_feed(dmxdev->demux,
- filter->feed.sec);
+ filter->feed.sec.feed);
return 0;
}
@@ -492,25 +3170,38 @@
{
struct dmxdev_feed *feed;
struct dmx_demux *demux;
+ struct ts_insertion_buffer *ts_buffer;
if (dmxdevfilter->state < DMXDEV_STATE_GO)
return 0;
switch (dmxdevfilter->type) {
case DMXDEV_TYPE_SEC:
- if (!dmxdevfilter->feed.sec)
+ if (!dmxdevfilter->feed.sec.feed)
break;
dvb_dmxdev_feed_stop(dmxdevfilter);
if (dmxdevfilter->filter.sec)
- dmxdevfilter->feed.sec->
- release_filter(dmxdevfilter->feed.sec,
+ dmxdevfilter->feed.sec.feed->
+ release_filter(dmxdevfilter->feed.sec.feed,
dmxdevfilter->filter.sec);
dvb_dmxdev_feed_restart(dmxdevfilter);
- dmxdevfilter->feed.sec = NULL;
+ dmxdevfilter->feed.sec.feed = NULL;
break;
case DMXDEV_TYPE_PES:
dvb_dmxdev_feed_stop(dmxdevfilter);
demux = dmxdevfilter->dev->demux;
+
+ if (!list_empty(&dmxdevfilter->insertion_buffers)) {
+ feed = list_first_entry(&dmxdevfilter->feed.ts,
+ struct dmxdev_feed, next);
+
+ list_for_each_entry(ts_buffer,
+ &dmxdevfilter->insertion_buffers, next)
+ dvb_dmxdev_cancel_ts_insertion(ts_buffer);
+ if (feed->ts->ts_insertion_terminate)
+ feed->ts->ts_insertion_terminate(feed->ts);
+ }
+
list_for_each_entry(feed, &dmxdevfilter->feed.ts, next) {
demux->release_ts_feed(demux, feed->ts);
feed->ts = NULL;
@@ -522,7 +3213,13 @@
return -EINVAL;
}
- dvb_ringbuffer_flush(&dmxdevfilter->buffer);
+ spin_lock_irq(&dmxdevfilter->dev->lock);
+ dvb_dmxdev_flush_output(&dmxdevfilter->buffer, &dmxdevfilter->events);
+ dvb_ringbuffer_reset(&dmxdevfilter->buffer);
+ spin_unlock_irq(&dmxdevfilter->dev->lock);
+
+ wake_up_all(&dmxdevfilter->buffer.queue);
+
return 0;
}
@@ -589,12 +3286,76 @@
tsfeed = feed->ts;
tsfeed->priv = filter;
- ret = tsfeed->set(tsfeed, feed->pid, ts_type, ts_pes, 32768, timeout);
+ if (filter->params.pes.output == DMX_OUT_TS_TAP) {
+ tsfeed->buffer.ringbuff = &dmxdev->dvr_buffer;
+ tsfeed->buffer.priv_handle = dmxdev->dvr_priv_buff_handle;
+ if (!dmxdev->dvr_feeds_count)
+ dmxdev->dvr_feed = filter;
+ dmxdev->dvr_feeds_count++;
+ } else if (filter->params.pes.output == DMX_OUT_DECODER) {
+ tsfeed->buffer.ringbuff = &filter->buffer;
+ tsfeed->decoder_buffers = &filter->decoder_buffers;
+ tsfeed->buffer.priv_handle = filter->priv_buff_handle;
+ } else {
+ tsfeed->buffer.ringbuff = &filter->buffer;
+ tsfeed->buffer.priv_handle = filter->priv_buff_handle;
+ }
+
+ if (tsfeed->data_ready_cb) {
+ ret = tsfeed->data_ready_cb(tsfeed, dvb_dmxdev_ts_event_cb);
+
+ if (ret < 0) {
+ dmxdev->demux->release_ts_feed(dmxdev->demux, tsfeed);
+ return ret;
+ }
+ }
+
+ ret = tsfeed->set(tsfeed, feed->pid,
+ ts_type, ts_pes,
+ filter->decoder_buffers.buffers_size,
+ timeout);
if (ret < 0) {
dmxdev->demux->release_ts_feed(dmxdev->demux, tsfeed);
return ret;
}
+ if (tsfeed->set_tsp_out_format)
+ tsfeed->set_tsp_out_format(tsfeed, filter->dmx_tsp_format);
+
+ if (tsfeed->set_secure_mode)
+ tsfeed->set_secure_mode(tsfeed, &filter->sec_mode);
+
+ if (tsfeed->set_cipher_ops)
+ tsfeed->set_cipher_ops(tsfeed, &feed->cipher_ops);
+
+ if ((para->pes_type == DMX_PES_VIDEO0) ||
+ (para->pes_type == DMX_PES_VIDEO1) ||
+ (para->pes_type == DMX_PES_VIDEO2) ||
+ (para->pes_type == DMX_PES_VIDEO3)) {
+ if (tsfeed->set_video_codec) {
+ ret = tsfeed->set_video_codec(tsfeed,
+ para->video_codec);
+
+ if (ret < 0) {
+ dmxdev->demux->release_ts_feed(dmxdev->demux,
+ tsfeed);
+ return ret;
+ }
+ }
+ }
+
+ if ((filter->params.pes.output == DMX_OUT_TS_TAP) ||
+ (filter->params.pes.output == DMX_OUT_TSDEMUX_TAP))
+ if (tsfeed->set_idx_params) {
+ ret = tsfeed->set_idx_params(
+ tsfeed, &feed->idx_params);
+ if (ret) {
+ dmxdev->demux->release_ts_feed(dmxdev->demux,
+ tsfeed);
+ return ret;
+ }
+ }
+
ret = tsfeed->start_filtering(tsfeed);
if (ret < 0) {
dmxdev->demux->release_ts_feed(dmxdev->demux, tsfeed);
@@ -604,12 +3365,50 @@
return 0;
}
+static int dvb_filter_external_buffer_only(struct dmxdev *dmxdev,
+ struct dmxdev_filter *filter)
+{
+ struct dmx_caps caps;
+ int is_external_only;
+ int flags;
+
+ /*
+ * For backward compatibility, default assumes that
+ * external only buffers are not supported.
+ */
+ flags = 0;
+ if (dmxdev->demux->get_caps) {
+ dmxdev->demux->get_caps(dmxdev->demux, &caps);
+
+ if (filter->type == DMXDEV_TYPE_SEC)
+ flags = caps.section.flags;
+ else if (filter->params.pes.output == DMX_OUT_DECODER)
+ /* For decoder filters dmxdev buffer is not required */
+ flags = 0;
+ else if (filter->params.pes.output == DMX_OUT_TAP)
+ flags = caps.pes.flags;
+ else if (filter->dmx_tsp_format == DMX_TSP_FORMAT_188)
+ flags = caps.recording_188_tsp.flags;
+ else
+ flags = caps.recording_192_tsp.flags;
+ }
+
+ if (!(flags & DMX_BUFFER_INTERNAL_SUPPORT) &&
+ (flags & DMX_BUFFER_EXTERNAL_SUPPORT))
+ is_external_only = 1;
+ else
+ is_external_only = 0;
+
+ return is_external_only;
+}
+
static int dvb_dmxdev_filter_start(struct dmxdev_filter *filter)
{
struct dmxdev *dmxdev = filter->dev;
struct dmxdev_feed *feed;
void *mem;
int ret, i;
+ size_t tsp_size;
if (filter->state < DMXDEV_STATE_SET)
return -EINVAL;
@@ -617,34 +3416,64 @@
if (filter->state >= DMXDEV_STATE_GO)
dvb_dmxdev_filter_stop(filter);
+ if (!dvb_filter_verify_buffer_size(filter))
+ return -EINVAL;
+
if (!filter->buffer.data) {
- mem = vmalloc(filter->buffer.size);
+ /*
+ * dmxdev buffer in decoder filters is not really used
+ * to exchange data with applications. Decoder buffers
+ * can be set using DMX_SET_DECODER_BUFFER, which
+ * would not update the filter->buffer.data at all.
+ * Therefore we should not treat this filter as
+ * other regular filters and should not fail here
+ * even if user sets the buffer in deocder
+ * filter as external buffer.
+ */
+ if (filter->type == DMXDEV_TYPE_PES &&
+ (filter->params.pes.output == DMX_OUT_DECODER ||
+ filter->params.pes.output == DMX_OUT_TS_TAP))
+ filter->buffer_mode = DMX_BUFFER_MODE_INTERNAL;
+
+ if (!(filter->type == DMXDEV_TYPE_PES &&
+ filter->params.pes.output == DMX_OUT_TS_TAP) &&
+ (filter->buffer_mode == DMX_BUFFER_MODE_EXTERNAL ||
+ dvb_filter_external_buffer_only(dmxdev, filter)))
+ return -ENOMEM;
+
+ mem = vmalloc_user(filter->buffer.size);
if (!mem)
return -ENOMEM;
spin_lock_irq(&filter->dev->lock);
filter->buffer.data = mem;
spin_unlock_irq(&filter->dev->lock);
+ } else if ((filter->buffer_mode == DMX_BUFFER_MODE_INTERNAL) &&
+ dvb_filter_external_buffer_only(dmxdev, filter)) {
+ return -ENOMEM;
}
- dvb_ringbuffer_flush(&filter->buffer);
+ filter->eos_state = 0;
+
+ spin_lock_irq(&filter->dev->lock);
+ dvb_dmxdev_flush_output(&filter->buffer, &filter->events);
+ spin_unlock_irq(&filter->dev->lock);
switch (filter->type) {
case DMXDEV_TYPE_SEC:
{
struct dmx_sct_filter_params *para = &filter->params.sec;
struct dmx_section_filter **secfilter = &filter->filter.sec;
- struct dmx_section_feed **secfeed = &filter->feed.sec;
+ struct dmx_section_feed **secfeed = &filter->feed.sec.feed;
*secfilter = NULL;
*secfeed = NULL;
-
/* find active filter/feed with same PID */
for (i = 0; i < dmxdev->filternum; i++) {
if (dmxdev->filter[i].state >= DMXDEV_STATE_GO &&
dmxdev->filter[i].type == DMXDEV_TYPE_SEC &&
dmxdev->filter[i].params.sec.pid == para->pid) {
- *secfeed = dmxdev->filter[i].feed.sec;
+ *secfeed = dmxdev->filter[i].feed.sec.feed;
break;
}
}
@@ -652,22 +3481,44 @@
/* if no feed found, try to allocate new one */
if (!*secfeed) {
ret = dmxdev->demux->allocate_section_feed(dmxdev->demux,
- secfeed,
- dvb_dmxdev_section_callback);
+ secfeed,
+ dvb_dmxdev_section_callback);
if (ret < 0) {
- printk("DVB (%s): could not alloc feed\n",
+ pr_err("DVB (%s): could not alloc feed\n",
__func__);
return ret;
}
+ if ((*secfeed)->data_ready_cb) {
+ ret = (*secfeed)->data_ready_cb(
+ *secfeed,
+ dvb_dmxdev_section_event_cb);
+
+ if (ret < 0) {
+ pr_err(
+ "DVB (%s): could not set event cb\n",
+ __func__);
+ dvb_dmxdev_feed_restart(filter);
+ return ret;
+ }
+ }
+
ret = (*secfeed)->set(*secfeed, para->pid, 32768,
(para->flags & DMX_CHECK_CRC) ? 1 : 0);
if (ret < 0) {
- printk("DVB (%s): could not set feed\n",
- __func__);
+ pr_err("DVB (%s): could not set feed\n",
+ __func__);
dvb_dmxdev_feed_restart(filter);
return ret;
}
+
+ if ((*secfeed)->set_secure_mode)
+ (*secfeed)->set_secure_mode(*secfeed,
+ &filter->sec_mode);
+
+ if ((*secfeed)->set_cipher_ops)
+ (*secfeed)->set_cipher_ops(*secfeed,
+ &filter->feed.sec.cipher_ops);
} else {
dvb_dmxdev_feed_stop(filter);
}
@@ -675,12 +3526,14 @@
ret = (*secfeed)->allocate_filter(*secfeed, secfilter);
if (ret < 0) {
dvb_dmxdev_feed_restart(filter);
- filter->feed.sec->start_filtering(*secfeed);
- dprintk("could not get filter\n");
+ filter->feed.sec.feed->start_filtering(*secfeed);
+ pr_debug("could not get filter\n");
return ret;
}
(*secfilter)->priv = filter;
+ (*secfilter)->buffer.ringbuff = &filter->buffer;
+ (*secfilter)->buffer.priv_handle = filter->priv_buff_handle;
memcpy(&((*secfilter)->filter_value[3]),
&(para->filter.filter[1]), DMX_FILTER_SIZE - 1);
@@ -696,8 +3549,12 @@
(*secfilter)->filter_mask[2] = 0;
filter->todo = 0;
+ filter->events.data_read_event_masked =
+ filter->events.event_mask.disable_mask &
+ DMX_EVENT_NEW_SECTION;
- ret = filter->feed.sec->start_filtering(filter->feed.sec);
+ ret = filter->feed.sec.feed->start_filtering(
+ filter->feed.sec.feed);
if (ret < 0)
return ret;
@@ -705,19 +3562,93 @@
break;
}
case DMXDEV_TYPE_PES:
+ if (filter->params.pes.rec_chunk_size <
+ DMX_REC_BUFF_CHUNK_MIN_SIZE)
+ filter->params.pes.rec_chunk_size =
+ DMX_REC_BUFF_CHUNK_MIN_SIZE;
+
+ if (filter->params.pes.rec_chunk_size >=
+ filter->buffer.size)
+ filter->params.pes.rec_chunk_size =
+ filter->buffer.size >> 2;
+
+ /* Align rec-chunk based on output format */
+ if (filter->dmx_tsp_format == DMX_TSP_FORMAT_188)
+ tsp_size = 188;
+ else
+ tsp_size = 192;
+
+ filter->params.pes.rec_chunk_size /= tsp_size;
+ filter->params.pes.rec_chunk_size *= tsp_size;
+
+ if (filter->params.pes.output == DMX_OUT_TS_TAP)
+ dmxdev->dvr_output_events.data_read_event_masked =
+ dmxdev->dvr_output_events.event_mask.disable_mask &
+ DMX_EVENT_NEW_REC_CHUNK;
+ else if (filter->params.pes.output == DMX_OUT_TSDEMUX_TAP)
+ filter->events.data_read_event_masked =
+ filter->events.event_mask.disable_mask &
+ DMX_EVENT_NEW_REC_CHUNK;
+ else if (filter->params.pes.output == DMX_OUT_TAP)
+ filter->events.data_read_event_masked =
+ filter->events.event_mask.disable_mask &
+ DMX_EVENT_NEW_PES;
+ else
+ filter->events.data_read_event_masked = 1;
+
+ ret = 0;
list_for_each_entry(feed, &filter->feed.ts, next) {
ret = dvb_dmxdev_start_feed(dmxdev, filter, feed);
- if (ret < 0) {
- dvb_dmxdev_filter_stop(filter);
- return ret;
+ if (ret)
+ break;
+ }
+
+ if (!ret)
+ break;
+
+ /* cleanup feeds that were started before the failure */
+ list_for_each_entry(feed, &filter->feed.ts, next) {
+ if (!feed->ts)
+ continue;
+ feed->ts->stop_filtering(feed->ts);
+ dmxdev->demux->release_ts_feed(dmxdev->demux, feed->ts);
+ feed->ts = NULL;
+
+ if (filter->params.pes.output == DMX_OUT_TS_TAP) {
+ filter->dev->dvr_feeds_count--;
+ if (!filter->dev->dvr_feeds_count)
+ filter->dev->dvr_feed = NULL;
}
}
- break;
+ return ret;
+
default:
return -EINVAL;
}
dvb_dmxdev_filter_state_set(filter, DMXDEV_STATE_GO);
+
+ if ((filter->type == DMXDEV_TYPE_PES) &&
+ !list_empty(&filter->insertion_buffers)) {
+ struct ts_insertion_buffer *ts_buffer;
+
+ feed = list_first_entry(&filter->feed.ts,
+ struct dmxdev_feed, next);
+
+ ret = 0;
+ if (feed->ts->ts_insertion_init)
+ ret = feed->ts->ts_insertion_init(feed->ts);
+ if (!ret) {
+ list_for_each_entry(ts_buffer,
+ &filter->insertion_buffers, next)
+ dvb_dmxdev_queue_ts_insertion(
+ ts_buffer);
+ } else {
+ pr_err("%s: ts_insertion_init failed, err %d\n",
+ __func__, ret);
+ }
+ }
+
return 0;
}
@@ -747,11 +3678,28 @@
mutex_init(&dmxdevfilter->mutex);
file->private_data = dmxdevfilter;
+ memset(&dmxdevfilter->decoder_buffers,
+ 0,
+ sizeof(dmxdevfilter->decoder_buffers));
+ dmxdevfilter->decoder_buffers.buffers_size =
+ DMX_DEFAULT_DECODER_BUFFER_SIZE;
+ dmxdevfilter->buffer_mode = DMX_BUFFER_MODE_INTERNAL;
+ dmxdevfilter->priv_buff_handle = NULL;
dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192);
+ dvb_dmxdev_flush_events(&dmxdevfilter->events);
+ dmxdevfilter->events.event_mask.disable_mask = DMX_EVENT_NEW_ES_DATA;
+ dmxdevfilter->events.event_mask.no_wakeup_mask = 0;
+ dmxdevfilter->events.event_mask.wakeup_threshold = 1;
+
dmxdevfilter->type = DMXDEV_TYPE_NONE;
dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED);
init_timer(&dmxdevfilter->timer);
+ dmxdevfilter->sec_mode.is_secured = 0;
+
+ INIT_LIST_HEAD(&dmxdevfilter->insertion_buffers);
+
+ dmxdevfilter->dmx_tsp_format = DMX_TSP_FORMAT_188;
dvbdev->users++;
mutex_unlock(&dmxdev->mutex);
@@ -761,23 +3709,40 @@
static int dvb_dmxdev_filter_free(struct dmxdev *dmxdev,
struct dmxdev_filter *dmxdevfilter)
{
+ struct ts_insertion_buffer *ts_buffer, *tmp;
+
mutex_lock(&dmxdev->mutex);
mutex_lock(&dmxdevfilter->mutex);
dvb_dmxdev_filter_stop(dmxdevfilter);
dvb_dmxdev_filter_reset(dmxdevfilter);
+ list_for_each_entry_safe(ts_buffer, tmp,
+ &dmxdevfilter->insertion_buffers, next) {
+ list_del(&ts_buffer->next);
+ vfree(ts_buffer->buffer);
+ vfree(ts_buffer);
+ }
+
if (dmxdevfilter->buffer.data) {
void *mem = dmxdevfilter->buffer.data;
spin_lock_irq(&dmxdev->lock);
dmxdevfilter->buffer.data = NULL;
spin_unlock_irq(&dmxdev->lock);
- vfree(mem);
+ if (dmxdevfilter->buffer_mode == DMX_BUFFER_MODE_INTERNAL)
+ vfree(mem);
+ }
+
+ if ((dmxdevfilter->buffer_mode == DMX_BUFFER_MODE_EXTERNAL) &&
+ dmxdevfilter->priv_buff_handle) {
+ dmxdev->demux->unmap_buffer(dmxdev->demux,
+ dmxdevfilter->priv_buff_handle);
+ dmxdevfilter->priv_buff_handle = NULL;
}
dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_FREE);
- wake_up(&dmxdevfilter->buffer.queue);
+ wake_up_all(&dmxdevfilter->buffer.queue);
mutex_unlock(&dmxdevfilter->mutex);
mutex_unlock(&dmxdev->mutex);
return 0;
@@ -795,6 +3760,7 @@
struct dmxdev_filter *filter, u16 pid)
{
struct dmxdev_feed *feed;
+ int ret = 0;
if ((filter->type != DMXDEV_TYPE_PES) ||
(filter->state < DMXDEV_STATE_SET))
@@ -810,28 +3776,45 @@
return -ENOMEM;
feed->pid = pid;
- list_add(&feed->next, &filter->feed.ts);
+ feed->cipher_ops.operations_count = 0;
+ feed->idx_params.enable = 0;
if (filter->state >= DMXDEV_STATE_GO)
- return dvb_dmxdev_start_feed(dmxdev, filter, feed);
+ ret = dvb_dmxdev_start_feed(dmxdev, filter, feed);
- return 0;
+ if (!ret)
+ list_add(&feed->next, &filter->feed.ts);
+ else
+ kfree(feed);
+
+ return ret;
}
static int dvb_dmxdev_remove_pid(struct dmxdev *dmxdev,
struct dmxdev_filter *filter, u16 pid)
{
+ int feed_count;
struct dmxdev_feed *feed, *tmp;
if ((filter->type != DMXDEV_TYPE_PES) ||
(filter->state < DMXDEV_STATE_SET))
return -EINVAL;
+ feed_count = 0;
+ list_for_each_entry(tmp, &filter->feed.ts, next)
+ feed_count++;
+
+ if (feed_count <= 1)
+ return -EINVAL;
+
list_for_each_entry_safe(feed, tmp, &filter->feed.ts, next) {
- if ((feed->pid == pid) && (feed->ts != NULL)) {
- feed->ts->stop_filtering(feed->ts);
- filter->dev->demux->release_ts_feed(filter->dev->demux,
- feed->ts);
+ if (feed->pid == pid) {
+ if (feed->ts != NULL) {
+ feed->ts->stop_filtering(feed->ts);
+ filter->dev->demux->release_ts_feed(
+ filter->dev->demux,
+ feed->ts);
+ }
list_del(&feed->next);
kfree(feed);
}
@@ -844,7 +3827,7 @@
struct dmxdev_filter *dmxdevfilter,
struct dmx_sct_filter_params *params)
{
- dprintk("function : %s, PID=0x%04x, flags=%02x, timeout=%d\n",
+ pr_debug("function : %s, PID=0x%04x, flags=%02x, timeout=%d\n",
__func__, params->pid, params->flags, params->timeout);
dvb_dmxdev_filter_stop(dmxdevfilter);
@@ -853,6 +3836,7 @@
memcpy(&dmxdevfilter->params.sec,
params, sizeof(struct dmx_sct_filter_params));
invert_mode(&dmxdevfilter->params.sec.filter);
+ dmxdevfilter->feed.sec.cipher_ops.operations_count = 0;
dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET);
if (params->flags & DMX_IMMEDIATE_START)
@@ -861,6 +3845,99 @@
return 0;
}
+static int dvb_dmxdev_set_secure_mode(
+ struct dmxdev *dmxdev,
+ struct dmxdev_filter *filter,
+ struct dmx_secure_mode *sec_mode)
+{
+ if (!dmxdev || !filter || !sec_mode)
+ return -EINVAL;
+
+ if (filter->state == DMXDEV_STATE_GO) {
+ pr_err("%s: invalid filter state\n", __func__);
+ return -EBUSY;
+ }
+
+ pr_debug("%s: secure=%d\n", __func__, sec_mode->is_secured);
+
+ filter->sec_mode = *sec_mode;
+
+ return 0;
+}
+
+static int dvb_dmxdev_set_cipher(struct dmxdev *dmxdev,
+ struct dmxdev_filter *filter,
+ struct dmx_cipher_operations *cipher_ops)
+{
+ struct dmxdev_feed *feed;
+ struct dmxdev_feed *ts_feed = NULL;
+ struct dmxdev_sec_feed *sec_feed = NULL;
+ struct dmx_caps caps;
+
+ if (!dmxdev || !dmxdev->demux->get_caps)
+ return -EINVAL;
+
+ dmxdev->demux->get_caps(dmxdev->demux, &caps);
+
+ if (!filter || !cipher_ops ||
+ (cipher_ops->operations_count > caps.num_cipher_ops) ||
+ (cipher_ops->operations_count >
+ DMX_MAX_CIPHER_OPERATIONS_COUNT))
+ return -EINVAL;
+
+ pr_debug("%s: pid=%d, operations=%d\n", __func__,
+ cipher_ops->pid, cipher_ops->operations_count);
+
+ if (filter->state < DMXDEV_STATE_SET ||
+ filter->state > DMXDEV_STATE_GO) {
+ pr_err("%s: invalid filter state\n", __func__);
+ return -EPERM;
+ }
+
+ if (!filter->sec_mode.is_secured && cipher_ops->operations_count) {
+ pr_err("%s: secure mode must be enabled to set cipher ops\n",
+ __func__);
+ return -EPERM;
+ }
+
+ switch (filter->type) {
+ case DMXDEV_TYPE_PES:
+ list_for_each_entry(feed, &filter->feed.ts, next) {
+ if (feed->pid == cipher_ops->pid) {
+ ts_feed = feed;
+ ts_feed->cipher_ops = *cipher_ops;
+ if (filter->state == DMXDEV_STATE_GO &&
+ ts_feed->ts->set_cipher_ops)
+ ts_feed->ts->set_cipher_ops(
+ ts_feed->ts, cipher_ops);
+ break;
+ }
+ }
+ break;
+ case DMXDEV_TYPE_SEC:
+ if (filter->params.sec.pid == cipher_ops->pid) {
+ sec_feed = &filter->feed.sec;
+ sec_feed->cipher_ops = *cipher_ops;
+ if (filter->state == DMXDEV_STATE_GO &&
+ sec_feed->feed->set_cipher_ops)
+ sec_feed->feed->set_cipher_ops(sec_feed->feed,
+ cipher_ops);
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (!ts_feed && !sec_feed) {
+ pr_err("%s: pid %d is undefined for this filter\n",
+ __func__, cipher_ops->pid);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int dvb_dmxdev_pes_filter_set(struct dmxdev *dmxdev,
struct dmxdev_filter *dmxdevfilter,
struct dmx_pes_filter_params *params)
@@ -891,6 +3968,55 @@
return 0;
}
+static int dvb_dmxdev_set_decoder_buffer(struct dmxdev *dmxdev,
+ struct dmxdev_filter *filter,
+ struct dmx_decoder_buffers *buffs)
+{
+ int i;
+ struct dmx_decoder_buffers *dec_buffs;
+ struct dmx_caps caps;
+
+ if (!dmxdev || !filter || !buffs)
+ return -EINVAL;
+
+ dec_buffs = &filter->decoder_buffers;
+ if (!dmxdev->demux->get_caps)
+ return -EINVAL;
+
+ dmxdev->demux->get_caps(dmxdev->demux, &caps);
+ if (!dvb_dmxdev_verify_buffer_size(buffs->buffers_size,
+ caps.decoder.max_size, caps.decoder.size_alignment))
+ return -EINVAL;
+
+ if ((buffs->buffers_size == 0) ||
+ (buffs->is_linear &&
+ ((buffs->buffers_num <= 1) ||
+ (buffs->buffers_num > DMX_MAX_DECODER_BUFFER_NUM))))
+ return -EINVAL;
+
+ if (buffs->buffers_num == 0) {
+ /* Internal mode - linear buffers not supported in this mode */
+ if (!(caps.decoder.flags & DMX_BUFFER_INTERNAL_SUPPORT) ||
+ buffs->is_linear)
+ return -EINVAL;
+ } else {
+ /* External buffer(s) mode */
+ if ((!(caps.decoder.flags & DMX_BUFFER_LINEAR_GROUP_SUPPORT) &&
+ buffs->buffers_num > 1) ||
+ !(caps.decoder.flags & DMX_BUFFER_EXTERNAL_SUPPORT) ||
+ buffs->buffers_num > caps.decoder.max_buffer_num)
+ return -EINVAL;
+
+ dec_buffs->is_linear = buffs->is_linear;
+ dec_buffs->buffers_num = buffs->buffers_num;
+ dec_buffs->buffers_size = buffs->buffers_size;
+ for (i = 0; i < dec_buffs->buffers_num; i++)
+ dec_buffs->handles[i] = buffs->handles[i];
+ }
+
+ return 0;
+}
+
static ssize_t dvb_dmxdev_read_sec(struct dmxdev_filter *dfil,
struct file *file, char __user *buf,
size_t count, loff_t *ppos)
@@ -902,7 +4028,7 @@
hcount = 3 + dfil->todo;
if (hcount > count)
hcount = count;
- result = dvb_dmxdev_buffer_read(&dfil->buffer,
+ result = dvb_dmxdev_buffer_read(dfil, &dfil->buffer,
file->f_flags & O_NONBLOCK,
buf, hcount, ppos);
if (result < 0) {
@@ -923,7 +4049,7 @@
}
if (count > dfil->todo)
count = dfil->todo;
- result = dvb_dmxdev_buffer_read(&dfil->buffer,
+ result = dvb_dmxdev_buffer_read(dfil, &dfil->buffer,
file->f_flags & O_NONBLOCK,
buf, count, ppos);
if (result < 0)
@@ -942,12 +4068,36 @@
if (mutex_lock_interruptible(&dmxdevfilter->mutex))
return -ERESTARTSYS;
+ if (dmxdevfilter->eos_state &&
+ dvb_ringbuffer_empty(&dmxdevfilter->buffer)) {
+ mutex_unlock(&dmxdevfilter->mutex);
+ return 0;
+ }
+
if (dmxdevfilter->type == DMXDEV_TYPE_SEC)
ret = dvb_dmxdev_read_sec(dmxdevfilter, file, buf, count, ppos);
else
- ret = dvb_dmxdev_buffer_read(&dmxdevfilter->buffer,
- file->f_flags & O_NONBLOCK,
- buf, count, ppos);
+ ret = dvb_dmxdev_buffer_read(dmxdevfilter,
+ &dmxdevfilter->buffer,
+ file->f_flags & O_NONBLOCK,
+ buf, count, ppos);
+
+ if (ret > 0) {
+ dvb_dmxdev_notify_data_read(dmxdevfilter, ret);
+ spin_lock_irq(&dmxdevfilter->dev->lock);
+ dvb_dmxdev_update_events(&dmxdevfilter->events, ret);
+ spin_unlock_irq(&dmxdevfilter->dev->lock);
+
+ /*
+ * in PULL mode, we might be stalling on
+ * event queue, so need to wake-up waiters
+ */
+ if (dmxdevfilter->dev->playback_mode == DMX_PB_MODE_PULL)
+ wake_up_all(&dmxdevfilter->buffer.queue);
+ } else if (ret == -EOVERFLOW) {
+ dvb_dmxdev_auto_flush_buffer(dmxdevfilter,
+ &dmxdevfilter->buffer);
+ }
mutex_unlock(&dmxdevfilter->mutex);
return ret;
@@ -1013,6 +4163,43 @@
mutex_unlock(&dmxdevfilter->mutex);
break;
+ case DMX_SET_BUFFER_MODE:
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ERESTARTSYS;
+ }
+ ret = dvb_dmxdev_set_buffer_mode(dmxdevfilter,
+ *(enum dmx_buffer_mode *)parg);
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
+
+ case DMX_SET_BUFFER:
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ERESTARTSYS;
+ }
+ ret = dvb_dmxdev_set_buffer(dmxdevfilter, parg);
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
+
+ case DMX_GET_BUFFER_STATUS:
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ERESTARTSYS;
+ }
+ ret = dvb_dmxdev_get_buffer_status(dmxdevfilter, parg);
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
+
+ case DMX_RELEASE_DATA:
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ERESTARTSYS;
+ }
+ ret = dvb_dmxdev_release_data(dmxdevfilter, arg);
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
+
case DMX_GET_PES_PIDS:
if (!dmxdev->demux->get_pes_pids) {
ret = -EINVAL;
@@ -1021,9 +4208,6 @@
dmxdev->demux->get_pes_pids(dmxdev->demux, parg);
break;
-#if 0
- /* Not used upstream and never documented */
-
case DMX_GET_CAPS:
if (!dmxdev->demux->get_caps) {
ret = -EINVAL;
@@ -1033,13 +4217,65 @@
break;
case DMX_SET_SOURCE:
- if (!dmxdev->demux->set_source) {
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ERESTARTSYS;
+ }
+ ret = dvb_dmxdev_set_source(dmxdevfilter, parg);
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
+
+ case DMX_SET_TS_PACKET_FORMAT:
+ if (!dmxdev->demux->set_tsp_format) {
ret = -EINVAL;
break;
}
- ret = dmxdev->demux->set_source(dmxdev->demux, parg);
+
+ if (dmxdevfilter->state >= DMXDEV_STATE_GO) {
+ ret = -EBUSY;
+ break;
+ }
+ ret = dmxdev->demux->set_tsp_format(
+ dmxdev->demux,
+ *(enum dmx_tsp_format_t *)parg);
break;
-#endif
+
+ case DMX_SET_TS_OUT_FORMAT:
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ERESTARTSYS;
+ }
+
+ ret = dvb_dmxdev_set_tsp_out_format(dmxdevfilter,
+ *(enum dmx_tsp_format_t *)parg);
+
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
+
+ case DMX_SET_DECODER_BUFFER_SIZE:
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ERESTARTSYS;
+ }
+
+ ret = dvb_dmxdev_set_decoder_buffer_size(dmxdevfilter, arg);
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
+
+ case DMX_SET_PLAYBACK_MODE:
+ ret = dvb_dmxdev_set_playback_mode(
+ dmxdevfilter,
+ *(enum dmx_playback_mode_t *)parg);
+ break;
+
+ case DMX_GET_EVENT:
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ERESTARTSYS;
+ }
+ ret = dvb_dmxdev_get_event(dmxdevfilter, parg);
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
case DMX_GET_STC:
if (!dmxdev->demux->get_stc) {
@@ -1070,8 +4306,109 @@
mutex_unlock(&dmxdevfilter->mutex);
break;
+ case DMX_SET_DECODER_BUFFER:
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ ret = dvb_dmxdev_set_decoder_buffer(dmxdev, dmxdevfilter, parg);
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
+
+ case DMX_SET_SECURE_MODE:
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ ret = dvb_dmxdev_set_secure_mode(dmxdev, dmxdevfilter, parg);
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
+
+ case DMX_SET_CIPHER:
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ ret = dvb_dmxdev_set_cipher(dmxdev, dmxdevfilter, parg);
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
+
+ case DMX_REUSE_DECODER_BUFFER:
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ERESTARTSYS;
+ }
+ ret = dvb_dmxdev_reuse_decoder_buf(dmxdevfilter, arg);
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
+
+ case DMX_SET_EVENTS_MASK:
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ERESTARTSYS;
+ }
+ ret = dvb_dmxdev_set_event_mask(dmxdevfilter, parg);
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
+
+ case DMX_GET_EVENTS_MASK:
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ERESTARTSYS;
+ }
+ ret = dvb_dmxdev_get_event_mask(dmxdevfilter, parg);
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
+
+ case DMX_SET_INDEXING_PARAMS:
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ERESTARTSYS;
+ }
+ ret = dvb_dmxdev_set_indexing_params(dmxdevfilter, parg);
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
+
+ case DMX_SET_TS_INSERTION:
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ERESTARTSYS;
+ }
+ ret = dvb_dmxdev_set_ts_insertion(dmxdevfilter, parg);
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
+
+ case DMX_ABORT_TS_INSERTION:
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ERESTARTSYS;
+ }
+ ret = dvb_dmxdev_abort_ts_insertion(dmxdevfilter, parg);
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
+
+ case DMX_GET_SCRAMBLING_BITS:
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ERESTARTSYS;
+ }
+ ret = dvb_dmxdev_get_scrambling_bits(dmxdevfilter, parg);
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
+
+ case DMX_FLUSH_BUFFER:
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ERESTARTSYS;
+ }
+ ret = dvb_dmxdev_flush_buffer(dmxdevfilter);
+ mutex_unlock(&dmxdevfilter->mutex);
+ break;
+
default:
- ret = -EINVAL;
+ pr_err("%s: unknown ioctl code (0x%x)\n",
+ __func__, cmd);
+ ret = -ENOIOCTLCMD;
break;
}
mutex_unlock(&dmxdev->mutex);
@@ -1084,13 +4421,78 @@
return dvb_usercopy(file, cmd, arg, dvb_demux_do_ioctl);
}
+#ifdef CONFIG_COMPAT
+
+struct dmx_set_ts_insertion32 {
+ __u32 identifier;
+ __u32 repetition_time;
+ compat_uptr_t ts_packets;
+ compat_size_t size;
+};
+
+static long dmx_set_ts_insertion32_wrapper(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+ struct dmx_set_ts_insertion32 dmx_ts_insert32;
+ struct dmx_set_ts_insertion dmx_ts_insert;
+
+ ret = copy_from_user(&dmx_ts_insert32, (void __user *)arg,
+ sizeof(dmx_ts_insert32));
+ if (ret) {
+ pr_err(
+ "%s: copy dmx_set_ts_insertion32 from user failed, ret=%d\n",
+ __func__, ret);
+ return -EFAULT;
+ }
+
+ memset(&dmx_ts_insert, 0, sizeof(dmx_ts_insert));
+ dmx_ts_insert.identifier = dmx_ts_insert32.identifier;
+ dmx_ts_insert.repetition_time = dmx_ts_insert32.repetition_time;
+ dmx_ts_insert.ts_packets = compat_ptr(dmx_ts_insert32.ts_packets);
+ dmx_ts_insert.size = dmx_ts_insert32.size;
+
+ ret = dvb_demux_do_ioctl(file, DMX_SET_TS_INSERTION, &dmx_ts_insert);
+
+ return ret;
+}
+
+#define DMX_SET_TS_INSERTION32 _IOW('o', 70, struct dmx_set_ts_insertion32)
+
+/*
+ * compat ioctl is called whenever compatibility is required, i.e when a 32bit
+ * process calls an ioctl for a 64bit kernel.
+ */
+static long dvb_demux_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ long ret = 0;
+
+ switch (cmd) {
+ case DMX_SET_TS_INSERTION32:
+ ret = dmx_set_ts_insertion32_wrapper(file, cmd, arg);
+ break;
+ case DMX_SET_TS_INSERTION:
+ pr_err("%s: 64bit ioctl code (0x%lx) used by 32bit userspace\n",
+ __func__, DMX_SET_TS_INSERTION);
+ ret = -ENOIOCTLCMD;
+ break;
+ default:
+ /* use regular ioctl */
+ ret = dvb_usercopy(file, cmd, arg, dvb_demux_do_ioctl);
+ }
+
+ return ret;
+}
+#endif
+
static unsigned int dvb_demux_poll(struct file *file, poll_table *wait)
{
struct dmxdev_filter *dmxdevfilter = file->private_data;
unsigned int mask = 0;
- if ((!dmxdevfilter) || dmxdevfilter->dev->exit)
- return POLLERR;
+ if (!dmxdevfilter)
+ return -EINVAL;
poll_wait(file, &dmxdevfilter->buffer.queue, wait);
@@ -1099,20 +4501,80 @@
dmxdevfilter->state != DMXDEV_STATE_TIMEDOUT)
return 0;
- if (dmxdevfilter->buffer.error)
- mask |= (POLLIN | POLLRDNORM | POLLPRI | POLLERR);
+ if (dmxdevfilter->buffer.error) {
+ mask |= (POLLIN | POLLRDNORM | POLLERR);
+ if (dmxdevfilter->buffer.error == -EOVERFLOW)
+ mask |= POLLPRI;
+ }
if (!dvb_ringbuffer_empty(&dmxdevfilter->buffer))
- mask |= (POLLIN | POLLRDNORM | POLLPRI);
+ mask |= (POLLIN | POLLRDNORM);
+
+ if (dmxdevfilter->events.wakeup_events_counter >=
+ dmxdevfilter->events.event_mask.wakeup_threshold)
+ mask |= POLLPRI;
return mask;
}
+static int dvb_demux_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct dmxdev_filter *dmxdevfilter = filp->private_data;
+ struct dmxdev *dmxdev = dmxdevfilter->dev;
+ int ret;
+ int vma_size;
+ int buffer_size;
+
+ vma_size = vma->vm_end - vma->vm_start;
+
+ if (vma->vm_flags & VM_WRITE)
+ return -EINVAL;
+
+ if (mutex_lock_interruptible(&dmxdev->mutex))
+ return -ERESTARTSYS;
+
+ if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
+ mutex_unlock(&dmxdev->mutex);
+ return -ERESTARTSYS;
+ }
+
+ if ((!dmxdevfilter->buffer.data) ||
+ (dmxdevfilter->buffer_mode == DMX_BUFFER_MODE_EXTERNAL)) {
+ mutex_unlock(&dmxdevfilter->mutex);
+ mutex_unlock(&dmxdev->mutex);
+ return -EINVAL;
+ }
+
+ /* Make sure requested mapping is not larger than buffer size */
+ buffer_size = dmxdevfilter->buffer.size + (PAGE_SIZE-1);
+ buffer_size = buffer_size & ~(PAGE_SIZE-1);
+
+ if (vma_size != buffer_size) {
+ mutex_unlock(&dmxdevfilter->mutex);
+ mutex_unlock(&dmxdev->mutex);
+ return -EINVAL;
+ }
+
+ ret = remap_vmalloc_range(vma, dmxdevfilter->buffer.data, 0);
+ if (ret) {
+ mutex_unlock(&dmxdevfilter->mutex);
+ mutex_unlock(&dmxdev->mutex);
+ return ret;
+ }
+
+ vma->vm_flags |= VM_DONTDUMP;
+ vma->vm_flags |= VM_DONTEXPAND;
+
+ mutex_unlock(&dmxdevfilter->mutex);
+ mutex_unlock(&dmxdev->mutex);
+
+ return 0;
+}
+
static int dvb_demux_release(struct inode *inode, struct file *file)
{
struct dmxdev_filter *dmxdevfilter = file->private_data;
struct dmxdev *dmxdev = dmxdevfilter->dev;
-
int ret;
ret = dvb_dmxdev_filter_free(dmxdev, dmxdevfilter);
@@ -1120,6 +4582,8 @@
mutex_lock(&dmxdev->mutex);
dmxdev->dvbdev->users--;
if(dmxdev->dvbdev->users==1 && dmxdev->exit==1) {
+ fops_put(file->f_op);
+ file->f_op = NULL;
mutex_unlock(&dmxdev->mutex);
wake_up(&dmxdev->dvbdev->wait_queue);
} else
@@ -1136,6 +4600,10 @@
.release = dvb_demux_release,
.poll = dvb_demux_poll,
.llseek = default_llseek,
+ .mmap = dvb_demux_mmap,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = dvb_demux_compat_ioctl,
+#endif
};
static const struct dvb_device dvbdev_demux = {
@@ -1161,11 +4629,44 @@
switch (cmd) {
case DMX_SET_BUFFER_SIZE:
- ret = dvb_dvr_set_buffer_size(dmxdev, arg);
+ ret = dvb_dvr_set_buffer_size(dmxdev, file->f_flags, arg);
+ break;
+
+ case DMX_SET_BUFFER_MODE:
+ ret = dvb_dvr_set_buffer_mode(dmxdev, file->f_flags,
+ *(enum dmx_buffer_mode *)parg);
+ break;
+
+ case DMX_SET_BUFFER:
+ ret = dvb_dvr_set_buffer(dmxdev, file->f_flags, parg);
+ break;
+
+ case DMX_GET_BUFFER_STATUS:
+ ret = dvb_dvr_get_buffer_status(dmxdev, file->f_flags, parg);
+ break;
+
+ case DMX_RELEASE_DATA:
+ ret = dvb_dvr_release_data(dmxdev, file->f_flags, arg);
+ break;
+
+ case DMX_FEED_DATA:
+ ret = dvb_dvr_feed_data(dmxdev, file->f_flags, arg);
+ break;
+
+ case DMX_GET_EVENT:
+ ret = dvb_dvr_get_event(dmxdev, file->f_flags, parg);
+ break;
+
+ case DMX_PUSH_OOB_COMMAND:
+ ret = dvb_dvr_push_oob_cmd(dmxdev, file->f_flags, parg);
+ break;
+
+ case DMX_FLUSH_BUFFER:
+ ret = dvb_dvr_flush_buffer(dmxdev, file->f_flags);
break;
default:
- ret = -EINVAL;
+ ret = -ENOIOCTLCMD;
break;
}
mutex_unlock(&dmxdev->mutex);
@@ -1173,32 +4674,50 @@
}
static long dvb_dvr_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
+ unsigned int cmd, unsigned long arg)
{
return dvb_usercopy(file, cmd, arg, dvb_dvr_do_ioctl);
}
+#ifdef CONFIG_COMPAT
+static long dvb_dvr_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return dvb_usercopy(file, cmd, arg, dvb_dvr_do_ioctl);
+}
+#endif
+
static unsigned int dvb_dvr_poll(struct file *file, poll_table *wait)
{
struct dvb_device *dvbdev = file->private_data;
struct dmxdev *dmxdev = dvbdev->priv;
unsigned int mask = 0;
- dprintk("function : %s\n", __func__);
-
- if (dmxdev->exit)
- return POLLERR;
-
- poll_wait(file, &dmxdev->dvr_buffer.queue, wait);
+ pr_debug("function : %s\n", __func__);
if ((file->f_flags & O_ACCMODE) == O_RDONLY) {
- if (dmxdev->dvr_buffer.error)
- mask |= (POLLIN | POLLRDNORM | POLLPRI | POLLERR);
+ poll_wait(file, &dmxdev->dvr_buffer.queue, wait);
+
+ if (dmxdev->dvr_buffer.error) {
+ mask |= (POLLIN | POLLRDNORM | POLLERR);
+ if (dmxdev->dvr_buffer.error == -EOVERFLOW)
+ mask |= POLLPRI;
+ }
if (!dvb_ringbuffer_empty(&dmxdev->dvr_buffer))
- mask |= (POLLIN | POLLRDNORM | POLLPRI);
- } else
- mask |= (POLLOUT | POLLWRNORM | POLLPRI);
+ mask |= (POLLIN | POLLRDNORM);
+
+ if (dmxdev->dvr_output_events.wakeup_events_counter >=
+ dmxdev->dvr_output_events.event_mask.wakeup_threshold)
+ mask |= POLLPRI;
+ } else {
+ poll_wait(file, &dmxdev->dvr_input_buffer.queue, wait);
+ if (dmxdev->dvr_input_buffer.error)
+ mask |= (POLLOUT | POLLRDNORM | POLLPRI | POLLERR);
+
+ if (dvb_ringbuffer_free(&dmxdev->dvr_input_buffer))
+ mask |= (POLLOUT | POLLRDNORM | POLLPRI);
+ }
return mask;
}
@@ -1207,7 +4726,11 @@
.owner = THIS_MODULE,
.read = dvb_dvr_read,
.write = dvb_dvr_write,
+ .mmap = dvb_dvr_mmap,
.unlocked_ioctl = dvb_dvr_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = dvb_dvr_compat_ioctl,
+#endif
.open = dvb_dvr_open,
.release = dvb_dvr_release,
.poll = dvb_dvr_poll,
@@ -1223,9 +4746,94 @@
#endif
.fops = &dvb_dvr_fops
};
+
+
+/**
+ * debugfs service to print active filters information.
+ */
+static int dvb_dmxdev_dbgfs_print(struct seq_file *s, void *p)
+{
+ int i;
+ struct dmxdev *dmxdev = s->private;
+ struct dmxdev_filter *filter;
+ int active_count = 0;
+ struct dmx_buffer_status buffer_status;
+ struct dmx_scrambling_bits scrambling_bits;
+ static const char * const pes_feeds[] = {"DEC", "PES", "DVR", "REC"};
+ int ret;
+
+ if (!dmxdev)
+ return 0;
+
+ for (i = 0; i < dmxdev->filternum; i++) {
+ filter = &dmxdev->filter[i];
+ if (filter->state >= DMXDEV_STATE_GO) {
+ active_count++;
+
+ seq_printf(s, "filter_%02d - ", i);
+
+ if (filter->type == DMXDEV_TYPE_SEC) {
+ seq_puts(s, "type: SEC, ");
+ seq_printf(s, "PID %04d ",
+ filter->params.sec.pid);
+ scrambling_bits.pid = filter->params.sec.pid;
+ } else {
+ seq_printf(s, "type: %s, ",
+ pes_feeds[filter->params.pes.output]);
+ seq_printf(s, "PID: %04d ",
+ filter->params.pes.pid);
+ scrambling_bits.pid = filter->params.pes.pid;
+ }
+
+ dvb_dmxdev_get_scrambling_bits(filter,
+ &scrambling_bits);
+
+ if (filter->type == DMXDEV_TYPE_PES &&
+ filter->params.pes.output == DMX_OUT_TS_TAP)
+ ret = dvb_dvr_get_buffer_status(dmxdev,
+ O_RDONLY, &buffer_status);
+ else
+ ret = dvb_dmxdev_get_buffer_status(filter,
+ &buffer_status);
+ if (!ret) {
+ seq_printf(s, "size: %08d, ",
+ buffer_status.size);
+ seq_printf(s, "fullness: %08d, ",
+ buffer_status.fullness);
+ seq_printf(s, "error: %d, ",
+ buffer_status.error);
+ }
+
+ seq_printf(s, "scramble: %d, ",
+ scrambling_bits.value);
+ seq_printf(s, "secured: %d\n",
+ filter->sec_mode.is_secured);
+ }
+ }
+
+ if (!active_count)
+ seq_puts(s, "No active filters\n");
+
+ return 0;
+}
+
+static int dvb_dmxdev_dbgfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dvb_dmxdev_dbgfs_print, inode->i_private);
+}
+
+static const struct file_operations dbgfs_filters_fops = {
+ .open = dvb_dmxdev_dbgfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *dvb_adapter)
{
int i;
+ struct dmx_caps caps;
if (dmxdev->demux->open(dmxdev->demux) < 0)
return -EUSERS;
@@ -1234,8 +4842,12 @@
if (!dmxdev->filter)
return -ENOMEM;
+ dmxdev->playback_mode = DMX_PB_MODE_PUSH;
+ dmxdev->demux->dvr_input_protected = 0;
+
mutex_init(&dmxdev->mutex);
spin_lock_init(&dmxdev->lock);
+ spin_lock_init(&dmxdev->dvr_in_lock);
for (i = 0; i < dmxdev->filternum; i++) {
dmxdev->filter[i].dev = dmxdev;
dmxdev->filter[i].buffer.data = NULL;
@@ -1244,11 +4856,24 @@
}
dvb_register_device(dvb_adapter, &dmxdev->dvbdev, &dvbdev_demux, dmxdev,
- DVB_DEVICE_DEMUX, dmxdev->filternum);
+ DVB_DEVICE_DEMUX, 0);
dvb_register_device(dvb_adapter, &dmxdev->dvr_dvbdev, &dvbdev_dvr,
- dmxdev, DVB_DEVICE_DVR, dmxdev->filternum);
+ dmxdev, DVB_DEVICE_DVR, 0);
dvb_ringbuffer_init(&dmxdev->dvr_buffer, NULL, 8192);
+ dvb_ringbuffer_init(&dmxdev->dvr_input_buffer, NULL, 8192);
+
+ /* Disable auto buffer flushing if plugin does not allow it */
+ if (dmxdev->demux->get_caps) {
+ dmxdev->demux->get_caps(dmxdev->demux, &caps);
+ if (!(caps.caps & DMX_CAP_AUTO_BUFFER_FLUSH))
+ overflow_auto_flush = 0;
+ }
+
+ if (dmxdev->demux->debugfs_demux_dir)
+ debugfs_create_file("filters", 0444,
+ dmxdev->demux->debugfs_demux_dir, dmxdev,
+ &dbgfs_filters_fops);
return 0;
}
diff --git a/drivers/media/dvb-core/dmxdev.h b/drivers/media/dvb-core/dmxdev.h
index 48c6cf9..ad007f4 100644
--- a/drivers/media/dvb-core/dmxdev.h
+++ b/drivers/media/dvb-core/dmxdev.h
@@ -33,7 +33,7 @@
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/slab.h>
-
+#include <linux/kthread.h>
#include <linux/dvb/dmx.h>
#include "dvbdev.h"
@@ -57,10 +57,87 @@
struct dmxdev_feed {
u16 pid;
+ struct dmx_indexing_params idx_params;
+ struct dmx_cipher_operations cipher_ops;
struct dmx_ts_feed *ts;
struct list_head next;
};
+struct dmxdev_sec_feed {
+ struct dmx_section_feed *feed;
+ struct dmx_cipher_operations cipher_ops;
+};
+
+struct dmxdev_events_queue {
+ /*
+ * indices used to manage events queue.
+ * read_index advanced when relevant data is read
+ * from the buffer.
+ * notified_index is the index from which next events
+ * are returned.
+ * read_index <= notified_index <= write_index
+ *
+ * If user reads the data without getting the respective
+ * event first, the read/notified indices are updated
+ * automatically to reflect the actual data that exist
+ * in the buffer.
+ */
+ u32 read_index;
+ u32 write_index;
+ u32 notified_index;
+
+ /* Bytes read by user without having respective event in the queue */
+ u32 bytes_read_no_event;
+
+ /* internal tracking of PES and recording events */
+ u32 current_event_data_size;
+ u32 current_event_start_offset;
+
+ /* current setting of the events masking */
+ struct dmx_events_mask event_mask;
+
+ /*
+ * indicates if an event used for data-reading from demux
+ * filter is enabled or not. These are events on which
+ * user may wait for before calling read() on the demux filter.
+ */
+ int data_read_event_masked;
+
+ /*
+ * holds the current number of pending events in the
+ * events queue that are considered as a wake-up source
+ */
+ u32 wakeup_events_counter;
+
+ struct dmx_filter_event queue[DMX_EVENT_QUEUE_SIZE];
+};
+
+#define DMX_MIN_INSERTION_REPETITION_TIME 25 /* in msec */
+struct ts_insertion_buffer {
+ /* work scheduled for insertion of this buffer */
+ struct delayed_work dwork;
+
+ struct list_head next;
+
+ /* buffer holding TS packets for insertion */
+ char *buffer;
+
+ /* buffer size */
+ size_t size;
+
+ /* buffer ID from user */
+ u32 identifier;
+
+ /* repetition time for the buffer insertion */
+ u32 repetition_time;
+
+ /* the recording filter to which this buffer belongs */
+ struct dmxdev_filter *dmxdevfilter;
+
+ /* indication whether insertion should be aborted */
+ int abort;
+};
+
struct dmxdev_filter {
union {
struct dmx_section_filter *sec;
@@ -69,7 +146,7 @@
union {
/* list of TS and PES feeds (struct dmxdev_feed) */
struct list_head ts;
- struct dmx_section_feed *sec;
+ struct dmxdev_sec_feed sec;
} feed;
union {
@@ -77,19 +154,37 @@
struct dmx_pes_filter_params pes;
} params;
+ struct dmxdev_events_queue events;
+
enum dmxdev_type type;
enum dmxdev_state state;
struct dmxdev *dev;
struct dvb_ringbuffer buffer;
+ void *priv_buff_handle;
+ enum dmx_buffer_mode buffer_mode;
struct mutex mutex;
+ /* for recording output */
+ enum dmx_tsp_format_t dmx_tsp_format;
+ u32 rec_chunk_size;
+
+ /* list of buffers used for insertion (struct ts_insertion_buffer) */
+ struct list_head insertion_buffers;
+
+ /* End-of-stream indication has been received */
+ int eos_state;
+
/* only for sections */
struct timer_list timer;
int todo;
u8 secheader[3];
-};
+ struct dmx_secure_mode sec_mode;
+
+ /* Decoder buffer(s) related */
+ struct dmx_decoder_buffers decoder_buffers;
+};
struct dmxdev {
struct dvb_device *dvbdev;
@@ -100,18 +195,52 @@
int filternum;
int capabilities;
+#define DMXDEV_CAP_DUPLEX 0x01
+
+ enum dmx_playback_mode_t playback_mode;
+ dmx_source_t source;
unsigned int exit:1;
-#define DMXDEV_CAP_DUPLEX 1
+ unsigned int dvr_in_exit:1;
+ unsigned int dvr_processing_input:1;
+
struct dmx_frontend *dvr_orig_fe;
struct dvb_ringbuffer dvr_buffer;
+ void *dvr_priv_buff_handle;
+ enum dmx_buffer_mode dvr_buffer_mode;
+ struct dmxdev_events_queue dvr_output_events;
+ struct dmxdev_filter *dvr_feed;
+ int dvr_feeds_count;
+
+ struct dvb_ringbuffer dvr_input_buffer;
+ enum dmx_buffer_mode dvr_input_buffer_mode;
+ struct task_struct *dvr_input_thread;
+ /* DVR commands (data feed / OOB command) queue */
+ struct dvb_ringbuffer dvr_cmd_buffer;
+
#define DVR_BUFFER_SIZE (10*188*1024)
struct mutex mutex;
spinlock_t lock;
+ spinlock_t dvr_in_lock;
};
+enum dvr_cmd {
+ DVR_DATA_FEED_CMD,
+ DVR_OOB_CMD
+};
+
+struct dvr_command {
+ enum dvr_cmd type;
+ union {
+ struct dmx_oob_command oobcmd;
+ size_t data_feed_count;
+ } cmd;
+};
+
+#define DVR_CMDS_BUFFER_SIZE (sizeof(struct dvr_command)*500)
+
int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *);
void dvb_dmxdev_release(struct dmxdev *dmxdev);
diff --git a/drivers/media/dvb-core/dvb_demux.c b/drivers/media/dvb-core/dvb_demux.c
index a0cf7b0..474684f 100644
--- a/drivers/media/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb-core/dvb_demux.c
@@ -55,11 +55,151 @@
MODULE_PARM_DESC(dvb_demux_feed_err_pkts,
"when set to 0, drop packets with the TEI bit set (1 by default)");
+/* counter advancing for each new dvb-demux device */
+static int dvb_demux_index;
+
+static int dvb_demux_performancecheck;
+module_param(dvb_demux_performancecheck, int, 0644);
+MODULE_PARM_DESC(dvb_demux_performancecheck,
+ "enable transport stream performance check, reported through debugfs");
+
#define dprintk_tscheck(x...) do { \
if (dvb_demux_tscheck && printk_ratelimit()) \
printk(x); \
} while (0)
+static const struct dvb_dmx_video_patterns mpeg2_seq_hdr = {
+ {0x00, 0x00, 0x01, 0xB3},
+ {0xFF, 0xFF, 0xFF, 0xFF},
+ 4,
+ DMX_IDX_MPEG_SEQ_HEADER
+};
+
+static const struct dvb_dmx_video_patterns mpeg2_gop = {
+ {0x00, 0x00, 0x01, 0xB8},
+ {0xFF, 0xFF, 0xFF, 0xFF},
+ 4,
+ DMX_IDX_MPEG_GOP
+};
+
+static const struct dvb_dmx_video_patterns mpeg2_iframe = {
+ {0x00, 0x00, 0x01, 0x00, 0x00, 0x08},
+ {0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x38},
+ 6,
+ DMX_IDX_MPEG_I_FRAME_START
+};
+
+static const struct dvb_dmx_video_patterns mpeg2_pframe = {
+ {0x00, 0x00, 0x01, 0x00, 0x00, 0x10},
+ {0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x38},
+ 6,
+ DMX_IDX_MPEG_P_FRAME_START
+};
+
+static const struct dvb_dmx_video_patterns mpeg2_bframe = {
+ {0x00, 0x00, 0x01, 0x00, 0x00, 0x18},
+ {0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x38},
+ 6,
+ DMX_IDX_MPEG_B_FRAME_START
+};
+
+static const struct dvb_dmx_video_patterns h264_sps = {
+ {0x00, 0x00, 0x01, 0x07},
+ {0xFF, 0xFF, 0xFF, 0x1F},
+ 4,
+ DMX_IDX_H264_SPS
+};
+
+static const struct dvb_dmx_video_patterns h264_pps = {
+ {0x00, 0x00, 0x01, 0x08},
+ {0xFF, 0xFF, 0xFF, 0x1F},
+ 4,
+ DMX_IDX_H264_PPS
+};
+
+static const struct dvb_dmx_video_patterns h264_idr = {
+ {0x00, 0x00, 0x01, 0x05, 0x80},
+ {0xFF, 0xFF, 0xFF, 0x1F, 0x80},
+ 5,
+ DMX_IDX_H264_IDR_START
+};
+
+static const struct dvb_dmx_video_patterns h264_non_idr = {
+ {0x00, 0x00, 0x01, 0x01, 0x80},
+ {0xFF, 0xFF, 0xFF, 0x1F, 0x80},
+ 5,
+ DMX_IDX_H264_NON_IDR_START
+};
+
+/*
+ * Forbidden (1 bit) + NAL idc (2 bits) + NAL type (5 bits)
+ * I-Slice NAL idc = 3, NAL type = 5, 01100101 mask 0x7F
+ */
+static const struct dvb_dmx_video_patterns h264_idr_islice = {
+ {0x00, 0x00, 0x01, 0x65, 0x80},
+ {0xFF, 0xFF, 0xFF, 0x7F, 0x80},
+ 5,
+ DMX_IDX_H264_IDR_ISLICE_START
+};
+
+/*
+ * Forbidden (1 bit) + NAL idc (2 bits) + NAL type (5 bits)
+ * P-Slice NAL idc = 2, NAL type = 1, 01000001 mask 0x7F
+ */
+static const struct dvb_dmx_video_patterns h264_non_idr_pslice = {
+ {0x00, 0x00, 0x01, 0x41, 0x80},
+ {0xFF, 0xFF, 0xFF, 0x7F, 0x80},
+ 5,
+ DMX_IDX_H264_NON_IDR_PSLICE_START
+};
+
+/*
+ * Forbidden (1 bit) + NAL idc (2 bits) + NAL type (5 bits)
+ * B-Slice NAL idc = 0, NAL type = 1, 00000001 mask 0x7F
+ */
+static const struct dvb_dmx_video_patterns h264_non_idr_bslice = {
+ {0x00, 0x00, 0x01, 0x01, 0x80},
+ {0xFF, 0xFF, 0xFF, 0x7F, 0x80},
+ 5,
+ DMX_IDX_H264_NON_IDR_BSLICE_START
+};
+
+static const struct dvb_dmx_video_patterns h264_non_access_unit_del = {
+ {0x00, 0x00, 0x01, 0x09},
+ {0xFF, 0xFF, 0xFF, 0x1F},
+ 4,
+ DMX_IDX_H264_ACCESS_UNIT_DEL
+};
+
+static const struct dvb_dmx_video_patterns h264_non_sei = {
+ {0x00, 0x00, 0x01, 0x06},
+ {0xFF, 0xFF, 0xFF, 0x1F},
+ 4,
+ DMX_IDX_H264_SEI
+};
+
+static const struct dvb_dmx_video_patterns vc1_seq_hdr = {
+ {0x00, 0x00, 0x01, 0x0F},
+ {0xFF, 0xFF, 0xFF, 0xFF},
+ 4,
+ DMX_IDX_VC1_SEQ_HEADER
+};
+
+static const struct dvb_dmx_video_patterns vc1_entry_point = {
+ {0x00, 0x00, 0x01, 0x0E},
+ {0xFF, 0xFF, 0xFF, 0xFF},
+ 4,
+ DMX_IDX_VC1_ENTRY_POINT
+};
+
+static const struct dvb_dmx_video_patterns vc1_frame = {
+ {0x00, 0x00, 0x01, 0x0D},
+ {0xFF, 0xFF, 0xFF, 0xFF},
+ 4,
+ DMX_IDX_VC1_FRAME_START
+};
+
+
/******************************************************************************
* static inlined helper functions
******************************************************************************/
@@ -69,9 +209,9 @@
return 3 + ((buf[1] & 0x0f) << 8) + buf[2];
}
-static inline u16 ts_pid(const u8 *buf)
+static inline u8 ts_scrambling_ctrl(const u8 *buf)
{
- return ((buf[1] & 0x1f) << 8) + buf[2];
+ return (buf[3] >> 6) & 0x3;
}
static inline u8 payload(const u8 *tsp)
@@ -100,37 +240,355 @@
memcpy(d, s, len);
}
+static u32 dvb_dmx_calc_time_delta(ktime_t past_time)
+{
+ ktime_t curr_time = ktime_get();
+ s64 delta_time_us = ktime_us_delta(curr_time, past_time);
+
+ return (u32)delta_time_us;
+}
+
/******************************************************************************
* Software filter functions
******************************************************************************/
+/*
+ * Check if two patterns are identical, taking mask into consideration.
+ * @pattern1: the first byte pattern to compare.
+ * @pattern2: the second byte pattern to compare.
+ * @mask: the bit mask to use.
+ * @pattern_size: the length of both patterns and the mask, in bytes.
+ *
+ * Return: 1 if patterns match, 0 otherwise.
+ */
+static inline int dvb_dmx_patterns_match(const u8 *pattern1, const u8 *pattern2,
+ const u8 *mask, size_t pattern_size)
+{
+ int i;
+
+ /*
+ * Assumption: it is OK to access pattern1, pattern2 and mask.
+ * This function performs no sanity checks to keep things fast.
+ */
+
+ for (i = 0; i < pattern_size; i++)
+ if ((pattern1[i] & mask[i]) != (pattern2[i] & mask[i]))
+ return 0;
+
+ return 1;
+}
+
+/*
+ * dvb_dmx_video_pattern_search -
+ * search for framing patterns in a given buffer.
+ *
+ * Optimized version: first search for a common substring, e.g. 0x00 0x00 0x01.
+ * If this string is found, go over all the given patterns (all must start
+ * with this string) and search for their ending in the buffer.
+ *
+ * Assumption: the patterns we look for do not spread over more than two
+ * buffers.
+ *
+ * @patterns: the full patterns information to look for.
+ * @patterns_num: the number of patterns to look for.
+ * @buf: the buffer to search.
+ * @buf_size: the size of the buffer to search. we search the entire buffer.
+ * @prefix_size_masks: a bit mask (per pattern) of possible prefix sizes to use
+ * when searching for a pattern that started at the last buffer.
+ * Updated in this function for use in the next lookup.
+ * @results: lookup results (offset, type, used_prefix_size) per found pattern,
+ * up to DVB_DMX_MAX_FOUND_PATTERNS.
+ *
+ * Return:
+ * Number of patterns found (up to DVB_DMX_MAX_FOUND_PATTERNS).
+ * 0 if pattern was not found.
+ * error value on failure.
+ */
+int dvb_dmx_video_pattern_search(
+ const struct dvb_dmx_video_patterns
+ *patterns[DVB_DMX_MAX_SEARCH_PATTERN_NUM],
+ int patterns_num,
+ const u8 *buf,
+ size_t buf_size,
+ struct dvb_dmx_video_prefix_size_masks *prefix_size_masks,
+ struct dvb_dmx_video_patterns_results *results)
+{
+ int i, j;
+ unsigned int current_size;
+ u32 prefix;
+ int found = 0;
+ int start_offset = 0;
+ /* the starting common substring to look for */
+ u8 string[] = {0x00, 0x00, 0x01};
+ /* the mask for the starting string */
+ u8 string_mask[] = {0xFF, 0xFF, 0xFF};
+ /* the size of the starting string (in bytes) */
+ size_t string_size = 3;
+
+ if ((patterns == NULL) || (patterns_num <= 0) || (buf == NULL))
+ return -EINVAL;
+
+ memset(results, 0, sizeof(struct dvb_dmx_video_patterns_results));
+
+ /*
+ * handle prefix - disregard string, simply check all patterns,
+ * looking for a matching suffix at the very beginning of the buffer.
+ */
+ for (j = 0; (j < patterns_num) && !found; j++) {
+ prefix = prefix_size_masks->size_mask[j];
+ current_size = 32;
+ while (prefix) {
+ if (prefix & (0x1 << (current_size - 1))) {
+ /*
+ * check that we don't look further
+ * than buf_size boundary
+ */
+ if ((int)(patterns[j]->size - current_size) >
+ buf_size)
+ break;
+
+ if (dvb_dmx_patterns_match(
+ (patterns[j]->pattern + current_size),
+ buf, (patterns[j]->mask + current_size),
+ (patterns[j]->size - current_size))) {
+
+ /*
+ * pattern found using prefix at the
+ * very beginning of the buffer, so
+ * offset is 0, but we already zeroed
+ * everything in the beginning of the
+ * function. that's why the next line
+ * is commented.
+ */
+ /* results->info[found].offset = 0; */
+ results->info[found].type =
+ patterns[j]->type;
+ results->info[found].used_prefix_size =
+ current_size;
+ found++;
+ /*
+ * save offset to start looking from
+ * in the buffer, to avoid reusing the
+ * data of a pattern we already found.
+ */
+ start_offset = (patterns[j]->size -
+ current_size);
+
+ if (found >= DVB_DMX_MAX_FOUND_PATTERNS)
+ goto next_prefix_lookup;
+ /*
+ * we don't want to search for the same
+ * pattern with several possible prefix
+ * sizes if we have already found it,
+ * so we break from the inner loop.
+ * since we incremented 'found', we
+ * will not search for additional
+ * patterns using a prefix - that would
+ * imply ambiguous patterns where one
+ * pattern can be included in another.
+ * the for loop will exit.
+ */
+ break;
+ }
+ }
+ prefix &= ~(0x1 << (current_size - 1));
+ current_size--;
+ }
+ }
+
+ /*
+ * Search buffer for entire pattern, starting with the string.
+ * Note the external for loop does not execute if buf_size is
+ * smaller than string_size (the cast to int is required, since
+ * size_t is unsigned).
+ */
+ for (i = start_offset; i < (int)(buf_size - string_size + 1); i++) {
+ if (dvb_dmx_patterns_match(string, (buf + i), string_mask,
+ string_size)) {
+ /* now search for patterns: */
+ for (j = 0; j < patterns_num; j++) {
+ /* avoid overflow to next buffer */
+ if ((i + patterns[j]->size) > buf_size)
+ continue;
+
+ if (dvb_dmx_patterns_match(
+ (patterns[j]->pattern + string_size),
+ (buf + i + string_size),
+ (patterns[j]->mask + string_size),
+ (patterns[j]->size - string_size))) {
+
+ results->info[found].offset = i;
+ results->info[found].type =
+ patterns[j]->type;
+ /*
+ * save offset to start next prefix
+ * lookup, to avoid reusing the data
+ * of any pattern we already found.
+ */
+ if ((i + patterns[j]->size) >
+ start_offset)
+ start_offset = (i +
+ patterns[j]->size);
+ /*
+ * did not use a prefix to find this
+ * pattern, but we zeroed everything
+ * in the beginning of the function.
+ * So no need to zero used_prefix_size
+ * for results->info[found]
+ */
+
+ found++;
+ if (found >= DVB_DMX_MAX_FOUND_PATTERNS)
+ goto next_prefix_lookup;
+ /*
+ * theoretically we don't have to break
+ * here, but we don't want to search
+ * for the other matching patterns on
+ * the very same place in the
+ * buffer. That would mean the
+ * (pattern & mask) combinations are
+ * not unique. So we break from inner
+ * loop and move on to the next place
+ * in the buffer.
+ */
+ break;
+ }
+ }
+ }
+ }
+
+next_prefix_lookup:
+ /* check for possible prefix sizes for the next buffer */
+ for (j = 0; j < patterns_num; j++) {
+ prefix_size_masks->size_mask[j] = 0;
+ for (i = 1; i < patterns[j]->size; i++) {
+ /*
+ * avoid looking outside of the buffer
+ * or reusing previously used data.
+ */
+ if (i > (buf_size - start_offset))
+ break;
+
+ if (dvb_dmx_patterns_match(patterns[j]->pattern,
+ (buf + buf_size - i),
+ patterns[j]->mask, i)) {
+ prefix_size_masks->size_mask[j] |=
+ (1 << (i - 1));
+ }
+ }
+ }
+
+ return found;
+}
+EXPORT_SYMBOL(dvb_dmx_video_pattern_search);
+
+/**
+ * dvb_dmx_notify_section_event() - Notify demux event for all filters of a
+ * specified section feed.
+ *
+ * @feed: dvb_demux_feed object
+ * @event: demux event to notify
+ * @should_lock: specifies whether the function should lock the demux
+ *
+ * Caller is responsible for locking the demux properly, either by doing the
+ * locking itself and setting 'should_lock' to 0, or have the function do it
+ * by setting 'should_lock' to 1.
+ */
+int dvb_dmx_notify_section_event(struct dvb_demux_feed *feed,
+ struct dmx_data_ready *event, int should_lock)
+{
+ struct dvb_demux_filter *f;
+
+ if (feed == NULL || event == NULL || feed->type != DMX_TYPE_SEC)
+ return -EINVAL;
+
+ if (!should_lock && !spin_is_locked(&feed->demux->lock))
+ return -EINVAL;
+
+ if (should_lock)
+ spin_lock(&feed->demux->lock);
+
+ f = feed->filter;
+ while (f && feed->feed.sec.is_filtering) {
+ feed->data_ready_cb.sec(&f->filter, event);
+ f = f->next;
+ }
+
+ if (should_lock)
+ spin_unlock(&feed->demux->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(dvb_dmx_notify_section_event);
+
+static int dvb_dmx_check_pes_end(struct dvb_demux_feed *feed)
+{
+ struct dmx_data_ready data;
+
+ if (!feed->pusi_seen)
+ return 0;
+
+ data.status = DMX_OK_PES_END;
+ data.data_length = 0;
+ data.pes_end.start_gap = 0;
+ data.pes_end.actual_length = feed->peslen;
+ data.pes_end.disc_indicator_set = 0;
+ data.pes_end.pes_length_mismatch = 0;
+ data.pes_end.stc = 0;
+ data.pes_end.tei_counter = feed->pes_tei_counter;
+ data.pes_end.cont_err_counter = feed->pes_cont_err_counter;
+ data.pes_end.ts_packets_num = feed->pes_ts_packets_num;
+
+ return feed->data_ready_cb.ts(&feed->feed.ts, &data);
+}
+
static inline int dvb_dmx_swfilter_payload(struct dvb_demux_feed *feed,
const u8 *buf)
{
int count = payload(buf);
int p;
- //int ccok;
- //u8 cc;
+ int ccok;
+ u8 cc;
+ int ret;
if (count == 0)
return -1;
p = 188 - count;
- /*
cc = buf[3] & 0x0f;
- ccok = ((feed->cc + 1) & 0x0f) == cc;
+ if (feed->first_cc)
+ ccok = 1;
+ else
+ ccok = ((feed->cc + 1) & 0x0f) == cc;
+
+ feed->first_cc = 0;
feed->cc = cc;
- if (!ccok)
- printk("missed packet!\n");
- */
- if (buf[1] & 0x40) // PUSI ?
- feed->peslen = 0xfffa;
+ /* PUSI ? */
+ if (buf[1] & 0x40) {
+ dvb_dmx_check_pes_end(feed);
+ feed->pusi_seen = 1;
+ feed->peslen = 0;
+ feed->pes_tei_counter = 0;
+ feed->pes_cont_err_counter = 0;
+ feed->pes_ts_packets_num = 0;
+ }
- feed->peslen += count;
+ if (feed->pusi_seen == 0)
+ return 0;
- return feed->cb.ts(&buf[p], count, NULL, 0, &feed->feed.ts);
+ ret = feed->cb.ts(&buf[p], count, NULL, 0, &feed->feed.ts);
+
+ /* Verify TS packet was copied successfully */
+ if (!ret) {
+ feed->pes_cont_err_counter += !ccok;
+ feed->pes_tei_counter += (buf[1] & 0x80) ? 1 : 0;
+ feed->pes_ts_packets_num++;
+ feed->peslen += count;
+ }
+
+ return ret;
}
static int dvb_dmx_swfilter_sectionfilter(struct dvb_demux_feed *feed,
@@ -169,10 +627,28 @@
return 0;
if (sec->check_crc) {
+ ktime_t pre_crc_time = ktime_set(0, 0);
+
+ if (dvb_demux_performancecheck)
+ pre_crc_time = ktime_get();
+
section_syntax_indicator = ((sec->secbuf[1] & 0x80) != 0);
if (section_syntax_indicator &&
- demux->check_crc32(feed, sec->secbuf, sec->seclen))
+ demux->check_crc32(feed, sec->secbuf, sec->seclen)) {
+ if (dvb_demux_performancecheck)
+ demux->total_crc_time +=
+ dvb_dmx_calc_time_delta(pre_crc_time);
+
+ /* Notify on CRC error */
+ feed->cb.sec(NULL, 0, NULL, 0,
+ &f->filter);
+
return -1;
+ }
+
+ if (dvb_demux_performancecheck)
+ demux->total_crc_time +=
+ dvb_dmx_calc_time_delta(pre_crc_time);
}
do {
@@ -287,7 +763,7 @@
return 0;
}
-static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,
+static int dvb_dmx_swfilter_section_one_packet(struct dvb_demux_feed *feed,
const u8 *buf)
{
u8 p, count;
@@ -302,7 +778,16 @@
p = 188 - count; /* payload start */
cc = buf[3] & 0x0f;
- ccok = ((feed->cc + 1) & 0x0f) == cc;
+ if (feed->first_cc)
+ ccok = 1;
+ else
+ ccok = ((feed->cc + 1) & 0x0f) == cc;
+
+ /* discard TS packets holding sections with TEI bit set */
+ if (buf[1] & 0x80)
+ return -EINVAL;
+
+ feed->first_cc = 0;
feed->cc = cc;
if (buf[3] & 0x20) {
@@ -356,28 +841,668 @@
return 0;
}
-static inline void dvb_dmx_swfilter_packet_type(struct dvb_demux_feed *feed,
- const u8 *buf)
+/*
+ * dvb_dmx_swfilter_section_packet - wrapper for section filtering of single
+ * TS packet.
+ *
+ * @feed: dvb demux feed
+ * @buf: buffer containing the TS packet
+ * @should_lock: specifies demux locking semantics: if not set, proper demux
+ * locking is expected to have been done by the caller.
+ *
+ * Return error status
+ */
+int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,
+ const u8 *buf, int should_lock)
{
+ int ret;
+
+ if (!should_lock && !spin_is_locked(&feed->demux->lock)) {
+ pr_err("%s: demux spinlock should have been locked\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (should_lock)
+ spin_lock(&feed->demux->lock);
+
+ ret = dvb_dmx_swfilter_section_one_packet(feed, buf);
+
+ if (should_lock)
+ spin_unlock(&feed->demux->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(dvb_dmx_swfilter_section_packet);
+
+static int dvb_demux_idx_event_sort(struct dmx_index_event_info *curr,
+ struct dmx_index_event_info *new)
+{
+ if (curr->match_tsp_num > new->match_tsp_num)
+ return 0;
+
+ if (curr->match_tsp_num < new->match_tsp_num)
+ return 1;
+ /*
+ * In case TSP numbers are equal, sort according to event type giving
+ * priority to PUSI events first, then RAI and finally framing events.
+ */
+ if ((curr->type & DMX_IDX_RAI && new->type & DMX_IDX_PUSI) ||
+ (!(curr->type & DMX_IDX_PUSI) && !(curr->type & DMX_IDX_RAI) &&
+ new->type & (DMX_IDX_PUSI | DMX_IDX_RAI)))
+ return 0;
+
+ return 1;
+}
+
+static int dvb_demux_save_idx_event(struct dvb_demux_feed *feed,
+ struct dmx_index_event_info *idx_event,
+ int traverse_from_tail)
+{
+ struct dmx_index_entry *idx_entry;
+ struct dmx_index_entry *curr_entry;
+ struct list_head *pos;
+
+ /* get entry from free list */
+ if (list_empty(&feed->rec_info->idx_info.free_list)) {
+ pr_err("%s: index free list is empty\n", __func__);
+ return -ENOMEM;
+ }
+
+ idx_entry = list_first_entry(&feed->rec_info->idx_info.free_list,
+ struct dmx_index_entry, next);
+ list_del(&idx_entry->next);
+
+ idx_entry->event = *idx_event;
+
+ pos = &feed->rec_info->idx_info.ready_list;
+ if (traverse_from_tail) {
+ list_for_each_entry_reverse(curr_entry,
+ &feed->rec_info->idx_info.ready_list, next) {
+ if (dvb_demux_idx_event_sort(&curr_entry->event,
+ idx_event)) {
+ pos = &curr_entry->next;
+ break;
+ }
+ }
+ } else {
+ list_for_each_entry(curr_entry,
+ &feed->rec_info->idx_info.ready_list, next) {
+ if (!dvb_demux_idx_event_sort(&curr_entry->event,
+ idx_event)) {
+ pos = &curr_entry->next;
+ break;
+ }
+ }
+ }
+
+ if (traverse_from_tail)
+ list_add(&idx_entry->next, pos);
+ else
+ list_add_tail(&idx_entry->next, pos);
+
+ return 0;
+}
+
+int dvb_demux_push_idx_event(struct dvb_demux_feed *feed,
+ struct dmx_index_event_info *idx_event, int should_lock)
+{
+ int ret;
+
+ if (!should_lock && !spin_is_locked(&feed->demux->lock))
+ return -EINVAL;
+
+ if (should_lock)
+ spin_lock(&feed->demux->lock);
+ ret = dvb_demux_save_idx_event(feed, idx_event, 1);
+ if (should_lock)
+ spin_unlock(&feed->demux->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(dvb_demux_push_idx_event);
+
+static inline void dvb_dmx_notify_indexing(struct dvb_demux_feed *feed)
+{
+ struct dmx_data_ready dmx_data_ready;
+ struct dmx_index_entry *curr_entry;
+ struct list_head *n, *pos;
+
+ dmx_data_ready.status = DMX_OK_IDX;
+
+ list_for_each_safe(pos, n, &feed->rec_info->idx_info.ready_list) {
+ curr_entry = list_entry(pos, struct dmx_index_entry, next);
+
+ if ((feed->rec_info->idx_info.min_pattern_tsp_num == (u64)-1) ||
+ (curr_entry->event.match_tsp_num <=
+ feed->rec_info->idx_info.min_pattern_tsp_num)) {
+ dmx_data_ready.idx_event = curr_entry->event;
+ feed->data_ready_cb.ts(&feed->feed.ts, &dmx_data_ready);
+ list_del(&curr_entry->next);
+ list_add_tail(&curr_entry->next,
+ &feed->rec_info->idx_info.free_list);
+ }
+ }
+}
+
+void dvb_dmx_notify_idx_events(struct dvb_demux_feed *feed, int should_lock)
+{
+ if (!should_lock && !spin_is_locked(&feed->demux->lock))
+ return;
+
+ if (should_lock)
+ spin_lock(&feed->demux->lock);
+ dvb_dmx_notify_indexing(feed);
+ if (should_lock)
+ spin_unlock(&feed->demux->lock);
+}
+EXPORT_SYMBOL(dvb_dmx_notify_idx_events);
+
+static void dvb_dmx_process_pattern_result(struct dvb_demux_feed *feed,
+ struct dvb_dmx_video_patterns_results *patterns, int pattern,
+ u64 curr_stc, u64 prev_stc,
+ u64 curr_match_tsp, u64 prev_match_tsp,
+ u64 curr_pusi_tsp, u64 prev_pusi_tsp)
+{
+ int mpeg_frame_start;
+ int h264_frame_start;
+ int vc1_frame_start;
+ int seq_start;
+ u64 frame_end_in_seq;
+ struct dmx_index_event_info idx_event;
+
+ idx_event.pid = feed->pid;
+ if (patterns->info[pattern].used_prefix_size) {
+ idx_event.match_tsp_num = prev_match_tsp;
+ idx_event.last_pusi_tsp_num = prev_pusi_tsp;
+ idx_event.stc = prev_stc;
+ } else {
+ idx_event.match_tsp_num = curr_match_tsp;
+ idx_event.last_pusi_tsp_num = curr_pusi_tsp;
+ idx_event.stc = curr_stc;
+ }
+
+ /* notify on frame-end if needed */
+ if (feed->prev_frame_valid) {
+ if (feed->prev_frame_type & DMX_IDX_MPEG_I_FRAME_START) {
+ idx_event.type = DMX_IDX_MPEG_I_FRAME_END;
+ frame_end_in_seq = DMX_IDX_MPEG_FIRST_SEQ_FRAME_END;
+ } else if (feed->prev_frame_type & DMX_IDX_MPEG_P_FRAME_START) {
+ idx_event.type = DMX_IDX_MPEG_P_FRAME_END;
+ frame_end_in_seq = DMX_IDX_MPEG_FIRST_SEQ_FRAME_END;
+ } else if (feed->prev_frame_type & DMX_IDX_MPEG_B_FRAME_START) {
+ idx_event.type = DMX_IDX_MPEG_B_FRAME_END;
+ frame_end_in_seq = DMX_IDX_MPEG_FIRST_SEQ_FRAME_END;
+ } else if (feed->prev_frame_type & DMX_IDX_H264_IDR_START) {
+ idx_event.type = DMX_IDX_H264_IDR_END;
+ frame_end_in_seq = DMX_IDX_H264_FIRST_SPS_FRAME_END;
+ } else if (feed->prev_frame_type & DMX_IDX_H264_NON_IDR_START) {
+ idx_event.type = DMX_IDX_H264_NON_IDR_END;
+ frame_end_in_seq = DMX_IDX_H264_FIRST_SPS_FRAME_END;
+ } else if (feed->prev_frame_type &
+ DMX_IDX_H264_IDR_ISLICE_START) {
+ idx_event.type = DMX_IDX_H264_IDR_END;
+ frame_end_in_seq = DMX_IDX_H264_FIRST_SPS_FRAME_END;
+ } else if (feed->prev_frame_type &
+ DMX_IDX_H264_NON_IDR_PSLICE_START) {
+ idx_event.type = DMX_IDX_H264_NON_IDR_END;
+ frame_end_in_seq = DMX_IDX_H264_FIRST_SPS_FRAME_END;
+ } else if (feed->prev_frame_type &
+ DMX_IDX_H264_NON_IDR_BSLICE_START) {
+ idx_event.type = DMX_IDX_H264_NON_IDR_END;
+ frame_end_in_seq = DMX_IDX_H264_FIRST_SPS_FRAME_END;
+ } else {
+ idx_event.type = DMX_IDX_VC1_FRAME_END;
+ frame_end_in_seq = DMX_IDX_VC1_FIRST_SEQ_FRAME_END;
+ }
+
+ if (feed->idx_params.types & idx_event.type)
+ dvb_demux_save_idx_event(feed, &idx_event, 1);
+
+ if (feed->first_frame_in_seq_notified &&
+ feed->idx_params.types & frame_end_in_seq) {
+ idx_event.type = frame_end_in_seq;
+ dvb_demux_save_idx_event(feed, &idx_event, 1);
+ feed->first_frame_in_seq_notified = 0;
+ }
+ }
+
+ seq_start = patterns->info[pattern].type &
+ (DMX_IDX_MPEG_SEQ_HEADER | DMX_IDX_H264_SPS |
+ DMX_IDX_VC1_SEQ_HEADER);
+
+ /* did we find start of sequence/SPS? */
+ if (seq_start) {
+ feed->first_frame_in_seq = 1;
+ feed->first_frame_in_seq_notified = 0;
+ feed->prev_frame_valid = 0;
+ idx_event.type = patterns->info[pattern].type;
+ if (feed->idx_params.types & idx_event.type)
+ dvb_demux_save_idx_event(feed, &idx_event, 1);
+ return;
+ }
+
+ mpeg_frame_start = patterns->info[pattern].type &
+ (DMX_IDX_MPEG_I_FRAME_START |
+ DMX_IDX_MPEG_P_FRAME_START |
+ DMX_IDX_MPEG_B_FRAME_START);
+
+ h264_frame_start = patterns->info[pattern].type &
+ (DMX_IDX_H264_IDR_START | DMX_IDX_H264_NON_IDR_START);
+
+ vc1_frame_start = patterns->info[pattern].type &
+ DMX_IDX_VC1_FRAME_START;
+
+ if (!mpeg_frame_start && !h264_frame_start && !vc1_frame_start) {
+ /* neither sequence nor frame, notify on the entry if needed */
+ idx_event.type = patterns->info[pattern].type;
+ if (feed->idx_params.types & idx_event.type)
+ dvb_demux_save_idx_event(feed, &idx_event, 1);
+ feed->prev_frame_valid = 0;
+ return;
+ }
+
+ /* notify on first frame in sequence/sps if needed */
+ if (feed->first_frame_in_seq) {
+ feed->first_frame_in_seq = 0;
+ feed->first_frame_in_seq_notified = 1;
+ if (mpeg_frame_start)
+ idx_event.type = DMX_IDX_MPEG_FIRST_SEQ_FRAME_START;
+ else if (h264_frame_start)
+ idx_event.type = DMX_IDX_H264_FIRST_SPS_FRAME_START;
+ else
+ idx_event.type = DMX_IDX_VC1_FIRST_SEQ_FRAME_START;
+
+ if (feed->idx_params.types & idx_event.type)
+ dvb_demux_save_idx_event(feed, &idx_event, 1);
+ }
+
+ /* notify on frame start if needed */
+ idx_event.type = patterns->info[pattern].type;
+ if (feed->idx_params.types & idx_event.type)
+ dvb_demux_save_idx_event(feed, &idx_event, 1);
+
+ feed->prev_frame_valid = 1;
+ feed->prev_frame_type = patterns->info[pattern].type;
+}
+
+void dvb_dmx_process_idx_pattern(struct dvb_demux_feed *feed,
+ struct dvb_dmx_video_patterns_results *patterns, int pattern,
+ u64 curr_stc, u64 prev_stc,
+ u64 curr_match_tsp, u64 prev_match_tsp,
+ u64 curr_pusi_tsp, u64 prev_pusi_tsp)
+{
+ spin_lock(&feed->demux->lock);
+ dvb_dmx_process_pattern_result(feed,
+ patterns, pattern,
+ curr_stc, prev_stc,
+ curr_match_tsp, prev_match_tsp,
+ curr_pusi_tsp, prev_pusi_tsp);
+ spin_unlock(&feed->demux->lock);
+}
+EXPORT_SYMBOL(dvb_dmx_process_idx_pattern);
+
+static void dvb_dmx_index(struct dvb_demux_feed *feed,
+ const u8 *buf,
+ const u8 timestamp[TIMESTAMP_LEN])
+{
+ int i;
+ int p;
+ u64 stc;
+ int found_patterns;
+ int count = payload(buf);
+ u64 min_pattern_tsp_num;
+ struct dvb_demux_feed *tmp_feed;
+ struct dvb_demux *demux = feed->demux;
+ struct dmx_index_event_info idx_event;
+ struct dvb_dmx_video_patterns_results patterns;
+
+ if (feed->demux->convert_ts)
+ feed->demux->convert_ts(feed, timestamp, &stc);
+ else
+ stc = 0;
+
+ idx_event.pid = feed->pid;
+ idx_event.stc = stc;
+ idx_event.match_tsp_num = feed->rec_info->ts_output_count;
+
+ /* PUSI ? */
+ if (buf[1] & 0x40) {
+ feed->curr_pusi_tsp_num = feed->rec_info->ts_output_count;
+ if (feed->idx_params.types & DMX_IDX_PUSI) {
+ idx_event.type = DMX_IDX_PUSI;
+ idx_event.last_pusi_tsp_num =
+ feed->curr_pusi_tsp_num;
+ dvb_demux_save_idx_event(feed, &idx_event, 1);
+ }
+ }
+
+ /*
+ * if we still did not encounter a TS packet with PUSI indication,
+ * we cannot report index entries yet as we need to provide
+ * the TS packet number with PUSI indication preceding the TS
+ * packet pointed by the reported index entry.
+ */
+ if (feed->curr_pusi_tsp_num == (u64)-1) {
+ dvb_dmx_notify_indexing(feed);
+ return;
+ }
+
+ if ((feed->idx_params.types & DMX_IDX_RAI) && /* index RAI? */
+ (buf[3] & 0x20) && /* adaptation field exists? */
+ (buf[4] > 0) && /* adaptation field len > 0 ? */
+ (buf[5] & 0x40)) { /* RAI is set? */
+ idx_event.type = DMX_IDX_RAI;
+ idx_event.last_pusi_tsp_num =
+ feed->curr_pusi_tsp_num;
+ dvb_demux_save_idx_event(feed, &idx_event, 1);
+ }
+
+ /*
+ * if no pattern search is required, or the TS packet has no payload,
+ * pattern search is not executed.
+ */
+ if (!feed->pattern_num || !count) {
+ dvb_dmx_notify_indexing(feed);
+ return;
+ }
+
+ p = 188 - count; /* payload start */
+
+ found_patterns =
+ dvb_dmx_video_pattern_search(feed->patterns,
+ feed->pattern_num, &buf[p], count,
+ &feed->prefix_size, &patterns);
+
+ for (i = 0; i < found_patterns; i++)
+ dvb_dmx_process_pattern_result(feed, &patterns, i,
+ stc, feed->prev_stc,
+ feed->rec_info->ts_output_count, feed->prev_tsp_num,
+ feed->curr_pusi_tsp_num, feed->prev_pusi_tsp_num);
+
+ feed->prev_tsp_num = feed->rec_info->ts_output_count;
+ feed->prev_pusi_tsp_num = feed->curr_pusi_tsp_num;
+ feed->prev_stc = stc;
+ feed->last_pattern_tsp_num = feed->rec_info->ts_output_count;
+
+ /*
+ * it is possible to have a TS packet that has a prefix of
+ * a video pattern but the video pattern is not identified yet
+ * until we get the next TS packet of that PID. When we get
+ * the next TS packet of that PID, pattern-search would
+ * detect that we have a new index entry that starts in the
+ * previous TS packet.
+ * In order to notify the user on index entries with match_tsp_num
+ * in ascending order, index events with match_tsp_num up to
+ * the last_pattern_tsp_num are notified now to the user,
+ * the rest can't be notified now as we might hit the above
+ * scenario and cause the events not to be notified with
+ * ascending order of match_tsp_num.
+ */
+ if (feed->rec_info->idx_info.pattern_search_feeds_num == 1) {
+ /*
+ * optimization for case we have only one PID
+ * with video pattern search, in this case
+ * min_pattern_tsp_num is simply updated to the new
+ * TS packet number of the PID with pattern search.
+ */
+ feed->rec_info->idx_info.min_pattern_tsp_num =
+ feed->last_pattern_tsp_num;
+ dvb_dmx_notify_indexing(feed);
+ return;
+ }
+
+ /*
+ * if we have more than one PID with pattern search,
+ * min_pattern_tsp_num needs to be updated now based on
+ * last_pattern_tsp_num of all PIDs with pattern search.
+ */
+ min_pattern_tsp_num = (u64)-1;
+ i = feed->rec_info->idx_info.pattern_search_feeds_num;
+ list_for_each_entry(tmp_feed, &demux->feed_list, list_head) {
+ if ((tmp_feed->state != DMX_STATE_GO) ||
+ (tmp_feed->type != DMX_TYPE_TS) ||
+ (tmp_feed->feed.ts.buffer.ringbuff !=
+ feed->feed.ts.buffer.ringbuff))
+ continue;
+
+ if ((tmp_feed->last_pattern_tsp_num != (u64)-1) &&
+ ((min_pattern_tsp_num == (u64)-1) ||
+ (tmp_feed->last_pattern_tsp_num <
+ min_pattern_tsp_num)))
+ min_pattern_tsp_num = tmp_feed->last_pattern_tsp_num;
+
+ if (tmp_feed->pattern_num) {
+ i--;
+ if (i == 0)
+ break;
+ }
+ }
+
+ feed->rec_info->idx_info.min_pattern_tsp_num = min_pattern_tsp_num;
+
+ /* notify all index entries up to min_pattern_tsp_num */
+ dvb_dmx_notify_indexing(feed);
+}
+
+static inline void dvb_dmx_swfilter_output_packet(
+ struct dvb_demux_feed *feed,
+ const u8 *buf,
+ const u8 timestamp[TIMESTAMP_LEN])
+{
+ /*
+ * if we output 192 packet with timestamp at head of packet,
+ * output the timestamp now before the 188 TS packet
+ */
+ if (feed->tsp_out_format == DMX_TSP_FORMAT_192_HEAD)
+ feed->cb.ts(timestamp, TIMESTAMP_LEN, NULL,
+ 0, &feed->feed.ts);
+
+ feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts);
+
+ /*
+ * if we output 192 packet with timestamp at tail of packet,
+ * output the timestamp now after the 188 TS packet
+ */
+ if (feed->tsp_out_format == DMX_TSP_FORMAT_192_TAIL)
+ feed->cb.ts(timestamp, TIMESTAMP_LEN, NULL,
+ 0, &feed->feed.ts);
+
+ if (feed->idx_params.enable)
+ dvb_dmx_index(feed, buf, timestamp);
+
+ feed->rec_info->ts_output_count++;
+}
+
+static inline void dvb_dmx_configure_decoder_fullness(
+ struct dvb_demux *demux,
+ int initialize)
+{
+ struct dvb_demux_feed *feed;
+ int j;
+
+ for (j = 0; j < demux->feednum; j++) {
+ feed = &demux->feed[j];
+
+ if ((feed->state != DMX_STATE_GO) ||
+ (feed->type != DMX_TYPE_TS) ||
+ !(feed->ts_type & TS_DECODER))
+ continue;
+
+ if (initialize) {
+ if (demux->decoder_fullness_init)
+ demux->decoder_fullness_init(feed);
+ } else {
+ if (demux->decoder_fullness_abort)
+ demux->decoder_fullness_abort(feed);
+ }
+ }
+}
+
+static inline int dvb_dmx_swfilter_buffer_check(
+ struct dvb_demux *demux,
+ u16 pid)
+{
+ int desired_space;
+ int ret;
+ struct dmx_ts_feed *ts;
+ struct dvb_demux_filter *f;
+ struct dvb_demux_feed *feed;
+ int was_locked;
+ int i, j;
+
+ if (likely(spin_is_locked(&demux->lock)))
+ was_locked = 1;
+ else
+ was_locked = 0;
+
+ /*
+ * Check that there's enough free space for data output.
+ * If there no space, wait for it (block).
+ * Since this function is called while spinlock
+ * is acquired, the lock should be released first.
+ * Once we get control back, lock is acquired back
+ * and checks that the filter is still valid.
+ */
+ for (j = 0; j < demux->feednum; j++) {
+ feed = &demux->feed[j];
+
+ if (demux->sw_filter_abort)
+ return -ENODEV;
+
+ if ((feed->state != DMX_STATE_GO) ||
+ ((feed->pid != pid) && (feed->pid != 0x2000)))
+ continue;
+
+ if (feed->secure_mode.is_secured &&
+ !dvb_dmx_is_rec_feed(feed))
+ return 0;
+
+ if (feed->type == DMX_TYPE_TS) {
+ desired_space = 192; /* upper bound */
+ ts = &feed->feed.ts;
+
+ if (feed->ts_type & TS_PACKET) {
+ if (likely(was_locked))
+ spin_unlock(&demux->lock);
+
+ ret = demux->buffer_ctrl.ts(ts,
+ desired_space, 1);
+
+ if (likely(was_locked))
+ spin_lock(&demux->lock);
+
+ if (ret < 0)
+ continue;
+ }
+
+ if (demux->sw_filter_abort)
+ return -ENODEV;
+
+ if (!ts->is_filtering)
+ continue;
+
+ if ((feed->ts_type & TS_DECODER) &&
+ (demux->decoder_fullness_wait)) {
+ if (likely(was_locked))
+ spin_unlock(&demux->lock);
+
+ ret = demux->decoder_fullness_wait(
+ feed,
+ desired_space);
+
+ if (likely(was_locked))
+ spin_lock(&demux->lock);
+
+ if (ret < 0)
+ continue;
+ }
+
+ continue;
+ }
+
+ /* else - section case */
+ desired_space = feed->feed.sec.tsfeedp + 188; /* upper bound */
+ for (i = 0; i < demux->filternum; i++) {
+ if (demux->sw_filter_abort)
+ return -EPERM;
+
+ if (!feed->feed.sec.is_filtering)
+ continue;
+
+ f = &demux->filter[i];
+ if (f->feed != feed)
+ continue;
+
+ if (likely(was_locked))
+ spin_unlock(&demux->lock);
+
+ ret = demux->buffer_ctrl.sec(&f->filter,
+ desired_space, 1);
+
+ if (likely(was_locked))
+ spin_lock(&demux->lock);
+
+ if (ret < 0)
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static inline void dvb_dmx_swfilter_packet_type(struct dvb_demux_feed *feed,
+ const u8 *buf, const u8 timestamp[TIMESTAMP_LEN])
+{
+ u16 pid = ts_pid(buf);
+ u8 scrambling_bits = ts_scrambling_ctrl(buf);
+ struct dmx_data_ready dmx_data_ready;
+
+ /*
+ * Notify on scrambling status change only when we move
+ * from clear (0) to non-clear and vice versa
+ */
+ if ((scrambling_bits && !feed->scrambling_bits) ||
+ (!scrambling_bits && feed->scrambling_bits)) {
+ dmx_data_ready.status = DMX_OK_SCRAMBLING_STATUS;
+ dmx_data_ready.data_length = 0;
+ dmx_data_ready.scrambling_bits.pid = pid;
+ dmx_data_ready.scrambling_bits.old_value =
+ feed->scrambling_bits;
+ dmx_data_ready.scrambling_bits.new_value = scrambling_bits;
+
+ if (feed->type == DMX_TYPE_SEC)
+ dvb_dmx_notify_section_event(feed, &dmx_data_ready, 0);
+ else if (feed->feed.ts.is_filtering)
+ feed->data_ready_cb.ts(&feed->feed.ts, &dmx_data_ready);
+ }
+
+ feed->scrambling_bits = scrambling_bits;
+
switch (feed->type) {
case DMX_TYPE_TS:
if (!feed->feed.ts.is_filtering)
break;
if (feed->ts_type & TS_PACKET) {
- if (feed->ts_type & TS_PAYLOAD_ONLY)
- dvb_dmx_swfilter_payload(feed, buf);
- else
- feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts);
+ if (feed->ts_type & TS_PAYLOAD_ONLY) {
+ if (!feed->secure_mode.is_secured)
+ dvb_dmx_swfilter_payload(feed, buf);
+ } else {
+ dvb_dmx_swfilter_output_packet(feed,
+ buf, timestamp);
+ }
}
- if (feed->ts_type & TS_DECODER)
+ if ((feed->ts_type & TS_DECODER) &&
+ !feed->secure_mode.is_secured)
if (feed->demux->write_to_decoder)
feed->demux->write_to_decoder(feed, buf, 188);
break;
case DMX_TYPE_SEC:
- if (!feed->feed.sec.is_filtering)
+ if (!feed->feed.sec.is_filtering ||
+ feed->secure_mode.is_secured)
break;
- if (dvb_dmx_swfilter_section_packet(feed, buf) < 0)
+ if (dvb_dmx_swfilter_section_one_packet(feed, buf) < 0)
feed->feed.sec.seclen = feed->feed.sec.secbufp = 0;
break;
@@ -391,7 +1516,8 @@
((f)->feed.ts.is_filtering) && \
(((f)->ts_type & (TS_PACKET | TS_DEMUX)) == TS_PACKET))
-static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
+static void dvb_dmx_swfilter_one_packet(struct dvb_demux *demux, const u8 *buf,
+ const u8 timestamp[TIMESTAMP_LEN])
{
struct dvb_demux_feed *feed;
u16 pid = ts_pid(buf);
@@ -412,12 +1538,11 @@
* 188 * 8;
/* convert to 1024 basis */
speed_bytes = 1000 * div64_u64(speed_bytes,
- 1024);
+ 1024);
speed_timedelta = ktime_ms_delta(cur_time,
demux->speed_last_time);
- printk(KERN_INFO "TS speed %llu Kbits/sec \n",
- div64_u64(speed_bytes,
- speed_timedelta));
+ pr_info("TS speed %llu Kbits/sec\n",
+ div64_u64(speed_bytes, speed_timedelta));
}
demux->speed_last_time = cur_time;
@@ -426,11 +1551,12 @@
}
if (buf[1] & 0x80) {
- dprintk_tscheck("TEI detected. "
- "PID=0x%x data1=0x%x\n",
- pid, buf[1]);
- /* data in this packet can't be trusted - drop it unless
- * module option dvb_demux_feed_err_pkts is set */
+ dprintk_tscheck("TEI detected. PID=0x%x data1=0x%x\n", pid,
+ buf[1]);
+ /*
+ * data in this packet can't be trusted - drop it unless
+ * module option dvb_demux_feed_err_pkts is set
+ */
if (!dvb_demux_feed_err_pkts)
return;
} else /* if TEI bit is set, pid may be wrong- skip pkt counter */
@@ -439,10 +1565,12 @@
if (pid < MAX_PID) {
if (buf[3] & 0x10)
demux->cnt_storage[pid] =
- (demux->cnt_storage[pid] + 1) & 0xf;
+ (demux->cnt_storage[pid] + 1) &
+ 0xf;
if ((buf[3] & 0xf) != demux->cnt_storage[pid]) {
- dprintk_tscheck("TS packet counter mismatch. PID=0x%x expected 0x%x got 0x%x\n",
+ dprintk_tscheck(
+ "TS packet counter mismatch. PID=0x%x expected 0x%x got 0x%x\n",
pid, demux->cnt_storage[pid],
buf[3] & 0xf);
demux->cnt_storage[pid] = buf[3] & 0xf;
@@ -451,48 +1579,76 @@
/* end check */
}
+ if (demux->playback_mode == DMX_PB_MODE_PULL)
+ if (dvb_dmx_swfilter_buffer_check(demux, pid) < 0)
+ return;
+
list_for_each_entry(feed, &demux->feed_list, list_head) {
if ((feed->pid != pid) && (feed->pid != 0x2000))
continue;
- /* copy each packet only once to the dvr device, even
- * if a PID is in multiple filters (e.g. video + PCR) */
+ /*
+ * copy each packet only once to the dvr device, even
+ * if a PID is in multiple filters (e.g. video + PCR)
+ */
if ((DVR_FEED(feed)) && (dvr_done++))
continue;
if (feed->pid == pid)
- dvb_dmx_swfilter_packet_type(feed, buf);
- else if (feed->pid == 0x2000)
- feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts);
+ dvb_dmx_swfilter_packet_type(feed, buf, timestamp);
+ else if ((feed->pid == 0x2000) &&
+ (feed->feed.ts.is_filtering))
+ dvb_dmx_swfilter_output_packet(feed, buf, timestamp);
}
}
+void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf,
+ const u8 timestamp[TIMESTAMP_LEN])
+{
+ spin_lock(&demux->lock);
+ dvb_dmx_swfilter_one_packet(demux, buf, timestamp);
+ spin_unlock(&demux->lock);
+}
+EXPORT_SYMBOL(dvb_dmx_swfilter_packet);
+
void dvb_dmx_swfilter_packets(struct dvb_demux *demux, const u8 *buf,
size_t count)
{
- unsigned long flags;
+ ktime_t pre_time = ktime_set(0, 0);
+ u8 timestamp[TIMESTAMP_LEN] = {0};
- spin_lock_irqsave(&demux->lock, flags);
+ if (dvb_demux_performancecheck)
+ pre_time = ktime_get();
+
+ spin_lock(&demux->lock);
+
+ demux->sw_filter_abort = 0;
+ dvb_dmx_configure_decoder_fullness(demux, 1);
while (count--) {
if (buf[0] == 0x47)
- dvb_dmx_swfilter_packet(demux, buf);
+ dvb_dmx_swfilter_one_packet(demux, buf, timestamp);
buf += 188;
}
- spin_unlock_irqrestore(&demux->lock, flags);
-}
+ spin_unlock(&demux->lock);
+ if (dvb_demux_performancecheck)
+ demux->total_process_time += dvb_dmx_calc_time_delta(pre_time);
+}
EXPORT_SYMBOL(dvb_dmx_swfilter_packets);
static inline int find_next_packet(const u8 *buf, int pos, size_t count,
- const int pktsize)
+ const int pktsize, const int leadingbytes)
{
int start = pos, lost;
while (pos < count) {
- if (buf[pos] == 0x47 ||
- (pktsize == 204 && buf[pos] == 0xB8))
+ if ((buf[pos] == 0x47 && !leadingbytes) ||
+ (pktsize == 204 && buf[pos] == 0xB8) ||
+ (pktsize == 192 && leadingbytes &&
+ (pos+leadingbytes < count) &&
+ buf[pos+leadingbytes] == 0x47))
break;
pos++;
}
@@ -501,8 +1657,11 @@
if (lost) {
/* This garbage is part of a valid packet? */
int backtrack = pos - pktsize;
+
if (backtrack >= 0 && (buf[backtrack] == 0x47 ||
- (pktsize == 204 && buf[backtrack] == 0xB8)))
+ (pktsize == 204 && buf[backtrack] == 0xB8) ||
+ (pktsize == 192 &&
+ buf[backtrack+leadingbytes] == 0x47)))
return backtrack;
}
@@ -511,13 +1670,20 @@
/* Filter all pktsize= 188 or 204 sized packets and skip garbage. */
static inline void _dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf,
- size_t count, const int pktsize)
+ size_t count, const int pktsize, const int leadingbytes)
{
int p = 0, i, j;
const u8 *q;
- unsigned long flags;
+ ktime_t pre_time;
+ u8 timestamp[TIMESTAMP_LEN];
- spin_lock_irqsave(&demux->lock, flags);
+ if (dvb_demux_performancecheck)
+ pre_time = ktime_get();
+
+ spin_lock(&demux->lock);
+
+ demux->sw_filter_abort = 0;
+ dvb_dmx_configure_decoder_fullness(demux, 1);
if (demux->tsbufp) { /* tsbuf[0] is now 0x47. */
i = demux->tsbufp;
@@ -528,14 +1694,36 @@
goto bailout;
}
memcpy(&demux->tsbuf[i], buf, j);
- if (demux->tsbuf[0] == 0x47) /* double check */
- dvb_dmx_swfilter_packet(demux, demux->tsbuf);
+
+ if (pktsize == 192) {
+ if (leadingbytes)
+ memcpy(timestamp, &demux->tsbuf[p],
+ TIMESTAMP_LEN);
+ else
+ memcpy(timestamp, &demux->tsbuf[188],
+ TIMESTAMP_LEN);
+ } else {
+ memset(timestamp, 0, TIMESTAMP_LEN);
+ }
+
+ if (pktsize == 192 &&
+ leadingbytes &&
+ demux->tsbuf[leadingbytes] == 0x47) /* double check */
+ dvb_dmx_swfilter_one_packet(demux,
+ demux->tsbuf + TIMESTAMP_LEN, timestamp);
+ else if (demux->tsbuf[0] == 0x47) /* double check */
+ dvb_dmx_swfilter_one_packet(demux,
+ demux->tsbuf, timestamp);
demux->tsbufp = 0;
p += j;
}
while (1) {
- p = find_next_packet(buf, p, count, pktsize);
+ p = find_next_packet(buf, p, count, pktsize, leadingbytes);
+
+ if (demux->sw_filter_abort)
+ goto bailout;
+
if (p >= count)
break;
if (count - p < pktsize)
@@ -548,7 +1736,19 @@
demux->tsbuf[0] = 0x47;
q = demux->tsbuf;
}
- dvb_dmx_swfilter_packet(demux, q);
+
+ if (pktsize == 192) {
+ if (leadingbytes) {
+ q = &buf[p+leadingbytes];
+ memcpy(timestamp, &buf[p], TIMESTAMP_LEN);
+ } else {
+ memcpy(timestamp, &buf[p+188], TIMESTAMP_LEN);
+ }
+ } else {
+ memset(timestamp, 0, TIMESTAMP_LEN);
+ }
+
+ dvb_dmx_swfilter_one_packet(demux, q, timestamp);
p += pktsize;
}
@@ -561,33 +1761,65 @@
}
bailout:
- spin_unlock_irqrestore(&demux->lock, flags);
+ spin_unlock(&demux->lock);
+
+ if (dvb_demux_performancecheck)
+ demux->total_process_time += dvb_dmx_calc_time_delta(pre_time);
}
void dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, size_t count)
{
- _dvb_dmx_swfilter(demux, buf, count, 188);
+ _dvb_dmx_swfilter(demux, buf, count, 188, 0);
}
EXPORT_SYMBOL(dvb_dmx_swfilter);
void dvb_dmx_swfilter_204(struct dvb_demux *demux, const u8 *buf, size_t count)
{
- _dvb_dmx_swfilter(demux, buf, count, 204);
+ _dvb_dmx_swfilter(demux, buf, count, 204, 0);
}
EXPORT_SYMBOL(dvb_dmx_swfilter_204);
void dvb_dmx_swfilter_raw(struct dvb_demux *demux, const u8 *buf, size_t count)
{
- unsigned long flags;
-
- spin_lock_irqsave(&demux->lock, flags);
+ spin_lock(&demux->lock);
demux->feed->cb.ts(buf, count, NULL, 0, &demux->feed->feed.ts);
- spin_unlock_irqrestore(&demux->lock, flags);
+ spin_unlock(&demux->lock);
}
EXPORT_SYMBOL(dvb_dmx_swfilter_raw);
+void dvb_dmx_swfilter_format(
+ struct dvb_demux *demux,
+ const u8 *buf,
+ size_t count,
+ enum dmx_tsp_format_t tsp_format)
+{
+ switch (tsp_format) {
+ case DMX_TSP_FORMAT_188:
+ _dvb_dmx_swfilter(demux, buf, count, 188, 0);
+ break;
+
+ case DMX_TSP_FORMAT_192_TAIL:
+ _dvb_dmx_swfilter(demux, buf, count, 192, 0);
+ break;
+
+ case DMX_TSP_FORMAT_192_HEAD:
+ _dvb_dmx_swfilter(demux, buf, count, 192, TIMESTAMP_LEN);
+ break;
+
+ case DMX_TSP_FORMAT_204:
+ _dvb_dmx_swfilter(demux, buf, count, 204, 0);
+ break;
+
+ default:
+ pr_err("%s: invalid TS packet format (format=%d)\n", __func__,
+ tsp_format);
+ break;
+ }
+}
+EXPORT_SYMBOL(dvb_dmx_swfilter_format);
+
static struct dvb_demux_filter *dvb_dmx_filter_alloc(struct dvb_demux *demux)
{
int i;
@@ -620,6 +1852,311 @@
return &demux->feed[i];
}
+const struct dvb_dmx_video_patterns *dvb_dmx_get_pattern(u64 dmx_idx_pattern)
+{
+ switch (dmx_idx_pattern) {
+ case DMX_IDX_MPEG_SEQ_HEADER:
+ return &mpeg2_seq_hdr;
+
+ case DMX_IDX_MPEG_GOP:
+ return &mpeg2_gop;
+
+ case DMX_IDX_MPEG_I_FRAME_START:
+ return &mpeg2_iframe;
+
+ case DMX_IDX_MPEG_P_FRAME_START:
+ return &mpeg2_pframe;
+
+ case DMX_IDX_MPEG_B_FRAME_START:
+ return &mpeg2_bframe;
+
+ case DMX_IDX_H264_SPS:
+ return &h264_sps;
+
+ case DMX_IDX_H264_PPS:
+ return &h264_pps;
+
+ case DMX_IDX_H264_IDR_START:
+ return &h264_idr;
+
+ case DMX_IDX_H264_NON_IDR_START:
+ return &h264_non_idr;
+
+ case DMX_IDX_H264_IDR_ISLICE_START:
+ return &h264_idr_islice;
+
+ case DMX_IDX_H264_NON_IDR_PSLICE_START:
+ return &h264_non_idr_pslice;
+
+ case DMX_IDX_H264_NON_IDR_BSLICE_START:
+ return &h264_non_idr_bslice;
+
+ case DMX_IDX_H264_ACCESS_UNIT_DEL:
+ return &h264_non_access_unit_del;
+
+ case DMX_IDX_H264_SEI:
+ return &h264_non_sei;
+
+ case DMX_IDX_VC1_SEQ_HEADER:
+ return &vc1_seq_hdr;
+
+ case DMX_IDX_VC1_ENTRY_POINT:
+ return &vc1_entry_point;
+
+ case DMX_IDX_VC1_FRAME_START:
+ return &vc1_frame;
+
+ default:
+ return NULL;
+ }
+}
+EXPORT_SYMBOL(dvb_dmx_get_pattern);
+
+static void dvb_dmx_init_idx_state(struct dvb_demux_feed *feed)
+{
+ feed->prev_tsp_num = (u64)-1;
+ feed->curr_pusi_tsp_num = (u64)-1;
+ feed->prev_pusi_tsp_num = (u64)-1;
+ feed->prev_frame_valid = 0;
+ feed->first_frame_in_seq = 0;
+ feed->first_frame_in_seq_notified = 0;
+ feed->last_pattern_tsp_num = (u64)-1;
+ feed->pattern_num = 0;
+ memset(&feed->prefix_size, 0,
+ sizeof(struct dvb_dmx_video_prefix_size_masks));
+
+ if (feed->idx_params.types &
+ (DMX_IDX_MPEG_SEQ_HEADER |
+ DMX_IDX_MPEG_FIRST_SEQ_FRAME_START |
+ DMX_IDX_MPEG_FIRST_SEQ_FRAME_END)) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_MPEG_SEQ_HEADER);
+ feed->pattern_num++;
+ }
+
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types & DMX_IDX_MPEG_GOP)) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_MPEG_GOP);
+ feed->pattern_num++;
+ }
+
+ /* MPEG2 I-frame */
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types &
+ (DMX_IDX_MPEG_I_FRAME_START | DMX_IDX_MPEG_I_FRAME_END |
+ DMX_IDX_MPEG_P_FRAME_END | DMX_IDX_MPEG_B_FRAME_END |
+ DMX_IDX_MPEG_FIRST_SEQ_FRAME_START |
+ DMX_IDX_MPEG_FIRST_SEQ_FRAME_END))) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_MPEG_I_FRAME_START);
+ feed->pattern_num++;
+ }
+
+ /* MPEG2 P-frame */
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types &
+ (DMX_IDX_MPEG_P_FRAME_START | DMX_IDX_MPEG_P_FRAME_END |
+ DMX_IDX_MPEG_I_FRAME_END | DMX_IDX_MPEG_B_FRAME_END |
+ DMX_IDX_MPEG_FIRST_SEQ_FRAME_START |
+ DMX_IDX_MPEG_FIRST_SEQ_FRAME_END))) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_MPEG_P_FRAME_START);
+ feed->pattern_num++;
+ }
+
+ /* MPEG2 B-frame */
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types &
+ (DMX_IDX_MPEG_B_FRAME_START | DMX_IDX_MPEG_B_FRAME_END |
+ DMX_IDX_MPEG_I_FRAME_END | DMX_IDX_MPEG_P_FRAME_END |
+ DMX_IDX_MPEG_FIRST_SEQ_FRAME_START |
+ DMX_IDX_MPEG_FIRST_SEQ_FRAME_END))) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_MPEG_B_FRAME_START);
+ feed->pattern_num++;
+ }
+
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types &
+ (DMX_IDX_H264_SPS |
+ DMX_IDX_H264_FIRST_SPS_FRAME_START |
+ DMX_IDX_H264_FIRST_SPS_FRAME_END))) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_H264_SPS);
+ feed->pattern_num++;
+ }
+
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types & DMX_IDX_H264_PPS)) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_H264_PPS);
+ feed->pattern_num++;
+ }
+
+ /* H264 IDR */
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types &
+ (DMX_IDX_H264_IDR_START | DMX_IDX_H264_IDR_END |
+ DMX_IDX_H264_NON_IDR_END |
+ DMX_IDX_H264_FIRST_SPS_FRAME_START |
+ DMX_IDX_H264_FIRST_SPS_FRAME_END))) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_H264_IDR_START);
+ feed->pattern_num++;
+ }
+
+ /* H264 non-IDR */
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types &
+ (DMX_IDX_H264_NON_IDR_START | DMX_IDX_H264_NON_IDR_END |
+ DMX_IDX_H264_IDR_END |
+ DMX_IDX_H264_FIRST_SPS_FRAME_START |
+ DMX_IDX_H264_FIRST_SPS_FRAME_END))) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_H264_NON_IDR_START);
+ feed->pattern_num++;
+ }
+
+ /* H264 IDR ISlice */
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types &
+ (DMX_IDX_H264_IDR_ISLICE_START | DMX_IDX_H264_IDR_END |
+ DMX_IDX_H264_NON_IDR_END |
+ DMX_IDX_H264_FIRST_SPS_FRAME_START |
+ DMX_IDX_H264_FIRST_SPS_FRAME_END))) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_H264_IDR_ISLICE_START);
+ feed->pattern_num++;
+ }
+ /* H264 non-IDR PSlice */
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types &
+ (DMX_IDX_H264_NON_IDR_PSLICE_START | DMX_IDX_H264_NON_IDR_END |
+ DMX_IDX_H264_IDR_END |
+ DMX_IDX_H264_FIRST_SPS_FRAME_START |
+ DMX_IDX_H264_FIRST_SPS_FRAME_END))) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_H264_NON_IDR_PSLICE_START);
+ feed->pattern_num++;
+ }
+ /* H264 non-IDR BSlice */
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types &
+ (DMX_IDX_H264_NON_IDR_BSLICE_START | DMX_IDX_H264_NON_IDR_END |
+ DMX_IDX_H264_IDR_END |
+ DMX_IDX_H264_FIRST_SPS_FRAME_START |
+ DMX_IDX_H264_FIRST_SPS_FRAME_END))) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_H264_NON_IDR_BSLICE_START);
+ feed->pattern_num++;
+ }
+
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types & DMX_IDX_H264_ACCESS_UNIT_DEL)) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_H264_ACCESS_UNIT_DEL);
+ feed->pattern_num++;
+ }
+
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types & DMX_IDX_H264_SEI)) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_H264_SEI);
+ feed->pattern_num++;
+ }
+
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types &
+ (DMX_IDX_VC1_SEQ_HEADER |
+ DMX_IDX_VC1_FIRST_SEQ_FRAME_START |
+ DMX_IDX_VC1_FIRST_SEQ_FRAME_END))) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_VC1_SEQ_HEADER);
+ feed->pattern_num++;
+ }
+
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types & DMX_IDX_VC1_ENTRY_POINT)) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_VC1_ENTRY_POINT);
+ feed->pattern_num++;
+ }
+
+ /* VC1 frame */
+ if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) &&
+ (feed->idx_params.types &
+ (DMX_IDX_VC1_FRAME_START | DMX_IDX_VC1_FRAME_END |
+ DMX_IDX_VC1_FIRST_SEQ_FRAME_START |
+ DMX_IDX_VC1_FIRST_SEQ_FRAME_END))) {
+ feed->patterns[feed->pattern_num] =
+ dvb_dmx_get_pattern(DMX_IDX_VC1_FRAME_START);
+ feed->pattern_num++;
+ }
+
+ if (feed->pattern_num)
+ feed->rec_info->idx_info.pattern_search_feeds_num++;
+}
+
+static struct dvb_demux_rec_info *dvb_dmx_alloc_rec_info(
+ struct dmx_ts_feed *ts_feed)
+{
+ int i;
+ struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+ struct dvb_demux *demux = feed->demux;
+ struct dvb_demux_rec_info *rec_info;
+ struct dvb_demux_feed *tmp_feed;
+
+ /* check if this feed share recording buffer with other active feeds */
+ list_for_each_entry(tmp_feed, &demux->feed_list, list_head) {
+ if ((tmp_feed->state == DMX_STATE_GO) &&
+ (tmp_feed->type == DMX_TYPE_TS) &&
+ (tmp_feed != feed) &&
+ (tmp_feed->feed.ts.buffer.ringbuff ==
+ ts_feed->buffer.ringbuff)) {
+ /* indexing information is shared between the feeds */
+ tmp_feed->rec_info->ref_count++;
+ return tmp_feed->rec_info;
+ }
+ }
+
+ /* Need to allocate a new indexing info */
+ for (i = 0; i < demux->feednum; i++)
+ if (!demux->rec_info_pool[i].ref_count)
+ break;
+
+ if (i == demux->feednum)
+ return NULL;
+
+ rec_info = &demux->rec_info_pool[i];
+ rec_info->ref_count++;
+ INIT_LIST_HEAD(&rec_info->idx_info.free_list);
+ INIT_LIST_HEAD(&rec_info->idx_info.ready_list);
+
+ for (i = 0; i < DMX_IDX_EVENT_QUEUE_SIZE; i++)
+ list_add(&rec_info->idx_info.events[i].next,
+ &rec_info->idx_info.free_list);
+
+ rec_info->ts_output_count = 0;
+ rec_info->idx_info.min_pattern_tsp_num = (u64)-1;
+ rec_info->idx_info.pattern_search_feeds_num = 0;
+ rec_info->idx_info.indexing_feeds_num = 0;
+
+ return rec_info;
+}
+
+static void dvb_dmx_free_rec_info(struct dmx_ts_feed *ts_feed)
+{
+ struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+
+ if (!feed->rec_info || !feed->rec_info->ref_count) {
+ pr_err("%s: invalid idx info state\n", __func__);
+ return;
+ }
+
+ feed->rec_info->ref_count--;
+}
+
static int dvb_demux_feed_find(struct dvb_demux_feed *feed)
{
struct dvb_demux_feed *entry;
@@ -635,7 +2172,7 @@
{
spin_lock_irq(&feed->demux->lock);
if (dvb_demux_feed_find(feed)) {
- printk(KERN_ERR "%s: feed already in list (type=%x state=%x pid=%x)\n",
+ pr_err("%s: feed already in list (type=%x state=%x pid=%x)\n",
__func__, feed->type, feed->state, feed->pid);
goto out;
}
@@ -649,7 +2186,7 @@
{
spin_lock_irq(&feed->demux->lock);
if (!(dvb_demux_feed_find(feed))) {
- printk(KERN_ERR "%s: feed not in list (type=%x state=%x pid=%x)\n",
+ pr_err("%s: feed not in list (type=%x state=%x pid=%x)\n",
__func__, feed->type, feed->state, feed->pid);
goto out;
}
@@ -733,7 +2270,34 @@
return -ENODEV;
}
- if ((ret = demux->start_feed(feed)) < 0) {
+ feed->first_cc = 1;
+ feed->scrambling_bits = 0;
+
+ if ((feed->ts_type & TS_PACKET) &&
+ !(feed->ts_type & TS_PAYLOAD_ONLY)) {
+ feed->rec_info = dvb_dmx_alloc_rec_info(ts_feed);
+ if (!feed->rec_info) {
+ mutex_unlock(&demux->mutex);
+ return -ENOMEM;
+ }
+ if (feed->idx_params.enable) {
+ dvb_dmx_init_idx_state(feed);
+ feed->rec_info->idx_info.indexing_feeds_num++;
+ if (demux->set_indexing)
+ demux->set_indexing(feed);
+ }
+ } else {
+ feed->pattern_num = 0;
+ feed->rec_info = NULL;
+ }
+
+ ret = demux->start_feed(feed);
+ if (ret < 0) {
+ if ((feed->ts_type & TS_PACKET) &&
+ !(feed->ts_type & TS_PAYLOAD_ONLY)) {
+ dvb_dmx_free_rec_info(ts_feed);
+ feed->rec_info = NULL;
+ }
mutex_unlock(&demux->mutex);
return ret;
}
@@ -771,11 +2335,337 @@
ts_feed->is_filtering = 0;
feed->state = DMX_STATE_ALLOCATED;
spin_unlock_irq(&demux->lock);
+
+ if (feed->rec_info) {
+ if (feed->pattern_num)
+ feed->rec_info->idx_info.pattern_search_feeds_num--;
+ if (feed->idx_params.enable)
+ feed->rec_info->idx_info.indexing_feeds_num--;
+ dvb_dmx_free_rec_info(ts_feed);
+ feed->rec_info = NULL;
+ }
+
mutex_unlock(&demux->mutex);
return ret;
}
+static int dmx_ts_feed_decoder_buff_status(struct dmx_ts_feed *ts_feed,
+ struct dmx_buffer_status *dmx_buffer_status)
+{
+ struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+ struct dvb_demux *demux = feed->demux;
+ int ret;
+
+ mutex_lock(&demux->mutex);
+
+ if (feed->state < DMX_STATE_GO) {
+ mutex_unlock(&demux->mutex);
+ return -EINVAL;
+ }
+
+ if (!demux->decoder_buffer_status) {
+ mutex_unlock(&demux->mutex);
+ return -ENODEV;
+ }
+
+ ret = demux->decoder_buffer_status(feed, dmx_buffer_status);
+
+ mutex_unlock(&demux->mutex);
+
+ return ret;
+}
+
+static int dmx_ts_feed_reuse_decoder_buffer(struct dmx_ts_feed *ts_feed,
+ int cookie)
+{
+ struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+ struct dvb_demux *demux = feed->demux;
+ int ret;
+
+ mutex_lock(&demux->mutex);
+
+ if (feed->state < DMX_STATE_GO) {
+ mutex_unlock(&demux->mutex);
+ return -EINVAL;
+ }
+
+ if (!demux->reuse_decoder_buffer) {
+ mutex_unlock(&demux->mutex);
+ return -ENODEV;
+ }
+
+ ret = demux->reuse_decoder_buffer(feed, cookie);
+
+ mutex_unlock(&demux->mutex);
+
+ return ret;
+}
+
+static int dmx_ts_feed_data_ready_cb(struct dmx_ts_feed *feed,
+ dmx_ts_data_ready_cb callback)
+{
+ struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed;
+ struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+
+ mutex_lock(&dvbdmx->mutex);
+
+ if (dvbdmxfeed->state == DMX_STATE_GO) {
+ mutex_unlock(&dvbdmx->mutex);
+ return -EINVAL;
+ }
+
+ dvbdmxfeed->data_ready_cb.ts = callback;
+
+ mutex_unlock(&dvbdmx->mutex);
+ return 0;
+}
+
+static int dmx_ts_set_secure_mode(struct dmx_ts_feed *feed,
+ struct dmx_secure_mode *secure_mode)
+{
+ struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed;
+ struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+
+ if (mutex_lock_interruptible(&dvbdmx->mutex))
+ return -ERESTARTSYS;
+
+ if (dvbdmxfeed->state == DMX_STATE_GO) {
+ mutex_unlock(&dvbdmx->mutex);
+ return -EBUSY;
+ }
+
+ dvbdmxfeed->secure_mode = *secure_mode;
+ mutex_unlock(&dvbdmx->mutex);
+ return 0;
+}
+
+static int dmx_ts_set_cipher_ops(struct dmx_ts_feed *feed,
+ struct dmx_cipher_operations *cipher_ops)
+{
+ struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed;
+ struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+ int ret = 0;
+
+ if (mutex_lock_interruptible(&dvbdmx->mutex))
+ return -ERESTARTSYS;
+
+ if ((dvbdmxfeed->state == DMX_STATE_GO) &&
+ dvbdmx->set_cipher_op)
+ ret = dvbdmx->set_cipher_op(dvbdmxfeed, cipher_ops);
+
+ if (!ret)
+ dvbdmxfeed->cipher_ops = *cipher_ops;
+
+ mutex_unlock(&dvbdmx->mutex);
+ return ret;
+}
+
+static int dmx_ts_set_video_codec(
+ struct dmx_ts_feed *ts_feed,
+ enum dmx_video_codec video_codec)
+{
+ struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+
+ feed->video_codec = video_codec;
+
+ return 0;
+}
+
+static int dmx_ts_set_idx_params(struct dmx_ts_feed *ts_feed,
+ struct dmx_indexing_params *idx_params)
+{
+ struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+ struct dvb_demux *dvbdmx = feed->demux;
+ int idx_enabled;
+ int ret = 0;
+
+ mutex_lock(&dvbdmx->mutex);
+
+ if ((feed->state == DMX_STATE_GO) &&
+ !feed->rec_info) {
+ mutex_unlock(&dvbdmx->mutex);
+ return -EINVAL;
+ }
+
+ idx_enabled = feed->idx_params.enable;
+ feed->idx_params = *idx_params;
+
+ if (feed->state == DMX_STATE_GO) {
+ spin_lock_irq(&dvbdmx->lock);
+ if (feed->pattern_num)
+ feed->rec_info->idx_info.pattern_search_feeds_num--;
+ if (idx_enabled && !idx_params->enable)
+ feed->rec_info->idx_info.indexing_feeds_num--;
+ if (!idx_enabled && idx_params->enable)
+ feed->rec_info->idx_info.indexing_feeds_num++;
+ dvb_dmx_init_idx_state(feed);
+ spin_unlock_irq(&dvbdmx->lock);
+
+ if (dvbdmx->set_indexing)
+ ret = dvbdmx->set_indexing(feed);
+ }
+
+ mutex_unlock(&dvbdmx->mutex);
+
+ return ret;
+}
+
+static int dvbdmx_ts_feed_oob_cmd(struct dmx_ts_feed *ts_feed,
+ struct dmx_oob_command *cmd)
+{
+ struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+ struct dmx_data_ready data;
+ struct dvb_demux *dvbdmx = feed->demux;
+ int ret = 0;
+ int secure_non_rec = feed->secure_mode.is_secured &&
+ !dvb_dmx_is_rec_feed(feed);
+
+ mutex_lock(&dvbdmx->mutex);
+
+ if (feed->state != DMX_STATE_GO) {
+ mutex_unlock(&dvbdmx->mutex);
+ return -EINVAL;
+ }
+
+ /* Decoder & non-recording secure feeds are handled by plug-in */
+ if ((feed->ts_type & TS_DECODER) || secure_non_rec) {
+ if (feed->demux->oob_command)
+ ret = feed->demux->oob_command(feed, cmd);
+ }
+
+ if (!(feed->ts_type & (TS_PAYLOAD_ONLY | TS_PACKET)) ||
+ secure_non_rec) {
+ mutex_unlock(&dvbdmx->mutex);
+ return ret;
+ }
+
+ data.data_length = 0;
+
+ switch (cmd->type) {
+ case DMX_OOB_CMD_EOS:
+ if (feed->ts_type & TS_PAYLOAD_ONLY)
+ dvb_dmx_check_pes_end(feed);
+
+ data.status = DMX_OK_EOS;
+ ret = feed->data_ready_cb.ts(&feed->feed.ts, &data);
+ break;
+
+ case DMX_OOB_CMD_MARKER:
+ data.status = DMX_OK_MARKER;
+ data.marker.id = cmd->params.marker.id;
+ ret = feed->data_ready_cb.ts(&feed->feed.ts, &data);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ mutex_unlock(&dvbdmx->mutex);
+ return ret;
+}
+
+static int dvbdmx_ts_get_scrambling_bits(struct dmx_ts_feed *ts_feed,
+ u8 *value)
+{
+ struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+ struct dvb_demux *demux = feed->demux;
+
+ spin_lock(&demux->lock);
+
+ if (!ts_feed->is_filtering) {
+ spin_unlock(&demux->lock);
+ return -EINVAL;
+ }
+
+ *value = feed->scrambling_bits;
+ spin_unlock(&demux->lock);
+
+ return 0;
+}
+
+static int dvbdmx_ts_insertion_insert_buffer(struct dmx_ts_feed *ts_feed,
+ char *data, size_t size)
+{
+ struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+ struct dvb_demux *demux = feed->demux;
+
+ spin_lock(&demux->lock);
+ if (!ts_feed->is_filtering) {
+ spin_unlock(&demux->lock);
+ return 0;
+ }
+
+ feed->cb.ts(data, size, NULL, 0, ts_feed);
+
+ spin_unlock(&demux->lock);
+
+ return 0;
+}
+
+static int dmx_ts_set_tsp_out_format(
+ struct dmx_ts_feed *ts_feed,
+ enum dmx_tsp_format_t tsp_format)
+{
+ struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+ struct dvb_demux *dvbdmx = feed->demux;
+
+ mutex_lock(&dvbdmx->mutex);
+
+ if (feed->state == DMX_STATE_GO) {
+ mutex_unlock(&dvbdmx->mutex);
+ return -EINVAL;
+ }
+
+ feed->tsp_out_format = tsp_format;
+ mutex_unlock(&dvbdmx->mutex);
+ return 0;
+}
+
+/**
+ * dvbdmx_ts_reset_pes_state() - Reset the current PES length and PES counters
+ *
+ * @feed: dvb demux feed object
+ */
+void dvbdmx_ts_reset_pes_state(struct dvb_demux_feed *feed)
+{
+ unsigned long flags;
+
+ /*
+ * Reset PES state.
+ * PUSI seen indication is kept so we can get partial PES.
+ */
+ spin_lock_irqsave(&feed->demux->lock, flags);
+
+ feed->peslen = 0;
+ feed->pes_tei_counter = 0;
+ feed->pes_cont_err_counter = 0;
+ feed->pes_ts_packets_num = 0;
+
+ spin_unlock_irqrestore(&feed->demux->lock, flags);
+}
+EXPORT_SYMBOL(dvbdmx_ts_reset_pes_state);
+
+static int dvbdmx_ts_flush_buffer(struct dmx_ts_feed *ts_feed, size_t length)
+{
+ struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+ struct dvb_demux *demux = feed->demux;
+ int ret = 0;
+
+ if (mutex_lock_interruptible(&demux->mutex))
+ return -ERESTARTSYS;
+
+ dvbdmx_ts_reset_pes_state(feed);
+
+ if ((feed->ts_type & TS_DECODER) && demux->flush_decoder_buffer)
+ /* Call decoder specific flushing if one exists */
+ ret = demux->flush_decoder_buffer(feed, length);
+
+ mutex_unlock(&demux->mutex);
+ return ret;
+}
+
static int dvbdmx_allocate_ts_feed(struct dmx_demux *dmx,
struct dmx_ts_feed **ts_feed,
dmx_ts_cb callback)
@@ -795,8 +2685,21 @@
feed->cb.ts = callback;
feed->demux = demux;
feed->pid = 0xffff;
- feed->peslen = 0xfffa;
+ feed->peslen = 0;
+ feed->pes_tei_counter = 0;
+ feed->pes_ts_packets_num = 0;
+ feed->pes_cont_err_counter = 0;
+ feed->secure_mode.is_secured = 0;
feed->buffer = NULL;
+ feed->tsp_out_format = DMX_TSP_FORMAT_188;
+ feed->idx_params.enable = 0;
+
+ /* default behaviour - pass first PES data even if it is
+ * partial PES data from previous PES that we didn't receive its header.
+ * Override this to 0 in your start_feed function in order to handle
+ * first PES differently.
+ */
+ feed->pusi_seen = 1;
(*ts_feed) = &feed->feed.ts;
(*ts_feed)->parent = dmx;
@@ -805,6 +2708,22 @@
(*ts_feed)->start_filtering = dmx_ts_feed_start_filtering;
(*ts_feed)->stop_filtering = dmx_ts_feed_stop_filtering;
(*ts_feed)->set = dmx_ts_feed_set;
+ (*ts_feed)->set_video_codec = dmx_ts_set_video_codec;
+ (*ts_feed)->set_idx_params = dmx_ts_set_idx_params;
+ (*ts_feed)->set_tsp_out_format = dmx_ts_set_tsp_out_format;
+ (*ts_feed)->get_decoder_buff_status = dmx_ts_feed_decoder_buff_status;
+ (*ts_feed)->reuse_decoder_buffer = dmx_ts_feed_reuse_decoder_buffer;
+ (*ts_feed)->data_ready_cb = dmx_ts_feed_data_ready_cb;
+ (*ts_feed)->notify_data_read = NULL;
+ (*ts_feed)->set_secure_mode = dmx_ts_set_secure_mode;
+ (*ts_feed)->set_cipher_ops = dmx_ts_set_cipher_ops;
+ (*ts_feed)->oob_command = dvbdmx_ts_feed_oob_cmd;
+ (*ts_feed)->get_scrambling_bits = dvbdmx_ts_get_scrambling_bits;
+ (*ts_feed)->ts_insertion_init = NULL;
+ (*ts_feed)->ts_insertion_terminate = NULL;
+ (*ts_feed)->ts_insertion_insert_buffer =
+ dvbdmx_ts_insertion_insert_buffer;
+ (*ts_feed)->flush_buffer = dvbdmx_ts_flush_buffer;
if (!(feed->filter = dvb_dmx_filter_alloc(demux))) {
feed->state = DMX_STATE_FREE;
@@ -840,7 +2759,7 @@
feed->state = DMX_STATE_FREE;
feed->filter->state = DMX_STATE_FREE;
-
+ ts_feed->priv = NULL;
dvb_demux_feed_del(feed);
feed->pid = 0xffff;
@@ -966,6 +2885,8 @@
dvbdmxfeed->feed.sec.secbuf = dvbdmxfeed->feed.sec.secbuf_base;
dvbdmxfeed->feed.sec.secbufp = 0;
dvbdmxfeed->feed.sec.seclen = 0;
+ dvbdmxfeed->first_cc = 1;
+ dvbdmxfeed->scrambling_bits = 0;
if (!dvbdmx->start_feed) {
mutex_unlock(&dvbdmx->mutex);
@@ -996,6 +2917,11 @@
mutex_lock(&dvbdmx->mutex);
+ if (dvbdmxfeed->state < DMX_STATE_GO) {
+ mutex_unlock(&dvbdmx->mutex);
+ return -EINVAL;
+ }
+
if (!dvbdmx->stop_feed) {
mutex_unlock(&dvbdmx->mutex);
return -ENODEV;
@@ -1012,6 +2938,66 @@
return ret;
}
+
+static int dmx_section_feed_data_ready_cb(struct dmx_section_feed *feed,
+ dmx_section_data_ready_cb callback)
+{
+ struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed;
+ struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+
+ mutex_lock(&dvbdmx->mutex);
+
+ if (dvbdmxfeed->state == DMX_STATE_GO) {
+ mutex_unlock(&dvbdmx->mutex);
+ return -EINVAL;
+ }
+
+ dvbdmxfeed->data_ready_cb.sec = callback;
+
+ mutex_unlock(&dvbdmx->mutex);
+ return 0;
+}
+
+static int dmx_section_set_secure_mode(struct dmx_section_feed *feed,
+ struct dmx_secure_mode *secure_mode)
+{
+ struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed;
+ struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+
+ mutex_lock(&dvbdmx->mutex);
+
+ if (dvbdmxfeed->state == DMX_STATE_GO) {
+ mutex_unlock(&dvbdmx->mutex);
+ return -EBUSY;
+ }
+
+ dvbdmxfeed->secure_mode = *secure_mode;
+ mutex_unlock(&dvbdmx->mutex);
+ return 0;
+}
+
+static int dmx_section_set_cipher_ops(struct dmx_section_feed *feed,
+ struct dmx_cipher_operations *cipher_ops)
+{
+ struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed;
+ struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+ int ret = 0;
+
+ if (mutex_lock_interruptible(&dvbdmx->mutex))
+ return -ERESTARTSYS;
+
+ if ((dvbdmxfeed->state == DMX_STATE_GO) &&
+ dvbdmx->set_cipher_op) {
+ ret = dvbdmx->set_cipher_op(dvbdmxfeed, cipher_ops);
+ }
+
+ if (!ret)
+ dvbdmxfeed->cipher_ops = *cipher_ops;
+
+ mutex_unlock(&dvbdmx->mutex);
+ return ret;
+}
+
static int dmx_section_feed_release_filter(struct dmx_section_feed *feed,
struct dmx_section_filter *filter)
{
@@ -1045,12 +3031,82 @@
f->next = f->next->next;
}
+ filter->priv = NULL;
dvbdmxfilter->state = DMX_STATE_FREE;
spin_unlock_irq(&dvbdmx->lock);
mutex_unlock(&dvbdmx->mutex);
return 0;
}
+static int dvbdmx_section_feed_oob_cmd(struct dmx_section_feed *section_feed,
+ struct dmx_oob_command *cmd)
+{
+ struct dvb_demux_feed *feed = (struct dvb_demux_feed *)section_feed;
+ struct dvb_demux *dvbdmx = feed->demux;
+ struct dmx_data_ready data;
+ int ret = 0;
+
+ data.data_length = 0;
+
+ mutex_lock(&dvbdmx->mutex);
+
+ if (feed->state != DMX_STATE_GO) {
+ mutex_unlock(&dvbdmx->mutex);
+ return -EINVAL;
+ }
+
+ /* Secure section feeds are handled by the plug-in */
+ if (feed->secure_mode.is_secured) {
+ if (feed->demux->oob_command)
+ ret = feed->demux->oob_command(feed, cmd);
+ else
+ ret = 0;
+
+ mutex_unlock(&dvbdmx->mutex);
+ return ret;
+ }
+
+ switch (cmd->type) {
+ case DMX_OOB_CMD_EOS:
+ data.status = DMX_OK_EOS;
+ break;
+
+ case DMX_OOB_CMD_MARKER:
+ data.status = DMX_OK_MARKER;
+ data.marker.id = cmd->params.marker.id;
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!ret)
+ ret = dvb_dmx_notify_section_event(feed, &data, 1);
+
+ mutex_unlock(&dvbdmx->mutex);
+ return ret;
+}
+
+static int dvbdmx_section_get_scrambling_bits(
+ struct dmx_section_feed *section_feed, u8 *value)
+{
+ struct dvb_demux_feed *feed = (struct dvb_demux_feed *)section_feed;
+ struct dvb_demux *demux = feed->demux;
+
+ spin_lock(&demux->lock);
+
+ if (!section_feed->is_filtering) {
+ spin_unlock(&demux->lock);
+ return -EINVAL;
+ }
+
+ *value = feed->scrambling_bits;
+ spin_unlock(&demux->lock);
+
+ return 0;
+}
+
static int dvbdmx_allocate_section_feed(struct dmx_demux *demux,
struct dmx_section_feed **feed,
dmx_section_cb callback)
@@ -1070,11 +3126,14 @@
dvbdmxfeed->cb.sec = callback;
dvbdmxfeed->demux = dvbdmx;
dvbdmxfeed->pid = 0xffff;
+ dvbdmxfeed->secure_mode.is_secured = 0;
+ dvbdmxfeed->tsp_out_format = DMX_TSP_FORMAT_188;
dvbdmxfeed->feed.sec.secbuf = dvbdmxfeed->feed.sec.secbuf_base;
dvbdmxfeed->feed.sec.secbufp = dvbdmxfeed->feed.sec.seclen = 0;
dvbdmxfeed->feed.sec.tsfeedp = 0;
dvbdmxfeed->filter = NULL;
dvbdmxfeed->buffer = NULL;
+ dvbdmxfeed->idx_params.enable = 0;
(*feed) = &dvbdmxfeed->feed.sec;
(*feed)->is_filtering = 0;
@@ -1086,6 +3145,13 @@
(*feed)->start_filtering = dmx_section_feed_start_filtering;
(*feed)->stop_filtering = dmx_section_feed_stop_filtering;
(*feed)->release_filter = dmx_section_feed_release_filter;
+ (*feed)->data_ready_cb = dmx_section_feed_data_ready_cb;
+ (*feed)->notify_data_read = NULL;
+ (*feed)->set_secure_mode = dmx_section_set_secure_mode;
+ (*feed)->set_cipher_ops = dmx_section_set_cipher_ops;
+ (*feed)->oob_command = dvbdmx_section_feed_oob_cmd;
+ (*feed)->get_scrambling_bits = dvbdmx_section_get_scrambling_bits;
+ (*feed)->flush_buffer = NULL;
mutex_unlock(&dvbdmx->mutex);
return 0;
@@ -1108,7 +3174,7 @@
dvbdmxfeed->buffer = NULL;
#endif
dvbdmxfeed->state = DMX_STATE_FREE;
-
+ feed->priv = NULL;
dvb_demux_feed_del(dvbdmxfeed);
dvbdmxfeed->pid = 0xffff;
@@ -1144,23 +3210,18 @@
return 0;
}
-static int dvbdmx_write(struct dmx_demux *demux, const char __user *buf, size_t count)
+static int dvbdmx_write(struct dmx_demux *demux, const char *buf, size_t count)
{
struct dvb_demux *dvbdemux = (struct dvb_demux *)demux;
- void *p;
- if ((!demux->frontend) || (demux->frontend->source != DMX_MEMORY_FE))
+ if (!demux->frontend || !buf || demux->dvr_input_protected ||
+ (demux->frontend->source != DMX_MEMORY_FE))
return -EINVAL;
-
- p = memdup_user(buf, count);
- if (IS_ERR(p))
- return PTR_ERR(p);
- if (mutex_lock_interruptible(&dvbdemux->mutex)) {
- kfree(p);
+ if (mutex_lock_interruptible(&dvbdemux->mutex))
return -ERESTARTSYS;
- }
- dvb_dmx_swfilter(dvbdemux, p, count);
- kfree(p);
+
+ dvb_dmx_swfilter_format(dvbdemux, buf, count, dvbdemux->tsp_format);
+
mutex_unlock(&dvbdemux->mutex);
if (signal_pending(current))
@@ -1168,6 +3229,40 @@
return count;
}
+/*
+ * dvbdmx_write_cancel() - abort an in-progress or blocked dvbdmx_write().
+ *
+ * Raises the software-filter abort flag, resets the partial TS-packet
+ * buffer position (tsbufp), and clears decoder-fullness waiting so a
+ * writer blocked on decoder buffer space can wake up. Always returns 0.
+ */
+static int dvbdmx_write_cancel(struct dmx_demux *demux)
+{
+ struct dvb_demux *dvbdmx = (struct dvb_demux *)demux;
+
+ spin_lock_irq(&dvbdmx->lock);
+
+ /* cancel any pending wait for decoder's buffers */
+ dvbdmx->sw_filter_abort = 1;
+ dvbdmx->tsbufp = 0;
+ dvb_dmx_configure_decoder_fullness(dvbdmx, 0);
+
+ spin_unlock_irq(&dvbdmx->lock);
+
+ return 0;
+}
+
+/*
+ * dvbdmx_set_playback_mode() - select push/pull playback mode and install
+ * the buffer-fullness callbacks used for flow control.
+ *
+ * @demux: the demux device
+ * @mode: requested playback mode
+ * @ts_fullness_callback: TS buffer fullness query callback
+ * @sec_fullness_callback: section buffer fullness query callback
+ *
+ * The callbacks are stored in dvbdmx->buffer_ctrl under the demux mutex.
+ * Always returns 0.
+ */
+static int dvbdmx_set_playback_mode(struct dmx_demux *demux,
+ enum dmx_playback_mode_t mode,
+ dmx_ts_fullness ts_fullness_callback,
+ dmx_section_fullness sec_fullness_callback)
+{
+ struct dvb_demux *dvbdmx = (struct dvb_demux *)demux;
+
+ mutex_lock(&dvbdmx->mutex);
+
+ dvbdmx->playback_mode = mode;
+ dvbdmx->buffer_ctrl.ts = ts_fullness_callback;
+ dvbdmx->buffer_ctrl.sec = sec_fullness_callback;
+
+ mutex_unlock(&dvbdmx->mutex);
+
+ return 0;
+}
+
static int dvbdmx_add_frontend(struct dmx_demux *demux,
struct dmx_frontend *frontend)
{
@@ -1225,7 +3320,7 @@
struct dvb_demux *dvbdemux = (struct dvb_demux *)demux;
mutex_lock(&dvbdemux->mutex);
-
+ dvbdemux->sw_filter_abort = 0;
demux->frontend = NULL;
mutex_unlock(&dvbdemux->mutex);
return 0;
@@ -1235,7 +3330,50 @@
{
struct dvb_demux *dvbdemux = (struct dvb_demux *)demux;
- memcpy(pids, dvbdemux->pids, 5 * sizeof(u16));
+ /* 4 demux instances, each with a group of 5 PIDs */
+ memcpy(pids, dvbdemux->pids, DMX_PES_OTHER*sizeof(u16));
+ return 0;
+}
+
+/*
+ * dvbdmx_get_tsp_size() - return the configured TS packet size in bytes
+ * (188/192/204, as set by dvbdmx_set_tsp_format).
+ *
+ * NOTE(review): ts_packet_size is a size_t narrowed to int here — fine for
+ * the values used (<= 204), but worth confirming no other values occur.
+ */
+static int dvbdmx_get_tsp_size(struct dmx_demux *demux)
+{
+ int tsp_size;
+ struct dvb_demux *dvbdemux = (struct dvb_demux *)demux;
+
+ mutex_lock(&dvbdemux->mutex);
+ tsp_size = dvbdemux->ts_packet_size;
+ mutex_unlock(&dvbdemux->mutex);
+
+ return tsp_size;
+}
+
+/*
+ * dvbdmx_set_tsp_format() - configure the expected TS packet format for
+ * data written via dvbdmx_write(), and derive the matching packet size:
+ * 188 (plain), 192 (timestamp head/tail), or 204 (with FEC trailer).
+ *
+ * Returns -EINVAL for values outside [DMX_TSP_FORMAT_188, DMX_TSP_FORMAT_204].
+ */
+static int dvbdmx_set_tsp_format(
+ struct dmx_demux *demux,
+ enum dmx_tsp_format_t tsp_format)
+{
+ struct dvb_demux *dvbdemux = (struct dvb_demux *)demux;
+
+ /* range check relies on the enum being declared in this order */
+ if ((tsp_format > DMX_TSP_FORMAT_204) ||
+ (tsp_format < DMX_TSP_FORMAT_188))
+ return -EINVAL;
+
+ mutex_lock(&dvbdemux->mutex);
+
+ dvbdemux->tsp_format = tsp_format;
+ switch (tsp_format) {
+ case DMX_TSP_FORMAT_188:
+ dvbdemux->ts_packet_size = 188;
+ break;
+ case DMX_TSP_FORMAT_192_TAIL:
+ case DMX_TSP_FORMAT_192_HEAD:
+ dvbdemux->ts_packet_size = 192;
+ break;
+ case DMX_TSP_FORMAT_204:
+ dvbdemux->ts_packet_size = 204;
+ break;
+ }
+
+ mutex_unlock(&dvbdemux->mutex);
 return 0;
 }
@@ -1257,13 +3395,50 @@
dvbdemux->filter = NULL;
return -ENOMEM;
}
+
+ dvbdemux->rec_info_pool = vmalloc(dvbdemux->feednum *
+ sizeof(struct dvb_demux_rec_info));
+ if (!dvbdemux->rec_info_pool) {
+ vfree(dvbdemux->feed);
+ vfree(dvbdemux->filter);
+ dvbdemux->feed = NULL;
+ dvbdemux->filter = NULL;
+ return -ENOMEM;
+ }
+
+ dvbdemux->sw_filter_abort = 0;
+ dvbdemux->total_process_time = 0;
+ dvbdemux->total_crc_time = 0;
+ snprintf(dvbdemux->alias,
+ MAX_DVB_DEMUX_NAME_LEN,
+ "demux%d",
+ dvb_demux_index++);
+
+ dvbdemux->dmx.debugfs_demux_dir =
+ debugfs_create_dir(dvbdemux->alias, NULL);
+
+ if (dvbdemux->dmx.debugfs_demux_dir != NULL) {
+ debugfs_create_u32(
+ "total_processing_time", 0664,
+ dvbdemux->dmx.debugfs_demux_dir,
+ &dvbdemux->total_process_time);
+
+ debugfs_create_u32(
+ "total_crc_time", 0664,
+ dvbdemux->dmx.debugfs_demux_dir,
+ &dvbdemux->total_crc_time);
+ }
+
for (i = 0; i < dvbdemux->filternum; i++) {
dvbdemux->filter[i].state = DMX_STATE_FREE;
dvbdemux->filter[i].index = i;
}
+
for (i = 0; i < dvbdemux->feednum; i++) {
dvbdemux->feed[i].state = DMX_STATE_FREE;
dvbdemux->feed[i].index = i;
+
+ dvbdemux->rec_info_pool[i].ref_count = 0;
}
dvbdemux->cnt_storage = vmalloc(MAX_PID + 1);
@@ -1283,6 +3458,9 @@
dvbdemux->recording = 0;
dvbdemux->tsbufp = 0;
+ dvbdemux->tsp_format = DMX_TSP_FORMAT_188;
+ dvbdemux->ts_packet_size = 188;
+
if (!dvbdemux->check_crc32)
dvbdemux->check_crc32 = dvb_dmx_crc32;
@@ -1294,10 +3472,14 @@
dmx->open = dvbdmx_open;
dmx->close = dvbdmx_close;
dmx->write = dvbdmx_write;
+ dmx->write_cancel = dvbdmx_write_cancel;
+ dmx->set_playback_mode = dvbdmx_set_playback_mode;
dmx->allocate_ts_feed = dvbdmx_allocate_ts_feed;
dmx->release_ts_feed = dvbdmx_release_ts_feed;
dmx->allocate_section_feed = dvbdmx_allocate_section_feed;
dmx->release_section_feed = dvbdmx_release_section_feed;
+ dmx->map_buffer = NULL;
+ dmx->unmap_buffer = NULL;
dmx->add_frontend = dvbdmx_add_frontend;
dmx->remove_frontend = dvbdmx_remove_frontend;
@@ -1306,6 +3488,9 @@
dmx->disconnect_frontend = dvbdmx_disconnect_frontend;
dmx->get_pes_pids = dvbdmx_get_pes_pids;
+ dmx->set_tsp_format = dvbdmx_set_tsp_format;
+ dmx->get_tsp_size = dvbdmx_get_tsp_size;
+
mutex_init(&dvbdemux->mutex);
spin_lock_init(&dvbdemux->lock);
@@ -1316,9 +3501,14 @@
void dvb_dmx_release(struct dvb_demux *dvbdemux)
 {
+ /* Tear down the per-demux debugfs directory created in dvb_dmx_init() */
+ if (dvbdemux->dmx.debugfs_demux_dir != NULL)
+ debugfs_remove_recursive(dvbdemux->dmx.debugfs_demux_dir);
+
+ /*
+ * NOTE(review): decrementing the global counter on release means
+ * "demux%d" aliases can be reused out of order if demuxes are not
+ * released in LIFO order — confirm this is acceptable.
+ */
+ dvb_demux_index--;
 vfree(dvbdemux->cnt_storage);
 vfree(dvbdemux->filter);
 vfree(dvbdemux->feed);
+ vfree(dvbdemux->rec_info_pool);
 }
EXPORT_SYMBOL(dvb_dmx_release);
diff --git a/drivers/media/dvb-core/dvb_demux.h b/drivers/media/dvb-core/dvb_demux.h
index 5ed3cab..7ba053d 100644
--- a/drivers/media/dvb-core/dvb_demux.h
+++ b/drivers/media/dvb-core/dvb_demux.h
@@ -27,6 +27,7 @@
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
+#include <linux/debugfs.h>
#include "demux.h"
@@ -44,6 +45,8 @@
#define MAX_PID 0x1fff
+#define TIMESTAMP_LEN 4
+
#define SPEED_PKTS_INTERVAL 50000
struct dvb_demux_filter {
@@ -64,6 +67,92 @@
#define DMX_FEED_ENTRY(pos) list_entry(pos, struct dvb_demux_feed, list_head)
+
+/*
+ * A single indexing event; entries live on the rec_info idx_info free_list
+ * until used, then move to the ready_list for delivery to the user.
+ */
+struct dmx_index_entry {
+ struct dmx_index_event_info event;
+ struct list_head next;
+};
+
+#define DMX_IDX_EVENT_QUEUE_SIZE DMX_EVENT_QUEUE_SIZE
+
+/*
+ * Recording/indexing bookkeeping. Entries come from dvb_demux->rec_info_pool
+ * (one slot per feed) and are shared via ref_count.
+ */
+struct dvb_demux_rec_info {
+ /* Reference counter for number of feeds using this information */
+ int ref_count;
+
+ /* Counter for number of TS packets output to recording buffer */
+ u64 ts_output_count;
+
+ /* Indexing information */
+ struct {
+ /*
+ * Minimum TS packet number encountered in recording filter
+ * among all feeds that search for video patterns
+ */
+ u64 min_pattern_tsp_num;
+
+ /* Number of indexing-enabled feeds */
+ u8 indexing_feeds_num;
+
+ /* Number of feeds with video pattern search request */
+ u8 pattern_search_feeds_num;
+
+ /* Index entries pool */
+ struct dmx_index_entry events[DMX_IDX_EVENT_QUEUE_SIZE];
+
+ /* List of free entries that can be used for new index events */
+ struct list_head free_list;
+
+ /* List holding ready index entries not notified to user yet */
+ struct list_head ready_list;
+ } idx_info;
+};
+
+#define DVB_DMX_MAX_PATTERN_LEN 6
+/*
+ * Descriptor of a video elementary-stream byte pattern to search for
+ * (used by dvb_dmx_video_pattern_search for indexing).
+ */
+struct dvb_dmx_video_patterns {
+ /* the byte pattern to look for */
+ u8 pattern[DVB_DMX_MAX_PATTERN_LEN];
+
+ /* the byte mask to use (same length as pattern) */
+ u8 mask[DVB_DMX_MAX_PATTERN_LEN];
+
+ /* the length of the pattern, in bytes */
+ size_t size;
+
+ /* the type of the pattern. One of DMX_IDX_* definitions */
+ u64 type;
+};
+
+#define DVB_DMX_MAX_FOUND_PATTERNS 20
+#define DVB_DMX_MAX_SEARCH_PATTERN_NUM 20
+struct dvb_dmx_video_prefix_size_masks {
+ /*
+ * a bit mask (per pattern) of possible prefix sizes to use
+ * when searching for a pattern that started in the previous TS packet.
+ * Updated by dvb_dmx_video_pattern_search for use in the next lookup.
+ */
+ u32 size_mask[DVB_DMX_MAX_FOUND_PATTERNS];
+};
+
+struct dvb_dmx_video_patterns_results {
+ struct {
+ /*
+ * The offset in the buffer where the pattern was found.
+ * If a pattern is found using a prefix (i.e. started on the
+ * previous buffer), offset is zero.
+ */
+ u32 offset;
+
+ /*
+ * The type of the pattern found.
+ * One of DMX_IDX_* definitions.
+ */
+ u64 type;
+
+ /* The prefix size that was used to find this pattern */
+ u32 used_prefix_size;
+ } info[DVB_DMX_MAX_FOUND_PATTERNS];
+};
+
struct dvb_demux_feed {
union {
struct dmx_ts_feed ts;
@@ -75,6 +164,11 @@
dmx_section_cb sec;
} cb;
+ union {
+ dmx_ts_data_ready_cb ts;
+ dmx_section_data_ready_cb sec;
+ } data_ready_cb;
+
struct dvb_demux *demux;
void *priv;
int type;
@@ -82,6 +176,9 @@
u16 pid;
u8 *buffer;
int buffer_size;
+ enum dmx_tsp_format_t tsp_out_format;
+ struct dmx_secure_mode secure_mode;
+ struct dmx_cipher_operations cipher_ops;
ktime_t timeout;
struct dvb_demux_filter *filter;
@@ -90,12 +187,34 @@
enum dmx_ts_pes pes_type;
int cc;
+ int first_cc;
int pusi_seen; /* prevents feeding of garbage from previous section */
+ u8 scrambling_bits;
+
+ struct dvb_demux_rec_info *rec_info;
+ u64 prev_tsp_num;
+ u64 prev_stc;
+ u64 curr_pusi_tsp_num;
+ u64 prev_pusi_tsp_num;
+ int prev_frame_valid;
+ u64 prev_frame_type;
+ int first_frame_in_seq;
+ int first_frame_in_seq_notified;
+ u64 last_pattern_tsp_num;
+ int pattern_num;
+const struct dvb_dmx_video_patterns *patterns[DVB_DMX_MAX_SEARCH_PATTERN_NUM];
+ struct dvb_dmx_video_prefix_size_masks prefix_size;
u16 peslen;
+ u32 pes_tei_counter;
+ u32 pes_cont_err_counter;
+ u32 pes_ts_packets_num;
struct list_head list_head;
unsigned int index; /* a unique index for each feed (can be used as hardware pid filter index) */
+
+ enum dmx_video_codec video_codec;
+ struct dmx_indexing_params idx_params;
};
struct dvb_demux {
@@ -107,10 +226,27 @@
int (*stop_feed)(struct dvb_demux_feed *feed);
int (*write_to_decoder)(struct dvb_demux_feed *feed,
const u8 *buf, size_t len);
+ int (*decoder_fullness_init)(struct dvb_demux_feed *feed);
+ int (*decoder_fullness_wait)(struct dvb_demux_feed *feed,
+ size_t required_space);
+ int (*decoder_fullness_abort)(struct dvb_demux_feed *feed);
+ int (*decoder_buffer_status)(struct dvb_demux_feed *feed,
+ struct dmx_buffer_status *dmx_buffer_status);
+ int (*reuse_decoder_buffer)(struct dvb_demux_feed *feed,
+ int cookie);
+ int (*set_cipher_op)(struct dvb_demux_feed *feed,
+ struct dmx_cipher_operations *cipher_ops);
u32 (*check_crc32)(struct dvb_demux_feed *feed,
const u8 *buf, size_t len);
void (*memcopy)(struct dvb_demux_feed *feed, u8 *dst,
const u8 *src, size_t len);
+ int (*oob_command)(struct dvb_demux_feed *feed,
+ struct dmx_oob_command *cmd);
+ void (*convert_ts)(struct dvb_demux_feed *feed,
+ const u8 timestamp[TIMESTAMP_LEN],
+ u64 *timestampIn27Mhz);
+ int (*set_indexing)(struct dvb_demux_feed *feed);
+ int (*flush_decoder_buffer)(struct dvb_demux_feed *feed, size_t length);
int users;
#define MAX_DVB_DEMUX_USERS 10
@@ -136,10 +272,35 @@
ktime_t speed_last_time; /* for TS speed check */
uint32_t speed_pkts_cnt; /* for TS speed check */
+
+ enum dmx_tsp_format_t tsp_format;
+ size_t ts_packet_size;
+
+ enum dmx_playback_mode_t playback_mode;
+ int sw_filter_abort;
+
+ struct {
+ dmx_ts_fullness ts;
+ dmx_section_fullness sec;
+ } buffer_ctrl;
+
+ struct dvb_demux_rec_info *rec_info_pool;
+
+ /*
+ * the following is used for debugfs exposing info
+ * about dvb demux performance.
+ */
+#define MAX_DVB_DEMUX_NAME_LEN 10
+ char alias[MAX_DVB_DEMUX_NAME_LEN];
+
+ u32 total_process_time;
+ u32 total_crc_time;
};
int dvb_dmx_init(struct dvb_demux *dvbdemux);
void dvb_dmx_release(struct dvb_demux *dvbdemux);
+int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed, const u8 *buf,
+ int should_lock);
void dvb_dmx_swfilter_packets(struct dvb_demux *dvbdmx, const u8 *buf,
size_t count);
void dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, size_t count);
@@ -147,5 +308,141 @@
size_t count);
void dvb_dmx_swfilter_raw(struct dvb_demux *demux, const u8 *buf,
size_t count);
+void dvb_dmx_swfilter_format(
+ struct dvb_demux *demux, const u8 *buf,
+ size_t count,
+ enum dmx_tsp_format_t tsp_format);
+void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf,
+ const u8 timestamp[TIMESTAMP_LEN]);
+const struct dvb_dmx_video_patterns *dvb_dmx_get_pattern(u64 dmx_idx_pattern);
+int dvb_dmx_video_pattern_search(
+ const struct dvb_dmx_video_patterns
+ *patterns[DVB_DMX_MAX_SEARCH_PATTERN_NUM],
+ int patterns_num,
+ const u8 *buf, size_t buf_size,
+ struct dvb_dmx_video_prefix_size_masks *prefix_size_masks,
+ struct dvb_dmx_video_patterns_results *results);
+int dvb_demux_push_idx_event(struct dvb_demux_feed *feed,
+ struct dmx_index_event_info *idx_event, int should_lock);
+void dvb_dmx_process_idx_pattern(struct dvb_demux_feed *feed,
+ struct dvb_dmx_video_patterns_results *patterns, int pattern,
+ u64 curr_stc, u64 prev_stc,
+ u64 curr_match_tsp, u64 prev_match_tsp,
+ u64 curr_pusi_tsp, u64 prev_pusi_tsp);
+void dvb_dmx_notify_idx_events(struct dvb_demux_feed *feed, int should_lock);
+int dvb_dmx_notify_section_event(struct dvb_demux_feed *feed,
+ struct dmx_data_ready *event, int should_lock);
+void dvbdmx_ts_reset_pes_state(struct dvb_demux_feed *feed);
+
+/**
+ * dvb_dmx_is_video_feed - Returns whether the PES feed
+ * is video one.
+ *
+ * @feed: The feed to be checked.
+ *
+ * Return 1 if feed is video feed, 0 otherwise.
+ */
+static inline int dvb_dmx_is_video_feed(struct dvb_demux_feed *feed)
+{
+ if (feed->type != DMX_TYPE_TS)
+ return 0;
+
+ /* any ts_type bit other than TS_DECODER means not a pure decoder feed */
+ if (feed->ts_type & (~TS_DECODER))
+ return 0;
+
+ if ((feed->pes_type == DMX_PES_VIDEO0) ||
+ (feed->pes_type == DMX_PES_VIDEO1) ||
+ (feed->pes_type == DMX_PES_VIDEO2) ||
+ (feed->pes_type == DMX_PES_VIDEO3))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * dvb_dmx_is_audio_feed - Returns whether the PES feed
+ * is audio one.
+ *
+ * @feed: The feed to be checked.
+ *
+ * Return 1 if feed is audio feed, 0 otherwise.
+ */
+static inline int dvb_dmx_is_audio_feed(struct dvb_demux_feed *feed)
+{
+ if (feed->type != DMX_TYPE_TS)
+ return 0;
+
+ /* any ts_type bit other than TS_DECODER means not a pure decoder feed */
+ if (feed->ts_type & (~TS_DECODER))
+ return 0;
+
+ if ((feed->pes_type == DMX_PES_AUDIO0) ||
+ (feed->pes_type == DMX_PES_AUDIO1) ||
+ (feed->pes_type == DMX_PES_AUDIO2) ||
+ (feed->pes_type == DMX_PES_AUDIO3))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * dvb_dmx_is_pcr_feed - Returns whether the PES feed
+ * is PCR one.
+ *
+ * @feed: The feed to be checked.
+ *
+ * Return 1 if feed is PCR feed, 0 otherwise.
+ */
+static inline int dvb_dmx_is_pcr_feed(struct dvb_demux_feed *feed)
+{
+ if (feed->type != DMX_TYPE_TS)
+ return 0;
+
+ /* any ts_type bit other than TS_DECODER means not a pure decoder feed */
+ if (feed->ts_type & (~TS_DECODER))
+ return 0;
+
+ if ((feed->pes_type == DMX_PES_PCR0) ||
+ (feed->pes_type == DMX_PES_PCR1) ||
+ (feed->pes_type == DMX_PES_PCR2) ||
+ (feed->pes_type == DMX_PES_PCR3))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * dvb_dmx_is_sec_feed - Returns whether this is a section feed
+ *
+ * @feed: The feed to be checked.
+ *
+ * Return 1 if feed is a section feed, 0 otherwise.
+ */
+static inline int dvb_dmx_is_sec_feed(struct dvb_demux_feed *feed)
+{
+ return (feed->type == DMX_TYPE_SEC);
+}
+
+/**
+ * dvb_dmx_is_rec_feed - Returns whether this is a recording feed
+ *
+ * @feed: The feed to be checked.
+ *
+ * Return 1 if feed is recording feed, 0 otherwise.
+ */
+static inline int dvb_dmx_is_rec_feed(struct dvb_demux_feed *feed)
+{
+ if (feed->type != DMX_TYPE_TS)
+ return 0;
+
+ /* decoder-routed or payload-only TS feeds are not recording feeds */
+ if (feed->ts_type & (TS_DECODER | TS_PAYLOAD_ONLY))
+ return 0;
+
+ return 1;
+}
+
+/* Extract the 13-bit PID from bytes 1-2 of a TS packet header. */
+static inline u16 ts_pid(const u8 *buf)
+{
+ return ((buf[1] & 0x1f) << 8) + buf[2];
+}
+
#endif /* _DVB_DEMUX_H_ */
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index 9914f69..efb7d52 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -997,7 +997,7 @@
netdev_dbg(dev, "start filtering\n");
priv->secfeed->start_filtering(priv->secfeed);
} else if (priv->feedtype == DVB_NET_FEEDTYPE_ULE) {
- ktime_t timeout = ns_to_ktime(10 * NSEC_PER_MSEC);
+ ktime_t timeout = ktime_set(0, 10*NSEC_PER_MSEC); // 10 msec
/* we have payloads encapsulated in TS */
netdev_dbg(dev, "alloc tsfeed\n");
diff --git a/drivers/media/dvb-core/dvb_ringbuffer.c b/drivers/media/dvb-core/dvb_ringbuffer.c
index 7df7fb3..d4514c1 100644
--- a/drivers/media/dvb-core/dvb_ringbuffer.c
+++ b/drivers/media/dvb-core/dvb_ringbuffer.c
@@ -37,6 +37,8 @@
#define PKT_READY 0
#define PKT_DISPOSED 1
+#define PKT_PENDING 2
+
void dvb_ringbuffer_init(struct dvb_ringbuffer *rbuf, void *data, size_t len)
@@ -209,18 +211,19 @@
}
ssize_t dvb_ringbuffer_write_user(struct dvb_ringbuffer *rbuf,
- const u8 __user *buf, size_t len)
+ const u8 __user *buf, size_t len)
{
- int status;
size_t todo = len;
size_t split;
+ ssize_t oldpwrite = rbuf->pwrite;
- split = (rbuf->pwrite + len > rbuf->size) ? rbuf->size - rbuf->pwrite : 0;
+ split = (rbuf->pwrite + len > rbuf->size) ?
+ rbuf->size - rbuf->pwrite :
+ 0;
if (split > 0) {
- status = copy_from_user(rbuf->data+rbuf->pwrite, buf, split);
- if (status)
- return len - todo;
+ if (copy_from_user(rbuf->data + rbuf->pwrite, buf, split))
+ return -EFAULT;
buf += split;
todo -= split;
/* smp_store_release() for write pointer update to ensure that
@@ -230,9 +233,12 @@
*/
smp_store_release(&rbuf->pwrite, 0);
}
- status = copy_from_user(rbuf->data+rbuf->pwrite, buf, todo);
- if (status)
- return len - todo;
+
+ if (copy_from_user(rbuf->data + rbuf->pwrite, buf, todo)) {
+ /* smp_store_release() for write pointer update */
+ smp_store_release(&rbuf->pwrite, oldpwrite);
+ return -EFAULT;
+ }
/* smp_store_release() for write pointer update, see above */
smp_store_release(&rbuf->pwrite, (rbuf->pwrite + todo) % rbuf->size);
@@ -253,6 +259,31 @@
return status;
}
+/*
+ * Start a user-written packet: emit the 3-byte packet header (16-bit
+ * big-endian length + PKT_PENDING status) and return the index of the
+ * header start, for later use with dvb_ringbuffer_pkt_close().
+ *
+ * NOTE(review): no free-space check is performed here — caller must
+ * guarantee the ringbuffer has room for the header plus @len bytes.
+ */
+ssize_t dvb_ringbuffer_pkt_start(struct dvb_ringbuffer *rbuf, size_t len)
+{
+ ssize_t oldpwrite = rbuf->pwrite;
+
+ DVB_RINGBUFFER_WRITE_BYTE(rbuf, len >> 8);
+ DVB_RINGBUFFER_WRITE_BYTE(rbuf, len & 0xff);
+ DVB_RINGBUFFER_WRITE_BYTE(rbuf, PKT_PENDING);
+
+ return oldpwrite;
+}
+EXPORT_SYMBOL(dvb_ringbuffer_pkt_start);
+
+/*
+ * Close a packet opened with dvb_ringbuffer_pkt_start(): flip its status
+ * byte (header offset 2) from PKT_PENDING to PKT_READY so readers can
+ * consume it. @idx is the header index returned by pkt_start.
+ *
+ * Returns 0 on success, -EINVAL if the status byte is not PKT_PENDING.
+ */
+int dvb_ringbuffer_pkt_close(struct dvb_ringbuffer *rbuf, ssize_t idx)
+{
+ idx = (idx + 2) % rbuf->size;
+
+ if (rbuf->data[idx] != PKT_PENDING)
+ return -EINVAL;
+
+ rbuf->data[idx] = PKT_READY;
+
+ return 0;
+}
+EXPORT_SYMBOL(dvb_ringbuffer_pkt_close);
+
ssize_t dvb_ringbuffer_pkt_read_user(struct dvb_ringbuffer *rbuf, size_t idx,
int offset, u8 __user *buf, size_t len)
{
@@ -260,6 +291,9 @@
size_t split;
size_t pktlen;
+ if (DVB_RINGBUFFER_PEEK(rbuf, (idx+2)) != PKT_READY)
+ return -EINVAL;
+
pktlen = rbuf->data[idx] << 8;
pktlen |= rbuf->data[(idx + 1) % rbuf->size];
if (offset > pktlen) return -EINVAL;
@@ -280,6 +314,7 @@
return len;
}
+EXPORT_SYMBOL(dvb_ringbuffer_pkt_read_user);
ssize_t dvb_ringbuffer_pkt_read(struct dvb_ringbuffer *rbuf, size_t idx,
int offset, u8* buf, size_t len)
@@ -288,6 +323,9 @@
size_t split;
size_t pktlen;
+ if (rbuf->data[(idx + 2) % rbuf->size] != PKT_READY)
+ return -EINVAL;
+
pktlen = rbuf->data[idx] << 8;
pktlen |= rbuf->data[(idx + 1) % rbuf->size];
if (offset > pktlen) return -EINVAL;
@@ -305,6 +343,7 @@
memcpy(buf, rbuf->data+idx, todo);
return len;
}
+EXPORT_SYMBOL(dvb_ringbuffer_pkt_read);
void dvb_ringbuffer_pkt_dispose(struct dvb_ringbuffer *rbuf, size_t idx)
{
@@ -324,6 +363,7 @@
}
}
}
+EXPORT_SYMBOL(dvb_ringbuffer_pkt_dispose);
ssize_t dvb_ringbuffer_pkt_next(struct dvb_ringbuffer *rbuf, size_t idx, size_t* pktlen)
{
@@ -339,7 +379,10 @@
idx = (idx + curpktlen + DVB_RINGBUFFER_PKTHDRSIZE) % rbuf->size;
}
- consumed = (idx - rbuf->pread) % rbuf->size;
+ if (idx >= rbuf->pread)
+ consumed = idx - rbuf->pread;
+ else
+ consumed = rbuf->size - (rbuf->pread - idx);
while((dvb_ringbuffer_avail(rbuf) - consumed) > DVB_RINGBUFFER_PKTHDRSIZE) {
@@ -352,6 +395,9 @@
return idx;
}
+ if (curpktstatus == PKT_PENDING)
+ return -EFAULT;
+
consumed += curpktlen + DVB_RINGBUFFER_PKTHDRSIZE;
idx = (idx + curpktlen + DVB_RINGBUFFER_PKTHDRSIZE) % rbuf->size;
}
@@ -359,8 +405,7 @@
// no packets available
return -1;
}
-
-
+EXPORT_SYMBOL(dvb_ringbuffer_pkt_next);
EXPORT_SYMBOL(dvb_ringbuffer_init);
EXPORT_SYMBOL(dvb_ringbuffer_empty);
diff --git a/drivers/media/dvb-core/dvb_ringbuffer.h b/drivers/media/dvb-core/dvb_ringbuffer.h
index bbe9487..900630e 100644
--- a/drivers/media/dvb-core/dvb_ringbuffer.h
+++ b/drivers/media/dvb-core/dvb_ringbuffer.h
@@ -124,6 +124,9 @@
*/
#define DVB_RINGBUFFER_PEEK(rbuf, offs) \
((rbuf)->data[((rbuf)->pread + (offs)) % (rbuf)->size])
+#define DVB_RINGBUFFER_PUSH(rbuf, num) \
+ ((rbuf)->pwrite = (((rbuf)->pwrite+(num))%(rbuf)->size))
+
/**
* DVB_RINGBUFFER_SKIP - advance read ptr by @num bytes
@@ -274,7 +277,35 @@
* in bytes.
* returns Packet index (if >=0), or -1 if no packets available.
*/
-extern ssize_t dvb_ringbuffer_pkt_next(struct dvb_ringbuffer *rbuf,
- size_t idx, size_t *pktlen);
+extern ssize_t dvb_ringbuffer_pkt_next(struct dvb_ringbuffer *rbuf, size_t idx,
+ size_t *pktlen);
+
+
+/**
+ * Start a new packet that will be written directly by the user to the packet
+ * buffer.
+ * The function only writes the header of the packet into the packet buffer,
+ * and the packet is in pending state (can't be read by the reader) until it is
+ * closed using dvb_ringbuffer_pkt_close. You must write the data into the
+ * packet buffer using dvb_ringbuffer_write followed by
+ * dvb_ringbuffer_pkt_close.
+ *
+ * @rbuf: Ringbuffer concerned.
+ * @len: Size of the packet's data
+ * returns Index of the packet's header that was started.
+ */
+extern ssize_t dvb_ringbuffer_pkt_start(struct dvb_ringbuffer *rbuf,
+ size_t len);
+
+/**
+ * Close a packet that was started using dvb_ringbuffer_pkt_start.
+ * The packet will be marked as ready to be read.
+ *
+ * @rbuf: Ringbuffer concerned.
+ * @idx: Packet index that was returned by dvb_ringbuffer_pkt_start
+ * returns error status, -EINVAL if the provided index is invalid
+ */
+extern int dvb_ringbuffer_pkt_close(struct dvb_ringbuffer *rbuf, ssize_t idx);
+
#endif /* _DVB_RINGBUFFER_H_ */
diff --git a/drivers/media/media-entity.c b/drivers/media/media-entity.c
index c68239e..98b067b 100644
--- a/drivers/media/media-entity.c
+++ b/drivers/media/media-entity.c
@@ -468,7 +468,7 @@
while ((entity_err = media_entity_graph_walk_next(graph))) {
/* don't let the stream_count go negative */
- if (entity->stream_count > 0) {
+ if (entity_err->stream_count > 0) {
entity_err->stream_count--;
if (entity_err->stream_count == 0)
entity_err->pipe = NULL;
diff --git a/drivers/media/pci/cx88/cx88-cards.c b/drivers/media/pci/cx88/cx88-cards.c
index 8f2556e..61611d1 100644
--- a/drivers/media/pci/cx88/cx88-cards.c
+++ b/drivers/media/pci/cx88/cx88-cards.c
@@ -3691,7 +3691,14 @@
core->nr = nr;
sprintf(core->name, "cx88[%d]", core->nr);
- core->tvnorm = V4L2_STD_NTSC_M;
+ /*
+ * Note: Setting initial standard here would cause first call to
+ * cx88_set_tvnorm() to return without programming any registers. Leave
+ * it blank at this point and it will get set later in
+ * cx8800_initdev()
+ */
+ core->tvnorm = 0;
+
core->width = 320;
core->height = 240;
core->field = V4L2_FIELD_INTERLACED;
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index d83eb3b..3b140ad 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -1422,7 +1422,7 @@
/* initial device configuration */
mutex_lock(&core->lock);
- cx88_set_tvnorm(core, core->tvnorm);
+ cx88_set_tvnorm(core, V4L2_STD_NTSC_M);
v4l2_ctrl_handler_setup(&core->video_hdl);
v4l2_ctrl_handler_setup(&core->audio_hdl);
cx88_video_mux(core, 0);
diff --git a/drivers/media/pci/saa7134/saa7134-i2c.c b/drivers/media/pci/saa7134/saa7134-i2c.c
index 2dac48f..dca0592 100644
--- a/drivers/media/pci/saa7134/saa7134-i2c.c
+++ b/drivers/media/pci/saa7134/saa7134-i2c.c
@@ -355,12 +355,43 @@
/* ----------------------------------------------------------- */
+/* On Medion 7134 reading EEPROM needs DVB-T demod i2c gate open */
+static void saa7134_i2c_eeprom_md7134_gate(struct saa7134_dev *dev)
+{
+ u8 subaddr = 0x7, dmdregval;
+ u8 data[2];
+ int ret;
+ /* read demod register 0x07 at i2c address 0x08 */
+ struct i2c_msg i2cgatemsg_r[] = { {.addr = 0x08, .flags = 0,
+ .buf = &subaddr, .len = 1},
+ {.addr = 0x08,
+ .flags = I2C_M_RD,
+ .buf = &dmdregval, .len = 1}
+ };
+ /* write-back message: register subaddress followed by new value */
+ struct i2c_msg i2cgatemsg_w[] = { {.addr = 0x08, .flags = 0,
+ .buf = data, .len = 2} };
+
+ /* dmdregval is only inspected when both read messages succeeded */
+ ret = i2c_transfer(&dev->i2c_adap, i2cgatemsg_r, 2);
+ if ((ret == 2) && (dmdregval & 0x2)) {
+ pr_debug("%s: DVB-T demod i2c gate was left closed\n",
+ dev->name);
+
+ /* clear bit 0x2 to open the i2c gate for EEPROM access */
+ data[0] = subaddr;
+ data[1] = (dmdregval & ~0x2);
+ if (i2c_transfer(&dev->i2c_adap, i2cgatemsg_w, 1) != 1)
+ pr_err("%s: EEPROM i2c gate open failure\n",
+ dev->name);
+ }
+}
+
static int
saa7134_i2c_eeprom(struct saa7134_dev *dev, unsigned char *eedata, int len)
{
unsigned char buf;
int i,err;
+ if (dev->board == SAA7134_BOARD_MD7134)
+ saa7134_i2c_eeprom_md7134_gate(dev);
+
dev->i2c_client.addr = 0xa0 >> 1;
buf = 0;
if (1 != (err = i2c_master_send(&dev->i2c_client,&buf,1))) {
diff --git a/drivers/media/platform/msm/Kconfig b/drivers/media/platform/msm/Kconfig
index d5d873c..484819d 100644
--- a/drivers/media/platform/msm/Kconfig
+++ b/drivers/media/platform/msm/Kconfig
@@ -16,3 +16,5 @@
source "drivers/media/platform/msm/vidc/Kconfig"
source "drivers/media/platform/msm/sde/Kconfig"
+source "drivers/media/platform/msm/dvb/Kconfig"
+source "drivers/media/platform/msm/broadcast/Kconfig"
diff --git a/drivers/media/platform/msm/Makefile b/drivers/media/platform/msm/Makefile
index adeb2aa..e64bcd1 100644
--- a/drivers/media/platform/msm/Makefile
+++ b/drivers/media/platform/msm/Makefile
@@ -5,3 +5,5 @@
obj-$(CONFIG_MSM_VIDC_V4L2) += vidc/
obj-y += sde/
obj-$(CONFIG_SPECTRA_CAMERA) += camera/
+obj-y += broadcast/
+obj-$(CONFIG_DVB_MPQ) += dvb/
diff --git a/drivers/media/platform/msm/broadcast/Kconfig b/drivers/media/platform/msm/broadcast/Kconfig
new file mode 100644
index 0000000..cdd1b20
--- /dev/null
+++ b/drivers/media/platform/msm/broadcast/Kconfig
@@ -0,0 +1,14 @@
+#
+# MSM Broadcast subsystem drivers
+#
+
+config TSPP
+ depends on ARCH_QCOM
+ tristate "TSPP (Transport Stream Packet Processor) Support"
+ ---help---
+ Transport Stream Packet Processor v1 is used to offload the
+ processing of MPEG transport streams from the main processor.
+ It is used to process incoming transport streams from TSIF
+ to support use-cases such as transport stream live play
+ and recording.
+ This can also be compiled as a loadable module.
diff --git a/drivers/media/platform/msm/broadcast/Makefile b/drivers/media/platform/msm/broadcast/Makefile
new file mode 100644
index 0000000..3735bdc
--- /dev/null
+++ b/drivers/media/platform/msm/broadcast/Makefile
@@ -0,0 +1,4 @@
+#
+# Makefile for MSM Broadcast subsystem drivers.
+#
+obj-$(CONFIG_TSPP) += tspp.o
diff --git a/drivers/media/platform/msm/broadcast/tspp.c b/drivers/media/platform/msm/broadcast/tspp.c
new file mode 100644
index 0000000..2c90e47
--- /dev/null
+++ b/drivers/media/platform/msm/broadcast/tspp.c
@@ -0,0 +1,3328 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h> /* Just for modules */
+#include <linux/kernel.h> /* Only for KERN_INFO */
+#include <linux/err.h> /* Error macros */
+#include <linux/list.h> /* Linked list */
+#include <linux/cdev.h>
+#include <linux/init.h> /* Needed for the macros */
+#include <linux/io.h> /* IO macros */
+#include <linux/device.h> /* Device drivers need this */
+#include <linux/sched.h> /* Externally defined globals */
+#include <linux/pm_runtime.h> /* Runtime power management */
+#include <linux/fs.h>
+#include <linux/uaccess.h> /* copy_to_user */
+#include <linux/slab.h> /* kfree, kzalloc */
+#include <linux/ioport.h> /* XXX_ mem_region */
+#include <asm/dma-iommu.h>
+#include <linux/dma-mapping.h> /* dma_XXX */
+#include <linux/dmapool.h> /* DMA pools */
+#include <linux/delay.h> /* msleep */
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/poll.h> /* poll() file op */
+#include <linux/wait.h> /* wait() macros, sleeping */
+#include <linux/bitops.h> /* BIT() macro */
+#include <linux/regulator/consumer.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+#include <linux/msm-sps.h> /* BAM stuff */
+#include <linux/wakelock.h> /* Locking functions */
+#include <linux/timer.h> /* Timer services */
+#include <linux/jiffies.h> /* Jiffies counter */
+#include <linux/qcom_tspp.h>
+#include <linux/debugfs.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/string.h>
+#include <linux/msm-bus.h>
+#include <linux/interrupt.h> /* tasklet */
+#include <asm/arch_timer.h> /* Timer */
+#include <linux/dma-buf.h>
+
+/*
+ * General defines
+ */
+#define TSPP_TSIF_INSTANCES 2
+#define TSPP_GPIOS_PER_TSIF 4
+#define TSPP_FILTER_TABLES 3
+#define TSPP_MAX_DEVICES 1
+#define TSPP_NUM_CHANNELS 16
+#define TSPP_NUM_PRIORITIES 16
+#define TSPP_NUM_KEYS 8
+#define INVALID_CHANNEL 0xFFFFFFFF
+#define TSPP_BAM_DEFAULT_IPC_LOGLVL 2
+
+#define TSPP_SMMU_IOVA_START (0x10000000)
+#define TSPP_SMMU_IOVA_SIZE (0x40000000)
+
+/*
+ * BAM descriptor FIFO size (in number of descriptors).
+ * Max number of descriptors allowed by SPS which is 8K-1.
+ */
+#define TSPP_SPS_DESCRIPTOR_COUNT (8 * 1024 - 1)
+#define TSPP_PACKET_LENGTH 188
+#define TSPP_MIN_BUFFER_SIZE (TSPP_PACKET_LENGTH)
+
+/* Max descriptor buffer size allowed by SPS */
+#define TSPP_MAX_BUFFER_SIZE (32 * 1024 - 1)
+
+/*
+ * Returns whether to use DMA pool for TSPP output buffers.
+ * For buffers smaller than page size, using DMA pool
+ * provides better memory utilization as dma_alloc_coherent
+ * allocates minimum of page size.
+ */
+#define TSPP_USE_DMA_POOL(buff_size) ((buff_size) < PAGE_SIZE)
+
+/*
+ * Max allowed TSPP buffers/descriptors.
+ * If SPS desc FIFO holds X descriptors, we can queue up to X-1 descriptors.
+ */
+#define TSPP_NUM_BUFFERS (TSPP_SPS_DESCRIPTOR_COUNT - 1)
+#define TSPP_TSIF_DEFAULT_TIME_LIMIT 60
+#define SPS_DESCRIPTOR_SIZE 8
+#define MIN_ACCEPTABLE_BUFFER_COUNT 2
+#define TSPP_DEBUG(msg...)
+
+/*
+ * TSIF register offsets
+ */
+#define TSIF_STS_CTL_OFF (0x0)
+#define TSIF_TIME_LIMIT_OFF (0x4)
+#define TSIF_CLK_REF_OFF (0x8)
+#define TSIF_LPBK_FLAGS_OFF (0xc)
+#define TSIF_LPBK_DATA_OFF (0x10)
+#define TSIF_TEST_CTL_OFF (0x14)
+#define TSIF_TEST_MODE_OFF (0x18)
+#define TSIF_TEST_RESET_OFF (0x1c)
+#define TSIF_TEST_EXPORT_OFF (0x20)
+#define TSIF_TEST_CURRENT_OFF (0x24)
+#define TSIF_TTS_CTL_OFF (0x38)
+
+#define TSIF_DATA_PORT_OFF (0x100)
+
+/* bits for TSIF_STS_CTL register */
+#define TSIF_STS_CTL_EN_IRQ BIT(28)
+#define TSIF_STS_CTL_PACK_AVAIL BIT(27)
+#define TSIF_STS_CTL_1ST_PACKET BIT(26)
+#define TSIF_STS_CTL_OVERFLOW BIT(25)
+#define TSIF_STS_CTL_LOST_SYNC BIT(24)
+#define TSIF_STS_CTL_TIMEOUT BIT(23)
+#define TSIF_STS_CTL_INV_SYNC BIT(21)
+#define TSIF_STS_CTL_INV_NULL BIT(20)
+#define TSIF_STS_CTL_INV_ERROR BIT(19)
+#define TSIF_STS_CTL_INV_ENABLE BIT(18)
+#define TSIF_STS_CTL_INV_DATA BIT(17)
+#define TSIF_STS_CTL_INV_CLOCK BIT(16)
+#define TSIF_STS_CTL_SPARE BIT(15)
+#define TSIF_STS_CTL_EN_NULL BIT(11)
+#define TSIF_STS_CTL_EN_ERROR BIT(10)
+#define TSIF_STS_CTL_LAST_BIT BIT(9)
+#define TSIF_STS_CTL_EN_TIME_LIM BIT(8)
+#define TSIF_STS_CTL_EN_TCR BIT(7)
+#define TSIF_STS_CTL_TEST_MODE BIT(6)
+#define TSIF_STS_CTL_MODE_2 BIT(5)
+#define TSIF_STS_CTL_EN_DM BIT(4)
+#define TSIF_STS_CTL_STOP BIT(3)
+#define TSIF_STS_CTL_START BIT(0)
+
+/* bits for TSIF_TTS_CTRL register */
+#define TSIF_TTS_CTL_TTS_ENDIANNESS BIT(4)
+#define TSIF_TTS_CTL_TTS_SOURCE BIT(3)
+#define TSIF_TTS_CTL_TTS_LENGTH_1 BIT(1)
+#define TSIF_TTS_CTL_TTS_LENGTH_0 BIT(0)
+
+/*
+ * TSPP register offsets
+ */
+#define TSPP_RST 0x00
+#define TSPP_CLK_CONTROL 0x04
+#define TSPP_CONFIG 0x08
+#define TSPP_CONTROL 0x0C
+#define TSPP_PS_DISABLE 0x10
+#define TSPP_MSG_IRQ_STATUS 0x14
+#define TSPP_MSG_IRQ_MASK 0x18
+#define TSPP_IRQ_STATUS 0x1C
+#define TSPP_IRQ_MASK 0x20
+#define TSPP_IRQ_CLEAR 0x24
+#define TSPP_PIPE_ERROR_STATUS(_n) (0x28 + (_n << 2))
+#define TSPP_STATUS 0x68
+#define TSPP_CURR_TSP_HEADER 0x6C
+#define TSPP_CURR_PID_FILTER 0x70
+#define TSPP_SYSTEM_KEY(_n) (0x74 + (_n << 2))
+#define TSPP_CBC_INIT_VAL(_n) (0x94 + (_n << 2))
+#define TSPP_DATA_KEY_RESET 0x9C
+#define TSPP_KEY_VALID 0xA0
+#define TSPP_KEY_ERROR 0xA4
+#define TSPP_TEST_CTRL 0xA8
+#define TSPP_VERSION 0xAC
+#define TSPP_GENERICS 0xB0
+#define TSPP_NOP 0xB4
+
+/*
+ * Register bit definitions
+ */
+/* TSPP_RST */
+#define TSPP_RST_RESET BIT(0)
+
+/* TSPP_CLK_CONTROL */
+#define TSPP_CLK_CONTROL_FORCE_CRYPTO BIT(9)
+#define TSPP_CLK_CONTROL_FORCE_PES_PL BIT(8)
+#define TSPP_CLK_CONTROL_FORCE_PES_AF BIT(7)
+#define TSPP_CLK_CONTROL_FORCE_RAW_CTRL BIT(6)
+#define TSPP_CLK_CONTROL_FORCE_PERF_CNT BIT(5)
+#define TSPP_CLK_CONTROL_FORCE_CTX_SEARCH BIT(4)
+#define TSPP_CLK_CONTROL_FORCE_TSP_PROC BIT(3)
+#define TSPP_CLK_CONTROL_FORCE_CONS_AHB2MEM BIT(2)
+#define TSPP_CLK_CONTROL_FORCE_TS_AHB2MEM BIT(1)
+#define TSPP_CLK_CONTROL_SET_CLKON BIT(0)
+
+/* TSPP_CONFIG */
+#define TSPP_CONFIG_SET_PACKET_LENGTH(_a, _b) (_a = (_a & 0xF0) | \
+((_b & 0xF) << 8))
+#define TSPP_CONFIG_GET_PACKET_LENGTH(_a) ((_a >> 8) & 0xF)
+#define TSPP_CONFIG_DUP_WITH_DISC_EN BIT(7)
+#define TSPP_CONFIG_PES_SYNC_ERROR_MASK BIT(6)
+#define TSPP_CONFIG_PS_LEN_ERR_MASK BIT(5)
+#define TSPP_CONFIG_PS_CONT_ERR_UNSP_MASK BIT(4)
+#define TSPP_CONFIG_PS_CONT_ERR_MASK BIT(3)
+#define TSPP_CONFIG_PS_DUP_TSP_MASK BIT(2)
+#define TSPP_CONFIG_TSP_ERR_IND_MASK BIT(1)
+#define TSPP_CONFIG_TSP_SYNC_ERR_MASK BIT(0)
+
+/* TSPP_CONTROL */
+#define TSPP_CONTROL_PID_FILTER_LOCK BIT(5)
+#define TSPP_CONTROL_FORCE_KEY_CALC BIT(4)
+#define TSPP_CONTROL_TSP_CONS_SRC_DIS BIT(3)
+#define TSPP_CONTROL_TSP_TSIF1_SRC_DIS BIT(2)
+#define TSPP_CONTROL_TSP_TSIF0_SRC_DIS BIT(1)
+#define TSPP_CONTROL_PERF_COUNT_INIT BIT(0)
+
+/* TSPP_MSG_IRQ_STATUS + TSPP_MSG_IRQ_MASK */
+#define TSPP_MSG_TSPP_IRQ BIT(2)
+#define TSPP_MSG_TSIF_1_IRQ BIT(1)
+#define TSPP_MSG_TSIF_0_IRQ BIT(0)
+
+/* TSPP_IRQ_STATUS + TSPP_IRQ_MASK + TSPP_IRQ_CLEAR */
+#define TSPP_IRQ_STATUS_TSP_RD_CMPL BIT(19)
+#define TSPP_IRQ_STATUS_KEY_ERROR BIT(18)
+#define TSPP_IRQ_STATUS_KEY_SWITCHED_BAD BIT(17)
+#define TSPP_IRQ_STATUS_KEY_SWITCHED BIT(16)
+#define TSPP_IRQ_STATUS_PS_BROKEN(_n) BIT((_n))
+
+/* TSPP_PIPE_ERROR_STATUS */
+#define TSPP_PIPE_PES_SYNC_ERROR BIT(3)
+#define TSPP_PIPE_PS_LENGTH_ERROR BIT(2)
+#define TSPP_PIPE_PS_CONTINUITY_ERROR BIT(1)
+#define TSPP_PIP_PS_LOST_START BIT(0)
+
+/* TSPP_STATUS */
+#define TSPP_STATUS_TSP_PKT_AVAIL BIT(10)
+#define TSPP_STATUS_TSIF1_DM_REQ BIT(6)
+#define TSPP_STATUS_TSIF0_DM_REQ BIT(2)
+#define TSPP_CURR_FILTER_TABLE BIT(0)
+
+/* TSPP_GENERICS */
+#define TSPP_GENERICS_CRYPTO_GEN BIT(12)
+#define TSPP_GENERICS_MAX_CONS_PIPES BIT(7)
+#define TSPP_GENERICS_MAX_PIPES BIT(2)
+#define TSPP_GENERICS_TSIF_1_GEN BIT(1)
+#define TSPP_GENERICS_TSIF_0_GEN BIT(0)
+
+/*
+ * TSPP memory regions
+ */
+#define TSPP_PID_FILTER_TABLE0 0x800
+#define TSPP_PID_FILTER_TABLE1 0x880
+#define TSPP_PID_FILTER_TABLE2 0x900
+#define TSPP_GLOBAL_PERFORMANCE 0x980 /* see tspp_global_performance */
+#define TSPP_PIPE_CONTEXT 0x990 /* see tspp_pipe_context */
+#define TSPP_PIPE_PERFORMANCE 0x998 /* see tspp_pipe_performance */
+#define TSPP_TSP_BUFF_WORD(_n) (0xC10 + (_n << 2))
+#define TSPP_DATA_KEY 0xCD0
+
/*
 * Describes one debugfs register file: the file name, its permission
 * bits, and the offset of the backing hardware register.
 */
struct debugfs_entry {
	const char *name;	/* debugfs file name */
	mode_t mode;		/* file permission bits */
	int offset;		/* register offset from the block's base */
};
+
+static const struct debugfs_entry debugfs_tsif_regs[] = {
+ {"sts_ctl", 0644, TSIF_STS_CTL_OFF},
+ {"time_limit", 0644, TSIF_TIME_LIMIT_OFF},
+ {"clk_ref", 0644, TSIF_CLK_REF_OFF},
+ {"lpbk_flags", 0644, TSIF_LPBK_FLAGS_OFF},
+ {"lpbk_data", 0644, TSIF_LPBK_DATA_OFF},
+ {"test_ctl", 0644, TSIF_TEST_CTL_OFF},
+ {"test_mode", 0644, TSIF_TEST_MODE_OFF},
+ {"test_reset", 0200, TSIF_TEST_RESET_OFF},
+ {"test_export", 0644, TSIF_TEST_EXPORT_OFF},
+ {"test_current", 0444, TSIF_TEST_CURRENT_OFF},
+ {"data_port", 0400, TSIF_DATA_PORT_OFF},
+ {"tts_source", 0600, TSIF_TTS_CTL_OFF},
+};
+
+static const struct debugfs_entry debugfs_tspp_regs[] = {
+ {"rst", 0644, TSPP_RST},
+ {"clk_control", 0644, TSPP_CLK_CONTROL},
+ {"config", 0644, TSPP_CONFIG},
+ {"control", 0644, TSPP_CONTROL},
+ {"ps_disable", 0644, TSPP_PS_DISABLE},
+ {"msg_irq_status", 0644, TSPP_MSG_IRQ_STATUS},
+ {"msg_irq_mask", 0644, TSPP_MSG_IRQ_MASK},
+ {"irq_status", 0644, TSPP_IRQ_STATUS},
+ {"irq_mask", 0644, TSPP_IRQ_MASK},
+ {"irq_clear", 0644, TSPP_IRQ_CLEAR},
+ /* {"pipe_error_status",S_IRUGO | S_IWUSR, TSPP_PIPE_ERROR_STATUS}, */
+ {"status", 0644, TSPP_STATUS},
+ {"curr_tsp_header", 0644, TSPP_CURR_TSP_HEADER},
+ {"curr_pid_filter", 0644, TSPP_CURR_PID_FILTER},
+ /* {"system_key", S_IRUGO | S_IWUSR, TSPP_SYSTEM_KEY}, */
+ /* {"cbc_init_val", S_IRUGO | S_IWUSR, TSPP_CBC_INIT_VAL}, */
+ {"data_key_reset", 0644, TSPP_DATA_KEY_RESET},
+ {"key_valid", 0644, TSPP_KEY_VALID},
+ {"key_error", 0644, TSPP_KEY_ERROR},
+ {"test_ctrl", 0644, TSPP_TEST_CTRL},
+ {"version", 0644, TSPP_VERSION},
+ {"generics", 0644, TSPP_GENERICS},
+ {"pid_filter_table0", 0644, TSPP_PID_FILTER_TABLE0},
+ {"pid_filter_table1", 0644, TSPP_PID_FILTER_TABLE1},
+ {"pid_filter_table2", 0644, TSPP_PID_FILTER_TABLE2},
+ {"tsp_total_num", 0644, TSPP_GLOBAL_PERFORMANCE},
+ {"tsp_ignored_num", 0644, TSPP_GLOBAL_PERFORMANCE + 4},
+ {"tsp_err_ind_num", 0644, TSPP_GLOBAL_PERFORMANCE + 8},
+ {"tsp_sync_err_num", 0644, TSPP_GLOBAL_PERFORMANCE + 16},
+ {"pipe_context", 0644, TSPP_PIPE_CONTEXT},
+ {"pipe_performance", 0644, TSPP_PIPE_PERFORMANCE},
+ {"data_key", 0644, TSPP_DATA_KEY}
+};
+
/*
 * One hardware PID filter entry. Both words are bit fields that are
 * encoded/decoded only through the FILTER_* accessor macros below.
 */
struct tspp_pid_filter {
	u32 filter; /* see FILTER_ macros */
	u32 config; /* see FILTER_ macros */
};
+
+/* tsp_info */
+#define FILTER_HEADER_ERROR_MASK BIT(7)
+#define FILTER_TRANS_END_DISABLE BIT(6)
+#define FILTER_DEC_ON_ERROR_EN BIT(5)
+#define FILTER_DECRYPT BIT(4)
+#define FILTER_HAS_ENCRYPTION(_p) (_p->config & FILTER_DECRYPT)
+#define FILTER_GET_PIPE_NUMBER0(_p) (_p->config & 0xF)
+#define FILTER_SET_PIPE_NUMBER0(_p, _b) (_p->config = \
+ (_p->config & ~0xF) | (_b & 0xF))
+#define FILTER_GET_PIPE_PROCESS0(_p) ((_p->filter >> 30) & 0x3)
+#define FILTER_SET_PIPE_PROCESS0(_p, _b) (_p->filter = \
+ (_p->filter & ~(0x3<<30)) | ((_b & 0x3) << 30))
+#define FILTER_GET_PIPE_PID(_p) ((_p->filter >> 13) & 0x1FFF)
+#define FILTER_SET_PIPE_PID(_p, _b) (_p->filter = \
+ (_p->filter & ~(0x1FFF<<13)) | ((_b & 0x1FFF) << 13))
+#define FILTER_GET_PID_MASK(_p) (_p->filter & 0x1FFF)
+#define FILTER_SET_PID_MASK(_p, _b) (_p->filter = \
+ (_p->filter & ~0x1FFF) | (_b & 0x1FFF))
+#define FILTER_GET_PIPE_PROCESS1(_p) ((_p->config >> 30) & 0x3)
+#define FILTER_SET_PIPE_PROCESS1(_p, _b) (_p->config = \
+ (_p->config & ~(0x3<<30)) | ((_b & 0x3) << 30))
+#define FILTER_GET_KEY_NUMBER(_p) ((_p->config >> 8) & 0x7)
+#define FILTER_SET_KEY_NUMBER(_p, _b) (_p->config = \
+ (_p->config & ~(0x7<<8)) | ((_b & 0x7) << 8))
+
/* Layout of the global performance counters at TSPP_GLOBAL_PERFORMANCE. */
struct tspp_global_performance_regs {
	u32 tsp_total;
	u32 tsp_ignored;
	u32 tsp_error;
	u32 tsp_sync;
};
+
/*
 * Layout of one pipe context entry at TSPP_PIPE_CONTEXT; __packed so
 * the struct matches the hardware register layout exactly.
 */
struct tspp_pipe_context_regs {
	u16 pes_bytes_left;
	u16 count;
	u32 tsif_suffix;
} __packed;
+#define CONTEXT_GET_STATE(_a) (_a & 0x3)
+#define CONTEXT_UNSPEC_LENGTH BIT(11)
+#define CONTEXT_GET_CONT_COUNT(_a) ((_a >> 12) & 0xF)
+
+#define MSEC_TO_JIFFIES(msec) ((msec) * HZ / 1000)
+
/* Layout of the per-pipe performance counters at TSPP_PIPE_PERFORMANCE. */
struct tspp_pipe_performance_regs {
	u32 tsp_total;
	u32 ps_duplicate_tsp;
	u32 tsp_no_payload;
	u32 tsp_broken_ps;
	u32 ps_total_num;
	u32 ps_continuity_error;
	u32 ps_length_error;
	u32 pes_sync_error;
};
+
/*
 * Per-TSIF-instance state: mapped register space, configuration,
 * interrupt statistics and debugfs handles.
 */
struct tspp_tsif_device {
	void __iomem *base;	/* mapped TSIF register space */
	u32 time_limit;		/* programmed into TSIF_TIME_LIMIT_OFF */
	u32 ref_count;		/* start/stop reference count */
	enum tspp_tsif_mode mode;
	int clock_inverse;	/* set TSIF_STS_CTL_INV_CLOCK on start */
	int data_inverse;	/* set TSIF_STS_CTL_INV_DATA on start */
	int sync_inverse;	/* set TSIF_STS_CTL_INV_SYNC on start */
	int enable_inverse;	/* set TSIF_STS_CTL_INV_ENABLE on start */
	u32 tsif_irq;

	/* debugfs */
	struct dentry *dent_tsif;
	struct dentry *debugfs_tsif_regs[ARRAY_SIZE(debugfs_tsif_regs)];
	u32 stat_rx;		/* completed buffers (bumped in tasklet) */
	u32 stat_overflow;	/* overflow interrupts seen */
	u32 stat_lost_sync;	/* lost-sync interrupts seen */
	u32 stat_timeout;	/* timeout interrupts seen */
	enum tsif_tts_source tts_source;
	u32 lpass_timer_enable;
};
+
/* Life cycle of a tspp_mem_buffer (empty -> waiting -> data -> locked). */
enum tspp_buf_state {
	TSPP_BUF_STATE_EMPTY,	/* buffer has been allocated, but not waiting */
	TSPP_BUF_STATE_WAITING, /* buffer is waiting to be filled */
	TSPP_BUF_STATE_DATA,    /* buffer is not empty and can be read */
	TSPP_BUF_STATE_LOCKED   /* buffer is being read by a client */
};
+
/*
 * One element of a channel's buffer list: the SPS/BAM view of the DMA
 * buffer plus bookkeeping for the reader side.
 */
struct tspp_mem_buffer {
	struct tspp_mem_buffer *next;	/* next buffer in the list */
	struct sps_mem_buffer sps;	/* BAM/SPS buffer descriptor */
	struct tspp_data_descriptor desc; /* buffer descriptor for kernel api */
	enum tspp_buf_state state;
	size_t filled; /* how much data this buffer is holding */
	int read_index; /* where to start reading data from */
};
+
/*
 * This represents each char device 'channel': one SPS/BAM pipe with its
 * buffer list, filter bookkeeping and client-notification state.
 */
struct tspp_channel {
	struct tspp_device *pdev; /* can use container_of instead? */
	struct sps_pipe *pipe;
	struct sps_connect config;
	struct sps_register_event event;
	struct tspp_mem_buffer *data; /* list of buffers */
	struct tspp_mem_buffer *read; /* first buffer ready to be read */
	struct tspp_mem_buffer *waiting; /* first outstanding transfer */
	struct tspp_mem_buffer *locked; /* buffer currently being read */
	wait_queue_head_t in_queue; /* set when data is received */
	u32 id; /* channel id (0-15) */
	int used; /* is this channel in use? */
	int key; /* which encryption key index is used */
	u32 buffer_size; /* size of the sps transfer buffers */
	u32 max_buffers; /* how many buffers should be allocated */
	u32 buffer_count; /* how many buffers are actually allocated */
	u32 filter_count; /* how many filters have been added to this channel */
	u32 int_freq; /* generate interrupts every x descriptors */
	enum tspp_source src;
	enum tspp_mode mode;
	tspp_notifier *notifier; /* used only with kernel api */
	void *notify_data; /* data to be passed with the notifier */
	u32 expiration_period_ms; /* notification on partially filled buffers */
	struct timer_list expiration_timer;
	struct dma_pool *dma_pool;
	tspp_memfree *memfree; /* user defined memory free function */
	void *user_info; /* user cookie passed to memory alloc/free function */
};
+
/* One of the TSPP_FILTER_TABLES filter tables, TSPP_NUM_PRIORITIES deep. */
struct tspp_pid_filter_table {
	struct tspp_pid_filter filter[TSPP_NUM_PRIORITIES];
};
+
/* One slot of the key table (even/odd key halves, LSB and MSB words). */
struct tspp_key_entry {
	u32 even_lsb;
	u32 even_msb;
	u32 odd_lsb;
	u32 odd_msb;
};
+
/* Table of TSPP_NUM_KEYS key entries. */
struct tspp_key_table {
	struct tspp_key_entry entry[TSPP_NUM_KEYS];
};
+
/*
 * Pinctrl handle plus the pre-looked-up states for every TSIF enable
 * combination (looked up in tspp_get_pinctrl(), selected in
 * tspp_config_gpios()).
 */
struct tspp_pinctrl {
	struct pinctrl *pinctrl;

	struct pinctrl_state *disabled;
	struct pinctrl_state *tsif0_mode1;
	struct pinctrl_state *tsif0_mode2;
	struct pinctrl_state *tsif1_mode1;
	struct pinctrl_state *tsif1_mode2;
	struct pinctrl_state *dual_mode1;
	struct pinctrl_state *dual_mode2;

	bool tsif0_active;	/* tsif0 pins currently enabled */
	bool tsif1_active;	/* tsif1 pins currently enabled */
};
+
/*
 * This represents the actual hardware device: register space, BAM/SPS
 * connection, clocks, regulators, per-TSIF state, the 16 channels, and
 * pointers into the shared filter/key register regions.
 */
struct tspp_device {
	struct list_head devlist; /* list of all devices */
	struct platform_device *pdev;
	void __iomem *base;		/* mapped TSPP register space */
	uint32_t tsif_bus_client;	/* msm_bus scaling client handle */
	unsigned int tspp_irq;
	unsigned int bam_irq;
	unsigned long bam_handle;
	struct sps_bam_props bam_props;
	struct wakeup_source ws;
	spinlock_t spinlock;	/* held by the completion tasklet */
	struct tasklet_struct tlet;	/* descriptor-completion bottom half */
	struct tspp_tsif_device tsif[TSPP_TSIF_INSTANCES];
	/* clocks */
	struct clk *tsif_pclk;
	struct clk *tsif_ref_clk;
	/* regulators */
	struct regulator *tsif_vreg;
	/* data */
	struct tspp_pid_filter_table *filters[TSPP_FILTER_TABLES];
	struct tspp_channel channels[TSPP_NUM_CHANNELS];
	struct tspp_key_table *tspp_key_table;
	struct tspp_global_performance_regs *tspp_global_performance;
	struct tspp_pipe_context_regs *tspp_pipe_context;
	struct tspp_pipe_performance_regs *tspp_pipe_performance;
	bool req_irqs;
	/* pinctrl */
	struct mutex mutex;	/* serializes pinctrl state changes */
	struct tspp_pinctrl pinctrl;
	unsigned int tts_source; /* Time stamp source type LPASS timer/TCR */
	struct dma_iommu_mapping *iommu_mapping;

	struct dentry *dent;
	struct dentry *debugfs_regs[ARRAY_SIZE(debugfs_tspp_regs)];
};
+
+static int tspp_key_entry;
+static u32 channel_id; /* next channel id number to assign */
+
+static LIST_HEAD(tspp_devices);
+
+/*** IRQ ***/
/*
 * tspp_isr() - TSPP core interrupt handler.
 * @irq: interrupt number (unused)
 * @dev: the struct tspp_device registered at request time
 *
 * Reads the interrupt status, masks it with the enabled sources, logs
 * key-related events and broken-pipe indications, and acknowledges all
 * observed bits by writing them to TSPP_IRQ_CLEAR.
 *
 * Return: IRQ_HANDLED if any enabled status bit was set, IRQ_NONE for a
 * spurious interrupt.
 */
static irqreturn_t tspp_isr(int irq, void *dev)
{
	struct tspp_device *device = dev;
	u32 status, mask;
	u32 data;

	status = readl_relaxed(device->base + TSPP_IRQ_STATUS);
	mask = readl_relaxed(device->base + TSPP_IRQ_MASK);
	status &= mask; /* ignore sources that are not enabled */

	if (!status) {
		dev_warn(&device->pdev->dev, "Spurious interrupt");
		return IRQ_NONE;
	}

	/* if (status & TSPP_IRQ_STATUS_TSP_RD_CMPL) */

	if (status & TSPP_IRQ_STATUS_KEY_ERROR) {
		/* read the key error info */
		data = readl_relaxed(device->base + TSPP_KEY_ERROR);
		dev_info(&device->pdev->dev, "key error 0x%x", data);
	}
	if (status & TSPP_IRQ_STATUS_KEY_SWITCHED_BAD) {
		data = readl_relaxed(device->base + TSPP_KEY_VALID);
		dev_info(&device->pdev->dev, "key invalidated: 0x%x", data);
	}
	if (status & TSPP_IRQ_STATUS_KEY_SWITCHED)
		dev_info(&device->pdev->dev, "key switched");

	/* bits 0..15 are the per-pipe TSPP_IRQ_STATUS_PS_BROKEN(n) flags */
	if (status & 0xffff)
		dev_info(&device->pdev->dev, "broken pipe %i", status & 0xffff);

	writel_relaxed(status, device->base + TSPP_IRQ_CLEAR);

	/*
	 * Before returning IRQ_HANDLED to the generic interrupt handling
	 * framework need to make sure all operations including clearing of
	 * interrupt status registers in the hardware is performed.
	 * Thus a barrier after clearing the interrupt status register
	 * is required to guarantee that the interrupt status register has
	 * really been cleared by the time we return from this handler.
	 */
	wmb();
	return IRQ_HANDLED;
}
+
/*
 * tsif_isr() - per-TSIF-instance interrupt handler.
 * @irq: interrupt number (unused)
 * @dev: the struct tspp_tsif_device registered at request time
 *
 * Reads the TSIF status/control register, bumps the overflow /
 * lost-sync / timeout statistics counters for the bits that are set,
 * and acknowledges by writing the status value back.
 *
 * Return: IRQ_HANDLED if any relevant status bit was set, IRQ_NONE for
 * a spurious interrupt.
 */
static irqreturn_t tsif_isr(int irq, void *dev)
{
	struct tspp_tsif_device *tsif_device = dev;
	u32 sts_ctl = ioread32(tsif_device->base + TSIF_STS_CTL_OFF);

	if (!(sts_ctl & (TSIF_STS_CTL_PACK_AVAIL |
			 TSIF_STS_CTL_OVERFLOW |
			 TSIF_STS_CTL_LOST_SYNC |
			 TSIF_STS_CTL_TIMEOUT)))
		return IRQ_NONE;

	if (sts_ctl & TSIF_STS_CTL_OVERFLOW)
		tsif_device->stat_overflow++;

	if (sts_ctl & TSIF_STS_CTL_LOST_SYNC)
		tsif_device->stat_lost_sync++;

	if (sts_ctl & TSIF_STS_CTL_TIMEOUT)
		tsif_device->stat_timeout++;

	/* write the observed status bits back to acknowledge them */
	iowrite32(sts_ctl, tsif_device->base + TSIF_STS_CTL_OFF);

	/*
	 * Before returning IRQ_HANDLED to the generic interrupt handling
	 * framework need to make sure all operations including clearing of
	 * interrupt status registers in the hardware is performed.
	 * Thus a barrier after clearing the interrupt status register
	 * is required to guarantee that the interrupt status register has
	 * really been cleared by the time we return from this handler.
	 */
	wmb();
	return IRQ_HANDLED;
}
+
+/*** callbacks ***/
+static void tspp_sps_complete_cb(struct sps_event_notify *notify)
+{
+ struct tspp_device *pdev;
+
+ if (!notify || !notify->user)
+ return;
+
+ pdev = notify->user;
+ tasklet_schedule(&pdev->tlet);
+}
+
+static void tspp_expiration_timer(unsigned long data)
+{
+ struct tspp_device *pdev = (struct tspp_device *)data;
+
+ if (pdev)
+ tasklet_schedule(&pdev->tlet);
+}
+
+/*** tasklet ***/
/*
 * tspp_sps_complete_tlet() - descriptor-completion bottom half.
 * @data: the struct tspp_device (cast from unsigned long)
 *
 * For every in-use channel with outstanding transfers: stop its
 * expiration timer, drain the completed BAM iovecs (marking each
 * buffer TSPP_BUF_STATE_DATA and recording its fill level), bump the
 * source TSIF's rx statistic, wake sleeping readers, invoke the
 * client notifier if one is registered, and re-arm the expiration
 * timer. Runs entirely under the device spinlock with interrupts
 * disabled.
 */
static void tspp_sps_complete_tlet(unsigned long data)
{
	int i;
	int complete;
	unsigned long flags;
	struct sps_iovec iovec;
	struct tspp_channel *channel;
	struct tspp_device *device = (struct tspp_device *)data;

	spin_lock_irqsave(&device->spinlock, flags);

	for (i = 0; i < TSPP_NUM_CHANNELS; i++) {
		complete = 0;
		channel = &device->channels[i];

		if (!channel->used || !channel->waiting)
			continue;

		/* stop the expiration timer */
		if (channel->expiration_period_ms)
			del_timer(&channel->expiration_timer);

		/* get completions */
		while (channel->waiting->state == TSPP_BUF_STATE_WAITING) {
			if (sps_get_iovec(channel->pipe, &iovec) != 0) {
				pr_err("tspp: Error in iovec on channel %i",
					channel->id);
				break;
			}
			/* a zero-size iovec means no more completions */
			if (iovec.size == 0)
				break;

			/* completed descriptor should match the head buffer */
			if (DESC_FULL_ADDR(iovec.flags, iovec.addr)
					!= channel->waiting->sps.phys_base)
				pr_err("tspp: buffer mismatch %pa",
					&channel->waiting->sps.phys_base);

			complete = 1;
			channel->waiting->state = TSPP_BUF_STATE_DATA;
			channel->waiting->filled = iovec.size;
			channel->waiting->read_index = 0;

			if (channel->src == TSPP_SOURCE_TSIF0)
				device->tsif[0].stat_rx++;
			else if (channel->src == TSPP_SOURCE_TSIF1)
				device->tsif[1].stat_rx++;

			/* update the pointers */
			channel->waiting = channel->waiting->next;
		}

		/* wake any waiting processes */
		if (complete) {
			wake_up_interruptible(&channel->in_queue);

			/* call notifiers */
			if (channel->notifier)
				channel->notifier(channel->id,
					channel->notify_data);
		}

		/* restart expiration timer */
		if (channel->expiration_period_ms)
			mod_timer(&channel->expiration_timer,
				jiffies +
				MSEC_TO_JIFFIES(
					channel->expiration_period_ms));
	}

	spin_unlock_irqrestore(&device->spinlock, flags);
}
+
/*
 * tspp_config_gpios() - select the pinctrl state for a TSIF source.
 * @device: the TSPP device
 * @source: TSPP_SOURCE_TSIF0 or TSPP_SOURCE_TSIF1
 * @enable: nonzero to enable the source's pins, zero to disable
 *
 * The two TSIF instances share combined pinctrl states (disabled /
 * single / dual, each in mode1 or mode2 flavors), so the new state is
 * chosen based on what the *other* instance is currently doing. The
 * selection and application are serialized by the device mutex.
 *
 * Return: 0 on success, -ERESTARTSYS if interrupted waiting for the
 * mutex, -EINVAL for an invalid source, or the pinctrl error code.
 */
static int tspp_config_gpios(struct tspp_device *device,
				enum tspp_source source,
				int enable)
{
	int ret;
	struct pinctrl_state *s;
	struct tspp_pinctrl *p = &device->pinctrl;
	bool mode2;

	/*
	 * TSIF devices are handled separately, however changing of the pinctrl
	 * state must be protected from race condition.
	 */
	if (mutex_lock_interruptible(&device->mutex))
		return -ERESTARTSYS;

	switch (source) {
	case TSPP_SOURCE_TSIF0:
		mode2 = device->tsif[0].mode == TSPP_TSIF_MODE_2;
		if (enable == p->tsif1_active) {
			if (enable)
				/* Both tsif enabled */
				s = mode2 ? p->dual_mode2 : p->dual_mode1;
			else
				/* Both tsif disabled */
				s = p->disabled;
		} else if (enable) {
			/* Only tsif0 is enabled */
			s = mode2 ? p->tsif0_mode2 : p->tsif0_mode1;
		} else {
			/* Only tsif1 is enabled */
			s = mode2 ? p->tsif1_mode2 : p->tsif1_mode1;
		}

		ret = pinctrl_select_state(p->pinctrl, s);
		if (!ret)
			p->tsif0_active = enable;
		break;
	case TSPP_SOURCE_TSIF1:
		mode2 = device->tsif[1].mode == TSPP_TSIF_MODE_2;
		if (enable == p->tsif0_active) {
			if (enable)
				/* Both tsif enabled */
				s = mode2 ? p->dual_mode2 : p->dual_mode1;
			else
				/* Both tsif disabled */
				s = p->disabled;
		} else if (enable) {
			/* Only tsif1 is enabled */
			s = mode2 ? p->tsif1_mode2 : p->tsif1_mode1;
		} else {
			/* Only tsif0 is enabled */
			s = mode2 ? p->tsif0_mode2 : p->tsif0_mode1;
		}

		ret = pinctrl_select_state(p->pinctrl, s);
		if (!ret)
			p->tsif1_active = enable;
		break;
	default:
		pr_err("%s: invalid source %d\n", __func__, source);
		mutex_unlock(&device->mutex);
		return -EINVAL;
	}

	if (ret)
		pr_err("%s: failed to change pinctrl state, ret=%d\n",
			__func__, ret);

	mutex_unlock(&device->mutex);
	return ret;
}
+
+static int tspp_get_pinctrl(struct tspp_device *device)
+{
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *state;
+
+ pinctrl = devm_pinctrl_get(&device->pdev->dev);
+ if (IS_ERR_OR_NULL(pinctrl)) {
+ pr_err("%s: Unable to get pinctrl handle\n", __func__);
+ return -EINVAL;
+ }
+ device->pinctrl.pinctrl = pinctrl;
+
+ state = pinctrl_lookup_state(pinctrl, "disabled");
+ if (IS_ERR_OR_NULL(state)) {
+ pr_err("%s: Unable to find state %s\n",
+ __func__, "disabled");
+ return -EINVAL;
+ }
+ device->pinctrl.disabled = state;
+
+ state = pinctrl_lookup_state(pinctrl, "tsif0-mode1");
+ if (IS_ERR_OR_NULL(state)) {
+ pr_err("%s: Unable to find state %s\n",
+ __func__, "tsif0-mode1");
+ return -EINVAL;
+ }
+ device->pinctrl.tsif0_mode1 = state;
+
+ state = pinctrl_lookup_state(pinctrl, "tsif0-mode2");
+ if (IS_ERR_OR_NULL(state)) {
+ pr_err("%s: Unable to find state %s\n",
+ __func__, "tsif0-mode2");
+ return -EINVAL;
+ }
+ device->pinctrl.tsif0_mode2 = state;
+
+ state = pinctrl_lookup_state(pinctrl, "tsif1-mode1");
+ if (IS_ERR_OR_NULL(state)) {
+ pr_err("%s: Unable to find state %s\n",
+ __func__, "tsif1-mode1");
+ return -EINVAL;
+ }
+ device->pinctrl.tsif1_mode1 = state;
+
+ state = pinctrl_lookup_state(pinctrl, "tsif1-mode2");
+ if (IS_ERR_OR_NULL(state)) {
+ pr_err("%s: Unable to find state %s\n",
+ __func__, "tsif1-mode2");
+ return -EINVAL;
+ }
+ device->pinctrl.tsif1_mode2 = state;
+
+ state = pinctrl_lookup_state(pinctrl, "dual-tsif-mode1");
+ if (IS_ERR_OR_NULL(state)) {
+ pr_err("%s: Unable to find state %s\n",
+ __func__, "dual-tsif-mode1");
+ return -EINVAL;
+ }
+ device->pinctrl.dual_mode1 = state;
+
+ state = pinctrl_lookup_state(pinctrl, "dual-tsif-mode2");
+ if (IS_ERR_OR_NULL(state)) {
+ pr_err("%s: Unable to find state %s\n",
+ __func__, "dual-tsif-mode2");
+ return -EINVAL;
+ }
+ device->pinctrl.dual_mode2 = state;
+
+ device->pinctrl.tsif0_active = false;
+ device->pinctrl.tsif1_active = false;
+
+ return 0;
+}
+
+
+/*** Clock functions ***/
+static int tspp_clock_start(struct tspp_device *device)
+{
+ int rc;
+
+ if (device == NULL) {
+ pr_err("tspp: Can't start clocks, invalid device\n");
+ return -EINVAL;
+ }
+
+ if (device->tsif_bus_client) {
+ rc = msm_bus_scale_client_update_request(
+ device->tsif_bus_client, 1);
+ if (rc) {
+ pr_err("tspp: Can't enable bus\n");
+ return -EBUSY;
+ }
+ }
+
+ if (device->tsif_vreg) {
+ rc = regulator_set_voltage(device->tsif_vreg,
+ RPMH_REGULATOR_LEVEL_OFF,
+ RPMH_REGULATOR_LEVEL_MAX);
+ if (rc) {
+ pr_err("Unable to set CX voltage.\n");
+ if (device->tsif_bus_client)
+ msm_bus_scale_client_update_request(
+ device->tsif_bus_client, 0);
+ return rc;
+ }
+ }
+
+ if (device->tsif_pclk && clk_prepare_enable(device->tsif_pclk) != 0) {
+ pr_err("tspp: Can't start pclk");
+
+ if (device->tsif_vreg) {
+ regulator_set_voltage(device->tsif_vreg,
+ RPMH_REGULATOR_LEVEL_OFF,
+ RPMH_REGULATOR_LEVEL_MAX);
+ }
+
+ if (device->tsif_bus_client)
+ msm_bus_scale_client_update_request(
+ device->tsif_bus_client, 0);
+ return -EBUSY;
+ }
+
+ if (device->tsif_ref_clk &&
+ clk_prepare_enable(device->tsif_ref_clk) != 0) {
+ pr_err("tspp: Can't start ref clk");
+ clk_disable_unprepare(device->tsif_pclk);
+ if (device->tsif_vreg) {
+ regulator_set_voltage(device->tsif_vreg,
+ RPMH_REGULATOR_LEVEL_OFF,
+ RPMH_REGULATOR_LEVEL_MAX);
+ }
+
+ if (device->tsif_bus_client)
+ msm_bus_scale_client_update_request(
+ device->tsif_bus_client, 0);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void tspp_clock_stop(struct tspp_device *device)
+{
+ int rc;
+
+ if (device == NULL) {
+ pr_err("tspp: Can't stop clocks, invalid device\n");
+ return;
+ }
+
+ if (device->tsif_pclk)
+ clk_disable_unprepare(device->tsif_pclk);
+
+ if (device->tsif_ref_clk)
+ clk_disable_unprepare(device->tsif_ref_clk);
+
+ if (device->tsif_vreg) {
+ rc = regulator_set_voltage(device->tsif_vreg,
+ RPMH_REGULATOR_LEVEL_OFF,
+ RPMH_REGULATOR_LEVEL_MAX);
+ if (rc)
+ pr_err("Unable to set CX voltage.\n");
+ }
+
+ if (device->tsif_bus_client) {
+ rc = msm_bus_scale_client_update_request(
+ device->tsif_bus_client, 0);
+ if (rc)
+ pr_err("tspp: Can't disable bus\n");
+ }
+}
+
+/*** TSIF functions ***/
/*
 * tspp_start_tsif() - start a TSIF instance and take a reference.
 *
 * If this is the first user (or references exist but the START bit is
 * unexpectedly clear), builds the control word from the configured
 * mode and signal-inversion flags, programs the time limit, and sets
 * the START bit — with write barriers ordering each step. Finally
 * verifies the hardware reports START before taking the reference.
 *
 * Return: 0 on success (ref_count incremented), -EBUSY if the START
 * bit did not stick.
 */
static int tspp_start_tsif(struct tspp_tsif_device *tsif_device)
{
	int start_hardware = 0;
	u32 ctl;

	if (tsif_device->ref_count == 0) {
		start_hardware = 1;
	} else if (tsif_device->ref_count > 0) {
		ctl = readl_relaxed(tsif_device->base + TSIF_STS_CTL_OFF);
		if ((ctl & TSIF_STS_CTL_START) != 1) {
			/* this hardware should already be running */
			pr_warn("tspp: tsif hw not started but ref count > 0");
			start_hardware = 1;
		}
	}

	if (start_hardware) {
		/* base control word: IRQs, data mover, status sources */
		ctl = TSIF_STS_CTL_EN_IRQ |
				TSIF_STS_CTL_EN_DM |
				TSIF_STS_CTL_PACK_AVAIL |
				TSIF_STS_CTL_OVERFLOW |
				TSIF_STS_CTL_LOST_SYNC;

		if (tsif_device->clock_inverse)
			ctl |= TSIF_STS_CTL_INV_CLOCK;

		if (tsif_device->data_inverse)
			ctl |= TSIF_STS_CTL_INV_DATA;

		if (tsif_device->sync_inverse)
			ctl |= TSIF_STS_CTL_INV_SYNC;

		if (tsif_device->enable_inverse)
			ctl |= TSIF_STS_CTL_INV_ENABLE;

		switch (tsif_device->mode) {
		case TSPP_TSIF_MODE_LOOPBACK:
			ctl |= TSIF_STS_CTL_EN_NULL |
					TSIF_STS_CTL_EN_ERROR |
					TSIF_STS_CTL_TEST_MODE;
			break;
		case TSPP_TSIF_MODE_1:
			ctl |= TSIF_STS_CTL_EN_TIME_LIM;
			if (tsif_device->tts_source != TSIF_TTS_LPASS_TIMER)
				ctl |= TSIF_STS_CTL_EN_TCR;
			break;
		case TSPP_TSIF_MODE_2:
			ctl |= TSIF_STS_CTL_EN_TIME_LIM |
					TSIF_STS_CTL_MODE_2;
			if (tsif_device->tts_source != TSIF_TTS_LPASS_TIMER)
				ctl |= TSIF_STS_CTL_EN_TCR;
			break;
		default:
			pr_warn("tspp: unknown tsif mode 0x%x",
				tsif_device->mode);
		}

		writel_relaxed(ctl, tsif_device->base + TSIF_STS_CTL_OFF);
		/* write Status control register */
		wmb();
		writel_relaxed(tsif_device->time_limit,
			  tsif_device->base + TSIF_TIME_LIMIT_OFF);
		/* assure register configuration is done before starting TSIF */
		wmb();
		writel_relaxed(ctl | TSIF_STS_CTL_START,
			  tsif_device->base + TSIF_STS_CTL_OFF);
		/* assure TSIF start configuration */
		wmb();
	}

	ctl = readl_relaxed(tsif_device->base + TSIF_STS_CTL_OFF);
	if (!(ctl & TSIF_STS_CTL_START))
		return -EBUSY;

	tsif_device->ref_count++;
	return 0;
}
+
+static void tspp_stop_tsif(struct tspp_tsif_device *tsif_device)
+{
+ if (tsif_device->ref_count == 0)
+ return;
+
+ tsif_device->ref_count--;
+
+ if (tsif_device->ref_count == 0) {
+ writel_relaxed(TSIF_STS_CTL_STOP,
+ tsif_device->base + TSIF_STS_CTL_OFF);
+ /* assure TSIF stop configuration */
+ wmb();
+ }
+}
+
+/*** local TSPP functions ***/
+static int tspp_channels_in_use(struct tspp_device *pdev)
+{
+ int i;
+ int count = 0;
+
+ for (i = 0; i < TSPP_NUM_CHANNELS; i++)
+ count += (pdev->channels[i].used ? 1 : 0);
+
+ return count;
+}
+
+static struct tspp_device *tspp_find_by_id(int id)
+{
+ struct tspp_device *dev;
+
+ list_for_each_entry(dev, &tspp_devices, devlist) {
+ if (dev->pdev->id == id)
+ return dev;
+ }
+ return NULL;
+}
+
+static int tspp_get_key_entry(void)
+{
+ int i;
+
+ for (i = 0; i < TSPP_NUM_KEYS; i++) {
+ if (!(tspp_key_entry & (1 << i))) {
+ tspp_key_entry |= (1 << i);
+ return i;
+ }
+ }
+ return 1 < TSPP_NUM_KEYS;
+}
+
+static void tspp_free_key_entry(int entry)
+{
+ if (entry > TSPP_NUM_KEYS) {
+ pr_err("tspp_free_key_entry: index out of bounds");
+ return;
+ }
+
+ tspp_key_entry &= ~(1 << entry);
+}
+
/*
 * tspp_iommu_init() - create and attach the SMMU mapping used for DMA.
 *
 * Creates an ARM IOMMU mapping covering the IOVA window
 * [TSPP_SMMU_IOVA_START, +TSPP_SMMU_IOVA_SIZE) and attaches the
 * platform device to it. On success the mapping is stored in
 * device->iommu_mapping for later release.
 *
 * Return: 0 on success, the PTR_ERR() of the failed mapping creation,
 * or -EIO if the device could not be attached (mapping is released in
 * that case).
 */
static int tspp_iommu_init(struct tspp_device *device)
{
	struct dma_iommu_mapping *iommu_map;

	iommu_map = arm_iommu_create_mapping(&platform_bus_type,
						TSPP_SMMU_IOVA_START,
						TSPP_SMMU_IOVA_SIZE);
	if (IS_ERR(iommu_map)) {
		dev_err(&device->pdev->dev, "iommu_create_mapping failure\n");
		return PTR_ERR(iommu_map);
	}
	if (arm_iommu_attach_device(&device->pdev->dev, iommu_map)) {
		dev_err(&device->pdev->dev, "can't arm_iommu_attach_device\n");
		arm_iommu_release_mapping(iommu_map);
		return -EIO;
	}

	device->iommu_mapping = iommu_map;
	return 0;
}
+
+static void tspp_iommu_release_iomapping(struct tspp_device *device)
+{
+ if (device->iommu_mapping)
+ arm_iommu_release_mapping(device->iommu_mapping);
+
+ device->iommu_mapping = NULL;
+}
+
+/*
+ * Allocate one data buffer for a channel and fill in @desc.
+ *
+ * Memory comes from the caller-supplied allocator callback when one
+ * is given, otherwise from the channel's DMA pool, or from
+ * dma_alloc_coherent() when no pool exists.
+ *
+ * Returns 0 on success, -ENOMEM on bad size or allocation failure.
+ */
+static int tspp_alloc_buffer(u32 channel_id, struct tspp_data_descriptor *desc,
+	u32 size, struct dma_pool *dma_pool, tspp_allocator *alloc, void *user)
+{
+	if (size < TSPP_MIN_BUFFER_SIZE ||
+		size > TSPP_MAX_BUFFER_SIZE) {
+		pr_err("tspp: bad buffer size %i", size);
+		return -ENOMEM;
+	}
+
+	if (alloc) {
+		TSPP_DEBUG("tspp using alloc function");
+		desc->virt_base = alloc(channel_id, size,
+			&desc->phys_base, &desc->dma_base, user);
+	} else {
+		if (!dma_pool)
+			desc->virt_base = dma_alloc_coherent(NULL, size,
+				&desc->phys_base, GFP_KERNEL);
+		else
+			desc->virt_base = dma_pool_alloc(dma_pool, GFP_KERNEL,
+				&desc->phys_base);
+	}
+
+	/*
+	 * Check the result of every allocation path.  The old code only
+	 * checked the non-callback branches, so a failing allocator
+	 * callback handed back a descriptor with a NULL virt_base.
+	 */
+	if (desc->virt_base == NULL) {
+		pr_err("tspp: dma buffer allocation failed %i\n", size);
+		return -ENOMEM;
+	}
+
+	desc->size = size;
+	return 0;
+}
+
+/*
+ * Hand one buffer to the BAM pipe for filling.
+ *
+ * An SPS completion interrupt is requested only on every int_freq-th
+ * descriptor, so the interrupt rate follows the channel's configured
+ * frequency.
+ */
+static int tspp_queue_buffer(struct tspp_channel *channel,
+	struct tspp_mem_buffer *buffer)
+{
+	u32 flags;
+	int rc;
+
+	/* clamp the interrupt frequency to a sane minimum */
+	if (channel->int_freq < 1)
+		channel->int_freq = 1;
+
+	/* interrupt on the last descriptor of each int_freq batch */
+	flags = (buffer->desc.id % channel->int_freq == channel->int_freq - 1)
+		? SPS_IOVEC_FLAG_INT : 0;
+
+	/* start the transfer */
+	rc = sps_transfer_one(channel->pipe,
+			      buffer->sps.phys_base,
+			      buffer->sps.size,
+			      flags ? channel->pdev : NULL,
+			      flags);
+	if (rc < 0)
+		return rc;
+
+	buffer->state = TSPP_BUF_STATE_WAITING;
+	return 0;
+}
+
+/*
+ * Bring the whole TSPP block back to a known state: stop both TSIF
+ * instances, pulse the TSPP reset register, clear all filter tables
+ * and performance/context registers, restore default packet-length
+ * configuration and IRQ masks, and free every key-table entry.
+ * Ordering between register writes is enforced with wmb().
+ */
+static int tspp_global_reset(struct tspp_device *pdev)
+{
+ u32 i, val;
+
+ /* stop all TSIFs */
+ for (i = 0; i < TSPP_TSIF_INSTANCES; i++) {
+ pdev->tsif[i].ref_count = 1; /* allows stopping hw */
+ tspp_stop_tsif(&pdev->tsif[i]); /* will reset ref_count to 0 */
+ pdev->tsif[i].time_limit = TSPP_TSIF_DEFAULT_TIME_LIMIT;
+ pdev->tsif[i].clock_inverse = 0;
+ pdev->tsif[i].data_inverse = 0;
+ pdev->tsif[i].sync_inverse = 0;
+ pdev->tsif[i].enable_inverse = 0;
+ pdev->tsif[i].lpass_timer_enable = 0;
+ }
+ writel_relaxed(TSPP_RST_RESET, pdev->base + TSPP_RST);
+ /* assure state is reset before continuing with configuration */
+ wmb();
+
+ /* TSPP tables */
+ for (i = 0; i < TSPP_FILTER_TABLES; i++)
+ memset_io(pdev->filters[i],
+ 0, sizeof(struct tspp_pid_filter_table));
+
+ /* disable all filters */
+ /*
+ * NOTE(review): (2 << TSPP_NUM_CHANNELS) - 1 sets one bit more
+ * than the channel count ((1 << n) - 1 would match it exactly).
+ * Presumably harmless for a disable mask - confirm against the
+ * TSPP_PS_DISABLE register definition.
+ */
+ val = (2 << TSPP_NUM_CHANNELS) - 1;
+ writel_relaxed(val, pdev->base + TSPP_PS_DISABLE);
+
+ /* TSPP registers */
+ val = readl_relaxed(pdev->base + TSPP_CONTROL);
+ writel_relaxed(val | TSPP_CLK_CONTROL_FORCE_PERF_CNT,
+ pdev->base + TSPP_CONTROL);
+ /* assure tspp performance count clock is set to 0 */
+ wmb();
+ memset_io(pdev->tspp_global_performance, 0,
+ sizeof(struct tspp_global_performance_regs));
+ memset_io(pdev->tspp_pipe_context, 0,
+ sizeof(struct tspp_pipe_context_regs));
+ memset_io(pdev->tspp_pipe_performance, 0,
+ sizeof(struct tspp_pipe_performance_regs));
+ /* assure tspp pipe context registers are set to 0 */
+ wmb();
+ writel_relaxed(val & ~TSPP_CLK_CONTROL_FORCE_PERF_CNT,
+ pdev->base + TSPP_CONTROL);
+ /* assure tspp performance count clock is reset */
+ wmb();
+
+ /* restore default config: clear error masks, set packet length */
+ val = readl_relaxed(pdev->base + TSPP_CONFIG);
+ val &= ~(TSPP_CONFIG_PS_LEN_ERR_MASK |
+ TSPP_CONFIG_PS_CONT_ERR_UNSP_MASK |
+ TSPP_CONFIG_PS_CONT_ERR_MASK);
+ TSPP_CONFIG_SET_PACKET_LENGTH(val, TSPP_PACKET_LENGTH);
+ writel_relaxed(val, pdev->base + TSPP_CONFIG);
+ writel_relaxed(0x0007ffff, pdev->base + TSPP_IRQ_MASK);
+ writel_relaxed(0x000fffff, pdev->base + TSPP_IRQ_CLEAR);
+ writel_relaxed(0, pdev->base + TSPP_RST);
+ /* assure tspp reset clear */
+ wmb();
+
+ /* all key-table entries are free again */
+ tspp_key_entry = 0;
+
+ return 0;
+}
+
+/*
+ * Reset a channel structure to its idle defaults and bind it to @pdev.
+ * Each call consumes the next value of the global channel_id counter.
+ */
+static void tspp_channel_init(struct tspp_channel *channel,
+	struct tspp_device *pdev)
+{
+	channel->pdev = pdev;
+	channel->id = channel_id++;
+
+	/* no buffers queued yet */
+	channel->data = NULL;
+	channel->read = NULL;
+	channel->waiting = NULL;
+	channel->locked = NULL;
+	channel->buffer_count = 0;
+	channel->buffer_size = TSPP_MIN_BUFFER_SIZE;
+	channel->max_buffers = TSPP_NUM_BUFFERS;
+	channel->int_freq = 1;
+
+	/* idle: unused, no source, processing disabled, no filters */
+	channel->used = 0;
+	channel->filter_count = 0;
+	channel->src = TSPP_SOURCE_NONE;
+	channel->mode = TSPP_MODE_DISABLED;
+
+	/* no client callbacks or custom allocator registered */
+	channel->notifier = NULL;
+	channel->notify_data = NULL;
+	channel->expiration_period_ms = 0;
+	channel->memfree = NULL;
+	channel->user_info = NULL;
+
+	init_waitqueue_head(&channel->in_queue);
+}
+
+/* Record the requested TSIF mode on the TSIF instance feeding @channel. */
+static void tspp_set_tsif_mode(struct tspp_channel *channel,
+	enum tspp_tsif_mode mode)
+{
+	int index;
+
+	if (channel->src == TSPP_SOURCE_TSIF0) {
+		index = 0;
+	} else if (channel->src == TSPP_SOURCE_TSIF1) {
+		index = 1;
+	} else {
+		/* mode only applies to TSIF-backed channels */
+		pr_warn("tspp: can't set mode for non-tsif source %d",
+			channel->src);
+		return;
+	}
+	channel->pdev->tsif[index].mode = mode;
+}
+
+/* Store the four TSIF signal-inversion flags for the channel's source. */
+static void tspp_set_signal_inversion(struct tspp_channel *channel,
+	int clock_inverse, int data_inverse,
+	int sync_inverse, int enable_inverse)
+{
+	struct tspp_tsif_device *tsif;
+
+	if (channel->src == TSPP_SOURCE_TSIF0)
+		tsif = &channel->pdev->tsif[0];
+	else if (channel->src == TSPP_SOURCE_TSIF1)
+		tsif = &channel->pdev->tsif[1];
+	else
+		return; /* inversion only applies to TSIF sources */
+
+	tsif->clock_inverse = clock_inverse;
+	tsif->data_inverse = data_inverse;
+	tsif->sync_inverse = sync_inverse;
+	tsif->enable_inverse = enable_inverse;
+}
+
+/*
+ * Return 1 when @size satisfies the alignment @mode requires,
+ * 0 otherwise.  RAW mode needs a multiple of 192 (packet + 4-byte
+ * suffix), RAW_NO_SUFFIX a multiple of 188; all other modes accept
+ * any size.
+ */
+static int tspp_is_buffer_size_aligned(u32 size, enum tspp_mode mode)
+{
+	u32 alignment;
+
+	if (mode == TSPP_MODE_RAW)
+		alignment = TSPP_PACKET_LENGTH + 4;
+	else if (mode == TSPP_MODE_RAW_NO_SUFFIX)
+		alignment = TSPP_PACKET_LENGTH;
+	else
+		return 1; /* PES/disabled: no alignment requirement */
+
+	return (size % alignment) ? 0 : 1;
+}
+
+/*
+ * Round @size up to the next multiple of the packet alignment implied
+ * by @mode: 192 for RAW, 188 for RAW_NO_SUFFIX, unchanged otherwise.
+ */
+static u32 tspp_align_buffer_size_by_mode(u32 size, enum tspp_mode mode)
+{
+	u32 alignment;
+
+	if (mode == TSPP_MODE_RAW)
+		alignment = TSPP_PACKET_LENGTH + 4;
+	else if (mode == TSPP_MODE_RAW_NO_SUFFIX)
+		alignment = TSPP_PACKET_LENGTH;
+	else
+		alignment = 1; /* give the user exactly what was asked for */
+
+	/* round up to the next multiple of the alignment */
+	return ((size + alignment - 1) / alignment) * alignment;
+}
+
+/*
+ * Free every buffer on the channel's descriptor list.
+ *
+ * Buffer memory is returned through the client's memfree callback
+ * when one was registered, otherwise to the channel's DMA pool or
+ * via dma_free_coherent(); the list node itself is kfree'd.
+ */
+static void tspp_destroy_buffers(u32 channel_id, struct tspp_channel *channel)
+{
+	struct tspp_mem_buffer *pbuf, *temp;
+	int i;
+
+	pbuf = channel->data;
+	for (i = 0; i < channel->buffer_count; i++) {
+		/* guard against a list shorter than buffer_count */
+		if (!pbuf)
+			return;
+		if (pbuf->desc.phys_base) {
+			if (channel->memfree) {
+				channel->memfree(channel_id,
+					pbuf->desc.size,
+					pbuf->desc.virt_base,
+					pbuf->desc.phys_base,
+					channel->user_info);
+			} else if (channel->dma_pool) {
+				dma_pool_free(channel->dma_pool,
+					pbuf->desc.virt_base,
+					pbuf->desc.phys_base);
+			} else {
+				dma_free_coherent(
+					&channel->pdev->pdev->dev,
+					pbuf->desc.size,
+					pbuf->desc.virt_base,
+					pbuf->desc.phys_base);
+			}
+			pbuf->desc.phys_base = 0;
+		}
+		pbuf->desc.virt_base = 0;
+		pbuf->state = TSPP_BUF_STATE_EMPTY;
+		temp = pbuf;
+		pbuf = pbuf->next;
+		kfree(temp);
+	}
+}
+
+/*
+ * Request the TSPP core IRQ and each TSIF instance IRQ.
+ *
+ * On any failure, every IRQ acquired so far is released again and the
+ * request_irq() error code is returned.  The TSIF handlers are
+ * registered with &device->tsif[i] as dev_id, so they must be freed
+ * with that same cookie.
+ */
+static int msm_tspp_req_irqs(struct tspp_device *device)
+{
+	int rc;
+	int i;
+	int j;
+
+	rc = request_irq(device->tspp_irq, tspp_isr, IRQF_SHARED,
+		dev_name(&device->pdev->dev), device);
+	if (rc) {
+		dev_err(&device->pdev->dev,
+			"failed to request TSPP IRQ %d : %d",
+			device->tspp_irq, rc);
+		return rc;
+	}
+
+	for (i = 0; i < TSPP_TSIF_INSTANCES; i++) {
+		rc = request_irq(device->tsif[i].tsif_irq,
+			tsif_isr, IRQF_SHARED, dev_name(&device->pdev->dev),
+			&device->tsif[i]);
+		if (rc) {
+			dev_err(&device->pdev->dev,
+				"failed to request TSIF%d IRQ: %d",
+				i, rc);
+			goto failed;
+		}
+	}
+	device->req_irqs = true;
+	return 0;
+
+failed:
+	free_irq(device->tspp_irq, device);
+	/*
+	 * Free with the same dev_id the TSIF IRQs were requested with.
+	 * The old code passed 'device' here, so free_irq() could not
+	 * match the shared-IRQ action and the handlers leaked.
+	 */
+	for (j = 0; j < i; j++)
+		free_irq(device->tsif[j].tsif_irq, &device->tsif[j]);
+
+	return rc;
+}
+
+/* Release the TSIF and core TSPP IRQs (when present) and clear the flag. */
+static inline void msm_tspp_free_irqs(struct tspp_device *device)
+{
+	int i;
+
+	for (i = 0; i < TSPP_TSIF_INSTANCES; i++)
+		if (device->tsif[i].tsif_irq)
+			free_irq(device->tsif[i].tsif_irq, &device->tsif[i]);
+
+	if (device->tspp_irq)
+		free_irq(device->tspp_irq, device);
+
+	device->req_irqs = false;
+}
+
+/*** TSPP API functions ***/
+
+/**
+ * tspp_open_stream - open a TSPP stream for use.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ * @source: stream source parameters.
+ *
+ * Return error status
+ *
+ */
+int tspp_open_stream(u32 dev, u32 channel_id,
+ struct tspp_select_source *source)
+{
+ u32 val;
+ int rc;
+ struct tspp_device *pdev;
+ struct tspp_channel *channel;
+ bool req_irqs = false;
+
+ TSPP_DEBUG("tspp_open_stream %i %i %i %i",
+ dev, channel_id, source->source, source->mode);
+
+ if (dev >= TSPP_MAX_DEVICES) {
+ pr_err("tspp: device id out of range");
+ return -ENODEV;
+ }
+
+ if (channel_id >= TSPP_NUM_CHANNELS) {
+ pr_err("tspp: channel id out of range");
+ return -ECHRNG;
+ }
+
+ pdev = tspp_find_by_id(dev);
+ if (!pdev) {
+ pr_err("tspp_str: can't find device %i", dev);
+ return -ENODEV;
+ }
+ channel = &pdev->channels[channel_id];
+ channel->src = source->source;
+ /* record mode and signal polarity on the backing TSIF
+ (both are no-ops for non-TSIF sources) */
+ tspp_set_tsif_mode(channel, source->mode);
+ tspp_set_signal_inversion(channel, source->clk_inverse,
+ source->data_inverse, source->sync_inverse,
+ source->enable_inverse);
+
+ /* Request IRQ resources on first open */
+ if (!pdev->req_irqs && (source->source == TSPP_SOURCE_TSIF0 ||
+ source->source == TSPP_SOURCE_TSIF1)) {
+ rc = msm_tspp_req_irqs(pdev);
+ if (rc) {
+ pr_err("tspp: error requesting irqs\n");
+ return rc;
+ }
+ req_irqs = true;
+ }
+
+ switch (source->source) {
+ case TSPP_SOURCE_TSIF0:
+ if (tspp_config_gpios(pdev, channel->src, 1) != 0) {
+ rc = -EBUSY;
+ pr_err("tspp: error enabling tsif0 GPIOs\n");
+ goto free_irq;
+ }
+ /* make sure TSIF0 is running & enabled */
+ if (tspp_start_tsif(&pdev->tsif[0]) != 0) {
+ rc = -EBUSY;
+ pr_err("tspp: error starting tsif0");
+ goto free_irq;
+ }
+ /* first user of TSIF0: route its packets into the TSPP */
+ if (pdev->tsif[0].ref_count == 1) {
+ val = readl_relaxed(pdev->base + TSPP_CONTROL);
+ writel_relaxed(val & ~TSPP_CONTROL_TSP_TSIF0_SRC_DIS,
+ pdev->base + TSPP_CONTROL);
+ /* Assure BAM TS PKT packet processing is enabled */
+ wmb();
+ }
+ break;
+ case TSPP_SOURCE_TSIF1:
+ if (tspp_config_gpios(pdev, channel->src, 1) != 0) {
+ rc = -EBUSY;
+ pr_err("tspp: error enabling tsif1 GPIOs\n");
+ goto free_irq;
+ }
+ /* make sure TSIF1 is running & enabled */
+ if (tspp_start_tsif(&pdev->tsif[1]) != 0) {
+ rc = -EBUSY;
+ pr_err("tspp: error starting tsif1");
+ goto free_irq;
+ }
+ /* first user of TSIF1: route its packets into the TSPP */
+ if (pdev->tsif[1].ref_count == 1) {
+ val = readl_relaxed(pdev->base + TSPP_CONTROL);
+ writel_relaxed(val & ~TSPP_CONTROL_TSP_TSIF1_SRC_DIS,
+ pdev->base + TSPP_CONTROL);
+ /* Assure BAM TS PKT packet processing is enabled */
+ wmb();
+ }
+ break;
+ case TSPP_SOURCE_MEM:
+ break;
+ default:
+ /* IRQs above are only taken for TSIF sources, so there
+ is nothing to free on this path */
+ pr_err("tspp: channel %i invalid source %i",
+ channel->id, source->source);
+ return -EBUSY;
+ }
+
+ return 0;
+
+free_irq:
+ /* Free irqs only if were requested during opening of this stream */
+ if (req_irqs)
+ msm_tspp_free_irqs(pdev);
+ return rc;
+}
+EXPORT_SYMBOL(tspp_open_stream);
+
+/**
+ * tspp_close_stream - close a TSPP stream.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ *
+ * Return error status
+ *
+ */
+int tspp_close_stream(u32 dev, u32 channel_id)
+{
+	u32 val;
+	u32 prev_ref_count = 0;
+	struct tspp_device *pdev;
+	struct tspp_channel *channel;
+
+	if (channel_id >= TSPP_NUM_CHANNELS) {
+		pr_err("tspp: channel id out of range");
+		return -ECHRNG;
+	}
+	pdev = tspp_find_by_id(dev);
+	if (!pdev) {
+		pr_err("tspp_cs: can't find device %i", dev);
+		return -EBUSY;
+	}
+	channel = &pdev->channels[channel_id];
+
+	switch (channel->src) {
+	case TSPP_SOURCE_TSIF0:
+		/* drop our TSIF reference; stop the hw on the last one */
+		prev_ref_count = pdev->tsif[0].ref_count;
+		tspp_stop_tsif(&pdev->tsif[0]);
+		if (tspp_config_gpios(pdev, channel->src, 0) != 0)
+			pr_err("tspp: error disabling tsif0 GPIOs\n");
+
+		/* last user: disconnect TSIF0 from the TSPP */
+		if (prev_ref_count == 1) {
+			val = readl_relaxed(pdev->base + TSPP_CONTROL);
+			writel_relaxed(val | TSPP_CONTROL_TSP_TSIF0_SRC_DIS,
+				pdev->base + TSPP_CONTROL);
+			/* Assure BAM TS PKT packet processing is disabled */
+			wmb();
+		}
+		break;
+	case TSPP_SOURCE_TSIF1:
+		prev_ref_count = pdev->tsif[1].ref_count;
+		tspp_stop_tsif(&pdev->tsif[1]);
+		/* fixed copy-paste: this message used to say "tsif0" */
+		if (tspp_config_gpios(pdev, channel->src, 0) != 0)
+			pr_err("tspp: error disabling tsif1 GPIOs\n");
+
+		/* last user: disconnect TSIF1 from the TSPP */
+		if (prev_ref_count == 1) {
+			val = readl_relaxed(pdev->base + TSPP_CONTROL);
+			writel_relaxed(val | TSPP_CONTROL_TSP_TSIF1_SRC_DIS,
+				pdev->base + TSPP_CONTROL);
+			/* Assure BAM TS PKT packet processing is disabled */
+			wmb();
+		}
+		break;
+	case TSPP_SOURCE_MEM:
+		break;
+	case TSPP_SOURCE_NONE:
+		break;
+	}
+
+	channel->src = TSPP_SOURCE_NONE;
+
+	/* Free requested interrupts to save power */
+	if ((pdev->tsif[0].ref_count + pdev->tsif[1].ref_count) == 0 &&
+		prev_ref_count)
+		msm_tspp_free_irqs(pdev);
+
+	return 0;
+}
+EXPORT_SYMBOL(tspp_close_stream);
+
+/* Register the BAM with the SPS driver and reset it; undo on failure. */
+static int tspp_init_sps_device(struct tspp_device *dev)
+{
+	int ret;
+
+	ret = sps_register_bam_device(&dev->bam_props, &dev->bam_handle);
+	if (ret) {
+		pr_err("tspp: failed to register bam device, err-%d\n", ret);
+		return ret;
+	}
+
+	ret = sps_device_reset(dev->bam_handle);
+	if (ret) {
+		/* reset failed: drop the registration we just made */
+		sps_deregister_bam_device(dev->bam_handle);
+		pr_err("tspp: error resetting bam device, err=%d\n", ret);
+	}
+
+	return ret;
+}
+
+/**
+ * tspp_open_channel - open a TSPP channel.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ *
+ * Return error status
+ *
+ */
+int tspp_open_channel(u32 dev, u32 channel_id)
+{
+ int rc = 0;
+ struct sps_connect *config;
+ struct sps_register_event *event;
+ struct tspp_channel *channel;
+ struct tspp_device *pdev;
+
+ if (channel_id >= TSPP_NUM_CHANNELS) {
+ pr_err("tspp: channel id out of range");
+ return -ECHRNG;
+ }
+ pdev = tspp_find_by_id(dev);
+ if (!pdev) {
+ pr_err("tspp_oc: can't find device %i", dev);
+ return -ENODEV;
+ }
+ channel = &pdev->channels[channel_id];
+
+ if (channel->used) {
+ pr_err("tspp channel already in use");
+ return -EBUSY;
+ }
+
+ config = &channel->config;
+ event = &channel->event;
+
+ /* start the clocks if needed */
+ /* first channel on this device: bring up clocks, the SPS BAM,
+ and hold a wakeup source for the duration */
+ if (tspp_channels_in_use(pdev) == 0) {
+ rc = tspp_clock_start(pdev);
+ if (rc)
+ return rc;
+
+ if (pdev->bam_handle == SPS_DEV_HANDLE_INVALID) {
+ rc = tspp_init_sps_device(pdev);
+ if (rc) {
+ pr_err("tspp: failed to init sps device, err=%d\n",
+ rc);
+ tspp_clock_stop(pdev);
+ return rc;
+ }
+ }
+
+ __pm_stay_awake(&pdev->ws);
+ }
+
+ /* mark it as used */
+ channel->used = 1;
+
+ /* start the bam */
+ channel->pipe = sps_alloc_endpoint();
+ if (channel->pipe == 0) {
+ pr_err("tspp: error allocating endpoint");
+ rc = -ENOMEM;
+ goto err_sps_alloc;
+ }
+
+ /* get default configuration */
+ sps_get_config(channel->pipe, config);
+
+ config->source = pdev->bam_handle;
+ config->destination = SPS_DEV_HANDLE_MEM;
+ config->mode = SPS_MODE_SRC;
+ config->options =
+ SPS_O_AUTO_ENABLE | /* connection is auto-enabled */
+ SPS_O_STREAMING | /* streaming mode */
+ SPS_O_DESC_DONE | /* interrupt on end of descriptor */
+ SPS_O_ACK_TRANSFERS | /* must use sps_get_iovec() */
+ SPS_O_HYBRID; /* Read actual descriptors in sps_get_iovec() */
+ config->src_pipe_index = channel->id;
+ config->desc.size =
+ TSPP_SPS_DESCRIPTOR_COUNT * SPS_DESCRIPTOR_SIZE;
+ /* coherent descriptor FIFO shared with the BAM hardware */
+ config->desc.base = dma_alloc_coherent(&pdev->pdev->dev,
+ config->desc.size,
+ &config->desc.phys_base,
+ GFP_KERNEL);
+ if (config->desc.base == 0) {
+ pr_err("tspp: error allocating sps descriptors");
+ rc = -ENOMEM;
+ goto err_desc_alloc;
+ }
+
+ memset(config->desc.base, 0, config->desc.size);
+
+ rc = sps_connect(channel->pipe, config);
+ if (rc) {
+ pr_err("tspp: error connecting bam");
+ goto err_connect;
+ }
+
+ /* completion callback fires on every descriptor-done interrupt */
+ event->mode = SPS_TRIGGER_CALLBACK;
+ event->options = SPS_O_DESC_DONE;
+ event->callback = tspp_sps_complete_cb;
+ event->xfer_done = NULL;
+ event->user = pdev;
+
+ rc = sps_register_event(channel->pipe, event);
+ if (rc) {
+ pr_err("tspp: error registering event");
+ goto err_event;
+ }
+
+ /* timer is armed later, when an expiration period is configured */
+ init_timer(&channel->expiration_timer);
+ channel->expiration_timer.function = tspp_expiration_timer;
+ channel->expiration_timer.data = (unsigned long)pdev;
+ channel->expiration_timer.expires = 0xffffffffL;
+
+ /* PM wake failure is logged but deliberately not treated as fatal */
+ rc = pm_runtime_get(&pdev->pdev->dev);
+ if (rc < 0) {
+ dev_err(&pdev->pdev->dev,
+ "Runtime PM: Unable to wake up tspp device, rc = %d",
+ rc);
+ }
+ return 0;
+
+/* unwind ladder: release resources in reverse order of acquisition */
+err_event:
+ sps_disconnect(channel->pipe);
+err_connect:
+ dma_free_coherent(&pdev->pdev->dev, config->desc.size,
+ config->desc.base, config->desc.phys_base);
+err_desc_alloc:
+ sps_free_endpoint(channel->pipe);
+err_sps_alloc:
+ channel->used = 0;
+ return rc;
+}
+EXPORT_SYMBOL(tspp_open_channel);
+
+/**
+ * tspp_close_channel - close a TSPP channel.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ *
+ * Return error status
+ *
+ */
+int tspp_close_channel(u32 dev, u32 channel_id)
+{
+ int i;
+ int id;
+ int table_idx;
+ u32 val;
+ unsigned long flags;
+
+ struct sps_connect *config;
+ struct tspp_device *pdev;
+ struct tspp_channel *channel;
+
+ if (channel_id >= TSPP_NUM_CHANNELS) {
+ pr_err("tspp: channel id out of range");
+ return -ECHRNG;
+ }
+ pdev = tspp_find_by_id(dev);
+ if (!pdev) {
+ pr_err("tspp_close: can't find device %i", dev);
+ return -ENODEV;
+ }
+ channel = &pdev->channels[channel_id];
+
+ /* if the channel is not used, we are done */
+ if (!channel->used)
+ return 0;
+
+ /*
+ * Need to protect access to used and waiting fields, as they are
+ * used by the tasklet which is invoked from interrupt context
+ */
+ spin_lock_irqsave(&pdev->spinlock, flags);
+ channel->used = 0;
+ channel->waiting = NULL;
+ spin_unlock_irqrestore(&pdev->spinlock, flags);
+
+ if (channel->expiration_period_ms)
+ del_timer(&channel->expiration_timer);
+
+ channel->notifier = NULL;
+ channel->notify_data = NULL;
+ channel->expiration_period_ms = 0;
+
+ config = &channel->config;
+ pdev = channel->pdev;
+
+ /* disable pipe (channel) */
+ /*
+ * NOTE(review): this ORs in channel->id itself, while
+ * tspp_add_filter uses (1 << channel->id) for the same register.
+ * Looks like a bit-index vs bitmask mix-up - confirm against the
+ * TSPP_PS_DISABLE register definition before changing.
+ */
+ val = readl_relaxed(pdev->base + TSPP_PS_DISABLE);
+ writel_relaxed(val | channel->id, pdev->base + TSPP_PS_DISABLE);
+ /* Assure PS_DISABLE register is set */
+ wmb();
+
+ /* unregister all filters for this channel */
+ for (table_idx = 0; table_idx < TSPP_FILTER_TABLES; table_idx++) {
+ for (i = 0; i < TSPP_NUM_PRIORITIES; i++) {
+ struct tspp_pid_filter *filter =
+ &pdev->filters[table_idx]->filter[i];
+ id = FILTER_GET_PIPE_NUMBER0(filter);
+ if (id == channel->id) {
+ /* hand any decryption key slot back too */
+ if (FILTER_HAS_ENCRYPTION(filter))
+ tspp_free_key_entry(
+ FILTER_GET_KEY_NUMBER(filter));
+ filter->config = 0;
+ filter->filter = 0;
+ }
+ }
+ }
+ channel->filter_count = 0;
+
+ /* disconnect the bam */
+ if (sps_disconnect(channel->pipe) != 0)
+ pr_warn("tspp: Error freeing sps endpoint (%i)", channel->id);
+
+ /* destroy the buffers */
+ dma_free_coherent(&pdev->pdev->dev, config->desc.size,
+ config->desc.base, config->desc.phys_base);
+
+ sps_free_endpoint(channel->pipe);
+
+ tspp_destroy_buffers(channel_id, channel);
+
+ dma_pool_destroy(channel->dma_pool);
+ channel->dma_pool = NULL;
+
+ /* return the channel structure to its idle state */
+ channel->src = TSPP_SOURCE_NONE;
+ channel->mode = TSPP_MODE_DISABLED;
+ channel->memfree = NULL;
+ channel->user_info = NULL;
+ channel->buffer_count = 0;
+ channel->data = NULL;
+ channel->read = NULL;
+ channel->locked = NULL;
+
+ /* last channel closed: tear down the BAM and release clocks/wakeup */
+ if (tspp_channels_in_use(pdev) == 0) {
+ sps_deregister_bam_device(pdev->bam_handle);
+ pdev->bam_handle = SPS_DEV_HANDLE_INVALID;
+
+ __pm_relax(&pdev->ws);
+ tspp_clock_stop(pdev);
+ }
+
+ pm_runtime_put(&pdev->pdev->dev);
+
+ return 0;
+}
+EXPORT_SYMBOL(tspp_close_channel);
+
+/**
+ * tspp_get_ref_clk_counter - return the TSIF clock reference (TCR) counter.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @source: The TSIF source from which the counter should be read
+ * @tcr_counter: the value of TCR counter
+ *
+ * Return error status
+ *
+ * TCR increments at a rate equal to 27 MHz/256 = 105.47 kHz.
+ * If source is neither TSIF 0 or TSIF1 0 is returned.
+ */
+int tspp_get_ref_clk_counter(u32 dev, enum tspp_source source, u32 *tcr_counter)
+{
+	struct tspp_device *pdev;
+	struct tspp_tsif_device *tsif_device = NULL;
+
+	if (!tcr_counter)
+		return -EINVAL;
+
+	pdev = tspp_find_by_id(dev);
+	if (!pdev) {
+		pr_err("tspp_get_ref_clk_counter: can't find device %i\n", dev);
+		return -ENODEV;
+	}
+
+	/* only TSIF sources carry a TCR counter */
+	if (source == TSPP_SOURCE_TSIF0)
+		tsif_device = &pdev->tsif[0];
+	else if (source == TSPP_SOURCE_TSIF1)
+		tsif_device = &pdev->tsif[1];
+
+	/* read the counter only while the TSIF is actually running */
+	if (tsif_device && tsif_device->ref_count)
+		*tcr_counter = ioread32(tsif_device->base + TSIF_CLK_REF_OFF);
+	else
+		*tcr_counter = 0;
+
+	return 0;
+}
+EXPORT_SYMBOL(tspp_get_ref_clk_counter);
+
+/**
+ * tspp_get_lpass_time_counter - return the LPASS Timer counter value.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @source: The TSIF source from which the counter should be read
+ * @tcr_counter: the value of TCR counter
+ *
+ * Return error status
+ *
+ * If source is neither TSIF 0 or TSIF1 0 is returned.
+ */
+int tspp_get_lpass_time_counter(u32 dev, enum tspp_source source,
+			u64 *lpass_time_counter)
+{
+	/* LPASS timer reads are not supported on this target */
+	return -EPERM;
+}
+EXPORT_SYMBOL(tspp_get_lpass_time_counter);
+
+/**
+ * tspp_get_tts_source - Return the TTS source value.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @tts_source:Updated TTS source type
+ *
+ * Return error status
+ *
+ */
+int tspp_get_tts_source(u32 dev, int *tts_source)
+{
+	struct tspp_device *pdev;
+
+	if (!tts_source)
+		return -EINVAL;
+
+	pdev = tspp_find_by_id(dev);
+	if (!pdev) {
+		pr_err("tspp_get_tts_source: can't find device %i\n",
+		       dev);
+		return -ENODEV;
+	}
+
+	/* report the TTS source recorded on the device */
+	*tts_source = pdev->tts_source;
+
+	return 0;
+}
+EXPORT_SYMBOL(tspp_get_tts_source);
+
+/**
+ * tspp_add_filter - add a TSPP filter to a channel.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ * @filter: TSPP filter parameters
+ *
+ * Return error status
+ *
+ */
+int tspp_add_filter(u32 dev, u32 channel_id,
+ struct tspp_filter *filter)
+{
+ int i, rc;
+ int other_channel;
+ int entry;
+ u32 val, pid, enabled;
+ struct tspp_device *pdev;
+ struct tspp_pid_filter p;
+ struct tspp_channel *channel;
+
+ TSPP_DEBUG("tspp: add filter");
+ if (channel_id >= TSPP_NUM_CHANNELS) {
+ pr_err("tspp: channel id out of range");
+ return -ECHRNG;
+ }
+ pdev = tspp_find_by_id(dev);
+ if (!pdev) {
+ pr_err("tspp_add: can't find device %i", dev);
+ return -ENODEV;
+ }
+
+ channel = &pdev->channels[channel_id];
+
+ if (filter->source > TSPP_SOURCE_MEM) {
+ pr_err("tspp invalid source");
+ return -ENOSR;
+ }
+
+ if (filter->priority >= TSPP_NUM_PRIORITIES) {
+ pr_err("tspp invalid filter priority");
+ return -ENOSR;
+ }
+
+ channel->mode = filter->mode;
+ /*
+ * if buffers are already allocated, verify they fulfil
+ * the alignment requirements.
+ */
+ if ((channel->buffer_count > 0) &&
+ (!tspp_is_buffer_size_aligned(channel->buffer_size, channel->mode)))
+ pr_warn("tspp: buffers allocated with incorrect alignment\n");
+
+ /*
+ * NOTE(review): channel->src is used below to index
+ * pdev->filters[]; presumably the stream was opened on a TSIF
+ * source first so src is a valid table index - confirm callers
+ * cannot reach here with TSPP_SOURCE_NONE.
+ */
+ /* in PES mode a PID may be claimed by only one channel */
+ if (filter->mode == TSPP_MODE_PES) {
+ for (i = 0; i < TSPP_NUM_PRIORITIES; i++) {
+ struct tspp_pid_filter *tspp_filter =
+ &pdev->filters[channel->src]->filter[i];
+ pid = FILTER_GET_PIPE_PID((tspp_filter));
+ enabled = FILTER_GET_PIPE_PROCESS0(tspp_filter);
+ if (enabled && (pid == filter->pid)) {
+ other_channel =
+ FILTER_GET_PIPE_NUMBER0(tspp_filter);
+ pr_err("tspp: pid 0x%x already in use by channel %i",
+ filter->pid, other_channel);
+ return -EBADSLT;
+ }
+ }
+ }
+
+ /* make sure this priority is not already in use */
+ enabled = FILTER_GET_PIPE_PROCESS0(
+ (&(pdev->filters[channel->src]->filter[filter->priority])));
+ if (enabled) {
+ pr_err("tspp: filter priority %i source %i is already enabled\n",
+ filter->priority, channel->src);
+ return -ENOSR;
+ }
+
+ if (channel->mode == TSPP_MODE_PES) {
+ /*
+ * if we are already processing in PES mode, disable pipe
+ * (channel) and filter to be updated
+ */
+ val = readl_relaxed(pdev->base + TSPP_PS_DISABLE);
+ writel_relaxed(val | (1 << channel->id),
+ pdev->base + TSPP_PS_DISABLE);
+ /* Assure PS_DISABLE register is set */
+ wmb();
+ }
+
+ /* update entry */
+ /* build the new filter entry in a local copy first */
+ p.filter = 0;
+ p.config = FILTER_TRANS_END_DISABLE;
+ FILTER_SET_PIPE_PROCESS0((&p), filter->mode);
+ FILTER_SET_PIPE_PID((&p), filter->pid);
+ FILTER_SET_PID_MASK((&p), filter->mask);
+ FILTER_SET_PIPE_NUMBER0((&p), channel->id);
+ FILTER_SET_PIPE_PROCESS1((&p), TSPP_MODE_DISABLED);
+ if (filter->decrypt) {
+ /* key exhaustion is logged but the filter is still added,
+ just without decryption */
+ entry = tspp_get_key_entry();
+ if (entry == -1) {
+ pr_err("tspp: no more keys available!");
+ } else {
+ p.config |= FILTER_DECRYPT;
+ FILTER_SET_KEY_NUMBER((&p), entry);
+ }
+ }
+
+ /* commit the prepared entry to the hardware filter table */
+ pdev->filters[channel->src]->
+ filter[filter->priority].config = p.config;
+ pdev->filters[channel->src]->
+ filter[filter->priority].filter = p.filter;
+
+ /*
+ * allocate buffers if needed (i.e. if user did has not already called
+ * tspp_allocate_buffers() explicitly).
+ */
+ if (channel->buffer_count == 0) {
+ channel->buffer_size =
+ tspp_align_buffer_size_by_mode(channel->buffer_size,
+ channel->mode);
+ rc = tspp_allocate_buffers(dev, channel->id,
+ channel->max_buffers,
+ channel->buffer_size,
+ channel->int_freq, NULL, NULL, NULL);
+ if (rc != 0) {
+ pr_err("tspp: tspp_allocate_buffers failed\n");
+ return rc;
+ }
+ }
+
+ /* reenable pipe */
+ val = readl_relaxed(pdev->base + TSPP_PS_DISABLE);
+ writel_relaxed(val & ~(1 << channel->id), pdev->base + TSPP_PS_DISABLE);
+ /* Assure PS_DISABLE register is reset */
+ wmb();
+ /* readback flushes the write before returning */
+ val = readl_relaxed(pdev->base + TSPP_PS_DISABLE);
+
+ channel->filter_count++;
+
+ return 0;
+}
+EXPORT_SYMBOL(tspp_add_filter);
+
+/**
+ * tspp_remove_filter - remove a TSPP filter from a channel.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ * @filter: TSPP filter parameters
+ *
+ * Return error status
+ *
+ */
+int tspp_remove_filter(u32 dev, u32 channel_id,
+ struct tspp_filter *filter)
+{
+ int entry;
+ u32 val;
+ struct tspp_device *pdev;
+ int src;
+ struct tspp_pid_filter *tspp_filter;
+ struct tspp_channel *channel;
+
+ if (channel_id >= TSPP_NUM_CHANNELS) {
+ pr_err("tspp: channel id out of range");
+ return -ECHRNG;
+ }
+ if (!filter) {
+ pr_err("tspp: NULL filter pointer");
+ return -EINVAL;
+ }
+ pdev = tspp_find_by_id(dev);
+ if (!pdev) {
+ pr_err("tspp_remove: can't find device %i", dev);
+ return -ENODEV;
+ }
+ if (filter->priority >= TSPP_NUM_PRIORITIES) {
+ pr_err("tspp invalid filter priority");
+ return -ENOSR;
+ }
+ channel = &pdev->channels[channel_id];
+
+ /* filters only exist for TSIF-backed channels */
+ src = channel->src;
+ if ((src == TSPP_SOURCE_TSIF0) || (src == TSPP_SOURCE_TSIF1))
+ tspp_filter = &(pdev->filters[src]->filter[filter->priority]);
+ else {
+ pr_err("tspp_remove: wrong source type %d", src);
+ return -EINVAL;
+ }
+
+
+ /* disable pipe (channel) */
+ /*
+ * NOTE(review): ORs in channel->id itself, while the reenable
+ * below uses (1 << channel->id).  Looks like a bit-index vs
+ * bitmask mix-up - confirm against the TSPP_PS_DISABLE register
+ * definition before changing.
+ */
+ val = readl_relaxed(pdev->base + TSPP_PS_DISABLE);
+ writel_relaxed(val | channel->id, pdev->base + TSPP_PS_DISABLE);
+ /* Assure PS_DISABLE register is set */
+ wmb();
+
+ /* update data keys */
+ /* hand the decryption key slot back, if this filter held one */
+ if (tspp_filter->config & FILTER_DECRYPT) {
+ entry = FILTER_GET_KEY_NUMBER(tspp_filter);
+ tspp_free_key_entry(entry);
+ }
+
+ /* update pid table */
+ tspp_filter->config = 0;
+ tspp_filter->filter = 0;
+
+ channel->filter_count--;
+
+ /* reenable pipe */
+ val = readl_relaxed(pdev->base + TSPP_PS_DISABLE);
+ writel_relaxed(val & ~(1 << channel->id),
+ pdev->base + TSPP_PS_DISABLE);
+ /* Assure PS_DISABLE register is reset */
+ wmb();
+ /* readback flushes the write before returning */
+ val = readl_relaxed(pdev->base + TSPP_PS_DISABLE);
+
+ return 0;
+}
+EXPORT_SYMBOL(tspp_remove_filter);
+
+/**
+ * tspp_set_key - set TSPP key in key table.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ * @key: TSPP key parameters
+ *
+ * Return error status
+ *
+ */
+int tspp_set_key(u32 dev, u32 channel_id, struct tspp_key *key)
+{
+ int i;
+ int id;
+ int key_index;
+ int data;
+ struct tspp_channel *channel;
+ struct tspp_device *pdev;
+
+ if (channel_id >= TSPP_NUM_CHANNELS) {
+ pr_err("tspp: channel id out of range");
+ return -ECHRNG;
+ }
+ pdev = tspp_find_by_id(dev);
+ if (!pdev) {
+ pr_err("tspp_set: can't find device %i", dev);
+ return -ENODEV;
+ }
+ channel = &pdev->channels[channel_id];
+
+ /* read the key index used by this channel */
+ /* key_index is assigned only on the break below; the loop-end
+ check that follows guarantees it is set before use */
+ for (i = 0; i < TSPP_NUM_PRIORITIES; i++) {
+ struct tspp_pid_filter *tspp_filter =
+ &(pdev->filters[channel->src]->filter[i]);
+ id = FILTER_GET_PIPE_NUMBER0(tspp_filter);
+ if (id == channel->id) {
+ if (FILTER_HAS_ENCRYPTION(tspp_filter)) {
+ key_index = FILTER_GET_KEY_NUMBER(tspp_filter);
+ break;
+ }
+ }
+ }
+ if (i == TSPP_NUM_PRIORITIES) {
+ pr_err("tspp: no encryption on this channel");
+ return -ENOKEY;
+ }
+
+ /* write the key halves into the parity-selected table slot */
+ if (key->parity == TSPP_KEY_PARITY_EVEN) {
+ pdev->tspp_key_table->entry[key_index].even_lsb = key->lsb;
+ pdev->tspp_key_table->entry[key_index].even_msb = key->msb;
+ } else {
+ pdev->tspp_key_table->entry[key_index].odd_lsb = key->lsb;
+ pdev->tspp_key_table->entry[key_index].odd_msb = key->msb;
+ }
+ /* result is discarded; presumably the read flushes/latches the
+ key writes - confirm against the TSPP_KEY_VALID register docs */
+ data = readl_relaxed(channel->pdev->base + TSPP_KEY_VALID);
+
+ return 0;
+}
+EXPORT_SYMBOL(tspp_set_key);
+
+/**
+ * tspp_register_notification - register TSPP channel notification function.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ * @notify: notification function
+ * @userdata: user data to pass to notification function
+ * @timer_ms: notification for partially filled buffers
+ *
+ * Return error status
+ *
+ */
+int tspp_register_notification(u32 dev, u32 channel_id,
+	tspp_notifier *notify, void *userdata, u32 timer_ms)
+{
+	struct tspp_device *pdev;
+	struct tspp_channel *channel;
+
+	if (channel_id >= TSPP_NUM_CHANNELS) {
+		pr_err("tspp: channel id out of range");
+		return -ECHRNG;
+	}
+	pdev = tspp_find_by_id(dev);
+	if (!pdev) {
+		pr_err("tspp_reg: can't find device %i", dev);
+		return -ENODEV;
+	}
+
+	/* record the client callback, its cookie, and the expiry period */
+	channel = &pdev->channels[channel_id];
+	channel->notifier = notify;
+	channel->notify_data = userdata;
+	channel->expiration_period_ms = timer_ms;
+
+	return 0;
+}
+EXPORT_SYMBOL(tspp_register_notification);
+
+/**
+ * tspp_unregister_notification - unregister TSPP channel notification function.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ *
+ * Return error status
+ *
+ */
+int tspp_unregister_notification(u32 dev, u32 channel_id)
+{
+	struct tspp_device *pdev;
+	struct tspp_channel *channel;
+
+	if (channel_id >= TSPP_NUM_CHANNELS) {
+		pr_err("tspp: channel id out of range");
+		return -ECHRNG;
+	}
+	pdev = tspp_find_by_id(dev);
+	if (!pdev) {
+		pr_err("tspp_unreg: can't find device %i", dev);
+		return -ENODEV;
+	}
+
+	/* drop the callback and its cookie */
+	channel = &pdev->channels[channel_id];
+	channel->notifier = NULL;
+	channel->notify_data = 0;
+	return 0;
+}
+EXPORT_SYMBOL(tspp_unregister_notification);
+
+/**
+ * tspp_get_buffer - get TSPP data buffer.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ *
+ * Return error status
+ *
+ */
+const struct tspp_data_descriptor *tspp_get_buffer(u32 dev, u32 channel_id)
+{
+	struct tspp_mem_buffer *buffer;
+	struct tspp_channel *channel;
+	struct tspp_device *pdev;
+	unsigned long flags;
+
+	if (channel_id >= TSPP_NUM_CHANNELS) {
+		pr_err("tspp: channel id out of range");
+		return NULL;
+	}
+	pdev = tspp_find_by_id(dev);
+	if (!pdev) {
+		pr_err("tspp_get: can't find device %i", dev);
+		return NULL;
+	}
+
+	/* the read pointer is shared with the interrupt tasklet */
+	spin_lock_irqsave(&pdev->spinlock, flags);
+
+	channel = &pdev->channels[channel_id];
+
+	if (!channel->read) {
+		spin_unlock_irqrestore(&pdev->spinlock, flags);
+		pr_warn("tspp: no buffer to get on channel %i!",
+			channel->id);
+		return NULL;
+	}
+
+	buffer = channel->read;
+	/* see if we have any buffers ready to read */
+	if (buffer->state != TSPP_BUF_STATE_DATA) {
+		spin_unlock_irqrestore(&pdev->spinlock, flags);
+		return NULL;
+	}
+
+	/*
+	 * State is known to be TSPP_BUF_STATE_DATA here (checked just
+	 * above), so lock the buffer and advance the read pointer
+	 * unconditionally; the old code re-tested the same condition.
+	 */
+	buffer->state = TSPP_BUF_STATE_LOCKED;
+	channel->read = channel->read->next;
+
+	spin_unlock_irqrestore(&pdev->spinlock, flags);
+
+	return &buffer->desc;
+}
+EXPORT_SYMBOL(tspp_get_buffer);
+
+/**
+ * tspp_release_buffer - release TSPP data buffer back to TSPP.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ * @descriptor_id: buffer descriptor ID
+ *
+ * Return  error status
+ *
+ */
+int tspp_release_buffer(u32 dev, u32 channel_id, u32 descriptor_id)
+{
+	int i, found = 0;
+	struct tspp_mem_buffer *buffer;
+	struct tspp_channel *channel;
+	struct tspp_device *pdev;
+	unsigned long flags;
+
+	if (channel_id >= TSPP_NUM_CHANNELS) {
+		pr_err("tspp: channel id out of range");
+		return -ECHRNG;
+	}
+	pdev = tspp_find_by_id(dev);
+	if (!pdev) {
+		pr_err("tspp: can't find device %i", dev);
+		return -ENODEV;
+	}
+
+	spin_lock_irqsave(&pdev->spinlock, flags);
+
+	channel = &pdev->channels[channel_id];
+
+	/* guard against a channel that has no buffers allocated; the old
+	 * code dereferenced channel->locked unconditionally below.
+	 */
+	if (!channel->locked) {
+		spin_unlock_irqrestore(&pdev->spinlock, flags);
+		pr_err("tspp: no buffers on channel %i", channel_id);
+		return -EINVAL;
+	}
+
+	/* valid ids are 0..buffer_count-1, so >= (not >) is out of range */
+	if (descriptor_id >= channel->buffer_count)
+		pr_warn("tspp: desc id looks weird 0x%08x", descriptor_id);
+
+	/* find the correct descriptor */
+	buffer = channel->locked;
+	for (i = 0; i < channel->buffer_count; i++) {
+		if (buffer->desc.id == descriptor_id) {
+			found = 1;
+			break;
+		}
+		buffer = buffer->next;
+	}
+
+	if (!found) {
+		spin_unlock_irqrestore(&pdev->spinlock, flags);
+		pr_err("tspp: cant find desc %i", descriptor_id);
+		return -EINVAL;
+	}
+
+	/*
+	 * Only advance the locked cursor on success; the old code moved it
+	 * forward even when the descriptor was not found, silently skewing
+	 * the ring on a bad release.
+	 */
+	channel->locked = channel->locked->next;
+
+	/* make sure the buffer is in the expected state */
+	if (buffer->state != TSPP_BUF_STATE_LOCKED) {
+		spin_unlock_irqrestore(&pdev->spinlock, flags);
+		pr_err("tspp: buffer %i not locked", descriptor_id);
+		return -EINVAL;
+	}
+	/* unlock the buffer and requeue it */
+	buffer->state = TSPP_BUF_STATE_WAITING;
+
+	if (tspp_queue_buffer(channel, buffer))
+		pr_warn("tspp: can't requeue buffer");
+
+	spin_unlock_irqrestore(&pdev->spinlock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(tspp_release_buffer);
+
+/**
+ * tspp_allocate_buffers - allocate TSPP data buffers.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ * @count: number of buffers to allocate
+ * @size: size of each buffer to allocate
+ * @int_freq: interrupt frequency
+ * @alloc: user defined memory allocator function. Pass NULL for default.
+ * @memfree: user defined memory free function. Pass NULL for default.
+ * @user: user data to pass to the memory allocator/free function
+ *
+ * Return  error status
+ *
+ * The user can optionally call this function explicitly to allocate the TSPP
+ * data buffers. Alternatively, if the user did not call this function, it
+ * is called implicitly by tspp_add_filter().
+ */
+int tspp_allocate_buffers(u32 dev, u32 channel_id, u32 count, u32 size,
+			u32 int_freq, tspp_allocator *alloc,
+			tspp_memfree *memfree, void *user)
+{
+	struct tspp_channel *channel;
+	struct tspp_device *pdev;
+	struct tspp_mem_buffer *last = NULL;
+
+	TSPP_DEBUG("tspp_allocate_buffers");
+
+	if (channel_id >= TSPP_NUM_CHANNELS) {
+		pr_err("%s: channel id out of range", __func__);
+		return -ECHRNG;
+	}
+
+	pdev = tspp_find_by_id(dev);
+	if (!pdev) {
+		pr_err("%s: can't find device %i", __func__, dev);
+		return -ENODEV;
+	}
+
+	/* enforce sane buffer counts before touching channel state */
+	if (count < MIN_ACCEPTABLE_BUFFER_COUNT) {
+		pr_err("%s: tspp requires a minimum of %i buffers\n",
+			__func__, MIN_ACCEPTABLE_BUFFER_COUNT);
+		return -EINVAL;
+	}
+
+	if (count > TSPP_NUM_BUFFERS) {
+		pr_err("%s: tspp requires a maximum of %i buffers\n",
+			__func__, TSPP_NUM_BUFFERS);
+		return -EINVAL;
+	}
+
+	channel = &pdev->channels[channel_id];
+
+	/* allow buffer allocation only if there was no previous buffer
+	 * allocation for this channel.
+	 */
+	if (channel->buffer_count > 0) {
+		pr_err("%s: buffers already allocated for channel %u",
+			__func__, channel_id);
+		return -EINVAL;
+	}
+
+	channel->max_buffers = count;
+
+	/* set up interrupt frequency */
+	if (int_freq > channel->max_buffers) {
+		int_freq = channel->max_buffers;
+		pr_warn("%s: setting interrupt frequency to %u\n",
+			__func__, int_freq);
+	}
+	channel->int_freq = int_freq;
+	/*
+	 * it is the responsibility of the caller to tspp_allocate_buffers(),
+	 * whether it's the user or the driver, to make sure the size parameter
+	 * is compatible to the channel mode.
+	 */
+	channel->buffer_size = size;
+
+	/* save user defined memory free function for later use */
+	channel->memfree = memfree;
+	channel->user_info = user;
+
+	/*
+	 * For small buffers, create a DMA pool so that memory
+	 * is not wasted through dma_alloc_coherent.
+	 */
+	if (TSPP_USE_DMA_POOL(channel->buffer_size)) {
+		channel->dma_pool = dma_pool_create("tspp",
+			&pdev->pdev->dev, channel->buffer_size, 0, 0);
+		if (!channel->dma_pool) {
+			pr_err("%s: Can't allocate memory pool\n", __func__);
+			return -ENOMEM;
+		}
+	} else {
+		channel->dma_pool = NULL;
+	}
+
+
+	/* build a circular singly-linked ring of buffer descriptors;
+	 * buffer_count doubles as the loop counter so a partial failure
+	 * leaves it reflecting how many buffers actually exist.
+	 */
+	for (channel->buffer_count = 0;
+		channel->buffer_count < channel->max_buffers;
+		channel->buffer_count++) {
+
+		/* allocate the descriptor */
+		struct tspp_mem_buffer *desc = (struct tspp_mem_buffer *)
+			kmalloc(sizeof(struct tspp_mem_buffer), GFP_KERNEL);
+		if (!desc) {
+			pr_warn("%s: Can't allocate desc %i",
+				__func__, channel->buffer_count);
+			break;
+		}
+
+		desc->desc.id = channel->buffer_count;
+		/* allocate the buffer */
+		if (tspp_alloc_buffer(channel_id, &desc->desc,
+			channel->buffer_size, channel->dma_pool,
+			alloc, user) != 0) {
+			kfree(desc);
+			pr_warn("%s: Can't allocate buffer %i",
+				__func__, channel->buffer_count);
+			break;
+		}
+
+		/* add the descriptor to the list */
+		desc->filled = 0;
+		desc->read_index = 0;
+		if (!channel->data) {
+			channel->data = desc;
+			/* NOTE(review): desc->next is assigned again a few
+			 * lines below; this first assignment is redundant
+			 * but harmless.
+			 */
+			desc->next = channel->data;
+		} else {
+			if (last != NULL)
+				last->next = desc;
+		}
+		last = desc;
+		/* keep the ring circular: tail always points back to head */
+		desc->next = channel->data;
+
+		/* prepare the sps descriptor; a user allocator supplies a
+		 * dma_base, the default path uses phys_base
+		 */
+		desc->sps.phys_base = ((alloc != NULL) ? desc->desc.dma_base :
+			desc->desc.phys_base);
+		desc->sps.base = desc->desc.virt_base;
+		desc->sps.size = desc->desc.size;
+
+		/* start the transfer */
+		if (tspp_queue_buffer(channel, desc))
+			pr_err("%s: can't queue buffer %i",
+				__func__, desc->desc.id);
+	}
+
+	if (channel->buffer_count < channel->max_buffers) {
+		/*
+		 * we failed to allocate the requested number of buffers.
+		 * we don't allow a partial success, so need to clean up here.
+		 */
+		tspp_destroy_buffers(channel_id, channel);
+		channel->buffer_count = 0;
+
+		/* dma_pool_destroy(NULL) is a no-op, so this is safe */
+		dma_pool_destroy(channel->dma_pool);
+		channel->dma_pool = NULL;
+		return -ENOMEM;
+	}
+
+	/* all cursors start at the head of the ring */
+	channel->waiting = channel->data;
+	channel->read = channel->data;
+	channel->locked = channel->data;
+
+	/* Now that buffers are scheduled to HW, kick data expiration timer */
+	if (channel->expiration_period_ms)
+		mod_timer(&channel->expiration_timer,
+			jiffies +
+			MSEC_TO_JIFFIES(
+				channel->expiration_period_ms));
+
+	return 0;
+}
+EXPORT_SYMBOL(tspp_allocate_buffers);
+
+/**
+ * tspp_attach_ion_dma_buff- attach ion dma buffer to TSPP device
+ * It will attach the DMA buffer to TSPP device to go through SMMU.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @ion_dma_buf: It contains required members for ION buffer dma mapping.
+ *
+ * Return  error status (0 on success, negative errno on failure)
+ *
+ */
+int tspp_attach_ion_dma_buff(u32 dev, struct tspp_ion_dma_buf_info *ion_dma_buf)
+{
+	struct tspp_device *pdev;
+	int dir = DMA_FROM_DEVICE;
+	int ret = -EINVAL;
+
+	if (NULL == ion_dma_buf || NULL == ion_dma_buf->dbuf) {
+		pr_err("tspp: invalid input argument");
+		return -EINVAL;
+	}
+
+	if (dev >= TSPP_MAX_DEVICES) {
+		pr_err("tspp: device id out of range");
+		return -ENODEV;
+	}
+
+	pdev = tspp_find_by_id(dev);
+	if (!pdev) {
+		pr_err("tspp: can't find device %i", dev);
+		return -ENODEV;
+	}
+
+	ion_dma_buf->attach = dma_buf_attach(ion_dma_buf->dbuf,
+					&pdev->pdev->dev);
+	if (IS_ERR_OR_NULL(ion_dma_buf->attach)) {
+		dev_err(&pdev->pdev->dev, "%s: dma_buf_attach fail", __func__);
+		return -ENODEV;
+	}
+	ion_dma_buf->table = dma_buf_map_attachment(ion_dma_buf->attach, dir);
+	if (IS_ERR_OR_NULL(ion_dma_buf->table)) {
+		dev_err(&pdev->pdev->dev, "dma_buf_map_attachment fail");
+		dma_buf_detach(ion_dma_buf->dbuf, ion_dma_buf->attach);
+		return -ENODEV;
+	}
+	ret = dma_map_sg(&pdev->pdev->dev, ion_dma_buf->table->sgl,
+		ion_dma_buf->table->nents, dir);
+	if (ret <= 0) {
+		dev_err(&pdev->pdev->dev, "dma_map_sg failed! ret=%d\n", ret);
+		/* BUG FIX: dma_map_sg() returns 0 on failure; make sure a
+		 * negative errno (not 0) propagates to the caller.
+		 */
+		if (ret == 0)
+			ret = -EIO;
+		goto unmap_attachment;
+	}
+	if (ion_dma_buf->table->nents > 1) {
+		dev_err(&pdev->pdev->dev, "no of sg table entries %d > 1\n",
+			ion_dma_buf->table->nents);
+		/*
+		 * BUG FIX: the sg list was successfully mapped just above, so
+		 * it must be unmapped on this path, and a negative errno must
+		 * be returned; the old code leaked the mapping and returned
+		 * the positive nents count as if it were an error code.
+		 */
+		dma_unmap_sg(&pdev->pdev->dev, ion_dma_buf->table->sgl,
+			ion_dma_buf->table->nents, dir);
+		ret = -EINVAL;
+		goto unmap_attachment;
+	}
+
+	ion_dma_buf->dma_map_base = sg_dma_address(ion_dma_buf->table->sgl);
+	ion_dma_buf->smmu_map = true;
+	return 0;
+
+unmap_attachment:
+	dma_buf_unmap_attachment(ion_dma_buf->attach, ion_dma_buf->table, dir);
+	dma_buf_detach(ion_dma_buf->dbuf, ion_dma_buf->attach);
+	/* NOTE(review): this drops a dma_buf reference the caller acquired;
+	 * it mirrors the put in tspp_detach_ion_dma_buff, but confirm the
+	 * caller expects ownership of the dbuf ref to transfer on failure.
+	 */
+	dma_buf_put(ion_dma_buf->dbuf);
+
+	return ret;
+}
+EXPORT_SYMBOL(tspp_attach_ion_dma_buff);
+
+/**
+ * tspp_detach_ion_dma_buff - detach the mapped ion dma buffer from TSPP device
+ * It will detach previously mapped DMA buffer from TSPP device.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @ion_dma_buf: It contains required members for ION buffer dma mapping.
+ *
+ * Return  error status
+ *
+ */
+int tspp_detach_ion_dma_buff(u32 dev, struct tspp_ion_dma_buf_info *ion_dma_buf)
+{
+	struct tspp_device *pdev;
+	int dir = DMA_FROM_DEVICE;
+
+	/* reject anything not fully mapped by tspp_attach_ion_dma_buff() */
+	if (ion_dma_buf == NULL || ion_dma_buf->dbuf == NULL ||
+		ion_dma_buf->table == NULL || ion_dma_buf->table->sgl == NULL ||
+		ion_dma_buf->smmu_map == false) {
+		pr_err("tspp: invalid input argument");
+		return -EINVAL;
+	}
+
+	if (dev >= TSPP_MAX_DEVICES) {
+		pr_err("tspp: device id out of range");
+		return -ENODEV;
+	}
+
+	pdev = tspp_find_by_id(dev);
+	if (!pdev) {
+		pr_err("tspp: can't find device %i", dev);
+		return -ENODEV;
+	}
+
+
+	/* tear down in reverse order of the attach: sg mapping, then the
+	 * attachment mapping, then the attachment itself
+	 */
+	dma_unmap_sg(&pdev->pdev->dev, ion_dma_buf->table->sgl,
+		ion_dma_buf->table->nents, dir);
+	dma_buf_unmap_attachment(ion_dma_buf->attach, ion_dma_buf->table, dir);
+	dma_buf_detach(ion_dma_buf->dbuf, ion_dma_buf->attach);
+	/* NOTE(review): this drops a dma_buf reference that was taken by the
+	 * caller, not by this driver — confirm ownership transfer is intended.
+	 */
+	dma_buf_put(ion_dma_buf->dbuf);
+
+	ion_dma_buf->smmu_map = false;
+	return 0;
+}
+EXPORT_SYMBOL(tspp_detach_ion_dma_buff);
+
+
+/*** debugfs ***/
+/*
+ * debugfs_iomem_x32_set - debugfs write hook for a 32-bit TSPP register.
+ * @data: __iomem address of the register (set up in *_debugfs_init)
+ * @val:  value to write
+ *
+ * Starts the TSPP clocks if no channel currently holds them on, performs
+ * the write, then stops the clocks again if this function started them.
+ */
+static int debugfs_iomem_x32_set(void *data, u64 val)
+{
+	int rc;
+	int clock_started = 0;
+	struct tspp_device *pdev;
+
+	pdev = tspp_find_by_id(0);
+	if (!pdev) {
+		pr_err("%s: can't find device 0\n", __func__);
+		/* BUG FIX: report the failure to userspace instead of
+		 * silently pretending the register write succeeded
+		 */
+		return -ENODEV;
+	}
+
+	if (tspp_channels_in_use(pdev) == 0) {
+		rc = tspp_clock_start(pdev);
+		if (rc) {
+			pr_err("%s: tspp_clock_start failed %d\n",
+				__func__, rc);
+			/* BUG FIX: propagate the clock error */
+			return rc;
+		}
+		clock_started = 1;
+	}
+
+	writel_relaxed(val, data);
+	/* Assure register write */
+	wmb();
+
+	if (clock_started)
+		tspp_clock_stop(pdev);
+	return 0;
+}
+
+/*
+ * debugfs_iomem_x32_get - debugfs read hook for a 32-bit TSPP register.
+ * @data: __iomem address of the register (set up in *_debugfs_init)
+ * @val:  out parameter receiving the register value
+ *
+ * Mirrors debugfs_iomem_x32_set: starts the clocks when necessary and
+ * stops them again after the read.
+ */
+static int debugfs_iomem_x32_get(void *data, u64 *val)
+{
+	int rc;
+	int clock_started = 0;
+	struct tspp_device *pdev;
+
+	pdev = tspp_find_by_id(0);
+	if (!pdev) {
+		pr_err("%s: can't find device 0\n", __func__);
+		*val = 0;
+		/* BUG FIX: report the failure instead of handing userspace
+		 * a bogus zero value with a success status
+		 */
+		return -ENODEV;
+	}
+
+	if (tspp_channels_in_use(pdev) == 0) {
+		rc = tspp_clock_start(pdev);
+		if (rc) {
+			pr_err("%s: tspp_clock_start failed %d\n",
+				__func__, rc);
+			*val = 0;
+			/* BUG FIX: propagate the clock error */
+			return rc;
+		}
+		clock_started = 1;
+	}
+
+	*val = readl_relaxed(data);
+
+	if (clock_started)
+		tspp_clock_stop(pdev);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
+			debugfs_iomem_x32_set, "0x%08llx");
+
+/*
+ * tsif_debugfs_init - create the "tsif<instance>" debugfs directory with one
+ * file per TSIF register (backed by fops_iomem_x32) plus four stat counters.
+ * debugfs failures are deliberately non-fatal: the driver works without them.
+ */
+static void tsif_debugfs_init(struct tspp_tsif_device *tsif_device,
+	int instance)
+{
+	char name[10];
+
+	snprintf(name, 10, "tsif%i", instance);
+	tsif_device->dent_tsif = debugfs_create_dir(
+		name, NULL);
+	if (tsif_device->dent_tsif) {
+		int i;
+		void __iomem *base = tsif_device->base;
+
+		/* one debugfs file per entry in debugfs_tsif_regs */
+		for (i = 0; i < ARRAY_SIZE(debugfs_tsif_regs); i++) {
+			tsif_device->debugfs_tsif_regs[i] =
+				debugfs_create_file(
+					debugfs_tsif_regs[i].name,
+					debugfs_tsif_regs[i].mode,
+					tsif_device->dent_tsif,
+					base + debugfs_tsif_regs[i].offset,
+					&fops_iomem_x32);
+		}
+
+		/* plain u32 counters exported directly from device state */
+		debugfs_create_u32(
+			"stat_rx_chunks", 0664,
+			tsif_device->dent_tsif,
+			&tsif_device->stat_rx);
+
+		debugfs_create_u32(
+			"stat_overflow", 0664,
+			tsif_device->dent_tsif,
+			&tsif_device->stat_overflow);
+
+		debugfs_create_u32(
+			"stat_lost_sync", 0664,
+			tsif_device->dent_tsif,
+			&tsif_device->stat_lost_sync);
+
+		debugfs_create_u32(
+			"stat_timeout", 0664,
+			tsif_device->dent_tsif,
+			&tsif_device->stat_timeout);
+	}
+}
+
+/*
+ * tsif_debugfs_exit - remove this tsif instance's debugfs directory and
+ * forget every cached dentry pointer so nothing dangles afterwards.
+ */
+static void tsif_debugfs_exit(struct tspp_tsif_device *tsif_device)
+{
+	size_t reg = ARRAY_SIZE(debugfs_tsif_regs);
+
+	/* removes the directory and all files created under it */
+	debugfs_remove_recursive(tsif_device->dent_tsif);
+	tsif_device->dent_tsif = NULL;
+
+	while (reg-- > 0)
+		tsif_device->debugfs_tsif_regs[reg] = NULL;
+}
+
+/*
+ * tspp_debugfs_init - create the "tspp<instance>" debugfs directory with one
+ * register file per entry in debugfs_tspp_regs, backed by fops_iomem_x32.
+ * As with tsif_debugfs_init, a debugfs failure is silently tolerated.
+ */
+static void tspp_debugfs_init(struct tspp_device *device, int instance)
+{
+	char name[10];
+
+	snprintf(name, 10, "tspp%i", instance);
+	device->dent = debugfs_create_dir(
+		name, NULL);
+	if (device->dent) {
+		int i;
+		void __iomem *base = device->base;
+
+		for (i = 0; i < ARRAY_SIZE(debugfs_tspp_regs); i++)
+			device->debugfs_regs[i] =
+				debugfs_create_file(
+					debugfs_tspp_regs[i].name,
+					debugfs_tspp_regs[i].mode,
+					device->dent,
+					base + debugfs_tspp_regs[i].offset,
+					&fops_iomem_x32);
+	}
+}
+
+/*
+ * tspp_debugfs_exit - remove the tspp debugfs directory and clear all
+ * cached dentry pointers.
+ */
+static void tspp_debugfs_exit(struct tspp_device *device)
+{
+	int i;
+
+	debugfs_remove_recursive(device->dent);
+	/* BUG FIX: clear the now-dangling dir pointer, exactly as
+	 * tsif_debugfs_exit() does for its directory
+	 */
+	device->dent = NULL;
+	for (i = 0; i < ARRAY_SIZE(debugfs_tspp_regs); i++)
+		device->debugfs_regs[i] = NULL;
+}
+
+/*
+ * msm_tspp_map_irqs - look up the TSPP, both TSIF, and BAM interrupt numbers
+ * from the platform resources and cache them in @device.
+ *
+ * platform_get_irq_byname() returns a negative errno on failure, and IRQ 0
+ * is not a usable number either, so only rc > 0 is accepted throughout.
+ *
+ * Returns 0 on success, -EINVAL if any of the four IRQs is missing.
+ */
+static int msm_tspp_map_irqs(struct platform_device *pdev,
+			struct tspp_device *device)
+{
+	int rc;
+
+	/* get IRQ numbers from platform information */
+
+	/* map TSPP IRQ */
+	rc = platform_get_irq_byname(pdev, "TSIF_TSPP_IRQ");
+	if (rc > 0) {
+		device->tspp_irq = rc;
+	} else {
+		dev_err(&pdev->dev, "failed to get TSPP IRQ");
+		return -EINVAL;
+	}
+
+	/* map TSIF IRQs */
+	rc = platform_get_irq_byname(pdev, "TSIF0_IRQ");
+	if (rc > 0) {
+		device->tsif[0].tsif_irq = rc;
+	} else {
+		dev_err(&pdev->dev, "failed to get TSIF0 IRQ");
+		return -EINVAL;
+	}
+
+	rc = platform_get_irq_byname(pdev, "TSIF1_IRQ");
+	if (rc > 0) {
+		device->tsif[1].tsif_irq = rc;
+	} else {
+		dev_err(&pdev->dev, "failed to get TSIF1 IRQ");
+		return -EINVAL;
+	}
+
+	/* map BAM IRQ */
+	rc = platform_get_irq_byname(pdev, "TSIF_BAM_IRQ");
+	if (rc > 0) {
+		device->bam_irq = rc;
+	} else {
+		dev_err(&pdev->dev, "failed to get TSPP BAM IRQ");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * msm_tspp_probe - bind the TSPP platform device: allocate the device
+ * context, acquire pinctrl/bus/regulator/clock resources, map all register
+ * regions, set up debugfs and power management, reset the hardware and
+ * initialize the channels. On any failure everything acquired so far is
+ * released in reverse order via the cascading error labels.
+ *
+ * BUG FIXES in this revision: several error paths previously left rc == 0
+ * (ioremap failures, IRQ mapping, iommu init, clock start), so probe would
+ * free all resources and then report success; the err_clock path also never
+ * undid wakeup_source_init()/pm_runtime_enable().
+ */
+static int msm_tspp_probe(struct platform_device *pdev)
+{
+	int rc = -ENODEV;
+	u32 version;
+	u32 i;
+	struct tspp_device *device;
+	struct resource *mem_tsif0;
+	struct resource *mem_tsif1;
+	struct resource *mem_tspp;
+	struct resource *mem_bam;
+	struct msm_bus_scale_pdata *tspp_bus_pdata = NULL;
+	unsigned long rate;
+
+	if (pdev->dev.of_node) {
+		/* ID is always 0 since there is only 1 instance of TSPP */
+		pdev->id = 0;
+		tspp_bus_pdata = msm_bus_cl_get_pdata(pdev);
+	} else {
+		/* must have device tree data */
+		pr_err("tspp: Device tree data not available\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/* OK, we will use this device */
+	device = kzalloc(sizeof(struct tspp_device), GFP_KERNEL);
+	if (!device) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	/* set up references */
+	device->pdev = pdev;
+	platform_set_drvdata(pdev, device);
+
+	/* setup pin control */
+	rc = tspp_get_pinctrl(device);
+	if (rc) {
+		pr_err("tspp: failed to get pin control data, rc=%d\n", rc);
+		goto err_pinctrl;
+	}
+
+	/* register bus client (failure is non-fatal, only logged) */
+	if (tspp_bus_pdata) {
+		device->tsif_bus_client =
+			msm_bus_scale_register_client(tspp_bus_pdata);
+		if (!device->tsif_bus_client)
+			pr_err("tspp: Unable to register bus client\n");
+	} else {
+		device->tsif_bus_client = 0;
+	}
+
+	/* map regulators; vdd_cx is optional */
+	device->tsif_vreg = devm_regulator_get_optional(&pdev->dev, "vdd_cx");
+	if (IS_ERR_OR_NULL(device->tsif_vreg)) {
+		rc = PTR_ERR(device->tsif_vreg);
+		device->tsif_vreg = NULL;
+		if (rc == -ENODEV) {
+			pr_notice("%s: vdd_cx regulator will not be used\n",
+				__func__);
+		} else {
+			dev_err(&pdev->dev,
+				"failed to get CX regulator, err=%d\n", rc);
+			goto err_regulator;
+		}
+	} else {
+		/* Set an initial voltage and enable the regulator */
+		rc = regulator_set_voltage(device->tsif_vreg,
+					RPMH_REGULATOR_LEVEL_OFF,
+					RPMH_REGULATOR_LEVEL_MAX);
+		if (rc) {
+			dev_err(&pdev->dev, "Unable to set CX voltage.\n");
+			goto err_regulator;
+		}
+
+		rc = regulator_enable(device->tsif_vreg);
+		if (rc) {
+			dev_err(&pdev->dev, "Unable to enable CX regulator.\n");
+			goto err_regulator;
+		}
+	}
+
+	/* map clocks */
+	device->tsif_pclk = clk_get(&pdev->dev, "iface_clk");
+	if (IS_ERR_OR_NULL(device->tsif_pclk)) {
+		rc = PTR_ERR(device->tsif_pclk);
+		device->tsif_pclk = NULL;
+		goto err_pclock;
+	}
+
+	device->tsif_ref_clk = clk_get(&pdev->dev, "ref_clk");
+	if (IS_ERR_OR_NULL(device->tsif_ref_clk)) {
+		rc = PTR_ERR(device->tsif_ref_clk);
+		device->tsif_ref_clk = NULL;
+		goto err_refclock;
+	}
+	rate = clk_round_rate(device->tsif_ref_clk, 1);
+	rc = clk_set_rate(device->tsif_ref_clk, rate);
+	if (rc)
+		goto err_res_tsif0;
+
+	/* map I/O memory */
+	mem_tsif0 = platform_get_resource_byname(pdev,
+				IORESOURCE_MEM, "MSM_TSIF0_PHYS");
+	if (!mem_tsif0) {
+		pr_err("tspp: Missing tsif0 MEM resource\n");
+		rc = -ENXIO;
+		goto err_res_tsif0;
+	}
+	device->tsif[0].base = ioremap(mem_tsif0->start,
+		resource_size(mem_tsif0));
+	if (!device->tsif[0].base) {
+		pr_err("tspp: ioremap failed\n");
+		/* BUG FIX: rc was left 0 here */
+		rc = -ENOMEM;
+		goto err_map_tsif0;
+	}
+
+	mem_tsif1 = platform_get_resource_byname(pdev,
+				IORESOURCE_MEM, "MSM_TSIF1_PHYS");
+	if (!mem_tsif1) {
+		dev_err(&pdev->dev, "Missing tsif1 MEM resource\n");
+		rc = -ENXIO;
+		goto err_res_tsif1;
+	}
+	device->tsif[1].base = ioremap(mem_tsif1->start,
+		resource_size(mem_tsif1));
+	if (!device->tsif[1].base) {
+		dev_err(&pdev->dev, "ioremap failed");
+		/* BUG FIX: rc was left 0 here */
+		rc = -ENOMEM;
+		goto err_map_tsif1;
+	}
+
+	mem_tspp = platform_get_resource_byname(pdev,
+				IORESOURCE_MEM, "MSM_TSPP_PHYS");
+	if (!mem_tspp) {
+		dev_err(&pdev->dev, "Missing MEM resource");
+		rc = -ENXIO;
+		goto err_res_dev;
+	}
+	device->base = ioremap(mem_tspp->start, resource_size(mem_tspp));
+	if (!device->base) {
+		dev_err(&pdev->dev, "ioremap failed");
+		/* BUG FIX: rc was left 0 here */
+		rc = -ENOMEM;
+		goto err_map_dev;
+	}
+
+	mem_bam = platform_get_resource_byname(pdev,
+				IORESOURCE_MEM, "MSM_TSPP_BAM_PHYS");
+	if (!mem_bam) {
+		pr_err("tspp: Missing bam MEM resource");
+		rc = -ENXIO;
+		goto err_res_bam;
+	}
+	memset(&device->bam_props, 0, sizeof(device->bam_props));
+	device->bam_props.phys_addr = mem_bam->start;
+	device->bam_props.virt_addr = ioremap(mem_bam->start,
+		resource_size(mem_bam));
+	if (!device->bam_props.virt_addr) {
+		dev_err(&pdev->dev, "ioremap failed");
+		/* BUG FIX: rc was left 0 here */
+		rc = -ENOMEM;
+		goto err_map_bam;
+	}
+
+	/* BUG FIX: capture the error instead of discarding it (rc was 0) */
+	rc = msm_tspp_map_irqs(pdev, device);
+	if (rc)
+		goto err_irq;
+	device->req_irqs = false;
+
+	if (tspp_iommu_init(device)) {
+		dev_err(&pdev->dev, "iommu init failed");
+		/* BUG FIX: rc was left 0 on this path */
+		rc = -ENODEV;
+		goto err_iommu;
+	}
+
+	device->tts_source = TSIF_TTS_TCR;
+	for (i = 0; i < TSPP_TSIF_INSTANCES; i++)
+		device->tsif[i].tts_source = device->tts_source;
+
+	/* power management */
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+	tspp_debugfs_init(device, 0);
+
+	for (i = 0; i < TSPP_TSIF_INSTANCES; i++)
+		tsif_debugfs_init(&device->tsif[i], i);
+
+	wakeup_source_init(&device->ws, dev_name(&pdev->dev));
+
+	/* set up pointers to ram-based 'registers' */
+	device->filters[0] = device->base + TSPP_PID_FILTER_TABLE0;
+	device->filters[1] = device->base + TSPP_PID_FILTER_TABLE1;
+	device->filters[2] = device->base + TSPP_PID_FILTER_TABLE2;
+	device->tspp_key_table = device->base + TSPP_DATA_KEY;
+	device->tspp_global_performance =
+		device->base + TSPP_GLOBAL_PERFORMANCE;
+	device->tspp_pipe_context =
+		device->base + TSPP_PIPE_CONTEXT;
+	device->tspp_pipe_performance =
+		device->base + TSPP_PIPE_PERFORMANCE;
+
+	device->bam_props.summing_threshold = 0x10;
+	device->bam_props.irq = device->bam_irq;
+	device->bam_props.manage = SPS_BAM_MGR_LOCAL;
+	/*add SPS BAM log level*/
+	device->bam_props.ipc_loglevel = TSPP_BAM_DEFAULT_IPC_LOGLVL;
+
+	if (tspp_clock_start(device) != 0) {
+		dev_err(&pdev->dev, "Can't start clocks");
+		/* BUG FIX: rc was left 0 on this path */
+		rc = -EBUSY;
+		goto err_clock;
+	}
+
+	device->bam_handle = SPS_DEV_HANDLE_INVALID;
+
+	spin_lock_init(&device->spinlock);
+	mutex_init(&device->mutex);
+	tasklet_init(&device->tlet, tspp_sps_complete_tlet,
+			(unsigned long)device);
+
+	/* initialize everything to a known state */
+	tspp_global_reset(device);
+
+	version = readl_relaxed(device->base + TSPP_VERSION);
+	/*
+	 * TSPP version can be bits [7:0] or alternatively,
+	 * TSPP major version is bits [31:28].
+	 */
+	if ((version != 0x1) && (((version >> 28) & 0xF) != 0x1))
+		pr_warn("tspp: unrecognized hw version=%i", version);
+
+	/* initialize the channels */
+	for (i = 0; i < TSPP_NUM_CHANNELS; i++)
+		tspp_channel_init(&(device->channels[i]), device);
+
+	/* stop the clocks for power savings */
+	tspp_clock_stop(device);
+
+	/* everything is ok, so add the device to the list */
+	list_add_tail(&(device->devlist), &tspp_devices);
+	return 0;
+
+err_clock:
+	/* BUG FIX: undo the wakeup source and runtime-PM enable that were
+	 * done just before this point; they were previously leaked here
+	 */
+	wakeup_source_trash(&device->ws);
+	pm_runtime_disable(&pdev->dev);
+	tspp_debugfs_exit(device);
+	for (i = 0; i < TSPP_TSIF_INSTANCES; i++)
+		tsif_debugfs_exit(&device->tsif[i]);
+err_iommu:
+	tspp_iommu_release_iomapping(device);
+err_irq:
+	iounmap(device->bam_props.virt_addr);
+err_map_bam:
+err_res_bam:
+	iounmap(device->base);
+err_map_dev:
+err_res_dev:
+	iounmap(device->tsif[1].base);
+err_map_tsif1:
+err_res_tsif1:
+	iounmap(device->tsif[0].base);
+err_map_tsif0:
+err_res_tsif0:
+	if (device->tsif_ref_clk)
+		clk_put(device->tsif_ref_clk);
+err_refclock:
+	if (device->tsif_pclk)
+		clk_put(device->tsif_pclk);
+err_pclock:
+	if (device->tsif_vreg)
+		regulator_disable(device->tsif_vreg);
+err_regulator:
+	if (device->tsif_bus_client)
+		msm_bus_scale_unregister_client(device->tsif_bus_client);
+err_pinctrl:
+	kfree(device);
+
+out:
+	return rc;
+}
+
+/*
+ * msm_tspp_remove - unbind the TSPP platform device, releasing everything
+ * probe acquired in roughly reverse order.
+ *
+ * BUG FIX: tspp_debugfs_exit() was never called, leaking the "tspp0"
+ * debugfs directory across a remove/probe cycle. Also dropped a dead
+ * local ('channel' was assigned but never used).
+ */
+static int msm_tspp_remove(struct platform_device *pdev)
+{
+	u32 i;
+
+	struct tspp_device *device = platform_get_drvdata(pdev);
+
+	/* free the buffers, and delete the channels */
+	for (i = 0; i < TSPP_NUM_CHANNELS; i++)
+		tspp_close_channel(device->pdev->id, i);
+
+	for (i = 0; i < TSPP_TSIF_INSTANCES; i++)
+		tsif_debugfs_exit(&device->tsif[i]);
+
+	/* BUG FIX: remove the tspp debugfs dir created in probe */
+	tspp_debugfs_exit(device);
+
+	mutex_destroy(&device->mutex);
+
+	if (device->tsif_bus_client)
+		msm_bus_scale_unregister_client(device->tsif_bus_client);
+
+	wakeup_source_trash(&device->ws);
+	if (device->req_irqs)
+		msm_tspp_free_irqs(device);
+
+	iounmap(device->bam_props.virt_addr);
+	iounmap(device->base);
+	for (i = 0; i < TSPP_TSIF_INSTANCES; i++)
+		iounmap(device->tsif[i].base);
+
+	if (device->tsif_ref_clk)
+		clk_put(device->tsif_ref_clk);
+
+	if (device->tsif_pclk)
+		clk_put(device->tsif_pclk);
+
+	if (device->tsif_vreg)
+		regulator_disable(device->tsif_vreg);
+
+	tspp_iommu_release_iomapping(device);
+	arm_iommu_detach_device(&pdev->dev);
+
+	pm_runtime_disable(&pdev->dev);
+
+	kfree(device);
+
+	return 0;
+}
+
+/*** power management ***/
+
+/* runtime-PM suspend hook: nothing to quiesce here, only logs for tracing */
+static int tspp_runtime_suspend(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: suspending...");
+	return 0;
+}
+
+/* runtime-PM resume hook: counterpart of the suspend no-op above */
+static int tspp_runtime_resume(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: resuming...");
+	return 0;
+}
+
+/* runtime-PM callbacks above only log; no device state is saved/restored */
+static const struct dev_pm_ops tspp_dev_pm_ops = {
+	.runtime_suspend = tspp_runtime_suspend,
+	.runtime_resume = tspp_runtime_resume,
+};
+
+/* device-tree match table: this driver binds via "qcom,msm_tspp" only */
+static const struct of_device_id msm_match_table[] = {
+	{.compatible = "qcom,msm_tspp"},
+	{}
+};
+
+static struct platform_driver msm_tspp_driver = {
+	.probe = msm_tspp_probe,
+	.remove = msm_tspp_remove,
+	.driver = {
+		.name = "msm_tspp",
+		.pm = &tspp_dev_pm_ops,
+		.of_match_table = msm_match_table,
+	},
+};
+
+
+/* module entry point: register the platform driver; devices are bound
+ * later through the device-tree match table
+ */
+static int __init mod_init(void)
+{
+	int rc = platform_driver_register(&msm_tspp_driver);
+
+	if (rc)
+		pr_err("tspp: platform_driver_register failed: %d", rc);
+
+	return rc;
+}
+
+/* module exit point: unregistering the driver unbinds any probed device */
+static void __exit mod_exit(void)
+{
+	/* delete low level driver */
+	platform_driver_unregister(&msm_tspp_driver);
+}
+
+module_init(mod_init);
+module_exit(mod_exit);
+
+MODULE_DESCRIPTION("TSPP platform device");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/Makefile b/drivers/media/platform/msm/camera/Makefile
index 2e71d05..800c9ea 100644
--- a/drivers/media/platform/msm/camera/Makefile
+++ b/drivers/media/platform/msm/camera/Makefile
@@ -7,4 +7,5 @@
obj-$(CONFIG_SPECTRA_CAMERA) += cam_cdm/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_isp/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_module/
-obj-$(CONFIG_SPECTRA_CAMERA) += icp/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_icp/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_jpeg/
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm.h b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm.h
index fc7a493..03f6e0c 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm.h
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm.h
@@ -27,23 +27,12 @@
#include "cam_cpas_api.h"
#include "cam_hw_intf.h"
#include "cam_hw.h"
-
-#ifdef CONFIG_CAM_CDM_DBG
-#define CDM_CDBG(fmt, args...) pr_err(fmt, ##args)
-#else
-#define CDM_CDBG(fmt, args...) pr_debug(fmt, ##args)
-#endif
-
-#ifdef CONFIG_CAM_CDM_DUMP_DBG
-#define CDM_DUMP_CDBG(fmt, args...) pr_err(fmt, ##args)
-#else
-#define CDM_DUMP_CDBG(fmt, args...) pr_debug(fmt, ##args)
-#endif
+#include "cam_debug_util.h"
#define CAM_MAX_SW_CDM_VERSION_SUPPORTED 1
#define CAM_SW_CDM_INDEX 0
#define CAM_CDM_INFLIGHT_WORKS 5
-#define CAM_CDM_HW_RESET_TIMEOUT 3000
+#define CAM_CDM_HW_RESET_TIMEOUT 300
#define CAM_CDM_HW_ID_MASK 0xF
#define CAM_CDM_HW_ID_SHIFT 0x5
@@ -233,6 +222,7 @@
struct cam_cdm_client *clients[CAM_PER_CDM_MAX_REGISTERED_CLIENTS];
uint8_t bl_tag;
atomic_t error;
+ atomic_t bl_done;
struct cam_cdm_hw_mem gen_irq;
uint32_t cpas_handle;
};
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.c
index 341406a..6c8bde1 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.c
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.c
@@ -10,8 +10,6 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "CAM-CDM-CORE %s:%d " fmt, __func__, __LINE__
-
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -32,7 +30,7 @@
static void cam_cdm_get_client_refcount(struct cam_cdm_client *client)
{
mutex_lock(&client->lock);
- CDM_CDBG("CDM client get refcount=%d\n",
+ CAM_DBG(CAM_CDM, "CDM client get refcount=%d",
client->refcount);
client->refcount++;
mutex_unlock(&client->lock);
@@ -41,12 +39,12 @@
static void cam_cdm_put_client_refcount(struct cam_cdm_client *client)
{
mutex_lock(&client->lock);
- CDM_CDBG("CDM client put refcount=%d\n",
+ CAM_DBG(CAM_CDM, "CDM client put refcount=%d",
client->refcount);
if (client->refcount > 0) {
client->refcount--;
} else {
- pr_err("Refcount put when zero\n");
+ CAM_ERR(CAM_CDM, "Refcount put when zero");
WARN_ON(1);
}
mutex_unlock(&client->lock);
@@ -63,16 +61,16 @@
cam_version->reserved = 0;
return true;
default:
- pr_err("CDM Version=%x not supported in util\n", ver);
+ CAM_ERR(CAM_CDM, "CDM Version=%x not supported in util", ver);
break;
}
return false;
}
-void cam_cdm_cpas_cb(int32_t client_handle, void *userdata,
+void cam_cdm_cpas_cb(uint32_t client_handle, void *userdata,
enum cam_camnoc_irq_type evt_type, uint32_t evt_data)
{
- pr_err("CPAS error callback type=%d with data=%x\n", evt_type,
+ CAM_ERR(CAM_CDM, "CPAS error callback type=%d with data=%x", evt_type,
evt_data);
}
@@ -84,13 +82,14 @@
case CAM_CDM170_VERSION:
return &CDM170_ops;
default:
- pr_err("CDM Version=%x not supported in util\n", ver);
+ CAM_ERR(CAM_CDM, "CDM Version=%x not supported in util",
+ ver);
}
} else if (cam_version) {
if ((cam_version->major == 1) && (cam_version->minor == 0) &&
(cam_version->incr == 0))
return &CDM170_ops;
- pr_err("cam_hw_version=%x:%x:%x not supported\n",
+ CAM_ERR(CAM_CDM, "cam_hw_version=%x:%x:%x not supported",
cam_version->major, cam_version->minor,
cam_version->incr);
}
@@ -107,7 +106,7 @@
if (node->bl_tag == tag)
return node;
}
- pr_err("Could not find the bl request for tag=%d\n", tag);
+ CAM_ERR(CAM_CDM, "Could not find the bl request for tag=%x", tag);
return NULL;
}
@@ -135,11 +134,11 @@
for (i = 0; i < CAM_PER_CDM_MAX_REGISTERED_CLIENTS; i++) {
if (hw->clients[i] == NULL) {
- CDM_CDBG("Found client slot %d\n", i);
+ CAM_DBG(CAM_CDM, "Found client slot %d", i);
return i;
}
}
- pr_err("No more client slots\n");
+ CAM_ERR(CAM_CDM, "No more client slots");
return -EBUSY;
}
@@ -153,7 +152,7 @@
struct cam_cdm_client *client = NULL;
if (!cdm_hw) {
- pr_err("CDM Notify called with NULL hw info\n");
+ CAM_ERR(CAM_CDM, "CDM Notify called with NULL hw info");
return;
}
core = (struct cam_cdm *)cdm_hw->core_info;
@@ -166,20 +165,21 @@
client_idx = CAM_CDM_GET_CLIENT_IDX(node->client_hdl);
client = core->clients[client_idx];
if ((!client) || (client->handle != node->client_hdl)) {
- pr_err("Invalid client %pK hdl=%x\n", client,
+ CAM_ERR(CAM_CDM, "Invalid client %pK hdl=%x", client,
node->client_hdl);
return;
}
cam_cdm_get_client_refcount(client);
if (client->data.cam_cdm_callback) {
- CDM_CDBG("Calling client=%s cb cookie=%d\n",
+ CAM_DBG(CAM_CDM, "Calling client=%s cb cookie=%d",
client->data.identifier, node->cookie);
client->data.cam_cdm_callback(node->client_hdl,
node->userdata, CAM_CDM_CB_STATUS_BL_SUCCESS,
node->cookie);
- CDM_CDBG("Exit client cb cookie=%d\n", node->cookie);
+ CAM_DBG(CAM_CDM, "Exit client cb cookie=%d",
+ node->cookie);
} else {
- pr_err("No cb registered for client hdl=%x\n",
+ CAM_ERR(CAM_CDM, "No cb registered for client hdl=%x",
node->client_hdl);
}
cam_cdm_put_client_refcount(client);
@@ -190,7 +190,7 @@
if (core->clients[i] != NULL) {
client = core->clients[i];
mutex_lock(&client->lock);
- CDM_CDBG("Found client slot %d\n", i);
+ CAM_DBG(CAM_CDM, "Found client slot %d", i);
if (client->data.cam_cdm_callback) {
if (status == CAM_CDM_CB_STATUS_PAGEFAULT) {
unsigned long iova =
@@ -203,7 +203,8 @@
(iova & 0xFFFFFFFF));
}
} else {
- pr_err("No cb registered for client hdl=%x\n",
+ CAM_ERR(CAM_CDM,
+ "No cb registered for client hdl=%x",
client->handle);
}
mutex_unlock(&client->lock);
@@ -216,7 +217,7 @@
{
struct cam_hw_info *cdm_hw = hw_priv;
struct cam_cdm *core = NULL;
- int rc = -1;
+ int rc = -EPERM;
int client_idx;
struct cam_cdm_client *client;
uint32_t *handle = start_args;
@@ -228,24 +229,26 @@
client_idx = CAM_CDM_GET_CLIENT_IDX(*handle);
client = core->clients[client_idx];
if (!client) {
- pr_err("Invalid client %pK hdl=%x\n", client, *handle);
+ CAM_ERR(CAM_CDM, "Invalid client %pK hdl=%x", client, *handle);
return -EINVAL;
}
cam_cdm_get_client_refcount(client);
if (*handle != client->handle) {
- pr_err("client id given handle=%x invalid\n", *handle);
+ CAM_ERR(CAM_CDM, "client id given handle=%x invalid", *handle);
cam_cdm_put_client_refcount(client);
return -EINVAL;
}
if (operation == true) {
if (true == client->stream_on) {
- pr_err("Invalid CDM client is already streamed ON\n");
+ CAM_ERR(CAM_CDM,
+ "Invalid CDM client is already streamed ON");
cam_cdm_put_client_refcount(client);
return rc;
}
} else {
if (client->stream_on == false) {
- pr_err("Invalid CDM client is already streamed Off\n");
+ CAM_ERR(CAM_CDM,
+ "Invalid CDM client is already streamed Off");
cam_cdm_put_client_refcount(client);
return rc;
}
@@ -265,26 +268,28 @@
rc = cam_cpas_start(core->cpas_handle,
&ahb_vote, &axi_vote);
if (rc != 0) {
- pr_err("CPAS start failed\n");
+ CAM_ERR(CAM_CDM, "CPAS start failed");
goto end;
}
- CDM_CDBG("CDM init first time\n");
+ CAM_DBG(CAM_CDM, "CDM init first time");
if (core->id == CAM_CDM_VIRTUAL) {
- CDM_CDBG("Virtual CDM HW init first time\n");
+ CAM_DBG(CAM_CDM,
+ "Virtual CDM HW init first time");
rc = 0;
} else {
- CDM_CDBG("CDM HW init first time\n");
+ CAM_DBG(CAM_CDM, "CDM HW init first time");
rc = cam_hw_cdm_init(hw_priv, NULL, 0);
if (rc == 0) {
rc = cam_hw_cdm_alloc_genirq_mem(
hw_priv);
if (rc != 0) {
- pr_err("Genirqalloc failed\n");
+ CAM_ERR(CAM_CDM,
+ "Genirqalloc failed");
cam_hw_cdm_deinit(hw_priv,
NULL, 0);
}
} else {
- pr_err("CDM HW init failed\n");
+ CAM_ERR(CAM_CDM, "CDM HW init failed");
}
}
if (rc == 0) {
@@ -292,11 +297,11 @@
client->stream_on = true;
} else {
if (cam_cpas_stop(core->cpas_handle))
- pr_err("CPAS stop failed\n");
+ CAM_ERR(CAM_CDM, "CPAS stop failed");
}
} else {
cdm_hw->open_count++;
- CDM_CDBG("CDM HW already ON count=%d\n",
+ CAM_DBG(CAM_CDM, "CDM HW already ON count=%d",
cdm_hw->open_count);
rc = 0;
client->stream_on = true;
@@ -304,35 +309,41 @@
} else {
if (cdm_hw->open_count) {
cdm_hw->open_count--;
- CDM_CDBG("stream OFF CDM %d\n", cdm_hw->open_count);
+ CAM_DBG(CAM_CDM, "stream OFF CDM %d",
+ cdm_hw->open_count);
if (!cdm_hw->open_count) {
- CDM_CDBG("CDM Deinit now\n");
+ CAM_DBG(CAM_CDM, "CDM Deinit now");
if (core->id == CAM_CDM_VIRTUAL) {
- CDM_CDBG("Virtual CDM HW Deinit\n");
+ CAM_DBG(CAM_CDM,
+ "Virtual CDM HW Deinit");
rc = 0;
} else {
- CDM_CDBG("CDM HW Deinit now\n");
+ CAM_DBG(CAM_CDM, "CDM HW Deinit now");
rc = cam_hw_cdm_deinit(
hw_priv, NULL, 0);
if (cam_hw_cdm_release_genirq_mem(
hw_priv))
- pr_err("Genirq release failed\n");
+ CAM_ERR(CAM_CDM,
+ "Genirq release fail");
}
if (rc) {
- pr_err("Deinit failed in streamoff\n");
+ CAM_ERR(CAM_CDM,
+ "Deinit failed in streamoff");
} else {
client->stream_on = false;
rc = cam_cpas_stop(core->cpas_handle);
if (rc)
- pr_err("CPAS stop failed\n");
+ CAM_ERR(CAM_CDM,
+ "CPAS stop failed");
}
} else {
client->stream_on = false;
- CDM_CDBG("Client stream off success =%d\n",
+ CAM_DBG(CAM_CDM,
+ "Client stream off success =%d",
cdm_hw->open_count);
}
} else {
- CDM_CDBG("stream OFF CDM Invalid %d\n",
+ CAM_DBG(CAM_CDM, "stream OFF CDM Invalid %d",
cdm_hw->open_count);
rc = -ENXIO;
}
@@ -390,33 +401,35 @@
struct cam_cdm_client *client;
if (sizeof(struct cam_cdm_hw_intf_cmd_submit_bl) != arg_size) {
- pr_err("Invalid CDM cmd %d arg size=%x\n", cmd,
+ CAM_ERR(CAM_CDM, "Invalid CDM cmd %d arg size=%x", cmd,
arg_size);
break;
}
req = (struct cam_cdm_hw_intf_cmd_submit_bl *)cmd_args;
if ((req->data->type < 0) ||
(req->data->type > CAM_CDM_BL_CMD_TYPE_KERNEL_IOVA)) {
- pr_err("Invalid req bl cmd addr type=%d\n",
+ CAM_ERR(CAM_CDM, "Invalid req bl cmd addr type=%d",
req->data->type);
break;
}
idx = CAM_CDM_GET_CLIENT_IDX(req->handle);
client = core->clients[idx];
if ((!client) || (req->handle != client->handle)) {
- pr_err("Invalid client %pK hdl=%x\n", client,
+ CAM_ERR(CAM_CDM, "Invalid client %pK hdl=%x", client,
req->handle);
break;
}
cam_cdm_get_client_refcount(client);
if ((req->data->flag == true) &&
(!client->data.cam_cdm_callback)) {
- pr_err("CDM request cb without registering cb\n");
+ CAM_ERR(CAM_CDM,
+ "CDM request cb without registering cb");
cam_cdm_put_client_refcount(client);
break;
}
if (client->stream_on != true) {
- pr_err("Invalid CDM needs to be streamed ON first\n");
+ CAM_ERR(CAM_CDM,
+ "Invalid CDM needs to be streamed ON first");
cam_cdm_put_client_refcount(client);
break;
}
@@ -434,19 +447,20 @@
struct cam_cdm_client *client;
if (sizeof(struct cam_cdm_acquire_data) != arg_size) {
- pr_err("Invalid CDM cmd %d arg size=%x\n", cmd,
+ CAM_ERR(CAM_CDM, "Invalid CDM cmd %d arg size=%x", cmd,
arg_size);
break;
}
mutex_lock(&cdm_hw->hw_mutex);
data = (struct cam_cdm_acquire_data *)cmd_args;
- CDM_CDBG("Trying to acquire client=%s in hw idx=%d\n",
+ CAM_DBG(CAM_CDM, "Trying to acquire client=%s in hw idx=%d",
data->identifier, core->index);
idx = cam_cdm_find_free_client_slot(core);
if ((idx < 0) || (core->clients[idx])) {
mutex_unlock(&cdm_hw->hw_mutex);
- pr_err("Failed to client slots for client=%s in hw idx=%d\n",
+ CAM_ERR(CAM_CDM,
+ "Fail to client slots, client=%s in hw idx=%d",
data->identifier, core->index);
break;
}
@@ -476,7 +490,8 @@
core->clients[idx] = NULL;
mutex_unlock(
&cdm_hw->hw_mutex);
- rc = -1;
+ rc = -EPERM;
+ CAM_ERR(CAM_CDM, "Invalid ops for virtual cdm");
break;
}
} else {
@@ -492,7 +507,7 @@
idx);
client->stream_on = false;
data->handle = client->handle;
- CDM_CDBG("Acquired client=%s in hwidx=%d\n",
+ CAM_DBG(CAM_CDM, "Acquired client=%s in hwidx=%d",
data->identifier, core->index);
mutex_unlock(&client->lock);
rc = 0;
@@ -504,7 +519,8 @@
struct cam_cdm_client *client;
if (sizeof(uint32_t) != arg_size) {
- pr_err("Invalid CDM cmd %d size=%x for handle=%x\n",
+ CAM_ERR(CAM_CDM,
+ "Invalid CDM cmd %d size=%x for handle=%x",
cmd, arg_size, *handle);
return -EINVAL;
}
@@ -512,16 +528,17 @@
mutex_lock(&cdm_hw->hw_mutex);
client = core->clients[idx];
if ((!client) || (*handle != client->handle)) {
- pr_err("Invalid client %pK hdl=%x\n", client, *handle);
+ CAM_ERR(CAM_CDM, "Invalid client %pK hdl=%x",
+ client, *handle);
mutex_unlock(&cdm_hw->hw_mutex);
break;
}
cam_cdm_put_client_refcount(client);
mutex_lock(&client->lock);
if (client->refcount != 0) {
- pr_err("CDM Client refcount not zero %d",
+ CAM_ERR(CAM_CDM, "CDM Client refcount not zero %d",
client->refcount);
- rc = -1;
+ rc = -EPERM;
mutex_unlock(&client->lock);
mutex_unlock(&cdm_hw->hw_mutex);
break;
@@ -535,12 +552,12 @@
break;
}
case CAM_CDM_HW_INTF_CMD_RESET_HW: {
- pr_err("CDM HW reset not supported for handle =%x\n",
+ CAM_ERR(CAM_CDM, "CDM HW reset not supported for handle =%x",
*((uint32_t *)cmd_args));
break;
}
default:
- pr_err("CDM HW intf command not valid =%d\n", cmd);
+ CAM_ERR(CAM_CDM, "CDM HW intf command not valid =%d", cmd);
break;
}
return rc;
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.h b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.h
index eb75aaa..fa3ae04 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.h
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.h
@@ -32,7 +32,7 @@
uint32_t arg_size);
bool cam_cdm_set_cam_hw_version(
uint32_t ver, struct cam_hw_version *cam_version);
-void cam_cdm_cpas_cb(int32_t client_handle, void *userdata,
+void cam_cdm_cpas_cb(uint32_t client_handle, void *userdata,
enum cam_camnoc_irq_type evt_type, uint32_t evt_data);
struct cam_cdm_utils_ops *cam_cdm_get_ops(
uint32_t ver, struct cam_hw_version *cam_version, bool by_cam_version);
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c
index 1105d2c..5f6895c 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c
@@ -10,8 +10,6 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "CAM-CDM-HW %s:%d " fmt, __func__, __LINE__
-
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -31,7 +29,6 @@
#include "cam_io_util.h"
#include "cam_hw_cdm170_reg.h"
-
#define CAM_HW_CDM_CPAS_0_NAME "qcom,cam170-cpas-cdm0"
#define CAM_HW_CDM_IPE_0_NAME "qcom,cam170-ipe0-cdm"
#define CAM_HW_CDM_IPE_1_NAME "qcom,cam170-ipe1-cdm"
@@ -65,20 +62,75 @@
if (cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_PENDING_REQ_RB,
pending_bl)) {
- pr_err("Failed to read CDM pending BL's\n");
- rc = -1;
+ CAM_ERR(CAM_CDM, "Failed to read CDM pending BL's");
+ rc = -EIO;
}
return rc;
}
+static int cam_hw_cdm_enable_bl_done_irq(struct cam_hw_info *cdm_hw,
+ bool enable)
+{
+ int rc = -EIO;
+ uint32_t irq_mask = 0;
+ struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;
+
+ if (cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_MASK,
+ &irq_mask)) {
+ CAM_ERR(CAM_CDM, "Failed to read CDM IRQ mask");
+ return rc;
+ }
+
+ if (enable == true) {
+ if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_MASK,
+ (irq_mask | 0x4))) {
+ CAM_ERR(CAM_CDM, "Write failed to enable BL done irq");
+ } else {
+ atomic_inc(&core->bl_done);
+ rc = 0;
+ CAM_DBG(CAM_CDM, "BL done irq enabled =%d",
+ atomic_read(&core->bl_done));
+ }
+ } else {
+ if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_MASK,
+ (irq_mask & 0x70003))) {
+ CAM_ERR(CAM_CDM, "Write failed to disable BL done irq");
+ } else {
+ atomic_dec(&core->bl_done);
+ rc = 0;
+ CAM_DBG(CAM_CDM, "BL done irq disable =%d",
+ atomic_read(&core->bl_done));
+ }
+ }
+ return rc;
+}
+
+static int cam_hw_cdm_enable_core(struct cam_hw_info *cdm_hw, bool enable)
+{
+ int rc = 0;
+
+ if (enable == true) {
+ if (cam_cdm_write_hw_reg(cdm_hw, CDM_CFG_CORE_EN, 0x01)) {
+ CAM_ERR(CAM_CDM, "Failed to Write CDM HW core enable");
+ rc = -EIO;
+ }
+ } else {
+ if (cam_cdm_write_hw_reg(cdm_hw, CDM_CFG_CORE_EN, 0x02)) {
+ CAM_ERR(CAM_CDM, "Failed to Write CDM HW core disable");
+ rc = -EIO;
+ }
+ }
+ return rc;
+}
+
int cam_hw_cdm_enable_core_dbg(struct cam_hw_info *cdm_hw)
{
int rc = 0;
if (cam_cdm_write_hw_reg(cdm_hw, CDM_DBG_CORE_DBUG, 0x10100)) {
- pr_err("Failed to Write CDM HW core debug\n");
- rc = -1;
+ CAM_ERR(CAM_CDM, "Failed to Write CDM HW core debug");
+ rc = -EIO;
}
return rc;
@@ -89,8 +141,8 @@
int rc = 0;
if (cam_cdm_write_hw_reg(cdm_hw, CDM_DBG_CORE_DBUG, 0)) {
- pr_err("Failed to Write CDM HW core debug\n");
- rc = -1;
+ CAM_ERR(CAM_CDM, "Failed to Write CDM HW core debug");
+ rc = -EIO;
}
return rc;
@@ -101,31 +153,31 @@
uint32_t dump_reg = 0;
cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_CORE_EN, &dump_reg);
- pr_err("dump core en=%x\n", dump_reg);
+ CAM_ERR(CAM_CDM, "dump core en=%x", dump_reg);
cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_0_REG, &dump_reg);
- pr_err("dump scratch0=%x\n", dump_reg);
+ CAM_ERR(CAM_CDM, "dump scratch0=%x", dump_reg);
cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_1_REG, &dump_reg);
- pr_err("dump scratch1=%x\n", dump_reg);
+ CAM_ERR(CAM_CDM, "dump scratch1=%x", dump_reg);
cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_2_REG, &dump_reg);
- pr_err("dump scratch2=%x\n", dump_reg);
+ CAM_ERR(CAM_CDM, "dump scratch2=%x", dump_reg);
cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_3_REG, &dump_reg);
- pr_err("dump scratch3=%x\n", dump_reg);
+ CAM_ERR(CAM_CDM, "dump scratch3=%x", dump_reg);
cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_4_REG, &dump_reg);
- pr_err("dump scratch4=%x\n", dump_reg);
+ CAM_ERR(CAM_CDM, "dump scratch4=%x", dump_reg);
cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_5_REG, &dump_reg);
- pr_err("dump scratch5=%x\n", dump_reg);
+ CAM_ERR(CAM_CDM, "dump scratch5=%x", dump_reg);
cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_6_REG, &dump_reg);
- pr_err("dump scratch6=%x\n", dump_reg);
+ CAM_ERR(CAM_CDM, "dump scratch6=%x", dump_reg);
cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_7_REG, &dump_reg);
- pr_err("dump scratch7=%x\n", dump_reg);
+ CAM_ERR(CAM_CDM, "dump scratch7=%x", dump_reg);
}
@@ -136,67 +188,68 @@
mutex_lock(&cdm_hw->hw_mutex);
cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_CORE_EN, &dump_reg);
- pr_err("CDM HW core status=%x\n", dump_reg);
- /* First pause CDM */
- cam_cdm_write_hw_reg(cdm_hw, CDM_CFG_CORE_EN, 0x03);
+ CAM_ERR(CAM_CDM, "CDM HW core status=%x", dump_reg);
+ /* First pause CDM, If it fails still proceed to dump debug info */
+ cam_hw_cdm_enable_core(cdm_hw, false);
cam_hw_cdm_bl_fifo_pending_bl_rb(cdm_hw, &dump_reg);
- pr_err("CDM HW current pending BL=%x\n", dump_reg);
+ CAM_ERR(CAM_CDM, "CDM HW current pending BL=%x", dump_reg);
loop_cnt = dump_reg;
cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_DEBUG_STATUS, &dump_reg);
- pr_err("CDM HW Debug status reg=%x\n", dump_reg);
+ CAM_ERR(CAM_CDM, "CDM HW Debug status reg=%x", dump_reg);
cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CORE_DBUG, &core_dbg);
if (core_dbg & 0x100) {
cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_LAST_AHB_ADDR, &dump_reg);
- pr_err("AHB dump reglastaddr=%x\n", dump_reg);
+ CAM_ERR(CAM_CDM, "AHB dump reglastaddr=%x", dump_reg);
cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_LAST_AHB_DATA, &dump_reg);
- pr_err("AHB dump reglastdata=%x\n", dump_reg);
+ CAM_ERR(CAM_CDM, "AHB dump reglastdata=%x", dump_reg);
} else {
- pr_err("CDM HW AHB dump not enable\n");
+ CAM_ERR(CAM_CDM, "CDM HW AHB dump not enable");
}
if (core_dbg & 0x10000) {
int i;
- pr_err("CDM HW BL FIFO dump with loop count=%d\n", loop_cnt);
+ CAM_ERR(CAM_CDM, "CDM HW BL FIFO dump with loop count=%d",
+ loop_cnt);
for (i = 0 ; i < loop_cnt ; i++) {
cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_RB, i);
cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_BASE_RB,
&dump_reg);
- pr_err("BL(%d) base addr =%x\n", i, dump_reg);
+ CAM_ERR(CAM_CDM, "BL(%d) base addr =%x", i, dump_reg);
cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_LEN_RB,
&dump_reg);
- pr_err("BL(%d) len=%d tag=%d\n", i,
+ CAM_ERR(CAM_CDM, "BL(%d) len=%d tag=%d", i,
(dump_reg & 0xFFFFF), (dump_reg & 0xFF000000));
}
} else {
- pr_err("CDM HW BL FIFO readback not enable\n");
+ CAM_ERR(CAM_CDM, "CDM HW BL FIFO readback not enable");
}
- pr_err("CDM HW default dump\n");
+ CAM_ERR(CAM_CDM, "CDM HW default dump");
cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_CORE_CFG, &dump_reg);
- pr_err("CDM HW core cfg=%x\n", dump_reg);
+ CAM_ERR(CAM_CDM, "CDM HW core cfg=%x", dump_reg);
cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_STATUS, &dump_reg);
- pr_err("CDM HW irq status=%x\n", dump_reg);
+ CAM_ERR(CAM_CDM, "CDM HW irq status=%x", dump_reg);
cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_SET, &dump_reg);
- pr_err("CDM HW irq set reg=%x\n", dump_reg);
+ CAM_ERR(CAM_CDM, "CDM HW irq set reg=%x", dump_reg);
cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CURRENT_BL_BASE, &dump_reg);
- pr_err("CDM HW current BL base=%x\n", dump_reg);
+ CAM_ERR(CAM_CDM, "CDM HW current BL base=%x", dump_reg);
cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CURRENT_BL_LEN, &dump_reg);
- pr_err("CDM HW current BL len=%d tag=%d\n", (dump_reg & 0xFFFFF),
- (dump_reg & 0xFF000000));
+ CAM_ERR(CAM_CDM, "CDM HW current BL len=%d tag=%d",
+ (dump_reg & 0xFFFFF), (dump_reg & 0xFF000000));
cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CURRENT_USED_AHB_BASE, &dump_reg);
- pr_err("CDM HW current AHB base=%x\n", dump_reg);
+ CAM_ERR(CAM_CDM, "CDM HW current AHB base=%x", dump_reg);
cam_hw_cdm_bl_fifo_pending_bl_rb(cdm_hw, &dump_reg);
- pr_err("CDM HW current pending BL=%x\n", dump_reg);
+ CAM_ERR(CAM_CDM, "CDM HW current pending BL=%x", dump_reg);
/* Enable CDM back */
- cam_cdm_write_hw_reg(cdm_hw, CDM_CFG_CORE_EN, 1);
+ cam_hw_cdm_enable_core(cdm_hw, true);
mutex_unlock(&cdm_hw->hw_mutex);
}
@@ -206,40 +259,53 @@
{
uint32_t pending_bl = 0;
int32_t available_bl_slots = 0;
- int rc = -1;
+ int rc = -EIO;
long time_left;
struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;
do {
if (cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_PENDING_REQ_RB,
&pending_bl)) {
- pr_err("Failed to read CDM pending BL's\n");
- rc = -1;
+ CAM_ERR(CAM_CDM, "Failed to read CDM pending BL's");
+ rc = -EIO;
break;
}
available_bl_slots = CAM_CDM_HWFIFO_SIZE - pending_bl;
if (available_bl_slots < 0) {
- pr_err("Invalid available slots %d:%d:%d\n",
+ CAM_ERR(CAM_CDM, "Invalid available slots %d:%d:%d",
available_bl_slots, CAM_CDM_HWFIFO_SIZE,
pending_bl);
break;
}
if (bl_count < (available_bl_slots - 1)) {
- CDM_CDBG("BL slot available_cnt=%d requested=%d\n",
+ CAM_DBG(CAM_CDM,
+ "BL slot available_cnt=%d requested=%d",
(available_bl_slots - 1), bl_count);
rc = bl_count;
break;
} else if (0 == (available_bl_slots - 1)) {
+ rc = cam_hw_cdm_enable_bl_done_irq(cdm_hw, true);
+ if (rc) {
+ CAM_ERR(CAM_CDM, "Enable BL done irq failed");
+ break;
+ }
time_left = wait_for_completion_timeout(
&core->bl_complete, msecs_to_jiffies(
CAM_CDM_BL_FIFO_WAIT_TIMEOUT));
if (time_left <= 0) {
- pr_err("CDM HW BL Wait timed out failed\n");
- rc = -1;
+ CAM_ERR(CAM_CDM,
+ "CDM HW BL Wait timed out failed");
+ if (cam_hw_cdm_enable_bl_done_irq(cdm_hw,
+ false))
+ CAM_ERR(CAM_CDM,
+ "Disable BL done irq failed");
+ rc = -EIO;
break;
}
+ if (cam_hw_cdm_enable_bl_done_irq(cdm_hw, false))
+ CAM_ERR(CAM_CDM, "Disable BL done irq failed");
rc = 0;
- CDM_CDBG("CDM HW is ready for data\n");
+ CAM_DBG(CAM_CDM, "CDM HW is ready for data");
} else {
rc = (bl_count - (available_bl_slots - 1));
break;
@@ -253,12 +319,12 @@
uint32_t len, uint32_t tag)
{
if (cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_BASE_REG, src)) {
- pr_err("Failed to write CDM base to BL base\n");
+ CAM_ERR(CAM_CDM, "Failed to write CDM base to BL base");
return true;
}
if (cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_LEN_REG,
((len & 0xFFFFF) | ((tag & 0xFF) << 20)))) {
- pr_err("Failed to write CDM BL len\n");
+ CAM_ERR(CAM_CDM, "Failed to write CDM BL len");
return true;
}
return false;
@@ -267,7 +333,7 @@
bool cam_hw_cdm_commit_bl_write(struct cam_hw_info *cdm_hw)
{
if (cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_STORE_REG, 1)) {
- pr_err("Failed to write CDM commit BL\n");
+ CAM_ERR(CAM_CDM, "Failed to write CDM commit BL");
return true;
}
return false;
@@ -282,12 +348,12 @@
int rc;
if (core->bl_tag > 63) {
- pr_err("bl_tag invalid =%d\n", core->bl_tag);
+ CAM_ERR(CAM_CDM, "bl_tag invalid =%d", core->bl_tag);
rc = -EINVAL;
goto end;
}
- CDM_CDBG("CDM write BL last cmd tag=%d total=%d\n",
- core->bl_tag, req->data->cmd_arrary_count);
+ CAM_DBG(CAM_CDM, "CDM write BL last cmd tag=%x total=%d cookie=%d",
+ core->bl_tag, req->data->cmd_arrary_count, req->data->cookie);
node = kzalloc(sizeof(struct cam_cdm_bl_cb_request_entry),
GFP_KERNEL);
if (!node) {
@@ -307,20 +373,20 @@
((4 * core->ops->cdm_required_size_genirq()) - 1),
core->bl_tag);
if (rc) {
- pr_err("CDM hw bl write failed for gen irq bltag=%d\n",
+ CAM_ERR(CAM_CDM, "CDM hw bl write failed for gen irq bltag=%d",
core->bl_tag);
list_del_init(&node->entry);
kfree(node);
- rc = -1;
+ rc = -EIO;
goto end;
}
if (cam_hw_cdm_commit_bl_write(cdm_hw)) {
- pr_err("Cannot commit the genirq BL with tag tag=%d\n",
+ CAM_ERR(CAM_CDM, "Cannot commit the genirq BL with tag tag=%d",
core->bl_tag);
list_del_init(&node->entry);
kfree(node);
- rc = -1;
+ rc = -EIO;
}
end:
@@ -331,27 +397,25 @@
struct cam_cdm_hw_intf_cmd_submit_bl *req,
struct cam_cdm_client *client)
{
- int i, rc = -1;
+ int i, rc;
struct cam_cdm_bl_request *cdm_cmd = req->data;
struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;
uint32_t pending_bl = 0;
int write_count = 0;
if (req->data->cmd_arrary_count > CAM_CDM_HWFIFO_SIZE) {
- pr_info("requested BL more than max size, cnt=%d max=%d\n",
+ pr_info("requested BL more than max size, cnt=%d max=%d",
req->data->cmd_arrary_count, CAM_CDM_HWFIFO_SIZE);
}
- if (atomic_read(&core->error) != 0) {
- pr_err("HW in error state, cannot trigger transactions now\n");
- return rc;
- }
+ if (atomic_read(&core->error))
+ return -EIO;
mutex_lock(&cdm_hw->hw_mutex);
mutex_lock(&client->lock);
rc = cam_hw_cdm_bl_fifo_pending_bl_rb(cdm_hw, &pending_bl);
if (rc) {
- pr_err("Cannot read the current BL depth\n");
+ CAM_ERR(CAM_CDM, "Cannot read the current BL depth");
mutex_unlock(&client->lock);
mutex_unlock(&cdm_hw->hw_mutex);
return rc;
@@ -363,25 +427,28 @@
if ((!cdm_cmd->cmd[i].len) &&
(cdm_cmd->cmd[i].len > 0x100000)) {
- pr_err("cmd len(%d) is invalid cnt=%d total cnt=%d\n",
+ CAM_ERR(CAM_CDM,
+ "cmd len(%d) is invalid cnt=%d total cnt=%d",
cdm_cmd->cmd[i].len, i,
req->data->cmd_arrary_count);
- rc = -1;
+ rc = -EINVAL;
break;
}
- if (atomic_read(&core->error) != 0) {
- pr_err("HW in error state cmd_count=%d total cnt=%d\n",
+ if (atomic_read(&core->error)) {
+ CAM_ERR_RATE_LIMIT(CAM_CDM,
+ "In error state cnt=%d total cnt=%d\n",
i, req->data->cmd_arrary_count);
- rc = -1;
+ rc = -EIO;
break;
}
if (write_count == 0) {
write_count = cam_hw_cdm_wait_for_bl_fifo(cdm_hw,
(req->data->cmd_arrary_count - i));
if (write_count < 0) {
- pr_err("wait for bl fifo failed %d:%d\n",
+ CAM_ERR(CAM_CDM,
+ "wait for bl fifo failed %d:%d",
i, req->data->cmd_arrary_count);
- rc = -1;
+ rc = -EIO;
break;
}
} else {
@@ -395,9 +462,10 @@
&len);
} else if (req->data->type == CAM_CDM_BL_CMD_TYPE_HW_IOVA) {
if (!cdm_cmd->cmd[i].bl_addr.hw_iova) {
- pr_err("Hw bl hw_iova is invalid %d:%d\n",
+ CAM_ERR(CAM_CDM,
+ "Hw bl hw_iova is invalid %d:%d",
i, req->data->cmd_arrary_count);
- rc = -1;
+ rc = -EINVAL;
break;
}
rc = 0;
@@ -405,48 +473,56 @@
(uint64_t)cdm_cmd->cmd[i].bl_addr.hw_iova;
len = cdm_cmd->cmd[i].len + cdm_cmd->cmd[i].offset;
} else {
- pr_err("Only mem hdl/hw va type is supported %d\n",
+ CAM_ERR(CAM_CDM,
+ "Only mem hdl/hw va type is supported %d",
req->data->type);
- rc = -1;
+ rc = -EINVAL;
break;
}
if ((!rc) && (hw_vaddr_ptr) && (len) &&
(len >= cdm_cmd->cmd[i].offset)) {
- CDM_CDBG("Got the HW VA\n");
+ CAM_DBG(CAM_CDM, "Got the HW VA");
+ if (core->bl_tag >=
+ (CAM_CDM_HWFIFO_SIZE - 1))
+ core->bl_tag = 0;
rc = cam_hw_cdm_bl_write(cdm_hw,
((uint32_t)hw_vaddr_ptr +
cdm_cmd->cmd[i].offset),
(cdm_cmd->cmd[i].len - 1), core->bl_tag);
if (rc) {
- pr_err("Hw bl write failed %d:%d\n",
+ CAM_ERR(CAM_CDM, "Hw bl write failed %d:%d",
i, req->data->cmd_arrary_count);
- rc = -1;
+ rc = -EIO;
break;
}
} else {
- pr_err("Sanity check failed for hdl=%x len=%zu:%d\n",
+ CAM_ERR(CAM_CDM,
+ "Sanity check failed for hdl=%x len=%zu:%d",
cdm_cmd->cmd[i].bl_addr.mem_handle, len,
cdm_cmd->cmd[i].offset);
- pr_err("Sanity check failed for %d:%d\n",
+ CAM_ERR(CAM_CDM, "Sanity check failed for %d:%d",
i, req->data->cmd_arrary_count);
- rc = -1;
+ rc = -EINVAL;
break;
}
if (!rc) {
- CDM_CDBG("write BL success for cnt=%d with tag=%d\n",
+ CAM_DBG(CAM_CDM,
+ "write BL success for cnt=%d with tag=%d",
i, core->bl_tag);
- core->bl_tag++;
- CDM_CDBG("Now commit the BL\n");
+
+ CAM_DBG(CAM_CDM, "Now commit the BL");
if (cam_hw_cdm_commit_bl_write(cdm_hw)) {
- pr_err("Cannot commit the BL %d tag=%d\n",
- i, (core->bl_tag - 1));
- rc = -1;
+ CAM_ERR(CAM_CDM,
+ "Cannot commit the BL %d tag=%d",
+ i, core->bl_tag);
+ rc = -EIO;
break;
}
- CDM_CDBG("BL commit success BL %d tag=%d\n", i,
- (core->bl_tag - 1));
+ CAM_DBG(CAM_CDM, "BL commit success BL %d tag=%d", i,
+ core->bl_tag);
+ core->bl_tag++;
if ((req->data->flag == true) &&
(i == (req->data->cmd_arrary_count -
1))) {
@@ -455,9 +531,6 @@
if (rc == 0)
core->bl_tag++;
}
- if (!rc && ((CAM_CDM_HWFIFO_SIZE - 1) ==
- core->bl_tag))
- core->bl_tag = 0;
}
}
mutex_unlock(&client->lock);
@@ -477,12 +550,12 @@
cdm_hw = payload->hw;
core = (struct cam_cdm *)cdm_hw->core_info;
- CDM_CDBG("IRQ status=%x\n", payload->irq_status);
+ CAM_DBG(CAM_CDM, "IRQ status=%x", payload->irq_status);
if (payload->irq_status &
CAM_CDM_IRQ_STATUS_INFO_INLINE_IRQ_MASK) {
struct cam_cdm_bl_cb_request_entry *node;
- CDM_CDBG("inline IRQ data=%x\n",
+ CAM_DBG(CAM_CDM, "inline IRQ data=%x",
payload->irq_data);
mutex_lock(&cdm_hw->hw_mutex);
node = cam_cdm_find_request_by_bl_tag(
@@ -496,47 +569,56 @@
(void *)node);
} else if (node->request_type ==
CAM_HW_CDM_BL_CB_INTERNAL) {
- pr_err("Invalid node=%pK %d\n", node,
+ CAM_ERR(CAM_CDM,
+ "Invalid node=%pK %d", node,
node->request_type);
}
list_del_init(&node->entry);
kfree(node);
} else {
- pr_err("Invalid node for inline irq\n");
+ CAM_ERR(CAM_CDM,
+ "Inval node, inline_irq st=%x data=%x",
+ payload->irq_status, payload->irq_data);
}
mutex_unlock(&cdm_hw->hw_mutex);
}
if (payload->irq_status &
CAM_CDM_IRQ_STATUS_INFO_RST_DONE_MASK) {
- CDM_CDBG("CDM HW reset done IRQ\n");
+ CAM_DBG(CAM_CDM, "CDM HW reset done IRQ");
complete(&core->reset_complete);
}
if (payload->irq_status &
CAM_CDM_IRQ_STATUS_INFO_BL_DONE_MASK) {
- pr_err("CDM HW BL done IRQ\n");
- complete(&core->bl_complete);
+ if (atomic_read(&core->bl_done)) {
+ CAM_DBG(CAM_CDM, "CDM HW BL done IRQ");
+ complete(&core->bl_complete);
+ }
}
if (payload->irq_status &
CAM_CDM_IRQ_STATUS_ERROR_INV_CMD_MASK) {
- pr_err("Invalid command IRQ, Need HW reset\n");
+ CAM_ERR_RATE_LIMIT(CAM_CDM,
+ "Invalid command IRQ, Need HW reset\n");
+ atomic_inc(&core->error);
+ cam_hw_cdm_dump_core_debug_registers(cdm_hw);
+ }
+ if (payload->irq_status &
+ CAM_CDM_IRQ_STATUS_ERROR_AHB_BUS_MASK) {
+ CAM_ERR_RATE_LIMIT(CAM_CDM, "AHB Error IRQ\n");
atomic_inc(&core->error);
cam_hw_cdm_dump_core_debug_registers(cdm_hw);
atomic_dec(&core->error);
}
if (payload->irq_status &
- CAM_CDM_IRQ_STATUS_ERROR_AHB_BUS_MASK) {
- pr_err("AHB IRQ\n");
- cam_hw_cdm_dump_core_debug_registers(cdm_hw);
- }
- if (payload->irq_status &
CAM_CDM_IRQ_STATUS_ERROR_OVER_FLOW_MASK) {
- pr_err("Overflow IRQ\n");
+ CAM_ERR_RATE_LIMIT(CAM_CDM, "Overflow Error IRQ\n");
+ atomic_inc(&core->error);
cam_hw_cdm_dump_core_debug_registers(cdm_hw);
+ atomic_dec(&core->error);
}
kfree(payload);
} else {
- pr_err("NULL payload\n");
+ CAM_ERR(CAM_CDM, "NULL payload");
}
}
@@ -552,12 +634,13 @@
core = (struct cam_cdm *)cdm_hw->core_info;
atomic_inc(&core->error);
cam_hw_cdm_dump_core_debug_registers(cdm_hw);
- pr_err("Page fault iova addr %pK\n", (void *)iova);
+ CAM_ERR_RATE_LIMIT(CAM_CDM, "Page fault iova addr %pK\n",
+ (void *)iova);
cam_cdm_notify_clients(cdm_hw, CAM_CDM_CB_STATUS_PAGEFAULT,
(void *)iova);
atomic_dec(&core->error);
} else {
- pr_err("Invalid token\n");
+ CAM_ERR(CAM_CDM, "Invalid token");
}
}
@@ -569,32 +652,38 @@
struct cam_cdm_work_payload *payload;
bool work_status;
- CDM_CDBG("Got irq\n");
+ CAM_DBG(CAM_CDM, "Got irq");
payload = kzalloc(sizeof(struct cam_cdm_work_payload), GFP_ATOMIC);
if (payload) {
if (cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_STATUS,
&payload->irq_status)) {
- pr_err("Failed to read CDM HW IRQ status\n");
+ CAM_ERR(CAM_CDM, "Failed to read CDM HW IRQ status");
+ }
+ if (!payload->irq_status) {
+ CAM_ERR_RATE_LIMIT(CAM_CDM, "Invalid irq received\n");
+ kfree(payload);
+ return IRQ_HANDLED;
}
if (payload->irq_status &
CAM_CDM_IRQ_STATUS_INFO_INLINE_IRQ_MASK) {
if (cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_USR_DATA,
&payload->irq_data)) {
- pr_err("Failed to read CDM HW IRQ data\n");
+ CAM_ERR(CAM_CDM,
+ "Failed to read CDM HW IRQ data");
}
}
- CDM_CDBG("Got payload=%d\n", payload->irq_status);
+ CAM_DBG(CAM_CDM, "Got payload=%d", payload->irq_status);
payload->hw = cdm_hw;
INIT_WORK((struct work_struct *)&payload->work,
cam_hw_cdm_work);
if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_CLEAR,
payload->irq_status))
- pr_err("Failed to Write CDM HW IRQ Clear\n");
+ CAM_ERR(CAM_CDM, "Failed to Write CDM HW IRQ Clear");
if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_CLEAR_CMD, 0x01))
- pr_err("Failed to Write CDM HW IRQ cmd\n");
+ CAM_ERR(CAM_CDM, "Failed to Write CDM HW IRQ cmd");
work_status = queue_work(cdm_core->work_queue, &payload->work);
if (work_status == false) {
- pr_err("Failed to queue work for irq=%x\n",
+ CAM_ERR(CAM_CDM, "Failed to queue work for irq=%x",
payload->irq_status);
kfree(payload);
}
@@ -618,12 +707,11 @@
genirq_alloc_cmd.align = 0;
genirq_alloc_cmd.size = (8 * CAM_CDM_HWFIFO_SIZE);
genirq_alloc_cmd.smmu_hdl = cdm_core->iommu_hdl.non_secure;
- genirq_alloc_cmd.flags = 0;
- genirq_alloc_cmd.region = CAM_MEM_MGR_REGION_NON_SECURE_IO;
+ genirq_alloc_cmd.flags = CAM_MEM_FLAG_HW_READ_WRITE;
rc = cam_mem_mgr_request_mem(&genirq_alloc_cmd,
&genirq_alloc_out);
if (rc) {
- pr_err("Failed to get genirq cmd space rc=%d\n", rc);
+ CAM_ERR(CAM_CDM, "Failed to get genirq cmd space rc=%d", rc);
goto end;
}
cdm_core->gen_irq.handle = genirq_alloc_out.mem_handle;
@@ -649,7 +737,7 @@
genirq_release_cmd.mem_handle = cdm_core->gen_irq.handle;
rc = cam_mem_mgr_release_mem(&genirq_release_cmd);
if (rc)
- pr_err("Failed to put genirq cmd space for hw\n");
+ CAM_ERR(CAM_CDM, "Failed to put genirq cmd space for hw");
return rc;
}
@@ -669,28 +757,31 @@
soc_info = &cdm_hw->soc_info;
cdm_core = (struct cam_cdm *)cdm_hw->core_info;
- rc = cam_soc_util_enable_platform_resource(soc_info, true, true);
+ rc = cam_soc_util_enable_platform_resource(soc_info, true,
+ CAM_SVS_VOTE, true);
if (rc) {
- pr_err("Enable platform failed\n");
+ CAM_ERR(CAM_CDM, "Enable platform failed");
goto end;
}
- CDM_CDBG("Enable soc done\n");
+ CAM_DBG(CAM_CDM, "Enable soc done");
/* Before triggering the reset to HW, clear the reset complete */
+ atomic_set(&cdm_core->error, 0);
+ atomic_set(&cdm_core->bl_done, 0);
reinit_completion(&cdm_core->reset_complete);
reinit_completion(&cdm_core->bl_complete);
if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_MASK, 0x70003)) {
- pr_err("Failed to Write CDM HW IRQ mask\n");
+ CAM_ERR(CAM_CDM, "Failed to Write CDM HW IRQ mask");
goto disable_return;
}
if (cam_cdm_write_hw_reg(cdm_hw, CDM_CFG_RST_CMD, 0x9)) {
- pr_err("Failed to Write CDM HW reset\n");
+ CAM_ERR(CAM_CDM, "Failed to Write CDM HW reset");
goto disable_return;
}
- CDM_CDBG("Waiting for CDM HW resetdone\n");
+ CAM_DBG(CAM_CDM, "Waiting for CDM HW resetdone");
time_left = wait_for_completion_timeout(&cdm_core->reset_complete,
msecs_to_jiffies(CAM_CDM_HW_RESET_TIMEOUT));
@@ -701,24 +792,24 @@
* as a workaround.
*/
if (time_left <= 0) {
- pr_err("CDM HW reset Wait failed time_left=%ld\n", time_left);
+ CAM_ERR(CAM_CDM, "CDM HW reset Wait failed time_left=%ld",
+ time_left);
time_left = 1;
}
if (time_left <= 0) {
- pr_err("CDM HW reset Wait failed rc=%d\n", rc);
+ CAM_ERR(CAM_CDM, "CDM HW reset Wait failed rc=%d", rc);
goto disable_return;
} else {
- CDM_CDBG("CDM Init success\n");
+ CAM_DBG(CAM_CDM, "CDM Init success");
cdm_hw->hw_state = CAM_HW_STATE_POWER_UP;
cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_MASK, 0x70003);
- cam_cdm_write_hw_reg(cdm_hw, CDM_CFG_CGC_CFG, 0x7);
rc = 0;
goto end;
}
disable_return:
- rc = -1;
+ rc = -EIO;
cam_soc_util_disable_platform_resource(soc_info, true, true);
end:
return rc;
@@ -739,9 +830,9 @@
cdm_core = cdm_hw->core_info;
rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
if (rc) {
- pr_err("disable platform failed\n");
+ CAM_ERR(CAM_CDM, "disable platform failed");
} else {
- CDM_CDBG("CDM Deinit success\n");
+ CAM_DBG(CAM_CDM, "CDM Deinit success");
cdm_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
}
@@ -756,6 +847,8 @@
struct cam_cdm *cdm_core = NULL;
struct cam_cdm_private_dt_data *soc_private = NULL;
struct cam_cpas_register_params cpas_parms;
+ struct cam_ahb_vote ahb_vote;
+ struct cam_axi_vote axi_vote;
cdm_hw_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
if (!cdm_hw_intf)
@@ -784,10 +877,10 @@
rc = cam_hw_cdm_soc_get_dt_properties(cdm_hw, msm_cam_hw_cdm_dt_match);
if (rc) {
- pr_err("Failed to get dt properties\n");
+ CAM_ERR(CAM_CDM, "Failed to get dt properties");
goto release_mem;
}
- cdm_hw_intf->hw_idx = cdm_hw->soc_info.pdev->id;
+ cdm_hw_intf->hw_idx = cdm_hw->soc_info.index;
cdm_core = (struct cam_cdm *)cdm_hw->core_info;
soc_private = (struct cam_cdm_private_dt_data *)
cdm_hw->soc_info.soc_private;
@@ -797,10 +890,10 @@
cdm_core->flags = CAM_CDM_FLAG_PRIVATE_CDM;
cdm_core->bl_tag = 0;
- atomic_set(&cdm_core->error, 0);
cdm_core->id = cam_hw_cdm_get_id_by_name(cdm_core->name);
if (cdm_core->id >= CAM_CDM_MAX) {
- pr_err("Failed to get CDM HW name for %s\n", cdm_core->name);
+ CAM_ERR(CAM_CDM, "Failed to get CDM HW name for %s",
+ cdm_core->name);
goto release_private_mem;
}
INIT_LIST_HEAD(&cdm_core->bl_request_list);
@@ -817,14 +910,14 @@
cdm_hw_intf->hw_ops.process_cmd = cam_cdm_process_cmd;
mutex_lock(&cdm_hw->hw_mutex);
- CDM_CDBG("type %d index %d\n", cdm_hw_intf->hw_type,
+ CAM_DBG(CAM_CDM, "type %d index %d", cdm_hw_intf->hw_type,
cdm_hw_intf->hw_idx);
platform_set_drvdata(pdev, cdm_hw_intf);
rc = cam_smmu_get_handle("cpas-cdm0", &cdm_core->iommu_hdl.non_secure);
if (rc < 0) {
- pr_err("cpas-cdm get iommu handle failed\n");
+ CAM_ERR(CAM_CDM, "cpas-cdm get iommu handle failed");
goto unlock_release_mem;
}
cam_smmu_reg_client_page_fault_handler(cdm_core->iommu_hdl.non_secure,
@@ -832,7 +925,7 @@
rc = cam_smmu_ops(cdm_core->iommu_hdl.non_secure, CAM_SMMU_ATTACH);
if (rc < 0) {
- pr_err("Attach iommu non secure handle failed\n");
+ CAM_ERR(CAM_CDM, "Attach iommu non secure handle failed");
goto destroy_non_secure_hdl;
}
cdm_core->iommu_hdl.secure = -1;
@@ -844,7 +937,7 @@
rc = cam_soc_util_request_platform_resource(&cdm_hw->soc_info,
cam_hw_cdm_irq, cdm_hw);
if (rc) {
- pr_err("Failed to request platform resource\n");
+ CAM_ERR(CAM_CDM, "Failed to request platform resource");
goto destroy_non_secure_hdl;
}
@@ -855,76 +948,98 @@
strlcpy(cpas_parms.identifier, "cpas-cdm", CAM_HW_IDENTIFIER_LENGTH);
rc = cam_cpas_register_client(&cpas_parms);
if (rc) {
- pr_err("Virtual CDM CPAS registration failed\n");
+ CAM_ERR(CAM_CDM, "Virtual CDM CPAS registration failed");
goto release_platform_resource;
}
- CDM_CDBG("CPAS registration successful handle=%d\n",
+ CAM_DBG(CAM_CDM, "CPAS registration successful handle=%d",
cpas_parms.client_handle);
cdm_core->cpas_handle = cpas_parms.client_handle;
+ ahb_vote.type = CAM_VOTE_ABSOLUTE;
+ ahb_vote.vote.level = CAM_SVS_VOTE;
+ axi_vote.compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+ axi_vote.uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+ rc = cam_cpas_start(cdm_core->cpas_handle, &ahb_vote, &axi_vote);
+ if (rc) {
+ CAM_ERR(CAM_CDM, "CPAS start failed");
+ goto cpas_unregister;
+ }
+
rc = cam_hw_cdm_init(cdm_hw, NULL, 0);
if (rc) {
- pr_err("Failed to Init CDM HW\n");
- goto init_failed;
+ CAM_ERR(CAM_CDM, "Failed to Init CDM HW");
+ goto cpas_stop;
}
cdm_hw->open_count++;
if (cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_HW_VERSION,
&cdm_core->hw_version)) {
- pr_err("Failed to read CDM HW Version\n");
+ CAM_ERR(CAM_CDM, "Failed to read CDM HW Version");
goto deinit;
}
- if (cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_HW_VERSION,
+ if (cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_TITAN_VERSION,
&cdm_core->hw_family_version)) {
- pr_err("Failed to read CDM family Version\n");
+ CAM_ERR(CAM_CDM, "Failed to read CDM family Version");
goto deinit;
}
- CDM_CDBG("CDM Hw version read success family =%x hw =%x\n",
+ CAM_DBG(CAM_CDM, "CDM Hw version read success family =%x hw =%x",
cdm_core->hw_family_version, cdm_core->hw_version);
cdm_core->ops = cam_cdm_get_ops(cdm_core->hw_version, NULL,
false);
if (!cdm_core->ops) {
- pr_err("Failed to util ops for hw\n");
+ CAM_ERR(CAM_CDM, "Failed to util ops for hw");
goto deinit;
}
if (!cam_cdm_set_cam_hw_version(cdm_core->hw_version,
&cdm_core->version)) {
- pr_err("Failed to set cam he version for hw\n");
+ CAM_ERR(CAM_CDM, "Failed to set cam he version for hw");
goto deinit;
}
rc = cam_hw_cdm_deinit(cdm_hw, NULL, 0);
if (rc) {
- pr_err("Failed to Deinit CDM HW\n");
- goto release_platform_resource;
+ CAM_ERR(CAM_CDM, "Failed to Deinit CDM HW");
+ cdm_hw->open_count--;
+ goto cpas_stop;
+ }
+
+ rc = cam_cpas_stop(cdm_core->cpas_handle);
+ if (rc) {
+ CAM_ERR(CAM_CDM, "CPAS stop failed");
+ cdm_hw->open_count--;
+ goto cpas_unregister;
}
rc = cam_cdm_intf_register_hw_cdm(cdm_hw_intf,
soc_private, CAM_HW_CDM, &cdm_core->index);
if (rc) {
- pr_err("HW CDM Interface registration failed\n");
- goto release_platform_resource;
+ CAM_ERR(CAM_CDM, "HW CDM Interface registration failed");
+ cdm_hw->open_count--;
+ goto cpas_unregister;
}
cdm_hw->open_count--;
mutex_unlock(&cdm_hw->hw_mutex);
- CDM_CDBG("CDM%d probe successful\n", cdm_hw_intf->hw_idx);
+ CAM_DBG(CAM_CDM, "CDM%d probe successful", cdm_hw_intf->hw_idx);
return rc;
deinit:
if (cam_hw_cdm_deinit(cdm_hw, NULL, 0))
- pr_err("Deinit failed for hw\n");
+ CAM_ERR(CAM_CDM, "Deinit failed for hw");
cdm_hw->open_count--;
-init_failed:
+cpas_stop:
+ if (cam_cpas_stop(cdm_core->cpas_handle))
+ CAM_ERR(CAM_CDM, "CPAS stop failed");
+cpas_unregister:
if (cam_cpas_unregister_client(cdm_core->cpas_handle))
- pr_err("CPAS unregister failed\n");
+ CAM_ERR(CAM_CDM, "CPAS unregister failed");
release_platform_resource:
if (cam_soc_util_release_platform_resource(&cdm_hw->soc_info))
- pr_err("Release platform resource failed\n");
+ CAM_ERR(CAM_CDM, "Release platform resource failed");
flush_workqueue(cdm_core->work_queue);
destroy_workqueue(cdm_core->work_queue);
@@ -932,7 +1047,7 @@
cam_smmu_reg_client_page_fault_handler(cdm_core->iommu_hdl.non_secure,
NULL, cdm_hw);
if (cam_smmu_destroy_handle(cdm_core->iommu_hdl.non_secure))
- pr_err("Release iommu secure hdl failed\n");
+ CAM_ERR(CAM_CDM, "Release iommu secure hdl failed");
unlock_release_mem:
mutex_unlock(&cdm_hw->hw_mutex);
release_private_mem:
@@ -954,26 +1069,28 @@
cdm_hw_intf = platform_get_drvdata(pdev);
if (!cdm_hw_intf) {
- pr_err("Failed to get dev private data\n");
+ CAM_ERR(CAM_CDM, "Failed to get dev private data");
return rc;
}
cdm_hw = cdm_hw_intf->hw_priv;
if (!cdm_hw) {
- pr_err("Failed to get hw private data for type=%d idx=%d\n",
+ CAM_ERR(CAM_CDM,
+ "Failed to get hw private data for type=%d idx=%d",
cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx);
return rc;
}
cdm_core = cdm_hw->core_info;
if (!cdm_core) {
- pr_err("Failed to get hw core data for type=%d idx=%d\n",
+ CAM_ERR(CAM_CDM,
+ "Failed to get hw core data for type=%d idx=%d",
cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx);
return rc;
}
if (cdm_hw->open_count != 0) {
- pr_err("Hw open count invalid type=%d idx=%d cnt=%d\n",
+ CAM_ERR(CAM_CDM, "Hw open count invalid type=%d idx=%d cnt=%d",
cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx,
cdm_hw->open_count);
return rc;
@@ -981,24 +1098,24 @@
rc = cam_hw_cdm_deinit(cdm_hw, NULL, 0);
if (rc) {
- pr_err("Deinit failed for hw\n");
+ CAM_ERR(CAM_CDM, "Deinit failed for hw");
return rc;
}
rc = cam_cpas_unregister_client(cdm_core->cpas_handle);
if (rc) {
- pr_err("CPAS unregister failed\n");
+ CAM_ERR(CAM_CDM, "CPAS unregister failed");
return rc;
}
if (cam_soc_util_release_platform_resource(&cdm_hw->soc_info))
- pr_err("Release platform resource failed\n");
+ CAM_ERR(CAM_CDM, "Release platform resource failed");
flush_workqueue(cdm_core->work_queue);
destroy_workqueue(cdm_core->work_queue);
if (cam_smmu_destroy_handle(cdm_core->iommu_hdl.non_secure))
- pr_err("Release iommu secure hdl failed\n");
+ CAM_ERR(CAM_CDM, "Release iommu secure hdl failed");
cam_smmu_reg_client_page_fault_handler(cdm_core->iommu_hdl.non_secure,
NULL, cdm_hw);
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf.c
index b1b2117..fa98be2 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf.c
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf.c
@@ -10,8 +10,6 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "CAM-CDM-INTF %s:%d " fmt, __func__, __LINE__
-
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -41,10 +39,10 @@
mutex_lock(&cam_cdm_mgr_lock);
if (cdm_mgr.probe_done == false) {
- pr_err("CDM intf mgr not probed yet\n");
- rc = -1;
+ CAM_ERR(CAM_CDM, "CDM intf mgr not probed yet");
+ rc = -EPERM;
} else {
- CDM_CDBG("CDM intf mgr get refcount=%d\n",
+ CAM_DBG(CAM_CDM, "CDM intf mgr get refcount=%d",
cdm_mgr.refcount);
cdm_mgr.refcount++;
}
@@ -56,14 +54,14 @@
{
mutex_lock(&cam_cdm_mgr_lock);
if (cdm_mgr.probe_done == false) {
- pr_err("CDM intf mgr not probed yet\n");
+ CAM_ERR(CAM_CDM, "CDM intf mgr not probed yet");
} else {
- CDM_CDBG("CDM intf mgr put refcount=%d\n",
+ CAM_DBG(CAM_CDM, "CDM intf mgr put refcount=%d",
cdm_mgr.refcount);
if (cdm_mgr.refcount > 0) {
cdm_mgr.refcount--;
} else {
- pr_err("Refcount put when zero\n");
+ CAM_ERR(CAM_CDM, "Refcount put when zero");
WARN_ON(1);
}
}
@@ -73,7 +71,7 @@
static int get_cdm_iommu_handle(struct cam_iommu_handle *cdm_handles,
uint32_t hw_idx)
{
- int rc = -1;
+ int rc = -EPERM;
struct cam_hw_intf *hw = cdm_mgr.nodes[hw_idx].device;
if (hw->hw_ops.get_hw_caps) {
@@ -87,23 +85,23 @@
static int get_cdm_index_by_id(char *identifier,
uint32_t cell_index, uint32_t *hw_index)
{
- int rc = -1, i, j;
+ int rc = -EPERM, i, j;
char client_name[128];
- CDM_CDBG("Looking for HW id of =%s and index=%d\n",
+ CAM_DBG(CAM_CDM, "Looking for HW id of =%s and index=%d",
identifier, cell_index);
snprintf(client_name, sizeof(client_name), "%s", identifier);
- CDM_CDBG("Looking for HW id of %s count:%d\n", client_name,
+ CAM_DBG(CAM_CDM, "Looking for HW id of %s count:%d", client_name,
cdm_mgr.cdm_count);
mutex_lock(&cam_cdm_mgr_lock);
for (i = 0; i < cdm_mgr.cdm_count; i++) {
mutex_lock(&cdm_mgr.nodes[i].lock);
- CDM_CDBG("dt_num_supported_clients=%d\n",
+ CAM_DBG(CAM_CDM, "dt_num_supported_clients=%d",
cdm_mgr.nodes[i].data->dt_num_supported_clients);
for (j = 0; j <
cdm_mgr.nodes[i].data->dt_num_supported_clients; j++) {
- CDM_CDBG("client name:%s\n",
+ CAM_DBG(CAM_CDM, "client name:%s",
cdm_mgr.nodes[i].data->dt_cdm_client_name[j]);
if (!strcmp(
cdm_mgr.nodes[i].data->dt_cdm_client_name[j],
@@ -125,16 +123,16 @@
int cam_cdm_get_iommu_handle(char *identifier,
struct cam_iommu_handle *cdm_handles)
{
- int i, j, rc = -1;
+ int i, j, rc = -EPERM;
if ((!identifier) || (!cdm_handles))
return -EINVAL;
if (get_cdm_mgr_refcount()) {
- pr_err("CDM intf mgr get refcount failed\n");
+ CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
return rc;
}
- CDM_CDBG("Looking for Iommu handle of %s\n", identifier);
+ CAM_DBG(CAM_CDM, "Looking for Iommu handle of %s", identifier);
for (i = 0; i < cdm_mgr.cdm_count; i++) {
mutex_lock(&cdm_mgr.nodes[i].lock);
@@ -164,7 +162,7 @@
int cam_cdm_acquire(struct cam_cdm_acquire_data *data)
{
- int rc = -1;
+ int rc = -EPERM;
struct cam_hw_intf *hw;
uint32_t hw_index = 0;
@@ -173,39 +171,41 @@
return -EINVAL;
if (get_cdm_mgr_refcount()) {
- pr_err("CDM intf mgr get refcount failed\n");
+ CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
return rc;
}
if (data->id > CAM_CDM_HW_ANY) {
- pr_err("only CAM_CDM_VIRTUAL/CAM_CDM_HW_ANY is supported\n");
- rc = -1;
+ CAM_ERR(CAM_CDM,
+ "only CAM_CDM_VIRTUAL/CAM_CDM_HW_ANY is supported");
+ rc = -EPERM;
goto end;
}
rc = get_cdm_index_by_id(data->identifier, data->cell_index,
&hw_index);
if ((rc < 0) && (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM)) {
- pr_err("Failed to identify associated hw id\n");
+ CAM_ERR(CAM_CDM, "Failed to identify associated hw id");
goto end;
} else {
- CDM_CDBG("hw_index:%d\n", hw_index);
+ CAM_DBG(CAM_CDM, "hw_index:%d", hw_index);
hw = cdm_mgr.nodes[hw_index].device;
if (hw && hw->hw_ops.process_cmd) {
rc = hw->hw_ops.process_cmd(hw->hw_priv,
CAM_CDM_HW_INTF_CMD_ACQUIRE, data,
sizeof(struct cam_cdm_acquire_data));
if (rc < 0) {
- pr_err("CDM hw acquire failed\n");
+ CAM_ERR(CAM_CDM, "CDM hw acquire failed");
goto end;
}
} else {
- pr_err("idx %d doesn't have acquire ops\n", hw_index);
- rc = -1;
+ CAM_ERR(CAM_CDM, "idx %d doesn't have acquire ops",
+ hw_index);
+ rc = -EPERM;
}
}
end:
if (rc < 0) {
- pr_err("CDM acquire failed for id=%d name=%s, idx=%d\n",
+ CAM_ERR(CAM_CDM, "CDM acquire failed for id=%d name=%s, idx=%d",
data->id, data->identifier, data->cell_index);
put_cdm_mgr_refcount();
}
@@ -216,11 +216,11 @@
int cam_cdm_release(uint32_t handle)
{
uint32_t hw_index;
- int rc = -1;
+ int rc = -EPERM;
struct cam_hw_intf *hw;
if (get_cdm_mgr_refcount()) {
- pr_err("CDM intf mgr get refcount failed\n");
+ CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
return rc;
}
@@ -232,10 +232,11 @@
CAM_CDM_HW_INTF_CMD_RELEASE, &handle,
sizeof(handle));
if (rc < 0)
- pr_err("hw release failed for handle=%x\n",
+ CAM_ERR(CAM_CDM,
+ "hw release failed for handle=%x",
handle);
} else
- pr_err("hw idx %d doesn't have release ops\n",
+ CAM_ERR(CAM_CDM, "hw idx %d doesn't have release ops",
hw_index);
}
put_cdm_mgr_refcount();
@@ -250,14 +251,15 @@
int cam_cdm_submit_bls(uint32_t handle, struct cam_cdm_bl_request *data)
{
uint32_t hw_index;
- int rc = -1;
+ int rc = -EINVAL;
struct cam_hw_intf *hw;
if (!data)
return rc;
if (get_cdm_mgr_refcount()) {
- pr_err("CDM intf mgr get refcount failed\n");
+ CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
+ rc = -EPERM;
return rc;
}
@@ -273,10 +275,11 @@
CAM_CDM_HW_INTF_CMD_SUBMIT_BL, &req,
sizeof(struct cam_cdm_hw_intf_cmd_submit_bl));
if (rc < 0)
- pr_err("hw submit bl failed for handle=%x\n",
+ CAM_ERR(CAM_CDM,
+ "hw submit bl failed for handle=%x",
handle);
} else {
- pr_err("hw idx %d doesn't have submit ops\n",
+ CAM_ERR(CAM_CDM, "hw idx %d doesn't have submit ops",
hw_index);
}
}
@@ -289,11 +292,12 @@
int cam_cdm_stream_on(uint32_t handle)
{
uint32_t hw_index;
- int rc = -1;
+ int rc = -EINVAL;
struct cam_hw_intf *hw;
if (get_cdm_mgr_refcount()) {
- pr_err("CDM intf mgr get refcount failed\n");
+ CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
+ rc = -EPERM;
return rc;
}
@@ -304,10 +308,12 @@
rc = hw->hw_ops.start(hw->hw_priv, &handle,
sizeof(uint32_t));
if (rc < 0)
- pr_err("hw start failed handle=%x\n",
+ CAM_ERR(CAM_CDM,
+ "hw start failed handle=%x",
handle);
} else {
- pr_err("hw idx %d doesn't have start ops\n",
+ CAM_ERR(CAM_CDM,
+ "hw idx %d doesn't have start ops",
hw_index);
}
}
@@ -320,11 +326,12 @@
int cam_cdm_stream_off(uint32_t handle)
{
uint32_t hw_index;
- int rc = -1;
+ int rc = -EINVAL;
struct cam_hw_intf *hw;
if (get_cdm_mgr_refcount()) {
- pr_err("CDM intf mgr get refcount failed\n");
+ CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
+ rc = -EPERM;
return rc;
}
@@ -335,10 +342,10 @@
rc = hw->hw_ops.stop(hw->hw_priv, &handle,
sizeof(uint32_t));
if (rc < 0)
- pr_err("hw stop failed handle=%x\n",
+ CAM_ERR(CAM_CDM, "hw stop failed handle=%x",
handle);
} else {
- pr_err("hw idx %d doesn't have stop ops\n",
+ CAM_ERR(CAM_CDM, "hw idx %d doesn't have stop ops",
hw_index);
}
}
@@ -351,11 +358,12 @@
int cam_cdm_reset_hw(uint32_t handle)
{
uint32_t hw_index;
- int rc = -1;
+ int rc = -EINVAL;
struct cam_hw_intf *hw;
if (get_cdm_mgr_refcount()) {
- pr_err("CDM intf mgr get refcount failed\n");
+ CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
+ rc = -EPERM;
return rc;
}
@@ -367,10 +375,11 @@
CAM_CDM_HW_INTF_CMD_RESET_HW, &handle,
sizeof(handle));
if (rc < 0)
- pr_err("CDM hw release failed for handle=%x\n",
+ CAM_ERR(CAM_CDM,
+ "CDM hw release failed for handle=%x",
handle);
} else {
- pr_err("hw idx %d doesn't have release ops\n",
+ CAM_ERR(CAM_CDM, "hw idx %d doesn't have release ops",
hw_index);
}
}
@@ -390,7 +399,7 @@
return rc;
if (get_cdm_mgr_refcount()) {
- pr_err("CDM intf mgr get refcount failed\n");
+ CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
return rc;
}
@@ -413,7 +422,7 @@
cdm_mgr.cdm_count++;
rc = 0;
} else {
- pr_err("CDM registration failed type=%d count=%d\n",
+ CAM_ERR(CAM_CDM, "CDM registration failed type=%d count=%d",
type, cdm_mgr.cdm_count);
}
mutex_unlock(&cam_cdm_mgr_lock);
@@ -426,13 +435,14 @@
struct cam_cdm_private_dt_data *data, enum cam_cdm_type type,
uint32_t index)
{
- int rc = -1;
+ int rc = -EINVAL;
if ((!hw) || (!data))
- return -EINVAL;
+ return rc;
if (get_cdm_mgr_refcount()) {
- pr_err("CDM intf mgr get refcount failed\n");
+ CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
+ rc = -EPERM;
return rc;
}
@@ -454,7 +464,7 @@
cdm_mgr.cdm_count--;
rc = 0;
} else {
- pr_err("CDM Deregistration failed type=%d index=%d\n",
+ CAM_ERR(CAM_CDM, "CDM Deregistration failed type=%d index=%d",
type, index);
}
mutex_unlock(&cam_cdm_mgr_lock);
@@ -469,7 +479,7 @@
rc = cam_cdm_intf_mgr_soc_get_dt_properties(pdev, &cdm_mgr);
if (rc) {
- pr_err("Failed to get dt properties\n");
+ CAM_ERR(CAM_CDM, "Failed to get dt properties");
return rc;
}
mutex_lock(&cam_cdm_mgr_lock);
@@ -489,7 +499,8 @@
for (i = 0 ; i < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM; i++) {
if (cdm_mgr.nodes[i].device || cdm_mgr.nodes[i].data ||
(cdm_mgr.nodes[i].refcount != 0))
- pr_err("Valid node present in index=%d\n", i);
+ CAM_ERR(CAM_CDM,
+ "Valid node present in index=%d", i);
mutex_destroy(&cdm_mgr.nodes[i].lock);
cdm_mgr.nodes[i].device = NULL;
cdm_mgr.nodes[i].data = NULL;
@@ -506,19 +517,19 @@
int i, rc = -EBUSY;
if (get_cdm_mgr_refcount()) {
- pr_err("CDM intf mgr get refcount failed\n");
+ CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
return rc;
}
if (cam_virtual_cdm_remove(pdev)) {
- pr_err("Virtual CDM remove failed\n");
+ CAM_ERR(CAM_CDM, "Virtual CDM remove failed");
goto end;
}
put_cdm_mgr_refcount();
mutex_lock(&cam_cdm_mgr_lock);
if (cdm_mgr.refcount != 0) {
- pr_err("cdm manger refcount not zero %d\n",
+ CAM_ERR(CAM_CDM, "cdm manger refcount not zero %d",
cdm_mgr.refcount);
goto end;
}
@@ -526,7 +537,7 @@
for (i = 0 ; i < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM; i++) {
if (cdm_mgr.nodes[i].device || cdm_mgr.nodes[i].data ||
(cdm_mgr.nodes[i].refcount != 0)) {
- pr_err("Valid node present in index=%d\n", i);
+ CAM_ERR(CAM_CDM, "Valid node present in index=%d", i);
mutex_unlock(&cam_cdm_mgr_lock);
goto end;
}
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_soc.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_soc.c
index 0f5458c..f8b0d3d 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_soc.c
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_soc.c
@@ -10,8 +10,6 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "CAM-CDM-SOC %s:%d " fmt, __func__, __LINE__
-
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -40,27 +38,29 @@
resource_size_t mem_len =
cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].size;
- CDM_CDBG("E: b=%pK blen=%d reg=%x off=%x\n", (void *)base,
+ CAM_DBG(CAM_CDM, "E: b=%pK blen=%d reg=%x off=%x", (void __iomem *)base,
(int)mem_len, reg, (CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl,
reg)));
- CDM_CDBG("E: b=%pK reg=%x off=%x\n", (void *)base,
+ CAM_DBG(CAM_CDM, "E: b=%pK reg=%x off=%x", (void __iomem *)base,
reg, (CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl, reg)));
if ((reg > cdm->offset_tbl->offset_max_size) ||
(reg > cdm->offset_tbl->last_offset)) {
- pr_err("CDM accessing invalid reg=%d\n", reg);
+ CAM_ERR_RATE_LIMIT(CAM_CDM, "Invalid reg=%d\n", reg);
goto permission_error;
} else {
reg_addr = (base + (CAM_CDM_OFFSET_FROM_REG(
cdm->offset_tbl, reg)));
if (reg_addr > (base + mem_len)) {
- pr_err("accessing invalid mapped region %d\n", reg);
+ CAM_ERR_RATE_LIMIT(CAM_CDM,
+ "Invalid mapped region %d", reg);
goto permission_error;
}
*value = cam_io_r_mb(reg_addr);
- CDM_CDBG("X b=%pK reg=%x off=%x val=%x\n",
- (void *)base, reg, (CAM_CDM_OFFSET_FROM_REG(
- cdm->offset_tbl, reg)), *value);
+ CAM_DBG(CAM_CDM, "X b=%pK reg=%x off=%x val=%x",
+ (void __iomem *)base, reg,
+ (CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl, reg)),
+ *value);
return false;
}
permission_error:
@@ -79,18 +79,20 @@
resource_size_t mem_len =
cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].size;
- CDM_CDBG("E: b=%pK reg=%x off=%x val=%x\n", (void *)base,
+ CAM_DBG(CAM_CDM, "E: b=%pK reg=%x off=%x val=%x", (void __iomem *)base,
reg, (CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl, reg)), value);
if ((reg > cdm->offset_tbl->offset_max_size) ||
(reg > cdm->offset_tbl->last_offset)) {
- pr_err("CDM accessing invalid reg=%d\n", reg);
+ CAM_ERR_RATE_LIMIT(CAM_CDM, "CDM accessing invalid reg=%d\n",
+ reg);
goto permission_error;
} else {
reg_addr = (base + CAM_CDM_OFFSET_FROM_REG(
cdm->offset_tbl, reg));
if (reg_addr > (base + mem_len)) {
- pr_err("Accessing invalid region %d:%d\n",
+ CAM_ERR_RATE_LIMIT(CAM_CDM,
+ "Accessing invalid region %d:%d\n",
reg, (CAM_CDM_OFFSET_FROM_REG(
cdm->offset_tbl, reg)));
goto permission_error;
@@ -106,22 +108,22 @@
int cam_cdm_soc_load_dt_private(struct platform_device *pdev,
struct cam_cdm_private_dt_data *ptr)
{
- int i, rc = -1;
+ int i, rc = -EINVAL;
ptr->dt_num_supported_clients = of_property_count_strings(
pdev->dev.of_node,
"cdm-client-names");
- CDM_CDBG("Num supported cdm_client = %d\n",
+ CAM_DBG(CAM_CDM, "Num supported cdm_client = %d",
ptr->dt_num_supported_clients);
if (ptr->dt_num_supported_clients >
CAM_PER_CDM_MAX_REGISTERED_CLIENTS) {
- pr_err("Invalid count of client names count=%d\n",
+ CAM_ERR(CAM_CDM, "Invalid count of client names count=%d",
ptr->dt_num_supported_clients);
rc = -EINVAL;
return rc;
}
if (ptr->dt_num_supported_clients < 0) {
- CDM_CDBG("No cdm client names found\n");
+ CAM_DBG(CAM_CDM, "No cdm client names found");
ptr->dt_num_supported_clients = 0;
ptr->dt_cdm_shared = false;
} else {
@@ -130,10 +132,10 @@
for (i = 0; i < ptr->dt_num_supported_clients; i++) {
rc = of_property_read_string_index(pdev->dev.of_node,
"cdm-client-names", i, &(ptr->dt_cdm_client_name[i]));
- CDM_CDBG("cdm-client-names[%d] = %s\n", i,
+ CAM_DBG(CAM_CDM, "cdm-client-names[%d] = %s", i,
ptr->dt_cdm_client_name[i]);
if (rc < 0) {
- pr_err("Reading cdm-client-names failed\n");
+ CAM_ERR(CAM_CDM, "Reading cdm-client-names failed");
break;
}
}
@@ -156,7 +158,7 @@
rc = cam_soc_util_get_dt_properties(soc_ptr);
if (rc != 0) {
- pr_err("Failed to retrieve the CDM dt properties\n");
+ CAM_ERR(CAM_CDM, "Failed to retrieve the CDM dt properties");
} else {
soc_ptr->soc_private = kzalloc(
sizeof(struct cam_cdm_private_dt_data),
@@ -167,15 +169,15 @@
rc = cam_cdm_soc_load_dt_private(soc_ptr->pdev,
soc_ptr->soc_private);
if (rc != 0) {
- pr_err("Failed to load CDM dt private data\n");
+ CAM_ERR(CAM_CDM, "Failed to load CDM dt private data");
goto error;
}
id = of_match_node(table, soc_ptr->pdev->dev.of_node);
if ((!id) || !(id->data)) {
- pr_err("Failed to retrieve the CDM id table\n");
+ CAM_ERR(CAM_CDM, "Failed to retrieve the CDM id table");
goto error;
}
- CDM_CDBG("CDM Hw Id compatible =%s\n", id->compatible);
+ CAM_DBG(CAM_CDM, "CDM Hw Id compatible =%s", id->compatible);
((struct cam_cdm *)cdm_hw->core_info)->offset_tbl =
(struct cam_cdm_reg_offset_table *)id->data;
strlcpy(((struct cam_cdm *)cdm_hw->core_info)->name,
@@ -186,7 +188,7 @@
return rc;
error:
- rc = -1;
+ rc = -EINVAL;
kfree(soc_ptr->soc_private);
soc_ptr->soc_private = NULL;
return rc;
@@ -199,7 +201,8 @@
rc = of_property_read_u32(pdev->dev.of_node,
"num-hw-cdm", &mgr->dt_supported_hw_cdm);
- CDM_CDBG("Number of HW cdm supported =%d\n", mgr->dt_supported_hw_cdm);
+ CAM_DBG(CAM_CDM, "Number of HW cdm supported =%d",
+ mgr->dt_supported_hw_cdm);
return rc;
}
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.c
index 3d258b4..c8b830f 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.c
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.c
@@ -10,8 +10,6 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "CAM-CDM-UTIL %s:%d " fmt, __func__, __LINE__
-
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -245,7 +243,7 @@
*dst++ = *src++;
}
- return pCmdBuffer;
+ return dst;
}
uint32_t *cdm_write_dmi(uint32_t *pCmdBuffer, uint8_t dmiCmd,
@@ -323,11 +321,11 @@
struct cam_soc_reg_map *base_table[CAM_SOC_MAX_BLOCK],
void __iomem **device_base)
{
- int ret = -1, i;
+ int ret = -EINVAL, i;
for (i = 0; i < base_array_size; i++) {
if (base_table[i])
- CDM_CDBG("In loop %d ioremap for %x addr=%x\n",
+ CAM_DBG(CAM_CDM, "In loop %d ioremap for %x addr=%x",
i, (base_table[i])->mem_cam_base, hw_base);
if ((base_table[i]) &&
((base_table[i])->mem_cam_base == hw_base)) {
@@ -349,7 +347,7 @@
if ((cmd_buf_size < cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT)) ||
(!base_addr)) {
- pr_err(" invalid base addr and data length %d %pK\n",
+ CAM_ERR(CAM_CDM, "invalid base addr and data length %d %pK",
cmd_buf_size, base_addr);
return -EINVAL;
}
@@ -359,7 +357,7 @@
(((reg_cont->count * sizeof(uint32_t)) +
cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT)) >
cmd_buf_size)) {
- pr_err(" buffer size %d is not sufficient for count%d\n",
+ CAM_ERR(CAM_CDM, "buffer size %d is not sufficient for count%d",
cmd_buf_size, reg_cont->count);
return -EINVAL;
}
@@ -381,7 +379,7 @@
uint32_t *data;
if (!base_addr) {
- pr_err("invalid base address\n");
+ CAM_ERR(CAM_CDM, "invalid base address");
return -EINVAL;
}
@@ -390,15 +388,16 @@
(((reg_random->count * (sizeof(uint32_t) * 2)) +
cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM)) >
cmd_buf_size)) {
- pr_err("invalid reg_count %d cmd_buf_size %d\n",
+ CAM_ERR(CAM_CDM, "invalid reg_count %d cmd_buf_size %d",
reg_random->count, cmd_buf_size);
return -EINVAL;
}
data = cmd_buf + cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM);
for (i = 0; i < reg_random->count; i++) {
- CDM_DUMP_CDBG("reg random: offset 0x%llx, value 0x%x\n",
- ((uint64_t) base_addr + data[0]), data[1]);
+ CAM_DBG(CAM_CDM, "reg random: offset %pK, value 0x%x",
+ ((void __iomem *)(base_addr + data[0])),
+ data[1]);
cam_io_w(data[1], base_addr + data[0]);
data += 2;
}
@@ -420,7 +419,8 @@
swd_dmi = (struct cdm_dmi_cmd *)cmd_buf;
if (cmd_buf_size < (cdm_required_size_dmi() + swd_dmi->length + 1)) {
- pr_err("invalid CDM_SWD_DMI length %d\n", swd_dmi->length + 1);
+ CAM_ERR(CAM_CDM, "invalid CDM_SWD_DMI length %d",
+ swd_dmi->length + 1);
return -EINVAL;
}
data = cmd_buf + cdm_required_size_dmi();
@@ -457,7 +457,7 @@
total_cmd_buf_size = cmd_buf_size;
while (cmd_buf_size > 0) {
- CDM_CDBG("cmd data=%x\n", *cmd_buf);
+ CAM_DBG(CAM_CDM, "cmd data=%x", *cmd_buf);
cdm_cmd_type = (*cmd_buf >> CAM_CDM_COMMAND_OFFSET);
switch (cdm_cmd_type) {
case CAM_CDM_CMD_REG_CONT: {
@@ -488,7 +488,8 @@
case CAM_CDM_CMD_SWD_DMI_32:
case CAM_CDM_CMD_SWD_DMI_64: {
if (*current_device_base == 0) {
- pr_err("Got SWI DMI cmd =%d for invalid hw\n",
+ CAM_ERR(CAM_CDM,
+ "Got SWI DMI cmd =%d for invalid hw",
cdm_cmd_type);
ret = -EINVAL;
break;
@@ -513,11 +514,12 @@
change_base_cmd->base, base_array_size,
base_table, current_device_base);
if (ret != 0) {
- pr_err("Get ioremap change base failed %x\n",
+ CAM_ERR(CAM_CDM,
+ "Get ioremap change base failed %x",
change_base_cmd->base);
break;
}
- CDM_CDBG("Got ioremap for %x addr=%pK\n",
+ CAM_DBG(CAM_CDM, "Got ioremap for %x addr=%pK",
change_base_cmd->base,
current_device_base);
cmd_buf_size -= (4 *
@@ -526,7 +528,7 @@
}
break;
default:
- pr_err(" unsupported cdm_cmd_type type 0%x\n",
+ CAM_ERR(CAM_CDM, "unsupported cdm_cmd_type type 0%x",
cdm_cmd_type);
ret = -EINVAL;
break;
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual_core.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual_core.c
index e34bfc2..b230d4e 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual_core.c
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual_core.c
@@ -10,8 +10,6 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "CAM-CDM-VIRTUAL %s:%d " fmt, __func__, __LINE__
-
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -46,7 +44,7 @@
if (payload->irq_status & 0x2) {
struct cam_cdm_bl_cb_request_entry *node;
- CDM_CDBG("CDM HW Gen/inline IRQ with data=%x\n",
+ CAM_DBG(CAM_CDM, "CDM HW Gen/inline IRQ with data=%x",
payload->irq_data);
mutex_lock(&cdm_hw->hw_mutex);
node = cam_cdm_find_request_by_bl_tag(
@@ -60,18 +58,18 @@
(void *)node);
} else if (node->request_type ==
CAM_HW_CDM_BL_CB_INTERNAL) {
- pr_err("Invalid node=%pK %d\n", node,
- node->request_type);
+ CAM_ERR(CAM_CDM, "Invalid node=%pK %d",
+ node, node->request_type);
}
list_del_init(&node->entry);
kfree(node);
} else {
- pr_err("Invalid node for inline irq\n");
+ CAM_ERR(CAM_CDM, "Invalid node for inline irq");
}
mutex_unlock(&cdm_hw->hw_mutex);
}
if (payload->irq_status & 0x1) {
- CDM_CDBG("CDM HW reset done IRQ\n");
+ CAM_DBG(CAM_CDM, "CDM HW reset done IRQ");
complete(&core->reset_complete);
}
kfree(payload);
@@ -83,7 +81,7 @@
struct cam_cdm_hw_intf_cmd_submit_bl *req,
struct cam_cdm_client *client)
{
- int i, rc = -1;
+ int i, rc = -EINVAL;
struct cam_cdm_bl_request *cdm_cmd = req->data;
struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;
@@ -94,10 +92,11 @@
if ((!cdm_cmd->cmd[i].len) &&
(cdm_cmd->cmd[i].len > 0x100000)) {
- pr_err("len(%d) is invalid count=%d total cnt=%d\n",
+ CAM_ERR(CAM_CDM,
+ "len(%d) is invalid count=%d total cnt=%d",
cdm_cmd->cmd[i].len, i,
req->data->cmd_arrary_count);
- rc = -1;
+ rc = -EINVAL;
break;
}
if (req->data->type == CAM_CDM_BL_CMD_TYPE_MEM_HANDLE) {
@@ -111,15 +110,17 @@
(uint64_t)cdm_cmd->cmd[i].bl_addr.kernel_iova;
len = cdm_cmd->cmd[i].offset + cdm_cmd->cmd[i].len;
} else {
- pr_err("Only mem hdl/Kernel va type is supported %d\n",
+ CAM_ERR(CAM_CDM,
+ "Only mem hdl/Kernel va type is supported %d",
req->data->type);
- rc = -1;
+ rc = -EINVAL;
break;
}
if ((!rc) && (vaddr_ptr) && (len) &&
(len >= cdm_cmd->cmd[i].offset)) {
- CDM_CDBG("hdl=%x vaddr=%pK offset=%d cmdlen=%d:%zu\n",
+ CAM_DBG(CAM_CDM,
+ "hdl=%x vaddr=%pK offset=%d cmdlen=%d:%zu",
cdm_cmd->cmd[i].bl_addr.mem_handle,
(void *)vaddr_ptr, cdm_cmd->cmd[i].offset,
cdm_cmd->cmd[i].len, len);
@@ -130,23 +131,26 @@
cdm_cmd->cmd[i].len, client->data.base_array,
client->data.base_array_cnt, core->bl_tag);
if (rc) {
- pr_err("write failed for cnt=%d:%d\n",
+ CAM_ERR(CAM_CDM, "write failed for cnt=%d:%d",
i, req->data->cmd_arrary_count);
break;
}
} else {
- pr_err("Sanity check failed for hdl=%x len=%zu:%d\n",
+ CAM_ERR(CAM_CDM,
+ "Sanity check failed for hdl=%x len=%zu:%d",
cdm_cmd->cmd[i].bl_addr.mem_handle, len,
cdm_cmd->cmd[i].offset);
- pr_err("Sanity check failed for cmd_count=%d cnt=%d\n",
+ CAM_ERR(CAM_CDM,
+ "Sanity check failed for cmd_count=%d cnt=%d",
i, req->data->cmd_arrary_count);
- rc = -1;
+ rc = -EINVAL;
break;
}
if (!rc) {
struct cam_cdm_work_payload *payload;
- CDM_CDBG("write BL success for cnt=%d with tag=%d\n",
+ CAM_DBG(CAM_CDM,
+ "write BL success for cnt=%d with tag=%d",
i, core->bl_tag);
if ((true == req->data->flag) &&
(i == req->data->cmd_arrary_count)) {
@@ -184,7 +188,8 @@
}
}
core->bl_tag++;
- CDM_CDBG("Now commit the BL nothing for virtual\n");
+ CAM_DBG(CAM_CDM,
+ "Now commit the BL nothing for virtual");
if (!rc && (core->bl_tag == 63))
core->bl_tag = 0;
}
@@ -229,9 +234,8 @@
}
rc = cam_cdm_soc_load_dt_private(pdev, cdm_hw->soc_info.soc_private);
- if (rc != 0) {
- pr_err("Failed to load CDM dt private data\n");
- rc = -1;
+ if (rc) {
+ CAM_ERR(CAM_CDM, "Failed to load CDM dt private data");
kfree(cdm_hw->soc_info.soc_private);
cdm_hw->soc_info.soc_private = NULL;
goto soc_load_failed;
@@ -258,7 +262,7 @@
cdm_hw_intf->hw_ops.write = NULL;
cdm_hw_intf->hw_ops.process_cmd = cam_cdm_process_cmd;
- CDM_CDBG("type %d index %d\n", cdm_hw_intf->hw_type,
+ CAM_DBG(CAM_CDM, "type %d index %d", cdm_hw_intf->hw_type,
cdm_hw_intf->hw_idx);
platform_set_drvdata(pdev, cdm_hw_intf);
@@ -286,22 +290,23 @@
CAM_HW_IDENTIFIER_LENGTH);
rc = cam_cpas_register_client(&cpas_parms);
if (rc) {
- pr_err("Virtual CDM CPAS registration failed\n");
+ CAM_ERR(CAM_CDM, "Virtual CDM CPAS registration failed");
goto cpas_registration_failed;
}
- CDM_CDBG("CPAS registration successful handle=%d\n",
+ CAM_DBG(CAM_CDM, "CPAS registration successful handle=%d",
cpas_parms.client_handle);
cdm_core->cpas_handle = cpas_parms.client_handle;
- CDM_CDBG("CDM%d probe successful\n", cdm_hw_intf->hw_idx);
+ CAM_DBG(CAM_CDM, "CDM%d probe successful", cdm_hw_intf->hw_idx);
rc = cam_cdm_intf_register_hw_cdm(cdm_hw_intf,
soc_private, CAM_VIRTUAL_CDM, &cdm_core->index);
if (rc) {
- pr_err("Virtual CDM Interface registration failed\n");
+ CAM_ERR(CAM_CDM, "Virtual CDM Interface registration failed");
goto intf_registration_failed;
}
- CDM_CDBG("CDM%d registered to intf successful\n", cdm_hw_intf->hw_idx);
+ CAM_DBG(CAM_CDM, "CDM%d registered to intf successful",
+ cdm_hw_intf->hw_idx);
mutex_unlock(&cdm_hw->hw_mutex);
return 0;
@@ -329,27 +334,29 @@
cdm_hw_intf = platform_get_drvdata(pdev);
if (!cdm_hw_intf) {
- pr_err("Failed to get dev private data\n");
+ CAM_ERR(CAM_CDM, "Failed to get dev private data");
return rc;
}
cdm_hw = cdm_hw_intf->hw_priv;
if (!cdm_hw) {
- pr_err("Failed to get virtual private data for type=%d idx=%d\n",
+ CAM_ERR(CAM_CDM,
+ "Failed to get virtual private data for type=%d idx=%d",
cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx);
return rc;
}
cdm_core = cdm_hw->core_info;
if (!cdm_core) {
- pr_err("Failed to get virtual core data for type=%d idx=%d\n",
+ CAM_ERR(CAM_CDM,
+ "Failed to get virtual core data for type=%d idx=%d",
cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx);
return rc;
}
rc = cam_cpas_unregister_client(cdm_core->cpas_handle);
if (rc) {
- pr_err("CPAS unregister failed\n");
+ CAM_ERR(CAM_CDM, "CPAS unregister failed");
return rc;
}
@@ -357,7 +364,8 @@
cdm_hw->soc_info.soc_private, CAM_VIRTUAL_CDM,
cdm_core->index);
if (rc) {
- pr_err("Virtual CDM Interface de-registration failed\n");
+ CAM_ERR(CAM_CDM,
+ "Virtual CDM Interface de-registration failed");
return rc;
}
diff --git a/drivers/media/platform/msm/camera/cam_core/Makefile b/drivers/media/platform/msm/camera/cam_core/Makefile
index 60f94d1..6fb1200 100644
--- a/drivers/media/platform/msm/camera/cam_core/Makefile
+++ b/drivers/media/platform/msm/camera/cam_core/Makefile
@@ -1,4 +1,6 @@
ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
obj-$(CONFIG_SPECTRA_CAMERA) += cam_context.o cam_context_utils.o cam_node.o cam_subdev.o
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.c b/drivers/media/platform/msm/camera/cam_core/cam_context.c
index 17b3c7c..8f625ae 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
#include "cam_context.h"
+#include "cam_debug_util.h"
static int cam_context_handle_hw_event(void *context, uint32_t evt_id,
void *evt_data)
@@ -21,7 +22,7 @@
struct cam_context *ctx = (struct cam_context *)context;
if (!ctx || !ctx->state_machine) {
- pr_err("%s: Context is not ready.\n", __func__);
+ CAM_ERR(CAM_CORE, "Context is not ready");
return -EINVAL;
}
@@ -29,23 +30,24 @@
rc = ctx->state_machine[ctx->state].irq_ops(ctx, evt_id,
evt_data);
else
- pr_debug("%s: No function to handle event %d in dev %d, state %d\n",
- __func__, evt_id, ctx->dev_hdl, ctx->state);
+ CAM_DBG(CAM_CORE,
+ "No function to handle event %d in dev %d, state %d",
+ evt_id, ctx->dev_hdl, ctx->state);
return rc;
}
-int cam_context_handle_get_dev_info(struct cam_context *ctx,
+int cam_context_handle_crm_get_dev_info(struct cam_context *ctx,
struct cam_req_mgr_device_info *info)
{
int rc;
if (!ctx->state_machine) {
- pr_err("%s: Context is not ready.\n'", __func__);
+ CAM_ERR(CAM_CORE, "Context is not ready");
return -EINVAL;
}
if (!info) {
- pr_err("%s: Invalid get device info payload.\n", __func__);
+ CAM_ERR(CAM_CORE, "Invalid get device info payload");
return -EINVAL;
}
@@ -54,35 +56,7 @@
rc = ctx->state_machine[ctx->state].crm_ops.get_dev_info(
ctx, info);
} else {
- pr_err("%s: No get device info in dev %d, state %d\n",
- __func__, ctx->dev_hdl, ctx->state);
- rc = -EPROTO;
- }
- mutex_unlock(&ctx->ctx_mutex);
-
- return rc;
-}
-
-int cam_context_handle_link(struct cam_context *ctx,
- struct cam_req_mgr_core_dev_link_setup *link)
-{
- int rc;
-
- if (!ctx->state_machine) {
- pr_err("%s: Context is not ready.\n", __func__);
- return -EINVAL;
- }
-
- if (!link) {
- pr_err("%s: Invalid link payload.\n", __func__);
- return -EINVAL;
- }
-
- mutex_lock(&ctx->ctx_mutex);
- if (ctx->state_machine[ctx->state].crm_ops.link) {
- rc = ctx->state_machine[ctx->state].crm_ops.link(ctx, link);
- } else {
- pr_err("%s: No crm link in dev %d, state %d\n", __func__,
+ CAM_ERR(CAM_CORE, "No get device info in dev %d, state %d",
ctx->dev_hdl, ctx->state);
rc = -EPROTO;
}
@@ -91,18 +65,46 @@
return rc;
}
-int cam_context_handle_unlink(struct cam_context *ctx,
+int cam_context_handle_crm_link(struct cam_context *ctx,
+ struct cam_req_mgr_core_dev_link_setup *link)
+{
+ int rc;
+
+ if (!ctx->state_machine) {
+ CAM_ERR(CAM_CORE, "Context is not ready");
+ return -EINVAL;
+ }
+
+ if (!link) {
+ CAM_ERR(CAM_CORE, "Invalid link payload");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ctx->ctx_mutex);
+ if (ctx->state_machine[ctx->state].crm_ops.link) {
+ rc = ctx->state_machine[ctx->state].crm_ops.link(ctx, link);
+ } else {
+ CAM_ERR(CAM_CORE, "No crm link in dev %d, state %d",
+ ctx->dev_hdl, ctx->state);
+ rc = -EPROTO;
+ }
+ mutex_unlock(&ctx->ctx_mutex);
+
+ return rc;
+}
+
+int cam_context_handle_crm_unlink(struct cam_context *ctx,
struct cam_req_mgr_core_dev_link_setup *unlink)
{
int rc;
if (!ctx->state_machine) {
- pr_err("%s: Context is not ready!\n", __func__);
+ CAM_ERR(CAM_CORE, "Context is not ready");
return -EINVAL;
}
if (!unlink) {
- pr_err("%s: Invalid unlink payload.\n", __func__);
+ CAM_ERR(CAM_CORE, "Invalid unlink payload");
return -EINVAL;
}
@@ -111,8 +113,8 @@
rc = ctx->state_machine[ctx->state].crm_ops.unlink(
ctx, unlink);
} else {
- pr_err("%s: No crm unlink in dev %d, state %d\n",
- __func__, ctx->dev_hdl, ctx->state);
+ CAM_ERR(CAM_CORE, "No crm unlink in dev %d, state %d",
+ ctx->dev_hdl, ctx->state);
rc = -EPROTO;
}
mutex_unlock(&ctx->ctx_mutex);
@@ -120,18 +122,18 @@
return rc;
}
-int cam_context_handle_apply_req(struct cam_context *ctx,
+int cam_context_handle_crm_apply_req(struct cam_context *ctx,
struct cam_req_mgr_apply_request *apply)
{
int rc;
if (!ctx->state_machine) {
- pr_err("%s: Context is not ready.\n'", __func__);
+ CAM_ERR(CAM_CORE, "Context is not ready");
return -EINVAL;
}
if (!apply) {
- pr_err("%s: Invalid apply request payload.\n'", __func__);
+ CAM_ERR(CAM_CORE, "Invalid apply request payload");
return -EINVAL;
}
@@ -140,8 +142,8 @@
rc = ctx->state_machine[ctx->state].crm_ops.apply_req(ctx,
apply);
} else {
- pr_err("%s: No crm apply req in dev %d, state %d\n",
- __func__, ctx->dev_hdl, ctx->state);
+ CAM_ERR(CAM_CORE, "No crm apply req in dev %d, state %d",
+ ctx->dev_hdl, ctx->state);
rc = -EPROTO;
}
mutex_unlock(&ctx->ctx_mutex);
@@ -149,6 +151,29 @@
return rc;
}
+int cam_context_handle_crm_flush_req(struct cam_context *ctx,
+ struct cam_req_mgr_flush_request *flush)
+{
+ int rc;
+
+ if (!ctx->state_machine) {
+ CAM_ERR(CAM_CORE, "Context is not ready");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ctx->ctx_mutex);
+ if (ctx->state_machine[ctx->state].crm_ops.flush_req) {
+ rc = ctx->state_machine[ctx->state].crm_ops.flush_req(ctx,
+ flush);
+ } else {
+ CAM_ERR(CAM_CORE, "No crm flush req in dev %d, state %d",
+ ctx->dev_hdl, ctx->state);
+ rc = -EPROTO;
+ }
+ mutex_unlock(&ctx->ctx_mutex);
+
+ return rc;
+}
int cam_context_handle_acquire_dev(struct cam_context *ctx,
struct cam_acquire_dev_cmd *cmd)
@@ -156,13 +181,12 @@
int rc;
if (!ctx->state_machine) {
- pr_err("%s: Context is not ready.\n", __func__);
+ CAM_ERR(CAM_CORE, "Context is not ready");
return -EINVAL;
}
if (!cmd) {
- pr_err("%s: Invalid acquire device command payload.\n",
- __func__);
+ CAM_ERR(CAM_CORE, "Invalid acquire device command payload");
return -EINVAL;
}
@@ -171,8 +195,8 @@
rc = ctx->state_machine[ctx->state].ioctl_ops.acquire_dev(
ctx, cmd);
} else {
- pr_err("%s: No acquire device in dev %d, state %d\n",
- __func__, cmd->dev_handle, ctx->state);
+ CAM_ERR(CAM_CORE, "No acquire device in dev %d, state %d",
+ cmd->dev_handle, ctx->state);
rc = -EPROTO;
}
mutex_unlock(&ctx->ctx_mutex);
@@ -186,13 +210,12 @@
int rc;
if (!ctx->state_machine) {
- pr_err("%s: Context is not ready.\n", __func__);
+ CAM_ERR(CAM_CORE, "Context is not ready");
return -EINVAL;
}
if (!cmd) {
- pr_err("%s: Invalid release device command payload.\n",
- __func__);
+ CAM_ERR(CAM_CORE, "Invalid release device command payload");
return -EINVAL;
}
@@ -201,8 +224,8 @@
rc = ctx->state_machine[ctx->state].ioctl_ops.release_dev(
ctx, cmd);
} else {
- pr_err("%s: No release device in dev %d, state %d\n",
- __func__, ctx->dev_hdl, ctx->state);
+ CAM_ERR(CAM_CORE, "No release device in dev %d, state %d",
+ ctx->dev_hdl, ctx->state);
rc = -EPROTO;
}
mutex_unlock(&ctx->ctx_mutex);
@@ -216,13 +239,12 @@
int rc;
if (!ctx->state_machine) {
- pr_err("%s: context is not ready\n'", __func__);
+ CAM_ERR(CAM_CORE, "context is not ready");
return -EINVAL;
}
if (!cmd) {
- pr_err("%s: Invalid config device command payload.\n",
- __func__);
+ CAM_ERR(CAM_CORE, "Invalid config device command payload");
return -EINVAL;
}
@@ -231,8 +253,8 @@
rc = ctx->state_machine[ctx->state].ioctl_ops.config_dev(
ctx, cmd);
} else {
- pr_err("%s: No config device in dev %d, state %d\n",
- __func__, ctx->dev_hdl, ctx->state);
+ CAM_ERR(CAM_CORE, "No config device in dev %d, state %d",
+ ctx->dev_hdl, ctx->state);
rc = -EPROTO;
}
mutex_unlock(&ctx->ctx_mutex);
@@ -246,13 +268,12 @@
int rc = 0;
if (!ctx->state_machine) {
- pr_err("%s: Context is not ready.\n", __func__);
+ CAM_ERR(CAM_CORE, "Context is not ready");
return -EINVAL;
}
if (!cmd) {
- pr_err("%s: Invalid start device command payload.\n",
- __func__);
+ CAM_ERR(CAM_CORE, "Invalid start device command payload");
return -EINVAL;
}
@@ -262,8 +283,8 @@
ctx, cmd);
else
/* start device can be optional for some driver */
- pr_debug("%s: No start device in dev %d, state %d\n",
- __func__, ctx->dev_hdl, ctx->state);
+ CAM_DBG(CAM_CORE, "No start device in dev %d, state %d",
+ ctx->dev_hdl, ctx->state);
mutex_unlock(&ctx->ctx_mutex);
@@ -276,13 +297,12 @@
int rc = 0;
if (!ctx->state_machine) {
- pr_err("%s: Context is not ready.\n'", __func__);
+ CAM_ERR(CAM_CORE, "Context is not ready");
return -EINVAL;
}
if (!cmd) {
- pr_err("%s: Invalid stop device command payload.\n",
- __func__);
+ CAM_ERR(CAM_CORE, "Invalid stop device command payload");
return -EINVAL;
}
@@ -292,8 +312,8 @@
ctx, cmd);
else
/* stop device can be optional for some driver */
- pr_warn("%s: No stop device in dev %d, state %d\n",
- __func__, ctx->dev_hdl, ctx->state);
+ CAM_WARN(CAM_CORE, "No stop device in dev %d, state %d",
+ ctx->dev_hdl, ctx->state);
mutex_unlock(&ctx->ctx_mutex);
return rc;
@@ -309,7 +329,7 @@
/* crm_node_intf is optinal */
if (!ctx || !hw_mgr_intf || !req_list) {
- pr_err("%s: Invalid input parameters\n", __func__);
+ CAM_ERR(CAM_CORE, "Invalid input parameters");
return -EINVAL;
}
@@ -352,7 +372,7 @@
* so we just free the memory for the context
*/
if (ctx->state != CAM_CTX_AVAILABLE)
- pr_err("%s: Device did not shutdown cleanly.\n", __func__);
+ CAM_ERR(CAM_CORE, "Device did not shutdown cleanly");
memset(ctx, 0, sizeof(*ctx));
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.h b/drivers/media/platform/msm/camera/cam_core/cam_context.h
index 37a5c03..d87c984 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context.h
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context.h
@@ -13,6 +13,7 @@
#ifndef _CAM_CONTEXT_H_
#define _CAM_CONTEXT_H_
+#include <linux/mutex.h>
#include <linux/spinlock.h>
#include "cam_req_mgr_interface.h"
#include "cam_hw_mgr_intf.h"
@@ -100,6 +101,7 @@
* @link: Link the context
* @unlink: Unlink the context
* @apply_req: Apply setting for the context
+ * @flush_req: Flush request to remove request ids
*
*/
struct cam_ctx_crm_ops {
@@ -111,6 +113,8 @@
struct cam_req_mgr_core_dev_link_setup *unlink);
int (*apply_req)(struct cam_context *ctx,
struct cam_req_mgr_apply_request *apply);
+ int (*flush_req)(struct cam_context *ctx,
+ struct cam_req_mgr_flush_request *flush);
};
@@ -182,7 +186,7 @@
};
/**
- * cam_context_handle_get_dev_info()
+ * cam_context_handle_crm_get_dev_info()
*
* @brief: Handle get device information command
*
@@ -190,11 +194,11 @@
* @info: Device information returned
*
*/
-int cam_context_handle_get_dev_info(struct cam_context *ctx,
+int cam_context_handle_crm_get_dev_info(struct cam_context *ctx,
struct cam_req_mgr_device_info *info);
/**
- * cam_context_handle_link()
+ * cam_context_handle_crm_link()
*
* @brief: Handle link command
*
@@ -202,11 +206,11 @@
* @link: Link command payload
*
*/
-int cam_context_handle_link(struct cam_context *ctx,
+int cam_context_handle_crm_link(struct cam_context *ctx,
struct cam_req_mgr_core_dev_link_setup *link);
/**
- * cam_context_handle_unlink()
+ * cam_context_handle_crm_unlink()
*
* @brief: Handle unlink command
*
@@ -214,11 +218,11 @@
* @unlink: Unlink command payload
*
*/
-int cam_context_handle_unlink(struct cam_context *ctx,
+int cam_context_handle_crm_unlink(struct cam_context *ctx,
struct cam_req_mgr_core_dev_link_setup *unlink);
/**
- * cam_context_handle_apply_req()
+ * cam_context_handle_crm_apply_req()
*
* @brief: Handle apply request command
*
@@ -226,9 +230,20 @@
* @apply: Apply request command payload
*
*/
-int cam_context_handle_apply_req(struct cam_context *ctx,
+int cam_context_handle_crm_apply_req(struct cam_context *ctx,
struct cam_req_mgr_apply_request *apply);
+/**
+ * cam_context_handle_crm_flush_req()
+ *
+ * @brief: Handle flush request command
+ *
+ * @ctx: Object pointer for cam_context
+ * @apply: Flush request command payload
+ *
+ */
+int cam_context_handle_crm_flush_req(struct cam_context *ctx,
+ struct cam_req_mgr_flush_request *apply);
/**
* cam_context_handle_acquire_dev()
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
index 2a0c4a7..1ee82b5 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
@@ -10,8 +10,6 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "CTXT-UTILS %s:%d " fmt, __func__, __LINE__
-
#include <linux/debugfs.h>
#include <linux/videodev2.h>
#include <linux/slab.h>
@@ -19,57 +17,63 @@
#include <media/cam_sync.h>
#include <media/cam_defs.h>
-#include "cam_sync_api.h"
-#include "cam_req_mgr_util.h"
+#include "cam_context.h"
#include "cam_mem_mgr.h"
#include "cam_node.h"
-#include "cam_context.h"
+#include "cam_req_mgr_util.h"
+#include "cam_sync_api.h"
+#include "cam_trace.h"
+#include "cam_debug_util.h"
int cam_context_buf_done_from_hw(struct cam_context *ctx,
void *done_event_data, uint32_t bubble_state)
{
- int rc = 0;
- int i, j;
+ int j;
+ int result;
struct cam_ctx_request *req;
struct cam_hw_done_event_data *done =
(struct cam_hw_done_event_data *)done_event_data;
+ if (!ctx || !done) {
+ CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, done);
+ return -EINVAL;
+ }
+
if (list_empty(&ctx->active_req_list)) {
- pr_err("Buf done with no active request\n");
- rc = -EINVAL;
- goto end;
+ CAM_ERR(CAM_CTXT, "no active request");
+ return -EIO;
}
req = list_first_entry(&ctx->active_req_list,
struct cam_ctx_request, list);
- for (i = 0; i < done->num_handles; i++) {
- for (j = 0; j < req->num_out_map_entries; j++) {
- if (done->resource_handle[i] ==
- req->out_map_entries[j].resource_handle)
- break;
- }
+ trace_cam_buf_done("UTILS", ctx, req);
- if (j == req->num_out_map_entries) {
- pr_err("Can not find matching lane handle 0x%x\n",
- done->resource_handle[i]);
- rc = -EINVAL;
- continue;
- }
+ if (done->request_id != req->request_id) {
+ CAM_ERR(CAM_CTXT, "mismatch: done req[%lld], active req[%lld]",
+ done->request_id, req->request_id);
+ return -EIO;
+ }
- cam_sync_signal(req->out_map_entries[j].sync_id,
- CAM_SYNC_STATE_SIGNALED_SUCCESS);
- req->num_out_acked++;
+ if (!req->num_out_map_entries) {
+ CAM_ERR(CAM_CTXT, "no output fence to signal");
+ return -EIO;
+ }
+
+ list_del_init(&req->list);
+ if (!bubble_state)
+ result = CAM_SYNC_STATE_SIGNALED_SUCCESS;
+ else
+ result = CAM_SYNC_STATE_SIGNALED_ERROR;
+
+ for (j = 0; j < req->num_out_map_entries; j++) {
+ cam_sync_signal(req->out_map_entries[j].sync_id, result);
req->out_map_entries[j].sync_id = -1;
}
- if (req->num_out_acked == req->num_out_map_entries) {
- list_del_init(&req->list);
- list_add_tail(&req->list, &ctx->free_req_list);
- }
+ list_add_tail(&req->list, &ctx->free_req_list);
-end:
- return rc;
+ return 0;
}
int cam_context_apply_req_to_hw(struct cam_context *ctx,
@@ -79,14 +83,20 @@
struct cam_ctx_request *req;
struct cam_hw_config_args cfg;
+ if (!ctx || !apply) {
+ CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, apply);
+ rc = -EINVAL;
+ goto end;
+ }
+
if (!ctx->hw_mgr_intf) {
- pr_err("HW interface is not ready\n");
+ CAM_ERR(CAM_CTXT, "HW interface is not ready");
rc = -EFAULT;
goto end;
}
if (list_empty(&ctx->pending_req_list)) {
- pr_err("No available request for Apply id %lld\n",
+ CAM_ERR(CAM_CTXT, "No available request for Apply id %lld",
apply->request_id);
rc = -EFAULT;
goto end;
@@ -103,7 +113,7 @@
cfg.num_hw_update_entries = req->num_hw_update_entries;
cfg.out_map_entries = req->out_map_entries;
cfg.num_out_map_entries = req->num_out_map_entries;
- cfg.priv = (void *)&req->request_id;
+ cfg.priv = req->req_priv;
list_add_tail(&req->list, &ctx->active_req_list);
rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
@@ -114,27 +124,62 @@
return rc;
}
+static void cam_context_sync_callback(int32_t sync_obj, int status, void *data)
+{
+ struct cam_context *ctx = data;
+ struct cam_ctx_request *req = NULL;
+ struct cam_req_mgr_apply_request apply;
+
+ if (!ctx) {
+ CAM_ERR(CAM_CTXT, "Invalid input param");
+ return;
+ }
+
+ spin_lock(&ctx->lock);
+ if (!list_empty(&ctx->pending_req_list))
+ req = list_first_entry(&ctx->pending_req_list,
+ struct cam_ctx_request, list);
+ spin_unlock(&ctx->lock);
+
+ if (!req) {
+ CAM_ERR(CAM_CTXT, "No more request obj free");
+ return;
+ }
+
+ req->num_in_acked++;
+ if (req->num_in_acked == req->num_in_map_entries) {
+ apply.request_id = req->request_id;
+ cam_context_apply_req_to_hw(ctx, &apply);
+ }
+}
+
int32_t cam_context_release_dev_to_hw(struct cam_context *ctx,
struct cam_release_dev_cmd *cmd)
{
- int rc = 0;
int i;
struct cam_hw_release_args arg;
struct cam_ctx_request *req;
- if (!ctx->hw_mgr_intf) {
- pr_err("HW interface is not ready\n");
- rc = -EFAULT;
- goto end;
+ if (!ctx) {
+ CAM_ERR(CAM_CTXT, "Invalid input param");
+ return -EINVAL;
}
- if (ctx->ctxt_to_hw_map) {
- arg.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
- ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv,
- &arg);
- ctx->ctxt_to_hw_map = NULL;
+ if ((!ctx->hw_mgr_intf) || (!ctx->hw_mgr_intf->hw_release)) {
+ CAM_ERR(CAM_CTXT, "HW interface is not ready");
+ return -EINVAL;
}
+ arg.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+ if ((list_empty(&ctx->active_req_list)) &&
+ (list_empty(&ctx->pending_req_list)))
+ arg.active_req = false;
+ else
+ arg.active_req = true;
+
+ ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, &arg);
+ ctx->ctxt_to_hw_map = NULL;
+
ctx->session_hdl = 0;
ctx->dev_hdl = 0;
ctx->link_hdl = 0;
@@ -143,56 +188,35 @@
req = list_first_entry(&ctx->active_req_list,
struct cam_ctx_request, list);
list_del_init(&req->list);
- pr_warn("signal fence in active list. fence num %d\n",
+ CAM_DBG(CAM_CTXT, "signal fence in active list, num %d",
req->num_out_map_entries);
for (i = 0; i < req->num_out_map_entries; i++) {
- if (req->out_map_entries[i].sync_id != -1)
+ if (req->out_map_entries[i].sync_id > 0)
cam_sync_signal(req->out_map_entries[i].sync_id,
CAM_SYNC_STATE_SIGNALED_ERROR);
}
list_add_tail(&req->list, &ctx->free_req_list);
}
- /* flush the pending queue */
while (!list_empty(&ctx->pending_req_list)) {
req = list_first_entry(&ctx->pending_req_list,
struct cam_ctx_request, list);
list_del_init(&req->list);
- pr_debug("signal fence in pending list. fence num %d\n",
+ for (i = 0; i < req->num_in_map_entries; i++)
+ if (req->in_map_entries[i].sync_id > 0)
+ cam_sync_deregister_callback(
+ cam_context_sync_callback, ctx,
+ req->in_map_entries[i].sync_id);
+ CAM_DBG(CAM_CTXT, "signal fence in pending list, num %d",
req->num_out_map_entries);
for (i = 0; i < req->num_out_map_entries; i++)
- if (req->out_map_entries[i].sync_id != -1)
+ if (req->out_map_entries[i].sync_id > 0)
cam_sync_signal(req->out_map_entries[i].sync_id,
CAM_SYNC_STATE_SIGNALED_ERROR);
list_add_tail(&req->list, &ctx->free_req_list);
}
-end:
- return rc;
-}
-
-void cam_context_sync_callback(int32_t sync_obj, int status, void *data)
-{
- struct cam_context *ctx = data;
- struct cam_ctx_request *req = NULL;
- struct cam_req_mgr_apply_request apply;
-
- spin_lock(&ctx->lock);
- if (!list_empty(&ctx->pending_req_list))
- req = list_first_entry(&ctx->pending_req_list,
- struct cam_ctx_request, list);
- spin_unlock(&ctx->lock);
-
- if (!req) {
- pr_err("No more request obj free\n");
- return;
- }
-
- req->num_in_acked++;
- if (req->num_in_acked == req->num_in_map_entries) {
- apply.request_id = req->request_id;
- cam_context_apply_req_to_hw(ctx, &apply);
- }
+ return 0;
}
int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx,
@@ -206,8 +230,14 @@
size_t len = 0;
int32_t i = 0;
+ if (!ctx || !cmd) {
+ CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, cmd);
+ rc = -EINVAL;
+ goto end;
+ }
+
if (!ctx->hw_mgr_intf) {
- pr_err("HW interface is not ready\n");
+ CAM_ERR(CAM_CTXT, "HW interface is not ready");
rc = -EFAULT;
goto end;
}
@@ -221,7 +251,7 @@
spin_unlock(&ctx->lock);
if (!req) {
- pr_err("No more request obj free\n");
+ CAM_ERR(CAM_CTXT, "No more request obj free");
rc = -ENOMEM;
goto end;
}
@@ -235,20 +265,12 @@
(uint64_t *) &packet_addr,
&len);
if (rc != 0) {
- pr_err("Can not get packet address\n");
+ CAM_ERR(CAM_CTXT, "Can not get packet address");
rc = -EINVAL;
goto free_req;
}
packet = (struct cam_packet *) (packet_addr + cmd->offset);
- pr_debug("pack_handle %llx\n", cmd->packet_handle);
- pr_debug("packet address is 0x%llx\n", packet_addr);
- pr_debug("packet with length %zu, offset 0x%llx\n",
- len, cmd->offset);
- pr_debug("Packet request id 0x%llx\n",
- packet->header.request_id);
- pr_debug("Packet size 0x%x\n", packet->header.size);
- pr_debug("packet op %d\n", packet->header.op_code);
/* preprocess the configuration */
memset(&cfg, 0, sizeof(cfg));
@@ -265,7 +287,7 @@
rc = ctx->hw_mgr_intf->hw_prepare_update(
ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
if (rc != 0) {
- pr_err("Prepare config packet failed in HW layer\n");
+ CAM_ERR(CAM_CTXT, "Prepare config packet failed in HW layer");
rc = -EFAULT;
goto free_req;
}
@@ -285,7 +307,7 @@
cam_context_sync_callback,
(void *)ctx,
req->in_map_entries[i].sync_id);
- pr_debug("register in fence callback: %d ret = %d\n",
+ CAM_DBG(CAM_CTXT, "register in fence cb: %d ret = %d",
req->in_map_entries[i].sync_id, rc);
}
goto end;
@@ -298,7 +320,6 @@
list_add_tail(&req->list, &ctx->free_req_list);
spin_unlock(&ctx->lock);
end:
- pr_debug("Config dev successful\n");
return rc;
}
@@ -310,26 +331,31 @@
struct cam_create_dev_hdl req_hdl_param;
struct cam_hw_release_args release;
+ if (!ctx || !cmd) {
+ CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, cmd);
+ rc = -EINVAL;
+ goto end;
+ }
+
if (!ctx->hw_mgr_intf) {
- pr_err("HW interface is not ready\n");
+ CAM_ERR(CAM_CTXT, "HW interface is not ready");
rc = -EFAULT;
goto end;
}
- pr_debug("acquire cmd: session_hdl 0x%x, num_resources %d\n",
- cmd->session_handle, cmd->num_resources);
- pr_debug(" handle type %d, res %lld\n", cmd->handle_type,
+ CAM_DBG(CAM_CTXT, "ses hdl: %x, num_res: %d, type: %d, res: %lld",
+ cmd->session_handle, cmd->num_resources, cmd->handle_type,
cmd->resource_hdl);
if (cmd->num_resources > CAM_CTX_RES_MAX) {
- pr_err("Too much resources in the acquire\n");
+ CAM_ERR(CAM_CTXT, "resource limit exceeded");
rc = -ENOMEM;
goto end;
}
/* for now we only support user pointer */
if (cmd->handle_type != 1) {
- pr_err("Only user pointer is supported");
+ CAM_ERR(CAM_CTXT, "Only user pointer is supported");
rc = -EINVAL;
goto end;
}
@@ -340,15 +366,11 @@
param.num_acq = cmd->num_resources;
param.acquire_info = cmd->resource_hdl;
- pr_debug("ctx %pK: acquire hw resource: hw_intf: 0x%pK, priv 0x%pK",
- ctx, ctx->hw_mgr_intf, ctx->hw_mgr_intf->hw_mgr_priv);
- pr_debug("acquire_hw_func 0x%pK\n", ctx->hw_mgr_intf->hw_acquire);
-
/* call HW manager to reserve the resource */
rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
¶m);
if (rc != 0) {
- pr_err("Acquire device failed\n");
+ CAM_ERR(CAM_CTXT, "Acquire device failed");
goto end;
}
@@ -361,11 +383,10 @@
req_hdl_param.media_entity_flag = 0;
req_hdl_param.priv = ctx;
- pr_debug("get device handle from bridge\n");
ctx->dev_hdl = cam_create_device_hdl(&req_hdl_param);
if (ctx->dev_hdl <= 0) {
rc = -EFAULT;
- pr_err("Can not create device handle\n");
+ CAM_ERR(CAM_CTXT, "Can not create device handle");
goto free_hw;
}
cmd->dev_handle = ctx->dev_hdl;
@@ -373,7 +394,6 @@
/* store session information */
ctx->session_hdl = cmd->session_handle;
- pr_err("dev_handle = %x\n", cmd->dev_handle);
return rc;
free_hw:
@@ -390,31 +410,37 @@
int rc = 0;
struct cam_hw_start_args arg;
+ if (!ctx || !cmd) {
+ CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, cmd);
+ rc = -EINVAL;
+ goto end;
+ }
+
if (!ctx->hw_mgr_intf) {
- pr_err("HW interface is not ready\n");
+ CAM_ERR(CAM_CTXT, "HW interface is not ready");
rc = -EFAULT;
goto end;
}
if ((cmd->session_handle != ctx->session_hdl) ||
(cmd->dev_handle != ctx->dev_hdl)) {
- pr_err("Invalid session hdl[%d], dev_handle[%d]\n",
+ CAM_ERR(CAM_CTXT, "Invalid session hdl[%d], dev_handle[%d]",
cmd->session_handle, cmd->dev_handle);
rc = -EPERM;
goto end;
}
if (ctx->hw_mgr_intf->hw_start) {
+ arg.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
rc = ctx->hw_mgr_intf->hw_start(ctx->hw_mgr_intf->hw_mgr_priv,
&arg);
if (rc) {
/* HW failure. user need to clean up the resource */
- pr_err("Start HW failed\n");
+ CAM_ERR(CAM_CTXT, "Start HW failed");
goto end;
}
}
- pr_debug("start device success\n");
end:
return rc;
}
@@ -426,8 +452,14 @@
struct cam_hw_stop_args stop;
struct cam_ctx_request *req;
+ if (!ctx) {
+ CAM_ERR(CAM_CTXT, "Invalid input param");
+ rc = -EINVAL;
+ goto end;
+ }
+
if (!ctx->hw_mgr_intf) {
- pr_err("HW interface is not ready\n");
+ CAM_ERR(CAM_CTXT, "HW interface is not ready");
rc = -EFAULT;
goto end;
}
@@ -445,7 +477,7 @@
req = list_first_entry(&ctx->pending_req_list,
struct cam_ctx_request, list);
list_del_init(&req->list);
- pr_debug("signal fence in pending list. fence num %d\n",
+ CAM_DBG(CAM_CTXT, "signal fence in pending list. fence num %d",
req->num_out_map_entries);
for (i = 0; i < req->num_out_map_entries; i++)
if (req->out_map_entries[i].sync_id != -1)
@@ -458,7 +490,7 @@
req = list_first_entry(&ctx->active_req_list,
struct cam_ctx_request, list);
list_del_init(&req->list);
- pr_debug("signal fence in active list. fence num %d\n",
+ CAM_DBG(CAM_CTXT, "signal fence in active list. fence num %d",
req->num_out_map_entries);
for (i = 0; i < req->num_out_map_entries; i++)
if (req->out_map_entries[i].sync_id != -1)
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_core_defs.h b/drivers/media/platform/msm/camera/cam_core/cam_core_defs.h
index 3498836..45d989f 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_core_defs.h
+++ b/drivers/media/platform/msm/camera/cam_core/cam_core_defs.h
@@ -9,8 +9,9 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
-#ifndef _CAM_REQ_MGR_CORE_DEFS_H_
-#define _CAM_REQ_MGR_CORE_DEFS_H_
+
+#ifndef _CAM_CORE_DEFS_H_
+#define _CAM_CORE_DEFS_H_
#define CAM_CORE_TRACE_ENABLE 0
@@ -40,5 +41,5 @@
__func__, __LINE__, ##args)
#endif
-#endif /* _CAM_REQ_MGR_CORE_DEFS_H_ */
+#endif /* _CAM_CORE_DEFS_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h
index f72a1d7..aab75d5 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h
+++ b/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h
@@ -51,7 +51,7 @@
* struct cam_hw_fence_map_entry - Entry for the resource to sync id map
*
* @resrouce_handle: Resource port id for the buffer
- * @sync_id: Synce id
+ * @sync_id: Sync id
*
*/
struct cam_hw_fence_map_entry {
@@ -65,12 +65,14 @@
* @num_handles: number of handles in the event
* @resrouce_handle: list of the resource handle
* @timestamp: time stamp
+ * @request_id: request identifier
*
*/
struct cam_hw_done_event_data {
uint32_t num_handles;
uint32_t resource_handle[CAM_NUM_OUT_PER_COMP_IRQ_MAX];
struct timeval timestamp;
+ uint64_t request_id;
};
/**
@@ -95,10 +97,12 @@
* struct cam_hw_release_args - Payload for release command
*
* @ctxt_to_hw_map: HW context from the acquire
+ * @active_req: Active request flag
*
*/
struct cam_hw_release_args {
void *ctxt_to_hw_map;
+ bool active_req;
};
/**
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_node.c b/drivers/media/platform/msm/camera/cam_core/cam_node.c
index 74a94b2..043f44d 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_node.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_node.c
@@ -15,7 +15,8 @@
#include <linux/uaccess.h>
#include "cam_node.h"
-
+#include "cam_trace.h"
+#include "cam_debug_util.h"
static void __cam_node_handle_shutdown(struct cam_node *node)
{
if (node->hw_mgr_intf.hw_close)
@@ -29,7 +30,7 @@
int rc = -EFAULT;
if (!query) {
- pr_err("%s: Invalid params\n", __func__);
+ CAM_ERR(CAM_CORE, "Invalid params");
return -EINVAL;
}
@@ -64,7 +65,7 @@
rc = cam_context_handle_acquire_dev(ctx, acquire);
if (rc) {
- pr_err("%s: Acquire device failed\n", __func__);
+ CAM_ERR(CAM_CORE, "Acquire device failed");
goto free_ctx;
}
@@ -86,19 +87,19 @@
return -EINVAL;
if (start->dev_handle <= 0) {
- pr_err("Invalid device handle for context\n");
+ CAM_ERR(CAM_CORE, "Invalid device handle for context");
return -EINVAL;
}
if (start->session_handle <= 0) {
- pr_err("Invalid session handle for context\n");
+ CAM_ERR(CAM_CORE, "Invalid session handle for context");
return -EINVAL;
}
ctx = (struct cam_context *)cam_get_device_priv(start->dev_handle);
if (!ctx) {
- pr_err("%s: Can not get context for handle %d\n",
- __func__, start->dev_handle);
+ CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+ start->dev_handle);
return -EINVAL;
}
@@ -114,19 +115,19 @@
return -EINVAL;
if (stop->dev_handle <= 0) {
- pr_err("Invalid device handle for context\n");
+ CAM_ERR(CAM_CORE, "Invalid device handle for context");
return -EINVAL;
}
if (stop->session_handle <= 0) {
- pr_err("Invalid session handle for context\n");
+ CAM_ERR(CAM_CORE, "Invalid session handle for context");
return -EINVAL;
}
ctx = (struct cam_context *)cam_get_device_priv(stop->dev_handle);
if (!ctx) {
- pr_err("%s: Can not get context for handle %d\n",
- __func__, stop->dev_handle);
+ CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+ stop->dev_handle);
return -EINVAL;
}
@@ -142,19 +143,19 @@
return -EINVAL;
if (config->dev_handle <= 0) {
- pr_err("Invalid device handle for context\n");
+ CAM_ERR(CAM_CORE, "Invalid device handle for context");
return -EINVAL;
}
if (config->session_handle <= 0) {
- pr_err("Invalid session handle for context\n");
+ CAM_ERR(CAM_CORE, "Invalid session handle for context");
return -EINVAL;
}
ctx = (struct cam_context *)cam_get_device_priv(config->dev_handle);
if (!ctx) {
- pr_err("%s: Can not get context for handle %d\n",
- __func__, config->dev_handle);
+ CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+ config->dev_handle);
return -EINVAL;
}
@@ -171,29 +172,29 @@
return -EINVAL;
if (release->dev_handle <= 0) {
- pr_err("Invalid device handle for context\n");
+ CAM_ERR(CAM_CORE, "Invalid device handle for context");
return -EINVAL;
}
if (release->session_handle <= 0) {
- pr_err("Invalid session handle for context\n");
+ CAM_ERR(CAM_CORE, "Invalid session handle for context");
return -EINVAL;
}
ctx = (struct cam_context *)cam_get_device_priv(release->dev_handle);
if (!ctx) {
- pr_err("%s: Can not get context for handle %d\n",
- __func__, release->dev_handle);
+ CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+ release->dev_handle);
return -EINVAL;
}
rc = cam_context_handle_release_dev(ctx, release);
if (rc)
- pr_err("%s: context release failed\n", __func__);
+ CAM_ERR(CAM_CORE, "context release failed");
rc = cam_destroy_device_hdl(release->dev_handle);
if (rc)
- pr_err("%s: destroy device handle is failed\n", __func__);
+ CAM_ERR(CAM_CORE, "destroy device handle is failed");
mutex_lock(&node->list_mutex);
list_add_tail(&ctx->list, &node->free_ctx_list);
@@ -201,7 +202,7 @@
return rc;
}
-static int __cam_node_get_dev_info(struct cam_req_mgr_device_info *info)
+static int __cam_node_crm_get_dev_info(struct cam_req_mgr_device_info *info)
{
struct cam_context *ctx = NULL;
@@ -210,14 +211,15 @@
ctx = (struct cam_context *) cam_get_device_priv(info->dev_hdl);
if (!ctx) {
- pr_err("%s: Can not get context for handle %d\n",
- __func__, info->dev_hdl);
+ CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+ info->dev_hdl);
return -EINVAL;
}
- return cam_context_handle_get_dev_info(ctx, info);
+ return cam_context_handle_crm_get_dev_info(ctx, info);
}
-static int __cam_node_link_setup(struct cam_req_mgr_core_dev_link_setup *setup)
+static int __cam_node_crm_link_setup(
+ struct cam_req_mgr_core_dev_link_setup *setup)
{
int rc;
struct cam_context *ctx = NULL;
@@ -227,20 +229,20 @@
ctx = (struct cam_context *) cam_get_device_priv(setup->dev_hdl);
if (!ctx) {
- pr_err("%s: Can not get context for handle %d\n",
- __func__, setup->dev_hdl);
+ CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+ setup->dev_hdl);
return -EINVAL;
}
if (setup->link_enable)
- rc = cam_context_handle_link(ctx, setup);
+ rc = cam_context_handle_crm_link(ctx, setup);
else
- rc = cam_context_handle_unlink(ctx, setup);
+ rc = cam_context_handle_crm_unlink(ctx, setup);
return rc;
}
-static int __cam_node_apply_req(struct cam_req_mgr_apply_request *apply)
+static int __cam_node_crm_apply_req(struct cam_req_mgr_apply_request *apply)
{
struct cam_context *ctx = NULL;
@@ -249,12 +251,33 @@
ctx = (struct cam_context *) cam_get_device_priv(apply->dev_hdl);
if (!ctx) {
- pr_err("%s: Can not get context for handle %d\n",
- __func__, apply->dev_hdl);
+ CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+ apply->dev_hdl);
return -EINVAL;
}
- return cam_context_handle_apply_req(ctx, apply);
+ trace_cam_apply_req("Node", apply);
+
+ return cam_context_handle_crm_apply_req(ctx, apply);
+}
+
+static int __cam_node_crm_flush_req(struct cam_req_mgr_flush_request *flush)
+{
+ struct cam_context *ctx = NULL;
+
+ if (!flush) {
+ CAM_ERR(CAM_CORE, "Invalid flush request payload");
+ return -EINVAL;
+ }
+
+ ctx = (struct cam_context *) cam_get_device_priv(flush->dev_hdl);
+ if (!ctx) {
+ CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+ flush->dev_hdl);
+ return -EINVAL;
+ }
+
+ return cam_context_handle_crm_flush_req(ctx, flush);
}
int cam_node_deinit(struct cam_node *node)
@@ -262,7 +285,7 @@
if (node)
memset(node, 0, sizeof(*node));
- pr_debug("%s: deinit complete!\n", __func__);
+ CAM_DBG(CAM_CORE, "deinit complete");
return 0;
}
@@ -283,9 +306,10 @@
strlcpy(node->name, name, sizeof(node->name));
memcpy(&node->hw_mgr_intf, hw_mgr_intf, sizeof(node->hw_mgr_intf));
- node->crm_node_intf.apply_req = __cam_node_apply_req;
- node->crm_node_intf.get_dev_info = __cam_node_get_dev_info;
- node->crm_node_intf.link_setup = __cam_node_link_setup;
+ node->crm_node_intf.apply_req = __cam_node_crm_apply_req;
+ node->crm_node_intf.get_dev_info = __cam_node_crm_get_dev_info;
+ node->crm_node_intf.link_setup = __cam_node_crm_link_setup;
+ node->crm_node_intf.flush_req = __cam_node_crm_flush_req;
mutex_init(&node->list_mutex);
INIT_LIST_HEAD(&node->free_ctx_list);
@@ -293,8 +317,8 @@
node->ctx_size = ctx_size;
for (i = 0; i < ctx_size; i++) {
if (!ctx_list[i].state_machine) {
- pr_err("%s: camera context %d is not initialized!",
- __func__, i);
+ CAM_ERR(CAM_CORE,
+ "camera context %d is not initialized", i);
rc = -1;
goto err;
}
@@ -304,7 +328,7 @@
node->state = CAM_NODE_STATE_INIT;
err:
- pr_debug("%s: Exit. (rc = %d)\n", __func__, rc);
+ CAM_DBG(CAM_CORE, "Exit. (rc = %d)", rc);
return rc;
}
@@ -315,7 +339,7 @@
if (!cmd)
return -EINVAL;
- pr_debug("%s: handle cmd %d\n", __func__, cmd->op_code);
+ CAM_DBG(CAM_CORE, "handle cmd %d", cmd->op_code);
switch (cmd->op_code) {
case CAM_QUERY_CAP: {
@@ -329,8 +353,8 @@
rc = __cam_node_handle_query_cap(node, &query);
if (rc) {
- pr_err("%s: querycap is failed(rc = %d)\n",
- __func__, rc);
+ CAM_ERR(CAM_CORE, "querycap is failed(rc = %d)",
+ rc);
break;
}
@@ -350,8 +374,8 @@
}
rc = __cam_node_handle_acquire_dev(node, &acquire);
if (rc) {
- pr_err("%s: acquire device failed(rc = %d)\n",
- __func__, rc);
+ CAM_ERR(CAM_CORE, "acquire device failed(rc = %d)",
+ rc);
break;
}
if (copy_to_user((void __user *)cmd->handle, &acquire,
@@ -368,8 +392,8 @@
else {
rc = __cam_node_handle_start_dev(node, &start);
if (rc)
- pr_err("%s: start device failed(rc = %d)\n",
- __func__, rc);
+ CAM_ERR(CAM_CORE,
+ "start device failed(rc = %d)", rc);
}
break;
}
@@ -382,8 +406,8 @@
else {
rc = __cam_node_handle_stop_dev(node, &stop);
if (rc)
- pr_err("%s: stop device failed(rc = %d)\n",
- __func__, rc);
+ CAM_ERR(CAM_CORE,
+ "stop device failed(rc = %d)", rc);
}
break;
}
@@ -396,8 +420,8 @@
else {
rc = __cam_node_handle_config_dev(node, &config);
if (rc)
- pr_err("%s: config device failed(rc = %d)\n",
- __func__, rc);
+ CAM_ERR(CAM_CORE,
+ "config device failed(rc = %d)", rc);
}
break;
}
@@ -410,8 +434,8 @@
else {
rc = __cam_node_handle_release_dev(node, &release);
if (rc)
- pr_err("%s: release device failed(rc = %d)\n",
- __func__, rc);
+ CAM_ERR(CAM_CORE,
+ "release device failed(rc = %d)", rc);
}
break;
}
@@ -419,7 +443,7 @@
__cam_node_handle_shutdown(node);
break;
default:
- pr_err("Unknown op code %d\n", cmd->op_code);
+ CAM_ERR(CAM_CORE, "Unknown op code %d", cmd->op_code);
rc = -EINVAL;
}
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_subdev.c b/drivers/media/platform/msm/camera/cam_core/cam_subdev.c
index a89981d..d690508 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_subdev.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_subdev.c
@@ -12,6 +12,7 @@
#include "cam_subdev.h"
#include "cam_node.h"
+#include "cam_debug_util.h"
/**
* cam_subdev_subscribe_event()
@@ -63,7 +64,7 @@
(struct cam_control *) arg);
break;
default:
- pr_err("Invalid command %d for %s!\n", cmd,
+ CAM_ERR(CAM_CORE, "Invalid command %d for %s", cmd,
node->name);
rc = -EINVAL;
}
@@ -80,7 +81,7 @@
if (copy_from_user(&cmd_data, (void __user *)arg,
sizeof(cmd_data))) {
- pr_err("Failed to copy from user_ptr=%pK size=%zu\n",
+ CAM_ERR(CAM_CORE, "Failed to copy from user_ptr=%pK size=%zu",
(void __user *)arg, sizeof(cmd_data));
return -EFAULT;
}
@@ -88,7 +89,8 @@
if (!rc) {
if (copy_to_user((void __user *)arg, &cmd_data,
sizeof(cmd_data))) {
- pr_err("Failed to copy to user_ptr=%pK size=%zu\n",
+ CAM_ERR(CAM_CORE,
+ "Failed to copy to user_ptr=%pK size=%zu",
(void __user *)arg, sizeof(cmd_data));
rc = -EFAULT;
}
@@ -129,16 +131,12 @@
int rc;
struct cam_node *node = NULL;
- if (!sd || !pdev || !name) {
- rc = -EINVAL;
- goto err;
- }
+ if (!sd || !pdev || !name)
+ return -EINVAL;
node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node) {
- rc = -ENOMEM;
- goto err;
- }
+ if (!node)
+ return -ENOMEM;
/* Setup camera v4l2 subdevice */
sd->pdev = pdev;
@@ -151,8 +149,8 @@
rc = cam_register_subdev(sd);
if (rc) {
- pr_err("%s: cam_register_subdev() failed for dev: %s!\n",
- __func__, sd->name);
+ CAM_ERR(CAM_CORE, "cam_register_subdev() failed for dev: %s",
+ sd->name);
goto err;
}
platform_set_drvdata(sd->pdev, sd);
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
index 9a30d64..82035e9 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
@@ -21,22 +21,6 @@
#include "cam_cpas_hw_intf.h"
#include "cam_cpas_soc.h"
-int cam_cpas_util_get_string_index(const char **strings,
- uint32_t num_strings, char *matching_string, uint32_t *index)
-{
- int i;
-
- for (i = 0; i < num_strings; i++) {
- if (strnstr(strings[i], matching_string, strlen(strings[i]))) {
- CPAS_CDBG("matched %s : %d\n", matching_string, i);
- *index = i;
- return 0;
- }
- }
-
- return -EINVAL;
-}
-
int cam_cpas_util_reg_update(struct cam_hw_info *cpas_hw,
enum cam_cpas_reg_base reg_base, struct cam_cpas_reg *reg_info)
{
@@ -62,7 +46,7 @@
value = reg_info->value;
}
- CPAS_CDBG("Base[%d] Offset[0x%8x] Value[0x%8x]\n",
+ CAM_DBG(CAM_CPAS, "Base[%d] Offset[0x%8x] Value[0x%8x]",
reg_base, reg_info->offset, value);
cam_io_w_mb(value, soc_info->reg_map[reg_base_index].mem_base +
@@ -75,13 +59,13 @@
struct cam_cpas_bus_client *bus_client, unsigned int level)
{
if (!bus_client->valid || (bus_client->dyn_vote == true)) {
- pr_err("Invalid params %d %d\n", bus_client->valid,
+ CAM_ERR(CAM_CPAS, "Invalid params %d %d", bus_client->valid,
bus_client->dyn_vote);
return -EINVAL;
}
if (level >= bus_client->num_usecases) {
- pr_err("Invalid vote level=%d, usecases=%d\n", level,
+ CAM_ERR(CAM_CPAS, "Invalid vote level=%d, usecases=%d", level,
bus_client->num_usecases);
return -EINVAL;
}
@@ -89,7 +73,8 @@
if (level == bus_client->curr_vote_level)
return 0;
- CPAS_CDBG("Bus client[%d] index[%d]\n", bus_client->client_id, level);
+ CAM_DBG(CAM_CPAS, "Bus client[%d] index[%d]", bus_client->client_id,
+ level);
msm_bus_scale_client_update_request(bus_client->client_id, level);
bus_client->curr_vote_level = level;
@@ -104,14 +89,14 @@
int idx = 0;
if (!bus_client->valid) {
- pr_err("bus client not valid\n");
+ CAM_ERR(CAM_CPAS, "bus client not valid");
return -EINVAL;
}
if ((bus_client->num_usecases != 2) ||
(bus_client->num_paths != 1) ||
(bus_client->dyn_vote != true)) {
- pr_err("dynamic update not allowed %d %d %d\n",
+ CAM_ERR(CAM_CPAS, "dynamic update not allowed %d %d %d",
bus_client->num_usecases, bus_client->num_paths,
bus_client->dyn_vote);
return -EINVAL;
@@ -120,7 +105,7 @@
mutex_lock(&bus_client->lock);
if (bus_client->curr_vote_level > 1) {
- pr_err("curr_vote_level %d cannot be greater than 1\n",
+ CAM_ERR(CAM_CPAS, "curr_vote_level %d cannot be greater than 1",
bus_client->curr_vote_level);
mutex_unlock(&bus_client->lock);
return -EINVAL;
@@ -136,7 +121,7 @@
path->vectors[0].ab = ab;
path->vectors[0].ib = ib;
- CPAS_CDBG("Bus client[%d] :ab[%llu] ib[%llu], index[%d]\n",
+ CAM_DBG(CAM_CPAS, "Bus client[%d] :ab[%llu] ib[%llu], index[%d]",
bus_client->client_id, ab, ib, idx);
msm_bus_scale_client_update_request(bus_client->client_id, idx);
@@ -154,20 +139,20 @@
pdata = msm_bus_pdata_from_node(soc_info->pdev,
dev_node);
if (!pdata) {
- pr_err("failed get_pdata\n");
+ CAM_ERR(CAM_CPAS, "failed get_pdata");
return -EINVAL;
}
if ((pdata->num_usecases == 0) ||
(pdata->usecase[0].num_paths == 0)) {
- pr_err("usecase=%d\n", pdata->num_usecases);
+ CAM_ERR(CAM_CPAS, "usecase=%d", pdata->num_usecases);
rc = -EINVAL;
goto error;
}
client_id = msm_bus_scale_register_client(pdata);
if (!client_id) {
- pr_err("failed in register ahb bus client\n");
+ CAM_ERR(CAM_CPAS, "failed in register ahb bus client");
rc = -EINVAL;
goto error;
}
@@ -176,7 +161,8 @@
"qcom,msm-bus-vector-dyn-vote");
if (bus_client->dyn_vote && (pdata->num_usecases != 2)) {
- pr_err("Excess or less vectors %d\n", pdata->num_usecases);
+ CAM_ERR(CAM_CPAS, "Excess or less vectors %d",
+ pdata->num_usecases);
rc = -EINVAL;
goto fail_unregister_client;
}
@@ -193,7 +179,7 @@
bus_client->valid = true;
mutex_init(&bus_client->lock);
- CPAS_CDBG("Bus Client : src=%d, dst=%d, bus_client=%d\n",
+ CAM_DBG(CAM_CPAS, "Bus Client : src=%d, dst=%d, bus_client=%d",
bus_client->src, bus_client->dst, bus_client->client_id);
return 0;
@@ -268,7 +254,7 @@
axi_port_list_node = of_find_node_by_name(soc_info->pdev->dev.of_node,
"qcom,axi-port-list");
if (!axi_port_list_node) {
- pr_err("Node qcom,axi-port-list not found.\n");
+ CAM_ERR(CAM_CPAS, "Node qcom,axi-port-list not found.");
return -EINVAL;
}
@@ -286,14 +272,15 @@
"qcom,axi-port-name", 0,
(const char **)&axi_port->axi_port_name);
if (rc) {
- pr_err("failed to read qcom,axi-port-name rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS,
+ "failed to read qcom,axi-port-name rc=%d", rc);
goto port_name_fail;
}
axi_port_mnoc_node = of_find_node_by_name(axi_port_node,
"qcom,axi-port-mnoc");
if (!axi_port_mnoc_node) {
- pr_err("Node qcom,axi-port-mnoc not found.\n");
+ CAM_ERR(CAM_CPAS, "Node qcom,axi-port-mnoc not found.");
rc = -EINVAL;
goto mnoc_node_get_fail;
}
@@ -308,7 +295,8 @@
axi_port_camnoc_node = of_find_node_by_name(
axi_port_node, "qcom,axi-port-camnoc");
if (!axi_port_camnoc_node) {
- pr_err("Node qcom,axi-port-camnoc not found\n");
+ CAM_ERR(CAM_CPAS,
+ "Node qcom,axi-port-camnoc not found");
rc = -EINVAL;
goto camnoc_node_get_fail;
}
@@ -358,7 +346,8 @@
rc = cam_cpas_util_vote_bus_client_level(&cpas_core->ahb_bus_client,
(enable == true) ? CAM_SVS_VOTE : CAM_SUSPEND_VOTE);
if (rc) {
- pr_err("Failed in AHB vote, enable=%d, rc=%d\n", enable, rc);
+ CAM_ERR(CAM_CPAS, "Failed in AHB vote, enable=%d, rc=%d",
+ enable, rc);
return rc;
}
@@ -375,7 +364,8 @@
rc = cam_cpas_util_vote_bus_client_bw(&curr_port->mnoc_bus,
mnoc_bw, 0);
if (rc) {
- pr_err("Failed in mnoc vote, enable=%d, rc=%d\n",
+ CAM_ERR(CAM_CPAS,
+ "Failed in mnoc vote, enable=%d, rc=%d",
enable, rc);
goto remove_ahb_vote;
}
@@ -384,7 +374,8 @@
cam_cpas_util_vote_bus_client_bw(
&curr_port->camnoc_bus, camnoc_bw, 0);
if (rc) {
- pr_err("Failed in mnoc vote, enable=%d, %d\n",
+ CAM_ERR(CAM_CPAS,
+ "Failed in mnoc vote, enable=%d, %d",
enable, rc);
cam_cpas_util_vote_bus_client_bw(
&curr_port->mnoc_bus, 0, 0);
@@ -446,7 +437,8 @@
int rc = 0;
if (reg_base_index < 0 || reg_base_index >= soc_info->num_reg_map) {
- pr_err("Invalid reg_base=%d, reg_base_index=%d, num_map=%d\n",
+ CAM_ERR(CAM_CPAS,
+ "Invalid reg_base=%d, reg_base_index=%d, num_map=%d",
reg_base, reg_base_index, soc_info->num_reg_map);
return -EINVAL;
}
@@ -457,7 +449,7 @@
mutex_lock(&cpas_core->client_mutex[client_indx]);
if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
- pr_err("client has not started%d\n", client_indx);
+ CAM_ERR(CAM_CPAS, "client has not started%d", client_indx);
rc = -EPERM;
goto unlock_client;
}
@@ -489,7 +481,8 @@
return -EINVAL;
if (reg_base_index < 0 || reg_base_index >= soc_info->num_reg_map) {
- pr_err("Invalid reg_base=%d, reg_base_index=%d, num_map=%d\n",
+ CAM_ERR(CAM_CPAS,
+ "Invalid reg_base=%d, reg_base_index=%d, num_map=%d",
reg_base, reg_base_index, soc_info->num_reg_map);
return -EINVAL;
}
@@ -500,7 +493,7 @@
mutex_lock(&cpas_core->client_mutex[client_indx]);
if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
- pr_err("client has not started%d\n", client_indx);
+ CAM_ERR(CAM_CPAS, "client has not started%d", client_indx);
rc = -EPERM;
goto unlock_client;
}
@@ -531,7 +524,7 @@
int rc = 0;
if (!axi_port) {
- pr_err("axi port does not exists\n");
+ CAM_ERR(CAM_CPAS, "axi port does not exists");
return -EINVAL;
}
@@ -563,7 +556,8 @@
if ((!soc_private->axi_camnoc_based) && (mnoc_bw < camnoc_bw))
mnoc_bw = camnoc_bw;
- CPAS_CDBG("axi[(%d, %d),(%d, %d)] : camnoc_bw[%llu], mnoc_bw[%llu]\n",
+ CAM_DBG(CAM_CPAS,
+ "axi[(%d, %d),(%d, %d)] : camnoc_bw[%llu], mnoc_bw[%llu]",
axi_port->mnoc_bus.src, axi_port->mnoc_bus.dst,
axi_port->camnoc_bus.src, axi_port->camnoc_bus.dst,
camnoc_bw, mnoc_bw);
@@ -571,7 +565,8 @@
rc = cam_cpas_util_vote_bus_client_bw(&axi_port->mnoc_bus,
mnoc_bw, 0);
if (rc) {
- pr_err("Failed in mnoc vote ab[%llu] ib[%llu] rc=%d\n",
+ CAM_ERR(CAM_CPAS,
+ "Failed in mnoc vote ab[%llu] ib[%llu] rc=%d",
mnoc_bw, mnoc_bw, rc);
goto unlock_axi_port;
}
@@ -580,7 +575,8 @@
rc = cam_cpas_util_vote_bus_client_bw(&axi_port->camnoc_bus,
camnoc_bw, 0);
if (rc) {
- pr_err("Failed camnoc vote ab[%llu] ib[%llu] rc=%d\n",
+ CAM_ERR(CAM_CPAS,
+ "Failed camnoc vote ab[%llu] ib[%llu] rc=%d",
camnoc_bw, camnoc_bw, rc);
goto unlock_axi_port;
}
@@ -600,7 +596,8 @@
if (!axi_vote || ((axi_vote->compressed_bw == 0) &&
(axi_vote->uncompressed_bw == 0))) {
- pr_err("Invalid vote, client_handle=%d\n", client_handle);
+ CAM_ERR(CAM_CPAS, "Invalid vote, client_handle=%d",
+ client_handle);
return -EINVAL;
}
@@ -610,12 +607,13 @@
mutex_lock(&cpas_core->client_mutex[client_indx]);
if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
- pr_err("client has not started %d\n", client_indx);
+ CAM_ERR(CAM_CPAS, "client has not started %d", client_indx);
rc = -EPERM;
goto unlock_client;
}
- CPAS_CDBG("Client[%d] Requested compressed[%llu], uncompressed[%llu]\n",
+ CAM_DBG(CAM_CPAS,
+ "Client[%d] Requested compressed[%llu], uncompressed[%llu]",
client_indx, axi_vote->compressed_bw,
axi_vote->uncompressed_bw);
@@ -640,13 +638,14 @@
int i;
if (!dev || !req_level) {
- pr_err("Invalid params %pK, %pK\n", dev, req_level);
+ CAM_ERR(CAM_CPAS, "Invalid params %pK, %pK", dev, req_level);
return -EINVAL;
}
opp = dev_pm_opp_find_freq_ceil(dev, &corner_freq);
if (IS_ERR(opp)) {
- pr_err("Error on OPP freq :%ld, %pK\n", corner_freq, opp);
+ CAM_ERR(CAM_CPAS, "Error on OPP freq :%ld, %pK",
+ corner_freq, opp);
return -EINVAL;
}
@@ -656,7 +655,8 @@
if (corner == soc_private->vdd_ahb[i].vdd_corner)
level = soc_private->vdd_ahb[i].ahb_level;
- CPAS_CDBG("From OPP table : freq=[%ld][%ld], corner=%d, level=%d\n",
+ CAM_DBG(CAM_CPAS,
+ "From OPP table : freq=[%ld][%ld], corner=%d, level=%d",
freq, corner_freq, corner, level);
*req_level = level;
@@ -665,7 +665,8 @@
}
static int cam_cpas_util_apply_client_ahb_vote(struct cam_hw_info *cpas_hw,
- struct cam_cpas_client *cpas_client, struct cam_ahb_vote *ahb_vote)
+ struct cam_cpas_client *cpas_client, struct cam_ahb_vote *ahb_vote,
+ enum cam_vote_level *applied_level)
{
struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
struct cam_cpas_bus_client *ahb_bus_client = &cpas_core->ahb_bus_client;
@@ -674,7 +675,7 @@
int i, rc = 0;
if (!ahb_bus_client->valid) {
- pr_err("AHB Bus client not valid\n");
+ CAM_ERR(CAM_CPAS, "AHB Bus client not valid");
return -EINVAL;
}
@@ -693,7 +694,7 @@
mutex_lock(&ahb_bus_client->lock);
cpas_client->ahb_level = required_level;
- CPAS_CDBG("Clients required level[%d], curr_level[%d]\n",
+ CAM_DBG(CAM_CPAS, "Clients required level[%d], curr_level[%d]",
required_level, ahb_bus_client->curr_vote_level);
if (required_level == ahb_bus_client->curr_vote_level)
@@ -706,13 +707,26 @@
highest_level = cpas_core->cpas_client[i]->ahb_level;
}
- CPAS_CDBG("Required highest_level[%d]\n", highest_level);
+ CAM_DBG(CAM_CPAS, "Required highest_level[%d]", highest_level);
rc = cam_cpas_util_vote_bus_client_level(ahb_bus_client,
highest_level);
- if (rc)
- pr_err("Failed in ahb vote, level=%d, rc=%d\n",
+ if (rc) {
+ CAM_ERR(CAM_CPAS, "Failed in ahb vote, level=%d, rc=%d",
highest_level, rc);
+ goto unlock_bus_client;
+ }
+
+ rc = cam_soc_util_set_clk_rate_level(&cpas_hw->soc_info, highest_level);
+ if (rc) {
+ CAM_ERR(CAM_CPAS,
+ "Failed in scaling clock rate level %d for AHB",
+ highest_level);
+ goto unlock_bus_client;
+ }
+
+ if (applied_level)
+ *applied_level = highest_level;
unlock_bus_client:
mutex_unlock(&ahb_bus_client->lock);
@@ -727,7 +741,7 @@
int rc = 0;
if (!ahb_vote || (ahb_vote->vote.level == 0)) {
- pr_err("Invalid AHB vote, %pK\n", ahb_vote);
+ CAM_ERR(CAM_CPAS, "Invalid AHB vote, %pK", ahb_vote);
return -EINVAL;
}
@@ -737,18 +751,19 @@
mutex_lock(&cpas_core->client_mutex[client_indx]);
if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
- pr_err("client has not started %d\n", client_indx);
+ CAM_ERR(CAM_CPAS, "client has not started %d", client_indx);
rc = -EPERM;
goto unlock_client;
}
- CPAS_CDBG("client[%d] : type[%d], level[%d], freq[%ld], applied[%d]\n",
+ CAM_DBG(CAM_CPAS,
+ "client[%d] : type[%d], level[%d], freq[%ld], applied[%d]",
client_indx, ahb_vote->type, ahb_vote->vote.level,
ahb_vote->vote.freq,
cpas_core->cpas_client[client_indx]->ahb_level);
rc = cam_cpas_util_apply_client_ahb_vote(cpas_hw,
- cpas_core->cpas_client[client_indx], ahb_vote);
+ cpas_core->cpas_client[client_indx], ahb_vote, NULL);
unlock_client:
mutex_unlock(&cpas_core->client_mutex[client_indx]);
@@ -765,15 +780,17 @@
struct cam_cpas_client *cpas_client;
struct cam_ahb_vote *ahb_vote;
struct cam_axi_vote *axi_vote;
+ enum cam_vote_level applied_level = CAM_SVS_VOTE;
int rc;
if (!hw_priv || !start_args) {
- pr_err("Invalid arguments %pK %pK\n", hw_priv, start_args);
+ CAM_ERR(CAM_CPAS, "Invalid arguments %pK %pK",
+ hw_priv, start_args);
return -EINVAL;
}
if (sizeof(struct cam_cpas_hw_cmd_start) != arg_size) {
- pr_err("HW_CAPS size mismatch %ld %d\n",
+ CAM_ERR(CAM_CPAS, "HW_CAPS size mismatch %ld %d",
sizeof(struct cam_cpas_hw_cmd_start), arg_size);
return -EINVAL;
}
@@ -790,7 +807,7 @@
if ((ahb_vote->vote.level == 0) || ((axi_vote->compressed_bw == 0) &&
(axi_vote->uncompressed_bw == 0))) {
- pr_err("Invalid vote ahb[%d], axi[%llu], [%llu]\n",
+ CAM_ERR(CAM_CPAS, "Invalid vote ahb[%d], axi[%llu], [%llu]",
ahb_vote->vote.level, axi_vote->compressed_bw,
axi_vote->uncompressed_bw);
return -EINVAL;
@@ -803,28 +820,29 @@
mutex_lock(&cpas_core->client_mutex[client_indx]);
if (!CAM_CPAS_CLIENT_REGISTERED(cpas_core, client_indx)) {
- pr_err("client is not registered %d\n", client_indx);
+ CAM_ERR(CAM_CPAS, "client is not registered %d", client_indx);
rc = -EPERM;
goto done;
}
if (CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
- pr_err("Client %d is in start state\n", client_indx);
+ CAM_ERR(CAM_CPAS, "Client %d is in start state", client_indx);
rc = -EPERM;
goto done;
}
cpas_client = cpas_core->cpas_client[client_indx];
- CPAS_CDBG("AHB :client[%d] type[%d], level[%d], applied[%d]\n",
+ CAM_DBG(CAM_CPAS, "AHB :client[%d] type[%d], level[%d], applied[%d]",
client_indx, ahb_vote->type, ahb_vote->vote.level,
cpas_client->ahb_level);
rc = cam_cpas_util_apply_client_ahb_vote(cpas_hw, cpas_client,
- ahb_vote);
+ ahb_vote, &applied_level);
if (rc)
goto done;
- CPAS_CDBG("AXI client[%d] compressed_bw[%llu], uncompressed_bw[%llu]\n",
+ CAM_DBG(CAM_CPAS,
+ "AXI client[%d] compressed_bw[%llu], uncompressed_bw[%llu]",
client_indx, axi_vote->compressed_bw,
axi_vote->uncompressed_bw);
rc = cam_cpas_util_apply_client_axi_vote(cpas_core,
@@ -833,9 +851,10 @@
goto done;
if (cpas_core->streamon_clients == 0) {
- rc = cam_cpas_soc_enable_resources(&cpas_hw->soc_info);
+ rc = cam_cpas_soc_enable_resources(&cpas_hw->soc_info,
+ applied_level);
if (rc) {
- pr_err("enable_resorce failed, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "enable_resorce failed, rc=%d", rc);
goto done;
}
@@ -844,7 +863,8 @@
if (rc) {
cam_cpas_soc_disable_resources(
&cpas_hw->soc_info);
- pr_err("failed in power_on settings rc=%d\n",
+ CAM_ERR(CAM_CPAS,
+ "failed in power_on settings rc=%d",
rc);
goto done;
}
@@ -855,7 +875,7 @@
cpas_client->started = true;
cpas_core->streamon_clients++;
- CPAS_CDBG("client_indx=%d, streamon_clients=%d\n",
+ CAM_DBG(CAM_CPAS, "client_indx=%d, streamon_clients=%d",
client_indx, cpas_core->streamon_clients);
done:
mutex_unlock(&cpas_core->client_mutex[client_indx]);
@@ -877,12 +897,13 @@
int rc = 0;
if (!hw_priv || !stop_args) {
- pr_err("Invalid arguments %pK %pK\n", hw_priv, stop_args);
+ CAM_ERR(CAM_CPAS, "Invalid arguments %pK %pK",
+ hw_priv, stop_args);
return -EINVAL;
}
if (sizeof(struct cam_cpas_hw_cmd_stop) != arg_size) {
- pr_err("HW_CAPS size mismatch %ld %d\n",
+ CAM_ERR(CAM_CPAS, "HW_CAPS size mismatch %ld %d",
sizeof(struct cam_cpas_hw_cmd_stop), arg_size);
return -EINVAL;
}
@@ -898,11 +919,11 @@
mutex_lock(&cpas_hw->hw_mutex);
mutex_lock(&cpas_core->client_mutex[client_indx]);
- CPAS_CDBG("client_indx=%d, streamon_clients=%d\n",
+ CAM_DBG(CAM_CPAS, "client_indx=%d, streamon_clients=%d",
client_indx, cpas_core->streamon_clients);
if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
- pr_err("Client %d is not started\n", client_indx);
+ CAM_ERR(CAM_CPAS, "Client %d is not started", client_indx);
rc = -EPERM;
goto done;
}
@@ -915,7 +936,8 @@
if (cpas_core->internal_ops.power_off) {
rc = cpas_core->internal_ops.power_off(cpas_hw);
if (rc) {
- pr_err("failed in power_off settings rc=%d\n",
+ CAM_ERR(CAM_CPAS,
+ "failed in power_off settings rc=%d",
rc);
/* Do not return error, passthrough */
}
@@ -923,7 +945,7 @@
rc = cam_cpas_soc_disable_resources(&cpas_hw->soc_info);
if (rc) {
- pr_err("disable_resorce failed, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "disable_resorce failed, rc=%d", rc);
goto done;
}
cpas_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
@@ -932,7 +954,7 @@
ahb_vote.type = CAM_VOTE_ABSOLUTE;
ahb_vote.vote.level = CAM_SUSPEND_VOTE;
rc = cam_cpas_util_apply_client_ahb_vote(cpas_hw, cpas_client,
- &ahb_vote);
+ &ahb_vote, NULL);
if (rc)
goto done;
@@ -955,12 +977,13 @@
int rc = 0;
if (!hw_priv || !init_hw_args) {
- pr_err("Invalid arguments %pK %pK\n", hw_priv, init_hw_args);
+ CAM_ERR(CAM_CPAS, "Invalid arguments %pK %pK",
+ hw_priv, init_hw_args);
return -EINVAL;
}
if (sizeof(struct cam_cpas_hw_caps) != arg_size) {
- pr_err("INIT HW size mismatch %ld %d\n",
+ CAM_ERR(CAM_CPAS, "INIT HW size mismatch %ld %d",
sizeof(struct cam_cpas_hw_caps), arg_size);
return -EINVAL;
}
@@ -987,7 +1010,7 @@
struct cam_cpas_private_soc *soc_private =
(struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private;
- CPAS_CDBG("Register params : identifier=%s, cell_index=%d\n",
+ CAM_DBG(CAM_CPAS, "Register params : identifier=%s, cell_index=%d",
register_params->identifier, register_params->cell_index);
if (soc_private->client_id_based)
@@ -1000,11 +1023,11 @@
mutex_lock(&cpas_hw->hw_mutex);
- rc = cam_cpas_util_get_string_index(soc_private->client_name,
+ rc = cam_common_util_get_string_index(soc_private->client_name,
soc_private->num_clients, client_name, &client_indx);
if (rc || !CAM_CPAS_CLIENT_VALID(client_indx) ||
CAM_CPAS_CLIENT_REGISTERED(cpas_core, client_indx)) {
- pr_err("Invalid Client register : %s %d, %d\n",
+ CAM_ERR(CAM_CPAS, "Invalid Client register : %s %d, %d",
register_params->identifier,
register_params->cell_index, client_indx);
mutex_unlock(&cpas_hw->hw_mutex);
@@ -1020,7 +1043,8 @@
rc = cam_cpas_util_insert_client_to_axi_port(cpas_core, soc_private,
cpas_client, client_indx);
if (rc) {
- pr_err("axi_port_insert failed client_indx=%d, rc=%d\n",
+ CAM_ERR(CAM_CPAS,
+ "axi_port_insert failed client_indx=%d, rc=%d",
client_indx, rc);
kfree(cpas_client);
mutex_unlock(&cpas_hw->hw_mutex);
@@ -1036,7 +1060,7 @@
mutex_unlock(&cpas_hw->hw_mutex);
- CPAS_CDBG("client_indx=%d, registered_clients=%d\n",
+ CAM_DBG(CAM_CPAS, "client_indx=%d, registered_clients=%d",
client_indx, cpas_core->registered_clients);
return 0;
@@ -1056,13 +1080,13 @@
mutex_lock(&cpas_core->client_mutex[client_indx]);
if (!CAM_CPAS_CLIENT_REGISTERED(cpas_core, client_indx)) {
- pr_err("client not registered %d\n", client_indx);
+ CAM_ERR(CAM_CPAS, "client not registered %d", client_indx);
rc = -EPERM;
goto done;
}
if (CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
- pr_err("Client %d is not stopped\n", client_indx);
+ CAM_ERR(CAM_CPAS, "Client %d is not stopped", client_indx);
rc = -EPERM;
goto done;
}
@@ -1070,7 +1094,7 @@
cam_cpas_util_remove_client_from_axi_port(
cpas_core->cpas_client[client_indx]);
- CPAS_CDBG("client_indx=%d, registered_clients=%d\n",
+ CAM_DBG(CAM_CPAS, "client_indx=%d, registered_clients=%d",
client_indx, cpas_core->registered_clients);
kfree(cpas_core->cpas_client[client_indx]);
@@ -1090,12 +1114,13 @@
struct cam_cpas_hw_caps *hw_caps;
if (!hw_priv || !get_hw_cap_args) {
- pr_err("Invalid arguments %pK %pK\n", hw_priv, get_hw_cap_args);
+ CAM_ERR(CAM_CPAS, "Invalid arguments %pK %pK",
+ hw_priv, get_hw_cap_args);
return -EINVAL;
}
if (sizeof(struct cam_cpas_hw_caps) != arg_size) {
- pr_err("HW_CAPS size mismatch %ld %d\n",
+ CAM_ERR(CAM_CPAS, "HW_CAPS size mismatch %ld %d",
sizeof(struct cam_cpas_hw_caps), arg_size);
return -EINVAL;
}
@@ -1117,8 +1142,8 @@
if (!hw_priv || !cmd_args ||
(cmd_type >= CAM_CPAS_HW_CMD_INVALID)) {
- pr_err("Invalid arguments %pK %pK %d\n", hw_priv, cmd_args,
- cmd_type);
+ CAM_ERR(CAM_CPAS, "Invalid arguments %pK %pK %d",
+ hw_priv, cmd_args, cmd_type);
return -EINVAL;
}
@@ -1127,7 +1152,7 @@
struct cam_cpas_register_params *register_params;
if (sizeof(struct cam_cpas_register_params) != arg_size) {
- pr_err("cmd_type %d, size mismatch %d\n",
+ CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d",
cmd_type, arg_size);
break;
}
@@ -1140,7 +1165,7 @@
uint32_t *client_handle;
if (sizeof(uint32_t) != arg_size) {
- pr_err("cmd_type %d, size mismatch %d\n",
+ CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d",
cmd_type, arg_size);
break;
}
@@ -1154,7 +1179,7 @@
if (sizeof(struct cam_cpas_hw_cmd_reg_read_write) !=
arg_size) {
- pr_err("cmd_type %d, size mismatch %d\n",
+ CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d",
cmd_type, arg_size);
break;
}
@@ -1171,7 +1196,7 @@
if (sizeof(struct cam_cpas_hw_cmd_reg_read_write) !=
arg_size) {
- pr_err("cmd_type %d, size mismatch %d\n",
+ CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d",
cmd_type, arg_size);
break;
}
@@ -1188,7 +1213,7 @@
struct cam_cpas_hw_cmd_ahb_vote *cmd_ahb_vote;
if (sizeof(struct cam_cpas_hw_cmd_ahb_vote) != arg_size) {
- pr_err("cmd_type %d, size mismatch %d\n",
+ CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d",
cmd_type, arg_size);
break;
}
@@ -1202,7 +1227,7 @@
struct cam_cpas_hw_cmd_axi_vote *cmd_axi_vote;
if (sizeof(struct cam_cpas_hw_cmd_axi_vote) != arg_size) {
- pr_err("cmd_type %d, size mismatch %d\n",
+ CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d",
cmd_type, arg_size);
break;
}
@@ -1213,7 +1238,7 @@
break;
}
default:
- pr_err("CPAS HW command not valid =%d\n", cmd_type);
+ CAM_ERR(CAM_CPAS, "CPAS HW command not valid =%d", cmd_type);
break;
}
@@ -1259,7 +1284,7 @@
rc = of_property_read_string_index(of_node, "arch-compat", 0,
(const char **)&compat_str);
if (rc) {
- pr_err("failed to get arch-compat rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "failed to get arch-compat rc=%d", rc);
return -EINVAL;
}
@@ -1270,7 +1295,7 @@
hw_intf->hw_type = CAM_HW_CPASTOP;
rc = cam_cpastop_get_internal_ops(internal_ops);
} else {
- pr_err("arch-compat %s not supported\n", compat_str);
+ CAM_ERR(CAM_CPAS, "arch-compat %s not supported", compat_str);
rc = -EINVAL;
}
@@ -1360,7 +1385,7 @@
rc = cam_cpas_util_client_setup(cpas_hw);
if (rc) {
- pr_err("failed in client setup, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "failed in client setup, rc=%d", rc);
goto deinit_platform_res;
}
@@ -1368,13 +1393,13 @@
cpas_hw->soc_info.pdev->dev.of_node,
&cpas_core->ahb_bus_client);
if (rc) {
- pr_err("failed in ahb setup, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "failed in ahb setup, rc=%d", rc);
goto client_cleanup;
}
rc = cam_cpas_util_axi_setup(cpas_core, &cpas_hw->soc_info);
if (rc) {
- pr_err("failed in axi setup, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "failed in axi setup, rc=%d", rc);
goto ahb_cleanup;
}
@@ -1383,20 +1408,20 @@
if (rc)
goto axi_cleanup;
- rc = cam_cpas_soc_enable_resources(&cpas_hw->soc_info);
+ rc = cam_cpas_soc_enable_resources(&cpas_hw->soc_info, CAM_SVS_VOTE);
if (rc) {
- pr_err("failed in soc_enable_resources, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "failed in soc_enable_resources, rc=%d", rc);
goto remove_default_vote;
}
if (internal_ops->get_hw_info) {
rc = internal_ops->get_hw_info(cpas_hw, &cpas_core->hw_caps);
if (rc) {
- pr_err("failed in get_hw_info, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "failed in get_hw_info, rc=%d", rc);
goto disable_soc_res;
}
} else {
- pr_err("Invalid get_hw_info\n");
+ CAM_ERR(CAM_CPAS, "Invalid get_hw_info");
goto disable_soc_res;
}
@@ -1407,7 +1432,7 @@
rc = cam_cpas_soc_disable_resources(&cpas_hw->soc_info);
if (rc) {
- pr_err("failed in soc_disable_resources, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "failed in soc_disable_resources, rc=%d", rc);
goto remove_default_vote;
}
@@ -1438,7 +1463,7 @@
kfree(cpas_core);
kfree(cpas_hw);
kfree(cpas_hw_intf);
- pr_err("failed in hw probe\n");
+ CAM_ERR(CAM_CPAS, "failed in hw probe");
return rc;
}
@@ -1448,7 +1473,7 @@
struct cam_cpas *cpas_core;
if (!cpas_hw_intf) {
- pr_err("cpas interface not initialized\n");
+ CAM_ERR(CAM_CPAS, "cpas interface not initialized");
return -EINVAL;
}
@@ -1456,7 +1481,7 @@
cpas_core = (struct cam_cpas *)cpas_hw->core_info;
if (cpas_hw->hw_state == CAM_HW_STATE_POWER_UP) {
- pr_err("cpas hw is in power up state\n");
+ CAM_ERR(CAM_CPAS, "cpas hw is in power up state");
return -EINVAL;
}
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
index 6d4fafe..bbc99b7 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
@@ -15,6 +15,7 @@
#include "cam_cpas_api.h"
#include "cam_cpas_hw_intf.h"
+#include "cam_common_util.h"
#define CPAS_MAX_CLIENTS 20
#define CAM_CPAS_INFLIGHT_WORKS 5
@@ -192,7 +193,5 @@
int cam_cpas_util_reg_update(struct cam_hw_info *cpas_hw,
enum cam_cpas_reg_base reg_base, struct cam_cpas_reg *reg_info);
-int cam_cpas_util_get_string_index(const char **strings,
- uint32_t num_strings, char *matching_string, uint32_t *index);
#endif /* _CAM_CPAS_HW_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw_intf.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw_intf.h
index 9ee5a43..fa4018e 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw_intf.h
@@ -18,16 +18,7 @@
#include "cam_cpas_api.h"
#include "cam_hw.h"
#include "cam_hw_intf.h"
-
-#ifdef CONFIG_CAM_CPAS_DBG
-#define CPAS_CDBG(fmt, args...) pr_err(fmt, ##args)
-#else
-#define CPAS_CDBG(fmt, args...) pr_debug(fmt, ##args)
-#endif
-#undef pr_fmt
-#define pr_fmt(fmt) "CAM-CPAS %s:%d " fmt, __func__, __LINE__
-
-#define BITS_MASK_SHIFT(x, mask, shift) (((x) & (mask)) >> shift)
+#include "cam_debug_util.h"
/* Number of times to retry while polling */
#define CAM_CPAS_POLL_RETRY_CNT 5
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
index aba0caa..3846784 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
@@ -55,12 +55,12 @@
struct cam_hw_version *cpas_version)
{
if (!CAM_CPAS_INTF_INITIALIZED()) {
- pr_err("cpas intf not initialized\n");
+ CAM_ERR(CAM_CPAS, "cpas intf not initialized");
return -ENODEV;
}
if (!camera_family || !camera_version || !cpas_version) {
- pr_err("invalid input %pK %pK %pK\n", camera_family,
+ CAM_ERR(CAM_CPAS, "invalid input %pK %pK %pK", camera_family,
camera_version, cpas_version);
return -EINVAL;
}
@@ -80,7 +80,7 @@
int rc;
if (!CAM_CPAS_INTF_INITIALIZED()) {
- pr_err("cpas intf not initialized\n");
+ CAM_ERR(CAM_CPAS, "cpas intf not initialized");
return -ENODEV;
}
@@ -98,9 +98,9 @@
CAM_CPAS_HW_CMD_REG_WRITE, &cmd_reg_write,
sizeof(struct cam_cpas_hw_cmd_reg_read_write));
if (rc)
- pr_err("Failed in process_cmd, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc);
} else {
- pr_err("Invalid process_cmd ops\n");
+ CAM_ERR(CAM_CPAS, "Invalid process_cmd ops");
rc = -EINVAL;
}
@@ -115,12 +115,12 @@
int rc;
if (!CAM_CPAS_INTF_INITIALIZED()) {
- pr_err("cpas intf not initialized\n");
+ CAM_ERR(CAM_CPAS, "cpas intf not initialized");
return -ENODEV;
}
if (!value) {
- pr_err("Invalid arg value\n");
+ CAM_ERR(CAM_CPAS, "Invalid arg value");
return -EINVAL;
}
@@ -138,13 +138,13 @@
CAM_CPAS_HW_CMD_REG_READ, &cmd_reg_read,
sizeof(struct cam_cpas_hw_cmd_reg_read_write));
if (rc) {
- pr_err("Failed in process_cmd, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc);
return rc;
}
*value = cmd_reg_read.value;
} else {
- pr_err("Invalid process_cmd ops\n");
+ CAM_ERR(CAM_CPAS, "Invalid process_cmd ops");
rc = -EINVAL;
}
@@ -158,7 +158,7 @@
int rc;
if (!CAM_CPAS_INTF_INITIALIZED()) {
- pr_err("cpas intf not initialized\n");
+ CAM_ERR(CAM_CPAS, "cpas intf not initialized");
return -ENODEV;
}
@@ -173,9 +173,9 @@
CAM_CPAS_HW_CMD_AXI_VOTE, &cmd_axi_vote,
sizeof(struct cam_cpas_hw_cmd_axi_vote));
if (rc)
- pr_err("Failed in process_cmd, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc);
} else {
- pr_err("Invalid process_cmd ops\n");
+ CAM_ERR(CAM_CPAS, "Invalid process_cmd ops");
rc = -EINVAL;
}
@@ -189,7 +189,7 @@
int rc;
if (!CAM_CPAS_INTF_INITIALIZED()) {
- pr_err("cpas intf not initialized\n");
+ CAM_ERR(CAM_CPAS, "cpas intf not initialized");
return -ENODEV;
}
@@ -204,9 +204,9 @@
CAM_CPAS_HW_CMD_AHB_VOTE, &cmd_ahb_vote,
sizeof(struct cam_cpas_hw_cmd_ahb_vote));
if (rc)
- pr_err("Failed in process_cmd, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc);
} else {
- pr_err("Invalid process_cmd ops\n");
+ CAM_ERR(CAM_CPAS, "Invalid process_cmd ops");
rc = -EINVAL;
}
@@ -219,7 +219,7 @@
int rc;
if (!CAM_CPAS_INTF_INITIALIZED()) {
- pr_err("cpas intf not initialized\n");
+ CAM_ERR(CAM_CPAS, "cpas intf not initialized");
return -ENODEV;
}
@@ -232,9 +232,9 @@
g_cpas_intf->hw_intf->hw_priv, &cmd_hw_stop,
sizeof(struct cam_cpas_hw_cmd_stop));
if (rc)
- pr_err("Failed in stop, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "Failed in stop, rc=%d", rc);
} else {
- pr_err("Invalid stop ops\n");
+ CAM_ERR(CAM_CPAS, "Invalid stop ops");
rc = -EINVAL;
}
@@ -248,7 +248,7 @@
int rc;
if (!CAM_CPAS_INTF_INITIALIZED()) {
- pr_err("cpas intf not initialized\n");
+ CAM_ERR(CAM_CPAS, "cpas intf not initialized");
return -ENODEV;
}
@@ -263,9 +263,9 @@
g_cpas_intf->hw_intf->hw_priv, &cmd_hw_start,
sizeof(struct cam_cpas_hw_cmd_start));
if (rc)
- pr_err("Failed in start, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "Failed in start, rc=%d", rc);
} else {
- pr_err("Invalid start ops\n");
+ CAM_ERR(CAM_CPAS, "Invalid start ops");
rc = -EINVAL;
}
@@ -278,7 +278,7 @@
int rc;
if (!CAM_CPAS_INTF_INITIALIZED()) {
- pr_err("cpas intf not initialized\n");
+ CAM_ERR(CAM_CPAS, "cpas intf not initialized");
return -ENODEV;
}
@@ -288,9 +288,9 @@
CAM_CPAS_HW_CMD_UNREGISTER_CLIENT,
&client_handle, sizeof(uint32_t));
if (rc)
- pr_err("Failed in process_cmd, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc);
} else {
- pr_err("Invalid process_cmd ops\n");
+ CAM_ERR(CAM_CPAS, "Invalid process_cmd ops");
rc = -EINVAL;
}
@@ -304,7 +304,7 @@
int rc;
if (!CAM_CPAS_INTF_INITIALIZED()) {
- pr_err("cpas intf not initialized\n");
+ CAM_ERR(CAM_CPAS, "cpas intf not initialized");
return -ENODEV;
}
@@ -314,9 +314,9 @@
CAM_CPAS_HW_CMD_REGISTER_CLIENT, register_params,
sizeof(struct cam_cpas_register_params));
if (rc)
- pr_err("Failed in process_cmd, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc);
} else {
- pr_err("Invalid process_cmd ops\n");
+ CAM_ERR(CAM_CPAS, "Invalid process_cmd ops");
rc = -EINVAL;
}
@@ -330,7 +330,7 @@
int rc = 0;
if (!cmd) {
- pr_err("Invalid input cmd\n");
+ CAM_ERR(CAM_CPAS, "Invalid input cmd");
return -EINVAL;
}
@@ -341,7 +341,8 @@
rc = copy_from_user(&query, (void __user *) cmd->handle,
sizeof(query));
if (rc) {
- pr_err("Failed in copy from user, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "Failed in copy from user, rc=%d",
+ rc);
break;
}
@@ -353,14 +354,14 @@
rc = copy_to_user((void __user *) cmd->handle, &query,
sizeof(query));
if (rc)
- pr_err("Failed in copy to user, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "Failed in copy to user, rc=%d", rc);
break;
}
case CAM_SD_SHUTDOWN:
break;
default:
- pr_err("Unknown op code %d for CPAS\n", cmd->op_code);
+ CAM_ERR(CAM_CPAS, "Unknown op code %d for CPAS", cmd->op_code);
rc = -EINVAL;
break;
}
@@ -374,13 +375,13 @@
struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);
if (!cpas_intf || !cpas_intf->probe_done) {
- pr_err("CPAS not initialized\n");
+ CAM_ERR(CAM_CPAS, "CPAS not initialized");
return -ENODEV;
}
mutex_lock(&cpas_intf->intf_lock);
cpas_intf->open_cnt++;
- CPAS_CDBG("CPAS Subdev open count %d\n", cpas_intf->open_cnt);
+ CAM_DBG(CAM_CPAS, "CPAS Subdev open count %d", cpas_intf->open_cnt);
mutex_unlock(&cpas_intf->intf_lock);
return 0;
@@ -392,13 +393,13 @@
struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);
if (!cpas_intf || !cpas_intf->probe_done) {
- pr_err("CPAS not initialized\n");
+ CAM_ERR(CAM_CPAS, "CPAS not initialized");
return -ENODEV;
}
mutex_lock(&cpas_intf->intf_lock);
cpas_intf->open_cnt--;
- CPAS_CDBG("CPAS Subdev close count %d\n", cpas_intf->open_cnt);
+ CAM_DBG(CAM_CPAS, "CPAS Subdev close count %d", cpas_intf->open_cnt);
mutex_unlock(&cpas_intf->intf_lock);
return 0;
@@ -411,7 +412,7 @@
struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);
if (!cpas_intf || !cpas_intf->probe_done) {
- pr_err("CPAS not initialized\n");
+ CAM_ERR(CAM_CPAS, "CPAS not initialized");
return -ENODEV;
}
@@ -420,7 +421,7 @@
rc = cam_cpas_subdev_cmd(cpas_intf, (struct cam_control *) arg);
break;
default:
- pr_err("Invalid command %d for CPAS!\n", cmd);
+ CAM_ERR(CAM_CPAS, "Invalid command %d for CPAS!", cmd);
rc = -EINVAL;
break;
}
@@ -437,13 +438,13 @@
struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);
if (!cpas_intf || !cpas_intf->probe_done) {
- pr_err("CPAS not initialized\n");
+ CAM_ERR(CAM_CPAS, "CPAS not initialized");
return -ENODEV;
}
if (copy_from_user(&cmd_data, (void __user *)arg,
sizeof(cmd_data))) {
- pr_err("Failed to copy from user_ptr=%pK size=%zu\n",
+ CAM_ERR(CAM_CPAS, "Failed to copy from user_ptr=%pK size=%zu",
(void __user *)arg, sizeof(cmd_data));
return -EFAULT;
}
@@ -453,7 +454,7 @@
rc = cam_cpas_subdev_cmd(cpas_intf, &cmd_data);
break;
default:
- pr_err("Invalid command %d for CPAS!\n", cmd);
+ CAM_ERR(CAM_CPAS, "Invalid command %d for CPAS!", cmd);
rc = -EINVAL;
break;
}
@@ -461,7 +462,8 @@
if (!rc) {
if (copy_to_user((void __user *)arg, &cmd_data,
sizeof(cmd_data))) {
- pr_err("Failed to copy to user_ptr=%pK size=%zu\n",
+ CAM_ERR(CAM_CPAS,
+ "Failed to copy to user_ptr=%pK size=%zu",
(void __user *)arg, sizeof(cmd_data));
rc = -EFAULT;
}
@@ -508,7 +510,8 @@
rc = cam_register_subdev(subdev);
if (rc) {
- pr_err("failed register subdev: %s!\n", CAM_CPAS_DEV_NAME);
+ CAM_ERR(CAM_CPAS, "failed register subdev: %s!",
+ CAM_CPAS_DEV_NAME);
return rc;
}
@@ -523,7 +526,7 @@
int rc;
if (g_cpas_intf) {
- pr_err("cpas dev proble already done\n");
+ CAM_ERR(CAM_CPAS, "cpas dev probe already done");
return -EALREADY;
}
@@ -536,7 +539,7 @@
rc = cam_cpas_hw_probe(pdev, &g_cpas_intf->hw_intf);
if (rc || (g_cpas_intf->hw_intf == NULL)) {
- pr_err("Failed in hw probe, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "Failed in hw probe, rc=%d", rc);
goto error_destroy_mem;
}
@@ -546,11 +549,11 @@
rc = hw_intf->hw_ops.get_hw_caps(hw_intf->hw_priv,
hw_caps, sizeof(struct cam_cpas_hw_caps));
if (rc) {
- pr_err("Failed in get_hw_caps, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "Failed in get_hw_caps, rc=%d", rc);
goto error_hw_remove;
}
} else {
- pr_err("Invalid get_hw_caps ops\n");
+ CAM_ERR(CAM_CPAS, "Invalid get_hw_caps ops");
goto error_hw_remove;
}
@@ -559,7 +562,8 @@
goto error_hw_remove;
g_cpas_intf->probe_done = true;
- CPAS_CDBG("CPAS INTF Probe success %d, %d.%d.%d, %d.%d.%d, 0x%x\n",
+ CAM_DBG(CAM_CPAS,
+ "CPAS INTF Probe success %d, %d.%d.%d, %d.%d.%d, 0x%x",
hw_caps->camera_family, hw_caps->camera_version.major,
hw_caps->camera_version.minor, hw_caps->camera_version.incr,
hw_caps->cpas_version.major, hw_caps->cpas_version.minor,
@@ -573,14 +577,14 @@
mutex_destroy(&g_cpas_intf->intf_lock);
kfree(g_cpas_intf);
g_cpas_intf = NULL;
- pr_err("CPAS probe failed\n");
+ CAM_ERR(CAM_CPAS, "CPAS probe failed");
return rc;
}
static int cam_cpas_dev_remove(struct platform_device *dev)
{
if (!CAM_CPAS_INTF_INITIALIZED()) {
- pr_err("cpas intf not initialized\n");
+ CAM_ERR(CAM_CPAS, "cpas intf not initialized");
return -ENODEV;
}
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
index 0c71ece..f85f461 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
@@ -22,26 +22,6 @@
#include "cam_cpas_hw.h"
#include "cam_cpas_soc.h"
-static int cam_cpas_get_vote_level_from_string(const char *string,
- enum cam_vote_level *vote_level)
-{
- if (!vote_level || !string)
- return -EINVAL;
-
- if (strnstr("suspend", string, strlen(string)))
- *vote_level = CAM_SUSPEND_VOTE;
- else if (strnstr("svs", string, strlen(string)))
- *vote_level = CAM_SVS_VOTE;
- else if (strnstr("nominal", string, strlen(string)))
- *vote_level = CAM_NOMINAL_VOTE;
- else if (strnstr("turbo", string, strlen(string)))
- *vote_level = CAM_TURBO_VOTE;
- else
- *vote_level = CAM_SVS_VOTE;
-
- return 0;
-}
-
int cam_cpas_get_custom_dt_info(struct platform_device *pdev,
struct cam_cpas_private_soc *soc_private)
{
@@ -49,7 +29,8 @@
int count = 0, i = 0, rc = 0;
if (!soc_private || !pdev) {
- pr_err("invalid input arg %pK %pK\n", soc_private, pdev);
+ CAM_ERR(CAM_CPAS, "invalid input arg %pK %pK",
+ soc_private, pdev);
return -EINVAL;
}
@@ -58,7 +39,8 @@
rc = of_property_read_string_index(of_node, "arch-compat", 0,
(const char **)&soc_private->arch_compat);
if (rc) {
- pr_err("device %s failed to read arch-compat\n", pdev->name);
+ CAM_ERR(CAM_CPAS, "device %s failed to read arch-compat",
+ pdev->name);
return rc;
}
@@ -67,12 +49,13 @@
count = of_property_count_strings(of_node, "client-names");
if (count <= 0) {
- pr_err("no client-names found\n");
+ CAM_ERR(CAM_CPAS, "no client-names found");
count = 0;
return -EINVAL;
}
soc_private->num_clients = count;
- CPAS_CDBG("arch-compat=%s, client_id_based = %d, num_clients=%d\n",
+ CAM_DBG(CAM_CPAS,
+ "arch-compat=%s, client_id_based = %d, num_clients=%d",
soc_private->arch_compat, soc_private->client_id_based,
soc_private->num_clients);
@@ -80,15 +63,16 @@
rc = of_property_read_string_index(of_node,
"client-names", i, &soc_private->client_name[i]);
if (rc) {
- pr_err("no client-name at cnt=%d\n", i);
+ CAM_ERR(CAM_CPAS, "no client-name at cnt=%d", i);
return -ENODEV;
}
- CPAS_CDBG("Client[%d] : %s\n", i, soc_private->client_name[i]);
+ CAM_DBG(CAM_CPAS, "Client[%d] : %s", i,
+ soc_private->client_name[i]);
}
count = of_property_count_strings(of_node, "client-axi-port-names");
if ((count <= 0) || (count != soc_private->num_clients)) {
- pr_err("incorrect client-axi-port-names info %d %d\n",
+ CAM_ERR(CAM_CPAS, "incorrect client-axi-port-names info %d %d",
count, soc_private->num_clients);
count = 0;
return -EINVAL;
@@ -99,10 +83,10 @@
"client-axi-port-names", i,
&soc_private->client_axi_port_name[i]);
if (rc) {
- pr_err("no client-name at cnt=%d\n", i);
+ CAM_ERR(CAM_CPAS, "no client-name at cnt=%d", i);
return -ENODEV;
}
- CPAS_CDBG("Client AXI Port[%d] : %s\n", i,
+ CAM_DBG(CAM_CPAS, "Client AXI Port[%d] : %s", i,
soc_private->client_axi_port_name[i]);
}
@@ -119,25 +103,29 @@
rc = of_property_read_u32_index(of_node, "vdd-corners",
i, &soc_private->vdd_ahb[i].vdd_corner);
if (rc) {
- pr_err("vdd-corners failed at index=%d\n", i);
+ CAM_ERR(CAM_CPAS,
+ "vdd-corners failed at index=%d", i);
return -ENODEV;
}
rc = of_property_read_string_index(of_node,
"vdd-corner-ahb-mapping", i, &ahb_string);
if (rc) {
- pr_err("no ahb-mapping at index=%d\n", i);
+ CAM_ERR(CAM_CPAS,
+ "no ahb-mapping at index=%d", i);
return -ENODEV;
}
- rc = cam_cpas_get_vote_level_from_string(ahb_string,
+ rc = cam_soc_util_get_level_from_string(ahb_string,
&soc_private->vdd_ahb[i].ahb_level);
if (rc) {
- pr_err("invalid ahb-string at index=%d\n", i);
+ CAM_ERR(CAM_CPAS,
+ "invalid ahb-string at index=%d", i);
return -EINVAL;
}
- CPAS_CDBG("Vdd-AHB mapping [%d] : [%d] [%s] [%d]\n", i,
+ CAM_DBG(CAM_CPAS,
+ "Vdd-AHB mapping [%d] : [%d] [%s] [%d]", i,
soc_private->vdd_ahb[i].vdd_corner,
ahb_string, soc_private->vdd_ahb[i].ahb_level);
}
@@ -155,19 +143,20 @@
rc = cam_soc_util_get_dt_properties(soc_info);
if (rc) {
- pr_err("failed in get_dt_properties, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "failed in get_dt_properties, rc=%d", rc);
return rc;
}
if (soc_info->irq_line && !irq_handler) {
- pr_err("Invalid IRQ handler\n");
+ CAM_ERR(CAM_CPAS, "Invalid IRQ handler");
return -EINVAL;
}
rc = cam_soc_util_request_platform_resource(soc_info, irq_handler,
irq_data);
if (rc) {
- pr_err("failed in request_platform_resource, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "failed in request_platform_resource, rc=%d",
+ rc);
return rc;
}
@@ -180,7 +169,7 @@
rc = cam_cpas_get_custom_dt_info(soc_info->pdev, soc_info->soc_private);
if (rc) {
- pr_err("failed in get_custom_info, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "failed in get_custom_info, rc=%d", rc);
goto free_soc_private;
}
@@ -199,7 +188,7 @@
rc = cam_soc_util_release_platform_resource(soc_info);
if (rc)
- pr_err("release platform failed, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "release platform failed, rc=%d", rc);
kfree(soc_info->soc_private);
soc_info->soc_private = NULL;
@@ -207,13 +196,15 @@
return rc;
}
-int cam_cpas_soc_enable_resources(struct cam_hw_soc_info *soc_info)
+int cam_cpas_soc_enable_resources(struct cam_hw_soc_info *soc_info,
+ enum cam_vote_level default_level)
{
int rc = 0;
- rc = cam_soc_util_enable_platform_resource(soc_info, true, true);
+ rc = cam_soc_util_enable_platform_resource(soc_info, true,
+ default_level, true);
if (rc)
- pr_err("enable platform resource failed, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "enable platform resource failed, rc=%d", rc);
return rc;
}
@@ -224,7 +215,7 @@
rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
if (rc)
- pr_err("disable platform failed, rc=%d\n", rc);
+ CAM_ERR(CAM_CPAS, "disable platform failed, rc=%d", rc);
return rc;
}
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
index d3dfbbd..b2ad513 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
@@ -59,6 +59,7 @@
int cam_cpas_soc_init_resources(struct cam_hw_soc_info *soc_info,
irq_handler_t vfe_irq_handler, void *irq_data);
int cam_cpas_soc_deinit_resources(struct cam_hw_soc_info *soc_info);
-int cam_cpas_soc_enable_resources(struct cam_hw_soc_info *soc_info);
+int cam_cpas_soc_enable_resources(struct cam_hw_soc_info *soc_info,
+ enum cam_vote_level default_level);
int cam_cpas_soc_disable_resources(struct cam_hw_soc_info *soc_info);
#endif /* _CAM_CPAS_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/camss_top/cam_camsstop_hw.c b/drivers/media/platform/msm/camera/cam_cpas/camss_top/cam_camsstop_hw.c
index 95e26c5..0669070 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/camss_top/cam_camsstop_hw.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/camss_top/cam_camsstop_hw.c
@@ -29,13 +29,13 @@
reg_value = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x0);
hw_caps->camera_version.major =
- BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1c);
+ CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1c);
hw_caps->camera_version.minor =
- BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+ CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
hw_caps->camera_version.incr =
- BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+ CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
- CPAS_CDBG("Family %d, version %d.%d.%d\n",
+ CAM_DBG(CAM_FD, "Family %d, version %d.%d.%d",
hw_caps->camera_family, hw_caps->camera_version.major,
hw_caps->camera_version.minor, hw_caps->camera_version.incr);
@@ -49,21 +49,22 @@
int rc;
if (num_reg_map > CAM_CPAS_REG_MAX) {
- pr_err("invalid num_reg_map=%d\n", num_reg_map);
+ CAM_ERR(CAM_CPAS, "invalid num_reg_map=%d", num_reg_map);
return -EINVAL;
}
if (soc_info->num_mem_block > CAM_SOC_MAX_BLOCK) {
- pr_err("invalid num_mem_block=%d\n", soc_info->num_mem_block);
+ CAM_ERR(CAM_CPAS, "invalid num_mem_block=%d",
+ soc_info->num_mem_block);
return -EINVAL;
}
- rc = cam_cpas_util_get_string_index(soc_info->mem_block_name,
+ rc = cam_common_util_get_string_index(soc_info->mem_block_name,
soc_info->num_mem_block, "cam_camss", &index);
if ((rc == 0) && (index < num_reg_map)) {
regbase_index[CAM_CPAS_REG_CAMSS] = index;
} else {
- pr_err("regbase not found for CAM_CPAS_REG_CAMSS\n");
+ CAM_ERR(CAM_CPAS, "regbase not found for CAM_CPAS_REG_CAMSS");
return -EINVAL;
}
@@ -73,7 +74,7 @@
int cam_camsstop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops)
{
if (!internal_ops) {
- pr_err("invalid NULL param\n");
+ CAM_ERR(CAM_CPAS, "invalid NULL param");
return -EINVAL;
}
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
index b901410..d9133b9 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
@@ -20,6 +20,7 @@
#include "cam_io_util.h"
#include "cam_cpas_soc.h"
#include "cpastop100.h"
+#include "cpastop_v170_110.h"
struct cam_camnoc_info *camnoc_info;
@@ -38,24 +39,24 @@
reg_value = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x0);
hw_caps->camera_version.major =
- BITS_MASK_SHIFT(reg_value, 0xff0000, 0x10);
+ CAM_BITS_MASK_SHIFT(reg_value, 0xff0000, 0x10);
hw_caps->camera_version.minor =
- BITS_MASK_SHIFT(reg_value, 0xff00, 0x8);
+ CAM_BITS_MASK_SHIFT(reg_value, 0xff00, 0x8);
hw_caps->camera_version.incr =
- BITS_MASK_SHIFT(reg_value, 0xff, 0x0);
+ CAM_BITS_MASK_SHIFT(reg_value, 0xff, 0x0);
reg_value = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x4);
hw_caps->cpas_version.major =
- BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1c);
+ CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1c);
hw_caps->cpas_version.minor =
- BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+ CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
hw_caps->cpas_version.incr =
- BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+ CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
reg_value = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x8);
hw_caps->camera_capability = reg_value;
- CPAS_CDBG("Family %d, version %d.%d.%d, cpas %d.%d.%d, cap 0x%x\n",
+ CAM_DBG(CAM_FD, "Family %d, version %d.%d.%d, cpas %d.%d.%d, cap 0x%x",
hw_caps->camera_family, hw_caps->camera_version.major,
hw_caps->camera_version.minor, hw_caps->camera_version.incr,
hw_caps->cpas_version.major, hw_caps->cpas_version.minor,
@@ -71,31 +72,32 @@
int rc;
if (num_reg_map > CAM_CPAS_REG_MAX) {
- pr_err("invalid num_reg_map=%d\n", num_reg_map);
+ CAM_ERR(CAM_CPAS, "invalid num_reg_map=%d", num_reg_map);
return -EINVAL;
}
if (soc_info->num_mem_block > CAM_SOC_MAX_BLOCK) {
- pr_err("invalid num_mem_block=%d\n", soc_info->num_mem_block);
+ CAM_ERR(CAM_CPAS, "invalid num_mem_block=%d",
+ soc_info->num_mem_block);
return -EINVAL;
}
- rc = cam_cpas_util_get_string_index(soc_info->mem_block_name,
+ rc = cam_common_util_get_string_index(soc_info->mem_block_name,
soc_info->num_mem_block, "cam_cpas_top", &index);
if ((rc == 0) && (index < num_reg_map)) {
regbase_index[CAM_CPAS_REG_CPASTOP] = index;
} else {
- pr_err("regbase not found for CPASTOP, rc=%d, %d %d\n",
+ CAM_ERR(CAM_CPAS, "regbase not found for CPASTOP, rc=%d, %d %d",
rc, index, num_reg_map);
return -EINVAL;
}
- rc = cam_cpas_util_get_string_index(soc_info->mem_block_name,
+ rc = cam_common_util_get_string_index(soc_info->mem_block_name,
soc_info->num_mem_block, "cam_camnoc", &index);
if ((rc == 0) && (index < num_reg_map)) {
regbase_index[CAM_CPAS_REG_CAMNOC] = index;
} else {
- pr_err("regbase not found for CAMNOC, rc=%d, %d %d\n",
+ CAM_ERR(CAM_CPAS, "regbase not found for CAMNOC, rc=%d, %d %d",
rc, index, num_reg_map);
return -EINVAL;
}
@@ -124,7 +126,8 @@
reg_value[3] = cam_io_r_mb(
soc_info->reg_map[camnoc_index].mem_base +
camnoc_info->error_logger[i + 3]);
- pr_err("offset[0x%x] values [0x%x] [0x%x] [0x%x] [0x%x]\n",
+ CAM_ERR(CAM_CPAS,
+ "offset[0x%x] values [0x%x] [0x%x] [0x%x] [0x%x]",
camnoc_info->error_logger[i], reg_value[0],
reg_value[1], reg_value[2], reg_value[3]);
}
@@ -139,7 +142,7 @@
reg_value[2] = cam_io_r_mb(
soc_info->reg_map[camnoc_index].mem_base +
camnoc_info->error_logger[i + 2]);
- pr_err("offset[0x%x] values [0x%x] [0x%x] [0x%x]\n",
+ CAM_ERR(CAM_CPAS, "offset[0x%x] values [0x%x] [0x%x] [0x%x]",
camnoc_info->error_logger[i], reg_value[0],
reg_value[1], reg_value[2]);
i = i + 3;
@@ -152,7 +155,7 @@
reg_value[1] = cam_io_r_mb(
soc_info->reg_map[camnoc_index].mem_base +
camnoc_info->error_logger[i + 1]);
- pr_err("offset[0x%x] values [0x%x] [0x%x]\n",
+ CAM_ERR(CAM_CPAS, "offset[0x%x] values [0x%x] [0x%x]",
camnoc_info->error_logger[i], reg_value[0],
reg_value[1]);
i = i + 2;
@@ -162,7 +165,7 @@
reg_value[0] = cam_io_r_mb(
soc_info->reg_map[camnoc_index].mem_base +
camnoc_info->error_logger[i]);
- pr_err("offset[0x%x] values [0x%x]\n",
+ CAM_ERR(CAM_CPAS, "offset[0x%x] values [0x%x]",
camnoc_info->error_logger[i], reg_value[0]);
}
@@ -178,7 +181,8 @@
reg_value = cam_io_r_mb(soc_info->reg_map[camnoc_index].mem_base +
camnoc_info->irq_err[i].err_status.offset);
- pr_err("Dumping ubwc error status [%d]: offset[0x%x] value[0x%x]\n",
+ CAM_ERR(CAM_CPAS,
+ "Dumping ubwc error status [%d]: offset[0x%x] value[0x%x]",
i, camnoc_info->irq_err[i].err_status.offset, reg_value);
return reg_value;
@@ -186,7 +190,7 @@
static int cam_cpastop_handle_ahb_timeout_err(struct cam_hw_info *cpas_hw)
{
- pr_err("ahb timout error\n");
+ CAM_ERR(CAM_CPAS, "ahb timeout error");
return 0;
}
@@ -229,7 +233,8 @@
int i;
struct cam_cpas_client *cpas_client;
- CPAS_CDBG("Notify CB : num_clients=%d, registered=%d, started=%d\n",
+ CAM_DBG(CAM_CPAS,
+ "Notify CB : num_clients=%d, registered=%d, started=%d",
cpas_core->num_clients, cpas_core->registered_clients,
cpas_core->streamon_clients);
@@ -237,7 +242,8 @@
if (CAM_CPAS_CLIENT_STARTED(cpas_core, i)) {
cpas_client = cpas_core->cpas_client[i];
if (cpas_client->data.cam_cpas_client_cb) {
- CPAS_CDBG("Calling client CB %d : %d 0x%x\n",
+ CAM_DBG(CAM_CPAS,
+ "Calling client CB %d : %d 0x%x",
i, irq_type, irq_data);
cpas_client->data.cam_cpas_client_cb(
cpas_client->data.client_handle,
@@ -261,7 +267,7 @@
payload = container_of(work, struct cam_cpas_work_payload, work);
if (!payload) {
- pr_err("NULL payload");
+ CAM_ERR(CAM_CPAS, "NULL payload");
return;
}
@@ -273,7 +279,7 @@
if ((payload->irq_status & camnoc_info->irq_err[i].sbm_port) &&
(camnoc_info->irq_err[i].enable)) {
irq_type = camnoc_info->irq_err[i].irq_type;
- pr_err("Error occurred, type=%d\n", irq_type);
+ CAM_ERR(CAM_CPAS, "Error occurred, type=%d", irq_type);
irq_data = 0;
switch (irq_type) {
@@ -293,10 +299,10 @@
cpas_hw);
break;
case CAM_CAMNOC_HW_IRQ_CAMNOC_TEST:
- CPAS_CDBG("TEST IRQ\n");
+ CAM_DBG(CAM_CPAS, "TEST IRQ");
break;
default:
- pr_err("Invalid IRQ type\n");
+ CAM_ERR(CAM_CPAS, "Invalid IRQ type");
break;
}
@@ -309,7 +315,7 @@
}
if (payload->irq_status)
- pr_err("IRQ not handled irq_status=0x%x\n",
+ CAM_ERR(CAM_CPAS, "IRQ not handled irq_status=0x%x",
payload->irq_status);
kfree(payload);
@@ -331,7 +337,7 @@
soc_info->reg_map[camnoc_index].mem_base +
camnoc_info->irq_sbm->sbm_status.offset);
- CPAS_CDBG("IRQ callback, irq_status=0x%x\n", payload->irq_status);
+ CAM_DBG(CAM_CPAS, "IRQ callback, irq_status=0x%x", payload->irq_status);
payload->hw = cpas_hw;
INIT_WORK((struct work_struct *)&payload->work, cam_cpastop_work);
@@ -396,7 +402,8 @@
CAM_CPAS_POLL_RETRY_CNT,
CAM_CPAS_POLL_MIN_USECS, CAM_CPAS_POLL_MAX_USECS);
if (rc) {
- pr_err("camnoc flush slave pending trans failed\n");
+ CAM_ERR(CAM_CPAS,
+ "camnoc flush slave pending trans failed");
/* Do not return error, passthrough */
}
}
@@ -414,15 +421,19 @@
(hw_caps->cpas_version.minor == 0) &&
(hw_caps->cpas_version.incr == 0)) {
camnoc_info = &cam170_cpas100_camnoc_info;
+ } else if ((hw_caps->cpas_version.major == 1) &&
+ (hw_caps->cpas_version.minor == 1) &&
+ (hw_caps->cpas_version.incr == 0)) {
+ camnoc_info = &cam170_cpas110_camnoc_info;
} else {
- pr_err("CPAS Version not supported %d.%d.%d\n",
+ CAM_ERR(CAM_CPAS, "CPAS Version not supported %d.%d.%d",
hw_caps->cpas_version.major,
hw_caps->cpas_version.minor,
hw_caps->cpas_version.incr);
return -EINVAL;
}
} else {
- pr_err("Camera Version not supported %d.%d.%d\n",
+ CAM_ERR(CAM_CPAS, "Camera Version not supported %d.%d.%d",
hw_caps->camera_version.major,
hw_caps->camera_version.minor,
hw_caps->camera_version.incr);
@@ -435,7 +446,7 @@
int cam_cpastop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops)
{
if (!internal_ops) {
- pr_err("invalid NULL param\n");
+ CAM_ERR(CAM_CPAS, "invalid NULL param");
return -EINVAL;
}
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v170_110.h b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v170_110.h
new file mode 100644
index 0000000..f4d0e36
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v170_110.h
@@ -0,0 +1,542 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CPASTOP_V170_110_H_
+#define _CPASTOP_V170_110_H_
+
+#define TEST_IRQ_ENABLE 0
+
+static struct cam_camnoc_irq_sbm cam_cpas110_irq_sbm = {
+ .sbm_enable = {
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .enable = true,
+ .offset = 0x2040, /* SBM_FAULTINEN0_LOW */
+ .value = 0x1 | /* SBM_FAULTINEN0_LOW_PORT0_MASK*/
+ 0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */
+ 0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */
+ 0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */
+ 0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */
+ 0x20 | /* SBM_FAULTINEN0_LOW_PORT5_MASK */
+ (TEST_IRQ_ENABLE ?
+ 0x100 : /* SBM_FAULTINEN0_LOW_PORT8_MASK */
+ 0x0),
+ },
+ .sbm_status = {
+ .access_type = CAM_REG_TYPE_READ,
+ .enable = true,
+ .offset = 0x2048, /* SBM_FAULTINSTATUS0_LOW */
+ },
+ .sbm_clear = {
+ .access_type = CAM_REG_TYPE_WRITE,
+ .enable = true,
+ .offset = 0x2080, /* SBM_FLAGOUTCLR0_LOW */
+ .value = TEST_IRQ_ENABLE ? 0x6 : 0x2,
+ }
+};
+
+static struct cam_camnoc_irq_err
+ cam_cpas110_irq_err[] = {
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR,
+ .enable = true,
+ .sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */
+ .err_enable = {
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .enable = true,
+ .offset = 0x2708, /* ERRLOGGER_MAINCTL_LOW */
+ .value = 1,
+ },
+ .err_status = {
+ .access_type = CAM_REG_TYPE_READ,
+ .enable = true,
+ .offset = 0x2710, /* ERRLOGGER_ERRVLD_LOW */
+ },
+ .err_clear = {
+ .access_type = CAM_REG_TYPE_WRITE,
+ .enable = true,
+ .offset = 0x2718, /* ERRLOGGER_ERRCLR_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR,
+ .enable = true,
+ .sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */
+ .err_enable = {
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .enable = true,
+ .offset = 0x5a0, /* SPECIFIC_IFE02_ENCERREN_LOW */
+ .value = 1,
+ },
+ .err_status = {
+ .access_type = CAM_REG_TYPE_READ,
+ .enable = true,
+ .offset = 0x590, /* SPECIFIC_IFE02_ENCERRSTATUS_LOW */
+ },
+ .err_clear = {
+ .access_type = CAM_REG_TYPE_WRITE,
+ .enable = true,
+ .offset = 0x598, /* SPECIFIC_IFE02_ENCERRCLR_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR,
+ .enable = true,
+ .sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */
+ .err_enable = {
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .enable = true,
+ .offset = 0x9a0, /* SPECIFIC_IFE13_ENCERREN_LOW */
+ .value = 1,
+ },
+ .err_status = {
+ .access_type = CAM_REG_TYPE_READ,
+ .enable = true,
+ .offset = 0x990, /* SPECIFIC_IFE13_ENCERRSTATUS_LOW */
+ },
+ .err_clear = {
+ .access_type = CAM_REG_TYPE_WRITE,
+ .enable = true,
+ .offset = 0x998, /* SPECIFIC_IFE13_ENCERRCLR_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
+ .enable = true,
+ .sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */
+ .err_enable = {
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .enable = true,
+ .offset = 0xd20, /* SPECIFIC_IBL_RD_DECERREN_LOW */
+ .value = 1,
+ },
+ .err_status = {
+ .access_type = CAM_REG_TYPE_READ,
+ .enable = true,
+ .offset = 0xd10, /* SPECIFIC_IBL_RD_DECERRSTATUS_LOW */
+ },
+ .err_clear = {
+ .access_type = CAM_REG_TYPE_WRITE,
+ .enable = true,
+ .offset = 0xd18, /* SPECIFIC_IBL_RD_DECERRCLR_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
+ .enable = true,
+ .sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */
+ .err_enable = {
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .enable = true,
+ .offset = 0x11a0, /* SPECIFIC_IBL_WR_ENCERREN_LOW */
+ .value = 1,
+ },
+ .err_status = {
+ .access_type = CAM_REG_TYPE_READ,
+ .enable = true,
+ .offset = 0x1190,
+ /* SPECIFIC_IBL_WR_ENCERRSTATUS_LOW */
+ },
+ .err_clear = {
+ .access_type = CAM_REG_TYPE_WRITE,
+ .enable = true,
+ .offset = 0x1198, /* SPECIFIC_IBL_WR_ENCERRCLR_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT,
+ .enable = true,
+ .sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */
+ .err_enable = {
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .enable = true,
+ .offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
+ .value = 0x1,
+ },
+ .err_status = {
+ .access_type = CAM_REG_TYPE_READ,
+ .enable = true,
+ .offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
+ },
+ .err_clear = {
+ .enable = false,
+ },
+ },
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1,
+ .enable = false,
+ },
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2,
+ .enable = false,
+ },
+ {
+ .irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST,
+ .enable = TEST_IRQ_ENABLE ? true : false,
+ .sbm_port = 0x100, /* SBM_FAULTINSTATUS0_LOW_PORT8_MASK */
+ .err_enable = {
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .enable = true,
+ .offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
+ .value = 0x5,
+ },
+ .err_status = {
+ .access_type = CAM_REG_TYPE_READ,
+ .enable = true,
+ .offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
+ },
+ .err_clear = {
+ .enable = false,
+ },
+ },
+};
+
+static struct cam_camnoc_specific
+ cam_cpas110_camnoc_specific[] = {
+ {
+ .port_type = CAM_CAMNOC_CDM,
+ .enable = true,
+ .priority_lut_low = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x30, /* SPECIFIC_CDM_PRIORITYLUT_LOW */
+ .value = 0x22222222,
+ },
+ .priority_lut_high = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x34, /* SPECIFIC_CDM_PRIORITYLUT_HIGH */
+ .value = 0x22222222,
+ },
+ .urgency = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 1,
+ .offset = 0x38, /* SPECIFIC_CDM_URGENCY_LOW */
+ .mask = 0x7, /* SPECIFIC_CDM_URGENCY_LOW_READ_MASK */
+ .shift = 0x0, /* SPECIFIC_CDM_URGENCY_LOW_READ_SHIFT */
+ .value = 0,
+ },
+ .danger_lut = {
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x40, /* SPECIFIC_CDM_DANGERLUT_LOW */
+ .value = 0x0,
+ },
+ .safe_lut = {
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x48, /* SPECIFIC_CDM_SAFELUT_LOW */
+ .value = 0x0,
+ },
+ .ubwc_ctl = {
+ .enable = false,
+ },
+ },
+ {
+ .port_type = CAM_CAMNOC_IFE02,
+ .enable = true,
+ .priority_lut_low = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x430, /* SPECIFIC_IFE02_PRIORITYLUT_LOW */
+ .value = 0x66665433,
+ },
+ .priority_lut_high = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x434, /* SPECIFIC_IFE02_PRIORITYLUT_HIGH */
+ .value = 0x66666666,
+ },
+ .urgency = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 1,
+ .offset = 0x438, /* SPECIFIC_IFE02_URGENCY_LOW */
+ /* SPECIFIC_IFE02_URGENCY_LOW_WRITE_MASK */
+ .mask = 0x70,
+ /* SPECIFIC_IFE02_URGENCY_LOW_WRITE_SHIFT */
+ .shift = 0x4,
+ .value = 3,
+ },
+ .danger_lut = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .offset = 0x440, /* SPECIFIC_IFE02_DANGERLUT_LOW */
+ .value = 0xFFFFFF00,
+ },
+ .safe_lut = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .offset = 0x448, /* SPECIFIC_IFE02_SAFELUT_LOW */
+ .value = 0x3,
+ },
+ .ubwc_ctl = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x588, /* SPECIFIC_IFE02_ENCCTL_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .port_type = CAM_CAMNOC_IFE13,
+ .enable = true,
+ .priority_lut_low = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x830, /* SPECIFIC_IFE13_PRIORITYLUT_LOW */
+ .value = 0x66665433,
+ },
+ .priority_lut_high = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x834, /* SPECIFIC_IFE13_PRIORITYLUT_HIGH */
+ .value = 0x66666666,
+ },
+ .urgency = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 1,
+ .offset = 0x838, /* SPECIFIC_IFE13_URGENCY_LOW */
+ /* SPECIFIC_IFE13_URGENCY_LOW_WRITE_MASK */
+ .mask = 0x70,
+ /* SPECIFIC_IFE13_URGENCY_LOW_WRITE_SHIFT */
+ .shift = 0x4,
+ .value = 3,
+ },
+ .danger_lut = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .offset = 0x840, /* SPECIFIC_IFE13_DANGERLUT_LOW */
+ .value = 0xFFFFFF00,
+ },
+ .safe_lut = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .offset = 0x848, /* SPECIFIC_IFE13_SAFELUT_LOW */
+ .value = 0x3,
+ },
+ .ubwc_ctl = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x988, /* SPECIFIC_IFE13_ENCCTL_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .port_type = CAM_CAMNOC_IPE_BPS_LRME_READ,
+ .enable = true,
+ .priority_lut_low = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0xc30, /* SPECIFIC_IBL_RD_PRIORITYLUT_LOW */
+ .value = 0x33333333,
+ },
+ .priority_lut_high = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0xc34, /* SPECIFIC_IBL_RD_PRIORITYLUT_HIGH */
+ .value = 0x33333333,
+ },
+ .urgency = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 1,
+ .offset = 0xc38, /* SPECIFIC_IBL_RD_URGENCY_LOW */
+ /* SPECIFIC_IBL_RD_URGENCY_LOW_READ_MASK */
+ .mask = 0x7,
+ /* SPECIFIC_IBL_RD_URGENCY_LOW_READ_SHIFT */
+ .shift = 0x0,
+ .value = 3,
+ },
+ .danger_lut = {
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0xc40, /* SPECIFIC_IBL_RD_DANGERLUT_LOW */
+ .value = 0x0,
+ },
+ .safe_lut = {
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0xc48, /* SPECIFIC_IBL_RD_SAFELUT_LOW */
+ .value = 0x0,
+ },
+ .ubwc_ctl = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0xd08, /* SPECIFIC_IBL_RD_DECCTL_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .port_type = CAM_CAMNOC_IPE_BPS_LRME_WRITE,
+ .enable = true,
+ .priority_lut_low = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1030, /* SPECIFIC_IBL_WR_PRIORITYLUT_LOW */
+ .value = 0x33333333,
+ },
+ .priority_lut_high = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1034, /* SPECIFIC_IBL_WR_PRIORITYLUT_HIGH */
+ .value = 0x33333333,
+ },
+ .urgency = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 1,
+ .offset = 0x1038, /* SPECIFIC_IBL_WR_URGENCY_LOW */
+ /* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_MASK */
+ .mask = 0x70,
+ /* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_SHIFT */
+ .shift = 0x4,
+ .value = 3,
+ },
+ .danger_lut = {
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1040, /* SPECIFIC_IBL_WR_DANGERLUT_LOW */
+ .value = 0x0,
+ },
+ .safe_lut = {
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1048, /* SPECIFIC_IBL_WR_SAFELUT_LOW */
+ .value = 0x0,
+ },
+ .ubwc_ctl = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1188, /* SPECIFIC_IBL_WR_ENCCTL_LOW */
+ .value = 1,
+ },
+ },
+ {
+ .port_type = CAM_CAMNOC_JPEG,
+ .enable = true,
+ .priority_lut_low = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1430, /* SPECIFIC_JPEG_PRIORITYLUT_LOW */
+ .value = 0x22222222,
+ },
+ .priority_lut_high = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1434, /* SPECIFIC_JPEG_PRIORITYLUT_HIGH */
+ .value = 0x22222222,
+ },
+ .urgency = {
+ .enable = true,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1438, /* SPECIFIC_JPEG_URGENCY_LOW */
+ .value = 0x22,
+ },
+ .danger_lut = {
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1440, /* SPECIFIC_JPEG_DANGERLUT_LOW */
+ .value = 0x0,
+ },
+ .safe_lut = {
+ .enable = false,
+ .access_type = CAM_REG_TYPE_READ_WRITE,
+ .masked_value = 0,
+ .offset = 0x1448, /* SPECIFIC_JPEG_SAFELUT_LOW */
+ .value = 0x0,
+ },
+ .ubwc_ctl = {
+ .enable = false,
+ },
+ },
+ {
+ .port_type = CAM_CAMNOC_FD,
+ .enable = false,
+ },
+ {
+ .port_type = CAM_CAMNOC_ICP,
+ .enable = false,
+ }
+};
+
+static uint32_t cam_cpas110_slave_error_logger[] = {
+ 0x2700, /* ERRLOGGER_SWID_LOW */
+ 0x2704, /* ERRLOGGER_SWID_HIGH */
+ 0x2708, /* ERRLOGGER_MAINCTL_LOW */
+ 0x2710, /* ERRLOGGER_ERRVLD_LOW */
+ 0x2720, /* ERRLOGGER_ERRLOG0_LOW */
+ 0x2724, /* ERRLOGGER_ERRLOG0_HIGH */
+ 0x2728, /* ERRLOGGER_ERRLOG1_LOW */
+ 0x272c, /* ERRLOGGER_ERRLOG1_HIGH */
+ 0x2730, /* ERRLOGGER_ERRLOG2_LOW */
+ 0x2734, /* ERRLOGGER_ERRLOG2_HIGH */
+ 0x2738, /* ERRLOGGER_ERRLOG3_LOW */
+ 0x273c, /* ERRLOGGER_ERRLOG3_HIGH */
+};
+
+static struct cam_cpas_hw_errata_wa_list cam170_cpas110_errata_wa_list = {
+ .camnoc_flush_slave_pending_trans = {
+ .enable = true,
+ .data.reg_info = {
+ .access_type = CAM_REG_TYPE_READ,
+ .offset = 0x2100, /* SidebandManager_SenseIn0_Low */
+ .mask = 0xE0000, /* Bits 17, 18, 19 */
+ .value = 0, /* expected to be 0 */
+ },
+ },
+};
+
+static struct cam_camnoc_info cam170_cpas110_camnoc_info = {
+ .specific = &cam_cpas110_camnoc_specific[0],
+ .specific_size = sizeof(cam_cpas110_camnoc_specific) /
+ sizeof(cam_cpas110_camnoc_specific[0]),
+ .irq_sbm = &cam_cpas110_irq_sbm,
+ .irq_err = &cam_cpas110_irq_err[0],
+ .irq_err_size = sizeof(cam_cpas110_irq_err) /
+ sizeof(cam_cpas110_irq_err[0]),
+ .error_logger = &cam_cpas110_slave_error_logger[0],
+ .error_logger_size = sizeof(cam_cpas110_slave_error_logger) /
+ sizeof(cam_cpas110_slave_error_logger[0]),
+ .errata_wa_list = &cam170_cpas110_errata_wa_list,
+};
+
+#endif /* _CPASTOP_V170_110_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h b/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
index 27b8504..3977b68 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
@@ -17,6 +17,7 @@
#include <linux/platform_device.h>
#include <media/cam_cpas.h>
+#include "cam_soc_util.h"
#define CAM_HW_IDENTIFIER_LENGTH 128
@@ -93,7 +94,7 @@
struct device *dev;
void *userdata;
void (*cam_cpas_client_cb)(
- int32_t client_handle,
+ uint32_t client_handle,
void *userdata,
enum cam_camnoc_irq_type event_type,
uint32_t event_data);
@@ -101,7 +102,7 @@
};
/**
- * enum cam_vote_level - Enum for voting type
+ * enum cam_vote_type - Enum for voting type
*
* @CAM_VOTE_ABSOLUTE : Absolute vote
* @CAM_VOTE_DYNAMIC : Dynamic vote
@@ -112,21 +113,6 @@
};
/**
- * enum cam_vote_level - Enum for voting level
- *
- * @CAM_SUSPEND_VOTE : Suspend vote
- * @CAM_SVS_VOTE : SVS vote
- * @CAM_NOMINAL_VOTE : Nominal vote
- * @CAM_TURBO_VOTE : Turbo vote
- */
-enum cam_vote_level {
- CAM_SUSPEND_VOTE,
- CAM_SVS_VOTE,
- CAM_NOMINAL_VOTE,
- CAM_TURBO_VOTE,
-};
-
-/**
* struct cam_ahb_vote : AHB vote
*
* @type : AHB voting type.
diff --git a/drivers/media/platform/msm/camera/cam_icp/Makefile b/drivers/media/platform/msm/camera/cam_icp/Makefile
new file mode 100644
index 0000000..5aba168
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/Makefile
@@ -0,0 +1,14 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/fw_inc
+ccflags-y += -Idrivers/media/platform/msm/camera
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += icp_hw/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_icp_subdev.o cam_icp_context.o hfi.o
diff --git a/drivers/media/platform/msm/camera/icp/cam_icp_context.c b/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
similarity index 84%
rename from drivers/media/platform/msm/camera/icp/cam_icp_context.c
rename to drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
index 41290f4..15bd98c 100644
--- a/drivers/media/platform/msm/camera/icp/cam_icp_context.c
+++ b/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
@@ -10,8 +10,6 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "CAM-ICP-CTXT %s:%d " fmt, __func__, __LINE__
-
#include <linux/debugfs.h>
#include <linux/videodev2.h>
#include <linux/slab.h>
@@ -25,6 +23,8 @@
#include "cam_icp_context.h"
#include "cam_req_mgr_util.h"
#include "cam_mem_mgr.h"
+#include "cam_trace.h"
+#include "cam_debug_util.h"
static int __cam_icp_acquire_dev_in_available(struct cam_context *ctx,
struct cam_acquire_dev_cmd *cmd)
@@ -32,8 +32,10 @@
int rc;
rc = cam_context_acquire_dev_to_hw(ctx, cmd);
- if (!rc)
+ if (!rc) {
ctx->state = CAM_CTX_ACQUIRED;
+ trace_cam_context_state("ICP", ctx);
+ }
return rc;
}
@@ -45,9 +47,10 @@
rc = cam_context_release_dev_to_hw(ctx, cmd);
if (rc)
- pr_err("Unable to release device\n");
+ CAM_ERR(CAM_ICP, "Unable to release device");
ctx->state = CAM_CTX_AVAILABLE;
+ trace_cam_context_state("ICP", ctx);
return rc;
}
@@ -57,8 +60,10 @@
int rc;
rc = cam_context_start_dev_to_hw(ctx, cmd);
- if (!rc)
+ if (!rc) {
ctx->state = CAM_CTX_READY;
+ trace_cam_context_state("ICP", ctx);
+ }
return rc;
}
@@ -70,7 +75,7 @@
rc = cam_context_prepare_dev_to_hw(ctx, cmd);
if (rc)
- pr_err("Unable to prepare device\n");
+ CAM_ERR(CAM_ICP, "Failed to prepare device");
return rc;
}
@@ -82,9 +87,10 @@
rc = cam_context_stop_dev_to_hw(ctx);
if (rc)
- pr_err("Unable to stop device\n");
+ CAM_ERR(CAM_ICP, "Failed to stop device");
ctx->state = CAM_CTX_ACQUIRED;
+ trace_cam_context_state("ICP", ctx);
return rc;
}
@@ -95,11 +101,11 @@
rc = __cam_icp_stop_dev_in_ready(ctx, NULL);
if (rc)
- pr_err("Unable to stop device\n");
+ CAM_ERR(CAM_ICP, "Failed to stop device");
rc = __cam_icp_release_dev_in_acquired(ctx, cmd);
if (rc)
- pr_err("Unable to stop device\n");
+ CAM_ERR(CAM_ICP, "Failed to release device");
return rc;
}
@@ -107,7 +113,7 @@
static int __cam_icp_handle_buf_done_in_ready(void *ctx,
uint32_t evt_id, void *done)
{
- return cam_context_buf_done_from_hw(ctx, done, 0);
+ return cam_context_buf_done_from_hw(ctx, done, evt_id);
}
static struct cam_ctx_ops
@@ -160,7 +166,7 @@
int rc;
if ((!ctx) || (!ctx->base) || (!hw_intf)) {
- pr_err("Invalid params: %pK %pK\n", ctx, hw_intf);
+ CAM_ERR(CAM_ICP, "Invalid params: %pK %pK", ctx, hw_intf);
rc = -EINVAL;
goto err;
}
@@ -168,7 +174,7 @@
rc = cam_context_init(ctx->base, NULL, hw_intf, ctx->req_base,
CAM_CTX_REQ_MAX);
if (rc) {
- pr_err("Camera Context Base init failed!\n");
+ CAM_ERR(CAM_ICP, "Camera Context Base init failed");
goto err;
}
@@ -183,7 +189,7 @@
int cam_icp_context_deinit(struct cam_icp_context *ctx)
{
if ((!ctx) || (!ctx->base)) {
- pr_err("Invalid params: %pK\n", ctx);
+ CAM_ERR(CAM_ICP, "Invalid params: %pK", ctx);
return -EINVAL;
}
diff --git a/drivers/media/platform/msm/camera/icp/cam_icp_context.h b/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.h
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/cam_icp_context.h
rename to drivers/media/platform/msm/camera/cam_icp/cam_icp_context.h
diff --git a/drivers/media/platform/msm/camera/icp/cam_icp_subdev.c b/drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c
similarity index 86%
rename from drivers/media/platform/msm/camera/icp/cam_icp_subdev.c
rename to drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c
index 703561d..bbdff27 100644
--- a/drivers/media/platform/msm/camera/icp/cam_icp_subdev.c
+++ b/drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c
@@ -10,8 +10,6 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "CAM-ICP %s:%d " fmt, __func__, __LINE__
-
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -36,6 +34,7 @@
#include "cam_icp_context.h"
#include "cam_hw_mgr_intf.h"
#include "cam_icp_hw_mgr_intf.h"
+#include "cam_debug_util.h"
#define CAM_ICP_DEV_NAME "cam-icp"
@@ -65,13 +64,13 @@
mutex_lock(&g_icp_dev.icp_lock);
if (g_icp_dev.open_cnt >= 1) {
- pr_err("ICP subdev is already opened\n");
+ CAM_ERR(CAM_ICP, "ICP subdev is already opened");
rc = -EALREADY;
goto end;
}
if (!node) {
- pr_err("Invalid args\n");
+ CAM_ERR(CAM_ICP, "Invalid args");
rc = -EINVAL;
goto end;
}
@@ -79,7 +78,7 @@
hw_mgr_intf = &node->hw_mgr_intf;
rc = hw_mgr_intf->download_fw(hw_mgr_intf->hw_mgr_priv, NULL);
if (rc < 0) {
- pr_err("FW download failed\n");
+ CAM_ERR(CAM_ICP, "FW download failed");
goto end;
}
g_icp_dev.open_cnt++;
@@ -97,27 +96,27 @@
mutex_lock(&g_icp_dev.icp_lock);
if (g_icp_dev.open_cnt <= 0) {
- pr_err("ICP subdev is already closed\n");
+ CAM_ERR(CAM_ICP, "ICP subdev is already closed");
rc = -EINVAL;
goto end;
}
g_icp_dev.open_cnt--;
if (!node) {
- pr_err("Invalid args\n");
+ CAM_ERR(CAM_ICP, "Invalid args");
rc = -EINVAL;
goto end;
}
hw_mgr_intf = &node->hw_mgr_intf;
if (!hw_mgr_intf) {
- pr_err("hw_mgr_intf is not initialized\n");
+ CAM_ERR(CAM_ICP, "hw_mgr_intf is not initialized");
rc = -EINVAL;
goto end;
}
rc = hw_mgr_intf->hw_close(hw_mgr_intf->hw_mgr_priv, NULL);
if (rc < 0) {
- pr_err("HW close failed\n");
+ CAM_ERR(CAM_ICP, "HW close failed");
goto end;
}
@@ -138,18 +137,16 @@
struct cam_hw_mgr_intf *hw_mgr_intf;
if (!pdev) {
- pr_err("pdev is NULL\n");
+ CAM_ERR(CAM_ICP, "pdev is NULL");
return -EINVAL;
}
- memset(&g_icp_dev, 0, sizeof(g_icp_dev));
-
g_icp_dev.sd.pdev = pdev;
g_icp_dev.sd.internal_ops = &cam_icp_subdev_internal_ops;
rc = cam_subdev_probe(&g_icp_dev.sd, pdev, CAM_ICP_DEV_NAME,
CAM_ICP_DEVICE_TYPE);
if (rc) {
- pr_err("ICP cam_subdev_probe failed!\n");
+ CAM_ERR(CAM_ICP, "ICP cam_subdev_probe failed");
goto probe_fail;
}
@@ -163,26 +160,24 @@
rc = cam_icp_hw_mgr_init(pdev->dev.of_node, (uint64_t *)hw_mgr_intf);
if (rc) {
- pr_err("ICP HW manager init failed: %d\n", rc);
+ CAM_ERR(CAM_ICP, "ICP HW manager init failed: %d", rc);
goto hw_init_fail;
}
- pr_debug("Initializing the ICP contexts\n");
for (i = 0; i < CAM_CTX_MAX; i++) {
g_icp_dev.ctx_icp[i].base = &g_icp_dev.ctx[i];
rc = cam_icp_context_init(&g_icp_dev.ctx_icp[i],
hw_mgr_intf);
if (rc) {
- pr_err("ICP context init failed!\n");
+ CAM_ERR(CAM_ICP, "ICP context init failed");
goto ctx_fail;
}
}
- pr_debug("Initializing the ICP Node\n");
rc = cam_node_init(node, hw_mgr_intf, g_icp_dev.ctx,
CAM_CTX_MAX, CAM_ICP_DEV_NAME);
if (rc) {
- pr_err("ICP node init failed!\n");
+ CAM_ERR(CAM_ICP, "ICP node init failed");
goto ctx_fail;
}
@@ -209,20 +204,20 @@
struct cam_subdev *subdev;
if (!pdev) {
- pr_err("pdev is NULL\n");
- return -EINVAL;
+ CAM_ERR(CAM_ICP, "pdev is NULL");
+ return -ENODEV;
}
sd = platform_get_drvdata(pdev);
if (!sd) {
- pr_err("V4l2 subdev is NULL\n");
- return -EINVAL;
+ CAM_ERR(CAM_ICP, "V4l2 subdev is NULL");
+ return -ENODEV;
}
subdev = v4l2_get_subdevdata(sd);
if (!subdev) {
- pr_err("cam subdev is NULL\n");
- return -EINVAL;
+ CAM_ERR(CAM_ICP, "cam subdev is NULL");
+ return -ENODEV;
}
for (i = 0; i < CAM_CTX_MAX; i++)
diff --git a/drivers/media/platform/msm/camera/icp/fw_inc/hfi_intf.h b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h
similarity index 95%
rename from drivers/media/platform/msm/camera/icp/fw_inc/hfi_intf.h
rename to drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h
index 0ffea5b..9150795 100644
--- a/drivers/media/platform/msm/camera/icp/fw_inc/hfi_intf.h
+++ b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h
@@ -98,6 +98,13 @@
* @icp_base: icp base address
*/
void cam_hfi_enable_cpu(void __iomem *icp_base);
+
+/**
+ * cam_hfi_disable_cpu() - disable A5 CPU
+ * @icp_base: icp base address
+ */
+void cam_hfi_disable_cpu(void __iomem *icp_base);
+
/**
* cam_hfi_deinit() - cleanup HFI
*/
diff --git a/drivers/media/platform/msm/camera/icp/fw_inc/hfi_reg.h b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_reg.h
similarity index 99%
rename from drivers/media/platform/msm/camera/icp/fw_inc/hfi_reg.h
rename to drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_reg.h
index ff6b72a..04e3c85 100644
--- a/drivers/media/platform/msm/camera/icp/fw_inc/hfi_reg.h
+++ b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_reg.h
@@ -49,6 +49,7 @@
#define ICP_CSR_EN_CLKGATE_WFI (1 << 12)
#define ICP_CSR_EDBGRQ (1 << 14)
#define ICP_CSR_DBGSWENABLE (1 << 22)
+#define ICP_CSR_A5_STATUS_WFI (1 << 7)
/* start of Queue table and queues */
#define MAX_ICP_HFI_QUEUES 4
diff --git a/drivers/media/platform/msm/camera/icp/fw_inc/hfi_session_defs.h b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_session_defs.h
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/fw_inc/hfi_session_defs.h
rename to drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_session_defs.h
diff --git a/drivers/media/platform/msm/camera/icp/fw_inc/hfi_sys_defs.h b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_sys_defs.h
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/fw_inc/hfi_sys_defs.h
rename to drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_sys_defs.h
diff --git a/drivers/media/platform/msm/camera/icp/hfi.c b/drivers/media/platform/msm/camera/cam_icp/hfi.c
similarity index 83%
rename from drivers/media/platform/msm/camera/icp/hfi.c
rename to drivers/media/platform/msm/camera/cam_icp/hfi.c
index 170c8cf..48e1f1c 100644
--- a/drivers/media/platform/msm/camera/icp/hfi.c
+++ b/drivers/media/platform/msm/camera/cam_icp/hfi.c
@@ -10,8 +10,6 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "HFI-FW %s:%d " fmt, __func__, __LINE__
-
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/slab.h>
@@ -27,6 +25,7 @@
#include "hfi_session_defs.h"
#include "hfi_intf.h"
#include "cam_icp_hw_mgr_intf.h"
+#include "cam_debug_util.h"
#define HFI_VERSION_INFO_MAJOR_VAL 1
#define HFI_VERSION_INFO_MINOR_VAL 1
@@ -39,11 +38,10 @@
#define HFI_VERSION_INFO_STEP_BMSK 0xFF
#define HFI_VERSION_INFO_STEP_SHFT 0
-#undef HFI_DBG
-#define HFI_DBG(fmt, args...) pr_debug(fmt, ##args)
-
static struct hfi_info *g_hfi;
unsigned int g_icp_mmu_hdl;
+static DEFINE_MUTEX(hfi_cmd_q_mutex);
+static DEFINE_MUTEX(hfi_msg_q_mutex);
int hfi_write_cmd(void *cmd_ptr)
{
@@ -52,23 +50,25 @@
struct hfi_qtbl *q_tbl;
struct hfi_q_hdr *q;
int rc = 0;
- int i = 0;
if (!cmd_ptr) {
- pr_err("Invalid args\n");
+ CAM_ERR(CAM_HFI, "command is null");
return -EINVAL;
}
- if (!g_hfi || (g_hfi->hfi_state != HFI_READY)) {
- pr_err("HFI interface not ready yet\n");
- return -EIO;
+ mutex_lock(&hfi_cmd_q_mutex);
+ if (!g_hfi) {
+ CAM_ERR(CAM_HFI, "HFI interface not setup");
+ rc = -ENODEV;
+ goto err;
}
- mutex_lock(&g_hfi->cmd_q_lock);
- if (!g_hfi->cmd_q_state) {
- pr_err("HFI command interface not ready yet\n");
- mutex_unlock(&g_hfi->cmd_q_lock);
- return -EIO;
+ if (g_hfi->hfi_state != HFI_READY ||
+ !g_hfi->cmd_q_state) {
+ CAM_ERR(CAM_HFI, "HFI state: %u, cmd q state: %u",
+ g_hfi->hfi_state, g_hfi->cmd_q_state);
+ rc = -ENODEV;
+ goto err;
}
q_tbl = (struct hfi_qtbl *)g_hfi->map.qtbl.kva;
@@ -78,24 +78,20 @@
size_in_words = (*(uint32_t *)cmd_ptr) >> BYTE_WORD_SHIFT;
if (!size_in_words) {
- pr_debug("failed");
+ CAM_DBG(CAM_HFI, "failed");
rc = -EINVAL;
goto err;
}
- HFI_DBG("size_in_words : %u, q->qhdr_write_idx %x\n", size_in_words,
- q->qhdr_write_idx);
-
read_idx = q->qhdr_read_idx;
empty_space = (q->qhdr_write_idx >= read_idx) ?
(q->qhdr_q_size - (q->qhdr_write_idx - read_idx)) :
(read_idx - q->qhdr_write_idx);
if (empty_space <= size_in_words) {
- pr_err("failed");
+ CAM_ERR(CAM_HFI, "failed");
rc = -EIO;
goto err;
}
- HFI_DBG("empty_space : %u\n", empty_space);
new_write_idx = q->qhdr_write_idx + size_in_words;
write_ptr = (uint32_t *)(write_q + q->qhdr_write_idx);
@@ -110,15 +106,12 @@
memcpy(write_q, (uint8_t *)cmd_ptr + temp,
new_write_idx << BYTE_WORD_SHIFT);
}
- for (i = 0; i < size_in_words; i++)
- pr_debug("%x\n", write_ptr[i]);
q->qhdr_write_idx = new_write_idx;
- HFI_DBG("q->qhdr_write_idx %x\n", q->qhdr_write_idx);
cam_io_w((uint32_t)INTR_ENABLE,
g_hfi->csr_base + HFI_REG_A5_CSR_HOST2ICPINT);
err:
- mutex_unlock(&g_hfi->cmd_q_lock);
+ mutex_unlock(&hfi_cmd_q_mutex);
return rc;
}
@@ -129,32 +122,40 @@
uint32_t new_read_idx, size_in_words, temp;
uint32_t *read_q, *read_ptr;
int rc = 0;
- int i = 0;
- if (!pmsg || q_id > Q_DBG) {
- pr_err("Inavlid args\n");
+ if (!pmsg) {
+ CAM_ERR(CAM_HFI, "Invalid msg");
return -EINVAL;
}
- if (!g_hfi || (g_hfi->hfi_state != HFI_READY)) {
- pr_err("HFI interface not ready yet\n");
- return -EIO;
+ if (q_id > Q_DBG) {
+ CAM_ERR(CAM_HFI, "Invalid q :%u", q_id);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hfi_msg_q_mutex);
+ if (!g_hfi) {
+ CAM_ERR(CAM_HFI, "hfi not set up yet");
+ rc = -ENODEV;
+ goto err;
+ }
+
+ if ((g_hfi->hfi_state != HFI_READY) ||
+ !g_hfi->msg_q_state) {
+ CAM_ERR(CAM_HFI, "hfi state: %u, msg q state: %u",
+ g_hfi->hfi_state, g_hfi->msg_q_state);
+ rc = -ENODEV;
+ goto err;
}
q_tbl_ptr = (struct hfi_qtbl *)g_hfi->map.qtbl.kva;
q = &q_tbl_ptr->q_hdr[q_id];
if (q->qhdr_read_idx == q->qhdr_write_idx) {
- pr_debug("FW or Q not ready, hfi state : %u, r idx : %u, w idx : %u\n",
+ CAM_DBG(CAM_HFI, "Q not ready, state:%u, r idx:%u, w idx:%u",
g_hfi->hfi_state, q->qhdr_read_idx, q->qhdr_write_idx);
- return -EIO;
- }
-
- mutex_lock(&g_hfi->msg_q_lock);
- if (!g_hfi->msg_q_state) {
- pr_err("HFI message interface not ready yet\n");
- mutex_unlock(&g_hfi->msg_q_lock);
- return -EIO;
+ rc = -EIO;
+ goto err;
}
if (q_id == Q_MSG)
@@ -165,12 +166,9 @@
read_ptr = (uint32_t *)(read_q + q->qhdr_read_idx);
size_in_words = (*read_ptr) >> BYTE_WORD_SHIFT;
- HFI_DBG("size_in_words : %u, read_ptr : %pK\n", size_in_words,
- (void *)read_ptr);
-
if ((size_in_words == 0) ||
(size_in_words > ICP_HFI_MAX_MSG_SIZE_IN_WORDS)) {
- pr_err("invalid HFI message packet size - 0x%08x\n",
+ CAM_ERR(CAM_HFI, "invalid HFI message packet size - 0x%08x",
size_in_words << BYTE_WORD_SHIFT);
q->qhdr_read_idx = q->qhdr_write_idx;
rc = -EIO;
@@ -178,7 +176,6 @@
}
new_read_idx = q->qhdr_read_idx + size_in_words;
- HFI_DBG("new_read_idx : %u\n", new_read_idx);
if (new_read_idx < q->qhdr_q_size) {
memcpy(pmsg, read_ptr, size_in_words << BYTE_WORD_SHIFT);
@@ -190,12 +187,9 @@
new_read_idx << BYTE_WORD_SHIFT);
}
- for (i = 0; i < size_in_words; i++)
- HFI_DBG("%x\n", read_ptr[i]);
-
q->qhdr_read_idx = new_read_idx;
err:
- mutex_unlock(&g_hfi->msg_q_lock);
+ mutex_unlock(&hfi_msg_q_mutex);
return rc;
}
@@ -265,7 +259,7 @@
case HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT:
break;
default:
- pr_err("command not supported :%d\n", type);
+ CAM_ERR(CAM_HFI, "command not supported :%d", type);
break;
}
}
@@ -277,7 +271,7 @@
struct cam_icp_query_cap_cmd *query_cmd = NULL;
if (!query_buf) {
- pr_err("%s: query buf is NULL\n", __func__);
+ CAM_ERR(CAM_HFI, "query buf is NULL");
return -EINVAL;
}
@@ -302,6 +296,19 @@
return 0;
}
+void cam_hfi_disable_cpu(void __iomem *icp_base)
+{
+ uint32_t data;
+ uint32_t val;
+
+ data = cam_io_r(icp_base + HFI_REG_A5_CSR_A5_STATUS);
+ /* Add waiting logic in case it is not idle */
+ if (data & ICP_CSR_A5_STATUS_WFI) {
+ val = cam_io_r(icp_base + HFI_REG_A5_CSR_A5_CONTROL);
+ val &= ~(ICP_FLAG_CSR_A5_EN | ICP_FLAG_CSR_WAKE_UP_EN);
+ cam_io_w(val, icp_base + HFI_REG_A5_CSR_A5_CONTROL);
+ }
+}
void cam_hfi_enable_cpu(void __iomem *icp_base)
{
@@ -319,6 +326,9 @@
struct hfi_q_hdr *cmd_q_hdr, *msg_q_hdr, *dbg_q_hdr;
uint32_t hw_version, fw_version, status = 0;
+ mutex_lock(&hfi_cmd_q_mutex);
+ mutex_lock(&hfi_msg_q_mutex);
+
if (!g_hfi) {
g_hfi = kzalloc(sizeof(struct hfi_info), GFP_KERNEL);
if (!g_hfi) {
@@ -327,13 +337,13 @@
}
}
- HFI_DBG("g_hfi: %pK\n", (void *)g_hfi);
if (g_hfi->hfi_state != HFI_DEINIT) {
- pr_err("hfi_init: invalid state\n");
+ CAM_ERR(CAM_HFI, "hfi_init: invalid state");
return -EINVAL;
}
memcpy(&g_hfi->map, hfi_mem, sizeof(g_hfi->map));
+ g_hfi->hfi_state = HFI_DEINIT;
if (debug) {
cam_io_w_mb(
@@ -360,7 +370,6 @@
qtbl_hdr->qtbl_num_active_q = ICP_HFI_NUMBER_OF_QS;
/* setup host-to-firmware command queue */
- pr_debug("updating the command queue info\n");
cmd_q_hdr = &qtbl->q_hdr[Q_CMD];
cmd_q_hdr->qhdr_status = QHDR_ACTIVE;
cmd_q_hdr->qhdr_start_addr = hfi_mem->cmd_q.iova;
@@ -371,7 +380,6 @@
cmd_q_hdr->qhdr_write_idx = RESET;
/* setup firmware-to-Host message queue */
- pr_debug("updating the message queue info\n");
msg_q_hdr = &qtbl->q_hdr[Q_MSG];
msg_q_hdr->qhdr_status = QHDR_ACTIVE;
msg_q_hdr->qhdr_start_addr = hfi_mem->msg_q.iova;
@@ -382,7 +390,6 @@
msg_q_hdr->qhdr_write_idx = RESET;
/* setup firmware-to-Host message queue */
- pr_debug("updating the debug queue info\n");
dbg_q_hdr = &qtbl->q_hdr[Q_DBG];
dbg_q_hdr->qhdr_status = QHDR_ACTIVE;
dbg_q_hdr->qhdr_start_addr = hfi_mem->dbg_q.iova;
@@ -391,7 +398,6 @@
dbg_q_hdr->qhdr_pkt_drop_cnt = RESET;
dbg_q_hdr->qhdr_read_idx = RESET;
dbg_q_hdr->qhdr_write_idx = RESET;
- pr_debug("Done updating the debug queue info\n");
switch (event_driven_mode) {
case INTR_MODE:
@@ -460,7 +466,8 @@
break;
default:
- pr_err("Invalid event driven mode :%u", event_driven_mode);
+ CAM_ERR(CAM_HFI, "Invalid event driven mode :%u",
+ event_driven_mode);
break;
}
@@ -477,56 +484,58 @@
icp_base + HFI_REG_HOST_ICP_INIT_REQUEST);
hw_version = cam_io_r(icp_base + HFI_REG_A5_HW_VERSION);
- HFI_DBG("hw version : [%x]\n", hw_version);
rc = readw_poll_timeout((icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE),
status, status != ICP_INIT_RESP_SUCCESS, 15, 200);
if (rc) {
- pr_err("timed out , status = %u\n", status);
+ CAM_ERR(CAM_HFI, "timed out , status = %u", status);
goto regions_fail;
}
fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
- HFI_DBG("fw version : %u[%x]\n", fw_version, fw_version);
+ CAM_DBG(CAM_HFI, "hw version : : [%x], fw version : [%x]",
+ hw_version, fw_version);
g_hfi->csr_base = icp_base;
g_hfi->hfi_state = HFI_READY;
g_hfi->cmd_q_state = true;
g_hfi->msg_q_state = true;
- mutex_init(&g_hfi->cmd_q_lock);
- mutex_init(&g_hfi->msg_q_lock);
cam_io_w((uint32_t)INTR_ENABLE, icp_base + HFI_REG_A5_CSR_A2HOSTINTEN);
+ mutex_unlock(&hfi_cmd_q_mutex);
+ mutex_unlock(&hfi_msg_q_mutex);
+
return rc;
regions_fail:
kfree(g_hfi);
alloc_fail:
+ mutex_unlock(&hfi_cmd_q_mutex);
+ mutex_unlock(&hfi_msg_q_mutex);
return rc;
}
void cam_hfi_deinit(void)
{
+ mutex_lock(&hfi_cmd_q_mutex);
+ mutex_lock(&hfi_msg_q_mutex);
+
if (!g_hfi) {
- pr_err("hfi path not established yet\n");
- return;
+ CAM_ERR(CAM_HFI, "hfi path not established yet");
+ goto err;
}
+
+ g_hfi->cmd_q_state = false;
+ g_hfi->msg_q_state = false;
+
cam_io_w((uint32_t)INTR_DISABLE,
g_hfi->csr_base + HFI_REG_A5_CSR_A2HOSTINTEN);
-
- mutex_lock(&g_hfi->cmd_q_lock);
- g_hfi->cmd_q_state = false;
- mutex_unlock(&g_hfi->cmd_q_lock);
-
- mutex_lock(&g_hfi->msg_q_lock);
- g_hfi->msg_q_state = false;
- mutex_unlock(&g_hfi->msg_q_lock);
-
- mutex_destroy(&g_hfi->cmd_q_lock);
- mutex_destroy(&g_hfi->msg_q_lock);
-
- kfree(g_hfi);
+ kzfree(g_hfi);
g_hfi = NULL;
+
+err:
+ mutex_unlock(&hfi_cmd_q_mutex);
+ mutex_unlock(&hfi_msg_q_mutex);
}
void icp_enable_fw_debug(void)
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/Makefile b/drivers/media/platform/msm/camera/cam_icp/icp_hw/Makefile
similarity index 60%
rename from drivers/media/platform/msm/camera/icp/icp_hw/Makefile
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/Makefile
index 8e95286..5276340 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/Makefile
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/Makefile
@@ -1,9 +1,9 @@
ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
-ccflags-y += -Idrivers/media/platform/msm/camera/icp
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/include
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr
ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
obj-$(CONFIG_SPECTRA_CAMERA) += icp_hw_mgr/ a5_hw/ ipe_hw/ bps_hw/
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/Makefile b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/Makefile
new file mode 100644
index 0000000..5f4f9fa
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/Makefile
@@ -0,0 +1,11 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/fw_inc
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += a5_dev.o a5_core.o a5_soc.o
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
similarity index 80%
rename from drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_core.c
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
index 39eacd8..e200f6f 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_core.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
@@ -10,8 +10,6 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "A5-CORE %s:%d " fmt, __func__, __LINE__
-
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/debugfs.h>
@@ -35,6 +33,7 @@
#include "hfi_sys_defs.h"
#include "cam_icp_hw_mgr_intf.h"
#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
static int cam_a5_cpas_vote(struct cam_a5_device_core_info *core_info,
struct cam_icp_cpas_vote *cpas_vote)
@@ -50,7 +49,7 @@
&cpas_vote->axi_vote);
if (rc)
- pr_err("cpas vote is failed: %d\n", rc);
+ CAM_ERR(CAM_ICP, "cpas vote is failed: %d", rc);
return rc;
}
@@ -60,26 +59,26 @@
struct elf32_hdr *elf_hdr;
if (!elf) {
- pr_err("Invalid params\n");
+ CAM_ERR(CAM_ICP, "Invalid params");
return -EINVAL;
}
elf_hdr = (struct elf32_hdr *)elf;
if (memcmp(elf_hdr->e_ident, ELFMAG, SELFMAG)) {
- pr_err("ICP elf identifier is failed\n");
+ CAM_ERR(CAM_ICP, "ICP elf identifier is failed");
return -EINVAL;
}
/* check architecture */
if (elf_hdr->e_machine != EM_ARM) {
- pr_err("unsupported arch\n");
+ CAM_ERR(CAM_ICP, "unsupported arch");
return -EINVAL;
}
/* check elf bit format */
if (elf_hdr->e_ident[EI_CLASS] != ELFCLASS32) {
- pr_err("elf doesn't support 32 bit format\n");
+ CAM_ERR(CAM_ICP, "elf doesn't support 32 bit format");
return -EINVAL;
}
@@ -97,7 +96,7 @@
struct elf32_phdr *prg_hdr;
if (!elf || !fw_size) {
- pr_err("invalid args\n");
+ CAM_ERR(CAM_ICP, "invalid args");
return -EINVAL;
}
@@ -109,11 +108,11 @@
prg_hdr = (struct elf32_phdr *)&icp_prg_hdr_tbl[0];
if (!prg_hdr) {
- pr_err("failed to get elf program header attr\n");
+ CAM_ERR(CAM_ICP, "failed to get elf program header attr");
return -EINVAL;
}
- pr_debug("num_prg_hdrs = %d\n", num_prg_hdrs);
+ CAM_DBG(CAM_ICP, "num_prg_hdrs = %d", num_prg_hdrs);
for (i = 0; i < num_prg_hdrs; i++, prg_hdr++) {
if (prg_hdr->p_flags == 0)
continue;
@@ -121,7 +120,7 @@
seg_mem_size = (prg_hdr->p_memsz + prg_hdr->p_align - 1) &
~(prg_hdr->p_align - 1);
seg_mem_size += prg_hdr->p_vaddr;
- pr_debug("p_memsz = %x p_align = %x p_vaddr = %x seg_mem_size = %x\n",
+ CAM_DBG(CAM_ICP, "memsz:%x align:%x addr:%x seg_mem_size:%x",
(int)prg_hdr->p_memsz, (int)prg_hdr->p_align,
(int)prg_hdr->p_vaddr, (int)seg_mem_size);
if (*fw_size < seg_mem_size)
@@ -130,7 +129,7 @@
}
if (*fw_size == 0) {
- pr_err("invalid elf fw file\n");
+ CAM_ERR(CAM_ICP, "invalid elf fw file");
return -EINVAL;
}
@@ -155,7 +154,7 @@
prg_hdr = (struct elf32_phdr *)&icp_prg_hdr_tbl[0];
if (!prg_hdr) {
- pr_err("failed to get elf program header attr\n");
+ CAM_ERR(CAM_ICP, "failed to get elf program header attr");
return -EINVAL;
}
@@ -163,15 +162,14 @@
if (prg_hdr->p_flags == 0)
continue;
- pr_debug("Loading FW header size: %u\n", prg_hdr->p_filesz);
+ CAM_DBG(CAM_ICP, "Loading FW header size: %u",
+ prg_hdr->p_filesz);
if (prg_hdr->p_filesz != 0) {
src = (u8 *)((u8 *)elf + prg_hdr->p_offset);
dest = (u8 *)(((u8 *)core_info->fw_kva_addr) +
prg_hdr->p_vaddr);
memcpy_toio(dest, src, prg_hdr->p_filesz);
- pr_debug("fw kva: %pK, p_vaddr: 0x%x\n",
- dest, prg_hdr->p_vaddr);
}
}
@@ -191,7 +189,7 @@
struct a5_soc_info *cam_a5_soc_info = NULL;
if (!device_priv) {
- pr_err("Invalid cam_dev_info\n");
+ CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
return -EINVAL;
}
@@ -202,44 +200,38 @@
cam_a5_soc_info = soc_info->soc_private;
rc = request_firmware(&core_info->fw_elf, "CAMERA_ICP.elf", &pdev->dev);
- pr_debug("request_firmware: %d\n", rc);
- if (rc < 0) {
- pr_err("Failed to locate fw\n");
+ if (rc) {
+ CAM_ERR(CAM_ICP, "Failed to locate fw: %d", rc);
return rc;
}
if (!core_info->fw_elf) {
- pr_err("request_firmware is failed\n");
+ CAM_ERR(CAM_ICP, "Invalid elf size");
return -EINVAL;
}
fw_start = core_info->fw_elf->data;
rc = cam_icp_validate_fw(fw_start);
- if (rc < 0) {
- pr_err("fw elf validation failed\n");
+ if (rc) {
+ CAM_ERR(CAM_ICP, "fw elf validation failed");
return -EINVAL;
}
rc = cam_icp_get_fw_size(fw_start, &fw_size);
- if (rc < 0) {
- pr_err("unable to get fw file size\n");
+ if (rc) {
+ CAM_ERR(CAM_ICP, "unable to get fw size");
return rc;
}
- pr_debug("cam_icp_get_fw_size: %u\n", fw_size);
-
- /* Check FW firmware memory allocation is OK or not */
- pr_debug("cam_icp_get_fw_size: %u %llu\n",
- fw_size, core_info->fw_buf_len);
if (core_info->fw_buf_len < fw_size) {
- pr_err("fw allocation failed\n");
+ CAM_ERR(CAM_ICP, "mismatch in fw size: %u %llu",
+ fw_size, core_info->fw_buf_len);
goto fw_alloc_failed;
}
- /* download fw */
rc = cam_icp_program_fw(fw_start, core_info);
- if (rc < 0) {
- pr_err("fw program is failed\n");
+ if (rc) {
+ CAM_ERR(CAM_ICP, "fw program is failed");
goto fw_program_failed;
}
@@ -259,7 +251,7 @@
int rc = 0;
if (!device_priv) {
- pr_err("Invalid cam_dev_info\n");
+ CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
return -EINVAL;
}
@@ -267,28 +259,29 @@
core_info = (struct cam_a5_device_core_info *)a5_dev->core_info;
if ((!soc_info) || (!core_info)) {
- pr_err("soc_info = %pK core_info = %pK\n", soc_info, core_info);
+ CAM_ERR(CAM_ICP, "soc_info: %pK core_info: %pK",
+ soc_info, core_info);
return -EINVAL;
}
cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE;
- cpas_vote.ahb_vote.vote.level = CAM_TURBO_VOTE;
+ cpas_vote.ahb_vote.vote.level = CAM_SVS_VOTE;
cpas_vote.axi_vote.compressed_bw = ICP_TURBO_VOTE;
cpas_vote.axi_vote.uncompressed_bw = ICP_TURBO_VOTE;
rc = cam_cpas_start(core_info->cpas_handle,
&cpas_vote.ahb_vote, &cpas_vote.axi_vote);
if (rc) {
- pr_err("cpass start failed: %d\n", rc);
+ CAM_ERR(CAM_ICP, "cpass start failed: %d", rc);
return rc;
}
core_info->cpas_start = true;
rc = cam_a5_enable_soc_resources(soc_info);
if (rc) {
- pr_err("soc enable is failed: %d\n", rc);
+ CAM_ERR(CAM_ICP, "soc enable is failed: %d", rc);
if (cam_cpas_stop(core_info->cpas_handle))
- pr_err("cpas stop is failed\n");
+ CAM_ERR(CAM_ICP, "cpas stop is failed");
else
core_info->cpas_start = false;
}
@@ -305,24 +298,25 @@
int rc = 0;
if (!device_priv) {
- pr_err("Invalid cam_dev_info\n");
+ CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
return -EINVAL;
}
soc_info = &a5_dev->soc_info;
core_info = (struct cam_a5_device_core_info *)a5_dev->core_info;
if ((!soc_info) || (!core_info)) {
- pr_err("soc_info = %pK core_info = %pK\n", soc_info, core_info);
+ CAM_ERR(CAM_ICP, "soc_info = %pK core_info = %pK",
+ soc_info, core_info);
return -EINVAL;
}
rc = cam_a5_disable_soc_resources(soc_info);
if (rc)
- pr_err("soc disable is failed: %d\n", rc);
+ CAM_ERR(CAM_ICP, "soc disable is failed: %d", rc);
if (core_info->cpas_start) {
if (cam_cpas_stop(core_info->cpas_handle))
- pr_err("cpas stop is failed\n");
+ CAM_ERR(CAM_ICP, "cpas stop is failed");
else
core_info->cpas_start = false;
}
@@ -339,7 +333,7 @@
uint32_t irq_status = 0;
if (!data) {
- pr_err("Invalid cam_dev_info or query_cap args\n");
+ CAM_ERR(CAM_ICP, "Invalid cam_dev_info or query_cap args");
return IRQ_HANDLED;
}
@@ -354,18 +348,15 @@
soc_info->reg_map[A5_SIERRA_BASE].mem_base +
core_info->a5_hw_info->a5_host_int_clr);
- pr_debug("irq_status = %x\n", irq_status);
- if (irq_status & A5_HOST_INT)
- pr_debug("A5 to Host interrupt, read msg Q\n");
-
if ((irq_status & A5_WDT_0) ||
(irq_status & A5_WDT_1)) {
- pr_err_ratelimited("watch dog interrupt from A5\n");
+ CAM_ERR_RATE_LIMIT(CAM_ICP, "watch dog interrupt from A5");
}
if (core_info->irq_cb.icp_hw_mgr_cb)
core_info->irq_cb.icp_hw_mgr_cb(irq_status,
core_info->irq_cb.data);
+
return IRQ_HANDLED;
}
@@ -379,12 +370,12 @@
int rc = 0;
if (!device_priv) {
- pr_err("Invalid arguments\n");
+ CAM_ERR(CAM_ICP, "Invalid arguments");
return -EINVAL;
}
if (cmd_type >= CAM_ICP_A5_CMD_MAX) {
- pr_err("Invalid command : %x\n", cmd_type);
+ CAM_ERR(CAM_ICP, "Invalid command : %x", cmd_type);
return -EINVAL;
}
@@ -401,7 +392,7 @@
struct cam_icp_a5_set_fw_buf_info *fw_buf_info = cmd_args;
if (!cmd_args) {
- pr_err("cmd args NULL\n");
+ CAM_ERR(CAM_ICP, "cmd args NULL");
return -EINVAL;
}
@@ -409,15 +400,16 @@
core_info->fw_kva_addr = fw_buf_info->kva;
core_info->fw_buf_len = fw_buf_info->len;
- pr_debug("fw buf info = %x %llx %lld\n", core_info->fw_buf,
- core_info->fw_kva_addr, core_info->fw_buf_len);
+ CAM_DBG(CAM_ICP, "fw buf info = %x %llx %lld",
+ core_info->fw_buf, core_info->fw_kva_addr,
+ core_info->fw_buf_len);
break;
}
case CAM_ICP_A5_SET_IRQ_CB: {
struct cam_icp_a5_set_irq_cb *irq_cb = cmd_args;
if (!cmd_args) {
- pr_err("cmd args NULL\n");
+ CAM_ERR(CAM_ICP, "cmd args NULL");
return -EINVAL;
}
@@ -433,7 +425,7 @@
struct cam_icp_cpas_vote *cpas_vote = cmd_args;
if (!cmd_args) {
- pr_err("cmd args NULL\n");
+ CAM_ERR(CAM_ICP, "cmd args NULL");
return -EINVAL;
}
@@ -445,7 +437,7 @@
struct cam_icp_cpas_vote *cpas_vote = cmd_args;
if (!cmd_args) {
- pr_err("cmd args NULL\n");
+ CAM_ERR(CAM_ICP, "cmd args NULL");
return -EINVAL;
}
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_core.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.h
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_core.h
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.h
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_dev.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_dev.c
similarity index 92%
rename from drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_dev.c
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_dev.c
index f649c3b..08b934e 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_dev.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_dev.c
@@ -23,6 +23,7 @@
#include "cam_a5_hw_intf.h"
#include "cam_icp_hw_mgr_intf.h"
#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
struct a5_soc_info cam_a5_soc_info;
EXPORT_SYMBOL(cam_a5_soc_info);
@@ -64,7 +65,7 @@
rc = cam_cpas_register_client(&cpas_register_params);
if (rc < 0) {
- pr_err("cam_cpas_register_client is failed: %d\n", rc);
+ CAM_ERR(CAM_ICP, "failed: %d", rc);
return rc;
}
@@ -101,7 +102,7 @@
a5_dev_intf->hw_ops.process_cmd = cam_a5_process_cmd;
a5_dev_intf->hw_type = CAM_ICP_DEV_A5;
- pr_debug("%s: type %d index %d\n", __func__,
+ CAM_DBG(CAM_ICP, "type %d index %d",
a5_dev_intf->hw_type,
a5_dev_intf->hw_idx);
@@ -118,9 +119,9 @@
match_dev = of_match_device(pdev->dev.driver->of_match_table,
&pdev->dev);
if (!match_dev) {
- pr_err("%s: No a5 hardware info\n", __func__);
+ CAM_ERR(CAM_ICP, "No a5 hardware info");
rc = -EINVAL;
- goto pr_err;
+ goto match_err;
}
hw_info = (struct cam_a5_device_hw_info *)match_dev->data;
core_info->a5_hw_info = hw_info;
@@ -130,16 +131,16 @@
rc = cam_a5_init_soc_resources(&a5_dev->soc_info, cam_a5_irq,
a5_dev);
if (rc < 0) {
- pr_err("%s: failed to init_soc\n", __func__);
+ CAM_ERR(CAM_ICP, "failed to init_soc");
goto init_soc_failure;
}
- pr_debug("cam_a5_init_soc_resources : %pK\n",
+ CAM_DBG(CAM_ICP, "soc info : %pK",
(void *)&a5_dev->soc_info);
rc = cam_a5_register_cpas(&a5_dev->soc_info,
core_info, a5_dev_intf->hw_idx);
if (rc < 0) {
- pr_err("a5 cpas registration failed\n");
+ CAM_ERR(CAM_ICP, "a5 cpas registration failed");
goto cpas_reg_failed;
}
a5_dev->hw_state = CAM_HW_STATE_POWER_DOWN;
@@ -147,13 +148,13 @@
spin_lock_init(&a5_dev->hw_lock);
init_completion(&a5_dev->hw_complete);
- pr_debug("%s: A5%d probe successful\n", __func__,
+ CAM_DBG(CAM_ICP, "A5%d probe successful",
a5_dev_intf->hw_idx);
return 0;
cpas_reg_failed:
init_soc_failure:
-pr_err:
+match_err:
kfree(a5_dev->core_info);
core_info_alloc_failure:
kfree(a5_dev);
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_soc.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.c
similarity index 86%
rename from drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_soc.c
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.c
index d12b3b6..f252931 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_soc.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.c
@@ -18,9 +18,7 @@
#include <media/cam_icp.h>
#include "a5_soc.h"
#include "cam_soc_util.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
static int cam_a5_get_dt_properties(struct cam_hw_soc_info *soc_info)
{
@@ -35,7 +33,7 @@
rc = cam_soc_util_get_dt_properties(soc_info);
if (rc < 0) {
- pr_err("%s: get a5 dt prop is failed\n", __func__);
+ CAM_ERR(CAM_ICP, "get a5 dt prop is failed");
return rc;
}
@@ -44,7 +42,7 @@
rc = of_property_read_string(of_node, "fw_name", &fw_name);
if (rc < 0)
- pr_err("%s: fw_name read failed\n", __func__);
+ CAM_ERR(CAM_ICP, "fw_name read failed");
return rc;
}
@@ -82,9 +80,10 @@
{
int rc = 0;
- rc = cam_soc_util_enable_platform_resource(soc_info, true, true);
+ rc = cam_soc_util_enable_platform_resource(soc_info, true,
+ CAM_TURBO_VOTE, true);
if (rc)
- pr_err("%s: enable platform failed\n", __func__);
+ CAM_ERR(CAM_ICP, "enable platform failed");
return rc;
}
@@ -95,7 +94,7 @@
rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
if (rc)
- pr_err("%s: disable platform failed\n", __func__);
+ CAM_ERR(CAM_ICP, "disable platform failed");
return rc;
}
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_soc.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.h
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/a5_soc.h
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.h
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/Makefile b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/Makefile
new file mode 100644
index 0000000..114e4a1
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/Makefile
@@ -0,0 +1,11 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/fw_inc
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += bps_dev.o bps_core.o bps_soc.o
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c
similarity index 84%
rename from drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_core.c
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c
index 91652d7..557eaf1 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_core.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c
@@ -10,8 +10,6 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "BPS-CORE %s:%d " fmt, __func__, __LINE__
-
#include <linux/of.h>
#include <linux/debugfs.h>
#include <linux/videodev2.h>
@@ -31,6 +29,7 @@
#include "cam_icp_hw_intf.h"
#include "cam_icp_hw_mgr_intf.h"
#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
static int cam_bps_cpas_vote(struct cam_bps_device_core_info *core_info,
struct cam_icp_cpas_vote *cpas_vote)
@@ -45,7 +44,7 @@
&cpas_vote->axi_vote);
if (rc < 0)
- pr_err("cpas vote is failed: %d\n", rc);
+ CAM_ERR(CAM_ICP, "cpas vote is failed: %d", rc);
return rc;
}
@@ -61,7 +60,7 @@
int rc = 0;
if (!device_priv) {
- pr_err("Invalid cam_dev_info\n");
+ CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
return -EINVAL;
}
@@ -69,28 +68,29 @@
core_info = (struct cam_bps_device_core_info *)bps_dev->core_info;
if ((!soc_info) || (!core_info)) {
- pr_err("soc_info = %pK core_info = %pK\n", soc_info, core_info);
+ CAM_ERR(CAM_ICP, "soc_info = %pK core_info = %pK",
+ soc_info, core_info);
return -EINVAL;
}
cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE;
- cpas_vote.ahb_vote.vote.level = CAM_TURBO_VOTE;
+ cpas_vote.ahb_vote.vote.level = CAM_SVS_VOTE;
cpas_vote.axi_vote.compressed_bw = ICP_TURBO_VOTE;
cpas_vote.axi_vote.uncompressed_bw = ICP_TURBO_VOTE;
rc = cam_cpas_start(core_info->cpas_handle,
&cpas_vote.ahb_vote, &cpas_vote.axi_vote);
if (rc) {
- pr_err("cpass start failed: %d\n", rc);
+ CAM_ERR(CAM_ICP, "cpass start failed: %d", rc);
return rc;
}
core_info->cpas_start = true;
rc = cam_bps_enable_soc_resources(soc_info);
if (rc) {
- pr_err("soc enable is failed: %d\n", rc);
+ CAM_ERR(CAM_ICP, "soc enable is failed: %d", rc);
if (cam_cpas_stop(core_info->cpas_handle))
- pr_err("cpas stop is failed\n");
+ CAM_ERR(CAM_ICP, "cpas stop is failed");
else
core_info->cpas_start = false;
}
@@ -107,24 +107,25 @@
int rc = 0;
if (!device_priv) {
- pr_err("Invalid cam_dev_info\n");
+ CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
return -EINVAL;
}
soc_info = &bps_dev->soc_info;
core_info = (struct cam_bps_device_core_info *)bps_dev->core_info;
if ((!soc_info) || (!core_info)) {
- pr_err("soc_info = %pK core_info = %pK\n", soc_info, core_info);
+ CAM_ERR(CAM_ICP, "soc_info = %pK core_info = %pK",
+ soc_info, core_info);
return -EINVAL;
}
rc = cam_bps_disable_soc_resources(soc_info);
if (rc)
- pr_err("soc disable is failed: %d\n", rc);
+ CAM_ERR(CAM_ICP, "soc disable is failed: %d", rc);
if (core_info->cpas_start) {
if (cam_cpas_stop(core_info->cpas_handle))
- pr_err("cpas stop is failed\n");
+ CAM_ERR(CAM_ICP, "cpas stop is failed");
else
core_info->cpas_start = false;
}
@@ -142,12 +143,12 @@
int rc = 0;
if (!device_priv) {
- pr_err("Invalid arguments\n");
+ CAM_ERR(CAM_ICP, "Invalid arguments");
return -EINVAL;
}
if (cmd_type >= CAM_ICP_BPS_CMD_MAX) {
- pr_err("Invalid command : %x\n", cmd_type);
+ CAM_ERR(CAM_ICP, "Invalid command : %x", cmd_type);
return -EINVAL;
}
@@ -160,7 +161,7 @@
struct cam_icp_cpas_vote *cpas_vote = cmd_args;
if (!cmd_args) {
- pr_err("cmd args NULL\n");
+ CAM_ERR(CAM_ICP, "cmd args NULL");
return -EINVAL;
}
@@ -172,7 +173,7 @@
struct cam_icp_cpas_vote *cpas_vote = cmd_args;
if (!cmd_args) {
- pr_err("cmd args NULL\n");
+ CAM_ERR(CAM_ICP, "cmd args NULL");
return -EINVAL;
}
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_core.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.h
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_core.h
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.h
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_dev.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_dev.c
similarity index 92%
rename from drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_dev.c
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_dev.c
index c3477ee..ddff677 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_dev.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_dev.c
@@ -23,9 +23,7 @@
#include "cam_icp_hw_intf.h"
#include "cam_icp_hw_mgr_intf.h"
#include "cam_cpas_api.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
struct cam_bps_device_hw_info cam_bps_hw_info = {
.reserved = 0,
@@ -47,7 +45,7 @@
rc = cam_cpas_register_client(&cpas_register_params);
if (rc < 0) {
- pr_err("cam_cpas_register_client is failed: %d\n", rc);
+ CAM_ERR(CAM_ICP, "failed: %d", rc);
return rc;
}
core_info->cpas_handle = cpas_register_params.client_handle;
@@ -95,7 +93,7 @@
match_dev = of_match_device(pdev->dev.driver->of_match_table,
&pdev->dev);
if (!match_dev) {
- pr_err("%s: No bps hardware info\n", __func__);
+ CAM_ERR(CAM_ICP, "No bps hardware info");
kfree(bps_dev->core_info);
kfree(bps_dev);
kfree(bps_dev_intf);
@@ -108,13 +106,13 @@
rc = cam_bps_init_soc_resources(&bps_dev->soc_info, cam_bps_irq,
bps_dev);
if (rc < 0) {
- pr_err("%s: failed to init_soc\n", __func__);
+ CAM_ERR(CAM_ICP, "failed to init_soc");
kfree(bps_dev->core_info);
kfree(bps_dev);
kfree(bps_dev_intf);
return rc;
}
- pr_debug("cam_bps_init_soc_resources : %pK\n",
+ CAM_DBG(CAM_ICP, "soc info : %pK",
(void *)&bps_dev->soc_info);
rc = cam_bps_register_cpas(&bps_dev->soc_info,
@@ -129,7 +127,7 @@
mutex_init(&bps_dev->hw_mutex);
spin_lock_init(&bps_dev->hw_lock);
init_completion(&bps_dev->hw_complete);
- pr_debug("%s: BPS%d probe successful\n", __func__,
+ CAM_DBG(CAM_ICP, "BPS%d probe successful",
bps_dev_intf->hw_idx);
return rc;
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_soc.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c
similarity index 86%
rename from drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_soc.c
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c
index 76884bf..54e898c 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_soc.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c
@@ -18,9 +18,7 @@
#include <media/cam_icp.h>
#include "bps_soc.h"
#include "cam_soc_util.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
static int cam_bps_get_dt_properties(struct cam_hw_soc_info *soc_info)
{
@@ -28,7 +26,7 @@
rc = cam_soc_util_get_dt_properties(soc_info);
if (rc < 0)
- pr_err("get bps dt prop is failed\n");
+ CAM_ERR(CAM_ICP, "get bps dt prop is failed");
return rc;
}
@@ -66,9 +64,10 @@
{
int rc = 0;
- rc = cam_soc_util_enable_platform_resource(soc_info, true, false);
+ rc = cam_soc_util_enable_platform_resource(soc_info, true,
+ CAM_TURBO_VOTE, false);
if (rc)
- pr_err("%s: enable platform failed\n", __func__);
+ CAM_ERR(CAM_ICP, "enable platform failed");
return rc;
}
@@ -79,7 +78,7 @@
rc = cam_soc_util_disable_platform_resource(soc_info, true, false);
if (rc)
- pr_err("%s: disable platform failed\n", __func__);
+ CAM_ERR(CAM_ICP, "disable platform failed");
return rc;
}
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_soc.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.h
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/bps_soc.h
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.h
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/Makefile b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/Makefile
similarity index 63%
rename from drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/Makefile
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/Makefile
index 4a6c3c0..71afea4 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/Makefile
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/Makefile
@@ -1,16 +1,16 @@
ccflags-y += -Idrivers/media/platform/msm/camera/isp/isp_hw/include
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/include
ccflags-y += -Idrivers/media/platform/msm/camera/isp/isp_hw/hw_utils/include
ccflags-y += -Idrivers/media/platform/msm/camera/isp/isp_hw/isp_hw_mgr/
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/fw_inc/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/fw_inc/
ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr/
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include/
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/a5_hw/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/
ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
obj-$(CONFIG_SPECTRA_CAMERA) += cam_icp_hw_mgr.o
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
similarity index 60%
rename from drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
index 677c24e..1b3afc0 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -10,8 +10,6 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "ICP-HW-MGR %s:%d " fmt, __func__, __LINE__
-
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/of.h>
@@ -26,7 +24,6 @@
#include <linux/debugfs.h>
#include <media/cam_defs.h>
#include <media/cam_icp.h>
-#include <linux/debugfs.h>
#include "cam_sync_api.h"
#include "cam_packet_util.h"
@@ -47,9 +44,7 @@
#include "cam_mem_mgr.h"
#include "a5_core.h"
#include "hfi_sys_defs.h"
-
-#undef ICP_DBG
-#define ICP_DBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
#define ICP_WORKQ_NUM_TASK 30
#define ICP_WORKQ_TASK_CMD_TYPE 1
@@ -81,7 +76,7 @@
struct cam_icp_hw_mgr *hw_mgr;
if (!data || !priv) {
- pr_err("Invalid params%pK %pK\n", data, priv);
+ CAM_ERR(CAM_ICP, "Invalid params%pK %pK", data, priv);
return -EINVAL;
}
@@ -89,10 +84,7 @@
task_data = (struct hfi_cmd_work_data *)data;
rc = hfi_write_cmd(task_data->data);
- if (rc < 0)
- pr_err("unable to write\n");
- ICP_DBG("task type : %u, rc : %d\n", task_data->type, rc);
return rc;
}
@@ -100,31 +92,29 @@
{
int i;
uint32_t idx;
- uint32_t request_id;
+ uint64_t request_id;
struct cam_icp_hw_ctx_data *ctx_data = NULL;
struct hfi_msg_ipebps_async_ack *ioconfig_ack = NULL;
struct hfi_msg_frame_process_done *frame_done;
struct hfi_frame_process_info *hfi_frame_process;
- struct cam_hw_done_event_data buf_data;
+ struct cam_hw_done_event_data buf_data;
ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
if (ioconfig_ack->err_type != HFI_ERR_SYS_NONE) {
- pr_err("failed with error : %u\n", ioconfig_ack->err_type);
+ CAM_ERR(CAM_ICP, "failed with error : %u",
+ ioconfig_ack->err_type);
return -EIO;
}
frame_done =
(struct hfi_msg_frame_process_done *)ioconfig_ack->msg_data;
if (frame_done->result) {
- pr_err("result : %u\n", frame_done->result);
+ CAM_ERR(CAM_ICP, "result : %u", frame_done->result);
return -EIO;
}
- ICP_DBG("result : %u\n", frame_done->result);
ctx_data = (struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
request_id = ioconfig_ack->user_data2;
- ICP_DBG("ctx : %pK, request_id :%d\n",
- (void *)ctx_data->context_priv, request_id);
hfi_frame_process = &ctx_data->hfi_frame_process;
for (i = 0; i < CAM_FRAME_CMD_MAX; i++)
@@ -132,22 +122,17 @@
break;
if (i >= CAM_FRAME_CMD_MAX) {
- pr_err("unable to find pkt in ctx data for req_id =%d\n",
+ CAM_ERR(CAM_ICP, "pkt not found in ctx data for req_id =%lld",
request_id);
return -EINVAL;
}
idx = i;
- /* send event to ctx this needs to be done in msg handler */
- buf_data.num_handles = hfi_frame_process->num_out_resources[idx];
- for (i = 0; i < buf_data.num_handles; i++)
- buf_data.resource_handle[i] =
- hfi_frame_process->out_resource[idx][i];
-
- ctx_data->ctxt_event_cb(ctx_data->context_priv, 0, &buf_data);
+ buf_data.request_id = hfi_frame_process->request_id[idx];
+ ctx_data->ctxt_event_cb(ctx_data->context_priv, false, &buf_data);
/* now release memory for hfi frame process command */
- ICP_DBG("matching request id: %d\n",
+ CAM_DBG(CAM_ICP, "matching request id: %lld",
hfi_frame_process->request_id[idx]);
mutex_lock(&ctx_data->hfi_frame_process.lock);
hfi_frame_process->request_id[idx] = 0;
@@ -164,33 +149,39 @@
struct hfi_msg_bps_common *bps_config_ack = NULL;
ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
- ICP_DBG("opcode : %u\n", ioconfig_ack->opcode);
if (ioconfig_ack->opcode == HFI_IPEBPS_CMD_OPCODE_IPE_CONFIG_IO) {
ipe_config_ack =
(struct hfi_msg_ipe_config *)(ioconfig_ack->msg_data);
if (ipe_config_ack->rc) {
- pr_err("rc = %d err = %u\n",
+ CAM_ERR(CAM_ICP, "rc = %d err = %u",
ipe_config_ack->rc, ioconfig_ack->err_type);
return -EIO;
}
ctx_data =
(struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
+ if (!ctx_data) {
+ CAM_ERR(CAM_ICP, "wrong ctx data from IPE response");
+ return -EINVAL;
+ }
+
mutex_lock(&ctx_data->ctx_mutex);
ctx_data->scratch_mem_size = ipe_config_ack->scratch_mem_size;
mutex_unlock(&ctx_data->ctx_mutex);
- ICP_DBG("scratch_mem_size = %u\n",
- ipe_config_ack->scratch_mem_size);
} else {
bps_config_ack =
(struct hfi_msg_bps_common *)(ioconfig_ack->msg_data);
if (bps_config_ack->rc) {
- pr_err("rc : %u, opcode :%u\n",
+ CAM_ERR(CAM_ICP, "rc : %u, opcode :%u",
bps_config_ack->rc, ioconfig_ack->opcode);
return -EIO;
}
ctx_data =
(struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
+ if (!ctx_data) {
+ CAM_ERR(CAM_ICP, "wrong ctx data from BPS response");
+ return -EINVAL;
+ }
}
complete(&ctx_data->wait_complete);
@@ -204,22 +195,20 @@
create_handle_ack = (struct hfi_msg_create_handle_ack *)msg_ptr;
if (!create_handle_ack) {
- pr_err("Invalid create_handle_ack\n");
+ CAM_ERR(CAM_ICP, "Invalid create_handle_ack");
return -EINVAL;
}
- ICP_DBG("err type : %u\n", create_handle_ack->err_type);
-
ctx_data = (struct cam_icp_hw_ctx_data *)create_handle_ack->user_data1;
if (!ctx_data) {
- pr_err("Invalid ctx_data\n");
+ CAM_ERR(CAM_ICP, "Invalid ctx_data");
return -EINVAL;
}
mutex_lock(&ctx_data->ctx_mutex);
ctx_data->fw_handle = create_handle_ack->fw_handle;
mutex_unlock(&ctx_data->ctx_mutex);
- ICP_DBG("fw_handle = %x\n", ctx_data->fw_handle);
+ CAM_DBG(CAM_ICP, "fw_handle = %x", ctx_data->fw_handle);
complete(&ctx_data->wait_complete);
return 0;
@@ -232,18 +221,16 @@
ping_ack = (struct hfi_msg_ping_ack *)msg_ptr;
if (!ping_ack) {
- pr_err("Empty ping ack message\n");
+ CAM_ERR(CAM_ICP, "Empty ping ack message");
return -EINVAL;
}
ctx_data = (struct cam_icp_hw_ctx_data *)ping_ack->user_data;
if (!ctx_data) {
- pr_err("Invalid ctx_data\n");
+ CAM_ERR(CAM_ICP, "Invalid ctx_data");
return -EINVAL;
}
- ICP_DBG("%x %x %pK\n", ping_ack->size, ping_ack->pkt_type,
- (void *)ping_ack->user_data);
complete(&ctx_data->wait_complete);
return 0;
@@ -253,29 +240,24 @@
{
int rc;
- switch (msg_ptr[ICP_PACKET_IPCODE]) {
+ switch (msg_ptr[ICP_PACKET_OPCODE]) {
case HFI_IPEBPS_CMD_OPCODE_IPE_CONFIG_IO:
case HFI_IPEBPS_CMD_OPCODE_BPS_CONFIG_IO:
- ICP_DBG("received HFI_IPEBPS_CMD_OPCODE_IPE/BPS_CONFIG_IO:\n");
+ CAM_DBG(CAM_ICP, "received IPE/BPS_CONFIG_IO:");
rc = cam_icp_mgr_process_msg_config_io(msg_ptr);
- if (rc < 0) {
- pr_err("error in process_msg_config_io\n");
+ if (rc)
return rc;
- }
break;
case HFI_IPEBPS_CMD_OPCODE_IPE_FRAME_PROCESS:
case HFI_IPEBPS_CMD_OPCODE_BPS_FRAME_PROCESS:
- ICP_DBG("received OPCODE_IPE/BPS_FRAME_PROCESS:\n");
rc = cam_icp_mgr_process_msg_frame_process(msg_ptr);
- if (rc < 0) {
- pr_err("error in msg_frame_process\n");
+ if (rc)
return rc;
- }
break;
default:
- pr_err("Invalid opcode : %u\n",
- msg_ptr[ICP_PACKET_IPCODE]);
+ CAM_ERR(CAM_ICP, "Invalid opcode : %u",
+ msg_ptr[ICP_PACKET_OPCODE]);
break;
}
@@ -286,23 +268,27 @@
{
struct cam_icp_hw_ctx_data *ctx_data = NULL;
struct hfi_msg_ipebps_async_ack *ioconfig_ack = NULL;
+ int rc = 0;
- if (msg_ptr[ICP_PACKET_IPCODE] ==
- HFI_IPEBPS_CMD_OPCODE_IPE_DESTROY ||
- msg_ptr[ICP_PACKET_IPCODE] ==
- HFI_IPEBPS_CMD_OPCODE_BPS_DESTROY) {
- ICP_DBG("received HFI_IPEBPS_CMD_OPCODE_IPE/BPS_DESTROY:\n");
+ switch (msg_ptr[ICP_PACKET_OPCODE]) {
+ case HFI_IPEBPS_CMD_OPCODE_IPE_DESTROY:
+ case HFI_IPEBPS_CMD_OPCODE_BPS_DESTROY:
+ case HFI_IPEBPS_CMD_OPCODE_IPE_ABORT:
+ case HFI_IPEBPS_CMD_OPCODE_BPS_ABORT:
+ CAM_DBG(CAM_ICP, "received IPE/BPS_DESTROY/ABORT:");
ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
ctx_data =
(struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
complete(&ctx_data->wait_complete);
-
- } else {
- pr_err("Invalid opcode : %u\n", msg_ptr[ICP_PACKET_IPCODE]);
- return -EINVAL;
+ break;
+ default:
+ CAM_ERR(CAM_ICP, "Invalid opcode : %u",
+ msg_ptr[ICP_PACKET_OPCODE]);
+ rc = -EINVAL;
+ break;
}
- return 0;
+ return rc;
}
static int32_t cam_icp_mgr_process_msg(void *priv, void *data)
@@ -314,62 +300,53 @@
int read_len;
if (!data || !priv) {
- pr_err("Invalid data\n");
+ CAM_ERR(CAM_ICP, "Invalid data");
return -EINVAL;
}
task_data = data;
hw_mgr = priv;
- ICP_DBG("irq status : %u\n", task_data->irq_status);
read_len = hfi_read_message(icp_hw_mgr.msg_buf, Q_MSG);
if (read_len < 0) {
- ICP_DBG("Unable to read msg q\n");
+ CAM_DBG(CAM_ICP, "Unable to read msg q");
return read_len;
}
msg_ptr = (uint32_t *)icp_hw_mgr.msg_buf;
- ICP_DBG("packet type: %x\n", msg_ptr[ICP_PACKET_TYPE]);
mutex_lock(&hw_mgr->hw_mgr_mutex);
switch (msg_ptr[ICP_PACKET_TYPE]) {
case HFI_MSG_SYS_INIT_DONE:
- ICP_DBG("received HFI_MSG_SYS_INIT_DONE\n");
+ CAM_DBG(CAM_ICP, "received SYS_INIT_DONE");
complete(&hw_mgr->a5_complete);
break;
case HFI_MSG_SYS_PING_ACK:
- ICP_DBG("received HFI_MSG_SYS_PING_ACK\n");
+ CAM_DBG(CAM_ICP, "received SYS_PING_ACK");
rc = cam_icp_mgr_process_msg_ping_ack(msg_ptr);
- if (rc)
- pr_err("fail process PING_ACK\n");
break;
case HFI_MSG_IPEBPS_CREATE_HANDLE_ACK:
- ICP_DBG("received HFI_MSG_IPEBPS_CREATE_HANDLE_ACK\n");
+ CAM_DBG(CAM_ICP, "received IPEBPS_CREATE_HANDLE_ACK");
rc = cam_icp_mgr_process_msg_create_handle(msg_ptr);
- if (rc)
- pr_err("fail process CREATE_HANDLE_ACK\n");
break;
case HFI_MSG_IPEBPS_ASYNC_COMMAND_INDIRECT_ACK:
rc = cam_icp_mgr_process_indirect_ack_msg(msg_ptr);
- if (rc)
- pr_err("fail process INDIRECT_ACK\n");
break;
case HFI_MSG_IPEBPS_ASYNC_COMMAND_DIRECT_ACK:
rc = cam_icp_mgr_process_direct_ack_msg(msg_ptr);
- if (rc)
- pr_err("fail process DIRECT_ACK\n");
break;
case HFI_MSG_EVENT_NOTIFY:
- ICP_DBG("received HFI_MSG_EVENT_NOTIFY\n");
+ CAM_DBG(CAM_ICP, "received EVENT_NOTIFY");
break;
default:
- pr_err("invalid msg : %u\n", msg_ptr[ICP_PACKET_TYPE]);
+ CAM_ERR(CAM_ICP, "invalid msg : %u",
+ msg_ptr[ICP_PACKET_TYPE]);
break;
}
@@ -389,7 +366,7 @@
spin_lock_irqsave(&hw_mgr->hw_mgr_lock, flags);
task = cam_req_mgr_workq_get_task(icp_hw_mgr.msg_work);
if (!task) {
- pr_err("no empty task\n");
+ CAM_ERR(CAM_ICP, "no empty task");
spin_unlock_irqrestore(&hw_mgr->hw_mgr_lock, flags);
return -ENOMEM;
}
@@ -416,143 +393,102 @@
cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.sec_heap);
}
-static int cam_icp_allocate_hfi_mem(void)
+static int cam_icp_alloc_shared_mem(struct cam_mem_mgr_memory_desc *qtbl)
{
int rc;
struct cam_mem_mgr_request_desc alloc;
struct cam_mem_mgr_memory_desc out;
- dma_addr_t iova;
+
+ memset(&alloc, 0, sizeof(alloc));
+ memset(&out, 0, sizeof(out));
+ alloc.size = SZ_1M;
+ alloc.align = 0;
+ alloc.flags = CAM_MEM_FLAG_HW_READ_WRITE |
+ CAM_MEM_FLAG_HW_SHARED_ACCESS;
+ alloc.smmu_hdl = icp_hw_mgr.iommu_hdl;
+ rc = cam_mem_mgr_request_mem(&alloc, &out);
+ if (rc)
+ return rc;
+
+ *qtbl = out;
+ CAM_DBG(CAM_ICP, "kva: %llX, iova: %x, hdl: %x, len: %lld",
+ out.kva, out.iova, out.mem_handle, out.len);
+
+ return rc;
+}
+
+static int cam_icp_allocate_fw_mem(void)
+{
+ int rc;
uint64_t kvaddr;
size_t len;
-
- rc = cam_smmu_get_region_info(icp_hw_mgr.iommu_hdl,
- CAM_MEM_MGR_REGION_SHARED,
- &icp_hw_mgr.hfi_mem.shmem);
- if (rc)
- return -ENOMEM;
+ dma_addr_t iova;
rc = cam_smmu_alloc_firmware(icp_hw_mgr.iommu_hdl,
&iova, &kvaddr, &len);
- if (rc < 0) {
- pr_err("Unable to allocate FW memory\n");
+ if (rc)
return -ENOMEM;
- }
icp_hw_mgr.hfi_mem.fw_buf.len = len;
icp_hw_mgr.hfi_mem.fw_buf.kva = kvaddr;
icp_hw_mgr.hfi_mem.fw_buf.iova = iova;
icp_hw_mgr.hfi_mem.fw_buf.smmu_hdl = icp_hw_mgr.iommu_hdl;
- ICP_DBG("kva = %llX\n", kvaddr);
- ICP_DBG("IOVA = %llX\n", iova);
- ICP_DBG("length = %zu\n", len);
-
- memset(&alloc, 0, sizeof(alloc));
- memset(&out, 0, sizeof(out));
- alloc.size = SZ_1M;
- alloc.align = 0;
- alloc.region = CAM_MEM_MGR_REGION_SHARED;
- alloc.smmu_hdl = icp_hw_mgr.iommu_hdl;
- rc = cam_mem_mgr_request_mem(&alloc, &out);
- if (rc < 0) {
- pr_err("Unable to allocate qtbl memory\n");
- goto qtbl_alloc_failed;
- }
- icp_hw_mgr.hfi_mem.qtbl = out;
-
- ICP_DBG("kva = %llX\n", out.kva);
- ICP_DBG("qtbl IOVA = %X\n", out.iova);
- ICP_DBG("SMMU HDL = %X\n", out.smmu_hdl);
- ICP_DBG("MEM HDL = %X\n", out.mem_handle);
- ICP_DBG("length = %lld\n", out.len);
- ICP_DBG("region = %d\n", out.region);
-
- /* Allocate memory for cmd queue */
- memset(&alloc, 0, sizeof(alloc));
- memset(&out, 0, sizeof(out));
- alloc.size = SZ_1M;
- alloc.align = 0;
- alloc.region = CAM_MEM_MGR_REGION_SHARED;
- alloc.smmu_hdl = icp_hw_mgr.iommu_hdl;
- rc = cam_mem_mgr_request_mem(&alloc, &out);
- if (rc < 0) {
- pr_err("Unable to allocate cmd q memory\n");
- goto cmd_q_alloc_failed;
- }
- icp_hw_mgr.hfi_mem.cmd_q = out;
-
- ICP_DBG("kva = %llX\n", out.kva);
- ICP_DBG("cmd_q IOVA = %X\n", out.iova);
- ICP_DBG("SMMU HDL = %X\n", out.smmu_hdl);
- ICP_DBG("MEM HDL = %X\n", out.mem_handle);
- ICP_DBG("length = %lld\n", out.len);
- ICP_DBG("region = %d\n", out.region);
-
- /* Allocate memory for msg queue */
- memset(&alloc, 0, sizeof(alloc));
- memset(&out, 0, sizeof(out));
- alloc.size = SZ_1M;
- alloc.align = 0;
- alloc.region = CAM_MEM_MGR_REGION_SHARED;
- alloc.smmu_hdl = icp_hw_mgr.iommu_hdl;
- rc = cam_mem_mgr_request_mem(&alloc, &out);
- if (rc < 0) {
- pr_err("Unable to allocate msg q memory\n");
- goto msg_q_alloc_failed;
- }
- icp_hw_mgr.hfi_mem.msg_q = out;
-
- ICP_DBG("kva = %llX\n", out.kva);
- ICP_DBG("msg_q IOVA = %X\n", out.iova);
- ICP_DBG("SMMU HDL = %X\n", out.smmu_hdl);
- ICP_DBG("MEM HDL = %X\n", out.mem_handle);
- ICP_DBG("length = %lld\n", out.len);
- ICP_DBG("region = %d\n", out.region);
-
- /* Allocate memory for dbg queue */
- memset(&alloc, 0, sizeof(alloc));
- memset(&out, 0, sizeof(out));
- alloc.size = SZ_1M;
- alloc.align = 0;
- alloc.region = CAM_MEM_MGR_REGION_SHARED;
- alloc.smmu_hdl = icp_hw_mgr.iommu_hdl;
- rc = cam_mem_mgr_request_mem(&alloc, &out);
- if (rc < 0) {
- pr_err("Unable to allocate dbg q memory\n");
- goto dbg_q_alloc_failed;
- }
- icp_hw_mgr.hfi_mem.dbg_q = out;
-
- ICP_DBG("kva = %llX\n", out.kva);
- ICP_DBG("dbg_q IOVA = %X\n", out.iova);
- ICP_DBG("SMMU HDL = %X\n", out.smmu_hdl);
- ICP_DBG("MEM HDL = %X\n", out.mem_handle);
- ICP_DBG("length = %lld\n", out.len);
- ICP_DBG("region = %d\n", out.region);
-
- /* Allocate memory for sec heap queue */
- memset(&alloc, 0, sizeof(alloc));
- memset(&out, 0, sizeof(out));
- alloc.size = SZ_1M;
- alloc.align = 0;
- alloc.region = CAM_MEM_MGR_REGION_SHARED;
- alloc.smmu_hdl = icp_hw_mgr.iommu_hdl;
- rc = cam_mem_mgr_request_mem(&alloc, &out);
- if (rc < 0) {
- pr_err("Unable to allocate sec heap q memory\n");
- goto sec_heap_alloc_failed;
- }
- icp_hw_mgr.hfi_mem.sec_heap = out;
-
- ICP_DBG("kva = %llX\n", out.kva);
- ICP_DBG("sec_heap IOVA = %X\n", out.iova);
- ICP_DBG("SMMU HDL = %X\n", out.smmu_hdl);
- ICP_DBG("MEM HDL = %X\n", out.mem_handle);
- ICP_DBG("length = %lld\n", out.len);
- ICP_DBG("region = %d\n", out.region);
+ CAM_DBG(CAM_ICP, "kva: %llX, iova: %llx, len: %zu",
+ kvaddr, iova, len);
return rc;
+}
+static int cam_icp_allocate_hfi_mem(void)
+{
+ int rc;
+
+ rc = cam_smmu_get_region_info(icp_hw_mgr.iommu_hdl,
+ CAM_SMMU_REGION_SHARED,
+ &icp_hw_mgr.hfi_mem.shmem);
+ if (rc) {
+ CAM_ERR(CAM_ICP, "Unable to get shared memory info");
+ return rc;
+ }
+
+ rc = cam_icp_allocate_fw_mem();
+ if (rc) {
+ CAM_ERR(CAM_ICP, "Unable to allocate FW memory");
+ return rc;
+ }
+
+ rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.qtbl);
+ if (rc) {
+ CAM_ERR(CAM_ICP, "Unable to allocate qtbl memory");
+ goto qtbl_alloc_failed;
+ }
+
+ rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.cmd_q);
+ if (rc) {
+ CAM_ERR(CAM_ICP, "Unable to allocate cmd q memory");
+ goto cmd_q_alloc_failed;
+ }
+
+ rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.msg_q);
+ if (rc) {
+ CAM_ERR(CAM_ICP, "Unable to allocate msg q memory");
+ goto msg_q_alloc_failed;
+ }
+
+ rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.dbg_q);
+ if (rc) {
+ CAM_ERR(CAM_ICP, "Unable to allocate dbg q memory");
+ goto dbg_q_alloc_failed;
+ }
+
+ rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.sec_heap);
+ if (rc) {
+ CAM_ERR(CAM_ICP, "Unable to allocate sec heap q memory");
+ goto sec_heap_alloc_failed;
+ }
+
+ return rc;
sec_heap_alloc_failed:
cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.dbg_q);
dbg_q_alloc_failed:
@@ -563,20 +499,17 @@
cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.qtbl);
qtbl_alloc_failed:
cam_smmu_dealloc_firmware(icp_hw_mgr.iommu_hdl);
- pr_err("returned with error : %d\n", rc);
-
return rc;
}
static int cam_icp_mgr_get_free_ctx(struct cam_icp_hw_mgr *hw_mgr)
{
int i = 0;
- int num_ctx = CAM_ICP_CTX_MAX;
- for (i = 0; i < num_ctx; i++) {
+ for (i = 0; i < CAM_ICP_CTX_MAX; i++) {
mutex_lock(&hw_mgr->ctx_data[i].ctx_mutex);
- if (hw_mgr->ctx_data[i].in_use == 0) {
- hw_mgr->ctx_data[i].in_use = 1;
+ if (hw_mgr->ctx_data[i].in_use == false) {
+ hw_mgr->ctx_data[i].in_use = true;
mutex_unlock(&hw_mgr->ctx_data[i].ctx_mutex);
break;
}
@@ -586,22 +519,87 @@
return i;
}
+static void cam_icp_mgr_put_ctx(struct cam_icp_hw_ctx_data *ctx_data)
+{
+ mutex_lock(&ctx_data->ctx_mutex);
+ ctx_data->in_use = false;
+ mutex_unlock(&ctx_data->ctx_mutex);
+}
+
+static int cam_icp_mgr_abort_handle(
+ struct cam_icp_hw_ctx_data *ctx_data)
+{
+ int rc = 0;
+ int timeout = 5000;
+ struct hfi_cmd_work_data *task_data;
+ struct hfi_cmd_ipebps_async abort_cmd;
+ struct crm_workq_task *task;
+ unsigned long rem_jiffies;
+
+ task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+ if (!task)
+ return -ENOMEM;
+
+ abort_cmd.size =
+ sizeof(struct hfi_cmd_ipebps_async) +
+ sizeof(struct hfi_cmd_abort_destroy) -
+ sizeof(abort_cmd.payload.direct);
+ abort_cmd.pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_DIRECT;
+ if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
+ abort_cmd.opcode = HFI_IPEBPS_CMD_OPCODE_BPS_ABORT;
+ else
+ abort_cmd.opcode = HFI_IPEBPS_CMD_OPCODE_IPE_ABORT;
+
+ reinit_completion(&ctx_data->wait_complete);
+ abort_cmd.num_fw_handles = 1;
+ abort_cmd.fw_handles[0] = ctx_data->fw_handle;
+ abort_cmd.user_data1 = (uint64_t)ctx_data;
+ abort_cmd.user_data2 = (uint64_t)0x0;
+ memcpy(abort_cmd.payload.direct, &ctx_data->temp_payload,
+ sizeof(uint32_t));
+
+ task_data = (struct hfi_cmd_work_data *)task->payload;
+ task_data->data = (void *)&abort_cmd;
+ task_data->request_id = 0;
+ task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
+ task->process_cb = cam_icp_mgr_process_cmd;
+ rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+ CRM_TASK_PRIORITY_0);
+ if (rc)
+ return rc;
+
+ CAM_DBG(CAM_ICP, "fw_handle = %x ctx_data = %pK",
+ ctx_data->fw_handle, ctx_data);
+ rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
+ msecs_to_jiffies((timeout)));
+ if (!rem_jiffies) {
+ rc = -ETIMEDOUT;
+ CAM_DBG(CAM_ICP, "FW timeout/err in abort handle command");
+ }
+
+ return rc;
+}
+
static int cam_icp_mgr_destroy_handle(
- struct cam_icp_hw_ctx_data *ctx_data,
- struct crm_workq_task *task)
+ struct cam_icp_hw_ctx_data *ctx_data)
{
int rc = 0;
int timeout = 5000;
struct hfi_cmd_work_data *task_data;
struct hfi_cmd_ipebps_async destroy_cmd;
+ struct crm_workq_task *task;
unsigned long rem_jiffies;
+ task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+ if (!task)
+ return -ENOMEM;
+
destroy_cmd.size =
sizeof(struct hfi_cmd_ipebps_async) +
sizeof(struct ipe_bps_destroy) -
sizeof(destroy_cmd.payload.direct);
destroy_cmd.pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_DIRECT;
- if (ctx_data->icp_dev_acquire_info.dev_type == CAM_ICP_RES_TYPE_BPS)
+ if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
destroy_cmd.opcode = HFI_IPEBPS_CMD_OPCODE_BPS_DESTROY;
else
destroy_cmd.opcode = HFI_IPEBPS_CMD_OPCODE_IPE_DESTROY;
@@ -619,14 +617,18 @@
task_data->request_id = 0;
task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
task->process_cb = cam_icp_mgr_process_cmd;
- cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr, CRM_TASK_PRIORITY_0);
- ICP_DBG("fw_handle = %x ctx_data = %pK\n",
+ rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+ CRM_TASK_PRIORITY_0);
+ if (rc)
+ return rc;
+
+ CAM_DBG(CAM_ICP, "fw_handle = %x ctx_data = %pK",
ctx_data->fw_handle, ctx_data);
rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
msecs_to_jiffies((timeout)));
if (!rem_jiffies) {
rc = -ETIMEDOUT;
- pr_err("FW response timeout: %d\n", rc);
+ CAM_ERR(CAM_ICP, "FW response timeout: %d", rc);
}
return rc;
@@ -634,29 +636,25 @@
static int cam_icp_mgr_release_ctx(struct cam_icp_hw_mgr *hw_mgr, int ctx_id)
{
- struct crm_workq_task *task;
int i = 0;
if (ctx_id >= CAM_ICP_CTX_MAX) {
- pr_err("ctx_id is wrong: %d\n", ctx_id);
+ CAM_ERR(CAM_ICP, "ctx_id is wrong: %d", ctx_id);
return -EINVAL;
}
mutex_lock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
if (!hw_mgr->ctx_data[ctx_id].in_use) {
- ICP_DBG("ctx is not in use: %d\n", ctx_id);
mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
return 0;
}
mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
- task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
- if (task)
- cam_icp_mgr_destroy_handle(&hw_mgr->ctx_data[ctx_id], task);
+ cam_icp_mgr_destroy_handle(&hw_mgr->ctx_data[ctx_id]);
mutex_lock(&hw_mgr->hw_mgr_mutex);
mutex_lock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
- hw_mgr->ctx_data[ctx_id].in_use = 0;
+ hw_mgr->ctx_data[ctx_id].in_use = false;
hw_mgr->ctx_data[ctx_id].fw_handle = 0;
hw_mgr->ctx_data[ctx_id].scratch_mem_size = 0;
mutex_lock(&hw_mgr->ctx_data[ctx_id].hfi_frame_process.lock);
@@ -664,138 +662,102 @@
clear_bit(i, hw_mgr->ctx_data[ctx_id].hfi_frame_process.bitmap);
mutex_unlock(&hw_mgr->ctx_data[ctx_id].hfi_frame_process.lock);
mutex_destroy(&hw_mgr->ctx_data[ctx_id].hfi_frame_process.lock);
- mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
kfree(hw_mgr->ctx_data[ctx_id].hfi_frame_process.bitmap);
+ hw_mgr->ctxt_cnt--;
+ kfree(hw_mgr->ctx_data[ctx_id].icp_dev_acquire_info);
+ hw_mgr->ctx_data[ctx_id].icp_dev_acquire_info = NULL;
+ mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
mutex_unlock(&hw_mgr->hw_mgr_mutex);
return 0;
}
-static int cam_icp_mgr_get_ctx_from_fw_handle(struct cam_icp_hw_mgr *hw_mgr,
- uint32_t fw_handle)
+static void cam_icp_mgr_device_deinit(struct cam_icp_hw_mgr *hw_mgr)
{
- int ctx_id;
+ struct cam_hw_intf *a5_dev_intf = NULL;
+ struct cam_hw_intf *ipe0_dev_intf = NULL;
+ struct cam_hw_intf *ipe1_dev_intf = NULL;
+ struct cam_hw_intf *bps_dev_intf = NULL;
- for (ctx_id = 0; ctx_id < CAM_ICP_CTX_MAX; ctx_id++) {
- mutex_lock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
- if (hw_mgr->ctx_data[ctx_id].in_use) {
- if (hw_mgr->ctx_data[ctx_id].fw_handle == fw_handle) {
- mutex_unlock(
- &hw_mgr->ctx_data[ctx_id].ctx_mutex);
- return ctx_id;
- }
- }
- mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
+ a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+ ipe0_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][0];
+ ipe1_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][1];
+ bps_dev_intf = hw_mgr->devices[CAM_ICP_DEV_BPS][0];
+
+ if ((!a5_dev_intf) || (!ipe0_dev_intf) || (!bps_dev_intf)) {
+ CAM_ERR(CAM_ICP, "dev intfs are wrong, failed to close");
+ return;
}
- ICP_DBG("Invalid fw handle to get ctx\n");
- return -EINVAL;
+ if (ipe1_dev_intf)
+ ipe1_dev_intf->hw_ops.deinit(ipe1_dev_intf->hw_priv, NULL, 0);
+ ipe0_dev_intf->hw_ops.deinit(ipe0_dev_intf->hw_priv, NULL, 0);
+ bps_dev_intf->hw_ops.deinit(bps_dev_intf->hw_priv, NULL, 0);
+ a5_dev_intf->hw_ops.deinit(a5_dev_intf->hw_priv, NULL, 0);
}
static int cam_icp_mgr_hw_close(void *hw_priv, void *hw_close_args)
{
struct cam_icp_hw_mgr *hw_mgr = hw_priv;
struct cam_hw_intf *a5_dev_intf = NULL;
- struct cam_hw_intf *ipe0_dev_intf = NULL;
- struct cam_hw_intf *ipe1_dev_intf = NULL;
- struct cam_hw_intf *bps_dev_intf = NULL;
struct cam_icp_a5_set_irq_cb irq_cb;
struct cam_icp_a5_set_fw_buf_info fw_buf_info;
- struct cam_icp_hw_ctx_data *ctx_data = NULL;
- int i;
+ int i, rc = 0;
mutex_lock(&hw_mgr->hw_mgr_mutex);
- if (hw_mgr->fw_download == false) {
- ICP_DBG("hw mgr is already closed\n");
+ if ((hw_mgr->fw_download == false) && (!hw_mgr->ctxt_cnt)) {
+ CAM_DBG(CAM_ICP, "hw mgr is already closed");
mutex_unlock(&hw_mgr->hw_mgr_mutex);
return 0;
}
a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
- ipe0_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][0];
- ipe1_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][1];
- bps_dev_intf = hw_mgr->devices[CAM_ICP_DEV_BPS][0];
-
- if ((!a5_dev_intf) || (!ipe0_dev_intf) || (!bps_dev_intf)) {
- pr_err("dev intfs are wrong, failed to close\n");
+ if (!a5_dev_intf) {
+ CAM_ERR(CAM_ICP, "a5_dev_intf is NULL");
mutex_unlock(&hw_mgr->hw_mgr_mutex);
return -EINVAL;
}
irq_cb.icp_hw_mgr_cb = NULL;
irq_cb.data = NULL;
- a5_dev_intf->hw_ops.process_cmd(
+ rc = a5_dev_intf->hw_ops.process_cmd(
a5_dev_intf->hw_priv,
CAM_ICP_A5_SET_IRQ_CB,
&irq_cb, sizeof(irq_cb));
+ if (rc)
+ CAM_ERR(CAM_ICP, "deregister irq call back failed");
fw_buf_info.kva = 0;
fw_buf_info.iova = 0;
fw_buf_info.len = 0;
- a5_dev_intf->hw_ops.process_cmd(
+ rc = a5_dev_intf->hw_ops.process_cmd(
a5_dev_intf->hw_priv,
CAM_ICP_A5_CMD_SET_FW_BUF,
&fw_buf_info,
sizeof(fw_buf_info));
+ if (rc)
+ CAM_ERR(CAM_ICP, "nullify the fw buf failed");
mutex_unlock(&hw_mgr->hw_mgr_mutex);
-
- for (i = 0; i < CAM_ICP_CTX_MAX; i++) {
- ctx_data = &hw_mgr->ctx_data[i];
+ for (i = 0; i < CAM_ICP_CTX_MAX; i++)
cam_icp_mgr_release_ctx(hw_mgr, i);
- }
mutex_lock(&hw_mgr->hw_mgr_mutex);
- ipe1_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][1];
- if (ipe1_dev_intf)
- ipe1_dev_intf->hw_ops.deinit(ipe1_dev_intf->hw_priv,
- NULL, 0);
-
- ipe0_dev_intf->hw_ops.deinit(ipe0_dev_intf->hw_priv, NULL, 0);
- bps_dev_intf->hw_ops.deinit(bps_dev_intf->hw_priv, NULL, 0);
- a5_dev_intf->hw_ops.deinit(a5_dev_intf->hw_priv, NULL, 0);
cam_hfi_deinit();
+ cam_icp_mgr_device_deinit(hw_mgr);
cam_icp_free_hfi_mem();
hw_mgr->fw_download = false;
mutex_unlock(&hw_mgr->hw_mgr_mutex);
-
return 0;
}
-static int cam_icp_mgr_download_fw(void *hw_mgr_priv, void *download_fw_args)
+static int cam_icp_mgr_device_init(struct cam_icp_hw_mgr *hw_mgr)
{
+ int rc = 0;
struct cam_hw_intf *a5_dev_intf = NULL;
struct cam_hw_intf *ipe0_dev_intf = NULL;
struct cam_hw_intf *ipe1_dev_intf = NULL;
struct cam_hw_intf *bps_dev_intf = NULL;
- struct cam_hw_info *a5_dev = NULL;
- struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
- struct cam_icp_a5_set_irq_cb irq_cb;
- struct cam_icp_a5_set_fw_buf_info fw_buf_info;
- struct hfi_mem_info hfi_mem;
- unsigned long rem_jiffies;
- int timeout = 5000;
- int rc = 0;
-
- if (!hw_mgr) {
- pr_err("hw_mgr is NULL\n");
- return -EINVAL;
- }
-
- mutex_lock(&hw_mgr->hw_mgr_mutex);
- if (hw_mgr->fw_download) {
- ICP_DBG("FW already downloaded\n");
- mutex_unlock(&hw_mgr->hw_mgr_mutex);
- return rc;
- }
-
- /* Allocate memory for FW and shared memory */
- rc = cam_icp_allocate_hfi_mem();
- if (rc < 0) {
- pr_err("hfi mem alloc failed\n");
- mutex_unlock(&hw_mgr->hw_mgr_mutex);
- return rc;
- }
a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
ipe0_dev_intf = hw_mgr->devices[CAM_ICP_DEV_IPE][0];
@@ -803,48 +765,59 @@
bps_dev_intf = hw_mgr->devices[CAM_ICP_DEV_BPS][0];
if ((!a5_dev_intf) || (!ipe0_dev_intf) || (!bps_dev_intf)) {
- pr_err("dev intfs are wrong\n");
- goto dev_intf_fail;
+ CAM_ERR(CAM_ICP, "dev intfs are wrong");
+ return -EINVAL;
}
- a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
-
rc = a5_dev_intf->hw_ops.init(a5_dev_intf->hw_priv, NULL, 0);
- if (rc < 0) {
- pr_err("a5 dev init failed\n");
+ if (rc)
goto a5_dev_init_failed;
- }
+
rc = bps_dev_intf->hw_ops.init(bps_dev_intf->hw_priv, NULL, 0);
- if (rc < 0) {
- pr_err("bps dev init failed\n");
+ if (rc)
goto bps_dev_init_failed;
- }
+
rc = ipe0_dev_intf->hw_ops.init(ipe0_dev_intf->hw_priv, NULL, 0);
- if (rc < 0) {
- pr_err("ipe0 dev init failed\n");
+ if (rc)
goto ipe0_dev_init_failed;
- }
if (ipe1_dev_intf) {
rc = ipe1_dev_intf->hw_ops.init(ipe1_dev_intf->hw_priv,
NULL, 0);
- if (rc < 0) {
- pr_err("ipe1 dev init failed\n");
+ if (rc)
goto ipe1_dev_init_failed;
- }
}
- /* Set IRQ callback */
+
+ return rc;
+ipe1_dev_init_failed:
+ ipe0_dev_intf->hw_ops.deinit(ipe0_dev_intf->hw_priv, NULL, 0);
+ipe0_dev_init_failed:
+ bps_dev_intf->hw_ops.deinit(bps_dev_intf->hw_priv, NULL, 0);
+bps_dev_init_failed:
+ a5_dev_intf->hw_ops.deinit(a5_dev_intf->hw_priv, NULL, 0);
+a5_dev_init_failed:
+ return rc;
+}
+
+static int cam_icp_mgr_fw_download(struct cam_icp_hw_mgr *hw_mgr)
+{
+ int rc;
+ struct cam_hw_intf *a5_dev_intf = NULL;
+ struct cam_hw_info *a5_dev = NULL;
+ struct cam_icp_a5_set_irq_cb irq_cb;
+ struct cam_icp_a5_set_fw_buf_info fw_buf_info;
+
+ a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+ a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
+
irq_cb.icp_hw_mgr_cb = cam_icp_hw_mgr_cb;
- irq_cb.data = hw_mgr_priv;
+ irq_cb.data = hw_mgr;
rc = a5_dev_intf->hw_ops.process_cmd(
a5_dev_intf->hw_priv,
CAM_ICP_A5_SET_IRQ_CB,
&irq_cb, sizeof(irq_cb));
- if (rc < 0) {
- pr_err("CAM_ICP_A5_SET_IRQ_CB failed\n");
- rc = -EINVAL;
+ if (rc)
goto set_irq_failed;
- }
fw_buf_info.kva = icp_hw_mgr.hfi_mem.fw_buf.kva;
fw_buf_info.iova = icp_hw_mgr.hfi_mem.fw_buf.iova;
@@ -853,12 +826,9 @@
rc = a5_dev_intf->hw_ops.process_cmd(
a5_dev_intf->hw_priv,
CAM_ICP_A5_CMD_SET_FW_BUF,
- &fw_buf_info,
- sizeof(fw_buf_info));
- if (rc < 0) {
- pr_err("CAM_ICP_A5_CMD_SET_FW_BUF failed\n");
+ &fw_buf_info, sizeof(fw_buf_info));
+ if (rc)
goto set_irq_failed;
- }
cam_hfi_enable_cpu(a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
@@ -866,38 +836,40 @@
a5_dev_intf->hw_priv,
CAM_ICP_A5_CMD_FW_DOWNLOAD,
NULL, 0);
- if (rc < 0) {
- pr_err("FW download is failed\n");
- goto set_irq_failed;
- }
+ if (rc)
+ goto fw_download_failed;
+
+ return rc;
+fw_download_failed:
+ cam_hfi_disable_cpu(a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
+set_irq_failed:
+ return rc;
+}
+
+static int cam_icp_mgr_hfi_init(struct cam_icp_hw_mgr *hw_mgr)
+{
+ struct cam_hw_intf *a5_dev_intf = NULL;
+ struct cam_hw_info *a5_dev = NULL;
+ struct hfi_mem_info hfi_mem;
+
+ a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+ a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
hfi_mem.qtbl.kva = icp_hw_mgr.hfi_mem.qtbl.kva;
hfi_mem.qtbl.iova = icp_hw_mgr.hfi_mem.qtbl.iova;
hfi_mem.qtbl.len = icp_hw_mgr.hfi_mem.qtbl.len;
- ICP_DBG("kva = %llX\n", hfi_mem.qtbl.kva);
- ICP_DBG("IOVA = %X\n", hfi_mem.qtbl.iova);
- ICP_DBG("length = %lld\n", hfi_mem.qtbl.len);
hfi_mem.cmd_q.kva = icp_hw_mgr.hfi_mem.cmd_q.kva;
hfi_mem.cmd_q.iova = icp_hw_mgr.hfi_mem.cmd_q.iova;
hfi_mem.cmd_q.len = icp_hw_mgr.hfi_mem.cmd_q.len;
- ICP_DBG("kva = %llX\n", hfi_mem.cmd_q.kva);
- ICP_DBG("IOVA = %X\n", hfi_mem.cmd_q.iova);
- ICP_DBG("length = %lld\n", hfi_mem.cmd_q.len);
hfi_mem.msg_q.kva = icp_hw_mgr.hfi_mem.msg_q.kva;
hfi_mem.msg_q.iova = icp_hw_mgr.hfi_mem.msg_q.iova;
hfi_mem.msg_q.len = icp_hw_mgr.hfi_mem.msg_q.len;
- ICP_DBG("kva = %llX\n", hfi_mem.msg_q.kva);
- ICP_DBG("IOVA = %X\n", hfi_mem.msg_q.iova);
- ICP_DBG("length = %lld\n", hfi_mem.msg_q.len);
hfi_mem.dbg_q.kva = icp_hw_mgr.hfi_mem.dbg_q.kva;
hfi_mem.dbg_q.iova = icp_hw_mgr.hfi_mem.dbg_q.iova;
hfi_mem.dbg_q.len = icp_hw_mgr.hfi_mem.dbg_q.len;
- ICP_DBG("kva = %llX\n", hfi_mem.dbg_q.kva);
- ICP_DBG("IOVA = %X\n", hfi_mem.dbg_q.iova);
- ICP_DBG("length = %lld\n", hfi_mem.dbg_q.len);
hfi_mem.sec_heap.kva = icp_hw_mgr.hfi_mem.sec_heap.kva;
hfi_mem.sec_heap.iova = icp_hw_mgr.hfi_mem.sec_heap.iova;
@@ -906,64 +878,154 @@
hfi_mem.shmem.iova = icp_hw_mgr.hfi_mem.shmem.iova_start;
hfi_mem.shmem.len = icp_hw_mgr.hfi_mem.shmem.iova_len;
- rc = cam_hfi_init(0, &hfi_mem,
+ return cam_hfi_init(0, &hfi_mem,
a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base,
hw_mgr->a5_debug);
- if (rc < 0) {
- pr_err("hfi_init is failed\n");
- goto set_irq_failed;
- }
+}
- mutex_unlock(&hw_mgr->hw_mgr_mutex);
+static int cam_icp_mgr_send_fw_init(struct cam_icp_hw_mgr *hw_mgr)
+{
+ int rc;
+ struct cam_hw_intf *a5_dev_intf = NULL;
+ unsigned long rem_jiffies;
+ int timeout = 5000;
- ICP_DBG("Sending HFI init command\n");
+ a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
reinit_completion(&hw_mgr->a5_complete);
-
+ CAM_DBG(CAM_ICP, "Sending HFI init command");
rc = a5_dev_intf->hw_ops.process_cmd(
a5_dev_intf->hw_priv,
CAM_ICP_A5_SEND_INIT,
NULL, 0);
+ if (rc)
+ return rc;
- ICP_DBG("Wait for INIT DONE Message\n");
rem_jiffies = wait_for_completion_timeout(&icp_hw_mgr.a5_complete,
- msecs_to_jiffies((timeout)));
+ msecs_to_jiffies((timeout)));
if (!rem_jiffies) {
rc = -ETIMEDOUT;
- pr_err("FW response timed out %d\n", rc);
- goto set_irq_failed;
+ CAM_ERR(CAM_ICP, "FW response timed out %d", rc);
+ }
+ CAM_DBG(CAM_ICP, "Done Waiting for INIT DONE Message");
+
+ return rc;
+}
+
+static int cam_icp_mgr_download_fw(void *hw_mgr_priv, void *download_fw_args)
+{
+ struct cam_hw_intf *a5_dev_intf = NULL;
+ struct cam_hw_info *a5_dev = NULL;
+ struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
+ int rc = 0;
+
+ if (!hw_mgr) {
+ CAM_ERR(CAM_ICP, "hw_mgr is NULL");
+ return -EINVAL;
}
- ICP_DBG("Done Waiting for INIT DONE Message\n");
+ mutex_lock(&hw_mgr->hw_mgr_mutex);
+ if (hw_mgr->fw_download) {
+ CAM_DBG(CAM_ICP, "FW already downloaded");
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ return rc;
+ }
+
+ a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+ a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
+ rc = cam_icp_allocate_hfi_mem();
+ if (rc) {
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ goto alloc_hfi_mem_failed;
+ }
+
+ rc = cam_icp_mgr_device_init(hw_mgr);
+ if (rc) {
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ goto dev_init_fail;
+ }
+
+ rc = cam_icp_mgr_fw_download(hw_mgr);
+ if (rc) {
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ goto fw_download_failed;
+ }
+
+ rc = cam_icp_mgr_hfi_init(hw_mgr);
+ if (rc) {
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ goto hfi_init_failed;
+ }
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+ rc = cam_icp_mgr_send_fw_init(hw_mgr);
+ if (rc)
+ goto fw_init_failed;
rc = a5_dev_intf->hw_ops.process_cmd(
a5_dev_intf->hw_priv,
CAM_ICP_A5_CMD_POWER_COLLAPSE,
NULL, 0);
- if (rc) {
- pr_err("icp power collapse failed\n");
- goto set_irq_failed;
- }
-
hw_mgr->fw_download = true;
hw_mgr->ctxt_cnt = 0;
- ICP_DBG("FW download done successfully\n");
-
+ CAM_DBG(CAM_ICP, "FW download done successfully");
+ if (!download_fw_args)
+ cam_icp_mgr_hw_close(hw_mgr, NULL);
return rc;
-set_irq_failed:
- if (ipe1_dev_intf)
- rc = ipe1_dev_intf->hw_ops.deinit(ipe1_dev_intf->hw_priv,
- NULL, 0);
-ipe1_dev_init_failed:
- rc = ipe0_dev_intf->hw_ops.deinit(ipe0_dev_intf->hw_priv, NULL, 0);
-ipe0_dev_init_failed:
- rc = bps_dev_intf->hw_ops.deinit(bps_dev_intf->hw_priv, NULL, 0);
-bps_dev_init_failed:
- rc = a5_dev_intf->hw_ops.deinit(a5_dev_intf->hw_priv, NULL, 0);
-a5_dev_init_failed:
-dev_intf_fail:
+fw_init_failed:
+ cam_hfi_deinit();
+hfi_init_failed:
+ cam_hfi_disable_cpu(a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
+fw_download_failed:
+ cam_icp_mgr_device_deinit(hw_mgr);
+dev_init_fail:
cam_icp_free_hfi_mem();
- mutex_unlock(&hw_mgr->hw_mgr_mutex);
+alloc_hfi_mem_failed:
+ return rc;
+}
+
+static int cam_icp_mgr_handle_config_err(
+ struct cam_hw_config_args *config_args,
+ struct cam_icp_hw_ctx_data *ctx_data)
+{
+ struct cam_hw_done_event_data buf_data;
+
+ buf_data.num_handles = config_args->num_out_map_entries;
+ buf_data.request_id = *(uint64_t *)config_args->priv;
+ ctx_data->ctxt_event_cb(ctx_data->context_priv, true, &buf_data);
+
+ return 0;
+}
+
+static int cam_icp_mgr_enqueue_config(struct cam_icp_hw_mgr *hw_mgr,
+ struct cam_hw_config_args *config_args)
+{
+ int rc = 0;
+ uint64_t request_id = 0;
+ struct crm_workq_task *task;
+ struct hfi_cmd_work_data *task_data;
+ struct hfi_cmd_ipebps_async *hfi_cmd;
+ struct cam_hw_update_entry *hw_update_entries;
+
+ request_id = *(uint64_t *)config_args->priv;
+ hw_update_entries = config_args->hw_update_entries;
+ CAM_DBG(CAM_ICP, "req_id = %lld %pK", request_id, config_args->priv);
+
+ task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+ if (!task) {
+ CAM_ERR(CAM_ICP, "no empty task");
+ return -ENOMEM;
+ }
+
+ task_data = (struct hfi_cmd_work_data *)task->payload;
+ task_data->data = (void *)hw_update_entries->addr;
+ hfi_cmd = (struct hfi_cmd_ipebps_async *)hw_update_entries->addr;
+ task_data->request_id = request_id;
+ task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
+ task->process_cb = cam_icp_mgr_process_cmd;
+ rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+ CRM_TASK_PRIORITY_0);
+
return rc;
}
@@ -972,83 +1034,48 @@
int rc = 0;
struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
struct cam_hw_config_args *config_args = config_hw_args;
- uint32_t fw_handle;
- int ctx_id = 0;
struct cam_icp_hw_ctx_data *ctx_data = NULL;
- int32_t request_id = 0;
- struct cam_hw_update_entry *hw_update_entries;
- struct crm_workq_task *task;
- struct hfi_cmd_work_data *task_data;
- struct hfi_cmd_ipebps_async *hfi_cmd;
if (!hw_mgr || !config_args) {
- pr_err("Invalid arguments %pK %pK\n",
+ CAM_ERR(CAM_ICP, "Invalid arguments %pK %pK",
hw_mgr, config_args);
return -EINVAL;
}
if (!config_args->num_hw_update_entries) {
- pr_err("No hw update enteries are available\n");
+ CAM_ERR(CAM_ICP, "No hw update enteries are available");
return -EINVAL;
}
mutex_lock(&hw_mgr->hw_mgr_mutex);
- fw_handle = *(uint32_t *)config_args->ctxt_to_hw_map;
- ctx_id = cam_icp_mgr_get_ctx_from_fw_handle(hw_mgr, fw_handle);
- if (ctx_id < 0) {
- pr_err("Fw handle to ctx mapping is failed\n");
- mutex_unlock(&hw_mgr->hw_mgr_mutex);
- return -EINVAL;
- }
-
- ctx_data = &hw_mgr->ctx_data[ctx_id];
+ ctx_data = config_args->ctxt_to_hw_map;
if (!ctx_data->in_use) {
- pr_err("ctx is not in use\n");
+ CAM_ERR(CAM_ICP, "ctx is not in use");
mutex_unlock(&hw_mgr->hw_mgr_mutex);
- return -EINVAL;
+ rc = -EINVAL;
+ goto config_err;
}
-
- request_id = *(uint32_t *)config_args->priv;
- hw_update_entries = config_args->hw_update_entries;
- ICP_DBG("req_id = %d\n", request_id);
- ICP_DBG("fw_handle = %x req_id = %d %pK\n",
- fw_handle, request_id, config_args->priv);
- task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
- if (!task) {
- pr_err("no empty task\n");
- mutex_unlock(&hw_mgr->hw_mgr_mutex);
- return -ENOMEM;
- }
-
mutex_unlock(&hw_mgr->hw_mgr_mutex);
- task_data = (struct hfi_cmd_work_data *)task->payload;
- if (!task_data) {
- pr_err("task_data is NULL\n");
- return -EINVAL;
- }
+ rc = cam_icp_mgr_enqueue_config(hw_mgr, config_args);
+ if (rc)
+ goto config_err;
- task_data->data = (void *)hw_update_entries->addr;
- hfi_cmd = (struct hfi_cmd_ipebps_async *)hw_update_entries->addr;
- ICP_DBG("request from hfi_cmd :%llu, hfi_cmd: %pK\n",
- hfi_cmd->user_data2, hfi_cmd);
- task_data->request_id = request_id;
- task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
- task->process_cb = cam_icp_mgr_process_cmd;
- rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
- CRM_TASK_PRIORITY_0);
+ return 0;
+config_err:
+ cam_icp_mgr_handle_config_err(config_args, ctx_data);
return rc;
}
static int cam_icp_mgr_prepare_frame_process_cmd(
- struct cam_icp_hw_ctx_data *ctx_data,
- struct hfi_cmd_ipebps_async *hfi_cmd,
- uint32_t request_id,
- uint32_t fw_cmd_buf_iova_addr)
+ struct cam_icp_hw_ctx_data *ctx_data,
+ struct hfi_cmd_ipebps_async *hfi_cmd,
+ uint64_t request_id,
+ uint32_t fw_cmd_buf_iova_addr)
{
hfi_cmd->size = sizeof(struct hfi_cmd_ipebps_async);
hfi_cmd->pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT;
- if (ctx_data->icp_dev_acquire_info.dev_type == CAM_ICP_RES_TYPE_BPS)
+ if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
hfi_cmd->opcode = HFI_IPEBPS_CMD_OPCODE_BPS_FRAME_PROCESS;
else
hfi_cmd->opcode = HFI_IPEBPS_CMD_OPCODE_IPE_FRAME_PROCESS;
@@ -1058,276 +1085,264 @@
hfi_cmd->user_data1 = (uint64_t)ctx_data;
hfi_cmd->user_data2 = request_id;
- ICP_DBG("ctx_data : %pK, request_id :%d cmd_buf %x\n",
- (void *)ctx_data->context_priv,
- request_id, fw_cmd_buf_iova_addr);
+ CAM_DBG(CAM_ICP, "ctx_data : %pK, request_id :%lld cmd_buf %x",
+ (void *)ctx_data->context_priv, request_id,
+ fw_cmd_buf_iova_addr);
return 0;
}
-static int cam_icp_mgr_prepare_hw_update(void *hw_mgr_priv,
- void *prepare_hw_update_args)
+static int cam_icp_mgr_pkt_validation(struct cam_packet *packet)
{
- int rc = 0, i, j;
- int ctx_id = 0;
- uint32_t fw_handle;
- int32_t idx;
- uint64_t iova_addr;
- uint32_t fw_cmd_buf_iova_addr;
- size_t fw_cmd_buf_len;
- int32_t sync_in_obj[CAM_ICP_IPE_IMAGE_MAX];
- int32_t merged_sync_in_obj;
-
-
- struct cam_hw_prepare_update_args *prepare_args =
- prepare_hw_update_args;
- struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
- struct cam_icp_hw_ctx_data *ctx_data = NULL;
- struct cam_packet *packet = NULL;
- struct cam_cmd_buf_desc *cmd_desc = NULL;
- struct cam_buf_io_cfg *io_cfg_ptr = NULL;
- struct hfi_cmd_ipebps_async *hfi_cmd = NULL;
-
- if ((!prepare_args) || (!hw_mgr)) {
- pr_err("Invalid args\n");
- return -EINVAL;
- }
-
- mutex_lock(&hw_mgr->hw_mgr_mutex);
- fw_handle = *(uint32_t *)prepare_args->ctxt_to_hw_map;
- ctx_id = cam_icp_mgr_get_ctx_from_fw_handle(hw_mgr, fw_handle);
- if (ctx_id < 0) {
- pr_err("Fw handle to ctx mapping is failed\n");
- mutex_unlock(&hw_mgr->hw_mgr_mutex);
- return -EINVAL;
- }
- mutex_unlock(&hw_mgr->hw_mgr_mutex);
-
- ctx_data = &hw_mgr->ctx_data[ctx_id];
- if (!ctx_data->in_use) {
- pr_err("ctx is not in use\n");
- return -EINVAL;
- }
-
- packet = prepare_args->packet;
- if (!packet) {
- pr_err("received packet is NULL\n");
- return -EINVAL;
- }
-
- ICP_DBG("packet header : opcode = %x size = %x",
- packet->header.op_code,
- packet->header.size);
-
- ICP_DBG(" req_id = %x flags = %x\n",
- (uint32_t)packet->header.request_id,
- packet->header.flags);
-
- ICP_DBG("packet data : c_off = %x c_num = %x\n",
- packet->cmd_buf_offset,
- packet->num_cmd_buf);
-
- ICP_DBG("io_off = %x io_num = %x p_off = %x p_num = %x %x %x\n",
- packet->io_configs_offset,
- packet->num_io_configs, packet->patch_offset,
- packet->num_patches, packet->kmd_cmd_buf_index,
- packet->kmd_cmd_buf_offset);
-
if (((packet->header.op_code & 0xff) !=
CAM_ICP_OPCODE_IPE_UPDATE) &&
((packet->header.op_code & 0xff) !=
CAM_ICP_OPCODE_BPS_UPDATE)) {
- pr_err("Invalid Opcode in pkt: %d\n",
+ CAM_ERR(CAM_ICP, "Invalid Opcode in pkt: %d",
packet->header.op_code & 0xff);
return -EINVAL;
}
if ((packet->num_cmd_buf > 1) || (!packet->num_patches) ||
- (!packet->num_io_configs)) {
- pr_err("wrong number of cmd/patch info: %u %u\n",
- packet->num_cmd_buf,
- packet->num_patches);
+ (!packet->num_io_configs)) {
+ CAM_ERR(CAM_ICP, "wrong number of cmd/patch info: %u %u",
+ packet->num_cmd_buf, packet->num_patches);
return -EINVAL;
}
- /* process command buffer descriptors */
+ return 0;
+}
+
+static int cam_icp_mgr_process_cmd_desc(struct cam_icp_hw_mgr *hw_mgr,
+ struct cam_packet *packet,
+ uint32_t *fw_cmd_buf_iova_addr)
+{
+ int rc = 0;
+ uint64_t iova_addr;
+ size_t fw_cmd_buf_len;
+ struct cam_cmd_buf_desc *cmd_desc = NULL;
+
cmd_desc = (struct cam_cmd_buf_desc *)
- ((uint32_t *) &packet->payload +
- packet->cmd_buf_offset/4);
- ICP_DBG("packet = %pK cmd_desc = %pK size = %lu\n",
- (void *)packet, (void *)cmd_desc,
- sizeof(struct cam_cmd_buf_desc));
+ ((uint32_t *) &packet->payload + packet->cmd_buf_offset/4);
rc = cam_mem_get_io_buf(cmd_desc->mem_handle,
hw_mgr->iommu_hdl, &iova_addr, &fw_cmd_buf_len);
- if (rc < 0) {
- pr_err("unable to get src buf info for cmd buf: %x\n",
- hw_mgr->iommu_hdl);
- return rc;
- }
- ICP_DBG("cmd_buf desc cpu and iova address: %pK %zu\n",
- (void *)iova_addr, fw_cmd_buf_len);
- fw_cmd_buf_iova_addr = iova_addr;
- fw_cmd_buf_iova_addr = (fw_cmd_buf_iova_addr + cmd_desc->offset);
-
- /* Update Buffer Address from handles and patch information */
- rc = cam_packet_util_process_patches(packet, hw_mgr->iommu_hdl);
if (rc) {
- pr_err("Patch processing failed\n");
+ CAM_ERR(CAM_ICP, "unable to get src buf info for cmd buf: %x",
+ hw_mgr->iommu_hdl);
return rc;
}
+ CAM_DBG(CAM_ICP, "cmd_buf desc cpu and iova address: %pK %zu",
+ (void *)iova_addr, fw_cmd_buf_len);
- /* process io config out descriptors */
+ *fw_cmd_buf_iova_addr = iova_addr;
+ *fw_cmd_buf_iova_addr = (*fw_cmd_buf_iova_addr + cmd_desc->offset);
+
+ return rc;
+}
+
+static void cam_icp_mgr_process_io_cfg(struct cam_icp_hw_mgr *hw_mgr,
+ struct cam_icp_hw_ctx_data *ctx_data,
+ struct cam_packet *packet,
+ struct cam_hw_prepare_update_args *prepare_args)
+{
+ int i, j, k;
+ struct cam_buf_io_cfg *io_cfg_ptr = NULL;
+
io_cfg_ptr = (struct cam_buf_io_cfg *) ((uint32_t *) &packet->payload +
packet->io_configs_offset/4);
- ICP_DBG("packet = %pK io_cfg_ptr = %pK size = %lu\n",
- (void *)packet, (void *)io_cfg_ptr,
- sizeof(struct cam_buf_io_cfg));
-
prepare_args->num_out_map_entries = 0;
- for (i = 0, j = 0; i < packet->num_io_configs; i++) {
+ prepare_args->num_in_map_entries = 0;
+
+ for (i = 0, j = 0, k = 0; i < packet->num_io_configs; i++) {
if (io_cfg_ptr[i].direction == CAM_BUF_INPUT) {
- ICP_DBG("direction is i : %d :%u\n",
- i, io_cfg_ptr[i].direction);
- ICP_DBG("fence is i : %d :%d\n",
- i, io_cfg_ptr[i].fence);
- continue;
+ prepare_args->in_map_entries[j++].sync_id =
+ io_cfg_ptr[i].fence;
+ prepare_args->num_in_map_entries++;
+ } else {
+ prepare_args->out_map_entries[k++].sync_id =
+ io_cfg_ptr[i].fence;
+ prepare_args->num_out_map_entries++;
}
-
- prepare_args->out_map_entries[j].sync_id = io_cfg_ptr[i].fence;
- prepare_args->out_map_entries[j++].resource_handle =
- io_cfg_ptr[i].fence;
- prepare_args->num_out_map_entries++;
- ICP_DBG(" out fence = %x index = %d\n", io_cfg_ptr[i].fence, i);
+ CAM_DBG(CAM_ICP, "dir[%d]: %u, fence: %u",
+ i, io_cfg_ptr[i].direction, io_cfg_ptr[i].fence);
}
- ICP_DBG("out buf entries processing is done\n");
+}
- /* process io config in descriptors */
- for (i = 0, j = 0; i < packet->num_io_configs; i++) {
- if (io_cfg_ptr[i].direction == CAM_BUF_INPUT) {
- sync_in_obj[j++] = io_cfg_ptr[i].fence;
- ICP_DBG(" in fence = %x index = %d\n",
- io_cfg_ptr[i].fence, i);
- }
- }
-
- if (j == 1)
- merged_sync_in_obj = sync_in_obj[j - 1];
- else if (j > 1) {
- rc = cam_sync_merge(&sync_in_obj[0], j, &merged_sync_in_obj);
- if (rc < 0) {
- pr_err("unable to create in merged object: %d\n",
- rc);
- return rc;
- }
- } else {
- pr_err("no input fence provided %u\n", j);
- return -EINVAL;
- }
-
- prepare_args->in_map_entries[0].sync_id = merged_sync_in_obj;
- prepare_args->in_map_entries[0].resource_handle =
- ctx_data->icp_dev_acquire_info.dev_type;
- prepare_args->num_in_map_entries = 1;
- ICP_DBG("out buf entries processing is done\n");
+static int cam_icp_mgr_update_hfi_frame_process(
+ struct cam_icp_hw_ctx_data *ctx_data,
+ struct cam_packet *packet,
+ struct cam_hw_prepare_update_args *prepare_args,
+ int32_t *idx)
+{
+ int32_t index;
mutex_lock(&ctx_data->hfi_frame_process.lock);
- idx = find_first_zero_bit(ctx_data->hfi_frame_process.bitmap,
- ctx_data->hfi_frame_process.bits);
- if (idx < 0 || idx >= CAM_FRAME_CMD_MAX) {
- pr_err("request idx is wrong: %d\n", idx);
+ index = find_first_zero_bit(ctx_data->hfi_frame_process.bitmap,
+ ctx_data->hfi_frame_process.bits);
+ if (index < 0 || index >= CAM_FRAME_CMD_MAX) {
+ CAM_ERR(CAM_ICP, "request idx is wrong: %d", index);
mutex_unlock(&ctx_data->hfi_frame_process.lock);
return -EINVAL;
}
- set_bit(idx, ctx_data->hfi_frame_process.bitmap);
+ set_bit(index, ctx_data->hfi_frame_process.bitmap);
mutex_unlock(&ctx_data->hfi_frame_process.lock);
- ctx_data->hfi_frame_process.request_id[idx] = packet->header.request_id;
- ICP_DBG("slot[%d]: %d\n", idx,
- ctx_data->hfi_frame_process.request_id[idx]);
- ctx_data->hfi_frame_process.num_out_resources[idx] =
- prepare_args->num_out_map_entries;
- for (i = 0; i < prepare_args->num_out_map_entries; i++)
- ctx_data->hfi_frame_process.out_resource[idx][i] =
- prepare_args->out_map_entries[i].resource_handle;
+ ctx_data->hfi_frame_process.request_id[index] =
+ packet->header.request_id;
+
+ *idx = index;
+
+ return 0;
+}
+
+static int cam_icp_mgr_prepare_hw_update(void *hw_mgr_priv,
+ void *prepare_hw_update_args)
+{
+ int rc = 0;
+ int32_t idx;
+ uint32_t fw_cmd_buf_iova_addr;
+ struct cam_icp_hw_ctx_data *ctx_data = NULL;
+ struct cam_packet *packet = NULL;
+ struct hfi_cmd_ipebps_async *hfi_cmd = NULL;
+ struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
+ struct cam_hw_prepare_update_args *prepare_args =
+ prepare_hw_update_args;
+
+ if ((!prepare_args) || (!hw_mgr) || (!prepare_args->packet)) {
+ CAM_ERR(CAM_ICP, "Invalid args");
+ return -EINVAL;
+ }
+
+ ctx_data = prepare_args->ctxt_to_hw_map;
+ mutex_lock(&hw_mgr->hw_mgr_mutex);
+ if (!ctx_data->in_use) {
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ CAM_ERR(CAM_ICP, "ctx is not in use");
+ return -EINVAL;
+ }
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+ packet = prepare_args->packet;
+
+ rc = cam_icp_mgr_pkt_validation(packet);
+ if (rc)
+ return rc;
+
+ rc = cam_icp_mgr_process_cmd_desc(hw_mgr, packet,
+ &fw_cmd_buf_iova_addr);
+ if (rc)
+ return rc;
+
+ /* Update Buffer Address from handles and patch information */
+ rc = cam_packet_util_process_patches(packet, hw_mgr->iommu_hdl);
+ if (rc)
+ return rc;
+
+ cam_icp_mgr_process_io_cfg(hw_mgr, ctx_data,
+ packet, prepare_args);
+
+ rc = cam_icp_mgr_update_hfi_frame_process(ctx_data, packet,
+ prepare_args, &idx);
+ if (rc) {
+ if (prepare_args->in_map_entries[0].sync_id > 0)
+ cam_sync_destroy(
+ prepare_args->in_map_entries[0].sync_id);
+ return rc;
+ }
hfi_cmd = (struct hfi_cmd_ipebps_async *)
&ctx_data->hfi_frame_process.hfi_frame_cmd[idx];
cam_icp_mgr_prepare_frame_process_cmd(
- ctx_data, hfi_cmd, packet->header.request_id,
- fw_cmd_buf_iova_addr);
+ ctx_data, hfi_cmd, packet->header.request_id,
+ fw_cmd_buf_iova_addr);
prepare_args->num_hw_update_entries = 1;
prepare_args->hw_update_entries[0].addr = (uint64_t)hfi_cmd;
-
prepare_args->priv = &ctx_data->hfi_frame_process.request_id[idx];
- ICP_DBG("slot : %d, hfi_cmd : %pK, request : %d\n", idx,
- (void *)hfi_cmd,
- ctx_data->hfi_frame_process.request_id[idx]);
-
return rc;
}
+static int cam_icp_mgr_send_abort_status(struct cam_icp_hw_ctx_data *ctx_data)
+{
+ struct hfi_frame_process_info *hfi_frame_process;
+ int idx;
+
+ mutex_lock(&ctx_data->hfi_frame_process.lock);
+ hfi_frame_process = &ctx_data->hfi_frame_process;
+ for (idx = 0; idx < CAM_FRAME_CMD_MAX; idx++) {
+ if (!hfi_frame_process->request_id[idx])
+ continue;
+
+ ctx_data->ctxt_event_cb(ctx_data->context_priv, true,
+ &hfi_frame_process->request_id[idx]);
+
+ /* now release memory for hfi frame process command */
+ hfi_frame_process->request_id[idx] = 0;
+ clear_bit(idx, ctx_data->hfi_frame_process.bitmap);
+ }
+ mutex_unlock(&ctx_data->hfi_frame_process.lock);
+
+ return 0;
+}
+
static int cam_icp_mgr_release_hw(void *hw_mgr_priv, void *release_hw_args)
{
int rc = 0;
int ctx_id = 0;
- int i;
- uint32_t fw_handle;
struct cam_hw_release_args *release_hw = release_hw_args;
struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
struct cam_icp_hw_ctx_data *ctx_data = NULL;
if (!release_hw || !hw_mgr) {
- pr_err("Invalid args\n");
+ CAM_ERR(CAM_ICP, "Invalid args: %pK %pK", release_hw, hw_mgr);
return -EINVAL;
}
- for (i = 0; i < CAM_ICP_CTX_MAX; i++) {
- ctx_data = &hw_mgr->ctx_data[i];
- ICP_DBG("i = %d in_use = %u fw_handle = %u\n", i,
- ctx_data->in_use, ctx_data->fw_handle);
- }
-
- mutex_lock(&hw_mgr->hw_mgr_mutex);
- fw_handle = *(uint32_t *)release_hw->ctxt_to_hw_map;
- ctx_id = cam_icp_mgr_get_ctx_from_fw_handle(hw_mgr, fw_handle);
- if (ctx_id < 0) {
- pr_err("Invalid ctx id\n");
- mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ ctx_data = release_hw->ctxt_to_hw_map;
+ ctx_id = ctx_data->ctx_id;
+ if (ctx_id < 0 || ctx_id >= CAM_ICP_CTX_MAX) {
+ CAM_ERR(CAM_ICP, "Invalid ctx id: %d", ctx_id);
return -EINVAL;
}
- mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+ if (release_hw->active_req) {
+ cam_icp_mgr_abort_handle(ctx_data);
+ cam_icp_mgr_send_abort_status(ctx_data);
+ }
rc = cam_icp_mgr_release_ctx(hw_mgr, ctx_id);
- if (rc)
- return -EINVAL;
+ if (!hw_mgr->ctxt_cnt)
+ cam_icp_mgr_hw_close(hw_mgr, NULL);
- ICP_DBG("fw handle %d\n", fw_handle);
return rc;
}
static int cam_icp_mgr_send_config_io(struct cam_icp_hw_ctx_data *ctx_data,
- struct crm_workq_task *task, uint32_t io_buf_addr)
+ uint32_t io_buf_addr)
{
int rc = 0;
struct hfi_cmd_work_data *task_data;
struct hfi_cmd_ipebps_async ioconfig_cmd;
unsigned long rem_jiffies;
int timeout = 5000;
+ struct crm_workq_task *task;
+
+ task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+ if (!task)
+ return -ENOMEM;
ioconfig_cmd.size = sizeof(struct hfi_cmd_ipebps_async);
ioconfig_cmd.pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT;
- if (ctx_data->icp_dev_acquire_info.dev_type == CAM_ICP_RES_TYPE_BPS)
+ if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
ioconfig_cmd.opcode = HFI_IPEBPS_CMD_OPCODE_BPS_CONFIG_IO;
else
ioconfig_cmd.opcode = HFI_IPEBPS_CMD_OPCODE_IPE_CONFIG_IO;
reinit_completion(&ctx_data->wait_complete);
- ICP_DBG("Sending HFI_CMD_IPEBPS_ASYNC_COMMAND: opcode :%u\n",
- ioconfig_cmd.opcode);
+
ioconfig_cmd.num_fw_handles = 1;
ioconfig_cmd.fw_handles[0] = ctx_data->fw_handle;
ioconfig_cmd.payload.indirect = io_buf_addr;
@@ -1338,36 +1353,39 @@
task_data->request_id = 0;
task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
task->process_cb = cam_icp_mgr_process_cmd;
- cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr, CRM_TASK_PRIORITY_0);
- ICP_DBG("fw_hdl = %x ctx_data = %pK\n", ctx_data->fw_handle, ctx_data);
+ rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+ CRM_TASK_PRIORITY_0);
+ if (rc)
+ return rc;
rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
msecs_to_jiffies((timeout)));
if (!rem_jiffies) {
rc = -ETIMEDOUT;
- pr_err("FW response timed out %d\n", rc);
+ CAM_ERR(CAM_ICP, "FW response timed out %d", rc);
}
return rc;
}
static int cam_icp_mgr_create_handle(uint32_t dev_type,
- struct cam_icp_hw_ctx_data *ctx_data,
- struct crm_workq_task *task)
+ struct cam_icp_hw_ctx_data *ctx_data)
{
struct hfi_cmd_create_handle create_handle;
struct hfi_cmd_work_data *task_data;
unsigned long rem_jiffies;
int timeout = 5000;
+ struct crm_workq_task *task;
int rc = 0;
+ task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+ if (!task)
+ return -ENOMEM;
+
create_handle.size = sizeof(struct hfi_cmd_create_handle);
create_handle.pkt_type = HFI_CMD_IPEBPS_CREATE_HANDLE;
create_handle.handle_type = dev_type;
create_handle.user_data1 = (uint64_t)ctx_data;
- ICP_DBG("%x %x %x %pK\n", create_handle.size, create_handle.pkt_type,
- create_handle.handle_type, (void *)create_handle.user_data1);
- ICP_DBG("Sending HFI_CMD_IPEBPS_CREATE_HANDLE\n");
reinit_completion(&ctx_data->wait_complete);
task_data = (struct hfi_cmd_work_data *)task->payload;
@@ -1375,33 +1393,39 @@
task_data->request_id = 0;
task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
task->process_cb = cam_icp_mgr_process_cmd;
- cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr, CRM_TASK_PRIORITY_0);
+ rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+ CRM_TASK_PRIORITY_0);
+ if (rc)
+ return rc;
rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
msecs_to_jiffies((timeout)));
if (!rem_jiffies) {
rc = -ETIMEDOUT;
- pr_err("FW response timed out %d\n", rc);
+ CAM_ERR(CAM_ICP, "FW response timed out %d", rc);
}
return rc;
}
-static int cam_icp_mgr_send_ping(struct cam_icp_hw_ctx_data *ctx_data,
- struct crm_workq_task *task)
+static int cam_icp_mgr_send_ping(struct cam_icp_hw_ctx_data *ctx_data)
{
struct hfi_cmd_ping_pkt ping_pkt;
struct hfi_cmd_work_data *task_data;
unsigned long rem_jiffies;
int timeout = 5000;
+ struct crm_workq_task *task;
int rc = 0;
+ task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+ if (!task) {
+ CAM_ERR(CAM_ICP, "No free task to send ping command");
+ return -ENOMEM;
+ }
+
ping_pkt.size = sizeof(struct hfi_cmd_ping_pkt);
ping_pkt.pkt_type = HFI_CMD_SYS_PING;
ping_pkt.user_data = (uint64_t)ctx_data;
- ICP_DBG("Sending HFI_CMD_SYS_PING\n");
- ICP_DBG("%x %x %pK\n", ping_pkt.size, ping_pkt.pkt_type,
- (void *)ping_pkt.user_data);
init_completion(&ctx_data->wait_complete);
task_data = (struct hfi_cmd_work_data *)task->payload;
@@ -1409,221 +1433,211 @@
task_data->request_id = 0;
task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
task->process_cb = cam_icp_mgr_process_cmd;
- cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr, CRM_TASK_PRIORITY_0);
+
+ rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+ CRM_TASK_PRIORITY_0);
+ if (rc)
+ return rc;
rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
msecs_to_jiffies((timeout)));
if (!rem_jiffies) {
rc = -ETIMEDOUT;
- pr_err("FW response timed out %d\n", rc);
+ CAM_ERR(CAM_ICP, "FW response timed out %d", rc);
}
-
return rc;
}
+static int cam_icp_get_acquire_info(struct cam_icp_hw_mgr *hw_mgr,
+ struct cam_hw_acquire_args *args,
+ struct cam_icp_hw_ctx_data *ctx_data)
+{
+ int i;
+ int acquire_size;
+ struct cam_icp_acquire_dev_info icp_dev_acquire_info;
+ struct cam_icp_res_info *p_icp_out = NULL;
+
+ if (copy_from_user(&icp_dev_acquire_info,
+ (void __user *)args->acquire_info,
+ sizeof(struct cam_icp_acquire_dev_info)))
+ return -EFAULT;
+
+ if (icp_dev_acquire_info.num_out_res > ICP_MAX_OUTPUT_SUPPORTED) {
+ CAM_ERR(CAM_ICP, "num of out resources exceeding : %u",
+ icp_dev_acquire_info.num_out_res);
+ return -EINVAL;
+ }
+
+ acquire_size = sizeof(struct cam_icp_acquire_dev_info) +
+ (icp_dev_acquire_info.num_out_res *
+ sizeof(struct cam_icp_res_info));
+ ctx_data->icp_dev_acquire_info = kzalloc(acquire_size, GFP_KERNEL);
+ if (!ctx_data->icp_dev_acquire_info)
+ return -ENOMEM;
+
+ if (copy_from_user(ctx_data->icp_dev_acquire_info,
+ (void __user *)args->acquire_info, acquire_size)) {
+ kfree(ctx_data->icp_dev_acquire_info);
+ ctx_data->icp_dev_acquire_info = NULL;
+ return -EFAULT;
+ }
+
+ CAM_DBG(CAM_ICP, "%x %x %x %x %x %x %x",
+ ctx_data->icp_dev_acquire_info->dev_type,
+ ctx_data->icp_dev_acquire_info->in_res.format,
+ ctx_data->icp_dev_acquire_info->in_res.width,
+ ctx_data->icp_dev_acquire_info->in_res.height,
+ ctx_data->icp_dev_acquire_info->in_res.fps,
+ ctx_data->icp_dev_acquire_info->num_out_res,
+ ctx_data->icp_dev_acquire_info->scratch_mem_size);
+
+ p_icp_out = ctx_data->icp_dev_acquire_info->out_res;
+ for (i = 0; i < ctx_data->icp_dev_acquire_info->num_out_res; i++)
+ CAM_DBG(CAM_ICP, "out[i] %x %x %x %x",
+ p_icp_out[i].format,
+ p_icp_out[i].width,
+ p_icp_out[i].height,
+ p_icp_out[i].fps);
+
+ return 0;
+}
+
static int cam_icp_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
{
- int rc = 0, i, bitmap_size = 0, tmp_size;
+ int rc = 0, bitmap_size = 0;
uint32_t ctx_id = 0;
uint64_t io_buf_addr;
size_t io_buf_size;
struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
struct cam_icp_hw_ctx_data *ctx_data = NULL;
struct cam_hw_acquire_args *args = acquire_hw_args;
- struct cam_icp_acquire_dev_info icp_dev_acquire_info;
- struct cam_icp_res_info *p_icp_out = NULL;
- struct crm_workq_task *task;
- uint8_t *tmp_acquire;
+ struct cam_icp_acquire_dev_info *icp_dev_acquire_info;
if ((!hw_mgr_priv) || (!acquire_hw_args)) {
- pr_err("Invalid params: %pK %pK\n", hw_mgr_priv,
+ CAM_ERR(CAM_ICP, "Invalid params: %pK %pK", hw_mgr_priv,
acquire_hw_args);
return -EINVAL;
}
if (args->num_acq > 1) {
- pr_err("number of resources are wrong: %u\n", args->num_acq);
+ CAM_ERR(CAM_ICP, "number of resources are wrong: %u",
+ args->num_acq);
return -EINVAL;
}
- if (copy_from_user(&icp_dev_acquire_info,
- (void __user *)args->acquire_info,
- sizeof(icp_dev_acquire_info)))
- return -EFAULT;
-
- if (icp_dev_acquire_info.num_out_res > ICP_IPE_MAX_OUTPUT_SUPPORTED) {
- pr_err("num of out resources exceeding : %u\n",
- icp_dev_acquire_info.num_out_res);
- return -EINVAL;
- }
-
- ICP_DBG("%x %x %x %x %x %x %x\n",
- icp_dev_acquire_info.dev_type,
- icp_dev_acquire_info.in_res.format,
- icp_dev_acquire_info.in_res.width,
- icp_dev_acquire_info.in_res.height,
- icp_dev_acquire_info.in_res.fps,
- icp_dev_acquire_info.num_out_res,
- icp_dev_acquire_info.scratch_mem_size);
-
- tmp_size = sizeof(icp_dev_acquire_info) +
- icp_dev_acquire_info.num_out_res *
- sizeof(struct cam_icp_res_info);
-
- tmp_acquire = kzalloc(tmp_size, GFP_KERNEL);
- if (!tmp_acquire)
- return -EINVAL;
-
- if (copy_from_user(tmp_acquire,
- (void __user *)args->acquire_info,
- tmp_size)) {
- kfree(tmp_acquire);
- return -EFAULT;
- }
-
- p_icp_out =
- (struct cam_icp_res_info *)(tmp_acquire +
- sizeof(icp_dev_acquire_info)-
- sizeof(struct cam_icp_res_info));
- ICP_DBG("out[0] %x %x %x %x\n",
- p_icp_out[0].format,
- p_icp_out[0].width,
- p_icp_out[0].height,
- p_icp_out[0].fps);
-
- ICP_DBG("out[1] %x %x %x %x\n",
- p_icp_out[1].format,
- p_icp_out[1].width,
- p_icp_out[1].height,
- p_icp_out[1].fps);
-
mutex_lock(&hw_mgr->hw_mgr_mutex);
ctx_id = cam_icp_mgr_get_free_ctx(hw_mgr);
if (ctx_id >= CAM_ICP_CTX_MAX) {
- pr_err("No free ctx space in hw_mgr\n");
- kfree(tmp_acquire);
+ CAM_ERR(CAM_ICP, "No free ctx space in hw_mgr");
mutex_unlock(&hw_mgr->hw_mgr_mutex);
- return -EFAULT;
+ return -ENOSPC;
}
-
- /* Fill ctx with acquire info */
ctx_data = &hw_mgr->ctx_data[ctx_id];
+ ctx_data->ctx_id = ctx_id;
mutex_unlock(&hw_mgr->hw_mgr_mutex);
- /* Fill ctx with acquire info */
mutex_lock(&ctx_data->ctx_mutex);
- ctx_data->icp_dev_acquire_info = icp_dev_acquire_info;
- for (i = 0; i < icp_dev_acquire_info.num_out_res; i++)
- ctx_data->icp_out_acquire_info[i] = p_icp_out[i];
+ rc = cam_icp_get_acquire_info(hw_mgr, args, ctx_data);
+ if (rc) {
+ mutex_unlock(&ctx_data->ctx_mutex);
+ goto acquire_info_failed;
+ }
+ icp_dev_acquire_info = ctx_data->icp_dev_acquire_info;
mutex_unlock(&ctx_data->ctx_mutex);
/* Get IOCONFIG command info */
- if (ctx_data->icp_dev_acquire_info.secure_mode)
+ if (icp_dev_acquire_info->secure_mode)
rc = cam_mem_get_io_buf(
- icp_dev_acquire_info.io_config_cmd_handle,
+ icp_dev_acquire_info->io_config_cmd_handle,
hw_mgr->iommu_sec_hdl,
&io_buf_addr, &io_buf_size);
else
rc = cam_mem_get_io_buf(
- icp_dev_acquire_info.io_config_cmd_handle,
+ icp_dev_acquire_info->io_config_cmd_handle,
hw_mgr->iommu_hdl,
&io_buf_addr, &io_buf_size);
- ICP_DBG("io_config_cmd_handle : %d\n",
- icp_dev_acquire_info.io_config_cmd_handle);
- ICP_DBG("io_buf_addr : %pK\n", (void *)io_buf_addr);
- ICP_DBG("io_buf_size : %zu\n", io_buf_size);
- if (rc < 0) {
- pr_err("unable to get src buf info from io desc\n");
- goto cmd_cpu_buf_failed;
- }
-
- mutex_lock(&icp_hw_mgr.hw_mgr_mutex);
- task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
- if (!task) {
- pr_err("no free task\n");
- mutex_unlock(&icp_hw_mgr.hw_mgr_mutex);
- goto get_create_task_failed;
- }
- mutex_unlock(&icp_hw_mgr.hw_mgr_mutex);
-
- rc = cam_icp_mgr_send_ping(ctx_data, task);
if (rc) {
- pr_err("ping ack not received\n");
- goto create_handle_failed;
+ CAM_ERR(CAM_ICP, "unable to get src buf info from io desc");
+ goto get_io_buf_failed;
}
- mutex_lock(&icp_hw_mgr.hw_mgr_mutex);
- task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
- if (!task) {
- pr_err("no free task\n");
- mutex_unlock(&icp_hw_mgr.hw_mgr_mutex);
- goto get_create_task_failed;
- }
- mutex_unlock(&icp_hw_mgr.hw_mgr_mutex);
- /* Send create fw handle command */
- rc = cam_icp_mgr_create_handle(icp_dev_acquire_info.dev_type,
- ctx_data, task);
+ CAM_DBG(CAM_ICP, "hdl: %d, addr: %pK, size: %zu",
+ icp_dev_acquire_info->io_config_cmd_handle,
+ (void *)io_buf_addr, io_buf_size);
+
+ mutex_lock(&hw_mgr->hw_mgr_mutex);
+ if (!hw_mgr->ctxt_cnt) {
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ rc = cam_icp_mgr_download_fw(hw_mgr, ctx_data);
+ if (rc)
+ goto get_io_buf_failed;
+ mutex_lock(&hw_mgr->hw_mgr_mutex);
+ }
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+ rc = cam_icp_mgr_send_ping(ctx_data);
if (rc) {
- pr_err("create handle failed\n");
+ CAM_ERR(CAM_ICP, "ping ack not received");
+ goto send_ping_failed;
+ }
+
+ rc = cam_icp_mgr_create_handle(icp_dev_acquire_info->dev_type,
+ ctx_data);
+ if (rc) {
+ CAM_ERR(CAM_ICP, "create handle failed");
goto create_handle_failed;
}
- /* Send IOCONFIG command */
- mutex_lock(&icp_hw_mgr.hw_mgr_mutex);
- task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
- if (!task) {
- pr_err("no empty task\n");
- mutex_unlock(&icp_hw_mgr.hw_mgr_mutex);
- goto get_ioconfig_task_failed;
- }
- mutex_unlock(&icp_hw_mgr.hw_mgr_mutex);
-
- rc = cam_icp_mgr_send_config_io(ctx_data, task, io_buf_addr);
+ rc = cam_icp_mgr_send_config_io(ctx_data, io_buf_addr);
if (rc) {
- pr_err("IO Config command failed\n");
+ CAM_ERR(CAM_ICP, "IO Config command failed");
goto ioconfig_failed;
}
mutex_lock(&ctx_data->ctx_mutex);
ctx_data->context_priv = args->context_data;
- args->ctxt_to_hw_map = &ctx_data->fw_handle;
+ args->ctxt_to_hw_map = ctx_data;
bitmap_size = BITS_TO_LONGS(CAM_FRAME_CMD_MAX) * sizeof(long);
ctx_data->hfi_frame_process.bitmap =
- kzalloc(sizeof(bitmap_size), GFP_KERNEL);
+ kzalloc(bitmap_size, GFP_KERNEL);
+ if (!ctx_data->hfi_frame_process.bitmap)
+ goto ioconfig_failed;
ctx_data->hfi_frame_process.bits = bitmap_size * BITS_PER_BYTE;
mutex_init(&ctx_data->hfi_frame_process.lock);
+ hw_mgr->ctx_data[ctx_id].ctxt_event_cb = args->event_cb;
+ icp_dev_acquire_info->scratch_mem_size = ctx_data->scratch_mem_size;
mutex_unlock(&ctx_data->ctx_mutex);
- hw_mgr->ctx_data[ctx_id].ctxt_event_cb = args->event_cb;
-
- icp_dev_acquire_info.scratch_mem_size = ctx_data->scratch_mem_size;
if (copy_to_user((void __user *)args->acquire_info,
- &icp_dev_acquire_info,
- sizeof(icp_dev_acquire_info)))
+ icp_dev_acquire_info, sizeof(struct cam_icp_acquire_dev_info)))
goto copy_to_user_failed;
- ICP_DBG("scratch mem size = %x fw_handle = %x\n",
- (unsigned int)icp_dev_acquire_info.scratch_mem_size,
+ CAM_DBG(CAM_ICP, "scratch size = %x fw_handle = %x",
+ (unsigned int)icp_dev_acquire_info->scratch_mem_size,
(unsigned int)ctx_data->fw_handle);
- kfree(tmp_acquire);
+ mutex_lock(&hw_mgr->hw_mgr_mutex);
+ hw_mgr->ctxt_cnt++;
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
return 0;
copy_to_user_failed:
+ kfree(ctx_data->hfi_frame_process.bitmap);
+ ctx_data->hfi_frame_process.bitmap = NULL;
ioconfig_failed:
-get_ioconfig_task_failed:
- mutex_lock(&icp_hw_mgr.hw_mgr_mutex);
- task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
- mutex_unlock(&icp_hw_mgr.hw_mgr_mutex);
- if (task)
- cam_icp_mgr_destroy_handle(ctx_data, task);
+ cam_icp_mgr_destroy_handle(ctx_data);
+send_ping_failed:
create_handle_failed:
-get_create_task_failed:
-cmd_cpu_buf_failed:
- cam_icp_mgr_release_ctx(hw_mgr, ctx_id);
- kfree(tmp_acquire);
+ if (!hw_mgr->ctxt_cnt)
+ cam_icp_mgr_hw_close(hw_mgr, NULL);
+get_io_buf_failed:
+ kfree(hw_mgr->ctx_data[ctx_id].icp_dev_acquire_info);
+ hw_mgr->ctx_data[ctx_id].icp_dev_acquire_info = NULL;
+acquire_info_failed:
+ cam_icp_mgr_put_ctx(ctx_data);
return rc;
}
@@ -1634,31 +1648,29 @@
struct cam_query_cap_cmd *query_cap = hw_caps_args;
if ((!hw_mgr_priv) || (!hw_caps_args)) {
- pr_err("Invalid params: %pK %pK\n", hw_mgr_priv, hw_caps_args);
+ CAM_ERR(CAM_ICP, "Invalid params: %pK %pK",
+ hw_mgr_priv, hw_caps_args);
return -EINVAL;
}
if (copy_from_user(&icp_hw_mgr.icp_caps,
- (void __user *)query_cap->caps_handle,
- sizeof(struct cam_icp_query_cap_cmd))) {
- pr_err("copy_from_user failed\n");
+ (void __user *)query_cap->caps_handle,
+ sizeof(struct cam_icp_query_cap_cmd))) {
+ CAM_ERR(CAM_ICP, "copy_from_user failed");
return -EFAULT;
}
mutex_lock(&hw_mgr->hw_mgr_mutex);
rc = hfi_get_hw_caps(&icp_hw_mgr.icp_caps);
- if (rc < 0) {
- pr_err("Unable to get caps from HFI: %d\n", rc);
+ if (rc)
goto hfi_get_caps_fail;
- }
icp_hw_mgr.icp_caps.dev_iommu_handle.non_secure = hw_mgr->iommu_hdl;
icp_hw_mgr.icp_caps.dev_iommu_handle.secure = hw_mgr->iommu_sec_hdl;
if (copy_to_user((void __user *)query_cap->caps_handle,
- &icp_hw_mgr.icp_caps,
- sizeof(struct cam_icp_query_cap_cmd))) {
- pr_err("copy_to_user failed\n");
+ &icp_hw_mgr.icp_caps, sizeof(struct cam_icp_query_cap_cmd))) {
+ CAM_ERR(CAM_ICP, "copy_to_user failed");
rc = -EFAULT;
goto hfi_get_caps_fail;
}
@@ -1672,7 +1684,6 @@
{
int count, i, rc = 0;
uint32_t num_dev;
- uint32_t num_ipe_dev;
const char *name = NULL;
struct device_node *child_node = NULL;
struct platform_device *child_pdev = NULL;
@@ -1682,7 +1693,7 @@
hw_mgr_intf = (struct cam_hw_mgr_intf *)hw_mgr_hdl;
if (!of_node || !hw_mgr_intf) {
- pr_err("Invalid args of_node %pK hw_mgr %pK\n",
+ CAM_ERR(CAM_ICP, "Invalid args of_node %pK hw_mgr %pK",
of_node, hw_mgr_intf);
return -EINVAL;
}
@@ -1705,15 +1716,15 @@
/* Get number of device objects */
count = of_property_count_strings(of_node, "compat-hw-name");
if (!count) {
- pr_err("no compat hw found in dev tree, count = %d\n", count);
+ CAM_ERR(CAM_ICP, "no compat hw found, count = %d", count);
rc = -EINVAL;
goto num_dev_failed;
}
/* Get number of a5 device nodes and a5 mem allocation */
rc = of_property_read_u32(of_node, "num-a5", &num_dev);
- if (rc < 0) {
- pr_err("getting num of a5 failed\n");
+ if (rc) {
+ CAM_ERR(CAM_ICP, "getting num of a5 failed");
goto num_dev_failed;
}
@@ -1725,14 +1736,14 @@
}
/* Get number of ipe device nodes and ipe mem allocation */
- rc = of_property_read_u32(of_node, "num-ipe", &num_ipe_dev);
- if (rc < 0) {
- pr_err("getting number of ipe dev nodes failed\n");
+ rc = of_property_read_u32(of_node, "num-ipe", &num_dev);
+ if (rc) {
+ CAM_ERR(CAM_ICP, "getting number of ipe dev nodes failed");
goto num_ipe_failed;
}
icp_hw_mgr.devices[CAM_ICP_DEV_IPE] = kzalloc(
- sizeof(struct cam_hw_intf *) * num_ipe_dev, GFP_KERNEL);
+ sizeof(struct cam_hw_intf *) * num_dev, GFP_KERNEL);
if (!icp_hw_mgr.devices[CAM_ICP_DEV_IPE]) {
rc = -ENOMEM;
goto num_ipe_failed;
@@ -1740,8 +1751,8 @@
/* Get number of bps device nodes and bps mem allocation */
rc = of_property_read_u32(of_node, "num-bps", &num_dev);
- if (rc < 0) {
- pr_err("read num bps devices failed\n");
+ if (rc) {
+ CAM_ERR(CAM_ICP, "read num bps devices failed");
goto num_bps_failed;
}
icp_hw_mgr.devices[CAM_ICP_DEV_BPS] = kzalloc(
@@ -1753,22 +1764,22 @@
for (i = 0; i < count; i++) {
rc = of_property_read_string_index(of_node, "compat-hw-name",
- i, &name);
- if (rc < 0) {
- pr_err("getting dev object name failed\n");
+ i, &name);
+ if (rc) {
+ CAM_ERR(CAM_ICP, "getting dev object name failed");
goto compat_hw_name_failed;
}
child_node = of_find_node_by_name(NULL, name);
if (!child_node) {
- pr_err("error! Cannot find node in dtsi %s\n", name);
+ CAM_ERR(CAM_ICP, "Cannot find node in dtsi %s", name);
rc = -ENODEV;
goto compat_hw_name_failed;
}
child_pdev = of_find_device_by_node(child_node);
if (!child_pdev) {
- pr_err("failed to find device on bus %s\n",
+ CAM_ERR(CAM_ICP, "failed to find device on bus %s",
child_node->name);
rc = -ENODEV;
of_node_put(child_node);
@@ -1776,15 +1787,12 @@
}
child_dev_intf = (struct cam_hw_intf *)platform_get_drvdata(
- child_pdev);
+ child_pdev);
if (!child_dev_intf) {
- pr_err("no child device\n");
+ CAM_ERR(CAM_ICP, "no child device");
of_node_put(child_node);
goto compat_hw_name_failed;
}
- ICP_DBG("child_intf %pK\n", child_dev_intf);
- ICP_DBG("child type %d index %d\n", child_dev_intf->hw_type,
- child_dev_intf->hw_idx);
icp_hw_mgr.devices[child_dev_intf->hw_type]
[child_dev_intf->hw_idx] = child_dev_intf;
@@ -1793,29 +1801,28 @@
}
rc = cam_smmu_get_handle("icp", &icp_hw_mgr.iommu_hdl);
- if (rc < 0) {
- pr_err("icp get iommu handle failed\n");
+ if (rc) {
+ CAM_ERR(CAM_ICP, "icp get iommu handle failed: %d", rc);
goto compat_hw_name_failed;
}
- pr_err("mmu handle :%d\n", icp_hw_mgr.iommu_hdl);
rc = cam_smmu_ops(icp_hw_mgr.iommu_hdl, CAM_SMMU_ATTACH);
- if (rc < 0) {
- pr_err("icp attach failed: %d\n", rc);
+ if (rc) {
+ CAM_ERR(CAM_ICP, "icp attach failed: %d", rc);
goto icp_attach_failed;
}
rc = cam_req_mgr_workq_create("icp_command_queue", ICP_WORKQ_NUM_TASK,
&icp_hw_mgr.cmd_work, CRM_WORKQ_USAGE_NON_IRQ);
- if (rc < 0) {
- pr_err("unable to create a worker\n");
+ if (rc) {
+ CAM_ERR(CAM_ICP, "unable to create a worker");
goto cmd_work_failed;
}
rc = cam_req_mgr_workq_create("icp_message_queue", ICP_WORKQ_NUM_TASK,
&icp_hw_mgr.msg_work, CRM_WORKQ_USAGE_IRQ);
- if (rc < 0) {
- pr_err("unable to create a worker\n");
+ if (rc) {
+ CAM_ERR(CAM_ICP, "unable to create a worker");
goto msg_work_failed;
}
@@ -1856,6 +1863,7 @@
cmd_work_failed:
cam_smmu_ops(icp_hw_mgr.iommu_hdl, CAM_SMMU_DETACH);
icp_attach_failed:
+ cam_smmu_destroy_handle(icp_hw_mgr.iommu_hdl);
icp_hw_mgr.iommu_hdl = 0;
compat_hw_name_failed:
kfree(icp_hw_mgr.devices[CAM_ICP_DEV_BPS]);
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
similarity index 93%
rename from drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
index 32d796a..6fa32fa 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
@@ -39,8 +39,8 @@
#define ICP_PACKET_SIZE 0
#define ICP_PACKET_TYPE 1
-#define ICP_PACKET_IPCODE 2
-#define ICP_IPE_MAX_OUTPUT_SUPPORTED 6
+#define ICP_PACKET_OPCODE 2
+#define ICP_MAX_OUTPUT_SUPPORTED 6
/**
* struct icp_hfi_mem_info
@@ -100,7 +100,7 @@
void *bitmap;
size_t bits;
struct mutex lock;
- int32_t request_id[CAM_FRAME_CMD_MAX];
+ uint64_t request_id[CAM_FRAME_CMD_MAX];
uint32_t num_out_resources[CAM_FRAME_CMD_MAX];
uint32_t out_resource[CAM_FRAME_CMD_MAX][CAM_MAX_OUT_RES];
};
@@ -113,13 +113,13 @@
* @scratch_mem_size: Scratch memory size
* @acquire_dev_cmd: Acquire command
* @icp_dev_acquire_info: Acquire device info
- * @icp_out_acquire_info: Acquire out resource info
* @ctxt_event_cb: Context callback function
* @in_use: Flag for context usage
* @role: Role of a context in case of chaining
* @chain_ctx: Peer context
* @hfi_frame_process: Frame process command
* @wait_complete: Completion info
+ * @ctx_id: Context Id
* @temp_payload: Payload for destroy handle data
*/
struct cam_icp_hw_ctx_data {
@@ -128,15 +128,15 @@
uint32_t fw_handle;
uint32_t scratch_mem_size;
struct cam_acquire_dev_cmd acquire_dev_cmd;
- struct cam_icp_acquire_dev_info icp_dev_acquire_info;
- struct cam_icp_res_info icp_out_acquire_info[CAM_MAX_OUT_RES];
+ struct cam_icp_acquire_dev_info *icp_dev_acquire_info;
cam_hw_event_cb_func ctxt_event_cb;
- uint32_t in_use;
+ bool in_use;
uint32_t role;
struct cam_icp_hw_ctx_data *chain_ctx;
struct hfi_frame_process_info hfi_frame_process;
struct completion wait_complete;
struct ipe_bps_destroy temp_payload;
+ uint32_t ctx_id;
};
/**
@@ -183,4 +183,6 @@
bool a5_debug;
};
+static int cam_icp_mgr_hw_close(void *hw_priv, void *hw_close_args);
+static int cam_icp_mgr_download_fw(void *hw_mgr_priv, void *download_fw_args);
#endif /* CAM_ICP_HW_MGR_H */
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include/cam_a5_hw_intf.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_a5_hw_intf.h
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include/cam_a5_hw_intf.h
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_a5_hw_intf.h
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include/cam_bps_hw_intf.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_bps_hw_intf.h
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include/cam_bps_hw_intf.h
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_bps_hw_intf.h
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include/cam_icp_hw_intf.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_icp_hw_intf.h
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include/cam_icp_hw_intf.h
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_icp_hw_intf.h
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include/cam_ipe_hw_intf.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_ipe_hw_intf.h
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include/cam_ipe_hw_intf.h
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_ipe_hw_intf.h
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/include/cam_icp_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/icp_hw/include/cam_icp_hw_mgr_intf.h
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/Makefile b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/Makefile
new file mode 100644
index 0000000..f904ea4
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/Makefile
@@ -0,0 +1,11 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/fw_inc
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += ipe_dev.o ipe_core.o ipe_soc.o
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c
similarity index 85%
rename from drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_core.c
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c
index 07f63d2..b7b3d7b 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_core.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c
@@ -10,8 +10,6 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "IPE-CORE %s:%d " fmt, __func__, __LINE__
-
#include <linux/of.h>
#include <linux/debugfs.h>
#include <linux/videodev2.h>
@@ -30,6 +28,7 @@
#include "cam_ipe_hw_intf.h"
#include "cam_icp_hw_mgr_intf.h"
#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
static int cam_ipe_caps_vote(struct cam_ipe_device_core_info *core_info,
struct cam_icp_cpas_vote *cpas_vote)
@@ -44,7 +43,7 @@
&cpas_vote->axi_vote);
if (rc)
- pr_err("cpas vote is failed: %d\n", rc);
+ CAM_ERR(CAM_ICP, "cpas vote is failed: %d", rc);
return rc;
}
@@ -59,7 +58,7 @@
int rc = 0;
if (!device_priv) {
- pr_err("Invalid cam_dev_info\n");
+ CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
return -EINVAL;
}
@@ -67,28 +66,29 @@
core_info = (struct cam_ipe_device_core_info *)ipe_dev->core_info;
if ((!soc_info) || (!core_info)) {
- pr_err("soc_info = %pK core_info = %pK\n", soc_info, core_info);
+ CAM_ERR(CAM_ICP, "soc_info = %pK core_info = %pK",
+ soc_info, core_info);
return -EINVAL;
}
cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE;
- cpas_vote.ahb_vote.vote.level = CAM_TURBO_VOTE;
+ cpas_vote.ahb_vote.vote.level = CAM_SVS_VOTE;
cpas_vote.axi_vote.compressed_bw = ICP_TURBO_VOTE;
cpas_vote.axi_vote.uncompressed_bw = ICP_TURBO_VOTE;
rc = cam_cpas_start(core_info->cpas_handle,
&cpas_vote.ahb_vote, &cpas_vote.axi_vote);
if (rc) {
- pr_err("cpass start failed: %d\n", rc);
+ CAM_ERR(CAM_ICP, "cpass start failed: %d", rc);
return rc;
}
core_info->cpas_start = true;
rc = cam_ipe_enable_soc_resources(soc_info);
if (rc) {
- pr_err("soc enable is failed : %d\n", rc);
+ CAM_ERR(CAM_ICP, "soc enable is failed : %d", rc);
if (cam_cpas_stop(core_info->cpas_handle))
- pr_err("cpas stop is failed\n");
+ CAM_ERR(CAM_ICP, "cpas stop is failed");
else
core_info->cpas_start = false;
}
@@ -105,24 +105,25 @@
int rc = 0;
if (!device_priv) {
- pr_err("Invalid cam_dev_info\n");
+ CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
return -EINVAL;
}
soc_info = &ipe_dev->soc_info;
core_info = (struct cam_ipe_device_core_info *)ipe_dev->core_info;
if ((!soc_info) || (!core_info)) {
- pr_err("soc_info = %pK core_info = %pK\n", soc_info, core_info);
+ CAM_ERR(CAM_ICP, "soc_info = %pK core_info = %pK",
+ soc_info, core_info);
return -EINVAL;
}
rc = cam_ipe_disable_soc_resources(soc_info);
if (rc)
- pr_err("soc disable is failed : %d\n", rc);
+ CAM_ERR(CAM_ICP, "soc disable is failed : %d", rc);
if (core_info->cpas_start) {
if (cam_cpas_stop(core_info->cpas_handle))
- pr_err("cpas stop is failed\n");
+ CAM_ERR(CAM_ICP, "cpas stop is failed");
else
core_info->cpas_start = false;
}
@@ -140,12 +141,12 @@
int rc = 0;
if (!device_priv) {
- pr_err("Invalid arguments\n");
+ CAM_ERR(CAM_ICP, "Invalid arguments");
return -EINVAL;
}
if (cmd_type >= CAM_ICP_IPE_CMD_MAX) {
- pr_err("Invalid command : %x\n", cmd_type);
+ CAM_ERR(CAM_ICP, "Invalid command : %x", cmd_type);
return -EINVAL;
}
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_core.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.h
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_core.h
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.h
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_dev.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_dev.c
similarity index 92%
rename from drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_dev.c
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_dev.c
index 0efb1de..d95246f 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_dev.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_dev.c
@@ -23,9 +23,7 @@
#include "cam_icp_hw_intf.h"
#include "cam_icp_hw_mgr_intf.h"
#include "cam_cpas_api.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
struct cam_ipe_device_hw_info cam_ipe_hw_info = {
.reserved = 0,
@@ -47,7 +45,7 @@
rc = cam_cpas_register_client(&cpas_register_params);
if (rc < 0) {
- pr_err("cam_cpas_register_client is failed: %d\n", rc);
+ CAM_ERR(CAM_ICP, "failed: %d", rc);
return rc;
}
core_info->cpas_handle = cpas_register_params.client_handle;
@@ -83,7 +81,7 @@
ipe_dev_intf->hw_ops.process_cmd = cam_ipe_process_cmd;
ipe_dev_intf->hw_type = CAM_ICP_DEV_IPE;
- pr_debug("%s: type %d index %d\n", __func__,
+ CAM_DBG(CAM_ICP, "type %d index %d",
ipe_dev_intf->hw_type,
ipe_dev_intf->hw_idx);
@@ -101,7 +99,7 @@
match_dev = of_match_device(pdev->dev.driver->of_match_table,
&pdev->dev);
if (!match_dev) {
- pr_debug("%s: No ipe hardware info\n", __func__);
+ CAM_DBG(CAM_ICP, "No ipe hardware info");
kfree(ipe_dev->core_info);
kfree(ipe_dev);
kfree(ipe_dev_intf);
@@ -114,14 +112,14 @@
rc = cam_ipe_init_soc_resources(&ipe_dev->soc_info, cam_ipe_irq,
ipe_dev);
if (rc < 0) {
- pr_err("%s: failed to init_soc\n", __func__);
+ CAM_ERR(CAM_ICP, "failed to init_soc");
kfree(ipe_dev->core_info);
kfree(ipe_dev);
kfree(ipe_dev_intf);
return rc;
}
- pr_debug("cam_ipe_init_soc_resources : %pK\n",
+ CAM_DBG(CAM_ICP, "cam_ipe_init_soc_resources : %pK",
(void *)&ipe_dev->soc_info);
rc = cam_ipe_register_cpas(&ipe_dev->soc_info,
core_info, ipe_dev_intf->hw_idx);
@@ -136,7 +134,7 @@
spin_lock_init(&ipe_dev->hw_lock);
init_completion(&ipe_dev->hw_complete);
- pr_debug("%s: IPE%d probe successful\n", __func__,
+ CAM_DBG(CAM_ICP, "IPE%d probe successful",
ipe_dev_intf->hw_idx);
return rc;
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_soc.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c
similarity index 86%
rename from drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_soc.c
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c
index 527e716..26dd6d2 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_soc.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c
@@ -18,9 +18,7 @@
#include <media/cam_icp.h>
#include "ipe_soc.h"
#include "cam_soc_util.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
static int cam_ipe_get_dt_properties(struct cam_hw_soc_info *soc_info)
{
@@ -28,7 +26,7 @@
rc = cam_soc_util_get_dt_properties(soc_info);
if (rc < 0)
- pr_err("get ipe dt prop is failed\n");
+ CAM_ERR(CAM_ICP, "get ipe dt prop is failed");
return rc;
}
@@ -66,9 +64,10 @@
{
int rc = 0;
- rc = cam_soc_util_enable_platform_resource(soc_info, true, false);
+ rc = cam_soc_util_enable_platform_resource(soc_info, true,
+ CAM_TURBO_VOTE, false);
if (rc) {
- pr_err("%s: enable platform failed\n", __func__);
+ CAM_ERR(CAM_ICP, "enable platform failed");
return rc;
}
@@ -81,7 +80,7 @@
rc = cam_soc_util_disable_platform_resource(soc_info, true, false);
if (rc)
- pr_err("%s: enable platform failed\n", __func__);
+		CAM_ERR(CAM_ICP, "disable platform failed");
return rc;
}
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_soc.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.h
similarity index 100%
rename from drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/ipe_soc.h
rename to drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.h
diff --git a/drivers/media/platform/msm/camera/cam_isp/Makefile b/drivers/media/platform/msm/camera/cam_isp/Makefile
index 77ad6fc..4851535 100644
--- a/drivers/media/platform/msm/camera/cam_isp/Makefile
+++ b/drivers/media/platform/msm/camera/cam_isp/Makefile
@@ -1,8 +1,9 @@
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
-ccflags-y += -Idrivers/media/platform/msm/camera/utils
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
obj-$(CONFIG_SPECTRA_CAMERA) += isp_hw_mgr/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_isp_dev.o cam_isp_context.o
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
index a9064fa..a6f60f5 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
@@ -14,14 +14,52 @@
#include <linux/videodev2.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
+#include <linux/ratelimit.h>
#include "cam_isp_context.h"
#include "cam_isp_log.h"
#include "cam_mem_mgr.h"
#include "cam_sync_api.h"
+#include "cam_req_mgr_dev.h"
+#include "cam_trace.h"
+#include "cam_debug_util.h"
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+static uint64_t __cam_isp_ctx_get_event_ts(uint32_t evt_id, void *evt_data)
+{
+ uint64_t ts = 0;
+
+ if (!evt_data)
+ return 0;
+
+ switch (evt_id) {
+ case CAM_ISP_HW_EVENT_ERROR:
+ ts = ((struct cam_isp_hw_error_event_data *)evt_data)->
+ timestamp;
+ break;
+ case CAM_ISP_HW_EVENT_SOF:
+ ts = ((struct cam_isp_hw_sof_event_data *)evt_data)->
+ timestamp;
+ break;
+ case CAM_ISP_HW_EVENT_REG_UPDATE:
+ ts = ((struct cam_isp_hw_reg_update_event_data *)evt_data)->
+ timestamp;
+ break;
+ case CAM_ISP_HW_EVENT_EPOCH:
+ ts = ((struct cam_isp_hw_epoch_event_data *)evt_data)->
+ timestamp;
+ break;
+ case CAM_ISP_HW_EVENT_EOF:
+ ts = ((struct cam_isp_hw_eof_event_data *)evt_data)->
+ timestamp;
+ break;
+ case CAM_ISP_HW_EVENT_DONE:
+ break;
+ default:
+ CAM_DBG(CAM_ISP, "Invalid Event Type %d", evt_id);
+ }
+
+ return ts;
+}
static int __cam_isp_ctx_handle_buf_done_in_activated_state(
struct cam_isp_context *ctx_isp,
@@ -35,14 +73,17 @@
struct cam_context *ctx = ctx_isp->base;
if (list_empty(&ctx->active_req_list)) {
- CDBG("Buf done with no active request!\n");
+ CAM_DBG(CAM_ISP, "Buf done with no active request!");
goto end;
}
- CDBG("%s: Enter with bubble_state %d\n", __func__, bubble_state);
+ CAM_DBG(CAM_ISP, "Enter with bubble_state %d", bubble_state);
req = list_first_entry(&ctx->active_req_list,
struct cam_ctx_request, list);
+
+ trace_cam_buf_done("ISP", ctx, req);
+
req_isp = (struct cam_isp_ctx_req *) req->req_priv;
for (i = 0; i < done->num_handles; i++) {
for (j = 0; j < req_isp->num_fence_map_out; j++) {
@@ -52,29 +93,30 @@
}
if (j == req_isp->num_fence_map_out) {
- pr_err("Can not find matching lane handle 0x%x!\n",
+ CAM_ERR(CAM_ISP,
+ "Can not find matching lane handle 0x%x!",
done->resource_handle[i]);
rc = -EINVAL;
continue;
}
if (!bubble_state) {
- CDBG("%s: Sync with success: fd 0x%x\n", __func__,
+ CAM_DBG(CAM_ISP, "Sync with success: fd 0x%x",
req_isp->fence_map_out[j].sync_id);
rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
CAM_SYNC_STATE_SIGNALED_SUCCESS);
if (rc)
- pr_err("%s: Sync failed with rc = %d\n",
- __func__, rc);
+ CAM_ERR(CAM_ISP, "Sync failed with rc = %d",
+ rc);
} else if (!req_isp->bubble_report) {
- CDBG("%s: Sync with failure: fd 0x%x\n", __func__,
+ CAM_DBG(CAM_ISP, "Sync with failure: fd 0x%x",
req_isp->fence_map_out[j].sync_id);
rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
CAM_SYNC_STATE_SIGNALED_ERROR);
if (rc)
- pr_err("%s: Sync failed with rc = %d\n",
- __func__, rc);
+ CAM_ERR(CAM_ISP, "Sync failed with rc = %d",
+ rc);
} else {
/*
* Ignore the buffer done if bubble detect is on
@@ -87,7 +129,7 @@
continue;
}
- CDBG("%s: req %lld, reset sync id 0x%x\n", __func__,
+ CAM_DBG(CAM_ISP, "req %lld, reset sync id 0x%x",
req->request_id,
req_isp->fence_map_out[j].sync_id);
req_isp->num_acked++;
@@ -97,12 +139,42 @@
if (req_isp->num_acked == req_isp->num_fence_map_out) {
list_del_init(&req->list);
list_add_tail(&req->list, &ctx->free_req_list);
+ ctx_isp->active_req_cnt--;
+ CAM_DBG(CAM_ISP,
+ "Move active request %lld to free list(cnt = %d)",
+ req->request_id, ctx_isp->active_req_cnt);
}
end:
return rc;
}
+static void __cam_isp_ctx_send_sof_timestamp(
+ struct cam_isp_context *ctx_isp, uint64_t request_id,
+ uint32_t sof_event_status)
+{
+ struct cam_req_mgr_message req_msg;
+
+ req_msg.session_hdl = ctx_isp->base->session_hdl;
+ req_msg.u.frame_msg.frame_id = ctx_isp->frame_id;
+ req_msg.u.frame_msg.request_id = request_id;
+ req_msg.u.frame_msg.timestamp = ctx_isp->sof_timestamp_val;
+ req_msg.u.frame_msg.link_hdl = ctx_isp->base->link_hdl;
+ req_msg.u.frame_msg.sof_status = sof_event_status;
+
+ CAM_DBG(CAM_ISP,
+ "request id:%lld frame number:%lld SOF time stamp:0x%llx",
+ request_id, ctx_isp->frame_id,
+ ctx_isp->sof_timestamp_val);
+ CAM_DBG(CAM_ISP, " sof status:%d", sof_event_status);
+
+ if (cam_req_mgr_notify_frame_message(&req_msg,
+ V4L_EVENT_CAM_REQ_MGR_SOF, V4L_EVENT_CAM_REQ_MGR_EVENT))
+ CAM_ERR(CAM_ISP,
+ "Error in notifying the sof time for req id:%lld",
+ request_id);
+}
+
static int __cam_isp_ctx_reg_upd_in_activated_state(
struct cam_isp_context *ctx_isp, void *evt_data)
{
@@ -112,7 +184,7 @@
struct cam_isp_ctx_req *req_isp;
if (list_empty(&ctx->pending_req_list)) {
- pr_err("Reg upd ack with no pending request\n");
+ CAM_ERR(CAM_ISP, "Reg upd ack with no pending request");
goto end;
}
req = list_first_entry(&ctx->pending_req_list,
@@ -121,12 +193,16 @@
req_isp = (struct cam_isp_ctx_req *) req->req_priv;
if (req_isp->num_fence_map_out != 0) {
- CDBG("%s: move request %lld to active list\n", __func__,
- req->request_id);
list_add_tail(&req->list, &ctx->active_req_list);
+ ctx_isp->active_req_cnt++;
+ CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d)",
+ req->request_id, ctx_isp->active_req_cnt);
} else {
/* no io config, so the request is completed. */
list_add_tail(&req->list, &ctx->free_req_list);
+ CAM_DBG(CAM_ISP,
+ "move active request %lld to free list(cnt = %d)",
+ req->request_id, ctx_isp->active_req_cnt);
}
/*
@@ -134,7 +210,7 @@
* state so change substate here.
*/
ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH;
- CDBG("%s: next substate %d\n", __func__, ctx_isp->substate_activated);
+ CAM_DBG(CAM_ISP, "next substate %d", ctx_isp->substate_activated);
end:
return rc;
@@ -143,34 +219,61 @@
static int __cam_isp_ctx_notify_sof_in_actived_state(
struct cam_isp_context *ctx_isp, void *evt_data)
{
- int rc = 0;
struct cam_req_mgr_sof_notify notify;
struct cam_context *ctx = ctx_isp->base;
+ struct cam_ctx_request *req;
+ uint64_t request_id = 0;
- /* notify reqmgr with sof signal */
- if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_sof) {
+ /*
+ * notify reqmgr with sof signal. Note, due to scheduling delay
+ * we can run into situation that two active requests has already
+ * be in the active queue while we try to do the notification.
+ * In this case, we need to skip the current notification. This
+ * helps the state machine to catch up the delay.
+ */
+ if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_sof &&
+ ctx_isp->active_req_cnt <= 2) {
notify.link_hdl = ctx->link_hdl;
notify.dev_hdl = ctx->dev_hdl;
notify.frame_id = ctx_isp->frame_id;
ctx->ctx_crm_intf->notify_sof(¬ify);
- CDBG("%s: Notify CRM SOF frame %lld\n", __func__,
+ CAM_DBG(CAM_ISP, "Notify CRM SOF frame %lld",
ctx_isp->frame_id);
+
+ list_for_each_entry(req, &ctx->active_req_list, list) {
+ if (req->request_id > ctx_isp->reported_req_id) {
+ request_id = req->request_id;
+ ctx_isp->reported_req_id = request_id;
+ break;
+ }
+ }
+
+ __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+ CAM_REQ_MGR_SOF_EVENT_SUCCESS);
} else {
- pr_err("%s: Can not notify SOF to CRM\n", __func__);
+ CAM_ERR(CAM_ISP, "Can not notify SOF to CRM");
}
- return rc;
+ return 0;
}
-static int __cam_isp_ctx_sof_in_sof(struct cam_isp_context *ctx_isp,
- void *evt_data)
+static int __cam_isp_ctx_sof_in_activated_state(
+ struct cam_isp_context *ctx_isp, void *evt_data)
{
int rc = 0;
+ struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
- CDBG("%s: Enter\n", __func__);
+ if (!evt_data) {
+		CAM_ERR(CAM_ISP, "invalid sof event data");
+ return -EINVAL;
+ }
+
ctx_isp->frame_id++;
+ ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
+ CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
+ ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
return rc;
}
@@ -184,7 +287,7 @@
struct cam_context *ctx = ctx_isp->base;
if (ctx->state != CAM_CTX_ACTIVATED) {
- CDBG("%s: invalid RUP\n", __func__);
+ CAM_DBG(CAM_ISP, "invalid RUP");
goto end;
}
@@ -197,11 +300,16 @@
struct cam_ctx_request, list);
list_del_init(&req->list);
req_isp = (struct cam_isp_ctx_req *) req->req_priv;
- if (req_isp->num_fence_map_out == req_isp->num_acked)
+ if (req_isp->num_fence_map_out == req_isp->num_acked) {
list_add_tail(&req->list, &ctx->free_req_list);
- else {
+ } else {
/* need to handle the buf done */
list_add_tail(&req->list, &ctx->active_req_list);
+ ctx_isp->active_req_cnt++;
+ CAM_DBG(CAM_ISP,
+ "move request %lld to active list(cnt = %d)",
+ req->request_id,
+ ctx_isp->active_req_cnt);
ctx_isp->substate_activated =
CAM_ISP_CTX_ACTIVATED_EPOCH;
}
@@ -213,18 +321,23 @@
static int __cam_isp_ctx_epoch_in_applied(struct cam_isp_context *ctx_isp,
void *evt_data)
{
- int rc = 0;
struct cam_ctx_request *req;
struct cam_isp_ctx_req *req_isp;
struct cam_context *ctx = ctx_isp->base;
+ uint64_t request_id = 0;
if (list_empty(&ctx->pending_req_list)) {
/*
* If no pending req in epoch, this is an error case.
* The recovery is to go back to sof state
*/
- pr_err("%s: No pending request\n", __func__);
+ CAM_ERR(CAM_ISP, "No pending request");
ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+
+ /* Send SOF event as empty frame*/
+ __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+ CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+
goto end;
}
@@ -232,7 +345,7 @@
list);
req_isp = (struct cam_isp_ctx_req *)req->req_priv;
- CDBG("Report Bubble flag %d\n", req_isp->bubble_report);
+ CAM_DBG(CAM_ISP, "Report Bubble flag %d", req_isp->bubble_report);
if (req_isp->bubble_report && ctx->ctx_crm_intf &&
ctx->ctx_crm_intf->notify_err) {
struct cam_req_mgr_error_notify notify;
@@ -242,7 +355,7 @@
notify.req_id = req->request_id;
notify.error = CRM_KMD_ERR_BUBBLE;
ctx->ctx_crm_intf->notify_err(¬ify);
- CDBG("%s: Notify CRM about Bubble frame %lld\n", __func__,
+ CAM_DBG(CAM_ISP, "Notify CRM about Bubble frame %lld",
ctx_isp->frame_id);
} else {
/*
@@ -251,14 +364,21 @@
*/
list_del_init(&req->list);
list_add_tail(&req->list, &ctx->active_req_list);
+ ctx_isp->active_req_cnt++;
+ CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d)",
+ req->request_id, ctx_isp->active_req_cnt);
req_isp->bubble_report = 0;
}
+ request_id = req->request_id;
+ __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+ CAM_REQ_MGR_SOF_EVENT_ERROR);
+
ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
- CDBG("%s: next substate %d\n", __func__,
+ CAM_DBG(CAM_ISP, "next substate %d",
ctx_isp->substate_activated);
end:
- return rc;
+ return 0;
}
@@ -278,15 +398,23 @@
void *evt_data)
{
int rc = 0;
- struct cam_context *ctx = ctx_isp->base;
+ struct cam_context *ctx = ctx_isp->base;
+ struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
+ if (!evt_data) {
+		CAM_ERR(CAM_ISP, "invalid sof event data");
+ return -EINVAL;
+ }
ctx_isp->frame_id++;
+ ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
+
if (list_empty(&ctx->active_req_list))
ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
else
- CDBG("%s: Still need to wait for the buf done\n", __func__);
- CDBG("%s: next substate %d\n", __func__,
+ CAM_DBG(CAM_ISP, "Still need to wait for the buf done");
+
+ CAM_DBG(CAM_ISP, "next substate %d",
ctx_isp->substate_activated);
return rc;
@@ -303,14 +431,6 @@
return rc;
}
-
-static int __cam_isp_ctx_sof_in_bubble(struct cam_isp_context *ctx_isp,
- void *evt_data)
-{
- ctx_isp->frame_id++;
- return 0;
-}
-
static int __cam_isp_ctx_buf_done_in_bubble(
struct cam_isp_context *ctx_isp, void *evt_data)
{
@@ -322,20 +442,13 @@
return rc;
}
-static int __cam_isp_ctx_sof_in_bubble_applied(
- struct cam_isp_context *ctx_isp, void *evt_data)
-{
- ctx_isp->frame_id++;
- return 0;
-}
-
-
static int __cam_isp_ctx_epoch_in_bubble_applied(
struct cam_isp_context *ctx_isp, void *evt_data)
{
struct cam_ctx_request *req;
struct cam_isp_ctx_req *req_isp;
struct cam_context *ctx = ctx_isp->base;
+ uint64_t request_id = 0;
/*
* This means we missed the reg upd ack. So we need to
@@ -347,7 +460,10 @@
* If no pending req in epoch, this is an error case.
* Just go back to the bubble state.
*/
- pr_err("%s: No pending request.\n", __func__);
+ CAM_ERR(CAM_ISP, "No pending request.");
+ __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+ CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+
ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
goto end;
}
@@ -365,7 +481,7 @@
notify.req_id = req->request_id;
notify.error = CRM_KMD_ERR_BUBBLE;
ctx->ctx_crm_intf->notify_err(¬ify);
- CDBG("%s: Notify CRM about Bubble frame %lld\n", __func__,
+ CAM_DBG(CAM_ISP, "Notify CRM about Bubble frame %lld",
ctx_isp->frame_id);
} else {
/*
@@ -374,11 +490,18 @@
*/
list_del_init(&req->list);
list_add_tail(&req->list, &ctx->active_req_list);
+ ctx_isp->active_req_cnt++;
+ CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d)",
+ req->request_id, ctx_isp->active_req_cnt);
req_isp->bubble_report = 0;
}
+ request_id = req->request_id;
+ __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+ CAM_REQ_MGR_SOF_EVENT_ERROR);
+
ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
- CDBG("%s: next substate %d\n", __func__, ctx_isp->substate_activated);
+ CAM_DBG(CAM_ISP, "next substate %d", ctx_isp->substate_activated);
end:
return 0;
}
@@ -407,7 +530,7 @@
uint32_t error_type = error_event_data->error_type;
- CDBG("%s: Enter error_type = %d\n", __func__, error_type);
+ CAM_DBG(CAM_ISP, "Enter error_type = %d", error_type);
if ((error_type == CAM_ISP_HW_ERROR_OVERFLOW) ||
(error_type == CAM_ISP_HW_ERROR_BUSIF_OVERFLOW))
notify.error = CRM_KMD_ERR_FATAL;
@@ -419,7 +542,7 @@
*/
if (list_empty(&ctx->active_req_list)) {
- pr_err("handling error with no active request!\n");
+ CAM_ERR(CAM_ISP, "handling error with no active request");
rc = -EINVAL;
goto end;
}
@@ -433,10 +556,10 @@
notify.req_id = req->request_id;
ctx->ctx_crm_intf->notify_err(¬ify);
- pr_err("%s: Notify CRM about ERROR frame %lld\n", __func__,
+ CAM_ERR(CAM_ISP, "Notify CRM about ERROR frame %lld",
ctx_isp->frame_id);
} else {
- pr_err("%s: Can not notify ERRROR to CRM\n", __func__);
+ CAM_ERR(CAM_ISP, "Can not notify ERRROR to CRM");
rc = -EFAULT;
}
@@ -445,7 +568,7 @@
/* might need to check if active list is empty */
end:
- CDBG("%s: Exit\n", __func__);
+ CAM_DBG(CAM_ISP, "Exit");
return rc;
}
@@ -455,7 +578,7 @@
{
.irq_ops = {
NULL,
- __cam_isp_ctx_sof_in_sof,
+ __cam_isp_ctx_sof_in_activated_state,
__cam_isp_ctx_reg_upd_in_sof,
__cam_isp_ctx_notify_sof_in_actived_state,
NULL,
@@ -466,7 +589,7 @@
{
.irq_ops = {
__cam_isp_ctx_handle_error,
- __cam_isp_ctx_sof_in_sof,
+ __cam_isp_ctx_sof_in_activated_state,
__cam_isp_ctx_reg_upd_in_activated_state,
__cam_isp_ctx_epoch_in_applied,
NULL,
@@ -488,7 +611,7 @@
{
.irq_ops = {
NULL,
- __cam_isp_ctx_sof_in_bubble,
+ __cam_isp_ctx_sof_in_activated_state,
NULL,
__cam_isp_ctx_notify_sof_in_actived_state,
NULL,
@@ -499,7 +622,7 @@
{
.irq_ops = {
NULL,
- __cam_isp_ctx_sof_in_bubble_applied,
+ __cam_isp_ctx_sof_in_activated_state,
__cam_isp_ctx_reg_upd_in_activated_state,
__cam_isp_ctx_epoch_in_bubble_applied,
NULL,
@@ -516,15 +639,14 @@
uint32_t next_state)
{
int rc = 0;
- int cnt = 0;
struct cam_ctx_request *req;
struct cam_isp_ctx_req *req_isp;
struct cam_isp_context *ctx_isp;
struct cam_hw_config_args cfg;
if (list_empty(&ctx->pending_req_list)) {
- pr_err("%s: No available request for Apply id %lld\n",
- __func__, apply->request_id);
+ CAM_ERR(CAM_ISP, "No available request for Apply id %lld",
+ apply->request_id);
rc = -EFAULT;
goto end;
}
@@ -535,13 +657,13 @@
* The maximum number of request allowed to be outstanding is 2.
*
*/
- list_for_each_entry(req, &ctx->active_req_list, list) {
- if (++cnt > 2) {
- pr_err("%s: Apply failed due to pipeline congestion\n",
- __func__);
- rc = -EFAULT;
- goto end;
- }
+ ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;
+ if (ctx_isp->active_req_cnt >= 2) {
+ CAM_DBG(CAM_ISP,
+ "Reject apply request due to congestion(cnt = %d)",
+ ctx_isp->active_req_cnt);
+ rc = -EFAULT;
+ goto end;
}
req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
@@ -556,9 +678,8 @@
goto end;
}
- CDBG("%s: Apply request %lld\n", __func__, req->request_id);
+ CAM_DBG(CAM_ISP, "Apply request %lld", req->request_id);
req_isp = (struct cam_isp_ctx_req *) req->req_priv;
- ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;
req_isp->bubble_report = apply->report_if_bubble;
@@ -568,11 +689,11 @@
rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
if (rc) {
- pr_err("%s: Can not apply the configuration\n", __func__);
+ CAM_ERR(CAM_ISP, "Can not apply the configuration");
} else {
spin_lock_bh(&ctx->lock);
ctx_isp->substate_activated = next_state;
- CDBG("%s: new state %d\n", __func__, next_state);
+ CAM_DBG(CAM_ISP, "new state %d", next_state);
spin_unlock_bh(&ctx->lock);
}
end:
@@ -586,11 +707,11 @@
struct cam_isp_context *ctx_isp =
(struct cam_isp_context *) ctx->ctx_priv;
- CDBG("%s: current substate %d\n", __func__,
+ CAM_DBG(CAM_ISP, "current substate %d",
ctx_isp->substate_activated);
rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
CAM_ISP_CTX_ACTIVATED_APPLIED);
- CDBG("%s: new substate %d\n", __func__, ctx_isp->substate_activated);
+ CAM_DBG(CAM_ISP, "new substate %d", ctx_isp->substate_activated);
return rc;
}
@@ -602,11 +723,11 @@
struct cam_isp_context *ctx_isp =
(struct cam_isp_context *) ctx->ctx_priv;
- CDBG("%s: current substate %d\n", __func__,
+ CAM_DBG(CAM_ISP, "current substate %d",
ctx_isp->substate_activated);
rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
CAM_ISP_CTX_ACTIVATED_APPLIED);
- CDBG("%s: new substate %d\n", __func__, ctx_isp->substate_activated);
+ CAM_DBG(CAM_ISP, "new substate %d", ctx_isp->substate_activated);
return rc;
}
@@ -618,15 +739,106 @@
struct cam_isp_context *ctx_isp =
(struct cam_isp_context *) ctx->ctx_priv;
- CDBG("%s: current substate %d\n", __func__,
+ CAM_DBG(CAM_ISP, "current substate %d",
ctx_isp->substate_activated);
rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED);
- CDBG("%s: new substate %d\n", __func__, ctx_isp->substate_activated);
+ CAM_DBG(CAM_ISP, "new substate %d", ctx_isp->substate_activated);
return rc;
}
+static int __cam_isp_ctx_flush_req(struct cam_context *ctx,
+ struct list_head *req_list, struct cam_req_mgr_flush_request *flush_req)
+{
+ int i, rc;
+ uint32_t cancel_req_id_found = 0;
+ struct cam_ctx_request *req;
+ struct cam_ctx_request *req_temp;
+ struct cam_isp_ctx_req *req_isp;
+
+ spin_lock(&ctx->lock);
+ if (list_empty(req_list)) {
+ spin_unlock(&ctx->lock);
+ CAM_DBG(CAM_ISP, "request list is empty");
+ return 0;
+ }
+
+ list_for_each_entry_safe(req, req_temp, req_list, list) {
+ if ((flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ)
+ && (req->request_id != flush_req->req_id))
+ continue;
+
+ list_del_init(&req->list);
+ req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+ for (i = 0; i < req_isp->num_fence_map_out; i++) {
+ if (req_isp->fence_map_out[i].sync_id != -1) {
+ CAM_DBG(CAM_ISP, "Flush req 0x%llx, fence %d",
+ req->request_id,
+ req_isp->fence_map_out[i].sync_id);
+ rc = cam_sync_signal(
+ req_isp->fence_map_out[i].sync_id,
+ CAM_SYNC_STATE_SIGNALED_ERROR);
+ if (rc)
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "signal fence failed\n");
+ req_isp->fence_map_out[i].sync_id = -1;
+ }
+ }
+ list_add_tail(&req->list, &ctx->free_req_list);
+
+ /* If flush request id found, exit the loop */
+ if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
+ cancel_req_id_found = 1;
+ break;
+ }
+ }
+ spin_unlock(&ctx->lock);
+
+ if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ &&
+ !cancel_req_id_found)
+ CAM_DBG(CAM_ISP,
+ "Flush request id:%lld is not found in the list",
+ flush_req->req_id);
+
+ return 0;
+}
+
+static int __cam_isp_ctx_flush_req_in_top_state(
+ struct cam_context *ctx,
+ struct cam_req_mgr_flush_request *flush_req)
+{
+ int rc = 0;
+
+ CAM_DBG(CAM_ISP, "try to flush pending list");
+ rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
+ CAM_DBG(CAM_ISP, "Flush request in top state %d",
+ ctx->state);
+ return rc;
+}
+
+static int __cam_isp_ctx_flush_req_in_ready(
+ struct cam_context *ctx,
+ struct cam_req_mgr_flush_request *flush_req)
+{
+ int rc = 0;
+
+ CAM_DBG(CAM_ISP, "try to flush pending list");
+ rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
+
+ /* if nothing is in pending req list, change state to acquire*/
+ spin_lock(&ctx->lock);
+ if (list_empty(&ctx->pending_req_list))
+ ctx->state = CAM_CTX_ACQUIRED;
+ spin_unlock(&ctx->lock);
+
+ trace_cam_context_state("ISP", ctx);
+
+ CAM_DBG(CAM_ISP, "Flush request in ready state. next state %d",
+ ctx->state);
+ return rc;
+}
+
static struct cam_ctx_ops
cam_isp_ctx_activated_state_machine[CAM_ISP_CTX_ACTIVATED_MAX] = {
/* SOF */
@@ -673,18 +885,422 @@
},
};
+static int __cam_isp_ctx_rdi_only_sof_in_top_state(
+ struct cam_isp_context *ctx_isp, void *evt_data)
+{
+ int rc = 0;
+ struct cam_context *ctx = ctx_isp->base;
+ struct cam_req_mgr_sof_notify notify;
+ struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
+ uint64_t request_id = 0;
+
+ if (!evt_data) {
+ CAM_ERR(CAM_ISP, "in valid sof event data");
+ return -EINVAL;
+ }
+
+ ctx_isp->frame_id++;
+ ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
+ CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
+ ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
+
+ /*
+ * notify reqmgr with sof signal. Note, due to scheduling delay
+ * we can run into situation that two active requests has already
+ * be in the active queue while we try to do the notification.
+ * In this case, we need to skip the current notification. This
+ * helps the state machine to catch up the delay.
+ */
+ if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_sof &&
+ ctx_isp->active_req_cnt <= 2) {
+ notify.link_hdl = ctx->link_hdl;
+ notify.dev_hdl = ctx->dev_hdl;
+ notify.frame_id = ctx_isp->frame_id;
+
+ ctx->ctx_crm_intf->notify_sof(¬ify);
+ CAM_DBG(CAM_ISP, "Notify CRM SOF frame %lld",
+ ctx_isp->frame_id);
+
+ /*
+ * It is idle frame with out any applied request id, send
+ * request id as zero
+ */
+ __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+ CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+ } else {
+ CAM_ERR(CAM_ISP, "Can not notify SOF to CRM");
+ }
+
+ if (list_empty(&ctx->active_req_list))
+ ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+ else
+ CAM_DBG(CAM_ISP, "Still need to wait for the buf done");
+
+ CAM_DBG(CAM_ISP, "next substate %d",
+ ctx_isp->substate_activated);
+ return rc;
+}
+
+static int __cam_isp_ctx_rdi_only_sof_in_applied_state(
+ struct cam_isp_context *ctx_isp, void *evt_data)
+{
+ struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
+
+ if (!evt_data) {
+ CAM_ERR(CAM_ISP, "in valid sof event data");
+ return -EINVAL;
+ }
+
+ ctx_isp->frame_id++;
+ ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
+ CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
+ ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
+
+ ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED;
+ CAM_DBG(CAM_ISP, "next substate %d", ctx_isp->substate_activated);
+
+ return 0;
+}
+
+static int __cam_isp_ctx_rdi_only_sof_in_bubble_applied(
+ struct cam_isp_context *ctx_isp, void *evt_data)
+{
+ struct cam_ctx_request *req;
+ struct cam_isp_ctx_req *req_isp;
+ struct cam_context *ctx = ctx_isp->base;
+ struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
+ uint64_t request_id = 0;
+
+ ctx_isp->frame_id++;
+ ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
+ CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
+ ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
+
+ if (list_empty(&ctx->pending_req_list)) {
+ /*
+ * If no pending req in epoch, this is an error case.
+ * The recovery is to go back to sof state
+ */
+ CAM_ERR(CAM_ISP, "No pending request");
+ ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+
+ /* Send SOF event as empty frame*/
+ __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+ CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+
+ goto end;
+ }
+
+ req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
+ list);
+ req_isp = (struct cam_isp_ctx_req *)req->req_priv;
+
+ CAM_DBG(CAM_ISP, "Report Bubble flag %d", req_isp->bubble_report);
+ if (req_isp->bubble_report && ctx->ctx_crm_intf &&
+ ctx->ctx_crm_intf->notify_err) {
+ struct cam_req_mgr_error_notify notify;
+
+ notify.link_hdl = ctx->link_hdl;
+ notify.dev_hdl = ctx->dev_hdl;
+ notify.req_id = req->request_id;
+ notify.error = CRM_KMD_ERR_BUBBLE;
+ ctx->ctx_crm_intf->notify_err(¬ify);
+ CAM_DBG(CAM_ISP, "Notify CRM about Bubble frame %lld",
+ ctx_isp->frame_id);
+ } else {
+ /*
+ * Since can not bubble report, always move the request to
+ * active list.
+ */
+ list_del_init(&req->list);
+ list_add_tail(&req->list, &ctx->active_req_list);
+ ctx_isp->active_req_cnt++;
+ CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d)",
+ req->request_id, ctx_isp->active_req_cnt);
+ req_isp->bubble_report = 0;
+ }
+
+ request_id = req->request_id;
+ __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+ CAM_REQ_MGR_SOF_EVENT_ERROR);
+
+ /* change the state to bubble, as reg update has not come */
+ ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
+ CAM_DBG(CAM_ISP, "next substate %d", ctx_isp->substate_activated);
+end:
+ return 0;
+}
+
+static int __cam_isp_ctx_rdi_only_sof_in_bubble_state(
+ struct cam_isp_context *ctx_isp, void *evt_data)
+{
+ uint32_t i;
+ struct cam_ctx_request *req;
+ struct cam_context *ctx = ctx_isp->base;
+ struct cam_req_mgr_sof_notify notify;
+ struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
+ struct cam_isp_ctx_req *req_isp;
+ uint64_t request_id = 0;
+
+ if (!evt_data) {
+ CAM_ERR(CAM_ISP, "in valid sof event data");
+ return -EINVAL;
+ }
+
+ ctx_isp->frame_id++;
+ ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
+ CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
+ ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
+ /*
+ * Signal all active requests with error and move the all the active
+ * requests to free list
+ */
+ while (!list_empty(&ctx->active_req_list)) {
+ req = list_first_entry(&ctx->active_req_list,
+ struct cam_ctx_request, list);
+ list_del_init(&req->list);
+ req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+ CAM_DBG(CAM_ISP, "signal fence in active list. fence num %d",
+ req_isp->num_fence_map_out);
+ for (i = 0; i < req_isp->num_fence_map_out; i++)
+ if (req_isp->fence_map_out[i].sync_id != -1) {
+ cam_sync_signal(
+ req_isp->fence_map_out[i].sync_id,
+ CAM_SYNC_STATE_SIGNALED_ERROR);
+ }
+ list_add_tail(&req->list, &ctx->free_req_list);
+ }
+
+ /* notify reqmgr with sof signal */
+ if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_sof) {
+ notify.link_hdl = ctx->link_hdl;
+ notify.dev_hdl = ctx->dev_hdl;
+ notify.frame_id = ctx_isp->frame_id;
+
+ ctx->ctx_crm_intf->notify_sof(¬ify);
+ CAM_DBG(CAM_ISP, "Notify CRM SOF frame %lld",
+ ctx_isp->frame_id);
+
+ } else {
+ CAM_ERR(CAM_ISP, "Can not notify SOF to CRM");
+ }
+
+ /*
+ * It is idle frame with out any applied request id, send
+ * request id as zero
+ */
+ __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+ CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+
+ ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+
+ CAM_DBG(CAM_ISP, "next substate %d",
+ ctx_isp->substate_activated);
+
+ return 0;
+}
+
+static int __cam_isp_ctx_rdi_only_reg_upd_in_bubble_applied_state(
+ struct cam_isp_context *ctx_isp, void *evt_data)
+{
+ struct cam_ctx_request *req;
+ struct cam_context *ctx = ctx_isp->base;
+ struct cam_isp_ctx_req *req_isp;
+ struct cam_req_mgr_sof_notify notify;
+ uint64_t request_id = 0;
+
+ /* notify reqmgr with sof signal*/
+ if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_sof) {
+ if (list_empty(&ctx->pending_req_list)) {
+ CAM_ERR(CAM_ISP, "Reg upd ack with no pending request");
+ goto error;
+ }
+ req = list_first_entry(&ctx->pending_req_list,
+ struct cam_ctx_request, list);
+ list_del_init(&req->list);
+
+ req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+ request_id = req->request_id;
+ if (req_isp->num_fence_map_out != 0) {
+ list_add_tail(&req->list, &ctx->active_req_list);
+ ctx_isp->active_req_cnt++;
+ CAM_DBG(CAM_ISP,
+ "move request %lld to active list(cnt = %d)",
+ req->request_id, ctx_isp->active_req_cnt);
+ } else {
+ /* no io config, so the request is completed. */
+ list_add_tail(&req->list, &ctx->free_req_list);
+ CAM_DBG(CAM_ISP,
+ "move active req %lld to free list(cnt=%d)",
+ req->request_id, ctx_isp->active_req_cnt);
+ }
+
+ notify.link_hdl = ctx->link_hdl;
+ notify.dev_hdl = ctx->dev_hdl;
+ notify.frame_id = ctx_isp->frame_id;
+
+ ctx->ctx_crm_intf->notify_sof(¬ify);
+ CAM_DBG(CAM_ISP, "Notify CRM SOF frame %lld",
+ ctx_isp->frame_id);
+ } else {
+ CAM_ERR(CAM_ISP, "Can not notify SOF to CRM");
+ }
+ __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+ CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+
+ ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH;
+ CAM_DBG(CAM_ISP, "next substate %d", ctx_isp->substate_activated);
+
+ return 0;
+error:
+ /* Send SOF event as idle frame*/
+ __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+ CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+
+ /*
+ * There is no request in the pending list, move the sub state machine
+ * to SOF sub state
+ */
+ ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+
+ return 0;
+}
+
+static struct cam_isp_ctx_irq_ops
+ cam_isp_ctx_rdi_only_activated_state_machine_irq
+ [CAM_ISP_CTX_ACTIVATED_MAX] = {
+ /* SOF */
+ {
+ .irq_ops = {
+ NULL,
+ __cam_isp_ctx_rdi_only_sof_in_top_state,
+ __cam_isp_ctx_reg_upd_in_sof,
+ NULL,
+ NULL,
+ NULL,
+ },
+ },
+ /* APPLIED */
+ {
+ .irq_ops = {
+ __cam_isp_ctx_handle_error,
+ __cam_isp_ctx_rdi_only_sof_in_applied_state,
+ NULL,
+ NULL,
+ NULL,
+ __cam_isp_ctx_buf_done_in_applied,
+ },
+ },
+ /* EPOCH */
+ {
+ .irq_ops = {
+ __cam_isp_ctx_handle_error,
+ __cam_isp_ctx_rdi_only_sof_in_top_state,
+ NULL,
+ NULL,
+ NULL,
+ __cam_isp_ctx_buf_done_in_epoch,
+ },
+ },
+ /* BUBBLE*/
+ {
+ .irq_ops = {
+ __cam_isp_ctx_handle_error,
+ __cam_isp_ctx_rdi_only_sof_in_bubble_state,
+ NULL,
+ NULL,
+ NULL,
+ __cam_isp_ctx_buf_done_in_bubble,
+ },
+ },
+ /* BUBBLE APPLIED ie PRE_BUBBLE */
+ {
+ .irq_ops = {
+ __cam_isp_ctx_handle_error,
+ __cam_isp_ctx_rdi_only_sof_in_bubble_applied,
+ __cam_isp_ctx_rdi_only_reg_upd_in_bubble_applied_state,
+ NULL,
+ NULL,
+ __cam_isp_ctx_buf_done_in_bubble_applied,
+ },
+ },
+
+ /* HALT */
+ {
+ },
+};
+
+static int __cam_isp_ctx_rdi_only_apply_req_top_state(
+ struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
+{
+ int rc = 0;
+ struct cam_isp_context *ctx_isp =
+ (struct cam_isp_context *) ctx->ctx_priv;
+
+ CAM_DBG(CAM_ISP, "current substate %d",
+ ctx_isp->substate_activated);
+ rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
+ CAM_ISP_CTX_ACTIVATED_APPLIED);
+ CAM_DBG(CAM_ISP, "new substate %d", ctx_isp->substate_activated);
+
+ return rc;
+}
+
+static struct cam_ctx_ops
+ cam_isp_ctx_rdi_only_activated_state_machine
+ [CAM_ISP_CTX_ACTIVATED_MAX] = {
+ /* SOF */
+ {
+ .ioctl_ops = {},
+ .crm_ops = {
+ .apply_req = __cam_isp_ctx_rdi_only_apply_req_top_state,
+ },
+ .irq_ops = NULL,
+ },
+ /* APPLIED */
+ {
+ .ioctl_ops = {},
+ .crm_ops = {},
+ .irq_ops = NULL,
+ },
+ /* EPOCH */
+ {
+ .ioctl_ops = {},
+ .crm_ops = {
+ .apply_req = __cam_isp_ctx_rdi_only_apply_req_top_state,
+ },
+ .irq_ops = NULL,
+ },
+ /* PRE BUBBLE */
+ {
+ .ioctl_ops = {},
+ .crm_ops = {},
+ .irq_ops = NULL,
+ },
+ /* BUBBLE */
+ {
+ .ioctl_ops = {},
+ .crm_ops = {},
+ .irq_ops = NULL,
+ },
+ /* HALT */
+ {
+ .ioctl_ops = {},
+ .crm_ops = {},
+ .irq_ops = NULL,
+ },
+};
+
/* top level state machine */
static int __cam_isp_ctx_release_dev_in_top_state(struct cam_context *ctx,
struct cam_release_dev_cmd *cmd)
{
int rc = 0;
- int i;
struct cam_hw_release_args rel_arg;
- struct cam_ctx_request *req;
- struct cam_isp_ctx_req *req_isp;
struct cam_isp_context *ctx_isp =
(struct cam_isp_context *) ctx->ctx_priv;
+ struct cam_req_mgr_flush_request flush_req;
if (ctx_isp->hw_ctx) {
rel_arg.ctxt_to_hw_map = ctx_isp->hw_ctx;
@@ -698,33 +1314,28 @@
ctx->link_hdl = 0;
ctx->ctx_crm_intf = NULL;
ctx_isp->frame_id = 0;
+ ctx_isp->active_req_cnt = 0;
+ ctx_isp->reported_req_id = 0;
/*
* Ideally, we should never have any active request here.
* But we still add some sanity check code here to help the debug
*/
if (!list_empty(&ctx->active_req_list))
- pr_err("%s: Active list is empty.\n", __func__);
+ CAM_ERR(CAM_ISP, "Active list is not empty");
- /* flush the pending list */
- while (!list_empty(&ctx->pending_req_list)) {
- req = list_first_entry(&ctx->pending_req_list,
- struct cam_ctx_request, list);
- list_del_init(&req->list);
- req_isp = (struct cam_isp_ctx_req *) req->req_priv;
- pr_err("%s: signal fence in pending list. fence num %d\n",
- __func__, req_isp->num_fence_map_out);
- for (i = 0; i < req_isp->num_fence_map_out; i++) {
- if (req_isp->fence_map_out[i].sync_id != -1) {
- cam_sync_signal(
- req_isp->fence_map_out[i].sync_id,
- CAM_SYNC_STATE_SIGNALED_ERROR);
- }
- }
- list_add_tail(&req->list, &ctx->free_req_list);
- }
+ /* Flush all the pending request list */
+ flush_req.type = CAM_REQ_MGR_FLUSH_TYPE_ALL;
+ flush_req.link_hdl = ctx->link_hdl;
+ flush_req.dev_hdl = ctx->dev_hdl;
+
+ CAM_DBG(CAM_ISP, "try to flush pending list");
+ rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, &flush_req);
+
ctx->state = CAM_CTX_AVAILABLE;
- CDBG("%s: next state %d\n", __func__, ctx->state);
+
+ trace_cam_context_state("ISP", ctx);
+ CAM_DBG(CAM_ISP, "next state %d", ctx->state);
return rc;
}
@@ -742,7 +1353,7 @@
struct cam_isp_context *ctx_isp =
(struct cam_isp_context *) ctx->ctx_priv;
- CDBG("%s: get free request object......\n", __func__);
+ CAM_DBG(CAM_ISP, "get free request object......");
/* get free request */
spin_lock_bh(&ctx->lock);
@@ -754,7 +1365,7 @@
spin_unlock_bh(&ctx->lock);
if (!req) {
- pr_err("%s: No more request obj free\n", __func__);
+ CAM_ERR(CAM_ISP, "No more request obj free");
rc = -ENOMEM;
goto end;
}
@@ -766,20 +1377,20 @@
rc = cam_mem_get_cpu_buf((int32_t) cmd->packet_handle,
(uint64_t *) &packet_addr, &len);
if (rc != 0) {
- pr_err("%s: Can not get packet address\n", __func__);
+ CAM_ERR(CAM_ISP, "Can not get packet address");
rc = -EINVAL;
goto free_req;
}
packet = (struct cam_packet *) (packet_addr + cmd->offset);
- CDBG("%s: pack_handle %llx\n", __func__, cmd->packet_handle);
- CDBG("%s: packet address is 0x%llx\n", __func__, packet_addr);
- CDBG("%s: packet with length %zu, offset 0x%llx\n", __func__,
+ CAM_DBG(CAM_ISP, "pack_handle %llx", cmd->packet_handle);
+ CAM_DBG(CAM_ISP, "packet address is 0x%llx", packet_addr);
+ CAM_DBG(CAM_ISP, "packet with length %zu, offset 0x%llx",
len, cmd->offset);
- CDBG("%s: Packet request id 0x%llx\n", __func__,
+ CAM_DBG(CAM_ISP, "Packet request id %lld",
packet->header.request_id);
- CDBG("%s: Packet size 0x%x\n", __func__, packet->header.size);
- CDBG("%s: packet op %d\n", __func__, packet->header.op_code);
+ CAM_DBG(CAM_ISP, "Packet size 0x%x", packet->header.size);
+ CAM_DBG(CAM_ISP, "packet op %d", packet->header.op_code);
/* preprocess the configuration */
memset(&cfg, 0, sizeof(cfg));
@@ -792,13 +1403,12 @@
cfg.out_map_entries = req_isp->fence_map_out;
cfg.in_map_entries = req_isp->fence_map_in;
- CDBG("%s: try to prepare config packet......\n", __func__);
+ CAM_DBG(CAM_ISP, "try to prepare config packet......");
rc = ctx->hw_mgr_intf->hw_prepare_update(
ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
if (rc != 0) {
- pr_err("%s: Prepare config packet failed in HW layer\n",
- __func__);
+ CAM_ERR(CAM_ISP, "Prepare config packet failed in HW layer");
rc = -EFAULT;
goto free_req;
}
@@ -807,8 +1417,8 @@
req_isp->num_fence_map_in = cfg.num_in_map_entries;
req_isp->num_acked = 0;
- CDBG("%s: num_entry: %d, num fence out: %d, num fence in: %d\n",
- __func__, req_isp->num_cfg, req_isp->num_fence_map_out,
+ CAM_DBG(CAM_ISP, "num_entry: %d, num fence out: %d, num fence in: %d",
+ req_isp->num_cfg, req_isp->num_fence_map_out,
req_isp->num_fence_map_in);
req->request_id = packet->header.request_id;
@@ -820,20 +1430,20 @@
add_req.req_id = req->request_id;
rc = ctx->ctx_crm_intf->add_req(&add_req);
if (rc) {
- pr_err("%s: Error: Adding request id=%llu\n", __func__,
+ CAM_ERR(CAM_ISP, "Error: Adding request id=%llu",
req->request_id);
goto free_req;
}
}
- CDBG("%s: Packet request id 0x%llx\n", __func__,
+ CAM_DBG(CAM_ISP, "Packet request id 0x%llx",
packet->header.request_id);
spin_lock_bh(&ctx->lock);
list_add_tail(&req->list, &ctx->pending_req_list);
spin_unlock_bh(&ctx->lock);
- CDBG("%s: Preprocessing Config %lld successful\n", __func__,
+ CAM_DBG(CAM_ISP, "Preprocessing Config %lld successful",
req->request_id);
return rc;
@@ -856,26 +1466,28 @@
struct cam_hw_release_args release;
struct cam_isp_context *ctx_isp =
(struct cam_isp_context *) ctx->ctx_priv;
+ struct cam_isp_hw_cmd_args hw_cmd_args;
if (!ctx->hw_mgr_intf) {
- pr_err("HW interface is not ready!\n");
+ CAM_ERR(CAM_ISP, "HW interface is not ready");
rc = -EFAULT;
goto end;
}
- CDBG("%s: session_hdl 0x%x, num_resources %d, hdl type %d, res %lld\n",
- __func__, cmd->session_handle, cmd->num_resources,
+ CAM_DBG(CAM_ISP,
+ "session_hdl 0x%x, num_resources %d, hdl type %d, res %lld",
+ cmd->session_handle, cmd->num_resources,
cmd->handle_type, cmd->resource_hdl);
if (cmd->num_resources > CAM_ISP_CTX_RES_MAX) {
- pr_err("Too much resources in the acquire!\n");
+ CAM_ERR(CAM_ISP, "Too much resources in the acquire");
rc = -ENOMEM;
goto end;
}
/* for now we only support user pointer */
if (cmd->handle_type != 1) {
- pr_err("%s: Only user pointer is supported!", __func__);
+ CAM_ERR(CAM_ISP, "Only user pointer is supported");
rc = -EINVAL;
goto end;
}
@@ -887,8 +1499,8 @@
goto end;
}
- CDBG("%s: start copy %d resources from user\n",
- __func__, cmd->num_resources);
+ CAM_DBG(CAM_ISP, "start copy %d resources from user",
+ cmd->num_resources);
if (copy_from_user(isp_res, (void __user *)cmd->resource_hdl,
sizeof(*isp_res)*cmd->num_resources)) {
@@ -905,10 +1517,39 @@
rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
¶m);
if (rc != 0) {
- pr_err("Acquire device failed\n");
+ CAM_ERR(CAM_ISP, "Acquire device failed");
goto free_res;
}
+ /* Query the context has rdi only resource */
+ hw_cmd_args.ctxt_to_hw_map = param.ctxt_to_hw_map;
+ hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_IS_RDI_ONLY_CONTEXT;
+ rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
+ &hw_cmd_args);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "HW command failed");
+ goto free_hw;
+ }
+
+ if (hw_cmd_args.u.is_rdi_only_context) {
+ /*
+ * this context has rdi only resource assign rdi only
+ * state machine
+ */
+ CAM_DBG(CAM_ISP, "RDI only session Context");
+
+ ctx_isp->substate_machine_irq =
+ cam_isp_ctx_rdi_only_activated_state_machine_irq;
+ ctx_isp->substate_machine =
+ cam_isp_ctx_rdi_only_activated_state_machine;
+ } else {
+ CAM_DBG(CAM_ISP, "Session has PIX or PIX and RDI resources");
+ ctx_isp->substate_machine_irq =
+ cam_isp_ctx_activated_state_machine_irq;
+ ctx_isp->substate_machine =
+ cam_isp_ctx_activated_state_machine;
+ }
+
ctx_isp->hw_ctx = param.ctxt_to_hw_map;
req_hdl_param.session_hdl = cmd->session_handle;
@@ -918,11 +1559,11 @@
req_hdl_param.ops = ctx->crm_ctx_intf;
req_hdl_param.priv = ctx;
- CDBG("%s: get device handle form bridge\n", __func__);
+ CAM_DBG(CAM_ISP, "get device handle form bridge");
ctx->dev_hdl = cam_create_device_hdl(&req_hdl_param);
if (ctx->dev_hdl <= 0) {
rc = -EFAULT;
- pr_err("Can not create device handle\n");
+ CAM_ERR(CAM_ISP, "Can not create device handle");
goto free_hw;
}
cmd->dev_handle = ctx->dev_hdl;
@@ -932,7 +1573,8 @@
ctx->state = CAM_CTX_ACQUIRED;
- CDBG("%s:%d: Acquire success.\n", __func__, __LINE__);
+ trace_cam_context_state("ISP", ctx);
+ CAM_DBG(CAM_ISP, "Acquire success.");
kfree(isp_res);
return rc;
@@ -953,10 +1595,12 @@
rc = __cam_isp_ctx_config_dev_in_top_state(ctx, cmd);
- if (!rc && ctx->link_hdl)
+ if (!rc && ctx->link_hdl) {
ctx->state = CAM_CTX_READY;
+ trace_cam_context_state("ISP", ctx);
+ }
- CDBG("%s: next state %d\n", __func__, ctx->state);
+ CAM_DBG(CAM_ISP, "next state %d", ctx->state);
return rc;
}
@@ -965,16 +1609,18 @@
{
int rc = 0;
- CDBG("%s:%d: Enter.........\n", __func__, __LINE__);
+ CAM_DBG(CAM_ISP, "Enter.........");
ctx->link_hdl = link->link_hdl;
ctx->ctx_crm_intf = link->crm_cb;
/* change state only if we had the init config */
- if (!list_empty(&ctx->pending_req_list))
+ if (!list_empty(&ctx->pending_req_list)) {
ctx->state = CAM_CTX_READY;
+ trace_cam_context_state("ISP", ctx);
+ }
- CDBG("%s: next state %d\n", __func__, ctx->state);
+ CAM_DBG(CAM_ISP, "next state %d", ctx->state);
return rc;
}
@@ -1021,8 +1667,7 @@
if (list_empty(&ctx->pending_req_list)) {
/* should never happen */
- pr_err("%s: Start device with empty configuration\n",
- __func__);
+ CAM_ERR(CAM_ISP, "Start device with empty configuration");
rc = -EFAULT;
goto end;
} else {
@@ -1032,8 +1677,7 @@
req_isp = (struct cam_isp_ctx_req *) req->req_priv;
if (!ctx_isp->hw_ctx) {
- pr_err("%s:%d: Wrong hw context pointer.\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "Wrong hw context pointer.");
rc = -EFAULT;
goto end;
}
@@ -1042,6 +1686,8 @@
arg.num_hw_update_entries = req_isp->num_cfg;
ctx_isp->frame_id = 0;
+ ctx_isp->active_req_cnt = 0;
+ ctx_isp->reported_req_id = 0;
ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
/*
@@ -1050,14 +1696,16 @@
* irq handling comes early
*/
ctx->state = CAM_CTX_ACTIVATED;
+ trace_cam_context_state("ISP", ctx);
rc = ctx->hw_mgr_intf->hw_start(ctx->hw_mgr_intf->hw_mgr_priv, &arg);
if (rc) {
/* HW failure. user need to clean up the resource */
- pr_err("Start HW failed\n");
+ CAM_ERR(CAM_ISP, "Start HW failed");
ctx->state = CAM_CTX_READY;
+ trace_cam_context_state("ISP", ctx);
goto end;
}
- CDBG("%s: start device success\n", __func__);
+ CAM_DBG(CAM_ISP, "start device success");
end:
return rc;
}
@@ -1070,6 +1718,7 @@
ctx->link_hdl = 0;
ctx->ctx_crm_intf = NULL;
ctx->state = CAM_CTX_ACQUIRED;
+ trace_cam_context_state("ISP", ctx);
return rc;
}
@@ -1089,7 +1738,7 @@
spin_lock_bh(&ctx->lock);
ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT;
spin_unlock_bh(&ctx->lock);
- CDBG("%s: next substate %d", __func__, ctx_isp->substate_activated);
+ CAM_DBG(CAM_ISP, "next substate %d", ctx_isp->substate_activated);
/* stop hw first */
if (ctx_isp->hw_ctx) {
@@ -1103,8 +1752,8 @@
struct cam_ctx_request, list);
list_del_init(&req->list);
req_isp = (struct cam_isp_ctx_req *) req->req_priv;
- CDBG("%s: signal fence in pending list. fence num %d\n",
- __func__, req_isp->num_fence_map_out);
+ CAM_DBG(CAM_ISP, "signal fence in pending list. fence num %d",
+ req_isp->num_fence_map_out);
for (i = 0; i < req_isp->num_fence_map_out; i++)
if (req_isp->fence_map_out[i].sync_id != -1) {
cam_sync_signal(
@@ -1119,8 +1768,8 @@
struct cam_ctx_request, list);
list_del_init(&req->list);
req_isp = (struct cam_isp_ctx_req *) req->req_priv;
- CDBG("%s: signal fence in active list. fence num %d\n",
- __func__, req_isp->num_fence_map_out);
+ CAM_DBG(CAM_ISP, "signal fence in active list. fence num %d",
+ req_isp->num_fence_map_out);
for (i = 0; i < req_isp->num_fence_map_out; i++)
if (req_isp->fence_map_out[i].sync_id != -1) {
cam_sync_signal(
@@ -1130,8 +1779,10 @@
list_add_tail(&req->list, &ctx->free_req_list);
}
ctx_isp->frame_id = 0;
+ ctx_isp->active_req_cnt = 0;
+ ctx_isp->reported_req_id = 0;
- CDBG("%s: next state %d", __func__, ctx->state);
+ CAM_DBG(CAM_ISP, "next state %d", ctx->state);
return rc;
}
@@ -1142,6 +1793,7 @@
__cam_isp_ctx_stop_dev_in_activated_unlock(ctx);
ctx->state = CAM_CTX_ACQUIRED;
+ trace_cam_context_state("ISP", ctx);
return rc;
}
@@ -1169,6 +1821,7 @@
ctx->ctx_crm_intf = NULL;
ctx->state = CAM_CTX_AVAILABLE;
+ trace_cam_context_state("ISP", ctx);
return rc;
}
@@ -1180,21 +1833,22 @@
struct cam_isp_context *ctx_isp =
(struct cam_isp_context *) ctx->ctx_priv;
- CDBG("%s: Enter: apply req in Substate %d\n",
- __func__, ctx_isp->substate_activated);
+ trace_cam_apply_req("ISP", apply);
+ CAM_DBG(CAM_ISP, "Enter: apply req in Substate %d request_id:%lld",
+ ctx_isp->substate_activated, apply->request_id);
if (ctx_isp->substate_machine[ctx_isp->substate_activated].
crm_ops.apply_req) {
rc = ctx_isp->substate_machine[ctx_isp->substate_activated].
crm_ops.apply_req(ctx, apply);
} else {
- pr_err("%s: No handle function in activated substate %d\n",
- __func__, ctx_isp->substate_activated);
+ CAM_ERR(CAM_ISP, "No handle function in activated substate %d",
+ ctx_isp->substate_activated);
rc = -EFAULT;
}
if (rc)
- pr_err("%s: Apply failed in active substate %d\n",
- __func__, ctx_isp->substate_activated);
+ CAM_ERR(CAM_ISP, "Apply failed in active substate %d",
+ ctx_isp->substate_activated);
return rc;
}
@@ -1209,18 +1863,22 @@
(struct cam_isp_context *)ctx->ctx_priv;
spin_lock_bh(&ctx->lock);
- CDBG("%s: Enter: State %d, Substate %d, evt id %d\n",
- __func__, ctx->state, ctx_isp->substate_activated, evt_id);
+
+ trace_cam_isp_activated_irq(ctx, ctx_isp->substate_activated, evt_id,
+ __cam_isp_ctx_get_event_ts(evt_id, evt_data));
+
+ CAM_DBG(CAM_ISP, "Enter: State %d, Substate %d, evt id %d",
+ ctx->state, ctx_isp->substate_activated, evt_id);
if (ctx_isp->substate_machine_irq[ctx_isp->substate_activated].
irq_ops[evt_id]) {
rc = ctx_isp->substate_machine_irq[ctx_isp->substate_activated].
irq_ops[evt_id](ctx_isp, evt_data);
} else {
- CDBG("%s: No handle function for substate %d\n", __func__,
+ CAM_DBG(CAM_ISP, "No handle function for substate %d",
ctx_isp->substate_activated);
}
- CDBG("%s: Exit: State %d Substate %d\n",
- __func__, ctx->state, ctx_isp->substate_activated);
+ CAM_DBG(CAM_ISP, "Exit: State %d Substate %d",
+ ctx->state, ctx_isp->substate_activated);
spin_unlock_bh(&ctx->lock);
return rc;
}
@@ -1252,6 +1910,7 @@
.link = __cam_isp_ctx_link_in_acquired,
.unlink = __cam_isp_ctx_unlink_in_acquired,
.get_dev_info = __cam_isp_ctx_get_dev_info_in_acquired,
+ .flush_req = __cam_isp_ctx_flush_req_in_top_state,
},
.irq_ops = NULL,
},
@@ -1264,6 +1923,7 @@
},
.crm_ops = {
.unlink = __cam_isp_ctx_unlink_in_ready,
+ .flush_req = __cam_isp_ctx_flush_req_in_ready,
},
.irq_ops = NULL,
},
@@ -1276,6 +1936,7 @@
},
.crm_ops = {
.apply_req = __cam_isp_ctx_apply_req,
+ .flush_req = __cam_isp_ctx_flush_req_in_top_state,
},
.irq_ops = __cam_isp_ctx_handle_irq_in_activated,
},
@@ -1292,7 +1953,7 @@
int i;
if (!ctx || !ctx_base) {
- pr_err("%s: Invalid Context\n", __func__);
+ CAM_ERR(CAM_ISP, "Invalid Context");
goto err;
}
@@ -1301,6 +1962,8 @@
ctx->base = ctx_base;
ctx->frame_id = 0;
+ ctx->active_req_cnt = 0;
+ ctx->reported_req_id = 0;
ctx->hw_ctx = NULL;
ctx->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
ctx->substate_machine = cam_isp_ctx_activated_state_machine;
@@ -1315,7 +1978,7 @@
rc = cam_context_init(ctx_base, crm_node_intf, hw_intf, ctx->req_base,
CAM_CTX_REQ_MAX);
if (rc) {
- pr_err("%s: Camera Context Base init failed\n", __func__);
+ CAM_ERR(CAM_ISP, "Camera Context Base init failed");
goto err;
}
@@ -1335,7 +1998,7 @@
cam_context_deinit(ctx->base);
if (ctx->substate_activated != CAM_ISP_CTX_ACTIVATED_SOF)
- pr_err("%s: ISP context substate is invalid\n", __func__);
+ CAM_ERR(CAM_ISP, "ISP context substate is invalid");
memset(ctx, 0, sizeof(*ctx));
return rc;
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
index dae1dda..b0b883c 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
@@ -106,7 +106,9 @@
* @req_base: Common request object storage
* @req_isp: ISP private request object storage
* @hw_ctx: HW object returned by the acquire device command
- *
+ * @sof_timestamp_val: Captured time stamp value at sof hw event
+ * @active_req_cnt: Counter for the active request
+ * @reported_req_id: Last reported request id
*/
struct cam_isp_context {
struct cam_context *base;
@@ -120,6 +122,9 @@
struct cam_isp_ctx_req req_isp[CAM_CTX_REQ_MAX];
void *hw_ctx;
+ uint64_t sof_timestamp_val;
+ int32_t active_req_cnt;
+ int64_t reported_req_id;
};
/**
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.c
index 4c819cf..2bf7795 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.c
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.c
@@ -25,6 +25,7 @@
#include "cam_hw_mgr_intf.h"
#include "cam_isp_hw_mgr_intf.h"
#include "cam_node.h"
+#include "cam_debug_util.h"
static struct cam_isp_dev g_isp_dev;
@@ -44,13 +45,13 @@
for (i = 0; i < CAM_CTX_MAX; i++) {
rc = cam_isp_context_deinit(&g_isp_dev.ctx_isp[i]);
if (rc)
- pr_err("%s: ISP context %d deinit failed\n",
- __func__, i);
+ CAM_ERR(CAM_ISP, "ISP context %d deinit failed",
+ i);
}
rc = cam_subdev_remove(&g_isp_dev.sd);
if (rc)
- pr_err("%s: Unregister failed\n", __func__);
+ CAM_ERR(CAM_ISP, "Unregister failed");
memset(&g_isp_dev, 0, sizeof(g_isp_dev));
return 0;
@@ -67,7 +68,7 @@
rc = cam_subdev_probe(&g_isp_dev.sd, pdev, CAM_ISP_DEV_NAME,
CAM_IFE_DEVICE_TYPE);
if (rc) {
- pr_err("%s: ISP cam_subdev_probe failed!\n", __func__);
+ CAM_ERR(CAM_ISP, "ISP cam_subdev_probe failed!");
goto err;
}
node = (struct cam_node *) g_isp_dev.sd.token;
@@ -75,7 +76,7 @@
memset(&hw_mgr_intf, 0, sizeof(hw_mgr_intf));
rc = cam_isp_hw_mgr_init(pdev->dev.of_node, &hw_mgr_intf);
if (rc != 0) {
- pr_err("%s: Can not initialized ISP HW manager!\n", __func__);
+ CAM_ERR(CAM_ISP, "Can not initialized ISP HW manager!");
goto unregister;
}
@@ -85,7 +86,7 @@
&node->crm_node_intf,
&node->hw_mgr_intf);
if (rc) {
- pr_err("%s: ISP context init failed!\n", __func__);
+ CAM_ERR(CAM_ISP, "ISP context init failed!");
goto unregister;
}
}
@@ -93,11 +94,11 @@
rc = cam_node_init(node, &hw_mgr_intf, g_isp_dev.ctx, CAM_CTX_MAX,
CAM_ISP_DEV_NAME);
if (rc) {
- pr_err("%s: ISP node init failed!\n", __func__);
+ CAM_ERR(CAM_ISP, "ISP node init failed!");
goto unregister;
}
- pr_info("%s: Camera ISP probe complete\n", __func__);
+ CAM_INFO(CAM_ISP, "Camera ISP probe complete");
return 0;
unregister:
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/Makefile
index 2c6eaba..7e3c353 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/Makefile
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/Makefile
@@ -7,6 +7,7 @@
ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include
ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller
ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
obj-$(CONFIG_SPECTRA_CAMERA) += hw_utils/ isp_hw/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_isp_hw_mgr.o cam_ife_hw_mgr.o
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
index 2bc4b00..4a5b1c3 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
@@ -24,9 +24,7 @@
#include "cam_ife_hw_mgr.h"
#include "cam_cdm_intf_api.h"
#include "cam_packet_util.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
#define CAM_IFE_HW_ENTRIES_MAX 20
@@ -41,7 +39,7 @@
struct cam_query_cap_cmd *query = hw_caps_args;
struct cam_isp_query_cap_cmd query_isp;
- CDBG("%s: enter\n", __func__);
+ CAM_DBG(CAM_ISP, "enter");
if (copy_from_user(&query_isp, (void __user *)query->caps_handle,
sizeof(struct cam_isp_query_cap_cmd))) {
@@ -66,7 +64,7 @@
sizeof(struct cam_isp_query_cap_cmd)))
rc = -EFAULT;
- CDBG("%s: exit rc :%d !\n", __func__, rc);
+ CAM_DBG(CAM_ISP, "exit rc :%d !", rc);
return rc;
}
@@ -100,7 +98,7 @@
if (!isp_hw_res->hw_res[i])
continue;
hw_intf = isp_hw_res->hw_res[i]->hw_intf;
- CDBG("%s: enabled vfe hardware %d\n", __func__,
+ CAM_DBG(CAM_ISP, "enabled vfe hardware %d",
hw_intf->hw_idx);
if (hw_intf->hw_ops.init) {
rc = hw_intf->hw_ops.init(hw_intf->hw_priv,
@@ -113,7 +111,7 @@
return 0;
err:
- pr_err("%s: INIT HW res failed! (type:%d, id:%d)", __func__,
+ CAM_ERR(CAM_ISP, "INIT HW res failed! (type:%d, id:%d)",
isp_hw_res->res_type, isp_hw_res->res_id);
return rc;
}
@@ -134,19 +132,18 @@
isp_hw_res->hw_res[i],
sizeof(struct cam_isp_resource_node));
if (rc) {
- pr_err("%s: Can not start HW resources!\n",
- __func__);
+ CAM_ERR(CAM_ISP, "Can not start HW resources!");
goto err;
}
} else {
- pr_err("%s:function null\n", __func__);
+ CAM_ERR(CAM_ISP, "function null");
goto err;
}
}
return 0;
err:
- pr_err("%s: Start hw res failed! (type:%d, id:%d)", __func__,
+ CAM_ERR(CAM_ISP, "Start hw res failed! (type:%d, id:%d)",
isp_hw_res->res_type, isp_hw_res->res_id);
return rc;
}
@@ -166,7 +163,7 @@
isp_hw_res->hw_res[i],
sizeof(struct cam_isp_resource_node));
else
- pr_err("%s:stop null\n", __func__);
+ CAM_ERR(CAM_ISP, "stop null");
}
}
@@ -213,7 +210,7 @@
struct cam_ife_hw_mgr_res, list);
list_del_init(&res_ptr->list);
} else {
- pr_err("No more free ife hw mgr ctx!\n");
+ CAM_ERR(CAM_ISP, "No more free ife hw mgr ctx!");
rc = -1;
}
*res = res_ptr;
@@ -237,11 +234,12 @@
isp_hw_res->hw_res[i],
sizeof(struct cam_isp_resource_node));
if (rc)
- pr_err("%s:Release hw resrouce id %d failed!\n",
- __func__, isp_hw_res->res_id);
+ CAM_ERR(CAM_ISP,
+ "Release hw resrouce id %d failed!",
+ isp_hw_res->res_id);
isp_hw_res->hw_res[i] = NULL;
} else
- pr_err("%s:Release null\n", __func__);
+ CAM_ERR(CAM_ISP, "Release null");
}
/* caller should make sure the resource is in a list */
list_del_init(&isp_hw_res->list);
@@ -328,8 +326,8 @@
ife_ctx->common.cb_priv = NULL;
memset(ife_ctx->common.event_cb, 0, sizeof(ife_ctx->common.event_cb));
- CDBG("%s:%d: release context completed ctx id:%d\n",
- __func__, __LINE__, ife_ctx->ctx_index);
+ CAM_DBG(CAM_ISP, "release context completed ctx id:%d",
+ ife_ctx->ctx_index);
return 0;
}
@@ -364,7 +362,7 @@
struct cam_ife_hw_mgr_ctx, list);
list_del_init(&ctx_ptr->list);
} else {
- pr_err("No more free ife hw mgr ctx!\n");
+ CAM_ERR(CAM_ISP, "No more free ife hw mgr ctx!");
rc = -1;
}
*ife_ctx = ctx_ptr;
@@ -381,7 +379,7 @@
uint32_t i;
if (!ctx->num_base) {
- CDBG("%s: Add split id = %d for base idx = %d\n", __func__,
+ CAM_DBG(CAM_ISP, "Add split id = %d for base idx = %d",
split_id, base_idx);
ctx->base[0].split_id = split_id;
ctx->base[0].idx = base_idx;
@@ -400,8 +398,8 @@
}
if (i == CAM_IFE_HW_NUM_MAX) {
- CDBG("%s: Add split id = %d for base idx = %d\n",
- __func__, split_id, base_idx);
+ CAM_DBG(CAM_ISP, "Add split id = %d for base idx = %d",
+ split_id, base_idx);
ctx->base[ctx->num_base].split_id = split_id;
ctx->base[ctx->num_base].idx = base_idx;
ctx->num_base++;
@@ -417,7 +415,7 @@
uint32_t i;
if (list_empty(&ctx->res_list_ife_src)) {
- pr_err("%s: Error! Mux List empty\n", __func__);
+ CAM_ERR(CAM_ISP, "Error! Mux List empty");
return -ENODEV;
}
@@ -440,7 +438,7 @@
res->hw_intf->hw_idx);
}
}
- CDBG("%s: ctx base num = %d\n", __func__, ctx->num_base);
+ CAM_DBG(CAM_ISP, "ctx base num = %d", ctx->num_base);
return 0;
}
@@ -474,7 +472,7 @@
vfe_out_res_id = CAM_ISP_IFE_OUT_RES_RDI_3;
break;
default:
- pr_err("%s: invalid resource type\n", __func__);
+ CAM_ERR(CAM_ISP, "invalid resource type");
goto err;
}
@@ -489,6 +487,7 @@
continue;
vfe_acquire.vfe_out.cdm_ops = ife_ctx->cdm_ops;
+ vfe_acquire.vfe_out.ctx = ife_ctx;
vfe_acquire.vfe_out.out_port_info = out_port;
vfe_acquire.vfe_out.split_id = CAM_ISP_HW_SPLIT_LEFT;
vfe_acquire.vfe_out.unique_id = ife_ctx->ctx_index;
@@ -497,15 +496,15 @@
&vfe_acquire,
sizeof(struct cam_vfe_acquire_args));
if (rc) {
- pr_err("%s: Can not acquire out resource 0x%x\n",
- __func__, out_port->res_type);
+ CAM_ERR(CAM_ISP, "Can not acquire out resource 0x%x",
+ out_port->res_type);
goto err;
}
break;
}
if (i == in_port->num_out_res) {
- pr_err("%s: Can not acquire out resource\n", __func__);
+ CAM_ERR(CAM_ISP, "Can not acquire out resource");
goto err;
}
@@ -535,16 +534,16 @@
out_port = &in_port->data[i];
k = out_port->res_type & 0xFF;
if (k >= CAM_IFE_HW_OUT_RES_MAX) {
- pr_err("%s: invalid output resource type 0x%x\n",
- __func__, out_port->res_type);
+ CAM_ERR(CAM_ISP, "invalid output resource type 0x%x",
+ out_port->res_type);
continue;
}
if (cam_ife_hw_mgr_is_rdi_res(out_port->res_type))
continue;
- CDBG("%s: res_type 0x%x\n",
- __func__, out_port->res_type);
+ CAM_DBG(CAM_ISP, "res_type 0x%x",
+ out_port->res_type);
ife_out_res = &ife_ctx->res_list_ife_out[k];
ife_out_res->is_dual_vfe = in_port->usage_type;
@@ -552,6 +551,7 @@
vfe_acquire.rsrc_type = CAM_ISP_RESOURCE_VFE_OUT;
vfe_acquire.tasklet = ife_ctx->common.tasklet_info;
vfe_acquire.vfe_out.cdm_ops = ife_ctx->cdm_ops;
+ vfe_acquire.vfe_out.ctx = ife_ctx;
vfe_acquire.vfe_out.out_port_info = out_port;
vfe_acquire.vfe_out.is_dual = ife_src_res->is_dual_vfe;
vfe_acquire.vfe_out.unique_id = ife_ctx->ctx_index;
@@ -585,15 +585,16 @@
&vfe_acquire,
sizeof(struct cam_vfe_acquire_args));
if (rc) {
- pr_err("%s:Can not acquire out resource 0x%x\n",
- __func__, out_port->res_type);
+ CAM_ERR(CAM_ISP,
+ "Can not acquire out resource 0x%x",
+ out_port->res_type);
goto err;
}
ife_out_res->hw_res[j] =
vfe_acquire.vfe_out.rsrc_node;
- CDBG("%s: resource type :0x%x res id:0x%x\n",
- __func__, ife_out_res->hw_res[j]->res_type,
+ CAM_DBG(CAM_ISP, "resource type :0x%x res id:0x%x",
+ ife_out_res->hw_res[j]->res_type,
ife_out_res->hw_res[j]->res_id);
}
@@ -633,8 +634,7 @@
ife_src_res, in_port);
break;
default:
- pr_err("%s: Fatal: Unknown IFE SRC resource!\n",
- __func__);
+ CAM_ERR(CAM_ISP, "Fatal: Unknown IFE SRC resource!");
break;
}
if (rc)
@@ -668,7 +668,7 @@
rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list,
&ife_src_res);
if (rc) {
- pr_err("%s: No more free hw mgr resource!\n", __func__);
+ CAM_ERR(CAM_ISP, "No more free hw mgr resource!");
goto err;
}
cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_src,
@@ -708,8 +708,7 @@
vfe_acquire.vfe_in.sync_mode = CAM_ISP_HW_SYNC_NONE;
break;
default:
- pr_err("%s: Wrong IFE CSID Resource Node!\n",
- __func__);
+ CAM_ERR(CAM_ISP, "Wrong IFE CSID Resource Node!");
goto err;
}
ife_src_res->res_type = vfe_acquire.rsrc_type;
@@ -734,13 +733,15 @@
&vfe_acquire,
sizeof(struct cam_vfe_acquire_args));
if (rc) {
- pr_err("%s:Can not acquire IFE HW res %d!\n",
- __func__, csid_res->res_id);
+ CAM_ERR(CAM_ISP,
+ "Can not acquire IFE HW res %d",
+ csid_res->res_id);
goto err;
}
ife_src_res->hw_res[i] = vfe_acquire.vfe_in.rsrc_node;
- CDBG("%s:acquire success res type :0x%x res id:0x%x\n",
- __func__, ife_src_res->hw_res[i]->res_type,
+ CAM_DBG(CAM_ISP,
+ "acquire success res type :0x%x res id:0x%x",
+ ife_src_res->hw_res[i]->res_type,
ife_src_res->hw_res[i]->res_id);
}
@@ -777,7 +778,7 @@
rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list, &csid_res);
if (rc) {
- pr_err("%s: No more free hw mgr resource!\n", __func__);
+ CAM_ERR(CAM_ISP, "No more free hw mgr resource!");
goto err;
}
cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_csid, &csid_res);
@@ -808,13 +809,12 @@
}
if (i == CAM_IFE_CSID_HW_NUM_MAX) {
- pr_err("%s: Can not acquire ife csid ipp resrouce!\n",
- __func__);
+ CAM_ERR(CAM_ISP, "Can not acquire ife csid ipp resource!");
goto err;
}
- CDBG("%s: acquired csid(%d) left ipp resrouce successfully!\n",
- __func__, i);
+ CAM_DBG(CAM_ISP, "acquired csid(%d) left ipp resource successfully!",
+ i);
csid_res->res_type = CAM_ISP_RESOURCE_PIX_PATH;
csid_res->res_id = CAM_IFE_PIX_PATH_RES_IPP;
@@ -839,14 +839,14 @@
}
if (j == CAM_IFE_CSID_HW_NUM_MAX) {
- pr_err("%s: Can not acquire ife csid rdi resrouce!\n",
- __func__);
+ CAM_ERR(CAM_ISP,
+ "Can not acquire ife csid rdi resrouce!");
goto err;
}
csid_res->hw_res[1] = csid_acquire.node_res;
- CDBG("%s:acquired csid(%d)right ipp resrouce successfully!\n",
- __func__, j);
+ CAM_DBG(CAM_ISP,
+ "acquired csid(%d)right ipp resrouce successfully!", j);
}
csid_res->parent = &ife_ctx->res_list_ife_in;
@@ -879,7 +879,7 @@
break;
default:
path_id = CAM_IFE_PIX_PATH_RES_MAX;
- CDBG("%s: maximum rdi output type exceeded\n", __func__);
+ CAM_DBG(CAM_ISP, "maximum rdi output type exceeded");
break;
}
@@ -910,7 +910,7 @@
rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list,
&csid_res);
if (rc) {
- pr_err("%s: No more free hw mgr resource!\n",
- __func__);
+ CAM_ERR(CAM_ISP,
+ "No more free hw mgr resource!");
goto err;
}
@@ -928,6 +928,7 @@
csid_acquire.res_type = CAM_ISP_RESOURCE_PIX_PATH;
csid_acquire.cid = cid_res_id;
csid_acquire.in_port = in_port;
+ csid_acquire.out_port = out_port;
csid_acquire.sync_mode = CAM_ISP_HW_SYNC_NONE;
for (j = 0; j < CAM_IFE_CSID_HW_NUM_MAX; j++) {
@@ -944,8 +945,8 @@
}
if (j == CAM_IFE_CSID_HW_NUM_MAX) {
- pr_err("%s: Can not acquire ife csid rdi resrouce!\n",
- __func__);
+ CAM_ERR(CAM_ISP,
+ "Can not acquire ife csid rdi resrouce!");
goto err;
}
@@ -978,7 +979,7 @@
ife_ctx->res_list_ife_in.res_id = in_port->res_type;
ife_ctx->res_list_ife_in.is_dual_vfe = in_port->usage_type;
} else if (ife_ctx->res_list_ife_in.res_id != in_port->res_type) {
- pr_err("%s: No Free resource for this context!\n", __func__);
+ CAM_ERR(CAM_ISP, "No Free resource for this context!");
goto err;
} else {
/* else do nothing */
@@ -1032,7 +1033,7 @@
/* no dual vfe for TPG */
if ((in_port->res_type == CAM_ISP_IFE_IN_RES_TPG) &&
(in_port->usage_type != 0)) {
- pr_err("%s: No Dual VFE on TPG input!\n", __func__);
+ CAM_ERR(CAM_ISP, "No Dual VFE on TPG input!");
goto err;
}
@@ -1040,7 +1041,7 @@
rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list, &cid_res);
if (rc) {
- pr_err("%s: No more free hw mgr resource!\n", __func__);
+ CAM_ERR(CAM_ISP, "No more free hw mgr resource!");
goto err;
}
cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_cid, &cid_res);
@@ -1062,8 +1063,7 @@
}
if (i == CAM_IFE_CSID_HW_NUM_MAX || !csid_acquire.node_res) {
- pr_err("%s: Can not acquire ife csid rdi resrouce!\n",
- __func__);
+ CAM_ERR(CAM_ISP, "Can not acquire ife csid rdi resource!");
goto err;
}
@@ -1093,8 +1093,8 @@
}
if (j == CAM_IFE_CSID_HW_NUM_MAX) {
- pr_err("%s: Can not acquire ife csid rdi resrouce!\n",
- __func__);
+ CAM_ERR(CAM_ISP,
+ "Can not acquire ife csid rdi resrouce!");
goto err;
}
cid_res->hw_res[1] = csid_acquire.node_res;
@@ -1110,7 +1110,8 @@
}
static int cam_ife_mgr_acquire_hw_for_ctx(
struct cam_ife_hw_mgr_ctx *ife_ctx,
- struct cam_isp_in_port_info *in_port)
+ struct cam_isp_in_port_info *in_port,
+ uint32_t *num_pix_port, uint32_t *num_rdi_port)
{
int rc = -1;
int is_dual_vfe = 0;
@@ -1123,16 +1124,14 @@
/* get root node resource */
rc = cam_ife_hw_mgr_acquire_res_root(ife_ctx, in_port);
if (rc) {
- pr_err("%s:%d: Can not acquire csid rx resource!\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "Can not acquire csid rx resource!");
goto err;
}
/* get cid resource */
rc = cam_ife_mgr_acquire_cid_res(ife_ctx, in_port, &cid_res_id);
if (rc) {
- pr_err("%s%d: Acquire IFE CID resource Failed!\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "Acquire IFE CID resource Failed!");
goto err;
}
@@ -1140,7 +1139,7 @@
&pixel_count, &rdi_count);
if (!pixel_count && !rdi_count) {
- pr_err("%s: Error! no PIX or RDI resource\n", __func__);
+ CAM_ERR(CAM_ISP, "Error! no PIX or RDI resource");
return -EINVAL;
}
@@ -1149,8 +1148,8 @@
rc = cam_ife_hw_mgr_acquire_res_ife_csid_ipp(ife_ctx, in_port,
cid_res_id);
if (rc) {
- pr_err("%s%d: Acquire IFE CSID IPP resource Failed!\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ISP,
+ "Acquire IFE CSID IPP resource Failed!");
goto err;
}
}
@@ -1160,8 +1159,8 @@
rc = cam_ife_hw_mgr_acquire_res_ife_csid_rdi(ife_ctx, in_port,
cid_res_id);
if (rc) {
- pr_err("%s%d: Acquire IFE CSID RDI resource Failed!\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ISP,
+ "Acquire IFE CSID RDI resource Failed!");
goto err;
}
}
@@ -1169,18 +1168,19 @@
/* get ife src resource */
rc = cam_ife_hw_mgr_acquire_res_ife_src(ife_ctx, in_port);
if (rc) {
- pr_err("%s%d: Acquire IFE SRC resource Failed!\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "Acquire IFE SRC resource Failed!");
goto err;
}
rc = cam_ife_hw_mgr_acquire_res_ife_out(ife_ctx, in_port);
if (rc) {
- pr_err("%s%d: Acquire IFE OUT resource Failed!\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "Acquire IFE OUT resource Failed!");
goto err;
}
+ *num_pix_port += pixel_count;
+ *num_rdi_port += rdi_count;
+
return 0;
err:
/* release resource at the acquire entry funciton */
@@ -1190,11 +1190,11 @@
void cam_ife_cam_cdm_callback(uint32_t handle, void *userdata,
enum cam_cdm_cb_status status, uint32_t cookie)
{
- CDBG("%s: Called by CDM hdl=%x, udata=%pK, status=%d, cookie=%d\n",
- __func__, handle, userdata, status, cookie);
+ CAM_DBG(CAM_ISP,
+ "Called by CDM hdl=%x, udata=%pK, status=%d, cookie=%d",
+ handle, userdata, status, cookie);
}
-
/* entry function: acquire_hw */
static int cam_ife_mgr_acquire_hw(void *hw_mgr_priv,
void *acquire_hw_args)
@@ -1206,19 +1206,21 @@
struct cam_ife_hw_mgr_ctx *ife_ctx;
struct cam_isp_in_port_info *in_port = NULL;
struct cam_isp_resource *isp_resource = NULL;
- struct cam_cdm_acquire_data cdm_acquire;
+ struct cam_cdm_acquire_data cdm_acquire;
+ uint32_t num_pix_port = 0;
+ uint32_t num_rdi_port = 0;
- CDBG("%s: Enter...\n", __func__);
+ CAM_DBG(CAM_ISP, "Enter...");
if (!acquire_args || acquire_args->num_acq <= 0) {
- pr_err("%s: Nothing to acquire. Seems like error\n", __func__);
+ CAM_ERR(CAM_ISP, "Nothing to acquire. Seems like error");
return -EINVAL;
}
/* get the ife ctx */
rc = cam_ife_hw_mgr_get_ctx(&ife_hw_mgr->free_ctx_list, &ife_ctx);
if (rc || !ife_ctx) {
- pr_err("Get ife hw context failed!\n");
+ CAM_ERR(CAM_ISP, "Get ife hw context failed!");
goto err;
}
@@ -1245,12 +1247,12 @@
cdm_acquire.id = CAM_CDM_VIRTUAL;
cdm_acquire.cam_cdm_callback = cam_ife_cam_cdm_callback;
if (!cam_cdm_acquire(&cdm_acquire)) {
- CDBG("Successfully acquired the CDM HW hdl=%x\n",
+ CAM_DBG(CAM_ISP, "Successfully acquired the CDM HW hdl=%x",
cdm_acquire.handle);
ife_ctx->cdm_handle = cdm_acquire.handle;
ife_ctx->cdm_ops = cdm_acquire.ops;
} else {
- pr_err("Failed to acquire the CDM HW\n");
+ CAM_ERR(CAM_ISP, "Failed to acquire the CDM HW");
goto err;
}
@@ -1261,32 +1263,38 @@
if (isp_resource[i].resource_id != CAM_ISP_RES_ID_PORT)
continue;
- CDBG("%s: start copy from user handle %lld with len = %d\n",
- __func__, isp_resource[i].res_hdl,
+ CAM_DBG(CAM_ISP,
+ "start copy from user handle %lld with len = %d",
+ isp_resource[i].res_hdl,
isp_resource[i].length);
in_port = memdup_user((void __user *)isp_resource[i].res_hdl,
isp_resource[i].length);
if (in_port > 0) {
- rc = cam_ife_mgr_acquire_hw_for_ctx(ife_ctx, in_port);
+ rc = cam_ife_mgr_acquire_hw_for_ctx(ife_ctx, in_port,
+ &num_pix_port, &num_rdi_port);
kfree(in_port);
if (rc) {
- pr_err("%s: can not acquire resource!\n",
- __func__);
+ CAM_ERR(CAM_ISP, "can not acquire resource");
goto free_res;
}
} else {
- pr_err("%s: copy from user failed with in_port = %pK",
- __func__, in_port);
+ CAM_ERR(CAM_ISP,
+ "copy from user failed with in_port = %pK",
+ in_port);
rc = -EFAULT;
goto free_res;
}
}
+
+ /* Check whether context has only RDI resource */
+ if (!num_pix_port)
+ ife_ctx->is_rdi_only_context = 1;
+
/* Process base info */
rc = cam_ife_mgr_process_base_info(ife_ctx);
if (rc) {
- pr_err("%s: Error process) base info!\n",
- __func__);
+ CAM_ERR(CAM_ISP, "Error processing base info!");
return -EINVAL;
}
@@ -1295,14 +1303,14 @@
cam_ife_hw_mgr_put_ctx(&ife_hw_mgr->used_ctx_list, &ife_ctx);
- CDBG("%s: Exit...(success)!\n", __func__);
+ CAM_DBG(CAM_ISP, "Exit...(success)!");
return 0;
free_res:
cam_ife_hw_mgr_release_hw_for_ctx(ife_ctx);
cam_ife_hw_mgr_put_ctx(&ife_hw_mgr->free_ctx_list, &ife_ctx);
err:
- CDBG("%s: Exit...(rc=%d)!\n", __func__, rc);
+ CAM_DBG(CAM_ISP, "Exit...(rc=%d)!", rc);
return rc;
}
@@ -1316,25 +1324,25 @@
struct cam_cdm_bl_request *cdm_cmd;
struct cam_ife_hw_mgr_ctx *ctx;
- CDBG("%s: Enter\n", __func__);
+ CAM_DBG(CAM_ISP, "Enter");
if (!hw_mgr_priv || !config_hw_args) {
- pr_err("%s%d: Invalid arguments\n", __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "Invalid arguments");
return -EINVAL;
}
cfg = config_hw_args;
ctx = (struct cam_ife_hw_mgr_ctx *)cfg->ctxt_to_hw_map;
if (!ctx) {
- pr_err("%s: Fatal: Invalid context is used!\n", __func__);
+ CAM_ERR(CAM_ISP, "Fatal: Invalid context is used!");
return -EPERM;
}
if (!ctx->ctx_in_use || !ctx->cdm_cmd) {
- pr_err("%s: Invalid context parameters !\n", __func__);
+ CAM_ERR(CAM_ISP, "Invalid context parameters !");
return -EPERM;
}
- CDBG("%s%d: Enter...ctx id:%d\n", __func__, __LINE__, ctx->ctx_index);
+ CAM_DBG(CAM_ISP, "Enter ctx id:%d", ctx->ctx_index);
if (cfg->num_hw_update_entries > 0) {
cdm_cmd = ctx->cdm_cmd;
@@ -1353,11 +1361,11 @@
rc = cam_cdm_submit_bls(ctx->cdm_handle, cdm_cmd);
if (rc)
- pr_err("Failed to apply the configs\n");
+ CAM_ERR(CAM_ISP, "Failed to apply the configs");
} else {
- pr_err("No commands to config\n");
+ CAM_ERR(CAM_ISP, "No commands to config");
}
- CDBG("%s: Exit\n", __func__);
+ CAM_DBG(CAM_ISP, "Exit");
return rc;
}
@@ -1372,22 +1380,21 @@
uint32_t i, master_base_idx = 0;
if (!hw_mgr_priv || !stop_hw_args) {
- pr_err("%s%d: Invalid arguments\n", __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "Invalid arguments");
return -EINVAL;
}
ctx = (struct cam_ife_hw_mgr_ctx *)stop_args->ctxt_to_hw_map;
if (!ctx || !ctx->ctx_in_use) {
- pr_err("%s: Fatal: Invalid context is used!\n", __func__);
+ CAM_ERR(CAM_ISP, "Fatal: Invalid context is used!");
return -EPERM;
}
- CDBG("%s%d: Enter...ctx id:%d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "Enter...ctx id:%d",
ctx->ctx_index);
/* stop resource will remove the irq mask from the hardware */
if (!ctx->num_base) {
- pr_err("%s%d: error number of bases are zero\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "error number of bases are zero");
return -EINVAL;
}
@@ -1443,7 +1450,7 @@
/* update vote bandwidth should be done at the HW layer */
- CDBG("%s%d Exit...ctx id:%d rc :%d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "Exit...ctx id:%d rc :%d",
ctx->ctx_index, rc);
return rc;
@@ -1459,23 +1466,22 @@
uint32_t i, master_base_idx = 0;
if (!hw_mgr_priv || !stop_hw_args) {
- pr_err("%s%d: Invalid arguments\n", __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "Invalid arguments");
return -EINVAL;
}
ctx = (struct cam_ife_hw_mgr_ctx *)stop_args->ctxt_to_hw_map;
if (!ctx || !ctx->ctx_in_use) {
- pr_err("%s: Fatal: Invalid context is used!\n", __func__);
+ CAM_ERR(CAM_ISP, "Fatal: Invalid context is used!");
return -EPERM;
}
- CDBG("%s%d: Enter...ctx id:%d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "Enter...ctx id:%d",
ctx->ctx_index);
/* Note:stop resource will remove the irq mask from the hardware */
if (!ctx->num_base) {
- pr_err("%s%d: error number of bases are zero\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "error number of bases are zero");
return -EINVAL;
}
@@ -1520,8 +1526,8 @@
}
if (cam_cdm_stream_off(ctx->cdm_handle))
- pr_err("%s%d: CDM stream off failed %d\n",
- __func__, __LINE__, ctx->cdm_handle);
+ CAM_ERR(CAM_ISP, "CDM stream off failed %d",
+ ctx->cdm_handle);
/* IFE mux in resources */
list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
@@ -1557,8 +1563,7 @@
for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++)
cam_ife_hw_mgr_deinit_hw_res(&ctx->res_list_ife_out[i]);
- CDBG("%s%d Exit...ctx id:%d rc :%d\n", __func__, __LINE__,
- ctx->ctx_index, rc);
+ CAM_DBG(CAM_ISP, "Exit...ctx id:%d rc :%d", ctx->ctx_index, rc);
return rc;
}
@@ -1572,7 +1577,7 @@
struct cam_csid_reset_cfg_args csid_reset_args;
if (!hw_mgr) {
- CDBG("%s: Invalid arguments\n", __func__);
+ CAM_DBG(CAM_ISP, "Invalid arguments");
return -EINVAL;
}
@@ -1594,13 +1599,13 @@
for (i = 0; i < CAM_VFE_HW_NUM_MAX; i++) {
if (hw_idx != hw_mgr->ife_devices[i]->hw_idx)
continue;
- CDBG("%d:VFE (id = %d) reset\n", __LINE__, hw_idx);
+ CAM_DBG(CAM_ISP, "VFE (id = %d) reset", hw_idx);
vfe_hw_intf = hw_mgr->ife_devices[i];
vfe_hw_intf->hw_ops.reset(vfe_hw_intf->hw_priv, NULL, 0);
break;
}
- CDBG("%d: Exit Successfully\n", __LINE__);
+ CAM_DBG(CAM_ISP, "Exit Successfully");
return 0;
}
@@ -1614,74 +1619,68 @@
uint32_t i;
if (!hw_mgr_priv || !start_hw_args) {
- pr_err("%s%d: Invalid arguments\n", __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "Invalid arguments");
return -EINVAL;
}
ctx = (struct cam_ife_hw_mgr_ctx *)start_args->ctxt_to_hw_map;
if (!ctx || !ctx->ctx_in_use) {
- pr_err("%s: Invalid context is used!\n", __func__);
+ CAM_ERR(CAM_ISP, "Invalid context is used!");
return -EPERM;
}
- CDBG("%s%d Enter... ctx id:%d\n", __func__, __LINE__,
- ctx->ctx_index);
+ CAM_DBG(CAM_ISP, "Enter... ctx id:%d", ctx->ctx_index);
- CDBG("%s%d START IFE OUT ... in ctx id:%d\n", __func__, __LINE__,
- ctx->ctx_index);
+ CAM_DBG(CAM_ISP, "START IFE OUT ... in ctx id:%d", ctx->ctx_index);
/* start the IFE out devices */
for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
rc = cam_ife_hw_mgr_start_hw_res(&ctx->res_list_ife_out[i]);
if (rc) {
- pr_err("%s: Can not start IFE OUT (%d)!\n",
- __func__, i);
+ CAM_ERR(CAM_ISP, "Can not start IFE OUT (%d)!", i);
goto err;
}
}
- CDBG("%s%d START IFE SRC ... in ctx id:%d\n", __func__, __LINE__,
- ctx->ctx_index);
+ CAM_DBG(CAM_ISP, "START IFE SRC ... in ctx id:%d", ctx->ctx_index);
/* Start the IFE mux in devices */
list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
if (rc) {
- pr_err("%s: Can not start IFE MUX (%d)!\n",
- __func__, hw_mgr_res->res_id);
+ CAM_ERR(CAM_ISP, "Can not start IFE MUX (%d)!",
+ hw_mgr_res->res_id);
goto err;
}
}
- CDBG("%s:%d: START CSID HW ... in ctx id:%d\n", __func__, __LINE__,
- ctx->ctx_index);
+ CAM_DBG(CAM_ISP, "START CSID HW ... in ctx id:%d", ctx->ctx_index);
/* Start the IFE CSID HW devices */
list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
if (rc) {
- pr_err("%s: Can not start IFE CSID (%d)!\n",
- __func__, hw_mgr_res->res_id);
+ CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)!",
+ hw_mgr_res->res_id);
goto err;
}
}
- CDBG("%s%d START CID SRC ... in ctx id:%d\n", __func__, __LINE__,
- ctx->ctx_index);
+ CAM_DBG(CAM_ISP, "START CID SRC ... in ctx id:%d", ctx->ctx_index);
/* Start the IFE CID HW devices */
list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_cid, list) {
rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
if (rc) {
- pr_err("%s: Can not start IFE CSID (%d)!\n",
- __func__, hw_mgr_res->res_id);
+ CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)!",
+ hw_mgr_res->res_id);
goto err;
}
}
/* Start IFE root node: do nothing */
- CDBG("%s: Exit...(success)\n", __func__);
+ CAM_DBG(CAM_ISP, "Exit...(success)");
return 0;
err:
cam_ife_mgr_stop_hw(hw_mgr_priv, start_hw_args);
- CDBG("%s: Exit...(rc=%d)\n", __func__, rc);
+ CAM_DBG(CAM_ISP, "Exit...(rc=%d)", rc);
return rc;
}
@@ -1694,17 +1693,17 @@
uint32_t i;
if (!hw_mgr_priv || !start_hw_args) {
- pr_err("%s%d: Invalid arguments\n", __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "Invalid arguments");
return -EINVAL;
}
ctx = (struct cam_ife_hw_mgr_ctx *)start_args->ctxt_to_hw_map;
if (!ctx || !ctx->ctx_in_use) {
- pr_err("%s: Invalid context is used!\n", __func__);
+ CAM_ERR(CAM_ISP, "Invalid context is used!");
return -EPERM;
}
- CDBG("%s%d Enter... ctx id:%d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "Enter... ctx id:%d",
ctx->ctx_index);
/* update Bandwidth should be done at the hw layer */
@@ -1713,127 +1712,127 @@
/* INIT IFE Root: do nothing */
- CDBG("%s%d INIT IFE CID ... in ctx id:%d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "INIT IFE CID ... in ctx id:%d",
ctx->ctx_index);
/* INIT IFE CID */
list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_cid, list) {
rc = cam_ife_hw_mgr_init_hw_res(hw_mgr_res);
if (rc) {
- pr_err("%s: Can not INIT IFE CID.(id :%d)!\n",
- __func__, hw_mgr_res->res_id);
+ CAM_ERR(CAM_ISP, "Can not INIT IFE CID.(id :%d)!",
+ hw_mgr_res->res_id);
goto err;
}
}
- CDBG("%s%d INIT IFE csid ... in ctx id:%d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "INIT IFE csid ... in ctx id:%d",
ctx->ctx_index);
/* INIT IFE csid */
list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
rc = cam_ife_hw_mgr_init_hw_res(hw_mgr_res);
if (rc) {
- pr_err("%s: Can not INIT IFE CSID.(id :%d)!\n",
- __func__, hw_mgr_res->res_id);
+ CAM_ERR(CAM_ISP, "Can not INIT IFE CSID.(id :%d)!",
+ hw_mgr_res->res_id);
goto err;
}
}
/* INIT IFE SRC */
- CDBG("%s%d INIT IFE SRC in ctx id:%d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "INIT IFE SRC in ctx id:%d",
ctx->ctx_index);
list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
rc = cam_ife_hw_mgr_init_hw_res(hw_mgr_res);
if (rc) {
- pr_err("%s: Can not INIT IFE SRC (%d)!\n",
- __func__, hw_mgr_res->res_id);
+ CAM_ERR(CAM_ISP, "Can not INIT IFE SRC (%d)!",
+ hw_mgr_res->res_id);
goto err;
}
}
/* INIT IFE OUT */
- CDBG("%s%d INIT IFE OUT RESOURCES in ctx id:%d\n", __func__,
- __LINE__, ctx->ctx_index);
+ CAM_DBG(CAM_ISP, "INIT IFE OUT RESOURCES in ctx id:%d",
+ ctx->ctx_index);
for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
rc = cam_ife_hw_mgr_init_hw_res(&ctx->res_list_ife_out[i]);
if (rc) {
- pr_err("%s: Can not INIT IFE OUT (%d)!\n",
- __func__, ctx->res_list_ife_out[i].res_id);
+ CAM_ERR(CAM_ISP, "Can not INIT IFE OUT (%d)!",
+ ctx->res_list_ife_out[i].res_id);
goto err;
}
}
- CDBG("%s: start cdm interface\n", __func__);
+ CAM_DBG(CAM_ISP, "start cdm interface");
rc = cam_cdm_stream_on(ctx->cdm_handle);
if (rc) {
- pr_err("%s: Can not start cdm (%d)!\n",
- __func__, ctx->cdm_handle);
+ CAM_ERR(CAM_ISP, "Can not start cdm (%d)!",
+ ctx->cdm_handle);
goto err;
}
/* Apply initial configuration */
- CDBG("%s: Config HW\n", __func__);
+ CAM_DBG(CAM_ISP, "Config HW");
rc = cam_ife_mgr_config_hw(hw_mgr_priv, start_hw_args);
if (rc) {
- pr_err("%s: Config HW failed\n", __func__);
+ CAM_ERR(CAM_ISP, "Config HW failed");
goto err;
}
- CDBG("%s%d START IFE OUT ... in ctx id:%d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "START IFE OUT ... in ctx id:%d",
ctx->ctx_index);
/* start the IFE out devices */
for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
rc = cam_ife_hw_mgr_start_hw_res(&ctx->res_list_ife_out[i]);
if (rc) {
- pr_err("%s: Can not start IFE OUT (%d)!\n",
- __func__, i);
+ CAM_ERR(CAM_ISP, "Can not start IFE OUT (%d)!",
+ i);
goto err;
}
}
- CDBG("%s%d START IFE SRC ... in ctx id:%d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "START IFE SRC ... in ctx id:%d",
ctx->ctx_index);
/* Start the IFE mux in devices */
list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
if (rc) {
- pr_err("%s: Can not start IFE MUX (%d)!\n",
- __func__, hw_mgr_res->res_id);
+ CAM_ERR(CAM_ISP, "Can not start IFE MUX (%d)!",
+ hw_mgr_res->res_id);
goto err;
}
}
- CDBG("%s:%d: START CSID HW ... in ctx id:%d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "START CSID HW ... in ctx id:%d",
ctx->ctx_index);
/* Start the IFE CSID HW devices */
list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
if (rc) {
- pr_err("%s: Can not start IFE CSID (%d)!\n",
- __func__, hw_mgr_res->res_id);
+ CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)!",
+ hw_mgr_res->res_id);
goto err;
}
}
- CDBG("%s%d START CID SRC ... in ctx id:%d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "START CID SRC ... in ctx id:%d",
ctx->ctx_index);
/* Start the IFE CID HW devices */
list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_cid, list) {
rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
if (rc) {
- pr_err("%s: Can not start IFE CSID (%d)!\n",
- __func__, hw_mgr_res->res_id);
+ CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)!",
+ hw_mgr_res->res_id);
goto err;
}
}
/* Start IFE root node: do nothing */
- CDBG("%s: Exit...(success)\n", __func__);
+ CAM_DBG(CAM_ISP, "Exit...(success)");
return 0;
err:
cam_ife_mgr_stop_hw(hw_mgr_priv, start_hw_args);
- CDBG("%s: Exit...(rc=%d)\n", __func__, rc);
+ CAM_DBG(CAM_ISP, "Exit...(rc=%d)", rc);
return rc;
}
@@ -1856,17 +1855,17 @@
struct cam_ife_hw_mgr_ctx *ctx;
if (!hw_mgr_priv || !release_hw_args) {
- pr_err("%s%d: Invalid arguments\n", __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "Invalid arguments");
return -EINVAL;
}
ctx = (struct cam_ife_hw_mgr_ctx *)release_args->ctxt_to_hw_map;
if (!ctx || !ctx->ctx_in_use) {
- pr_err("%s: Fatal: Invalid context is used!\n", __func__);
+ CAM_ERR(CAM_ISP, "Fatal: Invalid context is used!");
return -EPERM;
}
- CDBG("%s%d Enter...ctx id:%d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "Enter...ctx id:%d",
ctx->ctx_index);
/* we should called the stop hw before this already */
@@ -1882,7 +1881,7 @@
/* clean context */
list_del_init(&ctx->list);
ctx->ctx_in_use = 0;
- CDBG("%s%d Exit...ctx id:%d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "Exit...ctx id:%d",
ctx->ctx_index);
cam_ife_hw_mgr_put_ctx(&hw_mgr->free_ctx_list, &ctx);
return rc;
@@ -1896,34 +1895,34 @@
(struct cam_hw_prepare_update_args *) prepare_hw_update_args;
struct cam_ife_hw_mgr_ctx *ctx;
struct cam_ife_hw_mgr *hw_mgr;
- struct cam_isp_kmd_buf_info kmd_buf;
+ struct cam_kmd_buf_info kmd_buf;
uint32_t i;
bool fill_fence = true;
if (!hw_mgr_priv || !prepare_hw_update_args) {
- pr_err("%s: Invalid args\n", __func__);
+ CAM_ERR(CAM_ISP, "Invalid args");
return -EINVAL;
}
- CDBG("%s:%d enter\n", __func__, __LINE__);
+ CAM_DBG(CAM_ISP, "enter");
ctx = (struct cam_ife_hw_mgr_ctx *) prepare->ctxt_to_hw_map;
hw_mgr = (struct cam_ife_hw_mgr *)hw_mgr_priv;
- rc = cam_isp_validate_packet(prepare->packet);
+ rc = cam_packet_util_validate_packet(prepare->packet);
if (rc)
return rc;
- CDBG("%s:%d enter\n", __func__, __LINE__);
+ CAM_DBG(CAM_ISP, "enter");
/* Pre parse the packet*/
- rc = cam_isp_get_kmd_buffer(prepare->packet, &kmd_buf);
+ rc = cam_packet_util_get_kmd_buffer(prepare->packet, &kmd_buf);
if (rc)
return rc;
rc = cam_packet_util_process_patches(prepare->packet,
hw_mgr->mgr_common.cmd_iommu_hdl);
if (rc) {
- pr_err("%s: Patch ISP packet failed.\n", __func__);
+ CAM_ERR(CAM_ISP, "Patch ISP packet failed.");
return rc;
}
@@ -1932,7 +1931,7 @@
prepare->num_out_map_entries = 0;
for (i = 0; i < ctx->num_base; i++) {
- CDBG("%s: process cmd buffer for device %d\n", __func__, i);
+ CAM_DBG(CAM_ISP, "process cmd buffer for device %d", i);
/* Add change base */
rc = cam_isp_add_change_base(prepare, &ctx->res_list_ife_src,
@@ -1980,6 +1979,92 @@
return rc;
}
+static int cam_ife_mgr_cmd(void *hw_mgr_priv, void *cmd_args)
+{
+ int rc = 0;
+ struct cam_isp_hw_cmd_args *hw_cmd_args = cmd_args;
+ struct cam_ife_hw_mgr_ctx *ctx;
+
+ if (!hw_mgr_priv || !cmd_args) {
+ CAM_ERR(CAM_ISP, "Invalid arguments");
+ return -EINVAL;
+ }
+
+ ctx = (struct cam_ife_hw_mgr_ctx *)hw_cmd_args->ctxt_to_hw_map;
+ if (!ctx || !ctx->ctx_in_use) {
+ CAM_ERR(CAM_ISP, "Fatal: Invalid context is used!");
+ return -EPERM;
+ }
+
+ switch (hw_cmd_args->cmd_type) {
+ case CAM_ISP_HW_MGR_CMD_IS_RDI_ONLY_CONTEXT:
+ if (ctx->is_rdi_only_context)
+ hw_cmd_args->u.is_rdi_only_context = 1;
+ else
+ hw_cmd_args->u.is_rdi_only_context = 0;
+
+ break;
+ default:
+ CAM_ERR(CAM_ISP, "Invalid HW mgr command:0x%x",
+ hw_cmd_args->cmd_type);
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+static int cam_ife_mgr_cmd_get_sof_timestamp(
+ struct cam_ife_hw_mgr_ctx *ife_ctx,
+ uint64_t *time_stamp)
+{
+ int rc = -EINVAL;
+ uint32_t i;
+ struct cam_ife_hw_mgr_res *hw_mgr_res;
+ struct cam_hw_intf *hw_intf;
+ struct cam_csid_get_time_stamp_args csid_get_time;
+
+ list_for_each_entry(hw_mgr_res, &ife_ctx->res_list_ife_csid, list) {
+ for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+ if (!hw_mgr_res->hw_res[i] ||
+ (i == CAM_ISP_HW_SPLIT_RIGHT))
+ continue;
+ /*
+ * Get the SOF time stamp from left resource only.
+ * Left resource is master for dual vfe case and
+ * Rdi only context case left resource only hold
+ * the RDI resource
+ */
+ hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
+ if (hw_intf->hw_ops.process_cmd) {
+ csid_get_time.node_res =
+ hw_mgr_res->hw_res[i];
+ rc = hw_intf->hw_ops.process_cmd(
+ hw_intf->hw_priv,
+ CAM_IFE_CSID_CMD_GET_TIME_STAMP,
+ &csid_get_time,
+ sizeof(
+ struct cam_csid_get_time_stamp_args));
+ if (!rc)
+ *time_stamp =
+ csid_get_time.time_stamp_val;
+ /*
+ * Single VFE case, Get the time stamp from available
+ * one csid hw in the context
+ * Dual VFE case, get the time stamp from master(left)
+ * would be sufficient
+ */
+ goto end;
+ }
+ }
+ }
+end:
+ if (rc)
+ CAM_ERR(CAM_ISP, "error in getting sof time stamp");
+
+ return rc;
+}
+
static int cam_ife_mgr_process_recovery_cb(void *priv, void *data)
{
int32_t rc = 0;
@@ -1993,13 +2078,14 @@
struct cam_ife_hw_mgr_ctx *ctx = NULL;
/* Here recovery is performed */
- CDBG("%s:Enter: ErrorType = %d\n", __func__, error_type);
+ CAM_DBG(CAM_ISP, "Enter: ErrorType = %d", error_type);
switch (error_type) {
case CAM_ISP_HW_ERROR_OVERFLOW:
case CAM_ISP_HW_ERROR_BUSIF_OVERFLOW:
if (!recovery_data->affected_ctx[0]) {
- pr_err("No context is affected but recovery called\n");
+ CAM_ERR(CAM_ISP,
+ "No context is affected but recovery called");
kfree(recovery_data);
return 0;
}
@@ -2027,9 +2113,9 @@
break;
default:
- pr_err("%s: Invalid Error\n", __func__);
+ CAM_ERR(CAM_ISP, "Invalid Error");
}
- CDBG("%s:Exit: ErrorType = %d\n", __func__, error_type);
+ CAM_DBG(CAM_ISP, "Exit: ErrorType = %d", error_type);
kfree(recovery_data);
return rc;
@@ -2052,12 +2138,11 @@
memcpy(recovery_data, ife_mgr_recovery_data,
sizeof(struct cam_hw_event_recovery_data));
- CDBG("%s: Enter: error_type (%d)\n", __func__,
- recovery_data->error_type);
+ CAM_DBG(CAM_ISP, "Enter: error_type (%d)", recovery_data->error_type);
task = cam_req_mgr_workq_get_task(g_ife_hw_mgr.workq);
if (!task) {
- pr_err("%s: No empty task frame\n", __func__);
+ CAM_ERR(CAM_ISP, "No empty task frame");
kfree(recovery_data);
return -ENOMEM;
}
@@ -2086,7 +2171,7 @@
uint32_t max_idx = ife_hwr_mgr_ctx->num_base;
uint32_t ctx_affected_core_idx[CAM_IFE_HW_NUM_MAX] = {0};
- CDBG("%s:Enter:max_idx = %d\n", __func__, max_idx);
+ CAM_DBG(CAM_ISP, "Enter:max_idx = %d", max_idx);
while (i < max_idx) {
if (affected_core[ife_hwr_mgr_ctx->base[i].idx])
@@ -2107,7 +2192,7 @@
j = j - 1;
}
}
- CDBG("%s:Exit\n", __func__);
+ CAM_DBG(CAM_ISP, "Exit");
return rc;
}
@@ -2133,11 +2218,11 @@
struct cam_hw_stop_args stop_args;
uint32_t i = 0;
- CDBG("%s:Enter\n", __func__);
+ CAM_DBG(CAM_ISP, "Enter");
return 0;
if (!recovery_data) {
- pr_err("%s: recovery_data parameter is NULL\n",
+ CAM_ERR(CAM_ISP, "%s: recovery_data parameter is NULL",
__func__);
return -EINVAL;
}
@@ -2154,7 +2239,7 @@
* Check if current core_idx matches the HW associated
* with this context
*/
- CDBG("%s:Calling match Hw idx\n", __func__);
+ CAM_DBG(CAM_ISP, "Calling match Hw idx");
if (cam_ife_hw_mgr_match_hw_idx(ife_hwr_mgr_ctx, affected_core))
continue;
@@ -2164,7 +2249,7 @@
stop_args.ctxt_to_hw_map = ife_hwr_mgr_ctx;
/* Add affected_context in list of recovery data*/
- CDBG("%s:Add new entry in affected_ctx_list\n", __func__);
+ CAM_DBG(CAM_ISP, "Add new entry in affected_ctx_list");
if (recovery_data->no_of_context < CAM_CTX_MAX)
recovery_data->affected_ctx[
recovery_data->no_of_context++] =
@@ -2177,7 +2262,7 @@
*/
if (!cam_ife_mgr_stop_hw_in_overflow(&hw_mgr_priv,
&stop_args)) {
- CDBG("%s:Calling Error handler CB\n", __func__);
+ CAM_DBG(CAM_ISP, "Calling Error handler CB");
ife_hwr_irq_err_cb(ife_hwr_mgr_ctx->common.cb_priv,
CAM_ISP_HW_EVENT_ERROR, error_event_data);
}
@@ -2185,10 +2270,10 @@
/* fill the affected_core in recovery data */
for (i = 0; i < CAM_IFE_HW_NUM_MAX; i++) {
recovery_data->affected_core[i] = affected_core[i];
- CDBG("%s: Vfe core %d is affected (%d)\n",
- __func__, i, recovery_data->affected_core[i]);
+ CAM_DBG(CAM_ISP, "Vfe core %d is affected (%d)",
+ i, recovery_data->affected_core[i]);
}
- CDBG("%s:Exit\n", __func__);
+ CAM_DBG(CAM_ISP, "Exit");
return 0;
}
@@ -2208,7 +2293,7 @@
core_idx = evt_payload->core_index;
rc = evt_payload->error_type;
- CDBG("%s: Enter: error_type (%d)\n", __func__, evt_payload->error_type);
+ CAM_DBG(CAM_ISP, "Enter: error_type (%d)", evt_payload->error_type);
switch (evt_payload->error_type) {
case CAM_ISP_HW_ERROR_OVERFLOW:
case CAM_ISP_HW_ERROR_P2I_ERROR:
@@ -2227,11 +2312,11 @@
cam_ife_hw_mgr_do_error_recovery(&recovery_data);
break;
default:
- CDBG("%s: None error. Error type (%d)\n", __func__,
+ CAM_DBG(CAM_ISP, "None error. Error type (%d)",
evt_payload->error_type);
}
- CDBG("%s: Exit (%d)\n", __func__, rc);
+ CAM_DBG(CAM_ISP, "Exit (%d)", rc);
return rc;
}
@@ -2241,26 +2326,26 @@
* of dual VFE.
* RDI path does not support DUAl VFE
*/
-static int cam_ife_hw_mgr_handle_rup_for_camif_hw_res(
+static int cam_ife_hw_mgr_handle_reg_update(
void *handler_priv,
void *payload)
{
struct cam_isp_resource_node *hw_res;
struct cam_ife_hw_mgr_ctx *ife_hwr_mgr_ctx;
struct cam_vfe_top_irq_evt_payload *evt_payload;
- struct cam_ife_hw_mgr_res *isp_ife_camif_res = NULL;
+ struct cam_ife_hw_mgr_res *ife_src_res = NULL;
cam_hw_event_cb_func ife_hwr_irq_rup_cb;
struct cam_isp_hw_reg_update_event_data rup_event_data;
uint32_t core_idx;
uint32_t rup_status = -EINVAL;
- CDBG("%s: Enter\n", __func__);
+ CAM_DBG(CAM_ISP, "Enter");
ife_hwr_mgr_ctx = handler_priv;
evt_payload = payload;
if (!handler_priv || !payload) {
- pr_err("%s: Invalid Parameter\n", __func__);
+ CAM_ERR(CAM_ISP, "Invalid Parameter");
return -EPERM;
}
@@ -2269,64 +2354,83 @@
ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_REG_UPDATE];
evt_payload->evt_id = CAM_ISP_HW_EVENT_REG_UPDATE;
- list_for_each_entry(isp_ife_camif_res,
+ list_for_each_entry(ife_src_res,
&ife_hwr_mgr_ctx->res_list_ife_src, list) {
- if (isp_ife_camif_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
+ if (ife_src_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
continue;
- CDBG("%s: camif resource id = %d, curr_core_idx = %d\n",
- __func__, isp_ife_camif_res->res_id, core_idx);
- switch (isp_ife_camif_res->res_id) {
+ CAM_DBG(CAM_ISP, "resource id = %d, curr_core_idx = %d",
+ ife_src_res->res_id, core_idx);
+ switch (ife_src_res->res_id) {
case CAM_ISP_HW_VFE_IN_CAMIF:
- if (isp_ife_camif_res->is_dual_vfe)
+ if (ife_src_res->is_dual_vfe)
/* It checks for slave core RUP ACK*/
- hw_res = isp_ife_camif_res->hw_res[1];
+ hw_res = ife_src_res->hw_res[1];
else
- hw_res = isp_ife_camif_res->hw_res[0];
+ hw_res = ife_src_res->hw_res[0];
if (!hw_res) {
- pr_err("%s: CAMIF device is NULL\n", __func__);
+ CAM_ERR(CAM_ISP, "CAMIF device is NULL");
break;
}
- CDBG("%s: current_core_id = %d , core_idx res = %d\n",
- __func__, core_idx,
- hw_res->hw_intf->hw_idx);
+ CAM_DBG(CAM_ISP,
+ "current_core_id = %d , core_idx res = %d",
+ core_idx, hw_res->hw_intf->hw_idx);
if (core_idx == hw_res->hw_intf->hw_idx) {
rup_status = hw_res->bottom_half_handler(
hw_res, evt_payload);
}
+
+ if (!rup_status) {
+ ife_hwr_irq_rup_cb(
+ ife_hwr_mgr_ctx->common.cb_priv,
+ CAM_ISP_HW_EVENT_REG_UPDATE,
+ &rup_event_data);
+ }
break;
case CAM_ISP_HW_VFE_IN_RDI0:
case CAM_ISP_HW_VFE_IN_RDI1:
case CAM_ISP_HW_VFE_IN_RDI2:
- hw_res = isp_ife_camif_res->hw_res[0];
+ case CAM_ISP_HW_VFE_IN_RDI3:
+ if (!ife_hwr_mgr_ctx->is_rdi_only_context)
+ continue;
+
+ /*
+ * This is RDI only context, send Reg update and epoch
+ * HW event to cam context
+ */
+ hw_res = ife_src_res->hw_res[0];
if (!hw_res) {
- pr_err("%s: RDI Device is NULL\n", __func__);
+ CAM_ERR(CAM_ISP, "RDI Device is NULL");
break;
}
+
if (core_idx == hw_res->hw_intf->hw_idx)
- /* Need to process rdi reg update */
- rup_status = -EINVAL;
+ rup_status = hw_res->bottom_half_handler(
+ hw_res, evt_payload);
+
+ if (!rup_status) {
+ /* Send the Reg update hw event */
+ ife_hwr_irq_rup_cb(
+ ife_hwr_mgr_ctx->common.cb_priv,
+ CAM_ISP_HW_EVENT_REG_UPDATE,
+ &rup_event_data);
+ }
break;
default:
- pr_err("%s: invalid resource id (%d)", __func__,
- isp_ife_camif_res->res_id);
- }
-
- /* only do callback for pixel reg update for now */
- if (!rup_status && (isp_ife_camif_res->res_id ==
- CAM_ISP_HW_VFE_IN_CAMIF)) {
- ife_hwr_irq_rup_cb(ife_hwr_mgr_ctx->common.cb_priv,
- CAM_ISP_HW_EVENT_REG_UPDATE, &rup_event_data);
+ CAM_ERR(CAM_ISP, "Invalid resource id (%d)",
+ ife_src_res->res_id);
}
}
- CDBG("%s: Exit (rup_status = %d)!\n", __func__, rup_status);
+ if (!rup_status)
+ CAM_DBG(CAM_ISP, "Exit rup_status = %d", rup_status);
+
return 0;
}
@@ -2351,13 +2455,13 @@
if ((epoch_cnt[core_idx0] - epoch_cnt[core_idx1] > 1) ||
(epoch_cnt[core_idx1] - epoch_cnt[core_idx0] > 1)) {
- pr_warn("%s:One of the VFE of dual VFE cound not generate error\n",
- __func__);
+ CAM_WARN(CAM_ISP,
+ "One of the VFE of dual VFE could not generate error");
rc = -1;
return rc;
}
- CDBG("Only one core_index has given EPOCH\n");
+ CAM_DBG(CAM_ISP, "Only one core_index has given EPOCH");
return rc;
}
@@ -2379,7 +2483,7 @@
uint32_t core_index0;
uint32_t core_index1;
- CDBG("%s:Enter\n", __func__);
+ CAM_DBG(CAM_ISP, "Enter");
ife_hwr_mgr_ctx = handler_priv;
evt_payload = payload;
@@ -2404,8 +2508,7 @@
case 0:
/* EPOCH check for Left side VFE */
if (!hw_res_l) {
- pr_err("%s: Left Device is NULL\n",
- __func__);
+ CAM_ERR(CAM_ISP, "Left Device is NULL");
break;
}
@@ -2426,8 +2529,7 @@
/* SOF check for Left side VFE (Master)*/
if ((!hw_res_l) || (!hw_res_r)) {
- pr_err("%s: Dual VFE Device is NULL\n",
- __func__);
+ CAM_ERR(CAM_ISP, "Dual VFE Device is NULL");
break;
}
if (core_idx == hw_res_l->hw_intf->hw_idx) {
@@ -2465,12 +2567,14 @@
/* Error */
default:
- pr_err("%s: error with hw_res\n", __func__);
+ CAM_ERR(CAM_ISP, "error with hw_res");
}
}
- CDBG("%s: Exit (epoch_status = %d)!\n", __func__, epoch_status);
+ if (!epoch_status)
+ CAM_DBG(CAM_ISP, "Exit epoch_status = %d", epoch_status);
+
return 0;
}
@@ -2495,142 +2599,197 @@
if ((sof_cnt[core_idx0] - sof_cnt[core_idx1] > 1) ||
(sof_cnt[core_idx1] - sof_cnt[core_idx0] > 1)) {
- pr_err("%s: One VFE of dual VFE cound not generate SOF\n",
- __func__);
+ CAM_ERR(CAM_ISP, "One VFE of dual VFE could not generate SOF");
rc = -1;
return rc;
}
- pr_info("Only one core_index has given SOF\n");
+ CAM_INFO(CAM_ISP, "Only one core_index has given SOF");
return rc;
}
-static int cam_ife_hw_mgr_handle_sof_for_camif_hw_res(
- void *handler_priv,
- void *payload)
+static int cam_ife_hw_mgr_process_camif_sof(
+ struct cam_ife_hw_mgr_res *isp_ife_camif_res,
+ struct cam_ife_hw_mgr_ctx *ife_hwr_mgr_ctx,
+ struct cam_vfe_top_irq_evt_payload *evt_payload)
{
- int32_t rc = -1;
+ struct cam_isp_hw_sof_event_data sof_done_event_data;
+ cam_hw_event_cb_func ife_hwr_irq_sof_cb;
struct cam_isp_resource_node *hw_res_l = NULL;
struct cam_isp_resource_node *hw_res_r = NULL;
- struct cam_ife_hw_mgr_ctx *ife_hwr_mgr_ctx;
- struct cam_vfe_top_irq_evt_payload *evt_payload;
- struct cam_ife_hw_mgr_res *isp_ife_camif_res = NULL;
- cam_hw_event_cb_func ife_hwr_irq_sof_cb;
- struct cam_isp_hw_sof_event_data sof_done_event_data;
+ int32_t rc = -EINVAL;
uint32_t core_idx;
uint32_t sof_status = 0;
uint32_t core_index0;
uint32_t core_index1;
- CDBG("%s:Enter\n", __func__);
-
- ife_hwr_mgr_ctx = handler_priv;
- evt_payload = payload;
- if (!evt_payload) {
- pr_err("%s: no payload\n", __func__);
- return IRQ_HANDLED;
- }
+ CAM_DBG(CAM_ISP, "Enter");
core_idx = evt_payload->core_index;
+ hw_res_l = isp_ife_camif_res->hw_res[0];
+ hw_res_r = isp_ife_camif_res->hw_res[1];
+ CAM_DBG(CAM_ISP, "is_dual_vfe ? = %d",
+ isp_ife_camif_res->is_dual_vfe);
+
ife_hwr_irq_sof_cb =
ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_SOF];
- evt_payload->evt_id = CAM_ISP_HW_EVENT_SOF;
-
- list_for_each_entry(isp_ife_camif_res,
- &ife_hwr_mgr_ctx->res_list_ife_src, list) {
-
- if ((isp_ife_camif_res->res_type ==
- CAM_IFE_HW_MGR_RES_UNINIT) ||
- (isp_ife_camif_res->res_id != CAM_ISP_HW_VFE_IN_CAMIF))
- continue;
-
- hw_res_l = isp_ife_camif_res->hw_res[0];
- hw_res_r = isp_ife_camif_res->hw_res[1];
-
- CDBG("%s:is_dual_vfe ? = %d\n", __func__,
- isp_ife_camif_res->is_dual_vfe);
- switch (isp_ife_camif_res->is_dual_vfe) {
- /* Handling Single VFE Scenario */
- case 0:
- /* SOF check for Left side VFE */
- if (!hw_res_l) {
- pr_err("%s: VFE Device is NULL\n",
- __func__);
- break;
- }
- CDBG("%s: curr_core_idx = %d, core idx hw = %d\n",
- __func__, core_idx,
- hw_res_l->hw_intf->hw_idx);
-
- if (core_idx == hw_res_l->hw_intf->hw_idx) {
- sof_status = hw_res_l->bottom_half_handler(
- hw_res_l, evt_payload);
- if (!sof_status)
- ife_hwr_irq_sof_cb(
- ife_hwr_mgr_ctx->common.cb_priv,
- CAM_ISP_HW_EVENT_SOF,
- &sof_done_event_data);
- }
-
+ switch (isp_ife_camif_res->is_dual_vfe) {
+ /* Handling Single VFE Scenario */
+ case 0:
+ /* SOF check for Left side VFE */
+ if (!hw_res_l) {
+ CAM_ERR(CAM_ISP, "VFE Device is NULL");
break;
+ }
+ CAM_DBG(CAM_ISP, "curr_core_idx = %d,core idx hw = %d",
+ core_idx, hw_res_l->hw_intf->hw_idx);
- /* Handling Dual VFE Scenario */
- case 1:
- /* SOF check for Left side VFE */
+ if (core_idx == hw_res_l->hw_intf->hw_idx) {
+ sof_status = hw_res_l->bottom_half_handler(hw_res_l,
+ evt_payload);
+ if (!sof_status) {
+ cam_ife_mgr_cmd_get_sof_timestamp(
+ ife_hwr_mgr_ctx,
+ &sof_done_event_data.timestamp);
- if (!hw_res_l) {
- pr_err("%s: VFE Device is NULL\n",
- __func__);
- break;
- }
- CDBG("%s: curr_core_idx = %d, idx associated hw = %d\n",
- __func__, core_idx,
- hw_res_l->hw_intf->hw_idx);
-
- if (core_idx == hw_res_l->hw_intf->hw_idx) {
- sof_status = hw_res_l->bottom_half_handler(
- hw_res_l, evt_payload);
- if (!sof_status)
- ife_hwr_mgr_ctx->sof_cnt[core_idx]++;
- }
-
- /* SOF check for Right side VFE */
- if (!hw_res_r) {
- pr_err("%s: VFE Device is NULL\n",
- __func__);
- break;
- }
- CDBG("%s: curr_core_idx = %d, idx associated hw = %d\n",
- __func__, core_idx,
- hw_res_r->hw_intf->hw_idx);
- if (core_idx == hw_res_r->hw_intf->hw_idx) {
- sof_status = hw_res_r->bottom_half_handler(
- hw_res_r, evt_payload);
- if (!sof_status)
- ife_hwr_mgr_ctx->sof_cnt[core_idx]++;
- }
-
- core_index0 = hw_res_l->hw_intf->hw_idx;
- core_index1 = hw_res_r->hw_intf->hw_idx;
-
- rc = cam_ife_hw_mgr_check_sof_for_dual_vfe(
- ife_hwr_mgr_ctx, core_index0, core_index1);
-
- if (!rc)
ife_hwr_irq_sof_cb(
ife_hwr_mgr_ctx->common.cb_priv,
CAM_ISP_HW_EVENT_SOF,
&sof_done_event_data);
+ }
+ }
+ break;
+
+ /* Handling Dual VFE Scenario */
+ case 1:
+ /* SOF check for Left side VFE */
+
+ if (!hw_res_l) {
+ CAM_ERR(CAM_ISP, "VFE Device is NULL");
+ break;
+ }
+ CAM_DBG(CAM_ISP, "curr_core_idx = %d, res hw idx= %d",
+ core_idx,
+ hw_res_l->hw_intf->hw_idx);
+
+ if (core_idx == hw_res_l->hw_intf->hw_idx) {
+ sof_status = hw_res_l->bottom_half_handler(
+ hw_res_l, evt_payload);
+ if (!sof_status)
+ ife_hwr_mgr_ctx->sof_cnt[core_idx]++;
+ }
+
+ /* SOF check for Right side VFE */
+ if (!hw_res_r) {
+ CAM_ERR(CAM_ISP, "VFE Device is NULL");
+ break;
+ }
+ CAM_DBG(CAM_ISP, "curr_core_idx = %d, res hw idx= %d",
+ core_idx,
+ hw_res_r->hw_intf->hw_idx);
+ if (core_idx == hw_res_r->hw_intf->hw_idx) {
+ sof_status = hw_res_r->bottom_half_handler(hw_res_r,
+ evt_payload);
+ if (!sof_status)
+ ife_hwr_mgr_ctx->sof_cnt[core_idx]++;
+ }
+
+ core_index0 = hw_res_l->hw_intf->hw_idx;
+ core_index1 = hw_res_r->hw_intf->hw_idx;
+
+ rc = cam_ife_hw_mgr_check_sof_for_dual_vfe(ife_hwr_mgr_ctx,
+ core_index0, core_index1);
+
+ if (!rc)
+ ife_hwr_irq_sof_cb(ife_hwr_mgr_ctx->common.cb_priv,
+ CAM_ISP_HW_EVENT_SOF, &sof_done_event_data);
+
+ break;
+
+ default:
+ CAM_ERR(CAM_ISP, "error with hw_res");
+ break;
+ }
+
+ CAM_DBG(CAM_ISP, "Exit (sof_status = %d)!", sof_status);
+
+ return 0;
+}
+
+static int cam_ife_hw_mgr_handle_sof(
+ void *handler_priv,
+ void *payload)
+{
+ int32_t rc = -EINVAL;
+ struct cam_isp_resource_node *hw_res = NULL;
+ struct cam_ife_hw_mgr_ctx *ife_hw_mgr_ctx;
+ struct cam_vfe_top_irq_evt_payload *evt_payload;
+ struct cam_ife_hw_mgr_res *ife_src_res = NULL;
+ cam_hw_event_cb_func ife_hw_irq_sof_cb;
+ struct cam_isp_hw_sof_event_data sof_done_event_data;
+ uint32_t sof_status = 0;
+
+ CAM_DBG(CAM_ISP, "Enter");
+
+ ife_hw_mgr_ctx = handler_priv;
+ evt_payload = payload;
+ if (!evt_payload) {
+ CAM_ERR(CAM_ISP, "no payload");
+ return IRQ_HANDLED;
+ }
+ ife_hw_irq_sof_cb =
+ ife_hw_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_SOF];
+
+ evt_payload->evt_id = CAM_ISP_HW_EVENT_SOF;
+
+ list_for_each_entry(ife_src_res,
+ &ife_hw_mgr_ctx->res_list_ife_src, list) {
+
+ if (ife_src_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
+ continue;
+
+ switch (ife_src_res->res_id) {
+ case CAM_ISP_HW_VFE_IN_RDI0:
+ case CAM_ISP_HW_VFE_IN_RDI1:
+ case CAM_ISP_HW_VFE_IN_RDI2:
+ case CAM_ISP_HW_VFE_IN_RDI3:
+ /* check if it is rdi only context */
+ if (ife_hw_mgr_ctx->is_rdi_only_context) {
+ hw_res = ife_src_res->hw_res[0];
+ sof_status = hw_res->bottom_half_handler(
+ hw_res, evt_payload);
+
+ if (!sof_status) {
+ cam_ife_mgr_cmd_get_sof_timestamp(
+ ife_hw_mgr_ctx,
+ &sof_done_event_data.timestamp);
+
+ ife_hw_irq_sof_cb(
+ ife_hw_mgr_ctx->common.cb_priv,
+ CAM_ISP_HW_EVENT_SOF,
+ &sof_done_event_data);
+ }
+
+ CAM_DBG(CAM_ISP, "sof_status = %d", sof_status);
+
+ /* this is RDI only context so exit from here */
+ return 0;
+ }
break;
+ case CAM_ISP_HW_VFE_IN_CAMIF:
+ rc = cam_ife_hw_mgr_process_camif_sof(ife_src_res,
+ ife_hw_mgr_ctx, evt_payload);
+ break;
default:
- pr_err("%s: error with hw_res\n", __func__);
+ CAM_ERR(CAM_ISP, "Invalid resource id :%d",
+ ife_src_res->res_id);
+ break;
}
}
- CDBG("%s: Exit (sof_status = %d)!\n", __func__, sof_status);
return 0;
}
@@ -2640,11 +2799,11 @@
{
int32_t buf_done_status = 0;
- int32_t i = 0;
+ int32_t i;
int32_t rc = 0;
cam_hw_event_cb_func ife_hwr_irq_wm_done_cb;
struct cam_isp_resource_node *hw_res_l = NULL;
- struct cam_ife_hw_mgr_ctx *ife_hwr_mgr_ctx = handler_priv;
+ struct cam_ife_hw_mgr_ctx *ife_hwr_mgr_ctx = NULL;
struct cam_vfe_bus_irq_evt_payload *evt_payload = payload;
struct cam_ife_hw_mgr_res *isp_ife_out_res = NULL;
struct cam_hw_event_recovery_data recovery_data;
@@ -2653,8 +2812,9 @@
uint32_t error_resc_handle[CAM_IFE_HW_OUT_RES_MAX];
uint32_t num_of_error_handles = 0;
- CDBG("%s:Enter\n", __func__);
+ CAM_DBG(CAM_ISP, "Enter");
+ ife_hwr_mgr_ctx = evt_payload->ctx;
ife_hwr_irq_wm_done_cb =
ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_DONE];
@@ -2715,7 +2875,7 @@
/* Report for Successful buf_done event if any */
if (buf_done_event_data.num_handles > 0 &&
ife_hwr_irq_wm_done_cb) {
- CDBG("%s: notify isp context\n", __func__);
+ CAM_DBG(CAM_ISP, "notify isp context");
ife_hwr_irq_wm_done_cb(
ife_hwr_mgr_ctx->common.cb_priv,
CAM_ISP_HW_EVENT_DONE,
@@ -2734,13 +2894,12 @@
}
break;
}
- CDBG("%s:buf_done status:(%d),isp_ife_out_res->res_id : 0x%x\n",
- __func__, buf_done_status, isp_ife_out_res->res_id);
+ if (!buf_done_status)
+ CAM_DBG(CAM_ISP,
+ "buf_done status:(%d),out_res->res_id: 0x%x",
+ buf_done_status, isp_ife_out_res->res_id);
}
-
- CDBG("%s: Exit (buf_done_status (Success) = %d)!\n", __func__,
- buf_done_status);
return rc;
err:
@@ -2759,8 +2918,8 @@
* for the first phase, we are going to reset entire HW.
*/
- CDBG("%s: Exit (buf_done_status (Error) = %d)!\n", __func__,
- buf_done_status);
+ CAM_DBG(CAM_ISP, "Exit (buf_done_status (Error) = %d)!",
+ buf_done_status);
return rc;
}
@@ -2775,31 +2934,21 @@
return rc;
evt_payload = evt_payload_priv;
- ife_hwr_mgr_ctx = (struct cam_ife_hw_mgr_ctx *)handler_priv;
+ ife_hwr_mgr_ctx = (struct cam_ife_hw_mgr_ctx *)evt_payload->ctx;
- CDBG("addr of evt_payload = %llx\n", (uint64_t)evt_payload);
- CDBG("bus_irq_status_0: = %x\n", evt_payload->irq_reg_val[0]);
- CDBG("bus_irq_status_1: = %x\n", evt_payload->irq_reg_val[1]);
- CDBG("bus_irq_status_2: = %x\n", evt_payload->irq_reg_val[2]);
- CDBG("bus_irq_comp_err: = %x\n", evt_payload->irq_reg_val[3]);
- CDBG("bus_irq_comp_owrt: = %x\n", evt_payload->irq_reg_val[4]);
- CDBG("bus_irq_dual_comp_err: = %x\n", evt_payload->irq_reg_val[5]);
- CDBG("bus_irq_dual_comp_owrt: = %x\n", evt_payload->irq_reg_val[6]);
+ CAM_DBG(CAM_ISP, "addr of evt_payload = %llx", (uint64_t)evt_payload);
+ CAM_DBG(CAM_ISP, "bus_irq_status_0: = %x", evt_payload->irq_reg_val[0]);
+ CAM_DBG(CAM_ISP, "bus_irq_status_1: = %x", evt_payload->irq_reg_val[1]);
+ CAM_DBG(CAM_ISP, "bus_irq_status_2: = %x", evt_payload->irq_reg_val[2]);
+ CAM_DBG(CAM_ISP, "bus_irq_comp_err: = %x", evt_payload->irq_reg_val[3]);
+ CAM_DBG(CAM_ISP, "bus_irq_comp_owrt: = %x",
+ evt_payload->irq_reg_val[4]);
+ CAM_DBG(CAM_ISP, "bus_irq_dual_comp_err: = %x",
+ evt_payload->irq_reg_val[5]);
+ CAM_DBG(CAM_ISP, "bus_irq_dual_comp_owrt: = %x",
+ evt_payload->irq_reg_val[6]);
- /*
- * If overflow/overwrite/error/violation are pending
- * for this context it needs to be handled remaining
- * interrupts are ignored.
- */
- rc = cam_ife_hw_mgr_handle_camif_error(ife_hwr_mgr_ctx,
- evt_payload_priv);
- if (rc) {
- pr_err("%s: Encountered Error (%d), ignoring other irqs\n",
- __func__, rc);
- return IRQ_HANDLED;
- }
-
- CDBG("%s: Calling Buf_done\n", __func__);
+ CAM_DBG(CAM_ISP, "Calling Buf_done");
/* WM Done */
return cam_ife_hw_mgr_handle_buf_done_for_hw_res(ife_hwr_mgr_ctx,
evt_payload_priv);
@@ -2817,10 +2966,11 @@
evt_payload = evt_payload_priv;
ife_hwr_mgr_ctx = (struct cam_ife_hw_mgr_ctx *)handler_priv;
- CDBG("addr of evt_payload = %llx\n", (uint64_t)evt_payload);
- CDBG("irq_status_0: = %x\n", evt_payload->irq_reg_val[0]);
- CDBG("irq_status_1: = %x\n", evt_payload->irq_reg_val[1]);
- CDBG("Violation register: = %x\n", evt_payload->irq_reg_val[2]);
+ CAM_DBG(CAM_ISP, "addr of evt_payload = %llx", (uint64_t)evt_payload);
+ CAM_DBG(CAM_ISP, "irq_status_0: = %x", evt_payload->irq_reg_val[0]);
+ CAM_DBG(CAM_ISP, "irq_status_1: = %x", evt_payload->irq_reg_val[1]);
+ CAM_DBG(CAM_ISP, "Violation register: = %x",
+ evt_payload->irq_reg_val[2]);
/*
* If overflow/overwrite/error/violation are pending
@@ -2830,22 +2980,22 @@
rc = cam_ife_hw_mgr_handle_camif_error(ife_hwr_mgr_ctx,
evt_payload_priv);
if (rc) {
- pr_err("%s: Encountered Error (%d), ignoring other irqs\n",
- __func__, rc);
+ CAM_ERR(CAM_ISP, "Encountered Error (%d), ignoring other irqs",
+ rc);
return IRQ_HANDLED;
}
- CDBG("%s: Calling SOF\n", __func__);
+ CAM_DBG(CAM_ISP, "Calling SOF");
/* SOF IRQ */
- cam_ife_hw_mgr_handle_sof_for_camif_hw_res(ife_hwr_mgr_ctx,
+ cam_ife_hw_mgr_handle_sof(ife_hwr_mgr_ctx,
evt_payload_priv);
- CDBG("%s: Calling RUP\n", __func__);
+ CAM_DBG(CAM_ISP, "Calling RUP");
/* REG UPDATE */
- cam_ife_hw_mgr_handle_rup_for_camif_hw_res(ife_hwr_mgr_ctx,
+ cam_ife_hw_mgr_handle_reg_update(ife_hwr_mgr_ctx,
evt_payload_priv);
- CDBG("%s: Calling EPOCH\n", __func__);
+ CAM_DBG(CAM_ISP, "Calling EPOCH");
/* EPOCH IRQ */
cam_ife_hw_mgr_handle_epoch_for_camif_hw_res(ife_hwr_mgr_ctx,
evt_payload_priv);
@@ -2891,15 +3041,14 @@
int i, j;
struct cam_iommu_handle cdm_handles;
- CDBG("%s: Enter\n", __func__);
+ CAM_DBG(CAM_ISP, "Enter");
memset(&g_ife_hw_mgr, 0, sizeof(g_ife_hw_mgr));
mutex_init(&g_ife_hw_mgr.ctx_mutex);
if (CAM_IFE_HW_NUM_MAX != CAM_IFE_CSID_HW_NUM_MAX) {
- pr_err("%s: Fatal, CSID num is different then IFE num!\n",
- __func__);
+ CAM_ERR(CAM_ISP, "Fatal, CSID num is different than IFE num!");
goto end;
}
@@ -2915,15 +3064,16 @@
j++;
g_ife_hw_mgr.cdm_reg_map[i] = &soc_info->reg_map[0];
- CDBG("reg_map: mem base = 0x%llx, cam_base = 0x%llx\n",
- (uint64_t) soc_info->reg_map[0].mem_base,
+ CAM_DBG(CAM_ISP,
+ "reg_map: mem base = %pK cam_base = 0x%llx",
+ (void __iomem *)soc_info->reg_map[0].mem_base,
(uint64_t) soc_info->reg_map[0].mem_cam_base);
} else {
g_ife_hw_mgr.cdm_reg_map[i] = NULL;
}
}
if (j == 0) {
- pr_err("%s: no valid IFE HW!\n", __func__);
+ CAM_ERR(CAM_ISP, "no valid IFE HW!");
goto end;
}
@@ -2934,7 +3084,7 @@
j++;
}
if (!j) {
- pr_err("%s: no valid IFE CSID HW!\n", __func__);
+ CAM_ERR(CAM_ISP, "no valid IFE CSID HW!");
goto end;
}
@@ -2953,26 +3103,27 @@
*/
if (cam_smmu_get_handle("ife",
&g_ife_hw_mgr.mgr_common.img_iommu_hdl)) {
- pr_err("%s: Can not get iommu handle.\n", __func__);
+ CAM_ERR(CAM_ISP, "Can not get iommu handle.");
goto end;
}
if (cam_smmu_ops(g_ife_hw_mgr.mgr_common.img_iommu_hdl,
CAM_SMMU_ATTACH)) {
- pr_err("%s: Attach iommu handle failed.\n", __func__);
+ CAM_ERR(CAM_ISP, "Attach iommu handle failed.");
goto end;
}
- CDBG("got iommu_handle=%d\n", g_ife_hw_mgr.mgr_common.img_iommu_hdl);
+ CAM_DBG(CAM_ISP, "got iommu_handle=%d",
+ g_ife_hw_mgr.mgr_common.img_iommu_hdl);
g_ife_hw_mgr.mgr_common.img_iommu_hdl_secure = -1;
if (!cam_cdm_get_iommu_handle("ife", &cdm_handles)) {
- CDBG("Successfully acquired the CDM iommu handles\n");
+ CAM_DBG(CAM_ISP, "Successfully acquired the CDM iommu handles");
g_ife_hw_mgr.mgr_common.cmd_iommu_hdl = cdm_handles.non_secure;
g_ife_hw_mgr.mgr_common.cmd_iommu_hdl_secure =
cdm_handles.secure;
} else {
- CDBG("Failed to acquire the CDM iommu handles\n");
+ CAM_DBG(CAM_ISP, "Failed to acquire the CDM iommu handles");
g_ife_hw_mgr.mgr_common.cmd_iommu_hdl = -1;
g_ife_hw_mgr.mgr_common.cmd_iommu_hdl_secure = -1;
}
@@ -3007,7 +3158,7 @@
sizeof(struct cam_cdm_bl_cmd))), GFP_KERNEL);
if (!g_ife_hw_mgr.ctx_pool[i].cdm_cmd) {
rc = -ENOMEM;
- pr_err("Allocation Failed for cdm command\n");
+ CAM_ERR(CAM_ISP, "Allocation Failed for cdm command");
goto end;
}
@@ -3028,7 +3179,7 @@
&g_ife_hw_mgr.workq, CRM_WORKQ_USAGE_NON_IRQ);
if (rc < 0) {
- pr_err("%s: Unable to create worker\n", __func__);
+ CAM_ERR(CAM_ISP, "Unable to create worker");
goto end;
}
@@ -3043,8 +3194,9 @@
hw_mgr_intf->hw_release = cam_ife_mgr_release_hw;
hw_mgr_intf->hw_prepare_update = cam_ife_mgr_prepare_hw_update;
hw_mgr_intf->hw_config = cam_ife_mgr_config_hw;
+ hw_mgr_intf->hw_cmd = cam_ife_mgr_cmd;
- CDBG("%s: Exit\n", __func__);
+ CAM_DBG(CAM_ISP, "Exit");
return 0;
end:
if (rc) {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
index 174d2ce..6dfdb21 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
@@ -108,6 +108,7 @@
* @epoch_cnt epoch count value per core, used for dual VFE
* @overflow_pending flat to specify the overflow is pending for the
* context
+ * @is_rdi_only_context flag to specify the context has only rdi resource
*/
struct cam_ife_hw_mgr_ctx {
struct list_head list;
@@ -138,6 +139,7 @@
uint32_t sof_cnt[CAM_IFE_HW_NUM_MAX];
uint32_t epoch_cnt[CAM_IFE_HW_NUM_MAX];
atomic_t overflow_pending;
+ uint32_t is_rdi_only_context;
};
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.c
index 2e23222..2f18895 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.c
@@ -12,6 +12,7 @@
#include "cam_isp_hw_mgr_intf.h"
#include "cam_ife_hw_mgr.h"
+#include "cam_debug_util.h"
int cam_isp_hw_mgr_init(struct device_node *of_node,
@@ -26,7 +27,7 @@
if (strnstr(compat_str, "ife", strlen(compat_str)))
rc = cam_ife_hw_mgr_init(hw_mgr);
else {
- pr_err("%s: Invalid ISP hw type\n", __func__);
+ CAM_ERR(CAM_ISP, "Invalid ISP hw type");
rc = -EINVAL;
}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/Makefile
index 19da180..b60e7de 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/Makefile
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/Makefile
@@ -6,6 +6,7 @@
ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include
ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include
ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_tasklet_util.o cam_isp_packet_parser.o
obj-$(CONFIG_SPECTRA_CAMERA) += irq_controller/
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
index 3c72279..0a0eecb 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
@@ -15,130 +15,13 @@
#include "cam_mem_mgr.h"
#include "cam_vfe_hw_intf.h"
#include "cam_isp_packet_parser.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
-
-static int cam_isp_get_cmd_mem_addr(int handle, uint32_t **buf_addr,
- size_t *len)
-{
- int rc = 0;
- uint64_t kmd_buf_addr = 0;
-
- rc = cam_mem_get_cpu_buf(handle, &kmd_buf_addr, len);
- if (rc) {
- pr_err("%s:%d Unable to get the virtual address rc:%d\n",
- __func__, __LINE__, rc);
- rc = -ENOMEM;
- } else {
- if (kmd_buf_addr && *len)
- *buf_addr = (uint32_t *)kmd_buf_addr;
- else {
- pr_err("%s:%d Invalid addr and length :%ld\n",
- __func__, __LINE__, *len);
- rc = -ENOMEM;
- }
- }
- return rc;
-}
-
-static int cam_isp_validate_cmd_desc(
- struct cam_cmd_buf_desc *cmd_desc)
-{
- if (cmd_desc->length > cmd_desc->size ||
- (cmd_desc->mem_handle <= 0)) {
- pr_err("%s:%d invalid cmd arg %d %d %d %d\n",
- __func__, __LINE__, cmd_desc->offset,
- cmd_desc->length, cmd_desc->mem_handle,
- cmd_desc->size);
- return -EINVAL;
- }
-
- return 0;
-}
-
-int cam_isp_validate_packet(struct cam_packet *packet)
-{
- if (!packet)
- return -EINVAL;
-
- CDBG("%s:%d num cmd buf:%d num of io config:%d kmd buf index:%d\n",
- __func__, __LINE__, packet->num_cmd_buf,
- packet->num_io_configs, packet->kmd_cmd_buf_index);
-
- if (packet->kmd_cmd_buf_index >= packet->num_cmd_buf ||
- (!packet->header.size) ||
- packet->cmd_buf_offset > packet->header.size ||
- packet->io_configs_offset > packet->header.size) {
- pr_err("%s:%d invalid packet:%d %d %d %d %d\n",
- __func__, __LINE__, packet->kmd_cmd_buf_index,
- packet->num_cmd_buf, packet->cmd_buf_offset,
- packet->io_configs_offset, packet->header.size);
- return -EINVAL;
- }
-
- CDBG("%s:%d exit\n", __func__, __LINE__);
- return 0;
-}
-
-int cam_isp_get_kmd_buffer(struct cam_packet *packet,
- struct cam_isp_kmd_buf_info *kmd_buf)
-{
- int rc = 0;
- size_t len = 0;
- struct cam_cmd_buf_desc *cmd_desc;
- uint32_t *cpu_addr;
-
- if (!packet || !kmd_buf) {
- pr_err("%s:%d Invalid arg\n", __func__, __LINE__);
- rc = -EINVAL;
- return rc;
- }
-
- /* Take first command descriptor and add offset to it for kmd*/
- cmd_desc = (struct cam_cmd_buf_desc *) ((uint8_t *)
- &packet->payload + packet->cmd_buf_offset);
- cmd_desc += packet->kmd_cmd_buf_index;
-
- CDBG("%s:%d enter\n", __func__, __LINE__);
- rc = cam_isp_validate_cmd_desc(cmd_desc);
- if (rc)
- return rc;
-
- CDBG("%s:%d enter\n", __func__, __LINE__);
- rc = cam_isp_get_cmd_mem_addr(cmd_desc->mem_handle, &cpu_addr,
- &len);
- if (rc)
- return rc;
-
- if (len < cmd_desc->size) {
- pr_err("%s:%d invalid memory len:%ld and cmd desc size:%d\n",
- __func__, __LINE__, len, cmd_desc->size);
- return -EINVAL;
- }
-
- cpu_addr += cmd_desc->offset/4 + packet->kmd_cmd_buf_offset/4;
- CDBG("%s:%d total size %d, cmd size: %d, KMD buffer size: %d\n",
- __func__, __LINE__, cmd_desc->size, cmd_desc->length,
- cmd_desc->size - cmd_desc->length);
- CDBG("%s:%d: handle 0x%x, cmd offset %d, kmd offset %d, addr 0x%pK\n",
- __func__, __LINE__, cmd_desc->mem_handle, cmd_desc->offset,
- packet->kmd_cmd_buf_offset, cpu_addr);
-
- kmd_buf->cpu_addr = cpu_addr;
- kmd_buf->handle = cmd_desc->mem_handle;
- kmd_buf->offset = cmd_desc->offset + packet->kmd_cmd_buf_offset;
- kmd_buf->size = cmd_desc->size - cmd_desc->length;
- kmd_buf->used_bytes = 0;
-
- return rc;
-}
+#include "cam_debug_util.h"
int cam_isp_add_change_base(
struct cam_hw_prepare_update_args *prepare,
struct list_head *res_list_isp_src,
uint32_t base_idx,
- struct cam_isp_kmd_buf_info *kmd_buf_info)
+ struct cam_kmd_buf_info *kmd_buf_info)
{
int rc = -EINVAL;
struct cam_ife_hw_mgr_res *hw_mgr_res;
@@ -152,9 +35,8 @@
/* Max one hw entries required for each base */
if (num_ent + 1 >= prepare->max_hw_update_entries) {
- pr_err("%s:%d Insufficient HW entries :%d %d\n",
- __func__, __LINE__, num_ent,
- prepare->max_hw_update_entries);
+ CAM_ERR(CAM_ISP, "Insufficient HW entries :%d %d",
+ num_ent, prepare->max_hw_update_entries);
return -EINVAL;
}
@@ -220,8 +102,8 @@
((uint8_t *)&prepare->packet->payload +
prepare->packet->cmd_buf_offset);
- CDBG("%s:%d split id = %d, number of command buffers:%d\n", __func__,
- __LINE__, split_id, prepare->packet->num_cmd_buf);
+ CAM_DBG(CAM_ISP, "split id = %d, number of command buffers:%d",
+ split_id, prepare->packet->num_cmd_buf);
for (i = 0; i < prepare->packet->num_cmd_buf; i++) {
if (!cmd_desc[i].length)
@@ -229,19 +111,18 @@
/* One hw entry space required for left or right or common */
if (num_ent + 1 >= prepare->max_hw_update_entries) {
- pr_err("%s:%d Insufficient HW entries :%d %d\n",
- __func__, __LINE__, num_ent,
- prepare->max_hw_update_entries);
+ CAM_ERR(CAM_ISP, "Insufficient HW entries :%d %d",
+ num_ent, prepare->max_hw_update_entries);
return -EINVAL;
}
- rc = cam_isp_validate_cmd_desc(&cmd_desc[i]);
+ rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]);
if (rc)
return rc;
cmd_meta_data = cmd_desc[i].meta_data;
- CDBG("%s:%d meta type: %d, split_id: %d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "meta type: %d, split_id: %d",
cmd_meta_data, split_id);
switch (cmd_meta_data) {
@@ -288,8 +169,8 @@
num_ent++;
break;
default:
- pr_err("%s:%d invalid cdm command meta data %d\n",
- __func__, __LINE__, cmd_meta_data);
+ CAM_ERR(CAM_ISP, "invalid cdm command meta data %d",
+ cmd_meta_data);
return -EINVAL;
}
}
@@ -304,7 +185,7 @@
int iommu_hdl,
struct cam_hw_prepare_update_args *prepare,
uint32_t base_idx,
- struct cam_isp_kmd_buf_info *kmd_buf_info,
+ struct cam_kmd_buf_info *kmd_buf_info,
struct cam_ife_hw_mgr_res *res_list_isp_out,
uint32_t size_isp_out,
bool fill_fence)
@@ -331,33 +212,31 @@
/* Max one hw entries required for each base */
if (prepare->num_hw_update_entries + 1 >=
prepare->max_hw_update_entries) {
- pr_err("%s:%d Insufficient HW entries :%d %d\n",
- __func__, __LINE__, prepare->num_hw_update_entries,
+ CAM_ERR(CAM_ISP, "Insufficient HW entries :%d %d",
+ prepare->num_hw_update_entries,
prepare->max_hw_update_entries);
return -EINVAL;
}
for (i = 0; i < prepare->packet->num_io_configs; i++) {
- CDBG("%s:%d ======= io config idx %d ============\n",
- __func__, __LINE__, i);
- CDBG("%s:%d resource_type:%d fence:%d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "======= io config idx %d ============", i);
+ CAM_DBG(CAM_ISP, "resource_type:%d fence:%d",
io_cfg[i].resource_type, io_cfg[i].fence);
- CDBG("%s:%d format: %d\n", __func__, __LINE__,
- io_cfg[i].format);
- CDBG("%s:%d direction %d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "format: %d", io_cfg[i].format);
+ CAM_DBG(CAM_ISP, "direction %d",
io_cfg[i].direction);
if (io_cfg[i].direction == CAM_BUF_OUTPUT) {
res_id_out = io_cfg[i].resource_type & 0xFF;
if (res_id_out >= size_isp_out) {
- pr_err("%s:%d invalid out restype:%x\n",
- __func__, __LINE__,
+ CAM_ERR(CAM_ISP, "invalid out restype:%x",
io_cfg[i].resource_type);
return -EINVAL;
}
- CDBG("%s:%d configure output io with fill fence %d\n",
- __func__, __LINE__, fill_fence);
+ CAM_DBG(CAM_ISP,
+ "configure output io with fill fence %d",
+ fill_fence);
if (fill_fence) {
if (num_out_buf <
prepare->max_out_map_entries) {
@@ -368,8 +247,7 @@
sync_id = io_cfg[i].fence;
num_out_buf++;
} else {
- pr_err("%s:%d ln_out:%d max_ln:%d\n",
- __func__, __LINE__,
+ CAM_ERR(CAM_ISP, "ln_out:%d max_ln:%d",
num_out_buf,
prepare->max_out_map_entries);
return -EINVAL;
@@ -378,15 +256,15 @@
hw_mgr_res = &res_list_isp_out[res_id_out];
if (hw_mgr_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT) {
- pr_err("%s:%d io res id:%d not valid\n",
- __func__, __LINE__,
+ CAM_ERR(CAM_ISP, "io res id:%d not valid",
io_cfg[i].resource_type);
return -EINVAL;
}
} else if (io_cfg[i].direction == CAM_BUF_INPUT) {
res_id_in = io_cfg[i].resource_type & 0xFF;
- CDBG("%s:%d configure input io with fill fence %d\n",
- __func__, __LINE__, fill_fence);
+ CAM_DBG(CAM_ISP,
+ "configure input io with fill fence %d",
+ fill_fence);
if (fill_fence) {
if (num_in_buf < prepare->max_in_map_entries) {
prepare->in_map_entries[num_in_buf].
@@ -397,8 +275,7 @@
io_cfg[i].fence;
num_in_buf++;
} else {
- pr_err("%s:%d ln_in:%d imax_ln:%d\n",
- __func__, __LINE__,
+ CAM_ERR(CAM_ISP, "ln_in:%d imax_ln:%d",
num_in_buf,
prepare->max_in_map_entries);
return -EINVAL;
@@ -406,13 +283,12 @@
}
continue;
} else {
- pr_err("%s:%d Invalid io config direction :%d\n",
- __func__, __LINE__,
+ CAM_ERR(CAM_ISP, "Invalid io config direction :%d",
io_cfg[i].direction);
return -EINVAL;
}
- CDBG("%s:%d setup mem io\n", __func__, __LINE__);
+ CAM_DBG(CAM_ISP, "setup mem io");
for (j = 0; j < CAM_ISP_HW_SPLIT_MAX; j++) {
if (!hw_mgr_res->hw_res[j])
continue;
@@ -422,9 +298,9 @@
res = hw_mgr_res->hw_res[j];
if (res->res_id != io_cfg[i].resource_type) {
- pr_err("%s:%d wm err res id:%d io res id:%d\n",
- __func__, __LINE__, res->res_id,
- io_cfg[i].resource_type);
+ CAM_ERR(CAM_ISP,
+ "wm err res id:%d io res id:%d",
+ res->res_id, io_cfg[i].resource_type);
return -EINVAL;
}
@@ -439,14 +315,16 @@
io_cfg[i].mem_handle[plane_id],
iommu_hdl, &io_addr[plane_id], &size);
if (rc) {
- pr_err("%s:%d no io addr for plane%d\n",
- __func__, __LINE__, plane_id);
+ CAM_ERR(CAM_ISP,
+ "no io addr for plane%d",
+ plane_id);
rc = -ENOMEM;
return rc;
}
if (io_addr[plane_id] >> 32) {
- pr_err("Invalid mapped address\n");
+ CAM_ERR(CAM_ISP,
+ "Invalid mapped address");
rc = -EINVAL;
return rc;
}
@@ -454,13 +332,13 @@
/* need to update with offset */
io_addr[plane_id] +=
io_cfg[i].offsets[plane_id];
- CDBG("%s: get io_addr for plane %d: 0x%llx\n",
- __func__, plane_id,
- io_addr[plane_id]);
+ CAM_DBG(CAM_ISP,
+ "get io_addr for plane %d: 0x%llx",
+ plane_id, io_addr[plane_id]);
}
if (!plane_id) {
- pr_err("%s:%d No valid planes for res%d\n",
- __func__, __LINE__, res->res_id);
+ CAM_ERR(CAM_ISP, "No valid planes for res%d",
+ res->res_id);
rc = -ENOMEM;
return rc;
}
@@ -471,8 +349,9 @@
(kmd_buf_info->used_bytes +
io_cfg_used_bytes);
} else {
- pr_err("%s:%d no free kmd memory for base %d\n",
- __func__, __LINE__, base_idx);
+ CAM_ERR(CAM_ISP,
+ "no free kmd memory for base %d",
+ base_idx);
rc = -ENOMEM;
return rc;
}
@@ -485,8 +364,8 @@
update_buf.num_buf = plane_id;
update_buf.io_cfg = &io_cfg[i];
- CDBG("%s:%d: cmd buffer 0x%pK, size %d\n", __func__,
- __LINE__, update_buf.cdm.cmd_buf_addr,
+ CAM_DBG(CAM_ISP, "cmd buffer 0x%pK, size %d",
+ update_buf.cdm.cmd_buf_addr,
update_buf.cdm.size);
rc = res->hw_intf->hw_ops.process_cmd(
res->hw_intf->hw_priv,
@@ -494,8 +373,8 @@
sizeof(struct cam_isp_hw_get_buf_update));
if (rc) {
- pr_err("%s:%d get buf cmd error:%d\n",
- __func__, __LINE__, res->res_id);
+ CAM_ERR(CAM_ISP, "get buf cmd error:%d",
+ res->res_id);
rc = -ENOMEM;
return rc;
}
@@ -503,7 +382,7 @@
}
}
- CDBG("%s: io_cfg_used_bytes %d, fill_fence %d\n", __func__,
+ CAM_DBG(CAM_ISP, "io_cfg_used_bytes %d, fill_fence %d",
io_cfg_used_bytes, fill_fence);
if (io_cfg_used_bytes) {
/* Update the HW entries */
@@ -533,7 +412,7 @@
struct cam_hw_prepare_update_args *prepare,
struct list_head *res_list_isp_src,
uint32_t base_idx,
- struct cam_isp_kmd_buf_info *kmd_buf_info)
+ struct cam_kmd_buf_info *kmd_buf_info)
{
int rc = -EINVAL;
struct cam_isp_resource_node *res;
@@ -546,8 +425,7 @@
/* Max one hw entries required for each base */
if (prepare->num_hw_update_entries + 1 >=
prepare->max_hw_update_entries) {
- pr_err("%s:%d Insufficient HW entries :%d %d\n",
- __func__, __LINE__,
+ CAM_ERR(CAM_ISP, "Insufficient HW entries :%d %d",
prepare->num_hw_update_entries,
prepare->max_hw_update_entries);
return -EINVAL;
@@ -572,9 +450,8 @@
(kmd_buf_info->used_bytes +
reg_update_size);
} else {
- pr_err("%s:%d no free mem %d %d %d\n",
- __func__, __LINE__, base_idx,
- kmd_buf_info->size,
+ CAM_ERR(CAM_ISP, "no free mem %d %d %d",
+ base_idx, kmd_buf_info->size,
kmd_buf_info->used_bytes +
reg_update_size);
rc = -EINVAL;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c
index ecc71b3..4a7eff8 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c
@@ -10,8 +10,6 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
-
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
@@ -19,9 +17,7 @@
#include <linux/ratelimit.h>
#include "cam_tasklet_util.h"
#include "cam_irq_controller.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
#define CAM_TASKLETQ_SIZE 256
@@ -95,14 +91,14 @@
*tasklet_cmd = NULL;
if (!atomic_read(&tasklet->tasklet_active)) {
- pr_err_ratelimited("Tasklet is not active!\n");
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "Tasklet is not active!\n");
rc = -EPIPE;
return rc;
}
spin_lock_irqsave(&tasklet->tasklet_lock, flags);
if (list_empty(&tasklet->free_cmd_list)) {
- pr_err_ratelimited("No more free tasklet cmd!\n");
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "No more free tasklet cmd!\n");
rc = -ENODEV;
goto spin_unlock;
} else {
@@ -162,22 +158,22 @@
*tasklet_cmd = NULL;
if (!atomic_read(&tasklet->tasklet_active)) {
- pr_err("Tasklet is not active!\n");
+ CAM_ERR(CAM_ISP, "Tasklet is not active!");
rc = -EPIPE;
return rc;
}
- CDBG("Dequeue before lock.\n");
+ CAM_DBG(CAM_ISP, "Dequeue before lock.");
spin_lock_irqsave(&tasklet->tasklet_lock, flags);
if (list_empty(&tasklet->used_cmd_list)) {
- CDBG("End of list reached. Exit\n");
+ CAM_DBG(CAM_ISP, "End of list reached. Exit");
rc = -ENODEV;
goto spin_unlock;
} else {
*tasklet_cmd = list_first_entry(&tasklet->used_cmd_list,
struct cam_tasklet_queue_cmd, list);
list_del_init(&(*tasklet_cmd)->list);
- CDBG("Dequeue Successful\n");
+ CAM_DBG(CAM_ISP, "Dequeue Successful");
}
spin_unlock:
@@ -197,14 +193,14 @@
int rc;
if (!bottom_half) {
- pr_err("NULL bottom half\n");
+ CAM_ERR(CAM_ISP, "NULL bottom half");
return -EINVAL;
}
rc = cam_tasklet_get_cmd(tasklet, &tasklet_cmd);
if (tasklet_cmd) {
- CDBG("%s: Enqueue tasklet cmd\n", __func__);
+ CAM_DBG(CAM_ISP, "Enqueue tasklet cmd");
tasklet_cmd->bottom_half_handler = bottom_half_handler;
tasklet_cmd->payload = evt_payload_priv;
spin_lock_irqsave(&tasklet->tasklet_lock, flags);
@@ -213,7 +209,7 @@
spin_unlock_irqrestore(&tasklet->tasklet_lock, flags);
tasklet_schedule(&tasklet->tasklet);
} else {
- pr_err("%s: tasklet cmd is NULL!\n", __func__);
+ CAM_ERR(CAM_ISP, "tasklet cmd is NULL!");
}
return rc;
@@ -229,7 +225,8 @@
tasklet = kzalloc(sizeof(struct cam_tasklet_info), GFP_KERNEL);
if (!tasklet) {
- CDBG("Error! Unable to allocate memory for tasklet");
+ CAM_DBG(CAM_ISP,
+ "Error! Unable to allocate memory for tasklet");
*tasklet_info = NULL;
return -ENOMEM;
}
@@ -271,7 +268,8 @@
struct cam_tasklet_queue_cmd *tasklet_cmd_temp;
if (atomic_read(&tasklet->tasklet_active)) {
- pr_err("Tasklet already active. idx = %d\n", tasklet->index);
+ CAM_ERR(CAM_ISP, "Tasklet already active. idx = %d",
+ tasklet->index);
return -EBUSY;
}
atomic_set(&tasklet->tasklet_active, 1);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h
index 9730fc2..7ac729f 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h
@@ -18,47 +18,7 @@
#include "cam_isp_hw_mgr_intf.h"
#include "cam_ife_hw_mgr.h"
#include "cam_hw_intf.h"
-
-/**
- * @brief KMD scratch buffer information
- *
- * @handle: Memory handle
- * @cpu_addr: Cpu address
- * @offset: Offset from the start of the buffer
- * @size: Size of the buffer
- * @used_bytes: Used memory in bytes
- *
- */
-struct cam_isp_kmd_buf_info {
- int handle;
- uint32_t *cpu_addr;
- uint32_t offset;
- uint32_t size;
- uint32_t used_bytes;
-};
-
-
-/**
- * @brief Validate the packet
- *
- * @packet: Packet to be validated
- *
- * @return: 0 for success
- * -EINVAL for Fail
- */
-int cam_isp_validate_packet(struct cam_packet *packet);
-
-/**
- * @brief Get the kmd buffer from the packet command descriptor
- *
- * @packet: Packet data
- * @kmd_buf: Extracted the KMD buffer information
- *
- * @return: 0 for success
- * -EINVAL for Fail
- */
-int cam_isp_get_kmd_buffer(struct cam_packet *packet,
- struct cam_isp_kmd_buf_info *kmd_buf_info);
+#include "cam_packet_util.h"
/**
* @brief Add change base in the hw entries list
@@ -77,7 +37,7 @@
struct cam_hw_prepare_update_args *prepare,
struct list_head *res_list_isp_src,
uint32_t base_idx,
- struct cam_isp_kmd_buf_info *kmd_buf_info);
+ struct cam_kmd_buf_info *kmd_buf_info);
/**
* @brief Add command buffer in the HW entries list for given
@@ -112,7 +72,7 @@
int cam_isp_add_io_buffers(int iommu_hdl,
struct cam_hw_prepare_update_args *prepare,
uint32_t base_idx,
- struct cam_isp_kmd_buf_info *kmd_buf_info,
+ struct cam_kmd_buf_info *kmd_buf_info,
struct cam_ife_hw_mgr_res *res_list_isp_out,
uint32_t size_isp_out,
bool fill_fence);
@@ -134,7 +94,7 @@
struct cam_hw_prepare_update_args *prepare,
struct list_head *res_list_isp_src,
uint32_t base_idx,
- struct cam_isp_kmd_buf_info *kmd_buf_info);
+ struct cam_kmd_buf_info *kmd_buf_info);
#endif /*_CAM_ISP_HW_PARSER_H */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c
index bf4d174..2341b38 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c
@@ -10,16 +10,12 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
-
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include "cam_io_util.h"
#include "cam_irq_controller.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
/**
* struct cam_irq_evt_handler:
@@ -143,21 +139,21 @@
if (!register_info->num_registers || !register_info->irq_reg_set ||
!name || !mem_base) {
- pr_err("Invalid parameters\n");
+ CAM_ERR(CAM_ISP, "Invalid parameters");
rc = -EINVAL;
return rc;
}
controller = kzalloc(sizeof(struct cam_irq_controller), GFP_KERNEL);
if (!controller) {
- CDBG("Failed to allocate IRQ Controller\n");
+ CAM_DBG(CAM_ISP, "Failed to allocate IRQ Controller");
return -ENOMEM;
}
controller->irq_register_arr = kzalloc(register_info->num_registers *
sizeof(struct cam_irq_register_obj), GFP_KERNEL);
if (!controller->irq_register_arr) {
- CDBG("Failed to allocate IRQ register Arr\n");
+ CAM_DBG(CAM_ISP, "Failed to allocate IRQ register Arr");
rc = -ENOMEM;
goto reg_alloc_error;
}
@@ -165,7 +161,7 @@
controller->irq_status_arr = kzalloc(register_info->num_registers *
sizeof(uint32_t), GFP_KERNEL);
if (!controller->irq_status_arr) {
- CDBG("Failed to allocate IRQ status Arr\n");
+ CAM_DBG(CAM_ISP, "Failed to allocate IRQ status Arr");
rc = -ENOMEM;
goto status_alloc_error;
}
@@ -174,14 +170,14 @@
kzalloc(register_info->num_registers * sizeof(uint32_t),
GFP_KERNEL);
if (!controller->th_payload.evt_status_arr) {
- CDBG("Failed to allocate BH payload bit mask Arr\n");
+ CAM_DBG(CAM_ISP, "Failed to allocate BH payload bit mask Arr");
rc = -ENOMEM;
goto evt_mask_alloc_error;
}
controller->name = name;
- CDBG("num_registers: %d\n", register_info->num_registers);
+ CAM_DBG(CAM_ISP, "num_registers: %d", register_info->num_registers);
for (i = 0; i < register_info->num_registers; i++) {
controller->irq_register_arr[i].index = i;
controller->irq_register_arr[i].mask_reg_offset =
@@ -190,11 +186,11 @@
register_info->irq_reg_set[i].clear_reg_offset;
controller->irq_register_arr[i].status_reg_offset =
register_info->irq_reg_set[i].status_reg_offset;
- CDBG("i %d mask_reg_offset: 0x%x\n", i,
+ CAM_DBG(CAM_ISP, "i %d mask_reg_offset: 0x%x", i,
controller->irq_register_arr[i].mask_reg_offset);
- CDBG("i %d clear_reg_offset: 0x%x\n", i,
+ CAM_DBG(CAM_ISP, "i %d clear_reg_offset: 0x%x", i,
controller->irq_register_arr[i].clear_reg_offset);
- CDBG("i %d status_reg_offset: 0x%x\n", i,
+ CAM_DBG(CAM_ISP, "i %d status_reg_offset: 0x%x", i,
controller->irq_register_arr[i].status_reg_offset);
}
controller->num_registers = register_info->num_registers;
@@ -202,11 +198,11 @@
controller->global_clear_offset = register_info->global_clear_offset;
controller->mem_base = mem_base;
- CDBG("global_clear_bitmask: 0x%x\n",
+ CAM_DBG(CAM_ISP, "global_clear_bitmask: 0x%x",
controller->global_clear_bitmask);
- CDBG("global_clear_offset: 0x%x\n",
+ CAM_DBG(CAM_ISP, "global_clear_offset: 0x%x",
controller->global_clear_offset);
- CDBG("mem_base: 0x%llx\n", (uint64_t)controller->mem_base);
+ CAM_DBG(CAM_ISP, "mem_base: %pK", (void __iomem *)controller->mem_base);
INIT_LIST_HEAD(&controller->evt_handler_list_head);
for (i = 0; i < CAM_IRQ_PRIORITY_MAX; i++)
@@ -246,19 +242,21 @@
unsigned long flags;
if (!controller || !handler_priv || !evt_bit_mask_arr) {
- pr_err("Invalid params: ctlr=%pK handler_priv=%pK bit_mask_arr = %pK\n",
+ CAM_ERR(CAM_ISP,
+ "Inval params: ctlr=%pK hdl_priv=%pK bit_mask_arr=%pK",
controller, handler_priv, evt_bit_mask_arr);
return -EINVAL;
}
if (!top_half_handler) {
- pr_err("Missing top half handler\n");
+ CAM_ERR(CAM_ISP, "Missing top half handler");
return -EINVAL;
}
if (bottom_half_handler &&
(!bottom_half || !bottom_half_enqueue_func)) {
- pr_err("Invalid params: bh_handler=%pK bh=%pK bh_enq_f=%pK\n",
+ CAM_ERR(CAM_ISP,
+ "Invalid params: bh_handler=%pK bh=%pK bh_enq_f=%pK",
bottom_half_handler,
bottom_half,
bottom_half_enqueue_func);
@@ -266,29 +264,21 @@
}
if (priority >= CAM_IRQ_PRIORITY_MAX) {
- pr_err("Invalid priority=%u, max=%u\n", priority,
+ CAM_ERR(CAM_ISP, "Invalid priority=%u, max=%u", priority,
CAM_IRQ_PRIORITY_MAX);
return -EINVAL;
}
- if (sizeof(evt_bit_mask_arr) !=
- sizeof(uint32_t) * controller->num_registers) {
- pr_err("Invalid evt_mask size = %lu expected = %lu\n",
- sizeof(evt_bit_mask_arr),
- sizeof(uint32_t) * controller->num_registers);
- return -EINVAL;
- }
-
evt_handler = kzalloc(sizeof(struct cam_irq_evt_handler), GFP_KERNEL);
if (!evt_handler) {
- CDBG("Error allocating hlist_node\n");
+ CAM_DBG(CAM_ISP, "Error allocating hlist_node");
return -ENOMEM;
}
evt_handler->evt_bit_mask_arr = kzalloc(sizeof(uint32_t) *
controller->num_registers, GFP_KERNEL);
if (!evt_handler->evt_bit_mask_arr) {
- CDBG("Error allocating hlist_node\n");
+ CAM_DBG(CAM_ISP, "Error allocating hlist_node");
rc = -ENOMEM;
goto free_evt_handler;
}
@@ -306,6 +296,8 @@
evt_handler->bottom_half = bottom_half;
evt_handler->bottom_half_enqueue_func = bottom_half_enqueue_func;
evt_handler->index = controller->hdl_idx++;
+
+ /* Avoid rollover to negative values */
if (controller->hdl_idx > 0x3FFFFFFF)
controller->hdl_idx = 1;
@@ -352,7 +344,7 @@
list_for_each_entry_safe(evt_handler, evt_handler_temp,
&controller->evt_handler_list_head, list_node) {
if (evt_handler->index == handle) {
- CDBG("unsubscribe item %d\n", handle);
+ CAM_DBG(CAM_ISP, "unsubscribe item %d", handle);
list_del_init(&evt_handler->list_node);
list_del_init(&evt_handler->th_list_node);
found = 1;
@@ -435,7 +427,7 @@
int rc = -EINVAL;
int i;
- CDBG("Enter\n");
+ CAM_DBG(CAM_ISP, "Enter");
if (list_empty(th_list_head))
return;
@@ -447,7 +439,7 @@
if (!is_irq_match)
continue;
- CDBG("match found\n");
+ CAM_DBG(CAM_ISP, "match found");
cam_irq_th_payload_init(th_payload);
th_payload->handler_priv = evt_handler->handler_priv;
@@ -468,7 +460,8 @@
(void *)th_payload);
if (!rc && evt_handler->bottom_half_handler) {
- CDBG("Enqueuing bottom half\n");
+ CAM_DBG(CAM_ISP, "Enqueuing bottom half for %s",
+ controller->name);
if (evt_handler->bottom_half_enqueue_func) {
evt_handler->bottom_half_enqueue_func(
evt_handler->bottom_half,
@@ -479,7 +472,7 @@
}
}
- CDBG("Exit\n");
+ CAM_DBG(CAM_ISP, "Exit");
}
irqreturn_t cam_irq_controller_handle_irq(int irq_num, void *priv)
@@ -492,6 +485,8 @@
if (!controller)
return IRQ_NONE;
+ CAM_DBG(CAM_ISP, "locking controller %pK name %s rw_lock %pK",
+ controller, controller->name, &controller->rw_lock);
read_lock(&controller->rw_lock);
for (i = 0; i < controller->num_registers; i++) {
controller->irq_status_arr[i] = cam_io_r_mb(
@@ -500,30 +495,34 @@
cam_io_w_mb(controller->irq_status_arr[i],
controller->mem_base +
controller->irq_register_arr[i].clear_reg_offset);
- CDBG("Read irq status%d = 0x%x\n", i,
+ CAM_DBG(CAM_ISP, "Read irq status%d (0x%x) = 0x%x", i,
+ controller->irq_register_arr[i].status_reg_offset,
controller->irq_status_arr[i]);
for (j = 0; j < CAM_IRQ_PRIORITY_MAX; j++) {
if (controller->irq_register_arr[i].
top_half_enable_mask[j] &
controller->irq_status_arr[i])
need_th_processing[j] = true;
- CDBG("i %d j %d need_th_processing = %d\n",
+ CAM_DBG(CAM_ISP,
+ "i %d j %d need_th_processing = %d",
i, j, need_th_processing[j]);
}
}
read_unlock(&controller->rw_lock);
+ CAM_DBG(CAM_ISP, "unlocked controller %pK name %s rw_lock %pK",
+ controller, controller->name, &controller->rw_lock);
- CDBG("Status Registers read Successful\n");
+ CAM_DBG(CAM_ISP, "Status Registers read Successful");
if (controller->global_clear_offset)
cam_io_w_mb(controller->global_clear_bitmask,
controller->mem_base + controller->global_clear_offset);
- CDBG("Status Clear done\n");
+ CAM_DBG(CAM_ISP, "Status Clear done");
for (i = 0; i < CAM_IRQ_PRIORITY_MAX; i++) {
if (need_th_processing[i]) {
- CDBG("%s: Invoke TH processing\n", __func__);
+ CAM_DBG(CAM_ISP, "Invoke TH processing");
cam_irq_controller_th_processing(controller,
&controller->th_list_head[i]);
}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h
index 9f2204b4..0480cd3 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h
@@ -50,32 +50,32 @@
/**
* struct cam_isp_hw_sof_event_data - Event payload for CAM_HW_EVENT_SOF
*
- * @timestamp: Timestamp for the buf done event
+ * @timestamp: Time stamp for the sof event
*
*/
struct cam_isp_hw_sof_event_data {
- struct timeval timestamp;
+ uint64_t timestamp;
};
/**
* struct cam_isp_hw_reg_update_event_data - Event payload for
* CAM_HW_EVENT_REG_UPDATE
*
- * @timestamp: Timestamp for the buf done event
+ * @timestamp: Time stamp for the reg update event
*
*/
struct cam_isp_hw_reg_update_event_data {
- struct timeval timestamp;
+ uint64_t timestamp;
};
/**
* struct cam_isp_hw_epoch_event_data - Event payload for CAM_HW_EVENT_EPOCH
*
- * @timestamp: Timestamp for the buf done event
+ * @timestamp: Time stamp for the epoch event
*
*/
struct cam_isp_hw_epoch_event_data {
- struct timeval timestamp;
+ uint64_t timestamp;
};
/**
@@ -90,31 +90,53 @@
uint32_t num_handles;
uint32_t resource_handle[
CAM_NUM_OUT_PER_COMP_IRQ_MAX];
- struct timeval timestamp;
+ uint64_t timestamp;
};
/**
* struct cam_isp_hw_eof_event_data - Event payload for CAM_HW_EVENT_EOF
*
- * @timestamp: Timestamp for the buf done event
+ * @timestamp: Timestamp for the eof event
*
*/
struct cam_isp_hw_eof_event_data {
- struct timeval timestamp;
+ uint64_t timestamp;
};
/**
* struct cam_isp_hw_error_event_data - Event payload for CAM_HW_EVENT_ERROR
*
- * @error_type: error type for the error event
- * @timestamp: Timestamp for the buf done event
+ * @error_type: Error type for the error event
+ * @timestamp: Timestamp for the error event
*
*/
struct cam_isp_hw_error_event_data {
uint32_t error_type;
- struct timeval timestamp;
+ uint64_t timestamp;
};
+/* enum cam_isp_hw_mgr_command - Hardware manager command type */
+enum cam_isp_hw_mgr_command {
+ CAM_ISP_HW_MGR_CMD_IS_RDI_ONLY_CONTEXT,
+ CAM_ISP_HW_MGR_CMD_MAX,
+};
+
+/**
+ * struct cam_isp_hw_cmd_args - Payload for hw manager command
+ *
+ * @ctxt_to_hw_map: HW context from the acquire
+ * @cmd_type HW command type
+ * @get_context Get context type information
+ */
+struct cam_isp_hw_cmd_args {
+ void *ctxt_to_hw_map;
+ uint32_t cmd_type;
+ union {
+ uint32_t is_rdi_only_context;
+ } u;
+};
+
+
/**
* cam_isp_hw_mgr_init()
*
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/Makefile
index 1615d21f..4c6745c 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/Makefile
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/Makefile
@@ -1,5 +1,6 @@
ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include
ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include
ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
index f09fdc7..a2f773e 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
@@ -19,10 +19,7 @@
#include "cam_isp_hw.h"
#include "cam_soc_util.h"
#include "cam_io_util.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
-
+#include "cam_debug_util.h"
/* Timeout value in msec */
#define IFE_CSID_TIMEOUT 1000
@@ -62,79 +59,127 @@
return rc;
}
-static int cam_ife_csid_get_format(uint32_t res_id,
- uint32_t decode_fmt, uint32_t *path_fmt, uint32_t *plain_fmt)
+static int cam_ife_csid_get_format(uint32_t input_fmt,
+ uint32_t *path_fmt)
{
int rc = 0;
- if (res_id >= CAM_IFE_PIX_PATH_RES_RDI_0 &&
- res_id <= CAM_IFE_PIX_PATH_RES_RDI_3) {
- *path_fmt = 0xf;
- return 0;
- }
-
- switch (decode_fmt) {
+ switch (input_fmt) {
case CAM_FORMAT_MIPI_RAW_6:
*path_fmt = 0;
- *plain_fmt = 0;
break;
case CAM_FORMAT_MIPI_RAW_8:
*path_fmt = 1;
- *plain_fmt = 0;
break;
case CAM_FORMAT_MIPI_RAW_10:
*path_fmt = 2;
- *plain_fmt = 1;
break;
case CAM_FORMAT_MIPI_RAW_12:
*path_fmt = 3;
- *plain_fmt = 1;
break;
case CAM_FORMAT_MIPI_RAW_14:
*path_fmt = 4;
- *plain_fmt = 1;
break;
case CAM_FORMAT_MIPI_RAW_16:
*path_fmt = 5;
- *plain_fmt = 1;
break;
case CAM_FORMAT_MIPI_RAW_20:
*path_fmt = 6;
- *plain_fmt = 2;
break;
case CAM_FORMAT_DPCM_10_6_10:
*path_fmt = 7;
- *plain_fmt = 1;
break;
case CAM_FORMAT_DPCM_10_8_10:
*path_fmt = 8;
- *plain_fmt = 1;
break;
case CAM_FORMAT_DPCM_12_6_12:
*path_fmt = 9;
- *plain_fmt = 1;
break;
case CAM_FORMAT_DPCM_12_8_12:
*path_fmt = 0xA;
- *plain_fmt = 1;
break;
case CAM_FORMAT_DPCM_14_8_14:
*path_fmt = 0xB;
- *plain_fmt = 1;
break;
case CAM_FORMAT_DPCM_14_10_14:
*path_fmt = 0xC;
- *plain_fmt = 1;
break;
default:
- pr_err("%s:%d:CSID:%d un supported format\n",
- __func__, __LINE__, decode_fmt);
+ CAM_ERR(CAM_ISP, "CSID:%d un supported format",
+ input_fmt);
rc = -EINVAL;
}
return rc;
}
+static int cam_ife_csid_get_rdi_format(uint32_t input_fmt,
+ uint32_t output_fmt, uint32_t *path_fmt, uint32_t *plain_fmt)
+{
+ int rc = 0;
+
+ CAM_DBG(CAM_ISP, "input format:%d output format:%d",
+ input_fmt, output_fmt);
+
+ switch (output_fmt) {
+ case CAM_FORMAT_MIPI_RAW_6:
+ case CAM_FORMAT_MIPI_RAW_8:
+ case CAM_FORMAT_MIPI_RAW_10:
+ case CAM_FORMAT_MIPI_RAW_12:
+ case CAM_FORMAT_MIPI_RAW_14:
+ case CAM_FORMAT_MIPI_RAW_16:
+ case CAM_FORMAT_MIPI_RAW_20:
+ case CAM_FORMAT_DPCM_10_6_10:
+ case CAM_FORMAT_DPCM_10_8_10:
+ case CAM_FORMAT_DPCM_12_6_12:
+ case CAM_FORMAT_DPCM_12_8_12:
+ case CAM_FORMAT_DPCM_14_8_14:
+ case CAM_FORMAT_DPCM_14_10_14:
+ *path_fmt = 0xF;
+ *plain_fmt = 0;
+ break;
+
+ case CAM_FORMAT_PLAIN8:
+ rc = cam_ife_csid_get_format(input_fmt, path_fmt);
+ if (rc)
+ goto error;
+
+ *plain_fmt = 0;
+ break;
+ case CAM_FORMAT_PLAIN16_8:
+ case CAM_FORMAT_PLAIN16_10:
+ case CAM_FORMAT_PLAIN16_12:
+ case CAM_FORMAT_PLAIN16_14:
+ case CAM_FORMAT_PLAIN16_16:
+ rc = cam_ife_csid_get_format(input_fmt, path_fmt);
+ if (rc)
+ goto error;
+
+ *plain_fmt = 1;
+ break;
+ case CAM_FORMAT_PLAIN32_20:
+ rc = cam_ife_csid_get_format(input_fmt, path_fmt);
+ if (rc)
+ goto error;
+
+ *plain_fmt = 2;
+ break;
+ default:
+ *path_fmt = 0xF;
+ *plain_fmt = 0;
+ break;
+ }
+
+ CAM_DBG(CAM_ISP, "path format value:%d plain format value:%d",
+ *path_fmt, *plain_fmt);
+
+ return 0;
+error:
+ return rc;
+
+}
+
+
static int cam_ife_csid_cid_get(struct cam_ife_csid_hw *csid_hw,
struct cam_isp_resource_node **res, int32_t vc, uint32_t dt,
uint32_t res_type)
@@ -166,8 +211,8 @@
if (i == CAM_IFE_CSID_CID_RES_MAX) {
if (res_type == CAM_ISP_IFE_IN_RES_TPG) {
- pr_err("%s:%d:CSID:%d TPG CID not available\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx);
+ CAM_ERR(CAM_ISP, "CSID:%d TPG CID not available",
+ csid_hw->hw_intf->hw_idx);
rc = -EINVAL;
}
@@ -182,8 +227,7 @@
csid_hw->cid_res[j].res_state =
CAM_ISP_RESOURCE_STATE_RESERVED;
*res = &csid_hw->cid_res[j];
- CDBG("%s:%d:CSID:%d CID %d allocated\n",
- __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "CSID:%d CID %d allocated",
csid_hw->hw_intf->hw_idx,
csid_hw->cid_res[j].res_id);
break;
@@ -191,8 +235,8 @@
}
if (j == CAM_IFE_CSID_CID_RES_MAX) {
- pr_err("%s:%d:CSID:%d Free cid is not available\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx);
+ CAM_ERR(CAM_ISP, "CSID:%d Free cid is not available",
+ csid_hw->hw_intf->hw_idx);
rc = -EINVAL;
}
}
@@ -213,13 +257,13 @@
csid_reg = csid_hw->csid_info->csid_reg;
if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
- pr_err("%s:%d:CSID:%d Invalid HW State:%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid HW State:%d",
+ csid_hw->hw_intf->hw_idx,
csid_hw->hw_info->hw_state);
return -EINVAL;
}
- CDBG("%s:%d:CSID:%d Csid reset\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "CSID:%d Csid reset",
csid_hw->hw_intf->hw_idx);
init_completion(&csid_hw->csid_top_complete);
@@ -287,14 +331,12 @@
soc_info->reg_map[0].mem_base +
csid_reg->cmn_reg->csid_rst_strobes_addr);
- CDBG("%s:%d: Waiting for reset complete from irq handler\n",
- __func__, __LINE__);
-
+ CAM_DBG(CAM_ISP, " Waiting for reset complete from irq handler");
rc = wait_for_completion_timeout(&csid_hw->csid_top_complete,
msecs_to_jiffies(IFE_CSID_TIMEOUT));
if (rc <= 0) {
- pr_err("%s:%d:CSID:%d reset completion in fail rc = %d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx, rc);
+ CAM_ERR(CAM_ISP, "CSID:%d reset completion in fail rc = %d",
+ csid_hw->hw_intf->hw_idx, rc);
if (rc == 0)
rc = -ETIMEDOUT;
} else {
@@ -331,26 +373,26 @@
res = reset->node_res;
if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
- pr_err("%s:%d:CSID:%d Invalid hw state :%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid hw state :%d",
+ csid_hw->hw_intf->hw_idx,
csid_hw->hw_info->hw_state);
return -EINVAL;
}
if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
- CDBG("%s:%d:CSID:%d Invalid res id%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
+ CAM_DBG(CAM_ISP, "CSID:%d Invalid res id%d",
+ csid_hw->hw_intf->hw_idx, res->res_id);
rc = -EINVAL;
goto end;
}
- CDBG("%s:%d:CSID:%d resource:%d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "CSID:%d resource:%d",
csid_hw->hw_intf->hw_idx, res->res_id);
if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) {
if (!csid_reg->ipp_reg) {
- pr_err("%s:%d:CSID:%d IPP not supported :%d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP, "CSID:%d IPP not supported :%d",
+ csid_hw->hw_intf->hw_idx,
res->res_id);
return -EINVAL;
}
@@ -368,8 +410,8 @@
} else {
id = res->res_id;
if (!csid_reg->rdi_reg[id]) {
- pr_err("%s:%d:CSID:%d RDI res not supported :%d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP, "CSID:%d RDI res not supported :%d",
+ csid_hw->hw_intf->hw_idx,
res->res_id);
return -EINVAL;
}
@@ -401,8 +443,8 @@
rc = wait_for_completion_timeout(complete,
msecs_to_jiffies(IFE_CSID_TIMEOUT));
if (rc <= 0) {
- pr_err("%s:%d CSID:%d Res id %d fail rc = %d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP, "CSID:%d Res id %d fail rc = %d",
+ csid_hw->hw_intf->hw_idx,
res->res_id, rc);
if (rc == 0)
rc = -ETIMEDOUT;
@@ -423,8 +465,9 @@
int rc = 0;
struct cam_ife_csid_cid_data *cid_data;
- CDBG("%s:%d CSID:%d res_sel:%d Lane type:%d lane_num:%d dt:%d vc:%d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_DBG(CAM_ISP,
+ "CSID:%d res_sel:%d Lane type:%d lane_num:%d dt:%d vc:%d",
+ csid_hw->hw_intf->hw_idx,
cid_reserv->in_port->res_type,
cid_reserv->in_port->lane_type,
cid_reserv->in_port->lane_num,
@@ -432,8 +475,8 @@
cid_reserv->in_port->vc);
if (cid_reserv->in_port->res_type >= CAM_ISP_IFE_IN_RES_MAX) {
- pr_err("%s:%d:CSID:%d Invalid phy sel %d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid phy sel %d",
+ csid_hw->hw_intf->hw_idx,
cid_reserv->in_port->res_type);
rc = -EINVAL;
goto end;
@@ -441,8 +484,8 @@
if (cid_reserv->in_port->lane_type >= CAM_ISP_LANE_TYPE_MAX &&
cid_reserv->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) {
- pr_err("%s:%d:CSID:%d Invalid lane type %d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid lane type %d",
+ csid_hw->hw_intf->hw_idx,
cid_reserv->in_port->lane_type);
rc = -EINVAL;
goto end;
@@ -451,8 +494,8 @@
if ((cid_reserv->in_port->lane_type == CAM_ISP_LANE_TYPE_DPHY &&
cid_reserv->in_port->lane_num > 4) &&
cid_reserv->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) {
- pr_err("%s:%d:CSID:%d Invalid lane num %d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid lane num %d",
+ csid_hw->hw_intf->hw_idx,
cid_reserv->in_port->lane_num);
rc = -EINVAL;
goto end;
@@ -460,8 +503,8 @@
if ((cid_reserv->in_port->lane_type == CAM_ISP_LANE_TYPE_CPHY &&
cid_reserv->in_port->lane_num > 3) &&
cid_reserv->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) {
- pr_err("%s:%d: CSID:%d Invalid lane type %d & num %d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP, " CSID:%d Invalid lane type %d & num %d",
+ csid_hw->hw_intf->hw_idx,
cid_reserv->in_port->lane_type,
cid_reserv->in_port->lane_num);
rc = -EINVAL;
@@ -471,8 +514,8 @@
/* CSID CSI2 v2.0 supports 31 vc */
if (cid_reserv->in_port->dt > 0x3f ||
cid_reserv->in_port->vc > 0x1f) {
- pr_err("%s:%d:CSID:%d Invalid vc:%d dt %d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid vc:%d dt %d",
+ csid_hw->hw_intf->hw_idx,
cid_reserv->in_port->vc, cid_reserv->in_port->dt);
rc = -EINVAL;
goto end;
@@ -481,8 +524,8 @@
if (cid_reserv->in_port->res_type == CAM_ISP_IFE_IN_RES_TPG && (
(cid_reserv->in_port->format < CAM_FORMAT_MIPI_RAW_8 &&
cid_reserv->in_port->format > CAM_FORMAT_MIPI_RAW_16))) {
- pr_err("%s:%d: CSID:%d Invalid tpg decode fmt %d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP, " CSID:%d Invalid tpg decode fmt %d",
+ csid_hw->hw_intf->hw_idx,
cid_reserv->in_port->format);
rc = -EINVAL;
goto end;
@@ -538,8 +581,7 @@
csid_hw->csi2_rx_cfg.phy_sel = 0;
if (cid_reserv->in_port->format >
CAM_FORMAT_MIPI_RAW_16) {
- pr_err("%s:%d: Wrong TPG format\n", __func__,
- __LINE__);
+ CAM_ERR(CAM_ISP, " Wrong TPG format");
rc = -EINVAL;
goto end;
}
@@ -562,8 +604,9 @@
cid_reserv->node_res = &csid_hw->cid_res[0];
csid_hw->csi2_reserve_cnt++;
- CDBG("%s:%d:CSID:%d CID :%d resource acquired successfully\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_DBG(CAM_ISP,
+ "CSID:%d CID :%d resource acquired successfully",
+ csid_hw->hw_intf->hw_idx,
cid_reserv->node_res->res_id);
} else {
rc = cam_ife_csid_cid_get(csid_hw, &cid_reserv->node_res,
@@ -572,14 +615,13 @@
/* if success then increment the reserve count */
if (!rc) {
if (csid_hw->csi2_reserve_cnt == UINT_MAX) {
- pr_err("%s:%d:CSID%d reserve cnt reached max\n",
- __func__, __LINE__,
+ CAM_ERR(CAM_ISP,
+ "CSID%d reserve cnt reached max",
csid_hw->hw_intf->hw_idx);
rc = -EINVAL;
} else {
csid_hw->csi2_reserve_cnt++;
- CDBG("%s:%d:CSID:%d CID:%d acquired\n",
- __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "CSID:%d CID:%d acquired",
csid_hw->hw_intf->hw_idx,
cid_reserv->node_res->res_id);
}
@@ -601,8 +643,8 @@
/* CSID CSI2 v2.0 supports 31 vc */
if (reserve->in_port->dt > 0x3f || reserve->in_port->vc > 0x1f ||
(reserve->sync_mode >= CAM_ISP_HW_SYNC_MAX)) {
- pr_err("%s:%d:CSID:%d Invalid vc:%d dt %d mode:%d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid vc:%d dt %d mode:%d",
+ csid_hw->hw_intf->hw_idx,
reserve->in_port->vc, reserve->in_port->dt,
reserve->sync_mode);
rc = -EINVAL;
@@ -613,8 +655,9 @@
case CAM_IFE_PIX_PATH_RES_IPP:
if (csid_hw->ipp_res.res_state !=
CAM_ISP_RESOURCE_STATE_AVAILABLE) {
- CDBG("%s:%d:CSID:%d IPP resource not available %d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_DBG(CAM_ISP,
+ "CSID:%d IPP resource not available %d",
+ csid_hw->hw_intf->hw_idx,
csid_hw->ipp_res.res_state);
rc = -EINVAL;
goto end;
@@ -622,8 +665,8 @@
if (cam_ife_csid_is_ipp_format_supported(
reserve->in_port->format)) {
- pr_err("%s:%d:CSID:%d res id:%d un support format %d\n",
- __func__, __LINE__,
+ CAM_ERR(CAM_ISP,
+ "CSID:%d res id:%d un support format %d",
csid_hw->hw_intf->hw_idx, reserve->res_id,
reserve->in_port->format);
rc = -EINVAL;
@@ -632,8 +675,8 @@
/* assign the IPP resource */
res = &csid_hw->ipp_res;
- CDBG("%s:%d:CSID:%d IPP resource:%d acquired successfully\n",
- __func__, __LINE__,
+ CAM_DBG(CAM_ISP,
+ "CSID:%d IPP resource:%d acquired successfully",
csid_hw->hw_intf->hw_idx, res->res_id);
break;
@@ -643,23 +686,24 @@
case CAM_IFE_PIX_PATH_RES_RDI_3:
if (csid_hw->rdi_res[reserve->res_id].res_state !=
CAM_ISP_RESOURCE_STATE_AVAILABLE) {
- CDBG("%s:%d:CSID:%d RDI:%d resource not available %d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_DBG(CAM_ISP,
+ "CSID:%d RDI:%d resource not available %d",
+ csid_hw->hw_intf->hw_idx,
reserve->res_id,
csid_hw->rdi_res[reserve->res_id].res_state);
rc = -EINVAL;
goto end;
} else {
res = &csid_hw->rdi_res[reserve->res_id];
- CDBG("%s:%d:CSID:%d RDI resource:%d acquire success\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_DBG(CAM_ISP,
+ "CSID:%d RDI resource:%d acquire success",
+ csid_hw->hw_intf->hw_idx,
res->res_id);
}
break;
default:
- pr_err("%s:%d:CSID:%d Invalid res id:%d\n",
- __func__, __LINE__,
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid res id:%d",
csid_hw->hw_intf->hw_idx, reserve->res_id);
rc = -EINVAL;
goto end;
@@ -668,6 +712,18 @@
res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
path_data = (struct cam_ife_csid_path_cfg *)res->res_priv;
+ /* store the output format for RDI */
+ switch (reserve->res_id) {
+ case CAM_IFE_PIX_PATH_RES_RDI_0:
+ case CAM_IFE_PIX_PATH_RES_RDI_1:
+ case CAM_IFE_PIX_PATH_RES_RDI_2:
+ case CAM_IFE_PIX_PATH_RES_RDI_3:
+ path_data->output_fmt = reserve->out_port->format;
+ break;
+ default:
+ break;
+ }
+
path_data->cid = reserve->cid;
path_data->decode_fmt = reserve->in_port->format;
path_data->master_idx = reserve->master_idx;
@@ -711,31 +767,30 @@
/* overflow check before increment */
if (csid_hw->hw_info->open_count == UINT_MAX) {
- pr_err("%s:%d:CSID:%d Open count reached max\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx);
+ CAM_ERR(CAM_ISP, "CSID:%d Open count reached max",
+ csid_hw->hw_intf->hw_idx);
return -EINVAL;
}
/* Increment ref Count */
csid_hw->hw_info->open_count++;
if (csid_hw->hw_info->open_count > 1) {
- CDBG("%s:%d: CSID hw has already been enabled\n",
- __func__, __LINE__);
+ CAM_DBG(CAM_ISP, "CSID hw has already been enabled");
return rc;
}
- CDBG("%s:%d:CSID:%d init CSID HW\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "CSID:%d init CSID HW",
csid_hw->hw_intf->hw_idx);
rc = cam_ife_csid_enable_soc_resources(soc_info);
if (rc) {
- pr_err("%s:%d:CSID:%d Enable SOC failed\n", __func__, __LINE__,
+ CAM_ERR(CAM_ISP, "CSID:%d Enable SOC failed",
csid_hw->hw_intf->hw_idx);
goto err;
}
- CDBG("%s:%d:CSID:%d enable top irq interrupt\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "CSID:%d enable top irq interrupt",
csid_hw->hw_intf->hw_idx);
csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_UP;
@@ -745,8 +800,8 @@
rc = cam_ife_csid_global_reset(csid_hw);
if (rc) {
- pr_err("%s:%d CSID:%d csid_reset fail rc = %d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx, rc);
+ CAM_ERR(CAM_ISP, "CSID:%d csid_reset fail rc = %d",
+ csid_hw->hw_intf->hw_idx, rc);
rc = -ETIMEDOUT;
goto disable_soc;
}
@@ -756,7 +811,7 @@
* SW register reset also reset the mask irq, so poll the irq status
* to check the reset complete.
*/
- CDBG("%s:%d:CSID:%d Reset Software registers\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "CSID:%d Reset Software registers",
csid_hw->hw_intf->hw_idx);
cam_io_w_mb(csid_reg->cmn_reg->csid_rst_stb_sw_all,
@@ -768,8 +823,7 @@
status, (status & 0x1) == 0x1,
CAM_IFE_CSID_TIMEOUT_SLEEP_US, CAM_IFE_CSID_TIMEOUT_ALL_US);
if (rc < 0) {
- pr_err("%s:%d: software register reset timeout.....\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "software register reset timeout.....");
rc = -ETIMEDOUT;
goto disable_soc;
}
@@ -801,7 +855,7 @@
val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
csid_reg->cmn_reg->csid_hw_version_addr);
- CDBG("%s:%d:CSID:%d CSID HW version: 0x%x\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "CSID:%d CSID HW version: 0x%x",
csid_hw->hw_intf->hw_idx, val);
return 0;
@@ -830,7 +884,7 @@
soc_info = &csid_hw->hw_info->soc_info;
csid_reg = csid_hw->csid_info->csid_reg;
- CDBG("%s:%d:CSID:%d De-init CSID HW\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "CSID:%d De-init CSID HW",
csid_hw->hw_intf->hw_idx);
/*disable the top IRQ interrupt */
@@ -839,8 +893,8 @@
rc = cam_ife_csid_disable_soc_resources(soc_info);
if (rc)
- pr_err("%s:%d:CSID:%d Disable CSID SOC failed\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx);
+ CAM_ERR(CAM_ISP, "CSID:%d Disable CSID SOC failed",
+ csid_hw->hw_intf->hw_idx);
csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN;
return rc;
@@ -856,8 +910,8 @@
csid_hw->tpg_start_cnt++;
if (csid_hw->tpg_start_cnt == 1) {
/*Enable the TPG */
- CDBG("%s:%d CSID:%d start CSID TPG\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx);
+ CAM_DBG(CAM_ISP, "CSID:%d start CSID TPG",
+ csid_hw->hw_intf->hw_idx);
soc_info = &csid_hw->hw_info->soc_info;
{
@@ -865,44 +919,37 @@
uint32_t i;
uint32_t base = 0x600;
- CDBG("%s:%d: ================== TPG ===============\n",
- __func__, __LINE__);
+ CAM_DBG(CAM_ISP, "================ TPG ============");
for (i = 0; i < 16; i++) {
val = cam_io_r_mb(
soc_info->reg_map[0].mem_base +
base + i * 4);
- CDBG("%s:%d reg 0x%x = 0x%x\n",
- __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "reg 0x%x = 0x%x",
(base + i*4), val);
}
- CDBG("%s:%d: ================== IPP ===============\n",
- __func__, __LINE__);
+ CAM_DBG(CAM_ISP, "================ IPP =============");
base = 0x200;
for (i = 0; i < 10; i++) {
val = cam_io_r_mb(
soc_info->reg_map[0].mem_base +
base + i * 4);
- CDBG("%s:%d reg 0x%x = 0x%x\n",
- __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "reg 0x%x = 0x%x",
(base + i*4), val);
}
- CDBG("%s:%d: ================== RX ===============\n",
- __func__, __LINE__);
+ CAM_DBG(CAM_ISP, "================ RX =============");
base = 0x100;
for (i = 0; i < 5; i++) {
val = cam_io_r_mb(
soc_info->reg_map[0].mem_base +
base + i * 4);
- CDBG("%s:%d reg 0x%x = 0x%x\n",
- __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "reg 0x%x = 0x%x",
(base + i*4), val);
}
}
- CDBG("%s:%d: =============== TPG control ===============\n",
- __func__, __LINE__);
+ CAM_DBG(CAM_ISP, "============ TPG control ============");
val = (4 << 20);
val |= (0x80 << 8);
val |= (((csid_hw->csi2_rx_cfg.lane_num - 1) & 0x3) << 4);
@@ -912,8 +959,7 @@
csid_tpg_ctrl_addr);
val = cam_io_r_mb(soc_info->reg_map[0].mem_base + 0x600);
- CDBG("%s:%d reg 0x%x = 0x%x\n", __func__, __LINE__,
- 0x600, val);
+ CAM_DBG(CAM_ISP, "reg 0x%x = 0x%x", 0x600, val);
}
return 0;
@@ -934,8 +980,8 @@
/* disable the TPG */
if (!csid_hw->tpg_start_cnt) {
- CDBG("%s:%d CSID:%d stop CSID TPG\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx);
+ CAM_DBG(CAM_ISP, "CSID:%d stop CSID TPG",
+ csid_hw->hw_intf->hw_idx);
/*stop the TPG */
cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
@@ -956,8 +1002,8 @@
csid_reg = csid_hw->csid_info->csid_reg;
soc_info = &csid_hw->hw_info->soc_info;
- CDBG("%s:%d CSID:%d TPG config\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx);
+ CAM_DBG(CAM_ISP, "CSID:%d TPG config",
+ csid_hw->hw_intf->hw_idx);
/* configure one DT, infinite frames */
val = (0 << 16) | (1 << 10) | CAM_IFE_CSID_TPG_VC_VAL;
@@ -1012,13 +1058,13 @@
csid_reg = csid_hw->csid_info->csid_reg;
soc_info = &csid_hw->hw_info->soc_info;
- CDBG("%s:%d CSID:%d count:%d config csi2 rx\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx, csid_hw->csi2_cfg_cnt);
+ CAM_DBG(CAM_ISP, "CSID:%d count:%d config csi2 rx",
+ csid_hw->hw_intf->hw_idx, csid_hw->csi2_cfg_cnt);
/* overflow check before increment */
if (csid_hw->csi2_cfg_cnt == UINT_MAX) {
- pr_err("%s:%d:CSID:%d Open count reached max\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx);
+ CAM_ERR(CAM_ISP, "CSID:%d Open count reached max",
+ csid_hw->hw_intf->hw_idx);
return -EINVAL;
}
@@ -1033,7 +1079,7 @@
val = (csid_hw->csi2_rx_cfg.lane_num - 1) |
(csid_hw->csi2_rx_cfg.lane_cfg << 4) |
(csid_hw->csi2_rx_cfg.lane_type << 24);
- val |= csid_hw->csi2_rx_cfg.phy_sel & 0x3;
+ val |= (csid_hw->csi2_rx_cfg.phy_sel & 0x3) << 20;
cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
csid_reg->csi2_reg->csid_csi2_rx_cfg0_addr);
@@ -1081,15 +1127,15 @@
struct cam_hw_soc_info *soc_info;
if (res->res_id >= CAM_IFE_CSID_CID_MAX) {
- pr_err("%s:%d CSID:%d Invalid res id :%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid res id :%d",
+ csid_hw->hw_intf->hw_idx, res->res_id);
return -EINVAL;
}
csid_reg = csid_hw->csid_info->csid_reg;
soc_info = &csid_hw->hw_info->soc_info;
- CDBG("%s:%d CSID:%d cnt : %d Disable csi2 rx\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx, csid_hw->csi2_cfg_cnt);
+ CAM_DBG(CAM_ISP, "CSID:%d cnt : %d Disable csi2 rx",
+ csid_hw->hw_intf->hw_idx, csid_hw->csi2_cfg_cnt);
if (csid_hw->csi2_cfg_cnt)
csid_hw->csi2_cfg_cnt--;
@@ -1114,22 +1160,21 @@
struct cam_ife_csid_path_cfg *path_data;
struct cam_ife_csid_reg_offset *csid_reg;
struct cam_hw_soc_info *soc_info;
- uint32_t path_format = 0, plain_format = 0, val = 0;
+ uint32_t path_format = 0, val = 0;
path_data = (struct cam_ife_csid_path_cfg *) res->res_priv;
csid_reg = csid_hw->csid_info->csid_reg;
soc_info = &csid_hw->hw_info->soc_info;
if (!csid_reg->ipp_reg) {
- pr_err("%s:%d CSID:%d IPP:%d is not supported on HW\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP, "CSID:%d IPP:%d is not supported on HW",
+ csid_hw->hw_intf->hw_idx,
res->res_id);
return -EINVAL;
}
- CDBG("%s:%d: Enabled IPP Path.......\n", __func__, __LINE__);
- rc = cam_ife_csid_get_format(res->res_id,
- path_data->decode_fmt, &path_format, &plain_format);
+ CAM_DBG(CAM_ISP, "Enabled IPP Path.......");
+ rc = cam_ife_csid_get_format(path_data->decode_fmt, &path_format);
if (rc)
return rc;
@@ -1150,6 +1195,10 @@
cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
csid_reg->ipp_reg->csid_ipp_cfg0_addr);
+ /* select the post irq sub sample strobe for time stamp capture */
+ cam_io_w_mb(CSID_TIMESTAMP_STB_POST_IRQ, soc_info->reg_map[0].mem_base +
+ csid_reg->ipp_reg->csid_ipp_cfg1_addr);
+
if (path_data->crop_enable) {
val = ((path_data->width +
path_data->start_pixel) & 0xFFFF <<
@@ -1228,15 +1277,16 @@
soc_info = &csid_hw->hw_info->soc_info;
if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW) {
- pr_err("%s:%d:CSID:%d Res type %d res_id:%d in wrong state %d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP,
+ "CSID:%d Res type %d res_id:%d in wrong state %d",
+ csid_hw->hw_intf->hw_idx,
res->res_type, res->res_id, res->res_state);
rc = -EINVAL;
}
if (!csid_reg->ipp_reg) {
- pr_err("%s:%d:CSID:%d IPP %d is not supported on HW\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP, "CSID:%d IPP %d is not supported on HW",
+ csid_hw->hw_intf->hw_idx,
res->res_id);
rc = -EINVAL;
}
@@ -1266,20 +1316,21 @@
soc_info = &csid_hw->hw_info->soc_info;
if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW) {
- pr_err("%s:%d:CSID:%d res type:%d res_id:%d Invalid state%d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP,
+ "CSID:%d res type:%d res_id:%d Invalid state%d",
+ csid_hw->hw_intf->hw_idx,
res->res_type, res->res_id, res->res_state);
return -EINVAL;
}
if (!csid_reg->ipp_reg) {
- pr_err("%s:%d:CSID:%d IPP %d not supported on HW\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP, "CSID:%d IPP %d not supported on HW",
+ csid_hw->hw_intf->hw_idx,
res->res_id);
return -EINVAL;
}
- CDBG("%s:%d: enable IPP path.......\n", __func__, __LINE__);
+ CAM_DBG(CAM_ISP, "enable IPP path.......");
/*Resume at frame boundary */
if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER) {
@@ -1296,8 +1347,7 @@
/* for slave mode, not need to resume for slave device */
/* Enable the required ipp interrupts */
- val = CSID_PATH_INFO_RST_DONE | CSID_PATH_ERROR_FIFO_OVERFLOW|
- CSID_PATH_INFO_INPUT_SOF|CSID_PATH_INFO_INPUT_EOF;
+ val = CSID_PATH_INFO_RST_DONE | CSID_PATH_ERROR_FIFO_OVERFLOW;
cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
@@ -1322,40 +1372,40 @@
soc_info = &csid_hw->hw_info->soc_info;
if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
- CDBG("%s:%d:CSID:%d Invalid res id%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
+ CAM_DBG(CAM_ISP, "CSID:%d Invalid res id%d",
+ csid_hw->hw_intf->hw_idx, res->res_id);
return -EINVAL;
}
if (res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW ||
res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
- CDBG("%s:%d:CSID:%d Res:%d already in stopped state:%d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_DBG(CAM_ISP, "CSID:%d Res:%d already in stopped state:%d",
+ csid_hw->hw_intf->hw_idx,
res->res_id, res->res_state);
return rc;
}
if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) {
- CDBG("%s:%d:CSID:%d Res:%d Invalid state%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx, res->res_id,
+ CAM_DBG(CAM_ISP, "CSID:%d Res:%d Invalid state%d",
+ csid_hw->hw_intf->hw_idx, res->res_id,
res->res_state);
return -EINVAL;
}
if (!csid_reg->ipp_reg) {
- pr_err("%s:%d:CSID:%d IPP%d is not supported on HW\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
+ CAM_ERR(CAM_ISP, "CSID:%d IPP%d is not supported on HW",
+ csid_hw->hw_intf->hw_idx, res->res_id);
return -EINVAL;
}
if (stop_cmd != CAM_CSID_HALT_AT_FRAME_BOUNDARY &&
stop_cmd != CAM_CSID_HALT_IMMEDIATELY) {
- pr_err("%s:%d:CSID:%d un supported stop command:%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx, stop_cmd);
+ CAM_ERR(CAM_ISP, "CSID:%d un supported stop command:%d",
+ csid_hw->hw_intf->hw_idx, stop_cmd);
return -EINVAL;
}
- CDBG("%s:%d CSID:%d res_id:%d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "CSID:%d res_id:%d",
csid_hw->hw_intf->hw_idx, res->res_id);
if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER) {
@@ -1407,13 +1457,13 @@
id = res->res_id;
if (!csid_reg->rdi_reg[id]) {
- pr_err("%s:%d CSID:%d RDI:%d is not supported on HW\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx, id);
+ CAM_ERR(CAM_ISP, "CSID:%d RDI:%d is not supported on HW",
+ csid_hw->hw_intf->hw_idx, id);
return -EINVAL;
}
- rc = cam_ife_csid_get_format(res->res_id,
- path_data->decode_fmt, &path_format, &plain_fmt);
+ rc = cam_ife_csid_get_rdi_format(path_data->decode_fmt,
+ path_data->output_fmt, &path_format, &plain_fmt);
if (rc)
return rc;
@@ -1435,6 +1485,10 @@
cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
+ /* select the post irq sub sample strobe for time stamp capture */
+ cam_io_w_mb(CSID_TIMESTAMP_STB_POST_IRQ, soc_info->reg_map[0].mem_base +
+ csid_reg->rdi_reg[id]->csid_rdi_cfg1_addr);
+
if (path_data->crop_enable) {
val = ((path_data->width +
path_data->start_pixel) & 0xFFFF <<
@@ -1506,8 +1560,8 @@
if (res->res_id > CAM_IFE_PIX_PATH_RES_RDI_3 ||
res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW ||
!csid_reg->rdi_reg[id]) {
- pr_err("%s:%d:CSID:%d Invalid res id%d state:%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx, res->res_id,
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid res id%d state:%d",
+ csid_hw->hw_intf->hw_idx, res->res_id,
res->res_state);
return -EINVAL;
}
@@ -1538,8 +1592,9 @@
if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW ||
res->res_id > CAM_IFE_PIX_PATH_RES_RDI_3 ||
!csid_reg->rdi_reg[id]) {
- pr_err("%s:%d:CSID:%d invalid res type:%d res_id:%d state%d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP,
+ "CSID:%d invalid res type:%d res_id:%d state%d",
+ csid_hw->hw_intf->hw_idx,
res->res_type, res->res_id, res->res_state);
return -EINVAL;
}
@@ -1550,8 +1605,7 @@
csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr);
/* Enable the required RDI interrupts */
- val = (CSID_PATH_INFO_RST_DONE | CSID_PATH_ERROR_FIFO_OVERFLOW|
- CSID_PATH_INFO_INPUT_SOF | CSID_PATH_INFO_INPUT_EOF);
+ val = CSID_PATH_INFO_RST_DONE | CSID_PATH_ERROR_FIFO_OVERFLOW;
cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
@@ -1577,35 +1631,35 @@
if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX ||
!csid_reg->rdi_reg[res->res_id]) {
- CDBG("%s:%d:CSID:%d Invalid res id%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
+ CAM_DBG(CAM_ISP, "CSID:%d Invalid res id%d",
+ csid_hw->hw_intf->hw_idx, res->res_id);
return -EINVAL;
}
if (res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW ||
res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
- CDBG("%s:%d:CSID:%d Res:%d already in stopped state:%d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_DBG(CAM_ISP, "CSID:%d Res:%d already in stopped state:%d",
+ csid_hw->hw_intf->hw_idx,
res->res_id, res->res_state);
return rc;
}
if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) {
- CDBG("%s:%d:CSID:%d Res:%d Invalid res_state%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx, res->res_id,
+ CAM_DBG(CAM_ISP, "CSID:%d Res:%d Invalid res_state%d",
+ csid_hw->hw_intf->hw_idx, res->res_id,
res->res_state);
return -EINVAL;
}
if (stop_cmd != CAM_CSID_HALT_AT_FRAME_BOUNDARY &&
stop_cmd != CAM_CSID_HALT_IMMEDIATELY) {
- pr_err("%s:%d:CSID:%d un supported stop command:%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx, stop_cmd);
+ CAM_ERR(CAM_ISP, "CSID:%d un supported stop command:%d",
+ csid_hw->hw_intf->hw_idx, stop_cmd);
return -EINVAL;
}
- CDBG("%s:%d CSID:%d res_id:%d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "CSID:%d res_id:%d",
csid_hw->hw_intf->hw_idx, res->res_id);
init_completion(&csid_hw->csid_rdin_complete[id]);
@@ -1646,15 +1700,15 @@
if (res->res_type != CAM_ISP_RESOURCE_PIX_PATH ||
res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
- CDBG("%s:%d:CSID:%d Invalid res_type:%d res id%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
+ CAM_DBG(CAM_ISP, "CSID:%d Invalid res_type:%d res id%d",
+ csid_hw->hw_intf->hw_idx, res->res_type,
res->res_id);
return -EINVAL;
}
if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
- pr_err("%s:%d:CSID:%d Invalid dev state :%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid dev state :%d",
+ csid_hw->hw_intf->hw_idx,
csid_hw->hw_info->hw_state);
return -EINVAL;
}
@@ -1698,22 +1752,22 @@
soc_info = &csid_hw->hw_info->soc_info;
if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
- CDBG("%s:%d:CSID:%d Invalid res id%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
+ CAM_DBG(CAM_ISP, "CSID:%d Invalid res id%d",
+ csid_hw->hw_intf->hw_idx, res->res_id);
return -EINVAL;
}
if (res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW ||
res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
- CDBG("%s:%d:CSID:%d Res:%d already in stopped state:%d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_DBG(CAM_ISP, "CSID:%d Res:%d already in stopped state:%d",
+ csid_hw->hw_intf->hw_idx,
res->res_id, res->res_state);
return rc;
}
if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) {
- CDBG("%s:%d:CSID:%d Res:%d Invalid state%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx, res->res_id,
+ CAM_DBG(CAM_ISP, "CSID:%d Res:%d Invalid state%d",
+ csid_hw->hw_intf->hw_idx, res->res_id,
res->res_state);
return -EINVAL;
}
@@ -1726,8 +1780,8 @@
rc = wait_for_completion_timeout(complete,
msecs_to_jiffies(IFE_CSID_TIMEOUT));
if (rc <= 0) {
- pr_err("%s:%d:CSID%d stop at frame boundary failid:%drc:%d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP, "CSID%d stop at frame boundary failid:%drc:%d",
+ csid_hw->hw_intf->hw_idx,
res->res_id, rc);
if (rc == 0)
/* continue even have timeout */
@@ -1766,7 +1820,7 @@
struct cam_ife_csid_reg_offset *csid_reg;
if (!hw_priv || !get_hw_cap_args) {
- pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "CSID: Invalid args");
return -EINVAL;
}
@@ -1781,8 +1835,9 @@
hw_caps->minor_version = csid_reg->cmn_reg->minor_version;
hw_caps->version_incr = csid_reg->cmn_reg->version_incr;
- CDBG("%s:%d:CSID:%d No rdis:%d, no pix:%d, major:%d minor:%d ver :%d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx, hw_caps->no_rdis,
+ CAM_DBG(CAM_ISP,
+ "CSID:%d No rdis:%d, no pix:%d, major:%d minor:%d ver :%d",
+ csid_hw->hw_intf->hw_idx, hw_caps->no_rdis,
hw_caps->no_pix, hw_caps->major_version, hw_caps->minor_version,
hw_caps->version_incr);
@@ -1799,7 +1854,7 @@
if (!hw_priv || !reset_args || (arg_size !=
sizeof(struct cam_csid_reset_cfg_args))) {
- pr_err("%s:%d:CSID:Invalid args\n", __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "CSID:Invalid args");
return -EINVAL;
}
@@ -1815,8 +1870,8 @@
rc = cam_ife_csid_path_reset(csid_hw, reset);
break;
default:
- pr_err("%s:%d:CSID:Invalid reset type :%d\n", __func__,
- __LINE__, reset->reset_type);
+ CAM_ERR(CAM_ISP, "CSID:Invalid reset type :%d",
+ reset->reset_type);
rc = -EINVAL;
break;
}
@@ -1834,7 +1889,7 @@
if (!hw_priv || !reserve_args || (arg_size !=
sizeof(struct cam_csid_hw_reserve_resource_args))) {
- pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "CSID: Invalid args");
return -EINVAL;
}
@@ -1851,8 +1906,8 @@
rc = cam_ife_csid_path_reserve(csid_hw, reserv);
break;
default:
- pr_err("%s:%d:CSID:%d Invalid res type :%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx, reserv->res_type);
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid res type :%d",
+ csid_hw->hw_intf->hw_idx, reserv->res_type);
rc = -EINVAL;
break;
}
@@ -1871,7 +1926,7 @@
if (!hw_priv || !release_args ||
(arg_size != sizeof(struct cam_isp_resource_node))) {
- pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "CSID: Invalid args");
return -EINVAL;
}
@@ -1884,31 +1939,33 @@
res->res_id >= CAM_IFE_CSID_CID_MAX) ||
(res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
res->res_id >= CAM_IFE_PIX_PATH_RES_MAX)) {
- pr_err("%s:%d:CSID:%d Invalid res type:%d res id%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid res type:%d res id%d",
+ csid_hw->hw_intf->hw_idx, res->res_type,
res->res_id);
rc = -EINVAL;
goto end;
}
if (res->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE) {
- CDBG("%s:%d:CSID:%d res type:%d Res %d in released state\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_DBG(CAM_ISP,
+ "CSID:%d res type:%d Res %d in released state",
+ csid_hw->hw_intf->hw_idx,
res->res_type, res->res_id);
goto end;
}
if (res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
- CDBG("%s:%d:CSID:%d res type:%d Res id:%d invalid state:%d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_DBG(CAM_ISP,
+ "CSID:%d res type:%d Res id:%d invalid state:%d",
+ csid_hw->hw_intf->hw_idx,
res->res_type, res->res_id, res->res_state);
rc = -EINVAL;
goto end;
}
- CDBG("%s:%d:CSID:%d res type :%d Resource id:%d\n", __func__, __LINE__,
- csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);
+ CAM_DBG(CAM_ISP, "CSID:%d res type :%d Resource id:%d",
+ csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);
switch (res->res_type) {
case CAM_ISP_RESOURCE_CID:
@@ -1926,8 +1983,8 @@
memset(&csid_hw->csi2_rx_cfg, 0,
sizeof(struct cam_ife_csid_csi2_rx_cfg));
- CDBG("%s:%d:CSID:%d res id :%d cnt:%d reserv cnt:%d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_DBG(CAM_ISP, "CSID:%d res id :%d cnt:%d reserv cnt:%d",
+ csid_hw->hw_intf->hw_idx,
res->res_id, cid_data->cnt, csid_hw->csi2_reserve_cnt);
break;
@@ -1935,8 +1992,8 @@
res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
break;
default:
- pr_err("%s:%d:CSID:%d Invalid res type:%d res id%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid res type:%d res id%d",
+ csid_hw->hw_intf->hw_idx, res->res_type,
res->res_id);
rc = -EINVAL;
break;
@@ -1958,7 +2015,7 @@
if (!hw_priv || !init_args ||
(arg_size != sizeof(struct cam_isp_resource_node))) {
- pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "CSID: Invalid args");
return -EINVAL;
}
@@ -1972,8 +2029,8 @@
res->res_id >= CAM_IFE_CSID_CID_MAX) ||
(res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
res->res_id >= CAM_IFE_PIX_PATH_RES_MAX)) {
- pr_err("%s:%d:CSID:%d Invalid res tpe:%d res id%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid res tpe:%d res id%d",
+ csid_hw->hw_intf->hw_idx, res->res_type,
res->res_id);
rc = -EINVAL;
goto end;
@@ -1982,14 +2039,15 @@
if ((res->res_type == CAM_ISP_RESOURCE_PIX_PATH) &&
(res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED)) {
- pr_err("%s:%d:CSID:%d res type:%d res_id:%dInvalid state %d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP,
+ "CSID:%d res type:%d res_id:%dInvalid state %d",
+ csid_hw->hw_intf->hw_idx,
res->res_type, res->res_id, res->res_state);
rc = -EINVAL;
goto end;
}
- CDBG("%s:%d CSID:%d res type :%d res_id:%d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "CSID:%d res type :%d res_id:%d",
csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);
@@ -2010,8 +2068,8 @@
break;
default:
- pr_err("%s:%d:CSID:%d Invalid res type state %d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid res type state %d",
+ csid_hw->hw_intf->hw_idx,
res->res_type);
break;
}
@@ -2033,7 +2091,7 @@
if (!hw_priv || !deinit_args ||
(arg_size != sizeof(struct cam_isp_resource_node))) {
- pr_err("%s:%d:CSID:Invalid arguments\n", __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "CSID:Invalid arguments");
return -EINVAL;
}
@@ -2043,8 +2101,8 @@
mutex_lock(&csid_hw->hw_info->hw_mutex);
if (res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
- CDBG("%s:%d:CSID:%d Res:%d already in De-init state\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_DBG(CAM_ISP, "CSID:%d Res:%d already in De-init state",
+ csid_hw->hw_intf->hw_idx,
res->res_id);
goto end;
}
@@ -2061,8 +2119,8 @@
break;
default:
- pr_err("%s:%d:CSID:%d Invalid Res type %d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid Res type %d",
+ csid_hw->hw_intf->hw_idx,
res->res_type);
goto end;
}
@@ -2086,7 +2144,7 @@
if (!hw_priv || !start_args ||
(arg_size != sizeof(struct cam_isp_resource_node))) {
- pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "CSID: Invalid args");
return -EINVAL;
}
@@ -2095,19 +2153,18 @@
res = (struct cam_isp_resource_node *)start_args;
csid_reg = csid_hw->csid_info->csid_reg;
- mutex_lock(&csid_hw->hw_info->hw_mutex);
if ((res->res_type == CAM_ISP_RESOURCE_CID &&
res->res_id >= CAM_IFE_CSID_CID_MAX) ||
(res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
res->res_id >= CAM_IFE_PIX_PATH_RES_MAX)) {
- CDBG("%s:%d:CSID:%d Invalid res tpe:%d res id:%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
+ CAM_DBG(CAM_ISP, "CSID:%d Invalid res tpe:%d res id:%d",
+ csid_hw->hw_intf->hw_idx, res->res_type,
res->res_id);
rc = -EINVAL;
goto end;
}
- CDBG("%s:%d CSID:%d res_type :%d res_id:%d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "CSID:%d res_type :%d res_id:%d",
csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);
switch (res->res_type) {
@@ -2122,13 +2179,12 @@
rc = cam_ife_csid_enable_rdi_path(csid_hw, res);
break;
default:
- pr_err("%s:%d:CSID:%d Invalid res type%d\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid res type%d",
+ csid_hw->hw_intf->hw_idx,
res->res_type);
break;
}
end:
- mutex_unlock(&csid_hw->hw_info->hw_mutex);
return rc;
}
@@ -2144,14 +2200,13 @@
if (!hw_priv || !stop_args ||
(arg_size != sizeof(struct cam_csid_hw_stop_args))) {
- pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "CSID: Invalid args");
return -EINVAL;
}
csid_stop = (struct cam_csid_hw_stop_args *) stop_args;
csid_hw_info = (struct cam_hw_info *)hw_priv;
csid_hw = (struct cam_ife_csid_hw *)csid_hw_info->core_info;
- mutex_lock(&csid_hw->hw_info->hw_mutex);
/* Stop the resource first */
for (i = 0; i < csid_stop->num_res; i++) {
res = csid_stop->node_res[i];
@@ -2170,8 +2225,8 @@
break;
default:
- pr_err("%s:%d:CSID:%d Invalid res type%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx,
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid res type%d",
+ csid_hw->hw_intf->hw_idx,
res->res_type);
break;
}
@@ -2183,9 +2238,10 @@
if (res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
csid_stop->stop_cmd == CAM_CSID_HALT_AT_FRAME_BOUNDARY)
rc = cam_ife_csid_res_wait_for_halt(csid_hw, res);
+ else
+ res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
}
- mutex_unlock(&csid_hw->hw_info->hw_mutex);
return rc;
}
@@ -2193,7 +2249,7 @@
static int cam_ife_csid_read(void *hw_priv,
void *read_args, uint32_t arg_size)
{
- pr_err("%s:%d:CSID: un supported\n", __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "CSID: un supported");
return -EINVAL;
}
@@ -2201,7 +2257,7 @@
static int cam_ife_csid_write(void *hw_priv,
void *write_args, uint32_t arg_size)
{
- pr_err("%s:%d:CSID: un supported\n", __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "CSID: un supported");
return -EINVAL;
}
@@ -2213,25 +2269,23 @@
struct cam_hw_info *csid_hw_info;
if (!hw_priv || !cmd_args) {
- pr_err("%s:%d:CSID: Invalid arguments\n", __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "CSID: Invalid arguments");
return -EINVAL;
}
csid_hw_info = (struct cam_hw_info *)hw_priv;
csid_hw = (struct cam_ife_csid_hw *)csid_hw_info->core_info;
- mutex_lock(&csid_hw->hw_info->hw_mutex);
switch (cmd_type) {
case CAM_IFE_CSID_CMD_GET_TIME_STAMP:
rc = cam_ife_csid_get_time_stamp(csid_hw, cmd_args);
break;
default:
- pr_err("%s:%d:CSID:%d un supported cmd:%d\n", __func__,
- __LINE__, csid_hw->hw_intf->hw_idx, cmd_type);
+ CAM_ERR(CAM_ISP, "CSID:%d un supported cmd:%d",
+ csid_hw->hw_intf->hw_idx, cmd_type);
rc = -EINVAL;
break;
}
- mutex_unlock(&csid_hw->hw_info->hw_mutex);
return rc;
@@ -2247,11 +2301,10 @@
csid_hw = (struct cam_ife_csid_hw *)data;
- CDBG("%s:%d:CSID %d IRQ Handling\n", __func__, __LINE__,
- csid_hw->hw_intf->hw_idx);
+ CAM_DBG(CAM_ISP, "CSID %d IRQ Handling", csid_hw->hw_intf->hw_idx);
if (!data) {
- pr_err("%s:%d:CSID: Invalid arguments\n", __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "CSID: Invalid arguments");
return IRQ_HANDLED;
}
@@ -2290,55 +2343,52 @@
cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
csid_reg->cmn_reg->csid_irq_cmd_addr);
- CDBG("%s:%d: irq_status_rx = 0x%x\n", __func__, __LINE__,
- irq_status_rx);
- CDBG("%s:%d: irq_status_ipp = 0x%x\n", __func__, __LINE__,
- irq_status_ipp);
+ CAM_DBG(CAM_ISP, "irq_status_rx = 0x%x", irq_status_rx);
+ CAM_DBG(CAM_ISP, "irq_status_ipp = 0x%x", irq_status_ipp);
if (irq_status_top) {
- CDBG("%s:%d: CSID global reset complete......Exit\n",
- __func__, __LINE__);
+ CAM_DBG(CAM_ISP, "CSID global reset complete......Exit");
complete(&csid_hw->csid_top_complete);
return IRQ_HANDLED;
}
if (irq_status_rx & BIT(csid_reg->csi2_reg->csi2_rst_done_shift_val)) {
- CDBG("%s:%d: csi rx reset complete\n", __func__, __LINE__);
+ CAM_DBG(CAM_ISP, "csi rx reset complete");
complete(&csid_hw->csid_csi2_complete);
}
if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE0_FIFO_OVERFLOW) {
- pr_err_ratelimited("%s:%d:CSID:%d lane 0 over flow\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx);
+ pr_err_ratelimited("CSID:%d lane 0 over flow",
+ csid_hw->hw_intf->hw_idx);
}
if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE1_FIFO_OVERFLOW) {
- pr_err_ratelimited("%s:%d:CSID:%d lane 1 over flow\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx);
+ pr_err_ratelimited("CSID:%d lane 1 over flow",
+ csid_hw->hw_intf->hw_idx);
}
if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE2_FIFO_OVERFLOW) {
- pr_err_ratelimited("%s:%d:CSID:%d lane 2 over flow\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx);
+ pr_err_ratelimited("CSID:%d lane 2 over flow",
+ csid_hw->hw_intf->hw_idx);
}
if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE3_FIFO_OVERFLOW) {
- pr_err_ratelimited("%s:%d:CSID:%d lane 3 over flow\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx);
+ pr_err_ratelimited("CSID:%d lane 3 over flow",
+ csid_hw->hw_intf->hw_idx);
}
if (irq_status_rx & CSID_CSI2_RX_ERROR_TG_FIFO_OVERFLOW) {
- pr_err_ratelimited("%s:%d:CSID:%d TG OVER FLOW\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx);
+ pr_err_ratelimited("CSID:%d TG OVER FLOW",
+ csid_hw->hw_intf->hw_idx);
}
if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_EOT_RECEPTION) {
- pr_err_ratelimited("%s:%d:CSID:%d CPHY_EOT_RECEPTION\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx);
+ pr_err_ratelimited("CSID:%d CPHY_EOT_RECEPTION",
+ csid_hw->hw_intf->hw_idx);
}
if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_SOT_RECEPTION) {
- pr_err_ratelimited("%s:%d:CSID:%d CPHY_SOT_RECEPTION\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx);
+ pr_err_ratelimited("CSID:%d CPHY_SOT_RECEPTION",
+ csid_hw->hw_intf->hw_idx);
}
if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_PH_CRC) {
- pr_err_ratelimited("%s:%d:CSID:%d CPHY_PH_CRC\n",
- __func__, __LINE__, csid_hw->hw_intf->hw_idx);
+ pr_err_ratelimited("CSID:%d CPHY_PH_CRC",
+ csid_hw->hw_intf->hw_idx);
}
/*read the IPP errors */
@@ -2346,25 +2396,23 @@
/* IPP reset done bit */
if (irq_status_ipp &
BIT(csid_reg->cmn_reg->path_rst_done_shift_val)) {
- CDBG("%s%d: CSID IPP reset complete\n",
- __func__, __LINE__);
+ CAM_DBG(CAM_ISP, "CSID IPP reset complete");
complete(&csid_hw->csid_ipp_complete);
}
if (irq_status_ipp & CSID_PATH_INFO_INPUT_SOF)
- CDBG("%s: CSID IPP SOF received\n", __func__);
+ CAM_DBG(CAM_ISP, "CSID IPP SOF received");
if (irq_status_ipp & CSID_PATH_INFO_INPUT_SOL)
- CDBG("%s: CSID IPP SOL received\n", __func__);
+ CAM_DBG(CAM_ISP, "CSID IPP SOL received");
if (irq_status_ipp & CSID_PATH_INFO_INPUT_EOL)
- CDBG("%s: CSID IPP EOL received\n", __func__);
+ CAM_DBG(CAM_ISP, "CSID IPP EOL received");
if (irq_status_ipp & CSID_PATH_INFO_INPUT_EOF)
- CDBG("%s: CSID IPP EOF received\n", __func__);
+ CAM_DBG(CAM_ISP, "CSID IPP EOF received");
if (irq_status_ipp & CSID_PATH_INFO_INPUT_EOF)
complete(&csid_hw->csid_ipp_complete);
if (irq_status_ipp & CSID_PATH_ERROR_FIFO_OVERFLOW) {
- pr_err("%s:%d:CSID:%d IPP fifo over flow\n",
- __func__, __LINE__,
+ CAM_ERR(CAM_ISP, "CSID:%d IPP fifo over flow",
csid_hw->hw_intf->hw_idx);
/*Stop IPP path immediately */
cam_io_w_mb(CAM_CSID_HALT_IMMEDIATELY,
@@ -2376,17 +2424,20 @@
for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++) {
if (irq_status_rdi[i] &
BIT(csid_reg->cmn_reg->path_rst_done_shift_val)) {
- CDBG("%s:%d: CSID rdi%d reset complete\n",
- __func__, __LINE__, i);
+ CAM_DBG(CAM_ISP, "CSID rdi%d reset complete", i);
complete(&csid_hw->csid_rdin_complete[i]);
}
+ if (irq_status_rdi[i] & CSID_PATH_INFO_INPUT_SOF)
+ CAM_DBG(CAM_ISP, "CSID RDI SOF received");
+ if (irq_status_rdi[i] & CSID_PATH_INFO_INPUT_EOF)
+ CAM_DBG(CAM_ISP, "CSID RDI EOF received");
+
if (irq_status_rdi[i] & CSID_PATH_INFO_INPUT_EOF)
complete(&csid_hw->csid_rdin_complete[i]);
if (irq_status_rdi[i] & CSID_PATH_ERROR_FIFO_OVERFLOW) {
- pr_err("%s:%d:CSID:%d RDI fifo over flow\n",
- __func__, __LINE__,
+ CAM_ERR(CAM_ISP, "CSID:%d RDI fifo over flow",
csid_hw->hw_intf->hw_idx);
/*Stop RDI path immediately */
cam_io_w_mb(CAM_CSID_HALT_IMMEDIATELY,
@@ -2395,7 +2446,7 @@
}
}
- CDBG("%s:%d:IRQ Handling exit\n", __func__, __LINE__);
+ CAM_DBG(CAM_ISP, "IRQ Handling exit");
return IRQ_HANDLED;
}
@@ -2410,8 +2461,7 @@
struct cam_ife_csid_hw *ife_csid_hw = NULL;
if (csid_idx >= CAM_IFE_CSID_HW_RES_MAX) {
- pr_err("%s:%d: Invalid csid index:%d\n", __func__, __LINE__,
- csid_idx);
+ CAM_ERR(CAM_ISP, "Invalid csid index:%d", csid_idx);
return rc;
}
@@ -2421,7 +2471,7 @@
ife_csid_hw->hw_intf = csid_hw_intf;
ife_csid_hw->hw_info = csid_hw_info;
- CDBG("%s:%d: type %d index %d\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "type %d index %d",
ife_csid_hw->hw_intf->hw_type, csid_idx);
@@ -2440,8 +2490,7 @@
rc = cam_ife_csid_init_soc_resources(&ife_csid_hw->hw_info->soc_info,
cam_ife_csid_irq, ife_csid_hw);
if (rc < 0) {
- pr_err("%s:%d:CSID:%d Failed to init_soc\n", __func__, __LINE__,
- csid_idx);
+ CAM_ERR(CAM_ISP, "CSID:%d Failed to init_soc", csid_idx);
goto err;
}
@@ -2533,7 +2582,7 @@
uint32_t i;
if (!ife_csid_hw) {
- pr_err("%s:%d: Invalid param\n", __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "Invalid param");
return rc;
}
@@ -2547,8 +2596,7 @@
for (i = 0; i < CAM_IFE_CSID_CID_RES_MAX; i++)
kfree(ife_csid_hw->cid_res[i].res_priv);
+ cam_ife_csid_deinit_soc_resources(&ife_csid_hw->hw_info->soc_info);
return 0;
}
-
-
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
index d36c576..ef585c3 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
@@ -65,6 +65,7 @@
#define CSID_PATH_ERROR_PIX_COUNT BIT(13)
#define CSID_PATH_ERROR_LINE_COUNT BIT(14)
+/* enum cam_csid_path_halt_mode select the path halt mode control */
enum cam_csid_path_halt_mode {
CSID_HALT_MODE_INTERNAL,
CSID_HALT_MODE_GLOBAL,
@@ -72,6 +73,16 @@
CSID_HALT_MODE_SLAVE,
};
+/**
+ *enum cam_csid_path_timestamp_stb_sel - select the sof/eof strobes used to
+ * capture the timestamp
+ */
+enum cam_csid_path_timestamp_stb_sel {
+ CSID_TIMESTAMP_STB_PRE_HALT,
+ CSID_TIMESTAMP_STB_POST_HALT,
+ CSID_TIMESTAMP_STB_POST_IRQ,
+ CSID_TIMESTAMP_STB_MAX,
+};
struct cam_ife_csid_ipp_reg_offset {
/*Image pixel path register offsets*/
@@ -343,6 +354,7 @@
* @dt : Data type number
* @cid cid number, it is same as DT_ID number in HW
* @decode_fmt: input decode format
+ * @output_fmt: output resource format, needed for RDI resource
* @crop_enable: crop is enable or disabled, if enabled
* then remaining parameters are valid.
* @start_pixel: start pixel
@@ -362,6 +374,7 @@
uint32_t dt;
uint32_t cid;
uint32_t decode_fmt;
+ uint32_t output_fmt;
bool crop_enable;
uint32_t start_pixel;
uint32_t width;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.c
index 003d83f..5a57046 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.c
@@ -16,9 +16,7 @@
#include "cam_ife_csid_core.h"
#include "cam_ife_csid_dev.h"
#include "cam_ife_csid_hw_intf.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
static struct cam_hw_intf *cam_ife_csid_hw_list[CAM_IFE_CSID_HW_RES_MAX] = {
0, 0, 0, 0};
@@ -34,7 +32,7 @@
uint32_t csid_dev_idx;
int rc = 0;
- CDBG("%s:%d probe called\n", __func__, __LINE__);
+ CAM_DBG(CAM_ISP, "probe called");
csid_hw_intf = kzalloc(sizeof(*csid_hw_intf), GFP_KERNEL);
if (!csid_hw_intf) {
@@ -60,8 +58,7 @@
match_dev = of_match_device(pdev->dev.driver->of_match_table,
&pdev->dev);
if (!match_dev) {
- pr_err("%s:%d No matching table for the IFE CSID HW!\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ISP, "No matching table for the IFE CSID HW!");
rc = -EINVAL;
goto free_dev;
}
@@ -72,6 +69,7 @@
csid_hw_info->core_info = csid_dev;
csid_hw_info->soc_info.pdev = pdev;
+ csid_hw_info->soc_info.index = csid_dev_idx;
csid_hw_data = (struct cam_ife_csid_hw_info *)match_dev->data;
/* need to setup the pdev before call the ife hw probe init */
@@ -82,7 +80,7 @@
goto free_dev;
platform_set_drvdata(pdev, csid_dev);
- CDBG("%s:%d CSID:%d probe successful\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "CSID:%d probe successful",
csid_hw_intf->hw_idx);
@@ -113,7 +111,7 @@
csid_hw_intf = csid_dev->hw_intf;
csid_hw_info = csid_dev->hw_info;
- CDBG("%s:%d CSID:%d remove\n", __func__, __LINE__,
+ CAM_DBG(CAM_ISP, "CSID:%d remove",
csid_dev->hw_intf->hw_idx);
cam_ife_csid_hw_deinit(csid_dev);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite170.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite170.c
index 4ed4da5..36c6df0 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite170.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite170.c
@@ -15,7 +15,6 @@
#include "cam_ife_csid_core.h"
#include "cam_ife_csid_dev.h"
-
#define CAM_CSID_LITE_DRV_NAME "csid_lite_170"
#define CAM_CSID_LITE_VERSION_V170 0x10070000
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
index f07c45e..72050aa 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
@@ -9,11 +9,10 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
-
+#include <linux/slab.h>
#include "cam_ife_csid_soc.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
static int cam_ife_csid_get_dt_properties(struct cam_hw_soc_info *soc_info)
{
@@ -50,6 +49,14 @@
irq_handler_t csid_irq_handler, void *irq_data)
{
int rc = 0;
+ struct cam_cpas_register_params cpas_register_param;
+ struct cam_csid_soc_private *soc_private;
+
+ soc_private = kzalloc(sizeof(struct cam_csid_soc_private), GFP_KERNEL);
+ if (!soc_private)
+ return -ENOMEM;
+
+ soc_info->soc_private = soc_private;
rc = cam_ife_csid_get_dt_properties(soc_info);
if (rc < 0)
@@ -59,35 +66,117 @@
rc = cam_ife_csid_request_platform_resource(soc_info, csid_irq_handler,
irq_data);
+ if (rc < 0) {
+ CAM_ERR(CAM_ISP,
+ "Error Request platform resources failed rc=%d", rc);
+ goto free_soc_private;
+ }
+
+ memset(&cpas_register_param, 0, sizeof(cpas_register_param));
+ strlcpy(cpas_register_param.identifier, "csid",
+ CAM_HW_IDENTIFIER_LENGTH);
+ cpas_register_param.cell_index = soc_info->index;
+ cpas_register_param.dev = &soc_info->pdev->dev;
+ rc = cam_cpas_register_client(&cpas_register_param);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "CPAS registration failed rc=%d", rc);
+ goto release_soc;
+ } else {
+ soc_private->cpas_handle = cpas_register_param.client_handle;
+ }
+
+ return rc;
+
+release_soc:
+ cam_soc_util_release_platform_resource(soc_info);
+free_soc_private:
+ kfree(soc_private);
+
+ return rc;
+}
+
+int cam_ife_csid_deinit_soc_resources(
+ struct cam_hw_soc_info *soc_info)
+{
+ int rc = 0;
+ struct cam_csid_soc_private *soc_private;
+
+ soc_private = soc_info->soc_private;
+ if (!soc_private) {
+ CAM_ERR(CAM_ISP, "Error soc_private NULL");
+ return -ENODEV;
+ }
+
+ rc = cam_cpas_unregister_client(soc_private->cpas_handle);
+ if (rc)
+ CAM_ERR(CAM_ISP, "CPAS unregistration failed rc=%d", rc);
+
+ rc = cam_soc_util_release_platform_resource(soc_info);
if (rc < 0)
return rc;
- CDBG("%s: mem_base is 0x%llx\n", __func__,
- (uint64_t) soc_info->reg_map[0].mem_base);
-
return rc;
}
int cam_ife_csid_enable_soc_resources(struct cam_hw_soc_info *soc_info)
{
int rc = 0;
+ struct cam_csid_soc_private *soc_private;
+ struct cam_ahb_vote ahb_vote;
+ struct cam_axi_vote axi_vote;
- rc = cam_soc_util_enable_platform_resource(soc_info, true, true);
+ soc_private = soc_info->soc_private;
+
+ ahb_vote.type = CAM_VOTE_ABSOLUTE;
+ ahb_vote.vote.level = CAM_SVS_VOTE;
+ axi_vote.compressed_bw = 640000000;
+ axi_vote.uncompressed_bw = 640000000;
+
+ CAM_DBG(CAM_ISP, "csid vote compressed_bw:%lld uncompressed_bw:%lld",
+ axi_vote.compressed_bw, axi_vote.uncompressed_bw);
+
+ rc = cam_cpas_start(soc_private->cpas_handle, &ahb_vote, &axi_vote);
if (rc) {
- pr_err("%s: enable platform failed\n", __func__);
- return rc;
+ CAM_ERR(CAM_ISP, "Error CPAS start failed");
+ rc = -EFAULT;
+ goto end;
}
+ rc = cam_soc_util_enable_platform_resource(soc_info, true,
+ CAM_TURBO_VOTE, true);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "enable platform failed");
+ goto stop_cpas;
+ }
+
+ return rc;
+
+stop_cpas:
+ cam_cpas_stop(soc_private->cpas_handle);
+end:
return rc;
}
int cam_ife_csid_disable_soc_resources(struct cam_hw_soc_info *soc_info)
{
int rc = 0;
+ struct cam_csid_soc_private *soc_private;
+
+ if (!soc_info) {
+ CAM_ERR(CAM_ISP, "Error Invalid params");
+ return -EINVAL;
+ }
+ soc_private = soc_info->soc_private;
rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
if (rc)
- pr_err("%s: Disable platform failed\n", __func__);
+ CAM_ERR(CAM_ISP, "Disable platform failed");
+
+ rc = cam_cpas_stop(soc_private->cpas_handle);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Error CPAS stop failed rc=%d", rc);
+ return rc;
+ }
return rc;
}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.h
index 218e05a..1a30722 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.h
@@ -15,6 +15,19 @@
#include "cam_isp_hw.h"
+/*
+ * struct cam_csid_soc_private:
+ *
+ * @Brief: Private SOC data specific to CSID HW Driver
+ *
+ * @cpas_handle: Handle returned on registering with CPAS driver.
+ * This handle is used for all further interface
+ * with CPAS.
+ */
+struct cam_csid_soc_private {
+ uint32_t cpas_handle;
+};
+
/**
* struct csid_device_soc_info - CSID SOC info object
*
@@ -38,6 +51,17 @@
int cam_ife_csid_init_soc_resources(struct cam_hw_soc_info *soc_info,
irq_handler_t csid_irq_handler, void *irq_data);
+
+/**
+ * cam_ife_csid_deinit_soc_resources()
+ *
+ * @brief: csid de initialization function for the soc info
+ *
+ * @soc_info: soc info structure pointer
+ *
+ */
+int cam_ife_csid_deinit_soc_resources(struct cam_hw_soc_info *soc_info);
+
/**
* cam_ife_csid_enable_soc_resources()
*
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h
index ecc6f0e..52b712a 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h
@@ -69,6 +69,7 @@
* if type is path then res id need to be filled
* @res_id : res id to be reserved
* @in_port : input port resource info
+ * @out_port: output port resource info, used for RDI path only
* @sync_mode : Sync mode
* Sync mode could be master, slave or none
* @master_idx: master device index to be configured in the slave path
@@ -83,6 +84,7 @@
enum cam_isp_resource_type res_type;
uint32_t res_id;
struct cam_isp_in_port_info *in_port;
+ struct cam_isp_out_port_info *out_port;
enum cam_isp_hw_sync_mode sync_mode;
uint32_t master_idx;
uint32_t cid;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
index 15db6a6..418280a 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
@@ -108,6 +108,7 @@
* (Default is Master in case of Single VFE)
* @dual_slave_core: If Master and Slave exists, HW Index of Slave
* @cdm_ops: CDM operations
+ * @ctx: Context data
*/
struct cam_vfe_hw_vfe_out_acquire_args {
struct cam_isp_resource_node *rsrc_node;
@@ -118,6 +119,7 @@
uint32_t is_master;
uint32_t dual_slave_core;
struct cam_cdm_utils_ops *cdm_ops;
+ void *ctx;
};
/*
@@ -192,22 +194,21 @@
*
* @list: list_head node for the payload
* @core_index: Index of VFE HW that generated this IRQ event
- * @core_info: Private data of handler in bottom half context
* @evt_id: IRQ event
* @irq_reg_val: IRQ and Error register values, read when IRQ was
* handled
* @error_type: Identify different errors
* @ts: Timestamp
+ * @ctx: Context data received during acquire
*/
struct cam_vfe_bus_irq_evt_payload {
- struct list_head list;
- uint32_t core_index;
- void *core_info;
- uint32_t evt_id;
- uint32_t irq_reg_val[CAM_IFE_BUS_IRQ_REGISTERS_MAX];
- uint32_t error_type;
- struct cam_vfe_bus_ver2_priv *bus_priv;
- struct cam_isp_timestamp ts;
+ struct list_head list;
+ uint32_t core_index;
+ uint32_t evt_id;
+ uint32_t irq_reg_val[CAM_IFE_BUS_IRQ_REGISTERS_MAX];
+ uint32_t error_type;
+ struct cam_isp_timestamp ts;
+ void *ctx;
};
/*
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
index f6aab7f..e330c84 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
@@ -10,8 +10,6 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
-
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/list.h>
@@ -24,9 +22,7 @@
#include "cam_vfe_bus.h"
#include "cam_vfe_top.h"
#include "cam_ife_hw_mgr.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
static const char drv_name[] = "vfe";
@@ -51,11 +47,6 @@
0x00000000,
};
-static uint32_t bus_irq_reg_mask[CAM_IFE_IRQ_REGISTERS_MAX] = {
- 0x00000200,
- 0x00000000,
-};
-
static int cam_vfe_get_evt_payload(struct cam_vfe_hw_core_info *core_info,
struct cam_vfe_top_irq_evt_payload **evt_payload)
{
@@ -63,7 +54,7 @@
if (list_empty(&core_info->free_payload_list)) {
*evt_payload = NULL;
spin_unlock(&core_info->spin_lock);
- pr_err_ratelimited("No free payload, core info 0x%x\n",
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "No free payload, core info 0x%x\n",
core_info->cpas_handle);
return -ENODEV;
}
@@ -83,11 +74,11 @@
unsigned long flags;
if (!core_info) {
- pr_err("Invalid param core_info NULL");
+ CAM_ERR(CAM_ISP, "Invalid param core_info NULL");
return -EINVAL;
}
if (*evt_payload == NULL) {
- pr_err("No payload to put\n");
+ CAM_ERR(CAM_ISP, "No payload to put");
return -EINVAL;
}
@@ -105,9 +96,9 @@
struct cam_vfe_hw_core_info *core_info = NULL;
int rc = 0;
- CDBG("Enter\n");
+ CAM_DBG(CAM_ISP, "Enter");
if (!hw_priv) {
- pr_err("%s: Invalid arguments\n", __func__);
+ CAM_ERR(CAM_ISP, "Invalid arguments");
return -EINVAL;
}
@@ -118,7 +109,7 @@
core_info->vfe_top->top_priv,
get_hw_cap_args, arg_size);
- CDBG("Exit\n");
+ CAM_DBG(CAM_ISP, "Exit");
return rc;
}
@@ -130,11 +121,11 @@
handler_priv = th_payload->handler_priv;
- CDBG("Enter\n");
- CDBG("IRQ status_0 = 0x%x\n", th_payload->evt_status_arr[0]);
+ CAM_DBG(CAM_ISP, "Enter");
+ CAM_DBG(CAM_ISP, "IRQ status_0 = 0x%x", th_payload->evt_status_arr[0]);
if (th_payload->evt_status_arr[0] & (1<<31)) {
- CDBG("Calling Complete for RESET CMD\n");
+ CAM_DBG(CAM_ISP, "Calling Complete for RESET CMD");
complete(handler_priv->reset_complete);
/*
@@ -148,7 +139,7 @@
rc = 0;
}
- CDBG("Exit\n");
+ CAM_DBG(CAM_ISP, "Exit");
return rc;
}
@@ -159,9 +150,9 @@
struct cam_vfe_hw_core_info *core_info = NULL;
int rc = 0;
- CDBG("Enter\n");
+ CAM_DBG(CAM_ISP, "Enter");
if (!hw_priv) {
- pr_err("Invalid arguments\n");
+ CAM_ERR(CAM_ISP, "Invalid arguments");
return -EINVAL;
}
@@ -169,7 +160,7 @@
vfe_hw->open_count++;
if (vfe_hw->open_count > 1) {
mutex_unlock(&vfe_hw->hw_mutex);
- CDBG("VFE has already been initialized cnt %d\n",
+ CAM_DBG(CAM_ISP, "VFE has already been initialized cnt %d",
vfe_hw->open_count);
return 0;
}
@@ -181,22 +172,32 @@
/* Turn ON Regulators, Clocks and other SOC resources */
rc = cam_vfe_enable_soc_resources(soc_info);
if (rc) {
- pr_err("Enable SOC failed\n");
+ CAM_ERR(CAM_ISP, "Enable SOC failed");
rc = -EFAULT;
goto decrement_open_cnt;
}
- CDBG("Enable soc done\n");
+ CAM_DBG(CAM_ISP, "Enable soc done");
+
+ rc = core_info->vfe_bus->hw_ops.init(core_info->vfe_bus->bus_priv,
+ NULL, 0);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Bus HW init Failed rc=%d", rc);
+ goto disable_soc;
+ }
/* Do HW Reset */
rc = cam_vfe_reset(hw_priv, NULL, 0);
if (rc) {
- pr_err("Reset Failed\n");
- goto disable_soc;
+ CAM_ERR(CAM_ISP, "Reset Failed rc=%d", rc);
+ goto deinit_bus;
}
return 0;
+deinit_bus:
+ core_info->vfe_bus->hw_ops.deinit(core_info->vfe_bus->bus_priv,
+ NULL, 0);
disable_soc:
cam_vfe_disable_soc_resources(soc_info);
decrement_open_cnt:
@@ -212,22 +213,22 @@
struct cam_hw_soc_info *soc_info = NULL;
int rc = 0;
- CDBG("Enter\n");
+ CAM_DBG(CAM_ISP, "Enter");
if (!hw_priv) {
- pr_err("%s: Invalid arguments\n", __func__);
+ CAM_ERR(CAM_ISP, "Invalid arguments");
return -EINVAL;
}
mutex_lock(&vfe_hw->hw_mutex);
if (!vfe_hw->open_count) {
mutex_unlock(&vfe_hw->hw_mutex);
- pr_err("Error! Unbalanced deinit\n");
+ CAM_ERR(CAM_ISP, "Error! Unbalanced deinit");
return -EFAULT;
}
vfe_hw->open_count--;
if (vfe_hw->open_count) {
mutex_unlock(&vfe_hw->hw_mutex);
- CDBG("open_cnt non-zero =%d\n", vfe_hw->open_count);
+ CAM_DBG(CAM_ISP, "open_cnt non-zero =%d", vfe_hw->open_count);
return 0;
}
mutex_unlock(&vfe_hw->hw_mutex);
@@ -235,14 +236,14 @@
soc_info = &vfe_hw->soc_info;
/* Turn OFF Regulators, Clocks and other SOC resources */
- CDBG("Disable SOC resource\n");
+ CAM_DBG(CAM_ISP, "Disable SOC resource");
rc = cam_vfe_disable_soc_resources(soc_info);
if (rc)
- pr_err("Disable SOC failed\n");
+ CAM_ERR(CAM_ISP, "Disable SOC failed");
vfe_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
- CDBG("Exit\n");
+ CAM_DBG(CAM_ISP, "Exit");
return rc;
}
@@ -253,10 +254,10 @@
struct cam_vfe_hw_core_info *core_info = NULL;
int rc;
- CDBG("Enter\n");
+ CAM_DBG(CAM_ISP, "Enter");
if (!hw_priv) {
- pr_err("Invalid input arguments\n");
+ CAM_ERR(CAM_ISP, "Invalid input arguments");
return -EINVAL;
}
@@ -274,28 +275,28 @@
top_reset_irq_reg_mask, &core_info->irq_payload,
cam_vfe_reset_irq_top_half, NULL, NULL, NULL);
if (core_info->irq_handle < 0) {
- pr_err("subscribe irq controller failed\n");
+ CAM_ERR(CAM_ISP, "subscribe irq controller failed");
return -EFAULT;
}
reinit_completion(&vfe_hw->hw_complete);
- CDBG("calling RESET\n");
+ CAM_DBG(CAM_ISP, "calling RESET");
core_info->vfe_top->hw_ops.reset(core_info->vfe_top->top_priv, NULL, 0);
- CDBG("waiting for vfe reset complete\n");
+ CAM_DBG(CAM_ISP, "waiting for vfe reset complete");
/* Wait for Completion or Timeout of 500ms */
rc = wait_for_completion_timeout(&vfe_hw->hw_complete, 500);
if (!rc)
- pr_err("Error! Reset Timeout\n");
+ CAM_ERR(CAM_ISP, "Error! Reset Timeout");
- CDBG("reset complete done (%d)\n", rc);
+ CAM_DBG(CAM_ISP, "reset complete done (%d)", rc);
rc = cam_irq_controller_unsubscribe_irq(
core_info->vfe_irq_controller, core_info->irq_handle);
if (rc)
- pr_err("Error! Unsubscribe failed\n");
+ CAM_ERR(CAM_ISP, "Error! Unsubscribe failed");
- CDBG("Exit\n");
+ CAM_DBG(CAM_ISP, "Exit");
return rc;
}
@@ -319,12 +320,13 @@
handler_priv = th_payload->handler_priv;
- CDBG("IRQ status_0 = %x\n", th_payload->evt_status_arr[0]);
- CDBG("IRQ status_1 = %x\n", th_payload->evt_status_arr[1]);
+ CAM_DBG(CAM_ISP, "IRQ status_0 = %x", th_payload->evt_status_arr[0]);
+ CAM_DBG(CAM_ISP, "IRQ status_1 = %x", th_payload->evt_status_arr[1]);
rc = cam_vfe_get_evt_payload(handler_priv->core_info, &evt_payload);
if (rc) {
- pr_err_ratelimited("No tasklet_cmd is free in queue\n");
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "No tasklet_cmd is free in queue\n");
return rc;
}
@@ -341,14 +343,14 @@
evt_payload->irq_reg_val[i] = cam_io_r(handler_priv->mem_base +
irq_reg_offset[i]);
}
- CDBG("Violation status = %x\n", evt_payload->irq_reg_val[2]);
+ CAM_DBG(CAM_ISP, "Violation status = %x", evt_payload->irq_reg_val[2]);
/*
* need to handle overflow condition here, otherwise irq storm
* will block everything.
*/
if (evt_payload->irq_reg_val[1]) {
- pr_err("Mask all the interrupts\n");
+ CAM_ERR(CAM_ISP, "Mask all the interrupts");
cam_io_w(0, handler_priv->mem_base + 0x60);
cam_io_w(0, handler_priv->mem_base + 0x5C);
@@ -357,7 +359,7 @@
th_payload->evt_payload_priv = evt_payload;
- CDBG("Exit\n");
+ CAM_DBG(CAM_ISP, "Exit");
return rc;
}
@@ -371,7 +373,7 @@
if (!hw_priv || !reserve_args || (arg_size !=
sizeof(struct cam_vfe_acquire_args))) {
- pr_err("Invalid input arguments\n");
+ CAM_ERR(CAM_ISP, "Invalid input arguments");
return -EINVAL;
}
core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
@@ -382,12 +384,13 @@
rc = core_info->vfe_top->hw_ops.reserve(
core_info->vfe_top->top_priv,
acquire,
- sizeof(acquire));
+ sizeof(*acquire));
else if (acquire->rsrc_type == CAM_ISP_RESOURCE_VFE_OUT)
- rc = core_info->vfe_bus->acquire_resource(
- core_info->vfe_bus->bus_priv, acquire);
+ rc = core_info->vfe_bus->hw_ops.reserve(
+ core_info->vfe_bus->bus_priv, acquire,
+ sizeof(*acquire));
else
- pr_err("Invalid res type:%d\n", acquire->rsrc_type);
+ CAM_ERR(CAM_ISP, "Invalid res type:%d", acquire->rsrc_type);
mutex_unlock(&vfe_hw->hw_mutex);
@@ -404,7 +407,7 @@
if (!hw_priv || !release_args ||
(arg_size != sizeof(struct cam_isp_resource_node))) {
- pr_err("Invalid input arguments\n");
+ CAM_ERR(CAM_ISP, "Invalid input arguments");
return -EINVAL;
}
@@ -415,12 +418,13 @@
if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_IN)
rc = core_info->vfe_top->hw_ops.release(
core_info->vfe_top->top_priv, isp_res,
- sizeof(struct cam_isp_resource_node));
+ sizeof(*isp_res));
else if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_OUT)
- rc = core_info->vfe_bus->release_resource(
- core_info->vfe_bus->bus_priv, isp_res);
+ rc = core_info->vfe_bus->hw_ops.release(
+ core_info->vfe_bus->bus_priv, isp_res,
+ sizeof(*isp_res));
else
- pr_err("Invalid res type:%d\n", isp_res->res_type);
+ CAM_ERR(CAM_ISP, "Invalid res type:%d", isp_res->res_type);
mutex_unlock(&vfe_hw->hw_mutex);
@@ -437,7 +441,7 @@
if (!hw_priv || !start_args ||
(arg_size != sizeof(struct cam_isp_resource_node))) {
- pr_err("Invalid input arguments\n");
+ CAM_ERR(CAM_ISP, "Invalid input arguments");
return -EINVAL;
}
@@ -466,20 +470,12 @@
core_info->vfe_top->top_priv, isp_res,
sizeof(struct cam_isp_resource_node));
else
- pr_err("Error! subscribe irq controller failed\n");
+ CAM_ERR(CAM_ISP,
+ "Error! subscribe irq controller failed");
} else if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_OUT) {
- isp_res->irq_handle = cam_irq_controller_subscribe_irq(
- core_info->vfe_irq_controller, CAM_IRQ_PRIORITY_2,
- bus_irq_reg_mask, &core_info->irq_payload,
- core_info->vfe_bus->top_half_handler,
- cam_ife_mgr_do_tasklet_buf_done,
- isp_res->tasklet_info, cam_tasklet_enqueue_cmd);
- if (isp_res->irq_handle > 0)
- rc = core_info->vfe_bus->start_resource(isp_res);
- else
- pr_err("Error! subscribe irq controller failed\n");
+ rc = core_info->vfe_bus->hw_ops.start(isp_res, NULL, 0);
} else {
- pr_err("Invalid res type:%d\n", isp_res->res_type);
+ CAM_ERR(CAM_ISP, "Invalid res type:%d", isp_res->res_type);
}
mutex_unlock(&vfe_hw->hw_mutex);
@@ -496,7 +492,7 @@
if (!hw_priv || !stop_args ||
(arg_size != sizeof(struct cam_isp_resource_node))) {
- pr_err("Invalid input arguments\n");
+ CAM_ERR(CAM_ISP, "Invalid input arguments");
return -EINVAL;
}
@@ -513,9 +509,9 @@
} else if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_OUT) {
cam_irq_controller_unsubscribe_irq(
core_info->vfe_irq_controller, isp_res->irq_handle);
- rc = core_info->vfe_bus->stop_resource(isp_res);
+ rc = core_info->vfe_bus->hw_ops.stop(isp_res, NULL, 0);
} else {
- pr_err("Invalid res type:%d\n", isp_res->res_type);
+ CAM_ERR(CAM_ISP, "Invalid res type:%d", isp_res->res_type);
}
mutex_unlock(&vfe_hw->hw_mutex);
@@ -543,7 +539,7 @@
int rc = 0;
if (!hw_priv) {
- pr_err("Invalid arguments\n");
+ CAM_ERR(CAM_ISP, "Invalid arguments");
return -EINVAL;
}
@@ -560,13 +556,13 @@
break;
case CAM_VFE_HW_CMD_GET_BUF_UPDATE:
- rc = core_info->vfe_bus->process_cmd(
+ rc = core_info->vfe_bus->hw_ops.process_cmd(
core_info->vfe_bus->bus_priv, cmd_type, cmd_args,
arg_size);
break;
default:
- pr_err("Invalid cmd type:%d\n", cmd_type);
+ CAM_ERR(CAM_ISP, "Invalid cmd type:%d", cmd_type);
rc = -EINVAL;
break;
}
@@ -596,13 +592,13 @@
int rc = -EINVAL;
int i;
- CDBG("Enter");
+ CAM_DBG(CAM_ISP, "Enter");
rc = cam_irq_controller_init(drv_name,
CAM_SOC_GET_REG_MAP_START(soc_info, VFE_CORE_BASE_IDX),
vfe_hw_info->irq_reg_info, &core_info->vfe_irq_controller);
if (rc) {
- pr_err("Error! cam_irq_controller_init failed\n");
+ CAM_ERR(CAM_ISP, "Error! cam_irq_controller_init failed");
return rc;
}
@@ -610,16 +606,16 @@
soc_info, hw_intf, vfe_hw_info->top_hw_info,
&core_info->vfe_top);
if (rc) {
- pr_err("Error! cam_vfe_top_init failed\n");
- return rc;
+ CAM_ERR(CAM_ISP, "Error! cam_vfe_top_init failed");
+ goto deinit_controller;
}
- rc = cam_vfe_bus_init(vfe_hw_info->bus_version,
- soc_info->reg_map[0].mem_base, hw_intf,
- vfe_hw_info->bus_hw_info, NULL, &core_info->vfe_bus);
+ rc = cam_vfe_bus_init(vfe_hw_info->bus_version, soc_info, hw_intf,
+ vfe_hw_info->bus_hw_info, core_info->vfe_irq_controller,
+ &core_info->vfe_bus);
if (rc) {
- pr_err("Error! cam_vfe_bus_init failed\n");
- return rc;
+ CAM_ERR(CAM_ISP, "Error! cam_vfe_bus_init failed");
+ goto deinit_top;
}
INIT_LIST_HEAD(&core_info->free_payload_list);
@@ -632,4 +628,47 @@
spin_lock_init(&core_info->spin_lock);
return rc;
+
+deinit_top:
+ cam_vfe_top_deinit(vfe_hw_info->top_version,
+ &core_info->vfe_top);
+
+deinit_controller:
+ cam_irq_controller_deinit(&core_info->vfe_irq_controller);
+
+ return rc;
}
+
+int cam_vfe_core_deinit(struct cam_vfe_hw_core_info *core_info,
+ struct cam_vfe_hw_info *vfe_hw_info)
+{
+ int rc = -EINVAL;
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&core_info->spin_lock, flags);
+
+ INIT_LIST_HEAD(&core_info->free_payload_list);
+ for (i = 0; i < CAM_VFE_EVT_MAX; i++)
+ INIT_LIST_HEAD(&core_info->evt_payload[i].list);
+
+ rc = cam_vfe_bus_deinit(vfe_hw_info->bus_version,
+ &core_info->vfe_bus);
+ if (rc)
+ CAM_ERR(CAM_ISP, "Error cam_vfe_bus_deinit failed rc=%d", rc);
+
+ rc = cam_vfe_top_deinit(vfe_hw_info->top_version,
+ &core_info->vfe_top);
+ if (rc)
+ CAM_ERR(CAM_ISP, "Error cam_vfe_top_deinit failed rc=%d", rc);
+
+ rc = cam_irq_controller_deinit(&core_info->vfe_irq_controller);
+ if (rc)
+ CAM_ERR(CAM_ISP,
+ "Error cam_irq_controller_deinit failed rc=%d", rc);
+
+ spin_unlock_irqrestore(&core_info->spin_lock, flags);
+
+ return rc;
+}
+
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.h
index 94b4cf0..ee29e1cf 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.h
@@ -89,4 +89,7 @@
struct cam_hw_intf *hw_intf,
struct cam_vfe_hw_info *vfe_hw_info);
+int cam_vfe_core_deinit(struct cam_vfe_hw_core_info *core_info,
+ struct cam_vfe_hw_info *vfe_hw_info);
+
#endif /* _CAM_VFE_CORE_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.c
index 40279ae..3e2307c 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.c
@@ -10,7 +10,6 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
@@ -18,9 +17,7 @@
#include "cam_vfe_dev.h"
#include "cam_vfe_core.h"
#include "cam_vfe_soc.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
static struct cam_hw_intf *cam_vfe_hw_list[CAM_VFE_HW_NUM_MAX] = {0, 0, 0, 0};
@@ -62,14 +59,15 @@
vfe_hw_intf->hw_ops.process_cmd = cam_vfe_process_cmd;
vfe_hw_intf->hw_type = CAM_ISP_HW_TYPE_VFE;
- CDBG("type %d index %d\n", vfe_hw_intf->hw_type, vfe_hw_intf->hw_idx);
+ CAM_DBG(CAM_ISP, "type %d index %d",
+ vfe_hw_intf->hw_type, vfe_hw_intf->hw_idx);
platform_set_drvdata(pdev, vfe_hw_intf);
vfe_hw->core_info = kzalloc(sizeof(struct cam_vfe_hw_core_info),
GFP_KERNEL);
if (!vfe_hw->core_info) {
- CDBG("Failed to alloc for core\n");
+ CAM_DBG(CAM_ISP, "Failed to alloc for core");
rc = -ENOMEM;
goto free_vfe_hw;
}
@@ -78,7 +76,7 @@
match_dev = of_match_device(pdev->dev.driver->of_match_table,
&pdev->dev);
if (!match_dev) {
- pr_err("Of_match Failed\n");
+ CAM_ERR(CAM_ISP, "Of_match Failed");
rc = -EINVAL;
goto free_core_info;
}
@@ -88,14 +86,14 @@
rc = cam_vfe_init_soc_resources(&vfe_hw->soc_info, cam_vfe_irq,
vfe_hw);
if (rc < 0) {
- pr_err("Failed to init soc\n");
+ CAM_ERR(CAM_ISP, "Failed to init soc rc=%d", rc);
goto free_core_info;
}
rc = cam_vfe_core_init(core_info, &vfe_hw->soc_info,
vfe_hw_intf, hw_info);
if (rc < 0) {
- pr_err("Failed to init core\n");
+ CAM_ERR(CAM_ISP, "Failed to init core rc=%d", rc);
goto deinit_soc;
}
@@ -110,11 +108,13 @@
cam_vfe_init_hw(vfe_hw, NULL, 0);
cam_vfe_deinit_hw(vfe_hw, NULL, 0);
- CDBG("VFE%d probe successful\n", vfe_hw_intf->hw_idx);
+ CAM_DBG(CAM_ISP, "VFE%d probe successful", vfe_hw_intf->hw_idx);
return rc;
deinit_soc:
+ if (cam_vfe_deinit_soc_resources(&vfe_hw->soc_info))
+ CAM_ERR(CAM_ISP, "Failed to deinit soc");
free_core_info:
kfree(vfe_hw->core_info);
free_vfe_hw:
@@ -125,6 +125,61 @@
return rc;
}
+int cam_vfe_remove(struct platform_device *pdev)
+{
+ struct cam_hw_info *vfe_hw = NULL;
+ struct cam_hw_intf *vfe_hw_intf = NULL;
+ struct cam_vfe_hw_core_info *core_info = NULL;
+ int rc = 0;
+
+ vfe_hw_intf = platform_get_drvdata(pdev);
+ if (!vfe_hw_intf) {
+ CAM_ERR(CAM_ISP, "Error! No data in pdev");
+ return -EINVAL;
+ }
+
+ CAM_DBG(CAM_ISP, "type %d index %d",
+ vfe_hw_intf->hw_type, vfe_hw_intf->hw_idx);
+
+ if (vfe_hw_intf->hw_idx < CAM_VFE_HW_NUM_MAX)
+ cam_vfe_hw_list[vfe_hw_intf->hw_idx] = NULL;
+
+ vfe_hw = vfe_hw_intf->hw_priv;
+ if (!vfe_hw) {
+ CAM_ERR(CAM_ISP, "Error! HW data is NULL");
+ rc = -ENODEV;
+ goto free_vfe_hw_intf;
+ }
+
+ core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
+ if (!core_info) {
+ CAM_ERR(CAM_ISP, "Error! core data NULL");
+ rc = -EINVAL;
+ goto deinit_soc;
+ }
+
+ rc = cam_vfe_core_deinit(core_info, core_info->vfe_hw_info);
+ if (rc < 0)
+ CAM_ERR(CAM_ISP, "Failed to deinit core rc=%d", rc);
+
+ kfree(vfe_hw->core_info);
+
+deinit_soc:
+ rc = cam_vfe_deinit_soc_resources(&vfe_hw->soc_info);
+ if (rc < 0)
+ CAM_ERR(CAM_ISP, "Failed to deinit soc rc=%d", rc);
+
+ mutex_destroy(&vfe_hw->hw_mutex);
+ kfree(vfe_hw);
+
+ CAM_DBG(CAM_ISP, "VFE%d remove successful", vfe_hw_intf->hw_idx);
+
+free_vfe_hw_intf:
+ kfree(vfe_hw_intf);
+
+ return rc;
+}
+
int cam_vfe_hw_init(struct cam_hw_intf **vfe_hw, uint32_t hw_idx)
{
int rc = 0;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.h
index ca54d81..9e73528 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.h
@@ -27,4 +27,16 @@
*/
int cam_vfe_probe(struct platform_device *pdev);
+/*
+ * cam_vfe_remove()
+ *
+ * @brief: Driver remove function
+ *
+ * @pdev: Platform Device pointer
+ *
+ * @Return: 0: Success
+ * Non-zero: Failure
+ */
+int cam_vfe_remove(struct platform_device *pdev);
+
#endif /* _CAM_VFE_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
index 3670ca9..3b2ead2 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
@@ -10,14 +10,10 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
-
#include <linux/slab.h>
#include "cam_cpas_api.h"
#include "cam_vfe_soc.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
static int cam_vfe_get_dt_properties(struct cam_hw_soc_info *soc_info)
{
@@ -25,7 +21,7 @@
rc = cam_soc_util_get_dt_properties(soc_info);
if (rc) {
- pr_err("Error! get DT properties failed\n");
+ CAM_ERR(CAM_ISP, "Error! get DT properties failed rc=%d", rc);
return rc;
}
@@ -40,6 +36,21 @@
rc = cam_soc_util_request_platform_resource(soc_info, vfe_irq_handler,
irq_data);
+ if (rc)
+ CAM_ERR(CAM_ISP,
+ "Error! Request platform resource failed rc=%d", rc);
+
+ return rc;
+}
+
+static int cam_vfe_release_platform_resource(struct cam_hw_soc_info *soc_info)
+{
+ int rc = 0;
+
+ rc = cam_soc_util_release_platform_resource(soc_info);
+ if (rc)
+ CAM_ERR(CAM_ISP,
+ "Error! Release platform resource failed rc=%d", rc);
return rc;
}
@@ -54,21 +65,22 @@
soc_private = kzalloc(sizeof(struct cam_vfe_soc_private),
GFP_KERNEL);
if (!soc_private) {
- CDBG("Error! soc_private Alloc Failed\n");
+ CAM_DBG(CAM_ISP, "Error! soc_private Alloc Failed");
return -ENOMEM;
}
soc_info->soc_private = soc_private;
rc = cam_vfe_get_dt_properties(soc_info);
if (rc < 0) {
- pr_err("Error! Get DT properties failed\n");
+ CAM_ERR(CAM_ISP, "Error! Get DT properties failed rc=%d", rc);
goto free_soc_private;
}
rc = cam_vfe_request_platform_resource(soc_info, vfe_irq_handler,
irq_data);
if (rc < 0) {
- pr_err("Error! Request platform resources failed\n");
+ CAM_ERR(CAM_ISP,
+ "Error! Request platform resources failed rc=%d", rc);
goto free_soc_private;
}
@@ -79,7 +91,7 @@
cpas_register_param.dev = &soc_info->pdev->dev;
rc = cam_cpas_register_client(&cpas_register_param);
if (rc) {
- pr_err("CPAS registration failed\n");
+ CAM_ERR(CAM_ISP, "CPAS registration failed rc=%d", rc);
goto release_soc;
} else {
soc_private->cpas_handle = cpas_register_param.client_handle;
@@ -95,6 +107,36 @@
return rc;
}
+int cam_vfe_deinit_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+ int rc = 0;
+ struct cam_vfe_soc_private *soc_private;
+
+ if (!soc_info) {
+ CAM_ERR(CAM_ISP, "Error! soc_info NULL");
+ return -ENODEV;
+ }
+
+ soc_private = soc_info->soc_private;
+ if (!soc_private) {
+ CAM_ERR(CAM_ISP, "Error! soc_private NULL");
+ return -ENODEV;
+ }
+
+ rc = cam_cpas_unregister_client(soc_private->cpas_handle);
+ if (rc)
+ CAM_ERR(CAM_ISP, "CPAS unregistration failed rc=%d", rc);
+
+ rc = cam_vfe_release_platform_resource(soc_info);
+ if (rc < 0)
+ CAM_ERR(CAM_ISP,
+ "Error! Release platform resources failed rc=%d", rc);
+
+ kfree(soc_private);
+
+ return rc;
+}
+
int cam_vfe_enable_soc_resources(struct cam_hw_soc_info *soc_info)
{
int rc = 0;
@@ -103,7 +145,7 @@
struct cam_axi_vote axi_vote;
if (!soc_info) {
- pr_err("Error! Invalid params\n");
+ CAM_ERR(CAM_ISP, "Error! Invalid params");
rc = -EINVAL;
goto end;
}
@@ -112,19 +154,20 @@
ahb_vote.type = CAM_VOTE_ABSOLUTE;
ahb_vote.vote.level = CAM_SVS_VOTE;
- axi_vote.compressed_bw = 640000000;
- axi_vote.uncompressed_bw = 640000000;
+ axi_vote.compressed_bw = 10640000000L;
+ axi_vote.uncompressed_bw = 10640000000L;
rc = cam_cpas_start(soc_private->cpas_handle, &ahb_vote, &axi_vote);
if (rc) {
- pr_err("Error! CPAS start failed.\n");
+ CAM_ERR(CAM_ISP, "Error! CPAS start failed rc=%d", rc);
rc = -EFAULT;
goto end;
}
- rc = cam_soc_util_enable_platform_resource(soc_info, true, true);
+ rc = cam_soc_util_enable_platform_resource(soc_info, true,
+ CAM_TURBO_VOTE, true);
if (rc) {
- pr_err("Error! enable platform failed\n");
+ CAM_ERR(CAM_ISP, "Error! enable platform failed rc=%d", rc);
goto stop_cpas;
}
@@ -143,7 +186,7 @@
struct cam_vfe_soc_private *soc_private;
if (!soc_info) {
- pr_err("Error! Invalid params\n");
+ CAM_ERR(CAM_ISP, "Error! Invalid params");
rc = -EINVAL;
return rc;
}
@@ -151,13 +194,13 @@
rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
if (rc) {
- pr_err("%s: disable platform failed\n", __func__);
+ CAM_ERR(CAM_ISP, "Disable platform failed rc=%d", rc);
return rc;
}
rc = cam_cpas_stop(soc_private->cpas_handle);
if (rc) {
- pr_err("Error! CPAS stop failed.\n");
+ CAM_ERR(CAM_ISP, "Error! CPAS stop failed rc=%d", rc);
return rc;
}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.h
index 27fb192..094c977 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.h
@@ -45,6 +45,18 @@
irq_handler_t vfe_irq_handler, void *irq_data);
/*
+ * cam_vfe_deinit_soc_resources()
+ *
+ * @Brief: Deinitialize SOC resources including private data
+ *
+ * @soc_info: Device soc information
+ *
+ * @Return: 0: Success
+ * Non-zero: Failure
+ */
+int cam_vfe_deinit_soc_resources(struct cam_hw_soc_info *soc_info);
+
+/*
* cam_vfe_enable_soc_resources()
*
* @brief: Enable regulator, irq resources, start CPAS
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.c
index 2245ab1..0af32ad 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.c
@@ -16,9 +16,6 @@
#include "cam_vfe_core.h"
#include "cam_vfe_dev.h"
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
-
static const struct of_device_id cam_vfe170_dt_match[] = {
{
.compatible = "qcom,vfe170",
@@ -30,6 +27,7 @@
static struct platform_driver cam_vfe170_driver = {
.probe = cam_vfe_probe,
+ .remove = cam_vfe_remove,
.driver = {
.name = "cam_vfe170",
.owner = THIS_MODULE,
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h
index b550071..275c7b5 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h
@@ -117,6 +117,28 @@
.reg_update_cmd = 0x000004AC,
};
+static struct cam_vfe_rdi_ver2_reg vfe170_rdi_reg = {
+ .reg_update_cmd = 0x000004AC,
+};
+
+static struct cam_vfe_rdi_reg_data vfe_170_rdi_0_data = {
+ .reg_update_cmd_data = 0x2,
+ .sof_irq_mask = 0x8000000,
+ .reg_update_irq_mask = 0x20,
+};
+
+static struct cam_vfe_rdi_reg_data vfe_170_rdi_1_data = {
+ .reg_update_cmd_data = 0x4,
+ .sof_irq_mask = 0x10000000,
+ .reg_update_irq_mask = 0x40,
+};
+
+static struct cam_vfe_rdi_reg_data vfe_170_rdi_2_data = {
+ .reg_update_cmd_data = 0x8,
+ .sof_irq_mask = 0x20000000,
+ .reg_update_irq_mask = 0x80,
+};
+
static struct cam_vfe_top_ver2_hw_info vfe170_top_hw_info = {
.common_reg = &vfe170_top_common_reg,
.camif_hw_info = {
@@ -124,6 +146,16 @@
.camif_reg = &vfe170_camif_reg,
.reg_data = &vfe_170_camif_reg_data,
},
+ .rdi_hw_info = {
+ .common_reg = &vfe170_top_common_reg,
+ .rdi_reg = &vfe170_rdi_reg,
+ .reg_data = {
+ &vfe_170_rdi_0_data,
+ &vfe_170_rdi_1_data,
+ &vfe_170_rdi_2_data,
+ NULL,
+ },
+ },
.mux_type = {
CAM_VFE_CAMIF_VER_2_0,
CAM_VFE_RDI_VER_1_0,
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/Makefile
index cea1137..4a328ee 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/Makefile
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/Makefile
@@ -1,7 +1,10 @@
ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils/
ccflags-y += -Idrivers/media/platform/msm/camera/cam_cdm/
ccflags-y += -Idrivers/media/platform/msm/camera/cam_core/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include
ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include
ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include
ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw
ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus.c
index 50952f8..c6c3272 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus.c
@@ -10,30 +10,47 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
-
#include "cam_vfe_bus.h"
#include "cam_vfe_bus_ver1.h"
#include "cam_vfe_bus_ver2.h"
+#include "cam_debug_util.h"
int cam_vfe_bus_init(uint32_t bus_version,
- void __iomem *mem_base,
+ struct cam_hw_soc_info *soc_info,
struct cam_hw_intf *hw_intf,
void *bus_hw_info,
void *vfe_irq_controller,
- struct cam_vfe_bus **vfe_bus)
+ struct cam_vfe_bus **vfe_bus)
{
int rc = -ENODEV;
switch (bus_version) {
case CAM_VFE_BUS_VER_2_0:
- rc = cam_vfe_bus_ver2_init(mem_base, hw_intf, bus_hw_info,
+ rc = cam_vfe_bus_ver2_init(soc_info, hw_intf, bus_hw_info,
vfe_irq_controller, vfe_bus);
break;
default:
- pr_err("Unsupported Bus Version %x\n", bus_version);
+ CAM_ERR(CAM_ISP, "Unsupported Bus Version %x", bus_version);
break;
}
return rc;
}
+
+int cam_vfe_bus_deinit(uint32_t bus_version,
+ struct cam_vfe_bus **vfe_bus)
+{
+ int rc = -ENODEV;
+
+ switch (bus_version) {
+ case CAM_VFE_BUS_VER_2_0:
+ rc = cam_vfe_bus_ver2_deinit(vfe_bus);
+ break;
+ default:
+ CAM_ERR(CAM_ISP, "Unsupported Bus Version %x", bus_version);
+ break;
+ }
+
+ return rc;
+}
+
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
index 92a17d8..5d7a1b9 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
@@ -10,43 +10,42 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
-
#include <linux/ratelimit.h>
#include <linux/slab.h>
#include "cam_io_util.h"
#include "cam_cdm_util.h"
#include "cam_hw_intf.h"
+#include "cam_ife_hw_mgr.h"
#include "cam_vfe_hw_intf.h"
#include "cam_irq_controller.h"
+#include "cam_tasklet_util.h"
#include "cam_vfe_bus.h"
#include "cam_vfe_bus_ver2.h"
#include "cam_vfe_core.h"
+#include "cam_debug_util.h"
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+static const char drv_name[] = "vfe_bus";
-#define FRAME_BASED_EN 0
+#define CAM_VFE_BUS_IRQ_REG0 0
+#define CAM_VFE_BUS_IRQ_REG1 1
+#define CAM_VFE_BUS_IRQ_REG2 2
+#define CAM_VFE_BUS_IRQ_MAX 3
-#define MAX_BUF_UPDATE_REG_NUM 20
+#define CAM_VFE_BUS_VER2_PAYLOAD_MAX 256
+
+#define CAM_VFE_RDI_BUS_DEFAULT_WIDTH 0xFF01
+#define CAM_VFE_RDI_BUS_DEFAULT_STRIDE 0xFF01
+
+#define MAX_BUF_UPDATE_REG_NUM \
+ (sizeof(struct cam_vfe_bus_ver2_reg_offset_bus_client)/4)
#define MAX_REG_VAL_PAIR_SIZE \
- (MAX_BUF_UPDATE_REG_NUM * 2 * CAM_PACKET_MAX_PLANES)
+ (MAX_BUF_UPDATE_REG_NUM * 2 * CAM_PACKET_MAX_PLANES)
#define CAM_VFE_ADD_REG_VAL_PAIR(buf_array, index, offset, val) \
- do { \
- buf_array[index++] = offset; \
- buf_array[index++] = val; \
- } while (0)
-
-static uint32_t irq_reg_offset[CAM_IFE_BUS_IRQ_REGISTERS_MAX] = {
- 0x0000205C,
- 0x00002060,
- 0x00002064,
- 0x0000206C,
- 0x00002070,
- 0x00002074,
- 0x00002078,
-};
+ do { \
+ buf_array[index++] = offset; \
+ buf_array[index++] = val; \
+ } while (0)
enum cam_vfe_bus_packer_format {
PACKER_FMT_PLAIN_128 = 0x0,
@@ -69,23 +68,29 @@
};
struct cam_vfe_bus_ver2_common_data {
+ uint32_t core_index;
void __iomem *mem_base;
struct cam_hw_intf *hw_intf;
void *bus_irq_controller;
void *vfe_irq_controller;
struct cam_vfe_bus_ver2_reg_offset_common *common_reg;
uint32_t io_buf_update[
- MAX_REG_VAL_PAIR_SIZE];
+ MAX_REG_VAL_PAIR_SIZE];
+
+ struct cam_vfe_bus_irq_evt_payload evt_payload[
+ CAM_VFE_BUS_VER2_PAYLOAD_MAX];
+ struct list_head free_payload_list;
};
struct cam_vfe_bus_ver2_wm_resource_data {
uint32_t index;
struct cam_vfe_bus_ver2_common_data *common_data;
struct cam_vfe_bus_ver2_reg_offset_bus_client *hw_regs;
+ void *ctx;
uint32_t irq_enabled;
-
uint32_t init_cfg_done;
+
uint32_t offset;
uint32_t width;
uint32_t height;
@@ -126,6 +131,8 @@
uint32_t dual_slave_core;
uint32_t intra_client_mask;
uint32_t composite_mask;
+
+ void *ctx;
};
struct cam_vfe_bus_ver2_vfe_out_data {
@@ -146,7 +153,6 @@
struct cam_cdm_utils_ops *cdm_util_ops;
};
-
struct cam_vfe_bus_ver2_priv {
struct cam_vfe_bus_ver2_common_data common_data;
@@ -158,12 +164,59 @@
struct list_head free_dual_comp_grp;
struct list_head used_comp_grp;
- struct cam_vfe_bus_irq_evt_payload evt_payload[128];
- struct list_head free_payload_list;
+ uint32_t irq_handle;
};
+static int cam_vfe_bus_get_evt_payload(
+ struct cam_vfe_bus_ver2_common_data *common_data,
+ struct cam_vfe_bus_irq_evt_payload **evt_payload)
+{
+ if (list_empty(&common_data->free_payload_list)) {
+ *evt_payload = NULL;
+ CAM_ERR(CAM_ISP, "No free payload");
+ return -ENODEV;
+ }
+
+ *evt_payload = list_first_entry(&common_data->free_payload_list,
+ struct cam_vfe_bus_irq_evt_payload, list);
+ list_del_init(&(*evt_payload)->list);
+ return 0;
+}
+
static int cam_vfe_bus_put_evt_payload(void *core_info,
- struct cam_vfe_bus_irq_evt_payload **evt_payload);
+ struct cam_vfe_bus_irq_evt_payload **evt_payload)
+{
+ struct cam_vfe_bus_ver2_common_data *common_data = NULL;
+ uint32_t *ife_irq_regs = NULL;
+ uint32_t status_reg0, status_reg1, status_reg2;
+
+ if (!core_info) {
+ CAM_ERR(CAM_ISP, "Invalid param core_info NULL");
+ return -EINVAL;
+ }
+ if (*evt_payload == NULL) {
+ CAM_ERR(CAM_ISP, "No payload to put");
+ return -EINVAL;
+ }
+
+ ife_irq_regs = (*evt_payload)->irq_reg_val;
+ status_reg0 = ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS0];
+ status_reg1 = ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS1];
+ status_reg2 = ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS2];
+
+ if (status_reg0 || status_reg1 || status_reg2) {
+ CAM_DBG(CAM_ISP, "status0 0x%x status1 0x%x status2 0x%x",
+ status_reg0, status_reg1, status_reg2);
+ return 0;
+ }
+
+ common_data = core_info;
+ list_add_tail(&(*evt_payload)->list,
+ &common_data->free_payload_list);
+ *evt_payload = NULL;
+
+ return 0;
+}
static int cam_vfe_bus_ver2_get_intra_client_mask(
enum cam_vfe_bus_ver2_vfe_core_id dual_slave_core,
@@ -175,7 +228,8 @@
*intra_client_mask = 0;
if (dual_slave_core == current_core) {
- pr_err("Invalid params. Same core as Master and Slave\n");
+ CAM_ERR(CAM_ISP,
+ "Invalid params. Same core as Master and Slave");
return -EINVAL;
}
@@ -189,7 +243,7 @@
*intra_client_mask = 0x2;
break;
default:
- pr_err("Invalid value for slave core %u\n",
+ CAM_ERR(CAM_ISP, "Invalid value for slave core %u",
dual_slave_core);
rc = -EINVAL;
break;
@@ -204,7 +258,7 @@
*intra_client_mask = 0x2;
break;
default:
- pr_err("Invalid value for slave core %u\n",
+ CAM_ERR(CAM_ISP, "Invalid value for slave core %u",
dual_slave_core);
rc = -EINVAL;
break;
@@ -219,14 +273,15 @@
*intra_client_mask = 0x2;
break;
default:
- pr_err("Invalid value for slave core %u\n",
+ CAM_ERR(CAM_ISP, "Invalid value for slave core %u",
dual_slave_core);
rc = -EINVAL;
break;
}
break;
default:
- pr_err("Invalid value for master core %u\n", current_core);
+ CAM_ERR(CAM_ISP,
+ "Invalid value for master core %u", current_core);
rc = -EINVAL;
break;
}
@@ -294,6 +349,19 @@
case CAM_FORMAT_MIPI_RAW_14:
case CAM_FORMAT_MIPI_RAW_16:
case CAM_FORMAT_MIPI_RAW_20:
+ case CAM_FORMAT_DPCM_10_6_10:
+ case CAM_FORMAT_DPCM_10_8_10:
+ case CAM_FORMAT_DPCM_12_6_12:
+ case CAM_FORMAT_DPCM_12_8_12:
+ case CAM_FORMAT_DPCM_14_8_14:
+ case CAM_FORMAT_DPCM_14_10_14:
+ case CAM_FORMAT_PLAIN8:
+ case CAM_FORMAT_PLAIN16_8:
+ case CAM_FORMAT_PLAIN16_10:
+ case CAM_FORMAT_PLAIN16_12:
+ case CAM_FORMAT_PLAIN16_14:
+ case CAM_FORMAT_PLAIN16_16:
+ case CAM_FORMAT_PLAIN32_20:
case CAM_FORMAT_PLAIN128:
return 1;
default:
@@ -388,7 +456,8 @@
break;
}
- pr_err("Unsupported format %u for resource_type %u", format, res_type);
+ CAM_ERR(CAM_ISP, "Unsupported format %u for resource_type %u",
+ format, res_type);
return -EINVAL;
}
@@ -581,7 +650,32 @@
switch (out_fmt) {
case CAM_FORMAT_NV21:
case CAM_FORMAT_NV12:
- return PACKER_FMT_PLAIN_8;
+ case CAM_FORMAT_UBWC_NV12:
+ case CAM_FORMAT_UBWC_NV12_4R:
+ return PACKER_FMT_PLAIN_8_LSB_MSB_10;
+ case CAM_FORMAT_PLAIN64:
+ return PACKER_FMT_PLAIN_64;
+ case CAM_FORMAT_MIPI_RAW_6:
+ case CAM_FORMAT_MIPI_RAW_8:
+ case CAM_FORMAT_MIPI_RAW_10:
+ case CAM_FORMAT_MIPI_RAW_12:
+ case CAM_FORMAT_MIPI_RAW_14:
+ case CAM_FORMAT_MIPI_RAW_16:
+ case CAM_FORMAT_MIPI_RAW_20:
+ case CAM_FORMAT_PLAIN128:
+ case CAM_FORMAT_PLAIN8:
+ case CAM_FORMAT_PLAIN16_8:
+ case CAM_FORMAT_PLAIN16_10:
+ case CAM_FORMAT_PLAIN16_12:
+ case CAM_FORMAT_PLAIN16_14:
+ case CAM_FORMAT_PLAIN16_16:
+ case CAM_FORMAT_PLAIN32_20:
+ case CAM_FORMAT_PD8:
+ case CAM_FORMAT_PD10:
+ return PACKER_FMT_PLAIN_128;
+ case CAM_FORMAT_UBWC_TP10:
+ case CAM_FORMAT_TP10:
+ return PACKER_FMT_TP_10;
default:
return PACKER_FMT_MAX;
}
@@ -590,6 +684,8 @@
static int cam_vfe_bus_acquire_wm(
struct cam_vfe_bus_ver2_priv *ver2_bus_priv,
struct cam_isp_out_port_info *out_port_info,
+ void *tasklet,
+ void *ctx,
enum cam_vfe_bus_ver2_vfe_out_type vfe_out_res_id,
enum cam_vfe_bus_plane_type plane,
enum cam_isp_hw_split_id split_id,
@@ -607,16 +703,18 @@
/* No need to allocate for BUS VER2. VFE OUT to WM is fixed. */
wm_idx = cam_vfe_bus_get_wm_idx(vfe_out_res_id, plane);
if (wm_idx < 0 || wm_idx >= CAM_VFE_BUS_VER2_MAX_CLIENTS) {
- pr_err("Unsupported VFE out %d plane %d\n",
+ CAM_ERR(CAM_ISP, "Unsupported VFE out %d plane %d",
vfe_out_res_id, plane);
return -EINVAL;
}
wm_res_local = &ver2_bus_priv->bus_client[wm_idx];
+ wm_res_local->tasklet_info = tasklet;
wm_res_local->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
rsrc_data = wm_res_local->res_priv;
rsrc_data->irq_enabled = subscribe_irq;
+ rsrc_data->ctx = ctx;
rsrc_data->format = out_port_info->format;
rsrc_data->pack_fmt = cam_vfe_bus_get_packer_fmt(rsrc_data->format);
@@ -624,52 +722,67 @@
rsrc_data->height = out_port_info->height;
if (rsrc_data->index < 3) {
- rsrc_data->width = rsrc_data->width * 5/4 * rsrc_data->height;
- rsrc_data->height = 1;
+ /* Write master 0-2 refers to RDI 0/ RDI 1/RDI 2 */
+ rsrc_data->width = CAM_VFE_RDI_BUS_DEFAULT_WIDTH;
+ rsrc_data->height = 0;
+ rsrc_data->stride = CAM_VFE_RDI_BUS_DEFAULT_STRIDE;
rsrc_data->pack_fmt = 0x0;
rsrc_data->en_cfg = 0x3;
- } else if (rsrc_data->index < 5) {
- switch (plane) {
- case PLANE_Y:
- switch (rsrc_data->format) {
- case CAM_FORMAT_UBWC_NV12:
- case CAM_FORMAT_UBWC_NV12_4R:
- case CAM_FORMAT_UBWC_TP10:
- rsrc_data->en_ubwc = 1;
+ } else if (rsrc_data->index < 5 ||
+ rsrc_data->index == 7 || rsrc_data->index == 8) {
+ /* Write master 3, 4 - for Full OUT , 7-8 FD OUT */
+ switch (rsrc_data->format) {
+ case CAM_FORMAT_UBWC_NV12:
+ case CAM_FORMAT_UBWC_NV12_4R:
+ rsrc_data->en_ubwc = 1;
+ /* Fall through for NV12 */
+ case CAM_FORMAT_NV21:
+ case CAM_FORMAT_NV12:
+ switch (plane) {
+ case PLANE_C:
+ rsrc_data->height /= 2;
+ break;
+ case PLANE_Y:
break;
default:
- break;
+ CAM_ERR(CAM_ISP, "Invalid plane %d", plane);
+ return -EINVAL;
}
break;
- case PLANE_C:
- switch (rsrc_data->format) {
- case CAM_FORMAT_NV21:
- case CAM_FORMAT_NV12:
+ case CAM_FORMAT_UBWC_TP10:
+ rsrc_data->en_ubwc = 1;
+ /* Fall through for LINEAR TP10 */
+ case CAM_FORMAT_TP10:
+ rsrc_data->width = rsrc_data->width * 4 / 3;
+ switch (plane) {
+ case PLANE_C:
rsrc_data->height /= 2;
break;
- case CAM_FORMAT_UBWC_NV12:
- case CAM_FORMAT_UBWC_NV12_4R:
- case CAM_FORMAT_UBWC_TP10:
- rsrc_data->height /= 2;
- rsrc_data->en_ubwc = 1;
+ case PLANE_Y:
break;
default:
- break;
+ CAM_ERR(CAM_ISP, "Invalid plane %d", plane);
+ return -EINVAL;
}
break;
default:
- pr_err("Invalid plane type %d\n", plane);
+ CAM_ERR(CAM_ISP, "Invalid format %d",
+ rsrc_data->format);
return -EINVAL;
}
- rsrc_data->pack_fmt = 0xE;
rsrc_data->en_cfg = 0x1;
+ } else if (rsrc_data->index >= 11) {
+ /* Write master 11-19 stats */
+ rsrc_data->width = 0;
+ rsrc_data->height = 0;
+ rsrc_data->stride = 1;
+ rsrc_data->en_cfg = 0x3;
} else {
+ /* Write master 5-6 DS ports , 9 - Raw dump , 10 PDAF */
rsrc_data->width = rsrc_data->width * 4;
rsrc_data->height = rsrc_data->height / 2;
- rsrc_data->pack_fmt = 0x0;
rsrc_data->en_cfg = 0x1;
}
-
if (vfe_out_res_id >= CAM_ISP_IFE_OUT_RES_RDI_0 &&
vfe_out_res_id <= CAM_ISP_IFE_OUT_RES_RDI_3)
rsrc_data->frame_based = 1;
@@ -709,6 +822,8 @@
rsrc_data->ubwc_meta_offset = 0;
rsrc_data->init_cfg_done = 0;
rsrc_data->en_cfg = 0;
+
+ wm_res->tasklet_info = NULL;
wm_res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
return 0;
@@ -721,10 +836,10 @@
wm_res->res_priv;
struct cam_vfe_bus_ver2_common_data *common_data =
rsrc_data->common_data;
+ uint32_t bus_irq_reg_mask[CAM_VFE_BUS_IRQ_MAX] = {0};
cam_io_w_mb(0, common_data->mem_base + rsrc_data->hw_regs->header_addr);
cam_io_w_mb(0, common_data->mem_base + rsrc_data->hw_regs->header_cfg);
- cam_io_w_mb(0, common_data->mem_base + rsrc_data->hw_regs->frame_inc);
cam_io_w(0xf, common_data->mem_base + rsrc_data->hw_regs->burst_limit);
cam_io_w_mb(rsrc_data->width,
@@ -734,24 +849,48 @@
cam_io_w(rsrc_data->pack_fmt,
common_data->mem_base + rsrc_data->hw_regs->packer_cfg);
- cam_io_w(0xFFFFFFFF, common_data->mem_base +
- rsrc_data->hw_regs->irq_subsample_pattern);
- cam_io_w(0x0, common_data->mem_base +
- rsrc_data->hw_regs->irq_subsample_period);
+ /* Configure stride for RDIs */
+ if (rsrc_data->index < 3)
+ cam_io_w_mb(rsrc_data->stride, (common_data->mem_base +
+ rsrc_data->hw_regs->stride));
- cam_io_w(0xFFFFFFFF,
- common_data->mem_base + rsrc_data->hw_regs->framedrop_pattern);
- cam_io_w(0x0,
- common_data->mem_base + rsrc_data->hw_regs->framedrop_period);
+ /* Subscribe IRQ */
+ if (rsrc_data->irq_enabled) {
+ CAM_DBG(CAM_ISP, "Subscribe WM%d IRQ", rsrc_data->index);
+ bus_irq_reg_mask[CAM_VFE_BUS_IRQ_REG1] =
+ (1 << rsrc_data->index);
+ wm_res->irq_handle = cam_irq_controller_subscribe_irq(
+ common_data->bus_irq_controller, CAM_IRQ_PRIORITY_1,
+ bus_irq_reg_mask, wm_res,
+ wm_res->top_half_handler,
+ cam_ife_mgr_do_tasklet_buf_done,
+ wm_res->tasklet_info, cam_tasklet_enqueue_cmd);
+ if (wm_res->irq_handle < 0) {
+ CAM_ERR(CAM_ISP, "Subscribe IRQ failed for WM %d",
+ rsrc_data->index);
+ return -EFAULT;
+ }
+ }
- CDBG("WM res %d width = %d, height = %d\n", rsrc_data->index,
+ /* enable ubwc if needed*/
+ if (rsrc_data->en_ubwc) {
+ cam_io_w_mb(0x1, common_data->mem_base +
+ rsrc_data->hw_regs->ubwc_regs->mode_cfg);
+ }
+
+ /* Enable WM */
+ cam_io_w_mb(rsrc_data->en_cfg, common_data->mem_base +
+ rsrc_data->hw_regs->cfg);
+
+ CAM_DBG(CAM_ISP, "WM res %d width = %d, height = %d", rsrc_data->index,
rsrc_data->width, rsrc_data->height);
- CDBG("WM res %d pk_fmt = %d\n", rsrc_data->index,
+ CAM_DBG(CAM_ISP, "WM res %d pk_fmt = %d", rsrc_data->index,
rsrc_data->pack_fmt & PACKER_FMT_MAX);
- CDBG("WM res %d stride = %d, burst len = %d\n",
+ CAM_DBG(CAM_ISP, "WM res %d stride = %d, burst len = %d",
rsrc_data->index, rsrc_data->stride, 0xf);
- CDBG("enable WM res %d offset 0x%x val 0x%x\n", rsrc_data->index,
- (uint32_t) rsrc_data->hw_regs->cfg, rsrc_data->en_cfg);
+ CAM_DBG(CAM_ISP, "enable WM res %d offset 0x%x val 0x%x",
+ rsrc_data->index, (uint32_t) rsrc_data->hw_regs->cfg,
+ rsrc_data->en_cfg);
wm_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
@@ -770,14 +909,12 @@
cam_io_w_mb(0x0,
common_data->mem_base + rsrc_data->hw_regs->cfg);
- CDBG("irq_enabled %d", rsrc_data->irq_enabled);
+ CAM_DBG(CAM_ISP, "irq_enabled %d", rsrc_data->irq_enabled);
/* Unsubscribe IRQ */
- if (rsrc_data->irq_enabled) {
- /*
- * Currently all WM IRQ are unsubscribed in one place. Need to
- * make it dynamic.
- */
- }
+ if (rsrc_data->irq_enabled)
+ rc = cam_irq_controller_unsubscribe_irq(
+ common_data->bus_irq_controller,
+ wm_res->irq_handle);
/* Halt & Reset WM */
cam_io_w_mb(BIT(rsrc_data->index),
@@ -791,7 +928,43 @@
static int cam_vfe_bus_handle_wm_done_top_half(uint32_t evt_id,
struct cam_irq_th_payload *th_payload)
{
- return -EPERM;
+ int32_t rc;
+ int i;
+ struct cam_isp_resource_node *wm_res = NULL;
+ struct cam_vfe_bus_ver2_wm_resource_data *rsrc_data = NULL;
+ struct cam_vfe_bus_irq_evt_payload *evt_payload;
+
+ wm_res = th_payload->handler_priv;
+ if (!wm_res) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "Error! No resource");
+ return -ENODEV;
+ }
+
+ rsrc_data = wm_res->res_priv;
+
+ CAM_DBG(CAM_ISP, "IRQ status_0 = %x", th_payload->evt_status_arr[0]);
+ CAM_DBG(CAM_ISP, "IRQ status_1 = %x", th_payload->evt_status_arr[1]);
+
+ rc = cam_vfe_bus_get_evt_payload(rsrc_data->common_data, &evt_payload);
+ if (rc) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "No tasklet_cmd is free in queue");
+ return rc;
+ }
+
+ cam_isp_hw_get_timestamp(&evt_payload->ts);
+
+ evt_payload->ctx = rsrc_data->ctx;
+ evt_payload->core_index = rsrc_data->common_data->core_index;
+ evt_payload->evt_id = evt_id;
+
+ for (i = 0; i < th_payload->num_registers; i++)
+ evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+ th_payload->evt_payload_priv = evt_payload;
+
+ CAM_DBG(CAM_ISP, "Exit");
+ return rc;
}
static int cam_vfe_bus_handle_wm_done_bottom_half(void *wm_node,
@@ -816,9 +989,10 @@
~BIT(rsrc_data->index);
rc = CAM_VFE_IRQ_STATUS_SUCCESS;
}
+ CAM_DBG(CAM_ISP, "status_reg %x rc %d", status_reg, rc);
if (rc == CAM_VFE_IRQ_STATUS_SUCCESS)
- cam_vfe_bus_put_evt_payload(evt_payload->core_info,
+ cam_vfe_bus_put_evt_payload(rsrc_data->common_data,
&evt_payload);
return rc;
@@ -829,15 +1003,13 @@
struct cam_vfe_bus_ver2_hw_info *ver2_hw_info,
struct cam_isp_resource_node *wm_res)
{
- int rc = 0;
struct cam_vfe_bus_ver2_wm_resource_data *rsrc_data;
rsrc_data = kzalloc(sizeof(struct cam_vfe_bus_ver2_wm_resource_data),
GFP_KERNEL);
if (!rsrc_data) {
- CDBG("Failed to alloc for wm res priv\n");
- rc = -ENOMEM;
- return rc;
+ CAM_ERR(CAM_ISP, "Failed to alloc for WM res priv");
+ return -ENOMEM;
}
wm_res->res_priv = rsrc_data;
@@ -854,7 +1026,32 @@
wm_res->bottom_half_handler = cam_vfe_bus_handle_wm_done_bottom_half;
wm_res->hw_intf = ver2_bus_priv->common_data.hw_intf;
- return rc;
+ return 0;
+}
+
+static int cam_vfe_bus_deinit_wm_resource(
+ struct cam_isp_resource_node *wm_res)
+{
+ struct cam_vfe_bus_ver2_wm_resource_data *rsrc_data;
+
+ wm_res->res_state = CAM_ISP_RESOURCE_STATE_UNAVAILABLE;
+ INIT_LIST_HEAD(&wm_res->list);
+
+ wm_res->start = NULL;
+ wm_res->stop = NULL;
+ wm_res->top_half_handler = NULL;
+ wm_res->bottom_half_handler = NULL;
+ wm_res->hw_intf = NULL;
+
+ rsrc_data = wm_res->res_priv;
+ wm_res->res_priv = NULL;
+ if (!rsrc_data) {
+ CAM_ERR(CAM_ISP, "Error! WM res priv is NULL");
+ return -ENODEV;
+ }
+ kfree(rsrc_data);
+
+ return 0;
}
static void cam_vfe_bus_add_wm_to_comp_grp(
@@ -892,6 +1089,8 @@
static int cam_vfe_bus_acquire_comp_grp(
struct cam_vfe_bus_ver2_priv *ver2_bus_priv,
struct cam_isp_out_port_info *out_port_info,
+ void *tasklet,
+ void *ctx,
uint32_t unique_id,
uint32_t is_dual,
uint32_t is_master,
@@ -910,7 +1109,7 @@
/* First find a free group */
if (is_dual) {
if (list_empty(&ver2_bus_priv->free_dual_comp_grp)) {
- pr_err("No Free Composite Group\n");
+ CAM_ERR(CAM_ISP, "No Free Composite Group");
return -ENODEV;
}
comp_grp_local = list_first_entry(
@@ -923,7 +1122,7 @@
&rsrc_data->intra_client_mask);
} else {
if (list_empty(&ver2_bus_priv->free_comp_grp)) {
- pr_err("No Free Composite Group\n");
+ CAM_ERR(CAM_ISP, "No Free Composite Group");
return -ENODEV;
}
comp_grp_local = list_first_entry(
@@ -933,6 +1132,7 @@
}
list_del(&comp_grp_local->list);
+ comp_grp_local->tasklet_info = tasklet;
comp_grp_local->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
rsrc_data->is_master = is_master;
@@ -948,13 +1148,14 @@
/* Do not support runtime change in composite mask */
if (comp_grp_local->res_state ==
CAM_ISP_RESOURCE_STATE_STREAMING) {
- pr_err("Invalid State %d Comp Grp %u\n",
+ CAM_ERR(CAM_ISP, "Invalid State %d Comp Grp %u",
comp_grp_local->res_state,
rsrc_data->comp_grp_type);
return -EBUSY;
}
}
+ rsrc_data->ctx = ctx;
*comp_grp = comp_grp_local;
return rc;
@@ -969,7 +1170,7 @@
int match_found = 0;
if (!in_comp_grp) {
- pr_err("Invalid Params Comp Grp %pK\n", in_rsrc_data);
+ CAM_ERR(CAM_ISP, "Invalid Params Comp Grp %pK", in_rsrc_data);
return -EINVAL;
}
@@ -988,7 +1189,7 @@
}
if (!match_found) {
- pr_err("Could not find matching Comp Grp type %u\n",
+ CAM_ERR(CAM_ISP, "Could not find matching Comp Grp type %u",
in_rsrc_data->comp_grp_type);
return -ENODEV;
}
@@ -1008,6 +1209,7 @@
in_rsrc_data->composite_mask = 0;
in_rsrc_data->dual_slave_core = CAM_VFE_BUS_VER2_VFE_CORE_MAX;
+ comp_grp->tasklet_info = NULL;
comp_grp->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
return 0;
@@ -1020,17 +1222,14 @@
comp_grp->res_priv;
struct cam_vfe_bus_ver2_common_data *common_data =
rsrc_data->common_data;
-
- /*
- * Individual Comp_Grp Subscribe IRQ can be done here once
- * dynamic IRQ enable support is added.
- */
+ uint32_t bus_irq_reg_mask[CAM_VFE_BUS_IRQ_MAX] = {0};
cam_io_w_mb(rsrc_data->composite_mask, common_data->mem_base +
rsrc_data->hw_regs->comp_mask);
- CDBG("composite_mask is 0x%x\n", rsrc_data->composite_mask);
- CDBG("composite_mask addr 0x%x\n", rsrc_data->hw_regs->comp_mask);
+ CAM_DBG(CAM_ISP, "composite_mask is 0x%x", rsrc_data->composite_mask);
+ CAM_DBG(CAM_ISP, "composite_mask addr 0x%x",
+ rsrc_data->hw_regs->comp_mask);
if (rsrc_data->comp_grp_type >= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 &&
rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5 &&
@@ -1046,9 +1245,30 @@
cam_io_w_mb(intra_client_en, common_data->mem_base +
common_data->common_reg->dual_master_comp_cfg);
+
+ bus_irq_reg_mask[CAM_VFE_BUS_IRQ_REG2] = (1 << dual_comp_grp);
+ } else {
+ /* IRQ bits for COMP GRP start at 5. So add 5 to the shift */
+ bus_irq_reg_mask[CAM_VFE_BUS_IRQ_REG0] =
+ (1 << (rsrc_data->comp_grp_type + 5));
+ }
+
+ /* Subscribe IRQ */
+ CAM_DBG(CAM_ISP, "Subscribe COMP_GRP%d IRQ", rsrc_data->comp_grp_type);
+ comp_grp->irq_handle = cam_irq_controller_subscribe_irq(
+ common_data->bus_irq_controller, CAM_IRQ_PRIORITY_1,
+ bus_irq_reg_mask, comp_grp,
+ comp_grp->top_half_handler,
+ cam_ife_mgr_do_tasklet_buf_done,
+ comp_grp->tasklet_info, cam_tasklet_enqueue_cmd);
+ if (comp_grp->irq_handle < 0) {
+ CAM_ERR(CAM_ISP, "Subscribe IRQ failed for comp_grp %d",
+ rsrc_data->comp_grp_type);
+ return -EFAULT;
}
comp_grp->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+
return rc;
}
@@ -1061,6 +1281,9 @@
rsrc_data->common_data;
/* Unsubscribe IRQ */
+ rc = cam_irq_controller_unsubscribe_irq(
+ common_data->bus_irq_controller,
+ comp_grp->irq_handle);
cam_io_w_mb(rsrc_data->composite_mask, common_data->mem_base +
rsrc_data->hw_regs->comp_mask);
@@ -1088,7 +1311,43 @@
static int cam_vfe_bus_handle_comp_done_top_half(uint32_t evt_id,
struct cam_irq_th_payload *th_payload)
{
- return -EPERM;
+ int32_t rc;
+ int i;
+ struct cam_isp_resource_node *comp_grp = NULL;
+ struct cam_vfe_bus_ver2_comp_grp_data *rsrc_data = NULL;
+ struct cam_vfe_bus_irq_evt_payload *evt_payload;
+
+ comp_grp = th_payload->handler_priv;
+ if (!comp_grp) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "Error! No resource");
+ return -ENODEV;
+ }
+
+ rsrc_data = comp_grp->res_priv;
+
+ CAM_DBG(CAM_ISP, "IRQ status_0 = %x", th_payload->evt_status_arr[0]);
+ CAM_DBG(CAM_ISP, "IRQ status_1 = %x", th_payload->evt_status_arr[1]);
+
+ rc = cam_vfe_bus_get_evt_payload(rsrc_data->common_data, &evt_payload);
+ if (rc) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "No tasklet_cmd is free in queue");
+ return rc;
+ }
+
+ cam_isp_hw_get_timestamp(&evt_payload->ts);
+
+ evt_payload->ctx = rsrc_data->ctx;
+ evt_payload->core_index = rsrc_data->common_data->core_index;
+ evt_payload->evt_id = evt_id;
+
+ for (i = 0; i < th_payload->num_registers; i++)
+ evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+ th_payload->evt_payload_priv = evt_payload;
+
+ CAM_DBG(CAM_ISP, "Exit");
+ return rc;
}
static int cam_vfe_bus_handle_comp_done_bottom_half(
@@ -1104,12 +1363,13 @@
uint32_t comp_err_reg;
uint32_t comp_grp_id;
+ CAM_DBG(CAM_ISP, "comp grp type %d", rsrc_data->comp_grp_type);
+
if (!evt_payload)
return rc;
cam_ife_irq_regs = evt_payload->irq_reg_val;
- CDBG("comp grp type %d\n", rsrc_data->comp_grp_type);
switch (rsrc_data->comp_grp_type) {
case CAM_VFE_BUS_VER2_COMP_GRP_0:
case CAM_VFE_BUS_VER2_COMP_GRP_1:
@@ -1146,8 +1406,8 @@
rc = CAM_VFE_IRQ_STATUS_SUCCESS;
}
- CDBG("status reg = 0x%x, bit index = %d\n",
- status_reg, (comp_grp_id + 5));
+ CAM_DBG(CAM_ISP, "status reg = 0x%x, bit index = %d rc %d",
+ status_reg, (comp_grp_id + 5), rc);
break;
case CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0:
@@ -1188,11 +1448,13 @@
break;
default:
rc = CAM_VFE_IRQ_STATUS_ERR;
+ CAM_ERR(CAM_ISP, "Error! Invalid comp_grp_type %u",
+ rsrc_data->comp_grp_type);
break;
}
if (rc == CAM_VFE_IRQ_STATUS_SUCCESS)
- cam_vfe_bus_put_evt_payload(evt_payload->core_info,
+ cam_vfe_bus_put_evt_payload(rsrc_data->common_data,
&evt_payload);
return rc;
@@ -1203,13 +1465,12 @@
struct cam_vfe_bus_ver2_hw_info *ver2_hw_info,
struct cam_isp_resource_node *comp_grp)
{
- struct cam_vfe_bus_ver2_comp_grp_data *rsrc_data =
- comp_grp->res_priv;
+ struct cam_vfe_bus_ver2_comp_grp_data *rsrc_data = NULL;
rsrc_data = kzalloc(sizeof(struct cam_vfe_bus_ver2_comp_grp_data),
GFP_KERNEL);
if (!rsrc_data) {
- CDBG("Failed to alloc for comp_grp_priv\n");
+ CAM_ERR(CAM_ISP, "Failed to alloc for comp_grp_priv");
return -ENOMEM;
}
comp_grp->res_priv = rsrc_data;
@@ -1222,7 +1483,6 @@
rsrc_data->hw_regs = &ver2_hw_info->comp_grp_reg[index];
rsrc_data->dual_slave_core = CAM_VFE_BUS_VER2_VFE_CORE_MAX;
-
if (rsrc_data->comp_grp_type >= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 &&
rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5)
list_add_tail(&comp_grp->list,
@@ -1241,7 +1501,34 @@
return 0;
}
-static int cam_vfe_bus_acquire_vfe_out(void *bus_priv, void *acquire_args)
+static int cam_vfe_bus_deinit_comp_grp(
+ struct cam_isp_resource_node *comp_grp)
+{
+ struct cam_vfe_bus_ver2_comp_grp_data *rsrc_data =
+ comp_grp->res_priv;
+
+ comp_grp->start = NULL;
+ comp_grp->stop = NULL;
+ comp_grp->top_half_handler = NULL;
+ comp_grp->bottom_half_handler = NULL;
+ comp_grp->hw_intf = NULL;
+
+ list_del_init(&comp_grp->list);
+ comp_grp->res_state = CAM_ISP_RESOURCE_STATE_UNAVAILABLE;
+
+ comp_grp->res_priv = NULL;
+
+ if (!rsrc_data) {
+ CAM_ERR(CAM_ISP, "Error! comp_grp_priv is NULL");
+ return -ENODEV;
+ }
+ kfree(rsrc_data);
+
+ return 0;
+}
+
+static int cam_vfe_bus_acquire_vfe_out(void *bus_priv, void *acquire_args,
+ uint32_t args_size)
{
int rc = -ENODEV;
int i;
@@ -1257,14 +1544,14 @@
struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data = NULL;
if (!bus_priv || !acquire_args) {
- pr_err("Invalid Param");
+ CAM_ERR(CAM_ISP, "Invalid Param");
return -EINVAL;
}
out_acquire_args = &acq_args->vfe_out;
format = out_acquire_args->out_port_info->format;
- CDBG("Acquiring resource type 0x%x\n",
+ CAM_DBG(CAM_ISP, "Acquiring resource type 0x%x",
out_acquire_args->out_port_info->res_type);
vfe_out_res_id = cam_vfe_bus_get_out_res_id(
@@ -1278,7 +1565,7 @@
rsrc_node = &ver2_bus_priv->vfe_out[vfe_out_res_id];
if (rsrc_node->res_state != CAM_ISP_RESOURCE_STATE_AVAILABLE) {
- pr_err("Resource not available: Res_id %d state:%d\n",
+ CAM_ERR(CAM_ISP, "Resource not available: Res_id %d state:%d",
vfe_out_res_id, rsrc_node->res_state);
return -EBUSY;
}
@@ -1297,30 +1584,45 @@
CAM_ISP_RES_COMP_GROUP_ID_MAX)) {
rc = cam_vfe_bus_acquire_comp_grp(ver2_bus_priv,
out_acquire_args->out_port_info,
+ acq_args->tasklet,
+ out_acquire_args->ctx,
out_acquire_args->unique_id,
out_acquire_args->is_dual,
out_acquire_args->is_master,
out_acquire_args->dual_slave_core,
&rsrc_data->comp_grp);
- if (rc < 0)
+ if (rc) {
+ CAM_ERR(CAM_ISP,
+ "VFE%d Comp_Grp acquire fail for Out %d rc=%d",
+ rsrc_data->common_data->core_index,
+ vfe_out_res_id, rc);
return rc;
+ }
subscribe_irq = 0;
- } else
+ } else {
subscribe_irq = 1;
+ }
/* Reserve WM */
for (i = 0; i < num_wm; i++) {
rc = cam_vfe_bus_acquire_wm(ver2_bus_priv,
out_acquire_args->out_port_info,
+ acq_args->tasklet,
+ out_acquire_args->ctx,
vfe_out_res_id,
i,
out_acquire_args->split_id,
subscribe_irq,
&rsrc_data->wm_res[i],
&client_done_mask);
- if (rc < 0)
+ if (rc) {
+ CAM_ERR(CAM_ISP,
+ "VFE%d WM acquire failed for Out %d rc=%d",
+ rsrc_data->common_data->core_index,
+ vfe_out_res_id, rc);
goto release_wm;
+ }
if (rsrc_data->comp_grp)
cam_vfe_bus_add_wm_to_comp_grp(rsrc_data->comp_grp,
@@ -1330,7 +1632,7 @@
rsrc_node->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
out_acquire_args->rsrc_node = rsrc_node;
- CDBG("Acquire successful\n");
+ CAM_DBG(CAM_ISP, "Acquire successful");
return rc;
release_wm:
@@ -1343,14 +1645,24 @@
return rc;
}
-static int cam_vfe_bus_release_vfe_out(void *bus_priv,
- struct cam_isp_resource_node *vfe_out)
+static int cam_vfe_bus_release_vfe_out(void *bus_priv, void *release_args,
+ uint32_t args_size)
{
uint32_t i;
- struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data = vfe_out->res_priv;
+ struct cam_isp_resource_node *vfe_out = NULL;
+ struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data = NULL;
+
+ if (!bus_priv || !release_args) {
+ CAM_ERR(CAM_ISP, "Invalid input bus_priv %pK release_args %pK",
+ bus_priv, release_args);
+ return -EINVAL;
+ }
+
+ vfe_out = release_args;
+ rsrc_data = vfe_out->res_priv;
if (vfe_out->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
- pr_err("Error! Invalid resource state:%d\n",
+ CAM_ERR(CAM_ISP, "Error! Invalid resource state:%d",
vfe_out->res_state);
}
@@ -1372,43 +1684,35 @@
return 0;
}
-static int cam_vfe_bus_start_vfe_out(struct cam_isp_resource_node *vfe_out)
+static int cam_vfe_bus_start_vfe_out(
+ struct cam_isp_resource_node *vfe_out)
{
int rc = 0, i;
- struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data = vfe_out->res_priv;
- struct cam_vfe_bus_ver2_common_data *common_data =
- rsrc_data->common_data;
+ struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data = NULL;
+ struct cam_vfe_bus_ver2_common_data *common_data = NULL;
- CDBG("Start resource index %d\n", rsrc_data->out_type);
+ if (!vfe_out) {
+ CAM_ERR(CAM_ISP, "Invalid input");
+ return -EINVAL;
+ }
+
+ rsrc_data = vfe_out->res_priv;
+ common_data = rsrc_data->common_data;
+
+ CAM_DBG(CAM_ISP, "Start resource index %d", rsrc_data->out_type);
if (vfe_out->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
- pr_err("Error! Invalid resource state:%d\n",
+ CAM_ERR(CAM_ISP, "Error! Invalid resource state:%d",
vfe_out->res_state);
return -EACCES;
}
- /* Enable IRQ Mask */
- cam_io_w_mb(0x00001FE0, common_data->mem_base + 0x2044);
- cam_io_w_mb(0x000FFFE7, common_data->mem_base + 0x2048);
- cam_io_w_mb(0x000000FF, common_data->mem_base + 0x204c);
-
for (i = 0; i < rsrc_data->num_wm; i++)
rc = cam_vfe_bus_start_wm(rsrc_data->wm_res[i]);
if (rsrc_data->comp_grp)
rc = cam_vfe_bus_start_comp_grp(rsrc_data->comp_grp);
- /* VFE_MODULE_BUS_CGC_OVERRIDE */
- cam_io_w_mb(0xFFFFFFFF, rsrc_data->common_data->mem_base + 0x0000003C);
- /* VFE_MODULE_COLOR_CGC_OVERRIDE */
- cam_io_w_mb(0xFFFFFFFF, rsrc_data->common_data->mem_base + 0x00000034);
- /* VFE_MODULE_ZOOM_CGC_OVERRIDE */
- cam_io_w_mb(0xFFFFFFFF, rsrc_data->common_data->mem_base + 0x38);
- /* VFE_MODULE_LENS_CGC_OVERRIDE */
- cam_io_w_mb(0xFFFFFFFF, rsrc_data->common_data->mem_base + 0x0000002C);
- /* VFE_MODULE_STATS_CGC_OVERRIDE */
- cam_io_w_mb(0xFFFFFFFF, rsrc_data->common_data->mem_base + 0x00000030);
-
/* BUS_WR_INPUT_IF_ADDR_SYNC_CFG */
cam_io_w_mb(0x0, rsrc_data->common_data->mem_base + 0x0000207C);
/* BUS_WR_INPUT_IF_ADDR_SYNC_FRAME_HEADER */
@@ -1431,13 +1735,22 @@
/* BUS_WR_TEST_BUS_CTRL */
cam_io_w_mb(0x0, rsrc_data->common_data->mem_base + 0x0000211C);
+ vfe_out->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
return rc;
}
-static int cam_vfe_bus_stop_vfe_out(struct cam_isp_resource_node *vfe_out)
+static int cam_vfe_bus_stop_vfe_out(
+ struct cam_isp_resource_node *vfe_out)
{
int rc = 0, i;
- struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data = vfe_out->res_priv;
+ struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data = NULL;
+
+ if (!vfe_out) {
+ CAM_ERR(CAM_ISP, "Invalid input");
+ return -EINVAL;
+ }
+
+ rsrc_data = vfe_out->res_priv;
if (vfe_out->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE ||
vfe_out->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
@@ -1450,7 +1763,6 @@
for (i = 0; i < rsrc_data->num_wm; i++)
rc = cam_vfe_bus_stop_wm(rsrc_data->wm_res[i]);
- vfe_out->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
vfe_out->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
return rc;
@@ -1497,7 +1809,7 @@
rsrc_data = kzalloc(sizeof(struct cam_vfe_bus_ver2_vfe_out_data),
GFP_KERNEL);
if (!rsrc_data) {
- CDBG("Error! Failed to alloc for vfe out priv\n");
+ CAM_ERR(CAM_ISP, "Error! Failed to alloc for vfe out priv");
rc = -ENOMEM;
return rc;
}
@@ -1524,122 +1836,39 @@
return 0;
}
-static int cam_vfe_bus_get_evt_payload(
- struct cam_vfe_bus_ver2_priv *bus_priv,
- struct cam_vfe_bus_irq_evt_payload **evt_payload)
+static int cam_vfe_bus_deinit_vfe_out_resource(
+ struct cam_isp_resource_node *vfe_out)
{
- if (list_empty(&bus_priv->free_payload_list)) {
- *evt_payload = NULL;
- pr_err("No free payload\n");
- return -ENODEV;
+ struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data = vfe_out->res_priv;
+
+ vfe_out->start = NULL;
+ vfe_out->stop = NULL;
+ vfe_out->top_half_handler = NULL;
+ vfe_out->bottom_half_handler = NULL;
+ vfe_out->hw_intf = NULL;
+
+ vfe_out->res_state = CAM_ISP_RESOURCE_STATE_UNAVAILABLE;
+ INIT_LIST_HEAD(&vfe_out->list);
+ vfe_out->res_priv = NULL;
+
+ if (!rsrc_data) {
+ CAM_ERR(CAM_ISP, "Error! vfe out priv is NULL");
+ return -ENODEV;
}
+ kfree(rsrc_data);
- *evt_payload = list_first_entry(&bus_priv->free_payload_list,
- struct cam_vfe_bus_irq_evt_payload, list);
- list_del_init(&(*evt_payload)->list);
- return 0;
-}
-
-static int cam_vfe_bus_put_evt_payload(void *core_info,
- struct cam_vfe_bus_irq_evt_payload **evt_payload)
-{
- struct cam_vfe_bus_ver2_priv *bus_priv = NULL;
- uint32_t *cam_ife_irq_regs = (*evt_payload)->irq_reg_val;
- uint32_t status_reg0, status_reg1;
-
- status_reg0 = cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS0];
- status_reg1 = cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS1];
-
- if (status_reg0 || status_reg1) {
- CDBG("status0 0x%x status1 0x%x\n", status_reg0, status_reg1);
- return 0;
- }
-
- if (!core_info) {
- pr_err("Invalid param core_info NULL");
- return -EINVAL;
- }
- if (*evt_payload == NULL) {
- pr_err("No payload to put\n");
- return -EINVAL;
- }
- bus_priv = (*evt_payload)->bus_priv;
- list_add_tail(&(*evt_payload)->list, &bus_priv->free_payload_list);
- *evt_payload = NULL;
return 0;
}
static int cam_vfe_bus_ver2_handle_irq(uint32_t evt_id,
struct cam_irq_th_payload *th_payload)
{
- int32_t rc;
- int i;
- struct cam_vfe_irq_handler_priv *handler_priv;
- struct cam_vfe_hw_core_info *core_info;
- struct cam_vfe_bus_irq_evt_payload *evt_payload;
- struct cam_vfe_bus *bus_info;
struct cam_vfe_bus_ver2_priv *bus_priv;
- struct cam_irq_controller_reg_info *reg_info;
- uint32_t irq_mask;
- int found = 0;
- handler_priv = th_payload->handler_priv;
- core_info = handler_priv->core_info;
- bus_info = core_info->vfe_bus;
- bus_priv = bus_info->bus_priv;
- reg_info = &bus_priv->common_data.common_reg->irq_reg_info;
-
- /*
- * add reset ack handling here once supported.
- * Just clear all the bus irq status registers and ignore the reset.
- */
-
- CDBG("Enter\n");
- rc = cam_vfe_bus_get_evt_payload(bus_priv, &evt_payload);
- if (rc) {
- pr_err("No tasklet_cmd is free in queue\n");
- return rc;
- }
-
- cam_isp_hw_get_timestamp(&evt_payload->ts);
-
- evt_payload->core_index = handler_priv->core_index;
- evt_payload->core_info = handler_priv->core_info;
- evt_payload->bus_priv = bus_priv;
- CDBG("core_idx %d, core_info %llx\n", handler_priv->core_index,
- (uint64_t)handler_priv->core_info);
-
- for (i = 0; i < CAM_IFE_BUS_IRQ_REGISTERS_MAX; i++) {
- irq_mask = cam_io_r(handler_priv->mem_base +
- irq_reg_offset[i] - (0xC * 2));
- evt_payload->irq_reg_val[i] = irq_mask &
- cam_io_r(handler_priv->mem_base + irq_reg_offset[i]);
- if (evt_payload->irq_reg_val[i])
- found = 1;
- CDBG("irq_status%d = 0x%x\n", i, evt_payload->irq_reg_val[i]);
- }
- for (i = 0; i <= CAM_IFE_IRQ_BUS_REG_STATUS2; i++) {
- cam_io_w(evt_payload->irq_reg_val[i], handler_priv->mem_base +
- reg_info->irq_reg_set[i].clear_reg_offset);
- CDBG("Clear irq_status%d = 0x%x offset 0x%x\n", i,
- evt_payload->irq_reg_val[i],
- reg_info->irq_reg_set[i].clear_reg_offset);
- }
- cam_io_w(reg_info->global_clear_bitmask, handler_priv->mem_base +
- reg_info->global_clear_offset);
- CDBG("Global clear bitmask = 0x%x offset 0x%x\n",
- reg_info->global_clear_bitmask,
- reg_info->global_clear_offset);
-
- if (found)
- th_payload->evt_payload_priv = evt_payload;
- else {
- cam_vfe_bus_put_evt_payload(evt_payload->core_info,
- &evt_payload);
- rc = -ENOMSG;
- }
-
- return rc;
+ bus_priv = th_payload->handler_priv;
+ CAM_DBG(CAM_ISP, "Enter");
+ return cam_irq_controller_handle_irq(evt_id,
+ bus_priv->common_data.bus_irq_controller);
}
static int cam_vfe_bus_update_buf(void *priv, void *cmd_args,
@@ -1652,6 +1881,7 @@
struct cam_vfe_bus_ver2_wm_resource_data *wm_data = NULL;
uint32_t *reg_val_pair;
uint32_t i, j, size = 0;
+ uint32_t frame_inc = 0;
/*
* Need the entire buf io config so we can get the stride info
@@ -1665,31 +1895,39 @@
update_buf->cdm.res->res_priv;
if (!vfe_out_data || !vfe_out_data->cdm_util_ops) {
- pr_err("Failed! Invalid data\n");
+ CAM_ERR(CAM_ISP, "Failed! Invalid data");
return -EINVAL;
}
if (update_buf->num_buf != vfe_out_data->num_wm) {
- pr_err("Failed! Invalid number buffers:%d required:%d\n",
+ CAM_ERR(CAM_ISP,
+ "Failed! Invalid number buffers:%d required:%d",
update_buf->num_buf, vfe_out_data->num_wm);
- return -ENOMEM;
+ return -EINVAL;
}
reg_val_pair = &vfe_out_data->common_data->io_buf_update[0];
io_cfg = update_buf->io_cfg;
for (i = 0, j = 0; i < vfe_out_data->num_wm; i++) {
+ if (j >= (MAX_REG_VAL_PAIR_SIZE - MAX_BUF_UPDATE_REG_NUM * 2)) {
+ CAM_ERR(CAM_ISP,
+ "reg_val_pair %d exceeds the array limit %lu",
+ j, MAX_REG_VAL_PAIR_SIZE);
+ return -ENOMEM;
+ }
+
wm_data = vfe_out_data->wm_res[i]->res_priv;
/* For initial configuration program all bus registers */
- if (wm_data->stride != io_cfg->planes[i].plane_stride ||
- !wm_data->init_cfg_done) {
+ if ((wm_data->stride != io_cfg->planes[i].plane_stride ||
+ !wm_data->init_cfg_done) && (wm_data->index >= 3)) {
CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
wm_data->hw_regs->stride,
io_cfg->planes[i].plane_stride);
wm_data->stride = io_cfg->planes[i].plane_stride;
}
- CDBG("image stride 0x%x\n", wm_data->stride);
+ CAM_DBG(CAM_ISP, "image stride 0x%x", wm_data->stride);
if (wm_data->framedrop_pattern != io_cfg->framedrop_pattern ||
!wm_data->init_cfg_done) {
@@ -1698,7 +1936,8 @@
io_cfg->framedrop_pattern);
wm_data->framedrop_pattern = io_cfg->framedrop_pattern;
}
- CDBG("framedrop pattern 0x%x\n", wm_data->framedrop_pattern);
+ CAM_DBG(CAM_ISP, "framedrop pattern 0x%x",
+ wm_data->framedrop_pattern);
if (wm_data->framedrop_period != io_cfg->framedrop_period ||
!wm_data->init_cfg_done) {
@@ -1707,7 +1946,8 @@
io_cfg->framedrop_period);
wm_data->framedrop_period = io_cfg->framedrop_period;
}
- CDBG("framedrop period 0x%x\n", wm_data->framedrop_period);
+ CAM_DBG(CAM_ISP, "framedrop period 0x%x",
+ wm_data->framedrop_period);
if (wm_data->irq_subsample_period != io_cfg->subsample_period
|| !wm_data->init_cfg_done) {
@@ -1717,7 +1957,7 @@
wm_data->irq_subsample_period =
io_cfg->subsample_period;
}
- CDBG("irq subsample period 0x%x\n",
+ CAM_DBG(CAM_ISP, "irq subsample period 0x%x",
wm_data->irq_subsample_period);
if (wm_data->irq_subsample_pattern != io_cfg->subsample_pattern
@@ -1728,13 +1968,13 @@
wm_data->irq_subsample_pattern =
io_cfg->subsample_pattern;
}
- CDBG("irq subsample pattern 0x%x\n",
+ CAM_DBG(CAM_ISP, "irq subsample pattern 0x%x",
wm_data->irq_subsample_pattern);
if (wm_data->en_ubwc) {
if (!wm_data->hw_regs->ubwc_regs) {
- pr_err("%s: No UBWC register to configure.\n",
- __func__);
+ CAM_ERR(CAM_ISP,
+ "No UBWC register to configure.");
return -EINVAL;
}
if (wm_data->packer_cfg !=
@@ -1746,7 +1986,8 @@
wm_data->packer_cfg =
io_cfg->planes[i].packer_config;
}
- CDBG("packer cfg 0x%x\n", wm_data->packer_cfg);
+ CAM_DBG(CAM_ISP, "packer cfg 0x%x",
+ wm_data->packer_cfg);
if (wm_data->tile_cfg != io_cfg->planes[i].tile_config
|| !wm_data->init_cfg_done) {
@@ -1756,7 +1997,7 @@
wm_data->tile_cfg =
io_cfg->planes[i].tile_config;
}
- CDBG("tile cfg 0x%x\n", wm_data->tile_cfg);
+ CAM_DBG(CAM_ISP, "tile cfg 0x%x", wm_data->tile_cfg);
if (wm_data->h_init != io_cfg->planes[i].h_init ||
!wm_data->init_cfg_done) {
@@ -1765,7 +2006,7 @@
io_cfg->planes[i].h_init);
wm_data->h_init = io_cfg->planes[i].h_init;
}
- CDBG("h_init 0x%x\n", wm_data->h_init);
+ CAM_DBG(CAM_ISP, "h_init 0x%x", wm_data->h_init);
if (wm_data->v_init != io_cfg->planes[i].v_init ||
!wm_data->init_cfg_done) {
@@ -1774,7 +2015,7 @@
io_cfg->planes[i].v_init);
wm_data->v_init = io_cfg->planes[i].v_init;
}
- CDBG("v_init 0x%x\n", wm_data->v_init);
+ CAM_DBG(CAM_ISP, "v_init 0x%x", wm_data->v_init);
if (wm_data->ubwc_meta_stride !=
io_cfg->planes[i].meta_stride ||
@@ -1786,7 +2027,8 @@
wm_data->ubwc_meta_stride =
io_cfg->planes[i].meta_stride;
}
- CDBG("meta stride 0x%x\n", wm_data->ubwc_meta_stride);
+ CAM_DBG(CAM_ISP, "meta stride 0x%x",
+ wm_data->ubwc_meta_stride);
if (wm_data->ubwc_mode_cfg !=
io_cfg->planes[i].mode_config ||
@@ -1797,7 +2039,8 @@
wm_data->ubwc_mode_cfg =
io_cfg->planes[i].mode_config;
}
- CDBG("ubwc mode cfg 0x%x\n", wm_data->ubwc_mode_cfg);
+ CAM_DBG(CAM_ISP, "ubwc mode cfg 0x%x",
+ wm_data->ubwc_mode_cfg);
if (wm_data->ubwc_meta_offset !=
io_cfg->planes[i].meta_offset ||
@@ -1809,14 +2052,14 @@
wm_data->ubwc_meta_offset =
io_cfg->planes[i].meta_offset;
}
- CDBG("ubwc meta offset 0x%x\n",
+ CAM_DBG(CAM_ISP, "ubwc meta offset 0x%x",
wm_data->ubwc_meta_offset);
/* UBWC meta address */
CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
wm_data->hw_regs->ubwc_regs->meta_addr,
update_buf->image_buf[i]);
- CDBG("ubwc meta addr 0x%llx\n",
+ CAM_DBG(CAM_ISP, "ubwc meta addr 0x%llx",
update_buf->image_buf[i]);
}
@@ -1831,7 +2074,12 @@
wm_data->hw_regs->image_addr,
update_buf->image_buf[i]);
- CDBG("image address 0x%x\n", reg_val_pair[j-1]);
+ CAM_DBG(CAM_ISP, "image address 0x%x", reg_val_pair[j-1]);
+
+ frame_inc = io_cfg->planes[i].plane_stride *
+ io_cfg->planes[i].slice_height;
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+ wm_data->hw_regs->frame_inc, frame_inc);
/* enable the WM */
CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
@@ -1847,7 +2095,8 @@
/* cdm util returns dwords, need to convert to bytes */
if ((size * 4) > update_buf->cdm.size) {
- pr_err("Failed! Buf size:%d insufficient, expected size:%d\n",
+ CAM_ERR(CAM_ISP,
+ "Failed! Buf size:%d insufficient, expected size:%d",
update_buf->cdm.size, size);
return -ENOMEM;
}
@@ -1861,13 +2110,76 @@
return 0;
}
+static int cam_vfe_bus_start_hw(void *hw_priv,
+ void *start_hw_args, uint32_t arg_size)
+{
+ return cam_vfe_bus_start_vfe_out(hw_priv);
+}
+
+static int cam_vfe_bus_stop_hw(void *hw_priv,
+ void *stop_hw_args, uint32_t arg_size)
+{
+ return cam_vfe_bus_stop_vfe_out(hw_priv);
+}
+
+static int cam_vfe_bus_init_hw(void *hw_priv,
+ void *init_hw_args, uint32_t arg_size)
+{
+ struct cam_vfe_bus_ver2_priv *bus_priv = hw_priv;
+ uint32_t top_irq_reg_mask[2] = {0};
+
+ if (!bus_priv) {
+ CAM_ERR(CAM_ISP, "Error! Invalid args");
+ return -EINVAL;
+ }
+
+ top_irq_reg_mask[0] = (1 << 9);
+
+ bus_priv->irq_handle = cam_irq_controller_subscribe_irq(
+ bus_priv->common_data.vfe_irq_controller,
+ CAM_IRQ_PRIORITY_2,
+ top_irq_reg_mask,
+ bus_priv,
+ cam_vfe_bus_ver2_handle_irq,
+ NULL,
+ NULL,
+ NULL);
+
+ if (bus_priv->irq_handle <= 0) {
+ CAM_ERR(CAM_ISP, "Failed to subscribe BUS IRQ");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int cam_vfe_bus_deinit_hw(void *hw_priv,
+ void *deinit_hw_args, uint32_t arg_size)
+{
+ struct cam_vfe_bus_ver2_priv *bus_priv = hw_priv;
+ int rc;
+
+ if (!bus_priv || (bus_priv->irq_handle <= 0)) {
+ CAM_ERR(CAM_ISP, "Error! Invalid args");
+ return -EINVAL;
+ }
+
+ rc = cam_irq_controller_unsubscribe_irq(
+ bus_priv->common_data.vfe_irq_controller,
+ bus_priv->irq_handle);
+ if (rc)
+ CAM_ERR(CAM_ISP, "Failed to unsubscribe irq rc=%d", rc);
+
+ return rc;
+}
+
static int cam_vfe_bus_process_cmd(void *priv,
uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
{
int rc = -EINVAL;
if (!priv || !cmd_args) {
- pr_err_ratelimited("Error! Invalid input arguments\n");
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "Error! Invalid input arguments\n");
return -EINVAL;
}
@@ -1876,7 +2188,7 @@
rc = cam_vfe_bus_update_buf(priv, cmd_args, arg_size);
break;
default:
- pr_err_ratelimited("Error! Invalid camif process command:%d\n",
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "Inval camif process command:%d\n",
cmd_type);
break;
}
@@ -1885,7 +2197,7 @@
}
int cam_vfe_bus_ver2_init(
- void __iomem *mem_base,
+ struct cam_hw_soc_info *soc_info,
struct cam_hw_intf *hw_intf,
void *bus_hw_info,
void *vfe_irq_controller,
@@ -1896,29 +2208,48 @@
struct cam_vfe_bus *vfe_bus_local;
struct cam_vfe_bus_ver2_hw_info *ver2_hw_info = bus_hw_info;
- CDBG("Enter\n");
+ CAM_DBG(CAM_ISP, "Enter");
+
+ if (!soc_info || !hw_intf || !bus_hw_info || !vfe_irq_controller) {
+ CAM_ERR(CAM_ISP,
+ "Inval_prms soc_info:%pK hw_intf:%pK hw_info%pK",
+ soc_info, hw_intf, bus_hw_info);
+ CAM_ERR(CAM_ISP, "controller: %pK", vfe_irq_controller);
+ rc = -EINVAL;
+ goto end;
+ }
vfe_bus_local = kzalloc(sizeof(struct cam_vfe_bus), GFP_KERNEL);
if (!vfe_bus_local) {
- CDBG("Failed to alloc for vfe_bus\n");
+ CAM_DBG(CAM_ISP, "Failed to alloc for vfe_bus");
rc = -ENOMEM;
- goto err_alloc_bus;
+ goto end;
}
bus_priv = kzalloc(sizeof(struct cam_vfe_bus_ver2_priv),
GFP_KERNEL);
if (!bus_priv) {
- CDBG("Failed to alloc for vfe_bus_priv\n");
+ CAM_DBG(CAM_ISP, "Failed to alloc for vfe_bus_priv");
rc = -ENOMEM;
- goto err_alloc_priv;
+ goto free_bus_local;
}
vfe_bus_local->bus_priv = bus_priv;
- bus_priv->common_data.mem_base = mem_base;
+ bus_priv->common_data.core_index = soc_info->index;
+ bus_priv->common_data.mem_base =
+ CAM_SOC_GET_REG_MAP_START(soc_info, VFE_CORE_BASE_IDX);
bus_priv->common_data.hw_intf = hw_intf;
bus_priv->common_data.vfe_irq_controller = vfe_irq_controller;
bus_priv->common_data.common_reg = &ver2_hw_info->common_reg;
+ rc = cam_irq_controller_init(drv_name, bus_priv->common_data.mem_base,
+ &ver2_hw_info->common_reg.irq_reg_info,
+ &bus_priv->common_data.bus_irq_controller);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Error! cam_irq_controller_init failed");
+ goto free_bus_priv;
+ }
+
INIT_LIST_HEAD(&bus_priv->free_comp_grp);
INIT_LIST_HEAD(&bus_priv->free_dual_comp_grp);
INIT_LIST_HEAD(&bus_priv->used_comp_grp);
@@ -1927,8 +2258,8 @@
rc = cam_vfe_bus_init_wm_resource(i, bus_priv, bus_hw_info,
&bus_priv->bus_client[i]);
if (rc < 0) {
- pr_err("Error! Init WM failed\n");
- goto err_init_wm;
+ CAM_ERR(CAM_ISP, "Error! Init WM failed rc=%d", rc);
+ goto deinit_wm;
}
}
@@ -1936,8 +2267,8 @@
rc = cam_vfe_bus_init_comp_grp(i, bus_priv, bus_hw_info,
&bus_priv->comp_grp[i]);
if (rc < 0) {
- pr_err("Error! Init Comp Grp failed\n");
- goto err_init_comp_grp;
+ CAM_ERR(CAM_ISP, "Init Comp Grp failed rc=%d", rc);
+ goto deinit_comp_grp;
}
}
@@ -1945,36 +2276,123 @@
rc = cam_vfe_bus_init_vfe_out_resource(i, bus_priv, bus_hw_info,
&bus_priv->vfe_out[i]);
if (rc < 0) {
- pr_err("Error! Init VFE Out failed\n");
- goto err_init_vfe_out;
+ CAM_ERR(CAM_ISP, "Init VFE Out failed rc=%d", rc);
+ goto deinit_vfe_out;
}
}
- INIT_LIST_HEAD(&bus_priv->free_payload_list);
- for (i = 0; i < 128; i++) {
- INIT_LIST_HEAD(&bus_priv->evt_payload[i].list);
- list_add_tail(&bus_priv->evt_payload[i].list,
- &bus_priv->free_payload_list);
+ INIT_LIST_HEAD(&bus_priv->common_data.free_payload_list);
+ for (i = 0; i < CAM_VFE_BUS_VER2_PAYLOAD_MAX; i++) {
+ INIT_LIST_HEAD(&bus_priv->common_data.evt_payload[i].list);
+ list_add_tail(&bus_priv->common_data.evt_payload[i].list,
+ &bus_priv->common_data.free_payload_list);
}
- vfe_bus_local->acquire_resource = cam_vfe_bus_acquire_vfe_out;
- vfe_bus_local->release_resource = cam_vfe_bus_release_vfe_out;
- vfe_bus_local->start_resource = cam_vfe_bus_start_vfe_out;
- vfe_bus_local->stop_resource = cam_vfe_bus_stop_vfe_out;
- vfe_bus_local->top_half_handler = cam_vfe_bus_ver2_handle_irq;
+ vfe_bus_local->hw_ops.reserve = cam_vfe_bus_acquire_vfe_out;
+ vfe_bus_local->hw_ops.release = cam_vfe_bus_release_vfe_out;
+ vfe_bus_local->hw_ops.start = cam_vfe_bus_start_hw;
+ vfe_bus_local->hw_ops.stop = cam_vfe_bus_stop_hw;
+ vfe_bus_local->hw_ops.init = cam_vfe_bus_init_hw;
+ vfe_bus_local->hw_ops.deinit = cam_vfe_bus_deinit_hw;
+ vfe_bus_local->top_half_handler = cam_vfe_bus_ver2_handle_irq;
vfe_bus_local->bottom_half_handler = NULL;
- vfe_bus_local->process_cmd = cam_vfe_bus_process_cmd;
+ vfe_bus_local->hw_ops.process_cmd = cam_vfe_bus_process_cmd;
*vfe_bus = vfe_bus_local;
+ CAM_DBG(CAM_ISP, "Exit");
return rc;
-err_init_vfe_out:
-err_init_comp_grp:
-err_init_wm:
+deinit_vfe_out:
+ if (i < 0)
+ i = CAM_VFE_BUS_VER2_VFE_OUT_MAX;
+ for (--i; i >= 0; i--)
+ cam_vfe_bus_deinit_vfe_out_resource(&bus_priv->vfe_out[i]);
+
+deinit_comp_grp:
+ if (i < 0)
+ i = CAM_VFE_BUS_VER2_COMP_GRP_MAX;
+ for (--i; i >= 0; i--)
+ cam_vfe_bus_deinit_comp_grp(&bus_priv->comp_grp[i]);
+
+deinit_wm:
+ if (i < 0)
+ i = CAM_VFE_BUS_VER2_MAX_CLIENTS;
+ for (--i; i >= 0; i--)
+ cam_vfe_bus_deinit_wm_resource(&bus_priv->bus_client[i]);
+
+free_bus_priv:
kfree(vfe_bus_local->bus_priv);
-err_alloc_priv:
+
+free_bus_local:
kfree(vfe_bus_local);
-err_alloc_bus:
+
+end:
return rc;
}
+
+int cam_vfe_bus_ver2_deinit(
+ struct cam_vfe_bus **vfe_bus)
+{
+ int i, rc = 0;
+ struct cam_vfe_bus_ver2_priv *bus_priv = NULL;
+ struct cam_vfe_bus *vfe_bus_local;
+
+ if (!vfe_bus || !*vfe_bus) {
+ CAM_ERR(CAM_ISP, "Error! Invalid input");
+ return -EINVAL;
+ }
+ vfe_bus_local = *vfe_bus;
+
+ bus_priv = vfe_bus_local->bus_priv;
+ if (!bus_priv) {
+ CAM_ERR(CAM_ISP, "Error! bus_priv is NULL");
+ rc = -ENODEV;
+ goto free_bus_local;
+ }
+
+ INIT_LIST_HEAD(&bus_priv->common_data.free_payload_list);
+ for (i = 0; i < CAM_VFE_BUS_VER2_PAYLOAD_MAX; i++)
+ INIT_LIST_HEAD(&bus_priv->common_data.evt_payload[i].list);
+
+ for (i = 0; i < CAM_VFE_BUS_VER2_MAX_CLIENTS; i++) {
+ rc = cam_vfe_bus_deinit_wm_resource(&bus_priv->bus_client[i]);
+ if (rc < 0)
+ CAM_ERR(CAM_ISP,
+ "Error! Deinit WM failed rc=%d", rc);
+ }
+
+ for (i = 0; i < CAM_VFE_BUS_VER2_COMP_GRP_MAX; i++) {
+ rc = cam_vfe_bus_deinit_comp_grp(&bus_priv->comp_grp[i]);
+ if (rc < 0)
+ CAM_ERR(CAM_ISP,
+ "Error! Deinit Comp Grp failed rc=%d", rc);
+ }
+
+ for (i = 0; i < CAM_VFE_BUS_VER2_VFE_OUT_MAX; i++) {
+ rc = cam_vfe_bus_deinit_vfe_out_resource(&bus_priv->vfe_out[i]);
+ if (rc < 0)
+ CAM_ERR(CAM_ISP,
+ "Error! Deinit VFE Out failed rc=%d", rc);
+ }
+
+ INIT_LIST_HEAD(&bus_priv->free_comp_grp);
+ INIT_LIST_HEAD(&bus_priv->free_dual_comp_grp);
+ INIT_LIST_HEAD(&bus_priv->used_comp_grp);
+
+ rc = cam_irq_controller_deinit(
+ &bus_priv->common_data.bus_irq_controller);
+ if (rc)
+ CAM_ERR(CAM_ISP,
+ "Error! Deinit IRQ Controller failed rc=%d", rc);
+
+ kfree(vfe_bus_local->bus_priv);
+
+free_bus_local:
+ kfree(vfe_bus_local);
+
+ *vfe_bus = NULL;
+
+ return rc;
+}
+
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.h
index e451174..ba98077 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.h
@@ -171,18 +171,34 @@
*
* @Brief: Initialize Bus layer
*
- * @mem_base: Mapped base address of register space
+ * @soc_info: Soc Information for the associated HW
* @hw_intf: HW Interface of HW to which this resource belongs
* @bus_hw_info: BUS HW info that contains details of BUS registers
* @vfe_irq_controller: VFE IRQ Controller to use for subscribing to Top
* level IRQs
* @vfe_bus: Pointer to vfe_bus structure which will be filled
* and returned on successful initialize
+ *
+ * @Return: 0: Success
+ * Non-zero: Failure
*/
-int cam_vfe_bus_ver2_init(void __iomem *mem_base,
+int cam_vfe_bus_ver2_init(
+ struct cam_hw_soc_info *soc_info,
struct cam_hw_intf *hw_intf,
void *bus_hw_info,
void *vfe_irq_controller,
struct cam_vfe_bus **vfe_bus);
+/*
+ * cam_vfe_bus_ver2_deinit()
+ *
+ * @Brief: Deinitialize Bus layer
+ *
+ * @vfe_bus: Pointer to vfe_bus structure to deinitialize
+ *
+ * @Return: 0: Success
+ * Non-zero: Failure
+ */
+int cam_vfe_bus_ver2_deinit(struct cam_vfe_bus **vfe_bus);
+
#endif /* _CAM_VFE_BUS_VER2_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/include/cam_vfe_bus.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/include/cam_vfe_bus.h
index d202c13..c089911 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/include/cam_vfe_bus.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/include/cam_vfe_bus.h
@@ -14,6 +14,7 @@
#define _CAM_VFE_BUS_H_
#include <uapi/media/cam_isp.h>
+#include "cam_hw_intf.h"
#include "cam_isp_hw.h"
#define CAM_VFE_BUS_VER_1_0 0x1000
@@ -31,25 +32,14 @@
* @Brief: Bus interface structure
*
* @bus_priv: Private data of BUS
- * @acquire_resource: Function pointer for acquiring BUS output resource
- * @release_resource: Function pointer for releasing BUS resource
- * @start_resource: Function for starting BUS Output resource
- * @stop_resource: Function for stopping BUS Output resource
- * @process_cmd: Function to process commands specific to BUS
- * resources
+ * @hw_ops: Hardware interface functions
* @top_half_handler: Top Half handler function
* @bottom_half_handler: Bottom Half handler function
*/
struct cam_vfe_bus {
void *bus_priv;
- int (*acquire_resource)(void *bus_priv, void *acquire_args);
- int (*release_resource)(void *bus_priv,
- struct cam_isp_resource_node *vfe_out);
- int (*start_resource)(struct cam_isp_resource_node *vfe_out);
- int (*stop_resource)(struct cam_isp_resource_node *vfe_out);
- int (*process_cmd)(void *priv, uint32_t cmd_type, void *cmd_args,
- uint32_t arg_size);
+ struct cam_hw_ops hw_ops;
CAM_IRQ_HANDLER_TOP_HALF top_half_handler;
CAM_IRQ_HANDLER_BOTTOM_HALF bottom_half_handler;
};
@@ -60,19 +50,36 @@
* @Brief: Initialize Bus layer
*
* @bus_version: Version of BUS to initialize
- * @mem_base: Mapped base address of register space
+ * @soc_info: Soc Information for the associated HW
* @hw_intf: HW Interface of HW to which this resource belongs
* @bus_hw_info: BUS HW info that contains details of BUS registers
* @vfe_irq_controller: VFE IRQ Controller to use for subscribing to Top
* level IRQs
* @vfe_bus: Pointer to vfe_bus structure which will be filled
* and returned on successful initialize
+ *
+ * @Return: 0: Success
+ * Non-zero: Failure
*/
int cam_vfe_bus_init(uint32_t bus_version,
- void __iomem *mem_base,
+ struct cam_hw_soc_info *soc_info,
struct cam_hw_intf *hw_intf,
void *bus_hw_info,
void *vfe_irq_controller,
struct cam_vfe_bus **vfe_bus);
+/*
+ * cam_vfe_bus_deinit()
+ *
+ * @Brief: Deinitialize Bus layer
+ *
+ * @bus_version: Version of BUS to deinitialize
+ * @vfe_bus: Pointer to vfe_bus structure to deinitialize
+ *
+ * @Return: 0: Success
+ * Non-zero: Failure
+ */
+int cam_vfe_bus_deinit(uint32_t bus_version,
+ struct cam_vfe_bus **vfe_bus);
+
#endif /* _CAM_VFE_BUS_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
index 3f3c2a3..e70ecc5 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
@@ -10,9 +10,7 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
-
-#include <linux/slab.h>
+ #include <linux/slab.h>
#include <uapi/media/cam_isp.h>
#include "cam_io_util.h"
#include "cam_isp_hw_mgr_intf.h"
@@ -20,9 +18,7 @@
#include "cam_vfe_top.h"
#include "cam_vfe_top_ver2.h"
#include "cam_vfe_camif_ver2.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
struct cam_vfe_mux_camif_data {
void __iomem *mem_base;
@@ -55,7 +51,7 @@
rc = 0;
break;
default:
- pr_err("Error! Invalid pix pattern:%d\n", pattern);
+ CAM_ERR(CAM_ISP, "Error! Invalid pix pattern:%d", pattern);
rc = -EINVAL;
break;
}
@@ -96,12 +92,12 @@
uint32_t val = 0;
if (!camif_res) {
- pr_err("Error! Invalid input arguments\n");
+ CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
return -EINVAL;
}
if (camif_res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
- pr_err("Error! Invalid camif res res_state:%d\n",
+ CAM_ERR(CAM_ISP, "Error! Invalid camif res res_state:%d",
camif_res->res_state);
return -EINVAL;
}
@@ -119,10 +115,6 @@
rsrc_data->camif_reg->line_skip_pattern);
cam_io_w_mb(0x1, rsrc_data->mem_base +
rsrc_data->camif_reg->pixel_skip_pattern);
- cam_io_w_mb(0x0, rsrc_data->mem_base +
- rsrc_data->camif_reg->skip_period);
- cam_io_w_mb(0x1, rsrc_data->mem_base +
- rsrc_data->camif_reg->irq_subsample_pattern);
/* epoch config with 20 line */
cam_io_w_mb(0x00140014,
@@ -133,7 +125,7 @@
/* Reg Update */
cam_io_w_mb(0x1, rsrc_data->mem_base + 0x4AC);
- CDBG("Exit\n");
+ CAM_DBG(CAM_ISP, "Exit");
return 0;
}
@@ -146,7 +138,7 @@
int rc = 0;
if (!camif_res) {
- pr_err("Error! Invalid input arguments\n");
+ CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
return -EINVAL;
}
@@ -192,26 +184,26 @@
payload = evt_payload_priv;
irq_status0 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS0];
- CDBG("event ID:%d\n", payload->evt_id);
- CDBG("irq_status_0 = %x\n", irq_status0);
+ CAM_DBG(CAM_ISP, "event ID:%d", payload->evt_id);
+ CAM_DBG(CAM_ISP, "irq_status_0 = %x", irq_status0);
switch (payload->evt_id) {
case CAM_ISP_HW_EVENT_SOF:
if (irq_status0 & camif_priv->reg_data->sof_irq_mask) {
- CDBG("Received SOF\n");
+ CAM_DBG(CAM_ISP, "Received SOF");
ret = CAM_VFE_IRQ_STATUS_SUCCESS;
}
break;
case CAM_ISP_HW_EVENT_EPOCH:
if (irq_status0 & camif_priv->reg_data->epoch0_irq_mask) {
- CDBG("Received EPOCH\n");
+ CAM_DBG(CAM_ISP, "Received EPOCH");
ret = CAM_VFE_IRQ_STATUS_SUCCESS;
}
cam_vfe_put_evt_payload(payload->core_info, &payload);
break;
case CAM_ISP_HW_EVENT_REG_UPDATE:
if (irq_status0 & camif_priv->reg_data->reg_update_irq_mask) {
- CDBG("Received REG_UPDATE_ACK\n");
+ CAM_DBG(CAM_ISP, "Received REG_UPDATE_ACK");
ret = CAM_VFE_IRQ_STATUS_SUCCESS;
}
break;
@@ -219,7 +211,7 @@
break;
}
- CDBG("returing status = %d\n", ret);
+ CAM_DBG(CAM_ISP, "returing status = %d", ret);
return ret;
}
@@ -233,9 +225,9 @@
struct cam_vfe_camif_ver2_hw_info *camif_info = camif_hw_info;
camif_priv = kzalloc(sizeof(struct cam_vfe_mux_camif_data),
- GFP_KERNEL);
+ GFP_KERNEL);
if (!camif_priv) {
- CDBG("Error! Failed to alloc for camif_priv\n");
+ CAM_DBG(CAM_ISP, "Error! Failed to alloc for camif_priv");
return -ENOMEM;
}
@@ -255,3 +247,24 @@
return 0;
}
+int cam_vfe_camif_ver2_deinit(
+ struct cam_isp_resource_node *camif_node)
+{
+ struct cam_vfe_mux_camif_data *camif_priv = camif_node->res_priv;
+
+ camif_node->start = NULL;
+ camif_node->stop = NULL;
+ camif_node->top_half_handler = NULL;
+ camif_node->bottom_half_handler = NULL;
+
+ camif_node->res_priv = NULL;
+
+ if (!camif_priv) {
+ CAM_ERR(CAM_ISP, "Error! camif_priv is NULL");
+ return -ENODEV;
+ }
+
+ kfree(camif_priv);
+
+ return 0;
+}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h
index cc6aab0..553abf2 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h
@@ -75,4 +75,7 @@
void *camif_hw_info,
struct cam_isp_resource_node *camif_node);
+int cam_vfe_camif_ver2_deinit(
+ struct cam_isp_resource_node *camif_node);
+
#endif /* _CAM_VFE_CAMIF_VER2_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c
index 5f77a7c..df7b0f9 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c
@@ -10,21 +10,19 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
-
#include <linux/slab.h>
#include "cam_vfe_rdi.h"
#include "cam_isp_hw_mgr_intf.h"
#include "cam_vfe_hw_intf.h"
#include "cam_io_util.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
struct cam_vfe_mux_rdi_data {
void __iomem *mem_base;
struct cam_hw_intf *hw_intf;
struct cam_vfe_top_ver2_reg_offset_common *common_reg;
+ struct cam_vfe_rdi_ver2_reg *rdi_reg;
+ struct cam_vfe_rdi_reg_data *reg_data;
enum cam_isp_hw_sync_mode sync_mode;
};
@@ -51,12 +49,12 @@
int rc = 0;
if (!rdi_res) {
- pr_err("Error! Invalid input arguments\n");
+ CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
return -EINVAL;
}
if (rdi_res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
- pr_err("Error! Invalid rdi res res_state:%d\n",
+ CAM_ERR(CAM_ISP, "Error! Invalid rdi res res_state:%d",
rdi_res->res_state);
return -EINVAL;
}
@@ -67,7 +65,7 @@
/* Reg Update */
cam_io_w_mb(0x2, rsrc_data->mem_base + 0x4AC);
- CDBG("Exit\n");
+ CAM_DBG(CAM_ISP, "Exit");
return rc;
}
@@ -80,7 +78,7 @@
int rc = 0;
if (!rdi_res) {
- pr_err("Error! Invalid input arguments\n");
+ CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
return -EINVAL;
}
@@ -103,13 +101,14 @@
int rc = -EINVAL;
if (!priv || !cmd_args) {
- pr_err("Error! Invalid input arguments\n");
+ CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
return -EINVAL;
}
switch (cmd_type) {
default:
- pr_err("Error! unsupported RDI process command:%d\n", cmd_type);
+ CAM_ERR(CAM_ISP,
+ "unsupported RDI process command:%d", cmd_type);
break;
}
@@ -139,23 +138,28 @@
payload = evt_payload_priv;
irq_status0 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS0];
- CDBG("event ID:%d\n", payload->evt_id);
- CDBG("irq_status_0 = %x\n", irq_status0);
+ CAM_DBG(CAM_ISP, "event ID:%d", payload->evt_id);
+ CAM_DBG(CAM_ISP, "irq_status_0 = %x", irq_status0);
switch (payload->evt_id) {
case CAM_ISP_HW_EVENT_SOF:
- if (irq_status0 & 0x8000000)
+ if (irq_status0 & rdi_priv->reg_data->sof_irq_mask) {
+ CAM_DBG(CAM_ISP, "Received SOF");
ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+ }
break;
case CAM_ISP_HW_EVENT_REG_UPDATE:
- if (irq_status0 & 0x20)
+ if (irq_status0 & rdi_priv->reg_data->reg_update_irq_mask) {
+ CAM_DBG(CAM_ISP, "Received REG UPDATE");
ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+ }
+ cam_vfe_put_evt_payload(payload->core_info, &payload);
break;
default:
break;
}
- CDBG("returing status = %d\n", ret);
+ CAM_DBG(CAM_ISP, "returing status = %d", ret);
return ret;
}
@@ -166,11 +170,12 @@
struct cam_isp_resource_node *rdi_node)
{
struct cam_vfe_mux_rdi_data *rdi_priv = NULL;
+ struct cam_vfe_rdi_ver2_hw_info *rdi_info = rdi_hw_info;
rdi_priv = kzalloc(sizeof(struct cam_vfe_mux_rdi_data),
GFP_KERNEL);
if (!rdi_priv) {
- CDBG("Error! Failed to alloc for rdi_priv\n");
+ CAM_DBG(CAM_ISP, "Error! Failed to alloc for rdi_priv");
return -ENOMEM;
}
@@ -178,6 +183,31 @@
rdi_priv->mem_base = soc_info->reg_map[VFE_CORE_BASE_IDX].mem_base;
rdi_priv->hw_intf = hw_intf;
+ rdi_priv->common_reg = rdi_info->common_reg;
+ rdi_priv->rdi_reg = rdi_info->rdi_reg;
+
+ switch (rdi_node->res_id) {
+ case CAM_ISP_HW_VFE_IN_RDI0:
+ rdi_priv->reg_data = rdi_info->reg_data[0];
+ break;
+ case CAM_ISP_HW_VFE_IN_RDI1:
+ rdi_priv->reg_data = rdi_info->reg_data[1];
+ break;
+ case CAM_ISP_HW_VFE_IN_RDI2:
+ rdi_priv->reg_data = rdi_info->reg_data[2];
+ break;
+ case CAM_ISP_HW_VFE_IN_RDI3:
+ if (rdi_info->reg_data[3]) {
+ rdi_priv->reg_data = rdi_info->reg_data[3];
+ } else {
+ CAM_ERR(CAM_ISP, "Error! RDI3 is not supported");
+ goto err_init;
+ }
+ break;
+ default:
+ CAM_DBG(CAM_ISP, "invalid Resource id:%d", rdi_node->res_id);
+ goto err_init;
+ }
rdi_node->start = cam_vfe_rdi_resource_start;
rdi_node->stop = cam_vfe_rdi_resource_stop;
@@ -185,5 +215,28 @@
rdi_node->bottom_half_handler = cam_vfe_rdi_handle_irq_bottom_half;
return 0;
+err_init:
+ kfree(rdi_priv);
+ return -EINVAL;
}
+int cam_vfe_rdi_ver2_deinit(
+ struct cam_isp_resource_node *rdi_node)
+{
+ struct cam_vfe_mux_rdi_data *rdi_priv = rdi_node->res_priv;
+
+ rdi_node->start = NULL;
+ rdi_node->stop = NULL;
+ rdi_node->top_half_handler = NULL;
+ rdi_node->bottom_half_handler = NULL;
+
+ rdi_node->res_priv = NULL;
+
+ if (!rdi_priv) {
+ CAM_ERR(CAM_ISP, "Error! rdi_priv NULL");
+ return -ENODEV;
+ }
+ kfree(rdi_priv);
+
+ return 0;
+}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.h
index 967cec3..04e4f02 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.h
@@ -16,18 +16,22 @@
#include "cam_isp_hw.h"
#include "cam_vfe_top.h"
+#define CAM_VFE_RDI_VER2_MAX 4
+
struct cam_vfe_rdi_ver2_reg {
uint32_t reg_update_cmd;
};
struct cam_vfe_rdi_reg_data {
+ uint32_t reg_update_cmd_data;
+ uint32_t sof_irq_mask;
uint32_t reg_update_irq_mask;
};
struct cam_vfe_rdi_ver2_hw_info {
- struct cam_vfe_top_ver2_reg_offset_common *common_reg;
- struct cam_vfe_rdi_ver2_reg *rdi_reg;
- struct cam_vfe_rdi_reg_data *reg_data;
+ struct cam_vfe_top_ver2_reg_offset_common *common_reg;
+ struct cam_vfe_rdi_ver2_reg *rdi_reg;
+ struct cam_vfe_rdi_reg_data *reg_data[CAM_VFE_RDI_VER2_MAX];
};
int cam_vfe_rdi_ver2_acquire_resource(
@@ -43,4 +47,7 @@
void *rdi_hw_info,
struct cam_isp_resource_node *rdi_node);
+int cam_vfe_rdi_ver2_deinit(
+ struct cam_isp_resource_node *rdi_node);
+
#endif /* _CAM_VFE_RDI_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top.c
index e2bceb8..8eb1835 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top.c
@@ -10,10 +10,9 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
-
#include "cam_vfe_top.h"
#include "cam_vfe_top_ver2.h"
+#include "cam_debug_util.h"
int cam_vfe_top_init(uint32_t top_version,
struct cam_hw_soc_info *soc_info,
@@ -29,7 +28,24 @@
vfe_top);
break;
default:
- pr_err("Error! Unsupported Version %x\n", top_version);
+ CAM_ERR(CAM_ISP, "Error! Unsupported Version %x", top_version);
+ break;
+ }
+
+ return rc;
+}
+
+int cam_vfe_top_deinit(uint32_t top_version,
+ struct cam_vfe_top **vfe_top)
+{
+ int rc = -EINVAL;
+
+ switch (top_version) {
+ case CAM_VFE_TOP_VER_2_0:
+ rc = cam_vfe_top_ver2_deinit(vfe_top);
+ break;
+ default:
+ CAM_ERR(CAM_ISP, "Error! Unsupported Version %x", top_version);
break;
}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
index 3ef4f49..1a3eeae 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
@@ -10,17 +10,13 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
-
#include <linux/slab.h>
#include "cam_io_util.h"
#include "cam_cdm_util.h"
#include "cam_vfe_hw_intf.h"
#include "cam_vfe_top.h"
#include "cam_vfe_top_ver2.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
struct cam_vfe_top_ver2_common_data {
struct cam_hw_soc_info *soc_info;
@@ -43,13 +39,13 @@
struct cam_cdm_utils_ops *cdm_util_ops = NULL;
if (arg_size != sizeof(struct cam_isp_hw_get_cdm_args)) {
- pr_err("Error! Invalid cmd size\n");
+ CAM_ERR(CAM_ISP, "Error! Invalid cmd size");
return -EINVAL;
}
if (!cdm_args || !cdm_args->res || !top_priv ||
!top_priv->common_data.soc_info) {
- pr_err("Error! Invalid args\n");
+ CAM_ERR(CAM_ISP, "Error! Invalid args");
return -EINVAL;
}
@@ -57,22 +53,22 @@
(struct cam_cdm_utils_ops *)cdm_args->res->cdm_ops;
if (!cdm_util_ops) {
- pr_err("Invalid CDM ops\n");
+ CAM_ERR(CAM_ISP, "Invalid CDM ops");
return -EINVAL;
}
size = cdm_util_ops->cdm_required_size_changebase();
/* since cdm returns dwords, we need to convert it into bytes */
if ((size * 4) > cdm_args->size) {
- pr_err("buf size:%d is not sufficient, expected: %d\n",
+ CAM_ERR(CAM_ISP, "buf size:%d is not sufficient, expected: %d",
cdm_args->size, size);
return -EINVAL;
}
mem_base = CAM_SOC_GET_REG_MAP_CAM_BASE(
top_priv->common_data.soc_info, VFE_CORE_BASE_IDX);
- CDBG("core %d mem_base 0x%x\n", top_priv->common_data.soc_info->index,
- mem_base);
+ CAM_DBG(CAM_ISP, "core %d mem_base 0x%x",
+ top_priv->common_data.soc_info->index, mem_base);
cdm_util_ops->cdm_write_changebase(cdm_args->cmd_buf_addr, mem_base);
cdm_args->used_bytes = (size * 4);
@@ -90,26 +86,26 @@
struct cam_cdm_utils_ops *cdm_util_ops = NULL;
if (arg_size != sizeof(struct cam_isp_hw_get_cdm_args)) {
- pr_err("Error! Invalid cmd size\n");
+ CAM_ERR(CAM_ISP, "Error! Invalid cmd size");
return -EINVAL;
}
if (!cdm_args || !cdm_args->res) {
- pr_err("Error! Invalid args\n");
+ CAM_ERR(CAM_ISP, "Error! Invalid args");
return -EINVAL;
}
cdm_util_ops = (struct cam_cdm_utils_ops *)cdm_args->res->cdm_ops;
if (!cdm_util_ops) {
- pr_err("Error! Invalid CDM ops\n");
+ CAM_ERR(CAM_ISP, "Error! Invalid CDM ops");
return -EINVAL;
}
size = cdm_util_ops->cdm_required_size_reg_random(1);
/* since cdm returns dwords, we need to convert it into bytes */
if ((size * 4) > cdm_args->size) {
- pr_err("Error! buf size:%d is not sufficient, expected: %d\n",
+ CAM_ERR(CAM_ISP, "buf size:%d is not sufficient, expected: %d",
cdm_args->size, size);
return -EINVAL;
}
@@ -153,7 +149,7 @@
struct cam_vfe_top_ver2_reg_offset_common *reg_common = NULL;
if (!top_priv) {
- pr_err("Invalid arguments\n");
+ CAM_ERR(CAM_ISP, "Invalid arguments");
return -EINVAL;
}
@@ -169,7 +165,7 @@
CAM_SOC_GET_REG_MAP_START(soc_info, VFE_CORE_BASE_IDX) +
reg_common->global_reset_cmd);
- CDBG("Reset HW exit\n");
+ CAM_DBG(CAM_ISP, "Reset HW exit");
return 0;
}
@@ -183,7 +179,7 @@
int rc = -EINVAL;
if (!device_priv || !reserve_args) {
- pr_err("Error! Invalid input arguments\n");
+ CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
return -EINVAL;
}
@@ -228,16 +224,16 @@
struct cam_isp_resource_node *mux_res;
if (!device_priv || !release_args) {
- pr_err("Error! Invalid input arguments\n");
+ CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
return -EINVAL;
}
top_priv = (struct cam_vfe_top_ver2_priv *)device_priv;
mux_res = (struct cam_isp_resource_node *)release_args;
- CDBG("%s: Resource in state %d\n", __func__, mux_res->res_state);
+ CAM_DBG(CAM_ISP, "Resource in state %d", mux_res->res_state);
if (mux_res->res_state < CAM_ISP_RESOURCE_STATE_RESERVED) {
- pr_err("Error! Resource in Invalid res_state :%d\n",
+ CAM_ERR(CAM_ISP, "Error! Resource in Invalid res_state :%d",
mux_res->res_state);
return -EINVAL;
}
@@ -254,7 +250,7 @@
int rc = 0;
if (!device_priv || !start_args) {
- pr_err("Error! Invalid input arguments\n");
+ CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
return -EINVAL;
}
@@ -268,7 +264,7 @@
mux_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
rc = 0;
} else {
- pr_err("Invalid res id:%d\n", mux_res->res_id);
+ CAM_ERR(CAM_ISP, "Invalid res id:%d", mux_res->res_id);
rc = -EINVAL;
}
@@ -283,7 +279,7 @@
int rc = 0;
if (!device_priv || !stop_args) {
- pr_err("Error! Invalid input arguments\n");
+ CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
return -EINVAL;
}
@@ -295,7 +291,7 @@
mux_res->res_id <= CAM_ISP_HW_VFE_IN_RDI3)) {
rc = mux_res->stop(mux_res);
} else {
- pr_err("Invalid res id:%d\n", mux_res->res_id);
+ CAM_ERR(CAM_ISP, "Invalid res id:%d", mux_res->res_id);
rc = -EINVAL;
}
@@ -322,7 +318,7 @@
struct cam_vfe_top_ver2_priv *top_priv;
if (!device_priv || !cmd_args) {
- pr_err("Error! Invalid arguments\n");
+ CAM_ERR(CAM_ISP, "Error! Invalid arguments");
return -EINVAL;
}
top_priv = (struct cam_vfe_top_ver2_priv *)device_priv;
@@ -337,7 +333,7 @@
break;
default:
rc = -EINVAL;
- pr_err("Error! Invalid cmd:%d\n", cmd_type);
+ CAM_ERR(CAM_ISP, "Error! Invalid cmd:%d", cmd_type);
break;
}
@@ -357,17 +353,17 @@
vfe_top = kzalloc(sizeof(struct cam_vfe_top), GFP_KERNEL);
if (!vfe_top) {
- CDBG("Error! Failed to alloc for vfe_top\n");
+ CAM_DBG(CAM_ISP, "Error! Failed to alloc for vfe_top");
rc = -ENOMEM;
- goto err_alloc_top;
+ goto end;
}
top_priv = kzalloc(sizeof(struct cam_vfe_top_ver2_priv),
GFP_KERNEL);
if (!top_priv) {
- CDBG("Error! Failed to alloc for vfe_top_priv\n");
+ CAM_DBG(CAM_ISP, "Error! Failed to alloc for vfe_top_priv");
rc = -ENOMEM;
- goto err_alloc_priv;
+ goto free_vfe_top;
}
vfe_top->top_priv = top_priv;
@@ -384,16 +380,17 @@
&ver2_hw_info->camif_hw_info,
&top_priv->mux_rsrc[i]);
if (rc)
- goto err_mux_init;
+ goto deinit_resources;
} else {
/* set the RDI resource id */
top_priv->mux_rsrc[i].res_id =
- CAM_ISP_HW_VFE_IN_RDI0 + j;
+ CAM_ISP_HW_VFE_IN_RDI0 + j++;
+
rc = cam_vfe_rdi_ver2_init(hw_intf, soc_info,
- NULL, &top_priv->mux_rsrc[i]);
+ &ver2_hw_info->rdi_hw_info,
+ &top_priv->mux_rsrc[i]);
if (rc)
goto deinit_resources;
- j++;
}
}
@@ -416,10 +413,71 @@
return rc;
deinit_resources:
-err_mux_init:
+ for (--i; i >= 0; i--) {
+ if (ver2_hw_info->mux_type[i] == CAM_VFE_CAMIF_VER_2_0) {
+ if (cam_vfe_camif_ver2_deinit(&top_priv->mux_rsrc[i]))
+ CAM_ERR(CAM_ISP, "Camif Deinit failed");
+ } else {
+ if (cam_vfe_rdi_ver2_deinit(&top_priv->mux_rsrc[i]))
+ CAM_ERR(CAM_ISP, "RDI Deinit failed");
+ }
+ top_priv->mux_rsrc[i].res_state =
+ CAM_ISP_RESOURCE_STATE_UNAVAILABLE;
+ }
+
kfree(vfe_top->top_priv);
-err_alloc_priv:
+free_vfe_top:
kfree(vfe_top);
-err_alloc_top:
+end:
return rc;
}
+
+int cam_vfe_top_ver2_deinit(struct cam_vfe_top **vfe_top_ptr)
+{
+ int i, rc = 0;
+ struct cam_vfe_top_ver2_priv *top_priv = NULL;
+ struct cam_vfe_top *vfe_top;
+
+ if (!vfe_top_ptr) {
+ CAM_ERR(CAM_ISP, "Error! Invalid input");
+ return -EINVAL;
+ }
+
+ vfe_top = *vfe_top_ptr;
+ if (!vfe_top) {
+ CAM_ERR(CAM_ISP, "Error! vfe_top NULL");
+ return -ENODEV;
+ }
+
+ top_priv = vfe_top->top_priv;
+ if (!top_priv) {
+ CAM_ERR(CAM_ISP, "Error! vfe_top_priv NULL");
+ rc = -ENODEV;
+ goto free_vfe_top;
+ }
+
+ for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
+ top_priv->mux_rsrc[i].res_state =
+ CAM_ISP_RESOURCE_STATE_UNAVAILABLE;
+ if (top_priv->mux_rsrc[i].res_id ==
+ CAM_ISP_HW_VFE_IN_CAMIF) {
+ rc = cam_vfe_camif_ver2_deinit(&top_priv->mux_rsrc[i]);
+ if (rc)
+ CAM_ERR(CAM_ISP, "Camif deinit failed rc=%d",
+ rc);
+ } else {
+ rc = cam_vfe_rdi_ver2_deinit(&top_priv->mux_rsrc[i]);
+ if (rc)
+ CAM_ERR(CAM_ISP, "RDI deinit failed rc=%d", rc);
+ }
+ }
+
+ kfree(vfe_top->top_priv);
+
+free_vfe_top:
+ kfree(vfe_top);
+ *vfe_top_ptr = NULL;
+
+ return rc;
+}
+
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.h
index 1038721..bafd7f2 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.h
@@ -52,12 +52,15 @@
struct cam_vfe_top_ver2_hw_info {
struct cam_vfe_top_ver2_reg_offset_common *common_reg;
struct cam_vfe_camif_ver2_hw_info camif_hw_info;
+ struct cam_vfe_rdi_ver2_hw_info rdi_hw_info;
uint32_t mux_type[CAM_VFE_TOP_VER2_MUX_MAX];
};
int cam_vfe_top_ver2_init(struct cam_hw_soc_info *soc_info,
struct cam_hw_intf *hw_intf,
void *top_hw_info,
- struct cam_vfe_top **vfe_top);
+ struct cam_vfe_top **vfe_top);
+
+int cam_vfe_top_ver2_deinit(struct cam_vfe_top **vfe_top);
#endif /* _CAM_VFE_TOP_VER2_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include/cam_vfe_top.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include/cam_vfe_top.h
index 44c046d..dbb211f 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include/cam_vfe_top.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include/cam_vfe_top.h
@@ -50,4 +50,7 @@
void *top_hw_info,
struct cam_vfe_top **vfe_top);
+int cam_vfe_top_deinit(uint32_t top_version,
+ struct cam_vfe_top **vfe_top);
+
#endif /* _CAM_VFE_TOP_H_*/
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/Makefile b/drivers/media/platform/msm/camera/cam_jpeg/Makefile
new file mode 100644
index 0000000..4d272d3
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/Makefile
@@ -0,0 +1,11 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += jpeg_hw/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_jpeg_dev.o cam_jpeg_context.o
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c
new file mode 100644
index 0000000..a299179
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c
@@ -0,0 +1,138 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include "cam_mem_mgr.h"
+#include "cam_sync_api.h"
+#include "cam_jpeg_context.h"
+#include "cam_context_utils.h"
+#include "cam_debug_util.h"
+
+static int __cam_jpeg_ctx_acquire_dev_in_available(struct cam_context *ctx,
+ struct cam_acquire_dev_cmd *cmd)
+{
+ int rc;
+
+ rc = cam_context_acquire_dev_to_hw(ctx, cmd);
+ if (rc)
+ CAM_ERR(CAM_JPEG, "Unable to Acquire device %d", rc);
+ else
+ ctx->state = CAM_CTX_ACQUIRED;
+
+ return rc;
+}
+
+static int __cam_jpeg_ctx_release_dev_in_acquired(struct cam_context *ctx,
+ struct cam_release_dev_cmd *cmd)
+{
+ int rc;
+
+ rc = cam_context_release_dev_to_hw(ctx, cmd);
+ if (rc)
+ CAM_ERR(CAM_JPEG, "Unable to release device %d", rc);
+
+ ctx->state = CAM_CTX_AVAILABLE;
+
+ return rc;
+}
+
+static int __cam_jpeg_ctx_config_dev_in_acquired(struct cam_context *ctx,
+ struct cam_config_dev_cmd *cmd)
+{
+ return cam_context_prepare_dev_to_hw(ctx, cmd);
+}
+
+static int __cam_jpeg_ctx_handle_buf_done_in_acquired(void *ctx,
+ uint32_t evt_id, void *done)
+{
+ return cam_context_buf_done_from_hw(ctx, done, evt_id);
+}
+
+/* top state machine */
+static struct cam_ctx_ops
+ cam_jpeg_ctx_state_machine[CAM_CTX_STATE_MAX] = {
+ /* Uninit */
+ {
+ .ioctl_ops = { },
+ .crm_ops = { },
+ .irq_ops = NULL,
+ },
+ /* Available */
+ {
+ .ioctl_ops = {
+ .acquire_dev = __cam_jpeg_ctx_acquire_dev_in_available,
+ },
+ .crm_ops = { },
+ .irq_ops = NULL,
+ },
+ /* Acquired */
+ {
+ .ioctl_ops = {
+ .release_dev = __cam_jpeg_ctx_release_dev_in_acquired,
+ .config_dev = __cam_jpeg_ctx_config_dev_in_acquired,
+ },
+ .crm_ops = { },
+ .irq_ops = __cam_jpeg_ctx_handle_buf_done_in_acquired,
+ },
+};
+
+int cam_jpeg_context_init(struct cam_jpeg_context *ctx,
+ struct cam_context *ctx_base,
+ struct cam_hw_mgr_intf *hw_intf)
+{
+ int rc;
+ int i;
+
+ if (!ctx || !ctx_base) {
+ CAM_ERR(CAM_JPEG, "Invalid Context");
+ rc = -EFAULT;
+ goto err;
+ }
+
+ memset(ctx, 0, sizeof(*ctx));
+
+ ctx->base = ctx_base;
+
+ for (i = 0; i < CAM_CTX_REQ_MAX; i++)
+ ctx->req_base[i].req_priv = ctx;
+
+ rc = cam_context_init(ctx_base, NULL, hw_intf, ctx->req_base,
+ CAM_CTX_REQ_MAX);
+ if (rc) {
+ CAM_ERR(CAM_JPEG, "Camera Context Base init failed");
+ goto err;
+ }
+
+ ctx_base->state_machine = cam_jpeg_ctx_state_machine;
+ ctx_base->ctx_priv = ctx;
+
+err:
+ return rc;
+}
+
+int cam_jpeg_context_deinit(struct cam_jpeg_context *ctx)
+{
+ if (!ctx || !ctx->base) {
+ CAM_ERR(CAM_JPEG, "Invalid params: %pK", ctx);
+ return -EINVAL;
+ }
+
+ cam_context_deinit(ctx->base);
+
+ memset(ctx, 0, sizeof(*ctx));
+
+ return 0;
+}
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.h b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.h
new file mode 100644
index 0000000..90ac5cf
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.h
@@ -0,0 +1,72 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_JPEG_CONTEXT_H_
+#define _CAM_JPEG_CONTEXT_H_
+
+#include <uapi/media/cam_jpeg.h>
+
+#include "cam_context.h"
+#include "cam_jpeg_hw_mgr_intf.h"
+
+#define CAM_JPEG_HW_EVENT_MAX 20
+
+/**
+ * struct cam_jpeg_context - Jpeg context
+ * @base: Base jpeg cam context object
+ * @req_base: Common request structure
+ */
+struct cam_jpeg_context {
+ struct cam_context *base;
+ struct cam_ctx_request req_base[CAM_CTX_REQ_MAX];
+};
+
+/* cam jpeg context irq handling function type */
+typedef int (*cam_jpeg_hw_event_cb_func)(
+ struct cam_jpeg_context *ctx_jpeg,
+ void *evt_data);
+
+/**
+ * struct cam_jpeg_ctx_irq_ops - Function table for handling IRQ callbacks
+ *
+ * @irq_ops: Array of handle function pointers.
+ *
+ */
+struct cam_jpeg_ctx_irq_ops {
+ cam_jpeg_hw_event_cb_func irq_ops[CAM_JPEG_HW_EVENT_MAX];
+};
+
+/**
+ * cam_jpeg_context_init()
+ *
+ * @brief: Initialization function for the JPEG context
+ *
+ * @ctx: JPEG context obj to be initialized
+ * @ctx_base: Context base from cam_context
+ * @hw_intf: JPEG hw manager interface
+ *
+ */
+int cam_jpeg_context_init(struct cam_jpeg_context *ctx,
+ struct cam_context *ctx_base,
+ struct cam_hw_mgr_intf *hw_intf);
+
+/**
+ * cam_jpeg_context_deinit()
+ *
+ * @brief: Deinitialize function for the JPEG context
+ *
+ * @ctx: JPEG context obj to be deinitialized
+ *
+ */
+int cam_jpeg_context_deinit(struct cam_jpeg_context *ctx);
+
+#endif /* __CAM_JPEG_CONTEXT_H__ */
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.c b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.c
new file mode 100644
index 0000000..fb68ddb
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.c
@@ -0,0 +1,136 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/ion.h>
+#include <linux/kernel.h>
+
+#include "cam_node.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_jpeg_hw_mgr_intf.h"
+#include "cam_jpeg_dev.h"
+#include "cam_debug_util.h"
+
+#define CAM_JPEG_DEV_NAME "cam-jpeg"
+
+static struct cam_jpeg_dev g_jpeg_dev;
+
+static const struct of_device_id cam_jpeg_dt_match[] = {
+ {
+ .compatible = "qcom,cam-jpeg"
+ },
+ { }
+};
+
+static int cam_jpeg_dev_remove(struct platform_device *pdev)
+{
+ int rc;
+ int i;
+
+ for (i = 0; i < CAM_CTX_MAX; i++) {
+ rc = cam_jpeg_context_deinit(&g_jpeg_dev.ctx_jpeg[i]);
+ if (rc)
+ CAM_ERR(CAM_JPEG, "JPEG context %d deinit failed %d",
+ i, rc);
+ }
+
+ rc = cam_subdev_remove(&g_jpeg_dev.sd);
+ if (rc)
+ CAM_ERR(CAM_JPEG, "Unregister failed %d", rc);
+
+ return rc;
+}
+
+static int cam_jpeg_dev_probe(struct platform_device *pdev)
+{
+ int rc;
+ int i;
+ struct cam_hw_mgr_intf hw_mgr_intf;
+ struct cam_node *node;
+
+ rc = cam_subdev_probe(&g_jpeg_dev.sd, pdev, CAM_JPEG_DEV_NAME,
+ CAM_JPEG_DEVICE_TYPE);
+ if (rc) {
+ CAM_ERR(CAM_JPEG, "JPEG cam_subdev_probe failed %d", rc);
+ goto err;
+ }
+ node = (struct cam_node *)g_jpeg_dev.sd.token;
+
+ rc = cam_jpeg_hw_mgr_init(pdev->dev.of_node,
+ (uint64_t *)&hw_mgr_intf);
+ if (rc) {
+ CAM_ERR(CAM_JPEG, "Can not initialize JPEG HWmanager %d", rc);
+ goto unregister;
+ }
+
+ for (i = 0; i < CAM_CTX_MAX; i++) {
+ rc = cam_jpeg_context_init(&g_jpeg_dev.ctx_jpeg[i],
+ &g_jpeg_dev.ctx[i],
+ &node->hw_mgr_intf);
+ if (rc) {
+ CAM_ERR(CAM_JPEG, "JPEG context init failed %d %d",
+ i, rc);
+ goto ctx_init_fail;
+ }
+ }
+
+ rc = cam_node_init(node, &hw_mgr_intf, g_jpeg_dev.ctx, CAM_CTX_MAX,
+ CAM_JPEG_DEV_NAME);
+ if (rc) {
+ CAM_ERR(CAM_JPEG, "JPEG node init failed %d", rc);
+ goto ctx_init_fail;
+ }
+
+ mutex_init(&g_jpeg_dev.jpeg_mutex);
+
+ CAM_INFO(CAM_JPEG, "Camera JPEG probe complete");
+
+ return rc;
+
+ctx_init_fail:
+ for (--i; i >= 0; i--)
+ if (cam_jpeg_context_deinit(&g_jpeg_dev.ctx_jpeg[i]))
+ CAM_ERR(CAM_JPEG, "deinit fail %d %d", i, rc);
+unregister:
+ if (cam_subdev_remove(&g_jpeg_dev.sd))
+ CAM_ERR(CAM_JPEG, "remove fail %d", rc);
+err:
+ return rc;
+}
+
+static struct platform_driver jpeg_driver = {
+ .probe = cam_jpeg_dev_probe,
+ .remove = cam_jpeg_dev_remove,
+ .driver = {
+ .name = "cam_jpeg",
+ .owner = THIS_MODULE,
+ .of_match_table = cam_jpeg_dt_match,
+ },
+};
+
+static int __init cam_jpeg_dev_init_module(void)
+{
+ return platform_driver_register(&jpeg_driver);
+}
+
+static void __exit cam_jpeg_dev_exit_module(void)
+{
+ platform_driver_unregister(&jpeg_driver);
+}
+
+module_init(cam_jpeg_dev_init_module);
+module_exit(cam_jpeg_dev_exit_module);
+MODULE_DESCRIPTION("MSM JPEG driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.h b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.h
new file mode 100644
index 0000000..deab2d5
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.h
@@ -0,0 +1,37 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_JPEG_DEV_H_
+#define _CAM_JPEG_DEV_H_
+
+#include "cam_subdev.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_context.h"
+#include "cam_jpeg_context.h"
+
+/**
+ * struct cam_jpeg_dev - Camera JPEG V4l2 device node
+ *
+ * @sd: Commone camera subdevice node
+ * @node: Pointer to jpeg subdevice
+ * @ctx: JPEG base context storage
+ * @ctx_jpeg: JPEG private context storage
+ * @jpeg_mutex: Jpeg dev mutex
+ */
+struct cam_jpeg_dev {
+ struct cam_subdev sd;
+ struct cam_node *node;
+ struct cam_context ctx[CAM_CTX_MAX];
+ struct cam_jpeg_context ctx_jpeg[CAM_CTX_MAX];
+ struct mutex jpeg_mutex;
+};
+#endif /* __CAM_JPEG_DEV_H__ */
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/Makefile b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/Makefile
new file mode 100644
index 0000000..08c9528
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/Makefile
@@ -0,0 +1,13 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cdm/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += jpeg_enc_hw/
+obj-$(CONFIG_SPECTRA_CAMERA) += jpeg_dma_hw/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_jpeg_hw_mgr.o
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
new file mode 100644
index 0000000..b06b5c4
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
@@ -0,0 +1,1178 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/debugfs.h>
+#include <media/cam_defs.h>
+#include <media/cam_jpeg.h>
+
+#include "cam_sync_api.h"
+#include "cam_packet_util.h"
+#include "cam_hw.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_jpeg_hw_mgr_intf.h"
+#include "cam_jpeg_hw_mgr.h"
+#include "cam_enc_hw_intf.h"
+#include "cam_dma_hw_intf.h"
+#include "cam_smmu_api.h"
+#include "cam_mem_mgr.h"
+#include "cam_req_mgr_workq.h"
+#include "cam_mem_mgr.h"
+#include "cam_cdm_intf_api.h"
+#include "cam_debug_util.h"
+
+#define CAM_JPEG_HW_ENTRIES_MAX 20
+
+static struct cam_jpeg_hw_mgr g_jpeg_hw_mgr;
+
+static int32_t cam_jpeg_hw_mgr_cb(uint32_t irq_status,
+ int32_t result_size, void *data);
+static int cam_jpeg_mgr_process_cmd(void *priv, void *data);
+
+static int cam_jpeg_mgr_process_irq(void *priv, void *data)
+{
+ int rc = 0;
+ struct cam_jpeg_process_irq_work_data_t *task_data;
+ struct cam_jpeg_hw_mgr *hw_mgr;
+ int32_t i;
+ struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
+ struct cam_hw_done_event_data buf_data;
+ struct cam_jpeg_set_irq_cb irq_cb;
+ uint32_t dev_type = 0;
+ uint64_t kaddr;
+ uint32_t *cmd_buf_kaddr;
+ size_t cmd_buf_len;
+ struct cam_jpeg_config_inout_param_info *p_params;
+ struct cam_jpeg_hw_cfg_req *p_cfg_req = NULL;
+ struct crm_workq_task *task;
+ struct cam_jpeg_process_frame_work_data_t *wq_task_data;
+
+ if (!data || !priv) {
+ CAM_ERR(CAM_JPEG, "Invalid data");
+ return -EINVAL;
+ }
+
+ task_data = data;
+ hw_mgr = &g_jpeg_hw_mgr;
+
+ ctx_data = (struct cam_jpeg_hw_ctx_data *)task_data->data;
+ if (!ctx_data->in_use) {
+ CAM_ERR(CAM_JPEG, "ctx is not in use");
+ return -EINVAL;
+ }
+
+ dev_type = ctx_data->jpeg_dev_acquire_info.dev_type;
+
+ irq_cb.jpeg_hw_mgr_cb = cam_jpeg_hw_mgr_cb;
+ irq_cb.data = NULL;
+ irq_cb.b_set_cb = false;
+ if (!hw_mgr->devices[dev_type][0]->hw_ops.process_cmd) {
+ CAM_ERR(CAM_JPEG, "process_cmd null ");
+ return -EINVAL;
+ }
+ rc = hw_mgr->devices[dev_type][0]->hw_ops.process_cmd(
+ hw_mgr->devices[dev_type][0]->hw_priv,
+ CAM_JPEG_ENC_CMD_SET_IRQ_CB,
+ &irq_cb, sizeof(irq_cb));
+ if (rc) {
+ CAM_ERR(CAM_JPEG, "CMD_SET_IRQ_CB failed %d", rc);
+ return rc;
+ }
+
+ mutex_lock(&g_jpeg_hw_mgr.hw_mgr_mutex);
+ hw_mgr->device_in_use[dev_type][0] = false;
+ p_cfg_req = hw_mgr->dev_hw_cfg_args[dev_type][0];
+ hw_mgr->dev_hw_cfg_args[dev_type][0] = NULL;
+ mutex_unlock(&g_jpeg_hw_mgr.hw_mgr_mutex);
+
+ task = cam_req_mgr_workq_get_task(
+ g_jpeg_hw_mgr.work_process_frame);
+ if (!task) {
+ CAM_ERR(CAM_JPEG, "no empty task");
+ return -EINVAL;
+ }
+
+ wq_task_data = (struct cam_jpeg_process_frame_work_data_t *)
+ task->payload;
+ if (!task_data) {
+ CAM_ERR(CAM_JPEG, "task_data is NULL");
+ return -EINVAL;
+ }
+ wq_task_data->data = (void *)(uint64_t)dev_type;
+ wq_task_data->request_id = 0;
+ wq_task_data->type = CAM_JPEG_WORKQ_TASK_CMD_TYPE;
+ task->process_cb = cam_jpeg_mgr_process_cmd;
+ rc = cam_req_mgr_workq_enqueue_task(task, &g_jpeg_hw_mgr,
+ CRM_TASK_PRIORITY_0);
+ if (rc) {
+ CAM_ERR(CAM_JPEG, "could not enque task %d", rc);
+ return rc;
+ }
+
+ rc = cam_mem_get_cpu_buf(
+ p_cfg_req->hw_cfg_args.hw_update_entries[1].handle,
+ (uint64_t *)&kaddr, &cmd_buf_len);
+ if (rc) {
+ CAM_ERR(CAM_JPEG, "unable to get info for cmd buf: %x %d",
+ hw_mgr->iommu_hdl, rc);
+ return rc;
+ }
+
+ cmd_buf_kaddr = (uint32_t *)kaddr;
+
+ cmd_buf_kaddr =
+ (cmd_buf_kaddr +
+ (p_cfg_req->hw_cfg_args.hw_update_entries[1].offset/4));
+
+ p_params = (struct cam_jpeg_config_inout_param_info *)cmd_buf_kaddr;
+
+ p_params->output_size = task_data->result_size;
+ CAM_DBG(CAM_JPEG, "Encoded Size %d", task_data->result_size);
+
+ buf_data.num_handles = p_cfg_req->
+ hw_cfg_args.num_out_map_entries;
+ for (i = 0; i < buf_data.num_handles; i++) {
+ buf_data.resource_handle[i] =
+ p_cfg_req->hw_cfg_args.
+ out_map_entries[i].resource_handle;
+ }
+ buf_data.request_id =
+ (uint64_t)p_cfg_req->hw_cfg_args.priv;
+ ctx_data->ctxt_event_cb(ctx_data->context_priv, 0, &buf_data);
+
+ list_add_tail(&p_cfg_req->list, &hw_mgr->free_req_list);
+
+
+ return rc;
+}
+
+static int cam_jpeg_hw_mgr_cb(
+ uint32_t irq_status, int32_t result_size, void *data)
+{
+ int32_t rc;
+ unsigned long flags;
+ struct cam_jpeg_hw_mgr *hw_mgr = &g_jpeg_hw_mgr;
+ struct crm_workq_task *task;
+ struct cam_jpeg_process_irq_work_data_t *task_data;
+
+ spin_lock_irqsave(&hw_mgr->hw_mgr_lock, flags);
+ task = cam_req_mgr_workq_get_task(
+ g_jpeg_hw_mgr.work_process_irq_cb);
+ if (!task) {
+ CAM_ERR(CAM_JPEG, "no empty task");
+ spin_unlock_irqrestore(&hw_mgr->hw_mgr_lock, flags);
+ return -ENOMEM;
+ }
+
+ task_data = (struct cam_jpeg_process_irq_work_data_t *)task->payload;
+ task_data->data = data;
+ task_data->irq_status = irq_status;
+ task_data->result_size = result_size;
+ task_data->type = CAM_JPEG_WORKQ_TASK_MSG_TYPE;
+ task->process_cb = cam_jpeg_mgr_process_irq;
+
+ rc = cam_req_mgr_workq_enqueue_task(task, &g_jpeg_hw_mgr,
+ CRM_TASK_PRIORITY_0);
+ spin_unlock_irqrestore(&hw_mgr->hw_mgr_lock, flags);
+
+ return rc;
+}
+
+static int cam_jpeg_mgr_get_free_ctx(struct cam_jpeg_hw_mgr *hw_mgr)
+{
+ int i = 0;
+ int num_ctx = CAM_JPEG_CTX_MAX;
+
+ for (i = 0; i < num_ctx; i++) {
+ mutex_lock(&hw_mgr->ctx_data[i].ctx_mutex);
+ if (hw_mgr->ctx_data[i].in_use == false) {
+ hw_mgr->ctx_data[i].in_use = true;
+ mutex_unlock(&hw_mgr->ctx_data[i].ctx_mutex);
+ break;
+ }
+ mutex_unlock(&hw_mgr->ctx_data[i].ctx_mutex);
+ }
+
+ return i;
+}
+
+
+static int cam_jpeg_mgr_release_ctx(
+ struct cam_jpeg_hw_mgr *hw_mgr, int ctx_id)
+{
+ if (ctx_id >= CAM_JPEG_CTX_MAX) {
+ CAM_ERR(CAM_JPEG, "ctx_id is wrong: %d", ctx_id);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
+ if (!hw_mgr->ctx_data[ctx_id].in_use) {
+ CAM_ERR(CAM_JPEG, "ctx is already in use: %d", ctx_id);
+ mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
+ return -EINVAL;
+ }
+
+ hw_mgr->ctx_data[ctx_id].in_use = 0;
+ mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
+
+ return 0;
+}
+
+static int cam_jpeg_mgr_process_cmd(void *priv, void *data)
+{
+ int rc;
+ int i = 0;
+ struct cam_jpeg_hw_mgr *hw_mgr = priv;
+ struct cam_hw_update_entry *cmd;
+ struct cam_cdm_bl_request *cdm_cmd;
+ struct cam_hw_config_args *config_args = NULL;
+ struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
+ uint64_t request_id = 0;
+ struct cam_jpeg_process_frame_work_data_t *task_data =
+ (struct cam_jpeg_process_frame_work_data_t *)data;
+ uint32_t dev_type;
+ struct cam_jpeg_set_irq_cb irq_cb;
+ struct cam_jpeg_hw_cfg_req *p_cfg_req = NULL;
+ uint32_t size = 0;
+ uint32_t mem_cam_base = 0;
+ struct cam_hw_done_event_data buf_data;
+
+ CAM_DBG(CAM_JPEG, "in cam_jpeg_mgr_process_cmd");
+ if (!hw_mgr || !task_data) {
+ CAM_ERR(CAM_JPEG, "Invalid arguments %pK %pK",
+ hw_mgr, task_data);
+ return -EINVAL;
+ }
+
+ if (list_empty(&hw_mgr->hw_config_req_list)) {
+ CAM_DBG(CAM_JPEG, "no available request");
+ rc = -EFAULT;
+ goto end;
+ }
+
+ p_cfg_req = list_first_entry(&hw_mgr->hw_config_req_list,
+ struct cam_jpeg_hw_cfg_req, list);
+ if (!p_cfg_req) {
+ CAM_ERR(CAM_JPEG, "no request");
+ rc = -EFAULT;
+ goto end;
+ }
+
+ mutex_lock(&hw_mgr->hw_mgr_mutex);
+ if (false == hw_mgr->device_in_use[p_cfg_req->dev_type][0]) {
+ hw_mgr->device_in_use[p_cfg_req->dev_type][0] = true;
+ hw_mgr->dev_hw_cfg_args[p_cfg_req->dev_type][0] = p_cfg_req;
+ list_del_init(&p_cfg_req->list);
+ } else {
+ CAM_ERR(CAM_JPEG, "NOT dequeing, just return");
+ rc = -EFAULT;
+ goto end;
+ }
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+ config_args = (struct cam_hw_config_args *)&p_cfg_req->hw_cfg_args;
+ request_id = task_data->request_id;
+ if (request_id != (uint64_t)config_args->priv) {
+ CAM_WARN(CAM_JPEG, "not a recent req %d %d",
+ request_id, (uint64_t)config_args->priv);
+ }
+
+ if (!config_args->num_hw_update_entries) {
+ CAM_ERR(CAM_JPEG, "No hw update enteries are available");
+ return -EINVAL;
+ }
+
+ mutex_lock(&hw_mgr->hw_mgr_mutex);
+ ctx_data = (struct cam_jpeg_hw_ctx_data *)config_args->ctxt_to_hw_map;
+ if (!ctx_data->in_use) {
+ CAM_ERR(CAM_JPEG, "ctx is not in use");
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ return -EINVAL;
+ }
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+ dev_type = ctx_data->jpeg_dev_acquire_info.dev_type;
+
+ if (dev_type != p_cfg_req->dev_type)
+ CAM_WARN(CAM_JPEG, "dev types not same something wrong");
+
+ irq_cb.jpeg_hw_mgr_cb = cam_jpeg_hw_mgr_cb;
+ irq_cb.data = (void *)ctx_data;
+ irq_cb.b_set_cb = true;
+ if (!hw_mgr->devices[dev_type][0]->hw_ops.process_cmd) {
+ CAM_ERR(CAM_JPEG, "op process_cmd null ");
+ return -EINVAL;
+ }
+ rc = hw_mgr->devices[dev_type][0]->hw_ops.process_cmd(
+ hw_mgr->devices[dev_type][0]->hw_priv,
+ CAM_JPEG_ENC_CMD_SET_IRQ_CB,
+ &irq_cb, sizeof(irq_cb));
+ if (rc) {
+ CAM_ERR(CAM_JPEG, "SET_IRQ_CB failed %d", rc);
+ return -EINVAL;
+ }
+
+ if (!hw_mgr->devices[dev_type][0]->hw_ops.reset) {
+ CAM_ERR(CAM_JPEG, "op reset null ");
+ return -EINVAL;
+ }
+ rc = hw_mgr->devices[dev_type][0]->hw_ops.reset(
+ hw_mgr->devices[dev_type][0]->hw_priv,
+ NULL, 0);
+ if (rc) {
+ CAM_ERR(CAM_JPEG, "jpeg hw reset failed %d", rc);
+ return -EINVAL;
+ }
+
+ mem_cam_base = (uint64_t)hw_mgr->cdm_reg_map[dev_type][0]->
+ mem_cam_base;
+ size = hw_mgr->cdm_info[dev_type][0].cdm_ops->
+ cdm_required_size_changebase();
+ hw_mgr->cdm_info[dev_type][0].cdm_ops->
+ cdm_write_changebase(ctx_data->cmd_chbase_buf_addr,
+ (uint64_t)hw_mgr->cdm_reg_map[dev_type][0]->mem_cam_base);
+ ctx_data->cdm_cmd_chbase->cmd_arrary_count = 1;
+ ctx_data->cdm_cmd_chbase->type = CAM_CDM_BL_CMD_TYPE_KERNEL_IOVA;
+ ctx_data->cdm_cmd_chbase->flag = false;
+ ctx_data->cdm_cmd_chbase->userdata = NULL;
+ ctx_data->cdm_cmd_chbase->cookie = 0;
+ ctx_data->cdm_cmd_chbase->cmd[0].bl_addr.kernel_iova =
+ ctx_data->cmd_chbase_buf_addr;
+ ctx_data->cdm_cmd_chbase->cmd[0].offset = 0;
+ ctx_data->cdm_cmd_chbase->cmd[0].len = size;
+ rc = cam_cdm_submit_bls(hw_mgr->cdm_info[dev_type][0].cdm_handle,
+ ctx_data->cdm_cmd_chbase);
+ if (rc)
+ CAM_ERR(CAM_JPEG, "failed cdm cmd %d", rc);
+
+ CAM_DBG(CAM_JPEG, "cfg e %pK num %d",
+ config_args->hw_update_entries,
+ config_args->num_hw_update_entries);
+
+ if (config_args->num_hw_update_entries > 0) {
+ cdm_cmd = ctx_data->cdm_cmd;
+ cdm_cmd->cmd_arrary_count =
+ config_args->num_hw_update_entries - 1;
+ cdm_cmd->type = CAM_CDM_BL_CMD_TYPE_MEM_HANDLE;
+ cdm_cmd->flag = false;
+ cdm_cmd->userdata = NULL;
+ cdm_cmd->cookie = 0;
+
+ for (i = 0; i <= cdm_cmd->cmd_arrary_count; i++) {
+ cmd = (config_args->hw_update_entries + i);
+ cdm_cmd->cmd[i].bl_addr.mem_handle = cmd->handle;
+ cdm_cmd->cmd[i].offset = cmd->offset;
+ cdm_cmd->cmd[i].len = cmd->len;
+ }
+
+ rc = cam_cdm_submit_bls(
+ hw_mgr->cdm_info[dev_type][0].cdm_handle,
+ cdm_cmd);
+ if (rc) {
+ CAM_ERR(CAM_JPEG, "Failed to apply the configs %d",
+ rc);
+ goto end_callcb;
+ }
+
+ if (!hw_mgr->devices[dev_type][0]->hw_ops.start) {
+ CAM_ERR(CAM_JPEG, "op start null ");
+ rc = -EINVAL;
+ goto end_callcb;
+ }
+ rc = hw_mgr->devices[dev_type][0]->hw_ops.start(
+ hw_mgr->devices[dev_type][0]->hw_priv,
+ NULL, 0);
+ if (rc) {
+ CAM_ERR(CAM_JPEG, "Failed to apply the configs %d",
+ rc);
+ goto end_callcb;
+ }
+ } else {
+ CAM_ERR(CAM_JPEG, "No commands to config");
+ }
+
+ return rc;
+
+end_callcb:
+ if (p_cfg_req) {
+ buf_data.num_handles = p_cfg_req->
+ hw_cfg_args.num_out_map_entries;
+ for (i = 0; i < buf_data.num_handles; i++) {
+ buf_data.resource_handle[i] =
+ p_cfg_req->hw_cfg_args.
+ out_map_entries[i].resource_handle;
+ }
+ buf_data.request_id =
+ (uint64_t)p_cfg_req->hw_cfg_args.priv;
+ ctx_data->ctxt_event_cb(ctx_data->context_priv, 0, &buf_data);
+ }
+end:
+
+ return rc;
+}
+
+/*
+ * cam_jpeg_mgr_config_hw - queue one hw config request for async processing
+ *
+ * Pulls a free request node, snapshots the config args into it, moves it to
+ * the pending hw_config_req_list and enqueues a workq task that will run
+ * cam_jpeg_mgr_process_cmd. Returns 0 on success, negative errno otherwise.
+ *
+ * Fix vs. original: every manipulation of free_req_list /
+ * hw_config_req_list is now performed while holding hw_mgr_mutex; the
+ * original touched both lists on the error paths (and the pending-list
+ * insertion) after dropping the lock, racing the worker thread.
+ */
+static int cam_jpeg_mgr_config_hw(void *hw_mgr_priv, void *config_hw_args)
+{
+	int rc;
+	struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_hw_config_args *config_args = config_hw_args;
+	struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
+	uint64_t request_id = 0;
+	struct cam_hw_update_entry *hw_update_entries;
+	struct crm_workq_task *task;
+	struct cam_jpeg_process_frame_work_data_t *task_data;
+	struct cam_jpeg_hw_cfg_req *p_cfg_req = NULL;
+
+	if (!hw_mgr || !config_args) {
+		CAM_ERR(CAM_JPEG, "Invalid arguments %pK %pK",
+			hw_mgr, config_args);
+		return -EINVAL;
+	}
+
+	if (!config_args->num_hw_update_entries) {
+		CAM_ERR(CAM_JPEG, "No hw update enteries are available");
+		return -EINVAL;
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+
+	ctx_data = (struct cam_jpeg_hw_ctx_data *)config_args->ctxt_to_hw_map;
+	if (!ctx_data->in_use) {
+		CAM_ERR(CAM_JPEG, "ctx is not in use");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return -EINVAL;
+	}
+
+	if (list_empty(&hw_mgr->free_req_list)) {
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		CAM_ERR(CAM_JPEG, "list empty");
+		return -ENOMEM;
+	}
+
+	p_cfg_req = list_first_entry(&hw_mgr->free_req_list,
+		struct cam_jpeg_hw_cfg_req, list);
+	list_del_init(&p_cfg_req->list);
+
+	/* Update Currently Processing Config Request */
+	p_cfg_req->hw_cfg_args = *config_args;
+	p_cfg_req->dev_type = ctx_data->jpeg_dev_acquire_info.dev_type;
+
+	request_id = (uint64_t)config_args->priv;
+	hw_update_entries = config_args->hw_update_entries;
+	CAM_DBG(CAM_JPEG, "ctx_data = %pK req_id = %lld %pK",
+		ctx_data, request_id, config_args->priv);
+	task = cam_req_mgr_workq_get_task(g_jpeg_hw_mgr.work_process_frame);
+	if (!task) {
+		CAM_ERR(CAM_JPEG, "no empty task");
+		rc = -ENOMEM;
+		goto err_requeue_free_locked;
+	}
+
+	task_data = (struct cam_jpeg_process_frame_work_data_t *)
+		task->payload;
+	if (!task_data) {
+		CAM_ERR(CAM_JPEG, "task_data is NULL");
+		rc = -EINVAL;
+		goto err_requeue_free_locked;
+	}
+	CAM_DBG(CAM_JPEG, "cfge %pK num %d",
+		p_cfg_req->hw_cfg_args.hw_update_entries,
+		p_cfg_req->hw_cfg_args.num_hw_update_entries);
+
+	/* Insert into the pending list while still holding hw_mgr_mutex */
+	list_add_tail(&p_cfg_req->list, &hw_mgr->hw_config_req_list);
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	task_data->data = (void *)(int64_t)p_cfg_req->dev_type;
+	task_data->request_id = request_id;
+	task_data->type = CAM_JPEG_WORKQ_TASK_CMD_TYPE;
+	task->process_cb = cam_jpeg_mgr_process_cmd;
+
+	rc = cam_req_mgr_workq_enqueue_task(task, &g_jpeg_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "failed to enqueue task %d", rc);
+		goto err_after_get_task;
+	}
+
+	return rc;
+
+err_after_get_task:
+	/* Enqueue failed after unlock: re-take the lock before list ops */
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	list_del_init(&p_cfg_req->list);
+err_requeue_free_locked:
+	/* Return the node to the free pool; lock is held on both entries */
+	list_add_tail(&p_cfg_req->list, &hw_mgr->free_req_list);
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	return rc;
+}
+
+
+/*
+ * cam_jpeg_mgr_prepare_hw_update - validate a packet and translate it into
+ * hw update / io-map entries for a later config call.
+ *
+ * Checks opcode and cmd/patch/io counts, runs patch processing against the
+ * jpeg iommu handle, then splits io configs into in_map_entries (inputs)
+ * and out_map_entries (everything else) and copies cmd buf descriptors
+ * into hw_update_entries. Returns 0 on success, negative errno on failure.
+ *
+ * Fix vs. original: the non-input branch filled
+ * in_map_entries[k].resource_handle (copy-paste bug) while setting
+ * out_map_entries[k].sync_id, leaving output resource handles unset;
+ * num_in_map_entries is now also reset alongside num_out_map_entries.
+ */
+static int cam_jpeg_mgr_prepare_hw_update(void *hw_mgr_priv,
+	void *prepare_hw_update_args)
+{
+	int rc, i, j, k;
+	struct cam_hw_prepare_update_args *prepare_args =
+		prepare_hw_update_args;
+	struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
+	struct cam_packet *packet = NULL;
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+	struct cam_buf_io_cfg *io_cfg_ptr = NULL;
+
+	if (!prepare_args || !hw_mgr) {
+		CAM_ERR(CAM_JPEG, "Invalid args %pK %pK",
+			prepare_args, hw_mgr);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	ctx_data = (struct cam_jpeg_hw_ctx_data *)prepare_args->ctxt_to_hw_map;
+	if (!ctx_data->in_use) {
+		CAM_ERR(CAM_JPEG, "ctx is not in use");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return -EINVAL;
+	}
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	packet = prepare_args->packet;
+	if (!packet) {
+		CAM_ERR(CAM_JPEG, "received packet is NULL");
+		return -EINVAL;
+	}
+
+	if (((packet->header.op_code & 0xff) != CAM_JPEG_OPCODE_ENC_UPDATE) &&
+		((packet->header.op_code
+		& 0xff) != CAM_JPEG_OPCODE_DMA_UPDATE)) {
+		CAM_ERR(CAM_JPEG, "Invalid Opcode in pkt: %d",
+			packet->header.op_code & 0xff);
+		return -EINVAL;
+	}
+	if ((packet->num_cmd_buf > 2) || !packet->num_patches ||
+		!packet->num_io_configs) {
+		CAM_ERR(CAM_JPEG, "wrong number of cmd/patch info: %u %u",
+			packet->num_cmd_buf,
+			packet->num_patches);
+		return -EINVAL;
+	}
+
+	cmd_desc = (struct cam_cmd_buf_desc *)
+		((uint32_t *)&packet->payload +
+		(packet->cmd_buf_offset / 4));
+	CAM_DBG(CAM_JPEG, "packet = %pK cmd_desc = %pK size = %lu",
+		(void *)packet, (void *)cmd_desc,
+		sizeof(struct cam_cmd_buf_desc));
+
+	rc = cam_packet_util_process_patches(packet, hw_mgr->iommu_hdl);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "Patch processing failed %d", rc);
+		return rc;
+	}
+
+	io_cfg_ptr = (struct cam_buf_io_cfg *)((uint32_t *)&packet->payload +
+		packet->io_configs_offset / 4);
+	CAM_DBG(CAM_JPEG, "packet = %pK io_cfg_ptr = %pK size = %lu",
+		(void *)packet, (void *)io_cfg_ptr,
+		sizeof(struct cam_buf_io_cfg));
+
+	prepare_args->num_out_map_entries = 0;
+	prepare_args->num_in_map_entries = 0;
+
+	for (i = 0, j = 0, k = 0; i < packet->num_io_configs; i++) {
+		if (io_cfg_ptr[i].direction == CAM_BUF_INPUT) {
+			prepare_args->in_map_entries[j].resource_handle =
+				io_cfg_ptr[i].resource_type;
+			prepare_args->in_map_entries[j++].sync_id =
+				io_cfg_ptr[i].fence;
+			prepare_args->num_in_map_entries++;
+		} else {
+			/* BUGFIX: fill out_map_entries, not in_map_entries */
+			prepare_args->out_map_entries[k].resource_handle =
+				io_cfg_ptr[i].resource_type;
+			prepare_args->out_map_entries[k++].sync_id =
+				io_cfg_ptr[i].fence;
+			prepare_args->num_out_map_entries++;
+		}
+		CAM_DBG(CAM_JPEG, "dir[%d]: %u, fence: %u",
+			i, io_cfg_ptr[i].direction, io_cfg_ptr[i].fence);
+	}
+
+	for (i = 0; i < packet->num_cmd_buf; i++) {
+		prepare_args->hw_update_entries[i].len =
+			(uint32_t)cmd_desc[i].length;
+		prepare_args->hw_update_entries[i].handle =
+			(uint32_t)cmd_desc[i].mem_handle;
+		prepare_args->hw_update_entries[i].offset =
+			(uint32_t)cmd_desc[i].offset;
+		prepare_args->num_hw_update_entries++;
+	}
+
+	prepare_args->priv = (void *)packet->header.request_id;
+
+	CAM_DBG(CAM_JPEG, "will wait on input sync sync_id %d",
+		prepare_args->in_map_entries[0].sync_id);
+
+	return rc;
+}
+
+/*
+ * cam_jpeg_mgr_release_hw - release one acquired jpeg context
+ *
+ * Drops the CDM reference for the context's dev type (streaming off and
+ * releasing the CDM handle on the last reference), deinits the core hw
+ * if the op is provided, then frees the context slot.
+ *
+ * Fixes vs. original:
+ *  - in_use was read and mutex_unlock() called before any mutex_lock();
+ *    the lock is now taken first, so there is no unlock-without-lock and
+ *    no unlocked read of in_use.
+ *  - removed the double mutex_unlock() after cam_jpeg_mgr_release_ctx
+ *    failure (the lock was already dropped above).
+ *  - ctx_id is derived from the context pointer instead of being
+ *    hard-coded 0, so contexts other than slot 0 are actually released.
+ *  - pointer is logged with %pK instead of %llu.
+ */
+static int cam_jpeg_mgr_release_hw(void *hw_mgr_priv, void *release_hw_args)
+{
+	int rc;
+	int ctx_id = 0;
+	struct cam_hw_release_args *release_hw = release_hw_args;
+	struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
+	uint32_t dev_type;
+
+	if (!release_hw || !hw_mgr) {
+		CAM_ERR(CAM_JPEG, "Invalid args");
+		return -EINVAL;
+	}
+
+	ctx_data = (struct cam_jpeg_hw_ctx_data *)release_hw->ctxt_to_hw_map;
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	if (!ctx_data->in_use) {
+		CAM_ERR(CAM_JPEG, "ctx is not in use");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return -EINVAL;
+	}
+	dev_type = ctx_data->jpeg_dev_acquire_info.dev_type;
+	/* ctx_data points into hw_mgr->ctx_data[]; recover its index */
+	ctx_id = ctx_data - hw_mgr->ctx_data;
+
+	hw_mgr->cdm_info[dev_type][0].ref_cnt--;
+	if (!(hw_mgr->cdm_info[dev_type][0].ref_cnt)) {
+		if (cam_cdm_stream_off(
+			hw_mgr->cdm_info[dev_type][0].cdm_handle)) {
+			CAM_ERR(CAM_JPEG, "CDM stream off failed %d",
+				hw_mgr->cdm_info[dev_type][0].cdm_handle);
+		}
+		/* release cdm handle */
+		cam_cdm_release(hw_mgr->cdm_info[dev_type][0].cdm_handle);
+	}
+
+	if (g_jpeg_hw_mgr.devices[dev_type][0]->hw_ops.deinit) {
+		rc = g_jpeg_hw_mgr.devices[dev_type][0]->hw_ops.deinit(
+			g_jpeg_hw_mgr.devices[dev_type][0]->hw_priv, NULL, 0);
+		if (rc)
+			CAM_ERR(CAM_JPEG, "Failed to Deinit %d HW", dev_type);
+	}
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	rc = cam_jpeg_mgr_release_ctx(hw_mgr, ctx_id);
+	if (rc)
+		return -EINVAL;
+
+	CAM_DBG(CAM_JPEG, "handle %pK", ctx_data);
+
+	return rc;
+}
+
+/*
+ * cam_jpeg_mgr_acquire_hw - acquire a jpeg context for a userspace client
+ *
+ * Copies the acquire info from userspace, grabs a free context slot,
+ * allocates the per-context CDM command buffers, acquires/refcounts the
+ * CDM handle for the requested dev type, inits the core hw and streams
+ * the CDM on for the first reference. Returns 0 and fills
+ * args->ctxt_to_hw_map on success.
+ *
+ * Fixes vs. original:
+ *  - after allocating cmd_chbase_buf_addr the NULL check re-tested
+ *    cdm_cmd_chbase (copy-paste), so that allocation failure went
+ *    undetected.
+ *  - error unwinding now decrements the CDM ref count and only releases
+ *    the shared CDM handle when this context held the last reference
+ *    (the original released it unconditionally, yanking it from other
+ *    live contexts), streams off only if this call streamed on, and
+ *    frees cdm_cmd_chbase / cmd_chbase_buf_addr which previously leaked.
+ */
+static int cam_jpeg_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
+{
+	int rc;
+	int32_t ctx_id = 0;
+	struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
+	struct cam_hw_acquire_args *args = acquire_hw_args;
+	struct cam_jpeg_acquire_dev_info jpeg_dev_acquire_info;
+	struct cam_cdm_acquire_data cdm_acquire;
+	uint32_t dev_type;
+	uint32_t size = 0;
+
+	if ((!hw_mgr_priv) || (!acquire_hw_args)) {
+		CAM_ERR(CAM_JPEG, "Invalid params: %pK %pK", hw_mgr_priv,
+			acquire_hw_args);
+		return -EINVAL;
+	}
+
+	if (args->num_acq > 1) {
+		CAM_ERR(CAM_JPEG,
+			"number of resources are wrong: %u",
+			args->num_acq);
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&jpeg_dev_acquire_info,
+		(void __user *)args->acquire_info,
+		sizeof(jpeg_dev_acquire_info))) {
+		CAM_ERR(CAM_JPEG, "copy failed");
+		return -EFAULT;
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	ctx_id = cam_jpeg_mgr_get_free_ctx(hw_mgr);
+	if (ctx_id >= CAM_JPEG_CTX_MAX) {
+		CAM_ERR(CAM_JPEG, "No free ctx space in hw_mgr");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return -EFAULT;
+	}
+
+	ctx_data = &hw_mgr->ctx_data[ctx_id];
+
+	ctx_data->cdm_cmd =
+		kzalloc(((sizeof(struct cam_cdm_bl_request)) +
+			((CAM_JPEG_HW_ENTRIES_MAX - 1) *
+			sizeof(struct cam_cdm_bl_cmd))), GFP_KERNEL);
+	if (!ctx_data->cdm_cmd) {
+		rc = -ENOMEM;
+		goto acq_cdm_hdl_failed;
+	}
+
+	mutex_lock(&ctx_data->ctx_mutex);
+	ctx_data->jpeg_dev_acquire_info = jpeg_dev_acquire_info;
+	mutex_unlock(&ctx_data->ctx_mutex);
+
+	dev_type = ctx_data->jpeg_dev_acquire_info.dev_type;
+	if (!hw_mgr->cdm_info[dev_type][0].ref_cnt) {
+
+		if (dev_type == CAM_JPEG_RES_TYPE_ENC) {
+			memcpy(cdm_acquire.identifier,
+				"jpegenc", sizeof("jpegenc"));
+		} else {
+			memcpy(cdm_acquire.identifier,
+				"jpegdma", sizeof("jpegdma"));
+		}
+		cdm_acquire.cell_index = 0;
+		cdm_acquire.handle = 0;
+		cdm_acquire.userdata = ctx_data;
+		if (hw_mgr->cdm_reg_map[dev_type][0]) {
+			cdm_acquire.base_array[0] =
+				hw_mgr->cdm_reg_map[dev_type][0];
+		}
+		cdm_acquire.base_array_cnt = 1;
+		cdm_acquire.id = CAM_CDM_VIRTUAL;
+		cdm_acquire.cam_cdm_callback = NULL;
+
+		rc = cam_cdm_acquire(&cdm_acquire);
+		if (rc) {
+			CAM_ERR(CAM_JPEG, "Failed to acquire the CDM HW %d",
+				rc);
+			rc = -EFAULT;
+			goto acq_cdm_hdl_failed;
+		}
+		hw_mgr->cdm_info[dev_type][0].cdm_handle = cdm_acquire.handle;
+		hw_mgr->cdm_info[dev_type][0].cdm_ops = cdm_acquire.ops;
+		hw_mgr->cdm_info[dev_type][0].ref_cnt++;
+	} else {
+		hw_mgr->cdm_info[dev_type][0].ref_cnt++;
+	}
+
+	ctx_data->cdm_cmd_chbase =
+		kzalloc(((sizeof(struct cam_cdm_bl_request)) +
+			(2 * sizeof(struct cam_cdm_bl_cmd))), GFP_KERNEL);
+	if (!ctx_data->cdm_cmd_chbase) {
+		rc = -ENOMEM;
+		goto start_cdm_hdl_failed;
+	}
+	size = hw_mgr->cdm_info[dev_type][0].
+		cdm_ops->cdm_required_size_changebase();
+	ctx_data->cmd_chbase_buf_addr = kzalloc(size*4, GFP_KERNEL);
+	/* BUGFIX: check the buffer just allocated, not cdm_cmd_chbase */
+	if (!ctx_data->cmd_chbase_buf_addr) {
+		rc = -ENOMEM;
+		goto start_cdm_hdl_failed;
+	}
+
+	if (!g_jpeg_hw_mgr.devices[dev_type][0]->hw_ops.init) {
+		CAM_ERR(CAM_JPEG, "hw op init null ");
+		rc = -EINVAL;
+		goto start_cdm_hdl_failed;
+	}
+	rc = g_jpeg_hw_mgr.devices[dev_type][0]->hw_ops.init(
+		g_jpeg_hw_mgr.devices[dev_type][0]->hw_priv,
+		ctx_data,
+		sizeof(ctx_data));
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "Failed to Init %d HW", dev_type);
+		goto start_cdm_hdl_failed;
+	}
+
+	if (hw_mgr->cdm_info[dev_type][0].ref_cnt == 1)
+		if (cam_cdm_stream_on(
+			hw_mgr->cdm_info[dev_type][0].cdm_handle)) {
+			CAM_ERR(CAM_JPEG, "Can not start cdm (%d)!",
+				hw_mgr->cdm_info[dev_type][0].cdm_handle);
+			rc = -EFAULT;
+			goto start_cdm_hdl_failed;
+		}
+
+	mutex_lock(&ctx_data->ctx_mutex);
+	ctx_data->context_priv = args->context_data;
+
+	args->ctxt_to_hw_map = (void *)&(hw_mgr->ctx_data[ctx_id]);
+
+	mutex_unlock(&ctx_data->ctx_mutex);
+
+	hw_mgr->ctx_data[ctx_id].ctxt_event_cb = args->event_cb;
+
+	if (copy_to_user((void __user *)args->acquire_info,
+		&jpeg_dev_acquire_info,
+		sizeof(jpeg_dev_acquire_info))) {
+		rc = -EFAULT;
+		goto copy_to_user_failed;
+	}
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	CAM_DBG(CAM_JPEG, "success ctx_data= %pK", ctx_data);
+
+	return rc;
+
+copy_to_user_failed:
+	/* Stream off only if this acquire streamed on (first reference) */
+	if (hw_mgr->cdm_info[dev_type][0].ref_cnt == 1)
+		cam_cdm_stream_off(hw_mgr->cdm_info[dev_type][0].cdm_handle);
+start_cdm_hdl_failed:
+	/* Drop our CDM reference; release the handle on the last one */
+	if (hw_mgr->cdm_info[dev_type][0].ref_cnt &&
+		!(--hw_mgr->cdm_info[dev_type][0].ref_cnt))
+		cam_cdm_release(hw_mgr->cdm_info[dev_type][0].cdm_handle);
+	kfree(ctx_data->cmd_chbase_buf_addr);
+	ctx_data->cmd_chbase_buf_addr = NULL;
+	kfree(ctx_data->cdm_cmd_chbase);
+	ctx_data->cdm_cmd_chbase = NULL;
+acq_cdm_hdl_failed:
+	kfree(ctx_data->cdm_cmd);
+	ctx_data->cdm_cmd = NULL;
+	cam_jpeg_mgr_release_ctx(hw_mgr, ctx_id);
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	return rc;
+}
+
+/*
+ * cam_jpeg_mgr_get_hw_caps - copy the cached jpeg capabilities to userspace
+ *
+ * Copies g_jpeg_hw_mgr.jpeg_caps into the user buffer referenced by
+ * query_cap->caps_handle. Returns 0 on success, -EFAULT on copy failure
+ * or -EINVAL on bad arguments.
+ */
+static int cam_jpeg_mgr_get_hw_caps(void *hw_mgr_priv, void *hw_caps_args)
+{
+	int rc = 0;
+	struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_query_cap_cmd *query_cap = hw_caps_args;
+
+	if (!hw_mgr_priv || !hw_caps_args) {
+		CAM_ERR(CAM_JPEG, "Invalid params: %pK %pK",
+			hw_mgr_priv, hw_caps_args);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+
+	if (copy_to_user((void __user *)query_cap->caps_handle,
+		&g_jpeg_hw_mgr.jpeg_caps,
+		sizeof(struct cam_jpeg_query_cap_cmd))) {
+		CAM_ERR(CAM_JPEG, "copy_to_user failed");
+		rc = -EFAULT;
+	} else {
+		CAM_DBG(CAM_JPEG, "cam_jpeg_mgr_get_hw_caps success");
+	}
+
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	return rc;
+}
+
+/*
+ * cam_jpeg_setup_workqs - create the frame and irq work queues and their
+ * per-task payload pools, and seed the free request list.
+ *
+ * Returns 0 on success; on failure unwinds whatever was created.
+ *
+ * Fix vs. original: the payload pools are allocated with kcalloc (which
+ * checks the count*size multiplication for overflow) instead of a raw
+ * kzalloc of sizeof * N, and the redundant casts of the allocator return
+ * value are dropped.
+ */
+static int cam_jpeg_setup_workqs(void)
+{
+	int rc, i;
+
+	rc = cam_req_mgr_workq_create(
+		"jpeg_command_queue",
+		CAM_JPEG_WORKQ_NUM_TASK,
+		&g_jpeg_hw_mgr.work_process_frame,
+		CRM_WORKQ_USAGE_NON_IRQ);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "unable to create a worker %d", rc);
+		goto work_process_frame_failed;
+	}
+
+	rc = cam_req_mgr_workq_create(
+		"jpeg_message_queue",
+		CAM_JPEG_WORKQ_NUM_TASK,
+		&g_jpeg_hw_mgr.work_process_irq_cb,
+		CRM_WORKQ_USAGE_IRQ);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "unable to create a worker %d", rc);
+		goto work_process_irq_cb_failed;
+	}
+
+	g_jpeg_hw_mgr.process_frame_work_data =
+		kcalloc(CAM_JPEG_WORKQ_NUM_TASK,
+			sizeof(struct cam_jpeg_process_frame_work_data_t),
+			GFP_KERNEL);
+	if (!g_jpeg_hw_mgr.process_frame_work_data) {
+		rc = -ENOMEM;
+		goto work_process_frame_data_failed;
+	}
+
+	g_jpeg_hw_mgr.process_irq_cb_work_data =
+		kcalloc(CAM_JPEG_WORKQ_NUM_TASK,
+			sizeof(struct cam_jpeg_process_irq_work_data_t),
+			GFP_KERNEL);
+	if (!g_jpeg_hw_mgr.process_irq_cb_work_data) {
+		rc = -ENOMEM;
+		goto work_process_irq_cb_data_failed;
+	}
+
+	/* Wire one payload slot to each pre-allocated workq task */
+	for (i = 0; i < CAM_JPEG_WORKQ_NUM_TASK; i++)
+		g_jpeg_hw_mgr.work_process_irq_cb->task.pool[i].payload =
+			&g_jpeg_hw_mgr.process_irq_cb_work_data[i];
+
+	for (i = 0; i < CAM_JPEG_WORKQ_NUM_TASK; i++)
+		g_jpeg_hw_mgr.work_process_frame->task.pool[i].payload =
+			&g_jpeg_hw_mgr.process_frame_work_data[i];
+
+	/* All request nodes start out on the free list */
+	INIT_LIST_HEAD(&g_jpeg_hw_mgr.hw_config_req_list);
+	INIT_LIST_HEAD(&g_jpeg_hw_mgr.free_req_list);
+	for (i = 0; i < CAM_JPEG_HW_CFG_Q_MAX; i++) {
+		INIT_LIST_HEAD(&(g_jpeg_hw_mgr.req_list[i].list));
+		list_add_tail(&(g_jpeg_hw_mgr.req_list[i].list),
+			&(g_jpeg_hw_mgr.free_req_list));
+	}
+
+	return rc;
+
+work_process_irq_cb_data_failed:
+	kfree(g_jpeg_hw_mgr.process_frame_work_data);
+work_process_frame_data_failed:
+	cam_req_mgr_workq_destroy(&g_jpeg_hw_mgr.work_process_irq_cb);
+work_process_irq_cb_failed:
+	cam_req_mgr_workq_destroy(&g_jpeg_hw_mgr.work_process_frame);
+work_process_frame_failed:
+
+	return rc;
+}
+
+/*
+ * cam_jpeg_init_devices - discover jpeg enc/dma core devices from the
+ * device tree and record their interfaces and cdm register maps.
+ *
+ * On success writes the enc/dma device counts through p_num_enc_dev /
+ * p_num_dma_dev and returns 0; on failure frees the device arrays.
+ *
+ * Fixes vs. original:
+ *  - of_property_count_strings() returns a negative errno on failure,
+ *    which the old `if (!count)` check let through; use `count <= 0`.
+ *  - device pointer arrays are allocated with overflow-checked kcalloc.
+ *  - devices[...][0] is verified non-NULL before its hw_priv is
+ *    dereferenced (the loop only fills slots named in the DT).
+ *  - freed global arrays are reset to NULL on the error path.
+ */
+static int cam_jpeg_init_devices(struct device_node *of_node,
+	uint32_t *p_num_enc_dev,
+	uint32_t *p_num_dma_dev)
+{
+	int count, i, rc;
+	uint32_t num_dev;
+	uint32_t num_dma_dev;
+	const char *name = NULL;
+	struct device_node *child_node = NULL;
+	struct platform_device *child_pdev = NULL;
+	struct cam_hw_intf *child_dev_intf = NULL;
+	struct cam_hw_info *enc_hw = NULL;
+	struct cam_hw_info *dma_hw = NULL;
+	struct cam_hw_soc_info *enc_soc_info = NULL;
+	struct cam_hw_soc_info *dma_soc_info = NULL;
+
+	if (!p_num_enc_dev || !p_num_dma_dev) {
+		rc = -EINVAL;
+		goto num_dev_failed;
+	}
+	count = of_property_count_strings(of_node, "compat-hw-name");
+	if (count <= 0) {
+		CAM_ERR(CAM_JPEG,
+			"no compat hw found in dev tree, count = %d",
+			count);
+		rc = -EINVAL;
+		goto num_dev_failed;
+	}
+
+	rc = of_property_read_u32(of_node, "num-jpeg-enc", &num_dev);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "read num enc devices failed %d", rc);
+		goto num_enc_failed;
+	}
+	g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_ENC] = kcalloc(num_dev,
+		sizeof(struct cam_hw_intf *), GFP_KERNEL);
+	if (!g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_ENC]) {
+		rc = -ENOMEM;
+		CAM_ERR(CAM_JPEG, "getting number of dma dev nodes failed");
+		goto num_enc_failed;
+	}
+
+	rc = of_property_read_u32(of_node, "num-jpeg-dma", &num_dma_dev);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "get num dma dev nodes failed %d", rc);
+		goto num_dma_failed;
+	}
+
+	g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_DMA] = kcalloc(num_dma_dev,
+		sizeof(struct cam_hw_intf *), GFP_KERNEL);
+	if (!g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_DMA]) {
+		rc = -ENOMEM;
+		goto num_dma_failed;
+	}
+
+	for (i = 0; i < count; i++) {
+		rc = of_property_read_string_index(of_node, "compat-hw-name",
+			i, &name);
+		if (rc) {
+			CAM_ERR(CAM_JPEG, "getting dev object name failed");
+			goto compat_hw_name_failed;
+		}
+
+		child_node = of_find_node_by_name(NULL, name);
+		if (!child_node) {
+			CAM_ERR(CAM_JPEG,
+				"error! Cannot find node in dtsi %s", name);
+			rc = -ENODEV;
+			goto compat_hw_name_failed;
+		}
+
+		child_pdev = of_find_device_by_node(child_node);
+		if (!child_pdev) {
+			CAM_ERR(CAM_JPEG, "failed to find device on bus %s",
+				child_node->name);
+			rc = -ENODEV;
+			of_node_put(child_node);
+			goto compat_hw_name_failed;
+		}
+
+		child_dev_intf = (struct cam_hw_intf *)platform_get_drvdata(
+			child_pdev);
+		if (!child_dev_intf) {
+			CAM_ERR(CAM_JPEG, "no child device");
+			of_node_put(child_node);
+			rc = -ENODEV;
+			goto compat_hw_name_failed;
+		}
+		CAM_DBG(CAM_JPEG, "child_intf %pK type %d id %d",
+			child_dev_intf,
+			child_dev_intf->hw_type,
+			child_dev_intf->hw_idx);
+
+		if ((child_dev_intf->hw_type == CAM_JPEG_DEV_ENC &&
+			child_dev_intf->hw_idx >= num_dev) ||
+			(child_dev_intf->hw_type == CAM_JPEG_DEV_DMA &&
+			child_dev_intf->hw_idx >= num_dma_dev)) {
+			CAM_ERR(CAM_JPEG, "index out of range");
+			rc = -ENODEV;
+			goto compat_hw_name_failed;
+		}
+		g_jpeg_hw_mgr.devices[child_dev_intf->hw_type]
+			[child_dev_intf->hw_idx] = child_dev_intf;
+
+		of_node_put(child_node);
+	}
+
+	/* Device 0 of each type is dereferenced below; guard against a DT
+	 * that never populated those slots.
+	 */
+	if (!g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_ENC][0] ||
+		!g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_DMA][0]) {
+		CAM_ERR(CAM_JPEG, "dev tree did not provide enc/dma dev 0");
+		rc = -ENODEV;
+		goto compat_hw_name_failed;
+	}
+
+	enc_hw = (struct cam_hw_info *)
+		g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_ENC][0]->hw_priv;
+	enc_soc_info = &enc_hw->soc_info;
+	g_jpeg_hw_mgr.cdm_reg_map[CAM_JPEG_DEV_ENC][0] =
+		&enc_soc_info->reg_map[0];
+	dma_hw = (struct cam_hw_info *)
+		g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_DMA][0]->hw_priv;
+	dma_soc_info = &dma_hw->soc_info;
+	g_jpeg_hw_mgr.cdm_reg_map[CAM_JPEG_DEV_DMA][0] =
+		&dma_soc_info->reg_map[0];
+
+	*p_num_enc_dev = num_dev;
+	*p_num_dma_dev = num_dma_dev;
+
+	return rc;
+
+compat_hw_name_failed:
+	kfree(g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_DMA]);
+	g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_DMA] = NULL;
+num_dma_failed:
+	kfree(g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_ENC]);
+	g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_ENC] = NULL;
+num_enc_failed:
+num_dev_failed:
+
+	return rc;
+}
+
+/*
+ * cam_jpeg_hw_mgr_init - one-time bring-up of the jpeg hw manager.
+ *
+ * Fills the caller-provided cam_hw_mgr_intf with the jpeg callbacks,
+ * initializes locks, discovers the enc/dma core devices from the device
+ * tree, acquires and attaches the jpeg SMMU handle, queries the CDM
+ * iommu handles, publishes the capability block and creates the work
+ * queues. Returns 0 on success, negative errno on failure.
+ */
+int cam_jpeg_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl)
+{
+	int i, rc;
+	uint32_t num_dev;
+	uint32_t num_dma_dev;
+	struct cam_hw_mgr_intf *hw_mgr_intf;
+	struct cam_iommu_handle cdm_handles;
+
+	hw_mgr_intf = (struct cam_hw_mgr_intf *)hw_mgr_hdl;
+	if (!of_node || !hw_mgr_intf) {
+		CAM_ERR(CAM_JPEG, "Invalid args of_node %pK hw_mgr %pK",
+			of_node, hw_mgr_intf);
+		return -EINVAL;
+	}
+
+	/* Publish the jpeg hw manager entry points to the caller */
+	memset(hw_mgr_hdl, 0x0, sizeof(struct cam_hw_mgr_intf));
+	hw_mgr_intf->hw_mgr_priv = &g_jpeg_hw_mgr;
+	hw_mgr_intf->hw_get_caps = cam_jpeg_mgr_get_hw_caps;
+	hw_mgr_intf->hw_acquire = cam_jpeg_mgr_acquire_hw;
+	hw_mgr_intf->hw_release = cam_jpeg_mgr_release_hw;
+	hw_mgr_intf->hw_prepare_update = cam_jpeg_mgr_prepare_hw_update;
+	hw_mgr_intf->hw_config = cam_jpeg_mgr_config_hw;
+
+	mutex_init(&g_jpeg_hw_mgr.hw_mgr_mutex);
+	spin_lock_init(&g_jpeg_hw_mgr.hw_mgr_lock);
+
+	for (i = 0; i < CAM_JPEG_CTX_MAX; i++)
+		mutex_init(&g_jpeg_hw_mgr.ctx_data[i].ctx_mutex);
+
+	rc = cam_jpeg_init_devices(of_node, &num_dev, &num_dma_dev);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "jpeg init devices %d", rc);
+		goto smmu_get_failed;
+	}
+
+	rc = cam_smmu_get_handle("jpeg", &g_jpeg_hw_mgr.iommu_hdl);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "jpeg get iommu handle failed %d", rc);
+		goto smmu_get_failed;
+	}
+
+	CAM_DBG(CAM_JPEG, "mmu handle :%d", g_jpeg_hw_mgr.iommu_hdl);
+	rc = cam_smmu_ops(g_jpeg_hw_mgr.iommu_hdl, CAM_SMMU_ATTACH);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "jpeg attach failed: %d", rc);
+		/* NOTE(review): this path never calls
+		 * cam_smmu_destroy_handle() on the handle obtained above,
+		 * so an attach failure leaks the smmu handle — verify.
+		 */
+		goto jpeg_attach_failed;
+	}
+
+	/* CDM iommu failure is non-fatal to reach here, but is treated as
+	 * fatal: handles are marked invalid and init is unwound.
+	 */
+	rc = cam_cdm_get_iommu_handle("jpegenc", &cdm_handles);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "acquire cdm iommu handle Fail  %d", rc);
+		g_jpeg_hw_mgr.cdm_iommu_hdl = -1;
+		g_jpeg_hw_mgr.cdm_iommu_hdl_secure = -1;
+		goto cdm_iommu_failed;
+	}
+	g_jpeg_hw_mgr.cdm_iommu_hdl = cdm_handles.non_secure;
+	g_jpeg_hw_mgr.cdm_iommu_hdl_secure = cdm_handles.secure;
+
+	/* Cache the capability block returned by hw_get_caps */
+	g_jpeg_hw_mgr.jpeg_caps.dev_iommu_handle.non_secure =
+		g_jpeg_hw_mgr.iommu_hdl;
+	g_jpeg_hw_mgr.jpeg_caps.dev_iommu_handle.secure =
+		g_jpeg_hw_mgr.iommu_sec_hdl;
+	g_jpeg_hw_mgr.jpeg_caps.cdm_iommu_handle.non_secure =
+		g_jpeg_hw_mgr.cdm_iommu_hdl;
+	g_jpeg_hw_mgr.jpeg_caps.cdm_iommu_handle.secure =
+		g_jpeg_hw_mgr.cdm_iommu_hdl_secure;
+	g_jpeg_hw_mgr.jpeg_caps.num_enc = num_dev;
+	g_jpeg_hw_mgr.jpeg_caps.num_dma = num_dma_dev;
+	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_ENC].hw_ver.major = 4;
+	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_ENC].hw_ver.minor = 2;
+	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_ENC].hw_ver.incr = 0;
+	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_ENC].hw_ver.reserved = 0;
+	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_DMA].hw_ver.major = 4;
+	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_DMA].hw_ver.minor = 2;
+	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_DMA].hw_ver.incr = 0;
+	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_DMA].hw_ver.reserved = 0;
+
+	rc = cam_jpeg_setup_workqs();
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "setup work qs failed %d", rc);
+		goto cdm_iommu_failed;
+	}
+
+	return rc;
+
+cdm_iommu_failed:
+	cam_smmu_ops(g_jpeg_hw_mgr.iommu_hdl, CAM_SMMU_DETACH);
+	cam_smmu_destroy_handle(g_jpeg_hw_mgr.iommu_hdl);
+jpeg_attach_failed:
+	g_jpeg_hw_mgr.iommu_hdl = 0;
+smmu_get_failed:
+	mutex_destroy(&g_jpeg_hw_mgr.hw_mgr_mutex);
+	for (i = 0; i < CAM_JPEG_CTX_MAX; i++)
+		mutex_destroy(&g_jpeg_hw_mgr.ctx_data[i].ctx_mutex);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h
new file mode 100644
index 0000000..9e3418d
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h
@@ -0,0 +1,164 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_JPEG_HW_MGR_H
+#define CAM_JPEG_HW_MGR_H
+
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <media/cam_jpeg.h>
+
+#include "cam_jpeg_hw_intf.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_hw_intf.h"
+#include "cam_req_mgr_workq.h"
+#include "cam_mem_mgr.h"
+
+#define CAM_JPEG_WORKQ_NUM_TASK 30
+#define CAM_JPEG_WORKQ_TASK_CMD_TYPE 1
+#define CAM_JPEG_WORKQ_TASK_MSG_TYPE 2
+#define CAM_JPEG_HW_CFG_Q_MAX 50
+
+/**
+ * struct cam_jpeg_process_frame_work_data_t - payload of one frame workq task
+ *
+ * @type: Task type (CAM_JPEG_WORKQ_TASK_CMD_TYPE)
+ * @data: Dev type of the request, cast to a pointer by the enqueuer
+ * @request_id: Request id
+ */
+struct cam_jpeg_process_frame_work_data_t {
+	uint32_t type;
+	void *data;
+	uint64_t request_id;
+};
+
+/**
+ * struct cam_jpeg_process_irq_work_data_t - payload of one irq workq task
+ *
+ * @type: Task type (CAM_JPEG_WORKQ_TASK_MSG_TYPE)
+ * @data: Pointer to message data
+ * @result_size: Result size of enc/dma
+ * @irq_status: IRQ status reported by the core
+ */
+struct cam_jpeg_process_irq_work_data_t {
+	uint32_t type;
+	void *data;
+	int32_t result_size;
+	uint32_t irq_status;
+};
+
+/**
+ * struct cam_jpeg_hw_cdm_info_t - per-dev-type CDM bookkeeping
+ *
+ * @ref_cnt: Number of contexts currently holding this CDM; the handle
+ *           is acquired on 0->1 and released on 1->0
+ * @cdm_handle: Cdm handle returned by cam_cdm_acquire
+ * @cdm_ops: Cdm utility ops (size/write helpers) for this handle
+ */
+struct cam_jpeg_hw_cdm_info_t {
+	int ref_cnt;
+	uint32_t cdm_handle;
+	struct cam_cdm_utils_ops *cdm_ops;
+};
+
+/**
+ * struct cam_jpeg_hw_cfg_req - one queued hw config request
+ *
+ * @list: List node; sits on either free_req_list or hw_config_req_list
+ *        of the hw manager
+ * @hw_cfg_args: Hw config args copied from the config call
+ * @dev_type: Dev type (enc/dma) for cfg request
+ */
+struct cam_jpeg_hw_cfg_req {
+	struct list_head list;
+	struct cam_hw_config_args hw_cfg_args;
+	uint32_t dev_type;
+};
+
+/**
+ * struct cam_jpeg_hw_ctx_data - per-client jpeg context
+ *
+ * @context_priv: Context private data, cam_context from
+ *                acquire.
+ * @ctx_mutex: Mutex protecting this context's fields
+ * @jpeg_dev_acquire_info: Acquire device info copied from userspace
+ * @ctxt_event_cb: Context callback function invoked on buf done
+ * @in_use: Flag for context usage (slot allocated/free)
+ * @wait_complete: Completion info
+ * @cdm_cmd: Cdm cmd submitted for that context.
+ * @cdm_cmd_chbase: Change base cdm command from context
+ * @cmd_chbase_buf_addr : Change base cmd buf address
+ */
+struct cam_jpeg_hw_ctx_data {
+	void *context_priv;
+	struct mutex ctx_mutex;
+	struct cam_jpeg_acquire_dev_info jpeg_dev_acquire_info;
+	cam_hw_event_cb_func ctxt_event_cb;
+	bool in_use;
+	struct completion wait_complete;
+	struct cam_cdm_bl_request *cdm_cmd;
+	struct cam_cdm_bl_request *cdm_cmd_chbase;
+	uint32_t *cmd_chbase_buf_addr;
+};
+
+/**
+ * struct cam_jpeg_hw_mgr - global state of the jpeg hw manager
+ * @hw_mgr_mutex: Mutex for JPEG hardware manager; guards ctx slots and
+ *                the request lists
+ * @hw_mgr_lock: Spinlock for JPEG hardware manager
+ * @ctx_data: Context data
+ * @jpeg_caps: JPEG capabilities reported via hw_get_caps
+ * @iommu_hdl: Non secure IOMMU handle
+ * @iommu_sec_hdl: Secure IOMMU handle
+ * @work_process_frame: Work queue for hw config requests
+ * @work_process_irq_cb: Work queue for processing IRQs.
+ * @process_frame_work_data: Work data pool for hw config
+ *                           requests
+ * @process_irq_cb_work_data: Work data pool for irq requests
+ * @cdm_iommu_hdl: Iommu handle received from cdm
+ * @cdm_iommu_hdl_secure: Secure iommu handle received from cdm
+ * @devices: Core hw Devices of JPEG hardware manager
+ * @cdm_info: Cdm info (handle, ops, refcount) for each core device.
+ * @cdm_reg_map: Regmap of each device for cdm.
+ * @device_in_use: Flag device being used for an active request
+ * @dev_hw_cfg_args: Current cfg request per core dev
+ * @hw_config_req_list: Pending hw update requests list
+ * @free_req_list: Free nodes for above list
+ * @req_list: Nodes of hw update list
+ */
+struct cam_jpeg_hw_mgr {
+	struct mutex hw_mgr_mutex;
+	spinlock_t hw_mgr_lock;
+	struct cam_jpeg_hw_ctx_data ctx_data[CAM_JPEG_CTX_MAX];
+	struct cam_jpeg_query_cap_cmd jpeg_caps;
+	int32_t iommu_hdl;
+	int32_t iommu_sec_hdl;
+	struct cam_req_mgr_core_workq *work_process_frame;
+	struct cam_req_mgr_core_workq *work_process_irq_cb;
+	struct cam_jpeg_process_frame_work_data_t *process_frame_work_data;
+	struct cam_jpeg_process_irq_work_data_t *process_irq_cb_work_data;
+	int cdm_iommu_hdl;
+	int cdm_iommu_hdl_secure;
+
+	struct cam_hw_intf **devices[CAM_JPEG_DEV_TYPE_MAX];
+	struct cam_jpeg_hw_cdm_info_t cdm_info[CAM_JPEG_DEV_TYPE_MAX]
+		[CAM_JPEG_NUM_DEV_PER_RES_MAX];
+	struct cam_soc_reg_map *cdm_reg_map[CAM_JPEG_DEV_TYPE_MAX]
+		[CAM_JPEG_NUM_DEV_PER_RES_MAX];
+	uint32_t device_in_use[CAM_JPEG_DEV_TYPE_MAX]
+		[CAM_JPEG_NUM_DEV_PER_RES_MAX];
+	struct cam_jpeg_hw_cfg_req *dev_hw_cfg_args[CAM_JPEG_DEV_TYPE_MAX]
+		[CAM_JPEG_NUM_DEV_PER_RES_MAX];
+
+	struct list_head hw_config_req_list;
+	struct list_head free_req_list;
+	struct cam_jpeg_hw_cfg_req req_list[CAM_JPEG_HW_CFG_Q_MAX];
+};
+
+#endif /* CAM_JPEG_HW_MGR_H */
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_dma_hw_intf.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_dma_hw_intf.h
new file mode 100644
index 0000000..71b21b9
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_dma_hw_intf.h
@@ -0,0 +1,28 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_JPEG_DMA_HW_INTF_H
+#define CAM_JPEG_DMA_HW_INTF_H
+
+#include <uapi/media/cam_defs.h>
+#include <media/cam_jpeg.h>
+
+#include "cam_hw_mgr_intf.h"
+#include "cam_jpeg_hw_intf.h"
+
+/* Process-cmd opcodes accepted by the jpeg dma core device */
+enum cam_jpeg_dma_cmd_type {
+	CAM_JPEG_DMA_CMD_CDM_CFG,
+	CAM_JPEG_DMA_CMD_SET_IRQ_CB,
+	CAM_JPEG_DMA_CMD_MAX,
+};
+
+#endif /* CAM_JPEG_DMA_HW_INTF_H */
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_enc_hw_intf.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_enc_hw_intf.h
new file mode 100644
index 0000000..f0b4e00
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_enc_hw_intf.h
@@ -0,0 +1,28 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_JPEG_ENC_HW_INTF_H
+#define CAM_JPEG_ENC_HW_INTF_H
+
+#include <uapi/media/cam_defs.h>
+#include <media/cam_jpeg.h>
+
+#include "cam_hw_mgr_intf.h"
+#include "cam_jpeg_hw_intf.h"
+
+/* Process-cmd opcodes accepted by the jpeg encoder core device */
+enum cam_jpeg_enc_cmd_type {
+	CAM_JPEG_ENC_CMD_CDM_CFG,
+	CAM_JPEG_ENC_CMD_SET_IRQ_CB,
+	CAM_JPEG_ENC_CMD_MAX,
+};
+
+#endif /* CAM_JPEG_ENC_HW_INTF_H */
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_intf.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_intf.h
new file mode 100644
index 0000000..3204388
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_intf.h
@@ -0,0 +1,26 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_JPEG_HW_INTF_H
+#define CAM_JPEG_HW_INTF_H
+
+#define CAM_JPEG_CTX_MAX 8
+#define CAM_JPEG_DEV_PER_TYPE_MAX 1
+
+#define CAM_JPEG_CMD_BUF_MAX_SIZE 128
+#define CAM_JPEG_MSG_BUF_MAX_SIZE CAM_JPEG_CMD_BUF_MAX_SIZE
+
+enum cam_jpeg_hw_type {
+ CAM_JPEG_DEV_ENC,
+ CAM_JPEG_DEV_DMA,
+};
+#endif
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_mgr_intf.h
new file mode 100644
index 0000000..d5c8c9d
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_mgr_intf.h
@@ -0,0 +1,50 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_JPEG_HW_MGR_INTF_H
+#define CAM_JPEG_HW_MGR_INTF_H
+
+#include <uapi/media/cam_jpeg.h>
+#include <uapi/media/cam_defs.h>
+#include <linux/of.h>
+
+#include "cam_cpas_api.h"
+
+#define JPEG_TURBO_VOTE 640000000
+
+int cam_jpeg_hw_mgr_init(struct device_node *of_node,
+ uint64_t *hw_mgr_hdl);
+
+/**
+ * struct cam_jpeg_cpas_vote
+ * @ahb_vote: AHB vote info
+ * @axi_vote: AXI vote info
+ * @ahb_vote_valid: Flag for ahb vote data
+ * @axi_vote_valid: Flag for axi vote data
+ */
+struct cam_jpeg_cpas_vote {
+ struct cam_ahb_vote ahb_vote;
+ struct cam_axi_vote axi_vote;
+ uint32_t ahb_vote_valid;
+ uint32_t axi_vote_valid;
+};
+
+struct cam_jpeg_set_irq_cb {
+ int32_t (*jpeg_hw_mgr_cb)(
+ uint32_t irq_status,
+ int32_t result_size,
+ void *data);
+ void *data;
+ uint32_t b_set_cb;
+};
+
+#endif /* CAM_JPEG_HW_MGR_INTF_H */
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/Makefile b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/Makefile
new file mode 100644
index 0000000..23b27bf
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/Makefile
@@ -0,0 +1,11 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw
+
+obj-$(CONFIG_SPECTRA_CAMERA) += jpeg_dma_dev.o jpeg_dma_core.o jpeg_dma_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.c
new file mode 100644
index 0000000..05c1a95
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.c
@@ -0,0 +1,165 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+
+#include "cam_io_util.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "jpeg_dma_core.h"
+#include "jpeg_dma_soc.h"
+#include "cam_soc_util.h"
+#include "cam_io_util.h"
+#include "cam_dma_hw_intf.h"
+#include "cam_jpeg_hw_intf.h"
+#include "cam_jpeg_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+
+int cam_jpeg_dma_init_hw(void *device_priv,
+ void *init_hw_args, uint32_t arg_size)
+{
+ struct cam_hw_info *jpeg_dma_dev = device_priv;
+ struct cam_hw_soc_info *soc_info = NULL;
+ struct cam_jpeg_dma_device_core_info *core_info = NULL;
+ struct cam_jpeg_cpas_vote cpas_vote;
+ int rc;
+
+ if (!device_priv) {
+ CAM_ERR(CAM_JPEG, "Invalid cam_dev_info");
+ return -EINVAL;
+ }
+
+ soc_info = &jpeg_dma_dev->soc_info;
+ core_info =
+ (struct cam_jpeg_dma_device_core_info *)jpeg_dma_dev->
+ core_info;
+
+ if (!soc_info || !core_info) {
+ CAM_ERR(CAM_JPEG, "soc_info = %pK core_info = %pK",
+ soc_info, core_info);
+ return -EINVAL;
+ }
+
+ cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE;
+ cpas_vote.ahb_vote.vote.level = CAM_SVS_VOTE;
+ cpas_vote.axi_vote.compressed_bw = JPEG_TURBO_VOTE;
+ cpas_vote.axi_vote.uncompressed_bw = JPEG_TURBO_VOTE;
+
+ rc = cam_cpas_start(core_info->cpas_handle,
+ &cpas_vote.ahb_vote, &cpas_vote.axi_vote);
+ if (rc)
+		CAM_ERR(CAM_JPEG, "cpas start failed: %d", rc);
+
+ rc = cam_jpeg_dma_enable_soc_resources(soc_info);
+ if (rc) {
+ CAM_ERR(CAM_JPEG, "soc enable is failed %d", rc);
+ cam_cpas_stop(core_info->cpas_handle);
+ }
+
+ return rc;
+}
+
+int cam_jpeg_dma_deinit_hw(void *device_priv,
+ void *init_hw_args, uint32_t arg_size)
+{
+ struct cam_hw_info *jpeg_dma_dev = device_priv;
+ struct cam_hw_soc_info *soc_info = NULL;
+ struct cam_jpeg_dma_device_core_info *core_info = NULL;
+ int rc;
+
+ if (!device_priv) {
+ CAM_ERR(CAM_JPEG, "Invalid cam_dev_info");
+ return -EINVAL;
+ }
+
+ soc_info = &jpeg_dma_dev->soc_info;
+ core_info = (struct cam_jpeg_dma_device_core_info *)
+ jpeg_dma_dev->core_info;
+ if (!soc_info || !core_info) {
+ CAM_ERR(CAM_JPEG, "soc_info = %pK core_info = %pK",
+ soc_info, core_info);
+ return -EINVAL;
+ }
+
+ rc = cam_jpeg_dma_disable_soc_resources(soc_info);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "soc disable failed %d", rc);
+
+ rc = cam_cpas_stop(core_info->cpas_handle);
+ if (rc)
+ CAM_ERR(CAM_JPEG, "cpas stop failed: %d", rc);
+
+ return 0;
+}
+
+int cam_jpeg_dma_process_cmd(void *device_priv, uint32_t cmd_type,
+ void *cmd_args, uint32_t arg_size)
+{
+ struct cam_hw_info *jpeg_dma_dev = device_priv;
+ struct cam_jpeg_dma_device_core_info *core_info = NULL;
+ int rc;
+
+ if (!device_priv) {
+ CAM_ERR(CAM_JPEG, "Invalid arguments");
+ return -EINVAL;
+ }
+
+ if (cmd_type >= CAM_JPEG_DMA_CMD_MAX) {
+ CAM_ERR(CAM_JPEG, "Invalid command : %x", cmd_type);
+ return -EINVAL;
+ }
+
+ core_info =
+ (struct cam_jpeg_dma_device_core_info *)jpeg_dma_dev->
+ core_info;
+
+ switch (cmd_type) {
+ case CAM_JPEG_DMA_CMD_SET_IRQ_CB:
+ {
+ struct cam_jpeg_set_irq_cb *irq_cb = cmd_args;
+
+ if (!cmd_args) {
+ CAM_ERR(CAM_JPEG, "cmd args NULL");
+ return -EINVAL;
+ }
+ if (irq_cb->b_set_cb) {
+ core_info->irq_cb.jpeg_hw_mgr_cb =
+ irq_cb->jpeg_hw_mgr_cb;
+ core_info->irq_cb.data = irq_cb->data;
+ } else {
+ core_info->irq_cb.jpeg_hw_mgr_cb = NULL;
+ core_info->irq_cb.data = NULL;
+ }
+ rc = 0;
+ break;
+ }
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+irqreturn_t cam_jpeg_dma_irq(int irq_num, void *data)
+{
+ return IRQ_HANDLED;
+}
+
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.h
new file mode 100644
index 0000000..bb4e34a
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.h
@@ -0,0 +1,54 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_JPEG_DMA_CORE_H
+#define CAM_JPEG_DMA_CORE_H
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-buf.h>
+
+struct cam_jpeg_dma_device_hw_info {
+ uint32_t reserved;
+};
+
+struct cam_jpeg_dma_set_irq_cb {
+ int32_t (*jpeg_hw_mgr_cb)(uint32_t irq_status,
+ int32_t result_size, void *data);
+ void *data;
+};
+
+enum cam_jpeg_dma_core_state {
+ CAM_JPEG_DMA_CORE_NOT_READY,
+ CAM_JPEG_DMA_CORE_READY,
+ CAM_JPEG_DMA_CORE_RESETTING,
+ CAM_JPEG_DMA_CORE_STATE_MAX,
+};
+
+struct cam_jpeg_dma_device_core_info {
+ enum cam_jpeg_dma_core_state core_state;
+ struct cam_jpeg_dma_device_hw_info *jpeg_dma_hw_info;
+ uint32_t cpas_handle;
+ struct cam_jpeg_dma_set_irq_cb irq_cb;
+};
+
+int cam_jpeg_dma_init_hw(void *device_priv,
+ void *init_hw_args, uint32_t arg_size);
+int cam_jpeg_dma_deinit_hw(void *device_priv,
+ void *init_hw_args, uint32_t arg_size);
+int cam_jpeg_dma_process_cmd(void *device_priv, uint32_t cmd_type,
+ void *cmd_args, uint32_t arg_size);
+irqreturn_t cam_jpeg_dma_irq(int irq_num, void *data);
+
+#endif /* CAM_JPEG_DMA_CORE_H */
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_dev.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_dev.c
new file mode 100644
index 0000000..829bb51
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_dev.c
@@ -0,0 +1,233 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
+#include <linux/timer.h>
+
+#include "jpeg_dma_core.h"
+#include "jpeg_dma_soc.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_io_util.h"
+#include "cam_jpeg_hw_intf.h"
+#include "cam_jpeg_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+
+static struct cam_jpeg_dma_device_hw_info cam_jpeg_dma_hw_info = {
+	.reserved = 0,
+};
+/* file-local hw info table; a static symbol must not be EXPORT_SYMBOL'd */
+
+static int cam_jpeg_dma_register_cpas(struct cam_hw_soc_info *soc_info,
+ struct cam_jpeg_dma_device_core_info *core_info,
+ uint32_t hw_idx)
+{
+ struct cam_cpas_register_params cpas_register_params;
+ int rc;
+
+ cpas_register_params.dev = &soc_info->pdev->dev;
+ memcpy(cpas_register_params.identifier, "jpeg-dma",
+ sizeof("jpeg-dma"));
+ cpas_register_params.cam_cpas_client_cb = NULL;
+ cpas_register_params.cell_index = hw_idx;
+ cpas_register_params.userdata = NULL;
+
+ rc = cam_cpas_register_client(&cpas_register_params);
+ if (rc) {
+ CAM_ERR(CAM_JPEG, "cpas_register failed: %d", rc);
+ return rc;
+ }
+ core_info->cpas_handle = cpas_register_params.client_handle;
+
+ return rc;
+}
+
+static int cam_jpeg_dma_unregister_cpas(
+ struct cam_jpeg_dma_device_core_info *core_info)
+{
+ int rc;
+
+ rc = cam_cpas_unregister_client(core_info->cpas_handle);
+ if (rc)
+ CAM_ERR(CAM_JPEG, "cpas unregister failed: %d", rc);
+ core_info->cpas_handle = 0;
+
+ return rc;
+}
+
+static int cam_jpeg_dma_remove(struct platform_device *pdev)
+{
+ struct cam_hw_info *jpeg_dma_dev = NULL;
+ struct cam_hw_intf *jpeg_dma_dev_intf = NULL;
+ struct cam_jpeg_dma_device_core_info *core_info = NULL;
+ int rc;
+
+ jpeg_dma_dev_intf = platform_get_drvdata(pdev);
+ if (!jpeg_dma_dev_intf) {
+ CAM_ERR(CAM_JPEG, "error No data in pdev");
+ return -EINVAL;
+ }
+
+ jpeg_dma_dev = jpeg_dma_dev_intf->hw_priv;
+ if (!jpeg_dma_dev) {
+ CAM_ERR(CAM_JPEG, "error HW data is NULL");
+ rc = -ENODEV;
+ goto free_jpeg_hw_intf;
+ }
+
+ core_info = (struct cam_jpeg_dma_device_core_info *)
+ jpeg_dma_dev->core_info;
+ if (!core_info) {
+ CAM_ERR(CAM_JPEG, "error core data NULL");
+ goto deinit_soc;
+ }
+
+ rc = cam_jpeg_dma_unregister_cpas(core_info);
+ if (rc)
+ CAM_ERR(CAM_JPEG, " unreg failed to reg cpas %d", rc);
+
+ kfree(core_info);
+
+deinit_soc:
+ rc = cam_soc_util_release_platform_resource(&jpeg_dma_dev->soc_info);
+ if (rc)
+ CAM_ERR(CAM_JPEG, "Failed to deinit soc rc=%d", rc);
+
+ mutex_destroy(&jpeg_dma_dev->hw_mutex);
+ kfree(jpeg_dma_dev);
+
+free_jpeg_hw_intf:
+ kfree(jpeg_dma_dev_intf);
+ return rc;
+}
+
+static int cam_jpeg_dma_probe(struct platform_device *pdev)
+{
+ struct cam_hw_info *jpeg_dma_dev = NULL;
+ struct cam_hw_intf *jpeg_dma_dev_intf = NULL;
+ const struct of_device_id *match_dev = NULL;
+ struct cam_jpeg_dma_device_core_info *core_info = NULL;
+ struct cam_jpeg_dma_device_hw_info *hw_info = NULL;
+ int rc;
+
+ jpeg_dma_dev_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+ if (!jpeg_dma_dev_intf)
+ return -ENOMEM;
+
+ of_property_read_u32(pdev->dev.of_node,
+ "cell-index", &jpeg_dma_dev_intf->hw_idx);
+
+ jpeg_dma_dev = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+ if (!jpeg_dma_dev) {
+ rc = -ENOMEM;
+ goto error_alloc_dev;
+ }
+ jpeg_dma_dev->soc_info.pdev = pdev;
+ jpeg_dma_dev_intf->hw_priv = jpeg_dma_dev;
+ jpeg_dma_dev_intf->hw_ops.init = cam_jpeg_dma_init_hw;
+ jpeg_dma_dev_intf->hw_ops.deinit = cam_jpeg_dma_deinit_hw;
+ jpeg_dma_dev_intf->hw_ops.process_cmd = cam_jpeg_dma_process_cmd;
+ jpeg_dma_dev_intf->hw_type = CAM_JPEG_DEV_DMA;
+
+ platform_set_drvdata(pdev, jpeg_dma_dev_intf);
+ jpeg_dma_dev->core_info =
+ kzalloc(sizeof(struct cam_jpeg_dma_device_core_info),
+ GFP_KERNEL);
+ if (!jpeg_dma_dev->core_info) {
+ rc = -ENOMEM;
+ goto error_alloc_core;
+ }
+ core_info = (struct cam_jpeg_dma_device_core_info *)jpeg_dma_dev->
+ core_info;
+
+ match_dev = of_match_device(pdev->dev.driver->of_match_table,
+ &pdev->dev);
+ if (!match_dev) {
+ CAM_ERR(CAM_JPEG, " No jpeg_dma hardware info");
+ rc = -EINVAL;
+ goto error_match_dev;
+ }
+ hw_info = (struct cam_jpeg_dma_device_hw_info *)match_dev->data;
+ core_info->jpeg_dma_hw_info = hw_info;
+ core_info->core_state = CAM_JPEG_DMA_CORE_NOT_READY;
+
+ rc = cam_jpeg_dma_init_soc_resources(&jpeg_dma_dev->soc_info,
+ cam_jpeg_dma_irq,
+ jpeg_dma_dev);
+ if (rc) {
+		CAM_ERR(CAM_JPEG, "failed to init_soc %d", rc);
+ goto error_match_dev;
+ }
+
+ rc = cam_jpeg_dma_register_cpas(&jpeg_dma_dev->soc_info,
+ core_info, jpeg_dma_dev_intf->hw_idx);
+ if (rc) {
+ CAM_ERR(CAM_JPEG, " failed to reg cpas %d", rc);
+ goto error_reg_cpas;
+ }
+ jpeg_dma_dev->hw_state = CAM_HW_STATE_POWER_DOWN;
+ mutex_init(&jpeg_dma_dev->hw_mutex);
+ spin_lock_init(&jpeg_dma_dev->hw_lock);
+ init_completion(&jpeg_dma_dev->hw_complete);
+
+ CAM_DBG(CAM_JPEG, " hwidx %d", jpeg_dma_dev_intf->hw_idx);
+
+ return rc;
+
+error_reg_cpas:
+	(void)cam_soc_util_release_platform_resource(&jpeg_dma_dev->soc_info);
+error_match_dev:
+ kfree(jpeg_dma_dev->core_info);
+error_alloc_core:
+ kfree(jpeg_dma_dev);
+error_alloc_dev:
+ kfree(jpeg_dma_dev_intf);
+ return rc;
+}
+
+static const struct of_device_id cam_jpeg_dma_dt_match[] = {
+ {
+ .compatible = "qcom,cam_jpeg_dma",
+ .data = &cam_jpeg_dma_hw_info,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, cam_jpeg_dma_dt_match);
+
+static struct platform_driver cam_jpeg_dma_driver = {
+ .probe = cam_jpeg_dma_probe,
+ .remove = cam_jpeg_dma_remove,
+ .driver = {
+ .name = "cam-jpeg-dma",
+ .owner = THIS_MODULE,
+ .of_match_table = cam_jpeg_dma_dt_match,
+ },
+};
+
+static int __init cam_jpeg_dma_init_module(void)
+{
+ return platform_driver_register(&cam_jpeg_dma_driver);
+}
+
+static void __exit cam_jpeg_dma_exit_module(void)
+{
+ platform_driver_unregister(&cam_jpeg_dma_driver);
+}
+
+module_init(cam_jpeg_dma_init_module);
+module_exit(cam_jpeg_dma_exit_module);
+MODULE_DESCRIPTION("CAM JPEG_DMA driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.c
new file mode 100644
index 0000000..efc161b
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.c
@@ -0,0 +1,63 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-buf.h>
+#include <media/cam_defs.h>
+#include <media/cam_jpeg.h>
+
+#include "jpeg_dma_soc.h"
+#include "cam_soc_util.h"
+#include "cam_debug_util.h"
+
+int cam_jpeg_dma_init_soc_resources(struct cam_hw_soc_info *soc_info,
+ irq_handler_t jpeg_dma_irq_handler, void *irq_data)
+{
+ int rc;
+
+ rc = cam_soc_util_get_dt_properties(soc_info);
+ if (rc)
+ return rc;
+
+ rc = cam_soc_util_request_platform_resource(soc_info,
+ jpeg_dma_irq_handler,
+ irq_data);
+ if (rc)
+ CAM_ERR(CAM_JPEG, "init soc failed %d", rc);
+
+ return rc;
+}
+
+int cam_jpeg_dma_enable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+ int rc;
+
+ rc = cam_soc_util_enable_platform_resource(soc_info, true,
+ CAM_SVS_VOTE, true);
+ if (rc)
+ CAM_ERR(CAM_JPEG, "enable platform failed %d", rc);
+
+ return rc;
+}
+
+int cam_jpeg_dma_disable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+ int rc;
+
+ rc = cam_soc_util_disable_platform_resource(soc_info, true, false);
+ if (rc)
+ CAM_ERR(CAM_JPEG, "disable platform failed %d", rc);
+
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.h
new file mode 100644
index 0000000..bc9bed8
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_JPEG_DMA_SOC_H_
+#define _CAM_JPEG_DMA_SOC_H_
+
+#include "cam_soc_util.h"
+
+int cam_jpeg_dma_init_soc_resources(struct cam_hw_soc_info *soc_info,
+ irq_handler_t jpeg_dma_irq_handler, void *irq_data);
+
+int cam_jpeg_dma_enable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+int cam_jpeg_dma_disable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+#endif /* _CAM_JPEG_DMA_SOC_H_*/
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/Makefile b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/Makefile
new file mode 100644
index 0000000..b046a7f
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/Makefile
@@ -0,0 +1,11 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw
+
+obj-$(CONFIG_SPECTRA_CAMERA) += jpeg_enc_dev.o jpeg_enc_core.o jpeg_enc_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.c
new file mode 100644
index 0000000..25405cf
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.c
@@ -0,0 +1,348 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+
+#include "cam_io_util.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "jpeg_enc_core.h"
+#include "jpeg_enc_soc.h"
+#include "cam_soc_util.h"
+#include "cam_io_util.h"
+#include "cam_enc_hw_intf.h"
+#include "cam_jpeg_hw_intf.h"
+#include "cam_jpeg_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+
+#define CAM_JPEG_HW_IRQ_STATUS_FRAMEDONE_MASK 0x00000001
+#define CAM_JPEG_HW_IRQ_STATUS_FRAMEDONE_SHIFT 0x00000000
+
+#define CAM_JPEG_HW_IRQ_STATUS_RESET_ACK_MASK 0x10000000
+#define CAM_JPEG_HW_IRQ_STATUS_RESET_ACK_SHIFT 0x0000000a
+
+#define CAM_JPEG_HW_IRQ_STATUS_BUS_ERROR_MASK 0x00000800
+#define CAM_JPEG_HW_IRQ_STATUS_BUS_ERROR_SHIFT 0x0000000b
+
+#define CAM_JPEG_HW_IRQ_STATUS_DCD_UNESCAPED_FF (0x1<<19)
+#define CAM_JPEG_HW_IRQ_STATUS_DCD_HUFFMAN_ERROR (0x1<<20)
+#define CAM_JPEG_HW_IRQ_STATUS_DCD_COEFFICIENT_ERR (0x1<<21)
+#define CAM_JPEG_HW_IRQ_STATUS_DCD_MISSING_BIT_STUFF (0x1<<22)
+#define CAM_JPEG_HW_IRQ_STATUS_DCD_SCAN_UNDERFLOW (0x1<<23)
+#define CAM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM (0x1<<24)
+#define CAM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM_SEQ (0x1<<25)
+#define CAM_JPEG_HW_IRQ_STATUS_DCD_MISSING_RSM (0x1<<26)
+#define CAM_JPEG_HW_IRQ_STATUS_VIOLATION_MASK (0x1<<29)
+
+#define CAM_JPEG_HW_MASK_COMP_FRAMEDONE \
+ CAM_JPEG_HW_IRQ_STATUS_FRAMEDONE_MASK
+#define CAM_JPEG_HW_MASK_COMP_RESET_ACK \
+ CAM_JPEG_HW_IRQ_STATUS_RESET_ACK_MASK
+#define CAM_JPEG_HW_MASK_COMP_ERR \
+ (CAM_JPEG_HW_IRQ_STATUS_DCD_UNESCAPED_FF | \
+ CAM_JPEG_HW_IRQ_STATUS_DCD_HUFFMAN_ERROR | \
+ CAM_JPEG_HW_IRQ_STATUS_DCD_COEFFICIENT_ERR | \
+ CAM_JPEG_HW_IRQ_STATUS_DCD_MISSING_BIT_STUFF | \
+ CAM_JPEG_HW_IRQ_STATUS_DCD_SCAN_UNDERFLOW | \
+ CAM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM | \
+ CAM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM_SEQ | \
+ CAM_JPEG_HW_IRQ_STATUS_DCD_MISSING_RSM | \
+ CAM_JPEG_HW_IRQ_STATUS_VIOLATION_MASK)
+
+#define CAM_JPEG_HW_IRQ_IS_FRAME_DONE(jpeg_irq_status) \
+ (jpeg_irq_status & CAM_JPEG_HW_MASK_COMP_FRAMEDONE)
+#define CAM_JPEG_HW_IRQ_IS_RESET_ACK(jpeg_irq_status) \
+ (jpeg_irq_status & CAM_JPEG_HW_MASK_COMP_RESET_ACK)
+#define CAM_JPEG_HW_IRQ_IS_ERR(jpeg_irq_status) \
+ (jpeg_irq_status & CAM_JPEG_HW_MASK_COMP_ERR)
+
+#define CAM_JPEG_ENC_RESET_TIMEOUT msecs_to_jiffies(500)
+
+int cam_jpeg_enc_init_hw(void *device_priv,
+ void *init_hw_args, uint32_t arg_size)
+{
+ struct cam_hw_info *jpeg_enc_dev = device_priv;
+ struct cam_hw_soc_info *soc_info = NULL;
+ struct cam_jpeg_enc_device_core_info *core_info = NULL;
+ struct cam_jpeg_cpas_vote cpas_vote;
+ int rc;
+
+ if (!device_priv) {
+ CAM_ERR(CAM_JPEG, "Invalid cam_dev_info");
+ return -EINVAL;
+ }
+
+ soc_info = &jpeg_enc_dev->soc_info;
+ core_info =
+ (struct cam_jpeg_enc_device_core_info *)jpeg_enc_dev->
+ core_info;
+
+ if (!soc_info || !core_info) {
+ CAM_ERR(CAM_JPEG, "soc_info = %pK core_info = %pK",
+ soc_info, core_info);
+ return -EINVAL;
+ }
+
+ cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE;
+ cpas_vote.ahb_vote.vote.level = CAM_SVS_VOTE;
+ cpas_vote.axi_vote.compressed_bw = JPEG_TURBO_VOTE;
+ cpas_vote.axi_vote.uncompressed_bw = JPEG_TURBO_VOTE;
+
+ rc = cam_cpas_start(core_info->cpas_handle,
+ &cpas_vote.ahb_vote, &cpas_vote.axi_vote);
+ if (rc)
+		CAM_ERR(CAM_JPEG, "cpas start failed: %d", rc);
+
+ rc = cam_jpeg_enc_enable_soc_resources(soc_info);
+ if (rc) {
+ CAM_ERR(CAM_JPEG, "soc enable is failed %d", rc);
+ cam_cpas_stop(core_info->cpas_handle);
+ }
+
+ return rc;
+}
+
+int cam_jpeg_enc_deinit_hw(void *device_priv,
+ void *init_hw_args, uint32_t arg_size)
+{
+ struct cam_hw_info *jpeg_enc_dev = device_priv;
+ struct cam_hw_soc_info *soc_info = NULL;
+ struct cam_jpeg_enc_device_core_info *core_info = NULL;
+ int rc;
+
+ if (!device_priv) {
+ CAM_ERR(CAM_JPEG, "Invalid cam_dev_info");
+ return -EINVAL;
+ }
+
+ soc_info = &jpeg_enc_dev->soc_info;
+ core_info = (struct cam_jpeg_enc_device_core_info *)
+ jpeg_enc_dev->core_info;
+ if (!soc_info || !core_info) {
+ CAM_ERR(CAM_JPEG, "soc_info = %pK core_info = %pK",
+ soc_info, core_info);
+ return -EINVAL;
+ }
+
+ rc = cam_jpeg_enc_disable_soc_resources(soc_info);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "soc disable failed %d", rc);
+
+ rc = cam_cpas_stop(core_info->cpas_handle);
+ if (rc)
+ CAM_ERR(CAM_JPEG, "cpas stop failed: %d", rc);
+
+ return 0;
+}
+
+irqreturn_t cam_jpeg_enc_irq(int irq_num, void *data)
+{
+ struct cam_hw_info *jpeg_enc_dev = data;
+ struct cam_jpeg_enc_device_core_info *core_info = NULL;
+ uint32_t irq_status = 0;
+ uint32_t encoded_size = 0;
+ struct cam_hw_soc_info *soc_info = NULL;
+ struct cam_jpeg_enc_device_hw_info *hw_info = NULL;
+ void __iomem *mem_base;
+
+ if (!jpeg_enc_dev) {
+ CAM_ERR(CAM_JPEG, "Invalid args");
+ return IRQ_HANDLED;
+ }
+ soc_info = &jpeg_enc_dev->soc_info;
+ core_info =
+ (struct cam_jpeg_enc_device_core_info *)jpeg_enc_dev->
+ core_info;
+ hw_info = core_info->jpeg_enc_hw_info;
+ mem_base = soc_info->reg_map[0].mem_base;
+
+ irq_status = cam_io_r_mb(mem_base +
+ core_info->jpeg_enc_hw_info->int_status);
+
+ cam_io_w_mb(irq_status,
+ soc_info->reg_map[0].mem_base +
+ core_info->jpeg_enc_hw_info->int_clr);
+
+ CAM_DBG(CAM_JPEG, "irq_num %d irq_status = %x , core_state %d",
+ irq_num, irq_status, core_info->core_state);
+ if (CAM_JPEG_HW_IRQ_IS_FRAME_DONE(irq_status)) {
+ if (core_info->core_state == CAM_JPEG_ENC_CORE_READY) {
+ encoded_size = cam_io_r_mb(mem_base + 0x180);
+ if (core_info->irq_cb.jpeg_hw_mgr_cb) {
+ core_info->irq_cb.jpeg_hw_mgr_cb(irq_status,
+ encoded_size,
+ core_info->irq_cb.data);
+ } else {
+ CAM_ERR(CAM_JPEG, "unexpected done");
+ }
+ }
+
+ core_info->core_state = CAM_JPEG_ENC_CORE_NOT_READY;
+ }
+ if (CAM_JPEG_HW_IRQ_IS_RESET_ACK(irq_status)) {
+ if (core_info->core_state == CAM_JPEG_ENC_CORE_RESETTING) {
+ core_info->core_state = CAM_JPEG_ENC_CORE_READY;
+ complete(&jpeg_enc_dev->hw_complete);
+ } else {
+ CAM_ERR(CAM_JPEG, "unexpected reset irq");
+ }
+ }
+ /* Unexpected/unintended HW interrupt */
+ if (CAM_JPEG_HW_IRQ_IS_ERR(irq_status)) {
+ core_info->core_state = CAM_JPEG_ENC_CORE_NOT_READY;
+ CAM_ERR_RATE_LIMIT(CAM_JPEG,
+ "error irq_num %d irq_status = %x , core_state %d",
+ irq_num, irq_status, core_info->core_state);
+
+ if (core_info->irq_cb.jpeg_hw_mgr_cb) {
+ core_info->irq_cb.jpeg_hw_mgr_cb(irq_status,
+ -1,
+ core_info->irq_cb.data);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+int cam_jpeg_enc_reset_hw(void *data,
+ void *start_args, uint32_t arg_size)
+{
+ struct cam_hw_info *jpeg_enc_dev = data;
+ struct cam_jpeg_enc_device_core_info *core_info = NULL;
+ struct cam_hw_soc_info *soc_info = NULL;
+ struct cam_jpeg_enc_device_hw_info *hw_info = NULL;
+ void __iomem *mem_base;
+ unsigned long rem_jiffies;
+
+ if (!jpeg_enc_dev) {
+ CAM_ERR(CAM_JPEG, "Invalid args");
+ return -EINVAL;
+ }
+ /* maskdisable.clrirq.maskenable.resetcmd */
+ soc_info = &jpeg_enc_dev->soc_info;
+ core_info =
+ (struct cam_jpeg_enc_device_core_info *)jpeg_enc_dev->
+ core_info;
+ hw_info = core_info->jpeg_enc_hw_info;
+ mem_base = soc_info->reg_map[0].mem_base;
+
+ if (core_info->core_state == CAM_JPEG_ENC_CORE_RESETTING) {
+		CAM_ERR(CAM_JPEG, "already resetting");
+ return 0;
+ }
+
+ reinit_completion(&jpeg_enc_dev->hw_complete);
+
+ core_info->core_state = CAM_JPEG_ENC_CORE_RESETTING;
+
+ cam_io_w_mb(0x00000000, mem_base + hw_info->int_mask);
+ cam_io_w_mb(0xFFFFFFFF, mem_base + hw_info->int_clr);
+ cam_io_w_mb(0xFFFFFFFF, mem_base + hw_info->int_mask);
+ cam_io_w_mb(0x00032093, mem_base + hw_info->reset_cmd);
+
+ rem_jiffies = wait_for_completion_timeout(&jpeg_enc_dev->hw_complete,
+ CAM_JPEG_ENC_RESET_TIMEOUT);
+ if (!rem_jiffies) {
+ CAM_ERR(CAM_JPEG, "error Reset Timeout");
+ core_info->core_state = CAM_JPEG_ENC_CORE_NOT_READY;
+ }
+
+ return 0;
+}
+
+int cam_jpeg_enc_start_hw(void *data,
+ void *start_args, uint32_t arg_size)
+{
+ struct cam_hw_info *jpeg_enc_dev = data;
+ struct cam_jpeg_enc_device_core_info *core_info = NULL;
+ struct cam_hw_soc_info *soc_info = NULL;
+ struct cam_jpeg_enc_device_hw_info *hw_info = NULL;
+ void __iomem *mem_base;
+
+ if (!jpeg_enc_dev) {
+ CAM_ERR(CAM_JPEG, "Invalid args");
+ return -EINVAL;
+ }
+
+ soc_info = &jpeg_enc_dev->soc_info;
+ core_info = (struct cam_jpeg_enc_device_core_info *)
+ jpeg_enc_dev->core_info;
+ hw_info = core_info->jpeg_enc_hw_info;
+ mem_base = soc_info->reg_map[0].mem_base;
+
+ if (core_info->core_state != CAM_JPEG_ENC_CORE_READY) {
+ CAM_ERR(CAM_JPEG, "Error not ready");
+ return -EINVAL;
+ }
+
+ cam_io_w_mb(0x00000001, mem_base + 0x00000010);
+
+ return 0;
+}
+
+int cam_jpeg_enc_process_cmd(void *device_priv, uint32_t cmd_type,
+ void *cmd_args, uint32_t arg_size)
+{
+ struct cam_hw_info *jpeg_enc_dev = device_priv;
+ struct cam_jpeg_enc_device_core_info *core_info = NULL;
+ int rc;
+
+ if (!device_priv) {
+ CAM_ERR(CAM_JPEG, "Invalid arguments");
+ return -EINVAL;
+ }
+
+ if (cmd_type >= CAM_JPEG_ENC_CMD_MAX) {
+ CAM_ERR(CAM_JPEG, "Invalid command : %x", cmd_type);
+ return -EINVAL;
+ }
+
+ core_info =
+ (struct cam_jpeg_enc_device_core_info *)jpeg_enc_dev->
+ core_info;
+
+ switch (cmd_type) {
+ case CAM_JPEG_ENC_CMD_SET_IRQ_CB:
+ {
+ struct cam_jpeg_set_irq_cb *irq_cb = cmd_args;
+
+ if (!cmd_args) {
+ CAM_ERR(CAM_JPEG, "cmd args NULL");
+ return -EINVAL;
+ }
+ if (irq_cb->b_set_cb) {
+ core_info->irq_cb.jpeg_hw_mgr_cb =
+ irq_cb->jpeg_hw_mgr_cb;
+ core_info->irq_cb.data = irq_cb->data;
+ } else {
+ core_info->irq_cb.jpeg_hw_mgr_cb = NULL;
+ core_info->irq_cb.data = NULL;
+ }
+ rc = 0;
+ break;
+ }
+ default:
+ rc = -EINVAL;
+ break;
+ }
+ if (rc)
+ CAM_ERR(CAM_JPEG, "error cmdtype %d rc = %d", cmd_type, rc);
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.h
new file mode 100644
index 0000000..6ae4cdc
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.h
@@ -0,0 +1,62 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_JPEG_ENC_CORE_H
+#define CAM_JPEG_ENC_CORE_H
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-buf.h>
+
+struct cam_jpeg_enc_device_hw_info { /* per-variant MMIO register offsets, supplied via of_match_device data */
+ uint32_t hw_version; /* HW version register offset */
+ uint32_t int_status; /* interrupt status register offset */
+ uint32_t int_clr; /* interrupt clear register offset */
+ uint32_t int_mask; /* interrupt mask register offset */
+ uint32_t reset_cmd; /* reset command register offset */
+};
+
+struct cam_jpeg_enc_set_irq_cb { /* payload for installing/clearing the hw-mgr IRQ callback */
+ int32_t (*jpeg_hw_mgr_cb)(uint32_t irq_status,
+ int32_t result_size, void *data); /* presumably invoked from cam_jpeg_enc_irq -- confirm against the IRQ handler */
+ void *data; /* opaque cookie handed back to jpeg_hw_mgr_cb */
+};
+
+enum cam_jpeg_enc_core_state { /* lifecycle state of the encoder core */
+ CAM_JPEG_ENC_CORE_NOT_READY, /* powered down / not initialized (probe default) */
+ CAM_JPEG_ENC_CORE_READY, /* initialized and accepting work */
+ CAM_JPEG_ENC_CORE_RESETTING, /* reset in flight */
+ CAM_JPEG_ENC_CORE_STATE_MAX, /* sentinel, not a valid state */
+};
+
+struct cam_jpeg_enc_device_core_info { /* per-instance soft state hung off cam_hw_info::core_info */
+ enum cam_jpeg_enc_core_state core_state;
+ struct cam_jpeg_enc_device_hw_info *jpeg_enc_hw_info; /* points at static match data -- not owned, never freed */
+ uint32_t cpas_handle; /* CPAS client handle from cam_cpas_register_client() */
+ struct cam_jpeg_enc_set_irq_cb irq_cb; /* currently installed callback (see CAM_JPEG_ENC_CMD_SET_IRQ_CB) */
+};
+
+int cam_jpeg_enc_init_hw(void *device_priv,
+ void *init_hw_args, uint32_t arg_size); /* cam_hw_intf .init op */
+int cam_jpeg_enc_deinit_hw(void *device_priv,
+ void *init_hw_args, uint32_t arg_size); /* cam_hw_intf .deinit op */
+int cam_jpeg_enc_start_hw(void *device_priv,
+ void *start_hw_args, uint32_t arg_size); /* cam_hw_intf .start op */
+int cam_jpeg_enc_reset_hw(void *device_priv,
+ void *reset_hw_args, uint32_t arg_size); /* cam_hw_intf .reset op */
+int cam_jpeg_enc_process_cmd(void *device_priv, uint32_t cmd_type,
+ void *cmd_args, uint32_t arg_size); /* cam_hw_intf .process_cmd op */
+irqreturn_t cam_jpeg_enc_irq(int irq_num, void *data); /* top-half IRQ handler registered by the SOC layer */
+
+#endif /* CAM_JPEG_ENC_CORE_H */
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_dev.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_dev.c
new file mode 100644
index 0000000..5dd1e1f
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_dev.c
@@ -0,0 +1,238 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
+#include <linux/timer.h>
+
+#include "jpeg_enc_core.h"
+#include "jpeg_enc_soc.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_io_util.h"
+#include "cam_jpeg_hw_intf.h"
+#include "cam_jpeg_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+
+static struct cam_jpeg_enc_device_hw_info cam_jpeg_enc_hw_info = { /* register offsets for this encoder variant; handed out via dt match data */
+ .int_clr = 0x1c, /* interrupt clear register offset */
+ .int_status = 0x20, /* interrupt status register offset */
+ .int_mask = 0x18, /* interrupt mask register offset */
+ .reset_cmd = 0x8, /* reset command register offset */
+ .hw_version = 0x0, /* HW version register offset */
+};
+EXPORT_SYMBOL(cam_jpeg_enc_hw_info); /* NOTE(review): exporting a static (internal-linkage) symbol is invalid -- drop either 'static' or this EXPORT_SYMBOL; no other module can resolve it */
+
<br>
+static int cam_jpeg_enc_register_cpas(struct cam_hw_soc_info *soc_info, /* register this encoder instance as a CPAS client; stores the handle in core_info */
+ struct cam_jpeg_enc_device_core_info *core_info,
+ uint32_t hw_idx) /* cell-index of the HW instance, used as CPAS cell_index */
+{
+ struct cam_cpas_register_params cpas_register_params; /* NOTE(review): not zero-initialized -- fields other than the five set below are passed to CPAS as uninitialized stack; consider = {0} */
+ int rc;
+
+ cpas_register_params.dev = &soc_info->pdev->dev;
+ memcpy(cpas_register_params.identifier, "jpeg-enc",
+ sizeof("jpeg-enc")); /* copies the NUL too; assumes identifier[] >= 9 bytes -- confirm against cam_cpas_register_params */
+ cpas_register_params.cam_cpas_client_cb = NULL; /* no AXI/AHB callback needed */
+ cpas_register_params.cell_index = hw_idx;
+ cpas_register_params.userdata = NULL;
+
+ rc = cam_cpas_register_client(&cpas_register_params);
+ if (rc) {
+ CAM_ERR(CAM_JPEG, "cpas_register failed: %d", rc);
+ return rc;
+ }
+ core_info->cpas_handle = cpas_register_params.client_handle; /* handle filled in by CPAS on success */
+
+ return rc; /* rc == 0 here */
+}
+
+static int cam_jpeg_enc_unregister_cpas( /* drop the CPAS registration; handle is cleared even if unregister fails */
+ struct cam_jpeg_enc_device_core_info *core_info)
+{
+ int rc;
+
+ rc = cam_cpas_unregister_client(core_info->cpas_handle);
+ if (rc)
+ CAM_ERR(CAM_JPEG, "cpas unregister failed: %d", rc); /* best-effort: error is logged and propagated but teardown continues */
+ core_info->cpas_handle = 0; /* always invalidate so a later unregister cannot reuse a stale handle */
+
+ return rc;
+}
+
+static int cam_jpeg_enc_remove(struct platform_device *pdev) /* tear down everything cam_jpeg_enc_probe() set up, in reverse order */
+{
+ struct cam_hw_info *jpeg_enc_dev = NULL;
+ struct cam_hw_intf *jpeg_enc_dev_intf = NULL;
+ struct cam_jpeg_enc_device_core_info *core_info = NULL;
+ int rc;
+
+ jpeg_enc_dev_intf = platform_get_drvdata(pdev); /* set by probe via platform_set_drvdata() */
+ if (!jpeg_enc_dev_intf) {
+ CAM_ERR(CAM_JPEG, "error No data in pdev");
+ return -EINVAL;
+ }
+
+ jpeg_enc_dev = jpeg_enc_dev_intf->hw_priv;
+ if (!jpeg_enc_dev) {
+ CAM_ERR(CAM_JPEG, "error HW data is NULL");
+ rc = -ENODEV;
+ goto free_jpeg_hw_intf; /* only the intf wrapper exists; free just that */
+ }
+
+ core_info = (struct cam_jpeg_enc_device_core_info *)
+ jpeg_enc_dev->core_info;
+ if (!core_info) {
+ CAM_ERR(CAM_JPEG, "error core data NULL");
+ goto deinit_soc; /* skip CPAS unregister/kfree(core_info); rc is assigned below before use */
+ }
+
+ rc = cam_jpeg_enc_unregister_cpas(core_info);
+ if (rc)
+ CAM_ERR(CAM_JPEG, " unreg failed to reg cpas %d", rc);
+
+ kfree(core_info); /* jpeg_enc_dev->core_info now dangles, but jpeg_enc_dev itself is freed below */
+
+deinit_soc:
+ rc = cam_soc_util_release_platform_resource(&jpeg_enc_dev->soc_info);
+ if (rc)
+ CAM_ERR(CAM_JPEG, "Failed to deinit soc rc=%d", rc);
+
+ mutex_destroy(&jpeg_enc_dev->hw_mutex);
+ kfree(jpeg_enc_dev);
+
+free_jpeg_hw_intf:
+ kfree(jpeg_enc_dev_intf);
+ return rc; /* last failure (if any) from unregister/release path */
+}
+
+static int cam_jpeg_enc_probe(struct platform_device *pdev) /* allocate intf + hw_info + core_info, wire hw_ops, init SOC resources and CPAS */
+{
+ struct cam_hw_info *jpeg_enc_dev = NULL;
+ struct cam_hw_intf *jpeg_enc_dev_intf = NULL;
+ const struct of_device_id *match_dev = NULL;
+ struct cam_jpeg_enc_device_core_info *core_info = NULL;
+ struct cam_jpeg_enc_device_hw_info *hw_info = NULL;
+ int rc;
+
+ jpeg_enc_dev_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+ if (!jpeg_enc_dev_intf)
+ return -ENOMEM;
+
+ of_property_read_u32(pdev->dev.of_node,
+ "cell-index", &jpeg_enc_dev_intf->hw_idx); /* NOTE(review): return unchecked -- hw_idx stays 0 (kzalloc) if the DT property is absent */
+
+ jpeg_enc_dev = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+ if (!jpeg_enc_dev) {
+ rc = -ENOMEM;
+ goto error_alloc_dev;
+ }
+ jpeg_enc_dev->soc_info.pdev = pdev;
+ jpeg_enc_dev_intf->hw_priv = jpeg_enc_dev;
+ jpeg_enc_dev_intf->hw_ops.init = cam_jpeg_enc_init_hw; /* cam_hw_intf ops table consumed by the JPEG hw-mgr */
+ jpeg_enc_dev_intf->hw_ops.deinit = cam_jpeg_enc_deinit_hw;
+ jpeg_enc_dev_intf->hw_ops.start = cam_jpeg_enc_start_hw;
+ jpeg_enc_dev_intf->hw_ops.reset = cam_jpeg_enc_reset_hw;
+ jpeg_enc_dev_intf->hw_ops.process_cmd = cam_jpeg_enc_process_cmd;
+ jpeg_enc_dev_intf->hw_type = CAM_JPEG_DEV_ENC;
+
+ platform_set_drvdata(pdev, jpeg_enc_dev_intf); /* set early; remove() tolerates partially-initialized state */
+ jpeg_enc_dev->core_info =
+ kzalloc(sizeof(struct cam_jpeg_enc_device_core_info),
+ GFP_KERNEL);
+ if (!jpeg_enc_dev->core_info) {
+ rc = -ENOMEM;
+ goto error_alloc_core;
+ }
+ core_info = (struct cam_jpeg_enc_device_core_info *)jpeg_enc_dev->
+ core_info;
+
+ match_dev = of_match_device(pdev->dev.driver->of_match_table,
+ &pdev->dev);
+ if (!match_dev) {
+ CAM_ERR(CAM_JPEG, " No jpeg_enc hardware info");
+ rc = -EINVAL;
+ goto error_match_dev;
+ }
+ hw_info = (struct cam_jpeg_enc_device_hw_info *)match_dev->data; /* static cam_jpeg_enc_hw_info from the dt match table */
+ core_info->jpeg_enc_hw_info = hw_info;
+ core_info->core_state = CAM_JPEG_ENC_CORE_NOT_READY; /* init_hw transitions this to READY */
+
+ rc = cam_jpeg_enc_init_soc_resources(&jpeg_enc_dev->soc_info,
+ cam_jpeg_enc_irq,
+ jpeg_enc_dev); /* jpeg_enc_dev is the IRQ handler's cookie */
+ if (rc) {
+ CAM_ERR(CAM_JPEG, " failed to init_soc %d", rc);
+ goto error_match_dev;
+ }
+
+ rc = cam_jpeg_enc_register_cpas(&jpeg_enc_dev->soc_info,
+ core_info, jpeg_enc_dev_intf->hw_idx);
+ if (rc) {
+ CAM_ERR(CAM_JPEG, " failed to reg cpas %d", rc);
+ goto error_reg_cpas;
+ }
+ jpeg_enc_dev->hw_state = CAM_HW_STATE_POWER_DOWN; /* powered up lazily by init_hw */
+ mutex_init(&jpeg_enc_dev->hw_mutex);
+ spin_lock_init(&jpeg_enc_dev->hw_lock);
+ init_completion(&jpeg_enc_dev->hw_complete);
+
+ return rc; /* rc == 0 here */
+
+error_reg_cpas: /* unwind in strict reverse order of acquisition */
+ cam_soc_util_release_platform_resource(&jpeg_enc_dev->soc_info);
+error_match_dev:
+ kfree(jpeg_enc_dev->core_info);
+error_alloc_core:
+ kfree(jpeg_enc_dev);
+error_alloc_dev:
+ kfree(jpeg_enc_dev_intf); /* drvdata may still point here, but the device never binds on failure */
+
+ return rc;
+}
+
+static const struct of_device_id cam_jpeg_enc_dt_match[] = { /* DT binding; .data supplies per-variant register offsets */
+ {
+ .compatible = "qcom,cam_jpeg_enc",
+ .data = &cam_jpeg_enc_hw_info,
+ },
+ {} /* sentinel */
+};
+MODULE_DEVICE_TABLE(of, cam_jpeg_enc_dt_match);
+
+static struct platform_driver cam_jpeg_enc_driver = {
+ .probe = cam_jpeg_enc_probe,
+ .remove = cam_jpeg_enc_remove,
+ .driver = {
+ .name = "cam-jpeg-enc",
+ .owner = THIS_MODULE,
+ .of_match_table = cam_jpeg_enc_dt_match,
+ },
+};
+
+static int __init cam_jpeg_enc_init_module(void) /* module entry: register the platform driver */
+{
+ return platform_driver_register(&cam_jpeg_enc_driver);
+}
+
+static void __exit cam_jpeg_enc_exit_module(void) /* module exit: unregister (triggers remove() for bound devices) */
+{
+ platform_driver_unregister(&cam_jpeg_enc_driver);
+}
+
+module_init(cam_jpeg_enc_init_module);
+module_exit(cam_jpeg_enc_exit_module);
+MODULE_DESCRIPTION("CAM JPEG_ENC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.c
new file mode 100644
index 0000000..3f450cd
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.c
@@ -0,0 +1,63 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-buf.h>
+#include <media/cam_defs.h>
+#include <media/cam_jpeg.h>
+
+#include "jpeg_enc_soc.h"
+#include "cam_soc_util.h"
+#include "cam_debug_util.h"
+
+int cam_jpeg_enc_init_soc_resources(struct cam_hw_soc_info *soc_info, /* parse DT properties and claim clocks/regs/IRQ for the encoder */
+ irq_handler_t jpeg_enc_irq_handler, void *irq_data) /* irq_data is passed verbatim to the handler */
+{
+ int rc;
+
+ rc = cam_soc_util_get_dt_properties(soc_info);
+ if (rc)
+ return rc; /* nothing acquired yet, safe to bail */
+
+ rc = cam_soc_util_request_platform_resource(soc_info,
+ jpeg_enc_irq_handler,
+ irq_data);
+ if (rc)
+ CAM_ERR(CAM_JPEG, "init soc failed %d", rc);
+
+ return rc;
+}
+
+int cam_jpeg_enc_enable_soc_resources(struct cam_hw_soc_info *soc_info) /* power on: regulators + clocks at SVS level, IRQ enabled */
+{
+ int rc;
+
+ rc = cam_soc_util_enable_platform_resource(soc_info, true,
+ CAM_SVS_VOTE, true); /* (enable_clocks=true, clk_level=SVS, enable_irq=true) -- confirm against cam_soc_util signature */
+ if (rc)
+ CAM_ERR(CAM_JPEG, "enable platform failed %d", rc);
+
+ return rc;
+}
+
+int cam_jpeg_enc_disable_soc_resources(struct cam_hw_soc_info *soc_info) /* power off: mirror of enable_soc_resources */
+{
+ int rc;
+
+ rc = cam_soc_util_disable_platform_resource(soc_info, true, false); /* (disable_clocks=true, disable_irq=false) -- asymmetric with enable; confirm intent */
+ if (rc)
+ CAM_ERR(CAM_JPEG, "disable platform failed %d", rc);
+
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.h
new file mode 100644
index 0000000..a0485a2
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_JPEG_ENC_SOC_H_
+#define _CAM_JPEG_ENC_SOC_H_
+
+#include "cam_soc_util.h" /* NOTE(review): irq_handler_t below presumably comes in via this header -- confirm, else include <linux/interrupt.h> here */
+
+int cam_jpeg_enc_init_soc_resources(struct cam_hw_soc_info *soc_info,
+ irq_handler_t jpeg_enc_irq_handler, void *irq_data); /* parse DT + request clocks/regs/IRQ */
+
+int cam_jpeg_enc_enable_soc_resources(struct cam_hw_soc_info *soc_info); /* power on clocks/regulators/IRQ */
+
+int cam_jpeg_enc_disable_soc_resources(struct cam_hw_soc_info *soc_info); /* power off counterpart */
+
+#endif /* _CAM_JPEG_ENC_SOC_H_*/
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/Makefile b/drivers/media/platform/msm/camera/cam_req_mgr/Makefile
index e6da6ca..f514139 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/Makefile
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/Makefile
@@ -1,4 +1,6 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core/
ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
obj-$(CONFIG_SPECTRA_CAMERA) += cam_req_mgr_dev.o \
cam_req_mgr_util.o \
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
index f47b1dc..c150244 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
@@ -10,14 +10,6 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "CAM-MEM-MGR %s:%d " fmt, __func__, __LINE__
-
-#ifdef CONFIG_MEM_MGR_DBG
-#define CDBG(fmt, args...) pr_err(fmt, ##args)
-#else
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
-#endif
-
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mutex.h>
@@ -27,6 +19,7 @@
#include "cam_req_mgr_util.h"
#include "cam_mem_mgr.h"
#include "cam_smmu_api.h"
+#include "cam_debug_util.h"
static struct cam_mem_table tbl;
@@ -36,12 +29,12 @@
{
*vaddr = (uintptr_t)ion_map_kernel(tbl.client, hdl);
if (IS_ERR_OR_NULL((void *)*vaddr)) {
- pr_err("kernel map fail");
+ CAM_ERR(CAM_CRM, "kernel map fail");
return -ENOSPC;
}
if (ion_handle_get_size(tbl.client, hdl, len)) {
- pr_err("kernel get len failed");
+ CAM_ERR(CAM_CRM, "kernel get len failed");
ion_unmap_kernel(tbl.client, hdl);
return -ENOSPC;
}
@@ -69,7 +62,7 @@
tbl.client = msm_ion_client_create("camera_global_pool");
if (IS_ERR_OR_NULL(tbl.client)) {
- pr_err("fail to create client\n");
+ CAM_ERR(CAM_CRM, "fail to create client");
rc = -EINVAL;
}
@@ -92,7 +85,7 @@
rc = cam_mem_util_client_create();
if (rc < 0) {
- pr_err("fail to create ion client\n");
+ CAM_ERR(CAM_CRM, "fail to create ion client");
goto client_fail;
}
@@ -127,10 +120,12 @@
mutex_lock(&tbl.m_lock);
for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) {
if (!tbl.bufq[i].active) {
- CDBG("Buffer inactive at idx=%d, continuing\n", i);
+ CAM_DBG(CAM_CRM,
+ "Buffer inactive at idx=%d, continuing", i);
continue;
} else {
- pr_err("Active buffer at idx=%d, possible leak\n", i);
+ CAM_ERR(CAM_CRM,
+ "Active buffer at idx=%d, possible leak", i);
}
mutex_lock(&tbl.bufq[i].q_lock);
@@ -221,7 +216,7 @@
iova_ptr,
len_ptr);
if (rc < 0)
- pr_err("fail to get buf hdl :%d", buf_handle);
+ CAM_ERR(CAM_CRM, "fail to get buf hdl :%d", buf_handle);
handle_mismatch:
mutex_unlock(&tbl.bufq[idx].q_lock);
@@ -255,7 +250,7 @@
ion_hdl = tbl.bufq[idx].i_hdl;
if (!ion_hdl) {
- pr_err("Invalid ION handle\n");
+ CAM_ERR(CAM_CRM, "Invalid ION handle");
rc = -EINVAL;
goto exit_func;
}
@@ -310,7 +305,7 @@
rc = ion_handle_get_flags(tbl.client, tbl.bufq[idx].i_hdl,
&ion_flag);
if (rc) {
- pr_err("cache get flags failed %d\n", rc);
+ CAM_ERR(CAM_CRM, "cache get flags failed %d", rc);
goto fail;
}
@@ -326,7 +321,8 @@
ion_cache_ops = ION_IOC_CLEAN_INV_CACHES;
break;
default:
- pr_err("invalid cache ops :%d", cmd->mem_cache_ops);
+ CAM_ERR(CAM_CRM,
+ "invalid cache ops :%d", cmd->mem_cache_ops);
rc = -EINVAL;
goto fail;
}
@@ -337,7 +333,7 @@
tbl.bufq[idx].len,
ion_cache_ops);
if (rc)
- pr_err("cache operation failed %d\n", rc);
+ CAM_ERR(CAM_CRM, "cache operation failed %d", rc);
}
fail:
mutex_unlock(&tbl.bufq[idx].q_lock);
@@ -360,7 +356,7 @@
*fd = ion_share_dma_buf_fd(tbl.client, *hdl);
if (*fd < 0) {
- pr_err("dma buf get fd fail");
+ CAM_ERR(CAM_CRM, "dma buf get fd fail");
rc = -EINVAL;
goto get_fd_fail;
}
@@ -404,19 +400,19 @@
static int cam_mem_util_check_flags(struct cam_mem_mgr_alloc_cmd *cmd)
{
if (!cmd->flags) {
- pr_err("Invalid flags\n");
+ CAM_ERR(CAM_CRM, "Invalid flags");
return -EINVAL;
}
if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) {
- pr_err("Num of mmu hdl exceeded maximum(%d)\n",
+ CAM_ERR(CAM_CRM, "Num of mmu hdl exceeded maximum(%d)",
CAM_MEM_MMU_MAX_HANDLE);
return -EINVAL;
}
if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE &&
cmd->flags & CAM_MEM_FLAG_KMD_ACCESS) {
- pr_err("Kernel mapping in secure mode not allowed");
+ CAM_ERR(CAM_CRM, "Kernel mapping in secure mode not allowed");
return -EINVAL;
}
@@ -426,24 +422,25 @@
static int cam_mem_util_check_map_flags(struct cam_mem_mgr_map_cmd *cmd)
{
if (!cmd->flags) {
- pr_err("Invalid flags\n");
+ CAM_ERR(CAM_CRM, "Invalid flags");
return -EINVAL;
}
if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) {
- pr_err("Num of mmu hdl exceeded maximum(%d)\n",
+ CAM_ERR(CAM_CRM, "Num of mmu hdl exceeded maximum(%d)",
CAM_MEM_MMU_MAX_HANDLE);
return -EINVAL;
}
if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE &&
cmd->flags & CAM_MEM_FLAG_KMD_ACCESS) {
- pr_err("Kernel mapping in secure mode not allowed");
+ CAM_ERR(CAM_CRM, "Kernel mapping in secure mode not allowed");
return -EINVAL;
}
if (cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
- pr_err("Shared memory buffers are not allowed to be mapped\n");
+ CAM_ERR(CAM_CRM,
+ "Shared memory buffers are not allowed to be mapped");
return -EINVAL;
}
@@ -463,7 +460,7 @@
int dir = cam_mem_util_get_dma_dir(flags);
if (dir < 0) {
- pr_err("fail to map DMA direction\n");
+ CAM_ERR(CAM_CRM, "fail to map DMA direction");
return dir;
}
@@ -476,7 +473,8 @@
len);
if (rc < 0) {
- pr_err("Failed to securely map to smmu");
+ CAM_ERR(CAM_CRM,
+ "Failed to securely map to smmu");
goto multi_map_fail;
}
}
@@ -490,7 +488,7 @@
region);
if (rc < 0) {
- pr_err("Failed to map to smmu");
+ CAM_ERR(CAM_CRM, "Failed to map to smmu");
goto multi_map_fail;
}
}
@@ -520,14 +518,14 @@
size_t len;
if (!cmd) {
- pr_err(" Invalid argument\n");
+ CAM_ERR(CAM_CRM, " Invalid argument");
return -EINVAL;
}
len = cmd->len;
rc = cam_mem_util_check_flags(cmd);
if (rc) {
- pr_err("Invalid flags: flags = %X\n", cmd->flags);
+ CAM_ERR(CAM_CRM, "Invalid flags: flags = %X", cmd->flags);
return rc;
}
@@ -535,7 +533,7 @@
&ion_hdl,
&ion_fd);
if (rc) {
- pr_err("Ion allocation failed\n");
+ CAM_ERR(CAM_CRM, "Ion allocation failed");
return rc;
}
@@ -591,7 +589,7 @@
cmd->out.fd = tbl.bufq[idx].fd;
cmd->out.vaddr = 0;
- CDBG("buf handle: %x, fd: %d, len: %zu\n",
+ CAM_DBG(CAM_CRM, "buf handle: %x, fd: %d, len: %zu",
cmd->out.buf_handle, cmd->out.fd,
tbl.bufq[idx].len);
@@ -613,7 +611,7 @@
size_t len = 0;
if (!cmd || (cmd->fd < 0)) {
- pr_err("Invalid argument\n");
+ CAM_ERR(CAM_CRM, "Invalid argument");
return -EINVAL;
}
@@ -622,13 +620,13 @@
rc = cam_mem_util_check_map_flags(cmd);
if (rc) {
- pr_err("Invalid flags: flags = %X\n", cmd->flags);
+ CAM_ERR(CAM_CRM, "Invalid flags: flags = %X", cmd->flags);
return rc;
}
ion_hdl = ion_import_dma_buf_fd(tbl.client, cmd->fd);
if (IS_ERR_OR_NULL((void *)(ion_hdl))) {
- pr_err("Failed to import ion fd\n");
+ CAM_ERR(CAM_CRM, "Failed to import ion fd");
return -EINVAL;
}
@@ -690,7 +688,7 @@
int rc = -EINVAL;
if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
- pr_err("Incorrect index\n");
+ CAM_ERR(CAM_CRM, "Incorrect index");
return rc;
}
@@ -722,27 +720,31 @@
static int cam_mem_util_unmap(int32_t idx)
{
int rc = 0;
- enum cam_smmu_region_id region;
+ enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;
if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
- pr_err("Incorrect index\n");
+ CAM_ERR(CAM_CRM, "Incorrect index");
return -EINVAL;
}
- CDBG("Flags = %X\n", tbl.bufq[idx].flags);
+ CAM_DBG(CAM_CRM, "Flags = %X", tbl.bufq[idx].flags);
if (tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS)
if (tbl.bufq[idx].i_hdl && tbl.bufq[idx].kmdvaddr)
ion_unmap_kernel(tbl.client, tbl.bufq[idx].i_hdl);
- if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE)
- region = CAM_SMMU_REGION_IO;
-
- if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS)
+ /* SHARED flag gets precedence, all other flags after it */
+ if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
region = CAM_SMMU_REGION_SHARED;
+ } else {
+ if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE)
+ region = CAM_SMMU_REGION_IO;
+ }
- rc = cam_mem_util_unmap_hw_va(idx,
- region);
+ if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE ||
+ tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS)
+ rc = cam_mem_util_unmap_hw_va(idx, region);
+
mutex_lock(&tbl.bufq[idx].q_lock);
tbl.bufq[idx].flags = 0;
@@ -751,7 +753,8 @@
memset(tbl.bufq[idx].hdls, 0,
sizeof(int32_t) * CAM_MEM_MMU_MAX_HANDLE);
- CDBG("Ion handle at idx = %d freeing = %pK, fd = %d, imported %d\n",
+ CAM_DBG(CAM_CRM,
+ "Ion handle at idx = %d freeing = %pK, fd = %d, imported %d",
idx, tbl.bufq[idx].i_hdl, tbl.bufq[idx].fd,
tbl.bufq[idx].is_imported);
@@ -776,27 +779,28 @@
int rc;
if (!cmd) {
- pr_err("Invalid argument\n");
+ CAM_ERR(CAM_CRM, "Invalid argument");
return -EINVAL;
}
idx = CAM_MEM_MGR_GET_HDL_IDX(cmd->buf_handle);
if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
- pr_err("Incorrect index extracted from mem handle\n");
+ CAM_ERR(CAM_CRM, "Incorrect index extracted from mem handle");
return -EINVAL;
}
if (!tbl.bufq[idx].active) {
- pr_err("Released buffer state should be active\n");
+ CAM_ERR(CAM_CRM, "Released buffer state should be active");
return -EINVAL;
}
if (tbl.bufq[idx].buf_handle != cmd->buf_handle) {
- pr_err("Released buf handle not matching within table\n");
+ CAM_ERR(CAM_CRM,
+ "Released buf handle not matching within table");
return -EINVAL;
}
- CDBG("Releasing hdl = %u\n", cmd->buf_handle);
+ CAM_DBG(CAM_CRM, "Releasing hdl = %u", cmd->buf_handle);
rc = cam_mem_util_unmap(idx);
return rc;
@@ -817,16 +821,17 @@
uint32_t mem_handle;
int32_t smmu_hdl = 0;
int32_t num_hdl = 0;
- enum cam_smmu_region_id region;
+ enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;
if (!inp || !out) {
- pr_err("Invalid params\n");
+ CAM_ERR(CAM_CRM, "Invalid params");
return -EINVAL;
}
- if (inp->region != CAM_MEM_MGR_REGION_SHARED &&
- inp->region != CAM_MEM_MGR_REGION_NON_SECURE_IO) {
- pr_err("Invalid flags for request mem\n");
+ if (!(inp->flags & CAM_MEM_FLAG_HW_READ_WRITE ||
+ inp->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS ||
+ inp->flags & CAM_MEM_FLAG_CACHE)) {
+ CAM_ERR(CAM_CRM, "Invalid flags for request mem");
return -EINVAL;
}
@@ -845,29 +850,31 @@
&ion_fd);
if (rc) {
- pr_err("ION alloc failed for shared buffer\n");
+ CAM_ERR(CAM_CRM, "ION alloc failed for shared buffer");
goto ion_fail;
} else {
- CDBG("Got ION fd = %d, hdl = %pK\n", ion_fd, hdl);
+ CAM_DBG(CAM_CRM, "Got ION fd = %d, hdl = %pK", ion_fd, hdl);
}
rc = cam_mem_util_map_cpu_va(hdl, &kvaddr, &request_len);
if (rc) {
- pr_err("Failed to get kernel vaddr\n");
+ CAM_ERR(CAM_CRM, "Failed to get kernel vaddr");
goto map_fail;
}
if (!inp->smmu_hdl) {
- pr_err("Invalid SMMU handle\n");
+ CAM_ERR(CAM_CRM, "Invalid SMMU handle");
rc = -EINVAL;
goto smmu_fail;
}
- if (inp->region == CAM_MEM_MGR_REGION_SHARED)
+ /* SHARED flag gets precedence, all other flags after it */
+ if (inp->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
region = CAM_SMMU_REGION_SHARED;
-
- if (inp->region == CAM_MEM_MGR_REGION_NON_SECURE_IO)
- region = CAM_SMMU_REGION_IO;
+ } else {
+ if (inp->flags & CAM_MEM_FLAG_HW_READ_WRITE)
+ region = CAM_SMMU_REGION_IO;
+ }
rc = cam_smmu_map_iova(inp->smmu_hdl,
ion_fd,
@@ -877,7 +884,7 @@
region);
if (rc < 0) {
- pr_err("SMMU mapping failed\n");
+ CAM_ERR(CAM_CRM, "SMMU mapping failed");
goto smmu_fail;
}
@@ -912,13 +919,13 @@
out->smmu_hdl = smmu_hdl;
out->mem_handle = mem_handle;
out->len = inp->size;
- out->region = inp->region;
+ out->region = region;
return rc;
slot_fail:
cam_smmu_unmap_iova(inp->smmu_hdl,
ion_fd,
- inp->region);
+ region);
smmu_fail:
ion_unmap_kernel(tbl.client, hdl);
map_fail:
@@ -934,27 +941,28 @@
int rc;
if (!inp) {
- pr_err("Invalid argument\n");
+ CAM_ERR(CAM_CRM, "Invalid argument");
return -EINVAL;
}
idx = CAM_MEM_MGR_GET_HDL_IDX(inp->mem_handle);
if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
- pr_err("Incorrect index extracted from mem handle\n");
+ CAM_ERR(CAM_CRM, "Incorrect index extracted from mem handle");
return -EINVAL;
}
if (!tbl.bufq[idx].active) {
- pr_err("Released buffer state should be active\n");
+ CAM_ERR(CAM_CRM, "Released buffer state should be active");
return -EINVAL;
}
if (tbl.bufq[idx].buf_handle != inp->mem_handle) {
- pr_err("Released buf handle not matching within table\n");
+ CAM_ERR(CAM_CRM,
+ "Released buf handle not matching within table");
return -EINVAL;
}
- CDBG("Releasing hdl = %X\n", inp->mem_handle);
+ CAM_DBG(CAM_CRM, "Releasing hdl = %X", inp->mem_handle);
rc = cam_mem_util_unmap(idx);
return rc;
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h
index 32a754e..0858b8a 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h
@@ -14,13 +14,7 @@
#define _CAM_MEM_MGR_API_H_
#include <media/cam_req_mgr.h>
-
-/* Region IDs for memory manager */
-#define CAM_MEM_MGR_REGION_FIRMWARE 0
-#define CAM_MEM_MGR_REGION_SHARED 1
-#define CAM_MEM_MGR_REGION_NON_SECURE_IO 2
-#define CAM_MEM_MGR_REGION_SECURE_IO 3
-#define CAM_MEM_MGR_REGION_SCRATCH 4
+#include "cam_smmu_api.h"
/**
* struct cam_mem_mgr_request_desc
@@ -36,7 +30,6 @@
uint64_t align;
int32_t smmu_hdl;
uint32_t flags;
- uint32_t region;
};
/**
@@ -55,7 +48,7 @@
int32_t smmu_hdl;
uint32_t mem_handle;
uint64_t len;
- uint32_t region;
+ enum cam_smmu_region_id region;
};
/**
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
index ed251eb..3fd42f7 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
@@ -19,6 +19,8 @@
#include "cam_req_mgr_core.h"
#include "cam_req_mgr_workq.h"
#include "cam_req_mgr_debug.h"
+#include "cam_trace.h"
+#include "cam_debug_util.h"
static struct cam_req_mgr_core_device *g_crm_core_dev;
@@ -58,24 +60,25 @@
struct cam_req_mgr_req_tbl *req_tbl = req->l_tbl;
if (!in_q || !req_tbl) {
- CRM_WARN("NULL pointer %pK %pK", in_q, req_tbl);
+ CAM_WARN(CAM_CRM, "NULL pointer %pK %pK", in_q, req_tbl);
return -EINVAL;
}
- CRM_DBG("in_q %pK %pK %d", in_q, req_tbl, req_tbl->num_slots);
+ CAM_DBG(CAM_CRM, "in_q %pK %pK %d", in_q, req_tbl, req_tbl->num_slots);
mutex_lock(&req->lock);
for (i = 0; i < in_q->num_slots; i++) {
- CRM_DBG("IN_Q %d: idx %d, red_id %lld", i,
+ CAM_DBG(CAM_CRM, "IN_Q %d: idx %d, red_id %lld", i,
in_q->slot[i].idx, CRM_GET_REQ_ID(in_q, i));
}
while (req_tbl != NULL) {
for (i = 0; i < req_tbl->num_slots; i++) {
- CRM_DBG("idx= %d, map= %x, state= %d",
+ CAM_DBG(CAM_CRM, "idx= %d, map= %x, state= %d",
req_tbl->slot[i].idx,
req_tbl->slot[i].req_ready_map,
req_tbl->slot[i].state);
}
- CRM_DBG("TBL:id= %d, pd=%d cnt=%d mask=%x skip=%d num_slt= %d",
+ CAM_DBG(CAM_CRM,
+ "TBL:id= %d, pd=%d cnt=%d mask=%x skip=%d num_slt= %d",
req_tbl->id, req_tbl->pd, req_tbl->dev_count,
req_tbl->dev_mask, req_tbl->skip_traverse,
req_tbl->num_slots);
@@ -165,7 +168,7 @@
struct cam_req_mgr_apply *apply_data;
if (!traverse_data->tbl || !traverse_data->apply_data) {
- CRM_ERR("NULL pointer %pK %pK",
+ CAM_ERR(CAM_CRM, "NULL pointer %pK %pK",
traverse_data->tbl, traverse_data->apply_data);
traverse_data->result = 0;
return -EINVAL;
@@ -173,7 +176,7 @@
tbl = traverse_data->tbl;
apply_data = traverse_data->apply_data;
- CRM_DBG("Enter pd %d idx %d state %d skip %d status %d",
+ CAM_DBG(CAM_CRM, "Enter pd %d idx %d state %d skip %d status %d",
tbl->pd, curr_idx, tbl->slot[curr_idx].state,
tbl->skip_traverse, traverse_data->in_q->slot[curr_idx].status);
@@ -228,7 +231,7 @@
in_q->slot[idx].req_id = -1;
in_q->slot[idx].skip_idx = 1;
in_q->slot[idx].status = CRM_SLOT_STATUS_REQ_ADDED;
- CRM_DBG("SET IDX SKIP on slot= %d", idx);
+ CAM_DBG(CAM_CRM, "SET IDX SKIP on slot= %d", idx);
}
/**
@@ -246,7 +249,7 @@
return;
do {
tbl->id = req->num_tbl++;
- CRM_DBG("%d: pd %d skip_traverse %d delta %d",
+ CAM_DBG(CAM_CRM, "%d: pd %d skip_traverse %d delta %d",
tbl->id, tbl->pd, tbl->skip_traverse,
tbl->pd_delta);
tbl = tbl->next;
@@ -275,7 +278,7 @@
max_pd = tbl->pd;
do {
tbl->skip_traverse = max_pd - tbl->pd;
- CRM_DBG("%d: pd %d skip_traverse %d delta %d",
+ CAM_DBG(CAM_CRM, "%d: pd %d skip_traverse %d delta %d",
tbl->id, tbl->pd, tbl->skip_traverse,
tbl->pd_delta);
tbl = tbl->next;
@@ -298,7 +301,7 @@
struct cam_req_mgr_req_queue *in_q = link->req.in_q;
slot = &in_q->slot[idx];
- CRM_DBG("RESET: idx: %d: slot->status %d", idx, slot->status);
+ CAM_DBG(CAM_CRM, "RESET: idx: %d: slot->status %d", idx, slot->status);
/* Check if CSL has already pushed new request*/
if (slot->status == CRM_SLOT_STATUS_REQ_ADDED)
@@ -312,7 +315,7 @@
/* Reset all pd table slot */
while (tbl != NULL) {
- CRM_DBG("pd: %d: idx %d state %d",
+ CAM_DBG(CAM_CRM, "pd: %d: idx %d state %d",
tbl->pd, idx, tbl->slot[idx].state);
tbl->slot[idx].req_ready_map = 0;
tbl->slot[idx].state = CRM_REQ_STATE_EMPTY;
@@ -338,13 +341,14 @@
__cam_req_mgr_inc_idx(&idx, 1, in_q->num_slots);
slot = &in_q->slot[idx];
- CRM_DBG("idx: %d: slot->status %d", idx, slot->status);
+ CAM_DBG(CAM_CRM, "idx: %d: slot->status %d", idx, slot->status);
/* Check if there is new req from CSL, if not complete req */
if (slot->status == CRM_SLOT_STATUS_NO_REQ) {
__cam_req_mgr_in_q_skip_idx(in_q, idx);
if (in_q->wr_idx != idx)
- CRM_WARN("CHECK here wr %d, rd %d", in_q->wr_idx, idx);
+ CAM_WARN(CAM_CRM,
+ "CHECK here wr %d, rd %d", in_q->wr_idx, idx);
__cam_req_mgr_inc_idx(&in_q->wr_idx, 1, in_q->num_slots);
}
}
@@ -376,13 +380,13 @@
if (dev) {
pd = dev->dev_info.p_delay;
if (pd >= CAM_PIPELINE_DELAY_MAX) {
- CRM_WARN("pd %d greater than max",
+ CAM_WARN(CAM_CRM, "pd %d greater than max",
pd);
continue;
}
if (link->req.apply_data[pd].skip_idx ||
link->req.apply_data[pd].req_id < 0) {
- CRM_DBG("skip %d req_id %lld",
+ CAM_DBG(CAM_CRM, "skip %d req_id %lld",
link->req.apply_data[pd].skip_idx,
link->req.apply_data[pd].req_id);
continue;
@@ -393,7 +397,10 @@
idx = link->req.apply_data[pd].idx;
apply_req.report_if_bubble =
in_q->slot[idx].recover;
- CRM_DBG("SEND: pd %d req_id %lld",
+
+ trace_cam_req_mgr_apply_request(link, &apply_req, dev);
+
+ CAM_DBG(CAM_CRM, "SEND: pd %d req_id %lld",
pd, apply_req.request_id);
if (dev->ops && dev->ops->apply_req) {
rc = dev->ops->apply_req(&apply_req);
@@ -403,7 +410,7 @@
}
}
if (rc < 0) {
- CRM_ERR("APPLY FAILED pd %d req_id %lld",
+ CAM_ERR(CAM_CRM, "APPLY FAILED pd %d req_id %lld",
dev->dev_info.p_delay, apply_req.request_id);
/* Apply req failed notify already applied devs */
for (; i >= 0; i--) {
@@ -456,11 +463,12 @@
*/
rc = __cam_req_mgr_traverse(&traverse_data);
- CRM_DBG("SOF: idx %d result %x pd_mask %x rc %d",
+ CAM_DBG(CAM_CRM, "SOF: idx %d result %x pd_mask %x rc %d",
idx, traverse_data.result, link->pd_mask, rc);
if (!rc && traverse_data.result == link->pd_mask) {
- CRM_DBG("APPLY: link_hdl= %x idx= %d, req_id= %lld :%lld :%lld",
+ CAM_DBG(CAM_CRM,
+ "APPLY: link_hdl= %x idx= %d, req_id= %lld :%lld :%lld",
link->link_hdl, idx,
apply_data[2].req_id, apply_data[1].req_id,
apply_data[0].req_id);
@@ -497,12 +505,12 @@
* - if in applied_state, somthign wrong.
* - if in no_req state, no new req
*/
- CRM_DBG("idx %d req_status %d",
+ CAM_DBG(CAM_CRM, "idx %d req_status %d",
in_q->rd_idx, in_q->slot[in_q->rd_idx].status);
slot = &in_q->slot[in_q->rd_idx];
if (slot->status == CRM_SLOT_STATUS_NO_REQ) {
- CRM_DBG("No Pending req");
+ CAM_DBG(CAM_CRM, "No Pending req");
return 0;
}
@@ -516,7 +524,8 @@
slot->status = CRM_SLOT_STATUS_REQ_APPLIED;
if (link->state == CAM_CRM_LINK_STATE_ERR) {
- CRM_WARN("Err recovery done idx %d status %d",
+ CAM_WARN(CAM_CRM,
+ "Err recovery done idx %d status %d",
in_q->rd_idx,
in_q->slot[in_q->rd_idx].status);
mutex_lock(&link->lock);
@@ -552,7 +561,8 @@
* don't expect to enter here.
* @TODO: gracefully handle if recovery fails.
*/
- CRM_ERR("FATAL recovery cant finish idx %d status %d",
+ CAM_ERR(CAM_CRM,
+ "FATAL recovery cant finish idx %d status %d",
in_q->rd_idx, in_q->slot[in_q->rd_idx].status);
rc = -EPERM;
}
@@ -600,7 +610,7 @@
tbl->next = new_tbl;
tbl->pd_delta = tbl->pd - new_tbl->pd;
}
- CRM_DBG("added pd %d tbl to link delta %d", new_tbl->pd,
+ CAM_DBG(CAM_CRM, "added pd %d tbl to link delta %d", new_tbl->pd,
new_tbl->pd_delta);
}
@@ -619,7 +629,7 @@
kzalloc(sizeof(struct cam_req_mgr_req_tbl), GFP_KERNEL);
if (tbl != NULL) {
tbl->num_slots = MAX_REQ_SLOTS;
- CRM_DBG("pd= %d slots= %d", delay, tbl->num_slots);
+ CAM_DBG(CAM_CRM, "pd= %d slots= %d", delay, tbl->num_slots);
}
return tbl;
@@ -636,7 +646,7 @@
{
struct cam_req_mgr_req_tbl *tbl = *l_tbl, *temp;
- CRM_DBG("*l_tbl %pK", tbl);
+ CAM_DBG(CAM_CRM, "*l_tbl %pK", tbl);
while (tbl != NULL) {
temp = tbl->next;
kfree(tbl);
@@ -665,7 +675,7 @@
for (i = 0; i < in_q->num_slots; i++) {
slot = &in_q->slot[idx];
if (slot->req_id == req_id) {
- CRM_DBG("req %lld found at %d %d status %d",
+ CAM_DBG(CAM_CRM, "req %lld found at %d %d status %d",
req_id, idx, slot->idx,
slot->status);
break;
@@ -693,7 +703,7 @@
struct cam_req_mgr_req_queue *in_q = req->in_q;
if (!in_q) {
- CRM_ERR("NULL in_q");
+ CAM_ERR(CAM_CRM, "NULL in_q");
return -EINVAL;
}
@@ -728,7 +738,7 @@
struct cam_req_mgr_req_queue *in_q = req->in_q;
if (!in_q) {
- CRM_ERR("NULL in_q");
+ CAM_ERR(CAM_CRM, "NULL in_q");
return -EINVAL;
}
@@ -757,11 +767,11 @@
struct cam_req_mgr_core_link *link = NULL;
if (!timer) {
- CRM_ERR("NULL timer");
+ CAM_ERR(CAM_CRM, "NULL timer");
return;
}
link = (struct cam_req_mgr_core_link *)timer->parent;
- CRM_ERR("SOF freeze for link %x", link->link_hdl);
+ CAM_ERR(CAM_CRM, "SOF freeze for link %x", link->link_hdl);
}
/**
@@ -858,12 +868,12 @@
struct cam_req_mgr_req_queue *in_q;
if (!session || !g_crm_core_dev) {
- CRM_ERR("NULL session/core_dev ptr");
+ CAM_ERR(CAM_CRM, "NULL session/core_dev ptr");
return NULL;
}
if (session->num_links >= MAX_LINKS_PER_SESSION) {
- CRM_ERR("Reached max links %d per session limit %d",
+ CAM_ERR(CAM_CRM, "Reached max links %d per session limit %d",
session->num_links, MAX_LINKS_PER_SESSION);
return NULL;
}
@@ -871,7 +881,7 @@
link = (struct cam_req_mgr_core_link *)
kzalloc(sizeof(struct cam_req_mgr_core_link), GFP_KERNEL);
if (!link) {
- CRM_ERR("failed to create link, no mem");
+ CAM_ERR(CAM_CRM, "failed to create link, no mem");
return NULL;
}
in_q = &session->in_q;
@@ -892,7 +902,7 @@
mutex_lock(&session->lock);
session->links[session->num_links] = link;
session->num_links++;
- CRM_DBG("Active session links (%d)",
+ CAM_DBG(CAM_CRM, "Active session links (%d)",
session->num_links);
mutex_unlock(&session->lock);
@@ -914,14 +924,14 @@
int32_t i = 0;
if (!session || !*link) {
- CRM_ERR("NULL session/link ptr %pK %pK",
+ CAM_ERR(CAM_CRM, "NULL session/link ptr %pK %pK",
session, *link);
return;
}
mutex_lock(&session->lock);
if (!session->num_links)
- CRM_WARN("No active link or invalid state %d",
+ CAM_WARN(CAM_CRM, "No active link or invalid state %d",
session->num_links);
else {
for (i = 0; i < session->num_links; i++) {
@@ -929,7 +939,7 @@
session->links[i] = NULL;
}
session->num_links--;
- CRM_DBG("Active session links (%d)",
+ CAM_DBG(CAM_CRM, "Active session links (%d)",
session->num_links);
}
kfree(*link);
@@ -958,7 +968,7 @@
struct cam_req_mgr_req_queue *in_q = NULL;
if (!data || !priv) {
- CRM_ERR("input args NULL %pK %pK", data, priv);
+ CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
rc = -EINVAL;
goto end;
}
@@ -975,7 +985,7 @@
* cam_req_mgr_process_flush_req()
*
* @brief: This runs in workque thread context. Call core funcs to check
- * which requests need to be removedcancelled.
+ * which requests need to be removed/cancelled.
* @priv : link information.
* @data : contains information about frame_id, link etc.
*
@@ -993,20 +1003,22 @@
struct crm_task_payload *task_data = NULL;
if (!data || !priv) {
- CRM_ERR("input args NULL %pK %pK", data, priv);
+ CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
rc = -EINVAL;
goto end;
}
link = (struct cam_req_mgr_core_link *)priv;
task_data = (struct crm_task_payload *)data;
flush_info = (struct cam_req_mgr_flush_info *)&task_data->u;
- CRM_DBG("link_hdl %x req_id %lld type %d",
+ CAM_DBG(CAM_CRM, "link_hdl %x req_id %lld type %d",
flush_info->link_hdl,
flush_info->req_id,
flush_info->flush_type);
in_q = link->req.in_q;
+ trace_cam_flush_req(link, flush_info);
+
mutex_lock(&link->req.lock);
if (flush_info->flush_type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
for (i = 0; i < in_q->num_slots; i++) {
@@ -1021,15 +1033,16 @@
CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
idx = __cam_req_mgr_find_slot_for_req(in_q, flush_info->req_id);
if (idx < 0) {
- CRM_ERR("req_id %lld not found in input queue",
+ CAM_ERR(CAM_CRM, "req_id %lld not found in input queue",
flush_info->req_id);
} else {
- CRM_DBG("req_id %lld found at idx %d",
+ CAM_DBG(CAM_CRM, "req_id %lld found at idx %d",
flush_info->req_id, idx);
slot = &in_q->slot[idx];
if (slot->status == CRM_SLOT_STATUS_REQ_PENDING ||
slot->status == CRM_SLOT_STATUS_REQ_APPLIED) {
- CRM_WARN("req_id %lld can not be cancelled",
+ CAM_WARN(CAM_CRM,
+ "req_id %lld can not be cancelled",
flush_info->req_id);
mutex_unlock(&link->req.lock);
return -EINVAL;
@@ -1075,14 +1088,14 @@
struct crm_task_payload *task_data = NULL;
if (!data || !priv) {
- CRM_ERR("input args NULL %pK %pK", data, priv);
+ CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
rc = -EINVAL;
goto end;
}
link = (struct cam_req_mgr_core_link *)priv;
task_data = (struct crm_task_payload *)data;
sched_req = (struct cam_req_mgr_sched_request *)&task_data->u;
- CRM_DBG("link_hdl %x req_id %lld",
+ CAM_DBG(CAM_CRM, "link_hdl %x req_id %lld",
sched_req->link_hdl,
sched_req->req_id);
@@ -1093,9 +1106,9 @@
if (slot->status != CRM_SLOT_STATUS_NO_REQ &&
slot->status != CRM_SLOT_STATUS_REQ_APPLIED)
- CRM_WARN("in_q overwrite %d", slot->status);
+ CAM_WARN(CAM_CRM, "in_q overwrite %d", slot->status);
- CRM_DBG("sched_req %lld at slot %d",
+ CAM_DBG(CAM_CRM, "sched_req %lld at slot %d",
sched_req->req_id, in_q->wr_idx);
slot->status = CRM_SLOT_STATUS_REQ_ADDED;
@@ -1130,7 +1143,7 @@
struct crm_task_payload *task_data = NULL;
if (!data || !priv) {
- CRM_ERR("input args NULL %pK %pK", data, priv);
+ CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
rc = -EINVAL;
goto end;
}
@@ -1147,7 +1160,7 @@
}
}
if (!tbl) {
- CRM_ERR("dev_hdl not found %x, %x %x",
+ CAM_ERR(CAM_CRM, "dev_hdl not found %x, %x %x",
add_req->dev_hdl,
link->l_dev[0].dev_hdl,
link->l_dev[1].dev_hdl);
@@ -1165,7 +1178,7 @@
mutex_lock(&link->req.lock);
idx = __cam_req_mgr_find_slot_for_req(link->req.in_q, add_req->req_id);
if (idx < 0) {
- CRM_ERR("req %lld not found in in_q", add_req->req_id);
+ CAM_ERR(CAM_CRM, "req %lld not found in in_q", add_req->req_id);
rc = -EBADSLT;
mutex_unlock(&link->req.lock);
goto end;
@@ -1173,19 +1186,21 @@
slot = &tbl->slot[idx];
if (slot->state != CRM_REQ_STATE_PENDING &&
slot->state != CRM_REQ_STATE_EMPTY) {
- CRM_WARN("Unexpected state %d for slot %d map %x",
+ CAM_WARN(CAM_CRM, "Unexpected state %d for slot %d map %x",
slot->state, idx, slot->req_ready_map);
}
slot->state = CRM_REQ_STATE_PENDING;
slot->req_ready_map |= (1 << device->dev_bit);
- CRM_DBG("idx %d dev_hdl %x req_id %lld pd %d ready_map %x",
+ CAM_DBG(CAM_CRM, "idx %d dev_hdl %x req_id %lld pd %d ready_map %x",
idx, add_req->dev_hdl, add_req->req_id, tbl->pd,
slot->req_ready_map);
+ trace_cam_req_mgr_add_req(link, idx, add_req, tbl, device);
+
if (slot->req_ready_map == tbl->dev_mask) {
- CRM_DBG("idx %d req_id %lld pd %d SLOT READY",
+ CAM_DBG(CAM_CRM, "idx %d req_id %lld pd %d SLOT READY",
idx, add_req->req_id, tbl->pd);
slot->state = CRM_REQ_STATE_READY;
}
@@ -1216,14 +1231,14 @@
struct crm_task_payload *task_data = NULL;
if (!data || !priv) {
- CRM_ERR("input args NULL %pK %pK", data, priv);
+ CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
rc = -EINVAL;
goto end;
}
link = (struct cam_req_mgr_core_link *)priv;
task_data = (struct crm_task_payload *)data;
err_info = (struct cam_req_mgr_error_notify *)&task_data->u;
- CRM_DBG("link_hdl %x req_id %lld error %d",
+ CAM_DBG(CAM_CRM, "link_hdl %x req_id %lld error %d",
err_info->link_hdl,
err_info->req_id,
err_info->error);
@@ -1234,20 +1249,22 @@
if (err_info->error == CRM_KMD_ERR_BUBBLE) {
idx = __cam_req_mgr_find_slot_for_req(in_q, err_info->req_id);
if (idx < 0) {
- CRM_ERR("req_id %lld not found in input queue",
+ CAM_ERR(CAM_CRM, "req_id %lld not found in input queue",
err_info->req_id);
} else {
- CRM_DBG("req_id %lld found at idx %d",
+ CAM_DBG(CAM_CRM, "req_id %lld found at idx %d",
err_info->req_id, idx);
slot = &in_q->slot[idx];
if (!slot->recover) {
- CRM_WARN("err recovery disabled req_id %lld",
+ CAM_WARN(CAM_CRM,
+ "err recovery disabled req_id %lld",
err_info->req_id);
mutex_unlock(&link->req.lock);
return 0;
} else if (slot->status != CRM_SLOT_STATUS_REQ_PENDING
&& slot->status != CRM_SLOT_STATUS_REQ_APPLIED) {
- CRM_WARN("req_id %lld can not be recovered %d",
+ CAM_WARN(CAM_CRM,
+ "req_id %lld can not be recovered %d",
err_info->req_id, slot->status);
mutex_unlock(&link->req.lock);
return -EINVAL;
@@ -1302,7 +1319,7 @@
struct crm_task_payload *task_data = NULL;
if (!data || !priv) {
- CRM_ERR("input args NULL %pK %pK", data, priv);
+ CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
rc = -EINVAL;
goto end;
}
@@ -1310,7 +1327,7 @@
task_data = (struct crm_task_payload *)data;
sof_data = (struct cam_req_mgr_sof_notify *)&task_data->u;
- CRM_DBG("link_hdl %x frame_id %lld",
+ CAM_DBG(CAM_CRM, "link_hdl %x frame_id %lld",
sof_data->link_hdl,
sof_data->frame_id);
@@ -1321,11 +1338,11 @@
* Check if current read index is in applied state, if yes make it free
* and increment read index to next slot.
*/
- CRM_DBG("link_hdl %x curent idx %d req_status %d",
+ CAM_DBG(CAM_CRM, "link_hdl %x curent idx %d req_status %d",
link->link_hdl, in_q->rd_idx, in_q->slot[in_q->rd_idx].status);
if (link->state == CAM_CRM_LINK_STATE_ERR)
- CRM_WARN("Error recovery idx %d status %d",
+ CAM_WARN(CAM_CRM, "Error recovery idx %d status %d",
in_q->rd_idx,
in_q->slot[in_q->rd_idx].status);
@@ -1364,17 +1381,18 @@
struct crm_task_payload *task_data;
if (!add_req) {
- CRM_ERR("sof_data is NULL");
+ CAM_ERR(CAM_CRM, "sof_data is NULL");
rc = -EINVAL;
goto end;
}
- CRM_DBG("E: dev %x dev req %lld", add_req->dev_hdl, add_req->req_id);
+ CAM_DBG(CAM_CRM, "E: dev %x dev req %lld",
+ add_req->dev_hdl, add_req->req_id);
link = (struct cam_req_mgr_core_link *)
cam_get_device_priv(add_req->link_hdl);
if (!link) {
- CRM_DBG("link ptr NULL %x", add_req->link_hdl);
+ CAM_DBG(CAM_CRM, "link ptr NULL %x", add_req->link_hdl);
rc = -EINVAL;
goto end;
}
@@ -1382,14 +1400,14 @@
/* Validate if req id is present in input queue */
idx = __cam_req_mgr_find_slot_for_req(link->req.in_q, add_req->req_id);
if (idx < 0) {
- CRM_ERR("req %lld not found in in_q", add_req->req_id);
+ CAM_ERR(CAM_CRM, "req %lld not found in in_q", add_req->req_id);
rc = -ENOENT;
goto end;
}
task = cam_req_mgr_workq_get_task(link->workq);
if (!task) {
- CRM_ERR("no empty task dev %x req %lld",
+ CAM_ERR(CAM_CRM, "no empty task dev %x req %lld",
add_req->dev_hdl, add_req->req_id);
rc = -EBUSY;
goto end;
@@ -1403,7 +1421,8 @@
dev_req->dev_hdl = add_req->dev_hdl;
task->process_cb = &cam_req_mgr_process_add_req;
rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
- CRM_DBG("X: dev %x dev req %lld", add_req->dev_hdl, add_req->req_id);
+ CAM_DBG(CAM_CRM, "X: dev %x dev req %lld",
+ add_req->dev_hdl, add_req->req_id);
end:
return rc;
@@ -1428,7 +1447,7 @@
struct crm_task_payload *task_data;
if (!err_info) {
- CRM_ERR("err_info is NULL");
+ CAM_ERR(CAM_CRM, "err_info is NULL");
rc = -EINVAL;
goto end;
}
@@ -1436,7 +1455,7 @@
link = (struct cam_req_mgr_core_link *)
cam_get_device_priv(err_info->link_hdl);
if (!link) {
- CRM_DBG("link ptr NULL %x", err_info->link_hdl);
+ CAM_DBG(CAM_CRM, "link ptr NULL %x", err_info->link_hdl);
rc = -EINVAL;
goto end;
}
@@ -1444,7 +1463,7 @@
crm_timer_reset(link->watchdog);
task = cam_req_mgr_workq_get_task(link->workq);
if (!task) {
- CRM_ERR("no empty task req_id %lld", err_info->req_id);
+ CAM_ERR(CAM_CRM, "no empty task req_id %lld", err_info->req_id);
rc = -EBUSY;
goto end;
}
@@ -1482,7 +1501,7 @@
struct crm_task_payload *task_data;
if (!sof_data) {
- CRM_ERR("sof_data is NULL");
+ CAM_ERR(CAM_CRM, "sof_data is NULL");
rc = -EINVAL;
goto end;
}
@@ -1490,7 +1509,7 @@
link = (struct cam_req_mgr_core_link *)
cam_get_device_priv(sof_data->link_hdl);
if (!link) {
- CRM_DBG("link ptr NULL %x", sof_data->link_hdl);
+ CAM_DBG(CAM_CRM, "link ptr NULL %x", sof_data->link_hdl);
rc = -EINVAL;
goto end;
}
@@ -1498,7 +1517,8 @@
crm_timer_reset(link->watchdog);
task = cam_req_mgr_workq_get_task(link->workq);
if (!task) {
- CRM_ERR("no empty task frame %lld", sof_data->frame_id);
+ CAM_ERR(CAM_CRM, "no empty task frame %lld",
+ sof_data->frame_id);
rc = -EBUSY;
goto end;
}
@@ -1545,7 +1565,7 @@
return -EPERM;
mutex_init(&link->req.lock);
- CRM_DBG("LOCK_DBG in_q lock %pK", &link->req.lock);
+ CAM_DBG(CAM_CRM, "LOCK_DBG in_q lock %pK", &link->req.lock);
link->req.num_tbl = 0;
rc = __cam_req_mgr_setup_in_q(&link->req);
@@ -1562,7 +1582,7 @@
if (!dev->ops ||
!dev->ops->get_dev_info ||
!dev->ops->link_setup) {
- CRM_ERR("FATAL: device ops NULL");
+ CAM_ERR(CAM_CRM, "FATAL: device ops NULL");
rc = -ENXIO;
goto error;
}
@@ -1570,7 +1590,10 @@
dev->parent = (void *)link;
dev->dev_info.dev_hdl = dev->dev_hdl;
rc = dev->ops->get_dev_info(&dev->dev_info);
- CRM_DBG("%x: connected: %s, id %d, delay %d",
+
+ trace_cam_req_mgr_connect_device(link, &dev->dev_info);
+
+ CAM_DBG(CAM_CRM, "%x: connected: %s, id %d, delay %d",
link_info->session_hdl, dev->dev_info.name,
dev->dev_info.dev_id, dev->dev_info.p_delay);
if (rc < 0 ||
@@ -1578,10 +1601,10 @@
CAM_PIPELINE_DELAY_MAX ||
dev->dev_info.p_delay <
CAM_PIPELINE_DELAY_0) {
- CRM_ERR("get device info failed");
+ CAM_ERR(CAM_CRM, "get device info failed");
goto error;
} else {
- CRM_DBG("%x: connected: %s, delay %d",
+ CAM_DBG(CAM_CRM, "%x: connected: %s, delay %d",
link_info->session_hdl,
dev->dev_info.name,
dev->dev_info.p_delay);
@@ -1610,7 +1633,7 @@
pd_tbl = __cam_req_mgr_find_pd_tbl(link->req.l_tbl,
dev->dev_info.p_delay);
if (!pd_tbl) {
- CRM_ERR("pd %d tbl not found",
+ CAM_ERR(CAM_CRM, "pd %d tbl not found",
dev->dev_info.p_delay);
rc = -ENXIO;
goto error;
@@ -1619,7 +1642,7 @@
pd_tbl = __cam_req_mgr_create_pd_tbl(
dev->dev_info.p_delay);
if (pd_tbl == NULL) {
- CRM_ERR("create new pd tbl failed");
+ CAM_ERR(CAM_CRM, "create new pd tbl failed");
rc = -ENXIO;
goto error;
}
@@ -1666,7 +1689,7 @@
struct cam_req_mgr_core_session *cam_session = NULL;
if (!ses_info) {
- CRM_DBG("NULL session info pointer");
+ CAM_DBG(CAM_CRM, "NULL session info pointer");
return -EINVAL;
}
mutex_lock(&g_crm_core_dev->crm_lock);
@@ -1679,7 +1702,8 @@
session_hdl = cam_create_session_hdl((void *)cam_session);
if (session_hdl < 0) {
- CRM_ERR("unable to create session_hdl = %x", session_hdl);
+ CAM_ERR(CAM_CRM, "unable to create session_hdl = %x",
+ session_hdl);
rc = session_hdl;
kfree(cam_session);
goto end;
@@ -1687,7 +1711,7 @@
ses_info->session_hdl = session_hdl;
mutex_init(&cam_session->lock);
- CRM_DBG("LOCK_DBG session lock %pK", &cam_session->lock);
+ CAM_DBG(CAM_CRM, "LOCK_DBG session lock %pK", &cam_session->lock);
mutex_lock(&cam_session->lock);
cam_session->session_hdl = session_hdl;
@@ -1706,7 +1730,7 @@
struct cam_req_mgr_core_session *cam_session = NULL;
if (!ses_info) {
- CRM_DBG("NULL session info pointer");
+ CAM_DBG(CAM_CRM, "NULL session info pointer");
return -EINVAL;
}
@@ -1714,14 +1738,14 @@
cam_session = (struct cam_req_mgr_core_session *)
cam_get_device_priv(ses_info->session_hdl);
if (!cam_session) {
- CRM_ERR("failed to get session priv");
+ CAM_ERR(CAM_CRM, "failed to get session priv");
rc = -ENOENT;
goto end;
}
mutex_lock(&cam_session->lock);
if (cam_session->num_links) {
- CRM_ERR("destroy session %x num_active_links %d",
+ CAM_ERR(CAM_CRM, "destroy session %x num_active_links %d",
ses_info->session_hdl,
cam_session->num_links);
/* @TODO : Go through active links and destroy ? */
@@ -1733,7 +1757,7 @@
rc = cam_destroy_session_hdl(ses_info->session_hdl);
if (rc < 0)
- CRM_ERR("unable to destroy session_hdl = %x rc %d",
+ CAM_ERR(CAM_CRM, "unable to destroy session_hdl = %x rc %d",
ses_info->session_hdl, rc);
end:
@@ -1750,11 +1774,12 @@
struct cam_req_mgr_core_link *link;
if (!link_info) {
- CRM_DBG("NULL pointer");
+ CAM_DBG(CAM_CRM, "NULL pointer");
return -EINVAL;
}
if (link_info->num_devices > CAM_REQ_MGR_MAX_HANDLES) {
- CRM_ERR("Invalid num devices %d", link_info->num_devices);
+ CAM_ERR(CAM_CRM, "Invalid num devices %d",
+ link_info->num_devices);
return -EINVAL;
}
@@ -1762,7 +1787,7 @@
cam_session = (struct cam_req_mgr_core_session *)
cam_get_device_priv(link_info->session_hdl);
if (!cam_session) {
- CRM_DBG("NULL pointer");
+ CAM_DBG(CAM_CRM, "NULL pointer");
return -EINVAL;
}
@@ -1771,11 +1796,11 @@
/* Allocate link struct and map it with session's request queue */
link = __cam_req_mgr_reserve_link(cam_session);
if (!link) {
- CRM_ERR("failed to reserve new link");
+ CAM_ERR(CAM_CRM, "failed to reserve new link");
mutex_unlock(&g_crm_core_dev->crm_lock);
return -EINVAL;
}
- CRM_DBG("link reserved %pK %x", link, link->link_hdl);
+ CAM_DBG(CAM_CRM, "link reserved %pK %x", link, link->link_hdl);
memset(&root_dev, 0, sizeof(struct cam_create_dev_hdl));
root_dev.session_hdl = link_info->session_hdl;
@@ -1785,7 +1810,8 @@
/* Create unique dev handle for link */
link->link_hdl = cam_create_device_hdl(&root_dev);
if (link->link_hdl < 0) {
- CRM_ERR("Insufficient memory to create new device handle");
+ CAM_ERR(CAM_CRM,
+ "Insufficient memory to create new device handle");
mutex_unlock(&link->lock);
rc = link->link_hdl;
goto link_hdl_fail;
@@ -1797,7 +1823,8 @@
rc = __cam_req_mgr_create_subdevs(&link->l_dev,
link_info->num_devices);
if (rc < 0) {
- CRM_ERR("Insufficient memory to create new crm subdevs");
+ CAM_ERR(CAM_CRM,
+ "Insufficient memory to create new crm subdevs");
goto create_subdev_failed;
}
@@ -1816,7 +1843,7 @@
rc = cam_req_mgr_workq_create(buf, CRM_WORKQ_NUM_TASKS,
&link->workq, CRM_WORKQ_USAGE_NON_IRQ);
if (rc < 0) {
- CRM_ERR("FATAL: unable to create worker");
+ CAM_ERR(CAM_CRM, "FATAL: unable to create worker");
__cam_req_mgr_destroy_link_info(link);
goto setup_failed;
}
@@ -1862,18 +1889,18 @@
struct cam_req_mgr_core_link *link;
if (!unlink_info) {
- CRM_ERR("NULL pointer");
+ CAM_ERR(CAM_CRM, "NULL pointer");
return -EINVAL;
}
mutex_lock(&g_crm_core_dev->crm_lock);
- CRM_DBG("link_hdl %x", unlink_info->link_hdl);
+ CAM_DBG(CAM_CRM, "link_hdl %x", unlink_info->link_hdl);
/* session hdl's priv data is cam session struct */
cam_session = (struct cam_req_mgr_core_session *)
cam_get_device_priv(unlink_info->session_hdl);
if (!cam_session) {
- CRM_ERR("NULL pointer");
+ CAM_ERR(CAM_CRM, "NULL pointer");
mutex_unlock(&g_crm_core_dev->crm_lock);
return -EINVAL;
}
@@ -1881,7 +1908,7 @@
/* link hdl's priv data is core_link struct */
link = cam_get_device_priv(unlink_info->link_hdl);
if (!link) {
- CRM_ERR("NULL pointer");
+ CAM_ERR(CAM_CRM, "NULL pointer");
mutex_unlock(&g_crm_core_dev->crm_lock);
return -EINVAL;
}
@@ -1905,7 +1932,7 @@
/* Destroy the link handle */
rc = cam_destroy_device_hdl(unlink_info->link_hdl);
if (rc < 0) {
- CRM_ERR("error while destroying dev handle %d %x",
+ CAM_ERR(CAM_CRM, "error while destroying dev handle %d %x",
rc, link->link_hdl);
}
@@ -1926,7 +1953,7 @@
struct crm_task_payload task_data;
if (!sched_req) {
- CRM_ERR("csl_req is NULL");
+ CAM_ERR(CAM_CRM, "csl_req is NULL");
rc = -EINVAL;
goto end;
}
@@ -1934,15 +1961,16 @@
link = (struct cam_req_mgr_core_link *)
cam_get_device_priv(sched_req->link_hdl);
if (!link) {
- CRM_DBG("link ptr NULL %x", sched_req->link_hdl);
+ CAM_DBG(CAM_CRM, "link ptr NULL %x", sched_req->link_hdl);
return -EINVAL;
}
session = (struct cam_req_mgr_core_session *)link->parent;
if (!session) {
- CRM_WARN("session ptr NULL %x", sched_req->link_hdl);
+ CAM_WARN(CAM_CRM, "session ptr NULL %x", sched_req->link_hdl);
return -EINVAL;
}
- CRM_DBG("link %x req %lld", sched_req->link_hdl, sched_req->req_id);
+ CAM_DBG(CAM_CRM, "link %x req %lld",
+ sched_req->link_hdl, sched_req->req_id);
task_data.type = CRM_WORKQ_TASK_SCHED_REQ;
sched = (struct cam_req_mgr_sched_request *)&task_data.u;
@@ -1957,7 +1985,8 @@
rc = cam_req_mgr_process_sched_req(link, &task_data);
- CRM_DBG("DONE dev %x req %lld", sched_req->link_hdl, sched_req->req_id);
+ CAM_DBG(CAM_CRM, "DONE dev %x req %lld",
+ sched_req->link_hdl, sched_req->req_id);
end:
return rc;
}
@@ -1966,7 +1995,7 @@
struct cam_req_mgr_sync_mode *sync_links)
{
if (!sync_links) {
- CRM_ERR("NULL pointer");
+ CAM_ERR(CAM_CRM, "NULL pointer");
return -EINVAL;
}
@@ -1985,12 +2014,13 @@
struct cam_req_mgr_core_session *session = NULL;
if (!flush_info) {
- CRM_ERR("flush req is NULL");
+ CAM_ERR(CAM_CRM, "flush req is NULL");
rc = -EFAULT;
goto end;
}
if (flush_info->flush_type >= CAM_REQ_MGR_FLUSH_TYPE_MAX) {
- CRM_ERR("incorrect flush type %x", flush_info->flush_type);
+ CAM_ERR(CAM_CRM, "incorrect flush type %x",
+ flush_info->flush_type);
rc = -EINVAL;
goto end;
}
@@ -1999,12 +2029,12 @@
session = (struct cam_req_mgr_core_session *)
cam_get_device_priv(flush_info->session_hdl);
if (!session) {
- CRM_ERR("Invalid session %x", flush_info->session_hdl);
+ CAM_ERR(CAM_CRM, "Invalid session %x", flush_info->session_hdl);
rc = -EINVAL;
goto end;
}
if (session->num_links <= 0) {
- CRM_WARN("No active links in session %x",
+ CAM_WARN(CAM_CRM, "No active links in session %x",
flush_info->session_hdl);
goto end;
}
@@ -2012,7 +2042,7 @@
link = (struct cam_req_mgr_core_link *)
cam_get_device_priv(flush_info->link_hdl);
if (!link) {
- CRM_DBG("link ptr NULL %x", flush_info->link_hdl);
+ CAM_DBG(CAM_CRM, "link ptr NULL %x", flush_info->link_hdl);
rc = -EINVAL;
goto end;
}
@@ -2044,10 +2074,10 @@
int cam_req_mgr_core_device_init(void)
{
- CRM_DBG("Enter g_crm_core_dev %pK", g_crm_core_dev);
+ CAM_DBG(CAM_CRM, "Enter g_crm_core_dev %pK", g_crm_core_dev);
if (g_crm_core_dev) {
- CRM_WARN("core device is already initialized");
+ CAM_WARN(CAM_CRM, "core device is already initialized");
return 0;
}
g_crm_core_dev = (struct cam_req_mgr_core_device *)
@@ -2055,7 +2085,7 @@
if (!g_crm_core_dev)
return -ENOMEM;
- CRM_DBG("g_crm_core_dev %pK", g_crm_core_dev);
+ CAM_DBG(CAM_CRM, "g_crm_core_dev %pK", g_crm_core_dev);
INIT_LIST_HEAD(&g_crm_core_dev->session_head);
mutex_init(&g_crm_core_dev->crm_lock);
cam_req_mgr_debug_register(g_crm_core_dev);
@@ -2066,11 +2096,11 @@
int cam_req_mgr_core_device_deinit(void)
{
if (!g_crm_core_dev) {
- CRM_ERR("NULL pointer");
+ CAM_ERR(CAM_CRM, "NULL pointer");
return -EINVAL;
}
- CRM_DBG("g_crm_core_dev %pK", g_crm_core_dev);
+ CAM_DBG(CAM_CRM, "g_crm_core_dev %pK", g_crm_core_dev);
mutex_destroy(&g_crm_core_dev->crm_lock);
kfree(g_crm_core_dev);
g_crm_core_dev = NULL;
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core_defs.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core_defs.h
index 2a831e8..f61c41e 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core_defs.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core_defs.h
@@ -25,30 +25,5 @@
#define CRM_GET_REQ_ID(in_q, idx) in_q->slot[idx].req_id
-#if (CRM_TRACE_ENABLE == 1)
- #define CRM_DBG(fmt, args...) do { \
- trace_printk("%d: [crm_dbg] "fmt"\n", __LINE__, ##args); \
- pr_debug("%s:%d "fmt"\n", __func__, __LINE__, ##args); \
- } while (0)
-
- #define CRM_WARN(fmt, args...) do { \
- trace_printk("%d: [crm_warn] "fmt"\n", __LINE__, ##args); \
- pr_warn("%s:%d "fmt"\n", __func__, __LINE__, ##args); \
- } while (0)
-
- #define CRM_ERR(fmt, args...) do { \
- trace_printk("%d: [crm_err] "fmt"\n", __LINE__, ##args); \
- pr_err("%s:%d "fmt"\n", __func__, __LINE__, ##args);\
- } while (0)
-#else
- #define CRM_DBG(fmt, args...) pr_debug("%s:%d "fmt"\n", \
- __func__, __LINE__, ##args)
-
- #define CRM_WARN(fmt, args...) pr_warn("%s:%d "fmt"\n", \
- __func__, __LINE__, ##args)
-
- #define CRM_ERR(fmt, args...) pr_err("%s:%d "fmt"\n", \
- __func__, __LINE__, ##args)
-#endif
#endif
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
index c495088..7a2bc09 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
@@ -10,8 +10,6 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "CAM-REQ-MGR %s:%d " fmt, __func__, __LINE__
-
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
@@ -26,6 +24,7 @@
#include "cam_req_mgr_core.h"
#include "cam_subdev.h"
#include "cam_mem_mgr.h"
+#include "cam_debug_util.h"
#define CAM_REQ_MGR_EVENT_MAX 30
@@ -108,7 +107,7 @@
rc = v4l2_fh_open(filep);
if (rc) {
- pr_err("v4l2_fh_open failed: %d\n", rc);
+ CAM_ERR(CAM_CRM, "v4l2_fh_open failed: %d", rc);
goto end;
}
@@ -120,7 +119,7 @@
rc = cam_mem_mgr_init();
if (rc) {
g_dev.open_cnt--;
- pr_err("mem mgr init failed\n");
+ CAM_ERR(CAM_CRM, "mem mgr init failed");
goto mem_mgr_init_fail;
}
@@ -507,19 +506,20 @@
int rc;
if (g_dev.state != true) {
- pr_err("camera root device not ready yet");
+ CAM_ERR(CAM_CRM, "camera root device not ready yet");
return -ENODEV;
}
if (!csd || !csd->name) {
- pr_err("invalid arguments");
+ CAM_ERR(CAM_CRM, "invalid arguments");
return -EINVAL;
}
mutex_lock(&g_dev.dev_lock);
if ((g_dev.subdev_nodes_created) &&
(csd->sd_flags & V4L2_SUBDEV_FL_HAS_DEVNODE)) {
- pr_err("dynamic node is not allowed, name: %s, type : %d",
+ CAM_ERR(CAM_CRM,
+ "dynamic node is not allowed, name: %s, type :%d",
csd->name, csd->ent_function);
rc = -EINVAL;
goto reg_fail;
@@ -538,7 +538,7 @@
rc = v4l2_device_register_subdev(g_dev.v4l2_dev, sd);
if (rc) {
- pr_err("register subdev failed");
+ CAM_ERR(CAM_CRM, "register subdev failed");
goto reg_fail;
}
g_dev.count++;
@@ -552,7 +552,7 @@
int cam_unregister_subdev(struct cam_subdev *csd)
{
if (g_dev.state != true) {
- pr_err("camera root device not ready yet");
+ CAM_ERR(CAM_CRM, "camera root device not ready yet");
return -ENODEV;
}
@@ -603,19 +603,19 @@
rc = cam_req_mgr_util_init();
if (rc) {
- pr_err("cam req mgr util init is failed\n");
+ CAM_ERR(CAM_CRM, "cam req mgr util init is failed");
goto req_mgr_util_fail;
}
rc = cam_mem_mgr_init();
if (rc) {
- pr_err("mem mgr init failed\n");
+ CAM_ERR(CAM_CRM, "mem mgr init failed");
goto mem_mgr_init_fail;
}
rc = cam_req_mgr_core_device_init();
if (rc) {
- pr_err("core device setup failed\n");
+ CAM_ERR(CAM_CRM, "core device setup failed");
goto req_mgr_core_fail;
}
@@ -663,7 +663,7 @@
return -EINVAL;
if (g_dev.state != true) {
- pr_err("camera root device not ready yet");
+ CAM_ERR(CAM_CRM, "camera root device not ready yet");
return -ENODEV;
}
@@ -675,7 +675,7 @@
rc = v4l2_device_register_subdev_nodes(g_dev.v4l2_dev);
if (rc) {
- pr_err("failed to register the sub devices");
+ CAM_ERR(CAM_CRM, "failed to register the sub devices");
goto create_fail;
}
@@ -683,7 +683,7 @@
if (!(sd->flags & V4L2_SUBDEV_FL_HAS_DEVNODE))
continue;
sd->entity.name = video_device_node_name(sd->devnode);
- pr_debug("created node :%s\n", sd->entity.name);
+ CAM_DBG(CAM_CRM, "created node :%s", sd->entity.name);
}
g_dev.subdev_nodes_created = true;
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_interface.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_interface.h
index 91860f6..8faf35a 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_interface.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_interface.h
@@ -196,9 +196,9 @@
* @error : what error device hit while processing this req
*/
struct cam_req_mgr_error_notify {
- int32_t link_hdl;
- int32_t dev_hdl;
- int64_t req_id;
+ int32_t link_hdl;
+ int32_t dev_hdl;
+ uint64_t req_id;
enum cam_req_mgr_device_error error;
};
@@ -210,9 +210,9 @@
*
*/
struct cam_req_mgr_add_request {
- int32_t link_hdl;
- int32_t dev_hdl;
- int64_t req_id;
+ int32_t link_hdl;
+ int32_t dev_hdl;
+ uint64_t req_id;
};
@@ -260,7 +260,7 @@
struct cam_req_mgr_apply_request {
int32_t link_hdl;
int32_t dev_hdl;
- int64_t request_id;
+ uint64_t request_id;
int32_t report_if_bubble;
};
@@ -276,7 +276,7 @@
int32_t link_hdl;
int32_t dev_hdl;
uint32_t type;
- int64_t req_id;
+ uint64_t req_id;
};
/**
@@ -286,9 +286,9 @@
*
*/
struct cam_req_mgr_link_evt_data {
- int32_t link_hdl;
- int32_t dev_hdl;
- int64_t req_id;
+ int32_t link_hdl;
+ int32_t dev_hdl;
+ uint64_t req_id;
enum cam_req_mgr_link_evt_type evt_type;
union {
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_timer.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_timer.c
index 9da445d..2aa2ab1 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_timer.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_timer.c
@@ -11,12 +11,13 @@
*/
#include "cam_req_mgr_timer.h"
+#include "cam_debug_util.h"
void crm_timer_reset(struct cam_req_mgr_timer *crm_timer)
{
if (!crm_timer)
return;
- CRM_DBG("Starting timer to fire in %d ms. (jiffies=%lu)\n",
+ CAM_DBG(CAM_CRM, "Starting timer to fire in %d ms. (jiffies=%lu)\n",
crm_timer->expires, jiffies);
mod_timer(&crm_timer->sys_timer,
(jiffies + msecs_to_jiffies(crm_timer->expires)));
@@ -27,17 +28,17 @@
struct cam_req_mgr_timer *timer = (struct cam_req_mgr_timer *)data;
if (!timer) {
- CRM_ERR("NULL timer");
+ CAM_ERR(CAM_CRM, "NULL timer");
return;
}
- CRM_DBG("timer %pK parent %pK", timer, timer->parent);
+ CAM_DBG(CAM_CRM, "timer %pK parent %pK", timer, timer->parent);
crm_timer_reset(timer);
}
void crm_timer_modify(struct cam_req_mgr_timer *crm_timer,
int32_t expires)
{
- CRM_DBG("new time %d", expires);
+ CAM_DBG(CAM_CRM, "new time %d", expires);
if (crm_timer) {
crm_timer->expires = expires;
crm_timer_reset(crm_timer);
@@ -50,7 +51,7 @@
int ret = 0;
struct cam_req_mgr_timer *crm_timer = NULL;
- CRM_DBG("init timer %d %pK", expires, *timer);
+ CAM_DBG(CAM_CRM, "init timer %d %pK", expires, *timer);
if (*timer == NULL) {
crm_timer = (struct cam_req_mgr_timer *)
kzalloc(sizeof(struct cam_req_mgr_timer), GFP_KERNEL);
@@ -71,7 +72,7 @@
crm_timer_reset(crm_timer);
*timer = crm_timer;
} else {
- CRM_WARN("Timer already exists!!");
+ CAM_WARN(CAM_CRM, "Timer already exists!!");
ret = -EINVAL;
}
end:
@@ -79,7 +80,7 @@
}
void crm_timer_exit(struct cam_req_mgr_timer **crm_timer)
{
- CRM_DBG("destroy timer %pK", *crm_timer);
+ CAM_DBG(CAM_CRM, "destroy timer %pK", *crm_timer);
if (*crm_timer) {
del_timer(&(*crm_timer)->sys_timer);
kfree(*crm_timer);
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c
index 7bc26ec..a9134fb 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c
@@ -20,12 +20,7 @@
#include <linux/random.h>
#include <media/cam_req_mgr.h>
#include "cam_req_mgr_util.h"
-
-#ifdef CONFIG_CAM_REQ_MGR_UTIL_DEBUG
-#define CDBG(fmt, args...) pr_err(fmt, ##args)
-#else
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
-#endif
+#include "cam_debug_util.h"
static struct cam_req_mgr_util_hdl_tbl *hdl_tbl;
static DEFINE_SPINLOCK(hdl_tbl_lock);
@@ -38,7 +33,7 @@
if (hdl_tbl) {
rc = -EINVAL;
- pr_err("Hdl_tbl is already present\n");
+ CAM_ERR(CAM_CRM, "Hdl_tbl is already present");
goto hdl_tbl_check_failed;
}
@@ -58,7 +53,7 @@
spin_unlock_bh(&hdl_tbl_lock);
bitmap_size = BITS_TO_LONGS(CAM_REQ_MGR_MAX_HANDLES) * sizeof(long);
- hdl_tbl->bitmap = kzalloc(sizeof(bitmap_size), GFP_KERNEL);
+ hdl_tbl->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
if (!hdl_tbl->bitmap) {
rc = -ENOMEM;
goto bitmap_alloc_fail;
@@ -79,7 +74,7 @@
{
spin_lock_bh(&hdl_tbl_lock);
if (!hdl_tbl) {
- pr_err("Hdl tbl is NULL\n");
+ CAM_ERR(CAM_CRM, "Hdl tbl is NULL");
spin_unlock_bh(&hdl_tbl_lock);
return -EINVAL;
}
@@ -99,14 +94,14 @@
spin_lock_bh(&hdl_tbl_lock);
if (!hdl_tbl) {
- pr_err("Hdl tbl is NULL\n");
+ CAM_ERR(CAM_CRM, "Hdl tbl is NULL");
spin_unlock_bh(&hdl_tbl_lock);
return -EINVAL;
}
for (i = 0; i < CAM_REQ_MGR_MAX_HANDLES; i++) {
if (hdl_tbl->hdl[i].state == HDL_ACTIVE) {
- pr_err("Dev handle = %x session_handle = %x\n",
+ CAM_ERR(CAM_CRM, "Dev handle = %x session_handle = %x",
hdl_tbl->hdl[i].hdl_value,
hdl_tbl->hdl[i].session_hdl);
hdl_tbl->hdl[i].state = HDL_FREE;
@@ -141,14 +136,14 @@
spin_lock_bh(&hdl_tbl_lock);
if (!hdl_tbl) {
- pr_err("Hdl tbl is NULL\n");
+ CAM_ERR(CAM_CRM, "Hdl tbl is NULL");
spin_unlock_bh(&hdl_tbl_lock);
return -EINVAL;
}
idx = cam_get_free_handle_index();
if (idx < 0) {
- pr_err("Unable to create session handle\n");
+ CAM_ERR(CAM_CRM, "Unable to create session handle");
spin_unlock_bh(&hdl_tbl_lock);
return idx;
}
@@ -174,14 +169,14 @@
spin_lock_bh(&hdl_tbl_lock);
if (!hdl_tbl) {
- pr_err("Hdl tbl is NULL\n");
+ CAM_ERR(CAM_CRM, "Hdl tbl is NULL");
spin_unlock_bh(&hdl_tbl_lock);
return -EINVAL;
}
idx = cam_get_free_handle_index();
if (idx < 0) {
- pr_err("Unable to create device handle\n");
+ CAM_ERR(CAM_CRM, "Unable to create device handle");
spin_unlock_bh(&hdl_tbl_lock);
return idx;
}
@@ -196,7 +191,7 @@
hdl_tbl->hdl[idx].ops = hdl_data->ops;
spin_unlock_bh(&hdl_tbl_lock);
- pr_debug("%s: handle = %x\n", __func__, handle);
+ pr_debug("%s: handle = %x", __func__, handle);
return handle;
}
@@ -208,29 +203,29 @@
spin_lock_bh(&hdl_tbl_lock);
if (!hdl_tbl) {
- pr_err("Hdl tbl is NULL\n");
+ CAM_ERR(CAM_CRM, "Hdl tbl is NULL");
goto device_priv_fail;
}
idx = CAM_REQ_MGR_GET_HDL_IDX(dev_hdl);
if (idx >= CAM_REQ_MGR_MAX_HANDLES) {
- pr_err("Invalid idx\n");
+ CAM_ERR(CAM_CRM, "Invalid idx");
goto device_priv_fail;
}
if (hdl_tbl->hdl[idx].state != HDL_ACTIVE) {
- pr_err("Invalid state\n");
+ CAM_ERR(CAM_CRM, "Invalid state");
goto device_priv_fail;
}
type = CAM_REQ_MGR_GET_HDL_TYPE(dev_hdl);
if (HDL_TYPE_DEV != type && HDL_TYPE_SESSION != type) {
- pr_err("Invalid type\n");
+ CAM_ERR(CAM_CRM, "Invalid type");
goto device_priv_fail;
}
if (hdl_tbl->hdl[idx].hdl_value != dev_hdl) {
- pr_err("Invalid hdl\n");
+ CAM_ERR(CAM_CRM, "Invalid hdl");
goto device_priv_fail;
}
@@ -252,29 +247,29 @@
spin_lock_bh(&hdl_tbl_lock);
if (!hdl_tbl) {
- pr_err("Hdl tbl is NULL\n");
+ CAM_ERR(CAM_CRM, "Hdl tbl is NULL");
goto device_ops_fail;
}
idx = CAM_REQ_MGR_GET_HDL_IDX(dev_hdl);
if (idx >= CAM_REQ_MGR_MAX_HANDLES) {
- pr_err("Invalid idx\n");
+ CAM_ERR(CAM_CRM, "Invalid idx");
goto device_ops_fail;
}
if (hdl_tbl->hdl[idx].state != HDL_ACTIVE) {
- pr_err("Invalid state\n");
+ CAM_ERR(CAM_CRM, "Invalid state");
goto device_ops_fail;
}
type = CAM_REQ_MGR_GET_HDL_TYPE(dev_hdl);
if (HDL_TYPE_DEV != type && HDL_TYPE_SESSION != type) {
- pr_err("Invalid type\n");
+ CAM_ERR(CAM_CRM, "Invalid type");
goto device_ops_fail;
}
if (hdl_tbl->hdl[idx].hdl_value != dev_hdl) {
- pr_err("Invalid hdl\n");
+ CAM_ERR(CAM_CRM, "Invalid hdl");
goto device_ops_fail;
}
@@ -295,29 +290,29 @@
spin_lock_bh(&hdl_tbl_lock);
if (!hdl_tbl) {
- pr_err("Hdl tbl is NULL\n");
+ CAM_ERR(CAM_CRM, "Hdl tbl is NULL");
goto destroy_hdl_fail;
}
idx = CAM_REQ_MGR_GET_HDL_IDX(dev_hdl);
if (idx >= CAM_REQ_MGR_MAX_HANDLES) {
- pr_err("Invalid idx\n");
+ CAM_ERR(CAM_CRM, "Invalid idx");
goto destroy_hdl_fail;
}
if (hdl_tbl->hdl[idx].state != HDL_ACTIVE) {
- pr_err("Invalid state\n");
+ CAM_ERR(CAM_CRM, "Invalid state");
goto destroy_hdl_fail;
}
type = CAM_REQ_MGR_GET_HDL_TYPE(dev_hdl);
if (type != dev_hdl_type) {
- pr_err("Invalid type %d, %d\n", type, dev_hdl_type);
+ CAM_ERR(CAM_CRM, "Invalid type %d, %d", type, dev_hdl_type);
goto destroy_hdl_fail;
}
if (hdl_tbl->hdl[idx].hdl_value != dev_hdl) {
- pr_err("Invalid hdl\n");
+ CAM_ERR(CAM_CRM, "Invalid hdl");
goto destroy_hdl_fail;
}
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
index 38dcb42..c48a391 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
@@ -11,6 +11,7 @@
*/
#include "cam_req_mgr_workq.h"
+#include "cam_debug_util.h"
#define WORKQ_ACQUIRE_LOCK(workq, flags) {\
if ((workq)->in_irq) \
@@ -84,7 +85,7 @@
if (task->process_cb)
task->process_cb(task->priv, task->payload);
else
- CRM_WARN("FATAL:no task handler registered for workq");
+ CAM_WARN(CAM_CRM, "FATAL:no task handler registered for workq");
cam_req_mgr_workq_put_task(task);
return 0;
@@ -100,7 +101,7 @@
struct crm_workq_task *task, *task_save;
int32_t i = CRM_TASK_PRIORITY_0;
if (!w) {
- CRM_ERR("NULL task pointer can not schedule");
+ CAM_ERR(CAM_CRM, "NULL task pointer can not schedule");
return;
}
workq = (struct cam_req_mgr_core_workq *)
@@ -113,7 +114,7 @@
atomic_sub(1, &workq->task.pending_cnt);
cam_req_mgr_process_task(task);
}
- CRM_DBG("processed task %pK free_cnt %d",
+ CAM_DBG(CAM_CRM, "processed task %pK free_cnt %d",
task, atomic_read(&workq->task.free_cnt));
}
i++;
@@ -125,7 +126,7 @@
int32_t i = CRM_TASK_PRIORITY_0;
struct crm_workq_task *task, *task_save;
- CRM_DBG("pending_cnt %d",
+ CAM_DBG(CAM_CRM, "pending_cnt %d",
atomic_read(&workq->task.pending_cnt));
while (i < CRM_TASK_PRIORITY_MAX) {
@@ -133,7 +134,7 @@
list_for_each_entry_safe(task, task_save,
&workq->task.process_head[i], entry) {
cam_req_mgr_workq_put_task(task);
- CRM_WARN("flush task %pK, %d, cnt %d",
+ CAM_WARN(CAM_CRM, "flush task %pK, %d, cnt %d",
task, i, atomic_read(
&workq->task.free_cnt));
}
@@ -150,13 +151,13 @@
unsigned long flags = 0;
if (!task) {
- CRM_WARN("NULL task pointer can not schedule");
+ CAM_WARN(CAM_CRM, "NULL task pointer can not schedule");
rc = -EINVAL;
goto end;
}
workq = (struct cam_req_mgr_core_workq *)task->parent;
if (!workq) {
- CRM_DBG("NULL workq pointer suspect mem corruption");
+ CAM_DBG(CAM_CRM, "NULL workq pointer suspect mem corruption");
rc = -EINVAL;
goto end;
}
@@ -167,7 +168,7 @@
if (task->cancel == 1) {
cam_req_mgr_workq_put_task(task);
- CRM_WARN("task aborted and queued back to pool");
+ CAM_WARN(CAM_CRM, "task aborted and queued back to pool");
rc = 0;
goto end;
}
@@ -182,7 +183,7 @@
WORKQ_RELEASE_LOCK(workq, flags);
atomic_add(1, &workq->task.pending_cnt);
- CRM_DBG("enq task %pK pending_cnt %d",
+ CAM_DBG(CAM_CRM, "enq task %pK pending_cnt %d",
task, atomic_read(&workq->task.pending_cnt));
queue_work(workq->job, &workq->work);
@@ -207,7 +208,7 @@
return -ENOMEM;
strlcat(buf, name, sizeof(buf));
- CRM_DBG("create workque crm_workq-%s", name);
+ CAM_DBG(CAM_CRM, "create workque crm_workq-%s", name);
crm_workq->job = alloc_workqueue(buf,
WQ_HIGHPRI | WQ_UNBOUND, 0, NULL);
if (!crm_workq->job) {
@@ -218,7 +219,7 @@
/* Workq attributes initialization */
INIT_WORK(&crm_workq->work, cam_req_mgr_process_workq);
spin_lock_init(&crm_workq->lock_bh);
- CRM_DBG("LOCK_DBG workq %s lock %pK",
+ CAM_DBG(CAM_CRM, "LOCK_DBG workq %s lock %pK",
name, &crm_workq->lock_bh);
/* Task attributes initialization */
@@ -234,7 +235,7 @@
crm_workq->task.num_task,
GFP_KERNEL);
if (!crm_workq->task.pool) {
- CRM_WARN("Insufficient memory %lu",
+ CAM_WARN(CAM_CRM, "Insufficient memory %lu",
sizeof(struct crm_workq_task) *
crm_workq->task.num_task);
kfree(crm_workq);
@@ -250,7 +251,7 @@
cam_req_mgr_workq_put_task(task);
}
*workq = crm_workq;
- CRM_DBG("free tasks %d",
+ CAM_DBG(CAM_CRM, "free tasks %d",
atomic_read(&crm_workq->task.free_cnt));
}
@@ -259,7 +260,7 @@
void cam_req_mgr_workq_destroy(struct cam_req_mgr_core_workq **crm_workq)
{
- CRM_DBG("destroy workque %pK", crm_workq);
+ CAM_DBG(CAM_CRM, "destroy workque %pK", crm_workq);
if (*crm_workq) {
crm_workq_clear_q(*crm_workq);
if ((*crm_workq)->job) {
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/Makefile
index e515a40..b66480c 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/Makefile
@@ -4,3 +4,5 @@
obj-$(CONFIG_SPECTRA_CAMERA) += cam_csiphy/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_actuator/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_flash/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_eeprom/
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/Makefile
index 8670d80..4e8ea8b 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/Makefile
@@ -1,8 +1,10 @@
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
obj-$(CONFIG_SPECTRA_CAMERA) += cam_actuator_dev.o cam_actuator_core.o cam_actuator_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
index 91b68cf..18097b0 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
@@ -13,7 +13,8 @@
#include <linux/module.h>
#include <cam_sensor_cmn_header.h>
#include "cam_actuator_core.h"
-#include <cam_sensor_util.h>
+#include "cam_sensor_util.h"
+#include "cam_trace.h"
int32_t cam_actuator_slaveInfo_pkt_parser(struct cam_actuator_ctrl_t *a_ctrl,
uint32_t *cmd_buf)
@@ -22,7 +23,7 @@
struct cam_cmd_i2c_info *i2c_info;
if (!a_ctrl || !cmd_buf) {
- pr_err("%s:%d Invalid Args\n", __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Invalid Args");
return -EINVAL;
}
@@ -31,8 +32,8 @@
i2c_info->i2c_freq_mode;
a_ctrl->io_master_info.cci_client->sid =
i2c_info->slave_addr >> 1;
- CDBG("%s:%d Slave addr: 0x%x Freq Mode: %d\n", __func__,
- __LINE__, i2c_info->slave_addr, i2c_info->i2c_freq_mode);
+ CAM_DBG(CAM_ACTUATOR, "Slave addr: 0x%x Freq Mode: %d",
+ i2c_info->slave_addr, i2c_info->i2c_freq_mode);
return rc;
}
@@ -45,13 +46,12 @@
uint32_t i, size;
if (a_ctrl == NULL || i2c_set == NULL) {
- pr_err("%s:%d Invalid Args\n", __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Invalid Args");
return -EINVAL;
}
if (i2c_set->is_settings_valid != 1) {
- pr_err("%s: %d :Error: Invalid settings\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, " Invalid settings");
return -EINVAL;
}
@@ -61,8 +61,8 @@
rc = camera_io_dev_write(&(a_ctrl->io_master_info),
&(i2c_list->i2c_settings));
if (rc < 0) {
- pr_err("%s: %d :Error: Failed in Applying i2c write settings\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR,
+ "Failed in Applying i2c wrt settings");
return rc;
}
} else if (i2c_list->op_code == CAM_SENSOR_I2C_POLL) {
@@ -81,8 +81,8 @@
i2c_list->i2c_settings.
reg_setting[i].delay);
if (rc < 0) {
- pr_err("%s: %d :Error: Failed in Applying i2c poll settings\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR,
+ "i2c poll apply setting Fail");
return rc;
}
}
@@ -98,21 +98,21 @@
struct cam_actuator_ctrl_t *a_ctrl = NULL;
if (!apply) {
- pr_err("%s:%d :Error: Invalid Input Args\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Invalid Input Args");
return -EINVAL;
}
a_ctrl = (struct cam_actuator_ctrl_t *)
cam_get_device_priv(apply->dev_hdl);
if (!a_ctrl) {
- pr_err("%s: %d :Error: Device data is NULL\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Device data is NULL");
return -EINVAL;
}
request_id = apply->request_id % MAX_PER_FRAME_ARRAY;
- CDBG("%s:%d Request Id: %lld\n",
- __func__, __LINE__, apply->request_id);
+
+ trace_cam_apply_req("Actuator", apply);
+
+ CAM_DBG(CAM_ACTUATOR, "Request Id: %lld", apply->request_id);
if ((apply->request_id ==
a_ctrl->i2c_data.per_frame[request_id].request_id) &&
@@ -121,8 +121,9 @@
rc = cam_actuator_apply_settings(a_ctrl,
&a_ctrl->i2c_data.per_frame[request_id]);
if (rc < 0) {
- pr_err("%s:%d Failed in applying the request: %lld\n",
- __func__, __LINE__, apply->request_id);
+ CAM_ERR(CAM_ACTUATOR,
+ "Failed in applying the request: %lld\n",
+ apply->request_id);
return rc;
}
}
@@ -135,12 +136,13 @@
a_ctrl->i2c_data.per_frame[del_req_id].request_id = 0;
rc = delete_request(&a_ctrl->i2c_data.per_frame[del_req_id]);
if (rc < 0) {
- pr_err("%s: %d :Error: Fail deleting the req: %d err: %d\n",
- __func__, __LINE__, del_req_id, rc);
+ CAM_ERR(CAM_ACTUATOR,
+ "Fail deleting the req: %d err: %d\n",
+ del_req_id, rc);
return rc;
}
} else {
- CDBG("%s:%d No Valid Req to clean Up\n", __func__, __LINE__);
+ CAM_DBG(CAM_ACTUATOR, "No Valid Req to clean Up");
}
return rc;
@@ -152,15 +154,14 @@
struct cam_actuator_ctrl_t *a_ctrl = NULL;
if (!link) {
- pr_err("%s:%d Invalid Args\n", __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Invalid Args");
return -EINVAL;
}
a_ctrl = (struct cam_actuator_ctrl_t *)
cam_get_device_priv(link->dev_hdl);
if (!a_ctrl) {
- pr_err("%s:%d :Error: Device data is NULL\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Device data is NULL");
return -EINVAL;
}
if (link->link_enable) {
@@ -177,7 +178,7 @@
int32_t cam_actuator_publish_dev_info(struct cam_req_mgr_device_info *info)
{
if (!info) {
- pr_err("%s:%d Invalid Args\n", __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Invalid Args");
return -EINVAL;
}
@@ -204,8 +205,7 @@
struct cam_req_mgr_add_request add_req;
if (!a_ctrl || !arg) {
- pr_err("%s:%d :Error: Invalid Args\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Invalid Args");
return -EINVAL;
}
@@ -216,21 +216,21 @@
rc = cam_mem_get_cpu_buf(config.packet_handle,
(uint64_t *)&generic_ptr, &len_of_buff);
if (rc < 0) {
- pr_err("%s:%d :Error: error in converting command Handle %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_ACTUATOR, "Error in converting command Handle %d",
+ rc);
return rc;
}
if (config.offset > len_of_buff) {
- pr_err("%s: %d offset is out of bounds: offset: %lld len: %zu\n",
- __func__, __LINE__, config.offset, len_of_buff);
+ CAM_ERR(CAM_ACTUATOR,
+ "offset is out of bounds: offset: %lld len: %zu",
+ config.offset, len_of_buff);
return -EINVAL;
}
csl_packet = (struct cam_packet *)(generic_ptr +
config.offset);
- CDBG("%s:%d Pkt opcode: %d\n",
- __func__, __LINE__, csl_packet->header.op_code);
+ CAM_DBG(CAM_ACTUATOR, "Pkt opcode: %d", csl_packet->header.op_code);
if ((csl_packet->header.op_code & 0xFFFFFF) ==
CAM_ACTUATOR_PACKET_OPCODE_INIT) {
@@ -242,24 +242,22 @@
cmd_desc = (struct cam_cmd_buf_desc *)(offset);
if (csl_packet->num_cmd_buf != 2) {
- pr_err("%s:: %d :Error: cmd Buffers in Init : %d\n",
- __func__, __LINE__, csl_packet->num_cmd_buf);
+ CAM_ERR(CAM_ACTUATOR, "cmd Buffers in Init : %d",
+ csl_packet->num_cmd_buf);
return -EINVAL;
}
rc = cam_mem_get_cpu_buf(cmd_desc[0].mem_handle,
(uint64_t *)&generic_ptr, &len_of_buff);
if (rc < 0) {
- pr_err("%s:%d Failed to get cpu buf\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Failed to get cpu buf");
return rc;
}
cmd_buf = (uint32_t *)generic_ptr;
cmd_buf += cmd_desc->offset / sizeof(uint32_t);
rc = cam_actuator_slaveInfo_pkt_parser(a_ctrl, cmd_buf);
if (rc < 0) {
- pr_err("%s:%d Failed in parsing the pkt\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Failed in parsing the pkt");
return rc;
}
cmd_buf += (sizeof(struct cam_cmd_i2c_info)/sizeof(uint32_t));
@@ -268,8 +266,8 @@
rc = cam_sensor_i2c_pkt_parser(i2c_reg_settings,
&cmd_desc[1], 1);
if (rc < 0) {
- pr_err("%s:%d :Error: actuator pkt parsing failed: %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_ACTUATOR, "Actuator pkt parsing failed: %d",
+ rc);
return rc;
}
} else if ((csl_packet->header.op_code & 0xFFFFFF) ==
@@ -289,8 +287,8 @@
rc = cam_sensor_i2c_pkt_parser(i2c_reg_settings,
cmd_desc, 1);
if (rc < 0) {
- pr_err("%s:%d :Error: actuator pkt parsing failed: %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_ACTUATOR, "Actuator pkt parsing failed: %d",
+ rc);
return rc;
}
} else if ((csl_packet->header.op_code & 0xFFFFFF) ==
@@ -309,8 +307,8 @@
rc = cam_sensor_i2c_pkt_parser(i2c_reg_settings,
cmd_desc, 1);
if (rc < 0) {
- pr_err("%s:%d :Error: actuator pkt parsing failed: %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_ACTUATOR, "Actuator pkt parsing failed: %d",
+ rc);
return rc;
}
}
@@ -323,47 +321,37 @@
if (a_ctrl->bridge_intf.crm_cb &&
a_ctrl->bridge_intf.crm_cb->add_req)
a_ctrl->bridge_intf.crm_cb->add_req(&add_req);
- CDBG("%s: %d Req Id: %lld added to Bridge\n",
- __func__, __LINE__, add_req.req_id);
+ CAM_DBG(CAM_ACTUATOR, "Req Id: %lld added to Bridge",
+ add_req.req_id);
}
return rc;
}
-static int32_t cam_actuator_vreg_control(struct cam_actuator_ctrl_t *a_ctrl,
+static int32_t cam_actuator_vreg_control(
+ struct cam_actuator_ctrl_t *a_ctrl,
int config)
{
- int rc = 0, i, cnt;
- struct cam_actuator_vreg *vreg_cfg;
+ int rc = 0, cnt;
+ struct cam_hw_soc_info *soc_info;
- vreg_cfg = &a_ctrl->vreg_cfg;
- cnt = vreg_cfg->num_vreg;
+ soc_info = &a_ctrl->soc_info;
+ cnt = soc_info->num_rgltr;
+
if (!cnt)
return 0;
- if (cnt >= MSM_ACTUATOR_MAX_VREGS) {
- pr_err("%s:%d Regulators more than supported %d\n",
- __func__, __LINE__, cnt);
+ if (cnt >= CAM_SOC_MAX_REGULATOR) {
+ CAM_ERR(CAM_ACTUATOR, "Regulators more than supported %d", cnt);
return -EINVAL;
}
- for (i = 0; i < cnt; i++) {
- if (a_ctrl->io_master_info.master_type ==
- CCI_MASTER) {
- rc = msm_camera_config_single_vreg(
- &(a_ctrl->v4l2_dev_str.pdev->dev),
- &vreg_cfg->cam_vreg[i],
- (struct regulator **)&vreg_cfg->data[i],
- config);
- } else if (a_ctrl->io_master_info.master_type ==
- I2C_MASTER) {
- rc = msm_camera_config_single_vreg(
- &(a_ctrl->io_master_info.client->dev),
- &vreg_cfg->cam_vreg[i],
- (struct regulator **)&vreg_cfg->data[i],
- config);
- }
- }
+ if (config)
+ rc = cam_soc_util_enable_platform_resource(soc_info, false, 0,
+ false);
+ else
+ rc = cam_soc_util_disable_platform_resource(soc_info, false,
+ false);
return rc;
}
@@ -371,36 +359,32 @@
static int32_t cam_actuator_power_up(struct cam_actuator_ctrl_t *a_ctrl)
{
int rc = 0;
+ struct cam_hw_soc_info *soc_info =
+ &a_ctrl->soc_info;
+ struct msm_camera_gpio_num_info *gpio_num_info = NULL;
rc = cam_actuator_vreg_control(a_ctrl, 1);
if (rc < 0) {
- pr_err("%s:%d Actuator Reg Failed %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_ACTUATOR, "Actuator Reg Failed %d", rc);
return rc;
}
- if (a_ctrl->gconf &&
- a_ctrl->gconf->gpio_num_info &&
- a_ctrl->gconf->gpio_num_info->valid[SENSOR_VAF] == 1) {
- rc = msm_camera_request_gpio_table(
- a_ctrl->gconf->cam_gpio_req_tbl,
- a_ctrl->gconf->cam_gpio_req_tbl_size, 1);
+ gpio_num_info = a_ctrl->gpio_num_info;
+
+ if (soc_info->gpio_data &&
+ gpio_num_info &&
+ gpio_num_info->valid[SENSOR_VAF] == 1) {
+ rc = cam_soc_util_request_platform_resource(&a_ctrl->soc_info,
+ NULL, NULL);
+ rc = cam_soc_util_enable_platform_resource(&a_ctrl->soc_info,
+ false, 0, false);
if (rc < 0) {
- pr_err("%s:%d :Error: Failed in req gpio: %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_ACTUATOR, "Failed in req gpio: %d", rc);
return rc;
}
- if (a_ctrl->cam_pinctrl_status) {
- rc = pinctrl_select_state(
- a_ctrl->pinctrl_info.pinctrl,
- a_ctrl->pinctrl_info.gpio_state_active);
- if (rc < 0)
- pr_err("%s:%d :Error: cannot set pin to active state: %d",
- __func__, __LINE__, rc);
- }
gpio_set_value_cansleep(
- a_ctrl->gconf->gpio_num_info->gpio_num[SENSOR_VAF],
+ gpio_num_info->gpio_num[SENSOR_VAF],
1);
}
@@ -413,43 +397,32 @@
static int32_t cam_actuator_power_down(struct cam_actuator_ctrl_t *a_ctrl)
{
int32_t rc = 0;
+ struct cam_hw_soc_info *soc_info =
+ &a_ctrl->soc_info;
+ struct msm_camera_gpio_num_info *gpio_num_info = NULL;
rc = cam_actuator_vreg_control(a_ctrl, 0);
if (rc < 0) {
- pr_err("%s failed %d\n", __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Failed %d");
return rc;
}
- if (a_ctrl->gconf &&
- a_ctrl->gconf->gpio_num_info &&
- a_ctrl->gconf->gpio_num_info->
- valid[SENSOR_VAF] == 1) {
+ gpio_num_info = a_ctrl->gpio_num_info;
+
+ if (soc_info->gpio_data &&
+ gpio_num_info &&
+ gpio_num_info->valid[SENSOR_VAF] == 1) {
gpio_set_value_cansleep(
- a_ctrl->gconf->gpio_num_info->
- gpio_num[SENSOR_VAF],
+ gpio_num_info->gpio_num[SENSOR_VAF],
GPIOF_OUT_INIT_LOW);
- if (a_ctrl->cam_pinctrl_status) {
- rc = pinctrl_select_state(
- a_ctrl->pinctrl_info.pinctrl,
- a_ctrl->pinctrl_info.
- gpio_state_suspend);
- if (rc < 0)
- pr_err("%s:%d cannot set pin to suspend state: %d",
- __func__, __LINE__, rc);
-
- devm_pinctrl_put(
- a_ctrl->pinctrl_info.pinctrl);
- }
- a_ctrl->cam_pinctrl_status = 0;
- rc = msm_camera_request_gpio_table(
- a_ctrl->gconf->cam_gpio_req_tbl,
- a_ctrl->gconf->cam_gpio_req_tbl_size,
- 0);
+ rc = cam_soc_util_release_platform_resource(&a_ctrl->soc_info);
+ rc |= cam_soc_util_disable_platform_resource(&a_ctrl->soc_info,
+ 0, 0);
if (rc < 0)
- pr_err("%s:%d Failed in selecting state: %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_ACTUATOR,
+ "Failed to disable platform resources: %d", rc);
}
return rc;
@@ -462,13 +435,11 @@
struct cam_control *cmd = (struct cam_control *)arg;
if (!a_ctrl || !cmd) {
- pr_err("%s: %d :Error: Invalid Args\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, " Invalid Args");
return -EINVAL;
}
- pr_debug("%s:%d Opcode to Actuator: %d\n",
- __func__, __LINE__, cmd->op_code);
+ pr_debug("Opcode to Actuator: %d", cmd->op_code);
mutex_lock(&(a_ctrl->actuator_mutex));
switch (cmd->op_code) {
@@ -477,8 +448,7 @@
struct cam_create_dev_hdl bridge_params;
if (a_ctrl->bridge_intf.device_hdl != -1) {
- pr_err("%s:%d Device is already acquired\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Device is already acquired");
rc = -EINVAL;
goto release_mutex;
}
@@ -486,8 +456,7 @@
(void __user *) cmd->handle,
sizeof(actuator_acq_dev));
if (rc < 0) {
- pr_err("%s:%d :Error: Failed Copying from user\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Failed Copying from user\n");
goto release_mutex;
}
@@ -503,12 +472,11 @@
a_ctrl->bridge_intf.session_hdl =
actuator_acq_dev.session_handle;
- CDBG("%s:%d Device Handle: %d\n",
- __func__, __LINE__, actuator_acq_dev.device_handle);
+ CAM_DBG(CAM_ACTUATOR, "Device Handle: %d",
+ actuator_acq_dev.device_handle);
if (copy_to_user((void __user *) cmd->handle, &actuator_acq_dev,
sizeof(struct cam_sensor_acquire_dev))) {
- pr_err("%s:%d :Error: Failed Copy to User\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Failed Copy to User");
rc = -EFAULT;
goto release_mutex;
}
@@ -517,8 +485,7 @@
break;
case CAM_RELEASE_DEV: {
if (a_ctrl->bridge_intf.device_hdl == -1) {
- pr_err("%s:%d :Error: link hdl: %d device hdl: %d\n",
- __func__, __LINE__,
+ CAM_ERR(CAM_ACTUATOR, "link hdl: %d device hdl: %d",
a_ctrl->bridge_intf.device_hdl,
a_ctrl->bridge_intf.link_hdl);
rc = -EINVAL;
@@ -526,21 +493,19 @@
}
rc = cam_destroy_device_hdl(a_ctrl->bridge_intf.device_hdl);
if (rc < 0)
- pr_err("%s:%d :Error: destroying the device hdl\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "destroying the device hdl");
a_ctrl->bridge_intf.device_hdl = -1;
a_ctrl->bridge_intf.link_hdl = -1;
a_ctrl->bridge_intf.session_hdl = -1;
}
break;
case CAM_QUERY_CAP: {
- struct cam_actuator_query_cap actuator_cap;
+ struct cam_actuator_query_cap actuator_cap = {0};
actuator_cap.slot_info = a_ctrl->id;
if (copy_to_user((void __user *) cmd->handle, &actuator_cap,
sizeof(struct cam_actuator_query_cap))) {
- pr_err("%s:%d :Error: Failed Copy to User\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Failed Copy to User");
rc = -EFAULT;
goto release_mutex;
}
@@ -549,28 +514,25 @@
case CAM_START_DEV: {
rc = cam_actuator_power_up(a_ctrl);
if (rc < 0) {
- pr_err("%s: %d :Error: Actuator Power up failed\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, " Actuator Power up failed");
goto release_mutex;
}
rc = camera_io_init(&a_ctrl->io_master_info);
if (rc < 0) {
- pr_err("%s:%d :Error: cci_init failed\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "cci_init failed");
cam_actuator_power_down(a_ctrl);
}
rc = cam_actuator_apply_settings(a_ctrl,
&a_ctrl->i2c_data.init_settings);
if (rc < 0)
- pr_err("%s: %d :Error: Cannot apply Init settings\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Cannot apply Init settings");
/* Delete the request even if the apply is failed */
rc = delete_request(&a_ctrl->i2c_data.init_settings);
if (rc < 0) {
- pr_err("%s:%d Fail in deleting the Init settings\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR,
+ "Fail in deleting the Init settings");
rc = -EINVAL;
goto release_mutex;
}
@@ -579,12 +541,10 @@
case CAM_STOP_DEV: {
rc = camera_io_release(&a_ctrl->io_master_info);
if (rc < 0)
- pr_err("%s:%d :Error: Failed in releasing CCI\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Failed in releasing CCI");
rc = cam_actuator_power_down(a_ctrl);
if (rc < 0) {
- pr_err("%s:%d :Error: Actuator Power down failed\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Actuator Power down failed");
goto release_mutex;
}
}
@@ -594,8 +554,7 @@
ACT_APPLY_SETTINGS_LATER;
rc = cam_actuator_i2c_pkt_parse(a_ctrl, arg);
if (rc < 0) {
- pr_err("%s:%d :Error: Failed in actuator Parsing\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Failed in actuator Parsing");
}
if (a_ctrl->act_apply_state ==
@@ -603,14 +562,15 @@
rc = cam_actuator_apply_settings(a_ctrl,
&a_ctrl->i2c_data.init_settings);
if (rc < 0)
- pr_err("%s:%d :Error: Cannot apply Update settings\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR,
+ "Cannot apply Update settings");
/* Delete the request even if the apply is failed */
rc = delete_request(&a_ctrl->i2c_data.init_settings);
if (rc < 0) {
- pr_err("%s: %d :Error: Failed in Deleting the Init Pkt: %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_ACTUATOR,
+ "Failed in Deleting the Init Pkt: %d",
+ rc);
goto release_mutex;
}
}
@@ -619,8 +579,7 @@
case CAM_SD_SHUTDOWN:
break;
default:
- pr_err("%s:%d Invalid Opcode %d\n",
- __func__, __LINE__, cmd->op_code);
+ CAM_ERR(CAM_ACTUATOR, "Invalid Opcode %d", cmd->op_code);
}
release_mutex:
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
index 3835680..7eba9d1 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
@@ -14,6 +14,7 @@
#include "cam_req_mgr_dev.h"
#include "cam_actuator_soc.h"
#include "cam_actuator_core.h"
+#include "cam_trace.h"
static long cam_actuator_subdev_ioctl(struct v4l2_subdev *sd,
unsigned int cmd, void *arg)
@@ -27,8 +28,7 @@
rc = cam_actuator_driver_cmd(a_ctrl, arg);
break;
default:
- pr_err("%s:%d Invalid ioctl cmd\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Invalid ioctl cmd");
rc = -EINVAL;
break;
}
@@ -42,14 +42,14 @@
struct cam_actuator_ctrl_t *a_ctrl;
if (client == NULL || id == NULL) {
- pr_err("%s:%d: :Error: Invalid Args client: %pK id: %pK\n",
- __func__, __LINE__, client, id);
+ CAM_ERR(CAM_ACTUATOR, "Invalid Args client: %pK id: %pK",
+ client, id);
return -EINVAL;
}
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
- pr_err("%s %s :Error: i2c_check_functionality failed\n",
- __func__, client->name);
+ CAM_ERR(CAM_ACTUATOR, "%s :: i2c_check_functionality failed",
+ client->name);
rc = -EFAULT;
return rc;
}
@@ -76,12 +76,11 @@
INIT_LIST_HEAD(&(a_ctrl->i2c_data.per_frame[i].list_head));
/* Initialize sensor device type */
- a_ctrl->of_node = client->dev.of_node;
a_ctrl->io_master_info.master_type = I2C_MASTER;
rc = cam_actuator_parse_dt(a_ctrl, &client->dev);
if (rc < 0) {
- pr_err("failed: cam_sensor_parse_dt rc %d", rc);
+ CAM_ERR(CAM_ACTUATOR, "failed: cam_sensor_parse_dt rc %d", rc);
goto free_mem;
}
@@ -100,7 +99,7 @@
a_ctrl = platform_get_drvdata(pdev);
if (!a_ctrl) {
- pr_err("%s: Actuator device is NULL\n", __func__);
+ CAM_ERR(CAM_ACTUATOR, "Actuator device is NULL");
return 0;
}
@@ -120,7 +119,7 @@
/* Handle I2C Devices */
if (!a_ctrl) {
- pr_err("%s: Actuator device is NULL\n", __func__);
+ CAM_ERR(CAM_ACTUATOR, "Actuator device is NULL");
return -EINVAL;
}
/*Free Allocated Mem */
@@ -139,7 +138,8 @@
if (copy_from_user(&cmd_data, (void __user *)arg,
sizeof(cmd_data))) {
- pr_err("Failed to copy from user_ptr=%pK size=%zu\n",
+ CAM_ERR(CAM_ACTUATOR,
+ "Failed to copy from user_ptr=%pK size=%zu\n",
(void __user *)arg, sizeof(cmd_data));
return -EFAULT;
}
@@ -149,21 +149,21 @@
cmd = VIDIOC_CAM_CONTROL;
rc = cam_actuator_subdev_ioctl(sd, cmd, &cmd_data);
if (rc < 0) {
- pr_err("%s:%d Failed in actuator suddev handling",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR,
+ "Failed in actuator suddev handling");
return rc;
}
break;
default:
- pr_err("%s:%d Invalid compat ioctl: %d\n",
- __func__, __LINE__, cmd);
+ CAM_ERR(CAM_ACTUATOR, "Invalid compat ioctl: %d", cmd);
rc = -EINVAL;
}
if (!rc) {
if (copy_to_user((void __user *)arg, &cmd_data,
sizeof(cmd_data))) {
- pr_err("Failed to copy to user_ptr=%pK size=%zu\n",
+ CAM_ERR(CAM_ACTUATOR,
+ "Failed to copy to user_ptr=%pK size=%zu\n",
(void __user *)arg, sizeof(cmd_data));
rc = -EFAULT;
}
@@ -203,11 +203,9 @@
if (!a_ctrl)
return -ENOMEM;
- /* Initialize actuator device type */
- a_ctrl->of_node = pdev->dev.of_node;
-
/*fill in platform device*/
a_ctrl->v4l2_dev_str.pdev = pdev;
+ a_ctrl->soc_info.pdev = pdev;
a_ctrl->io_master_info.master_type = CCI_MASTER;
@@ -230,8 +228,7 @@
rc = cam_actuator_parse_dt(a_ctrl, &(pdev->dev));
if (rc < 0) {
- pr_err("%s:%d :Error: Paring actuator dt failed rc %d",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_ACTUATOR, "Paring actuator dt failed rc %d", rc);
goto free_ctrl;
}
@@ -254,11 +251,18 @@
rc = cam_register_subdev(&(a_ctrl->v4l2_dev_str));
if (rc < 0) {
- pr_err("%s:%d :ERROR: Fail with cam_register_subdev\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_ACTUATOR, "Fail with cam_register_subdev");
goto free_mem;
}
+ rc = cam_soc_util_request_platform_resource(&a_ctrl->soc_info,
+ NULL, NULL);
+ if (rc < 0) {
+ CAM_ERR(CAM_ACTUATOR,
+ "Requesting Platform Resources failed rc %d", rc);
+ goto free_ctrl;
+ }
+
a_ctrl->bridge_intf.device_hdl = -1;
a_ctrl->bridge_intf.ops.get_dev_info =
cam_actuator_publish_dev_info;
@@ -310,14 +314,13 @@
rc = platform_driver_register(&cam_actuator_platform_driver);
if (rc < 0) {
- pr_err("%s platform_driver_register failed rc = %d",
- __func__, rc);
+ CAM_ERR(CAM_ACTUATOR,
+ "platform_driver_register failed rc = %d", rc);
return rc;
}
rc = i2c_add_driver(&cam_actuator_driver_i2c);
if (rc)
- pr_err("%s:%d :Error: i2c_add_driver failed rc = %d",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_ACTUATOR, "i2c_add_driver failed rc = %d", rc);
return rc;
}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h
index 22ef29e..fdf881f3 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h
@@ -32,7 +32,8 @@
#include <cam_sensor_cmn_header.h>
#include <cam_subdev.h>
#include "cam_sensor_util.h"
-#include "cam_sensor_soc_api.h"
+#include "cam_soc_util.h"
+#include "cam_debug_util.h"
#define NUM_MASTERS 2
#define NUM_QUEUES 2
@@ -40,13 +41,6 @@
#define TRUE 1
#define FALSE 0
-#undef CDBG
-#ifdef CAM_SENSOR_DEBUG
-#define CDBG(fmt, args...) pr_err(fmt, ##args)
-#else
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
-#endif
-
#define ACTUATOR_DRIVER_I2C "i2c_actuator"
#define CAMX_ACTUATOR_DEV_NAME "cam-actuator-driver"
@@ -60,18 +54,6 @@
};
/**
- * struct cam_actuator_vreg
- * @cam_vreg: Regulator structure
- * @data: Regulator data
- * @num_vreg: Number of regulators
- */
-struct cam_actuator_vreg {
- struct camera_vreg_t *cam_vreg;
- void *data[MSM_ACTUATOR_MAX_VREGS];
- int num_vreg;
-};
-
-/**
* struct intf_params
* @device_hdl: Device Handle
* @session_hdl: Session Handle
@@ -107,18 +89,16 @@
struct i2c_driver *i2c_driver;
enum cci_i2c_master_t cci_i2c_master;
struct camera_io_master io_master_info;
+ struct cam_hw_soc_info soc_info;
struct mutex actuator_mutex;
uint32_t id;
enum msm_actuator_state_t act_apply_state;
- struct cam_actuator_vreg vreg_cfg;
- struct msm_camera_gpio_conf *gconf;
- struct msm_pinctrl_info pinctrl_info;
+ struct msm_camera_gpio_num_info *gpio_num_info;
uint8_t cam_pinctrl_status;
struct cam_subdev v4l2_dev_str;
struct i2c_data_settings i2c_data;
struct cam_actuator_query_cap act_info;
struct intf_params bridge_intf;
- struct device_node *of_node;
char device_name[20];
};
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.c
index 767f3b0..ddc89a8 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.c
@@ -10,68 +10,66 @@
* GNU General Public License for more details.
*/
-#include "cam_actuator_soc.h"
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <cam_sensor_cmn_header.h>
#include <cam_sensor_util.h>
#include <cam_sensor_io.h>
#include <cam_req_mgr_util.h>
+#include "cam_actuator_soc.h"
+#include "cam_soc_util.h"
int32_t cam_actuator_parse_dt(struct cam_actuator_ctrl_t *a_ctrl,
struct device *dev)
{
int32_t rc = 0;
- struct cam_actuator_vreg *vreg_cfg;
+ struct cam_hw_soc_info *soc_info = &a_ctrl->soc_info;
+ struct device_node *of_node = NULL;
+ struct platform_device *pdev = NULL;
+
+ if (!soc_info->pdev) {
+ CAM_ERR(CAM_ACTUATOR, "soc_info is not initialized");
+ return -EINVAL;
+ }
+
+ pdev = soc_info->pdev;
+ of_node = pdev->dev.of_node;
/* Initialize mutex */
mutex_init(&(a_ctrl->actuator_mutex));
- rc = of_property_read_u32(a_ctrl->of_node, "cell-index",
- &(a_ctrl->id));
- CDBG("cell-index %d, rc %d\n", a_ctrl->id, rc);
+ rc = cam_soc_util_get_dt_properties(soc_info);
if (rc < 0) {
- pr_err("%s:%d :Error: parsing dt for cellindex rc %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_ACTUATOR, "parsing common soc dt(rc %d)", rc);
return rc;
}
-
- rc = of_property_read_u32(a_ctrl->of_node, "qcom,cci-master",
+ rc = of_property_read_u32(of_node, "cci-master",
&(a_ctrl->cci_i2c_master));
- CDBG("qcom,cci-master %d, rc %d\n", a_ctrl->cci_i2c_master, rc);
+ CAM_DBG(CAM_ACTUATOR, "cci-master %d, rc %d",
+ a_ctrl->cci_i2c_master, rc);
if (rc < 0 || a_ctrl->cci_i2c_master >= MASTER_MAX) {
- pr_err("%s:%d :Error: Wrong info from dt CCI master as : %d\n",
- __func__, __LINE__, a_ctrl->cci_i2c_master);
+ CAM_ERR(CAM_ACTUATOR, "Wrong info from dt CCI master as : %d",
+ a_ctrl->cci_i2c_master);
return rc;
}
- if (of_find_property(a_ctrl->of_node,
- "qcom,cam-vreg-name", NULL)) {
- vreg_cfg = &(a_ctrl->vreg_cfg);
- rc = cam_sensor_get_dt_vreg_data(dev->of_node,
- &vreg_cfg->cam_vreg, &vreg_cfg->num_vreg);
- if (rc < 0) {
- pr_err("%s:%d :Error: parsing regulator dt: %d\n",
- __func__, __LINE__, rc);
- return rc;
- }
- }
- rc = msm_sensor_driver_get_gpio_data(&(a_ctrl->gconf),
- a_ctrl->of_node);
- if (rc < 0) {
- pr_err("%s:%d No/Error Actuator GPIOs\n",
- __func__, __LINE__);
- } else {
- a_ctrl->cam_pinctrl_status = 1;
- rc = msm_camera_pinctrl_init(
- &(a_ctrl->pinctrl_info), dev);
- if (rc < 0) {
- pr_err("ERR:%s: Error in reading actuator pinctrl\n",
- __func__);
- a_ctrl->cam_pinctrl_status = 0;
- rc = 0;
- }
+ if (!soc_info->gpio_data) {
+ CAM_INFO(CAM_ACTUATOR, "No GPIO found");
+ rc = 0;
+ return rc;
}
+ if (!soc_info->gpio_data->cam_gpio_common_tbl_size) {
+ CAM_INFO(CAM_ACTUATOR, "No GPIO found");
+ return -EINVAL;
+ }
+
+ rc = cam_sensor_util_init_gpio_pin_tbl(soc_info,
+ &a_ctrl->gpio_num_info);
+
+ if ((rc < 0) || (!a_ctrl->gpio_num_info)) {
+ CAM_ERR(CAM_ACTUATOR, "No/Error Actuator GPIOs");
+ return -EINVAL;
+ }
return rc;
}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/Makefile
index 57dfed5..ba81259 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/Makefile
@@ -3,5 +3,6 @@
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_cci_dev.o cam_cci_core.o cam_cci_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c
index 746b786..975b301 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c
@@ -33,7 +33,7 @@
num_bytes = 4;
break;
default:
- pr_err("%s: %d failed: %d\n", __func__, __LINE__, type);
+ CAM_ERR(CAM_CCI, "failed: %d", type);
num_bytes = 0;
break;
}
@@ -44,14 +44,17 @@
enum cci_i2c_master_t master)
{
int32_t rc = 0;
+ struct cam_hw_soc_info *soc_info =
+ &cci_dev->soc_info;
+ void __iomem *base = soc_info->reg_map[0].mem_base;
- cam_io_w_mb(1 << master, cci_dev->base + CCI_HALT_REQ_ADDR);
+ cam_io_w_mb(1 << master, base + CCI_HALT_REQ_ADDR);
rc = wait_for_completion_timeout(
&cci_dev->cci_master_info[master].reset_complete, CCI_TIMEOUT);
if (rc < 0) {
- pr_err("%s:%d wait failed\n", __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "wait failed");
} else if (rc == 0) {
- pr_err("%s:%d wait timeout\n", __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "wait timeout");
/* Set reset pending flag to TRUE */
cci_dev->cci_master_info[master].reset_pending = TRUE;
@@ -59,18 +62,17 @@
/* Set proper mask to RESET CMD address based on MASTER */
if (master == MASTER_0)
cam_io_w_mb(CCI_M0_RESET_RMSK,
- cci_dev->base + CCI_RESET_CMD_ADDR);
+ base + CCI_RESET_CMD_ADDR);
else
cam_io_w_mb(CCI_M1_RESET_RMSK,
- cci_dev->base + CCI_RESET_CMD_ADDR);
+ base + CCI_RESET_CMD_ADDR);
/* wait for reset done irq */
rc = wait_for_completion_timeout(
&cci_dev->cci_master_info[master].reset_complete,
CCI_TIMEOUT);
if (rc <= 0)
- pr_err("%s:%d wait failed %d\n", __func__, __LINE__,
- rc);
+ CAM_ERR(CAM_CCI, "wait failed %d", rc);
}
}
@@ -82,40 +84,42 @@
int32_t rc = 0;
uint32_t read_val = 0;
uint32_t reg_offset = master * 0x200 + queue * 0x100;
+ struct cam_hw_soc_info *soc_info =
+ &cci_dev->soc_info;
+ void __iomem *base = soc_info->reg_map[0].mem_base;
- read_val = cam_io_r_mb(cci_dev->base +
+ read_val = cam_io_r_mb(base +
CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
- CDBG("%s line %d CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR %d len %d max %d\n",
- __func__, __LINE__, read_val, len,
+ CAM_DBG(CAM_CCI, "CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR %d len %d max %d",
+ read_val, len,
cci_dev->cci_i2c_queue_info[master][queue].max_queue_size);
if ((read_val + len + 1) > cci_dev->
cci_i2c_queue_info[master][queue].max_queue_size) {
uint32_t reg_val = 0;
uint32_t report_val = CCI_I2C_REPORT_CMD | (1 << 8);
- CDBG("%s:%d CCI_I2C_REPORT_CMD\n", __func__, __LINE__);
+ CAM_DBG(CAM_CCI, "CCI_I2C_REPORT_CMD");
cam_io_w_mb(report_val,
- cci_dev->base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+ base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
reg_offset);
read_val++;
- CDBG("%s:%d CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR %d, queue: %d\n",
- __func__, __LINE__, read_val, queue);
- cam_io_w_mb(read_val, cci_dev->base +
+ CAM_DBG(CAM_CCI,
+ "CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR %d, queue: %d",
+ read_val, queue);
+ cam_io_w_mb(read_val, base +
CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
reg_val = 1 << ((master * 2) + queue);
- CDBG("%s:%d CCI_QUEUE_START_ADDR\n", __func__, __LINE__);
+ CAM_DBG(CAM_CCI, "CCI_QUEUE_START_ADDR");
atomic_set(&cci_dev->cci_master_info[master].
done_pending[queue], 1);
- cam_io_w_mb(reg_val, cci_dev->base +
+ cam_io_w_mb(reg_val, base +
CCI_QUEUE_START_ADDR);
- CDBG("%s line %d wait_for_completion_timeout\n",
- __func__, __LINE__);
+ CAM_DBG(CAM_CCI, "wait_for_completion_timeout");
atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 1);
rc = wait_for_completion_timeout(&cci_dev->
cci_master_info[master].report_q[queue], CCI_TIMEOUT);
if (rc <= 0) {
- pr_err("%s: wait_for_completion_timeout %d\n",
- __func__, __LINE__);
+			CAM_ERR(CAM_CCI, "Wait_for_completion_timeout %d", rc);
if (rc == 0)
rc = -ETIMEDOUT;
cam_cci_flush_queue(cci_dev, master);
@@ -123,7 +127,7 @@
}
rc = cci_dev->cci_master_info[master].status;
if (rc < 0)
- pr_err("%s failed rc %d\n", __func__, rc);
+ CAM_ERR(CAM_CCI, "Failed rc %d", rc);
}
return rc;
@@ -136,21 +140,24 @@
{
int32_t rc = 0;
uint32_t reg_offset = master * 0x200 + queue * 0x100;
+ struct cam_hw_soc_info *soc_info =
+ &cci_dev->soc_info;
+ void __iomem *base = soc_info->reg_map[0].mem_base;
if (!cci_dev) {
- pr_err("%s: failed %d", __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "Failed");
return -EINVAL;
}
rc = cam_cci_validate_queue(cci_dev, 1, master, queue);
if (rc < 0) {
- pr_err("%s: failed %d", __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "Failed %d", rc);
return rc;
}
- CDBG("%s CCI_I2C_M0_Q0_LOAD_DATA_ADDR:val 0x%x:0x%x\n",
- __func__, CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+ CAM_DBG(CAM_CCI, "CCI_I2C_M0_Q0_LOAD_DATA_ADDR:val 0x%x:0x%x",
+ CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
reg_offset, val);
- cam_io_w_mb(val, cci_dev->base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+ cam_io_w_mb(val, base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
reg_offset);
return rc;
}
@@ -177,45 +184,44 @@
uint32_t reg_offset = 0;
/* CCI Top Registers */
- CCI_DBG(" **** %s : %d CCI TOP Registers ****\n", __func__, __LINE__);
+ CAM_DBG(CAM_CCI, "****CCI TOP Registers ****");
for (i = 0; i < DEBUG_TOP_REG_COUNT; i++) {
reg_offset = DEBUG_TOP_REG_START + i * 4;
read_val = cam_io_r_mb(cci_dev->base + reg_offset);
- CCI_DBG("%s : %d offset = 0x%X value = 0x%X\n",
- __func__, __LINE__, reg_offset, read_val);
+ CAM_DBG(CAM_CCI, "offset = 0x%X value = 0x%X",
+ reg_offset, read_val);
}
/* CCI Master registers */
- CCI_DBG(" **** %s : %d CCI MASTER%d Registers ****\n",
- __func__, __LINE__, master);
+ CAM_DBG(CAM_CCI, "****CCI MASTER %d Registers ****",
+ master);
for (i = 0; i < DEBUG_MASTER_REG_COUNT; i++) {
if (i == 6)
continue;
reg_offset = DEBUG_MASTER_REG_START + master*0x100 + i * 4;
read_val = cam_io_r_mb(cci_dev->base + reg_offset);
- CCI_DBG("%s : %d offset = 0x%X value = 0x%X\n",
- __func__, __LINE__, reg_offset, read_val);
+ CAM_DBG(CAM_CCI, "offset = 0x%X value = 0x%X",
+ reg_offset, read_val);
}
/* CCI Master Queue registers */
- CCI_DBG(" **** %s : %d CCI MASTER%d QUEUE%d Registers ****\n",
- __func__, __LINE__, master, queue);
+ CAM_DBG(CAM_CCI, " **** CCI MASTER%d QUEUE%d Registers ****",
+ master, queue);
for (i = 0; i < DEBUG_MASTER_QUEUE_REG_COUNT; i++) {
reg_offset = DEBUG_MASTER_QUEUE_REG_START + master*0x200 +
queue*0x100 + i * 4;
read_val = cam_io_r_mb(cci_dev->base + reg_offset);
- CCI_DBG("%s : %d offset = 0x%X value = 0x%X\n",
- __func__, __LINE__, reg_offset, read_val);
+ CAM_DBG(CAM_CCI, "offset = 0x%X value = 0x%X",
+ reg_offset, read_val);
}
/* CCI Interrupt registers */
- CCI_DBG(" **** %s : %d CCI Interrupt Registers ****\n",
- __func__, __LINE__);
+ CAM_DBG(CAM_CCI, " ****CCI Interrupt Registers ****");
for (i = 0; i < DEBUG_INTR_REG_COUNT; i++) {
reg_offset = DEBUG_INTR_REG_START + i * 4;
read_val = cam_io_r_mb(cci_dev->base + reg_offset);
- CCI_DBG("%s : %d offset = 0x%X value = 0x%X\n",
- __func__, __LINE__, reg_offset, read_val);
+ CAM_DBG(CAM_CCI, "offset = 0x%X value = 0x%X",
+ reg_offset, read_val);
}
}
#endif
@@ -227,21 +233,19 @@
int32_t rc = 0;
if (!cci_dev) {
- pr_err("%s: failed %d", __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "failed");
return -EINVAL;
}
rc = wait_for_completion_timeout(&cci_dev->
cci_master_info[master].report_q[queue], CCI_TIMEOUT);
- CDBG("%s line %d wait DONE_for_completion_timeout\n",
- __func__, __LINE__);
+ CAM_DBG(CAM_CCI, "wait DONE_for_completion_timeout");
if (rc <= 0) {
#ifdef DUMP_CCI_REGISTERS
cam_cci_dump_registers(cci_dev, master, queue);
#endif
- pr_err("%s: %d wait for queue: %d\n",
- __func__, __LINE__, queue);
+ CAM_ERR(CAM_CCI, "wait for queue: %d", queue);
if (rc == 0)
rc = -ETIMEDOUT;
cam_cci_flush_queue(cci_dev, master);
@@ -249,7 +253,7 @@
}
rc = cci_dev->cci_master_info[master].status;
if (rc < 0) {
- pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
+ CAM_ERR(CAM_CCI, "failed rc %d", rc);
return rc;
}
@@ -260,21 +264,23 @@
enum cci_i2c_master_t master,
enum cci_i2c_queue_t queue)
{
+ struct cam_hw_soc_info *soc_info =
+ &cci_dev->soc_info;
+ void __iomem *base = soc_info->reg_map[0].mem_base;
+
uint32_t reg_offset = master * 0x200 + queue * 0x100;
- uint32_t read_val = cam_io_r_mb(cci_dev->base +
+ uint32_t read_val = cam_io_r_mb(base +
CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
uint32_t report_val = CCI_I2C_REPORT_CMD | (1 << 8);
- CDBG("%s:%d CCI_I2C_REPORT_CMD curr_w_cnt: %d\n",
- __func__, __LINE__, read_val);
+ CAM_DBG(CAM_CCI, "CCI_I2C_REPORT_CMD curr_w_cnt: %d", read_val);
cam_io_w_mb(report_val,
- cci_dev->base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+ base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
reg_offset);
read_val++;
- CDBG("%s:%d CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR %d\n",
- __func__, __LINE__, read_val);
- cam_io_w_mb(read_val, cci_dev->base +
+ CAM_DBG(CAM_CCI, "CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR %d", read_val);
+ cam_io_w_mb(read_val, base +
CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
}
@@ -282,12 +288,16 @@
enum cci_i2c_master_t master,
enum cci_i2c_queue_t queue)
{
+ struct cam_hw_soc_info *soc_info =
+ &cci_dev->soc_info;
+ void __iomem *base = soc_info->reg_map[0].mem_base;
+
uint32_t reg_val = 1 << ((master * 2) + queue);
cam_cci_load_report_cmd(cci_dev, master, queue);
atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 1);
atomic_set(&cci_dev->cci_master_info[master].done_pending[queue], 1);
- cam_io_w_mb(reg_val, cci_dev->base +
+ cam_io_w_mb(reg_val, base +
CCI_QUEUE_START_ADDR);
return cam_cci_wait(cci_dev, master, queue);
@@ -302,12 +312,12 @@
if (atomic_read(&cci_dev->cci_master_info[master].q_free[queue]) == 0) {
rc = cam_cci_lock_queue(cci_dev, master, queue, 0);
if (rc < 0) {
- pr_err("%s failed line %d\n", __func__, __LINE__);
+			CAM_ERR(CAM_CCI, "failed rc: %d", rc);
return rc;
}
rc = cam_cci_wait_report_cmd(cci_dev, master, queue);
if (rc < 0) {
- pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
+ CAM_ERR(CAM_CCI, "failed rc %d", rc);
return rc;
}
} else {
@@ -315,17 +325,17 @@
done_pending[queue], 1);
rc = cam_cci_wait(cci_dev, master, queue);
if (rc < 0) {
- pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
+ CAM_ERR(CAM_CCI, "failed rc %d", rc);
return rc;
}
rc = cam_cci_lock_queue(cci_dev, master, queue, 0);
if (rc < 0) {
- pr_err("%s failed line %d\n", __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "failed rc %d", rc);
return rc;
}
rc = cam_cci_wait_report_cmd(cci_dev, master, queue);
if (rc < 0) {
- pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
+ CAM_ERR(CAM_CCI, "Failed rc %d", rc);
return rc;
}
}
@@ -339,11 +349,13 @@
{
uint32_t read_val = 0;
uint32_t reg_offset = master * 0x200 + queue * 0x100;
+ struct cam_hw_soc_info *soc_info =
+ &cci_dev->soc_info;
+ void __iomem *base = soc_info->reg_map[0].mem_base;
- read_val = cam_io_r_mb(cci_dev->base +
+ read_val = cam_io_r_mb(base +
CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
- CDBG("%s line %d CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR %d max %d\n",
- __func__, __LINE__, read_val,
+ CAM_DBG(CAM_CCI, "CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR %d max %d", read_val,
cci_dev->cci_i2c_queue_info[master][queue].max_queue_size);
return (cci_dev->
cci_i2c_queue_info[master][queue].max_queue_size) -
@@ -354,12 +366,15 @@
enum cci_i2c_master_t master,
enum cci_i2c_queue_t queue)
{
+ struct cam_hw_soc_info *soc_info =
+ &cci_dev->soc_info;
+ void __iomem *base = soc_info->reg_map[0].mem_base;
uint32_t reg_val = 1 << ((master * 2) + queue);
if (atomic_read(&cci_dev->cci_master_info[master].q_free[queue]) == 0) {
cam_cci_load_report_cmd(cci_dev, master, queue);
atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 1);
- cam_io_w_mb(reg_val, cci_dev->base +
+ cam_io_w_mb(reg_val, base +
CCI_QUEUE_START_ADDR);
}
}
@@ -375,13 +390,13 @@
done_pending[queue], 1);
rc = cam_cci_wait(cci_dev, master, queue);
if (rc < 0) {
- pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
+ CAM_ERR(CAM_CCI, "failed rc %d", rc);
return rc;
}
} else {
rc = cam_cci_wait_report_cmd(cci_dev, master, queue);
if (rc < 0) {
- pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
+ CAM_ERR(CAM_CCI, "failed rc %d", rc);
return rc;
}
}
@@ -402,7 +417,7 @@
uint32_t size = cmd_size;
if (!cci_dev || !c_ctrl) {
- pr_err("%s: failed %d", __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "failed");
return -EINVAL;
}
@@ -435,8 +450,7 @@
}
if (len > cci_dev->payload_size) {
- pr_err("%s: %d Len error: %d",
- __func__, __LINE__, len);
+ CAM_ERR(CAM_CCI, "Len error: %d", len);
return -EINVAL;
}
@@ -453,60 +467,61 @@
if (clk) {
cycles_per_us = ((clk/1000)*256)/1000;
} else {
- pr_err("%s:%d, failed: Can use default: %d",
- __func__, __LINE__, CYCLES_PER_MICRO_SEC_DEFAULT);
+ CAM_ERR(CAM_CCI, "failed: Can use default: %d",
+ CYCLES_PER_MICRO_SEC_DEFAULT);
cycles_per_us = CYCLES_PER_MICRO_SEC_DEFAULT;
}
return cycles_per_us;
}
-uint32_t *cam_cci_get_clk_rates(struct cci_device *cci_dev,
+void cam_cci_get_clk_rates(struct cci_device *cci_dev,
struct cam_cci_ctrl *c_ctrl)
+
{
- uint32_t j;
- int32_t idx;
+ int32_t src_clk_idx, j;
uint32_t cci_clk_src;
unsigned long clk;
struct cam_cci_clk_params_t *clk_params = NULL;
- struct device_node *of_node = cci_dev->v4l2_dev_str.pdev->dev.of_node;
+
enum i2c_freq_mode i2c_freq_mode = c_ctrl->cci_info->i2c_freq_mode;
+ struct cam_hw_soc_info *soc_info = &cci_dev->soc_info;
if (i2c_freq_mode >= I2C_MAX_MODES ||
i2c_freq_mode < I2C_STANDARD_MODE) {
- pr_err("%s:%d Invalid frequency mode: %d\n",
- __func__, __LINE__, (int32_t)i2c_freq_mode);
- return NULL;
+ CAM_ERR(CAM_CCI, "Invalid frequency mode: %d",
+ (int32_t)i2c_freq_mode);
+ cci_dev->clk_level_index = -1;
+ return;
}
clk_params = &cci_dev->cci_clk_params[i2c_freq_mode];
cci_clk_src = clk_params->cci_clk_src;
- idx = of_property_match_string(of_node,
- "clock-names", CCI_CLK_SRC_NAME);
- if (idx < 0) {
+ src_clk_idx = soc_info->src_clk_idx;
+
+ if (src_clk_idx < 0) {
cci_dev->cycles_per_us = CYCLES_PER_MICRO_SEC_DEFAULT;
- return cci_dev->cci_clk_rates[0];
+ cci_dev->clk_level_index = 0;
+ return;
}
if (cci_clk_src == 0) {
- clk = cci_dev->cci_clk_rates[0][idx];
+ clk = soc_info->clk_rate[0][src_clk_idx];
cci_dev->cycles_per_us = cam_cci_cycles_per_ms(clk);
- return cci_dev->cci_clk_rates[0];
+ cci_dev->clk_level_index = 0;
+ return;
}
- CDBG("%s:%d CCI: 3 cases:%d idx: %d\n", __func__,
- __LINE__, (int32_t)cci_dev->num_clk_cases, idx);
- for (j = 0; j < cci_dev->num_clk_cases; j++) {
- clk = cci_dev->cci_clk_rates[j][idx];
+ for (j = 0; j < CAM_MAX_VOTE; j++) {
+ clk = soc_info->clk_rate[j][src_clk_idx];
if (clk == cci_clk_src) {
cci_dev->cycles_per_us = cam_cci_cycles_per_ms(clk);
- cci_dev->cci_clk_src = cci_clk_src;
- return cci_dev->cci_clk_rates[j];
+ cci_dev->clk_level_index = j;
+ return;
}
}
-
- return NULL;
+ return;
}
static int32_t cam_cci_set_clk_param(struct cci_device *cci_dev,
@@ -515,10 +530,12 @@
struct cam_cci_clk_params_t *clk_params = NULL;
enum cci_i2c_master_t master = c_ctrl->cci_info->cci_i2c_master;
enum i2c_freq_mode i2c_freq_mode = c_ctrl->cci_info->i2c_freq_mode;
+ struct cam_hw_soc_info *soc_info =
+ &cci_dev->soc_info;
+ void __iomem *base = soc_info->reg_map[0].mem_base;
if ((i2c_freq_mode >= I2C_MAX_MODES) || (i2c_freq_mode < 0)) {
- pr_err("%s:%d invalid i2c_freq_mode = %d",
- __func__, __LINE__, i2c_freq_mode);
+ CAM_ERR(CAM_CCI, "invalid i2c_freq_mode = %d", i2c_freq_mode);
return -EINVAL;
}
@@ -529,33 +546,33 @@
if (master == MASTER_0) {
cam_io_w_mb(clk_params->hw_thigh << 16 |
clk_params->hw_tlow,
- cci_dev->base + CCI_I2C_M0_SCL_CTL_ADDR);
+ base + CCI_I2C_M0_SCL_CTL_ADDR);
cam_io_w_mb(clk_params->hw_tsu_sto << 16 |
clk_params->hw_tsu_sta,
- cci_dev->base + CCI_I2C_M0_SDA_CTL_0_ADDR);
+ base + CCI_I2C_M0_SDA_CTL_0_ADDR);
cam_io_w_mb(clk_params->hw_thd_dat << 16 |
clk_params->hw_thd_sta,
- cci_dev->base + CCI_I2C_M0_SDA_CTL_1_ADDR);
+ base + CCI_I2C_M0_SDA_CTL_1_ADDR);
cam_io_w_mb(clk_params->hw_tbuf,
- cci_dev->base + CCI_I2C_M0_SDA_CTL_2_ADDR);
+ base + CCI_I2C_M0_SDA_CTL_2_ADDR);
cam_io_w_mb(clk_params->hw_scl_stretch_en << 8 |
clk_params->hw_trdhld << 4 | clk_params->hw_tsp,
- cci_dev->base + CCI_I2C_M0_MISC_CTL_ADDR);
+ base + CCI_I2C_M0_MISC_CTL_ADDR);
} else if (master == MASTER_1) {
cam_io_w_mb(clk_params->hw_thigh << 16 |
clk_params->hw_tlow,
- cci_dev->base + CCI_I2C_M1_SCL_CTL_ADDR);
+ base + CCI_I2C_M1_SCL_CTL_ADDR);
cam_io_w_mb(clk_params->hw_tsu_sto << 16 |
clk_params->hw_tsu_sta,
- cci_dev->base + CCI_I2C_M1_SDA_CTL_0_ADDR);
+ base + CCI_I2C_M1_SDA_CTL_0_ADDR);
cam_io_w_mb(clk_params->hw_thd_dat << 16 |
clk_params->hw_thd_sta,
- cci_dev->base + CCI_I2C_M1_SDA_CTL_1_ADDR);
+ base + CCI_I2C_M1_SDA_CTL_1_ADDR);
cam_io_w_mb(clk_params->hw_tbuf,
- cci_dev->base + CCI_I2C_M1_SDA_CTL_2_ADDR);
+ base + CCI_I2C_M1_SDA_CTL_2_ADDR);
cam_io_w_mb(clk_params->hw_scl_stretch_en << 8 |
clk_params->hw_trdhld << 4 | clk_params->hw_tsp,
- cci_dev->base + CCI_I2C_M1_MISC_CTL_ADDR);
+ base + CCI_I2C_M1_MISC_CTL_ADDR);
}
cci_dev->i2c_freq_mode[master] = i2c_freq_mode;
@@ -576,36 +593,38 @@
uint16_t reg_addr = 0, cmd_size = i2c_msg->size;
uint32_t read_val = 0, reg_offset, val, delay = 0;
uint32_t max_queue_size, queue_size = 0, cmd = 0;
+ struct cam_hw_soc_info *soc_info =
+ &cci_dev->soc_info;
+ void __iomem *base = soc_info->reg_map[0].mem_base;
if (i2c_cmd == NULL) {
- pr_err("%s:%d Failed line\n", __func__,
- __LINE__);
+ CAM_ERR(CAM_CCI, "Failed: i2c cmd is NULL");
return -EINVAL;
}
if ((!cmd_size) || (cmd_size > CCI_I2C_MAX_WRITE)) {
- pr_err("%s:%d failed: invalid cmd_size %d\n",
- __func__, __LINE__, cmd_size);
+ CAM_ERR(CAM_CCI, "failed: invalid cmd_size %d",
+ cmd_size);
return -EINVAL;
}
- CDBG("%s addr type %d data type %d cmd_size %d\n", __func__,
+ CAM_DBG(CAM_CCI, "addr type %d data type %d cmd_size %d",
i2c_msg->addr_type, i2c_msg->data_type, cmd_size);
if (i2c_msg->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
- pr_err("%s:%d failed: invalid addr_type 0x%X\n",
- __func__, __LINE__, i2c_msg->addr_type);
+ CAM_ERR(CAM_CCI, "failed: invalid addr_type 0x%X",
+ i2c_msg->addr_type);
return -EINVAL;
}
if (i2c_msg->data_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
- pr_err("%s:%d failed: invalid data_type 0x%X\n",
- __func__, __LINE__, i2c_msg->data_type);
+ CAM_ERR(CAM_CCI, "failed: invalid data_type 0x%X",
+ i2c_msg->data_type);
return -EINVAL;
}
reg_offset = master * 0x200 + queue * 0x100;
cam_io_w_mb(cci_dev->cci_wait_sync_cfg.cid,
- cci_dev->base + CCI_SET_CID_SYNC_TIMER_ADDR +
+ base + CCI_SET_CID_SYNC_TIMER_ADDR +
cci_dev->cci_wait_sync_cfg.csid *
CCI_SET_CID_SYNC_TIMER_OFFSET);
@@ -613,10 +632,10 @@
c_ctrl->cci_info->retries << 16 |
c_ctrl->cci_info->id_map << 18;
- CDBG("%s CCI_I2C_M0_Q0_LOAD_DATA_ADDR:val 0x%x:0x%x\n",
- __func__, CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+ CAM_DBG(CAM_CCI, "CCI_I2C_M0_Q0_LOAD_DATA_ADDR:val 0x%x:0x%x",
+ CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
reg_offset, val);
- cam_io_w_mb(val, cci_dev->base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+ cam_io_w_mb(val, base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
reg_offset);
atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 0);
@@ -635,13 +654,13 @@
val = CCI_I2C_WAIT_SYNC_CMD |
((cci_dev->cci_wait_sync_cfg.line) << 4);
cam_io_w_mb(val,
- cci_dev->base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+ base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
reg_offset);
}
rc = cam_cci_lock_queue(cci_dev, master, queue, 1);
if (rc < 0) {
- pr_err("%s failed line %d\n", __func__, __LINE__);
+		CAM_ERR(CAM_CCI, "failed rc: %d", rc);
return rc;
}
@@ -651,22 +670,21 @@
len = cam_cci_calc_cmd_len(cci_dev, c_ctrl, cmd_size,
i2c_cmd, &pack);
if (len <= 0) {
- pr_err("%s failed line %d\n", __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "failed");
return -EINVAL;
}
- read_val = cam_io_r_mb(cci_dev->base +
+ read_val = cam_io_r_mb(base +
CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
- CDBG("%s line %d CUR_WORD_CNT_ADDR %d len %d max %d\n",
- __func__, __LINE__, read_val, len, max_queue_size);
+ CAM_DBG(CAM_CCI, "CUR_WORD_CNT_ADDR %d len %d max %d",
+ read_val, len, max_queue_size);
/* + 1 - space alocation for Report CMD */
if ((read_val + len + 1) > queue_size) {
if ((read_val + len + 1) > max_queue_size) {
rc = cam_cci_process_full_q(cci_dev,
master, queue);
if (rc < 0) {
- pr_err("%s failed line %d\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "failed rc: %d", rc);
return rc;
}
continue;
@@ -674,7 +692,7 @@
cam_cci_process_half_q(cci_dev, master, queue);
}
- CDBG("%s cmd_size %d addr 0x%x data 0x%x\n", __func__,
+ CAM_DBG(CAM_CCI, "cmd_size %d addr 0x%x data 0x%x",
cmd_size, i2c_cmd->reg_addr, i2c_cmd->reg_data);
delay = i2c_cmd->delay;
i = 0;
@@ -736,20 +754,21 @@
}
len = ((i-1)/4) + 1;
- read_val = cam_io_r_mb(cci_dev->base +
+ read_val = cam_io_r_mb(base +
CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
for (h = 0, k = 0; h < len; h++) {
cmd = 0;
for (j = 0; (j < 4 && k < i); j++)
cmd |= (data[k++] << (j * 8));
- CDBG("%s LOAD_DATA_ADDR 0x%x, q: %d, len:%d, cnt: %d\n",
- __func__, cmd, queue, len, read_val);
- cam_io_w_mb(cmd, cci_dev->base +
+ CAM_DBG(CAM_CCI,
+ "LOAD_DATA_ADDR 0x%x, q: %d, len:%d, cnt: %d",
+ cmd, queue, len, read_val);
+ cam_io_w_mb(cmd, base +
CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
master * 0x200 + queue * 0x100);
read_val += 1;
- cam_io_w_mb(read_val, cci_dev->base +
+ cam_io_w_mb(read_val, base +
CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
}
@@ -759,20 +778,20 @@
0x100);
cmd <<= 4;
cmd |= CCI_I2C_WAIT_CMD;
- CDBG("%s CCI_I2C_M0_Q0_LOAD_DATA_ADDR 0x%x\n",
- __func__, cmd);
- cam_io_w_mb(cmd, cci_dev->base +
+ CAM_DBG(CAM_CCI,
+ "CCI_I2C_M0_Q0_LOAD_DATA_ADDR 0x%x", cmd);
+ cam_io_w_mb(cmd, base +
CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
master * 0x200 + queue * 0x100);
read_val += 1;
- cam_io_w_mb(read_val, cci_dev->base +
+ cam_io_w_mb(read_val, base +
CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
}
}
rc = cam_cci_transfer_end(cci_dev, master, queue);
if (rc < 0) {
- pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
+ CAM_ERR(CAM_CCI, "failed rc %d", rc);
return rc;
}
@@ -791,6 +810,8 @@
enum cci_i2c_queue_t queue = QUEUE_1;
struct cci_device *cci_dev = NULL;
struct cam_cci_read_cfg *read_cfg = NULL;
+ struct cam_hw_soc_info *soc_info = NULL;
+ void __iomem *base = NULL;
cci_dev = v4l2_get_subdevdata(sd);
master = c_ctrl->cci_info->cci_i2c_master;
@@ -798,9 +819,13 @@
if (c_ctrl->cci_info->cci_i2c_master >= MASTER_MAX
|| c_ctrl->cci_info->cci_i2c_master < 0) {
- pr_err("%s:%d Invalid I2C master addr\n", __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "Invalid I2C master addr");
return -EINVAL;
}
+
+ soc_info = &cci_dev->soc_info;
+ base = soc_info->reg_map[0].mem_base;
+
mutex_lock(&cci_dev->cci_master_info[master].mutex_q[queue]);
/*
@@ -811,8 +836,7 @@
/* Set the I2C Frequency */
rc = cam_cci_set_clk_param(cci_dev, c_ctrl);
if (rc < 0) {
- pr_err("%s:%d cam_cci_set_clk_param failed rc = %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_CCI, "cam_cci_set_clk_param failed rc = %d", rc);
goto rel_mutex;
}
@@ -825,25 +849,22 @@
cci_dev->cci_i2c_queue_info[master][queue].max_queue_size - 1,
master, queue);
if (rc < 0) {
- pr_err("%s:%d Initial validataion failed rc %d\n", __func__,
- __LINE__, rc);
+		CAM_ERR(CAM_CCI, "Initial validation failed rc %d", rc);
goto rel_mutex;
}
if (c_ctrl->cci_info->retries > CCI_I2C_READ_MAX_RETRIES) {
- pr_err("%s:%d More than max retries\n", __func__,
- __LINE__);
+ CAM_ERR(CAM_CCI, "More than max retries");
goto rel_mutex;
}
if (read_cfg->data == NULL) {
- pr_err("%s:%d Data ptr is NULL\n", __func__,
- __LINE__);
+ CAM_ERR(CAM_CCI, "Data ptr is NULL");
goto rel_mutex;
}
- CDBG("%s master %d, queue %d\n", __func__, master, queue);
- CDBG("%s set param sid 0x%x retries %d id_map %d\n", __func__,
+ CAM_DBG(CAM_CCI, "master %d, queue %d", master, queue);
+ CAM_DBG(CAM_CCI, "set param sid 0x%x retries %d id_map %d",
c_ctrl->cci_info->sid, c_ctrl->cci_info->retries,
c_ctrl->cci_info->id_map);
val = CCI_I2C_SET_PARAM_CMD | c_ctrl->cci_info->sid << 4 |
@@ -851,19 +872,20 @@
c_ctrl->cci_info->id_map << 18;
rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
if (rc < 0) {
- CDBG("%s failed line %d\n", __func__, __LINE__);
+ CAM_DBG(CAM_CCI, "failed rc: %d", rc);
goto rel_mutex;
}
val = CCI_I2C_LOCK_CMD;
rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
if (rc < 0) {
- CDBG("%s failed line %d\n", __func__, __LINE__);
+ CAM_DBG(CAM_CCI, "failed rc: %d", rc);
goto rel_mutex;
}
if (read_cfg->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
- pr_err("%s failed line %d\n", __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "failed : Invalid addr type: %u",
+ read_cfg->addr_type);
rc = -EINVAL;
goto rel_mutex;
}
@@ -876,34 +898,33 @@
rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
if (rc < 0) {
- CDBG("%s failed line %d\n", __func__, __LINE__);
+ CAM_DBG(CAM_CCI, "failed rc: %d", rc);
goto rel_mutex;
}
val = CCI_I2C_READ_CMD | (read_cfg->num_byte << 4);
rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
if (rc < 0) {
- CDBG("%s failed line %d\n", __func__, __LINE__);
+ CAM_DBG(CAM_CCI, "failed rc: %d", rc);
goto rel_mutex;
}
val = CCI_I2C_UNLOCK_CMD;
rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
if (rc < 0) {
- CDBG("%s failed line %d\n", __func__, __LINE__);
+ CAM_DBG(CAM_CCI, "failed rc: %d", rc);
goto rel_mutex;
}
- val = cam_io_r_mb(cci_dev->base + CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR
+ val = cam_io_r_mb(base + CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR
+ master * 0x200 + queue * 0x100);
- CDBG("%s cur word cnt 0x%x\n", __func__, val);
- cam_io_w_mb(val, cci_dev->base + CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR
+ CAM_DBG(CAM_CCI, "cur word cnt 0x%x", val);
+ cam_io_w_mb(val, base + CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR
+ master * 0x200 + queue * 0x100);
val = 1 << ((master * 2) + queue);
- cam_io_w_mb(val, cci_dev->base + CCI_QUEUE_START_ADDR);
- CDBG("%s:%d E wait_for_completion_timeout\n", __func__,
- __LINE__);
+ cam_io_w_mb(val, base + CCI_QUEUE_START_ADDR);
+ CAM_DBG(CAM_CCI, "wait_for_completion_timeout");
rc = wait_for_completion_timeout(&cci_dev->
cci_master_info[master].reset_complete, CCI_TIMEOUT);
@@ -913,41 +934,39 @@
#endif
if (rc == 0)
rc = -ETIMEDOUT;
- pr_err("%s: %d wait_for_completion_timeout rc = %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_CCI, "wait_for_completion_timeout rc = %d", rc);
cam_cci_flush_queue(cci_dev, master);
goto rel_mutex;
} else {
rc = 0;
}
- read_words = cam_io_r_mb(cci_dev->base +
+ read_words = cam_io_r_mb(base +
CCI_I2C_M0_READ_BUF_LEVEL_ADDR + master * 0x100);
exp_words = ((read_cfg->num_byte / 4) + 1);
if (read_words != exp_words) {
- pr_err("%s:%d read_words = %d, exp words = %d\n", __func__,
- __LINE__, read_words, exp_words);
+ CAM_ERR(CAM_CCI, "read_words = %d, exp words = %d",
+ read_words, exp_words);
memset(read_cfg->data, 0, read_cfg->num_byte);
rc = -EINVAL;
goto rel_mutex;
}
index = 0;
- CDBG("%s index %d num_type %d\n", __func__, index,
- read_cfg->num_byte);
+ CAM_DBG(CAM_CCI, "index %d num_type %d", index, read_cfg->num_byte);
first_byte = 0;
do {
- val = cam_io_r_mb(cci_dev->base +
+ val = cam_io_r_mb(base +
CCI_I2C_M0_READ_DATA_ADDR + master * 0x100);
- CDBG("%s read val 0x%x\n", __func__, val);
+ CAM_DBG(CAM_CCI, "read val 0x%x", val);
for (i = 0; (i < 4) && (index < read_cfg->num_byte); i++) {
- CDBG("%s i %d index %d\n", __func__, i, index);
+ CAM_DBG(CAM_CCI, "i:%d index:%d", i, index);
if (!first_byte) {
- CDBG("%s sid 0x%x\n", __func__, val & 0xFF);
+ CAM_DBG(CAM_CCI, "sid 0x%x", val & 0xFF);
first_byte++;
} else {
read_cfg->data[index] =
(val >> (i * 8)) & 0xFF;
- CDBG("%s data[%d] 0x%x\n", __func__, index,
+ CAM_DBG(CAM_CCI, "data[%d] 0x%x", index,
read_cfg->data[index]);
index++;
}
@@ -970,20 +989,19 @@
cci_dev = v4l2_get_subdevdata(sd);
if (cci_dev->cci_state != CCI_STATE_ENABLED) {
- pr_err("%s invalid cci state %d\n",
- __func__, cci_dev->cci_state);
+ CAM_ERR(CAM_CCI, "invalid cci state %d",
+ cci_dev->cci_state);
return -EINVAL;
}
master = c_ctrl->cci_info->cci_i2c_master;
- CDBG("%s set param sid 0x%x retries %d id_map %d\n", __func__,
+ CAM_DBG(CAM_CCI, "set param sid 0x%x retries %d id_map %d",
c_ctrl->cci_info->sid, c_ctrl->cci_info->retries,
c_ctrl->cci_info->id_map);
/* Set the I2C Frequency */
rc = cam_cci_set_clk_param(cci_dev, c_ctrl);
if (rc < 0) {
- pr_err("%s:%d cam_cci_set_clk_param failed rc = %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_CCI, "cam_cci_set_clk_param failed rc = %d", rc);
return rc;
}
/*
@@ -995,18 +1013,17 @@
cci_dev->cci_i2c_queue_info[master][queue].max_queue_size-1,
master, queue);
if (rc < 0) {
- pr_err("%s:%d Initial validataion failed rc %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_CCI, "Initial validation failed rc %d",
+ rc);
return rc;
}
if (c_ctrl->cci_info->retries > CCI_I2C_READ_MAX_RETRIES) {
- pr_err("%s:%d More than max retries\n", __func__,
- __LINE__);
+ CAM_ERR(CAM_CCI, "More than max retries");
return rc;
}
rc = cam_cci_data_queue(cci_dev, c_ctrl, queue, sync_en);
if (rc < 0) {
- pr_err("%s failed line %d\n", __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "failed rc: %d", rc);
return rc;
}
@@ -1033,7 +1050,7 @@
&write_async->c_ctrl, write_async->queue, write_async->sync_en);
mutex_unlock(&cci_master_info->mutex_q[write_async->queue]);
if (rc < 0)
- pr_err("%s: %d failed\n", __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "failed rc: %d", rc);
kfree(write_async->c_ctrl.cfg.cci_i2c_write_cfg.reg_setting);
kfree(write_async);
@@ -1074,7 +1091,7 @@
kzalloc(sizeof(struct cam_sensor_i2c_reg_array)*
cci_i2c_write_cfg->size, GFP_KERNEL);
if (!cci_i2c_write_cfg_w->reg_setting) {
- pr_err("%s: %d Couldn't allocate memory\n", __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "Couldn't allocate memory");
kfree(write_async);
return -ENOMEM;
}
@@ -1104,35 +1121,33 @@
uint16_t read_bytes = 0;
if (!sd || !c_ctrl) {
- pr_err("%s:%d sd %pK c_ctrl %pK\n", __func__,
- __LINE__, sd, c_ctrl);
+ CAM_ERR(CAM_CCI, "sd %pK c_ctrl %pK", sd, c_ctrl);
return -EINVAL;
}
if (!c_ctrl->cci_info) {
- pr_err("%s:%d cci_info NULL\n", __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "cci_info NULL");
return -EINVAL;
}
cci_dev = v4l2_get_subdevdata(sd);
if (!cci_dev) {
- pr_err("%s:%d cci_dev NULL\n", __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "cci_dev NULL");
return -EINVAL;
}
if (cci_dev->cci_state != CCI_STATE_ENABLED) {
- pr_err("%s invalid cci state %d\n",
- __func__, cci_dev->cci_state);
+ CAM_ERR(CAM_CCI, "invalid cci state %d", cci_dev->cci_state);
return -EINVAL;
}
if (c_ctrl->cci_info->cci_i2c_master >= MASTER_MAX
|| c_ctrl->cci_info->cci_i2c_master < 0) {
- pr_err("%s:%d Invalid I2C master addr\n", __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "Invalid I2C master addr");
return -EINVAL;
}
master = c_ctrl->cci_info->cci_i2c_master;
read_cfg = &c_ctrl->cfg.cci_i2c_read_cfg;
if ((!read_cfg->num_byte) || (read_cfg->num_byte > CCI_I2C_MAX_READ)) {
- pr_err("%s:%d read num bytes 0\n", __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "read num bytes 0");
rc = -EINVAL;
goto ERROR;
}
@@ -1145,7 +1160,7 @@
read_cfg->num_byte = read_bytes;
rc = cam_cci_read(sd, c_ctrl);
if (rc < 0) {
- pr_err("%s:%d failed rc %d\n", __func__, __LINE__, rc);
+ CAM_ERR(CAM_CCI, "failed rc %d", rc);
goto ERROR;
}
if (read_bytes > CCI_READ_MAX) {
@@ -1169,8 +1184,8 @@
cci_dev = v4l2_get_subdevdata(sd);
if (!cci_dev || !c_ctrl) {
- pr_err("%s:%d failed: invalid params %pK %pK\n", __func__,
- __LINE__, cci_dev, c_ctrl);
+ CAM_ERR(CAM_CCI, "failed: invalid params %pK %pK",
+ cci_dev, c_ctrl);
rc = -EINVAL;
return rc;
}
@@ -1189,8 +1204,7 @@
rc = cam_cci_soc_release(cci_dev);
if (rc < 0) {
- pr_err("%s:%d Failed in releasing the cci: %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_CCI, "Failed in releasing the cci: %d", rc);
cam_cpas_stop(cci_dev->cpas_handle);
return rc;
}
@@ -1210,8 +1224,8 @@
cci_dev = v4l2_get_subdevdata(sd);
if (!cci_dev || !c_ctrl) {
- pr_err("%s:%d failed: invalid params %pK %pK\n", __func__,
- __LINE__, cci_dev, c_ctrl);
+ CAM_ERR(CAM_CCI, "failed: invalid params %pK %pK",
+ cci_dev, c_ctrl);
rc = -EINVAL;
return rc;
}
@@ -1220,7 +1234,7 @@
if (c_ctrl->cci_info->cci_i2c_master >= MASTER_MAX
|| c_ctrl->cci_info->cci_i2c_master < 0) {
- pr_err("%s:%d Invalid I2C master addr\n", __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "Invalid I2C master addr");
return -EINVAL;
}
@@ -1268,8 +1282,7 @@
{
int32_t rc = 0;
- CDBG("%s line %d cmd %d\n", __func__, __LINE__,
- cci_ctrl->cmd);
+ CAM_DBG(CAM_CCI, "cmd %d", cci_ctrl->cmd);
switch (cci_ctrl->cmd) {
case MSM_CCI_INIT:
rc = cam_cci_init(sd, cci_ctrl);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.h
index f6e82dc..a28d5d8 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.h
@@ -23,7 +23,7 @@
*
* This API gets CCI clk rates
*/
-uint32_t *cam_cci_get_clk_rates(struct cci_device *cci_dev,
+void cam_cci_get_clk_rates(struct cci_device *cci_dev,
struct cam_cci_ctrl *c_ctrl);
/**
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c
index 6764b8a..dad02bf 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c
@@ -37,8 +37,7 @@
case VIDIOC_CAM_CONTROL:
break;
default:
- pr_err("%s:%d Invalid ioctl cmd: %d\n",
- __func__, __LINE__, cmd);
+ CAM_ERR(CAM_CCI, "Invalid ioctl cmd: %d", cmd);
rc = -ENOIOCTLCMD;
}
@@ -49,10 +48,13 @@
{
uint32_t irq;
struct cci_device *cci_dev = data;
+ struct cam_hw_soc_info *soc_info =
+ &cci_dev->soc_info;
+ void __iomem *base = soc_info->reg_map[0].mem_base;
- irq = cam_io_r_mb(cci_dev->base + CCI_IRQ_STATUS_0_ADDR);
- cam_io_w_mb(irq, cci_dev->base + CCI_IRQ_CLEAR_0_ADDR);
- cam_io_w_mb(0x1, cci_dev->base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+ irq = cam_io_r_mb(base + CCI_IRQ_STATUS_0_ADDR);
+ cam_io_w_mb(irq, base + CCI_IRQ_CLEAR_0_ADDR);
+ cam_io_w_mb(0x1, base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
if (irq & CCI_IRQ_STATUS_0_RST_DONE_ACK_BMSK) {
if (cci_dev->cci_master_info[MASTER_0].reset_pending == TRUE) {
@@ -123,24 +125,24 @@
if (irq & CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK_BMSK) {
cci_dev->cci_master_info[MASTER_0].reset_pending = TRUE;
cam_io_w_mb(CCI_M0_RESET_RMSK,
- cci_dev->base + CCI_RESET_CMD_ADDR);
+ base + CCI_RESET_CMD_ADDR);
}
if (irq & CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK_BMSK) {
cci_dev->cci_master_info[MASTER_1].reset_pending = TRUE;
cam_io_w_mb(CCI_M1_RESET_RMSK,
- cci_dev->base + CCI_RESET_CMD_ADDR);
+ base + CCI_RESET_CMD_ADDR);
}
if (irq & CCI_IRQ_STATUS_0_I2C_M0_ERROR_BMSK) {
- pr_err("%s:%d MASTER_0 error 0x%x\n", __func__, __LINE__, irq);
+ CAM_ERR(CAM_CCI, "MASTER_0 error 0x%x", irq);
cci_dev->cci_master_info[MASTER_0].status = -EINVAL;
cam_io_w_mb(CCI_M0_HALT_REQ_RMSK,
- cci_dev->base + CCI_HALT_REQ_ADDR);
+ base + CCI_HALT_REQ_ADDR);
}
if (irq & CCI_IRQ_STATUS_0_I2C_M1_ERROR_BMSK) {
- pr_err("%s:%d MASTER_1 error 0x%x\n", __func__, __LINE__, irq);
+ CAM_ERR(CAM_CCI, "MASTER_1 error 0x%x", irq);
cci_dev->cci_master_info[MASTER_1].status = -EINVAL;
cam_io_w_mb(CCI_M1_HALT_REQ_RMSK,
- cci_dev->base + CCI_HALT_REQ_ADDR);
+ base + CCI_HALT_REQ_ADDR);
}
return IRQ_HANDLED;
}
@@ -150,8 +152,10 @@
{
struct cci_device *cci_dev = v4l2_get_subdevdata(sd);
irqreturn_t ret;
+ struct cam_hw_soc_info *soc_info =
+ &cci_dev->soc_info;
- ret = cam_cci_irq(cci_dev->irq->start, cci_dev);
+ ret = cam_cci_irq(soc_info->irq_line->start, cci_dev);
*handled = TRUE;
return 0;
}
@@ -171,6 +175,7 @@
{
struct cam_cpas_register_params cpas_parms;
struct cci_device *new_cci_dev;
+ struct cam_hw_soc_info *soc_info = NULL;
int rc = 0;
new_cci_dev = kzalloc(sizeof(struct cci_device),
@@ -178,12 +183,15 @@
if (!new_cci_dev)
return -ENOMEM;
+ soc_info = &new_cci_dev->soc_info;
+
new_cci_dev->v4l2_dev_str.pdev = pdev;
+ soc_info->pdev = pdev;
+
rc = cam_cci_parse_dt_info(pdev, new_cci_dev);
if (rc < 0) {
- pr_err("%s: %d Resource get Failed: %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_CCI, "Resource get Failed: %d", rc);
goto cci_no_resource;
}
@@ -204,8 +212,7 @@
rc = cam_register_subdev(&(new_cci_dev->v4l2_dev_str));
if (rc < 0) {
- pr_err("%s:%d :Error: Fail with cam_register_subdev\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "Fail with cam_register_subdev");
goto cci_no_resource;
}
@@ -220,10 +227,10 @@
strlcpy(cpas_parms.identifier, "cci", CAM_HW_IDENTIFIER_LENGTH);
rc = cam_cpas_register_client(&cpas_parms);
if (rc) {
- pr_err("%s:%d CPAS registration failed\n", __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "CPAS registration failed");
goto cci_no_resource;
}
- CDBG("CPAS registration successful handle=%d\n",
+ CAM_DBG(CAM_CCI, "CPAS registration successful handle=%d",
cpas_parms.client_handle);
new_cci_dev->cpas_handle = cpas_parms.client_handle;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h
index 996fc62..cb01c6c 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h
@@ -31,12 +31,13 @@
#include <media/v4l2-ioctl.h>
#include <media/v4l2-subdev.h>
#include <cam_sensor_cmn_header.h>
-#include <cam_sensor_soc_api.h>
#include <cam_io_util.h>
#include <cam_sensor_util.h>
#include <cam_subdev.h>
#include <cam_cpas_api.h>
#include "cam_cci_hwreg.h"
+#include "cam_soc_util.h"
+#include "cam_debug_util.h"
#define V4L2_IDENT_CCI 50005
#define CCI_I2C_QUEUE_0_SIZE 128
@@ -80,16 +81,6 @@
#define PRIORITY_QUEUE (QUEUE_0)
#define SYNC_QUEUE (QUEUE_1)
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
-
-#undef CCI_DBG
-#ifdef MSM_CCI_DEBUG
-#define CCI_DBG(fmt, args...) pr_err(fmt, ##args)
-#else
-#define CCI_DBG(fmt, args...) pr_debug(fmt, ##args)
-#endif
-
enum cci_i2c_sync {
MSM_SYNC_DISABLE,
MSM_SYNC_ENABLE,
@@ -206,36 +197,25 @@
*/
struct cci_device {
struct v4l2_subdev subdev;
- struct resource *irq;
- void __iomem *base;
+ struct cam_hw_soc_info soc_info;
uint32_t hw_version;
uint8_t ref_count;
enum cam_cci_state_t cci_state;
- size_t num_clk;
- struct clk **cci_clk;
- struct msm_cam_clk_info *cci_clk_info;
struct cam_cci_i2c_queue_info
cci_i2c_queue_info[NUM_MASTERS][NUM_QUEUES];
struct cam_cci_master_info cci_master_info[NUM_MASTERS];
enum i2c_freq_mode i2c_freq_mode[NUM_MASTERS];
struct cam_cci_clk_params_t cci_clk_params[I2C_MAX_MODES];
- struct gpio *cci_gpio_tbl;
- uint8_t cci_gpio_tbl_size;
struct msm_pinctrl_info cci_pinctrl;
uint8_t cci_pinctrl_status;
- uint32_t cci_clk_src;
- struct camera_vreg_t *cci_vreg;
- struct regulator *cci_reg_ptr[MAX_REGULATOR];
- int32_t regulator_count;
uint8_t support_seq_write;
struct workqueue_struct *write_wq[MASTER_MAX];
struct cam_cci_wait_sync_cfg cci_wait_sync_cfg;
uint8_t valid_sync;
struct cam_subdev v4l2_dev_str;
uint32_t cycles_per_us;
+ int32_t clk_level_index;
uint8_t payload_size;
- size_t num_clk_cases;
- uint32_t **cci_clk_rates;
char device_name[20];
uint32_t cpas_handle;
};
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c
index 59cdfaa..83cb49e3 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c
@@ -13,59 +13,42 @@
#include "cam_cci_dev.h"
#include "cam_cci_core.h"
-static int32_t cam_cci_pinctrl_init(struct cci_device *cci_dev)
-{
- struct msm_pinctrl_info *cci_pctrl = NULL;
-
- cci_pctrl = &cci_dev->cci_pinctrl;
- cci_pctrl->pinctrl = devm_pinctrl_get(&cci_dev->v4l2_dev_str.pdev->dev);
- if (IS_ERR_OR_NULL(cci_pctrl->pinctrl)) {
- pr_err("%s:%d devm_pinctrl_get cci_pinctrl failed\n",
- __func__, __LINE__);
- return -EINVAL;
- }
- cci_pctrl->gpio_state_active = pinctrl_lookup_state(
- cci_pctrl->pinctrl,
- CCI_PINCTRL_STATE_DEFAULT);
- if (IS_ERR_OR_NULL(cci_pctrl->gpio_state_active)) {
- pr_err("%s:%d look up state for active state failed\n",
- __func__, __LINE__);
- return -EINVAL;
- }
- cci_pctrl->gpio_state_suspend = pinctrl_lookup_state(
- cci_pctrl->pinctrl,
- CCI_PINCTRL_STATE_SLEEP);
- if (IS_ERR_OR_NULL(cci_pctrl->gpio_state_suspend)) {
- pr_err("%s:%d look up state for suspend state failed\n",
- __func__, __LINE__);
- return -EINVAL;
- }
- return 0;
-}
-
int cam_cci_init(struct v4l2_subdev *sd,
struct cam_cci_ctrl *c_ctrl)
{
uint8_t i = 0, j = 0;
- int32_t rc = 0, ret = 0;
+ int32_t rc = 0;
struct cci_device *cci_dev;
enum cci_i2c_master_t master = MASTER_0;
- uint32_t *clk_rates = NULL;
struct cam_ahb_vote ahb_vote;
struct cam_axi_vote axi_vote;
+ struct cam_hw_soc_info *soc_info = NULL;
+ void __iomem *base = NULL;
cci_dev = v4l2_get_subdevdata(sd);
if (!cci_dev || !c_ctrl) {
- pr_err("%s:%d failed: invalid params %pK %pK\n", __func__,
- __LINE__, cci_dev, c_ctrl);
+ CAM_ERR(CAM_CCI, "failed: invalid params %pK %pK",
+ cci_dev, c_ctrl);
rc = -EINVAL;
return rc;
}
+ soc_info = &cci_dev->soc_info;
+ base = soc_info->reg_map[0].mem_base;
+
+ if (!soc_info || !base) {
+ CAM_ERR(CAM_CCI, "failed: invalid params %pK %pK",
+ soc_info, base);
+ rc = -EINVAL;
+ return rc;
+ }
+
+ CAM_DBG(CAM_CCI, "Base address %pK", base);
+
if (cci_dev->ref_count++) {
- CDBG("%s ref_count %d\n", __func__, cci_dev->ref_count);
+ CAM_DBG(CAM_CCI, "ref_count %d", cci_dev->ref_count);
master = c_ctrl->cci_info->cci_i2c_master;
- CDBG("%s:%d master %d\n", __func__, __LINE__, master);
+ CAM_DBG(CAM_CCI, "master %d", master);
if (master < MASTER_MAX && master >= 0) {
mutex_lock(&cci_dev->cci_master_info[master].mutex);
flush_workqueue(cci_dev->write_wq[master]);
@@ -80,18 +63,17 @@
/* Set proper mask to RESET CMD address */
if (master == MASTER_0)
cam_io_w_mb(CCI_M0_RESET_RMSK,
- cci_dev->base + CCI_RESET_CMD_ADDR);
+ base + CCI_RESET_CMD_ADDR);
else
cam_io_w_mb(CCI_M1_RESET_RMSK,
- cci_dev->base + CCI_RESET_CMD_ADDR);
+ base + CCI_RESET_CMD_ADDR);
/* wait for reset done irq */
rc = wait_for_completion_timeout(
&cci_dev->cci_master_info[master].
reset_complete,
CCI_TIMEOUT);
if (rc <= 0)
- pr_err("%s:%d wait failed %d\n", __func__,
- __LINE__, rc);
+ CAM_ERR(CAM_CCI, "wait failed %d", rc);
mutex_unlock(&cci_dev->cci_master_info[master].mutex);
}
return 0;
@@ -105,80 +87,27 @@
rc = cam_cpas_start(cci_dev->cpas_handle,
&ahb_vote, &axi_vote);
if (rc != 0) {
- pr_err("%s:%d CPAS start failed\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "CPAS start failed");
}
-
- ret = cam_cci_pinctrl_init(cci_dev);
- if (ret < 0) {
- pr_err("%s:%d Initialization of pinctrl failed\n",
- __func__, __LINE__);
- cci_dev->cci_pinctrl_status = 0;
- } else {
- cci_dev->cci_pinctrl_status = 1;
- }
- rc = msm_camera_request_gpio_table(cci_dev->cci_gpio_tbl,
- cci_dev->cci_gpio_tbl_size, 1);
- if (cci_dev->cci_pinctrl_status) {
- ret = pinctrl_select_state(cci_dev->cci_pinctrl.pinctrl,
- cci_dev->cci_pinctrl.gpio_state_active);
- if (ret)
- pr_err("%s:%d cannot set pin to active state\n",
- __func__, __LINE__);
- }
- if (rc < 0) {
- CDBG("%s: request gpio failed\n", __func__);
- goto request_gpio_failed;
- }
-
- rc = msm_camera_config_vreg(&cci_dev->v4l2_dev_str.pdev->dev,
- cci_dev->cci_vreg, cci_dev->regulator_count, NULL, 0,
- &cci_dev->cci_reg_ptr[0], 1);
- if (rc < 0) {
- pr_err("%s:%d cci config_vreg failed\n", __func__, __LINE__);
- goto clk_enable_failed;
- }
-
- rc = msm_camera_enable_vreg(&cci_dev->v4l2_dev_str.pdev->dev,
- cci_dev->cci_vreg, cci_dev->regulator_count, NULL, 0,
- &cci_dev->cci_reg_ptr[0], 1);
- if (rc < 0) {
- pr_err("%s:%d cci enable_vreg failed\n", __func__, __LINE__);
- goto reg_enable_failed;
- }
-
- clk_rates = cam_cci_get_clk_rates(cci_dev, c_ctrl);
- if (!clk_rates) {
- pr_err("%s: clk enable failed\n", __func__);
- goto reg_enable_failed;
- }
-
- for (i = 0; i < cci_dev->num_clk; i++) {
- cci_dev->cci_clk_info[i].clk_rate =
- clk_rates[i];
- }
- rc = msm_camera_clk_enable(&cci_dev->v4l2_dev_str.pdev->dev,
- cci_dev->cci_clk_info, cci_dev->cci_clk,
- cci_dev->num_clk, true);
- if (rc < 0) {
- pr_err("%s: clk enable failed\n", __func__);
- goto reg_enable_failed;
- }
+ cam_cci_get_clk_rates(cci_dev, c_ctrl);
/* Re-initialize the completion */
reinit_completion(&cci_dev->cci_master_info[master].reset_complete);
for (i = 0; i < NUM_QUEUES; i++)
reinit_completion(&cci_dev->cci_master_info[master].
report_q[i]);
- rc = msm_camera_enable_irq(cci_dev->irq, true);
+
+ /* Enable Regulators and IRQ*/
+ rc = cam_soc_util_enable_platform_resource(soc_info, true,
+ CAM_TURBO_VOTE, true);
if (rc < 0) {
- pr_err("%s: irq enable failed\n", __func__);
- return -EINVAL;
+ CAM_DBG(CAM_CCI, "request platform resources failed");
+ goto platform_enable_failed;
}
- cci_dev->hw_version = cam_io_r_mb(cci_dev->base +
+
+ cci_dev->hw_version = cam_io_r_mb(base +
CCI_HW_VERSION_ADDR);
- CDBG("%s:%d: hw_version = 0x%x\n", __func__, __LINE__,
- cci_dev->hw_version);
+ CAM_DBG(CAM_CCI, "hw_version = 0x%x", cci_dev->hw_version);
cci_dev->payload_size =
MSM_CCI_WRITE_DATA_PAYLOAD_SIZE_11;
@@ -195,24 +124,23 @@
max_queue_size =
CCI_I2C_QUEUE_1_SIZE;
- CDBG("CCI Master[%d] :: Q0 size: %d Q1 size: %d\n", i,
+ CAM_DBG(CAM_CCI, "CCI Master[%d] :: Q0 : %d Q1 : %d", i
+ , cci_dev->cci_i2c_queue_info[i][j].
+ max_queue_size,
cci_dev->cci_i2c_queue_info[i][j].
- max_queue_size,
- cci_dev->cci_i2c_queue_info[i][j].
- max_queue_size);
+ max_queue_size);
}
}
cci_dev->cci_master_info[MASTER_0].reset_pending = TRUE;
- cam_io_w_mb(CCI_RESET_CMD_RMSK, cci_dev->base +
+ cam_io_w_mb(CCI_RESET_CMD_RMSK, base +
CCI_RESET_CMD_ADDR);
- cam_io_w_mb(0x1, cci_dev->base + CCI_RESET_CMD_ADDR);
+ cam_io_w_mb(0x1, base + CCI_RESET_CMD_ADDR);
rc = wait_for_completion_timeout(
&cci_dev->cci_master_info[MASTER_0].reset_complete,
CCI_TIMEOUT);
if (rc <= 0) {
- pr_err("%s: wait_for_completion_timeout %d\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CCI, "wait_for_completion_timeout");
if (rc == 0)
rc = -ETIMEDOUT;
goto reset_complete_failed;
@@ -220,14 +148,14 @@
for (i = 0; i < MASTER_MAX; i++)
cci_dev->i2c_freq_mode[i] = I2C_MAX_MODES;
cam_io_w_mb(CCI_IRQ_MASK_0_RMSK,
- cci_dev->base + CCI_IRQ_MASK_0_ADDR);
+ base + CCI_IRQ_MASK_0_ADDR);
cam_io_w_mb(CCI_IRQ_MASK_0_RMSK,
- cci_dev->base + CCI_IRQ_CLEAR_0_ADDR);
- cam_io_w_mb(0x1, cci_dev->base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+ base + CCI_IRQ_CLEAR_0_ADDR);
+ cam_io_w_mb(0x1, base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
for (i = 0; i < MASTER_MAX; i++) {
if (!cci_dev->write_wq[i]) {
- pr_err("Failed to flush write wq\n");
+ CAM_ERR(CAM_CCI, "Failed to flush write wq");
rc = -ENOMEM;
goto reset_complete_failed;
} else {
@@ -239,25 +167,9 @@
return 0;
reset_complete_failed:
- msm_camera_enable_irq(cci_dev->irq, false);
- msm_camera_clk_enable(&cci_dev->v4l2_dev_str.pdev->dev,
- cci_dev->cci_clk_info, cci_dev->cci_clk,
- cci_dev->num_clk, false);
-reg_enable_failed:
- msm_camera_config_vreg(&cci_dev->v4l2_dev_str.pdev->dev,
- cci_dev->cci_vreg, cci_dev->regulator_count, NULL, 0,
- &cci_dev->cci_reg_ptr[0], 0);
-clk_enable_failed:
- if (cci_dev->cci_pinctrl_status) {
- ret = pinctrl_select_state(cci_dev->cci_pinctrl.pinctrl,
- cci_dev->cci_pinctrl.gpio_state_suspend);
- if (ret)
- pr_err("%s:%d cannot set pin to suspend state\n",
- __func__, __LINE__);
- }
- msm_camera_request_gpio_table(cci_dev->cci_gpio_tbl,
- cci_dev->cci_gpio_tbl_size, 0);
-request_gpio_failed:
+ cam_soc_util_disable_platform_resource(soc_info, 1, 1);
+
+platform_enable_failed:
cci_dev->ref_count--;
cam_cpas_stop(cci_dev->cpas_handle);
@@ -267,12 +179,9 @@
void cam_cci_soc_remove(struct platform_device *pdev,
struct cci_device *cci_dev)
{
- msm_camera_put_clk_info_and_rates(pdev,
- &cci_dev->cci_clk_info, &cci_dev->cci_clk,
- &cci_dev->cci_clk_rates, cci_dev->num_clk_cases,
- cci_dev->num_clk);
+ struct cam_hw_soc_info *soc_info = &cci_dev->soc_info;
- msm_camera_put_reg_base(pdev, cci_dev->base, "cci", true);
+ cam_soc_util_release_platform_resource(soc_info);
}
static void cam_cci_init_cci_params(struct cci_device *new_cci_dev)
@@ -293,76 +202,6 @@
}
}
-static int32_t cam_cci_init_gpio_params(struct cci_device *cci_dev)
-{
- int32_t rc = 0, i = 0;
- uint32_t *val_array = NULL;
- uint8_t tbl_size = 0;
- struct device_node *of_node = cci_dev->v4l2_dev_str.pdev->dev.of_node;
- struct gpio *gpio_tbl = NULL;
-
- cci_dev->cci_gpio_tbl_size = tbl_size = of_gpio_count(of_node);
- CDBG("%s gpio count %d\n", __func__, tbl_size);
- if (!tbl_size) {
- pr_err("%s:%d gpio count 0\n", __func__, __LINE__);
- return -EINVAL;
- }
-
- gpio_tbl = cci_dev->cci_gpio_tbl =
- kzalloc(sizeof(struct gpio) * tbl_size, GFP_KERNEL);
- if (!gpio_tbl) {
- pr_err("%s failed %d\n", __func__, __LINE__);
- return -EINVAL;
- }
-
- for (i = 0; i < tbl_size; i++) {
- gpio_tbl[i].gpio = of_get_gpio(of_node, i);
- CDBG("%s gpio_tbl[%d].gpio = %d\n", __func__, i,
- gpio_tbl[i].gpio);
- }
-
- val_array = kcalloc(tbl_size, sizeof(uint32_t),
- GFP_KERNEL);
- if (!val_array) {
- rc = -ENOMEM;
- goto free_gpio_tbl;
- }
-
- rc = of_property_read_u32_array(of_node, "qcom,gpio-tbl-flags",
- val_array, tbl_size);
- if (rc < 0) {
- pr_err("%s failed %d\n", __func__, __LINE__);
- goto free_val_array;
- }
- for (i = 0; i < tbl_size; i++) {
- gpio_tbl[i].flags = val_array[i];
- CDBG("%s gpio_tbl[%d].flags = %ld\n", __func__, i,
- gpio_tbl[i].flags);
- }
-
- for (i = 0; i < tbl_size; i++) {
- rc = of_property_read_string_index(of_node,
- "qcom,gpio-tbl-label", i, &gpio_tbl[i].label);
- CDBG("%s gpio_tbl[%d].label = %s\n", __func__, i,
- gpio_tbl[i].label);
- if (rc < 0) {
- pr_err("%s failed %d\n", __func__, __LINE__);
- goto free_val_array;
- }
- }
-
- kfree(val_array);
- return rc;
-
-free_val_array:
- kfree(val_array);
-free_gpio_tbl:
- kfree(cci_dev->cci_gpio_tbl);
- cci_dev->cci_gpio_tbl = NULL;
- cci_dev->cci_gpio_tbl_size = 0;
- return rc;
-}
-
static void cam_cci_init_default_clk_params(struct cci_device *cci_dev,
uint8_t index)
{
@@ -403,75 +242,76 @@
src_node = of_find_node_by_name(of_node,
"qcom,i2c_custom_mode");
- rc = of_property_read_u32(src_node, "qcom,hw-thigh", &val);
- CDBG("%s qcom,hw-thigh %d, rc %d\n", __func__, val, rc);
+ rc = of_property_read_u32(src_node, "hw-thigh", &val);
+ CAM_DBG(CAM_CCI, "hw-thigh %d, rc %d", val, rc);
if (!rc) {
cci_dev->cci_clk_params[count].hw_thigh = val;
- rc = of_property_read_u32(src_node, "qcom,hw-tlow",
+ rc = of_property_read_u32(src_node, "hw-tlow",
&val);
- CDBG("%s qcom,hw-tlow %d, rc %d\n", __func__, val, rc);
+ CAM_DBG(CAM_CCI, "hw-tlow %d, rc %d",
+ val, rc);
}
if (!rc) {
cci_dev->cci_clk_params[count].hw_tlow = val;
- rc = of_property_read_u32(src_node, "qcom,hw-tsu-sto",
+ rc = of_property_read_u32(src_node, "hw-tsu-sto",
&val);
- CDBG("%s qcom,hw-tsu-sto %d, rc %d\n",
- __func__, val, rc);
+ CAM_DBG(CAM_CCI, "hw-tsu-sto %d, rc %d",
+ val, rc);
}
if (!rc) {
cci_dev->cci_clk_params[count].hw_tsu_sto = val;
- rc = of_property_read_u32(src_node, "qcom,hw-tsu-sta",
+ rc = of_property_read_u32(src_node, "hw-tsu-sta",
&val);
- CDBG("%s qcom,hw-tsu-sta %d, rc %d\n",
- __func__, val, rc);
+ CAM_DBG(CAM_CCI, "hw-tsu-sta %d, rc %d",
+ val, rc);
}
if (!rc) {
cci_dev->cci_clk_params[count].hw_tsu_sta = val;
- rc = of_property_read_u32(src_node, "qcom,hw-thd-dat",
+ rc = of_property_read_u32(src_node, "hw-thd-dat",
&val);
- CDBG("%s qcom,hw-thd-dat %d, rc %d\n",
- __func__, val, rc);
+ CAM_DBG(CAM_CCI, "hw-thd-dat %d, rc %d",
+ val, rc);
}
if (!rc) {
cci_dev->cci_clk_params[count].hw_thd_dat = val;
- rc = of_property_read_u32(src_node, "qcom,hw-thd-sta",
+ rc = of_property_read_u32(src_node, "hw-thd-sta",
&val);
- CDBG("%s qcom,hw-thd-sta %d, rc %d\n", __func__,
+ CAM_DBG(CAM_CCI, "hw-thd-sta %d, rc %d",
val, rc);
}
if (!rc) {
cci_dev->cci_clk_params[count].hw_thd_sta = val;
- rc = of_property_read_u32(src_node, "qcom,hw-tbuf",
+ rc = of_property_read_u32(src_node, "hw-tbuf",
&val);
- CDBG("%s qcom,hw-tbuf %d, rc %d\n", __func__, val, rc);
+ CAM_DBG(CAM_CCI, "hw-tbuf %d, rc %d",
+ val, rc);
}
if (!rc) {
cci_dev->cci_clk_params[count].hw_tbuf = val;
rc = of_property_read_u32(src_node,
- "qcom,hw-scl-stretch-en", &val);
- CDBG("%s qcom,hw-scl-stretch-en %d, rc %d\n",
- __func__, val, rc);
+ "hw-scl-stretch-en", &val);
+ CAM_DBG(CAM_CCI, "hw-scl-stretch-en %d, rc %d",
+ val, rc);
}
if (!rc) {
cci_dev->cci_clk_params[count].hw_scl_stretch_en = val;
- rc = of_property_read_u32(src_node, "qcom,hw-trdhld",
+ rc = of_property_read_u32(src_node, "hw-trdhld",
&val);
- CDBG("%s qcom,hw-trdhld %d, rc %d\n",
- __func__, val, rc);
+ CAM_DBG(CAM_CCI, "hw-trdhld %d, rc %d",
+ val, rc);
}
if (!rc) {
cci_dev->cci_clk_params[count].hw_trdhld = val;
- rc = of_property_read_u32(src_node, "qcom,hw-tsp",
+ rc = of_property_read_u32(src_node, "hw-tsp",
&val);
- CDBG("%s qcom,hw-tsp %d, rc %d\n", __func__, val, rc);
+ CAM_DBG(CAM_CCI, "hw-tsp %d, rc %d", val, rc);
}
if (!rc) {
cci_dev->cci_clk_params[count].hw_tsp = val;
val = 0;
- rc = of_property_read_u32(src_node, "qcom,cci-clk-src",
+ rc = of_property_read_u32(src_node, "cci-clk-src",
&val);
- CDBG("%s qcom,cci-clk-src %d, rc %d\n",
- __func__, val, rc);
+ CAM_DBG(CAM_CCI, "cci-clk-src %d, rc %d", val, rc);
cci_dev->cci_clk_params[count].cci_clk_src = val;
} else
cam_cci_init_default_clk_params(cci_dev, count);
@@ -484,141 +324,73 @@
struct cci_device *new_cci_dev)
{
int rc = 0, i = 0;
+ struct cam_hw_soc_info *soc_info =
+ &new_cci_dev->soc_info;
- /* Get Clock Info*/
- rc = msm_camera_get_clk_info_and_rates(pdev,
- &new_cci_dev->cci_clk_info, &new_cci_dev->cci_clk,
- &new_cci_dev->cci_clk_rates, &new_cci_dev->num_clk_cases,
- &new_cci_dev->num_clk);
+ rc = cam_soc_util_get_dt_properties(soc_info);
if (rc < 0) {
- pr_err("%s: cam_cci_get_clk_info() failed", __func__);
- kfree(new_cci_dev);
- new_cci_dev = NULL;
- return -EFAULT;
+ CAM_ERR(CAM_CCI, "Parsing DT data failed:%d", rc);
+ return -EINVAL;
}
new_cci_dev->ref_count = 0;
- new_cci_dev->base = msm_camera_get_reg_base(pdev, "cci", true);
- if (!new_cci_dev->base) {
- pr_err("%s: no mem resource?\n", __func__);
- return -ENODEV;
- }
- new_cci_dev->irq = msm_camera_get_irq(pdev, "cci");
- if (!new_cci_dev->irq) {
- pr_err("%s: no irq resource?\n", __func__);
- return -ENODEV;
- }
- CDBG("%s line %d cci irq start %d end %d\n", __func__,
- __LINE__,
- (int) new_cci_dev->irq->start,
- (int) new_cci_dev->irq->end);
- rc = msm_camera_register_irq(pdev, new_cci_dev->irq,
- cam_cci_irq, IRQF_TRIGGER_RISING, "cci", new_cci_dev);
- if (rc < 0) {
- pr_err("%s: irq request fail\n", __func__);
- rc = -EBUSY;
- goto cci_release_mem;
- }
- msm_camera_enable_irq(new_cci_dev->irq, false);
+ rc = cam_soc_util_request_platform_resource(soc_info,
+ cam_cci_irq, new_cci_dev);
+ if (rc < 0) {
+ CAM_ERR(CAM_CCI, "requesting platform resources failed:%d", rc);
+ return -EINVAL;
+ }
new_cci_dev->v4l2_dev_str.pdev = pdev;
cam_cci_init_cci_params(new_cci_dev);
cam_cci_init_clk_params(new_cci_dev);
- rc = cam_cci_init_gpio_params(new_cci_dev);
- if (rc < 0) {
- pr_err("%s:%d :Error: In Initializing GPIO params:%d\n",
- __func__, __LINE__, rc);
- goto cci_release_mem;
- }
-
- rc = cam_sensor_get_dt_vreg_data(new_cci_dev->
- v4l2_dev_str.pdev->dev.of_node,
- &(new_cci_dev->cci_vreg), &(new_cci_dev->regulator_count));
- if (rc < 0) {
- pr_err("%s: cam_sensor_get_dt_vreg_data fail\n", __func__);
- rc = -EFAULT;
- goto cci_release_mem;
- }
-
- /* Parse VREG data */
- if ((new_cci_dev->regulator_count < 0) ||
- (new_cci_dev->regulator_count > MAX_REGULATOR)) {
- pr_err("%s: invalid reg count = %d, max is %d\n", __func__,
- new_cci_dev->regulator_count, MAX_REGULATOR);
- rc = -EFAULT;
- goto cci_invalid_vreg_data;
- }
rc = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
if (rc)
- pr_err("%s: failed to add child nodes, rc=%d\n", __func__, rc);
+ CAM_ERR(CAM_CCI, "failed to add child nodes, rc=%d", rc);
+
for (i = 0; i < MASTER_MAX; i++) {
new_cci_dev->write_wq[i] = create_singlethread_workqueue(
"cam_cci_wq");
if (!new_cci_dev->write_wq[i])
- pr_err("Failed to create write wq\n");
+ CAM_ERR(CAM_CCI, "Failed to create write wq");
}
- CDBG("%s line %d\n", __func__, __LINE__);
+ CAM_DBG(CAM_CCI, "Exit");
return 0;
-
-cci_invalid_vreg_data:
- kfree(new_cci_dev->cci_vreg);
- new_cci_dev->cci_vreg = NULL;
-cci_release_mem:
- msm_camera_put_reg_base(pdev, new_cci_dev->base, "cci", true);
-
- return rc;
}
int cam_cci_soc_release(struct cci_device *cci_dev)
{
uint8_t i = 0, rc = 0;
+ struct cam_hw_soc_info *soc_info =
+ &cci_dev->soc_info;
if (!cci_dev->ref_count || cci_dev->cci_state != CCI_STATE_ENABLED) {
- pr_err("%s invalid ref count %d / cci state %d\n",
- __func__, cci_dev->ref_count, cci_dev->cci_state);
+ CAM_ERR(CAM_CCI, "invalid ref count %d / cci state %d",
+ cci_dev->ref_count, cci_dev->cci_state);
return -EINVAL;
}
if (--cci_dev->ref_count) {
- CDBG("%s ref_count Exit %d\n", __func__, cci_dev->ref_count);
+ CAM_DBG(CAM_CCI, "ref_count Exit %d", cci_dev->ref_count);
return 0;
}
for (i = 0; i < MASTER_MAX; i++)
if (cci_dev->write_wq[i])
flush_workqueue(cci_dev->write_wq[i]);
- msm_camera_enable_irq(cci_dev->irq, false);
- msm_camera_clk_enable(&cci_dev->v4l2_dev_str.pdev->dev,
- cci_dev->cci_clk_info, cci_dev->cci_clk,
- cci_dev->num_clk, false);
-
- rc = msm_camera_enable_vreg(&cci_dev->v4l2_dev_str.pdev->dev,
- cci_dev->cci_vreg, cci_dev->regulator_count, NULL, 0,
- &cci_dev->cci_reg_ptr[0], 0);
- if (rc < 0)
- pr_err("%s:%d cci disable_vreg failed\n", __func__, __LINE__);
-
- rc = msm_camera_config_vreg(&cci_dev->v4l2_dev_str.pdev->dev,
- cci_dev->cci_vreg, cci_dev->regulator_count, NULL, 0,
- &cci_dev->cci_reg_ptr[0], 0);
- if (rc < 0)
- pr_err("%s:%d cci unconfig_vreg failed\n", __func__, __LINE__);
-
- if (cci_dev->cci_pinctrl_status) {
- rc = pinctrl_select_state(cci_dev->cci_pinctrl.pinctrl,
- cci_dev->cci_pinctrl.gpio_state_suspend);
- if (rc)
- pr_err("%s:%d cannot set pin to active state\n",
- __func__, __LINE__);
- }
- cci_dev->cci_pinctrl_status = 0;
- msm_camera_request_gpio_table(cci_dev->cci_gpio_tbl,
- cci_dev->cci_gpio_tbl_size, 0);
for (i = 0; i < MASTER_MAX; i++)
cci_dev->i2c_freq_mode[i] = I2C_MAX_MODES;
+
+ rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+ if (rc) {
+ CAM_ERR(CAM_CCI, "platform resources disable failed, rc=%d",
+ rc);
+ return rc;
+ }
+
cci_dev->cci_state = CCI_STATE_DISABLED;
cci_dev->cycles_per_us = 0;
- cci_dev->cci_clk_src = 0;
+ soc_info->src_clk_idx = 0;
return rc;
}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.h
index ca4bbe0..331227b 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.h
@@ -14,6 +14,7 @@
#define _CAM_CCI_SOC_H_
#include "cam_cci_core.h"
+#include "cam_soc_util.h"
/**
* @sd: V4L2 sub device
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/Makefile
index 0337b32..8edbea5 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/Makefile
@@ -4,5 +4,6 @@
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_csiphy_soc.o cam_csiphy_dev.o cam_csiphy_core.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
index 6751fdd..fcf76c8 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
@@ -19,7 +19,9 @@
void cam_csiphy_query_cap(struct csiphy_device *csiphy_dev,
struct cam_csiphy_query_cap *csiphy_cap)
{
- csiphy_cap->slot_info = csiphy_dev->v4l2_dev_str.pdev->id;
+ struct cam_hw_soc_info *soc_info = &csiphy_dev->soc_info;
+
+ csiphy_cap->slot_info = soc_info->index;
csiphy_cap->version = csiphy_dev->hw_version;
csiphy_cap->clk_lane = csiphy_dev->clk_lane;
}
@@ -27,14 +29,18 @@
void cam_csiphy_reset(struct csiphy_device *csiphy_dev)
{
int32_t i;
+ void __iomem *base = NULL;
uint32_t size =
csiphy_dev->ctrl_reg->csiphy_reg.csiphy_reset_array_size;
+ struct cam_hw_soc_info *soc_info = &csiphy_dev->soc_info;
+
+ base = soc_info->reg_map[0].mem_base;
for (i = 0; i < size; i++) {
- cam_io_w(
+ cam_io_w_mb(
csiphy_dev->ctrl_reg->
csiphy_reset_reg[i].reg_data,
- csiphy_dev->base +
+ base +
csiphy_dev->ctrl_reg->
csiphy_reset_reg[i].reg_addr);
@@ -57,7 +63,7 @@
size_t len;
if (!cfg_dev || !csiphy_dev) {
- pr_err("%s:%d Invalid Args\n", __func__, __LINE__);
+ CAM_ERR(CAM_CSIPHY, "Invalid Args");
return -EINVAL;
}
@@ -69,16 +75,16 @@
rc = cam_mem_get_cpu_buf((int32_t) cfg_dev->packet_handle,
(uint64_t *)&generic_ptr, &len);
if (rc < 0) {
- pr_err("%s:%d :ERROR: Failed to get packet Mem address: %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_CSIPHY, "Failed to get packet Mem address: %d", rc);
kfree(csiphy_dev->csiphy_info);
csiphy_dev->csiphy_info = NULL;
return rc;
}
if (cfg_dev->offset > len) {
- pr_err("%s: %d offset is out of bounds: offset: %lld len: %zu\n",
- __func__, __LINE__, cfg_dev->offset, len);
+ CAM_ERR(CAM_CSIPHY,
+ "offset is out of bounds: offset: %lld len: %zu",
+ cfg_dev->offset, len);
kfree(csiphy_dev->csiphy_info);
csiphy_dev->csiphy_info = NULL;
return -EINVAL;
@@ -93,8 +99,8 @@
rc = cam_mem_get_cpu_buf(cmd_desc->mem_handle,
(uint64_t *)&generic_ptr, &len);
if (rc < 0) {
- pr_err("%s:%d :ERROR: Failed to get cmd buf Mem address : %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_CSIPHY,
+ "Failed to get cmd buf Mem address : %d", rc);
kfree(csiphy_dev->csiphy_info);
csiphy_dev->csiphy_info = NULL;
return rc;
@@ -118,11 +124,13 @@
void cam_csiphy_cphy_irq_config(struct csiphy_device *csiphy_dev)
{
int32_t i;
+ void __iomem *csiphybase =
+ csiphy_dev->soc_info.reg_map[0].mem_base;
for (i = 0; i < csiphy_dev->num_irq_registers; i++)
- cam_io_w(csiphy_dev->ctrl_reg->
+ cam_io_w_mb(csiphy_dev->ctrl_reg->
csiphy_irq_reg[i].reg_data,
- csiphy_dev->base +
+ csiphybase +
csiphy_dev->ctrl_reg->
csiphy_irq_reg[i].reg_addr);
}
@@ -130,10 +138,12 @@
void cam_csiphy_cphy_irq_disable(struct csiphy_device *csiphy_dev)
{
int32_t i;
+ void __iomem *csiphybase =
+ csiphy_dev->soc_info.reg_map[0].mem_base;
for (i = 0; i < csiphy_dev->num_irq_registers; i++)
- cam_io_w(0x0,
- csiphy_dev->base +
+ cam_io_w_mb(0x0,
+ csiphybase +
csiphy_dev->ctrl_reg->
csiphy_irq_reg[i].reg_addr);
}
@@ -144,34 +154,38 @@
uint8_t i;
struct csiphy_device *csiphy_dev =
(struct csiphy_device *)data;
+ struct cam_hw_soc_info *soc_info = NULL;
+ void __iomem *base = NULL;
if (!csiphy_dev) {
- pr_err("%s:%d Invalid Args\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CSIPHY, "Invalid Args");
return -EINVAL;
}
+ soc_info = &csiphy_dev->soc_info;
+ base = csiphy_dev->soc_info.reg_map[0].mem_base;
+
for (i = 0; i < csiphy_dev->num_irq_registers; i++) {
irq = cam_io_r(
- csiphy_dev->base +
+ base +
csiphy_dev->ctrl_reg->csiphy_reg.
mipi_csiphy_interrupt_status0_addr + 0x4*i);
- cam_io_w(irq,
- csiphy_dev->base +
+ cam_io_w_mb(irq,
+ base +
csiphy_dev->ctrl_reg->csiphy_reg.
mipi_csiphy_interrupt_clear0_addr + 0x4*i);
- pr_err_ratelimited(
- "%s CSIPHY%d_IRQ_STATUS_ADDR%d = 0x%x\n",
- __func__, csiphy_dev->v4l2_dev_str.pdev->id, i, irq);
- cam_io_w(0x0,
- csiphy_dev->base +
+ CAM_ERR_RATE_LIMIT(CAM_CSIPHY,
+ "CSIPHY%d_IRQ_STATUS_ADDR%d = 0x%x",
+ soc_info->index, i, irq);
+ cam_io_w_mb(0x0,
+ base +
csiphy_dev->ctrl_reg->csiphy_reg.
mipi_csiphy_interrupt_clear0_addr + 0x4*i);
}
- cam_io_w(0x1, csiphy_dev->base +
+ cam_io_w_mb(0x1, base +
csiphy_dev->ctrl_reg->
csiphy_reg.mipi_csiphy_glbl_irq_cmd_addr);
- cam_io_w(0x0, csiphy_dev->base +
+ cam_io_w_mb(0x0, base +
csiphy_dev->ctrl_reg->
csiphy_reg.mipi_csiphy_glbl_irq_cmd_addr);
@@ -183,23 +197,23 @@
int32_t rc = 0;
uint32_t lane_enable = 0, mask = 1, size = 0;
uint16_t lane_mask = 0, i = 0, cfg_size = 0;
- uint8_t settle_cnt, lane_cnt, lane_pos = 0;
+ uint8_t lane_cnt, lane_pos = 0;
+ uint16_t settle_cnt = 0;
void __iomem *csiphybase;
struct csiphy_reg_t (*reg_array)[MAX_SETTINGS_PER_LANE];
if (csiphy_dev->csiphy_info == NULL) {
- pr_err("%s:%d csiphy_info is NULL, No/Fail CONFIG_DEV ?\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CSIPHY, "csiphy_info is NULL, No/Fail CONFIG_DEV?");
return -EINVAL;
}
lane_cnt = csiphy_dev->csiphy_info->lane_cnt;
lane_mask = csiphy_dev->csiphy_info->lane_mask & 0x1f;
settle_cnt = (csiphy_dev->csiphy_info->settle_time / 200000000);
- csiphybase = csiphy_dev->base;
+ csiphybase = csiphy_dev->soc_info.reg_map[0].mem_base;
if (!csiphybase) {
- pr_err("%s: csiphybase NULL\n", __func__);
+ CAM_ERR(CAM_CSIPHY, "csiphybase NULL");
return -EINVAL;
}
@@ -242,15 +256,15 @@
switch (csiphy_dev->ctrl_reg->
csiphy_common_reg[i].csiphy_param_type) {
case CSIPHY_LANE_ENABLE:
- cam_io_w(lane_enable,
- csiphy_dev->base +
+ cam_io_w_mb(lane_enable,
+ csiphybase +
csiphy_dev->ctrl_reg->
csiphy_common_reg[i].reg_addr);
break;
case CSIPHY_DEFAULT_PARAMS:
- cam_io_w(csiphy_dev->ctrl_reg->
+ cam_io_w_mb(csiphy_dev->ctrl_reg->
csiphy_common_reg[i].reg_data,
- csiphy_dev->base +
+ csiphybase +
csiphy_dev->ctrl_reg->
csiphy_common_reg[i].reg_addr);
break;
@@ -269,27 +283,27 @@
for (i = 0; i < cfg_size; i++) {
switch (reg_array[lane_pos][i].csiphy_param_type) {
case CSIPHY_LANE_ENABLE:
- cam_io_w(lane_enable,
- csiphy_dev->base +
+ cam_io_w_mb(lane_enable,
+ csiphybase +
reg_array[lane_pos][i].reg_addr);
break;
case CSIPHY_DEFAULT_PARAMS:
- cam_io_w(reg_array[lane_pos][i].reg_data,
- csiphy_dev->base +
+ cam_io_w_mb(reg_array[lane_pos][i].reg_data,
+ csiphybase +
reg_array[lane_pos][i].reg_addr);
break;
case CSIPHY_SETTLE_CNT_LOWER_BYTE:
- cam_io_w(settle_cnt & 0xFF,
- csiphy_dev->base +
+ cam_io_w_mb(settle_cnt & 0xFF,
+ csiphybase +
reg_array[lane_pos][i].reg_addr);
break;
case CSIPHY_SETTLE_CNT_HIGHER_BYTE:
- cam_io_w((settle_cnt >> 8) & 0xFF,
- csiphy_dev->base +
+ cam_io_w_mb((settle_cnt >> 8) & 0xFF,
+ csiphybase +
reg_array[lane_pos][i].reg_addr);
break;
default:
- CDBG("%s: %d Do Nothing\n", __func__, __LINE__);
+ CAM_DBG(CAM_CSIPHY, "Do Nothing");
break;
}
usleep_range(reg_array[lane_pos][i].delay*1000,
@@ -313,13 +327,11 @@
int32_t rc = 0;
if (!csiphy_dev || !cmd) {
- pr_err("%s:%d Invalid input args\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CSIPHY, "Invalid input args");
return -EINVAL;
}
- pr_debug("%s:%d Opcode received: %d\n", __func__, __LINE__,
- cmd->op_code);
+ CAM_DBG(CAM_CSIPHY, "Opcode received: %d", cmd->op_code);
mutex_lock(&csiphy_dev->mutex);
switch (cmd->op_code) {
case CAM_ACQUIRE_DEV: {
@@ -332,16 +344,15 @@
(void __user *)cmd->handle,
sizeof(csiphy_acq_dev));
if (rc < 0) {
- pr_err("%s:%d :ERROR: Failed copying from User\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CSIPHY, "Failed copying from User");
goto release_mutex;
}
csiphy_acq_params.combo_mode = 0;
if (csiphy_dev->acquire_count == 2) {
- pr_err("%s:%d CSIPHY device do not allow more than 2 acquires\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CSIPHY,
+ "CSIPHY device do not allow more than 2 acquires");
rc = -EINVAL;
goto release_mutex;
}
@@ -364,8 +375,7 @@
if (copy_to_user((void __user *)cmd->handle,
&csiphy_acq_dev,
sizeof(struct cam_sensor_acquire_dev))) {
- pr_err("%s:%d :ERROR: Failed copying from User\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CSIPHY, "Failed copying from User");
rc = -EINVAL;
goto release_mutex;
}
@@ -375,30 +385,27 @@
}
break;
case CAM_QUERY_CAP: {
- struct cam_csiphy_query_cap csiphy_cap;
+ struct cam_csiphy_query_cap csiphy_cap = {0};
cam_csiphy_query_cap(csiphy_dev, &csiphy_cap);
if (copy_to_user((void __user *)cmd->handle,
&csiphy_cap, sizeof(struct cam_csiphy_query_cap))) {
- pr_err("%s:%d :ERROR: Failed copying from User\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CSIPHY, "Failed copying from User");
rc = -EINVAL;
goto release_mutex;
}
}
break;
case CAM_STOP_DEV: {
- rc = cam_csiphy_soc_release(csiphy_dev);
+ rc = cam_csiphy_disable_hw(csiphy_dev);
if (rc < 0) {
- pr_err("%s:%d Failed in csiphy release\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CSIPHY, "Failed in csiphy release");
cam_cpas_stop(csiphy_dev->cpas_handle);
goto release_mutex;
}
rc = cam_cpas_stop(csiphy_dev->cpas_handle);
if (rc < 0) {
- pr_err("%s:%d :Error: de-voting CPAS: %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_CSIPHY, "de-voting CPAS: %d", rc);
goto release_mutex;
}
}
@@ -407,8 +414,7 @@
struct cam_release_dev_cmd release;
if (!csiphy_dev->acquire_count) {
- pr_err("%s:%d No valid devices to release\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CSIPHY, "No valid devices to release");
rc = -EINVAL;
goto release_mutex;
}
@@ -421,8 +427,7 @@
rc = cam_destroy_device_hdl(release.dev_handle);
if (rc < 0)
- pr_err("%s:%d :ERROR: destroying the device hdl\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CSIPHY, "destroying the device hdl");
if (release.dev_handle ==
csiphy_dev->bridge_intf.device_hdl[0]) {
csiphy_dev->bridge_intf.device_hdl[0] = -1;
@@ -446,8 +451,7 @@
} else {
rc = cam_cmd_buf_parser(csiphy_dev, &config);
if (rc < 0) {
- pr_err("%s:%d Fail in cmd buf parser\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CSIPHY, "Fail in cmd buf parser");
goto release_mutex;
}
}
@@ -465,22 +469,19 @@
rc = cam_cpas_start(csiphy_dev->cpas_handle,
&ahb_vote, &axi_vote);
if (rc < 0) {
- pr_err("%s:%d :Error: voting CPAS: %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_CSIPHY, "voting CPAS: %d", rc);
goto release_mutex;
}
rc = cam_csiphy_enable_hw(csiphy_dev);
if (rc != 0) {
- pr_err("%s: %d cam_csiphy_enable_hw failed\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CSIPHY, "cam_csiphy_enable_hw failed");
cam_cpas_stop(csiphy_dev->cpas_handle);
goto release_mutex;
}
rc = cam_csiphy_config_dev(csiphy_dev);
if (rc < 0) {
- pr_err("%s: %d cam_csiphy_config_dev failed\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CSIPHY, "cam_csiphy_config_dev failed");
cam_cpas_stop(csiphy_dev->cpas_handle);
goto release_mutex;
}
@@ -489,8 +490,7 @@
case CAM_SD_SHUTDOWN:
break;
default:
- pr_err("%s:%d :Error: Invalid Opcode: %d\n",
- __func__, __LINE__, cmd->op_code);
+ CAM_ERR(CAM_CSIPHY, "Invalid Opcode: %d", cmd->op_code);
rc = -EINVAL;
goto release_mutex;
}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c
index f2ece9d..1c93a1a 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c
@@ -16,13 +16,6 @@
#include "cam_csiphy_core.h"
#include <media/cam_sensor.h>
-#undef CDBG
-#ifdef CAM_CSIPHY_DEV_DEBUG
-#define CDBG(fmt, args...) pr_err(fmt, ##args)
-#else
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
-#endif
-
static long cam_csiphy_subdev_ioctl(struct v4l2_subdev *sd,
unsigned int cmd, void *arg)
{
@@ -33,13 +26,12 @@
case VIDIOC_CAM_CONTROL:
rc = cam_csiphy_core_cfg(csiphy_dev, arg);
if (rc != 0) {
- pr_err("%s: %d :ERROR: in configuring the device\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CSIPHY, "in configuring the device");
return rc;
}
break;
default:
- pr_err("%s:%d :ERROR: Wrong ioctl\n", __func__, __LINE__);
+ CAM_ERR(CAM_CSIPHY, "Wrong ioctl : %d", cmd);
break;
}
@@ -55,7 +47,7 @@
if (copy_from_user(&cmd_data, (void __user *)arg,
sizeof(cmd_data))) {
- pr_err("Failed to copy from user_ptr=%pK size=%zu\n",
+ CAM_ERR(CAM_CSIPHY, "Failed to copy from user_ptr=%pK size=%zu",
(void __user *)arg, sizeof(cmd_data));
return -EFAULT;
}
@@ -68,15 +60,15 @@
rc = cam_csiphy_subdev_ioctl(sd, cmd, &cmd_data);
break;
default:
- pr_err("%s:%d Invalid compat ioctl cmd: %d\n",
- __func__, __LINE__, cmd);
+ CAM_ERR(CAM_CSIPHY, "Invalid compat ioctl cmd: %d", cmd);
rc = -EINVAL;
}
if (!rc) {
if (copy_to_user((void __user *)arg, &cmd_data,
sizeof(cmd_data))) {
- pr_err("Failed to copy to user_ptr=%pK size=%zu\n",
+ CAM_ERR(CAM_CSIPHY,
+ "Failed to copy to user_ptr=%pK size=%zu",
(void __user *)arg, sizeof(cmd_data));
rc = -EFAULT;
}
@@ -120,12 +112,13 @@
mutex_init(&new_csiphy_dev->mutex);
new_csiphy_dev->v4l2_dev_str.pdev = pdev;
+ new_csiphy_dev->soc_info.pdev = pdev;
+
new_csiphy_dev->ref_count = 0;
rc = cam_csiphy_parse_dt_info(pdev, new_csiphy_dev);
if (rc < 0) {
- pr_err("%s:%d :ERROR: dt paring failed: %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_CSIPHY, "DT parsing failed: %d", rc);
goto csiphy_no_resource;
}
@@ -146,8 +139,7 @@
rc = cam_register_subdev(&(new_csiphy_dev->v4l2_dev_str));
if (rc < 0) {
- pr_err("%s:%d :ERROR: In cam_register_subdev\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CSIPHY, "cam_register_subdev Failed rc: %d", rc);
goto csiphy_no_resource;
}
@@ -167,18 +159,17 @@
new_csiphy_dev->is_acquired_dev_combo_mode = 0;
cpas_parms.cam_cpas_client_cb = NULL;
- cpas_parms.cell_index = pdev->id;
+ cpas_parms.cell_index = new_csiphy_dev->soc_info.index;
cpas_parms.dev = &pdev->dev;
cpas_parms.userdata = new_csiphy_dev;
strlcpy(cpas_parms.identifier, "csiphy", CAM_HW_IDENTIFIER_LENGTH);
rc = cam_cpas_register_client(&cpas_parms);
if (rc) {
- pr_err("%s:%d CPAS registration failed\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_CSIPHY, "CPAS registration failed rc: %d", rc);
goto csiphy_no_resource;
}
- CDBG("CPAS registration successful handle=%d\n",
+ CAM_DBG(CAM_CSIPHY, "CPAS registration successful handle=%d",
cpas_parms.client_handle);
new_csiphy_dev->cpas_handle = cpas_parms.client_handle;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h
index 9049e4e..8ed5ba4 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h
@@ -31,9 +31,10 @@
#include <cam_sensor_cmn_header.h>
#include <cam_req_mgr_interface.h>
#include <cam_subdev.h>
-#include <cam_sensor_soc_api.h>
#include <cam_io_util.h>
#include <cam_cpas_api.h>
+#include "cam_soc_util.h"
+#include "cam_debug_util.h"
#define MAX_CSIPHY 3
#define MAX_DPHY_DATA_LN 4
@@ -175,19 +176,11 @@
* device is for combo mode
*/
struct csiphy_device {
- struct resource *irq;
- void __iomem *base;
struct mutex mutex;
uint32_t hw_version;
uint32_t csiphy_state;
struct csiphy_ctrl_t *ctrl_reg;
- size_t num_clk;
uint32_t csiphy_max_clk;
- int32_t num_vreg;
- struct clk **csiphy_clk;
- struct msm_cam_clk_info *csiphy_clk_info;
- struct camera_vreg_t *csiphy_vreg;
- struct regulator *csiphy_reg_ptr[MAX_REGULATOR];
struct msm_cam_clk_info csiphy_3p_clk_info[2];
struct clk *csiphy_3p_clk[2];
uint32_t csiphy_clk_index;
@@ -203,6 +196,7 @@
uint32_t acquire_count;
char device_name[20];
uint32_t is_acquired_dev_combo_mode;
+ struct cam_hw_soc_info soc_info;
uint32_t cpas_handle;
};
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c
index 540ec76..a79e0d4 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c
@@ -13,105 +13,93 @@
#include "cam_csiphy_soc.h"
#include "cam_csiphy_core.h"
#include "include/cam_csiphy_1_0_hwreg.h"
-#include "cam_sensor_util.h"
+
+#ifdef CAM_CSIPHY_MEM_DMP
+int32_t cam_csiphy_mem_dmp(struct cam_hw_soc_info *soc_info)
+{
+ int32_t rc = 0;
+ resource_size_t size = 0;
+ void __iomem *addr = NULL;
+
+ if (!soc_info) {
+ rc = -EINVAL;
+ CAM_ERR(CAM_CSIPHY, "invalid input %d", rc);
+ return rc;
+ }
+ addr = soc_info->reg_map[0].mem_base;
+ size = resource_size(soc_info->mem_block[0]);
+ rc = cam_io_dump(addr, 0, (size >> 2));
+ if (rc < 0) {
+ CAM_ERR(CAM_CSIPHY, "generating dump failed %d", rc);
+ return rc;
+ }
+ return rc;
+}
+#endif
int32_t cam_csiphy_enable_hw(struct csiphy_device *csiphy_dev)
{
int32_t rc = 0;
- long clk_rate = 0;
+ struct cam_hw_soc_info *soc_info;
+
+ soc_info = &csiphy_dev->soc_info;
if (csiphy_dev->ref_count++) {
- pr_err("%s:%d csiphy refcount = %d\n", __func__,
- __LINE__, csiphy_dev->ref_count);
+ CAM_ERR(CAM_CSIPHY, "csiphy refcount = %d",
+ csiphy_dev->ref_count);
return rc;
}
- rc = msm_camera_config_vreg(&csiphy_dev->v4l2_dev_str.pdev->dev,
- csiphy_dev->csiphy_vreg,
- csiphy_dev->num_vreg, NULL, 0,
- &csiphy_dev->csiphy_reg_ptr[0], 1);
+ rc = cam_soc_util_enable_platform_resource(soc_info, true,
+ CAM_TURBO_VOTE, ENABLE_IRQ);
if (rc < 0) {
- pr_err("%s:%d failed regulator get\n", __func__, __LINE__);
- goto csiphy_config_regulator_fail;
+ CAM_ERR(CAM_CSIPHY, "failed to enable platform resources %d",
+ rc);
+ return rc;
}
- rc = msm_camera_enable_vreg(&csiphy_dev->v4l2_dev_str.pdev->dev,
- csiphy_dev->csiphy_vreg,
- csiphy_dev->num_vreg, NULL, 0,
- &csiphy_dev->csiphy_reg_ptr[0], 1);
- if (rc < 0) {
- pr_err("%s:%d failed to enable regulators\n", __func__, rc);
- goto csiphy_regulator_fail;
- }
+ rc = cam_soc_util_set_clk_rate(
+ soc_info->clk[csiphy_dev->csiphy_clk_index],
+ soc_info->clk_name[csiphy_dev->csiphy_clk_index],
+ soc_info->clk_rate[0][csiphy_dev->csiphy_clk_index]);
- /*Enable clocks*/
- rc = msm_camera_clk_enable(&csiphy_dev->v4l2_dev_str.pdev->dev,
- csiphy_dev->csiphy_clk_info, csiphy_dev->csiphy_clk,
- csiphy_dev->num_clk, true);
if (rc < 0) {
- pr_err("%s: csiphy clk enable failed\n", __func__);
- csiphy_dev->ref_count--;
- goto csiphy_regulator_fail;
- }
-
- clk_rate = msm_camera_clk_set_rate(&csiphy_dev->v4l2_dev_str.pdev->dev,
- csiphy_dev->csiphy_clk[csiphy_dev->csiphy_clk_index],
- clk_rate);
- if (clk_rate < 0) {
- pr_err("csiphy_clk_set_rate failed\n");
- goto csiphy_clk_enable_fail;
- }
-
- rc = msm_camera_enable_irq(csiphy_dev->irq, ENABLE_IRQ);
- if (rc < 0) {
- pr_err("%s:%d :ERROR: irq enable failed\n",
- __func__, __LINE__);
- goto csiphy_clk_enable_fail;
- return -EINVAL;
+ CAM_ERR(CAM_CSIPHY, "csiphy_clk_set_rate failed rc: %d", rc);
+ goto csiphy_disable_platform_resource;
}
cam_csiphy_reset(csiphy_dev);
return rc;
-csiphy_clk_enable_fail:
- msm_camera_clk_enable(&csiphy_dev->v4l2_dev_str.pdev->dev,
- csiphy_dev->csiphy_clk_info, csiphy_dev->csiphy_clk,
- csiphy_dev->num_clk, false);
-csiphy_regulator_fail:
- msm_camera_enable_vreg(&csiphy_dev->v4l2_dev_str.pdev->dev,
- csiphy_dev->csiphy_vreg,
- csiphy_dev->num_vreg, NULL, 0,
- &csiphy_dev->csiphy_reg_ptr[0], 0);
-csiphy_config_regulator_fail:
- msm_camera_config_vreg(&csiphy_dev->v4l2_dev_str.pdev->dev,
- csiphy_dev->csiphy_vreg,
- csiphy_dev->num_vreg, NULL, 0,
- &csiphy_dev->csiphy_reg_ptr[0], 0);
+
+
+csiphy_disable_platform_resource:
+ cam_soc_util_disable_platform_resource(soc_info, true, true);
return rc;
}
-int32_t cam_csiphy_disable_hw(struct platform_device *pdev)
+int32_t cam_csiphy_disable_hw(struct csiphy_device *csiphy_dev)
{
- struct csiphy_device *csiphy_dev =
- platform_get_drvdata(pdev);
+ struct cam_hw_soc_info *soc_info;
- /*Disable regulators*/
- msm_camera_enable_vreg(&csiphy_dev->v4l2_dev_str.pdev->dev,
- csiphy_dev->csiphy_vreg,
- csiphy_dev->num_vreg, NULL, 0,
- &csiphy_dev->csiphy_reg_ptr[0], 0);
+ if (!csiphy_dev || !csiphy_dev->ref_count) {
+ CAM_ERR(CAM_CSIPHY, "csiphy dev NULL / ref_count ZERO");
+ return 0;
+ }
+ soc_info = &csiphy_dev->soc_info;
- /*Disable clocks*/
- msm_camera_clk_enable(&csiphy_dev->v4l2_dev_str.pdev->dev,
- csiphy_dev->csiphy_clk_info, csiphy_dev->csiphy_clk,
- csiphy_dev->num_clk, false);
+ if (--csiphy_dev->ref_count) {
+ CAM_ERR(CAM_CSIPHY, "csiphy refcount = %d",
+ csiphy_dev->ref_count);
+ return 0;
+ }
- /*Disable IRQ*/
- msm_camera_enable_irq(csiphy_dev->irq, false);
+ cam_csiphy_reset(csiphy_dev);
+
+ cam_soc_util_disable_platform_resource(soc_info, true, true);
return 0;
-
}
int32_t cam_csiphy_parse_dt_info(struct platform_device *pdev,
@@ -121,11 +109,15 @@
uint32_t clk_cnt = 0;
char *csi_3p_clk_name = "csi_phy_3p_clk";
char *csi_3p_clk_src_name = "csiphy_3p_clk_src";
+ struct cam_hw_soc_info *soc_info;
- if (pdev->dev.of_node) {
- of_property_read_u32((&pdev->dev)->of_node,
- "cell-index", &pdev->id);
- CDBG("%s: device id = %d\n", __func__, pdev->id);
+ csiphy_dev->is_csiphy_3phase_hw = 0;
+ soc_info = &csiphy_dev->soc_info;
+
+ rc = cam_soc_util_get_dt_properties(soc_info);
+ if (rc < 0) {
+ CAM_ERR(CAM_CSIPHY, "parsing common soc dt(rc %d)", rc);
+ return rc;
}
csiphy_dev->is_csiphy_3phase_hw = 0;
@@ -145,130 +137,62 @@
csiphy_dev->is_csiphy_3phase_hw = CSI_3PHASE_HW;
csiphy_dev->clk_lane = 0;
} else {
- pr_err("%s:%d, invalid hw version : 0x%x\n", __func__, __LINE__,
- csiphy_dev->hw_version);
+ CAM_ERR(CAM_CSIPHY, "invalid hw version : 0x%x",
+ csiphy_dev->hw_version);
rc = -EINVAL;
return rc;
}
- rc = msm_camera_get_clk_info(csiphy_dev->v4l2_dev_str.pdev,
- &csiphy_dev->csiphy_clk_info,
- &csiphy_dev->csiphy_clk,
- &csiphy_dev->num_clk);
- if (rc < 0) {
- pr_err("%s:%d failed clock get\n", __func__, __LINE__);
- return rc;
+ if (soc_info->num_clk > CSIPHY_NUM_CLK_MAX) {
+ CAM_ERR(CAM_CSIPHY, "invalid clk count=%d, max is %d",
+ soc_info->num_clk, CSIPHY_NUM_CLK_MAX);
+ return -EINVAL;
}
-
- if (csiphy_dev->num_clk > CSIPHY_NUM_CLK_MAX) {
- pr_err("%s: invalid clk count=%zu, max is %d\n", __func__,
- csiphy_dev->num_clk, CSIPHY_NUM_CLK_MAX);
- goto clk_mem_ovf_err;
- }
-
- for (i = 0; i < csiphy_dev->num_clk; i++) {
- if (!strcmp(csiphy_dev->csiphy_clk_info[i].clk_name,
+ for (i = 0; i < soc_info->num_clk; i++) {
+ if (!strcmp(soc_info->clk_name[i],
csi_3p_clk_src_name)) {
csiphy_dev->csiphy_3p_clk_info[0].clk_name =
- csiphy_dev->csiphy_clk_info[i].clk_name;
+ soc_info->clk_name[i];
csiphy_dev->csiphy_3p_clk_info[0].clk_rate =
- csiphy_dev->csiphy_clk_info[i].clk_rate;
+ soc_info->clk_rate[0][i];
csiphy_dev->csiphy_3p_clk[0] =
- csiphy_dev->csiphy_clk[i];
+ soc_info->clk[i];
continue;
- } else if (!strcmp(csiphy_dev->csiphy_clk_info[i].clk_name,
- csi_3p_clk_name)) {
+ } else if (!strcmp(soc_info->clk_name[i],
+ csi_3p_clk_name)) {
csiphy_dev->csiphy_3p_clk_info[1].clk_name =
- csiphy_dev->csiphy_clk_info[i].clk_name;
+ soc_info->clk_name[i];
csiphy_dev->csiphy_3p_clk_info[1].clk_rate =
- csiphy_dev->csiphy_clk_info[i].clk_rate;
+ soc_info->clk_rate[0][i];
csiphy_dev->csiphy_3p_clk[1] =
- csiphy_dev->csiphy_clk[i];
+ soc_info->clk[i];
continue;
}
- if (!strcmp(csiphy_dev->csiphy_clk_info[clk_cnt].clk_name,
+ if (!strcmp(soc_info->clk_name[i],
"csiphy_timer_src_clk")) {
csiphy_dev->csiphy_max_clk =
- csiphy_dev->csiphy_clk_info[clk_cnt].clk_rate;
+ soc_info->clk_rate[0][clk_cnt];
csiphy_dev->csiphy_clk_index = clk_cnt;
}
- CDBG("%s: clk_rate[%d] = %ld\n", __func__, clk_cnt,
- csiphy_dev->csiphy_clk_info[clk_cnt].clk_rate);
+ CAM_DBG(CAM_CSIPHY, "clk_rate[%d] = %d", clk_cnt,
+ soc_info->clk_rate[0][clk_cnt]);
clk_cnt++;
}
+ rc = cam_soc_util_request_platform_resource(&csiphy_dev->soc_info,
+ cam_csiphy_irq, csiphy_dev);
- rc = cam_sensor_get_dt_vreg_data(pdev->dev.of_node,
- &(csiphy_dev->csiphy_vreg), &(csiphy_dev->num_vreg));
- if (rc < 0) {
- pr_err("%s:%d Reg get failed\n", __func__, __LINE__);
- csiphy_dev->num_vreg = 0;
- }
-
- csiphy_dev->base = msm_camera_get_reg_base(pdev, "csiphy", true);
- if (!csiphy_dev->base) {
- pr_err("%s: no mem resource?\n", __func__);
- rc = -ENODEV;
- goto csiphy_no_resource;
- }
-
- csiphy_dev->irq = msm_camera_get_irq(pdev, "csiphy");
- if (!csiphy_dev->irq) {
- pr_err("%s: no irq resource?\n", __func__);
- rc = -ENODEV;
- goto csiphy_no_resource;
- }
-
- rc = msm_camera_register_irq(pdev, csiphy_dev->irq,
- cam_csiphy_irq, IRQF_TRIGGER_RISING, "csiphy", csiphy_dev);
- if (rc < 0) {
- pr_err("%s: irq request fail\n", __func__);
- rc = -EBUSY;
- goto csiphy_no_resource;
- }
- msm_camera_enable_irq(csiphy_dev->irq, false);
- return rc;
-
-csiphy_no_resource:
- msm_camera_put_reg_base(pdev, csiphy_dev->base, "csiphy", true);
-clk_mem_ovf_err:
- msm_camera_put_clk_info(csiphy_dev->v4l2_dev_str.pdev,
- &csiphy_dev->csiphy_clk_info,
- &csiphy_dev->csiphy_clk,
- csiphy_dev->num_clk);
return rc;
}
int32_t cam_csiphy_soc_release(struct csiphy_device *csiphy_dev)
{
-
- if (!csiphy_dev || !csiphy_dev->ref_count) {
- pr_err("%s csiphy dev NULL / ref_count ZERO\n", __func__);
+ if (!csiphy_dev) {
+ CAM_ERR(CAM_CSIPHY, "csiphy dev NULL");
return 0;
}
- if (--csiphy_dev->ref_count) {
- pr_err("%s:%d csiphy refcount = %d\n", __func__,
- __LINE__, csiphy_dev->ref_count);
- return 0;
- }
-
- cam_csiphy_reset(csiphy_dev);
-
- msm_camera_enable_irq(csiphy_dev->irq, false);
-
- msm_camera_clk_enable(&csiphy_dev->v4l2_dev_str.pdev->dev,
- csiphy_dev->csiphy_clk_info, csiphy_dev->csiphy_clk,
- csiphy_dev->num_clk, false);
-
- msm_camera_enable_vreg(&csiphy_dev->v4l2_dev_str.pdev->dev,
- csiphy_dev->csiphy_vreg, csiphy_dev->num_vreg,
- NULL, 0, &csiphy_dev->csiphy_reg_ptr[0], 0);
-
- msm_camera_config_vreg(&csiphy_dev->v4l2_dev_str.pdev->dev,
- csiphy_dev->csiphy_vreg, csiphy_dev->num_vreg,
- NULL, 0, &csiphy_dev->csiphy_reg_ptr[0], 0);
-
+ cam_soc_util_release_platform_resource(&csiphy_dev->soc_info);
return 0;
}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.h
index 27de3fc..4430489 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.h
@@ -60,4 +60,19 @@
*/
int cam_csiphy_enable_hw(struct csiphy_device *csiphy_dev);
+/**
+ * @csiphy_dev: CSIPhy device structure
+ *
+ * This API disables SOC related parameters
+ */
+int cam_csiphy_disable_hw(struct csiphy_device *csiphy_dev);
+
+/**
+ * @soc_info: Soc info of cam hw driver module
+ *
+ * This API dumps memory for the entire mapped region
+ * (needs to be macro enabled before use)
+ */
+int cam_csiphy_mem_dmp(struct cam_hw_soc_info *soc_info);
+
#endif /* _CAM_CSIPHY_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/Makefile
new file mode 100644
index 0000000..5490992
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/Makefile
@@ -0,0 +1,8 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_eeprom_dev.o cam_eeprom_core.o cam_eeprom_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
new file mode 100644
index 0000000..96697f9
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
@@ -0,0 +1,784 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/crc32.h>
+#include <media/cam_sensor.h>
+
+#include "cam_eeprom_core.h"
+#include "cam_eeprom_soc.h"
+#include "cam_debug_util.h"
+
+/**
+ * cam_eeprom_read_memory() - read map data into buffer
+ * @e_ctrl:	eeprom control struct
+ * @block:	block to be read
+ *
+ * This function iterates through blocks stored in block->map, reads each
+ * region and concatenate them into the pre-allocated block->mapdata
+ *
+ * Return: 0 on success, negative error code from the first failing
+ * I2C write/poll/read otherwise (partial data may already be in
+ * block->mapdata in that case).
+ */
+static int cam_eeprom_read_memory(struct cam_eeprom_ctrl_t *e_ctrl,
+	struct cam_eeprom_memory_block_t *block)
+{
+	int rc = 0;
+	int j;
+	struct cam_sensor_i2c_reg_setting i2c_reg_settings;
+	struct cam_sensor_i2c_reg_array i2c_reg_array;
+	struct cam_eeprom_memory_map_t *emap = block->map;
+	struct cam_eeprom_soc_private *eb_info;
+	/* write cursor into block->mapdata; advanced after each region */
+	uint8_t *memptr = block->mapdata;
+
+	if (!e_ctrl) {
+		CAM_ERR(CAM_EEPROM, "e_ctrl is NULL");
+		return -EINVAL;
+	}
+
+	eb_info = (struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+
+	for (j = 0; j < block->num_map; j++) {
+		CAM_DBG(CAM_EEPROM, "slave-addr = 0x%X", emap[j].saddr);
+		/* switch to this entry's I2C slave address when it
+		 * overrides the default
+		 */
+		if (emap[j].saddr) {
+			eb_info->i2c_info.slave_addr = emap[j].saddr;
+			rc = cam_eeprom_update_i2c_info(e_ctrl,
+				&eb_info->i2c_info);
+			if (rc) {
+				CAM_ERR(CAM_EEPROM,
+					"failed: to update i2c info rc %d",
+					rc);
+				return rc;
+			}
+		}
+
+		/* select the eeprom page via a single register write */
+		if (emap[j].page.valid_size) {
+			i2c_reg_settings.addr_type = emap[j].page.addr_type;
+			i2c_reg_settings.data_type = emap[j].page.data_type;
+			i2c_reg_settings.size = 1;
+			i2c_reg_array.reg_addr = emap[j].page.addr;
+			i2c_reg_array.reg_data = emap[j].page.data;
+			i2c_reg_array.delay = emap[j].page.delay;
+			i2c_reg_settings.reg_setting = &i2c_reg_array;
+			rc = camera_io_dev_write(&e_ctrl->io_master_info,
+				&i2c_reg_settings);
+			if (rc) {
+				CAM_ERR(CAM_EEPROM, "page write failed rc %d",
+					rc);
+				return rc;
+			}
+		}
+
+		/* enable page access before the read, if required */
+		if (emap[j].pageen.valid_size) {
+			i2c_reg_settings.addr_type = emap[j].pageen.addr_type;
+			i2c_reg_settings.data_type = emap[j].pageen.data_type;
+			i2c_reg_settings.size = 1;
+			i2c_reg_array.reg_addr = emap[j].pageen.addr;
+			i2c_reg_array.reg_data = emap[j].pageen.data;
+			i2c_reg_array.delay = emap[j].pageen.delay;
+			i2c_reg_settings.reg_setting = &i2c_reg_array;
+			rc = camera_io_dev_write(&e_ctrl->io_master_info,
+				&i2c_reg_settings);
+			if (rc) {
+				CAM_ERR(CAM_EEPROM, "page enable failed rc %d",
+					rc);
+				return rc;
+			}
+		}
+
+		/* wait for the device to report the expected value
+		 * (poll.delay acts as the timeout)
+		 */
+		if (emap[j].poll.valid_size) {
+			rc = camera_io_dev_poll(&e_ctrl->io_master_info,
+				emap[j].poll.addr, emap[j].poll.data,
+				0, emap[j].poll.addr_type,
+				emap[j].poll.data_type,
+				emap[j].poll.delay);
+			if (rc) {
+				CAM_ERR(CAM_EEPROM, "poll failed rc %d",
+					rc);
+				return rc;
+			}
+		}
+
+		/* sequential read of this region into mapdata */
+		if (emap[j].mem.valid_size) {
+			rc = camera_io_dev_read_seq(&e_ctrl->io_master_info,
+				emap[j].mem.addr, memptr,
+				emap[j].mem.addr_type,
+				emap[j].mem.valid_size);
+			if (rc) {
+				CAM_ERR(CAM_EEPROM, "read failed rc %d",
+					rc);
+				return rc;
+			}
+			memptr += emap[j].mem.valid_size;
+		}
+
+		/* write 0 back to the page-enable register to disable
+		 * page access after the read
+		 */
+		if (emap[j].pageen.valid_size) {
+			i2c_reg_settings.addr_type = emap[j].pageen.addr_type;
+			i2c_reg_settings.data_type = emap[j].pageen.data_type;
+			i2c_reg_settings.size = 1;
+			i2c_reg_array.reg_addr = emap[j].pageen.addr;
+			i2c_reg_array.reg_data = 0;
+			i2c_reg_array.delay = emap[j].pageen.delay;
+			i2c_reg_settings.reg_setting = &i2c_reg_array;
+			rc = camera_io_dev_write(&e_ctrl->io_master_info,
+				&i2c_reg_settings);
+
+			if (rc) {
+				CAM_ERR(CAM_EEPROM,
+					"page disable failed rc %d",
+					rc);
+				return rc;
+			}
+		}
+	}
+	return rc;
+}
+
+/**
+ * cam_eeprom_power_up - Power up eeprom hardware
+ * @e_ctrl:     ctrl structure
+ * @power_info: power up/down info for eeprom
+ *
+ * Fills the regulator tables for both the power up and the power down
+ * sequences, powers the core up and, for CCI devices, initializes the
+ * CCI master.  If CCI init fails the core is powered back down so no
+ * regulators/clocks are left enabled on the error path.
+ *
+ * Returns success or failure
+ */
+static int cam_eeprom_power_up(struct cam_eeprom_ctrl_t *e_ctrl,
+	struct cam_sensor_power_ctrl_t *power_info)
+{
+	int32_t rc = 0;
+	struct cam_hw_soc_info *soc_info =
+		&e_ctrl->soc_info;
+
+	/* Parse and fill vreg params for power up settings */
+	rc = msm_camera_fill_vreg_params(
+		&e_ctrl->soc_info,
+		power_info->power_setting,
+		power_info->power_setting_size);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM,
+			"failed to fill vreg params for power up rc:%d", rc);
+		return rc;
+	}
+
+	/* Parse and fill vreg params for power down settings*/
+	rc = msm_camera_fill_vreg_params(
+		&e_ctrl->soc_info,
+		power_info->power_down_setting,
+		power_info->power_down_setting_size);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM,
+			"failed to fill vreg params power down rc:%d", rc);
+		return rc;
+	}
+
+	rc = cam_sensor_core_power_up(power_info, soc_info);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "failed in eeprom power up rc %d", rc);
+		return rc;
+	}
+
+	if (e_ctrl->io_master_info.master_type == CCI_MASTER) {
+		rc = camera_io_init(&(e_ctrl->io_master_info));
+		if (rc) {
+			CAM_ERR(CAM_EEPROM, "cci_init failed");
+			/* undo the power up sequence so this error path
+			 * does not leave regulators/clocks enabled
+			 */
+			msm_camera_power_down(power_info, soc_info);
+			return -EINVAL;
+		}
+	}
+	return rc;
+}
+
+/**
+ * cam_eeprom_power_down - Power down eeprom hardware
+ * @e_ctrl: ctrl structure
+ *
+ * Powers down the core and, for CCI devices, releases the CCI master.
+ *
+ * Returns success or failure
+ */
+static int cam_eeprom_power_down(struct cam_eeprom_ctrl_t *e_ctrl)
+{
+	struct cam_sensor_power_ctrl_t *power_info;
+	struct cam_hw_soc_info *soc_info;
+	struct cam_eeprom_soc_private *soc_private;
+	int rc = 0;
+
+	if (!e_ctrl) {
+		CAM_ERR(CAM_EEPROM, "failed: e_ctrl %pK", e_ctrl);
+		return -EINVAL;
+	}
+
+	soc_private =
+		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+	/* validate soc_private before taking the address of its member;
+	 * the previous "!power_info" test could never fire because
+	 * power_info always pointed into soc_private
+	 */
+	if (!soc_private) {
+		CAM_ERR(CAM_EEPROM, "failed: soc_private %pK", soc_private);
+		return -EINVAL;
+	}
+	power_info = &soc_private->power_info;
+	soc_info = &e_ctrl->soc_info;
+
+	rc = msm_camera_power_down(power_info, soc_info);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "power down the core is failed:%d", rc);
+		return rc;
+	}
+
+	if (e_ctrl->io_master_info.master_type == CCI_MASTER)
+		camera_io_release(&(e_ctrl->io_master_info));
+
+	return rc;
+}
+
+/**
+ * cam_eeprom_match_id - match eeprom id
+ * @e_ctrl: ctrl structure
+ *
+ * Reads the manufacturer/device id pair over SPI and compares it with
+ * the ids recorded in the spi client.
+ *
+ * Returns success or failure
+ */
+static int cam_eeprom_match_id(struct cam_eeprom_ctrl_t *e_ctrl)
+{
+	struct camera_io_master *io_master = &e_ctrl->io_master_info;
+	uint8_t id[2];
+	int rc = cam_spi_query_id(io_master, 0, &id[0], 2);
+
+	if (rc)
+		return rc;
+	CAM_DBG(CAM_EEPROM, "read 0x%x 0x%x, check 0x%x 0x%x",
+		id[0], id[1], io_master->spi_client->mfr_id0,
+		io_master->spi_client->device_id0);
+	if ((id[0] == io_master->spi_client->mfr_id0) &&
+		(id[1] == io_master->spi_client->device_id0))
+		return 0;
+	return -ENODEV;
+}
+
+/**
+ * cam_eeprom_parse_read_memory_map - Parse memory map
+ * @of_node: device node
+ * @e_ctrl:  ctrl structure
+ *
+ * Parses the memory map from DT, powers the device up, reads the map
+ * into cal_data and powers back down.  On failure the cal_data buffers
+ * are freed and the pointers/counters cleared so later teardown paths
+ * cannot free or read them again.
+ *
+ * Returns success or failure
+ */
+int32_t cam_eeprom_parse_read_memory_map(struct device_node *of_node,
+	struct cam_eeprom_ctrl_t *e_ctrl)
+{
+	int32_t rc = 0;
+	struct cam_eeprom_soc_private *soc_private;
+	struct cam_sensor_power_ctrl_t *power_info;
+
+	if (!e_ctrl) {
+		CAM_ERR(CAM_EEPROM, "failed: e_ctrl is NULL");
+		return -EINVAL;
+	}
+
+	soc_private =
+		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+	power_info = &soc_private->power_info;
+
+	rc = cam_eeprom_parse_dt_memory_map(of_node, &e_ctrl->cal_data);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "failed: eeprom dt parse rc %d", rc);
+		return rc;
+	}
+	rc = cam_eeprom_power_up(e_ctrl, power_info);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "failed: eeprom power up rc %d", rc);
+		goto data_mem_free;
+	}
+	if (e_ctrl->eeprom_device_type == MSM_CAMERA_SPI_DEVICE) {
+		rc = cam_eeprom_match_id(e_ctrl);
+		if (rc) {
+			CAM_DBG(CAM_EEPROM, "eeprom not matching %d", rc);
+			goto power_down;
+		}
+	}
+	rc = cam_eeprom_read_memory(e_ctrl, &e_ctrl->cal_data);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "read_eeprom_memory failed");
+		goto power_down;
+	}
+
+	rc = cam_eeprom_power_down(e_ctrl);
+	if (rc)
+		CAM_ERR(CAM_EEPROM, "failed: eeprom power down rc %d", rc);
+	return rc;
+power_down:
+	/* power down is best effort here; do not let a successful power
+	 * down overwrite rc with 0 and report success to the caller
+	 * while the buffers are being freed below
+	 */
+	cam_eeprom_power_down(e_ctrl);
+data_mem_free:
+	kfree(e_ctrl->cal_data.mapdata);
+	kfree(e_ctrl->cal_data.map);
+	/* clear pointers and counters so remove/teardown paths do not
+	 * free these buffers a second time (double-free fix)
+	 */
+	e_ctrl->cal_data.mapdata = NULL;
+	e_ctrl->cal_data.map = NULL;
+	e_ctrl->cal_data.num_data = 0;
+	e_ctrl->cal_data.num_map = 0;
+	return rc;
+}
+
+/**
+ * cam_eeprom_get_dev_handle - get device handle
+ * @e_ctrl: ctrl structure
+ * @arg:    Camera control command argument (struct cam_control)
+ *
+ * Creates a bridge device handle for this eeprom and returns it to
+ * userspace through the cam_sensor_acquire_dev payload.
+ *
+ * Returns success or failure
+ */
+static int32_t cam_eeprom_get_dev_handle(struct cam_eeprom_ctrl_t *e_ctrl,
+	void *arg)
+{
+	struct cam_sensor_acquire_dev eeprom_acq_dev;
+	struct cam_create_dev_hdl bridge_params;
+	struct cam_control *cmd = (struct cam_control *)arg;
+
+	/* only one outstanding acquire is allowed per device */
+	if (e_ctrl->bridge_intf.device_hdl != -1) {
+		CAM_ERR(CAM_EEPROM, "Device is already acquired");
+		return -EFAULT;
+	}
+	if (copy_from_user(&eeprom_acq_dev, (void __user *) cmd->handle,
+		sizeof(eeprom_acq_dev))) {
+		CAM_ERR(CAM_EEPROM,
+			"EEPROM:ACQUIRE_DEV: copy from user failed");
+		return -EFAULT;
+	}
+
+	bridge_params.session_hdl = eeprom_acq_dev.session_handle;
+	bridge_params.ops = &e_ctrl->bridge_intf.ops;
+	bridge_params.v4l2_sub_dev_flag = 0;
+	bridge_params.media_entity_flag = 0;
+	bridge_params.priv = e_ctrl;
+
+	/* NOTE(review): the result of cam_create_device_hdl() is stored
+	 * and returned without being validated — confirm whether a
+	 * failure value needs explicit handling here
+	 */
+	eeprom_acq_dev.device_handle =
+		cam_create_device_hdl(&bridge_params);
+	e_ctrl->bridge_intf.device_hdl = eeprom_acq_dev.device_handle;
+	e_ctrl->bridge_intf.session_hdl = eeprom_acq_dev.session_handle;
+
+	CAM_DBG(CAM_EEPROM, "Device Handle: %d", eeprom_acq_dev.device_handle);
+	if (copy_to_user((void __user *) cmd->handle, &eeprom_acq_dev,
+		sizeof(struct cam_sensor_acquire_dev))) {
+		CAM_ERR(CAM_EEPROM, "EEPROM:ACQUIRE_DEV: copy to user failed");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+/**
+ * cam_eeprom_update_slaveInfo - Update slave info
+ * @e_ctrl:  ctrl structure
+ * @cmd_buf: command buffer carrying a struct cam_cmd_i2c_info
+ *
+ * Copies the slave address and I2C frequency mode from the command
+ * into soc_private and pushes them to the io master.
+ *
+ * Returns success or failure
+ */
+static int32_t cam_eeprom_update_slaveInfo(struct cam_eeprom_ctrl_t *e_ctrl,
+	void *cmd_buf)
+{
+	struct cam_eeprom_soc_private *soc_private =
+		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+	struct cam_cmd_i2c_info *cmd_i2c_info =
+		(struct cam_cmd_i2c_info *)cmd_buf;
+	int32_t rc;
+
+	soc_private->i2c_info.slave_addr = cmd_i2c_info->slave_addr;
+	soc_private->i2c_info.i2c_freq_mode = cmd_i2c_info->i2c_freq_mode;
+
+	rc = cam_eeprom_update_i2c_info(e_ctrl, &soc_private->i2c_info);
+	CAM_DBG(CAM_EEPROM, "Slave addr: 0x%x Freq Mode: %d",
+		soc_private->i2c_info.slave_addr,
+		soc_private->i2c_info.i2c_freq_mode);
+
+	return rc;
+}
+
+/**
+ * cam_eeprom_parse_memory_map - Parse memory map info
+ * @data:             memory block data
+ * @cmd_buf:          command buffer
+ * @cmd_length:       command buffer length
+ * @cmd_length_bytes: command length processed in this function
+ * @num_map:          index of the map entry currently being filled
+ *
+ * Fills map[num_map] from one command-stream entry (random write,
+ * continuous read or conditional wait).
+ *
+ * Returns success or failure
+ */
+static int32_t cam_eeprom_parse_memory_map(
+	struct cam_eeprom_memory_block_t *data,
+	void *cmd_buf, int cmd_length, uint16_t *cmd_length_bytes,
+	int16_t num_map)
+{
+	int32_t rc = 0;
+	int32_t processed_size = 0;
+	struct cam_eeprom_memory_map_t *map = data->map;
+	struct common_header *cmm_hdr =
+		(struct common_header *)cmd_buf;
+	uint16_t cmd_length_in_bytes = 0;
+	struct cam_cmd_i2c_random_wr *i2c_random_wr = NULL;
+	struct cam_cmd_i2c_continuous_rd *i2c_cont_rd = NULL;
+	struct cam_cmd_conditional_wait *i2c_poll = NULL;
+
+	/* the caller starts num_map at -1 and only advances it on an
+	 * I2C_INFO entry; a packet that begins with a map command would
+	 * otherwise write out of bounds at map[-1]
+	 */
+	if (num_map < 0) {
+		CAM_ERR(CAM_EEPROM, "invalid map index %d", num_map);
+		/* report the whole buffer as consumed so a caller that
+		 * ignores rc still terminates its parse loop
+		 */
+		*cmd_length_bytes = cmd_length;
+		return -EINVAL;
+	}
+
+	switch (cmm_hdr->cmd_type) {
+	case CAMERA_SENSOR_CMD_TYPE_I2C_RNDM_WR:
+		i2c_random_wr = (struct cam_cmd_i2c_random_wr *)cmd_buf;
+		cmd_length_in_bytes = sizeof(struct cam_cmd_i2c_random_wr);
+
+		map[num_map].page.addr =
+			i2c_random_wr->random_wr_payload[0].reg_addr;
+		map[num_map].page.addr_type = i2c_random_wr->header.addr_type;
+		map[num_map].page.data =
+			i2c_random_wr->random_wr_payload[0].reg_data;
+		map[num_map].page.data_type = i2c_random_wr->header.data_type;
+		map[num_map].page.valid_size = 1;
+		cmd_buf += cmd_length_in_bytes / sizeof(int32_t);
+		processed_size +=
+			cmd_length_in_bytes;
+		break;
+	case CAMERA_SENSOR_CMD_TYPE_I2C_CONT_RD:
+		i2c_cont_rd = (struct cam_cmd_i2c_continuous_rd *)cmd_buf;
+		cmd_length_in_bytes = sizeof(struct cam_cmd_i2c_continuous_rd);
+
+		map[num_map].mem.addr = i2c_cont_rd->reg_addr;
+		map[num_map].mem.addr_type = i2c_cont_rd->header.addr_type;
+		map[num_map].mem.data_type = i2c_cont_rd->header.data_type;
+		map[num_map].mem.valid_size =
+			i2c_cont_rd->header.count;
+		cmd_buf += cmd_length_in_bytes / sizeof(int32_t);
+		processed_size +=
+			cmd_length_in_bytes;
+		data->num_data += map[num_map].mem.valid_size;
+		break;
+	case CAMERA_SENSOR_CMD_TYPE_WAIT:
+		i2c_poll = (struct cam_cmd_conditional_wait *)cmd_buf;
+		cmd_length_in_bytes = sizeof(struct cam_cmd_conditional_wait);
+
+		map[num_map].poll.addr = i2c_poll->reg_addr;
+		map[num_map].poll.addr_type = i2c_poll->addr_type;
+		map[num_map].poll.data = i2c_poll->reg_data;
+		map[num_map].poll.data_type = i2c_poll->data_type;
+		map[num_map].poll.delay = i2c_poll->timeout;
+		map[num_map].poll.valid_size = 1;
+		break;
+	default:
+		/* NOTE(review): an unknown type reports 0 consumed bytes;
+		 * the visible caller only routes the three types above
+		 * here, but any new caller must check for this
+		 */
+		break;
+	}
+	*cmd_length_bytes = processed_size;
+	return rc;
+}
+
+/**
+ * cam_eeprom_init_pkt_parser - Parse eeprom packet
+ * @e_ctrl:     ctrl structure
+ * @csl_packet: csl packet received
+ *
+ * Walks every command buffer in the packet and fills e_ctrl->cal_data
+ * (slave info, power settings and the memory map).  On failure the
+ * partially built map is released and the cal_data pointers/counters
+ * are cleared so the caller never sees a stale pointer.
+ *
+ * Returns success or failure
+ */
+static int32_t cam_eeprom_init_pkt_parser(struct cam_eeprom_ctrl_t *e_ctrl,
+	struct cam_packet *csl_packet)
+{
+	int32_t rc = 0;
+	int i = 0;
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+	uint32_t *offset = NULL;
+	uint32_t *cmd_buf = NULL;
+	uint64_t generic_pkt_addr;
+	size_t pkt_len = 0;
+	uint32_t total_cmd_buf_in_bytes = 0;
+	uint32_t processed_cmd_buf_in_bytes = 0;
+	struct common_header *cmm_hdr = NULL;
+	uint16_t cmd_length_in_bytes = 0;
+	struct cam_cmd_i2c_info *i2c_info = NULL;
+	int num_map = -1;
+	struct cam_eeprom_memory_map_t *map;
+	struct cam_eeprom_soc_private *soc_private =
+		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+	struct cam_sensor_power_ctrl_t *power_info = &soc_private->power_info;
+
+	e_ctrl->cal_data.map = kcalloc((MSM_EEPROM_MEMORY_MAP_MAX_SIZE *
+		MSM_EEPROM_MAX_MEM_MAP_CNT),
+		(sizeof(struct cam_eeprom_memory_map_t)), GFP_KERNEL);
+	if (!e_ctrl->cal_data.map) {
+		rc = -ENOMEM;
+		CAM_ERR(CAM_EEPROM, "failed");
+		return rc;
+	}
+	map = e_ctrl->cal_data.map;
+
+	offset = (uint32_t *)&csl_packet->payload;
+	offset += (csl_packet->cmd_buf_offset / sizeof(uint32_t));
+	cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+
+	/* Loop through multiple command buffers */
+	for (i = 0; i < csl_packet->num_cmd_buf; i++) {
+		total_cmd_buf_in_bytes = cmd_desc[i].length;
+		processed_cmd_buf_in_bytes = 0;
+		if (!total_cmd_buf_in_bytes)
+			continue;
+		rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
+			(uint64_t *)&generic_pkt_addr, &pkt_len);
+		if (rc) {
+			CAM_ERR(CAM_EEPROM, "Failed to get cpu buf");
+			goto map_free;
+		}
+		cmd_buf = (uint32_t *)generic_pkt_addr;
+		if (!cmd_buf) {
+			CAM_ERR(CAM_EEPROM, "invalid cmd buf");
+			rc = -EINVAL;
+			goto map_free;
+		}
+		cmd_buf += cmd_desc[i].offset / sizeof(uint32_t);
+		/* Loop through multiple cmd formats in one cmd buffer */
+		while (processed_cmd_buf_in_bytes < total_cmd_buf_in_bytes) {
+			cmm_hdr = (struct common_header *)cmd_buf;
+			switch (cmm_hdr->cmd_type) {
+			case CAMERA_SENSOR_CMD_TYPE_I2C_INFO:
+				i2c_info = (struct cam_cmd_i2c_info *)cmd_buf;
+				/* guard against packets carrying more
+				 * I2C_INFO entries than the map allocated
+				 * above can hold
+				 */
+				if (num_map + 1 >=
+					(MSM_EEPROM_MEMORY_MAP_MAX_SIZE *
+					MSM_EEPROM_MAX_MEM_MAP_CNT)) {
+					CAM_ERR(CAM_EEPROM,
+						"too many map entries");
+					rc = -EINVAL;
+					goto map_free;
+				}
+				num_map++;
+				map[num_map].saddr = i2c_info->slave_addr;
+				rc = cam_eeprom_update_slaveInfo(e_ctrl,
+					cmd_buf);
+				cmd_length_in_bytes =
+					sizeof(struct cam_cmd_i2c_info);
+				processed_cmd_buf_in_bytes +=
+					cmd_length_in_bytes;
+				cmd_buf += cmd_length_in_bytes/4;
+				e_ctrl->cal_data.num_map = num_map + 1;
+				break;
+			case CAMERA_SENSOR_CMD_TYPE_PWR_UP:
+			case CAMERA_SENSOR_CMD_TYPE_PWR_DOWN:
+				cmd_length_in_bytes =
+					sizeof(struct cam_cmd_power);
+				rc = cam_sensor_update_power_settings(cmd_buf,
+					cmd_length_in_bytes, power_info);
+				processed_cmd_buf_in_bytes +=
+					total_cmd_buf_in_bytes;
+				cmd_buf += total_cmd_buf_in_bytes/4;
+				if (rc) {
+					CAM_ERR(CAM_EEPROM, "Failed");
+					goto map_free;
+				}
+				break;
+			case CAMERA_SENSOR_CMD_TYPE_I2C_RNDM_WR:
+			case CAMERA_SENSOR_CMD_TYPE_I2C_CONT_RD:
+			case CAMERA_SENSOR_CMD_TYPE_WAIT:
+				rc = cam_eeprom_parse_memory_map(
+					&e_ctrl->cal_data, cmd_buf,
+					total_cmd_buf_in_bytes,
+					&cmd_length_in_bytes, num_map);
+				/* previously ignored; a failed entry must
+				 * abort instead of leaving a half-built map
+				 */
+				if (rc) {
+					CAM_ERR(CAM_EEPROM,
+						"memory map parse failed");
+					goto map_free;
+				}
+				processed_cmd_buf_in_bytes +=
+					cmd_length_in_bytes;
+				cmd_buf += cmd_length_in_bytes/4;
+				break;
+			default:
+				break;
+			}
+		}
+	}
+	return rc;
+
+map_free:
+	/* release the partially built map so callers do not leak it or
+	 * free/dereference a dangling pointer later
+	 */
+	kfree(e_ctrl->cal_data.map);
+	e_ctrl->cal_data.map = NULL;
+	e_ctrl->cal_data.num_map = 0;
+	e_ctrl->cal_data.num_data = 0;
+	return rc;
+}
+
+/**
+ * cam_eeprom_get_cal_data - parse the userspace IO config and
+ * copy read data to share with userspace
+ * @e_ctrl:     ctrl structure
+ * @csl_packet: csl packet received
+ *
+ * Copies e_ctrl->cal_data.mapdata into each CAM_BUF_OUTPUT io config
+ * of the packet after validating the destination buffer.
+ *
+ * Returns success or failure
+ */
+static int32_t cam_eeprom_get_cal_data(struct cam_eeprom_ctrl_t *e_ctrl,
+	struct cam_packet *csl_packet)
+{
+	struct cam_buf_io_cfg *io_cfg;
+	uint32_t i = 0;
+	int rc = 0;
+	uint64_t buf_addr;
+	size_t buf_size;
+	uint8_t *read_buffer;
+
+	io_cfg = (struct cam_buf_io_cfg *) ((uint8_t *)
+		&csl_packet->payload +
+		csl_packet->io_configs_offset);
+
+	CAM_DBG(CAM_EEPROM, "number of IO configs: %d:",
+		csl_packet->num_io_configs);
+
+	/* NOTE(review): io_cfg is never advanced inside this loop, so
+	 * every iteration reads io config entry 0 — confirm intent
+	 */
+	for (i = 0; i < csl_packet->num_io_configs; i++) {
+		CAM_DBG(CAM_EEPROM, "Direction: %d:", io_cfg->direction);
+		if (io_cfg->direction == CAM_BUF_OUTPUT) {
+			rc = cam_mem_get_cpu_buf(io_cfg->mem_handle[0],
+				(uint64_t *)&buf_addr, &buf_size);
+			/* previously unchecked: a bad handle would have
+			 * been dereferenced below
+			 */
+			if (rc) {
+				CAM_ERR(CAM_EEPROM, "Failed to get cpu buf");
+				return rc;
+			}
+			CAM_DBG(CAM_EEPROM, "buf_addr : %pK, buf_size : %zu\n",
+				(void *)buf_addr, buf_size);
+
+			read_buffer = (uint8_t *)buf_addr;
+			if (!read_buffer) {
+				CAM_ERR(CAM_EEPROM,
+					"invalid buffer to copy data");
+				return -EINVAL;
+			}
+			read_buffer += io_cfg->offsets[0];
+
+			/* account for the offset so the copy cannot run
+			 * past the end of the destination buffer
+			 */
+			if (io_cfg->offsets[0] >= buf_size ||
+				buf_size - io_cfg->offsets[0] <
+				e_ctrl->cal_data.num_data) {
+				CAM_ERR(CAM_EEPROM,
+					"failed to copy, Invalid size");
+				return -EINVAL;
+			}
+
+			/* informational trace, not an error condition */
+			CAM_DBG(CAM_EEPROM, "copy the data, len:%d",
+				e_ctrl->cal_data.num_data);
+			memcpy(read_buffer, e_ctrl->cal_data.mapdata,
+				e_ctrl->cal_data.num_data);
+
+		} else {
+			CAM_ERR(CAM_EEPROM, "Invalid direction");
+			rc = -EINVAL;
+		}
+	}
+	return rc;
+}
+
+/**
+ * cam_eeprom_pkt_parse - Parse csl packet
+ * @e_ctrl: ctrl structure
+ * @arg:    Camera control command argument
+ *
+ * Handles CAM_EEPROM_PACKET_OPCODE_INIT: parses the init packet,
+ * powers the device, reads the calibration data and copies it to
+ * userspace.  The cal_data buffers are always released before
+ * returning and the pointers/counters cleared, so a subsequent
+ * CONFIG_DEV cannot free or dereference them again
+ * (use-after-free / double-free fix).
+ *
+ * Returns success or failure
+ */
+static int32_t cam_eeprom_pkt_parse(struct cam_eeprom_ctrl_t *e_ctrl, void *arg)
+{
+	int32_t rc = 0;
+	struct cam_control *ioctl_ctrl = NULL;
+	struct cam_config_dev_cmd dev_config;
+	uint64_t generic_pkt_addr;
+	size_t pkt_len;
+	struct cam_packet *csl_packet = NULL;
+	struct cam_eeprom_soc_private *soc_private =
+		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+
+	ioctl_ctrl = (struct cam_control *)arg;
+	if (copy_from_user(&dev_config, (void __user *) ioctl_ctrl->handle,
+		sizeof(dev_config)))
+		return -EFAULT;
+	rc = cam_mem_get_cpu_buf(dev_config.packet_handle,
+		(uint64_t *)&generic_pkt_addr, &pkt_len);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM,
+			"error in converting command Handle Error: %d", rc);
+		return rc;
+	}
+	csl_packet = (struct cam_packet *)
+		(generic_pkt_addr + dev_config.offset);
+	switch (csl_packet->header.op_code & 0xFFFFFF) {
+	case CAM_EEPROM_PACKET_OPCODE_INIT:
+		if (e_ctrl->userspace_probe == false) {
+			/* device was probed at boot; hand back the data
+			 * already read but reject the re-init request
+			 */
+			rc = cam_eeprom_get_cal_data(e_ctrl, csl_packet);
+			CAM_ERR(CAM_EEPROM,
+				"Eeprom already probed at kernel boot");
+			rc = -EINVAL;
+			break;
+		}
+		if (e_ctrl->cal_data.num_data == 0) {
+			rc = cam_eeprom_init_pkt_parser(e_ctrl, csl_packet);
+			if (rc) {
+				CAM_ERR(CAM_EEPROM,
+					"Failed in parsing the pkt");
+				/* was "return rc;" which leaked cal_data */
+				goto error;
+			}
+
+			e_ctrl->cal_data.mapdata =
+				kzalloc(e_ctrl->cal_data.num_data, GFP_KERNEL);
+			if (!e_ctrl->cal_data.mapdata) {
+				rc = -ENOMEM;
+				CAM_ERR(CAM_EEPROM, "failed");
+				goto error;
+			}
+
+			rc = cam_eeprom_power_up(e_ctrl,
+				&soc_private->power_info);
+			if (rc) {
+				CAM_ERR(CAM_EEPROM, "failed rc %d", rc);
+				goto memdata_free;
+			}
+
+			rc = cam_eeprom_read_memory(e_ctrl, &e_ctrl->cal_data);
+			if (rc) {
+				CAM_ERR(CAM_EEPROM,
+					"read_eeprom_memory failed");
+				goto power_down;
+			}
+
+			rc = cam_eeprom_get_cal_data(e_ctrl, csl_packet);
+			rc = cam_eeprom_power_down(e_ctrl);
+		} else {
+			CAM_DBG(CAM_EEPROM, "Already read eeprom");
+		}
+		break;
+	default:
+		break;
+	}
+	/* data has been handed to userspace; release it and clear the
+	 * bookkeeping so a later CONFIG_DEV cannot double-free or read
+	 * through the stale pointers
+	 */
+	kfree(e_ctrl->cal_data.mapdata);
+	kfree(e_ctrl->cal_data.map);
+	e_ctrl->cal_data.mapdata = NULL;
+	e_ctrl->cal_data.map = NULL;
+	e_ctrl->cal_data.num_data = 0;
+	e_ctrl->cal_data.num_map = 0;
+	return rc;
+power_down:
+	/* best effort; keep the original failure code in rc */
+	cam_eeprom_power_down(e_ctrl);
+memdata_free:
+	kfree(e_ctrl->cal_data.mapdata);
+	e_ctrl->cal_data.mapdata = NULL;
+error:
+	kfree(e_ctrl->cal_data.map);
+	e_ctrl->cal_data.map = NULL;
+	e_ctrl->cal_data.num_data = 0;
+	e_ctrl->cal_data.num_map = 0;
+	return rc;
+}
+
+/**
+ * cam_eeprom_driver_cmd - Handle eeprom cmds
+ * @e_ctrl: ctrl structure
+ * @arg:    Camera control command argument
+ *
+ * Dispatches CAM_QUERY_CAP / CAM_ACQUIRE_DEV / CAM_CONFIG_DEV while
+ * holding the eeprom mutex; the mutex is released on every path.
+ *
+ * Returns success or failure
+ */
+int32_t cam_eeprom_driver_cmd(struct cam_eeprom_ctrl_t *e_ctrl, void *arg)
+{
+	int rc = 0;
+	struct cam_eeprom_query_cap_t eeprom_cap;
+	struct cam_control *cmd = (struct cam_control *)arg;
+
+	if (!e_ctrl) {
+		CAM_ERR(CAM_EEPROM, "e_ctrl is NULL");
+		return -EINVAL;
+	}
+
+	mutex_lock(&(e_ctrl->eeprom_mutex));
+	switch (cmd->op_code) {
+	case CAM_QUERY_CAP:
+		eeprom_cap.slot_info = e_ctrl->subdev_id;
+		if (e_ctrl->userspace_probe == false)
+			eeprom_cap.eeprom_kernel_probe = true;
+		else
+			eeprom_cap.eeprom_kernel_probe = false;
+
+		if (copy_to_user((void __user *) cmd->handle,
+			&eeprom_cap,
+			sizeof(struct cam_eeprom_query_cap_t))) {
+			CAM_ERR(CAM_EEPROM, "Failed Copy to User");
+			/* was "return -EFAULT;" which returned with the
+			 * mutex still held and made the goto unreachable
+			 */
+			rc = -EFAULT;
+			goto release_mutex;
+		}
+		CAM_DBG(CAM_EEPROM, "eeprom_cap: ID: %d", eeprom_cap.slot_info);
+		break;
+	case CAM_ACQUIRE_DEV:
+		rc = cam_eeprom_get_dev_handle(e_ctrl, arg);
+		if (rc) {
+			CAM_ERR(CAM_EEPROM, "Failed to acquire dev");
+			goto release_mutex;
+		}
+		break;
+	case CAM_CONFIG_DEV:
+		rc = cam_eeprom_pkt_parse(e_ctrl, arg);
+		if (rc) {
+			CAM_ERR(CAM_EEPROM, "Failed in eeprom pkt Parsing");
+			goto release_mutex;
+		}
+		break;
+	default:
+		CAM_DBG(CAM_EEPROM, "invalid opcode");
+		break;
+	}
+
+release_mutex:
+	mutex_unlock(&(e_ctrl->eeprom_mutex));
+
+	return rc;
+}
+
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.h
new file mode 100644
index 0000000..84736df
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.h
@@ -0,0 +1,21 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _CAM_EEPROM_CORE_H_
+#define _CAM_EEPROM_CORE_H_
+
+#include "cam_eeprom_dev.h"
+
+int32_t cam_eeprom_driver_cmd(struct cam_eeprom_ctrl_t *e_ctrl, void *arg);
+int32_t cam_eeprom_parse_read_memory_map(struct device_node *of_node,
+ struct cam_eeprom_ctrl_t *e_ctrl);
+#endif
+/* _CAM_EEPROM_CORE_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c
new file mode 100644
index 0000000..82dcc9c
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c
@@ -0,0 +1,487 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_eeprom_dev.h"
+#include "cam_req_mgr_dev.h"
+#include "cam_eeprom_soc.h"
+#include "cam_eeprom_core.h"
+#include "cam_debug_util.h"
+
+/* v4l2 subdev ioctl entry: only VIDIOC_CAM_CONTROL is supported and
+ * is forwarded to the eeprom driver command handler.
+ */
+static long cam_eeprom_subdev_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, void *arg)
+{
+	struct cam_eeprom_ctrl_t *e_ctrl = v4l2_get_subdevdata(sd);
+
+	if (cmd == VIDIOC_CAM_CONTROL)
+		return cam_eeprom_driver_cmd(e_ctrl, arg);
+
+	return -ENOIOCTLCMD;
+}
+
+/* Push slave address and frequency mode into the CCI client.  For
+ * non-CCI masters there is nothing to configure and 0 is returned.
+ */
+int32_t cam_eeprom_update_i2c_info(struct cam_eeprom_ctrl_t *e_ctrl,
+	struct cam_eeprom_i2c_info_t *i2c_info)
+{
+	struct cam_sensor_cci_client *cci_client;
+
+	if (e_ctrl->io_master_info.master_type != CCI_MASTER)
+		return 0;
+
+	cci_client = e_ctrl->io_master_info.cci_client;
+	if (!cci_client) {
+		CAM_ERR(CAM_EEPROM, "failed: cci_client %pK",
+			cci_client);
+		return -EINVAL;
+	}
+	cci_client->cci_i2c_master = e_ctrl->cci_i2c_master;
+	cci_client->sid = (i2c_info->slave_addr) >> 1;
+	cci_client->retries = 3;
+	cci_client->id_map = 0;
+	cci_client->i2c_freq_mode = i2c_info->i2c_freq_mode;
+	return 0;
+}
+
+#ifdef CONFIG_COMPAT
+/* Compat (32-bit userspace) ioctl entry: copies the cam_control
+ * argument in, forwards VIDIOC_CAM_CONTROL to the regular subdev
+ * ioctl handler, and copies the possibly-updated argument back out.
+ */
+static long cam_eeprom_init_subdev_do_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, unsigned long arg)
+{
+	struct cam_control cmd_data;
+	int32_t rc = 0;
+
+	if (copy_from_user(&cmd_data, (void __user *)arg,
+		sizeof(cmd_data))) {
+		CAM_ERR(CAM_EEPROM,
+			"Failed to copy from user_ptr=%pK size=%zu",
+			(void __user *)arg, sizeof(cmd_data));
+		return -EFAULT;
+	}
+
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL:
+		rc = cam_eeprom_subdev_ioctl(sd, cmd, &cmd_data);
+		if (rc < 0) {
+			CAM_ERR(CAM_EEPROM,
+				"Failed in eeprom suddev handling rc %d",
+				rc);
+			return rc;
+		}
+		break;
+	default:
+		CAM_ERR(CAM_EEPROM, "Invalid compat ioctl: %d", cmd);
+		rc = -EINVAL;
+	}
+
+	/* mirror the handler's result back to userspace on success */
+	if (!rc) {
+		if (copy_to_user((void __user *)arg, &cmd_data,
+			sizeof(cmd_data))) {
+			CAM_ERR(CAM_EEPROM,
+				"Failed to copy from user_ptr=%pK size=%zu",
+				(void __user *)arg, sizeof(cmd_data));
+			rc = -EFAULT;
+		}
+	}
+	return rc;
+}
+#endif
+
+static const struct v4l2_subdev_internal_ops cam_eeprom_internal_ops;
+
+static struct v4l2_subdev_core_ops cam_eeprom_subdev_core_ops = {
+ .ioctl = cam_eeprom_subdev_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl32 = cam_eeprom_init_subdev_do_ioctl,
+#endif
+};
+
+static struct v4l2_subdev_ops cam_eeprom_subdev_ops = {
+ .core = &cam_eeprom_subdev_core_ops,
+};
+
+/* I2C probe: allocates the eeprom control structure and wires the
+ * i2c client into its io master.
+ *
+ * Returns success or failure
+ */
+static int cam_eeprom_i2c_driver_probe(struct i2c_client *client,
+	const struct i2c_device_id *id)
+{
+	int rc = 0;
+	struct cam_eeprom_ctrl_t *e_ctrl = NULL;
+	struct cam_eeprom_soc_private *soc_private = NULL;
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		CAM_ERR(CAM_EEPROM, "i2c_check_functionality failed");
+		/* was returning rc == 0 (success) on this failure */
+		rc = -ENODEV;
+		goto probe_failure;
+	}
+
+	e_ctrl = kzalloc(sizeof(*e_ctrl), GFP_KERNEL);
+	if (!e_ctrl) {
+		CAM_ERR(CAM_EEPROM, "kzalloc failed");
+		rc = -ENOMEM;
+		goto probe_failure;
+	}
+	e_ctrl->v4l2_dev_str.ops = &cam_eeprom_subdev_ops;
+	soc_private = (struct cam_eeprom_soc_private *)(id->driver_data);
+	if (!soc_private) {
+		CAM_ERR(CAM_EEPROM, "board info NULL");
+		rc = -EINVAL;
+		goto ectrl_free;
+	}
+	e_ctrl->cal_data.mapdata = NULL;
+	e_ctrl->cal_data.map = NULL;
+	e_ctrl->userspace_probe = false;
+
+	e_ctrl->eeprom_device_type = MSM_CAMERA_I2C_DEVICE;
+	e_ctrl->io_master_info.master_type = I2C_MASTER;
+	e_ctrl->io_master_info.client = client;
+
+	if (soc_private->i2c_info.slave_addr != 0)
+		e_ctrl->io_master_info.client->addr =
+			soc_private->i2c_info.slave_addr;
+
+	/* NOTE(review): e_ctrl is not stored via i2c_set_clientdata nor
+	 * registered as a subdev here, yet the remove path expects a
+	 * subdev in clientdata — confirm the missing registration
+	 */
+	return rc;
+
+ectrl_free:
+	kfree(e_ctrl);
+probe_failure:
+	return rc;
+}
+
+/* I2C remove: tears down the eeprom control structure and every
+ * buffer it owns (kfree(NULL) is a no-op for the cal_data members).
+ */
+static int cam_eeprom_i2c_driver_remove(struct i2c_client *client)
+{
+	struct v4l2_subdev *sd = i2c_get_clientdata(client);
+	struct cam_eeprom_ctrl_t *e_ctrl;
+	struct cam_eeprom_soc_private *soc_private;
+
+	if (!sd) {
+		CAM_ERR(CAM_EEPROM, "Subdevice is NULL");
+		return -EINVAL;
+	}
+
+	e_ctrl = (struct cam_eeprom_ctrl_t *)v4l2_get_subdevdata(sd);
+	if (!e_ctrl) {
+		CAM_ERR(CAM_EEPROM, "eeprom device is NULL");
+		return -EINVAL;
+	}
+
+	soc_private =
+		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+	if (!soc_private) {
+		CAM_ERR(CAM_EEPROM, "soc_info.soc_private is NULL");
+		return -EINVAL;
+	}
+
+	kfree(e_ctrl->cal_data.mapdata);
+	kfree(e_ctrl->cal_data.map);
+	/* soc_private was already NULL-checked above; the old second
+	 * "if (soc_private)" guard was dead code
+	 */
+	kfree(soc_private->power_info.gpio_num_info);
+	kfree(soc_private);
+	kfree(e_ctrl);
+
+	return 0;
+}
+
+/* SPI setup: allocates the eeprom control structure, spi client and
+ * board info, runs SOC init and (for kernel probe) reads the memory
+ * map.  All allocations are unwound on failure.
+ *
+ * Returns success or failure
+ */
+static int cam_eeprom_spi_setup(struct spi_device *spi)
+{
+	struct cam_eeprom_ctrl_t *e_ctrl = NULL;
+	struct cam_sensor_spi_client *spi_client;
+	struct cam_eeprom_soc_private *eb_info;
+	struct cam_sensor_power_ctrl_t *power_info = NULL;
+	int rc = 0;
+
+	e_ctrl = kzalloc(sizeof(*e_ctrl), GFP_KERNEL);
+	if (!e_ctrl)
+		return -ENOMEM;
+
+	e_ctrl->v4l2_dev_str.ops = &cam_eeprom_subdev_ops;
+	e_ctrl->userspace_probe = false;
+	e_ctrl->cal_data.mapdata = NULL;
+	e_ctrl->cal_data.map = NULL;
+
+	spi_client = kzalloc(sizeof(*spi_client), GFP_KERNEL);
+	if (!spi_client) {
+		kfree(e_ctrl);
+		return -ENOMEM;
+	}
+
+	eb_info = kzalloc(sizeof(*eb_info), GFP_KERNEL);
+	if (!eb_info) {
+		/* was falling through with rc == 0, reporting success
+		 * while freeing everything
+		 */
+		rc = -ENOMEM;
+		goto spi_free;
+	}
+	e_ctrl->soc_info.soc_private = eb_info;
+
+	e_ctrl->eeprom_device_type = MSM_CAMERA_SPI_DEVICE;
+	e_ctrl->io_master_info.spi_client = spi_client;
+	e_ctrl->io_master_info.master_type = SPI_MASTER;
+	spi_client->spi_master = spi;
+
+	power_info = &eb_info->power_info;
+	power_info->dev = &spi->dev;
+
+	/* set spi instruction info */
+	spi_client->retry_delay = 1;
+	spi_client->retries = 0;
+
+	/* Initialize mutex */
+	mutex_init(&(e_ctrl->eeprom_mutex));
+
+	rc = cam_eeprom_spi_driver_soc_init(e_ctrl);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "failed: spi soc init rc %d", rc);
+		goto board_free;
+	}
+
+	if (e_ctrl->userspace_probe == false) {
+		rc = cam_eeprom_parse_read_memory_map(spi->dev.of_node,
+			e_ctrl);
+		if (rc) {
+			CAM_ERR(CAM_EEPROM, "failed: read mem map rc %d", rc);
+			goto board_free;
+		}
+	}
+
+	return rc;
+
+board_free:
+	kfree(e_ctrl->soc_info.soc_private);
+spi_free:
+	kfree(spi_client);
+	kfree(e_ctrl);
+	return rc;
+}
+
+/* SPI probe: programs the bus parameters for the eeprom, traces the
+ * resulting configuration and hands off to the common setup helper.
+ */
+static int cam_eeprom_spi_driver_probe(struct spi_device *spi)
+{
+	int cpha, cpol, cs_high;
+
+	spi->bits_per_word = 8;
+	spi->mode = SPI_MODE_0;
+	spi_setup(spi);
+
+	cpha = (spi->mode & SPI_CPHA) ? 1 : 0;
+	cpol = (spi->mode & SPI_CPOL) ? 1 : 0;
+	cs_high = (spi->mode & SPI_CS_HIGH) ? 1 : 0;
+	CAM_DBG(CAM_EEPROM, "irq[%d] cs[%x] CPHA[%x] CPOL[%x] CS_HIGH[%x]",
+		spi->irq, spi->chip_select, cpha, cpol, cs_high);
+	CAM_DBG(CAM_EEPROM, "max_speed[%u]", spi->max_speed_hz);
+
+	return cam_eeprom_spi_setup(spi);
+}
+
+/* SPI remove: releases the spi client, calibration buffers, board
+ * info and finally the control structure itself.
+ */
+static int cam_eeprom_spi_driver_remove(struct spi_device *sdev)
+{
+	struct cam_eeprom_ctrl_t *e_ctrl;
+	struct cam_eeprom_soc_private *soc_private;
+	struct v4l2_subdev *subdev = spi_get_drvdata(sdev);
+
+	if (!subdev) {
+		CAM_ERR(CAM_EEPROM, "Subdevice is NULL");
+		return -EINVAL;
+	}
+
+	e_ctrl = (struct cam_eeprom_ctrl_t *)v4l2_get_subdevdata(subdev);
+	if (!e_ctrl) {
+		CAM_ERR(CAM_EEPROM, "eeprom device is NULL");
+		return -EINVAL;
+	}
+
+	soc_private =
+		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+
+	kfree(e_ctrl->io_master_info.spi_client);
+	kfree(e_ctrl->cal_data.mapdata);
+	kfree(e_ctrl->cal_data.map);
+	if (soc_private) {
+		kfree(soc_private->power_info.gpio_num_info);
+		kfree(soc_private);
+	}
+	kfree(e_ctrl);
+
+	return 0;
+}
+
+/* Platform probe: allocates the eeprom control structure, runs SOC
+ * init, optionally reads the memory map at boot and registers the
+ * v4l2 subdev.  All allocations are unwound on failure.
+ *
+ * Returns success or failure
+ */
+static int32_t cam_eeprom_platform_driver_probe(
+	struct platform_device *pdev)
+{
+	int32_t rc = 0;
+	struct cam_eeprom_ctrl_t *e_ctrl = NULL;
+	struct cam_eeprom_soc_private *soc_private = NULL;
+
+	e_ctrl = kzalloc(sizeof(struct cam_eeprom_ctrl_t), GFP_KERNEL);
+	if (!e_ctrl)
+		return -ENOMEM;
+
+	e_ctrl->soc_info.pdev = pdev;
+	e_ctrl->eeprom_device_type = MSM_CAMERA_PLATFORM_DEVICE;
+	e_ctrl->cal_data.mapdata = NULL;
+	e_ctrl->cal_data.map = NULL;
+	e_ctrl->userspace_probe = false;
+
+	e_ctrl->io_master_info.master_type = CCI_MASTER;
+	e_ctrl->io_master_info.cci_client = kzalloc(
+		sizeof(struct cam_sensor_cci_client), GFP_KERNEL);
+	if (!e_ctrl->io_master_info.cci_client) {
+		/* was falling through with rc == 0, reporting success */
+		rc = -ENOMEM;
+		goto free_e_ctrl;
+	}
+
+	soc_private = kzalloc(sizeof(struct cam_eeprom_soc_private),
+		GFP_KERNEL);
+	if (!soc_private) {
+		rc = -ENOMEM;
+		goto free_cci_client;
+	}
+	e_ctrl->soc_info.soc_private = soc_private;
+
+	/* Initialize mutex */
+	mutex_init(&(e_ctrl->eeprom_mutex));
+	rc = cam_eeprom_platform_driver_soc_init(e_ctrl);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "failed: soc init rc %d", rc);
+		goto free_soc;
+	}
+	rc = cam_eeprom_update_i2c_info(e_ctrl, &soc_private->i2c_info);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "failed: to update i2c info rc %d", rc);
+		goto free_soc;
+	}
+
+	if (e_ctrl->userspace_probe == false) {
+		rc = cam_eeprom_parse_read_memory_map(pdev->dev.of_node,
+			e_ctrl);
+		if (rc) {
+			CAM_ERR(CAM_EEPROM, "failed: read mem map rc %d", rc);
+			goto free_soc;
+		}
+	}
+
+	e_ctrl->v4l2_dev_str.internal_ops = &cam_eeprom_internal_ops;
+	e_ctrl->v4l2_dev_str.ops = &cam_eeprom_subdev_ops;
+	strlcpy(e_ctrl->device_name, CAM_EEPROM_NAME,
+		sizeof(e_ctrl->device_name));
+	e_ctrl->v4l2_dev_str.name = e_ctrl->device_name;
+	e_ctrl->v4l2_dev_str.sd_flags =
+		(V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS);
+	e_ctrl->v4l2_dev_str.ent_function = CAM_EEPROM_DEVICE_TYPE;
+	e_ctrl->v4l2_dev_str.token = e_ctrl;
+
+	rc = cam_register_subdev(&(e_ctrl->v4l2_dev_str));
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "fail to create subdev");
+		goto free_soc;
+	}
+
+	e_ctrl->bridge_intf.device_hdl = -1;
+	e_ctrl->bridge_intf.ops.get_dev_info = NULL;
+	e_ctrl->bridge_intf.ops.link_setup = NULL;
+	e_ctrl->bridge_intf.ops.apply_req = NULL;
+
+	platform_set_drvdata(pdev, e_ctrl);
+	v4l2_set_subdevdata(&e_ctrl->v4l2_dev_str.sd, e_ctrl);
+	return rc;
+free_soc:
+	kfree(soc_private);
+free_cci_client:
+	kfree(e_ctrl->io_master_info.cci_client);
+free_e_ctrl:
+	kfree(e_ctrl);
+	return rc;
+}
+
+static int cam_eeprom_platform_driver_remove(struct platform_device *pdev)
+{
+ struct cam_eeprom_ctrl_t *e_ctrl;
+
+ e_ctrl = platform_get_drvdata(pdev);
+ if (!e_ctrl) {
+ CAM_ERR(CAM_EEPROM, "eeprom device is NULL");
+ return -EINVAL;
+ }
+
+ kfree(e_ctrl->soc_info.soc_private);
+ kfree(e_ctrl->io_master_info.cci_client);
+ kfree(e_ctrl);
+ return 0;
+}
+
/* Device-tree compatible strings; shared by the platform and SPI driver
 * registrations below via .of_match_table.
 */
static const struct of_device_id cam_eeprom_dt_match[] = {
	{ .compatible = "qcom,eeprom" },
	{ }
};


MODULE_DEVICE_TABLE(of, cam_eeprom_dt_match);
+
/* Platform (CCI-attached) EEPROM driver registration. */
static struct platform_driver cam_eeprom_platform_driver = {
	.driver = {
		.name = "qcom,eeprom",
		.owner = THIS_MODULE,
		.of_match_table = cam_eeprom_dt_match,
	},
	.probe = cam_eeprom_platform_driver_probe,
	.remove = cam_eeprom_platform_driver_remove,
};
+
/* I2C id table; no per-device driver_data is carried. */
static const struct i2c_device_id cam_eeprom_i2c_id[] = {
	{ "msm_eeprom", (kernel_ulong_t)NULL},
	{ }
};
+
/* I2C-attached EEPROM driver registration.
 * NOTE(review): cam_eeprom_i2c_driver_probe/_remove are defined earlier in
 * this file (outside this chunk) — confirm their signatures match.
 */
static struct i2c_driver cam_eeprom_i2c_driver = {
	.id_table = cam_eeprom_i2c_id,
	.probe = cam_eeprom_i2c_driver_probe,
	.remove = cam_eeprom_i2c_driver_remove,
	.driver = {
		.name = "msm_eeprom",
	},
};
+
/* SPI-attached EEPROM driver registration. */
static struct spi_driver cam_eeprom_spi_driver = {
	.driver = {
		.name = "qcom_eeprom",
		.owner = THIS_MODULE,
		.of_match_table = cam_eeprom_dt_match,
	},
	.probe = cam_eeprom_spi_driver_probe,
	.remove = cam_eeprom_spi_driver_remove,
};
+static int __init cam_eeprom_driver_init(void)
+{
+ int rc = 0;
+
+ rc = platform_driver_register(&cam_eeprom_platform_driver);
+ if (rc < 0) {
+ CAM_ERR(CAM_EEPROM, "platform_driver_register failed rc = %d",
+ rc);
+ return rc;
+ }
+
+ rc = spi_register_driver(&cam_eeprom_spi_driver);
+ if (rc < 0) {
+ CAM_ERR(CAM_EEPROM, "spi_register_driver failed rc = %d", rc);
+ return rc;
+ }
+
+ rc = i2c_add_driver(&cam_eeprom_i2c_driver);
+ if (rc < 0) {
+ CAM_ERR(CAM_EEPROM, "i2c_add_driver failed rc = %d", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+static void __exit cam_eeprom_driver_exit(void)
+{
+ platform_driver_unregister(&cam_eeprom_platform_driver);
+ spi_unregister_driver(&cam_eeprom_spi_driver);
+ i2c_del_driver(&cam_eeprom_i2c_driver);
+}
+
/* Module entry/exit points and metadata. */
module_init(cam_eeprom_driver_init);
module_exit(cam_eeprom_driver_exit);
MODULE_DESCRIPTION("CAM EEPROM driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.h
new file mode 100644
index 0000000..a98bf00
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.h
@@ -0,0 +1,183 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _CAM_EEPROM_DEV_H_
+#define _CAM_EEPROM_DEV_H_
+
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-ioctl.h>
+#include <media/cam_sensor.h>
+#include <cam_sensor_i2c.h>
+#include <cam_sensor_spi.h>
+#include <cam_sensor_io.h>
+#include <cam_cci_dev.h>
+#include <cam_req_mgr_util.h>
+#include <cam_req_mgr_interface.h>
+#include <cam_mem_mgr.h>
+#include <cam_subdev.h>
+#include "cam_soc_util.h"
+
/* Declare a file-scope, statically-initialized mutex. */
#define DEFINE_MSM_MUTEX(mutexname) \
	static struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)

/* Max length of a device-tree property name built with snprintf(). */
#define PROPERTY_MAXSIZE 32

/* Limits on the parsed EEPROM memory map (see cam_eeprom_soc.c). */
#define MSM_EEPROM_MEMORY_MAP_MAX_SIZE 80
#define MSM_EEPROM_MAX_MEM_MAP_CNT 8
#define MSM_EEPROM_MEM_MAP_PROPERTIES_CNT 8
+
/**
 * struct cam_eeprom_map_t - eeprom map
 * @valid_size : Size of the valid data for this map entry
 * @addr       : Address
 * @addr_type  : Address type
 * @data       : data
 * @data_type  : Data type
 * @delay      : Delay
 *
 */
struct cam_eeprom_map_t {
	uint32_t valid_size;
	uint32_t addr;
	uint32_t addr_type;
	uint32_t data;
	uint32_t data_type;
	uint32_t delay;
};
+
/**
 * struct cam_eeprom_memory_map_t - eeprom memory map types
 * @page   : page memory
 * @pageen : pageen memory
 * @poll   : poll memory
 * @mem    : mem
 * @saddr  : slave addr for this block
 *
 */
struct cam_eeprom_memory_map_t {
	struct cam_eeprom_map_t page;
	struct cam_eeprom_map_t pageen;
	struct cam_eeprom_map_t poll;
	struct cam_eeprom_map_t mem;
	uint32_t saddr;
};
+
/**
 * struct cam_eeprom_memory_block_t - eeprom mem block info
 * @map      : eeprom memory map
 * @num_map  : number of map blocks
 * @mapdata  : buffer holding the data read out of the eeprom
 * @num_data : size in bytes of the total mapdata buffer
 *
 */
struct cam_eeprom_memory_block_t {
	struct cam_eeprom_memory_map_t *map;
	uint32_t num_map;
	uint8_t *mapdata;
	uint32_t num_data;
};
+
/**
 * struct cam_eeprom_cmm_t - camera multimodule (CMM) data description
 * @cmm_support     : cmm support flag
 * @cmm_compression : cmm compression flag
 * @cmm_offset      : cmm data start offset
 * @cmm_size        : cmm data size
 *
 */
struct cam_eeprom_cmm_t {
	uint32_t cmm_support;
	uint32_t cmm_compression;
	uint32_t cmm_offset;
	uint32_t cmm_size;
};
+
/**
 * struct cam_eeprom_i2c_info_t - I2C info
 * @slave_addr    : slave address
 * @i2c_freq_mode : i2c frequency mode (bounded by I2C_MAX_MODES)
 *
 */
struct cam_eeprom_i2c_info_t {
	uint16_t slave_addr;
	uint8_t i2c_freq_mode;
};
+
/**
 * struct cam_eeprom_soc_private - eeprom soc private data structure
 * @eeprom_name : eeprom name (from the "eeprom-name" dt property)
 * @i2c_info    : i2c info structure
 * @power_info  : eeprom power info (owns gpio_num_info, freed on remove)
 * @cmm_data    : cmm data
 *
 */
struct cam_eeprom_soc_private {
	const char *eeprom_name;
	struct cam_eeprom_i2c_info_t i2c_info;
	struct cam_sensor_power_ctrl_t power_info;
	struct cam_eeprom_cmm_t cmm_data;
};
+
/**
 * struct cam_eeprom_intf_params - bridge interface params
 * @device_hdl  : Device Handle
 * @session_hdl : Session Handle
 * @link_hdl    : Link Handle
 * @ops         : KMD operations
 * @crm_cb      : Callback API pointers
 */
struct cam_eeprom_intf_params {
	int32_t device_hdl;
	int32_t session_hdl;
	int32_t link_hdl;
	struct cam_req_mgr_kmd_ops ops;
	struct cam_req_mgr_crm_cb *crm_cb;
};
+
/**
 * struct cam_eeprom_ctrl_t - EEPROM device control structure
 * (the original header misnamed this "struct cam_cmd_conditional_wait")
 * @pdev               : platform device
 * @spi                : spi device
 * @eeprom_mutex       : eeprom mutex
 * @soc_info           : eeprom soc related info
 * @io_master_info     : Information about the communication master
 * @gpio_num_info      : gpio info
 * @cci_i2c_master     : I2C structure
 * @v4l2_dev_str       : V4L2 device structure
 * @bridge_intf        : bridge interface params
 * @eeprom_device_type : bus type (platform/SPI/I2C) this instance uses
 * @subdev_id          : subdev id
 * @userspace_probe    : flag indicates userspace or kernel probe
 * @cal_data           : Calibration data
 * @device_name        : Device name
 *
 */
struct cam_eeprom_ctrl_t {
	struct platform_device *pdev;
	struct spi_device *spi;
	struct mutex eeprom_mutex;
	struct cam_hw_soc_info soc_info;
	struct camera_io_master io_master_info;
	struct msm_camera_gpio_num_info *gpio_num_info;
	enum cci_i2c_master_t cci_i2c_master;
	struct cam_subdev v4l2_dev_str;
	struct cam_eeprom_intf_params bridge_intf;
	enum msm_camera_device_type_t eeprom_device_type;
	uint32_t subdev_id;
	bool userspace_probe;
	struct cam_eeprom_memory_block_t cal_data;
	char device_name[20];
};
+
/* Propagates @i2c_info (slave address / frequency mode) into the io master.
 * NOTE(review): implementation not visible in this chunk — confirm the
 * exact contract against its definition.
 */
int32_t cam_eeprom_update_i2c_info(struct cam_eeprom_ctrl_t *e_ctrl,
	struct cam_eeprom_i2c_info_t *i2c_info);
+
+#endif /*_CAM_EEPROM_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.c
new file mode 100644
index 0000000..84e723f
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.c
@@ -0,0 +1,323 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <cam_sensor_cmn_header.h>
+#include <cam_sensor_util.h>
+#include <cam_sensor_io.h>
+#include <cam_req_mgr_util.h>
+
+#include "cam_eeprom_soc.h"
+#include "cam_debug_util.h"
+
+/*
+ * cam_eeprom_parse_memory_map() - parse memory map in device node
+ * @of: device node
+ * @data: memory block for output
+ *
+ * This functions parses @of to fill @data. It allocates map itself, parses
+ * the @of node, calculate total data length, and allocates required buffer.
+ * It only fills the map, but does not perform actual reading.
+ */
+int cam_eeprom_parse_dt_memory_map(struct device_node *node,
+ struct cam_eeprom_memory_block_t *data)
+{
+ int i, rc = 0;
+ char property[PROPERTY_MAXSIZE];
+ uint32_t count = MSM_EEPROM_MEM_MAP_PROPERTIES_CNT;
+ struct cam_eeprom_memory_map_t *map;
+
+ snprintf(property, PROPERTY_MAXSIZE, "num-blocks");
+ rc = of_property_read_u32(node, property, &data->num_map);
+ if (rc < 0) {
+ CAM_ERR(CAM_EEPROM, "failed: num-blocks not available rc %d",
+ rc);
+ return rc;
+ }
+
+ map = kzalloc((sizeof(*map) * data->num_map), GFP_KERNEL);
+ if (!map) {
+ rc = -ENOMEM;
+ return rc;
+ }
+ data->map = map;
+
+ for (i = 0; i < data->num_map; i++) {
+ snprintf(property, PROPERTY_MAXSIZE, "page%d", i);
+ rc = of_property_read_u32_array(node, property,
+ (uint32_t *) &map[i].page, count);
+ if (rc < 0) {
+ CAM_ERR(CAM_EEPROM, "failed: page not available rc %d",
+ rc);
+ goto ERROR;
+ }
+
+ snprintf(property, PROPERTY_MAXSIZE, "pageen%d", i);
+ rc = of_property_read_u32_array(node, property,
+ (uint32_t *) &map[i].pageen, count);
+ if (rc < 0)
+ CAM_DBG(CAM_EEPROM, "pageen not needed");
+
+ snprintf(property, PROPERTY_MAXSIZE, "saddr%d", i);
+ rc = of_property_read_u32_array(node, property,
+ (uint32_t *) &map[i].saddr, 1);
+ if (rc < 0)
+ CAM_DBG(CAM_EEPROM, "saddr not needed - block %d", i);
+
+ snprintf(property, PROPERTY_MAXSIZE, "poll%d", i);
+ rc = of_property_read_u32_array(node, property,
+ (uint32_t *) &map[i].poll, count);
+ if (rc < 0) {
+ CAM_ERR(CAM_EEPROM, "failed: poll not available rc %d",
+ rc);
+ goto ERROR;
+ }
+
+ snprintf(property, PROPERTY_MAXSIZE, "mem%d", i);
+ rc = of_property_read_u32_array(node, property,
+ (uint32_t *) &map[i].mem, count);
+ if (rc < 0) {
+ CAM_ERR(CAM_EEPROM, "failed: mem not available rc %d",
+ rc);
+ goto ERROR;
+ }
+ data->num_data += map[i].mem.valid_size;
+ }
+
+ data->mapdata = kzalloc(data->num_data, GFP_KERNEL);
+ if (!data->mapdata) {
+ rc = -ENOMEM;
+ goto ERROR;
+ }
+ return rc;
+
+ERROR:
+ kfree(data->map);
+ memset(data, 0, sizeof(*data));
+ return rc;
+}
+
/**
 * @e_ctrl: ctrl structure
 *
 * Parses eeprom dt: common soc properties, power settings (kernel probe
 * only) and the optional GPIO pin table into power_info->gpio_num_info.
 */
static int cam_eeprom_get_dt_data(struct cam_eeprom_ctrl_t *e_ctrl)
{
	int rc = 0;
	struct cam_hw_soc_info *soc_info = &e_ctrl->soc_info;
	struct cam_eeprom_soc_private *soc_private =
		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
	struct cam_sensor_power_ctrl_t *power_info = &soc_private->power_info;
	struct device_node *of_node = NULL;

	/* The dt node lives on the SPI master for SPI devices, on the
	 * platform device otherwise.
	 */
	if (e_ctrl->eeprom_device_type == MSM_CAMERA_SPI_DEVICE)
		of_node = e_ctrl->io_master_info.
			spi_client->spi_master->dev.of_node;
	else if (e_ctrl->eeprom_device_type == MSM_CAMERA_PLATFORM_DEVICE)
		of_node = soc_info->pdev->dev.of_node;

	if (!of_node) {
		CAM_ERR(CAM_EEPROM, "of_node is NULL, device type %d",
			e_ctrl->eeprom_device_type);
		return -EINVAL;
	}
	rc = cam_soc_util_get_dt_properties(soc_info);
	if (rc < 0) {
		CAM_ERR(CAM_EEPROM, "cam_soc_util_get_dt_properties rc %d",
			rc);
		return rc;
	}

	if (e_ctrl->userspace_probe == false) {
		rc = cam_get_dt_power_setting_data(of_node,
			soc_info, power_info);
		if (rc < 0) {
			CAM_ERR(CAM_EEPROM, "failed in getting power settings");
			return rc;
		}
	}

	/* No gpio_data at all is treated as success (GPIO-less board)... */
	if (!soc_info->gpio_data) {
		CAM_INFO(CAM_EEPROM, "No GPIO found");
		return 0;
	}

	/* ...but an empty common table is treated as an error.
	 * NOTE(review): this asymmetry (0 vs -EINVAL for "No GPIO found")
	 * looks deliberate but is undocumented — confirm intended.
	 */
	if (!soc_info->gpio_data->cam_gpio_common_tbl_size) {
		CAM_INFO(CAM_EEPROM, "No GPIO found");
		return -EINVAL;
	}

	rc = cam_sensor_util_init_gpio_pin_tbl(soc_info,
		&power_info->gpio_num_info);
	if ((rc < 0) || (!power_info->gpio_num_info)) {
		CAM_ERR(CAM_EEPROM, "No/Error EEPROM GPIOs");
		return -EINVAL;
	}

	return rc;
}
+
+/**
+ * @eb_info: eeprom private data structure
+ * @of_node: eeprom device node
+ *
+ * This function parses the eeprom dt to get the MM data
+ */
+static int cam_eeprom_cmm_dts(struct cam_eeprom_soc_private *eb_info,
+ struct device_node *of_node)
+{
+ int rc = 0;
+ struct cam_eeprom_cmm_t *cmm_data = &eb_info->cmm_data;
+
+ cmm_data->cmm_support =
+ of_property_read_bool(of_node, "cmm-data-support");
+ if (!cmm_data->cmm_support) {
+ CAM_DBG(CAM_EEPROM, "No cmm support");
+ return 0;
+ }
+
+ cmm_data->cmm_compression =
+ of_property_read_bool(of_node, "cmm-data-compressed");
+
+ rc = of_property_read_u32(of_node, "cmm-data-offset",
+ &cmm_data->cmm_offset);
+ if (rc < 0)
+ CAM_DBG(CAM_EEPROM, "No MM offset data rc %d", rc);
+
+ rc = of_property_read_u32(of_node, "cmm-data-size",
+ &cmm_data->cmm_size);
+ if (rc < 0)
+ CAM_DBG(CAM_EEPROM, "No MM size data rc %d", rc);
+
+ CAM_DBG(CAM_EEPROM, "cmm_compr %d, cmm_offset %d, cmm_size %d",
+ cmm_data->cmm_compression, cmm_data->cmm_offset,
+ cmm_data->cmm_size);
+ return 0;
+}
+
/**
 * @e_ctrl: ctrl structure
 *
 * This function is called from cam_eeprom_spi_driver_probe, it parses
 * the eeprom dt node and decides for userspace or kernel probe.
 */
int cam_eeprom_spi_driver_soc_init(struct cam_eeprom_ctrl_t *e_ctrl)
{
	int rc = 0;
	struct cam_eeprom_soc_private *soc_private =
		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;

	rc = of_property_read_u32(e_ctrl->spi->dev.of_node, "cell-index",
		&e_ctrl->subdev_id);
	if (rc < 0) {
		CAM_ERR(CAM_EEPROM, "failed rc %d", rc);
		return rc;
	}
	/* A missing "eeprom-name" is not fatal: it switches this device to
	 * userspace probing. rc is intentionally not returned here.
	 * NOTE(review): eeprom_name may be left unset (NULL) on this path
	 * and is then printed below — kernel printk renders it "(null)".
	 */
	rc = of_property_read_string(e_ctrl->spi->dev.of_node,
		"eeprom-name", &soc_private->eeprom_name);
	if (rc < 0) {
		CAM_ERR(CAM_EEPROM, "failed rc %d", rc);
		e_ctrl->userspace_probe = true;
	}

	CAM_DBG(CAM_EEPROM, "eeprom-name %s, rc %d", soc_private->eeprom_name,
		rc);
	/* CMM and dt-data failures are logged but not propagated eagerly;
	 * the last rc (from cam_eeprom_get_dt_data) is what gets returned.
	 */
	rc = cam_eeprom_cmm_dts(soc_private,
		e_ctrl->io_master_info.spi_client->spi_master->dev.of_node);
	if (rc < 0)
		CAM_DBG(CAM_EEPROM, "MM data not available rc %d", rc);
	rc = cam_eeprom_get_dt_data(e_ctrl);
	if (rc < 0)
		CAM_DBG(CAM_EEPROM, "failed: eeprom get dt data rc %d", rc);

	return rc;
}
+
/**
 * @e_ctrl: ctrl structure
 *
 * This function is called from cam_eeprom_platform_driver_probe, it parses
 * the eeprom dt node and decides for userspace or kernel probe.
 */
int cam_eeprom_platform_driver_soc_init(struct cam_eeprom_ctrl_t *e_ctrl)
{
	int rc = 0;
	struct cam_hw_soc_info *soc_info = &e_ctrl->soc_info;
	struct device_node *of_node = NULL;
	struct platform_device *pdev = NULL;
	struct cam_eeprom_soc_private *soc_private =
		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
	uint32_t temp;

	if (!soc_info->pdev) {
		CAM_ERR(CAM_EEPROM, "Error:soc_info is not initialized");
		return -EINVAL;
	}

	pdev = soc_info->pdev;
	of_node = pdev->dev.of_node;
	if (!of_node) {
		CAM_ERR(CAM_EEPROM, "dev.of_node NULL");
		return -EINVAL;
	}

	rc = of_property_read_u32(of_node, "cell-index",
		&e_ctrl->subdev_id);
	if (rc < 0) {
		CAM_ERR(CAM_EEPROM, "failed rc %d\n", rc);
		return rc;
	}

	/* NOTE(review): reads a u32 directly into an enum cci_i2c_master_t
	 * field — assumes the enum is 32-bit here; confirm on all targets.
	 */
	rc = of_property_read_u32(of_node, "cci-master",
		&e_ctrl->cci_i2c_master);
	if (rc < 0) {
		CAM_DBG(CAM_EEPROM, "failed rc %d", rc);
		return rc;
	}

	/* Missing "eeprom-name" is not fatal: fall back to userspace probe. */
	rc = of_property_read_string(of_node, "eeprom-name",
		&soc_private->eeprom_name);
	if (rc < 0) {
		CAM_DBG(CAM_EEPROM, "kernel probe is not enabled");
		e_ctrl->userspace_probe = true;
	}

	rc = cam_eeprom_get_dt_data(e_ctrl);
	if (rc < 0)
		CAM_DBG(CAM_EEPROM, "failed: eeprom get dt data rc %d", rc);

	if (e_ctrl->userspace_probe == false) {
		rc = of_property_read_u32(of_node, "slave-addr", &temp);
		if (rc < 0)
			CAM_DBG(CAM_EEPROM, "failed: no slave-addr rc %d", rc);

		soc_private->i2c_info.slave_addr = temp;

		/* i2c-freq-mode falls back to 0 on read failure or when the
		 * dt value is out of range.
		 */
		rc = of_property_read_u32(of_node, "i2c-freq-mode", &temp);
		soc_private->i2c_info.i2c_freq_mode = temp;
		if (rc < 0) {
			CAM_ERR(CAM_EEPROM,
				"i2c-freq-mode read fail %d", rc);
			soc_private->i2c_info.i2c_freq_mode = 0;
		}
		if (soc_private->i2c_info.i2c_freq_mode >= I2C_MAX_MODES) {
			CAM_ERR(CAM_EEPROM, "invalid i2c_freq_mode = %d",
				soc_private->i2c_info.i2c_freq_mode);
			soc_private->i2c_info.i2c_freq_mode = 0;
		}
		CAM_DBG(CAM_EEPROM, "slave-addr = 0x%X",
			soc_private->i2c_info.slave_addr);
	}
	return rc;
}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.h
new file mode 100644
index 0000000..02e59d7
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.h
@@ -0,0 +1,22 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _CAM_EEPROM_SOC_H_
+#define _CAM_EEPROM_SOC_H_
+
+#include "cam_eeprom_dev.h"
+
+int cam_eeprom_parse_dt_memory_map(struct device_node *of,
+ struct cam_eeprom_memory_block_t *data);
+
+int cam_eeprom_platform_driver_soc_init(struct cam_eeprom_ctrl_t *e_ctrl);
+int cam_eeprom_spi_driver_soc_init(struct cam_eeprom_ctrl_t *e_ctrl);
+#endif/* _CAM_EEPROM_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/Makefile
new file mode 100644
index 0000000..9aab0e4
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/Makefile
@@ -0,0 +1,9 @@
# Build rules for the camera flash sub-module.
ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/

obj-$(CONFIG_SPECTRA_CAMERA) += cam_flash_dev.o cam_flash_core.o cam_flash_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
new file mode 100644
index 0000000..7af7efc
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
@@ -0,0 +1,741 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+
+#include "cam_sensor_cmn_header.h"
+#include "cam_flash_core.h"
+
+int cam_flash_prepare(struct cam_flash_ctrl *flash_ctrl,
+ enum cam_flash_state state)
+{
+ int rc = 0;
+
+ if (!(flash_ctrl->switch_trigger)) {
+ CAM_ERR(CAM_FLASH, "Invalid argument");
+ return -EINVAL;
+ }
+
+ if ((state == CAM_FLASH_STATE_INIT) &&
+ (flash_ctrl->is_regulator_enabled == false)) {
+ rc = qpnp_flash_led_prepare(flash_ctrl->switch_trigger,
+ ENABLE_REGULATOR, NULL);
+ if (rc) {
+ CAM_ERR(CAM_FLASH, "regulator enable failed rc = %d",
+ rc);
+ return rc;
+ }
+ flash_ctrl->is_regulator_enabled = true;
+ } else if ((state == CAM_FLASH_STATE_RELEASE) &&
+ (flash_ctrl->is_regulator_enabled == true)) {
+ rc = qpnp_flash_led_prepare(flash_ctrl->switch_trigger,
+ DISABLE_REGULATOR, NULL);
+ if (rc) {
+ CAM_ERR(CAM_FLASH, "regulator disable failed rc = %d",
+ rc);
+ return rc;
+ }
+ flash_ctrl->is_regulator_enabled = false;
+ } else {
+ CAM_ERR(CAM_FLASH, "Wrong Flash State : %d",
+ flash_ctrl->flash_state);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+static int cam_flash_ops(struct cam_flash_ctrl *flash_ctrl,
+ struct cam_flash_frame_setting *flash_data, enum camera_flash_opcode op)
+{
+ uint32_t curr = 0, max_current = 0;
+ struct cam_flash_private_soc *soc_private = NULL;
+ int i = 0;
+
+ if (!flash_ctrl || !flash_data) {
+ CAM_ERR(CAM_FLASH, "Fctrl or Data NULL");
+ return -EINVAL;
+ }
+
+ soc_private = (struct cam_flash_private_soc *)
+ &flash_ctrl->soc_info.soc_private;
+
+ if (op == CAMERA_SENSOR_FLASH_OP_FIRELOW) {
+ for (i = 0; i < flash_ctrl->torch_num_sources; i++) {
+ if (flash_ctrl->torch_trigger[i]) {
+ max_current = soc_private->torch_max_current[i];
+
+ if (flash_data->led_current_ma[i] <=
+ max_current)
+ curr = flash_data->led_current_ma[i];
+ else
+ curr = soc_private->torch_op_current[i];
+
+ CAM_DBG(CAM_FLASH,
+ "Led_Current[%d] = %d", i, curr);
+ led_trigger_event(flash_ctrl->torch_trigger[i],
+ curr);
+ }
+ }
+ } else if (op == CAMERA_SENSOR_FLASH_OP_FIREHIGH) {
+ for (i = 0; i < flash_ctrl->flash_num_sources; i++) {
+ if (flash_ctrl->flash_trigger[i]) {
+ max_current = soc_private->flash_max_current[i];
+
+ if (flash_data->led_current_ma[i] <=
+ max_current)
+ curr = flash_data->led_current_ma[i];
+ else
+ curr = soc_private->flash_op_current[i];
+
+ CAM_DBG(CAM_FLASH, "LED flash_current[%d]: %d",
+ i, curr);
+ led_trigger_event(flash_ctrl->flash_trigger[i],
+ curr);
+ }
+ }
+ } else {
+ CAM_ERR(CAM_FLASH, "Wrong Operation: %d", op);
+ return -EINVAL;
+ }
+
+ if (flash_ctrl->switch_trigger)
+ led_trigger_event(flash_ctrl->switch_trigger, LED_SWITCH_ON);
+
+ return 0;
+}
+
+int cam_flash_off(struct cam_flash_ctrl *flash_ctrl)
+{
+ int i = 0;
+
+ if (!flash_ctrl) {
+ CAM_ERR(CAM_FLASH, "Flash control Null");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < flash_ctrl->flash_num_sources; i++)
+ if (flash_ctrl->flash_trigger[i])
+ led_trigger_event(flash_ctrl->flash_trigger[i],
+ LED_OFF);
+
+ for (i = 0; i < flash_ctrl->torch_num_sources; i++)
+ if (flash_ctrl->torch_trigger[i])
+ led_trigger_event(flash_ctrl->torch_trigger[i],
+ LED_OFF);
+
+ if (flash_ctrl->switch_trigger)
+ led_trigger_event(flash_ctrl->switch_trigger,
+ LED_SWITCH_OFF);
+
+ return 0;
+}
+
+static int cam_flash_low(
+ struct cam_flash_ctrl *flash_ctrl,
+ struct cam_flash_frame_setting *flash_data)
+{
+ int i = 0, rc = 0;
+
+ if (!flash_data) {
+ CAM_ERR(CAM_FLASH, "Flash Data Null");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < flash_ctrl->flash_num_sources; i++)
+ if (flash_ctrl->flash_trigger[i])
+ led_trigger_event(flash_ctrl->flash_trigger[i],
+ LED_OFF);
+
+ rc = cam_flash_ops(flash_ctrl, flash_data,
+ CAMERA_SENSOR_FLASH_OP_FIRELOW);
+ if (rc)
+ CAM_ERR(CAM_FLASH, "Fire Torch failed: %d", rc);
+
+ return rc;
+}
+
+static int cam_flash_high(
+ struct cam_flash_ctrl *flash_ctrl,
+ struct cam_flash_frame_setting *flash_data)
+{
+ int i = 0, rc = 0;
+
+ if (!flash_data) {
+ CAM_ERR(CAM_FLASH, "Flash Data Null");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < flash_ctrl->torch_num_sources; i++)
+ if (flash_ctrl->torch_trigger[i])
+ led_trigger_event(flash_ctrl->torch_trigger[i],
+ LED_OFF);
+
+ rc = cam_flash_ops(flash_ctrl, flash_data,
+ CAMERA_SENSOR_FLASH_OP_FIREHIGH);
+ if (rc)
+ CAM_ERR(CAM_FLASH, "Fire Flash Failed: %d", rc);
+
+ return rc;
+}
+
+static int delete_req(struct cam_flash_ctrl *fctrl, uint64_t req_id)
+{
+ int i = 0;
+ int frame_offset = 0;
+ struct cam_flash_frame_setting *flash_data = NULL;
+
+ if (req_id == 0) {
+ flash_data = &fctrl->nrt_info;
+ if ((fctrl->nrt_info.cmn_attr.cmd_type ==
+ CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET) ||
+ (fctrl->nrt_info.cmn_attr.cmd_type ==
+ CAMERA_SENSOR_FLASH_CMD_TYPE_RER)) {
+ flash_data->cmn_attr.is_settings_valid = false;
+ for (i = 0; i < flash_data->cmn_attr.count; i++)
+ flash_data->led_current_ma[i] = 0;
+ } else {
+ fctrl->flash_init_setting.cmn_attr.
+ is_settings_valid = false;
+ }
+ } else {
+ frame_offset = (req_id + MAX_PER_FRAME_ARRAY -
+ CAM_FLASH_PIPELINE_DELAY) % 8;
+ flash_data = &fctrl->per_frame[frame_offset];
+ if (req_id > flash_data->cmn_attr.request_id) {
+ flash_data->cmn_attr.request_id = 0;
+ flash_data->cmn_attr.is_settings_valid = false;
+ for (i = 0; i < flash_data->cmn_attr.count; i++)
+ flash_data->led_current_ma[i] = 0;
+ }
+ }
+
+ return 0;
+}
+
+int cam_flash_apply_setting(struct cam_flash_ctrl *fctrl,
+ uint64_t req_id)
+{
+ int rc = 0, i = 0;
+ int frame_offset = 0;
+ uint16_t num_iterations;
+ struct cam_flash_frame_setting *flash_data = NULL;
+
+ if (req_id == 0) {
+ if (fctrl->nrt_info.cmn_attr.cmd_type ==
+ CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET) {
+ flash_data = &fctrl->nrt_info;
+ if (flash_data->opcode ==
+ CAMERA_SENSOR_FLASH_OP_FIRELOW) {
+ if (!(fctrl->is_regulator_enabled)) {
+ rc = cam_flash_prepare(fctrl,
+ CAM_FLASH_STATE_INIT);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "Reg Enable Failed %d",
+ rc);
+ goto nrt_del_req;
+ }
+ fctrl->flash_state =
+ CAM_FLASH_STATE_INIT;
+ rc = cam_flash_low(fctrl, flash_data);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "Torch ON failed : %d",
+ rc);
+ goto nrt_del_req;
+ }
+ fctrl->flash_state =
+ CAM_FLASH_STATE_LOW;
+ }
+ } else if (flash_data->opcode ==
+ CAMERA_SENSOR_FLASH_OP_OFF) {
+ if (fctrl->flash_state !=
+ CAM_FLASH_STATE_INIT) {
+ rc = cam_flash_off(fctrl);
+ if (rc)
+ CAM_ERR(CAM_FLASH,
+ "LED off failed: %d",
+ rc);
+ }
+
+ rc = cam_flash_prepare(fctrl,
+ CAM_FLASH_STATE_RELEASE);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "Regulator Disable failed %d",
+ rc);
+ goto nrt_del_req;
+ }
+
+ fctrl->flash_state =
+ CAM_FLASH_STATE_RELEASE;
+ fctrl->is_regulator_enabled = false;
+ }
+ } else if (fctrl->nrt_info.cmn_attr.cmd_type ==
+ CAMERA_SENSOR_FLASH_CMD_TYPE_RER) {
+ flash_data = &fctrl->nrt_info;
+
+ if (fctrl->flash_state != CAM_FLASH_STATE_INIT) {
+ rc = cam_flash_off(fctrl);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "Flash off failed: %d",
+ rc);
+ } else {
+ fctrl->flash_state =
+ CAM_FLASH_STATE_INIT;
+ }
+ }
+
+ num_iterations = flash_data->num_iterations;
+ for (i = 0; i < num_iterations; i++) {
+ /* Turn On Torch */
+ if (fctrl->flash_state ==
+ CAM_FLASH_STATE_INIT) {
+ rc = cam_flash_low(fctrl, flash_data);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "Fire Torch Failed");
+ goto nrt_del_req;
+ }
+ fctrl->flash_state =
+ CAM_FLASH_STATE_LOW;
+ }
+ usleep_range(
+ flash_data->led_on_delay_ms * 1000,
+ flash_data->led_on_delay_ms * 1000 + 100);
+
+ /* Turn Off Torch */
+ rc = cam_flash_off(fctrl);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "Flash off failed: %d",
+ rc);
+ continue;
+ }
+ fctrl->flash_state = CAM_FLASH_STATE_INIT;
+ usleep_range(
+ flash_data->led_off_delay_ms * 1000,
+ flash_data->led_off_delay_ms * 1000 + 100);
+ }
+ }
+ } else {
+ frame_offset = req_id % MAX_PER_FRAME_ARRAY;
+ flash_data = &fctrl->per_frame[frame_offset];
+
+ if ((flash_data->opcode == CAMERA_SENSOR_FLASH_OP_FIREHIGH) &&
+ (flash_data->cmn_attr.is_settings_valid)) {
+ /* Turn On Flash */
+ if (fctrl->flash_state == CAM_FLASH_STATE_INIT) {
+ rc = cam_flash_high(fctrl, flash_data);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "Flash ON failed: rc= %d",
+ rc);
+ goto apply_setting_err;
+ }
+ fctrl->flash_state = CAM_FLASH_STATE_HIGH;
+ }
+ } else if ((flash_data->opcode ==
+ CAMERA_SENSOR_FLASH_OP_FIRELOW) &&
+ (flash_data->cmn_attr.is_settings_valid)) {
+ /* Turn Off Flash */
+ if (fctrl->flash_state == CAM_FLASH_STATE_INIT) {
+ rc = cam_flash_low(fctrl, flash_data);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "Torch ON failed: rc= %d",
+ rc);
+ goto apply_setting_err;
+ }
+ fctrl->flash_state = CAM_FLASH_STATE_LOW;
+ }
+ } else if ((flash_data->opcode == CAMERA_SENSOR_FLASH_OP_OFF) &&
+ (flash_data->cmn_attr.is_settings_valid)) {
+ if ((fctrl->flash_state != CAM_FLASH_STATE_RELEASE) ||
+ (fctrl->flash_state != CAM_FLASH_STATE_INIT)) {
+ rc = cam_flash_off(fctrl);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "Flash off failed %d", rc);
+ } else {
+ fctrl->flash_state =
+ CAM_FLASH_STATE_INIT;
+ }
+ }
+ } else {
+ CAM_ERR(CAM_FLASH, "Wrong opcode : %d",
+ flash_data->opcode);
+ rc = -EINVAL;
+ goto apply_setting_err;
+ }
+ }
+
+nrt_del_req:
+ delete_req(fctrl, req_id);
+apply_setting_err:
+ return rc;
+}
+
+/**
+ * cam_flash_parser() - Parse a CSL packet delivered via CAM_CONFIG_DEV.
+ * @fctrl: flash controller instance
+ * @arg:   userspace ioctl payload (struct cam_control)
+ *
+ * Handles INIT, SET_OPS, NON_REALTIME_SET_OPS and NOP opcodes. SET_OPS and
+ * NOP packets are also announced to the request manager; non-realtime
+ * operations (widget/RER/query-current) are applied immediately under
+ * flash_wq_mutex.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int cam_flash_parser(struct cam_flash_ctrl *fctrl, void *arg)
+{
+	int rc = 0, i = 0;
+	uint64_t generic_ptr;
+	uint32_t *cmd_buf = NULL;
+	uint32_t *offset = NULL;
+	uint32_t frame_offset = 0;
+	size_t len_of_buffer;
+	struct cam_control *ioctl_ctrl = NULL;
+	struct cam_packet *csl_packet = NULL;
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+	struct common_header *cmn_hdr;
+	struct cam_config_dev_cmd config;
+	struct cam_req_mgr_add_request add_req;
+	struct cam_flash_init *cam_flash_info = NULL;
+	struct cam_flash_set_rer *flash_rer_info = NULL;
+	struct cam_flash_set_on_off *flash_operation_info = NULL;
+	struct cam_flash_query_curr *flash_query_info = NULL;
+
+	if (!fctrl || !arg) {
+		CAM_ERR(CAM_FLASH, "fctrl/arg is NULL");
+		return -EINVAL;
+	}
+	/* getting CSL Packet */
+	ioctl_ctrl = (struct cam_control *)arg;
+
+	if (copy_from_user((&config), (void __user *) ioctl_ctrl->handle,
+		sizeof(config))) {
+		CAM_ERR(CAM_FLASH, "Copy cmd handle from user failed");
+		rc = -EFAULT;
+		return rc;
+	}
+
+	rc = cam_mem_get_cpu_buf(config.packet_handle,
+		(uint64_t *)&generic_ptr, &len_of_buffer);
+	if (rc) {
+		CAM_ERR(CAM_FLASH, "Failed in getting the buffer : %d", rc);
+		return rc;
+	}
+
+	csl_packet = (struct cam_packet *)generic_ptr;
+
+	switch (csl_packet->header.op_code & 0xFFFFFF) {
+	case CAM_FLASH_PACKET_OPCODE_INIT: {
+		/* INIT packet*/
+		offset = (uint32_t *)((uint8_t *)&csl_packet->payload +
+			csl_packet->cmd_buf_offset);
+		fctrl->flash_init_setting.cmn_attr.request_id = 0;
+		fctrl->flash_init_setting.cmn_attr.is_settings_valid = true;
+		cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+		rc = cam_mem_get_cpu_buf(cmd_desc->mem_handle,
+			(uint64_t *)&generic_ptr, &len_of_buffer);
+		/* BUGFIX: rc was previously ignored and generic_ptr was
+		 * dereferenced even when the buffer lookup failed.
+		 */
+		if (rc) {
+			CAM_ERR(CAM_FLASH,
+				"Failed in getting the buffer : %d", rc);
+			return rc;
+		}
+		cmd_buf = (uint32_t *)((uint8_t *)generic_ptr +
+			cmd_desc->offset);
+		cam_flash_info = (struct cam_flash_init *)cmd_buf;
+
+		switch (cam_flash_info->cmd_type) {
+		case CAMERA_SENSOR_FLASH_CMD_TYPE_INIT:
+			fctrl->flash_type = cam_flash_info->flash_type;
+			fctrl->is_regulator_enabled = false;
+			fctrl->nrt_info.cmn_attr.cmd_type =
+				CAMERA_SENSOR_FLASH_CMD_TYPE_INIT;
+			break;
+		default:
+			CAM_ERR(CAM_FLASH, "Wrong cmd_type = %d",
+				cam_flash_info->cmd_type);
+			return -EINVAL;
+		}
+		break;
+	}
+	case CAM_FLASH_PACKET_OPCODE_SET_OPS: {
+		offset = (uint32_t *)((uint8_t *)&csl_packet->payload +
+			csl_packet->cmd_buf_offset);
+		frame_offset = csl_packet->header.request_id %
+			MAX_PER_FRAME_ARRAY;
+		fctrl->per_frame[frame_offset].cmn_attr.request_id =
+			csl_packet->header.request_id;
+		fctrl->per_frame[frame_offset].cmn_attr.is_settings_valid =
+			true;
+		cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+		rc = cam_mem_get_cpu_buf(cmd_desc->mem_handle,
+			(uint64_t *)&generic_ptr, &len_of_buffer);
+		/* BUGFIX: check the buffer lookup result before use */
+		if (rc) {
+			CAM_ERR(CAM_FLASH,
+				"Failed in getting the buffer : %d", rc);
+			return rc;
+		}
+		cmd_buf = (uint32_t *)((uint8_t *)generic_ptr +
+			cmd_desc->offset);
+		cmn_hdr = (struct common_header *)cmd_buf;
+
+		switch (cmn_hdr->cmd_type) {
+		case CAMERA_SENSOR_FLASH_CMD_TYPE_FIRE: {
+			CAM_DBG(CAM_FLASH,
+				"CAMERA_FLASH_CMD_TYPE_OPS case called");
+			flash_operation_info =
+				(struct cam_flash_set_on_off *) cmd_buf;
+			/* BUGFIX: bound the userspace-provided LED count to
+			 * the led_current_ma[] array size to prevent an
+			 * out-of-bounds write.
+			 */
+			if (flash_operation_info->count >
+				CAM_FLASH_MAX_LED_TRIGGERS) {
+				CAM_ERR(CAM_FLASH,
+					"led count out of limit: %d",
+					flash_operation_info->count);
+				return -EINVAL;
+			}
+			fctrl->per_frame[frame_offset].opcode =
+				flash_operation_info->opcode;
+			fctrl->per_frame[frame_offset].cmn_attr.count =
+				flash_operation_info->count;
+			for (i = 0; i < flash_operation_info->count; i++)
+				fctrl->per_frame[frame_offset].led_current_ma[i]
+					= flash_operation_info->
+					led_current_ma[i];
+			break;
+		}
+		default:
+			CAM_ERR(CAM_FLASH, "Wrong cmd_type = %d",
+				cmn_hdr->cmd_type);
+			return -EINVAL;
+		}
+
+		break;
+	}
+	case CAM_FLASH_PACKET_OPCODE_NON_REALTIME_SET_OPS: {
+		offset = (uint32_t *)((uint8_t *)&csl_packet->payload +
+			csl_packet->cmd_buf_offset);
+		fctrl->nrt_info.cmn_attr.is_settings_valid = true;
+		cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+		rc = cam_mem_get_cpu_buf(cmd_desc->mem_handle,
+			(uint64_t *)&generic_ptr, &len_of_buffer);
+		/* BUGFIX: check the buffer lookup result before use */
+		if (rc) {
+			CAM_ERR(CAM_FLASH,
+				"Failed in getting the buffer : %d", rc);
+			return rc;
+		}
+		cmd_buf = (uint32_t *)((uint8_t *)generic_ptr +
+			cmd_desc->offset);
+		cmn_hdr = (struct common_header *)cmd_buf;
+
+		switch (cmn_hdr->cmd_type) {
+		case CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET: {
+			CAM_DBG(CAM_FLASH, "Widget Flash Operation");
+			flash_operation_info =
+				(struct cam_flash_set_on_off *) cmd_buf;
+			/* BUGFIX: bound userspace LED count (see FIRE case) */
+			if (flash_operation_info->count >
+				CAM_FLASH_MAX_LED_TRIGGERS) {
+				CAM_ERR(CAM_FLASH,
+					"led count out of limit: %d",
+					flash_operation_info->count);
+				return -EINVAL;
+			}
+			fctrl->nrt_info.cmn_attr.count =
+				flash_operation_info->count;
+			fctrl->nrt_info.cmn_attr.request_id = 0;
+			fctrl->nrt_info.opcode =
+				flash_operation_info->opcode;
+			fctrl->nrt_info.cmn_attr.cmd_type =
+				CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET;
+
+			for (i = 0; i < flash_operation_info->count; i++)
+				fctrl->nrt_info.led_current_ma[i] =
+					flash_operation_info->led_current_ma[i];
+
+			mutex_lock(&fctrl->flash_wq_mutex);
+			rc = cam_flash_apply_setting(fctrl, 0);
+			if (rc)
+				CAM_ERR(CAM_FLASH, "Apply setting failed: %d",
+					rc);
+			mutex_unlock(&fctrl->flash_wq_mutex);
+			return rc;
+		}
+		case CAMERA_SENSOR_FLASH_CMD_TYPE_QUERYCURR: {
+			int query_curr_ma = 0;
+
+			flash_query_info =
+				(struct cam_flash_query_curr *)cmd_buf;
+
+			rc = qpnp_flash_led_prepare(fctrl->switch_trigger,
+				QUERY_MAX_CURRENT, &query_curr_ma);
+			CAM_DBG(CAM_FLASH, "query_curr_ma = %d",
+				query_curr_ma);
+			if (rc) {
+				CAM_ERR(CAM_FLASH,
+					"Query current failed with rc=%d", rc);
+				return rc;
+			}
+			flash_query_info->query_current_ma = query_curr_ma;
+			break;
+		}
+		case CAMERA_SENSOR_FLASH_CMD_TYPE_RER: {
+			rc = 0;
+			flash_rer_info = (struct cam_flash_set_rer *)cmd_buf;
+			/* BUGFIX: bound userspace LED count (see FIRE case) */
+			if (flash_rer_info->count >
+				CAM_FLASH_MAX_LED_TRIGGERS) {
+				CAM_ERR(CAM_FLASH,
+					"led count out of limit: %d",
+					flash_rer_info->count);
+				return -EINVAL;
+			}
+			fctrl->nrt_info.cmn_attr.cmd_type =
+				CAMERA_SENSOR_FLASH_CMD_TYPE_RER;
+			fctrl->nrt_info.opcode = flash_rer_info->opcode;
+			fctrl->nrt_info.cmn_attr.count = flash_rer_info->count;
+			fctrl->nrt_info.cmn_attr.request_id = 0;
+			fctrl->nrt_info.num_iterations =
+				flash_rer_info->num_iteration;
+			fctrl->nrt_info.led_on_delay_ms =
+				flash_rer_info->led_on_delay_ms;
+			fctrl->nrt_info.led_off_delay_ms =
+				flash_rer_info->led_off_delay_ms;
+
+			for (i = 0; i < flash_rer_info->count; i++)
+				fctrl->nrt_info.led_current_ma[i] =
+					flash_rer_info->led_current_ma[i];
+
+			mutex_lock(&fctrl->flash_wq_mutex);
+			rc = cam_flash_apply_setting(fctrl, 0);
+			if (rc)
+				CAM_ERR(CAM_FLASH, "apply_setting failed: %d",
+					rc);
+			mutex_unlock(&fctrl->flash_wq_mutex);
+			return rc;
+		}
+		default:
+			CAM_ERR(CAM_FLASH, "Wrong cmd_type : %d",
+				cmn_hdr->cmd_type);
+			return -EINVAL;
+		}
+
+		break;
+	}
+	case CAM_PKT_NOP_OPCODE: {
+		goto update_req_mgr;
+	}
+	default:
+		CAM_ERR(CAM_FLASH, "Wrong Opcode : %d",
+			(csl_packet->header.op_code & 0xFFFFFF));
+		return -EINVAL;
+	}
+update_req_mgr:
+	/* BUGFIX: mask was 0xFFFFF (20 bits) here but 0xFFFFFF (24 bits) in
+	 * the opcode switch above; use the same 24-bit mask consistently.
+	 */
+	if (((csl_packet->header.op_code & 0xFFFFFF) ==
+		CAM_PKT_NOP_OPCODE) ||
+		((csl_packet->header.op_code & 0xFFFFFF) ==
+		CAM_FLASH_PACKET_OPCODE_SET_OPS)) {
+		add_req.link_hdl = fctrl->bridge_intf.link_hdl;
+		add_req.req_id = csl_packet->header.request_id;
+		add_req.dev_hdl = fctrl->bridge_intf.device_hdl;
+		if (fctrl->bridge_intf.crm_cb &&
+			fctrl->bridge_intf.crm_cb->add_req)
+			fctrl->bridge_intf.crm_cb->add_req(&add_req);
+		CAM_DBG(CAM_FLASH, "add req to req_mgr= %lld", add_req.req_id);
+	}
+
+	return rc;
+}
+
+/* Report this flash device's identity and pipeline delay to the CRM. */
+int cam_flash_publish_dev_info(struct cam_req_mgr_device_info *info)
+{
+	strlcpy(info->name, CAM_FLASH_NAME, sizeof(info->name));
+	info->dev_id = CAM_REQ_MGR_DEVICE_FLASH;
+	info->p_delay = CAM_FLASH_PIPELINE_DELAY;
+
+	return 0;
+}
+
+/* CRM link-setup callback: latch or clear the link handle/callbacks. */
+int cam_flash_establish_link(struct cam_req_mgr_core_dev_link_setup *link)
+{
+	struct cam_flash_ctrl *fctrl;
+
+	if (!link)
+		return -EINVAL;
+
+	fctrl = (struct cam_flash_ctrl *)cam_get_device_priv(link->dev_hdl);
+	if (!fctrl) {
+		CAM_ERR(CAM_FLASH, " Device data is NULL");
+		return -EINVAL;
+	}
+
+	/* On enable, adopt the CRM link; on disable, drop it. */
+	fctrl->bridge_intf.link_hdl = link->link_enable ? link->link_hdl : -1;
+	fctrl->bridge_intf.crm_cb = link->link_enable ? link->crm_cb : NULL;
+
+	return 0;
+}
+
+/* Invalidate any pending non-realtime (init/widget/RER) settings. */
+static int cam_flash_flush_nrt(struct cam_flash_ctrl *fctrl)
+{
+	int idx;
+	struct cam_flash_frame_setting *nrt;
+
+	if (!fctrl)
+		return -EINVAL;
+
+	nrt = &fctrl->nrt_info;
+
+	switch (nrt->cmn_attr.cmd_type) {
+	case CAMERA_SENSOR_FLASH_CMD_TYPE_INIT:
+		fctrl->flash_init_setting.cmn_attr.is_settings_valid = false;
+		break;
+	case CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET:
+	case CAMERA_SENSOR_FLASH_CMD_TYPE_RER:
+		nrt->cmn_attr.is_settings_valid = false;
+		nrt->cmn_attr.count = 0;
+		nrt->num_iterations = 0;
+		nrt->led_on_delay_ms = 0;
+		nrt->led_off_delay_ms = 0;
+		for (idx = 0; idx < CAM_FLASH_MAX_LED_TRIGGERS; idx++)
+			nrt->led_current_ma[idx] = 0;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ * cam_flash_flush_request() - CRM flush callback.
+ * @flush: flush descriptor (ALL or single request by id)
+ *
+ * Clears per-frame settings (and non-realtime settings for FLUSH_TYPE_ALL).
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int cam_flash_flush_request(struct cam_req_mgr_flush_request *flush)
+{
+	int rc = 0;
+	int i = 0, j = 0;
+	struct cam_flash_ctrl *fctrl = NULL;
+	int frame_offset = 0;
+
+	/* BUGFIX: sibling callback cam_flash_apply_request() NULL-checks its
+	 * argument; this one dereferenced flush->dev_hdl without a check.
+	 */
+	if (!flush)
+		return -EINVAL;
+
+	fctrl = (struct cam_flash_ctrl *) cam_get_device_priv(flush->dev_hdl);
+	if (!fctrl) {
+		CAM_ERR(CAM_FLASH, "Device data is NULL");
+		return -EINVAL;
+	}
+
+	if (flush->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
+		/* flush all requests*/
+		for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
+			fctrl->per_frame[i].cmn_attr.request_id = 0;
+			fctrl->per_frame[i].cmn_attr.is_settings_valid = false;
+			fctrl->per_frame[i].cmn_attr.count = 0;
+			for (j = 0; j < CAM_FLASH_MAX_LED_TRIGGERS; j++)
+				fctrl->per_frame[i].led_current_ma[j] = 0;
+		}
+
+		rc = cam_flash_flush_nrt(fctrl);
+		if (rc)
+			CAM_ERR(CAM_FLASH, "NonRealTime flush error");
+	} else if (flush->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
+		/* flush request with req_id*/
+		frame_offset = flush->req_id % MAX_PER_FRAME_ARRAY;
+		fctrl->per_frame[frame_offset].cmn_attr.request_id = 0;
+		fctrl->per_frame[frame_offset].cmn_attr.is_settings_valid =
+			false;
+		fctrl->per_frame[frame_offset].cmn_attr.count = 0;
+		for (i = 0; i < CAM_FLASH_MAX_LED_TRIGGERS; i++)
+			fctrl->per_frame[frame_offset].led_current_ma[i] = 0;
+	}
+	return rc;
+}
+
+/* CRM apply callback: apply per-request settings unless bubble-reporting. */
+int cam_flash_apply_request(struct cam_req_mgr_apply_request *apply)
+{
+	int rc = 0;
+	struct cam_flash_ctrl *fctrl;
+
+	if (!apply)
+		return -EINVAL;
+
+	fctrl = (struct cam_flash_ctrl *) cam_get_device_priv(apply->dev_hdl);
+	if (!fctrl) {
+		CAM_ERR(CAM_FLASH, "Device data is NULL");
+		return -EINVAL;
+	}
+
+	if (apply->report_if_bubble)
+		return rc;
+
+	mutex_lock(&fctrl->flash_wq_mutex);
+	rc = cam_flash_apply_setting(fctrl, apply->request_id);
+	if (rc)
+		CAM_ERR(CAM_FLASH, "apply_setting failed with rc=%d",
+			rc);
+	mutex_unlock(&fctrl->flash_wq_mutex);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.h
new file mode 100644
index 0000000..4b0cf8d
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.h
@@ -0,0 +1,33 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_FLASH_CORE_H_
+#define _CAM_FLASH_CORE_H_
+
+#include <linux/leds-qpnp-flash.h>
+#include <media/cam_sensor.h>
+#include "cam_flash_dev.h"
+#include "cam_sync_api.h"
+#include "cam_mem_mgr_api.h"
+
+/* Parse a CAM_CONFIG_DEV CSL packet into flash settings. */
+int cam_flash_parser(struct cam_flash_ctrl *fctrl, void *arg);
+/* CRM callback: publish device id, name and pipeline delay. */
+int cam_flash_publish_dev_info(struct cam_req_mgr_device_info *info);
+/* CRM callback: latch/clear the link handle and CRM callbacks. */
+int cam_flash_establish_link(struct cam_req_mgr_core_dev_link_setup *link);
+/* Apply the settings stored for @req_id (0 = non-realtime/init). */
+int cam_flash_apply_setting(struct cam_flash_ctrl *fctrl, uint64_t req_id);
+/* CRM callback: apply settings for the request being scheduled. */
+int cam_flash_apply_request(struct cam_req_mgr_apply_request *apply);
+/* CRM callback: handle link events (e.g. SOF freeze/error). */
+int cam_flash_process_evt(struct cam_req_mgr_link_evt_data *event_data);
+/* CRM callback: drop all or one pending request's settings. */
+int cam_flash_flush_request(struct cam_req_mgr_flush_request *flush);
+/* Turn all flash/torch triggers off. */
+int cam_flash_off(struct cam_flash_ctrl *fctrl);
+/* Enable/disable the flash regulator for the given target state. */
+int cam_flash_prepare(struct cam_flash_ctrl *flash_ctrl,
+	enum cam_flash_state state);
+
+#endif /*_CAM_FLASH_CORE_H_*/
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c
new file mode 100644
index 0000000..32df2f1
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c
@@ -0,0 +1,361 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include "cam_flash_dev.h"
+#include "cam_flash_soc.h"
+#include "cam_flash_core.h"
+
+/**
+ * cam_flash_driver_cmd() - Handle VIDIOC_CAM_CONTROL opcodes for flash.
+ * @fctrl:       flash controller instance
+ * @arg:         userspace struct cam_control
+ * @soc_private: parsed DT data (max currents/durations)
+ *
+ * Serialized by flash_mutex. Supports ACQUIRE/RELEASE/QUERY_CAP/
+ * START/STOP/CONFIG.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int32_t cam_flash_driver_cmd(struct cam_flash_ctrl *fctrl,
+	void *arg, struct cam_flash_private_soc *soc_private)
+{
+	int rc = 0;
+	int i = 0;
+	struct cam_control *cmd = (struct cam_control *)arg;
+
+	if (!fctrl || !arg) {
+		CAM_ERR(CAM_FLASH, "fctrl/arg is NULL with arg:%pK fctrl%pK",
+			fctrl, arg);
+		return -EINVAL;
+	}
+
+	mutex_lock(&(fctrl->flash_mutex));
+	switch (cmd->op_code) {
+	case CAM_ACQUIRE_DEV: {
+		struct cam_sensor_acquire_dev flash_acq_dev;
+		struct cam_create_dev_hdl bridge_params;
+
+		CAM_DBG(CAM_FLASH, "CAM_ACQUIRE_DEV");
+		if (fctrl->bridge_intf.device_hdl != -1) {
+			CAM_ERR(CAM_FLASH, "Device is already acquired");
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+
+		rc = copy_from_user(&flash_acq_dev, (void __user *)cmd->handle,
+			sizeof(flash_acq_dev));
+		if (rc) {
+			CAM_ERR(CAM_FLASH, "Failed Copying from User");
+			/* BUGFIX: copy_from_user() returns the number of
+			 * bytes NOT copied, not an errno; map it to -EFAULT
+			 * instead of leaking a positive byte count as rc.
+			 */
+			rc = -EFAULT;
+			goto release_mutex;
+		}
+
+		bridge_params.session_hdl = flash_acq_dev.session_handle;
+		bridge_params.ops = &fctrl->bridge_intf.ops;
+		bridge_params.v4l2_sub_dev_flag = 0;
+		bridge_params.media_entity_flag = 0;
+		bridge_params.priv = fctrl;
+
+		flash_acq_dev.device_handle =
+			cam_create_device_hdl(&bridge_params);
+		fctrl->bridge_intf.device_hdl =
+			flash_acq_dev.device_handle;
+		fctrl->bridge_intf.session_hdl =
+			flash_acq_dev.session_handle;
+
+		rc = copy_to_user((void __user *) cmd->handle, &flash_acq_dev,
+			sizeof(struct cam_sensor_acquire_dev));
+		if (rc) {
+			CAM_ERR(CAM_FLASH, "Failed Copy to User with rc = %d",
+				rc);
+			rc = -EFAULT;
+			goto release_mutex;
+		}
+		break;
+	}
+	case CAM_RELEASE_DEV: {
+		CAM_DBG(CAM_FLASH, "CAM_RELEASE_DEV");
+		if (fctrl->bridge_intf.device_hdl == -1) {
+			CAM_ERR(CAM_FLASH,
+				"Invalid Handle: Link Hdl: %d device hdl: %d",
+				fctrl->bridge_intf.device_hdl,
+				fctrl->bridge_intf.link_hdl);
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+		rc = cam_destroy_device_hdl(fctrl->bridge_intf.device_hdl);
+		if (rc)
+			CAM_ERR(CAM_FLASH,
+				"Failed in destroying the device Handle rc= %d",
+				rc);
+		fctrl->bridge_intf.device_hdl = -1;
+		fctrl->bridge_intf.link_hdl = -1;
+		fctrl->bridge_intf.session_hdl = -1;
+		break;
+	}
+	case CAM_QUERY_CAP: {
+		/* BUGFIX: zero-initialize; only the first flash_num_sources/
+		 * torch_num_sources entries are filled below, and the whole
+		 * struct is copied to userspace — uninitialized stack bytes
+		 * would otherwise leak to userspace.
+		 */
+		struct cam_flash_query_cap_info flash_cap = {0};
+
+		CAM_DBG(CAM_FLASH, "CAM_QUERY_CAP");
+		flash_cap.slot_info = fctrl->soc_info.index;
+		for (i = 0; i < fctrl->flash_num_sources; i++) {
+			flash_cap.max_current_flash[i] =
+				soc_private->flash_max_current[i];
+			flash_cap.max_duration_flash[i] =
+				soc_private->flash_max_duration[i];
+		}
+
+		for (i = 0; i < fctrl->torch_num_sources; i++)
+			flash_cap.max_current_torch[i] =
+				soc_private->torch_max_current[i];
+
+		if (copy_to_user((void __user *) cmd->handle, &flash_cap,
+			sizeof(struct cam_flash_query_cap_info))) {
+			CAM_ERR(CAM_FLASH, "Failed Copy to User");
+			rc = -EFAULT;
+			goto release_mutex;
+		}
+		break;
+	}
+	case CAM_START_DEV: {
+		CAM_DBG(CAM_FLASH, "CAM_START_DEV");
+		rc = cam_flash_prepare(fctrl, CAM_FLASH_STATE_INIT);
+		if (rc) {
+			CAM_ERR(CAM_FLASH,
+				"Enable Regulator Failed rc = %d", rc);
+			goto release_mutex;
+		}
+		fctrl->flash_state = CAM_FLASH_STATE_INIT;
+		rc = cam_flash_apply_setting(fctrl, 0);
+		if (rc) {
+			CAM_ERR(CAM_FLASH, "cannot apply settings rc = %d", rc);
+			goto release_mutex;
+		}
+		break;
+	}
+	case CAM_STOP_DEV: {
+		CAM_DBG(CAM_FLASH, "CAM_STOP_DEV");
+		if (fctrl->flash_state != CAM_FLASH_STATE_INIT)
+			cam_flash_off(fctrl);
+
+		rc = cam_flash_prepare(fctrl, CAM_FLASH_STATE_RELEASE);
+		if (rc) {
+			CAM_ERR(CAM_FLASH, "Disable Regulator Failed ret = %d",
+				rc);
+			goto release_mutex;
+		}
+		fctrl->flash_state = CAM_FLASH_STATE_RELEASE;
+
+		break;
+	}
+	case CAM_CONFIG_DEV: {
+		CAM_DBG(CAM_FLASH, "CAM_CONFIG_DEV");
+		rc = cam_flash_parser(fctrl, arg);
+		if (rc) {
+			CAM_ERR(CAM_FLASH, "Failed Flash Config: rc=%d\n", rc);
+			goto release_mutex;
+		}
+		break;
+	}
+	default:
+		CAM_ERR(CAM_FLASH, "Invalid Opcode: %d", cmd->op_code);
+		rc = -EINVAL;
+	}
+
+release_mutex:
+	mutex_unlock(&(fctrl->flash_mutex));
+	return rc;
+}
+
+/* OF match table: binds this driver to "qcom,camera-flash" DT nodes. */
+static const struct of_device_id cam_flash_dt_match[] = {
+	{.compatible = "qcom,camera-flash", .data = NULL},
+	{}
+};
+
+/* v4l2 ioctl entry: dispatch VIDIOC_CAM_CONTROL to the flash driver. */
+static long cam_flash_subdev_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, void *arg)
+{
+	int rc;
+	struct cam_flash_ctrl *fctrl = v4l2_get_subdevdata(sd);
+	struct cam_flash_private_soc *soc_private =
+		fctrl->soc_info.soc_private;
+
+	CAM_DBG(CAM_FLASH, "Enter");
+
+	if (cmd == VIDIOC_CAM_CONTROL) {
+		rc = cam_flash_driver_cmd(fctrl, arg,
+			soc_private);
+	} else {
+		CAM_ERR(CAM_FLASH, "Invalid ioctl cmd type");
+		rc = -EINVAL;
+	}
+
+	CAM_DBG(CAM_FLASH, "Exit");
+	return rc;
+}
+
+#ifdef CONFIG_COMPAT
+/* 32-bit compat ioctl shim: bounce the payload through kernel space. */
+static long cam_flash_subdev_do_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, unsigned long arg)
+{
+	int32_t rc = 0;
+	struct cam_control cmd_data;
+
+	if (copy_from_user(&cmd_data, (void __user *)arg,
+		sizeof(cmd_data))) {
+		CAM_ERR(CAM_FLASH,
+			"Failed to copy from user_ptr=%pK size=%zu",
+			(void __user *)arg, sizeof(cmd_data));
+		return -EFAULT;
+	}
+
+	if (cmd == VIDIOC_CAM_CONTROL) {
+		rc = cam_flash_subdev_ioctl(sd, cmd, &cmd_data);
+		if (rc)
+			CAM_ERR(CAM_FLASH, "cam_flash_ioctl failed");
+	} else {
+		CAM_ERR(CAM_FLASH, "Invalid compat ioctl cmd_type:%d",
+			cmd);
+		rc = -EINVAL;
+	}
+
+	/* Mirror any output back to userspace only on success. */
+	if (!rc && copy_to_user((void __user *)arg, &cmd_data,
+		sizeof(cmd_data))) {
+		CAM_ERR(CAM_FLASH,
+			"Failed to copy to user_ptr=%pK size=%zu",
+			(void __user *)arg, sizeof(cmd_data));
+		rc = -EFAULT;
+	}
+
+	return rc;
+}
+#endif
+
+/**
+ * cam_flash_platform_remove() - Platform driver remove hook.
+ * @pdev: platform device being removed
+ *
+ * Releases the controller allocated in probe.
+ */
+static int cam_flash_platform_remove(struct platform_device *pdev)
+{
+	struct cam_flash_ctrl *fctrl;
+
+	fctrl = platform_get_drvdata(pdev);
+	if (!fctrl) {
+		CAM_ERR(CAM_FLASH, "Flash device is NULL");
+		return 0;
+	}
+
+	/* BUGFIX: fctrl is allocated with kzalloc() in probe, not
+	 * devm_kzalloc(); calling devm_kfree() on it is invalid. Use plain
+	 * kfree() and also release the SOC private data allocated by
+	 * cam_flash_get_dt_data(), which previously leaked.
+	 */
+	platform_set_drvdata(pdev, NULL);
+	kfree(fctrl->soc_info.soc_private);
+	fctrl->soc_info.soc_private = NULL;
+	kfree(fctrl);
+
+	return 0;
+}
+
+/* v4l2 core ops: route native (and 32-bit compat) ioctls to this driver. */
+static struct v4l2_subdev_core_ops cam_flash_subdev_core_ops = {
+	.ioctl = cam_flash_subdev_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl32 = cam_flash_subdev_do_ioctl
+#endif
+};
+
+static struct v4l2_subdev_ops cam_flash_subdev_ops = {
+	.core = &cam_flash_subdev_core_ops,
+};
+
+/* No internal ops required; registered as an empty table. */
+static const struct v4l2_subdev_internal_ops cam_flash_internal_ops;
+
+/**
+ * cam_flash_platform_probe() - Platform driver probe hook.
+ * @pdev: matched platform device
+ *
+ * Allocates the controller, parses DT, registers the v4l2 subdev and
+ * wires up the CRM bridge callbacks.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int32_t cam_flash_platform_probe(struct platform_device *pdev)
+{
+	int32_t rc = 0;
+	struct cam_flash_ctrl *flash_ctrl = NULL;
+
+	CAM_DBG(CAM_FLASH, "Enter");
+	if (!pdev->dev.of_node) {
+		CAM_ERR(CAM_FLASH, "of_node NULL");
+		return -EINVAL;
+	}
+
+	flash_ctrl = kzalloc(sizeof(struct cam_flash_ctrl), GFP_KERNEL);
+	if (!flash_ctrl)
+		return -ENOMEM;
+
+	flash_ctrl->pdev = pdev;
+	flash_ctrl->soc_info.pdev = pdev;
+
+	rc = cam_flash_get_dt_data(flash_ctrl, &flash_ctrl->soc_info);
+	if (rc) {
+		CAM_ERR(CAM_FLASH, "cam_flash_get_dt_data failed with %d", rc);
+		kfree(flash_ctrl);
+		return -EINVAL;
+	}
+
+	flash_ctrl->v4l2_dev_str.internal_ops =
+		&cam_flash_internal_ops;
+	flash_ctrl->v4l2_dev_str.ops = &cam_flash_subdev_ops;
+	flash_ctrl->v4l2_dev_str.name = CAMX_FLASH_DEV_NAME;
+	flash_ctrl->v4l2_dev_str.sd_flags =
+		V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+	flash_ctrl->v4l2_dev_str.ent_function = CAM_FLASH_DEVICE_TYPE;
+	flash_ctrl->v4l2_dev_str.token = flash_ctrl;
+
+	rc = cam_register_subdev(&(flash_ctrl->v4l2_dev_str));
+	if (rc) {
+		CAM_ERR(CAM_FLASH, "Fail to create subdev with %d", rc);
+		goto free_resource;
+	}
+	flash_ctrl->bridge_intf.device_hdl = -1;
+	flash_ctrl->bridge_intf.ops.get_dev_info = cam_flash_publish_dev_info;
+	flash_ctrl->bridge_intf.ops.link_setup = cam_flash_establish_link;
+	flash_ctrl->bridge_intf.ops.apply_req = cam_flash_apply_request;
+	flash_ctrl->bridge_intf.ops.flush_req = cam_flash_flush_request;
+
+	platform_set_drvdata(pdev, flash_ctrl);
+	v4l2_set_subdevdata(&flash_ctrl->v4l2_dev_str.sd, flash_ctrl);
+
+	mutex_init(&(flash_ctrl->flash_mutex));
+	mutex_init(&(flash_ctrl->flash_wq_mutex));
+
+	CAM_DBG(CAM_FLASH, "Probe success");
+	return rc;
+free_resource:
+	/* BUGFIX: cam_flash_get_dt_data() succeeded above and allocated
+	 * soc_info.soc_private; free it here as well so a failed subdev
+	 * registration does not leak it.
+	 */
+	kfree(flash_ctrl->soc_info.soc_private);
+	flash_ctrl->soc_info.soc_private = NULL;
+	kfree(flash_ctrl);
+	return rc;
+}
+
+MODULE_DEVICE_TABLE(of, cam_flash_dt_match);
+
+/* Platform driver glue: probe/remove plus the DT match table above. */
+static struct platform_driver cam_flash_platform_driver = {
+	.probe = cam_flash_platform_probe,
+	.remove = cam_flash_platform_remove,
+	.driver = {
+		.name = "CAM-FLASH-DRIVER",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_flash_dt_match,
+	},
+};
+
+/* Module entry: register the platform driver. */
+static int __init cam_flash_init_module(void)
+{
+	int32_t rc = platform_driver_register(&cam_flash_platform_driver);
+
+	if (rc)
+		CAM_ERR(CAM_FLASH, "platform probe for flash failed");
+
+	return rc;
+}
+
+/* Module exit: unregister the platform driver. */
+static void __exit cam_flash_exit_module(void)
+{
+	platform_driver_unregister(&cam_flash_platform_driver);
+}
+
+module_init(cam_flash_init_module);
+module_exit(cam_flash_exit_module);
+MODULE_DESCRIPTION("CAM FLASH");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h
new file mode 100644
index 0000000..1897eb6
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h
@@ -0,0 +1,181 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _CAM_FLASH_DEV_H_
+#define _CAM_FLASH_DEV_H_
+
+#include <linux/leds.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/cam_sensor.h>
+#include <media/cam_req_mgr.h>
+#include "cam_req_mgr_util.h"
+#include "cam_req_mgr_interface.h"
+#include "cam_subdev.h"
+#include "cam_mem_mgr.h"
+#include "cam_sensor_cmn_header.h"
+#include "cam_soc_util.h"
+#include "cam_debug_util.h"
+
+#define CAMX_FLASH_DEV_NAME "cam-flash-dev"
+
+#define CAM_FLASH_PIPELINE_DELAY 1
+
+#define CAM_FLASH_PACKET_OPCODE_INIT 0
+#define CAM_FLASH_PACKET_OPCODE_SET_OPS 1
+#define CAM_FLASH_PACKET_OPCODE_NON_REALTIME_SET_OPS 2
+
+/* PMIC switch-trigger on/off operations. */
+enum cam_flash_switch_trigger_ops {
+	LED_SWITCH_OFF = 0,
+	LED_SWITCH_ON,
+};
+
+/* Driver state machine for the flash device. */
+enum cam_flash_state {
+	CAM_FLASH_STATE_INIT,
+	CAM_FLASH_STATE_LOW,
+	CAM_FLASH_STATE_HIGH,
+	CAM_FLASH_STATE_RELEASE,
+};
+
+/**
+ * struct cam_flash_intf_params
+ * @device_hdl   : Device Handle
+ * @session_hdl  : Session Handle
+ * @link_hdl     : Link Handle
+ * @ops          : KMD operations
+ * @crm_cb       : Callback API pointers
+ */
+struct cam_flash_intf_params {
+	int32_t device_hdl;
+	int32_t session_hdl;
+	int32_t link_hdl;
+	struct cam_req_mgr_kmd_ops ops;
+	struct cam_req_mgr_crm_cb *crm_cb;
+};
+
+/**
+ * struct cam_flash_common_attr
+ * @is_settings_valid : Notify the valid settings
+ * @request_id        : Request id provided by umd
+ * @count             : Number of led count
+ * @cmd_type          : Command buffer type
+ */
+struct cam_flash_common_attr {
+	bool is_settings_valid;
+	int32_t request_id;
+	uint16_t count;
+	uint8_t cmd_type;
+};
+
+/**
+ * struct cam_flash_init_packet
+ * @cmn_attr   : Provides common attributes
+ * @flash_type : Flash type(PMIC/I2C/GPIO)
+ */
+struct cam_flash_init_packet {
+	struct cam_flash_common_attr cmn_attr;
+	uint8_t flash_type;
+};
+
+/**
+ * struct cam_flash_frame_setting
+ * @cmn_attr          : Provides common attributes
+ * @num_iterations    : Iterations used to perform RER
+ * @led_on_delay_ms   : LED on time in milisec
+ * @led_off_delay_ms  : LED off time in milisec
+ * @opcode            : Command buffer opcode
+ * @led_current_ma[]  : LED current array in miliamps
+ *
+ */
+struct cam_flash_frame_setting {
+	struct cam_flash_common_attr cmn_attr;
+	uint16_t num_iterations;
+	uint16_t led_on_delay_ms;
+	uint16_t led_off_delay_ms;
+	int8_t opcode;
+	uint32_t led_current_ma[CAM_FLASH_MAX_LED_TRIGGERS];
+};
+
+/**
+ * struct cam_flash_private_soc
+ * @switch_trigger_name : Switch trigger name
+ * @flash_trigger_name  : Flash trigger name array
+ * @flash_op_current    : Flash operational current
+ * @flash_max_current   : Max supported current for LED in flash mode
+ * @flash_max_duration  : Max turn on duration for LED in Flash mode
+ * @torch_trigger_name  : Torch trigger name array
+ * @torch_op_current    : Torch operational current
+ * @torch_max_current   : Max supported current for LED in torch mode
+ */
+
+struct cam_flash_private_soc {
+	const char   *switch_trigger_name;
+	const char   *flash_trigger_name[CAM_FLASH_MAX_LED_TRIGGERS];
+	uint32_t     flash_op_current[CAM_FLASH_MAX_LED_TRIGGERS];
+	uint32_t     flash_max_current[CAM_FLASH_MAX_LED_TRIGGERS];
+	uint32_t     flash_max_duration[CAM_FLASH_MAX_LED_TRIGGERS];
+	const char   *torch_trigger_name[CAM_FLASH_MAX_LED_TRIGGERS];
+	uint32_t     torch_op_current[CAM_FLASH_MAX_LED_TRIGGERS];
+	uint32_t     torch_max_current[CAM_FLASH_MAX_LED_TRIGGERS];
+};
+
+/**
+ * struct cam_flash_ctrl
+ * @soc_info            : Soc related information
+ * @pdev                : Platform device
+ * @per_frame[]         : Per_frame setting array
+ * @nrt_info            : NonRealTime settings
+ * @of_node             : Of Node ptr
+ * @v4l2_dev_str        : V4L2 device structure
+ * @bridge_intf         : CRM interface
+ * @flash_init_setting  : Init command buffer structure
+ * @switch_trigger      : Switch trigger ptr
+ * @flash_num_sources   : Number of flash sources
+ * @torch_num_sources   : Number of torch sources
+ * @flash_mutex         : Mutex for flash operations
+ * @flash_wq_mutex      : Mutex for flash apply setting
+ * @flash_state         : Current flash state (INIT/LOW/HIGH/RELEASE)
+ * @flash_type          : Flash types (PMIC/I2C/GPIO)
+ * @is_regulator_enabled : Regulator disable/enable notifier
+ * @flash_trigger       : Flash trigger ptr
+ * @torch_trigger       : Torch trigger ptr
+ */
+struct cam_flash_ctrl {
+	struct cam_hw_soc_info soc_info;
+	struct platform_device *pdev;
+	struct cam_flash_frame_setting per_frame[MAX_PER_FRAME_ARRAY];
+	struct cam_flash_frame_setting nrt_info;
+	struct device_node *of_node;
+	struct cam_subdev v4l2_dev_str;
+	struct cam_flash_intf_params bridge_intf;
+	struct cam_flash_init_packet flash_init_setting;
+	struct led_trigger *switch_trigger;
+	uint32_t flash_num_sources;
+	uint32_t torch_num_sources;
+	struct mutex flash_mutex;
+	struct mutex flash_wq_mutex;
+	enum cam_flash_state flash_state;
+	uint8_t flash_type;
+	bool is_regulator_enabled;
+	struct led_trigger *flash_trigger[CAM_FLASH_MAX_LED_TRIGGERS];
+	struct led_trigger *torch_trigger[CAM_FLASH_MAX_LED_TRIGGERS];
+};
+
+#endif /*_CAM_FLASH_DEV_H_*/
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.c
new file mode 100644
index 0000000..a9ab169
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.c
@@ -0,0 +1,224 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include "cam_flash_soc.h"
+
+/**
+ * cam_get_source_node_info() - Parse switch/flash/torch sources from DT.
+ * @of_node:     device node of the flash device
+ * @fctrl:       flash controller to populate with trigger handles
+ * @soc_private: per-device SOC data to populate with names/limits
+ *
+ * Per-source property-read failures are best-effort: they are logged and
+ * the source is skipped. Only an out-of-range LED count is a hard error.
+ *
+ * Return: 0 on success, negative errno on hard failure.
+ */
+static int32_t cam_get_source_node_info(
+	struct device_node *of_node,
+	struct cam_flash_ctrl *fctrl,
+	struct cam_flash_private_soc *soc_private)
+{
+	int32_t rc = 0;
+	uint32_t count = 0, i = 0;
+	struct device_node *flash_src_node = NULL;
+	struct device_node *torch_src_node = NULL;
+	struct device_node *switch_src_node = NULL;
+
+	switch_src_node = of_parse_phandle(of_node, "switch-source", 0);
+	if (!switch_src_node) {
+		CAM_DBG(CAM_FLASH, "switch_src_node NULL");
+	} else {
+		rc = of_property_read_string(switch_src_node,
+			"qcom,default-led-trigger",
+			&soc_private->switch_trigger_name);
+		if (rc) {
+			CAM_ERR(CAM_FLASH,
+				"default-led-trigger read failed rc=%d", rc);
+		} else {
+			CAM_DBG(CAM_FLASH, "switch trigger %s",
+				soc_private->switch_trigger_name);
+			led_trigger_register_simple(
+				soc_private->switch_trigger_name,
+				&fctrl->switch_trigger);
+		}
+
+		of_node_put(switch_src_node);
+	}
+
+	if (of_get_property(of_node, "flash-source", &count)) {
+		count /= sizeof(uint32_t);
+
+		if (count > CAM_FLASH_MAX_LED_TRIGGERS) {
+			CAM_ERR(CAM_FLASH, "Invalid LED count: %d", count);
+			return -EINVAL;
+		}
+
+		fctrl->flash_num_sources = count;
+
+		for (i = 0; i < count; i++) {
+			flash_src_node = of_parse_phandle(of_node,
+				"flash-source", i);
+			if (!flash_src_node) {
+				CAM_WARN(CAM_FLASH, "flash_src_node NULL");
+				continue;
+			}
+
+			rc = of_property_read_string(flash_src_node,
+				"qcom,default-led-trigger",
+				&soc_private->flash_trigger_name[i]);
+			if (rc) {
+				/* BUGFIX: message typo "defalut" corrected */
+				CAM_WARN(CAM_FLASH,
+					"default-led-trigger read failed rc=%d", rc);
+				of_node_put(flash_src_node);
+				continue;
+			}
+
+			CAM_DBG(CAM_FLASH, "default trigger %s",
+				soc_private->flash_trigger_name[i]);
+
+			/* Read operational-current */
+			rc = of_property_read_u32(flash_src_node,
+				"qcom,current-ma",
+				&soc_private->flash_op_current[i]);
+			if (rc) {
+				CAM_WARN(CAM_FLASH, "op-current: read failed");
+				of_node_put(flash_src_node);
+				continue;
+			}
+
+			/* Read max-current */
+			rc = of_property_read_u32(flash_src_node,
+				"qcom,max-current",
+				&soc_private->flash_max_current[i]);
+			if (rc) {
+				CAM_WARN(CAM_FLASH,
+					"max-current: read failed");
+				of_node_put(flash_src_node);
+				continue;
+			}
+
+			/* Read max-duration */
+			rc = of_property_read_u32(flash_src_node,
+				"qcom,duration-ms",
+				&soc_private->flash_max_duration[i]);
+			if (rc)
+				CAM_WARN(CAM_FLASH,
+					"max-duration: read failed");
+
+			of_node_put(flash_src_node);
+
+			CAM_DBG(CAM_FLASH, "max_current[%d]: %d",
+				i, soc_private->flash_max_current[i]);
+
+			led_trigger_register_simple(
+				soc_private->flash_trigger_name[i],
+				&fctrl->flash_trigger[i]);
+		}
+	}
+
+	if (of_get_property(of_node, "torch-source", &count)) {
+		count /= sizeof(uint32_t);
+		if (count > CAM_FLASH_MAX_LED_TRIGGERS) {
+			CAM_ERR(CAM_FLASH, "Invalid LED count : %d", count);
+			return -EINVAL;
+		}
+
+		fctrl->torch_num_sources = count;
+
+		CAM_DBG(CAM_FLASH, "torch_num_sources = %d",
+			fctrl->torch_num_sources);
+		for (i = 0; i < count; i++) {
+			torch_src_node = of_parse_phandle(of_node,
+				"torch-source", i);
+			if (!torch_src_node) {
+				CAM_WARN(CAM_FLASH, "torch_src_node NULL");
+				continue;
+			}
+
+			rc = of_property_read_string(torch_src_node,
+				"qcom,default-led-trigger",
+				&soc_private->torch_trigger_name[i]);
+			if (rc < 0) {
+				CAM_WARN(CAM_FLASH,
+					"default-trigger read failed");
+				of_node_put(torch_src_node);
+				continue;
+			}
+
+			/* Read operational-current */
+			rc = of_property_read_u32(torch_src_node,
+				"qcom,current-ma",
+				&soc_private->torch_op_current[i]);
+			if (rc < 0) {
+				CAM_WARN(CAM_FLASH, "current: read failed");
+				of_node_put(torch_src_node);
+				continue;
+			}
+
+			/* Read max-current */
+			rc = of_property_read_u32(torch_src_node,
+				"qcom,max-current",
+				&soc_private->torch_max_current[i]);
+			if (rc < 0) {
+				CAM_WARN(CAM_FLASH,
+					"max-current: read failed");
+				of_node_put(torch_src_node);
+				continue;
+			}
+
+			of_node_put(torch_src_node);
+
+			CAM_DBG(CAM_FLASH, "max_current[%d]: %d",
+				i, soc_private->torch_max_current[i]);
+
+			led_trigger_register_simple(
+				soc_private->torch_trigger_name[i],
+				&fctrl->torch_trigger[i]);
+		}
+	}
+
+	/* BUGFIX: per-source read failures above are warn-and-skip by
+	 * design, but a stale negative rc from the last skipped source was
+	 * previously returned, failing the whole probe. All hard errors
+	 * return early, so report success here.
+	 */
+	return 0;
+}
+
+/**
+ * cam_flash_get_dt_data() - Read DT properties and allocate SOC private.
+ * @fctrl:    flash controller (provides pdev->dev.of_node)
+ * @soc_info: SOC info to populate; soc_private is allocated here and
+ *            owned by the caller on success
+ *
+ * Return: 0 on success, negative errno on failure (soc_private freed and
+ * cleared on the error path).
+ */
+int cam_flash_get_dt_data(struct cam_flash_ctrl *fctrl,
+	struct cam_hw_soc_info *soc_info)
+{
+	int32_t rc = 0;
+	struct device_node *of_node = NULL;
+
+	if (!fctrl) {
+		CAM_ERR(CAM_FLASH, "NULL flash control structure");
+		return -EINVAL;
+	}
+
+	of_node = fctrl->pdev->dev.of_node;
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc < 0) {
+		CAM_ERR(CAM_FLASH, "Get_dt_properties failed rc %d", rc);
+		return rc;
+	}
+
+	soc_info->soc_private =
+		kzalloc(sizeof(struct cam_flash_private_soc), GFP_KERNEL);
+	if (!soc_info->soc_private) {
+		rc = -ENOMEM;
+		goto release_soc_res;
+	}
+
+	rc = cam_get_source_node_info(of_node, fctrl, soc_info->soc_private);
+	if (rc < 0) {
+		CAM_ERR(CAM_FLASH,
+			"cam_flash_get_pmic_source_info failed rc %d", rc);
+		goto free_soc_private;
+	}
+
+	return rc;
+
+free_soc_private:
+	kfree(soc_info->soc_private);
+	/* BUGFIX: clear the freed pointer so later cleanup paths cannot
+	 * double-free or dereference dangling memory.
+	 */
+	soc_info->soc_private = NULL;
+release_soc_res:
+	cam_soc_util_release_platform_resource(soc_info);
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.h
new file mode 100644
index 0000000..2e1da69
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.h
@@ -0,0 +1,21 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_FLASH_SOC_H_
+#define _CAM_FLASH_SOC_H_
+
+#include "cam_flash_dev.h"
+
+int cam_flash_get_dt_data(struct cam_flash_ctrl *fctrl,
+ struct cam_hw_soc_info *soc_info);
+
+#endif /*_CAM_FLASH_SOC_H_*/
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/Makefile
index d8c75fb..5a9441f 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/Makefile
@@ -1,8 +1,10 @@
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_dev.o cam_sensor_core.o cam_sensor_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
index 4fc3aa1..f6e6a9a 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
@@ -13,7 +13,9 @@
#include <linux/module.h>
#include <cam_sensor_cmn_header.h>
#include "cam_sensor_core.h"
-#include <cam_sensor_util.h>
+#include "cam_sensor_util.h"
+#include "cam_soc_util.h"
+#include "cam_trace.h"
static int32_t cam_sensor_i2c_pkt_parse(struct cam_sensor_ctrl_t *s_ctrl,
void *arg)
@@ -33,8 +35,7 @@
ioctl_ctrl = (struct cam_control *)arg;
if (ioctl_ctrl->handle_type != CAM_HANDLE_USER_POINTER) {
- pr_err("%s:%d :Error: Invalid Handle Type\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "Invalid Handle Type");
return -EINVAL;
}
@@ -47,22 +48,21 @@
(uint64_t *)&generic_ptr,
&len_of_buff);
if (rc < 0) {
- pr_err("%s:%d :Error: Failed in getting the buffer: %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "Failed in getting the buffer: %d", rc);
return rc;
}
csl_packet = (struct cam_packet *)(generic_ptr +
config.offset);
if (config.offset > len_of_buff) {
- pr_err("%s: %d offset is out of bounds: off: %lld len: %zu\n",
- __func__, __LINE__, config.offset, len_of_buff);
+ CAM_ERR(CAM_SENSOR,
+ "offset is out of bounds: off: %lld len: %zu",
+ config.offset, len_of_buff);
return -EINVAL;
}
i2c_data = &(s_ctrl->i2c_data);
- CDBG("%s:%d Header OpCode: %d\n",
- __func__, __LINE__, csl_packet->header.op_code);
+ CAM_DBG(CAM_SENSOR, "Header OpCode: %d", csl_packet->header.op_code);
if ((csl_packet->header.op_code & 0xFFFFFF) ==
CAM_SENSOR_PACKET_OPCODE_SENSOR_INITIAL_CONFIG) {
i2c_reg_settings = &i2c_data->init_settings;
@@ -74,16 +74,16 @@
&i2c_data->
per_frame[csl_packet->header.request_id %
MAX_PER_FRAME_ARRAY];
- CDBG("%s:%d Received Packet: %lld\n", __func__, __LINE__,
+ CAM_DBG(CAM_SENSOR, "Received Packet: %lld",
csl_packet->header.request_id % MAX_PER_FRAME_ARRAY);
if (i2c_reg_settings->is_settings_valid == 1) {
- pr_err("%s:%d :Error: Already some pkt in offset req : %lld\n",
- __func__, __LINE__,
+ CAM_ERR(CAM_SENSOR,
+ "Already some pkt in offset req : %lld",
csl_packet->header.request_id);
rc = delete_request(i2c_reg_settings);
if (rc < 0) {
- pr_err("%s: %d :Error: Failed in Deleting the err: %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR,
+ "Failed in Deleting the err: %d", rc);
return rc;
}
}
@@ -95,7 +95,7 @@
CAM_PKT_NOP_OPCODE) {
goto update_req_mgr;
} else {
- pr_err("%s:%d Invalid Packet Header\n", __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "Invalid Packet Header");
return -EINVAL;
}
@@ -105,8 +105,7 @@
rc = cam_sensor_i2c_pkt_parser(i2c_reg_settings, cmd_desc, 1);
if (rc < 0) {
- pr_err("%s:%d :Error: Fail parsing I2C Pkt: %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "Fail parsing I2C Pkt: %d", rc);
return rc;
}
@@ -116,14 +115,14 @@
CAM_SENSOR_PACKET_OPCODE_SENSOR_UPDATE)) {
add_req.link_hdl = s_ctrl->bridge_intf.link_hdl;
add_req.req_id = csl_packet->header.request_id;
- CDBG("%s:%d Rxed Req Id: %lld\n",
- __func__, __LINE__, csl_packet->header.request_id);
+ CAM_DBG(CAM_SENSOR, " Rxed Req Id: %lld",
+ csl_packet->header.request_id);
add_req.dev_hdl = s_ctrl->bridge_intf.device_hdl;
if (s_ctrl->bridge_intf.crm_cb &&
s_ctrl->bridge_intf.crm_cb->add_req)
s_ctrl->bridge_intf.crm_cb->add_req(&add_req);
- CDBG("%s:%d add req to req mgr: %lld\n",
- __func__, __LINE__, add_req.req_id);
+ CAM_DBG(CAM_SENSOR, " add req to req mgr: %lld",
+ add_req.req_id);
}
return rc;
}
@@ -137,7 +136,8 @@
if (s_ctrl->io_master_info.master_type == CCI_MASTER) {
cci_client = s_ctrl->io_master_info.cci_client;
if (!cci_client) {
- pr_err("failed: cci_client %pK", cci_client);
+ CAM_ERR(CAM_SENSOR, "failed: cci_client %pK",
+ cci_client);
return -EINVAL;
}
cci_client->cci_i2c_master = s_ctrl->cci_i2c_master;
@@ -145,8 +145,7 @@
cci_client->retries = 3;
cci_client->id_map = 0;
cci_client->i2c_freq_mode = i2c_info->i2c_freq_mode;
- CDBG("%s:%d Master: %d sid: %d freq_mode: %d\n",
- __func__, __LINE__,
+ CAM_DBG(CAM_SENSOR, " Master: %d sid: %d freq_mode: %d",
cci_client->cci_i2c_master, i2c_info->slave_addr,
i2c_info->i2c_freq_mode);
}
@@ -168,211 +167,14 @@
s_ctrl->sensor_probe_addr_type = probe_info->addr_type;
s_ctrl->sensor_probe_data_type = probe_info->data_type;
- CDBG("%s:%d Sensor Addr: 0x%x sensor_id: 0x%x sensor_mask: 0x%x\n",
- __func__, __LINE__,
+ CAM_DBG(CAM_SENSOR,
+ "Sensor Addr: 0x%x sensor_id: 0x%x sensor_mask: 0x%x",
s_ctrl->sensordata->slave_info.sensor_id_reg_addr,
s_ctrl->sensordata->slave_info.sensor_id,
s_ctrl->sensordata->slave_info.sensor_id_mask);
return rc;
}
-int32_t cam_sensor_update_power_settings(void *cmd_buf,
- int cmd_length, struct cam_sensor_ctrl_t *s_ctrl)
-{
- int32_t rc = 0, tot_size = 0, last_cmd_type = 0;
- int32_t i = 0, pwr_up = 0, pwr_down = 0;
- void *ptr = cmd_buf, *scr;
- struct cam_cmd_power *pwr_cmd = (struct cam_cmd_power *)cmd_buf;
- struct common_header *cmm_hdr = (struct common_header *)cmd_buf;
- struct cam_sensor_power_ctrl_t *power_info =
- &s_ctrl->sensordata->power_info;
-
- if (!pwr_cmd || !cmd_length) {
- pr_err("%s:%d Invalid Args: pwr_cmd %pK, cmd_length: %d\n",
- __func__, __LINE__, pwr_cmd, cmd_length);
- return -EINVAL;
- }
-
- power_info->power_setting_size = 0;
- power_info->power_setting =
- (struct cam_sensor_power_setting *)
- kzalloc(sizeof(struct cam_sensor_power_setting) *
- MAX_POWER_CONFIG, GFP_KERNEL);
- if (!power_info->power_setting)
- return -ENOMEM;
-
- power_info->power_down_setting =
- (struct cam_sensor_power_setting *)
- kzalloc(sizeof(struct cam_sensor_power_setting) *
- MAX_POWER_CONFIG, GFP_KERNEL);
- if (!power_info->power_down_setting) {
- rc = -ENOMEM;
- goto free_power_settings;
- }
-
- while (tot_size < cmd_length) {
- if (cmm_hdr->cmd_type ==
- CAMERA_SENSOR_CMD_TYPE_PWR_UP) {
- struct cam_cmd_power *pwr_cmd =
- (struct cam_cmd_power *)ptr;
-
- power_info->
- power_setting_size +=
- pwr_cmd->count;
- scr = ptr + sizeof(struct cam_cmd_power);
- tot_size = tot_size + sizeof(struct cam_cmd_power);
-
- if (pwr_cmd->count == 0)
- CDBG("%s:%d Un expected Command\n",
- __func__, __LINE__);
-
- for (i = 0; i < pwr_cmd->count; i++, pwr_up++) {
- power_info->
- power_setting[pwr_up].seq_type =
- pwr_cmd->power_settings[i].
- power_seq_type;
- power_info->
- power_setting[pwr_up].config_val =
- pwr_cmd->power_settings[i].
- config_val_low;
- power_info->power_setting[pwr_up].delay = 0;
- if (i) {
- scr = scr +
- sizeof(
- struct cam_power_settings);
- tot_size = tot_size +
- sizeof(
- struct cam_power_settings);
- }
- if (tot_size > cmd_length) {
- pr_err("%s:%d :Error: Command Buffer is wrong\n",
- __func__, __LINE__);
- rc = -EINVAL;
- goto free_power_down_settings;
- }
- CDBG("Seq Type[%d]: %d Config_val: %ldn",
- pwr_up,
- power_info->
- power_setting[pwr_up].seq_type,
- power_info->
- power_setting[pwr_up].
- config_val);
- }
- last_cmd_type = CAMERA_SENSOR_CMD_TYPE_PWR_UP;
- ptr = (void *) scr;
- cmm_hdr = (struct common_header *)ptr;
- } else if (cmm_hdr->cmd_type == CAMERA_SENSOR_CMD_TYPE_WAIT) {
- struct cam_cmd_unconditional_wait *wait_cmd =
- (struct cam_cmd_unconditional_wait *)ptr;
- if (wait_cmd->op_code ==
- CAMERA_SENSOR_WAIT_OP_SW_UCND) {
- if (last_cmd_type ==
- CAMERA_SENSOR_CMD_TYPE_PWR_UP) {
- if (pwr_up > 0)
- power_info->
- power_setting
- [pwr_up - 1].delay +=
- wait_cmd->delay;
- else
- pr_err("%s:%d Delay is expected only after valid power up setting\n",
- __func__, __LINE__);
- } else if (last_cmd_type ==
- CAMERA_SENSOR_CMD_TYPE_PWR_DOWN) {
- if (pwr_down > 0)
- power_info->
- power_down_setting
- [pwr_down - 1].delay +=
- wait_cmd->delay;
- else
- pr_err("%s:%d Delay is expected only after valid power down setting\n",
- __func__, __LINE__);
- }
- } else
- CDBG("%s:%d Invalid op code: %d\n",
- __func__, __LINE__, wait_cmd->op_code);
- tot_size = tot_size +
- sizeof(struct cam_cmd_unconditional_wait);
- if (tot_size > cmd_length) {
- pr_err("Command Buffer is wrong\n");
- return -EINVAL;
- }
- scr = (void *) (wait_cmd);
- ptr = (void *)
- (scr +
- sizeof(struct cam_cmd_unconditional_wait));
- CDBG("%s:%d ptr: %pK sizeof: %d Next: %pK\n",
- __func__, __LINE__, scr,
- (int32_t)sizeof(
- struct cam_cmd_unconditional_wait), ptr);
-
- cmm_hdr = (struct common_header *)ptr;
- } else if (cmm_hdr->cmd_type ==
- CAMERA_SENSOR_CMD_TYPE_PWR_DOWN) {
- struct cam_cmd_power *pwr_cmd =
- (struct cam_cmd_power *)ptr;
-
- scr = ptr + sizeof(struct cam_cmd_power);
- tot_size = tot_size + sizeof(struct cam_cmd_power);
- power_info->power_down_setting_size += pwr_cmd->count;
-
- if (pwr_cmd->count == 0)
- pr_err("%s:%d Invalid Command\n",
- __func__, __LINE__);
-
- for (i = 0; i < pwr_cmd->count; i++, pwr_down++) {
- power_info->
- power_down_setting[pwr_down].
- seq_type =
- pwr_cmd->power_settings[i].
- power_seq_type;
- power_info->
- power_down_setting[pwr_down].
- config_val =
- pwr_cmd->power_settings[i].
- config_val_low;
- power_info->
- power_down_setting[pwr_down].delay = 0;
- if (i) {
- scr = scr +
- sizeof(
- struct cam_power_settings);
- tot_size =
- tot_size +
- sizeof(
- struct cam_power_settings);
- }
- if (tot_size > cmd_length) {
- pr_err("Command Buffer is wrong\n");
- rc = -EINVAL;
- goto free_power_down_settings;
- }
- CDBG("%s:%d Seq Type[%d]: %d Config_val: %ldn",
- __func__, __LINE__,
- pwr_down,
- power_info->
- power_down_setting[pwr_down].
- seq_type,
- power_info->
- power_down_setting[pwr_down].
- config_val);
- }
- last_cmd_type = CAMERA_SENSOR_CMD_TYPE_PWR_DOWN;
- ptr = (void *) scr;
- cmm_hdr = (struct common_header *)ptr;
- } else {
- pr_err("%s:%d: :Error: Un expected Header Type: %d\n",
- __func__, __LINE__, cmm_hdr->cmd_type);
- }
- }
-
- return rc;
-free_power_down_settings:
- kfree(power_info->power_down_setting);
-free_power_settings:
- kfree(power_info->power_setting);
- return rc;
-}
-
int32_t cam_handle_cmd_buffers_for_probe(void *cmd_buf,
struct cam_sensor_ctrl_t *s_ctrl,
int32_t cmd_buf_num, int cmd_buf_length)
@@ -387,16 +189,14 @@
i2c_info = (struct cam_cmd_i2c_info *)cmd_buf;
rc = cam_sensor_update_i2c_info(i2c_info, s_ctrl);
if (rc < 0) {
- pr_err("%s:%d Failed in Updating the i2c Info\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "Failed in Updating the i2c Info");
return rc;
}
probe_info = (struct cam_cmd_probe *)
(cmd_buf + sizeof(struct cam_cmd_i2c_info));
rc = cam_sensor_update_slave_info(probe_info, s_ctrl);
if (rc < 0) {
- pr_err("%s:%d :Error: Updating the slave Info\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "Updating the slave Info");
return rc;
}
cmd_buf = probe_info;
@@ -404,16 +204,16 @@
break;
case 1: {
rc = cam_sensor_update_power_settings(cmd_buf,
- cmd_buf_length, s_ctrl);
+ cmd_buf_length, &s_ctrl->sensordata->power_info);
if (rc < 0) {
- pr_err("Failed in updating power settings\n");
+ CAM_ERR(CAM_SENSOR,
+ "Failed in updating power settings");
return rc;
}
}
break;
default:
- pr_err("%s:%d Invalid command buffer\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "Invalid command buffer");
break;
}
return rc;
@@ -432,21 +232,19 @@
rc = cam_mem_get_cpu_buf(handle,
(uint64_t *)&packet, &len);
if (rc < 0) {
- pr_err("%s: %d Failed to get the command Buffer\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "Failed to get the command Buffer");
return -EINVAL;
}
pkt = (struct cam_packet *)packet;
cmd_desc = (struct cam_cmd_buf_desc *)
((uint32_t *)&pkt->payload + pkt->cmd_buf_offset/4);
if (cmd_desc == NULL) {
- pr_err("%s: %d command descriptor pos is invalid\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "command descriptor pos is invalid");
return -EINVAL;
}
if (pkt->num_cmd_buf != 2) {
- pr_err("%s: %d Expected More Command Buffers : %d\n",
- __func__, __LINE__, pkt->num_cmd_buf);
+ CAM_ERR(CAM_SENSOR, "Expected More Command Buffers : %d",
+ pkt->num_cmd_buf);
return -EINVAL;
}
for (i = 0; i < pkt->num_cmd_buf; i++) {
@@ -455,8 +253,8 @@
rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
(uint64_t *)&cmd_buf1, &len);
if (rc < 0) {
- pr_err("%s: %d Failed to parse the command Buffer Header\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR,
+ "Failed to parse the command Buffer Header");
return -EINVAL;
}
cmd_buf = (uint32_t *)cmd_buf1;
@@ -466,8 +264,8 @@
rc = cam_handle_cmd_buffers_for_probe(ptr, s_ctrl,
i, cmd_desc[i].length);
if (rc < 0) {
- pr_err("%s: %d Failed to parse the command Buffer Header\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR,
+ "Failed to parse the command Buffer Header");
return -EINVAL;
}
}
@@ -492,7 +290,7 @@
query_cap->ois_slot_id =
s_ctrl->sensordata->subdev_id[SUB_MODULE_OIS];
query_cap->slot_info =
- s_ctrl->id;
+ s_ctrl->soc_info.index;
}
static uint16_t cam_sensor_id_by_mask(struct cam_sensor_ctrl_t *s_ctrl,
@@ -523,8 +321,8 @@
slave_info = &(s_ctrl->sensordata->slave_info);
if (!slave_info) {
- pr_err("%s:%d failed: %pK\n",
- __func__, __LINE__, slave_info);
+ CAM_ERR(CAM_SENSOR, " failed: %pK",
+ slave_info);
return -EINVAL;
}
@@ -534,11 +332,11 @@
&chipid, CAMERA_SENSOR_I2C_TYPE_WORD,
CAMERA_SENSOR_I2C_TYPE_WORD);
- CDBG("%s:%d read id: 0x%x expected id 0x%x:\n",
- __func__, __LINE__, chipid, slave_info->sensor_id);
+ CAM_DBG(CAM_SENSOR, "read id: 0x%x expected id 0x%x:",
+ chipid, slave_info->sensor_id);
if (cam_sensor_id_by_mask(s_ctrl, chipid) != slave_info->sensor_id) {
- pr_err("%s: chip id %x does not match %x\n",
- __func__, chipid, slave_info->sensor_id);
+ CAM_ERR(CAM_SENSOR, "chip id %x does not match %x",
+ chipid, slave_info->sensor_id);
return -ENODEV;
}
return rc;
@@ -555,8 +353,7 @@
&s_ctrl->sensordata->power_info;
if (!s_ctrl || !arg) {
- pr_err("%s: %d s_ctrl is NULL\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "s_ctrl is NULL");
return -EINVAL;
}
@@ -564,7 +361,8 @@
switch (cmd->op_code) {
case CAM_SENSOR_PROBE_CMD: {
if (s_ctrl->is_probe_succeed == 1) {
- pr_err("Already Sensor Probed in the slot\n");
+ CAM_ERR(CAM_SENSOR,
+ "Already Sensor Probed in the slot");
break;
}
/* Allocate memory for power up setting */
@@ -590,26 +388,25 @@
CAM_HANDLE_MEM_HANDLE) {
rc = cam_handle_mem_ptr(cmd->handle, s_ctrl);
if (rc < 0) {
- pr_err("%s: %d Get Buffer Handle Failed\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "Get Buffer Handle Failed");
kfree(pu);
kfree(pd);
goto release_mutex;
}
} else {
- pr_err("%s:%d :Error: Invalid Command Type: %d",
- __func__, __LINE__, cmd->handle_type);
+ CAM_ERR(CAM_SENSOR, "Invalid Command Type: %d",
+ cmd->handle_type);
}
/* Parse and fill vreg params for powerup settings */
rc = msm_camera_fill_vreg_params(
- s_ctrl->sensordata->power_info.cam_vreg,
- s_ctrl->sensordata->power_info.num_vreg,
+ &s_ctrl->soc_info,
s_ctrl->sensordata->power_info.power_setting,
s_ctrl->sensordata->power_info.power_setting_size);
if (rc < 0) {
- pr_err("%s:%d :Error: Fail in filling vreg params for PUP rc %d",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR,
+ "Fail in filling vreg params for PUP rc %d",
+ rc);
kfree(pu);
kfree(pd);
goto release_mutex;
@@ -617,13 +414,13 @@
/* Parse and fill vreg params for powerdown settings*/
rc = msm_camera_fill_vreg_params(
- s_ctrl->sensordata->power_info.cam_vreg,
- s_ctrl->sensordata->power_info.num_vreg,
+ &s_ctrl->soc_info,
s_ctrl->sensordata->power_info.power_down_setting,
s_ctrl->sensordata->power_info.power_down_setting_size);
if (rc < 0) {
- pr_err("%s:%d :Error: Fail in filling vreg params for PDOWN rc %d",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR,
+ "Fail in filling vreg params for PDOWN rc %d",
+ rc);
kfree(pu);
kfree(pd);
goto release_mutex;
@@ -632,7 +429,7 @@
/* Power up and probe sensor */
rc = cam_sensor_power_up(s_ctrl);
if (rc < 0) {
- pr_err("power up failed");
+ CAM_ERR(CAM_SENSOR, "power up failed");
cam_sensor_power_down(s_ctrl);
kfree(pu);
kfree(pd);
@@ -649,12 +446,11 @@
goto release_mutex;
}
- CDBG("%s:%d Probe Succeeded on the slot: %d\n",
- __func__, __LINE__, s_ctrl->id);
+ CAM_DBG(CAM_SENSOR, "Probe Succeeded on the slot: %d",
+ s_ctrl->soc_info.index);
rc = cam_sensor_power_down(s_ctrl);
if (rc < 0) {
- pr_err("%s:%d :Error: fail in Sensor Power Down\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "fail in Sensor Power Down");
kfree(pu);
kfree(pd);
goto release_mutex;
@@ -671,15 +467,14 @@
struct cam_create_dev_hdl bridge_params;
if (s_ctrl->bridge_intf.device_hdl != -1) {
- pr_err("%s:%d Device is already acquired\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "Device is already acquired");
rc = -EINVAL;
goto release_mutex;
}
rc = copy_from_user(&sensor_acq_dev,
(void __user *) cmd->handle, sizeof(sensor_acq_dev));
if (rc < 0) {
- pr_err("Failed Copying from user\n");
+ CAM_ERR(CAM_SENSOR, "Failed Copying from user");
goto release_mutex;
}
@@ -694,11 +489,11 @@
s_ctrl->bridge_intf.device_hdl = sensor_acq_dev.device_handle;
s_ctrl->bridge_intf.session_hdl = sensor_acq_dev.session_handle;
- CDBG("%s:%d Device Handle: %d\n", __func__, __LINE__,
+ CAM_DBG(CAM_SENSOR, "Device Handle: %d",
sensor_acq_dev.device_handle);
if (copy_to_user((void __user *) cmd->handle, &sensor_acq_dev,
sizeof(struct cam_sensor_acquire_dev))) {
- pr_err("Failed Copy to User\n");
+ CAM_ERR(CAM_SENSOR, "Failed Copy to User");
rc = -EFAULT;
goto release_mutex;
}
@@ -706,8 +501,8 @@
break;
case CAM_RELEASE_DEV: {
if (s_ctrl->bridge_intf.device_hdl == -1) {
- pr_err("%s:%d Invalid Handles: link hdl: %d device hdl: %d\n",
- __func__, __LINE__,
+ CAM_ERR(CAM_SENSOR,
+ "Invalid Handles: link hdl: %d device hdl: %d",
s_ctrl->bridge_intf.device_hdl,
s_ctrl->bridge_intf.link_hdl);
rc = -EINVAL;
@@ -715,8 +510,8 @@
}
rc = cam_destroy_device_hdl(s_ctrl->bridge_intf.device_hdl);
if (rc < 0)
- pr_err("%s:%d Failed in destroying the device hdl\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR,
+ "failed in destroying the device hdl");
s_ctrl->bridge_intf.device_hdl = -1;
s_ctrl->bridge_intf.link_hdl = -1;
s_ctrl->bridge_intf.session_hdl = -1;
@@ -728,7 +523,7 @@
cam_sensor_query_cap(s_ctrl, &sensor_cap);
if (copy_to_user((void __user *) cmd->handle, &sensor_cap,
sizeof(struct cam_sensor_query_cap))) {
- pr_err("Failed Copy to User\n");
+ CAM_ERR(CAM_SENSOR, "Failed Copy to User");
rc = -EFAULT;
goto release_mutex;
}
@@ -737,19 +532,18 @@
case CAM_START_DEV: {
rc = cam_sensor_power_up(s_ctrl);
if (rc < 0) {
- pr_err("%s:%d :Error: Sensor Power up failed\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "Sensor Power up failed");
goto release_mutex;
}
rc = cam_sensor_apply_settings(s_ctrl, 0);
if (rc < 0) {
- pr_err("cannot apply settings\n");
+ CAM_ERR(CAM_SENSOR, "cannot apply settings");
goto release_mutex;
}
rc = delete_request(&s_ctrl->i2c_data.init_settings);
if (rc < 0) {
- pr_err("%s:%d Fail in deleting the Init settings\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR,
+ "Fail in deleting the Init settings");
rc = -EINVAL;
goto release_mutex;
}
@@ -758,8 +552,7 @@
case CAM_STOP_DEV: {
rc = cam_sensor_power_down(s_ctrl);
if (rc < 0) {
- pr_err("%s:%d Sensor Power Down failed\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "Sensor Power Down failed");
goto release_mutex;
}
}
@@ -767,8 +560,7 @@
case CAM_CONFIG_DEV: {
rc = cam_sensor_i2c_pkt_parse(s_ctrl, arg);
if (rc < 0) {
- pr_err("%s:%d :Error: Failed CCI Config: %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "Failed CCI Config: %d", rc);
goto release_mutex;
}
}
@@ -776,8 +568,7 @@
case CAM_SD_SHUTDOWN:
break;
default:
- pr_err("%s:%d :Error: Invalid Opcode: %d\n",
- __func__, __LINE__, cmd->op_code);
+ CAM_ERR(CAM_SENSOR, "Invalid Opcode: %d", cmd->op_code);
rc = -EINVAL;
goto release_mutex;
}
@@ -811,7 +602,7 @@
s_ctrl = (struct cam_sensor_ctrl_t *)
cam_get_device_priv(link->dev_hdl);
if (!s_ctrl) {
- pr_err("%s: Device data is NULL\n", __func__);
+ CAM_ERR(CAM_SENSOR, "Device data is NULL");
return -EINVAL;
}
if (link->link_enable) {
@@ -844,10 +635,11 @@
int rc;
struct cam_sensor_power_ctrl_t *power_info;
struct cam_camera_slave_info *slave_info;
+ struct cam_hw_soc_info *soc_info =
+ &s_ctrl->soc_info;
if (!s_ctrl) {
- pr_err("%s:%d failed: %pK\n",
- __func__, __LINE__, s_ctrl);
+ CAM_ERR(CAM_SENSOR, "failed: %pK", s_ctrl);
return -EINVAL;
}
@@ -855,23 +647,20 @@
slave_info = &(s_ctrl->sensordata->slave_info);
if (!power_info || !slave_info) {
- pr_err("%s:%d failed: %pK %pK\n",
- __func__, __LINE__, power_info,
- slave_info);
+ CAM_ERR(CAM_SENSOR, "failed: %pK %pK", power_info, slave_info);
return -EINVAL;
}
- rc = cam_sensor_core_power_up(power_info);
+ rc = cam_sensor_core_power_up(power_info, soc_info);
if (rc < 0) {
- pr_err("%s:%d power up the core is failed:%d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "power up the core is failed:%d", rc);
return rc;
}
if (s_ctrl->io_master_info.master_type == CCI_MASTER) {
rc = camera_io_init(&(s_ctrl->io_master_info));
if (rc < 0) {
- pr_err("%s cci_init failed\n", __func__);
+ CAM_ERR(CAM_SENSOR, "cci_init failed");
return -EINVAL;
}
}
@@ -884,25 +673,24 @@
int cam_sensor_power_down(struct cam_sensor_ctrl_t *s_ctrl)
{
struct cam_sensor_power_ctrl_t *power_info;
+ struct cam_hw_soc_info *soc_info;
int rc = 0;
if (!s_ctrl) {
- pr_err("%s:%d failed: s_ctrl %pK\n",
- __func__, __LINE__, s_ctrl);
+ CAM_ERR(CAM_SENSOR, "failed: s_ctrl %pK", s_ctrl);
return -EINVAL;
}
power_info = &s_ctrl->sensordata->power_info;
+ soc_info = &s_ctrl->soc_info;
if (!power_info) {
- pr_err("%s:%d failed: power_info %pK\n",
- __func__, __LINE__, power_info);
+ CAM_ERR(CAM_SENSOR, "failed: power_info %pK", power_info);
return -EINVAL;
}
- rc = msm_camera_power_down(power_info);
+ rc = msm_camera_power_down(power_info, soc_info);
if (rc < 0) {
- pr_err("%s:%d power down the core is failed:%d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "power down the core is failed:%d", rc);
return rc;
}
@@ -930,15 +718,16 @@
&(s_ctrl->io_master_info),
&(i2c_list->i2c_settings));
if (rc < 0) {
- pr_err("Failed to write the I2C settings\n");
+ CAM_ERR(CAM_SENSOR,
+ "Failed to write the I2C settings");
return rc;
}
}
rc = delete_request(&(s_ctrl->i2c_data.init_settings));
i2c_set->is_settings_valid = 0;
if (rc < 0) {
- pr_err("%s:%d :Error: Failed in deleting the Init request: %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR,
+ "Failed in deleting the Init request: %d", rc);
}
}
} else {
@@ -952,8 +741,9 @@
&(s_ctrl->io_master_info),
&(i2c_list->i2c_settings));
if (rc < 0) {
- pr_err("%s:%d :Error: Fail to write the I2C settings: %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR,
+ "Fail to write the I2C settings: %d",
+ rc);
return rc;
}
}
@@ -961,8 +751,8 @@
MAX_PER_FRAME_ARRAY -
MAX_SYSTEM_PIPELINE_DELAY) %
MAX_PER_FRAME_ARRAY;
- CDBG("%s:%d Deleting the Request: %d\n",
- __func__, __LINE__, del_req_id);
+ CAM_DBG(CAM_SENSOR, "Deleting the Request: %d",
+ del_req_id);
if (req_id >
s_ctrl->i2c_data.per_frame[del_req_id].
request_id) {
@@ -972,13 +762,13 @@
&(s_ctrl->i2c_data.
per_frame[del_req_id]));
if (rc < 0)
- pr_err("%s:%d :Error: Failed in deleting the request: %d rc: %d\n",
- __func__, __LINE__,
+ CAM_ERR(CAM_SENSOR,
+ "Delete request Fail:%d rc:%d",
del_req_id, rc);
}
} else {
- CDBG("%s:%d Invalid/NOP request to apply: %lld\n",
- __func__, __LINE__, req_id);
+ CAM_DBG(CAM_SENSOR,
+ "Invalid/NOP request to apply: %lld", req_id);
}
}
return rc;
@@ -995,11 +785,58 @@
s_ctrl = (struct cam_sensor_ctrl_t *)
cam_get_device_priv(apply->dev_hdl);
if (!s_ctrl) {
- pr_err("%s: Device data is NULL\n", __func__);
+ CAM_ERR(CAM_SENSOR, "Device data is NULL");
return -EINVAL;
}
- CDBG("%s:%d Req Id: %lld\n", __func__, __LINE__,
- apply->request_id);
+ CAM_DBG(CAM_SENSOR, " Req Id: %lld", apply->request_id);
+ trace_cam_apply_req("Sensor", apply);
rc = cam_sensor_apply_settings(s_ctrl, apply->request_id);
return rc;
}
+
+int32_t cam_sensor_flush_request(struct cam_req_mgr_flush_request *flush_req)
+{
+ int32_t rc = 0, i;
+ uint32_t cancel_req_id_found = 0;
+ struct cam_sensor_ctrl_t *s_ctrl = NULL;
+ struct i2c_settings_array *i2c_set = NULL;
+
+ if (!flush_req)
+ return -EINVAL;
+
+ s_ctrl = (struct cam_sensor_ctrl_t *)
+ cam_get_device_priv(flush_req->dev_hdl);
+ if (!s_ctrl) {
+ CAM_ERR(CAM_SENSOR, "Device data is NULL");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
+ i2c_set = &(s_ctrl->i2c_data.per_frame[i]);
+
+ if ((flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ)
+ && (i2c_set->request_id != flush_req->req_id))
+ continue;
+
+ if (i2c_set->is_settings_valid == 1) {
+ rc = delete_request(i2c_set);
+ if (rc < 0)
+ CAM_ERR(CAM_SENSOR,
+ "delete request: %lld rc: %d",
+ i2c_set->request_id, rc);
+
+ if (flush_req->type ==
+ CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
+ cancel_req_id_found = 1;
+ break;
+ }
+ }
+ }
+
+ if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ &&
+ !cancel_req_id_found)
+ CAM_DBG(CAM_SENSOR,
+ "Flush request id:%lld not found in the pending list",
+ flush_req->req_id);
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.h
index b23edce..c8158fa 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.h
@@ -53,6 +53,13 @@
int cam_sensor_apply_request(struct cam_req_mgr_apply_request *apply);
/**
+ * @flush: Req mgr structure for flushing request
+ *
+ * This API flushes the specified request from the sensor's pending list
+ */
+int cam_sensor_flush_request(struct cam_req_mgr_flush_request *flush);
+
+/**
* @info: Sub device info to req mgr
*
* Publish the subdevice info
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.c
index 448ce51..40cf689 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.c
@@ -27,8 +27,7 @@
rc = cam_sensor_driver_cmd(s_ctrl, arg);
break;
default:
- pr_err("%s:%d Invalid ioctl cmd: %d\n",
- __func__, __LINE__, cmd);
+ CAM_ERR(CAM_SENSOR, "Invalid ioctl cmd: %d", cmd);
rc = -EINVAL;
break;
}
@@ -42,8 +41,8 @@
struct cam_sensor_ctrl_t *s_ctrl;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
- pr_err("%s %s :Error: i2c_check_functionality failed\n",
- __func__, client->name);
+ CAM_ERR(CAM_SENSOR,
+ "%s :i2c_check_functionality failed", client->name);
return -EFAULT;
}
@@ -60,8 +59,7 @@
rc = cam_sensor_parse_dt(s_ctrl);
if (rc < 0) {
- pr_err("%s:%d :Error: cam_sensor_parse_dt rc %d",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "cam_sensor_parse_dt rc %d", rc);
goto free_s_ctrl;
}
@@ -77,7 +75,7 @@
s_ctrl = platform_get_drvdata(pdev);
if (!s_ctrl) {
- pr_err("%s: sensor device is NULL\n", __func__);
+ CAM_ERR(CAM_SENSOR, "sensor device is NULL");
return 0;
}
@@ -92,7 +90,7 @@
struct cam_sensor_ctrl_t *s_ctrl = i2c_get_clientdata(client);
if (!s_ctrl) {
- pr_err("%s: sensor device is NULL\n", __func__);
+ CAM_ERR(CAM_SENSOR, "sensor device is NULL");
return 0;
}
@@ -111,7 +109,7 @@
if (copy_from_user(&cmd_data, (void __user *)arg,
sizeof(cmd_data))) {
- pr_err("Failed to copy from user_ptr=%pK size=%zu\n",
+ CAM_ERR(CAM_SENSOR, "Failed to copy from user_ptr=%pK size=%zu",
(void __user *)arg, sizeof(cmd_data));
return -EFAULT;
}
@@ -120,19 +118,18 @@
case VIDIOC_CAM_CONTROL:
rc = cam_sensor_subdev_ioctl(sd, cmd, &cmd_data);
if (rc < 0)
- pr_err("%s:%d cam_sensor_subdev_ioctl failed\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "cam_sensor_subdev_ioctl failed");
break;
default:
- pr_err("%s:%d Invalid compat ioctl cmd_type: %d\n",
- __func__, __LINE__, cmd);
+ CAM_ERR(CAM_SENSOR, "Invalid compat ioctl cmd_type: %d", cmd);
rc = -EINVAL;
}
if (!rc) {
if (copy_to_user((void __user *)arg, &cmd_data,
sizeof(cmd_data))) {
- pr_err("Failed to copy to user_ptr=%pK size=%zu\n",
+ CAM_ERR(CAM_SENSOR,
+ "Failed to copy to user_ptr=%pK size=%zu",
(void __user *)arg, sizeof(cmd_data));
rc = -EFAULT;
}
@@ -166,6 +163,7 @@
{
int32_t rc = 0, i = 0;
struct cam_sensor_ctrl_t *s_ctrl = NULL;
+ struct cam_hw_soc_info *soc_info = NULL;
/* Create sensor control structure */
s_ctrl = devm_kzalloc(&pdev->dev,
@@ -173,6 +171,9 @@
if (!s_ctrl)
return -ENOMEM;
+ soc_info = &s_ctrl->soc_info;
+ soc_info->pdev = pdev;
+
/* Initialize sensor device type */
s_ctrl->of_node = pdev->dev.of_node;
s_ctrl->is_probe_succeed = 0;
@@ -184,12 +185,12 @@
rc = cam_sensor_parse_dt(s_ctrl);
if (rc < 0) {
- pr_err("failed: cam_sensor_parse_dt rc %d", rc);
+ CAM_ERR(CAM_SENSOR, "failed: cam_sensor_parse_dt rc %d", rc);
goto free_s_ctrl;
}
/* Fill platform device id*/
- pdev->id = s_ctrl->id;
+ pdev->id = soc_info->index;
s_ctrl->v4l2_dev_str.internal_ops =
&cam_sensor_internal_ops;
@@ -207,8 +208,7 @@
rc = cam_register_subdev(&(s_ctrl->v4l2_dev_str));
if (rc < 0) {
- pr_err("%s:%d :ERROR: Fail with cam_register_subdev\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "Fail with cam_register_subdev");
goto free_s_ctrl;
}
@@ -230,6 +230,7 @@
s_ctrl->bridge_intf.ops.get_dev_info = cam_sensor_publish_dev_info;
s_ctrl->bridge_intf.ops.link_setup = cam_sensor_establish_link;
s_ctrl->bridge_intf.ops.apply_req = cam_sensor_apply_request;
+ s_ctrl->bridge_intf.ops.flush_req = cam_sensor_flush_request;
s_ctrl->sensordata->power_info.dev = &pdev->dev;
platform_set_drvdata(pdev, s_ctrl);
@@ -273,11 +274,11 @@
rc = platform_driver_register(&cam_sensor_platform_driver);
if (rc)
- pr_err("%s platform_driver_register failed rc = %d",
- __func__, rc);
+ CAM_ERR(CAM_SENSOR, "platform_driver_register failed rc = %d",
+ rc);
rc = i2c_add_driver(&cam_sensor_driver_i2c);
if (rc)
- pr_err("%s i2c_add_driver failed rc = %d", __func__, rc);
+ CAM_ERR(CAM_SENSOR, "i2c_add_driver failed rc = %d", rc);
return rc;
}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.h
index f597c36..f3c70c4 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.h
@@ -30,8 +30,8 @@
#include <cam_cci_dev.h>
#include <cam_sensor_cmn_header.h>
#include <cam_subdev.h>
-#include <cam_sensor_soc_api.h>
#include <cam_sensor_io.h>
+#include "cam_debug_util.h"
#define NUM_MASTERS 2
#define NUM_QUEUES 2
@@ -91,6 +91,7 @@
*/
struct cam_sensor_ctrl_t {
struct platform_device *pdev;
+ struct cam_hw_soc_info soc_info;
struct mutex cam_sensor_mutex;
struct cam_sensor_board_info *sensordata;
enum cci_i2c_master_t cci_i2c_master;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_soc.c
index 8cb1078..c10d634 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_soc.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_soc.c
@@ -17,6 +17,7 @@
#include <cam_sensor_io.h>
#include <cam_req_mgr_util.h>
#include "cam_sensor_soc.h"
+#include "cam_soc_util.h"
int32_t cam_sensor_get_sub_module_index(struct device_node *of_node,
struct cam_sensor_board_info *s_info)
@@ -31,15 +32,14 @@
for (i = 0; i < SUB_MODULE_MAX; i++)
sensor_info->subdev_id[i] = -1;
- src_node = of_parse_phandle(of_node, "qcom,actuator-src", 0);
+ src_node = of_parse_phandle(of_node, "actuator-src", 0);
if (!src_node) {
- CDBG("%s:%d src_node NULL\n", __func__, __LINE__);
+ CAM_DBG(CAM_SENSOR, "src_node NULL");
} else {
rc = of_property_read_u32(src_node, "cell-index", &val);
- CDBG("%s qcom,actuator cell index %d, rc %d\n", __func__,
- val, rc);
+ CAM_DBG(CAM_SENSOR, "actuator cell index %d, rc %d", val, rc);
if (rc < 0) {
- pr_err("%s failed %d\n", __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "failed %d", rc);
of_node_put(src_node);
return rc;
}
@@ -47,15 +47,14 @@
of_node_put(src_node);
}
- src_node = of_parse_phandle(of_node, "qcom,ois-src", 0);
+ src_node = of_parse_phandle(of_node, "ois-src", 0);
if (!src_node) {
- CDBG("%s:%d src_node NULL\n", __func__, __LINE__);
+ CAM_DBG(CAM_SENSOR, "src_node NULL");
} else {
rc = of_property_read_u32(src_node, "cell-index", &val);
- CDBG("%s qcom,ois cell index %d, rc %d\n", __func__,
- val, rc);
+ CAM_DBG(CAM_SENSOR, " ois cell index %d, rc %d", val, rc);
if (rc < 0) {
- pr_err("%s failed %d\n", __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "failed %d", rc);
of_node_put(src_node);
return rc;
}
@@ -63,15 +62,14 @@
of_node_put(src_node);
}
- src_node = of_parse_phandle(of_node, "qcom,eeprom-src", 0);
+ src_node = of_parse_phandle(of_node, "eeprom-src", 0);
if (!src_node) {
- CDBG("%s:%d eeprom src_node NULL\n", __func__, __LINE__);
+ CAM_DBG(CAM_SENSOR, "eeprom src_node NULL");
} else {
rc = of_property_read_u32(src_node, "cell-index", &val);
- CDBG("%s qcom,eeprom cell index %d, rc %d\n", __func__,
- val, rc);
+ CAM_DBG(CAM_SENSOR, "eeprom cell index %d, rc %d", val, rc);
if (rc < 0) {
- pr_err("%s failed %d\n", __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "failed %d", rc);
of_node_put(src_node);
return rc;
}
@@ -79,15 +77,14 @@
of_node_put(src_node);
}
- src_node = of_parse_phandle(of_node, "qcom,led-flash-src", 0);
+ src_node = of_parse_phandle(of_node, "led-flash-src", 0);
if (!src_node) {
- CDBG("%s:%d src_node NULL\n", __func__, __LINE__);
+ CAM_DBG(CAM_SENSOR, " src_node NULL");
} else {
rc = of_property_read_u32(src_node, "cell-index", &val);
- CDBG("%s qcom,led flash cell index %d, rc %d\n", __func__,
- val, rc);
+ CAM_DBG(CAM_SENSOR, "led flash cell index %d, rc %d", val, rc);
if (rc < 0) {
- pr_err("%s:%d failed %d\n", __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "failed %d", rc);
of_node_put(src_node);
return rc;
}
@@ -95,10 +92,9 @@
of_node_put(src_node);
}
- rc = of_property_read_u32(of_node, "qcom,csiphy-sd-index", &val);
+ rc = of_property_read_u32(of_node, "csiphy-sd-index", &val);
if (rc < 0)
- pr_err("%s:%d :Error: paring the dt node for csiphy rc %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "paring the dt node for csiphy rc %d", rc);
else
sensor_info->subdev_id[SUB_MODULE_CSIPHY] = val;
@@ -110,27 +106,31 @@
int32_t rc = 0;
struct cam_sensor_board_info *sensordata = NULL;
struct device_node *of_node = s_ctrl->of_node;
- uint32_t cell_id;
-
+ struct cam_hw_soc_info *soc_info = &s_ctrl->soc_info;
s_ctrl->sensordata = kzalloc(sizeof(*sensordata), GFP_KERNEL);
if (!s_ctrl->sensordata)
return -ENOMEM;
sensordata = s_ctrl->sensordata;
- /*
- * Read cell index - this cell index will be the camera slot where
- * this camera will be mounted
- */
- rc = of_property_read_u32(of_node, "cell-index", &cell_id);
+
+ rc = cam_soc_util_get_dt_properties(soc_info);
if (rc < 0) {
- pr_err("failed: cell-index rc %d", rc);
+ CAM_ERR(CAM_SENSOR, "Failed to read DT properties rc %d", rc);
goto FREE_SENSOR_DATA;
}
- s_ctrl->id = cell_id;
+
+ rc = cam_sensor_util_init_gpio_pin_tbl(soc_info,
+ &sensordata->power_info.gpio_num_info);
+ if (rc < 0) {
+ CAM_ERR(CAM_SENSOR, "Failed to read gpios %d", rc);
+ goto FREE_SENSOR_DATA;
+ }
+
+ s_ctrl->id = soc_info->index;
/* Validate cell_id */
- if (cell_id >= MAX_CAMERAS) {
- pr_err("failed: invalid cell_id %d", cell_id);
+ if (s_ctrl->id >= MAX_CAMERAS) {
+ CAM_ERR(CAM_SENSOR, "Failed invalid cell_id %d", s_ctrl->id);
rc = -EINVAL;
goto FREE_SENSOR_DATA;
}
@@ -138,57 +138,39 @@
/* Read subdev info */
rc = cam_sensor_get_sub_module_index(of_node, sensordata);
if (rc < 0) {
- pr_err("failed");
+ CAM_ERR(CAM_SENSOR, "failed to get sub module index, rc=%d",
+ rc);
goto FREE_SENSOR_DATA;
}
- /* Read vreg information */
- rc = cam_sensor_get_dt_vreg_data(of_node,
- &sensordata->power_info.cam_vreg,
- &sensordata->power_info.num_vreg);
- if (rc < 0) {
- pr_err("failed: cam_sensor_get_dt_vreg_data rc %d", rc);
- goto FREE_SENSOR_DATA;
- }
-
- /* Read gpio information */
- rc = msm_sensor_driver_get_gpio_data
- (&(sensordata->power_info.gpio_conf), of_node);
- if (rc < 0) {
- pr_err("failed: msm_sensor_driver_get_gpio_data rc %d", rc);
- goto FREE_VREG_DATA;
- }
-
/* Get CCI master */
- rc = of_property_read_u32(of_node, "qcom,cci-master",
+ rc = of_property_read_u32(of_node, "cci-master",
&s_ctrl->cci_i2c_master);
- CDBG("qcom,cci-master %d, rc %d", s_ctrl->cci_i2c_master, rc);
+ CAM_DBG(CAM_SENSOR, "cci-master %d, rc %d", s_ctrl->cci_i2c_master, rc);
if (rc < 0) {
/* Set default master 0 */
s_ctrl->cci_i2c_master = MASTER_0;
rc = 0;
}
- if (of_property_read_u32(of_node, "qcom,sensor-position-pitch",
+ if (of_property_read_u32(of_node, "sensor-position-pitch",
&sensordata->pos_pitch) < 0) {
- CDBG("%s:%d Invalid sensor position\n", __func__, __LINE__);
+ CAM_DBG(CAM_SENSOR, "Invalid sensor position");
sensordata->pos_pitch = 360;
}
- if (of_property_read_u32(of_node, "qcom,sensor-position-roll",
+ if (of_property_read_u32(of_node, "sensor-position-roll",
&sensordata->pos_roll) < 0) {
- CDBG("%s:%d Invalid sensor position\n", __func__, __LINE__);
+ CAM_DBG(CAM_SENSOR, "Invalid sensor position");
sensordata->pos_roll = 360;
}
- if (of_property_read_u32(of_node, "qcom,sensor-position-yaw",
+ if (of_property_read_u32(of_node, "sensor-position-yaw",
&sensordata->pos_yaw) < 0) {
- CDBG("%s:%d Invalid sensor position\n", __func__, __LINE__);
+ CAM_DBG(CAM_SENSOR, "Invalid sensor position");
sensordata->pos_yaw = 360;
}
return rc;
-FREE_VREG_DATA:
- kfree(sensordata->power_info.cam_vreg);
FREE_SENSOR_DATA:
kfree(sensordata);
return rc;
@@ -198,13 +180,13 @@
{
/* Validate input parameters */
if (!s_ctrl) {
- pr_err("%s:%d failed: invalid params s_ctrl %pK\n", __func__,
- __LINE__, s_ctrl);
+ CAM_ERR(CAM_SENSOR, "failed: invalid params s_ctrl %pK",
+ s_ctrl);
return -EINVAL;
}
- CDBG("%s: %d master_type: %d\n", __func__, __LINE__,
- s_ctrl->io_master_info.master_type);
+ CAM_DBG(CAM_SENSOR,
+ "master_type: %d", s_ctrl->io_master_info.master_type);
/* Initialize cci_client */
if (s_ctrl->io_master_info.master_type == CCI_MASTER) {
s_ctrl->io_master_info.cci_client = kzalloc(sizeof(
@@ -213,8 +195,8 @@
return -ENOMEM;
} else {
- pr_err("%s:%d Invalid master / Master type Not supported\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR,
+ "Invalid master / Master type Not supported");
return -EINVAL;
}
@@ -223,43 +205,40 @@
int32_t cam_sensor_parse_dt(struct cam_sensor_ctrl_t *s_ctrl)
{
- int32_t rc = 0;
+ int32_t i, rc = 0;
+ struct cam_hw_soc_info *soc_info = &s_ctrl->soc_info;
/* Parse dt information and store in sensor control structure */
rc = cam_sensor_driver_get_dt_data(s_ctrl);
if (rc < 0) {
- pr_err("failed: rc %d", rc);
+ CAM_ERR(CAM_SENSOR, "Failed to get dt data rc %d", rc);
return rc;
}
/* Initialize mutex */
mutex_init(&(s_ctrl->cam_sensor_mutex));
- pr_err("%s: %d\n", __func__, __LINE__);
/* Initialize default parameters */
+ for (i = 0; i < soc_info->num_clk; i++) {
+ soc_info->clk[i] = devm_clk_get(&soc_info->pdev->dev,
+ soc_info->clk_name[i]);
+ if (!soc_info->clk[i]) {
+ CAM_ERR(CAM_SENSOR, "get failed for %s",
+ soc_info->clk_name[i]);
+ rc = -ENOENT;
+ return rc;
+ }
+ }
rc = msm_sensor_init_default_params(s_ctrl);
if (rc < 0) {
- pr_err("failed: msm_sensor_init_default_params rc %d", rc);
- goto FREE_DT_DATA;
- }
-
- /* Get clocks information */
- rc = msm_camera_get_clk_info(s_ctrl->pdev,
- &s_ctrl->sensordata->power_info.clk_info,
- &s_ctrl->sensordata->power_info.clk_ptr,
- &s_ctrl->sensordata->power_info.clk_info_size);
- if (rc < 0) {
- pr_err("failed: msm_camera_get_clk_info rc %d", rc);
+ CAM_ERR(CAM_SENSOR,
+ "failed: msm_sensor_init_default_params rc %d", rc);
goto FREE_DT_DATA;
}
return rc;
FREE_DT_DATA:
- kfree(s_ctrl->sensordata->power_info.gpio_conf->gpio_num_info);
- kfree(s_ctrl->sensordata->power_info.gpio_conf->cam_gpio_req_tbl);
- kfree(s_ctrl->sensordata->power_info.gpio_conf);
- kfree(s_ctrl->sensordata->power_info.cam_vreg);
kfree(s_ctrl->sensordata);
s_ctrl->sensordata = NULL;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/Makefile
index 6292a9f..6a0a0e1 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/Makefile
@@ -4,5 +4,6 @@
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
-obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_io.o cam_sensor_cci_i2c.o cam_sensor_qup_i2c.o
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_io.o cam_sensor_cci_i2c.o cam_sensor_qup_i2c.o cam_sensor_spi.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c
index 40a69ef..ca648f01 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c
@@ -14,9 +14,6 @@
#include "cam_sensor_i2c.h"
#include "cam_cci_dev.h"
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
-
int32_t cam_cci_i2c_read(struct cam_sensor_cci_client *cci_client,
uint32_t addr, uint32_t *data,
enum camera_sensor_i2c_type addr_type,
@@ -41,9 +38,10 @@
rc = v4l2_subdev_call(cci_client->cci_subdev,
core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
if (rc < 0) {
- pr_err("%s: line %d rc = %d\n", __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "line %d rc = %d", rc);
return rc;
}
+
rc = cci_ctrl.status;
if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE)
*data = buf[0];
@@ -58,6 +56,46 @@
return rc;
}
+int32_t cam_camera_cci_i2c_read_seq(struct cam_sensor_cci_client *cci_client,
+ uint32_t addr, uint8_t *data,
+ enum camera_sensor_i2c_type addr_type,
+ uint32_t num_byte)
+{
+ int32_t rc = -EFAULT;
+ unsigned char *buf = NULL;
+ int i = 0;
+ struct cam_cci_ctrl cci_ctrl;
+
+ if ((addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX)
+ || (num_byte > I2C_REG_DATA_MAX)) {
+ CAM_ERR(CAM_SENSOR, "addr_type %d num_byte %d", addr_type,
+ num_byte);
+ return rc;
+ }
+
+ buf = kzalloc(num_byte, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ cci_ctrl.cmd = MSM_CCI_I2C_READ;
+ cci_ctrl.cci_info = cci_client;
+ cci_ctrl.cfg.cci_i2c_read_cfg.addr = addr;
+ cci_ctrl.cfg.cci_i2c_read_cfg.addr_type = addr_type;
+ cci_ctrl.cfg.cci_i2c_read_cfg.data = buf;
+ cci_ctrl.cfg.cci_i2c_read_cfg.num_byte = num_byte;
+ cci_ctrl.status = -EFAULT;
+ rc = v4l2_subdev_call(cci_client->cci_subdev,
+ core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+ rc = cci_ctrl.status;
+ CAM_DBG(CAM_SENSOR, "addr = 0x%x, rc = %d", addr, rc);
+ for (i = 0; i < num_byte; i++) {
+ data[i] = buf[i];
+ CAM_DBG(CAM_SENSOR, "Byte %d: Data: 0x%x\n", i, data[i]);
+ }
+ kfree(buf);
+ return rc;
+}
+
static int32_t cam_cci_i2c_write_table_cmd(
struct camera_io_master *client,
struct cam_sensor_i2c_reg_setting *write_setting,
@@ -85,7 +123,7 @@
rc = v4l2_subdev_call(client->cci_client->cci_subdev,
core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
if (rc < 0) {
- pr_err("%s: line %d rc = %d\n", __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "Failed rc = %d", rc);
return rc;
}
rc = cci_ctrl.status;
@@ -135,12 +173,12 @@
int32_t rc = -EINVAL;
int32_t i = 0;
- CDBG("%s: addr: 0x%x data: 0x%x dt: %d\n",
- __func__, addr, data, data_type);
+ CAM_DBG(CAM_SENSOR, "addr: 0x%x data: 0x%x dt: %d",
+ addr, data, data_type);
if (delay_ms > MAX_POLL_DELAY_MS) {
- pr_err("%s:%d invalid delay = %d max_delay = %d\n",
- __func__, __LINE__, delay_ms, MAX_POLL_DELAY_MS);
+ CAM_ERR(CAM_SENSOR, "invalid delay = %d max_delay = %d",
+ delay_ms, MAX_POLL_DELAY_MS);
return -EINVAL;
}
for (i = 0; i < delay_ms; i++) {
@@ -154,11 +192,10 @@
/* If rc is 1 then read is successful but poll is failure */
if (rc == 1)
- pr_err("%s:%d poll failed rc=%d(non-fatal)\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "poll failed rc=%d(non-fatal)", rc);
if (rc < 0)
- pr_err("%s:%d poll failed rc=%d\n", __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "poll failed rc=%d", rc);
return rc;
}
@@ -169,13 +206,12 @@
int32_t rc = 0;
struct cam_cci_ctrl cci_ctrl;
- CDBG("%s line %d\n", __func__, __LINE__);
cci_ctrl.cmd = cci_cmd;
cci_ctrl.cci_info = cci_client;
rc = v4l2_subdev_call(cci_client->cci_subdev,
core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
if (rc < 0) {
- pr_err("%s line %d rc = %d\n", __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "Failed rc = %d", rc);
return rc;
}
return cci_ctrl.status;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h
index 06e8104..6207a8a 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h
@@ -26,6 +26,8 @@
#define I2C_COMPARE_MATCH 0
#define I2C_COMPARE_MISMATCH 1
+#define I2C_REG_DATA_MAX (8*1024)
+
/**
* @client: CCI client structure
* @data: I2C data
@@ -41,6 +43,20 @@
/**
* @client: CCI client structure
+ * @addr: I2c address
+ * @data: I2C data
+ * @addr_type: I2c address type
+ * @num_byte: number of bytes
+ *
+ * This API handles CCI sequential read
+ */
+int32_t cam_camera_cci_i2c_read_seq(struct cam_sensor_cci_client *client,
+ uint32_t addr, uint8_t *data,
+ enum camera_sensor_i2c_type addr_type,
+ uint32_t num_byte);
+
+/**
+ * @client: CCI client structure
* @cci_cmd: CCI command type
*
* This API handles CCI random write
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
index 3e1b331..154f4ad 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
@@ -22,7 +22,7 @@
int16_t mask = data_mask & 0xFF;
if (!io_master_info) {
- pr_err("%s:%d Invalid Args\n", __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "Invalid Args");
return -EINVAL;
}
@@ -34,8 +34,8 @@
addr, data, data_mask, addr_type, data_type,
delay_ms);
} else {
- pr_err("%s:%d Invalid Comm. Master:%d\n", __func__,
- __LINE__, io_master_info->master_type);
+ CAM_ERR(CAM_SENSOR, "Invalid Comm. Master:%d",
+ io_master_info->master_type);
return -EINVAL;
}
}
@@ -46,7 +46,7 @@
enum camera_sensor_i2c_type data_type)
{
if (!io_master_info) {
- pr_err("%s:%d Invalid Args\n", __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "Invalid Args");
return -EINVAL;
}
@@ -56,18 +56,44 @@
} else if (io_master_info->master_type == I2C_MASTER) {
return cam_qup_i2c_read(io_master_info->client,
addr, data, addr_type, data_type);
+ } else if (io_master_info->master_type == SPI_MASTER) {
+ return cam_spi_read(io_master_info,
+ addr, data, addr_type);
} else {
- pr_err("%s:%d Invalid Comm. Master:%d\n", __func__,
- __LINE__, io_master_info->master_type);
+ CAM_ERR(CAM_SENSOR, "Invalid Comm. Master:%d",
+ io_master_info->master_type);
return -EINVAL;
}
+ return 0;
+}
+
+int32_t camera_io_dev_read_seq(struct camera_io_master *io_master_info,
+ uint32_t addr, uint8_t *data,
+ enum camera_sensor_i2c_type addr_type, int32_t num_bytes)
+{
+ if (io_master_info->master_type == CCI_MASTER) {
+ return cam_camera_cci_i2c_read_seq(io_master_info->cci_client,
+ addr, data, addr_type, num_bytes);
+ } else if (io_master_info->master_type == I2C_MASTER) {
+ return cam_qup_i2c_read_seq(io_master_info->client,
+ addr, data, addr_type, num_bytes);
+ } else if (io_master_info->master_type == SPI_MASTER) {
+ return cam_spi_read(io_master_info,
+ addr, (uint32_t *)data, addr_type);
+ } else {
+ CAM_ERR(CAM_SENSOR, "Invalid Comm. Master:%d",
+ io_master_info->master_type);
+ return -EINVAL;
+ }
+ return 0;
}
int32_t camera_io_dev_write(struct camera_io_master *io_master_info,
struct cam_sensor_i2c_reg_setting *write_setting)
{
if (!write_setting || !io_master_info) {
- pr_err("Input parameters not valid ws: %pK ioinfo: %pK",
+ CAM_ERR(CAM_SENSOR,
+ "Input parameters not valid ws: %pK ioinfo: %pK",
write_setting, io_master_info);
return -EINVAL;
}
@@ -79,8 +105,8 @@
return cam_qup_i2c_write_table(io_master_info,
write_setting);
} else {
- pr_err("%s:%d Invalid Comm. Master:%d\n", __func__,
- __LINE__, io_master_info->master_type);
+ CAM_ERR(CAM_SENSOR, "Invalid Comm. Master:%d",
+ io_master_info->master_type);
return -EINVAL;
}
}
@@ -88,7 +114,7 @@
int32_t camera_io_init(struct camera_io_master *io_master_info)
{
if (!io_master_info) {
- pr_err("%s:%d Invalid Args\n", __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "Invalid Args");
return -EINVAL;
}
@@ -98,8 +124,8 @@
return cam_sensor_cci_i2c_util(io_master_info->cci_client,
MSM_CCI_INIT);
} else {
- pr_err("%s:%d Invalid Comm. Master:%d\n", __func__,
- __LINE__, io_master_info->master_type);
+ CAM_ERR(CAM_SENSOR, "Invalid Comm. Master:%d",
+ io_master_info->master_type);
return -EINVAL;
}
}
@@ -107,7 +133,7 @@
int32_t camera_io_release(struct camera_io_master *io_master_info)
{
if (!io_master_info) {
- pr_err("%s:%d Invalid Args\n", __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "Invalid Args");
return -EINVAL;
}
@@ -115,8 +141,8 @@
return cam_sensor_cci_i2c_util(io_master_info->cci_client,
MSM_CCI_RELEASE);
} else {
- pr_err("%s:%d Invalid Comm. Master:%d\n", __func__,
- __LINE__, io_master_info->master_type);
+ CAM_ERR(CAM_SENSOR, "Invalid Comm. Master:%d",
+ io_master_info->master_type);
return -EINVAL;
}
}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h
index f721afd..9a60fd0e 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h
@@ -52,16 +52,15 @@
* @io_master_info: I2C/SPI master information
* @addr: I2C address
* @data: I2C data
- * @addr_type: I2C addr type
+ * @data_type: I2C data type
* @num_bytes: number of bytes
*
- * This API abstracts sequential read functionality based on master type
+ * This API abstracts read functionality based on master type
*/
int32_t camera_io_dev_read_seq(struct camera_io_master *io_master_info,
uint32_t addr, uint8_t *data,
enum camera_sensor_i2c_type addr_type,
- uint32_t num_bytes);
-
+ int32_t num_bytes);
/**
* @io_master_info: I2C/SPI master information
@@ -103,4 +102,6 @@
enum camera_sensor_i2c_type addr_type,
uint32_t delay_ms);
+#include "cam_sensor_i2c.h"
+#include "cam_sensor_spi.h"
#endif /* _CAM_SENSOR_IO_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_qup_i2c.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_qup_i2c.c
index b25b1855..72e51ee 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_qup_i2c.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_qup_i2c.c
@@ -40,7 +40,7 @@
};
rc = i2c_transfer(dev_client->adapter, msgs, 2);
if (rc < 0)
- pr_err("%s:failed 0x%x\n", __func__, saddr);
+ CAM_ERR(CAM_SENSOR, "failed 0x%x", saddr);
return rc;
}
@@ -61,7 +61,7 @@
};
rc = i2c_transfer(dev_client->client->adapter, msg, 1);
if (rc < 0)
- pr_err("%s: failed 0x%x\n", __func__, saddr);
+ CAM_ERR(CAM_SENSOR, "failed 0x%x", saddr);
return rc;
}
@@ -77,8 +77,7 @@
|| addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX
|| data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
|| data_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
- pr_err("ERR: %s Failed with addr/data_type verfication\n",
- __func__);
+ CAM_ERR(CAM_SENSOR, "Failed with addr/data_type verfication");
return rc;
}
@@ -105,7 +104,7 @@
rc = cam_qup_i2c_rxdata(client, buf, addr_type, data_type);
if (rc < 0) {
- pr_err("%s fail\n", __func__);
+ CAM_ERR(CAM_SENSOR, "failed rc: %d", rc);
goto read_fail;
}
@@ -119,7 +118,7 @@
*data = buf[0] << 24 | buf[1] << 16 |
buf[2] << 8 | buf[3];
- CDBG("%s addr = 0x%x data: 0x%x\n", __func__, addr, *data);
+ CAM_DBG(CAM_SENSOR, "addr = 0x%x data: 0x%x", addr, *data);
read_fail:
kfree(buf);
buf = NULL;
@@ -137,14 +136,13 @@
if (addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
|| addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
- pr_err("ERR: %s Failed with addr_type verification\n",
- __func__);
+ CAM_ERR(CAM_SENSOR, "Failed with addr_type verification");
return rc;
}
if ((num_byte == 0) || (num_byte > I2C_REG_DATA_MAX)) {
- pr_err("%s: Error num_byte:0x%x max supported:0x%x\n",
- __func__, num_byte, I2C_REG_DATA_MAX);
+ CAM_ERR(CAM_SENSOR, "num_byte:0x%x max supported:0x%x",
+ num_byte, I2C_REG_DATA_MAX);
return rc;
}
@@ -170,7 +168,7 @@
rc = cam_qup_i2c_rxdata(client, buf, addr_type, num_byte);
if (rc < 0) {
- pr_err("%s fail\n", __func__);
+ CAM_ERR(CAM_SENSOR, "failed rc: %d", rc);
goto read_seq_fail;
}
@@ -213,8 +211,8 @@
int i = 0;
if ((delay_ms > MAX_POLL_DELAY_MS) || (delay_ms == 0)) {
- pr_err("%s:%d invalid delay = %d max_delay = %d\n",
- __func__, __LINE__, delay_ms, MAX_POLL_DELAY_MS);
+ CAM_ERR(CAM_SENSOR, "invalid delay = %d max_delay = %d",
+ delay_ms, MAX_POLL_DELAY_MS);
return -EINVAL;
}
@@ -234,10 +232,9 @@
}
/* If rc is MISMATCH then read is successful but poll is failure */
if (rc == I2C_COMPARE_MISMATCH)
- pr_err("%s:%d poll failed rc=%d(non-fatal)\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "poll failed rc=%d(non-fatal)", rc);
if (rc < 0)
- pr_err("%s:%d poll failed rc=%d\n", __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "poll failed rc=%d", rc);
return rc;
}
@@ -251,20 +248,17 @@
unsigned char buf[I2C_REG_MAX_BUF_SIZE];
uint8_t len = 0;
- CDBG("%s reg addr = 0x%x data type: %d\n",
- __func__, reg_setting->reg_addr, data_type);
+ CAM_DBG(CAM_SENSOR, "reg addr = 0x%x data type: %d",
+ reg_setting->reg_addr, data_type);
if (addr_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
buf[0] = reg_setting->reg_addr;
- CDBG("%s byte %d: 0x%x\n", __func__,
- len, buf[len]);
+ CAM_DBG(CAM_SENSOR, "byte %d: 0x%x", len, buf[len]);
len = 1;
} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
buf[0] = reg_setting->reg_addr >> 8;
buf[1] = reg_setting->reg_addr;
- CDBG("%s byte %d: 0x%x\n", __func__,
- len, buf[len]);
- CDBG("%s byte %d: 0x%x\n", __func__,
- len+1, buf[len+1]);
+ CAM_DBG(CAM_SENSOR, "byte %d: 0x%x", len, buf[len]);
+ CAM_DBG(CAM_SENSOR, "byte %d: 0x%x", len+1, buf[len+1]);
len = 2;
} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_3B) {
buf[0] = reg_setting->reg_addr >> 16;
@@ -278,47 +272,47 @@
buf[3] = reg_setting->reg_addr;
len = 4;
} else {
- pr_err("%s: Invalid I2C addr type\n", __func__);
+ CAM_ERR(CAM_SENSOR, "Invalid I2C addr type");
return -EINVAL;
}
- CDBG("Data: 0x%x\n", reg_setting->reg_data);
+ CAM_DBG(CAM_SENSOR, "Data: 0x%x", reg_setting->reg_data);
if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
buf[len] = reg_setting->reg_data;
- CDBG("Byte %d: 0x%x\n", len, buf[len]);
+ CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len, buf[len]);
len += 1;
} else if (data_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
buf[len] = reg_setting->reg_data >> 8;
buf[len+1] = reg_setting->reg_data;
- CDBG("Byte %d: 0x%x\n", len, buf[len]);
- CDBG("Byte %d: 0x%x\n", len+1, buf[len+1]);
+ CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len, buf[len]);
+ CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len+1, buf[len+1]);
len += 2;
} else if (data_type == CAMERA_SENSOR_I2C_TYPE_3B) {
buf[len] = reg_setting->reg_data >> 16;
buf[len + 1] = reg_setting->reg_data >> 8;
buf[len + 2] = reg_setting->reg_data;
- CDBG("Byte %d: 0x%x\n", len, buf[len]);
- CDBG("Byte %d: 0x%x\n", len+1, buf[len+1]);
- CDBG("Byte %d: 0x%x\n", len+2, buf[len+2]);
+ CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len, buf[len]);
+ CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len+1, buf[len+1]);
+ CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len+2, buf[len+2]);
len += 3;
} else if (data_type == CAMERA_SENSOR_I2C_TYPE_DWORD) {
buf[len] = reg_setting->reg_data >> 24;
buf[len + 1] = reg_setting->reg_data >> 16;
buf[len + 2] = reg_setting->reg_data >> 8;
buf[len + 3] = reg_setting->reg_data;
- CDBG("Byte %d: 0x%x\n", len, buf[len]);
- CDBG("Byte %d: 0x%x\n", len+1, buf[len+1]);
- CDBG("Byte %d: 0x%x\n", len+2, buf[len+2]);
- CDBG("Byte %d: 0x%x\n", len+3, buf[len+3]);
+ CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len, buf[len]);
+ CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len+1, buf[len+1]);
+ CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len+2, buf[len+2]);
+ CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len+3, buf[len+3]);
len += 4;
} else {
- pr_err("%s: Invalid Data Type\n", __func__);
+ CAM_ERR(CAM_SENSOR, "Invalid Data Type");
return -EINVAL;
}
rc = cam_qup_i2c_txdata(client, buf, len);
if (rc < 0)
- pr_err("%s fail\n", __func__);
+ CAM_ERR(CAM_SENSOR, "failed rc: %d", rc);
return rc;
}
@@ -341,7 +335,7 @@
reg_setting = write_setting->reg_setting;
for (i = 0; i < write_setting->size; i++) {
- CDBG("%s addr 0x%x data 0x%x\n", __func__,
+ CAM_DBG(CAM_SENSOR, "addr 0x%x data 0x%x",
reg_setting->reg_addr, reg_setting->reg_data);
rc = cam_qup_i2c_write(client, reg_setting,
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_spi.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_spi.c
new file mode 100644
index 0000000..4011aa0
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_spi.c
@@ -0,0 +1,469 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_sensor_spi.h"
+#include "cam_debug_util.h"
+
+static int cam_spi_txfr(struct spi_device *spi, char *txbuf,
+ char *rxbuf, int num_byte)
+{
+ struct spi_transfer txfr;
+ struct spi_message msg;
+
+ memset(&txfr, 0, sizeof(txfr));
+ txfr.tx_buf = txbuf;
+ txfr.rx_buf = rxbuf;
+ txfr.len = num_byte;
+ spi_message_init(&msg);
+ spi_message_add_tail(&txfr, &msg);
+
+ return spi_sync(spi, &msg);
+}
+
+static int cam_spi_txfr_read(struct spi_device *spi, char *txbuf,
+ char *rxbuf, int txlen, int rxlen)
+{
+ struct spi_transfer tx;
+ struct spi_transfer rx;
+ struct spi_message m;
+
+ memset(&tx, 0, sizeof(tx));
+ memset(&rx, 0, sizeof(rx));
+ tx.tx_buf = txbuf;
+ rx.rx_buf = rxbuf;
+ tx.len = txlen;
+ rx.len = rxlen;
+ spi_message_init(&m);
+ spi_message_add_tail(&tx, &m);
+ spi_message_add_tail(&rx, &m);
+ return spi_sync(spi, &m);
+}
+
+/**
+ * cam_set_addr() - helper function to set transfer address
+ * @addr: device address
+ * @addr_len: the addr field length of an instruction
+ * @type: type (i.e. byte-length) of @addr
+ * @str: shifted address output, must be zeroed when passed in
+ *
+ * This helper function sets @str based on the addr field length of an
+ * instruction and the data length.
+ */
+static void cam_set_addr(uint32_t addr, uint8_t addr_len,
+ enum camera_sensor_i2c_type type,
+ char *str)
+{
+ int i, len;
+
+ if (!addr_len)
+ return;
+
+ if (addr_len < type)
+ CAM_DBG(CAM_EEPROM, "omitting higher bits in address");
+
+ /* only support transfer MSB first for now */
+ len = addr_len - type;
+ for (i = len; i < addr_len; i++) {
+ if (i >= 0)
+ str[i] = (addr >> (BITS_PER_BYTE * (addr_len - i - 1)))
+ & 0xFF;
+ }
+
+}
+
+/**
+ * cam_spi_tx_helper() - wrapper for SPI transaction
+ * @client: io client
+ * @inst: inst of this transaction
+ * @addr: device addr following the inst
+ * @data: output byte array (could be NULL)
+ * @num_byte: size of @data
+ * @tx, rx: optional transfer buffer. It must be at least header
+ * + @num_byte long.
+ *
+ * This is the core function for SPI transaction, except for writes. It first
+ * checks address type, then allocates required memory for tx/rx buffers.
+ * It sends out <opcode><addr>, and optionally receives @num_byte of response,
+ * if @data is not NULL. This function does not check for wait conditions,
+ * and will return immediately once bus transaction finishes.
+ *
+ * This function will allocate buffers of header + @num_byte long. For
+ * large transfers, the allocation could fail. External buffer @tx, @rx
+ * should be passed in to bypass allocation. The size of buffer should be
+ * at least header + num_byte long. Since buffer is managed externally,
+ * @data will be ignored, and read results will be in @rx.
+ * @tx, @rx also can be used for repeated transfers to improve performance.
+ */
+static int32_t cam_spi_tx_helper(struct camera_io_master *client,
+ struct cam_camera_spi_inst *inst, uint32_t addr, uint8_t *data,
+ uint32_t num_byte, char *tx, char *rx)
+{
+ int32_t rc = -EINVAL;
+ struct spi_device *spi = client->spi_client->spi_master;
+ char *ctx = NULL, *crx = NULL;
+ uint32_t len, hlen;
+ uint8_t retries = client->spi_client->retries;
+ enum camera_sensor_i2c_type addr_type = CAMERA_SENSOR_I2C_TYPE_WORD;
+
+ if (addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX)
+ return rc;
+
+ hlen = cam_camera_spi_get_hlen(inst);
+ len = hlen + num_byte;
+
+ if (tx) {
+ ctx = tx;
+ } else {
+ ctx = kzalloc(len, GFP_KERNEL | GFP_DMA);
+ if (!ctx)
+ return -ENOMEM;
+ }
+
+ if (num_byte) {
+ if (rx) {
+ crx = rx;
+ } else {
+ crx = kzalloc(len, GFP_KERNEL | GFP_DMA);
+ if (!crx) {
+ if (!tx)
+ kfree(ctx);
+ return -ENOMEM;
+ }
+ }
+ } else {
+ crx = NULL;
+ }
+
+ ctx[0] = inst->opcode;
+ cam_set_addr(addr, inst->addr_len, addr_type, ctx + 1);
+ while ((rc = cam_spi_txfr(spi, ctx, crx, len)) && retries) {
+ retries--;
+ msleep(client->spi_client->retry_delay);
+ }
+ if (rc < 0) {
+ CAM_ERR(CAM_EEPROM, "failed: spi txfr rc %d", rc);
+ goto out;
+ }
+ if (data && num_byte && !rx)
+ memcpy(data, crx + hlen, num_byte);
+
+out:
+ if (!tx)
+ kfree(ctx);
+ if (!rx)
+ kfree(crx);
+ return rc;
+}
+
+static int32_t cam_spi_tx_read(struct camera_io_master *client,
+ struct cam_camera_spi_inst *inst, uint32_t addr, uint8_t *data,
+ uint32_t num_byte, char *tx, char *rx)
+{
+ int32_t rc = -EINVAL;
+ struct spi_device *spi = client->spi_client->spi_master;
+ char *ctx = NULL, *crx = NULL;
+ uint32_t hlen;
+ uint8_t retries = client->spi_client->retries;
+ enum camera_sensor_i2c_type addr_type = CAMERA_SENSOR_I2C_TYPE_WORD;
+
+ if ((addr_type != CAMERA_SENSOR_I2C_TYPE_WORD)
+ && (addr_type != CAMERA_SENSOR_I2C_TYPE_BYTE)
+ && (addr_type != CAMERA_SENSOR_I2C_TYPE_3B))
+ return rc;
+
+ hlen = cam_camera_spi_get_hlen(inst);
+ if (tx) {
+ ctx = tx;
+ } else {
+ ctx = kzalloc(hlen, GFP_KERNEL | GFP_DMA);
+ if (!ctx)
+ return -ENOMEM;
+ }
+ if (num_byte) {
+ if (rx) {
+ crx = rx;
+ } else {
+ crx = kzalloc(num_byte, GFP_KERNEL | GFP_DMA);
+ if (!crx) {
+ if (!tx)
+ kfree(ctx);
+ return -ENOMEM;
+ }
+ }
+ } else {
+ crx = NULL;
+ }
+
+ ctx[0] = inst->opcode;
+ if (addr_type == CAMERA_SENSOR_I2C_TYPE_3B) {
+ cam_set_addr(addr, inst->addr_len, addr_type,
+ ctx + 1);
+ } else {
+ ctx[1] = (addr >> BITS_PER_BYTE) & 0xFF;
+ ctx[2] = (addr & 0xFF);
+ ctx[3] = 0;
+ }
+ CAM_DBG(CAM_EEPROM, "tx(%u): %02x %02x %02x %02x", hlen, ctx[0],
+ ctx[1], ctx[2], ctx[3]);
+ while ((rc = cam_spi_txfr_read(spi, ctx, crx, hlen, num_byte))
+ && retries) {
+ retries--;
+ msleep(client->spi_client->retry_delay);
+ }
+ if (rc < 0) {
+ CAM_ERR(CAM_SENSOR, "failed %d", rc);
+ goto out;
+ }
+ if (data && num_byte && !rx)
+ memcpy(data, crx, num_byte);
+out:
+ if (!tx)
+ kfree(ctx);
+ if (!rx)
+ kfree(crx);
+ return rc;
+}
+
+int cam_spi_read(struct camera_io_master *client,
+ uint32_t addr, uint32_t *data,
+ enum camera_sensor_i2c_type data_type)
+{
+ int rc = -EINVAL;
+ uint8_t temp[CAMERA_SENSOR_I2C_TYPE_MAX];
+
+ if ((data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID)
+ || (data_type >= CAMERA_SENSOR_I2C_TYPE_MAX))
+ return rc;
+
+ rc = cam_spi_tx_read(client,
+ &client->spi_client->cmd_tbl.read, addr, &temp[0],
+ data_type, NULL, NULL);
+ if (rc < 0) {
+ CAM_ERR(CAM_SENSOR, "failed %d", rc);
+ return rc;
+ }
+
+ if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE)
+ *data = temp[0];
+ else
+ *data = (temp[0] << BITS_PER_BYTE) | temp[1];
+
+ CAM_DBG(CAM_SENSOR, "addr 0x%x, data %u", addr, *data);
+ return rc;
+}
+
+int cam_spi_query_id(struct camera_io_master *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte)
+{
+ return cam_spi_tx_helper(client,
+ &client->spi_client->cmd_tbl.query_id, addr, data, num_byte,
+ NULL, NULL);
+}
+
+static int32_t cam_spi_read_status_reg(
+ struct camera_io_master *client, uint8_t *status)
+{
+ struct cam_camera_spi_inst *rs =
+ &client->spi_client->cmd_tbl.read_status;
+
+ if (rs->addr_len != 0) {
+ CAM_ERR(CAM_SENSOR, "not implemented yet");
+ return -ENXIO;
+ }
+ return cam_spi_tx_helper(client, rs, 0, status, 1, NULL, NULL);
+}
+
+static int32_t cam_spi_device_busy(struct camera_io_master *client,
+ uint8_t *busy)
+{
+ int rc;
+ uint8_t st = 0;
+
+ rc = cam_spi_read_status_reg(client, &st);
+ if (rc < 0) {
+ CAM_ERR(CAM_SENSOR, "failed to read status reg");
+ return rc;
+ }
+ *busy = st & client->spi_client->busy_mask;
+ return 0;
+}
+
+static int32_t cam_spi_wait(struct camera_io_master *client,
+	struct cam_camera_spi_inst *inst)
+{
+	uint8_t busy;
+	int i, rc;
+
+	CAM_DBG(CAM_SENSOR, "op 0x%x wait start", inst->opcode);
+	for (i = 0; i < inst->delay_count; i++) {
+		rc = cam_spi_device_busy(client, &busy);
+		if (rc < 0)
+			return rc;
+		if (!busy)
+			break;
+		msleep(inst->delay_intv);
+		CAM_DBG(CAM_SENSOR, "op 0x%x wait", inst->opcode);
+	}
+	if (inst->delay_count && (i == inst->delay_count)) {
+		CAM_ERR(CAM_SENSOR, "op %x timed out", inst->opcode);
+		return -ETIMEDOUT;
+	}
+	CAM_DBG(CAM_SENSOR, "op %x finished", inst->opcode);
+	return 0;
+}
+
+static int32_t cam_spi_write_enable(
+ struct camera_io_master *client)
+{
+ struct cam_camera_spi_inst *we =
+ &client->spi_client->cmd_tbl.write_enable;
+ int rc;
+
+ if (we->opcode == 0)
+ return 0;
+ if (we->addr_len != 0) {
+ CAM_ERR(CAM_SENSOR, "not implemented yet");
+ return -EINVAL;
+ }
+ rc = cam_spi_tx_helper(client, we, 0, NULL, 0, NULL, NULL);
+ if (rc < 0)
+ CAM_ERR(CAM_SENSOR, "write enable failed");
+ return rc;
+}
+
+/**
+ * cam_spi_page_program() - core function to perform write
+ * @client: need for obtaining SPI device
+ * @addr: address to program on device
+ * @data: data to write
+ * @len: size of data
+ * @tx: tx buffer, size >= header + len
+ *
+ * This function performs SPI write, and has no boundary check. Writing range
+ * should not cross page boundary, or data will be corrupted. Transaction is
+ * guaranteed to be finished when it returns. This function should never be
+ * used outside cam_spi_write_seq().
+ */
+static int32_t cam_spi_page_program(struct camera_io_master *client,
+ uint32_t addr, uint8_t *data, uint16_t len, uint8_t *tx)
+{
+ int rc;
+ struct cam_camera_spi_inst *pg =
+ &client->spi_client->cmd_tbl.page_program;
+ struct spi_device *spi = client->spi_client->spi_master;
+ uint8_t retries = client->spi_client->retries;
+ uint8_t header_len = sizeof(pg->opcode) + pg->addr_len + pg->dummy_len;
+ enum camera_sensor_i2c_type addr_type = CAMERA_SENSOR_I2C_TYPE_WORD;
+
+ CAM_DBG(CAM_SENSOR, "addr 0x%x, size 0x%x", addr, len);
+ rc = cam_spi_write_enable(client);
+ if (rc < 0)
+ return rc;
+ memset(tx, 0, header_len);
+ tx[0] = pg->opcode;
+ cam_set_addr(addr, pg->addr_len, addr_type, tx + 1);
+ memcpy(tx + header_len, data, len);
+ CAM_DBG(CAM_SENSOR, "tx(%u): %02x %02x %02x %02x",
+ len, tx[0], tx[1], tx[2], tx[3]);
+ while ((rc = spi_write(spi, tx, len + header_len)) && retries) {
+ rc = cam_spi_wait(client, pg);
+ msleep(client->spi_client->retry_delay);
+ retries--;
+ }
+ if (rc < 0) {
+ CAM_ERR(CAM_SENSOR, "failed %d", rc);
+ return rc;
+ }
+ rc = cam_spi_wait(client, pg);
+ return rc;
+}
+
+int cam_spi_write(struct camera_io_master *client,
+ uint32_t addr, uint16_t data,
+ enum camera_sensor_i2c_type data_type)
+{
+ struct cam_camera_spi_inst *pg =
+ &client->spi_client->cmd_tbl.page_program;
+ uint8_t header_len = sizeof(pg->opcode) + pg->addr_len + pg->dummy_len;
+ uint16_t len = 0;
+ char buf[2];
+ char *tx;
+ int rc = -EINVAL;
+ enum camera_sensor_i2c_type addr_type = CAMERA_SENSOR_I2C_TYPE_WORD;
+
+ if ((addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX)
+ || (data_type != CAMERA_SENSOR_I2C_TYPE_BYTE
+ && data_type != CAMERA_SENSOR_I2C_TYPE_WORD))
+ return rc;
+ CAM_DBG(CAM_EEPROM, "Data: 0x%x", data);
+ len = header_len + (uint8_t)data_type;
+ tx = kmalloc(len, GFP_KERNEL | GFP_DMA);
+ if (!tx)
+ goto NOMEM;
+ if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
+ buf[0] = data;
+ CAM_DBG(CAM_EEPROM, "Byte %d: 0x%x", len, buf[0]);
+ } else if (data_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
+ buf[0] = (data >> BITS_PER_BYTE) & 0x00FF;
+ buf[1] = (data & 0x00FF);
+ }
+ rc = cam_spi_page_program(client, addr, buf,
+ (uint16_t)data_type, tx);
+ if (rc < 0)
+ goto ERROR;
+ goto OUT;
+NOMEM:
+ CAM_ERR(CAM_SENSOR, "memory allocation failed");
+ return -ENOMEM;
+ERROR:
+ CAM_ERR(CAM_SENSOR, "error write");
+OUT:
+ kfree(tx);
+ return rc;
+}
+
+int cam_spi_write_table(struct camera_io_master *client,
+	struct cam_sensor_i2c_reg_setting *write_setting)
+{
+	int i;
+	int rc = -EFAULT;
+	struct cam_sensor_i2c_reg_array *reg_setting;
+	uint16_t client_addr_type;
+	enum camera_sensor_i2c_type addr_type;
+
+	if (!client || !write_setting)
+		return rc;
+	if (write_setting->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX
+		|| (write_setting->data_type != CAMERA_SENSOR_I2C_TYPE_BYTE
+		&& write_setting->data_type != CAMERA_SENSOR_I2C_TYPE_WORD))
+		return rc;
+	reg_setting = write_setting->reg_setting;
+	addr_type = write_setting->addr_type;
+	client_addr_type = addr_type;
+	for (i = 0; i < write_setting->size; i++) {
+		CAM_DBG(CAM_SENSOR, "addr %x data %x",
+			reg_setting->reg_addr, reg_setting->reg_data);
+		rc = cam_spi_write(client, reg_setting->reg_addr,
+			reg_setting->reg_data, write_setting->data_type);
+		if (rc < 0)
+			break;
+		reg_setting++;
+	}
+	if (write_setting->delay > 20)
+		msleep(write_setting->delay);
+	else if (write_setting->delay)
+		usleep_range(write_setting->delay * 1000,
+			(write_setting->delay
+			* 1000) + 1000);
+	addr_type = client_addr_type;
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_spi.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_spi.h
new file mode 100644
index 0000000..a497491
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_spi.h
@@ -0,0 +1,93 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_SENSOR_SPI_H_
+#define _CAM_SENSOR_SPI_H_
+
+#include <linux/spi/spi.h>
+#include <media/cam_sensor.h>
+#include "cam_sensor_i2c.h"
+
+#define MAX_SPI_SIZE 110
+#define SPI_DYNAMIC_ALLOC
+
+struct cam_camera_spi_inst {
+ uint8_t opcode;
+ uint8_t addr_len;
+ uint8_t dummy_len;
+ uint8_t delay_intv;
+ uint8_t delay_count;
+};
+
+struct cam_spi_write_burst_data {
+ u8 data_msb;
+ u8 data_lsb;
+};
+
+struct cam_spi_write_burst_packet {
+ u8 cmd;
+ u8 addr_msb;
+ u8 addr_lsb;
+ struct cam_spi_write_burst_data data_arr[MAX_SPI_SIZE];
+};
+
+struct cam_camera_burst_info {
+ uint32_t burst_addr;
+ uint32_t burst_start;
+ uint32_t burst_len;
+ uint32_t chunk_size;
+};
+
+struct cam_camera_spi_inst_tbl {
+ struct cam_camera_spi_inst read;
+ struct cam_camera_spi_inst read_seq;
+ struct cam_camera_spi_inst query_id;
+ struct cam_camera_spi_inst page_program;
+ struct cam_camera_spi_inst write_enable;
+ struct cam_camera_spi_inst read_status;
+ struct cam_camera_spi_inst erase;
+};
+
+struct cam_sensor_spi_client {
+ struct spi_device *spi_master;
+ struct cam_camera_spi_inst_tbl cmd_tbl;
+ uint8_t device_id0;
+ uint8_t device_id1;
+ uint8_t mfr_id0;
+ uint8_t mfr_id1;
+ uint8_t retry_delay;
+ uint8_t retries;
+ uint8_t busy_mask;
+ uint16_t page_size;
+ uint32_t erase_size;
+};
+static __always_inline
+uint16_t cam_camera_spi_get_hlen(struct cam_camera_spi_inst *inst)
+{
+ return sizeof(inst->opcode) + inst->addr_len + inst->dummy_len;
+}
+
+int cam_spi_read(struct camera_io_master *client,
+ uint32_t addr, uint32_t *data,
+ enum camera_sensor_i2c_type data_type);
+
+int cam_spi_query_id(struct camera_io_master *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte);
+
+int cam_spi_write(struct camera_io_master *client,
+ uint32_t addr, uint16_t data,
+ enum camera_sensor_i2c_type data_type);
+
+int cam_spi_write_table(struct camera_io_master *client,
+ struct cam_sensor_i2c_reg_setting *write_setting);
+
+#endif
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/Makefile
index 766828e..bf61fb3 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/Makefile
@@ -2,5 +2,6 @@
ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
-obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_util.o cam_sensor_soc_api.o
\ No newline at end of file
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_util.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h
index e5e4872..6520042b 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h
@@ -31,6 +31,8 @@
#define CAM_SENSOR_NAME "cam-sensor"
#define CAM_ACTUATOR_NAME "cam-actuator"
#define CAM_CSIPHY_NAME "cam-csiphy"
+#define CAM_FLASH_NAME "cam-flash"
+#define CAM_EEPROM_NAME "cam-eeprom"
#define MAX_SYSTEM_PIPELINE_DELAY 2
@@ -47,6 +49,11 @@
CAMERA_SENSOR_CMD_TYPE_I2C_CONT_WR,
CAMERA_SENSOR_CMD_TYPE_I2C_CONT_RD,
CAMERA_SENSOR_CMD_TYPE_WAIT,
+ CAMERA_SENSOR_FLASH_CMD_TYPE_INIT,
+ CAMERA_SENSOR_FLASH_CMD_TYPE_FIRE,
+ CAMERA_SENSOR_FLASH_CMD_TYPE_RER,
+ CAMERA_SENSOR_FLASH_CMD_TYPE_QUERYCURR,
+ CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET,
CAMERA_SENSOR_CMD_TYPE_MAX,
};
@@ -69,6 +76,14 @@
CAMERA_SENSOR_WAIT_OP_MAX,
};
+enum camera_flash_opcode {
+ CAMERA_SENSOR_FLASH_OP_INVALID,
+ CAMERA_SENSOR_FLASH_OP_OFF,
+ CAMERA_SENSOR_FLASH_OP_FIRELOW,
+ CAMERA_SENSOR_FLASH_OP_FIREHIGH,
+ CAMERA_SENSOR_FLASH_OP_MAX,
+};
+
enum camera_sensor_i2c_type {
CAMERA_SENSOR_I2C_TYPE_INVALID,
CAMERA_SENSOR_I2C_TYPE_BYTE,
@@ -146,6 +161,10 @@
CAM_ACTUATOR_PACKET_MANUAL_MOVE_LENS
};
+enum cam_eeprom_packet_opcodes {
+ CAM_EEPROM_PACKET_OPCODE_INIT
+};
+
enum msm_bus_perf_setting {
S_INIT,
S_PREVIEW,
@@ -166,6 +185,12 @@
MSM_CAMERA_SPI_DEVICE,
};
+enum cam_flash_device_type {
+ CAMERA_FLASH_DEVICE_TYPE_PMIC = 0,
+ CAMERA_FLASH_DEVICE_TYPE_I2C,
+ CAMERA_FLASH_DEVICE_TYPE_GPIO,
+};
+
enum cci_i2c_master_t {
MASTER_0,
MASTER_1,
@@ -263,14 +288,9 @@
uint16_t power_setting_size;
struct cam_sensor_power_setting *power_down_setting;
uint16_t power_down_setting_size;
- struct msm_camera_gpio_conf *gpio_conf;
- struct camera_vreg_t *cam_vreg;
- int num_vreg;
- struct clk **clk_ptr;
- struct msm_cam_clk_info *clk_info;
+ struct msm_camera_gpio_num_info *gpio_num_info;
struct msm_pinctrl_info pinctrl_info;
uint8_t cam_pinctrl_status;
- size_t clk_info_size;
};
struct cam_camera_slave_info {
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_soc_api.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_soc_api.c
deleted file mode 100644
index 2eed9ce..0000000
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_soc_api.c
+++ /dev/null
@@ -1,1331 +0,0 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/of_platform.h>
-#include <linux/msm-bus.h>
-#include "cam_sensor_soc_api.h"
-
-#define NO_SET_RATE -1
-#define INIT_RATE -2
-
-#ifdef CONFIG_CAM_SOC_API_DBG
-#define CDBG(fmt, args...) pr_err(fmt, ##args)
-#else
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
-#endif
-
-int msm_cam_clk_sel_src(struct device *dev, struct msm_cam_clk_info *clk_info,
- struct msm_cam_clk_info *clk_src_info, int num_clk)
-{
- int i;
- int rc = 0;
- struct clk *mux_clk = NULL;
- struct clk *src_clk = NULL;
-
- for (i = 0; i < num_clk; i++) {
- if (clk_src_info[i].clk_name) {
- mux_clk = clk_get(dev, clk_info[i].clk_name);
- if (IS_ERR(mux_clk)) {
- pr_err("%s get failed\n",
- clk_info[i].clk_name);
- continue;
- }
- src_clk = clk_get(dev, clk_src_info[i].clk_name);
- if (IS_ERR(src_clk)) {
- pr_err("%s get failed\n",
- clk_src_info[i].clk_name);
- continue;
- }
- clk_set_parent(mux_clk, src_clk);
- }
- }
- return rc;
-}
-
-int msm_cam_clk_enable(struct device *dev, struct msm_cam_clk_info *clk_info,
- struct clk **clk_ptr, int num_clk, int enable)
-{
- int i;
- int rc = 0;
- long clk_rate;
-
- if (enable) {
- for (i = 0; i < num_clk; i++) {
- CDBG("%s enable %s\n", __func__, clk_info[i].clk_name);
- clk_ptr[i] = clk_get(dev, clk_info[i].clk_name);
- if (IS_ERR(clk_ptr[i])) {
- pr_err("%s get failed\n", clk_info[i].clk_name);
- rc = PTR_ERR(clk_ptr[i]);
- goto cam_clk_get_err;
- }
- if (clk_info[i].clk_rate > 0) {
- clk_rate = clk_round_rate(clk_ptr[i],
- clk_info[i].clk_rate);
- if (clk_rate < 0) {
- pr_err("%s round failed\n",
- clk_info[i].clk_name);
- goto cam_clk_set_err;
- }
- rc = clk_set_rate(clk_ptr[i],
- clk_rate);
- if (rc < 0) {
- pr_err("%s set failed\n",
- clk_info[i].clk_name);
- goto cam_clk_set_err;
- }
-
- } else if (clk_info[i].clk_rate == INIT_RATE) {
- clk_rate = clk_get_rate(clk_ptr[i]);
- if (clk_rate == 0) {
- clk_rate =
- clk_round_rate(clk_ptr[i], 0);
- if (clk_rate < 0) {
- pr_err("%s round rate failed\n",
- clk_info[i].clk_name);
- goto cam_clk_set_err;
- }
- rc = clk_set_rate(clk_ptr[i],
- clk_rate);
- if (rc < 0) {
- pr_err("%s set rate failed\n",
- clk_info[i].clk_name);
- goto cam_clk_set_err;
- }
- }
- }
- rc = clk_prepare(clk_ptr[i]);
- if (rc < 0) {
- pr_err("%s prepare failed\n",
- clk_info[i].clk_name);
- goto cam_clk_prepare_err;
- }
-
- rc = clk_enable(clk_ptr[i]);
- if (rc < 0) {
- pr_err("%s enable failed\n",
- clk_info[i].clk_name);
- goto cam_clk_enable_err;
- }
- if (clk_info[i].delay > 20)
- msleep(clk_info[i].delay);
- else if (clk_info[i].delay)
- usleep_range(clk_info[i].delay * 1000,
- (clk_info[i].delay * 1000) + 1000);
- }
- } else {
- for (i = num_clk - 1; i >= 0; i--) {
- if (clk_ptr[i] != NULL) {
- CDBG("%s disable %s\n", __func__,
- clk_info[i].clk_name);
- clk_disable(clk_ptr[i]);
- clk_unprepare(clk_ptr[i]);
- clk_put(clk_ptr[i]);
- }
- }
- }
-
- return rc;
-
-cam_clk_enable_err:
- clk_unprepare(clk_ptr[i]);
-cam_clk_prepare_err:
-cam_clk_set_err:
- clk_put(clk_ptr[i]);
-cam_clk_get_err:
- for (i--; i >= 0; i--) {
- if (clk_ptr[i] != NULL) {
- clk_disable(clk_ptr[i]);
- clk_unprepare(clk_ptr[i]);
- clk_put(clk_ptr[i]);
- }
- }
-
- return rc;
-}
-
-int msm_camera_config_vreg(struct device *dev, struct camera_vreg_t *cam_vreg,
- int num_vreg, enum msm_camera_vreg_name_t *vreg_seq,
- int num_vreg_seq, struct regulator **reg_ptr, int config)
-{
- int i = 0, j = 0;
- int rc = 0;
- struct camera_vreg_t *curr_vreg;
-
- if (num_vreg_seq > num_vreg) {
- pr_err("%s:%d vreg sequence invalid\n", __func__, __LINE__);
- return -EINVAL;
- }
- if (!num_vreg_seq)
- num_vreg_seq = num_vreg;
-
- if (config) {
- for (i = 0; i < num_vreg_seq; i++) {
- if (vreg_seq) {
- j = vreg_seq[i];
- if (j >= num_vreg)
- continue;
- } else {
- j = i;
- }
- curr_vreg = &cam_vreg[j];
- reg_ptr[j] = regulator_get(dev,
- curr_vreg->reg_name);
- if (IS_ERR(reg_ptr[j])) {
- pr_err("%s: %s get failed\n",
- __func__,
- curr_vreg->reg_name);
- reg_ptr[j] = NULL;
- goto vreg_get_fail;
- }
- if (regulator_count_voltages(reg_ptr[j]) > 0) {
- rc = regulator_set_voltage(
- reg_ptr[j],
- curr_vreg->min_voltage,
- curr_vreg->max_voltage);
- if (rc < 0) {
- pr_err("%s: %s set voltage failed\n",
- __func__,
- curr_vreg->reg_name);
- goto vreg_set_voltage_fail;
- }
- if (curr_vreg->op_mode >= 0) {
- rc = regulator_set_load(
- reg_ptr[j],
- curr_vreg->op_mode);
- if (rc < 0) {
- pr_err(
- "%s:%s set optimum mode fail\n",
- __func__,
- curr_vreg->reg_name);
- goto vreg_set_opt_mode_fail;
- }
- }
- }
- }
- } else {
- for (i = num_vreg_seq-1; i >= 0; i--) {
- if (vreg_seq) {
- j = vreg_seq[i];
- if (j >= num_vreg)
- continue;
- } else {
- j = i;
- }
- curr_vreg = &cam_vreg[j];
- if (reg_ptr[j]) {
- if (regulator_count_voltages(reg_ptr[j]) > 0) {
- if (curr_vreg->op_mode >= 0) {
- regulator_set_load(
- reg_ptr[j], 0);
- }
- regulator_set_voltage(
- reg_ptr[j], 0, curr_vreg->
- max_voltage);
- }
- regulator_put(reg_ptr[j]);
- reg_ptr[j] = NULL;
- }
- }
- }
-
- return 0;
-
-vreg_unconfig:
- if (regulator_count_voltages(reg_ptr[j]) > 0)
- regulator_set_load(reg_ptr[j], 0);
-
-vreg_set_opt_mode_fail:
- if (regulator_count_voltages(reg_ptr[j]) > 0)
- regulator_set_voltage(reg_ptr[j], 0,
- curr_vreg->max_voltage);
-
-vreg_set_voltage_fail:
- regulator_put(reg_ptr[j]);
- reg_ptr[j] = NULL;
-
-vreg_get_fail:
- for (i--; i >= 0; i--) {
- if (vreg_seq) {
- j = vreg_seq[i];
- if (j >= num_vreg)
- continue;
- } else {
- j = i;
- }
- curr_vreg = &cam_vreg[j];
- goto vreg_unconfig;
- }
-
- return -ENODEV;
-}
-
-int msm_camera_enable_vreg(struct device *dev, struct camera_vreg_t *cam_vreg,
- int num_vreg, enum msm_camera_vreg_name_t *vreg_seq,
- int num_vreg_seq, struct regulator **reg_ptr, int enable)
-{
- int i = 0, j = 0, rc = 0;
-
- if (num_vreg_seq > num_vreg) {
- pr_err("%s:%d vreg sequence invalid\n", __func__, __LINE__);
- return -EINVAL;
- }
- if (!num_vreg_seq)
- num_vreg_seq = num_vreg;
-
- if (enable) {
- for (i = 0; i < num_vreg_seq; i++) {
- if (vreg_seq) {
- j = vreg_seq[i];
- if (j >= num_vreg)
- continue;
- } else
- j = i;
- if (IS_ERR(reg_ptr[j])) {
- pr_err("%s: %s null regulator\n",
- __func__, cam_vreg[j].reg_name);
- goto disable_vreg;
- }
- rc = regulator_enable(reg_ptr[j]);
- if (rc < 0) {
- pr_err("%s: %s enable failed\n",
- __func__, cam_vreg[j].reg_name);
- goto disable_vreg;
- }
- if (cam_vreg[j].delay > 20)
- msleep(cam_vreg[j].delay);
- else if (cam_vreg[j].delay)
- usleep_range(cam_vreg[j].delay * 1000,
- (cam_vreg[j].delay * 1000) + 1000);
- }
- } else {
- for (i = num_vreg_seq-1; i >= 0; i--) {
- if (vreg_seq) {
- j = vreg_seq[i];
- if (j >= num_vreg)
- continue;
- } else
- j = i;
- regulator_disable(reg_ptr[j]);
- if (cam_vreg[j].delay > 20)
- msleep(cam_vreg[j].delay);
- else if (cam_vreg[j].delay)
- usleep_range(cam_vreg[j].delay * 1000,
- (cam_vreg[j].delay * 1000) + 1000);
- }
- }
-
- return rc;
-disable_vreg:
- for (i--; i >= 0; i--) {
- if (vreg_seq) {
- j = vreg_seq[i];
- if (j >= num_vreg)
- continue;
- } else
- j = i;
- regulator_disable(reg_ptr[j]);
- if (cam_vreg[j].delay > 20)
- msleep(cam_vreg[j].delay);
- else if (cam_vreg[j].delay)
- usleep_range(cam_vreg[j].delay * 1000,
- (cam_vreg[j].delay * 1000) + 1000);
- }
-
- return rc;
-}
-
-int msm_camera_set_gpio_table(struct msm_gpio_set_tbl *gpio_tbl,
- uint8_t gpio_tbl_size, int gpio_en)
-{
- int rc = 0, i;
-
- if (gpio_en) {
- for (i = 0; i < gpio_tbl_size; i++) {
- gpio_set_value_cansleep(gpio_tbl[i].gpio,
- gpio_tbl[i].flags);
- usleep_range(gpio_tbl[i].delay,
- gpio_tbl[i].delay + 1000);
- }
- } else {
- for (i = gpio_tbl_size - 1; i >= 0; i--) {
- if (gpio_tbl[i].flags)
- gpio_set_value_cansleep(gpio_tbl[i].gpio,
- GPIOF_OUT_INIT_LOW);
- }
- }
-
- return rc;
-}
-
-int msm_camera_config_single_vreg(struct device *dev,
- struct camera_vreg_t *cam_vreg, struct regulator **reg_ptr, int config)
-{
- int rc = 0;
- const char *vreg_name = NULL;
-
- if (!dev || !cam_vreg || !reg_ptr) {
- pr_err("%s: get failed NULL parameter\n", __func__);
- goto vreg_get_fail;
- }
- if (cam_vreg->type == VREG_TYPE_CUSTOM) {
- if (cam_vreg->custom_vreg_name == NULL) {
- pr_err("%s : can't find sub reg name",
- __func__);
- goto vreg_get_fail;
- }
- vreg_name = cam_vreg->custom_vreg_name;
- } else {
- if (cam_vreg->reg_name == NULL) {
- pr_err("%s : can't find reg name", __func__);
- goto vreg_get_fail;
- }
- vreg_name = cam_vreg->reg_name;
- }
-
- if (config) {
- CDBG("%s enable %s\n", __func__, vreg_name);
- *reg_ptr = regulator_get(dev, vreg_name);
- if (IS_ERR(*reg_ptr)) {
- pr_err("%s: %s get failed\n", __func__, vreg_name);
- *reg_ptr = NULL;
- goto vreg_get_fail;
- }
- if (regulator_count_voltages(*reg_ptr) > 0) {
- CDBG("%s: voltage min=%d, max=%d\n",
- __func__, cam_vreg->min_voltage,
- cam_vreg->max_voltage);
- rc = regulator_set_voltage(
- *reg_ptr, cam_vreg->min_voltage,
- cam_vreg->max_voltage);
- if (rc < 0) {
- pr_err("%s: %s set voltage failed\n",
- __func__, vreg_name);
- goto vreg_set_voltage_fail;
- }
- if (cam_vreg->op_mode >= 0) {
- rc = regulator_set_load(*reg_ptr,
- cam_vreg->op_mode);
- if (rc < 0) {
- pr_err(
- "%s: %s set optimum mode failed\n",
- __func__, vreg_name);
- goto vreg_set_opt_mode_fail;
- }
- }
- }
- rc = regulator_enable(*reg_ptr);
- if (rc < 0) {
- pr_err("%s: %s regulator_enable failed\n", __func__,
- vreg_name);
- goto vreg_unconfig;
- }
- } else {
- CDBG("%s disable %s\n", __func__, vreg_name);
- if (*reg_ptr) {
- CDBG("%s disable %s\n", __func__, vreg_name);
- regulator_disable(*reg_ptr);
- if (regulator_count_voltages(*reg_ptr) > 0) {
- if (cam_vreg->op_mode >= 0)
- regulator_set_load(*reg_ptr, 0);
- regulator_set_voltage(
- *reg_ptr, 0, cam_vreg->max_voltage);
- }
- regulator_put(*reg_ptr);
- *reg_ptr = NULL;
- } else {
- pr_err("%s can't disable %s\n", __func__, vreg_name);
- }
- }
-
- return 0;
-
-vreg_unconfig:
- if (regulator_count_voltages(*reg_ptr) > 0)
- regulator_set_load(*reg_ptr, 0);
-
-vreg_set_opt_mode_fail:
- if (regulator_count_voltages(*reg_ptr) > 0)
- regulator_set_voltage(*reg_ptr, 0,
- cam_vreg->max_voltage);
-
-vreg_set_voltage_fail:
- regulator_put(*reg_ptr);
- *reg_ptr = NULL;
-
-vreg_get_fail:
- return -EINVAL;
-}
-
-int msm_camera_request_gpio_table(struct gpio *gpio_tbl, uint8_t size,
- int gpio_en)
-{
- int rc = 0, i = 0, err = 0;
-
- if (!gpio_tbl || !size) {
- pr_err("%s:%d invalid gpio_tbl %pK / size %d\n", __func__,
- __LINE__, gpio_tbl, size);
- return -EINVAL;
- }
- for (i = 0; i < size; i++) {
- CDBG("%s:%d i %d, gpio %d dir %ld\n", __func__, __LINE__, i,
- gpio_tbl[i].gpio, gpio_tbl[i].flags);
- }
- if (gpio_en) {
- for (i = 0; i < size; i++) {
- err = gpio_request_one(gpio_tbl[i].gpio,
- gpio_tbl[i].flags, gpio_tbl[i].label);
- if (err) {
- /*
- * After GPIO request fails, contine to
- * apply new gpios, outout a error message
- * for driver bringup debug
- */
- pr_err("%s:%d gpio %d:%s request fails\n",
- __func__, __LINE__,
- gpio_tbl[i].gpio, gpio_tbl[i].label);
- }
- }
- } else {
- gpio_free_array(gpio_tbl, size);
- }
-
- return rc;
-}
-
-/* Get all clocks from DT */
-static int msm_camera_get_clk_info_internal(struct device *dev,
- struct msm_cam_clk_info **clk_info,
- struct clk ***clk_ptr,
- size_t *num_clk)
-{
- int rc = 0;
- size_t cnt, tmp;
- uint32_t *rates, i = 0;
- const char *clk_ctl = NULL;
- bool clock_cntl_support = false;
- struct device_node *of_node;
-
- of_node = dev->of_node;
-
- cnt = of_property_count_strings(of_node, "clock-names");
- if (cnt <= 0) {
- pr_err("err: No clocks found in DT=%zu\n", cnt);
- return -EINVAL;
- }
-
- tmp = of_property_count_u32_elems(of_node, "qcom,clock-rates");
- if (tmp <= 0) {
- pr_err("err: No clk rates device tree, count=%zu", tmp);
- return -EINVAL;
- }
-
- if (cnt != tmp) {
- pr_err("err: clk name/rates mismatch, strings=%zu, rates=%zu\n",
- cnt, tmp);
- return -EINVAL;
- }
-
- if (of_property_read_bool(of_node, "qcom,clock-cntl-support")) {
- tmp = of_property_count_strings(of_node,
- "qcom,clock-control");
- if (tmp <= 0) {
- pr_err("err: control strings not found in DT count=%zu",
- tmp);
- return -EINVAL;
- }
- if (cnt != tmp) {
- pr_err("err: controls mismatch, strings=%zu, ctl=%zu\n",
- cnt, tmp);
- return -EINVAL;
- }
- clock_cntl_support = true;
- }
-
- *num_clk = cnt;
-
- *clk_info = devm_kcalloc(dev, cnt,
- sizeof(struct msm_cam_clk_info), GFP_KERNEL);
- if (!*clk_info)
- return -ENOMEM;
-
- *clk_ptr = devm_kcalloc(dev, cnt, sizeof(struct clk *),
- GFP_KERNEL);
- if (!*clk_ptr) {
- rc = -ENOMEM;
- goto free_clk_info;
- }
-
- rates = devm_kcalloc(dev, cnt, sizeof(long), GFP_KERNEL);
- if (!rates) {
- rc = -ENOMEM;
- goto free_clk_ptr;
- }
-
- rc = of_property_read_u32_array(of_node, "qcom,clock-rates",
- rates, cnt);
- if (rc < 0) {
- pr_err("err: failed reading clock rates\n");
- rc = -EINVAL;
- goto free_rates;
- }
-
- for (i = 0; i < cnt; i++) {
- rc = of_property_read_string_index(of_node, "clock-names",
- i, &((*clk_info)[i].clk_name));
- if (rc < 0) {
- pr_err("%s reading clock-name failed index %d\n",
- __func__, i);
- rc = -EINVAL;
- goto free_rates;
- }
-
- CDBG("dbg: clk-name[%d] = %s\n", i, (*clk_info)[i].clk_name);
- if (clock_cntl_support) {
- rc = of_property_read_string_index(of_node,
- "qcom,clock-control", i, &clk_ctl);
- if (rc < 0) {
- pr_err("%s reading clock-control failed index %d\n",
- __func__, i);
- rc = -EINVAL;
- goto free_rates;
- }
-
- if (!strcmp(clk_ctl, "NO_SET_RATE")) {
- (*clk_info)[i].clk_rate = NO_SET_RATE;
- } else if (!strcmp(clk_ctl, "INIT_RATE")) {
- (*clk_info)[i].clk_rate = INIT_RATE;
- } else if (!strcmp(clk_ctl, "SET_RATE")) {
- (*clk_info)[i].clk_rate = rates[i];
- } else {
- pr_err("%s: error: clock control has invalid value\n",
- __func__);
- rc = -EINVAL;
- goto free_rates;
- }
- } else {
- (*clk_info)[i].clk_rate =
- (rates[i] == 0) ? (long)-1 : rates[i];
- }
-
- CDBG("dbg: clk-rate[%d] = rate: %ld\n",
- i, (*clk_info)[i].clk_rate);
-
- (*clk_ptr)[i] =
- devm_clk_get(dev, (*clk_info)[i].clk_name);
- if (IS_ERR((*clk_ptr)[i])) {
- rc = PTR_ERR((*clk_ptr)[i]);
- goto release_clk;
- }
- CDBG("clk ptr[%d] :%pK\n", i, (*clk_ptr)[i]);
- }
-
- devm_kfree(dev, rates);
-
- return rc;
-
-release_clk:
- for (--i; i >= 0; i--)
- devm_clk_put(dev, (*clk_ptr)[i]);
-free_rates:
- devm_kfree(dev, rates);
-free_clk_ptr:
- devm_kfree(dev, *clk_ptr);
-free_clk_info:
- devm_kfree(dev, *clk_info);
- return rc;
-}
-
-/* Get all clocks from DT for I2C devices */
-int msm_camera_i2c_dev_get_clk_info(struct device *dev,
- struct msm_cam_clk_info **clk_info,
- struct clk ***clk_ptr,
- size_t *num_clk)
-{
- int rc = 0;
-
- if (!dev || !clk_info || !clk_ptr || !num_clk)
- return -EINVAL;
-
- rc = msm_camera_get_clk_info_internal(dev, clk_info, clk_ptr, num_clk);
- return rc;
-}
-EXPORT_SYMBOL(msm_camera_i2c_dev_get_clk_info);
-
-/* Get all clocks from DT for platform devices */
-int msm_camera_get_clk_info(struct platform_device *pdev,
- struct msm_cam_clk_info **clk_info,
- struct clk ***clk_ptr,
- size_t *num_clk)
-{
- int rc = 0;
-
- if (!pdev || !&pdev->dev || !clk_info || !clk_ptr || !num_clk)
- return -EINVAL;
-
- rc = msm_camera_get_clk_info_internal(&pdev->dev,
- clk_info, clk_ptr, num_clk);
- return rc;
-}
-EXPORT_SYMBOL(msm_camera_get_clk_info);
-
-/* Get all clocks and multiple rates from DT */
-int msm_camera_get_clk_info_and_rates(
- struct platform_device *pdev,
- struct msm_cam_clk_info **pclk_info,
- struct clk ***pclks,
- uint32_t ***pclk_rates,
- size_t *num_set,
- size_t *num_clk)
-{
- int rc = 0, tmp_var, cnt, tmp;
- uint32_t i = 0, j = 0;
- struct device_node *of_node;
- uint32_t **rates;
- struct clk **clks;
- struct msm_cam_clk_info *clk_info;
-
- if (!pdev || !pclk_info || !num_clk
- || !pclk_rates || !pclks || !num_set)
- return -EINVAL;
-
- of_node = pdev->dev.of_node;
-
- cnt = of_property_count_strings(of_node, "clock-names");
- if (cnt <= 0) {
- pr_err("err: No clocks found in DT=%d\n", cnt);
- return -EINVAL;
- }
-
- tmp = of_property_count_u32_elems(of_node, "qcom,clock-rates");
- if (tmp <= 0) {
- pr_err("err: No clk rates device tree, count=%d\n", tmp);
- return -EINVAL;
- }
-
- if ((tmp % cnt) != 0) {
- pr_err("err: clk name/rates mismatch, strings=%d, rates=%d\n",
- cnt, tmp);
- return -EINVAL;
- }
-
- *num_clk = cnt;
- *num_set = (tmp / cnt);
-
- clk_info = devm_kcalloc(&pdev->dev, cnt,
- sizeof(struct msm_cam_clk_info), GFP_KERNEL);
- if (!clk_info)
- return -ENOMEM;
-
- clks = devm_kcalloc(&pdev->dev, cnt, sizeof(struct clk *),
- GFP_KERNEL);
- if (!clks) {
- rc = -ENOMEM;
- goto free_clk_info;
- }
-
- rates = devm_kcalloc(&pdev->dev, *num_set,
- sizeof(uint32_t *), GFP_KERNEL);
- if (!rates) {
- rc = -ENOMEM;
- goto free_clk;
- }
-
- for (i = 0; i < *num_set; i++) {
- rates[i] = devm_kcalloc(&pdev->dev, *num_clk,
- sizeof(uint32_t), GFP_KERNEL);
- if (!rates[i]) {
- rc = -ENOMEM;
- for (--i; i >= 0; i--)
- devm_kfree(&pdev->dev, rates[i]);
- goto free_rate;
- }
- }
-
- tmp_var = 0;
- for (i = 0; i < *num_set; i++) {
- for (j = 0; j < *num_clk; j++) {
- rc = of_property_read_u32_index(of_node,
- "qcom,clock-rates", tmp_var++, &rates[i][j]);
- if (rc < 0) {
- pr_err("err: failed reading clock rates\n");
- rc = -EINVAL;
- goto free_rate_array;
- }
- CDBG("Clock rate idx %d idx %d value %d\n",
- i, j, rates[i][j]);
- }
- }
- for (i = 0; i < *num_clk; i++) {
- rc = of_property_read_string_index(of_node, "clock-names",
- i, &clk_info[i].clk_name);
- if (rc < 0) {
- pr_err("%s reading clock-name failed index %d\n",
- __func__, i);
- rc = -EINVAL;
- goto free_rate_array;
- }
-
- CDBG("dbg: clk-name[%d] = %s\n", i, clk_info[i].clk_name);
-
- clks[i] =
- devm_clk_get(&pdev->dev, clk_info[i].clk_name);
- if (IS_ERR(clks[i])) {
- rc = PTR_ERR(clks[i]);
- goto release_clk;
- }
- CDBG("clk ptr[%d] :%pK\n", i, clks[i]);
- }
- *pclk_info = clk_info;
- *pclks = clks;
- *pclk_rates = rates;
-
- return rc;
-
-release_clk:
- for (--i; i >= 0; i--)
- devm_clk_put(&pdev->dev, clks[i]);
-free_rate_array:
- for (i = 0; i < *num_set; i++)
- devm_kfree(&pdev->dev, rates[i]);
-free_rate:
- devm_kfree(&pdev->dev, rates);
-free_clk:
- devm_kfree(&pdev->dev, clks);
-free_clk_info:
- devm_kfree(&pdev->dev, clk_info);
- return rc;
-}
-EXPORT_SYMBOL(msm_camera_get_clk_info_and_rates);
-
-/* Enable/Disable all clocks */
-int msm_camera_clk_enable(struct device *dev,
- struct msm_cam_clk_info *clk_info,
- struct clk **clk_ptr, int num_clk, int enable)
-{
- int i;
- int rc = 0;
- long clk_rate;
-
- if (enable) {
- for (i = 0; i < num_clk; i++) {
- pr_err("enable %s\n", clk_info[i].clk_name);
- if (clk_info[i].clk_rate > 0) {
- clk_rate = clk_round_rate(clk_ptr[i],
- clk_info[i].clk_rate);
- if (clk_rate < 0) {
- pr_err("%s round failed\n",
- clk_info[i].clk_name);
- goto cam_clk_set_err;
- }
- rc = clk_set_rate(clk_ptr[i],
- clk_rate);
- if (rc < 0) {
- pr_err("%s set failed\n",
- clk_info[i].clk_name);
- goto cam_clk_set_err;
- }
-
- } else if (clk_info[i].clk_rate == INIT_RATE) {
- clk_rate = clk_get_rate(clk_ptr[i]);
- if (clk_rate == 0) {
- clk_rate =
- clk_round_rate(clk_ptr[i], 0);
- if (clk_rate < 0) {
- pr_err("%s round rate failed\n",
- clk_info[i].clk_name);
- goto cam_clk_set_err;
- }
- rc = clk_set_rate(clk_ptr[i],
- clk_rate);
- if (rc < 0) {
- pr_err("%s set rate failed\n",
- clk_info[i].clk_name);
- goto cam_clk_set_err;
- }
- }
- }
- rc = clk_prepare_enable(clk_ptr[i]);
- if (rc < 0) {
- pr_err("%s enable failed\n",
- clk_info[i].clk_name);
- goto cam_clk_enable_err;
- }
- if (clk_info[i].delay > 20) {
- msleep(clk_info[i].delay);
- } else if (clk_info[i].delay) {
- usleep_range(clk_info[i].delay * 1000,
- (clk_info[i].delay * 1000) + 1000);
- }
- }
- } else {
- for (i = num_clk - 1; i >= 0; i--) {
- if (clk_ptr[i] != NULL) {
- pr_err("%s disable %s\n", __func__,
- clk_info[i].clk_name);
- clk_disable_unprepare(clk_ptr[i]);
- }
- }
- }
- return rc;
-
-cam_clk_enable_err:
-cam_clk_set_err:
- for (i--; i >= 0; i--) {
- if (clk_ptr[i] != NULL)
- clk_disable_unprepare(clk_ptr[i]);
- }
- return rc;
-}
-EXPORT_SYMBOL(msm_camera_clk_enable);
-
-/* Set rate on a specific clock */
-long msm_camera_clk_set_rate(struct device *dev,
- struct clk *clk,
- long clk_rate)
-{
- int rc = 0;
- long rate = 0;
-
- if (!dev || !clk || (clk_rate < 0))
- return -EINVAL;
-
- CDBG("clk : %pK, enable : %ld\n", clk, clk_rate);
-
- if (clk_rate > 0) {
- rate = clk_round_rate(clk, clk_rate);
- if (rate < 0) {
- pr_err("round rate failed\n");
- return -EINVAL;
- }
-
- rc = clk_set_rate(clk, rate);
- if (rc < 0) {
- pr_err("set rate failed\n");
- return -EINVAL;
- }
- }
-
- return rate;
-}
-EXPORT_SYMBOL(msm_camera_clk_set_rate);
-
-/* release memory allocated for clocks */
-static int msm_camera_put_clk_info_internal(struct device *dev,
- struct msm_cam_clk_info **clk_info,
- struct clk ***clk_ptr, int cnt)
-{
- int i;
-
- for (i = cnt - 1; i >= 0; i--) {
- if (clk_ptr[i] != NULL)
- devm_clk_put(dev, (*clk_ptr)[i]);
-
- CDBG("clk ptr[%d] :%pK\n", i, (*clk_ptr)[i]);
- }
- devm_kfree(dev, *clk_info);
- devm_kfree(dev, *clk_ptr);
- *clk_info = NULL;
- *clk_ptr = NULL;
- return 0;
-}
-
-/* release memory allocated for clocks for i2c devices */
-int msm_camera_i2c_dev_put_clk_info(struct device *dev,
- struct msm_cam_clk_info **clk_info,
- struct clk ***clk_ptr, int cnt)
-{
- int rc = 0;
-
- if (!dev || !clk_info || !clk_ptr)
- return -EINVAL;
-
- rc = msm_camera_put_clk_info_internal(dev, clk_info, clk_ptr, cnt);
- return rc;
-}
-EXPORT_SYMBOL(msm_camera_i2c_dev_put_clk_info);
-
-/* release memory allocated for clocks for platform devices */
-int msm_camera_put_clk_info(struct platform_device *pdev,
- struct msm_cam_clk_info **clk_info,
- struct clk ***clk_ptr, int cnt)
-{
- int rc = 0;
-
- if (!pdev || !&pdev->dev || !clk_info || !clk_ptr)
- return -EINVAL;
-
- rc = msm_camera_put_clk_info_internal(&pdev->dev,
- clk_info, clk_ptr, cnt);
- return rc;
-}
-EXPORT_SYMBOL(msm_camera_put_clk_info);
-
-int msm_camera_put_clk_info_and_rates(struct platform_device *pdev,
- struct msm_cam_clk_info **clk_info,
- struct clk ***clk_ptr, uint32_t ***clk_rates,
- size_t set, size_t cnt)
-{
- int i;
-
- for (i = set - 1; i >= 0; i--)
- devm_kfree(&pdev->dev, (*clk_rates)[i]);
-
- devm_kfree(&pdev->dev, *clk_rates);
- for (i = cnt - 1; i >= 0; i--) {
- if (clk_ptr[i] != NULL)
- devm_clk_put(&pdev->dev, (*clk_ptr)[i]);
- CDBG("clk ptr[%d] :%pK\n", i, (*clk_ptr)[i]);
- }
- devm_kfree(&pdev->dev, *clk_info);
- devm_kfree(&pdev->dev, *clk_ptr);
- *clk_info = NULL;
- *clk_ptr = NULL;
- *clk_rates = NULL;
-
- return 0;
-}
-EXPORT_SYMBOL(msm_camera_put_clk_info_and_rates);
-
-/* Get regulators from DT */
-int msm_camera_get_regulator_info(struct platform_device *pdev,
- struct msm_cam_regulator **vdd_info,
- int *num_reg)
-{
- uint32_t cnt;
- int i, rc;
- struct device_node *of_node;
- char prop_name[32];
- struct msm_cam_regulator *tmp_reg;
-
- if (!pdev || !vdd_info || !num_reg)
- return -EINVAL;
-
- of_node = pdev->dev.of_node;
-
- if (!of_get_property(of_node, "qcom,vdd-names", NULL)) {
- pr_err("err: Regulators property not found\n");
- return -EINVAL;
- }
-
- cnt = of_property_count_strings(of_node, "qcom,vdd-names");
- if (cnt <= 0) {
- pr_err("err: no regulators found in device tree, count=%d",
- cnt);
- return -EINVAL;
- }
-
- tmp_reg = devm_kcalloc(&pdev->dev, cnt,
- sizeof(struct msm_cam_regulator), GFP_KERNEL);
- if (!tmp_reg)
- return -ENOMEM;
-
- for (i = 0; i < cnt; i++) {
- rc = of_property_read_string_index(of_node,
- "qcom,vdd-names", i, &tmp_reg[i].name);
- if (rc < 0) {
- pr_err("Fail to fetch regulators: %d\n", i);
- rc = -EINVAL;
- goto err1;
- }
-
- CDBG("regulator-names[%d] = %s\n", i, tmp_reg[i].name);
-
- snprintf(prop_name, 32, "%s-supply", tmp_reg[i].name);
-
- if (of_get_property(of_node, prop_name, NULL)) {
- tmp_reg[i].vdd =
- devm_regulator_get(&pdev->dev, tmp_reg[i].name);
- if (IS_ERR(tmp_reg[i].vdd)) {
- rc = -EINVAL;
- pr_err("Fail to get regulator :%d\n", i);
- goto err1;
- }
- } else {
- pr_err("Regulator phandle not found :%s\n",
- tmp_reg[i].name);
- rc = -EINVAL;
- goto err1;
- }
- CDBG("vdd ptr[%d] :%pK\n", i, tmp_reg[i].vdd);
- }
-
- *num_reg = cnt;
- *vdd_info = tmp_reg;
-
- return 0;
-
-err1:
- for (--i; i >= 0; i--)
- devm_regulator_put(tmp_reg[i].vdd);
- devm_kfree(&pdev->dev, tmp_reg);
- return rc;
-}
-EXPORT_SYMBOL(msm_camera_get_regulator_info);
-
-
-/* Enable/Disable regulators */
-int msm_camera_regulator_enable(struct msm_cam_regulator *vdd_info,
- int cnt, int enable)
-{
- int i;
- int rc;
- struct msm_cam_regulator *tmp = vdd_info;
-
- if (!tmp) {
- pr_err("Invalid params");
- return -EINVAL;
- }
- CDBG("cnt : %d\n", cnt);
-
- for (i = 0; i < cnt; i++) {
- if (tmp && !IS_ERR_OR_NULL(tmp->vdd)) {
- CDBG("name : %s, enable : %d\n", tmp->name, enable);
- if (enable) {
- rc = regulator_enable(tmp->vdd);
- if (rc < 0) {
- pr_err("regulator enable failed %d\n",
- i);
- goto disable_reg;
- }
- } else {
- rc = regulator_disable(tmp->vdd);
- if (rc < 0)
- pr_err("regulator disable failed %d\n",
- i);
- }
- }
- tmp++;
- }
-
- return 0;
-disable_reg:
- for (--i; i > 0; i--) {
- --tmp;
- if (!IS_ERR_OR_NULL(tmp->vdd))
- regulator_disable(tmp->vdd);
- }
- return rc;
-}
-EXPORT_SYMBOL(msm_camera_regulator_enable);
-
-/* Put regulators regulators */
-void msm_camera_put_regulators(struct platform_device *pdev,
- struct msm_cam_regulator **vdd_info, int cnt)
-{
- int i;
-
- if (!vdd_info || !*vdd_info) {
- pr_err("Invalid params\n");
- return;
- }
-
- for (i = cnt - 1; i >= 0; i--) {
- if (vdd_info[i] && !IS_ERR_OR_NULL(vdd_info[i]->vdd))
- devm_regulator_put(vdd_info[i]->vdd);
- CDBG("vdd ptr[%d] :%pK\n", i, vdd_info[i]->vdd);
- }
-
- devm_kfree(&pdev->dev, *vdd_info);
- *vdd_info = NULL;
-}
-EXPORT_SYMBOL(msm_camera_put_regulators);
-
-struct resource *msm_camera_get_irq(struct platform_device *pdev,
- char *irq_name)
-{
- if (!pdev || !irq_name) {
- pr_err("Invalid params\n");
- return NULL;
- }
-
- CDBG("Get irq for %s\n", irq_name);
- return platform_get_resource_byname(pdev, IORESOURCE_IRQ, irq_name);
-}
-EXPORT_SYMBOL(msm_camera_get_irq);
-
-int msm_camera_register_irq(struct platform_device *pdev,
- struct resource *irq, irq_handler_t handler,
- unsigned long irqflags, char *irq_name, void *dev_id)
-{
- int rc = 0;
-
- if (!pdev || !irq || !handler || !irq_name || !dev_id) {
- pr_err("Invalid params\n");
- return -EINVAL;
- }
-
- rc = devm_request_irq(&pdev->dev, irq->start, handler,
- irqflags, irq_name, dev_id);
- if (rc < 0) {
- pr_err("irq request fail\n");
- rc = -EINVAL;
- }
-
- CDBG("Registered irq for %s[resource - %pK]\n", irq_name, irq);
-
- return rc;
-}
-EXPORT_SYMBOL(msm_camera_register_irq);
-
-int msm_camera_register_threaded_irq(struct platform_device *pdev,
- struct resource *irq, irq_handler_t handler_fn,
- irq_handler_t thread_fn, unsigned long irqflags,
- const char *irq_name, void *dev_id)
-{
- int rc = 0;
-
- if (!pdev || !irq || !irq_name || !dev_id) {
- pr_err("Invalid params\n");
- return -EINVAL;
- }
-
- rc = devm_request_threaded_irq(&pdev->dev, irq->start, handler_fn,
- thread_fn, irqflags, irq_name, dev_id);
- if (rc < 0) {
- pr_err("irq request fail\n");
- rc = -EINVAL;
- }
-
- CDBG("Registered irq for %s[resource - %pK]\n", irq_name, irq);
-
- return rc;
-}
-EXPORT_SYMBOL(msm_camera_register_threaded_irq);
-
-int msm_camera_enable_irq(struct resource *irq, int enable)
-{
- if (!irq) {
- pr_err("Invalid params\n");
- return -EINVAL;
- }
-
- CDBG("irq Enable %d\n", enable);
- if (enable)
- enable_irq(irq->start);
- else
- disable_irq(irq->start);
-
- return 0;
-}
-EXPORT_SYMBOL(msm_camera_enable_irq);
-
-int msm_camera_unregister_irq(struct platform_device *pdev,
- struct resource *irq, void *dev_id)
-{
-
- if (!pdev || !irq || !dev_id) {
- pr_err("Invalid params\n");
- return -EINVAL;
- }
-
- CDBG("Un Registering irq for [resource - %pK]\n", irq);
- devm_free_irq(&pdev->dev, irq->start, dev_id);
-
- return 0;
-}
-EXPORT_SYMBOL(msm_camera_unregister_irq);
-
-void __iomem *msm_camera_get_reg_base(struct platform_device *pdev,
- char *device_name, int reserve_mem)
-{
- struct resource *mem;
- void *base;
-
- if (!pdev || !device_name) {
- pr_err("Invalid params\n");
- return NULL;
- }
-
- CDBG("device name :%s\n", device_name);
- mem = platform_get_resource_byname(pdev,
- IORESOURCE_MEM, device_name);
- if (!mem) {
- pr_err("err: mem resource %s not found\n", device_name);
- return NULL;
- }
-
- if (reserve_mem) {
- CDBG("device:%pK, mem : %pK, size : %d\n",
- &pdev->dev, mem, (int)resource_size(mem));
- if (!devm_request_mem_region(&pdev->dev, mem->start,
- resource_size(mem),
- device_name)) {
- pr_err("err: no valid mem region for device:%s\n",
- device_name);
- return NULL;
- }
- }
-
- base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
- if (!base) {
- devm_release_mem_region(&pdev->dev, mem->start,
- resource_size(mem));
- pr_err("err: ioremap failed: %s\n", device_name);
- return NULL;
- }
-
- CDBG("base : %pK\n", base);
- return base;
-}
-EXPORT_SYMBOL(msm_camera_get_reg_base);
-
-uint32_t msm_camera_get_res_size(struct platform_device *pdev,
- char *device_name)
-{
- struct resource *mem;
-
- if (!pdev || !device_name) {
- pr_err("Invalid params\n");
- return 0;
- }
-
- CDBG("device name :%s\n", device_name);
- mem = platform_get_resource_byname(pdev,
- IORESOURCE_MEM, device_name);
- if (!mem) {
- pr_err("err: mem resource %s not found\n", device_name);
- return 0;
- }
- return resource_size(mem);
-}
-EXPORT_SYMBOL(msm_camera_get_res_size);
-
-
-int msm_camera_put_reg_base(struct platform_device *pdev,
- void __iomem *base, char *device_name, int reserve_mem)
-{
- struct resource *mem;
-
- if (!pdev || !base || !device_name) {
- pr_err("Invalid params\n");
- return -EINVAL;
- }
-
- CDBG("device name :%s\n", device_name);
- mem = platform_get_resource_byname(pdev,
- IORESOURCE_MEM, device_name);
- if (!mem) {
- pr_err("err: mem resource %s not found\n", device_name);
- return -EINVAL;
- }
- CDBG("mem : %pK, size : %d\n", mem, (int)resource_size(mem));
-
- devm_iounmap(&pdev->dev, base);
- if (reserve_mem)
- devm_release_mem_region(&pdev->dev,
- mem->start, resource_size(mem));
-
- return 0;
-}
-EXPORT_SYMBOL(msm_camera_put_reg_base);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_soc_api.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_soc_api.h
deleted file mode 100644
index c316090..0000000
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_soc_api.h
+++ /dev/null
@@ -1,473 +0,0 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _CAM_SENSOR_SOC_API_H_
-#define _CAM_SENSOR_SOC_API_H_
-
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/of_platform.h>
-#include <linux/spinlock_types.h>
-#include <linux/mutex.h>
-#include <linux/clk.h>
-#include <linux/regulator/consumer.h>
-#include <linux/gpio.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include "cam_sensor_cmn_header.h"
-
-struct msm_cam_regulator {
- const char *name;
- struct regulator *vdd;
-};
-
-struct msm_gpio_set_tbl {
- unsigned int gpio;
- unsigned long flags;
- uint32_t delay;
-};
-
-/**
- * @brief : Gets clock information from dtsi
- *
- * This function extracts the clocks information for a specific
- * platform device
- *
- * @param pdev : Platform device to get clocks information
- * @param clk_info : Pointer to populate clock information array
- * @param clk_ptr : Pointer to populate clock resource pointers
- * @param num_clk: Pointer to populate the number of clocks
- * extracted from dtsi
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int msm_camera_get_clk_info(struct platform_device *pdev,
- struct msm_cam_clk_info **clk_info,
- struct clk ***clk_ptr,
- size_t *num_clk);
-
-/**
- * @brief : Gets clock information from dtsi
- *
- * This function extracts the clocks information for a specific
- * i2c device
- *
- * @param dev : i2c device to get clocks information
- * @param clk_info : Pointer to populate clock information array
- * @param clk_ptr : Pointer to populate clock resource pointers
- * @param num_clk: Pointer to populate the number of clocks
- * extracted from dtsi
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int msm_camera_i2c_dev_get_clk_info(struct device *dev,
- struct msm_cam_clk_info **clk_info,
- struct clk ***clk_ptr,
- size_t *num_clk);
-
-/**
- * @brief : Gets clock information and rates from dtsi
- *
- * This function extracts the clocks information for a specific
- * platform device
- *
- * @param pdev : Platform device to get clocks information
- * @param clk_info : Pointer to populate clock information array
- * @param clk_ptr : Pointer to populate clock resource pointers
- * @param clk_rates : Pointer to populate clock rates
- * @param num_set: Pointer to populate the number of sets of rates
- * @param num_clk: Pointer to populate the number of clocks
- * extracted from dtsi
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int msm_camera_get_clk_info_and_rates(
- struct platform_device *pdev,
- struct msm_cam_clk_info **clk_info,
- struct clk ***clk_ptr,
- uint32_t ***clk_rates,
- size_t *num_set,
- size_t *num_clk);
-
-/**
- * @brief : Puts clock information
- *
- * This function releases the memory allocated for the clocks
- *
- * @param pdev : Pointer to platform device
- * @param clk_info : Pointer to release the allocated memory
- * @param clk_ptr : Pointer to release the clock resources
- * @param cnt : Number of clk resources
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int msm_camera_put_clk_info(struct platform_device *pdev,
- struct msm_cam_clk_info **clk_info,
- struct clk ***clk_ptr, int cnt);
-
-/**
- * @brief : Puts clock information
- *
- * This function releases the memory allocated for the clocks
- *
- * @param dev : Pointer to i2c device
- * @param clk_info : Pointer to release the allocated memory
- * @param clk_ptr : Pointer to release the clock resources
- * @param cnt : Number of clk resources
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int msm_camera_i2c_dev_put_clk_info(struct device *dev,
- struct msm_cam_clk_info **clk_info,
- struct clk ***clk_ptr, int cnt);
-
-/**
- * @brief : Puts clock information
- *
- * This function releases the memory allocated for the clocks
- *
- * @param pdev : Pointer to platform device
- * @param clk_info : Pointer to release the allocated memory
- * @param clk_ptr : Pointer to release the clock resources
- * @param clk_ptr : Pointer to release the clock rates
- * @param set : Number of sets of clock rates
- * @param cnt : Number of clk resources
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int msm_camera_put_clk_info_and_rates(struct platform_device *pdev,
- struct msm_cam_clk_info **clk_info,
- struct clk ***clk_ptr, uint32_t ***clk_rates,
- size_t set, size_t cnt);
-/**
- * @brief : Enable clocks
- *
- * This function enables the clocks for a specified device
- *
- * @param dev : Device to get clocks information
- * @param clk_info : Pointer to populate clock information
- * @param clk_ptr : Pointer to populate clock information
- * @param num_clk: Pointer to populate the number of clocks
- * extracted from dtsi
- * @param enable : Flag to specify enable/disable
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-
-int msm_camera_clk_enable(struct device *dev,
- struct msm_cam_clk_info *clk_info,
- struct clk **clk_ptr,
- int num_clk,
- int enable);
-/**
- * @brief : Set clock rate
- *
- * This function sets the rate for a specified clock and
- * returns the rounded value
- *
- * @param dev : Device to get clocks information
- * @param clk : Pointer to clock to set rate
- * @param clk_rate : Rate to be set
- *
- * @return Status of operation. Negative in case of error. clk rate otherwise.
- */
-
-long msm_camera_clk_set_rate(struct device *dev,
- struct clk *clk,
- long clk_rate);
-/**
- * @brief : Gets regulator info
- *
- * This function extracts the regulator information for a specific
- * platform device
- *
- * @param pdev : platform device to get regulator information
- * @param vdd_info: Pointer to populate the regulator names
- * @param num_reg: Pointer to populate the number of regulators
- * extracted from dtsi
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int msm_camera_get_regulator_info(struct platform_device *pdev,
- struct msm_cam_regulator **vdd_info, int *num_reg);
-/**
- * @brief : Enable/Disable the regultors
- *
- * This function enables/disables the regulators for a specific
- * platform device
- *
- * @param vdd_info: Pointer to list of regulators
- * @param cnt: Number of regulators to enable/disable
- * @param enable: Flags specifies either enable/disable
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-
-int msm_camera_regulator_enable(struct msm_cam_regulator *vdd_info,
- int cnt, int enable);
-
-/**
- * @brief : Release the regulators
- *
- * This function releases the regulator resources.
- *
- * @param pdev: Pointer to platform device
- * @param vdd_info: Pointer to list of regulators
- * @param cnt: Number of regulators to release
- */
-
-void msm_camera_put_regulators(struct platform_device *pdev,
- struct msm_cam_regulator **vdd_info, int cnt);
-/**
- * @brief : Get the IRQ resource
- *
- * This function gets the irq resource from dtsi for a specific
- * platform device
- *
- * @param pdev : Platform device to get IRQ
- * @param irq_name: Name of the IRQ resource to get from DTSI
- *
- * @return Pointer to resource if success else null
- */
-
-struct resource *msm_camera_get_irq(struct platform_device *pdev,
- char *irq_name);
-/**
- * @brief : Register the IRQ
- *
- * This function registers the irq resource for specified hardware
- *
- * @param pdev : Platform device to register IRQ resource
- * @param irq : IRQ resource
- * @param handler : IRQ handler
- * @param irqflags : IRQ flags
- * @param irq_name: Name of the IRQ
- * @param dev : Token of the device
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-
-int msm_camera_register_irq(struct platform_device *pdev,
- struct resource *irq,
- irq_handler_t handler,
- unsigned long irqflags,
- char *irq_name,
- void *dev);
-
-/**
- * @brief : Register the threaded IRQ
- *
- * This function registers the irq resource for specified hardware
- *
- * @param pdev : Platform device to register IRQ resource
- * @param irq : IRQ resource
- * @param handler_fn : IRQ handler function
- * @param thread_fn : thread handler function
- * @param irqflags : IRQ flags
- * @param irq_name: Name of the IRQ
- * @param dev : Token of the device
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-
-int msm_camera_register_threaded_irq(struct platform_device *pdev,
- struct resource *irq,
- irq_handler_t handler_fn,
- irq_handler_t thread_fn,
- unsigned long irqflags,
- const char *irq_name,
- void *dev);
-
-/**
- * @brief : Enable/Disable the IRQ
- *
- * This function enables or disables a specific IRQ
- *
- * @param irq : IRQ resource
- * @param flag : flag to enable/disable
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-
-int msm_camera_enable_irq(struct resource *irq, int flag);
-
-/**
- * @brief : UnRegister the IRQ
- *
- * This function Unregisters/Frees the irq resource
- *
- * @param pdev : Pointer to platform device
- * @param irq : IRQ resource
- * @param dev : Token of the device
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-
-int msm_camera_unregister_irq(struct platform_device *pdev,
- struct resource *irq, void *dev_id);
-
-/**
- * @brief : Gets device register base
- *
- * This function extracts the device's register base from the dtsi
- * for the specified platform device
- *
- * @param pdev : Platform device to get regulator infor
- * @param device_name : Name of the device to fetch the register base
- * @param reserve_mem : Flag to decide whether to reserve memory
- * region or not.
- *
- * @return Pointer to resource if success else null
- */
-
-void __iomem *msm_camera_get_reg_base(struct platform_device *pdev,
- char *device_name, int reserve_mem);
-
-/**
- * @brief : Puts device register base
- *
- * This function releases the memory region for the specified
- * resource
- *
- * @param pdev : Pointer to platform device
- * @param base : Pointer to base to unmap
- * @param device_name : Device name
- * @param reserve_mem : Flag to decide whether to release memory
- * region or not.
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-
-int msm_camera_put_reg_base(struct platform_device *pdev, void __iomem *base,
- char *device_name, int reserve_mem);
-
-/**
- * @brief : Gets resource size
- *
- * This function returns the size of the resource for the
- * specified platform device
- *
- * @param pdev : Platform device to get regulator infor
- * @param device_name : Name of the device to fetch the register base
- *
- * @return size of the resource
- */
-
-uint32_t msm_camera_get_res_size(struct platform_device *pdev,
- char *device_name);
-
-/**
- * @brief : Selects clock source
- *
- *
- * @param dev : Token of the device
- * @param clk_info : Clock Info structure
- * @param clk_src_info : Clock Info structure
- * @param num_clk : Number of clocks
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int msm_cam_clk_sel_src(struct device *dev, struct msm_cam_clk_info *clk_info,
- struct msm_cam_clk_info *clk_src_info, int num_clk);
-
-/**
- * @brief : Enables the clock
- *
- *
- * @param dev : Token of the device
- * @param clk_info : Clock Info structure
- * @param clk_tr : Pointer to lock strucure
- * @param num_clk : Number of clocks
- * @param enable : Enable/disable the clock
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int msm_cam_clk_enable(struct device *dev, struct msm_cam_clk_info *clk_info,
- struct clk **clk_ptr, int num_clk, int enable);
-
-/**
- * @brief : Configures voltage regulator
- *
- *
- * @param dev : Token of the device
- * @param cam_vreg : Regulator dt structure
- * @param num_vreg : Number of regulators
- * @param vreg_seq : Regulator sequence type
- * @param num_clk : Number of clocks
- * @param reg_ptr : Regulator pointer
- * @param config : Enable/disable configuring the regulator
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int msm_camera_config_vreg(struct device *dev, struct camera_vreg_t *cam_vreg,
- int num_vreg, enum msm_camera_vreg_name_t *vreg_seq,
- int num_vreg_seq, struct regulator **reg_ptr, int config);
-
-/**
- * @brief : Enables voltage regulator
- *
- *
- * @param dev : Token of the device
- * @param cam_vreg : Regulator dt structure
- * @param num_vreg : Number of regulators
- * @param vreg_seq : Regulator sequence type
- * @param num_clk : Number of clocks
- * @param reg_ptr : Regulator pointer
- * @param config : Enable/disable configuring the regulator
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int msm_camera_enable_vreg(struct device *dev, struct camera_vreg_t *cam_vreg,
- int num_vreg, enum msm_camera_vreg_name_t *vreg_seq,
- int num_vreg_seq, struct regulator **reg_ptr, int enable);
-
-/**
- * @brief : Sets table of GPIOs
- *
- * @param gpio_tbl : GPIO table parsed from dt
- * @param gpio_tbl_size : Size of GPIO table
- * @param gpio_en : Enable/disable the GPIO
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int msm_camera_set_gpio_table(struct msm_gpio_set_tbl *gpio_tbl,
- uint8_t gpio_tbl_size, int gpio_en);
-
-/**
- * @brief : Configures single voltage regulator
- *
- *
- * @param dev : Token of the device
- * @param cam_vreg : Regulator dt structure
- * @param num_vreg : Number of regulators
- * @param reg_ptr : Regulator pointer
- * @param config : Enable/disable configuring the regulator
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int msm_camera_config_single_vreg(struct device *dev,
- struct camera_vreg_t *cam_vreg, struct regulator **reg_ptr, int config);
-
-/**
- * @brief : Request table of gpios
- *
- *
- * @param gpio_tbl : Table of GPIOs
- * @param size : Size of table
- * @param gpio_en : Enable/disable the gpio
- *
- * @return Status of operation. Negative in case of error. Zero otherwise.
- */
-int msm_camera_request_gpio_table(struct gpio *gpio_tbl, uint8_t size,
- int gpio_en);
-
-#endif /* _CAM_SENSOR_SOC_API_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
index 44294e8..b1698ca 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
@@ -12,7 +12,7 @@
#include <linux/kernel.h>
#include "cam_sensor_util.h"
-#include "cam_sensor_soc_api.h"
+#include <cam_mem_mgr.h>
#define CAM_SENSOR_PINCTRL_STATE_SLEEP "cam_suspend"
#define CAM_SENSOR_PINCTRL_STATE_DEFAULT "cam_default"
@@ -20,9 +20,6 @@
#define VALIDATE_VOLTAGE(min, max, config_val) ((config_val) && \
(config_val >= min) && (config_val <= max))
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
-
static struct i2c_settings_list*
cam_sensor_get_i2c_ptr(struct i2c_settings_array *i2c_reg_settings,
uint32_t size)
@@ -57,8 +54,7 @@
int32_t rc = 0;
if (i2c_array == NULL) {
- pr_err("%s:%d ::FATAL:: Invalid argument\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "FATAL:: Invalid argument");
return -EINVAL;
}
@@ -87,8 +83,7 @@
struct i2c_settings_list *i2c_list = NULL;
if (i2c_list == NULL) {
- pr_err("%s:%d Invalid list ptr\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "Invalid list ptr");
return -EINVAL;
}
@@ -110,8 +105,7 @@
sizeof(
struct cam_cmd_unconditional_wait);
} else {
- pr_err("%s: %d Error: Delay Rxed Before any buffer: %d\n",
- __func__, __LINE__, offset);
+ CAM_ERR(CAM_SENSOR, "Delay Rxed Before any buffer: %d", offset);
return -EINVAL;
}
@@ -132,8 +126,7 @@
i2c_list =
cam_sensor_get_i2c_ptr(i2c_reg_settings, 1);
if (!i2c_list || !i2c_list->i2c_settings.reg_setting) {
- pr_err("%s: %d Failed in allocating mem for list\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "Failed in allocating mem for list");
return -ENOMEM;
}
@@ -172,8 +165,7 @@
cam_cmd_i2c_random_wr->header.count);
if (i2c_list == NULL ||
i2c_list->i2c_settings.reg_setting == NULL) {
- pr_err("%s: %d Failed in allocating i2c_list\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "Failed in allocating i2c_list");
return -ENOMEM;
}
@@ -235,8 +227,8 @@
* be spread across multiple cmd buffers
*/
- CDBG("%s:%d Total cmd Buf in Bytes: %d\n", __func__,
- __LINE__, cmd_desc[i].length);
+ CAM_DBG(CAM_SENSOR, "Total cmd Buf in Bytes: %d",
+ cmd_desc[i].length);
if (!cmd_desc[i].length)
continue;
@@ -245,10 +237,9 @@
(uint64_t *)&generic_ptr, &len_of_buff);
cmd_buf = (uint32_t *)generic_ptr;
if (rc < 0) {
- pr_err("%s:%d Failed in getting cmd hdl: %d Err: %d Buffer Len: %ld\n",
- __func__, __LINE__,
- cmd_desc[i].mem_handle, rc,
- len_of_buff);
+ CAM_ERR(CAM_SENSOR,
+ "cmd hdl failed:%d, Err: %d, Buffer_len: %ld",
+ cmd_desc[i].mem_handle, rc, len_of_buff);
return rc;
}
cmd_buf += cmd_desc[i].offset / sizeof(uint32_t);
@@ -268,8 +259,8 @@
i2c_reg_settings,
&cmd_length_in_bytes, &j, &list);
if (rc < 0) {
- pr_err("%s:%d :Error: Failed in random read %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR,
+ "Failed in random read %d", rc);
return rc;
}
@@ -289,8 +280,9 @@
i2c_reg_settings, j, &byte_cnt,
list);
if (rc < 0) {
- pr_err("%s:%d :Error: Failed in handling delay %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR,
+ "delay hdl failed: %d",
+ rc);
return rc;
}
@@ -300,21 +292,22 @@
&cmd_buf, i2c_reg_settings,
&byte_cnt, &j, &list);
if (rc < 0) {
- pr_err("%s:%d :Error: Failed in random read %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR,
+ "Random read fail: %d",
+ rc);
return rc;
}
} else {
- pr_err("%s: %d Wrong Wait Command: %d\n",
- __func__, __LINE__,
+ CAM_ERR(CAM_SENSOR,
+ "Wrong Wait Command: %d",
generic_op_code);
return -EINVAL;
}
break;
}
default:
- pr_err("%s:%d Invalid Command Type:%d\n",
- __func__, __LINE__, cmm_hdr->cmd_type);
+ CAM_ERR(CAM_SENSOR, "Invalid Command Type:%d",
+ cmm_hdr->cmd_type);
return -EINVAL;
}
}
@@ -324,22 +317,25 @@
return rc;
}
-int32_t msm_camera_fill_vreg_params(struct camera_vreg_t *cam_vreg,
- int num_vreg, struct cam_sensor_power_setting *power_setting,
+int32_t msm_camera_fill_vreg_params(
+ struct cam_hw_soc_info *soc_info,
+ struct cam_sensor_power_setting *power_setting,
uint16_t power_setting_size)
{
int32_t rc = 0, j = 0, i = 0;
+ uint32_t num_vreg;
/* Validate input parameters */
- if (!cam_vreg || !power_setting) {
- pr_err("%s:%d failed: cam_vreg %pK power_setting %pK", __func__,
- __LINE__, cam_vreg, power_setting);
+ if (!soc_info || !power_setting) {
+ CAM_ERR(CAM_SENSOR, "failed: soc_info %pK power_setting %pK",
+ soc_info, power_setting);
return -EINVAL;
}
- /* Validate size of num_vreg */
+ num_vreg = soc_info->num_rgltr;
+
if (num_vreg <= 0) {
- pr_err("failed: num_vreg %d", num_vreg);
+ CAM_ERR(CAM_SENSOR, "failed: num_vreg %d", num_vreg);
return -EINVAL;
}
@@ -347,16 +343,19 @@
switch (power_setting[i].seq_type) {
case SENSOR_VDIG:
for (j = 0; j < num_vreg; j++) {
- if (!strcmp(cam_vreg[j].reg_name, "cam_vdig")) {
- CDBG("%s:%d i %d j %d cam_vdig\n",
- __func__, __LINE__, i, j);
+ if (!strcmp(soc_info->rgltr_name[j],
+ "cam_vdig")) {
+
+ CAM_DBG(CAM_SENSOR,
+ "i: %d j: %d cam_vdig", i, j);
power_setting[i].seq_val = j;
+
if (VALIDATE_VOLTAGE(
- cam_vreg[j].min_voltage,
- cam_vreg[j].max_voltage,
+ soc_info->rgltr_min_volt[j],
+ soc_info->rgltr_max_volt[j],
power_setting[i].config_val)) {
- cam_vreg[j].min_voltage =
- cam_vreg[j].max_voltage =
+ soc_info->rgltr_min_volt[j] =
+ soc_info->rgltr_max_volt[j] =
power_setting[i].config_val;
}
break;
@@ -368,20 +367,24 @@
case SENSOR_VIO:
for (j = 0; j < num_vreg; j++) {
- if (!strcmp(cam_vreg[j].reg_name, "cam_vio")) {
- CDBG("%s:%d i %d j %d cam_vio\n",
- __func__, __LINE__, i, j);
+
+ if (!strcmp(soc_info->rgltr_name[j],
+ "cam_vio")) {
+ CAM_DBG(CAM_SENSOR,
+ "i: %d j: %d cam_vio", i, j);
power_setting[i].seq_val = j;
+
if (VALIDATE_VOLTAGE(
- cam_vreg[j].min_voltage,
- cam_vreg[j].max_voltage,
+ soc_info->rgltr_min_volt[j],
+ soc_info->rgltr_max_volt[j],
power_setting[i].config_val)) {
- cam_vreg[j].min_voltage =
- cam_vreg[j].max_voltage =
+ soc_info->rgltr_min_volt[j] =
+ soc_info->rgltr_max_volt[j] =
power_setting[i].config_val;
}
break;
}
+
}
if (j == num_vreg)
power_setting[i].seq_val = INVALID_VREG;
@@ -389,20 +392,24 @@
case SENSOR_VANA:
for (j = 0; j < num_vreg; j++) {
- if (!strcmp(cam_vreg[j].reg_name, "cam_vana")) {
- CDBG("%s:%d i %d j %d cam_vana\n",
- __func__, __LINE__, i, j);
+
+ if (!strcmp(soc_info->rgltr_name[j],
+ "cam_vana")) {
+ CAM_DBG(CAM_SENSOR,
+ "i: %d j: %d cam_vana", i, j);
power_setting[i].seq_val = j;
+
if (VALIDATE_VOLTAGE(
- cam_vreg[j].min_voltage,
- cam_vreg[j].max_voltage,
+ soc_info->rgltr_min_volt[j],
+ soc_info->rgltr_max_volt[j],
power_setting[i].config_val)) {
- cam_vreg[j].min_voltage =
- cam_vreg[j].max_voltage =
+ soc_info->rgltr_min_volt[j] =
+ soc_info->rgltr_max_volt[j] =
power_setting[i].config_val;
}
break;
}
+
}
if (j == num_vreg)
power_setting[i].seq_val = INVALID_VREG;
@@ -410,20 +417,25 @@
case SENSOR_VAF:
for (j = 0; j < num_vreg; j++) {
- if (!strcmp(cam_vreg[j].reg_name, "cam_vaf")) {
- CDBG("%s:%d i %d j %d cam_vaf\n",
- __func__, __LINE__, i, j);
+
+ if (!strcmp(soc_info->rgltr_name[j],
+ "cam_vaf")) {
+ CAM_DBG(CAM_SENSOR,
+ "i: %d j: %d cam_vaf", i, j);
power_setting[i].seq_val = j;
+
if (VALIDATE_VOLTAGE(
- cam_vreg[j].min_voltage,
- cam_vreg[j].max_voltage,
+ soc_info->rgltr_min_volt[j],
+ soc_info->rgltr_max_volt[j],
power_setting[i].config_val)) {
- cam_vreg[j].min_voltage =
- cam_vreg[j].max_voltage =
+ soc_info->rgltr_min_volt[j] =
+ soc_info->rgltr_max_volt[j] =
power_setting[i].config_val;
}
+
break;
}
+
}
if (j == num_vreg)
power_setting[i].seq_val = INVALID_VREG;
@@ -431,38 +443,43 @@
case SENSOR_CUSTOM_REG1:
for (j = 0; j < num_vreg; j++) {
- if (!strcmp(cam_vreg[j].reg_name,
+
+ if (!strcmp(soc_info->rgltr_name[j],
"cam_v_custom1")) {
- CDBG("%s:%d i %d j %d cam_vcustom1\n",
- __func__, __LINE__, i, j);
+ CAM_DBG(CAM_SENSOR,
+ "i:%d j:%d cam_vcustom1", i, j);
power_setting[i].seq_val = j;
+
if (VALIDATE_VOLTAGE(
- cam_vreg[j].min_voltage,
- cam_vreg[j].max_voltage,
+ soc_info->rgltr_min_volt[j],
+ soc_info->rgltr_max_volt[j],
power_setting[i].config_val)) {
- cam_vreg[j].min_voltage =
- cam_vreg[j].max_voltage =
+ soc_info->rgltr_min_volt[j] =
+ soc_info->rgltr_max_volt[j] =
power_setting[i].config_val;
}
break;
}
+
}
if (j == num_vreg)
power_setting[i].seq_val = INVALID_VREG;
break;
case SENSOR_CUSTOM_REG2:
for (j = 0; j < num_vreg; j++) {
- if (!strcmp(cam_vreg[j].reg_name,
+
+ if (!strcmp(soc_info->rgltr_name[j],
"cam_v_custom2")) {
- CDBG("%s:%d i %d j %d cam_vcustom2\n",
- __func__, __LINE__, i, j);
+ CAM_DBG(CAM_SENSOR,
+ "i:%d j:%d cam_vcustom2", i, j);
power_setting[i].seq_val = j;
+
if (VALIDATE_VOLTAGE(
- cam_vreg[j].min_voltage,
- cam_vreg[j].max_voltage,
+ soc_info->rgltr_min_volt[j],
+ soc_info->rgltr_max_volt[j],
power_setting[i].config_val)) {
- cam_vreg[j].min_voltage =
- cam_vreg[j].max_voltage =
+ soc_info->rgltr_min_volt[j] =
+ soc_info->rgltr_max_volt[j] =
power_setting[i].config_val;
}
break;
@@ -473,8 +490,8 @@
break;
default: {
- pr_err("%s:%d invalid seq_val %d\n", __func__,
- __LINE__, power_setting[i].seq_val);
+ CAM_ERR(CAM_SENSOR, "invalid seq_val %d",
+ power_setting[i].seq_val);
break;
}
}
@@ -483,269 +500,558 @@
return rc;
}
-int32_t msm_camera_get_dt_gpio_req_tbl(struct device_node *of_node,
- struct msm_camera_gpio_conf *gconf, uint16_t *gpio_array,
- uint16_t gpio_array_size)
+int cam_sensor_util_request_gpio_table(
+ struct cam_hw_soc_info *soc_info, int gpio_en)
{
- int32_t rc = 0, i = 0;
- uint32_t count = 0, *val_array = NULL;
+ int rc = 0, i = 0;
+ uint8_t size = 0;
+ struct cam_soc_gpio_data *gpio_conf =
+ soc_info->gpio_data;
+ struct gpio *gpio_tbl = gpio_conf->cam_gpio_req_tbl;
- if (!of_get_property(of_node, "qcom,gpio-req-tbl-num", &count))
- return 0;
+ size = gpio_conf->cam_gpio_req_tbl_size;
- count /= sizeof(uint32_t);
- if (!count) {
- pr_err("%s qcom,gpio-req-tbl-num 0\n", __func__);
+ if (gpio_conf->cam_gpio_common_tbl_size <= 0) {
+ CAM_INFO(CAM_SENSOR, "No GPIO entry");
return 0;
}
- val_array = kcalloc(count, sizeof(uint32_t), GFP_KERNEL);
- if (!val_array)
- return -ENOMEM;
-
- gconf->cam_gpio_req_tbl = kcalloc(count, sizeof(struct gpio),
- GFP_KERNEL);
- if (!gconf->cam_gpio_req_tbl) {
- rc = -ENOMEM;
- goto free_val_array;
- }
- gconf->cam_gpio_req_tbl_size = count;
-
- rc = of_property_read_u32_array(of_node, "qcom,gpio-req-tbl-num",
- val_array, count);
- if (rc < 0) {
- pr_err("%s failed %d\n", __func__, __LINE__);
- goto free_gpio_req_tbl;
+ if (!gpio_tbl || !size) {
+ CAM_ERR(CAM_SENSOR, "invalid gpio_tbl %pK / size %d",
+ gpio_tbl, size);
+ return -EINVAL;
}
- for (i = 0; i < count; i++) {
- if (val_array[i] >= gpio_array_size) {
- pr_err("%s gpio req tbl index %d invalid\n",
- __func__, val_array[i]);
- return -EINVAL;
+ for (i = 0; i < size; i++) {
+ CAM_DBG(CAM_SENSOR, "i: %d, gpio %d dir %ld", i,
+ gpio_tbl[i].gpio, gpio_tbl[i].flags);
+ }
+
+ if (gpio_en) {
+ for (i = 0; i < size; i++) {
+ rc = gpio_request_one(gpio_tbl[i].gpio,
+ gpio_tbl[i].flags, gpio_tbl[i].label);
+ if (rc) {
+ /*
+ * After GPIO request fails, continue to
+ * apply new GPIOs; output an error message
+ * for driver bringup debug
+ */
+ CAM_ERR(CAM_SENSOR, "gpio %d:%s request fails",
+ gpio_tbl[i].gpio, gpio_tbl[i].label);
+ }
}
- gconf->cam_gpio_req_tbl[i].gpio = gpio_array[val_array[i]];
- CDBG("%s cam_gpio_req_tbl[%d].gpio = %d\n", __func__, i,
- gconf->cam_gpio_req_tbl[i].gpio);
+ } else {
+ gpio_free_array(gpio_tbl, size);
}
- rc = of_property_read_u32_array(of_node, "qcom,gpio-req-tbl-flags",
- val_array, count);
- if (rc < 0) {
- pr_err("%s failed %d\n", __func__, __LINE__);
- goto free_gpio_req_tbl;
- }
-
- for (i = 0; i < count; i++) {
- gconf->cam_gpio_req_tbl[i].flags = val_array[i];
- CDBG("%s cam_gpio_req_tbl[%d].flags = %ld\n", __func__, i,
- gconf->cam_gpio_req_tbl[i].flags);
- }
-
- for (i = 0; i < count; i++) {
- rc = of_property_read_string_index(of_node,
- "qcom,gpio-req-tbl-label", i,
- &gconf->cam_gpio_req_tbl[i].label);
- CDBG("%s cam_gpio_req_tbl[%d].label = %s\n", __func__, i,
- gconf->cam_gpio_req_tbl[i].label);
- if (rc < 0) {
- pr_err("%s failed %d\n", __func__, __LINE__);
- goto free_gpio_req_tbl;
- }
- }
-
- kfree(val_array);
-
- return rc;
-
-free_gpio_req_tbl:
- kfree(gconf->cam_gpio_req_tbl);
-free_val_array:
- kfree(val_array);
- gconf->cam_gpio_req_tbl_size = 0;
-
return rc;
}
-int msm_camera_init_gpio_pin_tbl(struct device_node *of_node,
- struct msm_camera_gpio_conf *gconf, uint16_t *gpio_array,
- uint16_t gpio_array_size)
+int32_t cam_sensor_update_power_settings(void *cmd_buf,
+ int cmd_length, struct cam_sensor_power_ctrl_t *power_info)
{
- int rc = 0, val = 0;
+ int32_t rc = 0, tot_size = 0, last_cmd_type = 0;
+ int32_t i = 0, pwr_up = 0, pwr_down = 0;
+ void *ptr = cmd_buf, *scr;
+ struct cam_cmd_power *pwr_cmd = (struct cam_cmd_power *)cmd_buf;
+ struct common_header *cmm_hdr = (struct common_header *)cmd_buf;
- gconf->gpio_num_info = kzalloc(sizeof(struct msm_camera_gpio_num_info),
- GFP_KERNEL);
- if (!gconf->gpio_num_info)
+ if (!pwr_cmd || !cmd_length) {
+ CAM_ERR(CAM_SENSOR, "Invalid Args: pwr_cmd %pK, cmd_length: %d",
+ pwr_cmd, cmd_length);
+ return -EINVAL;
+ }
+
+ power_info->power_setting_size = 0;
+ power_info->power_setting =
+ (struct cam_sensor_power_setting *)
+ kzalloc(sizeof(struct cam_sensor_power_setting) *
+ MAX_POWER_CONFIG, GFP_KERNEL);
+ if (!power_info->power_setting)
return -ENOMEM;
- rc = of_property_read_u32(of_node, "qcom,gpio-vana", &val);
+ power_info->power_down_setting_size = 0;
+ power_info->power_down_setting =
+ (struct cam_sensor_power_setting *)
+ kzalloc(sizeof(struct cam_sensor_power_setting) *
+ MAX_POWER_CONFIG, GFP_KERNEL);
+ if (!power_info->power_down_setting) {
+ rc = -ENOMEM;
+ goto free_power_settings;
+ }
+
+ while (tot_size < cmd_length) {
+ if (cmm_hdr->cmd_type ==
+ CAMERA_SENSOR_CMD_TYPE_PWR_UP) {
+ struct cam_cmd_power *pwr_cmd =
+ (struct cam_cmd_power *)ptr;
+
+ power_info->
+ power_setting_size +=
+ pwr_cmd->count;
+ scr = ptr + sizeof(struct cam_cmd_power);
+ tot_size = tot_size + sizeof(struct cam_cmd_power);
+
+ if (pwr_cmd->count == 0)
+ CAM_DBG(CAM_SENSOR, "Un expected Command");
+
+ for (i = 0; i < pwr_cmd->count; i++, pwr_up++) {
+ power_info->
+ power_setting[pwr_up].seq_type =
+ pwr_cmd->power_settings[i].
+ power_seq_type;
+ power_info->
+ power_setting[pwr_up].config_val =
+ pwr_cmd->power_settings[i].
+ config_val_low;
+ power_info->power_setting[pwr_up].delay = 0;
+ if (i) {
+ scr = scr +
+ sizeof(
+ struct cam_power_settings);
+ tot_size = tot_size +
+ sizeof(
+ struct cam_power_settings);
+ }
+ if (tot_size > cmd_length) {
+ CAM_ERR(CAM_SENSOR,
+ "Error: Cmd Buffer is wrong");
+ rc = -EINVAL;
+ goto free_power_down_settings;
+ }
+ CAM_DBG(CAM_SENSOR,
+ "Seq Type[%d]: %d Config_val: %ld",
+ pwr_up,
+ power_info->
+ power_setting[pwr_up].seq_type,
+ power_info->
+ power_setting[pwr_up].
+ config_val);
+ }
+ last_cmd_type = CAMERA_SENSOR_CMD_TYPE_PWR_UP;
+ ptr = (void *) scr;
+ cmm_hdr = (struct common_header *)ptr;
+ } else if (cmm_hdr->cmd_type == CAMERA_SENSOR_CMD_TYPE_WAIT) {
+ struct cam_cmd_unconditional_wait *wait_cmd =
+ (struct cam_cmd_unconditional_wait *)ptr;
+ if (wait_cmd->op_code ==
+ CAMERA_SENSOR_WAIT_OP_SW_UCND) {
+ if (last_cmd_type ==
+ CAMERA_SENSOR_CMD_TYPE_PWR_UP) {
+ if (pwr_up > 0)
+ power_info->
+ power_setting
+ [pwr_up - 1].delay +=
+ wait_cmd->delay;
+ else
+ CAM_ERR(CAM_SENSOR,
+ "Delay is expected only after valid power up setting");
+ } else if (last_cmd_type ==
+ CAMERA_SENSOR_CMD_TYPE_PWR_DOWN) {
+ if (pwr_down > 0)
+ power_info->
+ power_down_setting
+ [pwr_down - 1].delay +=
+ wait_cmd->delay;
+ else
+ CAM_ERR(CAM_SENSOR,
+ "Delay is expected only after valid power up setting");
+ }
+ } else
+ CAM_DBG(CAM_SENSOR, "Invalid op code: %d",
+ wait_cmd->op_code);
+ tot_size = tot_size +
+ sizeof(struct cam_cmd_unconditional_wait);
+ if (tot_size > cmd_length) {
+ CAM_ERR(CAM_SENSOR, "Command Buffer is wrong");
+ return -EINVAL;
+ }
+ scr = (void *) (wait_cmd);
+ ptr = (void *)
+ (scr +
+ sizeof(struct cam_cmd_unconditional_wait));
+ CAM_DBG(CAM_SENSOR, "ptr: %pK sizeof: %d Next: %pK",
+ scr, (int32_t)sizeof(
+ struct cam_cmd_unconditional_wait), ptr);
+
+ cmm_hdr = (struct common_header *)ptr;
+ } else if (cmm_hdr->cmd_type ==
+ CAMERA_SENSOR_CMD_TYPE_PWR_DOWN) {
+ struct cam_cmd_power *pwr_cmd =
+ (struct cam_cmd_power *)ptr;
+
+ scr = ptr + sizeof(struct cam_cmd_power);
+ tot_size = tot_size + sizeof(struct cam_cmd_power);
+ power_info->power_down_setting_size += pwr_cmd->count;
+
+ if (pwr_cmd->count == 0)
+ CAM_ERR(CAM_SENSOR, "Invalid Command");
+
+ for (i = 0; i < pwr_cmd->count; i++, pwr_down++) {
+ power_info->
+ power_down_setting[pwr_down].
+ seq_type =
+ pwr_cmd->power_settings[i].
+ power_seq_type;
+ power_info->
+ power_down_setting[pwr_down].
+ config_val =
+ pwr_cmd->power_settings[i].
+ config_val_low;
+ power_info->
+ power_down_setting[pwr_down].delay = 0;
+ if (i) {
+ scr = scr +
+ sizeof(
+ struct cam_power_settings);
+ tot_size =
+ tot_size +
+ sizeof(
+ struct cam_power_settings);
+ }
+ if (tot_size > cmd_length) {
+ CAM_ERR(CAM_SENSOR,
+ "Command Buffer is wrong");
+ rc = -EINVAL;
+ goto free_power_down_settings;
+ }
+ CAM_DBG(CAM_SENSOR,
+ "Seq Type[%d]: %d Config_val: %ld",
+ pwr_down,
+ power_info->
+ power_down_setting[pwr_down].
+ seq_type,
+ power_info->
+ power_down_setting[pwr_down].
+ config_val);
+ }
+ last_cmd_type = CAMERA_SENSOR_CMD_TYPE_PWR_DOWN;
+ ptr = (void *) scr;
+ cmm_hdr = (struct common_header *)ptr;
+ } else {
+ CAM_ERR(CAM_SENSOR,
+ "Error: Un expected Header Type: %d",
+ cmm_hdr->cmd_type);
+ }
+ }
+
+ return rc;
+free_power_down_settings:
+ kfree(power_info->power_down_setting);
+free_power_settings:
+ kfree(power_info->power_setting);
+ return rc;
+}
+
+int cam_get_dt_power_setting_data(struct device_node *of_node,
+ struct cam_hw_soc_info *soc_info,
+ struct cam_sensor_power_ctrl_t *power_info)
+{
+ int rc = 0, i;
+ int count = 0;
+ const char *seq_name = NULL;
+ uint32_t *array = NULL;
+ struct cam_sensor_power_setting *ps;
+ int c, end;
+
+ if (!power_info)
+ return -EINVAL;
+
+ count = of_property_count_strings(of_node, "qcom,cam-power-seq-type");
+ power_info->power_setting_size = count;
+
+ CAM_DBG(CAM_SENSOR, "qcom,cam-power-seq-type count %d", count);
+
+ if (count <= 0)
+ return 0;
+
+ ps = kcalloc(count, sizeof(*ps), GFP_KERNEL);
+ if (!ps)
+ return -ENOMEM;
+ power_info->power_setting = ps;
+
+ for (i = 0; i < count; i++) {
+ rc = of_property_read_string_index(of_node,
+ "qcom,cam-power-seq-type", i, &seq_name);
+ if (rc < 0) {
+ CAM_ERR(CAM_SENSOR, "failed");
+ goto ERROR1;
+ }
+ CAM_DBG(CAM_SENSOR, "seq_name[%d] = %s", i, seq_name);
+ if (!strcmp(seq_name, "cam_vio")) {
+ ps[i].seq_type = SENSOR_VIO;
+ } else if (!strcmp(seq_name, "cam_vana")) {
+ ps[i].seq_type = SENSOR_VANA;
+ } else if (!strcmp(seq_name, "cam_clk")) {
+ ps[i].seq_type = SENSOR_MCLK;
+ } else {
+ CAM_ERR(CAM_SENSOR, "unrecognized seq-type %s",
+ seq_name);
+ rc = -EILSEQ;
+ goto ERROR1;
+ }
+ CAM_DBG(CAM_SENSOR, "seq_type[%d] %d", i, ps[i].seq_type);
+ }
+
+ array = kcalloc(count, sizeof(uint32_t), GFP_KERNEL);
+ if (!array) {
+ rc = -ENOMEM;
+ goto ERROR1;
+ }
+
+ rc = of_property_read_u32_array(of_node, "qcom,cam-power-seq-cfg-val",
+ array, count);
+ if (rc < 0) {
+ CAM_ERR(CAM_SENSOR, "failed ");
+ goto ERROR2;
+ }
+
+ for (i = 0; i < count; i++) {
+ ps[i].config_val = array[i];
+ CAM_DBG(CAM_SENSOR, "power_setting[%d].config_val = %ld", i,
+ ps[i].config_val);
+ }
+
+ rc = of_property_read_u32_array(of_node, "qcom,cam-power-seq-delay",
+ array, count);
+ if (rc < 0) {
+ CAM_ERR(CAM_SENSOR, "failed");
+ goto ERROR2;
+ }
+ for (i = 0; i < count; i++) {
+ ps[i].delay = array[i];
+ CAM_DBG(CAM_SENSOR, "power_setting[%d].delay = %d", i,
+ ps[i].delay);
+ }
+ kfree(array);
+
+ power_info->power_down_setting =
+ kzalloc(sizeof(*ps) * count, GFP_KERNEL);
+
+ if (!power_info->power_down_setting) {
+ CAM_ERR(CAM_SENSOR, "failed");
+ rc = -ENOMEM;
+ goto ERROR1;
+ }
+
+ power_info->power_down_setting_size = count;
+
+ end = count - 1;
+
+ for (c = 0; c < count; c++) {
+ power_info->power_down_setting[c] = ps[end];
+ end--;
+ }
+ return rc;
+ERROR2:
+ kfree(array);
+ERROR1:
+ kfree(ps);
+ return rc;
+}
+
+int cam_sensor_util_init_gpio_pin_tbl(
+ struct cam_hw_soc_info *soc_info,
+ struct msm_camera_gpio_num_info **pgpio_num_info)
+{
+ int rc = 0, val = 0;
+ uint32_t gpio_array_size;
+ struct platform_device *pdev = NULL;
+ struct device_node *of_node = NULL;
+ struct cam_soc_gpio_data *gconf = NULL;
+ struct msm_camera_gpio_num_info *gpio_num_info = NULL;
+
+ pdev = soc_info->pdev;
+ of_node = pdev->dev.of_node;
+
+ gconf = soc_info->gpio_data;
+ if (!gconf) {
+ CAM_ERR(CAM_SENSOR, "No gpio_common_table is found");
+ return -EINVAL;
+ }
+
+ if (!gconf->cam_gpio_common_tbl) {
+ CAM_ERR(CAM_SENSOR, "gpio_common_table is not initialized");
+ return -EINVAL;
+ }
+
+ gpio_array_size = gconf->cam_gpio_common_tbl_size;
+
+ if (!gpio_array_size) {
+ CAM_ERR(CAM_SENSOR, "invalid size of gpio table");
+ return -EINVAL;
+ }
+
+ *pgpio_num_info = kzalloc(sizeof(struct msm_camera_gpio_num_info),
+ GFP_KERNEL);
+ if (!*pgpio_num_info)
+ return -ENOMEM;
+ gpio_num_info = *pgpio_num_info;
+
+ rc = of_property_read_u32(of_node, "gpio-vana", &val);
if (rc != -EINVAL) {
if (rc < 0) {
- pr_err("%s:%d read qcom,gpio-vana failed rc %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "read gpio-vana failed rc %d", rc);
goto free_gpio_info;
} else if (val >= gpio_array_size) {
- pr_err("%s:%d qcom,gpio-vana invalid %d\n",
- __func__, __LINE__, val);
+ CAM_ERR(CAM_SENSOR, "gpio-vana invalid %d", val);
rc = -EINVAL;
goto free_gpio_info;
}
- gconf->gpio_num_info->gpio_num[SENSOR_VANA] =
- gpio_array[val];
- gconf->gpio_num_info->valid[SENSOR_VANA] = 1;
- CDBG("%s qcom,gpio-vana %d\n", __func__,
- gconf->gpio_num_info->gpio_num[SENSOR_VANA]);
+ gpio_num_info->gpio_num[SENSOR_VANA] =
+ gconf->cam_gpio_common_tbl[val].gpio;
+ gpio_num_info->valid[SENSOR_VANA] = 1;
+
+ CAM_DBG(CAM_SENSOR, "gpio-vana %d",
+ gpio_num_info->gpio_num[SENSOR_VANA]);
}
- rc = of_property_read_u32(of_node, "qcom,gpio-vio", &val);
+ rc = of_property_read_u32(of_node, "gpio-vio", &val);
if (rc != -EINVAL) {
if (rc < 0) {
- pr_err("%s:%d read qcom,gpio-vio failed rc %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "read gpio-vio failed rc %d", rc);
goto free_gpio_info;
} else if (val >= gpio_array_size) {
- pr_err("%s:%d qcom,gpio-vio invalid %d\n",
- __func__, __LINE__, val);
+ CAM_ERR(CAM_SENSOR, "gpio-vio invalid %d", val);
goto free_gpio_info;
}
- gconf->gpio_num_info->gpio_num[SENSOR_VIO] =
- gpio_array[val];
- gconf->gpio_num_info->valid[SENSOR_VIO] = 1;
- CDBG("%s qcom,gpio-vio %d\n", __func__,
- gconf->gpio_num_info->gpio_num[SENSOR_VIO]);
+ gpio_num_info->gpio_num[SENSOR_VIO] =
+ gconf->cam_gpio_common_tbl[val].gpio;
+ gpio_num_info->valid[SENSOR_VIO] = 1;
+
+ CAM_DBG(CAM_SENSOR, "gpio-vio %d",
+ gpio_num_info->gpio_num[SENSOR_VIO]);
}
- rc = of_property_read_u32(of_node, "qcom,gpio-vaf", &val);
+ rc = of_property_read_u32(of_node, "gpio-vaf", &val);
if (rc != -EINVAL) {
if (rc < 0) {
- pr_err("%s:%d read qcom,gpio-vaf failed rc %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "read gpio-vaf failed rc %d", rc);
goto free_gpio_info;
} else if (val >= gpio_array_size) {
- pr_err("%s:%d qcom,gpio-vaf invalid %d\n",
- __func__, __LINE__, val);
+ CAM_ERR(CAM_SENSOR, "gpio-vaf invalid %d", val);
rc = -EINVAL;
goto free_gpio_info;
}
- gconf->gpio_num_info->gpio_num[SENSOR_VAF] =
- gpio_array[val];
- gconf->gpio_num_info->valid[SENSOR_VAF] = 1;
- CDBG("%s qcom,gpio-vaf %d\n", __func__,
- gconf->gpio_num_info->gpio_num[SENSOR_VAF]);
+ gpio_num_info->gpio_num[SENSOR_VAF] =
+ gconf->cam_gpio_common_tbl[val].gpio;
+ gpio_num_info->valid[SENSOR_VAF] = 1;
+
+ CAM_DBG(CAM_SENSOR, "gpio-vaf %d",
+ gpio_num_info->gpio_num[SENSOR_VAF]);
}
- rc = of_property_read_u32(of_node, "qcom,gpio-vdig", &val);
+ rc = of_property_read_u32(of_node, "gpio-vdig", &val);
if (rc != -EINVAL) {
if (rc < 0) {
- pr_err("%s:%d read qcom,gpio-vdig failed rc %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "read gpio-vdig failed rc %d", rc);
goto free_gpio_info;
} else if (val >= gpio_array_size) {
- pr_err("%s:%d qcom,gpio-vdig invalid %d\n",
- __func__, __LINE__, val);
+ CAM_ERR(CAM_SENSOR, "gpio-vdig invalid %d", val);
rc = -EINVAL;
goto free_gpio_info;
}
- gconf->gpio_num_info->gpio_num[SENSOR_VDIG] =
- gpio_array[val];
- gconf->gpio_num_info->valid[SENSOR_VDIG] = 1;
- CDBG("%s qcom,gpio-vdig %d\n", __func__,
- gconf->gpio_num_info->gpio_num[SENSOR_VDIG]);
+ gpio_num_info->gpio_num[SENSOR_VDIG] =
+ gconf->cam_gpio_common_tbl[val].gpio;
+ gpio_num_info->valid[SENSOR_VDIG] = 1;
+
+ CAM_DBG(CAM_SENSOR, "gpio-vdig %d",
+ gpio_num_info->gpio_num[SENSOR_VDIG]);
}
- rc = of_property_read_u32(of_node, "qcom,gpio-reset", &val);
+ rc = of_property_read_u32(of_node, "gpio-reset", &val);
if (rc != -EINVAL) {
if (rc < 0) {
- pr_err("%s:%d read qcom,gpio-reset failed rc %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR, "read gpio-reset failed rc %d", rc);
goto free_gpio_info;
} else if (val >= gpio_array_size) {
- pr_err("%s:%d qcom,gpio-reset invalid %d\n",
- __func__, __LINE__, val);
+ CAM_ERR(CAM_SENSOR, "gpio-reset invalid %d", val);
rc = -EINVAL;
goto free_gpio_info;
}
- gconf->gpio_num_info->gpio_num[SENSOR_RESET] =
- gpio_array[val];
- gconf->gpio_num_info->valid[SENSOR_RESET] = 1;
- CDBG("%s qcom,gpio-reset %d\n", __func__,
- gconf->gpio_num_info->gpio_num[SENSOR_RESET]);
+ gpio_num_info->gpio_num[SENSOR_RESET] =
+ gconf->cam_gpio_common_tbl[val].gpio;
+ gpio_num_info->valid[SENSOR_RESET] = 1;
+
+ CAM_DBG(CAM_SENSOR, "gpio-reset %d",
+ gpio_num_info->gpio_num[SENSOR_RESET]);
}
- rc = of_property_read_u32(of_node, "qcom,gpio-standby", &val);
+ rc = of_property_read_u32(of_node, "gpio-standby", &val);
if (rc != -EINVAL) {
if (rc < 0) {
- pr_err("%s:%d read qcom,gpio-standby failed rc %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR,
+ "read gpio-standby failed rc %d", rc);
goto free_gpio_info;
} else if (val >= gpio_array_size) {
- pr_err("%s:%d qcom,gpio-standby invalid %d\n",
- __func__, __LINE__, val);
+ CAM_ERR(CAM_SENSOR, "gpio-standby invalid %d", val);
rc = -EINVAL;
goto free_gpio_info;
}
- gconf->gpio_num_info->gpio_num[SENSOR_STANDBY] =
- gpio_array[val];
- gconf->gpio_num_info->valid[SENSOR_STANDBY] = 1;
- CDBG("%s qcom,gpio-standby %d\n", __func__,
- gconf->gpio_num_info->gpio_num[SENSOR_STANDBY]);
+ gpio_num_info->gpio_num[SENSOR_STANDBY] =
+ gconf->cam_gpio_common_tbl[val].gpio;
+ gpio_num_info->valid[SENSOR_STANDBY] = 1;
+
+ CAM_DBG(CAM_SENSOR, "gpio-standby %d",
+ gpio_num_info->gpio_num[SENSOR_STANDBY]);
}
- rc = of_property_read_u32(of_node, "qcom,gpio-af-pwdm", &val);
+ rc = of_property_read_u32(of_node, "gpio-af-pwdm", &val);
if (rc != -EINVAL) {
if (rc < 0) {
- pr_err("%s:%d read qcom,gpio-af-pwdm failed rc %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR,
+ "read gpio-af-pwdm failed rc %d", rc);
goto free_gpio_info;
} else if (val >= gpio_array_size) {
- pr_err("%s:%d qcom,gpio-af-pwdm invalid %d\n",
- __func__, __LINE__, val);
+ CAM_ERR(CAM_SENSOR, "gpio-af-pwdm invalid %d", val);
rc = -EINVAL;
goto free_gpio_info;
}
- gconf->gpio_num_info->gpio_num[SENSOR_VAF_PWDM] =
- gpio_array[val];
- gconf->gpio_num_info->valid[SENSOR_VAF_PWDM] = 1;
- CDBG("%s qcom,gpio-af-pwdm %d\n", __func__,
- gconf->gpio_num_info->gpio_num[SENSOR_VAF_PWDM]);
+ gpio_num_info->gpio_num[SENSOR_VAF_PWDM] =
+ gconf->cam_gpio_common_tbl[val].gpio;
+ gpio_num_info->valid[SENSOR_VAF_PWDM] = 1;
+
+ CAM_DBG(CAM_SENSOR, "gpio-af-pwdm %d",
+ gpio_num_info->gpio_num[SENSOR_VAF_PWDM]);
}
- rc = of_property_read_u32(of_node, "qcom,gpio-custom1", &val);
+ rc = of_property_read_u32(of_node, "gpio-custom1", &val);
if (rc != -EINVAL) {
if (rc < 0) {
- pr_err("%s:%d read qcom,gpio-custom1 failed rc %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR,
+ "read gpio-custom1 failed rc %d", rc);
goto free_gpio_info;
} else if (val >= gpio_array_size) {
- pr_err("%s:%d qcom,gpio-custom1 invalid %d\n",
- __func__, __LINE__, val);
+ CAM_ERR(CAM_SENSOR, "gpio-custom1 invalid %d", val);
rc = -EINVAL;
goto free_gpio_info;
}
- gconf->gpio_num_info->gpio_num[SENSOR_CUSTOM_GPIO1] =
- gpio_array[val];
- gconf->gpio_num_info->valid[SENSOR_CUSTOM_GPIO1] = 1;
- CDBG("%s qcom,gpio-custom1 %d\n", __func__,
- gconf->gpio_num_info->gpio_num[SENSOR_CUSTOM_GPIO1]);
+ gpio_num_info->gpio_num[SENSOR_CUSTOM_GPIO1] =
+ gconf->cam_gpio_common_tbl[val].gpio;
+ gpio_num_info->valid[SENSOR_CUSTOM_GPIO1] = 1;
+
+ CAM_DBG(CAM_SENSOR, "gpio-custom1 %d",
+ gpio_num_info->gpio_num[SENSOR_CUSTOM_GPIO1]);
}
- rc = of_property_read_u32(of_node, "qcom,gpio-custom2", &val);
+ rc = of_property_read_u32(of_node, "gpio-custom2", &val);
if (rc != -EINVAL) {
if (rc < 0) {
- pr_err("%s:%d read qcom,gpio-custom2 failed rc %d\n",
- __func__, __LINE__, rc);
+ CAM_ERR(CAM_SENSOR,
+ "read gpio-custom2 failed rc %d", rc);
goto free_gpio_info;
} else if (val >= gpio_array_size) {
- pr_err("%s:%d qcom,gpio-custom2 invalid %d\n",
- __func__, __LINE__, val);
+ CAM_ERR(CAM_SENSOR, "gpio-custom2 invalid %d", val);
rc = -EINVAL;
goto free_gpio_info;
}
- gconf->gpio_num_info->gpio_num[SENSOR_CUSTOM_GPIO2] =
- gpio_array[val];
- gconf->gpio_num_info->valid[SENSOR_CUSTOM_GPIO2] = 1;
- CDBG("%s qcom,gpio-custom2 %d\n", __func__,
- gconf->gpio_num_info->gpio_num[SENSOR_CUSTOM_GPIO2]);
+ gpio_num_info->gpio_num[SENSOR_CUSTOM_GPIO2] =
+ gconf->cam_gpio_common_tbl[val].gpio;
+ gpio_num_info->valid[SENSOR_CUSTOM_GPIO2] = 1;
+
+ CAM_DBG(CAM_SENSOR, "gpio-custom2 %d",
+ gpio_num_info->gpio_num[SENSOR_CUSTOM_GPIO2]);
} else {
rc = 0;
}
@@ -753,142 +1059,8 @@
return rc;
free_gpio_info:
- kfree(gconf->gpio_num_info);
- gconf->gpio_num_info = NULL;
- return rc;
-}
-
-int cam_sensor_get_dt_vreg_data(struct device_node *of_node,
- struct camera_vreg_t **cam_vreg, int *num_vreg)
-{
- int rc = 0, i = 0;
- int32_t count = 0;
- uint32_t *vreg_array = NULL;
- struct camera_vreg_t *vreg = NULL;
-
- count = of_property_count_strings(of_node, "qcom,cam-vreg-name");
- CDBG("%s qcom,cam-vreg-name count %d\n", __func__, count);
-
- if (!count || (count == -EINVAL)) {
- pr_err("%s:%d number of entries is 0 or not present in dts\n",
- __func__, __LINE__);
- *num_vreg = 0;
- return 0;
- }
-
- vreg = kcalloc(count, sizeof(*vreg), GFP_KERNEL);
- if (!vreg)
- return -ENOMEM;
-
- *cam_vreg = vreg;
- *num_vreg = count;
- for (i = 0; i < count; i++) {
- rc = of_property_read_string_index(of_node,
- "qcom,cam-vreg-name", i,
- &vreg[i].reg_name);
- CDBG("%s reg_name[%d] = %s\n", __func__, i,
- vreg[i].reg_name);
- if (rc < 0) {
- pr_err("%s failed %d\n", __func__, __LINE__);
- goto free_vreg;
- }
- }
-
- vreg_array = kcalloc(count, sizeof(uint32_t), GFP_KERNEL);
- if (!vreg_array) {
- rc = -ENOMEM;
- goto free_vreg;
- }
-
- for (i = 0; i < count; i++)
- vreg[i].type = VREG_TYPE_DEFAULT;
-
- rc = of_property_read_u32_array(of_node, "qcom,cam-vreg-type",
- vreg_array, count);
- if (rc != -EINVAL) {
- if (rc < 0) {
- pr_err("%s failed %d\n", __func__, __LINE__);
- goto free_vreg_array;
- } else {
- for (i = 0; i < count; i++) {
- vreg[i].type = vreg_array[i];
- CDBG("%s cam_vreg[%d].type = %d\n",
- __func__, i, vreg[i].type);
- }
- }
- } else {
- CDBG("%s:%d no qcom,cam-vreg-type entries in dts\n",
- __func__, __LINE__);
- rc = 0;
- }
-
- rc = of_property_read_u32_array(of_node, "qcom,cam-vreg-min-voltage",
- vreg_array, count);
- if (rc != -EINVAL) {
- if (rc < 0) {
- pr_err("%s failed %d\n", __func__, __LINE__);
- goto free_vreg_array;
- } else {
- for (i = 0; i < count; i++) {
- vreg[i].min_voltage = vreg_array[i];
- CDBG("%s cam_vreg[%d].min_voltage = %d\n",
- __func__, i, vreg[i].min_voltage);
- }
- }
- } else {
- CDBG("%s:%d no qcom,cam-vreg-min-voltage entries in dts\n",
- __func__, __LINE__);
- rc = 0;
- }
-
- rc = of_property_read_u32_array(of_node, "qcom,cam-vreg-max-voltage",
- vreg_array, count);
- if (rc != -EINVAL) {
- if (rc < 0) {
- pr_err("%s failed %d\n", __func__, __LINE__);
- goto free_vreg_array;
- } else {
- for (i = 0; i < count; i++) {
- vreg[i].max_voltage = vreg_array[i];
- CDBG("%s cam_vreg[%d].max_voltage = %d\n",
- __func__, i, vreg[i].max_voltage);
- }
- }
- } else {
- CDBG("%s:%d no qcom,cam-vreg-max-voltage entries in dts\n",
- __func__, __LINE__);
- rc = 0;
- }
-
- rc = of_property_read_u32_array(of_node, "qcom,cam-vreg-op-mode",
- vreg_array, count);
- if (rc != -EINVAL) {
- if (rc < 0) {
- pr_err("%s failed %d\n", __func__, __LINE__);
- goto free_vreg_array;
- } else {
- for (i = 0; i < count; i++) {
- vreg[i].op_mode = vreg_array[i];
- CDBG("%s cam_vreg[%d].op_mode = %d\n",
- __func__, i, vreg[i].op_mode);
- }
- }
- } else {
- CDBG("%s:%d no qcom,cam-vreg-op-mode entries in dts\n",
- __func__, __LINE__);
- rc = 0;
- }
-
- kfree(vreg_array);
-
- return rc;
-
-free_vreg_array:
- kfree(vreg_array);
-free_vreg:
- kfree(vreg);
- *num_vreg = 0;
-
+ kfree(gpio_num_info);
+ gpio_num_info = NULL;
return rc;
}
@@ -897,189 +1069,155 @@
sensor_pctrl->pinctrl = devm_pinctrl_get(dev);
if (IS_ERR_OR_NULL(sensor_pctrl->pinctrl)) {
- pr_err("%s:%d Getting pinctrl handle failed\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "Getting pinctrl handle failed");
return -EINVAL;
}
sensor_pctrl->gpio_state_active =
pinctrl_lookup_state(sensor_pctrl->pinctrl,
CAM_SENSOR_PINCTRL_STATE_DEFAULT);
if (IS_ERR_OR_NULL(sensor_pctrl->gpio_state_active)) {
- pr_err("%s:%d Failed to get the active state pinctrl handle\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR,
+ "Failed to get the active state pinctrl handle");
return -EINVAL;
}
sensor_pctrl->gpio_state_suspend
= pinctrl_lookup_state(sensor_pctrl->pinctrl,
CAM_SENSOR_PINCTRL_STATE_SLEEP);
if (IS_ERR_OR_NULL(sensor_pctrl->gpio_state_suspend)) {
- pr_err("%s:%d Failed to get the suspend state pinctrl handle\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR,
+ "Failed to get the suspend state pinctrl handle");
return -EINVAL;
}
return 0;
}
-
int msm_cam_sensor_handle_reg_gpio(int seq_type,
- struct msm_camera_gpio_conf *gconf, int val)
+ struct msm_camera_gpio_num_info *gpio_num_info, int val)
{
-
int gpio_offset = -1;
- if (!gconf) {
- pr_err("ERR:%s: Input Parameters are not proper\n", __func__);
+ if (!gpio_num_info) {
+ CAM_ERR(CAM_SENSOR, "Input Parameters are not proper");
return -EINVAL;
}
- CDBG("%s: %d Seq type: %d, config: %d", __func__, __LINE__,
- seq_type, val);
+
+ CAM_DBG(CAM_SENSOR, "Seq type: %d, config: %d", seq_type, val);
gpio_offset = seq_type;
- if ((gconf->gpio_num_info->valid[gpio_offset] == 1)) {
- CDBG("%s: %d VALID GPIO offset: %d, seqtype: %d\n",
- __func__, __LINE__, gpio_offset, seq_type);
+ if (gpio_num_info->valid[gpio_offset] == 1) {
+ CAM_DBG(CAM_SENSOR, "VALID GPIO offset: %d, seqtype: %d",
+ gpio_offset, seq_type);
gpio_set_value_cansleep(
- gconf->gpio_num_info->gpio_num
+ gpio_num_info->gpio_num
[gpio_offset], val);
}
return 0;
}
-int32_t msm_sensor_driver_get_gpio_data(
- struct msm_camera_gpio_conf **gpio_conf,
- struct device_node *of_node)
-{
- int32_t rc = 0, i = 0;
- uint16_t *gpio_array = NULL;
- int16_t gpio_array_size = 0;
- struct msm_camera_gpio_conf *gconf = NULL;
-
- /* Validate input parameters */
- if (!of_node) {
- pr_err("failed: invalid param of_node %pK", of_node);
- return -EINVAL;
- }
-
- gpio_array_size = of_gpio_count(of_node);
- CDBG("gpio count %d\n", gpio_array_size);
- if (gpio_array_size <= 0)
- return 0;
-
- gconf = kzalloc(sizeof(*gconf), GFP_KERNEL);
- if (!gconf)
- return -ENOMEM;
-
- *gpio_conf = gconf;
-
- gpio_array = kcalloc(gpio_array_size, sizeof(uint16_t), GFP_KERNEL);
- if (!gpio_array)
- goto free_gpio_conf;
-
- for (i = 0; i < gpio_array_size; i++) {
- gpio_array[i] = of_get_gpio(of_node, i);
- CDBG("gpio_array[%d] = %d", i, gpio_array[i]);
- }
- rc = msm_camera_get_dt_gpio_req_tbl(of_node, gconf, gpio_array,
- gpio_array_size);
- if (rc < 0) {
- pr_err("failed in msm_camera_get_dt_gpio_req_tbl\n");
- goto free_gpio_array;
- }
-
- rc = msm_camera_init_gpio_pin_tbl(of_node, gconf, gpio_array,
- gpio_array_size);
- if (rc < 0) {
- pr_err("failed in msm_camera_init_gpio_pin_tbl\n");
- goto free_gpio_req_tbl;
- }
- kfree(gpio_array);
-
- return rc;
-
-free_gpio_req_tbl:
- kfree(gconf->cam_gpio_req_tbl);
-free_gpio_array:
- kfree(gpio_array);
-free_gpio_conf:
- kfree(gconf);
- *gpio_conf = NULL;
-
- return rc;
-}
-
-int cam_sensor_core_power_up(struct cam_sensor_power_ctrl_t *ctrl)
+int cam_sensor_core_power_up(struct cam_sensor_power_ctrl_t *ctrl,
+ struct cam_hw_soc_info *soc_info)
{
int rc = 0, index = 0, no_gpio = 0, ret = 0, num_vreg, j = 0;
+ int32_t vreg_idx = -1;
struct cam_sensor_power_setting *power_setting = NULL;
- struct camera_vreg_t *cam_vreg;
+ struct msm_camera_gpio_num_info *gpio_num_info = NULL;
- CDBG("%s:%d\n", __func__, __LINE__);
+ CAM_DBG(CAM_SENSOR, "Enter");
if (!ctrl) {
- pr_err("failed ctrl %pK\n", ctrl);
+ CAM_ERR(CAM_SENSOR, "Invalid ctrl handle");
return -EINVAL;
}
- cam_vreg = ctrl->cam_vreg;
- num_vreg = ctrl->num_vreg;
+ gpio_num_info = ctrl->gpio_num_info;
+ num_vreg = soc_info->num_rgltr;
- if (ctrl->gpio_conf->cam_gpiomux_conf_tbl != NULL)
- CDBG("%s:%d mux install\n", __func__, __LINE__);
+ if ((num_vreg == 0) || (num_vreg > CAM_SOC_MAX_REGULATOR)) {
+ CAM_ERR(CAM_SENSOR, "Regulators are not initialized");
+ return -EINVAL;
+ }
ret = msm_camera_pinctrl_init(&(ctrl->pinctrl_info), ctrl->dev);
if (ret < 0) {
- pr_err("%s:%d Initialization of pinctrl failed\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "Initialization of pinctrl failed");
ctrl->cam_pinctrl_status = 0;
} else {
ctrl->cam_pinctrl_status = 1;
}
- rc = msm_camera_request_gpio_table(
- ctrl->gpio_conf->cam_gpio_req_tbl,
- ctrl->gpio_conf->cam_gpio_req_tbl_size, 1);
+
+ rc = cam_sensor_util_request_gpio_table(soc_info, 1);
if (rc < 0)
no_gpio = rc;
+
if (ctrl->cam_pinctrl_status) {
ret = pinctrl_select_state(ctrl->pinctrl_info.pinctrl,
ctrl->pinctrl_info.gpio_state_active);
if (ret)
- pr_err("%s:%d cannot set pin to active state",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "cannot set pin to active state");
}
for (index = 0; index < ctrl->power_setting_size; index++) {
- CDBG("%s index %d\n", __func__, index);
+ CAM_DBG(CAM_SENSOR, "index: %d", index);
power_setting = &ctrl->power_setting[index];
+ CAM_DBG(CAM_SENSOR, "seq_type %d", power_setting->seq_type);
switch (power_setting->seq_type) {
case SENSOR_MCLK:
- if (power_setting->seq_val >= ctrl->clk_info_size) {
- pr_err("%s:%d :Error: clk index %d >= max %zu\n",
- __func__, __LINE__,
+ if (power_setting->seq_val >= soc_info->num_clk) {
+ CAM_ERR(CAM_SENSOR, "clk index %d >= max %u",
power_setting->seq_val,
- ctrl->clk_info_size);
+ soc_info->num_clk);
goto power_up_failed;
}
for (j = 0; j < num_vreg; j++) {
- if (!strcmp(cam_vreg[j].reg_name,
+ if (!strcmp(soc_info->rgltr_name[j],
"cam_clk")) {
- CDBG("%s:%d Enable cam_clk: %d\n",
- __func__, __LINE__, j);
- msm_camera_config_single_vreg(ctrl->dev,
- &cam_vreg[j],
- (struct regulator **)
- &power_setting->data[0],
- 1);
+ CAM_DBG(CAM_SENSOR,
+ "Enable cam_clk: %d", j);
+
+ soc_info->rgltr[j] =
+ regulator_get(
+ &soc_info->pdev->dev,
+ soc_info->rgltr_name[j]);
+
+ if (IS_ERR_OR_NULL(
+ soc_info->rgltr[j])) {
+ rc = PTR_ERR(
+ soc_info->rgltr[j]);
+ rc = rc ? rc : -EINVAL;
+ CAM_ERR(CAM_SENSOR,
+ "vreg %s %d",
+ soc_info->rgltr_name[j],
+ rc);
+ soc_info->rgltr[j] = NULL;
+ }
+
+ rc = cam_soc_util_regulator_enable(
+ soc_info->rgltr[j],
+ soc_info->rgltr_name[j],
+ soc_info->rgltr_min_volt[j],
+ soc_info->rgltr_max_volt[j],
+ soc_info->rgltr_op_mode[j],
+ soc_info->rgltr_delay[j]);
+
+ power_setting->data[0] =
+ soc_info->rgltr[j];
}
}
if (power_setting->config_val)
- ctrl->clk_info[power_setting->seq_val].
- clk_rate = power_setting->config_val;
- rc = msm_camera_clk_enable(ctrl->dev,
- ctrl->clk_info, ctrl->clk_ptr,
- ctrl->clk_info_size, true);
+ soc_info->clk_rate[0][power_setting->seq_val] =
+ power_setting->config_val;
+
+ for (j = 0; j < soc_info->num_clk; j++) {
+ rc = cam_soc_util_clk_enable(soc_info->clk[j],
+ soc_info->clk_name[j],
+ soc_info->clk_rate[0][j]);
+ if (rc)
+ break;
+ }
+
if (rc < 0) {
- pr_err("%s: clk enable failed\n", __func__);
+ CAM_ERR(CAM_SENSOR, "clk enable failed");
goto power_up_failed;
}
break;
@@ -1088,27 +1226,26 @@
case SENSOR_CUSTOM_GPIO1:
case SENSOR_CUSTOM_GPIO2:
if (no_gpio) {
- pr_err("%s: request gpio failed\n", __func__);
+ CAM_ERR(CAM_SENSOR, "request gpio failed");
return no_gpio;
}
if (power_setting->seq_val >= CAM_VREG_MAX ||
- !ctrl->gpio_conf->gpio_num_info) {
- pr_err("%s gpio index %d >= max %d\n", __func__,
+ !gpio_num_info) {
+ CAM_ERR(CAM_SENSOR, "gpio index %d >= max %d",
power_setting->seq_val,
CAM_VREG_MAX);
goto power_up_failed;
}
- CDBG("%s:%d gpio set val %d\n",
- __func__, __LINE__,
- ctrl->gpio_conf->gpio_num_info->gpio_num
+ CAM_DBG(CAM_SENSOR, "gpio set val %d",
+ gpio_num_info->gpio_num
[power_setting->seq_val]);
rc = msm_cam_sensor_handle_reg_gpio(
power_setting->seq_type,
- ctrl->gpio_conf, 1);
+ gpio_num_info, 1);
if (rc < 0) {
- pr_err("ERR:%s Error in handling VREG GPIO\n",
- __func__);
+ CAM_ERR(CAM_SENSOR,
+ "Error in handling VREG GPIO");
goto power_up_failed;
}
break;
@@ -1123,34 +1260,56 @@
break;
if (power_setting->seq_val >= CAM_VREG_MAX) {
- pr_err("%s vreg index %d >= max %d\n", __func__,
+ CAM_ERR(CAM_SENSOR, "vreg index %d >= max %d",
power_setting->seq_val,
CAM_VREG_MAX);
goto power_up_failed;
}
- if (power_setting->seq_val < ctrl->num_vreg)
- msm_camera_config_single_vreg(ctrl->dev,
- &ctrl->cam_vreg
- [power_setting->seq_val],
- (struct regulator **)
- &power_setting->data[0],
- 1);
+ if (power_setting->seq_val < num_vreg) {
+ CAM_DBG(CAM_SENSOR, "Enable Regulator");
+ vreg_idx = power_setting->seq_val;
+
+ soc_info->rgltr[vreg_idx] =
+ regulator_get(&soc_info->pdev->dev,
+ soc_info->rgltr_name[vreg_idx]);
+ if (IS_ERR_OR_NULL(
+ soc_info->rgltr[vreg_idx])) {
+ rc = PTR_ERR(soc_info->rgltr[vreg_idx]);
+ rc = rc ? rc : -EINVAL;
+
+ CAM_ERR(CAM_SENSOR, "%s get failed %d",
+ soc_info->rgltr_name[vreg_idx],
+ rc);
+
+ soc_info->rgltr[vreg_idx] = NULL;
+ }
+
+ rc = cam_soc_util_regulator_enable(
+ soc_info->rgltr[vreg_idx],
+ soc_info->rgltr_name[vreg_idx],
+ soc_info->rgltr_min_volt[vreg_idx],
+ soc_info->rgltr_max_volt[vreg_idx],
+ soc_info->rgltr_op_mode[vreg_idx],
+ soc_info->rgltr_delay[vreg_idx]);
+
+ power_setting->data[0] =
+ soc_info->rgltr[vreg_idx];
+ }
else
- pr_err("%s: %d usr_idx:%d dts_idx:%d\n",
- __func__, __LINE__,
- power_setting->seq_val, ctrl->num_vreg);
+ CAM_ERR(CAM_SENSOR, "usr_idx:%d dts_idx:%d",
+ power_setting->seq_val, num_vreg);
rc = msm_cam_sensor_handle_reg_gpio(
power_setting->seq_type,
- ctrl->gpio_conf, 1);
+ gpio_num_info, 1);
if (rc < 0) {
- pr_err("ERR:%s Error in handling VREG GPIO\n",
- __func__);
+ CAM_ERR(CAM_SENSOR,
+ "Error in handling VREG GPIO");
goto power_up_failed;
}
break;
default:
- pr_err("%s error power seq type %d\n", __func__,
+ CAM_ERR(CAM_SENSOR, "error power seq type %d",
power_setting->seq_type);
break;
}
@@ -1163,23 +1322,24 @@
return 0;
power_up_failed:
- pr_err("%s:%d failed\n", __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "failed");
for (index--; index >= 0; index--) {
- CDBG("%s index %d\n", __func__, index);
+ CAM_DBG(CAM_SENSOR, "index %d", index);
power_setting = &ctrl->power_setting[index];
- CDBG("%s type %d\n", __func__, power_setting->seq_type);
+ CAM_DBG(CAM_SENSOR, "type %d",
+ power_setting->seq_type);
switch (power_setting->seq_type) {
case SENSOR_RESET:
case SENSOR_STANDBY:
case SENSOR_CUSTOM_GPIO1:
case SENSOR_CUSTOM_GPIO2:
- if (!ctrl->gpio_conf->gpio_num_info)
+ if (!gpio_num_info)
continue;
- if (!ctrl->gpio_conf->gpio_num_info->valid
+ if (!gpio_num_info->valid
[power_setting->seq_val])
continue;
gpio_set_value_cansleep(
- ctrl->gpio_conf->gpio_num_info->gpio_num
+ gpio_num_info->gpio_num
[power_setting->seq_val], GPIOF_OUT_INIT_LOW);
break;
case SENSOR_VANA:
@@ -1189,23 +1349,32 @@
case SENSOR_VAF_PWDM:
case SENSOR_CUSTOM_REG1:
case SENSOR_CUSTOM_REG2:
- if (power_setting->seq_val < ctrl->num_vreg)
- msm_camera_config_single_vreg(ctrl->dev,
- &ctrl->cam_vreg
- [power_setting->seq_val],
- (struct regulator **)
- &power_setting->data[0],
- 0);
+ if (power_setting->seq_val < num_vreg) {
+ CAM_DBG(CAM_SENSOR, "Disable Regulator");
+ vreg_idx = power_setting->seq_val;
+
+ rc = cam_soc_util_regulator_disable(
+ soc_info->rgltr[vreg_idx],
+ soc_info->rgltr_name[vreg_idx],
+ soc_info->rgltr_min_volt[vreg_idx],
+ soc_info->rgltr_max_volt[vreg_idx],
+ soc_info->rgltr_op_mode[vreg_idx],
+ soc_info->rgltr_delay[vreg_idx]);
+
+ power_setting->data[0] =
+ soc_info->rgltr[vreg_idx];
+
+ }
else
- pr_err("%s:%d:seq_val: %d > num_vreg: %d\n",
- __func__, __LINE__,
- power_setting->seq_val, ctrl->num_vreg);
+ CAM_ERR(CAM_SENSOR, "seq_val:%d > num_vreg: %d",
+ power_setting->seq_val, num_vreg);
msm_cam_sensor_handle_reg_gpio(power_setting->seq_type,
- ctrl->gpio_conf, GPIOF_OUT_INIT_LOW);
+ gpio_num_info, GPIOF_OUT_INIT_LOW);
+
break;
default:
- pr_err("%s error power seq type %d\n", __func__,
+ CAM_ERR(CAM_SENSOR, "error power seq type %d",
power_setting->seq_type);
break;
}
@@ -1220,14 +1389,12 @@
ret = pinctrl_select_state(ctrl->pinctrl_info.pinctrl,
ctrl->pinctrl_info.gpio_state_suspend);
if (ret)
- pr_err("%s:%d cannot set pin to suspend state\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "cannot set pin to suspend state");
devm_pinctrl_put(ctrl->pinctrl_info.pinctrl);
}
ctrl->cam_pinctrl_status = 0;
- msm_camera_request_gpio_table(
- ctrl->gpio_conf->cam_gpio_req_tbl,
- ctrl->gpio_conf->cam_gpio_req_tbl_size, 0);
+
+ cam_sensor_util_request_gpio_table(soc_info, 0);
return rc;
}
@@ -1254,19 +1421,18 @@
}
static int cam_config_mclk_reg(struct cam_sensor_power_ctrl_t *ctrl,
- int32_t index)
+ struct cam_hw_soc_info *soc_info, int32_t index)
{
- struct camera_vreg_t *cam_vreg;
int32_t num_vreg = 0, j = 0, rc = 0, idx = 0;
struct cam_sensor_power_setting *ps = NULL;
struct cam_sensor_power_setting *pd = NULL;
- cam_vreg = ctrl->cam_vreg;
- num_vreg = ctrl->num_vreg;
+ num_vreg = soc_info->num_rgltr;
+
pd = &ctrl->power_down_setting[index];
for (j = 0; j < num_vreg; j++) {
- if (!strcmp(cam_vreg[j].reg_name, "cam_clk")) {
+ if (!strcmp(soc_info->rgltr_name[j], "cam_clk")) {
ps = NULL;
for (idx = 0; idx <
@@ -1278,62 +1444,76 @@
}
}
- if (ps != NULL)
- msm_camera_config_single_vreg(
- ctrl->dev,
- &cam_vreg[j],
- (struct regulator **)
- &ps->data[0], 0);
+ if (ps != NULL) {
+ CAM_DBG(CAM_SENSOR, "Disable Regulator");
+
+ rc = cam_soc_util_regulator_disable(
+ soc_info->rgltr[j],
+ soc_info->rgltr_name[j],
+ soc_info->rgltr_min_volt[j],
+ soc_info->rgltr_max_volt[j],
+ soc_info->rgltr_op_mode[j],
+ soc_info->rgltr_delay[j]);
+
+ ps->data[0] =
+ soc_info->rgltr[j];
+ }
}
}
return rc;
}
-int msm_camera_power_down(struct cam_sensor_power_ctrl_t *ctrl)
+int msm_camera_power_down(struct cam_sensor_power_ctrl_t *ctrl,
+ struct cam_hw_soc_info *soc_info)
{
- int index = 0, ret = 0, num_vreg = 0;
+ int index = 0, ret = 0, num_vreg = 0, i;
struct cam_sensor_power_setting *pd = NULL;
struct cam_sensor_power_setting *ps;
- struct camera_vreg_t *cam_vreg;
+ struct msm_camera_gpio_num_info *gpio_num_info = NULL;
- CDBG("%s:%d\n", __func__, __LINE__);
- if (!ctrl) {
- pr_err("failed ctrl %pK\n", ctrl);
+ CAM_DBG(CAM_SENSOR, "Enter");
+ if (!ctrl || !soc_info) {
+ CAM_ERR(CAM_SENSOR, "failed ctrl %pK", ctrl);
return -EINVAL;
}
- cam_vreg = ctrl->cam_vreg;
- num_vreg = ctrl->num_vreg;
+ gpio_num_info = ctrl->gpio_num_info;
+ num_vreg = soc_info->num_rgltr;
for (index = 0; index < ctrl->power_down_setting_size; index++) {
- CDBG("%s index %d\n", __func__, index);
+ CAM_DBG(CAM_SENSOR, "index %d", index);
pd = &ctrl->power_down_setting[index];
ps = NULL;
- CDBG("%s type %d\n", __func__, pd->seq_type);
+ CAM_DBG(CAM_SENSOR, "type %d", pd->seq_type);
switch (pd->seq_type) {
case SENSOR_MCLK:
- ret = cam_config_mclk_reg(ctrl, index);
+ ret = cam_config_mclk_reg(ctrl, soc_info, index);
if (ret < 0) {
- pr_err("%s:%d :Error: in config clk reg\n",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR,
+ "config clk reg failed rc: %d", ret);
return ret;
}
- msm_camera_clk_enable(ctrl->dev,
- ctrl->clk_info, ctrl->clk_ptr,
- ctrl->clk_info_size, false);
+ //cam_soc_util_clk_disable_default(soc_info);
+ for (i = soc_info->num_clk - 1; i >= 0; i--) {
+ cam_soc_util_clk_disable(soc_info->clk[i],
+ soc_info->clk_name[i]);
+ }
+
break;
case SENSOR_RESET:
case SENSOR_STANDBY:
case SENSOR_CUSTOM_GPIO1:
case SENSOR_CUSTOM_GPIO2:
- if (!ctrl->gpio_conf->gpio_num_info->valid
- [pd->seq_val])
+
+ if (!gpio_num_info->valid[pd->seq_val])
continue;
+
gpio_set_value_cansleep(
- ctrl->gpio_conf->gpio_num_info->gpio_num
+ gpio_num_info->gpio_num
[pd->seq_val],
(int) pd->config_val);
+
break;
case SENSOR_VANA:
case SENSOR_VDIG:
@@ -1344,32 +1524,43 @@
case SENSOR_CUSTOM_REG2:
if (pd->seq_val == INVALID_VREG)
break;
+
ps = msm_camera_get_power_settings(
ctrl, pd->seq_type,
pd->seq_val);
if (ps) {
- if (pd->seq_val < ctrl->num_vreg)
- msm_camera_config_single_vreg(ctrl->dev,
- &ctrl->cam_vreg
- [pd->seq_val],
- (struct regulator **)
- &ps->data[0],
- 0);
+ if (pd->seq_val < num_vreg) {
+ CAM_DBG(CAM_SENSOR,
+ "Disable Regulator");
+ ret = cam_soc_util_regulator_disable(
+ soc_info->rgltr[ps->seq_val],
+ soc_info->rgltr_name[ps->seq_val],
+ soc_info->rgltr_min_volt[ps->seq_val],
+ soc_info->rgltr_max_volt[ps->seq_val],
+ soc_info->rgltr_op_mode[ps->seq_val],
+ soc_info->rgltr_delay[ps->seq_val]);
+
+ ps->data[0] =
+ soc_info->rgltr[ps->seq_val];
+ }
else
- pr_err("%s:%d:seq_val:%d > num_vreg: %d\n",
- __func__, __LINE__, pd->seq_val,
- ctrl->num_vreg);
+ CAM_ERR(CAM_SENSOR,
+ "seq_val:%d > num_vreg: %d",
+ pd->seq_val,
+ num_vreg);
} else
- pr_err("%s error in power up/down seq data\n",
- __func__);
+ CAM_ERR(CAM_SENSOR,
+ "error in power up/down seq");
+
ret = msm_cam_sensor_handle_reg_gpio(pd->seq_type,
- ctrl->gpio_conf, GPIOF_OUT_INIT_LOW);
+ gpio_num_info, GPIOF_OUT_INIT_LOW);
+
if (ret < 0)
- pr_err("ERR:%s Error while disabling VREG GPIO\n",
- __func__);
+ CAM_ERR(CAM_SENSOR,
+ "Error disabling VREG GPIO");
break;
default:
- pr_err("%s error power seq type %d\n", __func__,
+ CAM_ERR(CAM_SENSOR, "error power seq type %d",
pd->seq_type);
break;
}
@@ -1384,15 +1575,13 @@
ret = pinctrl_select_state(ctrl->pinctrl_info.pinctrl,
ctrl->pinctrl_info.gpio_state_suspend);
if (ret)
- pr_err("%s:%d cannot set pin to suspend state",
- __func__, __LINE__);
+ CAM_ERR(CAM_SENSOR, "cannot set pin to suspend state");
devm_pinctrl_put(ctrl->pinctrl_info.pinctrl);
}
ctrl->cam_pinctrl_status = 0;
- msm_camera_request_gpio_table(
- ctrl->gpio_conf->cam_gpio_req_tbl,
- ctrl->gpio_conf->cam_gpio_req_tbl_size, 0);
+
+ cam_sensor_util_request_gpio_table(soc_info, 0);
return 0;
}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h
index 7e7fc35..8a26369 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h
@@ -22,41 +22,38 @@
#include <cam_req_mgr_util.h>
#include <cam_req_mgr_interface.h>
#include <cam_mem_mgr.h>
+#include "cam_soc_util.h"
+#include "cam_debug_util.h"
#define INVALID_VREG 100
-int msm_camera_get_dt_power_setting_data(struct device_node *of_node,
- struct camera_vreg_t *cam_vreg, int num_vreg,
+int cam_get_dt_power_setting_data(struct device_node *of_node,
+ struct cam_hw_soc_info *soc_info,
struct cam_sensor_power_ctrl_t *power_info);
-int msm_camera_get_dt_gpio_req_tbl(struct device_node *of_node,
- struct msm_camera_gpio_conf *gconf, uint16_t *gpio_array,
- uint16_t gpio_array_size);
-
-int msm_camera_init_gpio_pin_tbl(struct device_node *of_node,
- struct msm_camera_gpio_conf *gconf, uint16_t *gpio_array,
- uint16_t gpio_array_size);
-
-int cam_sensor_get_dt_vreg_data(struct device_node *of_node,
- struct camera_vreg_t **cam_vreg, int *num_vreg);
-
-int cam_sensor_core_power_up(struct cam_sensor_power_ctrl_t *ctrl);
-
-int msm_camera_power_down(struct cam_sensor_power_ctrl_t *ctrl);
-
-int msm_camera_fill_vreg_params(struct camera_vreg_t *cam_vreg,
- int num_vreg, struct cam_sensor_power_setting *power_setting,
- uint16_t power_setting_size);
-
int msm_camera_pinctrl_init
(struct msm_pinctrl_info *sensor_pctrl, struct device *dev);
-int32_t msm_sensor_driver_get_gpio_data(
- struct msm_camera_gpio_conf **gpio_conf,
- struct device_node *of_node);
-
int cam_sensor_i2c_pkt_parser(struct i2c_settings_array *i2c_reg_settings,
struct cam_cmd_buf_desc *cmd_desc, int32_t num_cmd_buffers);
int32_t delete_request(struct i2c_settings_array *i2c_array);
+int cam_sensor_util_request_gpio_table(
+ struct cam_hw_soc_info *soc_info, int gpio_en);
+
+int cam_sensor_util_init_gpio_pin_tbl(
+ struct cam_hw_soc_info *soc_info,
+ struct msm_camera_gpio_num_info **pgpio_num_info);
+int cam_sensor_core_power_up(struct cam_sensor_power_ctrl_t *ctrl,
+ struct cam_hw_soc_info *soc_info);
+
+int msm_camera_power_down(struct cam_sensor_power_ctrl_t *ctrl,
+ struct cam_hw_soc_info *soc_info);
+
+int msm_camera_fill_vreg_params(struct cam_hw_soc_info *soc_info,
+ struct cam_sensor_power_setting *power_setting,
+ uint16_t power_setting_size);
+
+int32_t cam_sensor_update_power_settings(void *cmd_buf,
+ int cmd_length, struct cam_sensor_power_ctrl_t *power_info);
#endif /* _CAM_SENSOR_UTIL_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_smmu/Makefile b/drivers/media/platform/msm/camera/cam_smmu/Makefile
index 3619da7..e17dac6 100644
--- a/drivers/media/platform/msm/camera/cam_smmu/Makefile
+++ b/drivers/media/platform/msm/camera/cam_smmu/Makefile
@@ -1 +1,3 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+
obj-$(CONFIG_SPECTRA_CAMERA) += cam_smmu_api.o
diff --git a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
index ca0dfac..ff7a0e5 100644
--- a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
+++ b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
@@ -10,8 +10,6 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "CAM-SMMU %s:%d " fmt, __func__, __LINE__
-
#include <linux/module.h>
#include <linux/dma-buf.h>
#include <asm/dma-iommu.h>
@@ -25,6 +23,7 @@
#include <linux/genalloc.h>
#include "cam_smmu_api.h"
+#include "cam_debug_util.h"
#define SHARED_MEM_POOL_GRANULARITY 12
@@ -39,12 +38,6 @@
#define GET_SMMU_HDL(x, y) (((x) << COOKIE_SIZE) | ((y) & COOKIE_MASK))
#define GET_SMMU_TABLE_IDX(x) (((x) >> COOKIE_SIZE) & COOKIE_MASK)
-#ifdef CONFIG_CAM_SMMU_DBG
-#define CDBG(fmt, args...) pr_err(fmt, ##args)
-#else
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
-#endif
-
struct firmware_alloc_info {
struct device *fw_dev;
void *fw_kva;
@@ -223,7 +216,7 @@
mutex_lock(&iommu_cb_set.payload_list_lock);
if (list_empty(&iommu_cb_set.payload_list)) {
- pr_err("Payload list empty\n");
+ CAM_ERR(CAM_SMMU, "Payload list empty");
mutex_unlock(&iommu_cb_set.payload_list_lock);
return;
}
@@ -256,10 +249,11 @@
{
struct cam_dma_buff_info *mapping;
- pr_err("index = %d\n", idx);
+ CAM_ERR(CAM_SMMU, "index = %d", idx);
list_for_each_entry(mapping,
&iommu_cb_set.cb_info[idx].smmu_buf_list, list) {
- pr_err("ion_fd = %d, paddr= 0x%pK, len = %u, region = %d\n",
+ CAM_ERR(CAM_SMMU,
+ "ion_fd = %d, paddr= 0x%pK, len = %u, region = %d",
mapping->ion_fd, (void *)mapping->paddr,
(unsigned int)mapping->len,
mapping->region_id);
@@ -271,10 +265,10 @@
int i;
for (i = 0; i < iommu_cb_set.cb_num; i++) {
- pr_err("i= %d, handle= %d, name_addr=%pK\n", i,
+ CAM_ERR(CAM_SMMU, "i= %d, handle= %d, name_addr=%pK", i,
(int)iommu_cb_set.cb_info[i].handle,
(void *)iommu_cb_set.cb_info[i].name);
- pr_err("dev = %pK\n", iommu_cb_set.cb_info[i].dev);
+ CAM_ERR(CAM_SMMU, "dev = %pK", iommu_cb_set.cb_info[i].dev);
}
}
@@ -290,18 +284,21 @@
end_addr = (unsigned long)mapping->paddr + mapping->len;
if (start_addr <= current_addr && current_addr < end_addr) {
- pr_err("va %pK valid: range:%pK-%pK, fd = %d cb: %s\n",
+ CAM_ERR(CAM_SMMU,
+ "va %pK valid: range:%pK-%pK, fd = %d cb: %s",
vaddr, (void *)start_addr, (void *)end_addr,
mapping->ion_fd,
iommu_cb_set.cb_info[idx].name);
goto end;
} else {
- CDBG("va %pK is not in this range: %pK-%pK, fd = %d\n",
+ CAM_DBG(CAM_SMMU,
+ "va %pK is not in this range: %pK-%pK, fd = %d",
vaddr, (void *)start_addr, (void *)end_addr,
mapping->ion_fd);
}
}
- pr_err("Cannot find vaddr:%pK in SMMU %s uses invalid virt address\n",
+ CAM_ERR(CAM_SMMU,
+ "Cannot find vaddr:%pK in SMMU %s uses invalid virt address",
vaddr, iommu_cb_set.cb_info[idx].name);
end:
return;
@@ -315,20 +312,22 @@
int idx, i = 0;
if (!token || (handle == HANDLE_INIT)) {
- pr_err("Error: token is NULL or invalid handle\n");
+ CAM_ERR(CAM_SMMU, "Error: token is NULL or invalid handle");
return;
}
idx = GET_SMMU_TABLE_IDX(handle);
if (idx < 0 || idx >= iommu_cb_set.cb_num) {
- pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+ CAM_ERR(CAM_SMMU,
+ "Error: handle or index invalid. idx = %d hdl = %x",
idx, handle);
return;
}
mutex_lock(&iommu_cb_set.cb_info[idx].lock);
if (iommu_cb_set.cb_info[idx].handle != handle) {
- pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+ CAM_ERR(CAM_SMMU,
+ "Error: hdl is not valid, table_hdl = %x, hdl = %x",
iommu_cb_set.cb_info[idx].handle, handle);
mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
return;
@@ -336,7 +335,8 @@
if (client_page_fault_handler) {
if (iommu_cb_set.cb_info[idx].cb_count == CAM_SMMU_CB_MAX) {
- pr_err("%s Should not regiester more handlers\n",
+ CAM_ERR(CAM_SMMU,
+ "%s Should not regiester more handlers",
iommu_cb_set.cb_info[idx].name);
mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
return;
@@ -361,7 +361,8 @@
}
}
if (i == CAM_SMMU_CB_MAX)
- pr_err("Error: hdl %x no matching tokens: %s\n",
+ CAM_ERR(CAM_SMMU,
+ "Error: hdl %x no matching tokens: %s",
handle, iommu_cb_set.cb_info[idx].name);
}
mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
@@ -376,9 +377,10 @@
struct cam_smmu_work_payload *payload;
if (!token) {
- pr_err("Error: token is NULL\n");
- pr_err("Error: domain = %pK, device = %pK\n", domain, dev);
- pr_err("iova = %lX, flags = %d\n", iova, flags);
+ CAM_ERR(CAM_SMMU, "Error: token is NULL");
+ CAM_ERR(CAM_SMMU, "Error: domain = %pK, device = %pK",
+ domain, dev);
+ CAM_ERR(CAM_SMMU, "iova = %lX, flags = %d", iova, flags);
return 0;
}
@@ -390,7 +392,8 @@
}
if (idx < 0 || idx >= iommu_cb_set.cb_num) {
- pr_err("Error: index is not valid, index = %d, token = %s\n",
+ CAM_ERR(CAM_SMMU,
+ "Error: index is not valid, index = %d, token = %s",
idx, cb_name);
return 0;
}
@@ -427,7 +430,7 @@
return IOMMU_READ|IOMMU_WRITE;
case CAM_SMMU_MAP_INVALID:
default:
- pr_err("Error: Direction is invalid. dir = %d\n", dir);
+ CAM_ERR(CAM_SMMU, "Error: Direction is invalid. dir = %d", dir);
break;
};
return IOMMU_INVALID_DIR;
@@ -445,7 +448,8 @@
return DMA_BIDIRECTIONAL;
case CAM_SMMU_MAP_INVALID:
default:
- pr_err("Error: Direction is invalid. dir = %d\n", (int)dir);
+ CAM_ERR(CAM_SMMU, "Error: Direction is invalid. dir = %d",
+ (int)dir);
break;
}
return DMA_NONE;
@@ -478,7 +482,8 @@
int i;
if (hdl == HANDLE_INIT) {
- CDBG("iommu handle is init number. Need to try again\n");
+ CAM_DBG(CAM_SMMU,
+ "iommu handle is init number. Need to try again");
return 1;
}
@@ -487,7 +492,8 @@
continue;
if (iommu_cb_set.cb_info[i].handle == hdl) {
- CDBG("iommu handle %d conflicts\n", (int)hdl);
+ CAM_DBG(CAM_SMMU, "iommu handle %d conflicts",
+ (int)hdl);
return 1;
}
}
@@ -503,7 +509,7 @@
get_random_bytes(&rand, COOKIE_NUM_BYTE);
hdl = GET_SMMU_HDL(idx, rand);
- CDBG("create handle value = %x\n", (int)hdl);
+ CAM_DBG(CAM_SMMU, "create handle value = %x", (int)hdl);
return hdl;
}
@@ -515,7 +521,8 @@
/* attach the mapping to device */
rc = arm_iommu_attach_device(cb->dev, cb->mapping);
if (rc < 0) {
- pr_err("Error: ARM IOMMU attach failed. ret = %d\n", rc);
+ CAM_ERR(CAM_SMMU, "Error: ARM IOMMU attach failed. ret = %d",
+ rc);
rc = -ENODEV;
}
@@ -533,7 +540,8 @@
if (!strcmp(iommu_cb_set.cb_info[i].name, name)) {
mutex_lock(&iommu_cb_set.cb_info[i].lock);
if (iommu_cb_set.cb_info[i].handle != HANDLE_INIT) {
- pr_err("Error: %s already got handle 0x%x\n",
+ CAM_ERR(CAM_SMMU,
+ "Error: %s already got handle 0x%x",
name,
iommu_cb_set.cb_info[i].handle);
mutex_unlock(&iommu_cb_set.cb_info[i].lock);
@@ -549,14 +557,15 @@
iommu_cb_set.cb_info[i].handle = handle;
iommu_cb_set.cb_info[i].cb_count = 0;
*hdl = handle;
- CDBG("%s creates handle 0x%x\n", name, handle);
+ CAM_DBG(CAM_SMMU, "%s creates handle 0x%x",
+ name, handle);
mutex_unlock(&iommu_cb_set.cb_info[i].lock);
return 0;
}
}
- pr_err("Error: Cannot find name %s or all handle exist!\n",
- name);
+ CAM_ERR(CAM_SMMU, "Error: Cannot find name %s or all handle exist",
+ name);
cam_smmu_print_table();
return -EINVAL;
}
@@ -571,7 +580,8 @@
if (!count) {
err = -EINVAL;
- pr_err("Page count is zero, size passed = %zu\n", size);
+ CAM_ERR(CAM_SMMU, "Page count is zero, size passed = %zu",
+ size);
goto bail;
}
@@ -630,12 +640,12 @@
(1 << mapping->order) - 1) >> mapping->order;
if (!addr) {
- pr_err("Error: Invalid address\n");
+ CAM_ERR(CAM_SMMU, "Error: Invalid address");
return -EINVAL;
}
if (start + count > mapping->bits) {
- pr_err("Error: Invalid page bits in scratch map\n");
+ CAM_ERR(CAM_SMMU, "Error: Invalid page bits in scratch map");
return -EINVAL;
}
@@ -657,13 +667,13 @@
list_for_each_entry(mapping, &iommu_cb_set.cb_info[idx].smmu_buf_list,
list) {
if (mapping->paddr == virt_addr) {
- CDBG("Found virtual address %lx\n",
+ CAM_DBG(CAM_SMMU, "Found virtual address %lx",
(unsigned long)virt_addr);
return mapping;
}
}
- pr_err("Error: Cannot find virtual address %lx by index %d\n",
+ CAM_ERR(CAM_SMMU, "Error: Cannot find virtual address %lx by index %d",
(unsigned long)virt_addr, idx);
return NULL;
}
@@ -676,12 +686,12 @@
list_for_each_entry(mapping, &iommu_cb_set.cb_info[idx].smmu_buf_list,
list) {
if (mapping->ion_fd == ion_fd) {
- CDBG(" find ion_fd %d\n", ion_fd);
+ CAM_DBG(CAM_SMMU, "find ion_fd %d", ion_fd);
return mapping;
}
}
- pr_err("Error: Cannot find fd %d by index %d\n",
+ CAM_ERR(CAM_SMMU, "Error: Cannot find fd %d by index %d",
ion_fd, idx);
return NULL;
}
@@ -693,7 +703,7 @@
list_for_each_entry_safe(mapping_info, temp,
&iommu_cb_set.cb_info[idx].smmu_buf_list, list) {
- CDBG("Free mapping address %pK, i = %d, fd = %d\n",
+ CAM_DBG(CAM_SMMU, "Free mapping address %pK, i = %d, fd = %d",
(void *)mapping_info->paddr, idx,
mapping_info->ion_fd);
@@ -708,10 +718,12 @@
idx);
if (ret < 0) {
- pr_err("Buffer delete failed: idx = %d\n", idx);
- pr_err("Buffer delete failed: addr = %lx, fd = %d\n",
- (unsigned long)mapping_info->paddr,
- mapping_info->ion_fd);
+ CAM_ERR(CAM_SMMU, "Buffer delete failed: idx = %d",
+ idx);
+ CAM_ERR(CAM_SMMU,
+ "Buffer delete failed: addr = %lx, fd = %d",
+ (unsigned long)mapping_info->paddr,
+ mapping_info->ion_fd);
/*
* Ignore this error and continue to delete other
* buffers in the list
@@ -730,13 +742,13 @@
} else if (iommu_cb_set.cb_info[idx].state == CAM_SMMU_DETACH) {
ret = cam_smmu_attach_device(idx);
if (ret < 0) {
- pr_err("Error: ATTACH fail\n");
+ CAM_ERR(CAM_SMMU, "Error: ATTACH fail");
return -ENODEV;
}
iommu_cb_set.cb_info[idx].state = CAM_SMMU_ATTACH;
ret = 0;
} else {
- pr_err("Error: Not detach/attach: %d\n",
+ CAM_ERR(CAM_SMMU, "Error: Not detach/attach: %d",
iommu_cb_set.cb_info[idx].state);
ret = -EINVAL;
}
@@ -768,28 +780,32 @@
uint32_t vaddr = 0;
if (!iova || !size || (smmu_hdl == HANDLE_INIT)) {
- pr_err("Error: Input args are invalid\n");
+ CAM_ERR(CAM_SMMU, "Error: Input args are invalid");
return -EINVAL;
}
- CDBG("Allocating iova size = %zu for smmu hdl=%X\n", size, smmu_hdl);
+ CAM_DBG(CAM_SMMU, "Allocating iova size = %zu for smmu hdl=%X",
+ size, smmu_hdl);
idx = GET_SMMU_TABLE_IDX(smmu_hdl);
if (idx < 0 || idx >= iommu_cb_set.cb_num) {
- pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+ CAM_ERR(CAM_SMMU,
+ "Error: handle or index invalid. idx = %d hdl = %x",
idx, smmu_hdl);
return -EINVAL;
}
if (iommu_cb_set.cb_info[idx].handle != smmu_hdl) {
- pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+ CAM_ERR(CAM_SMMU,
+ "Error: hdl is not valid, table_hdl = %x, hdl = %x",
iommu_cb_set.cb_info[idx].handle, smmu_hdl);
rc = -EINVAL;
goto get_addr_end;
}
if (!iommu_cb_set.cb_info[idx].shared_support) {
- pr_err("Error: Shared memory not supported for hdl = %X\n",
+ CAM_ERR(CAM_SMMU,
+ "Error: Shared memory not supported for hdl = %X",
smmu_hdl);
rc = -EINVAL;
goto get_addr_end;
@@ -812,19 +828,21 @@
int idx;
if (!size || (smmu_hdl == HANDLE_INIT)) {
- pr_err("Error: Input args are invalid\n");
+ CAM_ERR(CAM_SMMU, "Error: Input args are invalid");
return -EINVAL;
}
idx = GET_SMMU_TABLE_IDX(smmu_hdl);
if (idx < 0 || idx >= iommu_cb_set.cb_num) {
- pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+ CAM_ERR(CAM_SMMU,
+ "Error: handle or index invalid. idx = %d hdl = %x",
idx, smmu_hdl);
return -EINVAL;
}
if (iommu_cb_set.cb_info[idx].handle != smmu_hdl) {
- pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+ CAM_ERR(CAM_SMMU,
+ "Error: hdl is not valid, table_hdl = %x, hdl = %x",
iommu_cb_set.cb_info[idx].handle, smmu_hdl);
rc = -EINVAL;
goto get_addr_end;
@@ -848,45 +866,47 @@
struct iommu_domain *domain;
if (!iova || !len || !cpuva || (smmu_hdl == HANDLE_INIT)) {
- pr_err("Error: Input args are invalid\n");
+ CAM_ERR(CAM_SMMU, "Error: Input args are invalid");
return -EINVAL;
}
idx = GET_SMMU_TABLE_IDX(smmu_hdl);
if (idx < 0 || idx >= iommu_cb_set.cb_num) {
- pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+ CAM_ERR(CAM_SMMU,
+ "Error: handle or index invalid. idx = %d hdl = %x",
idx, smmu_hdl);
rc = -EINVAL;
goto end;
}
if (!iommu_cb_set.cb_info[idx].firmware_support) {
- pr_err("Firmware memory not supported for this SMMU handle\n");
+ CAM_ERR(CAM_SMMU,
+ "Firmware memory not supported for this SMMU handle");
rc = -EINVAL;
goto end;
}
mutex_lock(&iommu_cb_set.cb_info[idx].lock);
if (iommu_cb_set.cb_info[idx].is_fw_allocated) {
- pr_err("Trying to allocate twice\n");
+ CAM_ERR(CAM_SMMU, "Trying to allocate twice");
rc = -ENOMEM;
goto unlock_and_end;
}
firmware_len = iommu_cb_set.cb_info[idx].firmware_info.iova_len;
firmware_start = iommu_cb_set.cb_info[idx].firmware_info.iova_start;
- CDBG("Firmware area len from DT = %zu\n", firmware_len);
+ CAM_DBG(CAM_SMMU, "Firmware area len from DT = %zu", firmware_len);
icp_fw.fw_kva = dma_alloc_coherent(icp_fw.fw_dev,
firmware_len,
&icp_fw.fw_dma_hdl,
GFP_KERNEL);
if (!icp_fw.fw_kva) {
- pr_err("FW memory alloc failed\n");
+ CAM_ERR(CAM_SMMU, "FW memory alloc failed");
rc = -ENOMEM;
goto unlock_and_end;
} else {
- CDBG("DMA alloc returned fw = %pK, hdl = %pK\n",
+ CAM_DBG(CAM_SMMU, "DMA alloc returned fw = %pK, hdl = %pK",
icp_fw.fw_kva, (void *)icp_fw.fw_dma_hdl);
}
@@ -898,7 +918,7 @@
IOMMU_READ|IOMMU_WRITE|IOMMU_PRIV);
if (rc) {
- pr_err("Failed to map FW into IOMMU\n");
+ CAM_ERR(CAM_SMMU, "Failed to map FW into IOMMU");
rc = -ENOMEM;
goto alloc_fail;
}
@@ -933,27 +953,30 @@
size_t unmapped = 0;
if (smmu_hdl == HANDLE_INIT) {
- pr_err("Error: Invalid handle\n");
+ CAM_ERR(CAM_SMMU, "Error: Invalid handle");
return -EINVAL;
}
idx = GET_SMMU_TABLE_IDX(smmu_hdl);
if (idx < 0 || idx >= iommu_cb_set.cb_num) {
- pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+ CAM_ERR(CAM_SMMU,
+ "Error: handle or index invalid. idx = %d hdl = %x",
idx, smmu_hdl);
rc = -EINVAL;
goto end;
}
if (!iommu_cb_set.cb_info[idx].firmware_support) {
- pr_err("Firmware memory not supported for this SMMU handle\n");
+ CAM_ERR(CAM_SMMU,
+ "Firmware memory not supported for this SMMU handle");
rc = -EINVAL;
goto end;
}
mutex_lock(&iommu_cb_set.cb_info[idx].lock);
if (!iommu_cb_set.cb_info[idx].is_fw_allocated) {
- pr_err("Trying to deallocate firmware that is not allocated\n");
+ CAM_ERR(CAM_SMMU,
+ "Trying to deallocate firmware that is not allocated");
rc = -ENOMEM;
goto unlock_and_end;
}
@@ -966,7 +989,7 @@
firmware_len);
if (unmapped != firmware_len) {
- pr_err("Only %zu unmapped out of total %zu\n",
+ CAM_ERR(CAM_SMMU, "Only %zu unmapped out of total %zu",
unmapped,
firmware_len);
rc = -EINVAL;
@@ -997,18 +1020,18 @@
struct cam_context_bank_info *cb = NULL;
if (!region_info) {
- pr_err("Invalid region_info pointer\n");
+ CAM_ERR(CAM_SMMU, "Invalid region_info pointer");
return -EINVAL;
}
if (smmu_hdl == HANDLE_INIT) {
- pr_err("Invalid handle\n");
+ CAM_ERR(CAM_SMMU, "Invalid handle");
return -EINVAL;
}
idx = GET_SMMU_TABLE_IDX(smmu_hdl);
if (idx < 0 || idx >= iommu_cb_set.cb_num) {
- pr_err("Handle or index invalid. idx = %d hdl = %x\n",
+ CAM_ERR(CAM_SMMU, "Handle or index invalid. idx = %d hdl = %x",
idx, smmu_hdl);
return -EINVAL;
}
@@ -1016,7 +1039,7 @@
mutex_lock(&iommu_cb_set.cb_info[idx].lock);
cb = &iommu_cb_set.cb_info[idx];
if (!cb) {
- pr_err("SMMU context bank pointer invalid\n");
+ CAM_ERR(CAM_SMMU, "SMMU context bank pointer invalid");
mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
return -EINVAL;
}
@@ -1024,7 +1047,7 @@
switch (region_id) {
case CAM_SMMU_REGION_FIRMWARE:
if (!cb->firmware_support) {
- pr_err("Firmware not supported\n");
+ CAM_ERR(CAM_SMMU, "Firmware not supported");
mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
return -ENODEV;
}
@@ -1033,7 +1056,7 @@
break;
case CAM_SMMU_REGION_SHARED:
if (!cb->shared_support) {
- pr_err("Shared mem not supported\n");
+ CAM_ERR(CAM_SMMU, "Shared mem not supported");
mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
return -ENODEV;
}
@@ -1042,7 +1065,7 @@
break;
case CAM_SMMU_REGION_SCRATCH:
if (!cb->scratch_buf_support) {
- pr_err("Scratch memory not supported\n");
+ CAM_ERR(CAM_SMMU, "Scratch memory not supported");
mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
return -ENODEV;
}
@@ -1051,7 +1074,7 @@
break;
case CAM_SMMU_REGION_IO:
if (!cb->io_support) {
- pr_err("IO memory not supported\n");
+ CAM_ERR(CAM_SMMU, "IO memory not supported");
mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
return -ENODEV;
}
@@ -1059,7 +1082,7 @@
region_info->iova_len = cb->io_info.iova_len;
break;
default:
- pr_err("Invalid region id: %d for smmu hdl: %X\n",
+ CAM_ERR(CAM_SMMU, "Invalid region id: %d for smmu hdl: %X",
smmu_hdl, region_id);
mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
return -EINVAL;
@@ -1088,28 +1111,28 @@
buf = dma_buf_get(ion_fd);
if (IS_ERR_OR_NULL(buf)) {
rc = PTR_ERR(buf);
- pr_err("Error: dma get buf failed. fd = %d\n", ion_fd);
+ CAM_ERR(CAM_SMMU, "Error: dma get buf failed. fd = %d", ion_fd);
goto err_out;
}
attach = dma_buf_attach(buf, iommu_cb_set.cb_info[idx].dev);
if (IS_ERR_OR_NULL(attach)) {
rc = PTR_ERR(attach);
- pr_err("Error: dma buf attach failed\n");
+ CAM_ERR(CAM_SMMU, "Error: dma buf attach failed");
goto err_put;
}
table = dma_buf_map_attachment(attach, dma_dir);
if (IS_ERR_OR_NULL(table)) {
rc = PTR_ERR(table);
- pr_err("Error: dma buf map attachment failed\n");
+ CAM_ERR(CAM_SMMU, "Error: dma buf map attachment failed");
goto err_detach;
}
if (region_id == CAM_SMMU_REGION_SHARED) {
domain = iommu_cb_set.cb_info[idx].mapping->domain;
if (!domain) {
- pr_err("CB has no domain set\n");
+ CAM_ERR(CAM_SMMU, "CB has no domain set");
goto err_unmap_sg;
}
@@ -1118,7 +1141,8 @@
&iova);
if (rc < 0) {
- pr_err("IOVA alloc failed for shared memory\n");
+ CAM_ERR(CAM_SMMU,
+ "IOVA alloc failed for shared memory");
goto err_unmap_sg;
}
@@ -1129,17 +1153,17 @@
IOMMU_READ | IOMMU_WRITE);
if (size < 0) {
- pr_err("IOMMU mapping failed\n");
+ CAM_ERR(CAM_SMMU, "IOMMU mapping failed");
rc = cam_smmu_free_iova(iova,
size,
iommu_cb_set.cb_info[idx].handle);
if (rc)
- pr_err("IOVA free failed\n");
+ CAM_ERR(CAM_SMMU, "IOVA free failed");
rc = -ENOMEM;
goto err_unmap_sg;
} else {
- CDBG("iommu_map_sg returned %zu\n", size);
+ CAM_DBG(CAM_SMMU, "iommu_map_sg returned %zu", size);
*paddr_ptr = iova;
*len_ptr = size;
}
@@ -1148,7 +1172,7 @@
table->sgl, table->nents, dma_dir, buf);
if (rc != table->nents) {
- pr_err("Error: msm_dma_map_sg_lazy failed\n");
+ CAM_ERR(CAM_SMMU, "Error: msm_dma_map_sg_lazy failed");
rc = -ENOMEM;
goto err_unmap_sg;
} else {
@@ -1156,22 +1180,23 @@
*len_ptr = (size_t)sg_dma_len(table->sgl);
}
} else {
- pr_err("Error: Wrong region id passed for %s\n", __func__);
+ CAM_ERR(CAM_SMMU, "Error: Wrong region id passed");
rc = -EINVAL;
goto err_unmap_sg;
}
if (table->sgl) {
- CDBG("DMA buf: %pK, device: %pK, attach: %pK, table: %pK\n",
- (void *)buf,
- (void *)iommu_cb_set.cb_info[idx].dev,
- (void *)attach, (void *)table);
- CDBG("table sgl: %pK, rc: %d, dma_address: 0x%x\n",
- (void *)table->sgl, rc,
- (unsigned int)table->sgl->dma_address);
+ CAM_DBG(CAM_SMMU,
+ "DMA buf: %pK, device: %pK, attach: %pK, table: %pK",
+ (void *)buf,
+ (void *)iommu_cb_set.cb_info[idx].dev,
+ (void *)attach, (void *)table);
+ CAM_DBG(CAM_SMMU, "table sgl: %pK, rc: %d, dma_address: 0x%x",
+ (void *)table->sgl, rc,
+ (unsigned int)table->sgl->dma_address);
} else {
rc = -EINVAL;
- pr_err("Error: table sgl is null\n");
+ CAM_ERR(CAM_SMMU, "Error: table sgl is null");
goto err_unmap_sg;
}
@@ -1192,13 +1217,13 @@
mapping_info->region_id = region_id;
if (!*paddr_ptr || !*len_ptr) {
- pr_err("Error: Space Allocation failed!\n");
+ CAM_ERR(CAM_SMMU, "Error: Space Allocation failed");
kfree(mapping_info);
rc = -ENOSPC;
goto err_alloc;
}
- CDBG("ion_fd = %d, dev = %pK, paddr= %pK, len = %u\n", ion_fd,
- (void *)iommu_cb_set.cb_info[idx].dev,
+ CAM_DBG(CAM_SMMU, "ion_fd = %d, dev = %pK, paddr= %pK, len = %u",
+ ion_fd, (void *)iommu_cb_set.cb_info[idx].dev,
(void *)*paddr_ptr, (unsigned int)*len_ptr);
/* add to the list */
@@ -1241,17 +1266,19 @@
if ((!mapping_info->buf) || (!mapping_info->table) ||
(!mapping_info->attach)) {
- pr_err("Error: Invalid params dev = %pK, table = %pK\n",
+ CAM_ERR(CAM_SMMU,
+ "Error: Invalid params dev = %pK, table = %pK",
(void *)iommu_cb_set.cb_info[idx].dev,
(void *)mapping_info->table);
- pr_err("Error:dma_buf = %pK, attach = %pK\n",
+ CAM_ERR(CAM_SMMU, "Error:dma_buf = %pK, attach = %pK",
(void *)mapping_info->buf,
(void *)mapping_info->attach);
return -EINVAL;
}
if (mapping_info->region_id == CAM_SMMU_REGION_SHARED) {
- CDBG("Removing SHARED buffer paddr = %pK, len = %zu\n",
+ CAM_DBG(CAM_SMMU,
+ "Removing SHARED buffer paddr = %pK, len = %zu",
(void *)mapping_info->paddr, mapping_info->len);
domain = iommu_cb_set.cb_info[idx].mapping->domain;
@@ -1261,8 +1288,8 @@
mapping_info->len);
if (size != mapping_info->len) {
- pr_err("IOMMU unmap failed\n");
- pr_err("Unmapped = %zu, requested = %zu\n",
+ CAM_ERR(CAM_SMMU, "IOMMU unmap failed");
+ CAM_ERR(CAM_SMMU, "Unmapped = %zu, requested = %zu",
size,
mapping_info->len);
}
@@ -1272,7 +1299,7 @@
iommu_cb_set.cb_info[idx].handle);
if (rc)
- pr_err("IOVA free failed\n");
+ CAM_ERR(CAM_SMMU, "IOVA free failed");
} else if (mapping_info->region_id == CAM_SMMU_REGION_IO) {
msm_dma_unmap_sg(iommu_cb_set.cb_info[idx].dev,
@@ -1317,19 +1344,19 @@
int ret = 0;
if (!identifier) {
- pr_err("Error: iommu hardware name is NULL\n");
+ CAM_ERR(CAM_SMMU, "Error: iommu hardware name is NULL");
return -EINVAL;
}
if (!handle_ptr) {
- pr_err("Error: handle pointer is NULL\n");
+ CAM_ERR(CAM_SMMU, "Error: handle pointer is NULL");
return -EINVAL;
}
/* create and put handle in the table */
ret = cam_smmu_create_add_handle_in_table(identifier, handle_ptr);
if (ret < 0)
- pr_err("Error: %s get handle fail\n", identifier);
+ CAM_ERR(CAM_SMMU, "Error: %s get handle fail", identifier);
return ret;
}
@@ -1340,20 +1367,21 @@
int ret = 0, idx;
if (handle == HANDLE_INIT) {
- pr_err("Error: Invalid handle\n");
+ CAM_ERR(CAM_SMMU, "Error: Invalid handle");
return -EINVAL;
}
idx = GET_SMMU_TABLE_IDX(handle);
if (idx < 0 || idx >= iommu_cb_set.cb_num) {
- pr_err("Error: Index invalid. idx = %d hdl = %x\n",
+ CAM_ERR(CAM_SMMU, "Error: Index invalid. idx = %d hdl = %x",
idx, handle);
return -EINVAL;
}
mutex_lock(&iommu_cb_set.cb_info[idx].lock);
if (iommu_cb_set.cb_info[idx].handle != handle) {
- pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+ CAM_ERR(CAM_SMMU,
+ "Error: hdl is not valid, table_hdl = %x, hdl = %x",
iommu_cb_set.cb_info[idx].handle, handle);
mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
return -EINVAL;
@@ -1371,7 +1399,7 @@
case CAM_SMMU_VOTE:
case CAM_SMMU_DEVOTE:
default:
- pr_err("Error: idx = %d, ops = %d\n", idx, ops);
+ CAM_ERR(CAM_SMMU, "Error: idx = %d, ops = %d", idx, ops);
ret = -EINVAL;
}
mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
@@ -1396,10 +1424,10 @@
struct page *page;
struct sg_table *table = NULL;
- CDBG("%s: nents = %lu, idx = %d, virt_len = %zx\n",
- __func__, nents, idx, virt_len);
- CDBG("%s: phys_len = %zx, iommu_dir = %d, virt_addr = %pK\n",
- __func__, phys_len, iommu_dir, virt_addr);
+ CAM_DBG(CAM_SMMU, "nents = %lu, idx = %d, virt_len = %zx",
+ nents, idx, virt_len);
+ CAM_DBG(CAM_SMMU, "phys_len = %zx, iommu_dir = %d, virt_addr = %pK",
+ phys_len, iommu_dir, virt_addr);
/*
* This table will go inside the 'mapping' structure
@@ -1435,7 +1463,8 @@
virt_len, &iova);
if (rc < 0) {
- pr_err("Could not find valid iova for scratch buffer");
+ CAM_ERR(CAM_SMMU,
+ "Could not find valid iova for scratch buffer");
goto err_iommu_map;
}
@@ -1444,7 +1473,7 @@
table->sgl,
table->nents,
iommu_dir) != virt_len) {
- pr_err("iommu_map_sg() failed");
+ CAM_ERR(CAM_SMMU, "iommu_map_sg() failed");
goto err_iommu_map;
}
@@ -1466,22 +1495,23 @@
mapping_info->phys_len = phys_len;
mapping_info->region_id = CAM_SMMU_REGION_SCRATCH;
- CDBG("%s: paddr = %pK, len = %zx, phys_len = %zx",
- __func__, (void *)mapping_info->paddr,
+ CAM_DBG(CAM_SMMU, "paddr = %pK, len = %zx, phys_len = %zx",
+ (void *)mapping_info->paddr,
mapping_info->len, mapping_info->phys_len);
list_add(&mapping_info->list, &iommu_cb_set.cb_info[idx].smmu_buf_list);
*virt_addr = (dma_addr_t)iova;
- CDBG("%s: mapped virtual address = %lx\n", __func__,
+ CAM_DBG(CAM_SMMU, "mapped virtual address = %lx",
(unsigned long)*virt_addr);
return 0;
err_mapping_info:
unmapped = iommu_unmap(domain, iova, virt_len);
if (unmapped != virt_len)
- pr_err("Unmapped only %zx instead of %zx", unmapped, virt_len);
+ CAM_ERR(CAM_SMMU, "Unmapped only %zx instead of %zx",
+ unmapped, virt_len);
err_iommu_map:
__free_pages(page, get_order(phys_len));
err_page_alloc:
@@ -1504,7 +1534,8 @@
&iommu_cb_set.cb_info[idx].scratch_map;
if (!mapping_info->table) {
- pr_err("Error: Invalid params: dev = %pK, table = %pK",
+ CAM_ERR(CAM_SMMU,
+ "Error: Invalid params: dev = %pK, table = %pK",
(void *)iommu_cb_set.cb_info[idx].dev,
(void *)mapping_info->table);
return -EINVAL;
@@ -1513,14 +1544,15 @@
/* Clean up the mapping_info struct from the list */
unmapped = iommu_unmap(domain, mapping_info->paddr, mapping_info->len);
if (unmapped != mapping_info->len)
- pr_err("Unmapped only %zx instead of %zx",
+ CAM_ERR(CAM_SMMU, "Unmapped only %zx instead of %zx",
unmapped, mapping_info->len);
rc = cam_smmu_free_scratch_va(scratch_map,
mapping_info->paddr,
mapping_info->len);
if (rc < 0) {
- pr_err("Error: Invalid iova while freeing scratch buffer\n");
+ CAM_ERR(CAM_SMMU,
+ "Error: Invalid iova while freeing scratch buffer");
rc = -EINVAL;
}
@@ -1546,67 +1578,74 @@
unsigned int iommu_dir;
if (!paddr_ptr || !virt_len || !phys_len) {
- pr_err("Error: Input pointer or lengths invalid\n");
+ CAM_ERR(CAM_SMMU, "Error: Input pointer or lengths invalid");
return -EINVAL;
}
if (virt_len < phys_len) {
- pr_err("Error: virt_len > phys_len\n");
+ CAM_ERR(CAM_SMMU, "Error: virt_len > phys_len");
return -EINVAL;
}
if (handle == HANDLE_INIT) {
- pr_err("Error: Invalid handle\n");
+ CAM_ERR(CAM_SMMU, "Error: Invalid handle");
return -EINVAL;
}
iommu_dir = cam_smmu_translate_dir_to_iommu_dir(dir);
if (iommu_dir == IOMMU_INVALID_DIR) {
- pr_err("Error: translate direction failed. dir = %d\n", dir);
+ CAM_ERR(CAM_SMMU,
+ "Error: translate direction failed. dir = %d", dir);
return -EINVAL;
}
idx = GET_SMMU_TABLE_IDX(handle);
if (idx < 0 || idx >= iommu_cb_set.cb_num) {
- pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+ CAM_ERR(CAM_SMMU,
+ "Error: handle or index invalid. idx = %d hdl = %x",
idx, handle);
return -EINVAL;
}
mutex_lock(&iommu_cb_set.cb_info[idx].lock);
if (iommu_cb_set.cb_info[idx].handle != handle) {
- pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+ CAM_ERR(CAM_SMMU,
+ "Error: hdl is not valid, table_hdl = %x, hdl = %x",
iommu_cb_set.cb_info[idx].handle, handle);
rc = -EINVAL;
goto error;
}
if (!iommu_cb_set.cb_info[idx].scratch_buf_support) {
- pr_err("Error: Context bank does not support scratch bufs\n");
+ CAM_ERR(CAM_SMMU,
+ "Error: Context bank does not support scratch bufs");
rc = -EINVAL;
goto error;
}
- CDBG("%s: smmu handle = %x, idx = %d, dir = %d\n",
- __func__, handle, idx, dir);
- CDBG("%s: virt_len = %zx, phys_len = %zx\n",
- __func__, phys_len, virt_len);
+ CAM_DBG(CAM_SMMU, "smmu handle = %x, idx = %d, dir = %d",
+ handle, idx, dir);
+ CAM_DBG(CAM_SMMU, "virt_len = %zx, phys_len = %zx",
+ phys_len, virt_len);
if (iommu_cb_set.cb_info[idx].state != CAM_SMMU_ATTACH) {
- pr_err("Err:Dev %s should call SMMU attach before map buffer\n",
- iommu_cb_set.cb_info[idx].name);
+ CAM_ERR(CAM_SMMU,
+ "Err:Dev %s should call SMMU attach before map buffer",
+ iommu_cb_set.cb_info[idx].name);
rc = -EINVAL;
goto error;
}
if (!IS_ALIGNED(virt_len, PAGE_SIZE)) {
- pr_err("Requested scratch buffer length not page aligned\n");
+ CAM_ERR(CAM_SMMU,
+ "Requested scratch buffer length not page aligned");
rc = -EINVAL;
goto error;
}
if (!IS_ALIGNED(virt_len, phys_len)) {
- pr_err("Requested virt length not aligned with phys length\n");
+ CAM_ERR(CAM_SMMU,
+ "Requested virt length not aligned with phys length");
rc = -EINVAL;
goto error;
}
@@ -1617,7 +1656,7 @@
iommu_dir,
paddr_ptr);
if (rc < 0)
- pr_err("Error: mapping or add list fail\n");
+ CAM_ERR(CAM_SMMU, "Error: mapping or add list fail");
error:
mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
@@ -1632,28 +1671,31 @@
struct cam_dma_buff_info *mapping_info;
if (handle == HANDLE_INIT) {
- pr_err("Error: Invalid handle\n");
+ CAM_ERR(CAM_SMMU, "Error: Invalid handle");
return -EINVAL;
}
/* find index in the iommu_cb_set.cb_info */
idx = GET_SMMU_TABLE_IDX(handle);
if (idx < 0 || idx >= iommu_cb_set.cb_num) {
- pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+ CAM_ERR(CAM_SMMU,
+ "Error: handle or index invalid. idx = %d hdl = %x",
idx, handle);
return -EINVAL;
}
mutex_lock(&iommu_cb_set.cb_info[idx].lock);
if (iommu_cb_set.cb_info[idx].handle != handle) {
- pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+ CAM_ERR(CAM_SMMU,
+ "Error: hdl is not valid, table_hdl = %x, hdl = %x",
iommu_cb_set.cb_info[idx].handle, handle);
rc = -EINVAL;
goto handle_err;
}
if (!iommu_cb_set.cb_info[idx].scratch_buf_support) {
- pr_err("Error: Context bank does not support scratch buffers\n");
+ CAM_ERR(CAM_SMMU,
+ "Error: Context bank does not support scratch buffers");
rc = -EINVAL;
goto handle_err;
}
@@ -1663,7 +1705,7 @@
*/
mapping_info = cam_smmu_find_mapping_by_virt_address(idx, paddr);
if (!mapping_info) {
- pr_err("Error: Invalid params\n");
+ CAM_ERR(CAM_SMMU, "Error: Invalid params");
rc = -ENODEV;
goto handle_err;
}
@@ -1671,7 +1713,7 @@
/* unmapping one buffer from device */
rc = cam_smmu_free_scratch_buffer_remove_from_list(mapping_info, idx);
if (rc < 0) {
- pr_err("Error: unmap or remove list fail\n");
+ CAM_ERR(CAM_SMMU, "Error: unmap or remove list fail");
goto handle_err;
}
@@ -1698,12 +1740,12 @@
enum cam_smmu_buf_state buf_state;
if (!paddr_ptr || !len_ptr) {
- pr_err("Input pointers are invalid\n");
+ CAM_ERR(CAM_SMMU, "Input pointers are invalid");
return -EINVAL;
}
if (handle == HANDLE_INIT) {
- pr_err("Invalid handle\n");
+ CAM_ERR(CAM_SMMU, "Invalid handle");
return -EINVAL;
}
@@ -1714,27 +1756,28 @@
dma_dir = cam_smmu_translate_dir(dir);
if (dma_dir == DMA_NONE) {
- pr_err("translate direction failed. dir = %d\n", dir);
+ CAM_ERR(CAM_SMMU, "translate direction failed. dir = %d", dir);
return -EINVAL;
}
idx = GET_SMMU_TABLE_IDX(handle);
if (idx < 0 || idx >= iommu_cb_set.cb_num) {
- pr_err("handle or index invalid. idx = %d hdl = %x\n",
+ CAM_ERR(CAM_SMMU, "handle or index invalid. idx = %d hdl = %x",
idx, handle);
return -EINVAL;
}
mutex_lock(&iommu_cb_set.cb_info[idx].lock);
if (iommu_cb_set.cb_info[idx].handle != handle) {
- pr_err("hdl is not valid, table_hdl = %x, hdl = %x\n",
+ CAM_ERR(CAM_SMMU, "hdl is not valid, table_hdl = %x, hdl = %x",
iommu_cb_set.cb_info[idx].handle, handle);
rc = -EINVAL;
goto get_addr_end;
}
if (iommu_cb_set.cb_info[idx].state != CAM_SMMU_ATTACH) {
- pr_err("Err:Dev %s should call SMMU attach before map buffer\n",
+ CAM_ERR(CAM_SMMU,
+ "Err:Dev %s should call SMMU attach before map buffer",
iommu_cb_set.cb_info[idx].name);
rc = -EINVAL;
goto get_addr_end;
@@ -1743,15 +1786,16 @@
buf_state = cam_smmu_check_fd_in_list(idx, ion_fd, paddr_ptr,
len_ptr);
if (buf_state == CAM_SMMU_BUFF_EXIST) {
- CDBG("ion_fd:%d already in the list, give same addr back",
- ion_fd);
+ CAM_ERR(CAM_SMMU,
+ "ion_fd:%d already in the list, give same addr back",
+ ion_fd);
rc = -EALREADY;
goto get_addr_end;
}
rc = cam_smmu_map_buffer_and_add_to_list(idx, ion_fd, dma_dir,
paddr_ptr, len_ptr, region_id);
if (rc < 0)
- pr_err("mapping or add list fail\n");
+ CAM_ERR(CAM_SMMU, "mapping or add list fail");
get_addr_end:
mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
@@ -1767,12 +1811,12 @@
enum cam_smmu_buf_state buf_state;
if (!paddr_ptr || !len_ptr) {
- pr_err("Error: Input pointers are invalid\n");
+ CAM_ERR(CAM_SMMU, "Error: Input pointers are invalid");
return -EINVAL;
}
if (handle == HANDLE_INIT) {
- pr_err("Error: Invalid handle\n");
+ CAM_ERR(CAM_SMMU, "Error: Invalid handle");
return -EINVAL;
}
@@ -1782,14 +1826,16 @@
idx = GET_SMMU_TABLE_IDX(handle);
if (idx < 0 || idx >= iommu_cb_set.cb_num) {
- pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+ CAM_ERR(CAM_SMMU,
+ "Error: handle or index invalid. idx = %d hdl = %x",
idx, handle);
return -EINVAL;
}
mutex_lock(&iommu_cb_set.cb_info[idx].lock);
if (iommu_cb_set.cb_info[idx].handle != handle) {
- pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+ CAM_ERR(CAM_SMMU,
+ "Error: hdl is not valid, table_hdl = %x, hdl = %x",
iommu_cb_set.cb_info[idx].handle, handle);
rc = -EINVAL;
goto get_addr_end;
@@ -1797,7 +1843,7 @@
buf_state = cam_smmu_check_fd_in_list(idx, ion_fd, paddr_ptr, len_ptr);
if (buf_state == CAM_SMMU_BUFF_NOT_EXIST) {
- CDBG("ion_fd:%d not in the mapped list", ion_fd);
+ CAM_ERR(CAM_SMMU, "ion_fd:%d not in the mapped list", ion_fd);
rc = -EINVAL;
goto get_addr_end;
}
@@ -1823,21 +1869,23 @@
struct cam_dma_buff_info *mapping_info;
if (handle == HANDLE_INIT) {
- pr_err("Error: Invalid handle\n");
+ CAM_ERR(CAM_SMMU, "Error: Invalid handle");
return -EINVAL;
}
/* find index in the iommu_cb_set.cb_info */
idx = GET_SMMU_TABLE_IDX(handle);
if (idx < 0 || idx >= iommu_cb_set.cb_num) {
- pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+ CAM_ERR(CAM_SMMU,
+ "Error: handle or index invalid. idx = %d hdl = %x",
idx, handle);
return -EINVAL;
}
mutex_lock(&iommu_cb_set.cb_info[idx].lock);
if (iommu_cb_set.cb_info[idx].handle != handle) {
- pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+ CAM_ERR(CAM_SMMU,
+ "Error: hdl is not valid, table_hdl = %x, hdl = %x",
iommu_cb_set.cb_info[idx].handle, handle);
rc = -EINVAL;
goto unmap_end;
@@ -1846,17 +1894,17 @@
/* Based on ion fd and index, we can find mapping info of buffer */
mapping_info = cam_smmu_find_mapping_by_ion_index(idx, ion_fd);
if (!mapping_info) {
- pr_err("Error: Invalid params! idx = %d, fd = %d\n",
+ CAM_ERR(CAM_SMMU, "Error: Invalid params idx = %d, fd = %d",
idx, ion_fd);
rc = -EINVAL;
goto unmap_end;
}
/* Unmapping one buffer from device */
- CDBG("SMMU: removing buffer idx = %d\n", idx);
+ CAM_DBG(CAM_SMMU, "SMMU: removing buffer idx = %d", idx);
rc = cam_smmu_unmap_buf_and_remove_from_list(mapping_info, idx);
if (rc < 0)
- pr_err("Error: unmap or remove list fail\n");
+ CAM_ERR(CAM_SMMU, "Error: unmap or remove list fail");
unmap_end:
mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
@@ -1871,21 +1919,23 @@
struct cam_dma_buff_info *mapping_info;
if (handle == HANDLE_INIT) {
- pr_err("Error: Invalid handle\n");
+ CAM_ERR(CAM_SMMU, "Error: Invalid handle");
return -EINVAL;
}
/* find index in the iommu_cb_set.cb_info */
idx = GET_SMMU_TABLE_IDX(handle);
if (idx < 0 || idx >= iommu_cb_set.cb_num) {
- pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+ CAM_ERR(CAM_SMMU,
+ "Error: handle or index invalid. idx = %d hdl = %x",
idx, handle);
return -EINVAL;
}
mutex_lock(&iommu_cb_set.cb_info[idx].lock);
if (iommu_cb_set.cb_info[idx].handle != handle) {
- pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+ CAM_ERR(CAM_SMMU,
+ "Error: hdl is not valid, table_hdl = %x, hdl = %x",
iommu_cb_set.cb_info[idx].handle, handle);
rc = -EINVAL;
goto put_addr_end;
@@ -1894,7 +1944,7 @@
/* based on ion fd and index, we can find mapping info of buffer */
mapping_info = cam_smmu_find_mapping_by_ion_index(idx, ion_fd);
if (!mapping_info) {
- pr_err("Error: Invalid params! idx = %d, fd = %d\n",
+ CAM_ERR(CAM_SMMU, "Error: Invalid params idx = %d, fd = %d",
idx, ion_fd);
rc = -EINVAL;
goto put_addr_end;
@@ -1911,27 +1961,29 @@
int idx;
if (handle == HANDLE_INIT) {
- pr_err("Error: Invalid handle\n");
+ CAM_ERR(CAM_SMMU, "Error: Invalid handle");
return -EINVAL;
}
idx = GET_SMMU_TABLE_IDX(handle);
if (idx < 0 || idx >= iommu_cb_set.cb_num) {
- pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+ CAM_ERR(CAM_SMMU,
+ "Error: handle or index invalid. idx = %d hdl = %x",
idx, handle);
return -EINVAL;
}
mutex_lock(&iommu_cb_set.cb_info[idx].lock);
if (iommu_cb_set.cb_info[idx].handle != handle) {
- pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+ CAM_ERR(CAM_SMMU,
+ "Error: hdl is not valid, table_hdl = %x, hdl = %x",
iommu_cb_set.cb_info[idx].handle, handle);
mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
return -EINVAL;
}
if (!list_empty_careful(&iommu_cb_set.cb_info[idx].smmu_buf_list)) {
- pr_err("Client %s buffer list is not clean!\n",
+ CAM_ERR(CAM_SMMU, "Client %s buffer list is not clean",
iommu_cb_set.cb_info[idx].name);
cam_smmu_print_list(idx);
cam_smmu_clean_buffer_list(idx);
@@ -1981,7 +2033,7 @@
int rc = 0;
if (!cb || !dev) {
- pr_err("Error: invalid input params\n");
+ CAM_ERR(CAM_SMMU, "Error: invalid input params");
return -EINVAL;
}
@@ -2001,12 +2053,13 @@
cb->shared_info.iova_len,
-1);
- CDBG("Shared mem start->%lX\n",
+ CAM_DBG(CAM_SMMU, "Shared mem start->%lX",
(unsigned long)cb->shared_info.iova_start);
- CDBG("Shared mem len->%zu\n", cb->shared_info.iova_len);
+ CAM_DBG(CAM_SMMU, "Shared mem len->%zu",
+ cb->shared_info.iova_len);
if (rc) {
- pr_err("Genpool chunk creation failed\n");
+ CAM_ERR(CAM_SMMU, "Genpool chunk creation failed");
gen_pool_destroy(cb->shared_mem_pool);
cb->shared_mem_pool = NULL;
return rc;
@@ -2019,7 +2072,8 @@
cb->scratch_info.iova_len,
0);
if (rc < 0) {
- pr_err("Error: failed to create scratch map\n");
+ CAM_ERR(CAM_SMMU,
+ "Error: failed to create scratch map");
rc = -ENODEV;
goto end;
}
@@ -2030,12 +2084,12 @@
cb->mapping = arm_iommu_create_mapping(&platform_bus_type,
cb->io_info.iova_start, cb->io_info.iova_len);
if (IS_ERR(cb->mapping)) {
- pr_err("Error: create mapping Failed\n");
+ CAM_ERR(CAM_SMMU, "Error: create mapping Failed");
rc = -ENODEV;
goto end;
}
} else {
- pr_err("Context bank does not have IO region\n");
+ CAM_ERR(CAM_SMMU, "Context bank does not have IO region");
rc = -ENODEV;
goto end;
}
@@ -2060,7 +2114,7 @@
struct device_node *domains_child_node = NULL;
if (!dev) {
- pr_err("Error: Invalid device\n");
+ CAM_ERR(CAM_SMMU, "Error: Invalid device");
return -ENODEV;
}
@@ -2078,7 +2132,7 @@
}
if (iommu_cb_set.cb_num == 0) {
- pr_err("Error: no context banks present\n");
+ CAM_ERR(CAM_SMMU, "Error: no context banks present");
return -ENOENT;
}
@@ -2088,14 +2142,14 @@
GFP_KERNEL);
if (!iommu_cb_set.cb_info) {
- pr_err("Error: cannot allocate context banks\n");
+ CAM_ERR(CAM_SMMU, "Error: cannot allocate context banks");
return -ENOMEM;
}
cam_smmu_reset_iommu_table(CAM_SMMU_TABLE_INIT);
iommu_cb_set.cb_init_count = 0;
- CDBG("no of context banks :%d\n", iommu_cb_set.cb_num);
+ CAM_DBG(CAM_SMMU, "no of context banks :%d", iommu_cb_set.cb_num);
return 0;
}
@@ -2109,13 +2163,13 @@
int num_regions = 0;
if (!of_node || !cb) {
- pr_err("Invalid argument(s)\n");
+ CAM_ERR(CAM_SMMU, "Invalid argument(s)");
return -EINVAL;
}
mem_map_node = of_get_child_by_name(of_node, "iova-mem-map");
if (!mem_map_node) {
- pr_err("iova-mem-map not present\n");
+ CAM_ERR(CAM_SMMU, "iova-mem-map not present");
return -EINVAL;
}
@@ -2129,7 +2183,7 @@
"iova-region-name", ®ion_name);
if (rc < 0) {
of_node_put(mem_map_node);
- pr_err("IOVA region not found\n");
+ CAM_ERR(CAM_SMMU, "IOVA region not found");
return -EINVAL;
}
@@ -2137,7 +2191,7 @@
"iova-region-start", ®ion_start);
if (rc < 0) {
of_node_put(mem_map_node);
- pr_err("Failed to read iova-region-start\n");
+ CAM_ERR(CAM_SMMU, "Failed to read iova-region-start");
return -EINVAL;
}
@@ -2145,7 +2199,7 @@
"iova-region-len", ®ion_len);
if (rc < 0) {
of_node_put(mem_map_node);
- pr_err("Failed to read iova-region-len\n");
+ CAM_ERR(CAM_SMMU, "Failed to read iova-region-len");
return -EINVAL;
}
@@ -2153,7 +2207,7 @@
"iova-region-id", ®ion_id);
if (rc < 0) {
of_node_put(mem_map_node);
- pr_err("Failed to read iova-region-id\n");
+ CAM_ERR(CAM_SMMU, "Failed to read iova-region-id");
return -EINVAL;
}
@@ -2179,20 +2233,22 @@
cb->io_info.iova_len = region_len;
break;
default:
- pr_err("Incorrect region id present in DT file: %d\n",
+ CAM_ERR(CAM_SMMU,
+ "Incorrect region id present in DT file: %d",
region_id);
}
- CDBG("Found label -> %s\n", cb->name);
- CDBG("Found region -> %s\n", region_name);
- CDBG("region_start -> %X\n", region_start);
- CDBG("region_len -> %X\n", region_len);
- CDBG("region_id -> %X\n", region_id);
+ CAM_DBG(CAM_SMMU, "Found label -> %s", cb->name);
+ CAM_DBG(CAM_SMMU, "Found region -> %s", region_name);
+ CAM_DBG(CAM_SMMU, "region_start -> %X", region_start);
+ CAM_DBG(CAM_SMMU, "region_len -> %X", region_len);
+ CAM_DBG(CAM_SMMU, "region_id -> %X", region_id);
}
of_node_put(mem_map_node);
if (!num_regions) {
- pr_err("No memory regions found, at least one needed\n");
+ CAM_ERR(CAM_SMMU,
+ "No memory regions found, at least one needed");
rc = -ENODEV;
}
@@ -2207,13 +2263,13 @@
struct device *ctx = NULL;
if (!dev) {
- pr_err("Error: Invalid device\n");
+ CAM_ERR(CAM_SMMU, "Error: Invalid device");
return -ENODEV;
}
/* check the bounds */
if (iommu_cb_set.cb_init_count >= iommu_cb_set.cb_num) {
- pr_err("Error: populate more than allocated cb\n");
+ CAM_ERR(CAM_SMMU, "Error: populate more than allocated cb");
rc = -EBADHANDLE;
goto cb_init_fail;
}
@@ -2224,29 +2280,31 @@
/* set the name of the context bank */
rc = of_property_read_string(dev->of_node, "label", &cb->name);
if (rc < 0) {
- pr_err("Error: failed to read label from sub device\n");
+ CAM_ERR(CAM_SMMU,
+ "Error: failed to read label from sub device");
goto cb_init_fail;
}
rc = cam_smmu_get_memory_regions_info(dev->of_node,
cb);
if (rc < 0) {
- pr_err("Error: Getting region info\n");
+ CAM_ERR(CAM_SMMU, "Error: Getting region info");
return rc;
}
/* set up the iommu mapping for the context bank */
if (type == CAM_QSMMU) {
- pr_err("Error: QSMMU ctx not supported for : %s\n", cb->name);
+ CAM_ERR(CAM_SMMU, "Error: QSMMU ctx not supported for : %s",
+ cb->name);
return -ENODEV;
}
ctx = dev;
- CDBG("getting Arm SMMU ctx : %s\n", cb->name);
+ CAM_DBG(CAM_SMMU, "getting Arm SMMU ctx : %s", cb->name);
rc = cam_smmu_setup_cb(cb, ctx);
if (rc < 0) {
- pr_err("Error: failed to setup cb : %s\n", cb->name);
+ CAM_ERR(CAM_SMMU, "Error: failed to setup cb : %s", cb->name);
goto cb_init_fail;
}
@@ -2258,7 +2316,7 @@
/* increment count to next bank */
iommu_cb_set.cb_init_count++;
- CDBG("X: cb init count :%d\n", iommu_cb_set.cb_init_count);
+ CAM_DBG(CAM_SMMU, "X: cb init count :%d", iommu_cb_set.cb_init_count);
cb_init_fail:
return rc;
@@ -2272,14 +2330,14 @@
if (of_device_is_compatible(dev->of_node, "qcom,msm-cam-smmu")) {
rc = cam_alloc_smmu_context_banks(dev);
if (rc < 0) {
- pr_err("Error: allocating context banks\n");
+ CAM_ERR(CAM_SMMU, "Error: allocating context banks");
return -ENOMEM;
}
}
if (of_device_is_compatible(dev->of_node, "qcom,msm-cam-smmu-cb")) {
rc = cam_populate_smmu_context_banks(dev, CAM_ARM_SMMU);
if (rc < 0) {
- pr_err("Error: populating context banks\n");
+ CAM_ERR(CAM_SMMU, "Error: populating context banks");
return -ENOMEM;
}
return rc;
@@ -2287,7 +2345,7 @@
if (of_device_is_compatible(dev->of_node, "qcom,qsmmu-cam-cb")) {
rc = cam_populate_smmu_context_banks(dev, CAM_QSMMU);
if (rc < 0) {
- pr_err("Error: populating context banks\n");
+ CAM_ERR(CAM_SMMU, "Error: populating context banks");
return -ENOMEM;
}
return rc;
@@ -2304,7 +2362,7 @@
rc = of_platform_populate(pdev->dev.of_node, msm_cam_smmu_dt_match,
NULL, &pdev->dev);
if (rc < 0) {
- pr_err("Error: populating devices\n");
+ CAM_ERR(CAM_SMMU, "Error: populating devices");
} else {
INIT_WORK(&iommu_cb_set.smmu_work, cam_smmu_page_fault_work);
mutex_init(&iommu_cb_set.payload_list_lock);
diff --git a/drivers/media/platform/msm/camera/cam_sync/Makefile b/drivers/media/platform/msm/camera/cam_sync/Makefile
index e3012cb..8e884ca 100644
--- a/drivers/media/platform/msm/camera/cam_sync/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sync/Makefile
@@ -1 +1,3 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+
obj-$(CONFIG_SPECTRA_CAMERA) += cam_sync.o cam_sync_util.o
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
index 96f40e1..644cb63 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
@@ -10,8 +10,6 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "CAM-SYNC %s:%d " fmt, __func__, __LINE__
-
#include <linux/init.h>
#include <linux/module.h>
#include <linux/irqflags.h>
@@ -19,6 +17,7 @@
#include <linux/platform_device.h>
#include <linux/debugfs.h>
#include "cam_sync_util.h"
+#include "cam_debug_util.h"
struct sync_device *sync_dev;
@@ -35,7 +34,8 @@
rc = cam_sync_init_object(sync_dev->sync_table, idx, name);
if (rc) {
- pr_err("Error: Unable to init row at idx = %ld\n", idx);
+ CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
+ idx);
spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
return -EINVAL;
}
@@ -62,7 +62,8 @@
row = sync_dev->sync_table + sync_obj;
if (row->state == CAM_SYNC_STATE_INVALID) {
- pr_err("Error: accessing an uninitialized sync obj %d\n",
+ CAM_ERR(CAM_SYNC,
+ "Error: accessing an uninitialized sync obj %d",
sync_obj);
spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
return -EINVAL;
@@ -124,7 +125,8 @@
row = sync_dev->sync_table + sync_obj;
if (row->state == CAM_SYNC_STATE_INVALID) {
- pr_err("Error: accessing an uninitialized sync obj = %d\n",
+ CAM_ERR(CAM_SYNC,
+ "Error: accessing an uninitialized sync obj = %d",
sync_obj);
spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
return -EINVAL;
@@ -157,12 +159,14 @@
/* Objects to be signaled will be added into this list */
INIT_LIST_HEAD(&sync_list);
- if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
+ if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0) {
+ CAM_ERR(CAM_SYNC, "Error: Out of range sync obj");
return -EINVAL;
-
+ }
row = sync_dev->sync_table + sync_obj;
if (row->state == CAM_SYNC_STATE_INVALID) {
- pr_err("Error: accessing an uninitialized sync obj = %d\n",
+ CAM_ERR(CAM_SYNC,
+ "Error: accessing an uninitialized sync obj = %d",
sync_obj);
return -EINVAL;
}
@@ -170,14 +174,15 @@
spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
if (row->type == CAM_SYNC_TYPE_GROUP) {
spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
- pr_err("Error: Signaling a GROUP sync object = %d\n",
+ CAM_ERR(CAM_SYNC, "Error: Signaling a GROUP sync object = %d",
sync_obj);
return -EINVAL;
}
if (row->state != CAM_SYNC_STATE_ACTIVE) {
spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
- pr_err("Error: Sync object already signaled sync_obj = %d",
+ CAM_ERR(CAM_SYNC,
+ "Error: Sync object already signaled sync_obj = %d",
sync_obj);
return -EALREADY;
}
@@ -185,7 +190,8 @@
if (status != CAM_SYNC_STATE_SIGNALED_SUCCESS &&
status != CAM_SYNC_STATE_SIGNALED_ERROR) {
spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
- pr_err("Error: signaling with undefined status = %d\n",
+ CAM_ERR(CAM_SYNC,
+ "Error: signaling with undefined status = %d",
status);
return -EINVAL;
}
@@ -297,18 +303,20 @@
rc = cam_sync_util_validate_merge(sync_obj,
num_objs);
if (rc < 0) {
- pr_err("Validation failed, Merge not allowed");
+ CAM_ERR(CAM_SYNC, "Validation failed, Merge not allowed");
return -EINVAL;
}
rc = cam_sync_util_find_and_set_empty_row(sync_dev, &idx);
if (rc < 0) {
- pr_err("Error: Unable to find empty row, table full");
+ CAM_ERR(CAM_SYNC,
+ "Error: Unable to find empty row, table full");
return -EINVAL;
}
if (idx <= 0 || idx >= CAM_SYNC_MAX_OBJS) {
- pr_err("Error: Invalid empty row index returned = %ld", idx);
+ CAM_ERR(CAM_SYNC,
+ "Error: Invalid empty row index returned = %ld", idx);
return -EINVAL;
}
@@ -317,7 +325,8 @@
num_objs);
if (rc < 0) {
- pr_err("Error: Unable to init row at idx = %ld\n", idx);
+ CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
+ idx);
return -EINVAL;
}
@@ -335,7 +344,8 @@
row = sync_dev->sync_table + sync_obj;
if (row->state == CAM_SYNC_STATE_INVALID) {
- pr_err("Error: accessing an uninitialized sync obj: idx = %d\n",
+ CAM_ERR(CAM_SYNC,
+ "Error: accessing an uninitialized sync obj: idx = %d",
sync_obj);
return -EINVAL;
}
@@ -356,7 +366,8 @@
row = sync_dev->sync_table + sync_obj;
if (row->state == CAM_SYNC_STATE_INVALID) {
- pr_err("Error: accessing an uninitialized sync obj = %d\n",
+ CAM_ERR(CAM_SYNC,
+ "Error: accessing an uninitialized sync obj = %d",
sync_obj);
return -EINVAL;
}
@@ -365,15 +376,16 @@
msecs_to_jiffies(timeout_ms));
if (!timeleft) {
- pr_err("Error: cam_sync_wait() timed out for sync obj = %d\n",
- sync_obj);
+ CAM_ERR(CAM_SYNC,
+ "Error: timed out for sync obj = %d", sync_obj);
rc = -ETIMEDOUT;
} else {
switch (row->state) {
case CAM_SYNC_STATE_INVALID:
case CAM_SYNC_STATE_ACTIVE:
case CAM_SYNC_STATE_SIGNALED_ERROR:
- pr_err("Error: Wait on invalid state = %d, obj = %d\n",
+ CAM_ERR(CAM_SYNC,
+ "Error: Wait on invalid state = %d, obj = %d",
row->state, sync_obj);
rc = -EINVAL;
break;
@@ -566,7 +578,8 @@
row = sync_dev->sync_table + sync_obj;
if (row->state == CAM_SYNC_STATE_INVALID) {
- pr_err("Error: accessing an uninitialized sync obj = %d\n",
+ CAM_ERR(CAM_SYNC,
+ "Error: accessing an uninitialized sync obj = %d",
sync_obj);
spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
kfree(user_payload_kernel);
@@ -616,12 +629,12 @@
struct sync_table_row *row = NULL;
if (k_ioctl->size != sizeof(struct cam_sync_userpayload_info)) {
- CDBG("Incorrect ioctl size\n");
+ CAM_ERR(CAM_SYNC, "Incorrect ioctl size");
return -EINVAL;
}
if (!k_ioctl->ioctl_ptr) {
- CDBG("Invalid embedded ioctl ptr\n");
+ CAM_ERR(CAM_SYNC, "Invalid embedded ioctl ptr");
return -EINVAL;
}
@@ -638,7 +651,8 @@
row = sync_dev->sync_table + sync_obj;
if (row->state == CAM_SYNC_STATE_INVALID) {
- pr_err("Error: accessing an uninitialized sync obj = %d\n",
+ CAM_ERR(CAM_SYNC,
+ "Error: accessing an uninitialized sync obj = %d",
sync_obj);
spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
return -EINVAL;
@@ -667,7 +681,7 @@
struct cam_private_ioctl_arg k_ioctl;
if (!sync_dev) {
- pr_err("%s sync_dev NULL\n", __func__);
+ CAM_ERR(CAM_SYNC, "sync_dev NULL");
return -EINVAL;
}
@@ -735,7 +749,7 @@
struct sync_device *sync_dev = video_drvdata(filep);
if (!sync_dev) {
- pr_err("%s Sync device NULL\n", __func__);
+ CAM_ERR(CAM_SYNC, "Sync device NULL");
return -ENODEV;
}
@@ -752,7 +766,7 @@
sync_dev->cam_sync_eventq = filep->private_data;
spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
} else {
- pr_err("v4l2_fh_open failed : %d\n", rc);
+ CAM_ERR(CAM_SYNC, "v4l2_fh_open failed : %d", rc);
}
mutex_unlock(&sync_dev->table_lock);
@@ -766,7 +780,7 @@
struct sync_device *sync_dev = video_drvdata(filep);
if (!sync_dev) {
- pr_err("%s Sync device NULL\n", __func__);
+ CAM_ERR(CAM_SYNC, "Sync device NULL");
rc = -ENODEV;
return rc;
}
@@ -784,11 +798,13 @@
*/
rc = cam_sync_signal(i, CAM_SYNC_STATE_SIGNALED_ERROR);
if (rc < 0)
- pr_err("Cleanup signal failed: idx = %d\n", i);
+ CAM_ERR(CAM_SYNC,
+ "Cleanup signal failed: idx = %d", i);
rc = cam_sync_destroy(i);
if (rc < 0)
- pr_err("Cleanup destroy failed: idx = %d\n", i);
+ CAM_ERR(CAM_SYNC,
+ "Cleanup destroy failed: idx = %d", i);
}
}
mutex_unlock(&sync_dev->table_lock);
@@ -951,7 +967,8 @@
WQ_HIGHPRI | WQ_UNBOUND, 0);
if (!sync_dev->work_queue) {
- pr_err("Error: high priority work queue creation failed!\n");
+ CAM_ERR(CAM_SYNC,
+ "Error: high priority work queue creation failed");
rc = -ENOMEM;
goto v4l2_fail;
}
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
index 3b3cbff..c62aacf 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
@@ -10,8 +10,6 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "CAM-SYNC-UTIL %s:%d " fmt, __func__, __LINE__
-
#include "cam_sync_util.h"
int cam_sync_util_find_and_set_empty_row(struct sync_device *sync_dev,
@@ -85,7 +83,8 @@
active_count++;
break;
default:
- pr_err("Invalid state of child object during merge\n");
+ CAM_ERR(CAM_SYNC,
+ "Invalid state of child object during merge");
return CAM_SYNC_STATE_SIGNALED_ERROR;
}
}
@@ -256,7 +255,7 @@
struct sync_table_row *row = NULL;
if (num_objs <= 1) {
- pr_err("Single object merge is not allowed\n");
+ CAM_ERR(CAM_SYNC, "Single object merge is not allowed");
return -EINVAL;
}
@@ -265,7 +264,8 @@
spin_lock_bh(&sync_dev->row_spinlocks[sync_obj[i]]);
if (row->type == CAM_SYNC_TYPE_GROUP ||
row->state == CAM_SYNC_STATE_INVALID) {
- pr_err("Group obj %d can't be merged or obj UNINIT\n",
+ CAM_ERR(CAM_SYNC,
+ "Group obj %d can't be merged or obj UNINIT",
sync_obj[i]);
spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj[i]]);
return -EINVAL;
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.h b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.h
index 9dedd14..8b60ce1 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.h
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.h
@@ -16,6 +16,7 @@
#include <cam_sync_api.h>
#include "cam_sync_private.h"
+#include "cam_debug_util.h"
extern struct sync_device *sync_dev;
diff --git a/drivers/media/platform/msm/camera/cam_utils/Makefile b/drivers/media/platform/msm/camera/cam_utils/Makefile
index f22115c..4702963 100644
--- a/drivers/media/platform/msm/camera/cam_utils/Makefile
+++ b/drivers/media/platform/msm/camera/cam_utils/Makefile
@@ -1,3 +1,5 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core/
ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
-obj-$(CONFIG_SPECTRA_CAMERA) += cam_soc_util.o cam_io_util.o cam_packet_util.o
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_soc_util.o cam_io_util.o cam_packet_util.o cam_debug_util.o cam_trace.o cam_common_util.o
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_common_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_common_util.c
new file mode 100644
index 0000000..199d3ea
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_common_util.c
@@ -0,0 +1,35 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+
+#include "cam_common_util.h"
+#include "cam_debug_util.h"
+
+int cam_common_util_get_string_index(const char **strings,
+ uint32_t num_strings, char *matching_string, uint32_t *index)
+{
+ int i;
+
+ for (i = 0; i < num_strings; i++) {
+ if (strnstr(strings[i], matching_string, strlen(strings[i]))) {
+ CAM_DBG(CAM_UTIL, "matched %s : %d\n",
+ matching_string, i);
+ *index = i;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_common_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_common_util.h
new file mode 100644
index 0000000..d6a11b7
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_common_util.h
@@ -0,0 +1,35 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_COMMON_UTIL_H_
+#define _CAM_COMMON_UTIL_H_
+
+#define CAM_BITS_MASK_SHIFT(x, mask, shift) (((x) & (mask)) >> shift)
+
+/**
+ * cam_common_util_get_string_index()
+ *
+ * @brief Match the string from list of strings to return
+ * matching index
+ *
+ * @strings: Pointer to list of strings
+ * @num_strings: Number of strings in 'strings'
+ * @matching_string: String to match
+ * @index: Pointer to index to return matching index
+ *
+ * @return: 0 for success
+ * -EINVAL for Fail
+ */
+int cam_common_util_get_string_index(const char **strings,
+ uint32_t num_strings, char *matching_string, uint32_t *index);
+
+#endif /* _CAM_COMMON_UTIL_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.c
new file mode 100644
index 0000000..21f90ca
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.c
@@ -0,0 +1,133 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+
+#include "cam_debug_util.h"
+
+static uint debug_mdl;
+module_param(debug_mdl, uint, 0644);
+
+const char *cam_get_module_name(unsigned int module_id)
+{
+ const char *name = NULL;
+
+ switch (module_id) {
+ case CAM_CDM:
+ name = "CAM-CDM";
+ break;
+ case CAM_CORE:
+ name = "CAM-CORE";
+ break;
+ case CAM_CRM:
+ name = "CAM_CRM";
+ break;
+ case CAM_CPAS:
+ name = "CAM-CPAS";
+ break;
+ case CAM_ISP:
+ name = "CAM-ISP";
+ break;
+ case CAM_SENSOR:
+ name = "CAM-SENSOR";
+ break;
+ case CAM_SMMU:
+ name = "CAM-SMMU";
+ break;
+ case CAM_SYNC:
+ name = "CAM-SYNC";
+ break;
+ case CAM_ICP:
+ name = "CAM-ICP";
+ break;
+ case CAM_JPEG:
+ name = "CAM-JPEG";
+ break;
+ case CAM_FD:
+ name = "CAM-FD";
+ break;
+ case CAM_LRME:
+ name = "CAM-LRME";
+ break;
+ case CAM_FLASH:
+ name = "CAM-FLASH";
+ break;
+ case CAM_ACTUATOR:
+ name = "CAM-ACTUATOR";
+ break;
+ case CAM_CCI:
+ name = "CAM-CCI";
+ break;
+ case CAM_CSIPHY:
+ name = "CAM-CSIPHY";
+ break;
+ case CAM_EEPROM:
+ name = "CAM-EEPROM";
+ break;
+ case CAM_UTIL:
+ name = "CAM-UTIL";
+ break;
+ case CAM_CTXT:
+ name = "CAM-CTXT";
+ break;
+ case CAM_HFI:
+ name = "CAM-HFI";
+ break;
+ default:
+ name = "CAM";
+ break;
+ }
+
+ return name;
+}
+
+void cam_debug_log(unsigned int module_id, enum cam_debug_level dbg_level,
+ const char *func, const int line, const char *fmt, ...)
+{
+ char str_buffer[STR_BUFFER_MAX_LENGTH];
+ va_list args;
+
+ va_start(args, fmt);
+
+ switch (dbg_level) {
+ case CAM_LEVEL_DBG:
+ if (debug_mdl & module_id) {
+ vsnprintf(str_buffer, STR_BUFFER_MAX_LENGTH, fmt, args);
+ pr_info("CAM_DBG: %s: %s: %d: %s\n",
+ cam_get_module_name(module_id),
+ func, line, str_buffer);
+ va_end(args);
+ }
+ break;
+ case CAM_LEVEL_ERR:
+ vsnprintf(str_buffer, STR_BUFFER_MAX_LENGTH, fmt, args);
+ pr_err("CAM_ERR: %s: %s: %d: %s\n",
+ cam_get_module_name(module_id), func, line, str_buffer);
+ va_end(args);
+ break;
+ case CAM_LEVEL_INFO:
+ vsnprintf(str_buffer, STR_BUFFER_MAX_LENGTH, fmt, args);
+ pr_info("CAM_INFO: %s: %s: %d: %s\n",
+ cam_get_module_name(module_id), func, line, str_buffer);
+ va_end(args);
+ break;
+ case CAM_LEVEL_WARN:
+ vsnprintf(str_buffer, STR_BUFFER_MAX_LENGTH, fmt, args);
+ pr_warn("CAM_WARN: %s: %s: %d: %s\n",
+ cam_get_module_name(module_id), func, line, str_buffer);
+ va_end(args);
+ break;
+ default:
+ break;
+ }
+}
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.h
new file mode 100644
index 0000000..7275d56
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.h
@@ -0,0 +1,124 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_DEBUG_UTIL_H_
+#define _CAM_DEBUG_UTIL_H_
+
+#define CAM_CDM (1 << 0)
+#define CAM_CORE (1 << 1)
+#define CAM_CPAS (1 << 2)
+#define CAM_ISP (1 << 3)
+#define CAM_CRM (1 << 4)
+#define CAM_SENSOR (1 << 5)
+#define CAM_SMMU (1 << 6)
+#define CAM_SYNC (1 << 7)
+#define CAM_ICP (1 << 8)
+#define CAM_JPEG (1 << 9)
+#define CAM_FD (1 << 10)
+#define CAM_LRME (1 << 11)
+#define CAM_FLASH (1 << 12)
+#define CAM_ACTUATOR (1 << 13)
+#define CAM_CCI (1 << 14)
+#define CAM_CSIPHY (1 << 15)
+#define CAM_EEPROM (1 << 16)
+#define CAM_UTIL (1 << 17)
+#define CAM_HFI (1 << 18)
+#define CAM_CTXT (1 << 19)
+
+#define STR_BUFFER_MAX_LENGTH 1024
+
+enum cam_debug_level {
+ CAM_LEVEL_INFO,
+ CAM_LEVEL_WARN,
+ CAM_LEVEL_ERR,
+ CAM_LEVEL_DBG,
+};
+
+/*
+ * cam_debug_log()
+ *
+ * @brief : Get the Module name from module ID and print
+ * respective debug logs
+ *
+ * @module_id : Respective Module ID which is calling this function
+ * @dbg_level : Debug level from cam_module_debug_level enum entries
+ * @func : Function which is calling to print logs
+ * @line : Line number associated with the function which is calling
+ * to print log
+ * @fmt : Formatted string which needs to be print in the log
+ *
+ */
+void cam_debug_log(unsigned int module_id, enum cam_debug_level dbg_level,
+ const char *func, const int line, const char *fmt, ...);
+
+/*
+ * cam_get_module_name()
+ *
+ * @brief : Get the module name from module ID
+ *
+ * @module_id : Module ID which is using this function
+ */
+const char *cam_get_module_name(unsigned int module_id);
+
+/*
+ * CAM_ERR
+ * @brief : This Macro will print error logs
+ *
+ * @__module : Respective module id which is been calling this Macro
+ * @fmt : Formatted string which needs to be print in log
+ * @args : Arguments which needs to be print in log
+ */
+#define CAM_ERR(__module, fmt, args...) \
+ cam_debug_log(__module, CAM_LEVEL_ERR, __func__, __LINE__, fmt, ##args)
+
+/*
+ * CAM_WARN
+ * @brief : This Macro will print warning logs
+ *
+ * @__module : Respective module id which is been calling this Macro
+ * @fmt : Formatted string which needs to be print in log
+ * @args : Arguments which needs to be print in log
+ */
+#define CAM_WARN(__module, fmt, args...) \
+ cam_debug_log(__module, CAM_LEVEL_WARN, __func__, __LINE__, fmt, ##args)
+
+/*
+ * CAM_INFO
+ * @brief : This Macro will print Information logs
+ *
+ * @__module : Respective module id which is been calling this Macro
+ * @fmt : Formatted string which needs to be print in log
+ * @args : Arguments which needs to be print in log
+ */
+#define CAM_INFO(__module, fmt, args...) \
+ cam_debug_log(__module, CAM_LEVEL_INFO, __func__, __LINE__, fmt, ##args)
+
+/*
+ * CAM_DBG
+ * @brief : This Macro will print debug logs when enabled using GROUP
+ *
+ * @__module : Respective module id which is been calling this Macro
+ * @fmt : Formatted string which needs to be print in log
+ * @args : Arguments which needs to be print in log
+ */
+#define CAM_DBG(__module, fmt, args...) \
+ cam_debug_log(__module, CAM_LEVEL_DBG, __func__, __LINE__, fmt, ##args)
+
+/*
+ * CAM_ERR_RATE_LIMIT
+ * @brief : This Macro will prevent error print logs with ratelimit
+ */
+#define CAM_ERR_RATE_LIMIT(__module, fmt, args...) \
+ pr_err_ratelimited("CAM_ERR: %s: %s: %d\n" fmt, \
+ cam_get_module_name(__module), __func__, __LINE__, ##args)
+
+#endif /* _CAM_DEBUG_UTIL_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_io_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_io_util.c
index 78cd9d8..c1fbb2a 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_io_util.c
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_io_util.c
@@ -10,22 +10,18 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
-
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/err.h>
#include "cam_io_util.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
int cam_io_w(uint32_t data, void __iomem *addr)
{
if (!addr)
return -EINVAL;
- CDBG("0x%pK %08x\n", addr, data);
+ CAM_DBG(CAM_UTIL, "0x%pK %08x", addr, data);
writel_relaxed(data, addr);
return 0;
@@ -36,7 +32,7 @@
if (!addr)
return -EINVAL;
- CDBG("0x%pK %08x\n", addr, data);
+ CAM_DBG(CAM_UTIL, "0x%pK %08x", addr, data);
/* Ensure previous writes are done */
wmb();
writel_relaxed(data, addr);
@@ -49,12 +45,12 @@
uint32_t data;
if (!addr) {
- pr_err("Invalid args\n");
+ CAM_ERR(CAM_UTIL, "Invalid args");
return 0;
}
data = readl_relaxed(addr);
- CDBG("0x%pK %08x\n", addr, data);
+ CAM_DBG(CAM_UTIL, "0x%pK %08x", addr, data);
return data;
}
@@ -64,14 +60,14 @@
uint32_t data;
if (!addr) {
- pr_err("Invalid args\n");
+ CAM_ERR(CAM_UTIL, "Invalid args");
return 0;
}
/* Ensure previous read is done */
rmb();
data = readl_relaxed(addr);
- CDBG("0x%pK %08x\n", addr, data);
+ CAM_DBG(CAM_UTIL, "0x%pK %08x", addr, data);
return data;
}
@@ -86,10 +82,10 @@
if (!dest_addr || !src_addr)
return -EINVAL;
- CDBG("%pK %pK %d\n", dest_addr, src_addr, len);
+ CAM_DBG(CAM_UTIL, "%pK %pK %d", dest_addr, src_addr, len);
for (i = 0; i < len/4; i++) {
- CDBG("0x%pK %08x\n", d, *s);
+ CAM_DBG(CAM_UTIL, "0x%pK %08x", d, *s);
writel_relaxed(*s++, d++);
}
@@ -106,7 +102,7 @@
if (!dest_addr || !src_addr)
return -EINVAL;
- CDBG("%pK %pK %d\n", dest_addr, src_addr, len);
+ CAM_DBG(CAM_UTIL, "%pK %pK %d", dest_addr, src_addr, len);
/*
* Do not use cam_io_w_mb to avoid double wmb() after a write
@@ -114,7 +110,7 @@
*/
wmb();
for (i = 0; i < (len / 4); i++) {
- CDBG("0x%pK %08x\n", d, *s);
+ CAM_DBG(CAM_UTIL, "0x%pK %08x", d, *s);
writel_relaxed(*s++, d++);
}
@@ -138,7 +134,7 @@
}
if (cnt > retry) {
- pr_debug("Poll failed by value\n");
+ CAM_DBG(CAM_UTIL, "Poll failed by value");
rc = -EINVAL;
}
@@ -163,7 +159,7 @@
}
if (cnt > retry) {
- pr_debug("Poll failed with mask\n");
+ CAM_DBG(CAM_UTIL, "Poll failed with mask");
rc = -EINVAL;
}
@@ -179,7 +175,7 @@
return -EINVAL;
for (i = 0; i < len; i++) {
- CDBG("i= %d len =%d val=%x addr =%pK\n",
+ CAM_DBG(CAM_UTIL, "i= %d len =%d val=%x addr =%pK",
i, len, data[i], addr);
writel_relaxed(data[i], addr);
}
@@ -196,7 +192,7 @@
return -EINVAL;
for (i = 0; i < len; i++) {
- CDBG("i= %d len =%d val=%x addr =%pK\n",
+ CAM_DBG(CAM_UTIL, "i= %d len =%d val=%x addr =%pK",
i, len, data[i], addr);
/* Ensure previous writes are done */
wmb();
@@ -217,7 +213,7 @@
return -EINVAL;
for (i = 0; i < len; i++) {
- CDBG("i= %d len =%d val=%x addr_base =%pK reg=%x\n",
+ CAM_DBG(CAM_UTIL, "i= %d len =%d val=%x addr_base =%pK reg=%x",
i, len, __VAL(i), addr_base, __OFFSET(i));
writel_relaxed(__VAL(i), addr_base + __OFFSET(i));
}
@@ -236,7 +232,7 @@
/* Ensure write is done */
wmb();
for (i = 0; i < len; i++) {
- CDBG("i= %d len =%d val=%x addr_base =%pK reg=%x\n",
+ CAM_DBG(CAM_UTIL, "i= %d len =%d val=%x addr_base =%pK reg=%x",
i, len, __VAL(i), addr_base, __OFFSET(i));
writel_relaxed(__VAL(i), addr_base + __OFFSET(i));
}
@@ -254,7 +250,8 @@
int i;
uint32_t data;
- CDBG("addr=%pK offset=0x%x size=%d\n", base_addr, start_offset, size);
+ CAM_DBG(CAM_UTIL, "addr=%pK offset=0x%x size=%d",
+ base_addr, start_offset, size);
if (!base_addr || (size <= 0))
return -EINVAL;
@@ -265,19 +262,19 @@
if (i % NUM_REGISTER_PER_LINE == 0) {
snprintf(p_str, 12, "0x%08x: ",
REG_OFFSET(start_offset, i));
- p_str += 12;
+ p_str += 11;
}
data = readl_relaxed(base_addr + REG_OFFSET(start_offset, i));
snprintf(p_str, 9, "%08x ", data);
- p_str += 9;
+ p_str += 8;
if ((i + 1) % NUM_REGISTER_PER_LINE == 0) {
- pr_err("%s\n", line_str);
+ CAM_ERR(CAM_UTIL, "%s", line_str);
line_str[0] = '\0';
p_str = line_str;
}
}
if (line_str[0] != '\0')
- pr_err("%s\n", line_str);
+ CAM_ERR(CAM_UTIL, "%s", line_str);
return 0;
}
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
index 6d90c1e..a1cdfe9 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
@@ -10,13 +10,118 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+#include <linux/types.h>
+#include <linux/slab.h>
#include "cam_mem_mgr.h"
#include "cam_packet_util.h"
+#include "cam_debug_util.h"
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+static int cam_packet_util_get_cmd_mem_addr(int handle, uint32_t **buf_addr,
+ size_t *len)
+{
+ int rc = 0;
+ uint64_t kmd_buf_addr = 0;
+
+ rc = cam_mem_get_cpu_buf(handle, &kmd_buf_addr, len);
+ if (rc) {
+ CAM_ERR(CAM_UTIL, "Unable to get the virtual address %d", rc);
+ } else {
+ if (kmd_buf_addr && *len) {
+ *buf_addr = (uint32_t *)kmd_buf_addr;
+ } else {
+ CAM_ERR(CAM_UTIL, "Invalid addr and length :%ld", *len);
+ rc = -ENOMEM;
+ }
+ }
+ return rc;
+}
+
+int cam_packet_util_validate_cmd_desc(struct cam_cmd_buf_desc *cmd_desc)
+{
+ if ((cmd_desc->length > cmd_desc->size) ||
+ (cmd_desc->mem_handle <= 0)) {
+ CAM_ERR(CAM_UTIL, "invalid cmd arg %d %d %d %d",
+ cmd_desc->offset, cmd_desc->length,
+ cmd_desc->mem_handle, cmd_desc->size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int cam_packet_util_validate_packet(struct cam_packet *packet)
+{
+ if (!packet)
+ return -EINVAL;
+
+ CAM_DBG(CAM_UTIL, "num cmd buf:%d num of io config:%d kmd buf index:%d",
+ packet->num_cmd_buf, packet->num_io_configs,
+ packet->kmd_cmd_buf_index);
+
+ if ((packet->kmd_cmd_buf_index >= packet->num_cmd_buf) ||
+ (!packet->header.size) ||
+ (packet->cmd_buf_offset > packet->header.size) ||
+ (packet->io_configs_offset > packet->header.size)) {
+ CAM_ERR(CAM_UTIL, "invalid packet:%d %d %d %d %d",
+ packet->kmd_cmd_buf_index,
+ packet->num_cmd_buf, packet->cmd_buf_offset,
+ packet->io_configs_offset, packet->header.size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int cam_packet_util_get_kmd_buffer(struct cam_packet *packet,
+ struct cam_kmd_buf_info *kmd_buf)
+{
+ int rc = 0;
+ size_t len = 0;
+ struct cam_cmd_buf_desc *cmd_desc;
+ uint32_t *cpu_addr;
+
+ if (!packet || !kmd_buf) {
+ CAM_ERR(CAM_UTIL, "Invalid arg %pK %pK", packet, kmd_buf);
+ return -EINVAL;
+ }
+
+ /* Take first command descriptor and add offset to it for kmd*/
+ cmd_desc = (struct cam_cmd_buf_desc *) ((uint8_t *)
+ &packet->payload + packet->cmd_buf_offset);
+ cmd_desc += packet->kmd_cmd_buf_index;
+
+ rc = cam_packet_util_validate_cmd_desc(cmd_desc);
+ if (rc)
+ return rc;
+
+ rc = cam_packet_util_get_cmd_mem_addr(cmd_desc->mem_handle, &cpu_addr,
+ &len);
+ if (rc)
+ return rc;
+
+ if (len < cmd_desc->size) {
+ CAM_ERR(CAM_UTIL, "invalid memory len:%ld and cmd desc size:%d",
+ len, cmd_desc->size);
+ return -EINVAL;
+ }
+
+ cpu_addr += (cmd_desc->offset / 4) + (packet->kmd_cmd_buf_offset / 4);
+ CAM_DBG(CAM_UTIL, "total size %d, cmd size: %d, KMD buffer size: %d",
+ cmd_desc->size, cmd_desc->length,
+ cmd_desc->size - cmd_desc->length);
+ CAM_DBG(CAM_UTIL, "hdl 0x%x, cmd offset %d, kmd offset %d, addr 0x%pK",
+ cmd_desc->mem_handle, cmd_desc->offset,
+ packet->kmd_cmd_buf_offset, cpu_addr);
+
+ kmd_buf->cpu_addr = cpu_addr;
+ kmd_buf->handle = cmd_desc->mem_handle;
+ kmd_buf->offset = cmd_desc->offset + packet->kmd_cmd_buf_offset;
+ kmd_buf->size = cmd_desc->size - cmd_desc->length;
+ kmd_buf->used_bytes = 0;
+
+ return rc;
+}
int cam_packet_util_process_patches(struct cam_packet *packet,
int32_t iommu_hdl)
@@ -36,7 +141,7 @@
patch_desc = (struct cam_patch_desc *)
((uint32_t *) &packet->payload +
packet->patch_offset/4);
- CDBG("packet = %pK patch_desc = %pK size = %lu\n",
+ CAM_DBG(CAM_UTIL, "packet = %pK patch_desc = %pK size = %lu",
(void *)packet, (void *)patch_desc,
sizeof(struct cam_patch_desc));
@@ -44,7 +149,7 @@
rc = cam_mem_get_io_buf(patch_desc[i].src_buf_hdl,
iommu_hdl, &iova_addr, &src_buf_size);
if (rc < 0) {
- pr_err("unable to get src buf address\n");
+ CAM_ERR(CAM_UTIL, "unable to get src buf address");
return rc;
}
src_buf_iova_addr = (uint32_t *)iova_addr;
@@ -53,12 +158,12 @@
rc = cam_mem_get_cpu_buf(patch_desc[i].dst_buf_hdl,
&cpu_addr, &dst_buf_len);
if (rc < 0) {
- pr_err("unable to get dst buf address\n");
+ CAM_ERR(CAM_UTIL, "unable to get dst buf address");
return rc;
}
dst_cpu_addr = (uint32_t *)cpu_addr;
- CDBG("i = %d patch info = %x %x %x %x\n", i,
+ CAM_DBG(CAM_UTIL, "i = %d patch info = %x %x %x %x", i,
patch_desc[i].dst_buf_hdl, patch_desc[i].dst_offset,
patch_desc[i].src_buf_hdl, patch_desc[i].src_offset);
@@ -68,7 +173,8 @@
*dst_cpu_addr = temp;
- CDBG("patch is done for dst %pK with src %pK value %llx\n",
+ CAM_DBG(CAM_UTIL,
+ "patch is done for dst %pK with src %pK value %llx",
dst_cpu_addr, src_buf_iova_addr,
*((uint64_t *)dst_cpu_addr));
}
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.h
index 614e868..8b590a7 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.h
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.h
@@ -16,6 +16,62 @@
#include <uapi/media/cam_defs.h>
/**
+ * @brief KMD scratch buffer information
+ *
+ * @handle: Memory handle
+ * @cpu_addr: Cpu address
+ * @offset: Offset from the start of the buffer
+ * @size: Size of the buffer
+ * @used_bytes: Used memory in bytes
+ *
+ */
+struct cam_kmd_buf_info {
+ int handle;
+ uint32_t *cpu_addr;
+ uint32_t offset;
+ uint32_t size;
+ uint32_t used_bytes;
+};
+
+/**
+ * cam_packet_util_validate_packet()
+ *
+ * @brief Validate the packet
+ *
+ * @packet: Packet to be validated
+ *
+ * @return: 0 for success
+ * -EINVAL for Fail
+ */
+int cam_packet_util_validate_packet(struct cam_packet *packet);
+
+/**
+ * cam_packet_util_validate_cmd_desc()
+ *
+ * @brief Validate the packet
+ *
+ * @cmd_desc: Command descriptor to be validated
+ *
+ * @return: 0 for success
+ * -EINVAL for Fail
+ */
+int cam_packet_util_validate_cmd_desc(struct cam_cmd_buf_desc *cmd_desc);
+
+/**
+ * cam_packet_util_get_kmd_buffer()
+ *
+ * @brief Get the kmd buffer from the packet command descriptor
+ *
+ * @packet: Packet data
+ * @kmd_buf: Extracted the KMD buffer information
+ *
+ * @return: 0 for success
+ * -EINVAL for Fail
+ */
+int cam_packet_util_get_kmd_buffer(struct cam_packet *packet,
+ struct cam_kmd_buf_info *kmd_buf_info);
+
+/**
* cam_packet_util_process_patches()
*
* @brief: Replace the handle in Packet to Address using the
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
index 2dfb90a..e5c7dbb 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
@@ -10,24 +10,99 @@
* GNU General Public License for more details.
*/
-#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
-
#include <linux/of.h>
#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
#include "cam_soc_util.h"
+#include "cam_debug_util.h"
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+int cam_soc_util_get_level_from_string(const char *string,
+ enum cam_vote_level *level)
+{
+ if (!level)
+ return -EINVAL;
+
+ if (!strcmp(string, "suspend")) {
+ *level = CAM_SUSPEND_VOTE;
+ } else if (!strcmp(string, "minsvs")) {
+ *level = CAM_MINSVS_VOTE;
+ } else if (!strcmp(string, "lowsvs")) {
+ *level = CAM_LOWSVS_VOTE;
+ } else if (!strcmp(string, "svs")) {
+ *level = CAM_SVS_VOTE;
+ } else if (!strcmp(string, "svs_l1")) {
+ *level = CAM_SVSL1_VOTE;
+ } else if (!strcmp(string, "nominal")) {
+ *level = CAM_NOMINAL_VOTE;
+ } else if (!strcmp(string, "turbo")) {
+ *level = CAM_TURBO_VOTE;
+ } else {
+ CAM_ERR(CAM_UTIL, "Invalid string %s", string);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * cam_soc_util_get_clk_level_to_apply()
+ *
+ * @brief: Get the clock level to apply. If the requested level
+ * is not valid, bump the level to next available valid
+ * level. If no higher level found, return failure.
+ *
+ * @soc_info: Device soc struct to be populated
+ * @req_level: Requested level
+ * @apply_level: Level to apply
+ *
+ * @return: success or failure
+ */
+static int cam_soc_util_get_clk_level_to_apply(
+ struct cam_hw_soc_info *soc_info, enum cam_vote_level req_level,
+ enum cam_vote_level *apply_level)
+{
+ if (req_level >= CAM_MAX_VOTE) {
+ CAM_ERR(CAM_UTIL, "Invalid clock level parameter %d",
+ req_level);
+ return -EINVAL;
+ }
+
+ if (soc_info->clk_level_valid[req_level] == true) {
+ *apply_level = req_level;
+ } else {
+ int i;
+
+ for (i = (req_level + 1); i < CAM_MAX_VOTE; i++)
+ if (soc_info->clk_level_valid[i] == true) {
+ *apply_level = i;
+ break;
+ }
+
+ if (i == CAM_MAX_VOTE) {
+ CAM_ERR(CAM_UTIL,
+ "No valid clock level found to apply, req=%d",
+ req_level);
+ return -EINVAL;
+ }
+ }
+
+ CAM_DBG(CAM_UTIL, "Req level %d, Applying %d",
+ req_level, *apply_level);
+
+ return 0;
+}
int cam_soc_util_irq_enable(struct cam_hw_soc_info *soc_info)
{
if (!soc_info) {
- pr_err("Invalid arguments\n");
+ CAM_ERR(CAM_UTIL, "Invalid arguments");
return -EINVAL;
}
if (!soc_info->irq_line) {
- pr_err("No IRQ line available\n");
+ CAM_ERR(CAM_UTIL, "No IRQ line available");
return -ENODEV;
}
@@ -39,12 +114,12 @@
int cam_soc_util_irq_disable(struct cam_hw_soc_info *soc_info)
{
if (!soc_info) {
- pr_err("Invalid arguments\n");
+ CAM_ERR(CAM_UTIL, "Invalid arguments");
return -EINVAL;
}
if (!soc_info->irq_line) {
- pr_err("No IRQ line available\n");
+ CAM_ERR(CAM_UTIL, "No IRQ line available");
return -ENODEV;
}
@@ -53,7 +128,31 @@
return 0;
}
-int cam_soc_util_clk_enable(struct clk *clk, const char *clk_name,
+long cam_soc_util_get_clk_round_rate(struct cam_hw_soc_info *soc_info,
+ uint32_t clk_index, unsigned long clk_rate)
+{
+ if (!soc_info || (clk_index >= soc_info->num_clk) || (clk_rate == 0)) {
+ CAM_ERR(CAM_UTIL, "Invalid input params %pK, %d %lld",
+ soc_info, clk_index, clk_rate);
+ return clk_rate;
+ }
+
+ return clk_round_rate(soc_info->clk[clk_index], clk_rate);
+}
+
+int cam_soc_util_set_clk_flags(struct cam_hw_soc_info *soc_info,
+ uint32_t clk_index, unsigned long flags)
+{
+ if (!soc_info || (clk_index >= soc_info->num_clk)) {
+ CAM_ERR(CAM_UTIL, "Invalid input params %pK, %d",
+ soc_info, clk_index);
+ return -EINVAL;
+ }
+
+ return clk_set_flags(soc_info->clk[clk_index], flags);
+}
+
+int cam_soc_util_set_clk_rate(struct clk *clk, const char *clk_name,
int32_t clk_rate)
{
int rc = 0;
@@ -62,40 +161,56 @@
if (!clk || !clk_name)
return -EINVAL;
- CDBG("enable %s, clk %pK rate %d\n",
- clk_name, clk, clk_rate);
+ CAM_DBG(CAM_UTIL, "set %s, rate %d", clk_name, clk_rate);
if (clk_rate > 0) {
clk_rate_round = clk_round_rate(clk, clk_rate);
- CDBG("new_rate %ld\n", clk_rate_round);
+ CAM_DBG(CAM_UTIL, "new_rate %ld", clk_rate_round);
if (clk_rate_round < 0) {
- pr_err("%s: round failed for clock %s rc = %ld\n",
- __func__, clk_name, clk_rate_round);
+ CAM_ERR(CAM_UTIL, "round failed for clock %s rc = %ld",
+ clk_name, clk_rate_round);
return clk_rate_round;
}
rc = clk_set_rate(clk, clk_rate_round);
if (rc) {
- pr_err("set_rate failed on %s\n", clk_name);
+ CAM_ERR(CAM_UTIL, "set_rate failed on %s", clk_name);
return rc;
}
} else if (clk_rate == INIT_RATE) {
clk_rate_round = clk_get_rate(clk);
- CDBG("init new_rate %ld\n", clk_rate_round);
+ CAM_DBG(CAM_UTIL, "init new_rate %ld", clk_rate_round);
if (clk_rate_round == 0) {
clk_rate_round = clk_round_rate(clk, 0);
if (clk_rate_round <= 0) {
- pr_err("round rate failed on %s\n", clk_name);
+ CAM_ERR(CAM_UTIL, "round rate failed on %s",
+ clk_name);
return clk_rate_round;
}
}
rc = clk_set_rate(clk, clk_rate_round);
if (rc) {
- pr_err("set_rate failed on %s\n", clk_name);
+ CAM_ERR(CAM_UTIL, "set_rate failed on %s", clk_name);
return rc;
}
}
+
+ return rc;
+}
+
+int cam_soc_util_clk_enable(struct clk *clk, const char *clk_name,
+ int32_t clk_rate)
+{
+ int rc = 0;
+
+ if (!clk || !clk_name)
+ return -EINVAL;
+
+ rc = cam_soc_util_set_clk_rate(clk, clk_name, clk_rate);
+ if (rc)
+ return rc;
+
rc = clk_prepare_enable(clk);
if (rc) {
- pr_err("enable failed for %s\n", clk_name);
+ CAM_ERR(CAM_UTIL, "enable failed for %s: rc(%d)", clk_name, rc);
return rc;
}
@@ -107,7 +222,7 @@
if (!clk || !clk_name)
return -EINVAL;
- CDBG("disable %s\n", clk_name);
+ CAM_DBG(CAM_UTIL, "disable %s", clk_name);
clk_disable_unprepare(clk);
return 0;
@@ -119,20 +234,33 @@
* @brief: This function enables the default clocks present
* in soc_info
*
- * @soc_info: device soc struct to be populated
+ * @soc_info: Device soc struct to be populated
+ * @clk_level: Clk level to apply while enabling
*
* @return: success or failure
*/
-static int cam_soc_util_clk_enable_default(struct cam_hw_soc_info *soc_info)
+static int cam_soc_util_clk_enable_default(struct cam_hw_soc_info *soc_info,
+ enum cam_vote_level clk_level)
{
int i, rc = 0;
+ enum cam_vote_level apply_level;
- if (soc_info->num_clk == 0)
+ if ((soc_info->num_clk == 0) ||
+ (soc_info->num_clk >= CAM_SOC_MAX_CLK)) {
+ CAM_ERR(CAM_UTIL, "Invalid number of clock %d",
+ soc_info->num_clk);
+ return -EINVAL;
+ }
+
+ rc = cam_soc_util_get_clk_level_to_apply(soc_info, clk_level,
+ &apply_level);
+ if (rc)
return rc;
for (i = 0; i < soc_info->num_clk; i++) {
rc = cam_soc_util_clk_enable(soc_info->clk[i],
- soc_info->clk_name[i], soc_info->clk_rate[i]);
+ soc_info->clk_name[i],
+ soc_info->clk_rate[apply_level][i]);
if (rc)
goto clk_disable;
}
@@ -165,11 +293,9 @@
if (soc_info->num_clk == 0)
return;
- for (i = soc_info->num_clk - 1; i >= 0; i--) {
- CDBG("disable %s\n", soc_info->clk_name[i]);
+ for (i = soc_info->num_clk - 1; i >= 0; i--)
cam_soc_util_clk_disable(soc_info->clk[i],
soc_info->clk_name[i]);
- }
}
/**
@@ -186,9 +312,13 @@
{
struct device_node *of_node = NULL;
int count;
- int i, rc;
+ int num_clk_rates, num_clk_levels;
+ int i, j, rc;
+ int32_t num_clk_level_strings;
struct platform_device *pdev = NULL;
const char *src_clk_str = NULL;
+ const char *clk_cntl_lvl_string = NULL;
+ enum cam_vote_level level;
if (!soc_info || !soc_info->pdev)
return -EINVAL;
@@ -199,14 +329,14 @@
count = of_property_count_strings(of_node, "clock-names");
- CDBG("count = %d\n", count);
+ CAM_DBG(CAM_UTIL, "count = %d", count);
if (count > CAM_SOC_MAX_CLK) {
- pr_err("invalid count of clocks, count=%d", count);
+ CAM_ERR(CAM_UTIL, "invalid count of clocks, count=%d", count);
rc = -EINVAL;
return rc;
}
if (count <= 0) {
- CDBG("No clock-names found\n");
+ CAM_DBG(CAM_UTIL, "No clock-names found");
count = 0;
soc_info->num_clk = count;
return 0;
@@ -216,39 +346,409 @@
for (i = 0; i < count; i++) {
rc = of_property_read_string_index(of_node, "clock-names",
i, &(soc_info->clk_name[i]));
- CDBG("clock-names[%d] = %s\n", i, soc_info->clk_name[i]);
+ CAM_DBG(CAM_UTIL, "clock-names[%d] = %s",
+ i, soc_info->clk_name[i]);
if (rc) {
- pr_err("i= %d count= %d reading clock-names failed\n",
+ CAM_ERR(CAM_UTIL,
+ "i= %d count= %d reading clock-names failed",
i, count);
return rc;
}
}
- rc = of_property_read_u32_array(of_node, "clock-rates",
- soc_info->clk_rate, count);
- if (rc) {
- pr_err("reading clock-rates failed");
- return rc;
+ num_clk_rates = of_property_count_u32_elems(of_node, "clock-rates");
+ if (num_clk_rates <= 0) {
+ CAM_ERR(CAM_UTIL, "reading clock-rates count failed");
+ return -EINVAL;
}
+ if ((num_clk_rates % soc_info->num_clk) != 0) {
+ CAM_ERR(CAM_UTIL,
+ "mismatch clk/rates, No of clocks=%d, No of rates=%d",
+ soc_info->num_clk, num_clk_rates);
+ return -EINVAL;
+ }
+
+ num_clk_levels = (num_clk_rates / soc_info->num_clk);
+
+ num_clk_level_strings = of_property_count_strings(of_node,
+ "clock-cntl-level");
+ if (num_clk_level_strings != num_clk_levels) {
+ CAM_ERR(CAM_UTIL,
+ "Mismatch No of levels=%d, No of level string=%d",
+ num_clk_levels, num_clk_level_strings);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_clk_levels; i++) {
+ rc = of_property_read_string_index(of_node,
+ "clock-cntl-level", i, &clk_cntl_lvl_string);
+ if (rc) {
+ CAM_ERR(CAM_UTIL,
+ "Error reading clock-cntl-level, rc=%d", rc);
+ return rc;
+ }
+
+ rc = cam_soc_util_get_level_from_string(clk_cntl_lvl_string,
+ &level);
+ if (rc)
+ return rc;
+
+ CAM_DBG(CAM_UTIL,
+ "[%d] : %s %d", i, clk_cntl_lvl_string, level);
+ soc_info->clk_level_valid[level] = true;
+ for (j = 0; j < soc_info->num_clk; j++) {
+ rc = of_property_read_u32_index(of_node, "clock-rates",
+ ((i * soc_info->num_clk) + j),
+ &soc_info->clk_rate[level][j]);
+ if (rc) {
+ CAM_ERR(CAM_UTIL,
+ "Error reading clock-rates, rc=%d",
+ rc);
+ return rc;
+ }
+
+ soc_info->clk_rate[level][j] =
+ (soc_info->clk_rate[level][j] == 0) ?
+ (long)NO_SET_RATE :
+ soc_info->clk_rate[level][j];
+
+ CAM_DBG(CAM_UTIL, "soc_info->clk_rate[%d][%d] = %d",
+ level, j,
+ soc_info->clk_rate[level][j]);
+ }
+ }
+
+ soc_info->src_clk_idx = -1;
rc = of_property_read_string_index(of_node, "src-clock-name", 0,
&src_clk_str);
- if (rc) {
- CDBG("No src_clk_str found\n");
- soc_info->src_clk_idx = -1;
+ if (rc || !src_clk_str) {
+ CAM_DBG(CAM_UTIL, "No src_clk_str found");
rc = 0;
/* Bottom loop is dependent on src_clk_str. So return here */
return rc;
}
for (i = 0; i < soc_info->num_clk; i++) {
- soc_info->clk_rate[i] = (soc_info->clk_rate[i] == 0) ?
- (long)-1 : soc_info->clk_rate[i];
- if (src_clk_str &&
- (strcmp(soc_info->clk_name[i], src_clk_str) == 0)) {
+ if (strcmp(soc_info->clk_name[i], src_clk_str) == 0) {
soc_info->src_clk_idx = i;
+ CAM_DBG(CAM_UTIL, "src clock = %s, index = %d",
+ src_clk_str, i);
+ break;
}
- CDBG("clk_rate[%d] = %d\n", i, soc_info->clk_rate[i]);
+ }
+
+ return rc;
+}
+
+int cam_soc_util_set_clk_rate_level(struct cam_hw_soc_info *soc_info,
+ enum cam_vote_level clk_level)
+{
+ int i, rc = 0;
+ enum cam_vote_level apply_level;
+
+ if ((soc_info->num_clk == 0) ||
+ (soc_info->num_clk >= CAM_SOC_MAX_CLK)) {
+ CAM_ERR(CAM_UTIL, "Invalid number of clock %d",
+ soc_info->num_clk);
+ return -EINVAL;
+ }
+
+ rc = cam_soc_util_get_clk_level_to_apply(soc_info, clk_level,
+ &apply_level);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < soc_info->num_clk; i++) {
+ rc = cam_soc_util_set_clk_rate(soc_info->clk[i],
+ soc_info->clk_name[i],
+ soc_info->clk_rate[apply_level][i]);
+ if (rc)
+ break;
+ }
+
+ return rc;
+};
+
+static int cam_soc_util_get_dt_gpio_req_tbl(struct device_node *of_node,
+ struct cam_soc_gpio_data *gconf, uint16_t *gpio_array,
+ uint16_t gpio_array_size)
+{
+ int32_t rc = 0, i = 0;
+ uint32_t count = 0;
+ uint32_t *val_array = NULL;
+
+ if (!of_get_property(of_node, "gpio-req-tbl-num", &count))
+ return 0;
+
+ count /= sizeof(uint32_t);
+ if (!count) {
+ CAM_ERR(CAM_UTIL, "gpio-req-tbl-num 0");
+ return 0;
+ }
+
+ val_array = kcalloc(count, sizeof(uint32_t), GFP_KERNEL);
+ if (!val_array)
+ return -ENOMEM;
+
+ gconf->cam_gpio_req_tbl = kcalloc(count, sizeof(struct gpio),
+ GFP_KERNEL);
+ if (!gconf->cam_gpio_req_tbl) {
+ rc = -ENOMEM;
+ goto free_val_array;
+ }
+ gconf->cam_gpio_req_tbl_size = count;
+
+ rc = of_property_read_u32_array(of_node, "gpio-req-tbl-num",
+ val_array, count);
+ if (rc) {
+ CAM_ERR(CAM_UTIL, "failed in reading gpio-req-tbl-num, rc = %d",
+ rc);
+ goto free_gpio_req_tbl;
+ }
+
+ for (i = 0; i < count; i++) {
+ if (val_array[i] >= gpio_array_size) {
+ CAM_ERR(CAM_UTIL, "gpio req tbl index %d invalid",
+ val_array[i]);
+ goto free_gpio_req_tbl;
+ }
+ gconf->cam_gpio_req_tbl[i].gpio = gpio_array[val_array[i]];
+ CAM_DBG(CAM_UTIL, "cam_gpio_req_tbl[%d].gpio = %d", i,
+ gconf->cam_gpio_req_tbl[i].gpio);
+ }
+
+ rc = of_property_read_u32_array(of_node, "gpio-req-tbl-flags",
+ val_array, count);
+ if (rc) {
+ CAM_ERR(CAM_UTIL, "Failed in gpio-req-tbl-flags, rc %d", rc);
+ goto free_gpio_req_tbl;
+ }
+
+ for (i = 0; i < count; i++) {
+ gconf->cam_gpio_req_tbl[i].flags = val_array[i];
+ CAM_DBG(CAM_UTIL, "cam_gpio_req_tbl[%d].flags = %ld", i,
+ gconf->cam_gpio_req_tbl[i].flags);
+ }
+
+ for (i = 0; i < count; i++) {
+ rc = of_property_read_string_index(of_node,
+ "gpio-req-tbl-label", i,
+ &gconf->cam_gpio_req_tbl[i].label);
+ if (rc) {
+ CAM_ERR(CAM_UTIL, "Failed rc %d", rc);
+ goto free_gpio_req_tbl;
+ }
+ CAM_DBG(CAM_UTIL, "cam_gpio_req_tbl[%d].label = %s", i,
+ gconf->cam_gpio_req_tbl[i].label);
+ }
+
+ kfree(val_array);
+
+ return rc;
+
+free_gpio_req_tbl:
+ kfree(gconf->cam_gpio_req_tbl);
+free_val_array:
+ kfree(val_array);
+ gconf->cam_gpio_req_tbl_size = 0;
+
+ return rc;
+}
+
+static int cam_soc_util_get_gpio_info(struct cam_hw_soc_info *soc_info)
+{
+ int32_t rc = 0, i = 0;
+ uint16_t *gpio_array = NULL;
+ int16_t gpio_array_size = 0;
+ struct cam_soc_gpio_data *gconf = NULL;
+ struct device_node *of_node = NULL;
+ struct platform_device *pdev = NULL;
+
+ if (!soc_info || !soc_info->pdev)
+ return -EINVAL;
+
+ pdev = soc_info->pdev;
+ of_node = pdev->dev.of_node;
+
+ /* Validate input parameters */
+ if (!of_node) {
+ CAM_ERR(CAM_UTIL, "Invalid param of_node");
+ return -EINVAL;
+ }
+
+ gpio_array_size = of_gpio_count(of_node);
+
+ if (gpio_array_size <= 0)
+ return 0;
+
+ CAM_DBG(CAM_UTIL, "gpio count %d", gpio_array_size);
+
+ gpio_array = kcalloc(gpio_array_size, sizeof(uint16_t), GFP_KERNEL);
+ if (!gpio_array)
+ goto free_gpio_conf;
+
+ for (i = 0; i < gpio_array_size; i++) {
+ gpio_array[i] = of_get_gpio(of_node, i);
+ CAM_DBG(CAM_UTIL, "gpio_array[%d] = %d", i, gpio_array[i]);
+ }
+
+ gconf = kzalloc(sizeof(*gconf), GFP_KERNEL);
+ if (!gconf)
+ return -ENOMEM;
+
+ rc = cam_soc_util_get_dt_gpio_req_tbl(of_node, gconf, gpio_array,
+ gpio_array_size);
+ if (rc) {
+ CAM_ERR(CAM_UTIL, "failed in msm_camera_get_dt_gpio_req_tbl");
+ goto free_gpio_array;
+ }
+
+ gconf->cam_gpio_common_tbl = kcalloc(gpio_array_size,
+ sizeof(struct gpio), GFP_KERNEL);
+ if (!gconf->cam_gpio_common_tbl) {
+ rc = -ENOMEM;
+ goto free_gpio_array;
+ }
+
+ for (i = 0; i < gpio_array_size; i++)
+ gconf->cam_gpio_common_tbl[i].gpio = gpio_array[i];
+
+ gconf->cam_gpio_common_tbl_size = gpio_array_size;
+ soc_info->gpio_data = gconf;
+ kfree(gpio_array);
+
+ return rc;
+
+free_gpio_array:
+ kfree(gpio_array);
+free_gpio_conf:
+ kfree(gconf);
+ soc_info->gpio_data = NULL;
+
+ return rc;
+}
+
+static int cam_soc_util_request_gpio_table(
+ struct cam_hw_soc_info *soc_info, bool gpio_en)
+{
+ int rc = 0, i = 0;
+ uint8_t size = 0;
+ struct cam_soc_gpio_data *gpio_conf =
+ soc_info->gpio_data;
+ struct gpio *gpio_tbl = NULL;
+
+
+ if (!gpio_conf) {
+ CAM_DBG(CAM_UTIL, "No GPIO entry");
+ return 0;
+ }
+ if (gpio_conf->cam_gpio_common_tbl_size <= 0) {
+ CAM_ERR(CAM_UTIL, "GPIO table size is invalid");
+ return -EINVAL;
+ }
+ size = gpio_conf->cam_gpio_req_tbl_size;
+ gpio_tbl = gpio_conf->cam_gpio_req_tbl;
+
+ if (!gpio_tbl || !size) {
+ CAM_ERR(CAM_UTIL, "Invalid gpio_tbl %pK / size %d",
+ gpio_tbl, size);
+ return -EINVAL;
+ }
+ for (i = 0; i < size; i++) {
+ CAM_DBG(CAM_UTIL, "i=%d, gpio=%d dir=%ld", i,
+ gpio_tbl[i].gpio, gpio_tbl[i].flags);
+ }
+ if (gpio_en) {
+ for (i = 0; i < size; i++) {
+ rc = gpio_request_one(gpio_tbl[i].gpio,
+ gpio_tbl[i].flags, gpio_tbl[i].label);
+ if (rc) {
+ /*
+ * After GPIO request fails, continue to
+ * apply new gpios, output an error message
+ * for driver bringup debug
+ */
+ CAM_ERR(CAM_UTIL, "gpio %d:%s request fails",
+ gpio_tbl[i].gpio, gpio_tbl[i].label);
+ }
+ }
+ } else {
+ gpio_free_array(gpio_tbl, size);
+ }
+
+ return rc;
+}
+
+static int cam_soc_util_get_dt_regulator_info
+ (struct cam_hw_soc_info *soc_info)
+{
+ int rc = 0, count = 0, i = 0;
+ struct device_node *of_node = NULL;
+ struct platform_device *pdev = NULL;
+
+ if (!soc_info || !soc_info->pdev) {
+ CAM_ERR(CAM_UTIL, "Invalid parameters");
+ return -EINVAL;
+ }
+
+ pdev = soc_info->pdev;
+ of_node = pdev->dev.of_node;
+
+ soc_info->num_rgltr = 0;
+ count = of_property_count_strings(of_node, "regulator-names");
+ if (count != -EINVAL) {
+ if (count <= 0) {
+ CAM_ERR(CAM_UTIL, "no regulators found");
+ count = 0;
+ return -EINVAL;
+ }
+
+ soc_info->num_rgltr = count;
+
+ } else {
+ CAM_DBG(CAM_UTIL, "No regulators node found");
+ return 0;
+ }
+
+ for (i = 0; i < soc_info->num_rgltr; i++) {
+ rc = of_property_read_string_index(of_node,
+ "regulator-names", i, &soc_info->rgltr_name[i]);
+ CAM_DBG(CAM_UTIL, "rgltr_name[%d] = %s",
+ i, soc_info->rgltr_name[i]);
+ if (rc) {
+ CAM_ERR(CAM_UTIL, "no regulator resource at cnt=%d", i);
+ return -ENODEV;
+ }
+ }
+
+ if (!of_property_read_bool(of_node, "rgltr-cntrl-support")) {
+ CAM_DBG(CAM_UTIL, "No regulator control parameter defined");
+ soc_info->rgltr_ctrl_support = false;
+ return 0;
+ }
+
+ soc_info->rgltr_ctrl_support = true;
+
+ rc = of_property_read_u32_array(of_node, "rgltr-min-voltage",
+ soc_info->rgltr_min_volt, soc_info->num_rgltr);
+ if (rc) {
+ CAM_ERR(CAM_UTIL, "No minimum volatage value found, rc=%d", rc);
+ return -EINVAL;
+ }
+
+ rc = of_property_read_u32_array(of_node, "rgltr-max-voltage",
+ soc_info->rgltr_max_volt, soc_info->num_rgltr);
+ if (rc) {
+ CAM_ERR(CAM_UTIL, "No maximum volatage value found, rc=%d", rc);
+ return -EINVAL;
+ }
+
+ rc = of_property_read_u32_array(of_node, "rgltr-load-current",
+ soc_info->rgltr_op_mode, soc_info->num_rgltr);
+ if (rc) {
+ CAM_ERR(CAM_UTIL, "No Load curent found rc=%d", rc);
+ return -EINVAL;
}
return rc;
@@ -264,36 +764,17 @@
return -EINVAL;
pdev = soc_info->pdev;
-
of_node = pdev->dev.of_node;
rc = of_property_read_u32(of_node, "cell-index", &soc_info->index);
if (rc) {
- pr_err("device %s failed to read cell-index\n", pdev->name);
+ CAM_ERR(CAM_UTIL, "device %s failed to read cell-index",
+ pdev->name);
return rc;
}
-
- count = of_property_count_strings(of_node, "regulator-names");
- if (count <= 0) {
- pr_err("no regulators found\n");
- count = 0;
- }
- soc_info->num_rgltr = count;
-
- for (i = 0; i < soc_info->num_rgltr; i++) {
- rc = of_property_read_string_index(of_node,
- "regulator-names", i, &soc_info->rgltr_name[i]);
- CDBG("rgltr_name[%d] = %s\n", i, soc_info->rgltr_name[i]);
- if (rc) {
- pr_err("no regulator resource at cnt=%d\n", i);
- rc = -ENODEV;
- return rc;
- }
- }
-
count = of_property_count_strings(of_node, "reg-names");
if (count <= 0) {
- pr_err("no reg-names found\n");
+ CAM_ERR(CAM_UTIL, "no reg-names found");
count = 0;
}
soc_info->num_mem_block = count;
@@ -302,7 +783,7 @@
rc = of_property_read_string_index(of_node, "reg-names", i,
&soc_info->mem_block_name[i]);
if (rc) {
- pr_err("failed to read reg-names at %d\n", i);
+ CAM_ERR(CAM_UTIL, "failed to read reg-names at %d", i);
return rc;
}
soc_info->mem_block[i] =
@@ -310,7 +791,7 @@
soc_info->mem_block_name[i]);
if (!soc_info->mem_block[i]) {
- pr_err("no mem resource by name %s\n",
+ CAM_ERR(CAM_UTIL, "no mem resource by name %s",
soc_info->mem_block_name[i]);
rc = -ENODEV;
return rc;
@@ -321,7 +802,7 @@
rc = of_property_read_u32_array(of_node, "reg-cam-base",
soc_info->mem_block_cam_base, soc_info->num_mem_block);
if (rc) {
- pr_err("Error reading register offsets\n");
+ CAM_ERR(CAM_UTIL, "Error reading register offsets");
return rc;
}
}
@@ -329,18 +810,29 @@
rc = of_property_read_string_index(of_node, "interrupt-names", 0,
&soc_info->irq_name);
if (rc) {
- pr_warn("No interrupt line present\n");
+ CAM_WARN(CAM_UTIL, "No interrupt line present");
+ rc = 0;
} else {
soc_info->irq_line = platform_get_resource_byname(pdev,
IORESOURCE_IRQ, soc_info->irq_name);
if (!soc_info->irq_line) {
- pr_err("no irq resource\n");
+ CAM_ERR(CAM_UTIL, "no irq resource");
rc = -ENODEV;
return rc;
}
}
+ rc = cam_soc_util_get_dt_regulator_info(soc_info);
+ if (rc)
+ return rc;
+
rc = cam_soc_util_get_dt_clk_info(soc_info);
+ if (rc)
+ return rc;
+
+ rc = cam_soc_util_get_gpio_info(soc_info);
+ if (rc)
+ return rc;
return rc;
}
@@ -364,29 +856,223 @@
if (IS_ERR_OR_NULL(*reg)) {
rc = PTR_ERR(*reg);
rc = rc ? rc : -EINVAL;
- pr_err("Regulator %s get failed %d\n", rgltr_name, rc);
+ CAM_ERR(CAM_UTIL, "Regulator %s get failed %d", rgltr_name, rc);
*reg = NULL;
}
return rc;
}
-int cam_soc_util_request_platform_resource(struct cam_hw_soc_info *soc_info,
+int cam_soc_util_regulator_disable(struct regulator *rgltr,
+ const char *rgltr_name, uint32_t rgltr_min_volt,
+ uint32_t rgltr_max_volt, uint32_t rgltr_op_mode,
+ uint32_t rgltr_delay_ms)
+{
+ int32_t rc = 0;
+
+ if (!rgltr) {
+ CAM_ERR(CAM_UTIL, "Invalid NULL parameter");
+ return -EINVAL;
+ }
+
+ rc = regulator_disable(rgltr);
+ if (rc) {
+ CAM_ERR(CAM_UTIL, "%s regulator disable failed", rgltr_name);
+ return rc;
+ }
+
+ if (rgltr_delay_ms > 20)
+ msleep(rgltr_delay_ms);
+ else if (rgltr_delay_ms)
+ usleep_range(rgltr_delay_ms * 1000,
+ (rgltr_delay_ms * 1000) + 1000);
+
+ if (regulator_count_voltages(rgltr) > 0) {
+ regulator_set_load(rgltr, 0);
+ regulator_set_voltage(rgltr, 0, rgltr_max_volt);
+ }
+
+ return rc;
+}
+
+
+int cam_soc_util_regulator_enable(struct regulator *rgltr,
+ const char *rgltr_name,
+ uint32_t rgltr_min_volt, uint32_t rgltr_max_volt,
+ uint32_t rgltr_op_mode, uint32_t rgltr_delay)
+{
+ int32_t rc = 0;
+
+ if (!rgltr) {
+ CAM_ERR(CAM_UTIL, "Invalid NULL parameter");
+ return -EINVAL;
+ }
+
+ if (regulator_count_voltages(rgltr) > 0) {
+ CAM_DBG(CAM_UTIL, "voltage min=%d, max=%d",
+ rgltr_min_volt, rgltr_max_volt);
+
+ rc = regulator_set_voltage(
+ rgltr, rgltr_min_volt, rgltr_max_volt);
+ if (rc) {
+ CAM_ERR(CAM_UTIL, "%s set voltage failed", rgltr_name);
+ return rc;
+ }
+
+ rc = regulator_set_load(rgltr, rgltr_op_mode);
+ if (rc) {
+ CAM_ERR(CAM_UTIL, "%s set optimum mode failed",
+ rgltr_name);
+ return rc;
+ }
+ }
+
+ rc = regulator_enable(rgltr);
+ if (rc) {
+ CAM_ERR(CAM_UTIL, "%s regulator_enable failed", rgltr_name);
+ return rc;
+ }
+
+ if (rgltr_delay > 20)
+ msleep(rgltr_delay);
+ else if (rgltr_delay)
+ usleep_range(rgltr_delay * 1000,
+ (rgltr_delay * 1000) + 1000);
+
+ return rc;
+}
+
+static int cam_soc_util_request_pinctrl(
+ struct cam_hw_soc_info *soc_info) {
+
+ struct cam_soc_pinctrl_info *device_pctrl = &soc_info->pinctrl_info;
+ struct device *dev = &soc_info->pdev->dev;
+
+ device_pctrl->pinctrl = devm_pinctrl_get(dev);
+ if (IS_ERR_OR_NULL(device_pctrl->pinctrl)) {
+ CAM_DBG(CAM_UTIL, "Pinctrl not available");
+ device_pctrl->pinctrl = NULL;
+ return 0;
+ }
+ device_pctrl->gpio_state_active =
+ pinctrl_lookup_state(device_pctrl->pinctrl,
+ CAM_SOC_PINCTRL_STATE_DEFAULT);
+ if (IS_ERR_OR_NULL(device_pctrl->gpio_state_active)) {
+ CAM_ERR(CAM_UTIL,
+ "Failed to get the active state pinctrl handle");
+ device_pctrl->gpio_state_active = NULL;
+ return -EINVAL;
+ }
+ device_pctrl->gpio_state_suspend
+ = pinctrl_lookup_state(device_pctrl->pinctrl,
+ CAM_SOC_PINCTRL_STATE_SLEEP);
+ if (IS_ERR_OR_NULL(device_pctrl->gpio_state_suspend)) {
+ CAM_ERR(CAM_UTIL,
+ "Failed to get the suspend state pinctrl handle");
+ device_pctrl->gpio_state_suspend = NULL;
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void cam_soc_util_regulator_disable_default(
+ struct cam_hw_soc_info *soc_info)
+{
+ int j = 0;
+ uint32_t num_rgltr = soc_info->num_rgltr;
+
+ for (j = num_rgltr-1; j >= 0; j--) {
+ if (soc_info->rgltr_ctrl_support == true) {
+ cam_soc_util_regulator_disable(soc_info->rgltr[j],
+ soc_info->rgltr_name[j],
+ soc_info->rgltr_min_volt[j],
+ soc_info->rgltr_max_volt[j],
+ soc_info->rgltr_op_mode[j],
+ soc_info->rgltr_delay[j]);
+ } else {
+ if (soc_info->rgltr[j])
+ regulator_disable(soc_info->rgltr[j]);
+ }
+ }
+}
+
+static int cam_soc_util_regulator_enable_default(
+ struct cam_hw_soc_info *soc_info)
+{
+ int j = 0, rc = 0;
+ uint32_t num_rgltr = soc_info->num_rgltr;
+
+ for (j = 0; j < num_rgltr; j++) {
+ if (soc_info->rgltr_ctrl_support == true) {
+ rc = cam_soc_util_regulator_enable(soc_info->rgltr[j],
+ soc_info->rgltr_name[j],
+ soc_info->rgltr_min_volt[j],
+ soc_info->rgltr_max_volt[j],
+ soc_info->rgltr_op_mode[j],
+ soc_info->rgltr_delay[j]);
+ } else {
+ if (soc_info->rgltr[j])
+ rc = regulator_enable(soc_info->rgltr[j]);
+ }
+
+ if (rc) {
+ CAM_ERR(CAM_UTIL, "%s enable failed",
+ soc_info->rgltr_name[j]);
+ goto disable_rgltr;
+ }
+ }
+
+ return rc;
+disable_rgltr:
+
+ for (j--; j >= 0; j--) {
+ if (soc_info->rgltr_ctrl_support == true) {
+ cam_soc_util_regulator_disable(soc_info->rgltr[j],
+ soc_info->rgltr_name[j],
+ soc_info->rgltr_min_volt[j],
+ soc_info->rgltr_max_volt[j],
+ soc_info->rgltr_op_mode[j],
+ soc_info->rgltr_delay[j]);
+ } else {
+ if (soc_info->rgltr[j])
+ regulator_disable(soc_info->rgltr[j]);
+ }
+ }
+
+ return rc;
+}
+
+int cam_soc_util_request_platform_resource(
+ struct cam_hw_soc_info *soc_info,
irq_handler_t handler, void *irq_data)
{
int i = 0, rc = 0;
struct platform_device *pdev = NULL;
- if (!soc_info || !soc_info->pdev)
+
+ if (!soc_info || !soc_info->pdev) {
+ CAM_ERR(CAM_UTIL, "Invalid parameters");
return -EINVAL;
+ }
pdev = soc_info->pdev;
for (i = 0; i < soc_info->num_mem_block; i++) {
+ if (soc_info->reserve_mem) {
+ if (!request_mem_region(soc_info->mem_block[i]->start,
+ resource_size(soc_info->mem_block[i]),
+ soc_info->mem_block_name[i])){
+ CAM_ERR(CAM_UTIL,
+ "Error Mem Region request Failed:%s",
+ soc_info->mem_block_name[i]);
+ rc = -ENOMEM;
+ goto unmap_base;
+ }
+ }
soc_info->reg_map[i].mem_base = ioremap(
soc_info->mem_block[i]->start,
resource_size(soc_info->mem_block[i]));
if (!soc_info->reg_map[i].mem_base) {
- pr_err("i= %d base NULL\n", i);
+ CAM_ERR(CAM_UTIL, "i= %d base NULL", i);
rc = -ENOMEM;
goto unmap_base;
}
@@ -398,6 +1084,11 @@
}
for (i = 0; i < soc_info->num_rgltr; i++) {
+ if (soc_info->rgltr_name[i] == NULL) {
+ CAM_ERR(CAM_UTIL, "can't find regulator name");
+ goto put_regulator;
+ }
+
rc = cam_soc_util_get_regulator(pdev, &soc_info->rgltr[i],
soc_info->rgltr_name[i]);
if (rc)
@@ -408,8 +1099,8 @@
rc = devm_request_irq(&pdev->dev, soc_info->irq_line->start,
handler, IRQF_TRIGGER_RISING,
soc_info->irq_name, irq_data);
- if (rc < 0) {
- pr_err("irq request fail\n");
+ if (rc) {
+ CAM_ERR(CAM_UTIL, "irq request fail");
rc = -EBUSY;
goto put_regulator;
}
@@ -422,12 +1113,23 @@
soc_info->clk[i] = clk_get(&soc_info->pdev->dev,
soc_info->clk_name[i]);
if (!soc_info->clk[i]) {
- pr_err("get failed for %s\n", soc_info->clk_name[i]);
+ CAM_ERR(CAM_UTIL, "get failed for %s",
+ soc_info->clk_name[i]);
rc = -ENOENT;
goto put_clk;
}
}
+ rc = cam_soc_util_request_pinctrl(soc_info);
+ if (rc)
+ CAM_DBG(CAM_UTIL, "Failed in request pinctrl, rc=%d", rc);
+
+ rc = cam_soc_util_request_gpio_table(soc_info, true);
+ if (rc) {
+ CAM_ERR(CAM_UTIL, "Failed in request gpio table, rc=%d", rc);
+ goto put_clk;
+ }
+
return rc;
put_clk:
@@ -461,6 +1163,9 @@
if (i == -1)
i = soc_info->num_reg_map;
for (i = i - 1; i >= 0; i--) {
+ if (soc_info->reserve_mem)
+ release_mem_region(soc_info->mem_block[i]->start,
+ resource_size(soc_info->mem_block[i]));
iounmap(soc_info->reg_map[i].mem_base);
soc_info->reg_map[i].mem_base = NULL;
soc_info->reg_map[i].size = 0;
@@ -474,8 +1179,11 @@
int i;
struct platform_device *pdev = NULL;
- if (!soc_info || !soc_info->pdev)
+ if (!soc_info || !soc_info->pdev) {
+ CAM_ERR(CAM_UTIL, "Invalid parameter");
return -EINVAL;
+ }
+
pdev = soc_info->pdev;
@@ -503,28 +1211,32 @@
soc_info->irq_line->start, soc_info->irq_data);
}
+ if (soc_info->pinctrl_info.pinctrl)
+ devm_pinctrl_put(soc_info->pinctrl_info.pinctrl);
+
+
+ /* release for gpio */
+ cam_soc_util_request_gpio_table(soc_info, false);
+
return 0;
}
int cam_soc_util_enable_platform_resource(struct cam_hw_soc_info *soc_info,
- bool enable_clocks, bool enable_irq)
+ bool enable_clocks, enum cam_vote_level clk_level, bool enable_irq)
{
- int i, rc = 0;
+ int rc = 0;
if (!soc_info)
return -EINVAL;
- for (i = 0; i < soc_info->num_rgltr; i++) {
- rc = regulator_enable(soc_info->rgltr[i]);
- if (rc) {
- pr_err("Regulator enable %s failed\n",
- soc_info->rgltr_name[i]);
- goto disable_regulator;
- }
+ rc = cam_soc_util_regulator_enable_default(soc_info);
+ if (rc) {
+ CAM_ERR(CAM_UTIL, "Regulators enable failed");
+ return rc;
}
if (enable_clocks) {
- rc = cam_soc_util_clk_enable_default(soc_info);
+ rc = cam_soc_util_clk_enable_default(soc_info, clk_level);
if (rc)
goto disable_regulator;
}
@@ -535,19 +1247,28 @@
goto disable_clk;
}
+ if (soc_info->pinctrl_info.pinctrl &&
+ soc_info->pinctrl_info.gpio_state_active) {
+ rc = pinctrl_select_state(soc_info->pinctrl_info.pinctrl,
+ soc_info->pinctrl_info.gpio_state_active);
+
+ if (rc)
+ goto disable_irq;
+ }
+
return rc;
+disable_irq:
+ if (enable_irq)
+ cam_soc_util_irq_disable(soc_info);
+
disable_clk:
if (enable_clocks)
cam_soc_util_clk_disable_default(soc_info);
disable_regulator:
- if (i == -1)
- i = soc_info->num_rgltr;
- for (i = i - 1; i >= 0; i--) {
- if (soc_info->rgltr[i])
- regulator_disable(soc_info->rgltr[i]);
- }
+ cam_soc_util_regulator_disable_default(soc_info);
+
return rc;
}
@@ -555,7 +1276,7 @@
int cam_soc_util_disable_platform_resource(struct cam_hw_soc_info *soc_info,
bool disable_clocks, bool disble_irq)
{
- int i, rc = 0;
+ int rc = 0;
if (!soc_info)
return -EINVAL;
@@ -563,18 +1284,16 @@
if (disable_clocks)
cam_soc_util_clk_disable_default(soc_info);
- for (i = soc_info->num_rgltr - 1; i >= 0; i--) {
- rc |= regulator_disable(soc_info->rgltr[i]);
- if (rc) {
- pr_err("Regulator disble %s failed\n",
- soc_info->rgltr_name[i]);
- continue;
- }
- }
+ cam_soc_util_regulator_disable_default(soc_info);
if (disble_irq)
rc |= cam_soc_util_irq_disable(soc_info);
+ if (soc_info->pinctrl_info.pinctrl &&
+ soc_info->pinctrl_info.gpio_state_suspend)
+ rc = pinctrl_select_state(soc_info->pinctrl_info.pinctrl,
+ soc_info->pinctrl_info.gpio_state_suspend);
+
return rc;
}
@@ -583,7 +1302,7 @@
{
void __iomem *base_addr = NULL;
- CDBG("base_idx %u size=%d\n", base_index, size);
+ CAM_DBG(CAM_UTIL, "base_idx %u size=%d", base_index, size);
if (!soc_info || base_index >= soc_info->num_reg_map ||
size <= 0 || (offset + size) >=
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
index e556bba..ae92cab 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
@@ -16,8 +16,10 @@
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
+#include <linux/clk/qcom.h>
#include "cam_io_util.h"
@@ -37,6 +39,33 @@
#define CAM_SOC_MAX_CLK 32
/**
+ * enum cam_vote_level - Enum for voting level
+ *
+ * @CAM_SUSPEND_VOTE : Suspend vote
+ * @CAM_MINSVS_VOTE : Min SVS vote
+ * @CAM_LOWSVS_VOTE : Low SVS vote
+ * @CAM_SVS_VOTE : SVS vote
+ * @CAM_SVSL1_VOTE : SVS Plus vote
+ * @CAM_NOMINAL_VOTE : Nominal vote
+ * @CAM_TURBO_VOTE : Turbo vote
+ * @CAM_MAX_VOTE : Max voting level, This is invalid level.
+ */
+enum cam_vote_level {
+ CAM_SUSPEND_VOTE,
+ CAM_MINSVS_VOTE,
+ CAM_LOWSVS_VOTE,
+ CAM_SVS_VOTE,
+ CAM_SVSL1_VOTE,
+ CAM_NOMINAL_VOTE,
+ CAM_TURBO_VOTE,
+ CAM_MAX_VOTE,
+};
+
+/* pinctrl states */
+#define CAM_SOC_PINCTRL_STATE_SLEEP "cam_suspend"
+#define CAM_SOC_PINCTRL_STATE_DEFAULT "cam_default"
+
+/**
* struct cam_soc_reg_map: Information about the mapped register space
*
* @mem_base: Starting location of MAPPED register space
@@ -51,6 +80,35 @@
};
/**
+ * struct cam_soc_pinctrl_info: Information about pinctrl data
+ *
+ * @pinctrl: pinctrl object
+ * @gpio_state_active: default pinctrl state
+ * @gpio_state_suspend: suspend state of pinctrl
+ **/
+struct cam_soc_pinctrl_info {
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *gpio_state_active;
+ struct pinctrl_state *gpio_state_suspend;
+};
+
+/**
+ * struct cam_soc_gpio_data: Information about the gpio pins
+ *
+ * @cam_gpio_common_tbl: It is list of all the gpios present in gpios node
+ * @cam_gpio_common_tbl_size: It is equal to number of gpios present in
+ * gpios node in DTSI
+ * @cam_gpio_req_tbl: It is list of all the requested gpios
+ * @cam_gpio_req_tbl_size: It is size of requested gpios
+ **/
+struct cam_soc_gpio_data {
+ struct gpio *cam_gpio_common_tbl;
+ uint8_t cam_gpio_common_tbl_size;
+ struct gpio *cam_gpio_req_tbl;
+ uint8_t cam_gpio_req_tbl_size;
+};
+
+/**
* struct cam_hw_soc_info: Soc information pertaining to specific instance of
* Camera hardware driver module
*
@@ -69,14 +127,25 @@
* @num_reg_map: Number of mapped register space associated
* with mem_block. num_reg_map = num_mem_block in
* most cases
+ * @reserve_mem: Whether to reserve memory for Mem blocks
* @num_rgltr: Number of regulators
* @rgltr_name: Array of regulator names
+ * @rgltr_ctrl_support: Whether regulator control is supported
+ * @rgltr_min_volt: Array of minimum regulator voltage
+ * @rgltr_max_volt: Array of maximum regulator voltage
+ * @rgltr_op_mode: Array of regulator operation mode
+ * @rgltr_type: Array of regulator types
* @rgltr: Array of associated regulator resources
+ * @rgltr_delay: Array of regulator delay values
* @num_clk: Number of clocks
* @clk_name: Array of clock names
* @clk: Array of associated clock resources
- * @clk_rate: Array of default clock rates
+ * @clk_rate: 2D array of clock rates representing clock rate
+ * values at different vote levels
* @src_clk_idx: Source clock index that is rate-controllable
+ * @clk_level_valid: Indicates whether corresponding level is valid
+ * @gpio_data: Pointer to gpio info
+ * @pinctrl_info: Pointer to pinctrl info
* @soc_private: Soc private data
*
*/
@@ -84,7 +153,6 @@
struct platform_device *pdev;
uint32_t hw_version;
uint32_t index;
-
const char *irq_name;
struct resource *irq_line;
void *irq_data;
@@ -95,16 +163,27 @@
struct resource *mem_block[CAM_SOC_MAX_BLOCK];
struct cam_soc_reg_map reg_map[CAM_SOC_MAX_BASE];
uint32_t num_reg_map;
+ uint32_t reserve_mem;
uint32_t num_rgltr;
const char *rgltr_name[CAM_SOC_MAX_REGULATOR];
+ uint32_t rgltr_ctrl_support;
+ uint32_t rgltr_min_volt[CAM_SOC_MAX_REGULATOR];
+ uint32_t rgltr_max_volt[CAM_SOC_MAX_REGULATOR];
+ uint32_t rgltr_op_mode[CAM_SOC_MAX_REGULATOR];
+ uint32_t rgltr_type[CAM_SOC_MAX_REGULATOR];
struct regulator *rgltr[CAM_SOC_MAX_REGULATOR];
+ uint32_t rgltr_delay[CAM_SOC_MAX_REGULATOR];
uint32_t num_clk;
const char *clk_name[CAM_SOC_MAX_CLK];
struct clk *clk[CAM_SOC_MAX_CLK];
- int32_t clk_rate[CAM_SOC_MAX_CLK];
+ int32_t clk_rate[CAM_MAX_VOTE][CAM_SOC_MAX_CLK];
int32_t src_clk_idx;
+ bool clk_level_valid[CAM_MAX_VOTE];
+
+ struct cam_soc_gpio_data *gpio_data;
+ struct cam_soc_pinctrl_info pinctrl_info;
void *soc_private;
};
@@ -159,6 +238,18 @@
((!__soc_info || __base_index >= __soc_info->num_reg_map) ? \
0 : __soc_info->reg_map[__base_index].size)
+/**
+ * cam_soc_util_get_level_from_string()
+ *
+ * @brief: Get the associated vote level for the input string
+ *
+ * @string: Input string to compare with.
+ * @level: Vote level corresponds to input string.
+ *
+ * @return: Success or failure
+ */
+int cam_soc_util_get_level_from_string(const char *string,
+ enum cam_vote_level *level);
/**
* cam_soc_util_get_dt_properties()
@@ -208,6 +299,9 @@
* TRUE: Enable all clocks in soc_info Now.
* False: Don't enable clocks Now. Driver will
* enable independently.
+ * @clk_level: Clock level to be applied.
+ * Applicable only if enable_clocks is true
+ * Valid range : 0 to (CAM_MAX_VOTE - 1)
* @enable_irq: Boolean flag:
* TRUE: Enable IRQ in soc_info Now.
* False: Don't enable IRQ Now. Driver will
@@ -216,7 +310,7 @@
* @return: Success or failure
*/
int cam_soc_util_enable_platform_resource(struct cam_hw_soc_info *soc_info,
- bool enable_clocks, bool enable_irq);
+ bool enable_clocks, enum cam_vote_level clk_level, bool enable_irq);
/**
* cam_soc_util_disable_platform_resource()
@@ -235,6 +329,49 @@
bool disable_clocks, bool disable_irq);
/**
+ * cam_soc_util_get_clk_round_rate()
+ *
+ * @brief: Get the rounded clock rate for the given clock's
+ * clock rate value
+ *
+ * @soc_info: Device soc information
+ * @clk_index: Clock index in soc_info for which round rate is needed
+ * @clk_rate: Input clock rate for which rounded rate is needed
+ *
+ * @return: Rounded clock rate
+ */
+long cam_soc_util_get_clk_round_rate(struct cam_hw_soc_info *soc_info,
+ uint32_t clk_index, unsigned long clk_rate);
+
+/**
+ * cam_soc_util_set_clk_flags()
+ *
+ * @brief: Camera SOC util to set the flags for a specified clock
+ *
+ * @soc_info: Device soc information
+ * @clk_index: Clock index in soc_info for which flags are to be set
+ * @flags: Flags to set
+ *
+ * @return: Success or Failure
+ */
+int cam_soc_util_set_clk_flags(struct cam_hw_soc_info *soc_info,
+ uint32_t clk_index, unsigned long flags);
+
+/**
+ * cam_soc_util_set_clk_rate()
+ *
+ * @brief: Set the rate on a given clock.
+ *
+ * @clk: Clock that needs to be set
+ * @clk_name: Clocks name associated with clk
+ * @clk_rate: Clocks rate associated with clk
+ *
+ * @return: success or failure
+ */
+int cam_soc_util_set_clk_rate(struct clk *clk, const char *clk_name,
+ int32_t clk_rate);
+
+/**
* cam_soc_util_clk_enable()
*
* @brief: Enable clock specified in params
@@ -249,6 +386,21 @@
int32_t clk_rate);
/**
+ * cam_soc_util_set_clk_rate_level()
+ *
+ * @brief: Apply clock rates for the requested level.
+ * This applies the new requested level for all
+ * the clocks listed in DT based on their values.
+ *
+ * @soc_info: Device soc information
+ * @clk_level: Clock level number to set
+ *
+ * @return: Success or failure
+ */
+int cam_soc_util_set_clk_rate_level(struct cam_hw_soc_info *soc_info,
+ enum cam_vote_level clk_level);
+
+/**
* cam_soc_util_clk_disable()
*
* @brief: Disable clock specified in params
@@ -283,6 +435,45 @@
int cam_soc_util_irq_disable(struct cam_hw_soc_info *soc_info);
/**
+ * cam_soc_util_regulator_enable()
+ *
+ * @brief: Enable single regulator
+ *
+ * @rgltr Regulator that needs to be turned ON
+ * @rgltr_name Associated Regulator name
+ * @rgltr_min_volt: Requested minimum voltage
+ * @rgltr_max_volt: Requested maximum voltage
+ * @rgltr_op_mode: Requested Load
+ * @rgltr_delay: Requested delay needed after enabling regulator
+ *
+ * @return: Success or failure
+ */
+int cam_soc_util_regulator_enable(struct regulator *rgltr,
+ const char *rgltr_name,
+ uint32_t rgltr_min_volt, uint32_t rgltr_max_volt,
+ uint32_t rgltr_op_mode, uint32_t rgltr_delay);
+
+/**
+ * cam_soc_util_regulator_disable()
+ *
+ * @brief: Disable single regulator
+ *
+ * @rgltr Regulator that needs to be turned OFF
+ * @rgltr_name Associated Regulator name
+ * @rgltr_min_volt: Requested minimum voltage
+ * @rgltr_max_volt: Requested maximum voltage
+ * @rgltr_op_mode: Requested Load
+ * @rgltr_delay: Requested delay needed after enabling regulator
+ *
+ * @return: Success or failure
+ */
+int cam_soc_util_regulator_disable(struct regulator *rgltr,
+ const char *rgltr_name,
+ uint32_t rgltr_min_volt, uint32_t rgltr_max_volt,
+ uint32_t rgltr_op_mode, uint32_t rgltr_delay);
+
+
+/**
* cam_soc_util_w()
*
* @brief: Camera SOC util for register write
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_trace.c b/drivers/media/platform/msm/camera/cam_utils/cam_trace.c
new file mode 100644
index 0000000..08129f3
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_trace.c
@@ -0,0 +1,16 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* Instantiate tracepoints */
+#define CREATE_TRACE_POINTS
+#include "cam_trace.h"
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_trace.h b/drivers/media/platform/msm/camera/cam_utils/cam_trace.h
new file mode 100644
index 0000000..2e9e61f
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_trace.h
@@ -0,0 +1,223 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#if !defined(_CAM_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _CAM_TRACE_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM camera
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE cam_trace
+
+#include <linux/tracepoint.h>
+#include <media/cam_req_mgr.h>
+#include "cam_req_mgr_core.h"
+#include "cam_req_mgr_interface.h"
+#include "cam_context.h"
+
+TRACE_EVENT(cam_context_state,
+ TP_PROTO(const char *name, struct cam_context *ctx),
+ TP_ARGS(name, ctx),
+ TP_STRUCT__entry(
+ __field(void*, ctx)
+ __field(uint32_t, state)
+ __string(name, name)
+ ),
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->state = ctx->state;
+ __assign_str(name, name);
+ ),
+ TP_printk(
+ "%s: State ctx=%p ctx_state=%u",
+ __get_str(name), __entry->ctx, __entry->state
+ )
+);
+
+TRACE_EVENT(cam_isp_activated_irq,
+ TP_PROTO(struct cam_context *ctx, unsigned int substate,
+ unsigned int event, uint64_t timestamp),
+ TP_ARGS(ctx, substate, event, timestamp),
+ TP_STRUCT__entry(
+ __field(void*, ctx)
+ __field(uint32_t, state)
+ __field(uint32_t, substate)
+ __field(uint32_t, event)
+ __field(uint64_t, ts)
+ ),
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->state = ctx->state;
+ __entry->substate = substate;
+ __entry->event = event;
+ __entry->ts = timestamp;
+ ),
+ TP_printk(
+ "ISP: IRQ ctx=%p ctx_state=%u substate=%u event=%u ts=%llu",
+ __entry->ctx, __entry->state, __entry->substate,
+ __entry->event, __entry->ts
+ )
+);
+
+TRACE_EVENT(cam_buf_done,
+ TP_PROTO(const char *ctx_type, struct cam_context *ctx,
+ struct cam_ctx_request *req),
+ TP_ARGS(ctx_type, ctx, req),
+ TP_STRUCT__entry(
+ __string(ctx_type, ctx_type)
+ __field(void*, ctx)
+ __field(uint64_t, request)
+ ),
+ TP_fast_assign(
+ __assign_str(ctx_type, ctx_type);
+ __entry->ctx = ctx;
+ __entry->request = req->request_id;
+ ),
+ TP_printk(
+ "%5s: BufDone ctx=%p request=%llu",
+ __get_str(ctx_type), __entry->ctx, __entry->request
+ )
+);
+
+TRACE_EVENT(cam_apply_req,
+ TP_PROTO(const char *entity, struct cam_req_mgr_apply_request *req),
+ TP_ARGS(entity, req),
+ TP_STRUCT__entry(
+ __string(entity, entity)
+ __field(uint64_t, req_id)
+ ),
+ TP_fast_assign(
+ __assign_str(entity, entity);
+ __entry->req_id = req->request_id;
+ ),
+ TP_printk(
+ "%8s: ApplyRequest request=%llu",
+ __get_str(entity), __entry->req_id
+ )
+);
+
+TRACE_EVENT(cam_flush_req,
+ TP_PROTO(struct cam_req_mgr_core_link *link,
+ struct cam_req_mgr_flush_info *info),
+ TP_ARGS(link, info),
+ TP_STRUCT__entry(
+ __field(uint32_t, type)
+ __field(int64_t, req_id)
+ __field(void*, link)
+ __field(void*, session)
+ ),
+ TP_fast_assign(
+ __entry->type = info->flush_type;
+ __entry->req_id = info->req_id;
+ __entry->link = link;
+ __entry->session = link->parent;
+ ),
+ TP_printk(
+ "FlushRequest type=%u request=%llu link=%pK session=%pK",
+ __entry->type, __entry->req_id, __entry->link,
+ __entry->session
+ )
+);
+
+TRACE_EVENT(cam_req_mgr_connect_device,
+ TP_PROTO(struct cam_req_mgr_core_link *link,
+ struct cam_req_mgr_device_info *info),
+ TP_ARGS(link, info),
+ TP_STRUCT__entry(
+ __string(name, info->name)
+ __field(uint32_t, id)
+ __field(uint32_t, delay)
+ __field(void*, link)
+ __field(void*, session)
+ ),
+ TP_fast_assign(
+ __assign_str(name, info->name);
+ __entry->id = info->dev_id;
+ __entry->delay = info->p_delay;
+ __entry->link = link;
+ __entry->session = link->parent;
+ ),
+ TP_printk(
+ "ReqMgr Connect name=%s id=%u pd=%d link=%pK session=%pK",
+ __get_str(name), __entry->id, __entry->delay,
+ __entry->link, __entry->session
+ )
+);
+
+TRACE_EVENT(cam_req_mgr_apply_request,
+ TP_PROTO(struct cam_req_mgr_core_link *link,
+ struct cam_req_mgr_apply_request *req,
+ struct cam_req_mgr_connected_device *dev),
+ TP_ARGS(link, req, dev),
+ TP_STRUCT__entry(
+ __string(name, dev->dev_info.name)
+ __field(uint32_t, dev_id)
+ __field(uint64_t, req_id)
+ __field(void*, link)
+ __field(void*, session)
+ ),
+ TP_fast_assign(
+ __assign_str(name, dev->dev_info.name);
+ __entry->dev_id = dev->dev_info.dev_id;
+ __entry->req_id = req->request_id;
+ __entry->link = link;
+ __entry->session = link->parent;
+ ),
+ TP_printk(
+ "ReqMgr ApplyRequest devname=%s devid=%u request=%lld link=%pK session=%pK",
+ __get_str(name), __entry->dev_id, __entry->req_id,
+ __entry->link, __entry->session
+ )
+);
+
+TRACE_EVENT(cam_req_mgr_add_req,
+ TP_PROTO(struct cam_req_mgr_core_link *link,
+ int idx, struct cam_req_mgr_add_request *add_req,
+ struct cam_req_mgr_req_tbl *tbl,
+ struct cam_req_mgr_connected_device *dev),
+ TP_ARGS(link, idx, add_req, tbl, dev),
+ TP_STRUCT__entry(
+ __string(name, dev->dev_info.name)
+ __field(uint32_t, dev_id)
+ __field(uint64_t, req_id)
+ __field(uint32_t, slot_id)
+ __field(uint32_t, delay)
+ __field(uint32_t, readymap)
+ __field(uint32_t, devicemap)
+ __field(void*, link)
+ __field(void*, session)
+ ),
+ TP_fast_assign(
+ __assign_str(name, dev->dev_info.name);
+ __entry->dev_id = dev->dev_info.dev_id;
+ __entry->req_id = add_req->req_id;
+ __entry->slot_id = idx;
+ __entry->delay = tbl->pd;
+ __entry->readymap = tbl->slot[idx].req_ready_map;
+ __entry->devicemap = tbl->dev_mask;
+ __entry->link = link;
+ __entry->session = link->parent;
+ ),
+ TP_printk(
+ "ReqMgr AddRequest devname=%s devid=%d request=%lld slot=%d pd=%d readymap=%x devicemap=%d link=%pK session=%pK",
+ __get_str(name), __entry->dev_id, __entry->req_id,
+ __entry->slot_id, __entry->delay, __entry->readymap,
+ __entry->devicemap, __entry->link, __entry->session
+ )
+);
+#endif /* _CAM_TRACE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/media/platform/msm/camera/icp/Makefile b/drivers/media/platform/msm/camera/icp/Makefile
deleted file mode 100644
index c42b162..0000000
--- a/drivers/media/platform/msm/camera/icp/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
-ccflags-y += -Idrivers/media/platform/msm/camera/icp
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/include
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/fw_inc
-ccflags-y += -Idrivers/media/platform/msm/camera
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
-
-
-obj-$(CONFIG_SPECTRA_CAMERA) += icp_hw/
-obj-$(CONFIG_SPECTRA_CAMERA) += cam_icp_subdev.o cam_icp_context.o hfi.o
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/Makefile b/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/Makefile
deleted file mode 100644
index a4df0b8..0000000
--- a/drivers/media/platform/msm/camera/icp/icp_hw/a5_hw/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
-ccflags-y += -Idrivers/media/platform/msm/camera/icp
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/include
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/a5_hw
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/fw_inc
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
-
-obj-$(CONFIG_SPECTRA_CAMERA) += a5_dev.o a5_core.o a5_soc.o
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/Makefile b/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/Makefile
deleted file mode 100644
index 6aeb5f1..0000000
--- a/drivers/media/platform/msm/camera/icp/icp_hw/bps_hw/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
-ccflags-y += -Idrivers/media/platform/msm/camera/icp
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/include
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/bps_hw
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/fw_inc
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
-
-obj-$(CONFIG_SPECTRA_CAMERA) += bps_dev.o bps_core.o bps_soc.o
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/Makefile b/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/Makefile
deleted file mode 100644
index 8af20ae..0000000
--- a/drivers/media/platform/msm/camera/icp/icp_hw/ipe_hw/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
-ccflags-y += -Idrivers/media/platform/msm/camera/icp
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/include
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/include
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/icp_hw/ipe_hw
-ccflags-y += -Idrivers/media/platform/msm/camera/icp/fw_inc
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
-
-obj-$(CONFIG_SPECTRA_CAMERA) += ipe_dev.o ipe_core.o ipe_soc.o
diff --git a/drivers/media/platform/msm/dvb/Kconfig b/drivers/media/platform/msm/dvb/Kconfig
new file mode 100644
index 0000000..e205c81
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/Kconfig
@@ -0,0 +1,10 @@
+config DVB_MPQ
+ tristate "Qualcomm Technologies Inc Multimedia Processor DVB Adapter"
+ depends on ARCH_QCOM && DVB_CORE
+ default n
+
+ help
+ Support for Qualcomm Technologies Inc MPQ based DVB adapter.
+ Say Y or M if you own such a device and want to use it.
+
+source "drivers/media/platform/msm/dvb/demux/Kconfig"
diff --git a/drivers/media/platform/msm/dvb/Makefile b/drivers/media/platform/msm/dvb/Makefile
new file mode 100644
index 0000000..862ebca
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_DVB_MPQ) += adapter/
+obj-$(CONFIG_DVB_MPQ_DEMUX) += demux/
diff --git a/drivers/media/platform/msm/dvb/adapter/Makefile b/drivers/media/platform/msm/dvb/adapter/Makefile
new file mode 100644
index 0000000..662bf99
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/adapter/Makefile
@@ -0,0 +1,7 @@
+ccflags-y += -Idrivers/media/dvb-core/
+ccflags-y += -Idrivers/media/platform/msm/dvb/include/
+ccflags-y += -Idrivers/media/platform/msm/dvb/demux/
+
+obj-$(CONFIG_DVB_MPQ) += mpq-adapter.o
+
+mpq-adapter-y := mpq_adapter.o mpq_stream_buffer.o
diff --git a/drivers/media/platform/msm/dvb/adapter/mpq_adapter.c b/drivers/media/platform/msm/dvb/adapter/mpq_adapter.c
new file mode 100644
index 0000000..1ccb98f
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/adapter/mpq_adapter.c
@@ -0,0 +1,208 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include "mpq_adapter.h"
+#include "mpq_dvb_debug.h"
+
+
+DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
+
+/* data-structure holding MPQ adapter information */
+static struct
+{
+ /* MPQ adapter registered to dvb-core */
+ struct dvb_adapter adapter;
+
+ /* mutex protect against the data-structure */
+ struct mutex mutex;
+
+ /* List of stream interfaces registered to the MPQ adapter */
+ struct {
+ /* pointer to the stream buffer used for data tunneling */
+ struct mpq_streambuffer *stream_buffer;
+
+ /* callback triggered when the stream interface is registered */
+ mpq_adapter_stream_if_callback callback;
+
+ /* parameter passed to the callback function */
+ void *user_param;
+ } interfaces[MPQ_ADAPTER_MAX_NUM_OF_INTERFACES];
+} mpq_info;
+
+
+/**
+ * Initialize MPQ DVB adapter module.
+ *
+ * Return error status
+ */
+static int __init mpq_adapter_init(void)
+{
+ int i;
+ int result;
+
+ MPQ_DVB_DBG_PRINT("%s executed\n", __func__);
+
+ mutex_init(&mpq_info.mutex);
+
+ /* reset stream interfaces list */
+ for (i = 0; i < MPQ_ADAPTER_MAX_NUM_OF_INTERFACES; i++) {
+ mpq_info.interfaces[i].stream_buffer = NULL;
+ mpq_info.interfaces[i].callback = NULL;
+ }
+
+ /* register a new dvb-adapter to dvb-core */
+ result = dvb_register_adapter(&mpq_info.adapter,
+ "Qualcomm technologies, inc. DVB adapter",
+ THIS_MODULE, NULL, adapter_nr);
+ if (result < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: dvb_register_adapter failed, errno %d\n",
+ __func__,
+ result);
+ }
+
+ return result;
+}
+
+
+/**
+ * Cleanup MPQ DVB adapter module.
+ */
+static void __exit mpq_adapter_exit(void)
+{
+ MPQ_DVB_DBG_PRINT("%s executed\n", __func__);
+
+ /* un-register adapter from dvb-core */
+ dvb_unregister_adapter(&mpq_info.adapter);
+ mutex_destroy(&mpq_info.mutex);
+}
+
+struct dvb_adapter *mpq_adapter_get(void)
+{
+ return &mpq_info.adapter;
+}
+EXPORT_SYMBOL(mpq_adapter_get);
+
+
+int mpq_adapter_register_stream_if(
+ enum mpq_adapter_stream_if interface_id,
+ struct mpq_streambuffer *stream_buffer)
+{
+ int ret;
+
+ if (interface_id >= MPQ_ADAPTER_MAX_NUM_OF_INTERFACES) {
+ ret = -EINVAL;
+ goto register_failed;
+ }
+
+ if (mutex_lock_interruptible(&mpq_info.mutex)) {
+ ret = -ERESTARTSYS;
+ goto register_failed;
+ }
+
+ if (mpq_info.interfaces[interface_id].stream_buffer != NULL) {
+ /* already registered interface */
+ ret = -EINVAL;
+ goto register_failed_unlock_mutex;
+ }
+
+ mpq_info.interfaces[interface_id].stream_buffer = stream_buffer;
+ mutex_unlock(&mpq_info.mutex);
+
+ /*
+ * If callback is installed, trigger it to notify that
+ * stream interface was registered.
+ */
+ if (mpq_info.interfaces[interface_id].callback != NULL) {
+ mpq_info.interfaces[interface_id].callback(
+ interface_id,
+ mpq_info.interfaces[interface_id].user_param);
+ }
+
+ return 0;
+
+register_failed_unlock_mutex:
+ mutex_unlock(&mpq_info.mutex);
+register_failed:
+ return ret;
+}
+EXPORT_SYMBOL(mpq_adapter_register_stream_if);
+
+
+int mpq_adapter_unregister_stream_if(
+ enum mpq_adapter_stream_if interface_id)
+{
+ if (interface_id >= MPQ_ADAPTER_MAX_NUM_OF_INTERFACES)
+ return -EINVAL;
+
+ if (mutex_lock_interruptible(&mpq_info.mutex))
+ return -ERESTARTSYS;
+
+ /* clear the registered interface */
+ mpq_info.interfaces[interface_id].stream_buffer = NULL;
+
+ mutex_unlock(&mpq_info.mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(mpq_adapter_unregister_stream_if);
+
+
+int mpq_adapter_get_stream_if(
+ enum mpq_adapter_stream_if interface_id,
+ struct mpq_streambuffer **stream_buffer)
+{
+ if ((interface_id >= MPQ_ADAPTER_MAX_NUM_OF_INTERFACES) ||
+ (stream_buffer == NULL))
+ return -EINVAL;
+
+ if (mutex_lock_interruptible(&mpq_info.mutex))
+ return -ERESTARTSYS;
+
+ *stream_buffer = mpq_info.interfaces[interface_id].stream_buffer;
+
+ mutex_unlock(&mpq_info.mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(mpq_adapter_get_stream_if);
+
+
+int mpq_adapter_notify_stream_if(
+ enum mpq_adapter_stream_if interface_id,
+ mpq_adapter_stream_if_callback callback,
+ void *user_param)
+{
+ if (interface_id >= MPQ_ADAPTER_MAX_NUM_OF_INTERFACES)
+ return -EINVAL;
+
+ if (mutex_lock_interruptible(&mpq_info.mutex))
+ return -ERESTARTSYS;
+
+ mpq_info.interfaces[interface_id].callback = callback;
+ mpq_info.interfaces[interface_id].user_param = user_param;
+
+ mutex_unlock(&mpq_info.mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(mpq_adapter_notify_stream_if);
+
+
+module_init(mpq_adapter_init);
+module_exit(mpq_adapter_exit);
+
+MODULE_DESCRIPTION("Qualcomm Technologies Inc. MPQ adapter");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/dvb/adapter/mpq_stream_buffer.c b/drivers/media/platform/msm/dvb/adapter/mpq_stream_buffer.c
new file mode 100644
index 0000000..4f84c58
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/adapter/mpq_stream_buffer.c
@@ -0,0 +1,827 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/uaccess.h>
+#include "mpq_dvb_debug.h"
+#include "mpq_stream_buffer.h"
+
+
+int mpq_streambuffer_init(
+ struct mpq_streambuffer *sbuff,
+ enum mpq_streambuffer_mode mode,
+ struct mpq_streambuffer_buffer_desc *data_buffers,
+ u32 data_buff_num,
+ void *packet_buff,
+ size_t packet_buff_size)
+{
+ if ((sbuff == NULL) || (data_buffers == NULL) ||
+ (packet_buff == NULL) || (data_buff_num == 0))
+ return -EINVAL;
+
+ if (data_buff_num > 1) {
+ if (mode != MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR)
+ return -EINVAL;
+ /* Linear buffer group */
+ dvb_ringbuffer_init(
+ &sbuff->raw_data,
+ data_buffers,
+ data_buff_num *
+ sizeof(struct mpq_streambuffer_buffer_desc));
+ } else {
+ if (mode != MPQ_STREAMBUFFER_BUFFER_MODE_RING)
+ return -EINVAL;
+ /* Single ring-buffer */
+ dvb_ringbuffer_init(&sbuff->raw_data,
+ data_buffers[0].base, data_buffers[0].size);
+ }
+ sbuff->mode = mode;
+ sbuff->buffers = data_buffers;
+ sbuff->pending_buffers_count = 0;
+ sbuff->buffers_num = data_buff_num;
+ sbuff->cb = NULL;
+ dvb_ringbuffer_init(&sbuff->packet_data, packet_buff, packet_buff_size);
+
+ return 0;
+}
+EXPORT_SYMBOL(mpq_streambuffer_init);
+
+void mpq_streambuffer_terminate(struct mpq_streambuffer *sbuff)
+{
+ spin_lock(&sbuff->packet_data.lock);
+ spin_lock(&sbuff->raw_data.lock);
+ sbuff->packet_data.error = -ENODEV;
+ sbuff->raw_data.error = -ENODEV;
+ spin_unlock(&sbuff->raw_data.lock);
+ spin_unlock(&sbuff->packet_data.lock);
+
+ wake_up_all(&sbuff->raw_data.queue);
+ wake_up_all(&sbuff->packet_data.queue);
+}
+EXPORT_SYMBOL(mpq_streambuffer_terminate);
+
+ssize_t mpq_streambuffer_pkt_next(
+ struct mpq_streambuffer *sbuff,
+ ssize_t idx, size_t *pktlen)
+{
+ ssize_t packet_idx;
+
+ spin_lock(&sbuff->packet_data.lock);
+
+ /* buffer was released, return no packet available */
+ if (sbuff->packet_data.error == -ENODEV) {
+ spin_unlock(&sbuff->packet_data.lock);
+ return -ENODEV;
+ }
+
+ packet_idx = dvb_ringbuffer_pkt_next(&sbuff->packet_data, idx, pktlen);
+ spin_unlock(&sbuff->packet_data.lock);
+
+ return packet_idx;
+}
+EXPORT_SYMBOL(mpq_streambuffer_pkt_next);
+
+
+ssize_t mpq_streambuffer_pkt_read(
+ struct mpq_streambuffer *sbuff,
+ size_t idx,
+ struct mpq_streambuffer_packet_header *packet,
+ u8 *user_data)
+{
+ size_t ret;
+ size_t read_len;
+
+ spin_lock(&sbuff->packet_data.lock);
+
+ /* buffer was released, return no packet available */
+ if (sbuff->packet_data.error == -ENODEV) {
+ spin_unlock(&sbuff->packet_data.lock);
+ return -ENODEV;
+ }
+
+ /* read-out the packet header first */
+ ret = dvb_ringbuffer_pkt_read(
+ &sbuff->packet_data, idx, 0,
+ (u8 *)packet,
+ sizeof(struct mpq_streambuffer_packet_header));
+
+ /* verify length, at least packet header should exist */
+ if (ret != sizeof(struct mpq_streambuffer_packet_header)) {
+ spin_unlock(&sbuff->packet_data.lock);
+ return -EINVAL;
+ }
+
+ read_len = ret;
+
+ /* read-out private user-data if there are such */
+ if ((packet->user_data_len) && (user_data != NULL)) {
+ ret = dvb_ringbuffer_pkt_read(
+ &sbuff->packet_data,
+ idx,
+ sizeof(struct mpq_streambuffer_packet_header),
+ user_data,
+ packet->user_data_len);
+
+ if (ret < 0) {
+ spin_unlock(&sbuff->packet_data.lock);
+ return ret;
+ }
+
+ read_len += ret;
+ }
+
+ spin_unlock(&sbuff->packet_data.lock);
+
+ return read_len;
+}
+EXPORT_SYMBOL(mpq_streambuffer_pkt_read);
+
+
+int mpq_streambuffer_pkt_dispose(
+ struct mpq_streambuffer *sbuff,
+ size_t idx,
+ int dispose_data)
+{
+ int ret;
+ struct mpq_streambuffer_packet_header packet;
+
+ if (sbuff == NULL)
+ return -EINVAL;
+
+ spin_lock(&sbuff->packet_data.lock);
+
+ /* check if buffer was released */
+ if (sbuff->packet_data.error == -ENODEV) {
+ spin_unlock(&sbuff->packet_data.lock);
+ return -ENODEV;
+ }
+
+ /* read-out the packet header first */
+ ret = dvb_ringbuffer_pkt_read(&sbuff->packet_data, idx,
+ 0,
+ (u8 *)&packet,
+ sizeof(struct mpq_streambuffer_packet_header));
+
+ spin_unlock(&sbuff->packet_data.lock);
+
+ if (ret != sizeof(struct mpq_streambuffer_packet_header))
+ return -EINVAL;
+
+ if ((sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR) ||
+ (dispose_data)) {
+ /* Advance the read pointer in the raw-data buffer first */
+ ret = mpq_streambuffer_data_read_dispose(sbuff,
+ packet.raw_data_len);
+ if (ret != 0)
+ return ret;
+ }
+
+ spin_lock(&sbuff->packet_data.lock);
+ spin_lock(&sbuff->raw_data.lock);
+
+ /* check if buffer was released */
+ if ((sbuff->packet_data.error == -ENODEV) ||
+ (sbuff->raw_data.error == -ENODEV)) {
+ spin_unlock(&sbuff->raw_data.lock);
+ spin_unlock(&sbuff->packet_data.lock);
+ return -ENODEV;
+ }
+
+ /* Move read pointer to the next linear buffer for subsequent reads */
+ if ((sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR) &&
+ (packet.raw_data_len > 0)) {
+ struct mpq_streambuffer_buffer_desc *desc;
+
+ desc = (struct mpq_streambuffer_buffer_desc *)
+ &sbuff->raw_data.data[sbuff->raw_data.pread];
+
+ desc->write_ptr = 0;
+ desc->read_ptr = 0;
+
+ DVB_RINGBUFFER_SKIP(&sbuff->raw_data,
+ sizeof(struct mpq_streambuffer_buffer_desc));
+ sbuff->pending_buffers_count--;
+
+ wake_up_all(&sbuff->raw_data.queue);
+ }
+
+ /* Now clear the packet from the packet header */
+ dvb_ringbuffer_pkt_dispose(&sbuff->packet_data, idx);
+
+ spin_unlock(&sbuff->raw_data.lock);
+ spin_unlock(&sbuff->packet_data.lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(mpq_streambuffer_pkt_dispose);
+
+/*
+ * mpq_streambuffer_pkt_write() - Write a packet header plus optional user
+ * private data into the stream buffer's packet-data ring buffer.
+ *
+ * @sbuff:	stream buffer object
+ * @packet:	packet header to write (raw_data_len/user_data_len are read)
+ * @user_data:	caller's private data appended after the header
+ *
+ * Return: ring-buffer index of the new packet on success, -EINVAL on bad
+ * arguments, -ENODEV if the buffer was released, -ENOSPC when there is no
+ * room for the header or (linear mode) no free linear buffer descriptor.
+ *
+ * In linear mode, a packet that carries raw data also advances the raw-data
+ * write pointer to the next linear buffer descriptor.
+ */
+int mpq_streambuffer_pkt_write(
+	struct mpq_streambuffer *sbuff,
+	struct mpq_streambuffer_packet_header *packet,
+	u8 *user_data)
+{
+	ssize_t idx;
+	size_t len;
+
+	if ((sbuff == NULL) || (packet == NULL))
+		return -EINVAL;
+
+	spin_lock(&sbuff->packet_data.lock);
+
+	/* check if buffer was released */
+	if (sbuff->packet_data.error == -ENODEV) {
+		spin_unlock(&sbuff->packet_data.lock);
+		return -ENODEV;
+	}
+
+	/* Make sure we can go to the next linear buffer */
+	if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR &&
+		sbuff->pending_buffers_count == sbuff->buffers_num &&
+		packet->raw_data_len) {
+		spin_unlock(&sbuff->packet_data.lock);
+		return -ENOSPC;
+	}
+
+	/* Total payload: fixed header followed by caller's private data */
+	len = sizeof(struct mpq_streambuffer_packet_header) +
+		packet->user_data_len;
+
+	/* Make sure enough space available for packet header */
+	if (dvb_ringbuffer_free(&sbuff->packet_data) <
+		(len + DVB_RINGBUFFER_PKTHDRSIZE)) {
+		spin_unlock(&sbuff->packet_data.lock);
+		return -ENOSPC;
+	}
+
+	/* Starting writing packet header */
+	idx = dvb_ringbuffer_pkt_start(&sbuff->packet_data, len);
+
+	/* Write non-user private data header */
+	dvb_ringbuffer_write(&sbuff->packet_data,
+		(u8 *)packet,
+		sizeof(struct mpq_streambuffer_packet_header));
+
+	/* Write user's own private data header */
+	dvb_ringbuffer_write(&sbuff->packet_data,
+		user_data,
+		packet->user_data_len);
+
+	dvb_ringbuffer_pkt_close(&sbuff->packet_data, idx);
+
+	/* Move write pointer to next linear buffer for subsequent writes */
+	/*
+	 * NOTE(review): raw_data is modified here while holding only
+	 * packet_data.lock (compare pkt_dispose, which takes both locks);
+	 * presumably raw-data writers are serialized elsewhere - confirm.
+	 */
+	if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR &&
+		packet->raw_data_len) {
+		DVB_RINGBUFFER_PUSH(&sbuff->raw_data,
+			sizeof(struct mpq_streambuffer_buffer_desc));
+		sbuff->pending_buffers_count++;
+	}
+
+	spin_unlock(&sbuff->packet_data.lock);
+	wake_up_all(&sbuff->packet_data.queue);
+
+	return idx;
+}
+EXPORT_SYMBOL(mpq_streambuffer_pkt_write);
+
+/*
+ * mpq_streambuffer_data_write() - Copy @len bytes of raw data into the
+ * stream buffer's raw-data area.
+ *
+ * Ring mode: data is appended to the raw-data ring buffer and readers are
+ * woken. Linear mode: data is copied into the current linear buffer
+ * descriptor at its write pointer (no wake-up; data becomes visible to the
+ * reader via a subsequent packet write).
+ *
+ * Return: number of bytes written on success, -EINVAL on bad arguments,
+ * -ENODEV if the buffer was released, -ENOSPC when there is not enough
+ * room, -EPERM when the buffer is secure (not mapped into kernel memory).
+ */
+ssize_t mpq_streambuffer_data_write(
+	struct mpq_streambuffer *sbuff,
+	const u8 *buf, size_t len)
+{
+	int res;
+
+	if ((sbuff == NULL) || (buf == NULL))
+		return -EINVAL;
+
+	spin_lock(&sbuff->raw_data.lock);
+
+	/* check if buffer was released */
+	if (sbuff->raw_data.error == -ENODEV) {
+		spin_unlock(&sbuff->raw_data.lock);
+		return -ENODEV;
+	}
+
+	if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) {
+		if (unlikely(dvb_ringbuffer_free(&sbuff->raw_data) < len)) {
+			spin_unlock(&sbuff->raw_data.lock);
+			return -ENOSPC;
+		}
+		/*
+		 * Secure buffers are not permitted to be mapped into kernel
+		 * memory, and so buffer base address may be NULL
+		 */
+		if (sbuff->raw_data.data == NULL) {
+			spin_unlock(&sbuff->raw_data.lock);
+			return -EPERM;
+		}
+		res = dvb_ringbuffer_write(&sbuff->raw_data, buf, len);
+		wake_up_all(&sbuff->raw_data.queue);
+	} else {
+		/* Linear buffer group */
+		struct mpq_streambuffer_buffer_desc *desc;
+
+		/* Current write-side descriptor lives at pwrite */
+		desc = (struct mpq_streambuffer_buffer_desc *)
+			&sbuff->raw_data.data[sbuff->raw_data.pwrite];
+
+		/*
+		 * Secure buffers are not permitted to be mapped into kernel
+		 * memory, and so buffer base address may be NULL
+		 */
+		if (desc->base == NULL) {
+			spin_unlock(&sbuff->raw_data.lock);
+			return -EPERM;
+		}
+
+		if ((sbuff->pending_buffers_count == sbuff->buffers_num) ||
+			((desc->size - desc->write_ptr) < len)) {
+			MPQ_DVB_DBG_PRINT(
+				"%s: No space available! %d pending buffers out of %d total buffers. write_ptr=%d, size=%d\n",
+				__func__,
+				sbuff->pending_buffers_count,
+				sbuff->buffers_num,
+				desc->write_ptr,
+				desc->size);
+			spin_unlock(&sbuff->raw_data.lock);
+			return -ENOSPC;
+		}
+		memcpy(desc->base + desc->write_ptr, buf, len);
+		desc->write_ptr += len;
+		res = len;
+	}
+
+	spin_unlock(&sbuff->raw_data.lock);
+	return res;
+}
+EXPORT_SYMBOL(mpq_streambuffer_data_write);
+
+
+/*
+ * mpq_streambuffer_data_write_deposit() - Account for @len bytes of raw
+ * data that were already written into the buffer by an external producer
+ * (no data is copied here, only the write pointer is advanced).
+ *
+ * Return: 0 on success, -EINVAL on bad arguments, -ENODEV if the buffer
+ * was released, -ENOSPC when there is not enough room for @len bytes.
+ */
+int mpq_streambuffer_data_write_deposit(
+	struct mpq_streambuffer *sbuff,
+	size_t len)
+{
+	if (sbuff == NULL)
+		return -EINVAL;
+
+	spin_lock(&sbuff->raw_data.lock);
+
+	/* check if buffer was released */
+	if (sbuff->raw_data.error == -ENODEV) {
+		spin_unlock(&sbuff->raw_data.lock);
+		return -ENODEV;
+	}
+
+	if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) {
+		if (unlikely(dvb_ringbuffer_free(&sbuff->raw_data) < len)) {
+			spin_unlock(&sbuff->raw_data.lock);
+			return -ENOSPC;
+		}
+
+		DVB_RINGBUFFER_PUSH(&sbuff->raw_data, len);
+		wake_up_all(&sbuff->raw_data.queue);
+	} else {
+		/* Linear buffer group */
+		struct mpq_streambuffer_buffer_desc *desc =
+			(struct mpq_streambuffer_buffer_desc *)
+			&sbuff->raw_data.data[sbuff->raw_data.pwrite];
+
+		if ((sbuff->pending_buffers_count == sbuff->buffers_num) ||
+			((desc->size - desc->write_ptr) < len)) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: No space available!\n",
+				__func__);
+			spin_unlock(&sbuff->raw_data.lock);
+			return -ENOSPC;
+		}
+		/* Advance over the externally-written bytes */
+		desc->write_ptr += len;
+	}
+
+	spin_unlock(&sbuff->raw_data.lock);
+	return 0;
+}
+EXPORT_SYMBOL(mpq_streambuffer_data_write_deposit);
+
+
+/*
+ * mpq_streambuffer_data_read() - Read up to @len bytes of raw data into a
+ * kernel buffer. The request is truncated to the amount available.
+ *
+ * Return: number of bytes actually read, -EINVAL on bad arguments,
+ * -ENODEV if the buffer was released, -EPERM when the buffer is secure
+ * (not mapped into kernel memory).
+ *
+ * If a dispose callback was registered it is invoked (outside the lock)
+ * with the offset the data was consumed from.
+ */
+ssize_t mpq_streambuffer_data_read(
+	struct mpq_streambuffer *sbuff,
+	u8 *buf, size_t len)
+{
+	ssize_t actual_len = 0;
+	u32 offset;
+
+	if ((sbuff == NULL) || (buf == NULL))
+		return -EINVAL;
+
+	spin_lock(&sbuff->raw_data.lock);
+
+	/* check if buffer was released */
+	if (sbuff->raw_data.error == -ENODEV) {
+		spin_unlock(&sbuff->raw_data.lock);
+		return -ENODEV;
+	}
+
+	if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) {
+		/*
+		 * Secure buffers are not permitted to be mapped into kernel
+		 * memory, and so buffer base address may be NULL
+		 */
+		if (sbuff->raw_data.data == NULL) {
+			spin_unlock(&sbuff->raw_data.lock);
+			return -EPERM;
+		}
+
+		offset = sbuff->raw_data.pread;
+		/* Clamp the read to what is currently available */
+		actual_len = dvb_ringbuffer_avail(&sbuff->raw_data);
+		if (actual_len < len)
+			len = actual_len;
+		if (len)
+			dvb_ringbuffer_read(&sbuff->raw_data, buf, len);
+
+		wake_up_all(&sbuff->raw_data.queue);
+	} else {
+		/* Linear buffer group */
+		struct mpq_streambuffer_buffer_desc *desc;
+
+		desc = (struct mpq_streambuffer_buffer_desc *)
+			&sbuff->raw_data.data[sbuff->raw_data.pread];
+
+		/*
+		 * Secure buffers are not permitted to be mapped into kernel
+		 * memory, and so buffer base address may be NULL
+		 */
+		if (desc->base == NULL) {
+			spin_unlock(&sbuff->raw_data.lock);
+			return -EPERM;
+		}
+
+		/* Clamp to the unread portion of the current descriptor */
+		actual_len = (desc->write_ptr - desc->read_ptr);
+		if (actual_len < len)
+			len = actual_len;
+		memcpy(buf, desc->base + desc->read_ptr, len);
+		offset = desc->read_ptr;
+		desc->read_ptr += len;
+	}
+
+	spin_unlock(&sbuff->raw_data.lock);
+
+	/* Notify the registered owner that data was consumed */
+	if (sbuff->cb)
+		sbuff->cb(sbuff, offset, len, sbuff->cb_user_data);
+
+	return len;
+}
+EXPORT_SYMBOL(mpq_streambuffer_data_read);
+
+
+/*
+ * mpq_streambuffer_data_read_user() - Same as mpq_streambuffer_data_read()
+ * but copies into a user-space buffer.
+ *
+ * NOTE(review): unlike the other accessors in this file, no spinlock is
+ * taken here - presumably because copy_to_user() may fault/sleep and must
+ * not run under a spinlock. Confirm that callers serialize this against
+ * buffer release and concurrent readers.
+ *
+ * Return: number of bytes read, -EINVAL on bad arguments, -ENODEV if the
+ * buffer was released, -EPERM for secure (unmapped) buffers, -EFAULT when
+ * the user copy fails (linear mode; read pointer is left unchanged).
+ */
+ssize_t mpq_streambuffer_data_read_user(
+	struct mpq_streambuffer *sbuff,
+	u8 __user *buf, size_t len)
+{
+	ssize_t actual_len = 0;
+	u32 offset;
+
+	if ((sbuff == NULL) || (buf == NULL))
+		return -EINVAL;
+
+	/* check if buffer was released */
+	if (sbuff->raw_data.error == -ENODEV)
+		return -ENODEV;
+
+	if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) {
+		/*
+		 * Secure buffers are not permitted to be mapped into kernel
+		 * memory, and so buffer base address may be NULL
+		 */
+		if (sbuff->raw_data.data == NULL)
+			return -EPERM;
+
+		offset = sbuff->raw_data.pread;
+		/* Clamp the read to what is currently available */
+		actual_len = dvb_ringbuffer_avail(&sbuff->raw_data);
+		if (actual_len < len)
+			len = actual_len;
+		if (len)
+			dvb_ringbuffer_read_user(&sbuff->raw_data, buf, len);
+		wake_up_all(&sbuff->raw_data.queue);
+	} else {
+		/* Linear buffer group */
+		struct mpq_streambuffer_buffer_desc *desc;
+
+		desc = (struct mpq_streambuffer_buffer_desc *)
+			&sbuff->raw_data.data[sbuff->raw_data.pread];
+
+		/*
+		 * Secure buffers are not permitted to be mapped into kernel
+		 * memory, and so buffer base address may be NULL
+		 */
+		if (desc->base == NULL)
+			return -EPERM;
+
+		/* Clamp to the unread portion of the current descriptor */
+		actual_len = (desc->write_ptr - desc->read_ptr);
+		if (actual_len < len)
+			len = actual_len;
+		if (copy_to_user(buf, desc->base + desc->read_ptr, len))
+			return -EFAULT;
+
+		offset = desc->read_ptr;
+		desc->read_ptr += len;
+	}
+
+	/* Notify the registered owner that data was consumed */
+	if (sbuff->cb)
+		sbuff->cb(sbuff, offset, len, sbuff->cb_user_data);
+
+	return len;
+}
+EXPORT_SYMBOL(mpq_streambuffer_data_read_user);
+
+/*
+ * mpq_streambuffer_data_read_dispose() - Discard @len bytes of raw data
+ * without copying them out (advance the read pointer only).
+ *
+ * Ring mode: fails with -EINVAL if fewer than @len bytes are available.
+ * Linear mode: the read pointer of the current descriptor is advanced and
+ * clamped at the descriptor size.
+ *
+ * Return: 0 on success, -EINVAL on bad arguments or insufficient data,
+ * -ENODEV if the buffer was released. The dispose callback, if any, is
+ * invoked outside the lock.
+ */
+int mpq_streambuffer_data_read_dispose(
+	struct mpq_streambuffer *sbuff,
+	size_t len)
+{
+	u32 offset;
+
+	if (sbuff == NULL)
+		return -EINVAL;
+
+	spin_lock(&sbuff->raw_data.lock);
+
+	/* check if buffer was released */
+	if (sbuff->raw_data.error == -ENODEV) {
+		spin_unlock(&sbuff->raw_data.lock);
+		return -ENODEV;
+	}
+
+	if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) {
+		if (unlikely(dvb_ringbuffer_avail(&sbuff->raw_data) < len)) {
+			spin_unlock(&sbuff->raw_data.lock);
+			return -EINVAL;
+		}
+
+		offset = sbuff->raw_data.pread;
+		DVB_RINGBUFFER_SKIP(&sbuff->raw_data, len);
+		wake_up_all(&sbuff->raw_data.queue);
+	} else {
+		struct mpq_streambuffer_buffer_desc *desc;
+
+		desc = (struct mpq_streambuffer_buffer_desc *)
+			&sbuff->raw_data.data[sbuff->raw_data.pread];
+		offset = desc->read_ptr;
+
+		/* Clamp the read pointer at the end of the descriptor */
+		if ((desc->read_ptr + len) > desc->size)
+			desc->read_ptr = desc->size;
+		else
+			desc->read_ptr += len;
+	}
+
+	spin_unlock(&sbuff->raw_data.lock);
+
+	/* Notify the registered owner that data was disposed */
+	if (sbuff->cb)
+		sbuff->cb(sbuff, offset, len, sbuff->cb_user_data);
+
+	return 0;
+}
+EXPORT_SYMBOL(mpq_streambuffer_data_read_dispose);
+
+
+/*
+ * mpq_streambuffer_get_buffer_handle() - Return the (ION) handle of the
+ * underlying raw-data buffer.
+ *
+ * @read_buffer: in linear mode, non-zero selects the current read-side
+ *		 descriptor, zero selects the write-side one. Ignored in
+ *		 ring mode, where there is a single buffer.
+ *
+ * Return: 0 on success with *@handle filled in, -EINVAL on bad arguments,
+ * -ENODEV if the buffer was released.
+ */
+int mpq_streambuffer_get_buffer_handle(
+	struct mpq_streambuffer *sbuff,
+	int read_buffer,
+	int *handle)
+{
+	struct mpq_streambuffer_buffer_desc *desc = NULL;
+
+	if ((sbuff == NULL) || (handle == NULL))
+		return -EINVAL;
+
+	spin_lock(&sbuff->raw_data.lock);
+
+	/* check if buffer was released */
+	if (sbuff->raw_data.error == -ENODEV) {
+		spin_unlock(&sbuff->raw_data.lock);
+		return -ENODEV;
+	}
+
+	if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) {
+		*handle = sbuff->buffers[0].handle;
+	} else {
+		if (read_buffer)
+			desc = (struct mpq_streambuffer_buffer_desc *)
+				&sbuff->raw_data.data[sbuff->raw_data.pread];
+		else
+			desc = (struct mpq_streambuffer_buffer_desc *)
+				&sbuff->raw_data.data[sbuff->raw_data.pwrite];
+		*handle = desc->handle;
+	}
+
+	spin_unlock(&sbuff->raw_data.lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(mpq_streambuffer_get_buffer_handle);
+
+
+/*
+ * mpq_streambuffer_register_data_dispose() - Register a callback invoked
+ * whenever raw data is read or disposed from the stream buffer.
+ *
+ * NOTE(review): cb/cb_user_data are installed without taking any lock;
+ * presumably registration happens before readers run - confirm.
+ *
+ * Return: 0 on success, -EINVAL on bad arguments.
+ */
+int mpq_streambuffer_register_data_dispose(
+	struct mpq_streambuffer *sbuff,
+	mpq_streambuffer_dispose_cb cb_func,
+	void *user_data)
+{
+	if ((sbuff == NULL) || (cb_func == NULL))
+		return -EINVAL;
+
+	sbuff->cb = cb_func;
+	sbuff->cb_user_data = user_data;
+
+	return 0;
+}
+EXPORT_SYMBOL(mpq_streambuffer_register_data_dispose);
+
+
+/*
+ * mpq_streambuffer_data_free() - Return the amount of free space in the
+ * raw-data buffer (ring mode) or in the current linear buffer descriptor.
+ *
+ * Return: free byte count, 0 when all linear buffers are pending,
+ * -EINVAL on bad arguments, -ENODEV if the buffer was released.
+ *
+ * NOTE(review): in linear mode desc is dereferenced after the lock is
+ * dropped, so the returned value may be stale by the time the caller
+ * uses it - confirm this is acceptable for all callers.
+ */
+ssize_t mpq_streambuffer_data_free(
+	struct mpq_streambuffer *sbuff)
+{
+	struct mpq_streambuffer_buffer_desc *desc;
+
+	if (sbuff == NULL)
+		return -EINVAL;
+
+	spin_lock(&sbuff->raw_data.lock);
+
+	/* check if buffer was released */
+	if (sbuff->raw_data.error == -ENODEV) {
+		spin_unlock(&sbuff->raw_data.lock);
+		return -ENODEV;
+	}
+
+	if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) {
+		spin_unlock(&sbuff->raw_data.lock);
+		return dvb_ringbuffer_free(&sbuff->raw_data);
+	}
+
+	if (sbuff->pending_buffers_count == sbuff->buffers_num) {
+		spin_unlock(&sbuff->raw_data.lock);
+		return 0;
+	}
+
+	desc = (struct mpq_streambuffer_buffer_desc *)
+		&sbuff->raw_data.data[sbuff->raw_data.pwrite];
+
+	spin_unlock(&sbuff->raw_data.lock);
+
+	return desc->size - desc->write_ptr;
+}
+EXPORT_SYMBOL(mpq_streambuffer_data_free);
+
+
+/*
+ * mpq_streambuffer_data_avail() - Return the number of raw-data bytes
+ * available for reading (ring mode: whole ring; linear mode: unread part
+ * of the current read-side descriptor).
+ *
+ * Return: available byte count, -EINVAL on bad arguments, -ENODEV if the
+ * buffer was released.
+ *
+ * NOTE(review): in linear mode desc is dereferenced after the lock is
+ * dropped; the returned value may be stale - confirm acceptable.
+ */
+ssize_t mpq_streambuffer_data_avail(
+	struct mpq_streambuffer *sbuff)
+{
+	struct mpq_streambuffer_buffer_desc *desc;
+
+	if (sbuff == NULL)
+		return -EINVAL;
+
+	spin_lock(&sbuff->raw_data.lock);
+
+	/* check if buffer was released */
+	if (sbuff->raw_data.error == -ENODEV) {
+		spin_unlock(&sbuff->raw_data.lock);
+		return -ENODEV;
+	}
+
+	if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) {
+		ssize_t avail = dvb_ringbuffer_avail(&sbuff->raw_data);
+
+		spin_unlock(&sbuff->raw_data.lock);
+		return avail;
+	}
+
+	desc = (struct mpq_streambuffer_buffer_desc *)
+		&sbuff->raw_data.data[sbuff->raw_data.pread];
+
+	spin_unlock(&sbuff->raw_data.lock);
+
+	return desc->write_ptr - desc->read_ptr;
+}
+EXPORT_SYMBOL(mpq_streambuffer_data_avail);
+
+/*
+ * mpq_streambuffer_get_data_rw_offset() - Report the current read and/or
+ * write offsets of the raw-data buffer. Either output pointer may be NULL
+ * if the caller is not interested in that offset.
+ *
+ * Ring mode reports the ring's pread/pwrite; linear mode reports the
+ * read/write pointers within the current descriptors.
+ *
+ * Return: 0 on success, -EINVAL on bad arguments, -ENODEV if the buffer
+ * was released.
+ */
+int mpq_streambuffer_get_data_rw_offset(
+	struct mpq_streambuffer *sbuff,
+	u32 *read_offset,
+	u32 *write_offset)
+{
+	if (sbuff == NULL)
+		return -EINVAL;
+
+	spin_lock(&sbuff->raw_data.lock);
+
+	/* check if buffer was released */
+	if (sbuff->raw_data.error == -ENODEV) {
+		spin_unlock(&sbuff->raw_data.lock);
+		return -ENODEV;
+	}
+
+	if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) {
+		if (read_offset)
+			*read_offset = sbuff->raw_data.pread;
+		if (write_offset)
+			*write_offset = sbuff->raw_data.pwrite;
+	} else {
+		struct mpq_streambuffer_buffer_desc *desc;
+
+		if (read_offset) {
+			desc = (struct mpq_streambuffer_buffer_desc *)
+				&sbuff->raw_data.data[sbuff->raw_data.pread];
+			*read_offset = desc->read_ptr;
+		}
+		if (write_offset) {
+			desc = (struct mpq_streambuffer_buffer_desc *)
+				&sbuff->raw_data.data[sbuff->raw_data.pwrite];
+			*write_offset = desc->write_ptr;
+		}
+	}
+
+	spin_unlock(&sbuff->raw_data.lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(mpq_streambuffer_get_data_rw_offset);
+
+/*
+ * mpq_streambuffer_metadata_free() - Return the free space in the
+ * packet-data (metadata) ring buffer.
+ *
+ * Return: free byte count, -EINVAL on bad arguments, -ENODEV if the
+ * buffer was released.
+ */
+ssize_t mpq_streambuffer_metadata_free(struct mpq_streambuffer *sbuff)
+{
+	ssize_t free;
+
+	if (sbuff == NULL)
+		return -EINVAL;
+
+	spin_lock(&sbuff->packet_data.lock);
+
+	/* check if buffer was released */
+	if (sbuff->packet_data.error == -ENODEV) {
+		spin_unlock(&sbuff->packet_data.lock);
+		return -ENODEV;
+	}
+
+	free = dvb_ringbuffer_free(&sbuff->packet_data);
+
+	spin_unlock(&sbuff->packet_data.lock);
+
+	return free;
+}
+EXPORT_SYMBOL(mpq_streambuffer_metadata_free);
+
+/*
+ * mpq_streambuffer_flush() - Discard all pending data and packets.
+ *
+ * Linear mode: resets every pending linear buffer descriptor and returns
+ * it to the free pool. Ring mode: flushes the raw-data ring. In both
+ * modes every queued packet is then individually disposed (not merely
+ * flushed) so its status transitions to "disposed".
+ *
+ * Both locks are held for the whole operation, matching the order used
+ * in pkt_dispose (packet_data first, then raw_data).
+ *
+ * Return: 0 on success, -EINVAL on bad arguments, -ENODEV if the buffer
+ * was released.
+ */
+int mpq_streambuffer_flush(struct mpq_streambuffer *sbuff)
+{
+	struct mpq_streambuffer_buffer_desc *desc;
+	size_t len;
+	int idx;
+	int ret = 0;
+
+	if (sbuff == NULL)
+		return -EINVAL;
+
+	spin_lock(&sbuff->packet_data.lock);
+	spin_lock(&sbuff->raw_data.lock);
+
+	/* Check if buffer was released */
+	if (sbuff->packet_data.error == -ENODEV ||
+		sbuff->raw_data.error == -ENODEV) {
+		ret = -ENODEV;
+		goto end;
+	}
+
+	if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR)
+		while (sbuff->pending_buffers_count) {
+			desc = (struct mpq_streambuffer_buffer_desc *)
+				&sbuff->raw_data.data[sbuff->raw_data.pread];
+			desc->write_ptr = 0;
+			desc->read_ptr = 0;
+			DVB_RINGBUFFER_SKIP(&sbuff->raw_data,
+				sizeof(struct mpq_streambuffer_buffer_desc));
+			sbuff->pending_buffers_count--;
+		}
+	else
+		dvb_ringbuffer_flush(&sbuff->raw_data);
+
+	/*
+	 * Dispose all packets (simply flushing is not enough since we want
+	 * the packets' status to move to disposed).
+	 */
+	do {
+		idx = dvb_ringbuffer_pkt_next(&sbuff->packet_data, -1, &len);
+		if (idx >= 0)
+			dvb_ringbuffer_pkt_dispose(&sbuff->packet_data, idx);
+	} while (idx >= 0);
+
+end:
+	spin_unlock(&sbuff->raw_data.lock);
+	spin_unlock(&sbuff->packet_data.lock);
+	return ret;
+}
+EXPORT_SYMBOL(mpq_streambuffer_flush);
diff --git a/drivers/media/platform/msm/dvb/demux/Kconfig b/drivers/media/platform/msm/dvb/demux/Kconfig
new file mode 100644
index 0000000..b928212
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/demux/Kconfig
@@ -0,0 +1,47 @@
+menuconfig DVB_MPQ_DEMUX
+ tristate "DVB Demux Device"
+ depends on DVB_MPQ && ION && ION_MSM
+ default n
+
+ help
+ Support for Qualcomm Technologies Inc based dvb demux device.
+ Say Y if you own such a device and want to use it.
+ The Demux device is used to stream playback either
+ from TSIF interface or from DVR interface.
+
+config DVB_MPQ_NUM_DMX_DEVICES
+ int "Number of demux devices"
+ depends on DVB_MPQ_DEMUX
+ default 4
+ range 1 255
+
+ help
+ Configure number of demux devices.
+ Depends on your use-cases for maximum concurrent stream playback.
+
+config DVB_MPQ_MEDIA_BOX_DEMUX
+ bool "Media box demux support"
+ depends on DVB_MPQ_DEMUX
+ default n
+ help
+ Use this option if your HW is Qualcomm Technologies Inc
+ media box and demux support is required on that media box.
+ Currently this config is being used for demux video events
+ optimization.
+
+config DVB_MPQ_TSPP1
+ bool "TSPPv1 plugin"
+ depends on DVB_MPQ_DEMUX && TSPP
+ help
+ Use this option if your HW has
+ Transport Stream Packet Processor(TSPP) version1 support.
+	  Demux may take advantage of HW capabilities to perform
+ some tasks in HW instead of SW.
+
+config DVB_MPQ_SW
+ bool "Software plugin"
+ depends on DVB_MPQ_DEMUX && !DVB_MPQ_TSPP1
+ help
+ Use this option if your HW does not have any
+ TSPP hardware support. All demux tasks will be
+ performed in SW.
diff --git a/drivers/media/platform/msm/dvb/demux/Makefile b/drivers/media/platform/msm/dvb/demux/Makefile
new file mode 100644
index 0000000..c08fa85
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/demux/Makefile
@@ -0,0 +1,14 @@
+
+ccflags-y += -Idrivers/media/dvb-core/
+ccflags-y += -Idrivers/media/platform/msm/dvb/include/
+ccflags-y += -Idrivers/misc/
+
+obj-$(CONFIG_DVB_MPQ_DEMUX) += mpq-dmx-hw-plugin.o
+
+mpq-dmx-hw-plugin-y := mpq_dmx_plugin_common.o
+
+mpq-dmx-hw-plugin-$(CONFIG_QSEECOM) += mpq_sdmx.o
+
+mpq-dmx-hw-plugin-$(CONFIG_DVB_MPQ_TSPP1) += mpq_dmx_plugin_tspp_v1.o
+
+mpq-dmx-hw-plugin-$(CONFIG_DVB_MPQ_SW) += mpq_dmx_plugin_sw.o
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
new file mode 100644
index 0000000..f16c1ba
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
@@ -0,0 +1,6712 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/file.h>
+#include <linux/scatterlist.h>
+#include "mpq_dvb_debug.h"
+#include "mpq_dmx_plugin_common.h"
+#include "mpq_sdmx.h"
+
+#define SDMX_MAJOR_VERSION_MATCH (8)
+
+/* Length of mandatory fields that must exist in header of video PES */
+#define PES_MANDATORY_FIELDS_LEN 9
+
+/* Index of first byte in TS packet holding STC */
+#define STC_LOCATION_IDX 188
+
+#define MAX_PES_LENGTH (SZ_64K)
+
+#define MAX_TS_PACKETS_FOR_SDMX_PROCESS (500)
+
+/*
+ * PES header length field is 8 bits so PES header length after this field
+ * can be up to 256 bytes.
+ * Preceding fields of the PES header total to 9 bytes
+ * (including the PES header length field).
+ */
+#define MAX_PES_HEADER_LENGTH (256 + PES_MANDATORY_FIELDS_LEN)
+
+/* TS packet with adaptation field only can take up the entire TSP */
+#define MAX_TSP_ADAPTATION_LENGTH (184)
+
+#define MAX_SDMX_METADATA_LENGTH \
+ (TS_PACKET_HEADER_LENGTH + \
+ MAX_TSP_ADAPTATION_LENGTH + \
+ MAX_PES_HEADER_LENGTH)
+
+#define SDMX_METADATA_BUFFER_SIZE (64*1024)
+#define SDMX_SECTION_BUFFER_SIZE (64*1024)
+#define SDMX_PCR_BUFFER_SIZE (64*1024)
+
+/* Number of demux devices, has default of linux configuration */
+static int mpq_demux_device_num = CONFIG_DVB_MPQ_NUM_DMX_DEVICES;
+module_param(mpq_demux_device_num, int, 0444);
+
+/* ION heap IDs used for allocating video output buffer */
+static int video_secure_ion_heap = ION_CP_MM_HEAP_ID;
+module_param(video_secure_ion_heap, int, 0644);
+MODULE_PARM_DESC(video_secure_ion_heap, "ION heap for secure video buffer allocation");
+
+static int video_nonsecure_ion_heap = ION_IOMMU_HEAP_ID;
+module_param(video_nonsecure_ion_heap, int, 0644);
+MODULE_PARM_DESC(video_nonsecure_ion_heap, "ION heap for non-secure video buffer allocation");
+
+/* ION heap IDs used for allocating audio output buffer */
+static int audio_nonsecure_ion_heap = ION_IOMMU_HEAP_ID;
+module_param(audio_nonsecure_ion_heap, int, 0644);
+MODULE_PARM_DESC(audio_nonsecure_ion_heap, "ION heap for non-secure audio buffer allocation");
+
+/* Value of TS packet scramble bits field for even key */
+static int mpq_sdmx_scramble_even = 0x2;
+module_param(mpq_sdmx_scramble_even, int, 0644);
+
+/* Value of TS packet scramble bits field for odd key */
+static int mpq_sdmx_scramble_odd = 0x3;
+module_param(mpq_sdmx_scramble_odd, int, 0644);
+
+/*
+ * Default action (discard or pass) taken when scramble bit is not one of the
+ * pass-through / odd / even values.
+ * When set packets will be discarded, otherwise passed through.
+ */
+static int mpq_sdmx_scramble_default_discard = 1;
+module_param(mpq_sdmx_scramble_default_discard, int, 0644);
+
+/* Max number of TS packets allowed as input for a single sdmx process */
+static int mpq_sdmx_proc_limit = MAX_TS_PACKETS_FOR_SDMX_PROCESS;
+module_param(mpq_sdmx_proc_limit, int, 0644);
+
+/* Debug flag for secure demux process */
+static int mpq_sdmx_debug;
+module_param(mpq_sdmx_debug, int, 0644);
+
+/*
+ * Indicates whether the demux should search for frame boundaries
+ * and notify on video packets on frame-basis or whether to provide
+ * only video PES packet payloads as-is.
+ */
+static int video_framing = 1;
+module_param(video_framing, int, 0644);
+
+/* TSIF operation mode: 1 = TSIF_MODE_1, 2 = TSIF_MODE_2, 3 = TSIF_LOOPBACK */
+static int tsif_mode = 2;
+module_param(tsif_mode, int, 0644);
+
+/* Inverse TSIF clock signal */
+static int clock_inv;
+module_param(clock_inv, int, 0644);
+
+/* TSIF Timestamp source: 0 = TSIF Clock Reference, 1 = LPASS time counter */
+enum tsif_tts_source {
+ TSIF_TTS_TCR = 0, /* Time stamps from TCR counter */
+ TSIF_TTS_LPASS_TIMER /* Time stamps from AV/Qtimer Timer */
+};
+
+/* Store all mpq feeds corresponding to 4 TS programs in a Transport Stream */
+static struct mpq_feed *store_mpq_audio_feed[CONFIG_DVB_MPQ_NUM_DMX_DEVICES] = {
+ NULL, NULL, NULL, NULL};
+static struct mpq_feed *store_mpq_video_feed[CONFIG_DVB_MPQ_NUM_DMX_DEVICES] = {
+ NULL, NULL, NULL, NULL};
+static int non_predicted_video_frame;
+/* trigger video ES frame events on MPEG2 B frames and H264 non-IDR frames */
+#ifdef CONFIG_DVB_MPQ_MEDIA_BOX_DEMUX
+static int video_b_frame_events = 1;
+#else
+static int video_b_frame_events;
+#endif
+
+/* Global (file-scope singleton) data-structure for managing demux devices */
+static struct
+{
+	/* ION demux client used for memory allocation */
+	struct ion_client *ion_client;
+
+	/* demux devices array (mpq_demux_device_num entries) */
+	struct mpq_demux *devices;
+
+	/* Stream buffers objects used for tunneling to decoders */
+	struct mpq_streambuffer
+		decoder_buffers[MPQ_ADAPTER_MAX_NUM_OF_INTERFACES];
+
+	/* Indicates whether secure demux TZ application is available */
+	int secure_demux_app_loaded;
+} mpq_dmx_info;
+
+
+/* Accessors exposing the module parameters to the HW plugin back-ends */
+
+/* Return scramble-bits value that indicates an odd key */
+int mpq_dmx_get_param_scramble_odd(void)
+{
+	return mpq_sdmx_scramble_odd;
+}
+
+/* Return scramble-bits value that indicates an even key */
+int mpq_dmx_get_param_scramble_even(void)
+{
+	return mpq_sdmx_scramble_even;
+}
+
+/* Return whether unrecognized scramble-bit values are discarded */
+int mpq_dmx_get_param_scramble_default_discard(void)
+{
+	return mpq_sdmx_scramble_default_discard;
+}
+
+/* Return the configured TSIF operation mode */
+int mpq_dmx_get_param_tsif_mode(void)
+{
+	return tsif_mode;
+}
+
+/* Return whether the TSIF clock signal is inverted */
+int mpq_dmx_get_param_clock_inv(void)
+{
+	return clock_inv;
+}
+
+/*
+ * consumer_video_streambuffer() - Look up the decoder stream buffer for a
+ * video PES type (DMX_PES_VIDEO0..3).
+ *
+ * Side effect: also records the matching stream interface in the stored
+ * video feed's feed_data.
+ *
+ * Return: the stream buffer pointer, or NULL when no feed is stored for
+ * the requested PES type (or the type is not a video PES).
+ */
+struct mpq_streambuffer *consumer_video_streambuffer(int dmx_ts_pes_video)
+{
+	struct mpq_streambuffer *streambuffer = NULL;
+	struct mpq_video_feed_info *feed_data = NULL;
+
+	switch (dmx_ts_pes_video) {
+	case DMX_PES_VIDEO0:
+		if (store_mpq_video_feed[0] != NULL) {
+			feed_data = &store_mpq_video_feed[0]->video_info;
+			feed_data->stream_interface =
+				MPQ_ADAPTER_VIDEO0_STREAM_IF;
+		}
+		break;
+	case DMX_PES_VIDEO1:
+		if (store_mpq_video_feed[1] != NULL) {
+			feed_data = &store_mpq_video_feed[1]->video_info;
+			feed_data->stream_interface =
+				MPQ_ADAPTER_VIDEO1_STREAM_IF;
+		}
+		break;
+	case DMX_PES_VIDEO2:
+		if (store_mpq_video_feed[2] != NULL) {
+			feed_data = &store_mpq_video_feed[2]->video_info;
+			feed_data->stream_interface =
+				MPQ_ADAPTER_VIDEO2_STREAM_IF;
+		}
+		break;
+	case DMX_PES_VIDEO3:
+		if (store_mpq_video_feed[3] != NULL) {
+			feed_data = &store_mpq_video_feed[3]->video_info;
+			feed_data->stream_interface =
+				MPQ_ADAPTER_VIDEO3_STREAM_IF;
+		}
+		break;
+	}
+
+	/* Resolve the interface to its adapter stream buffer, if found */
+	if (feed_data != NULL)
+		mpq_adapter_get_stream_if(feed_data->stream_interface,
+					  &streambuffer);
+
+	return streambuffer;
+}
+EXPORT_SYMBOL(consumer_video_streambuffer);
+
+/*
+ * consumer_audio_streambuffer() - Look up the decoder stream buffer for an
+ * audio PES type (DMX_PES_AUDIO0..3). Audio counterpart of
+ * consumer_video_streambuffer().
+ *
+ * Side effect: also records the matching stream interface in the stored
+ * audio feed's feed_data.
+ *
+ * Return: the stream buffer pointer, or NULL when no feed is stored for
+ * the requested PES type (or the type is not an audio PES).
+ */
+struct mpq_streambuffer *consumer_audio_streambuffer(int dmx_ts_pes_audio)
+{
+	struct mpq_streambuffer *streambuffer = NULL;
+	struct mpq_audio_feed_info *feed_data = NULL;
+
+	switch (dmx_ts_pes_audio) {
+	case DMX_PES_AUDIO0:
+		if (store_mpq_audio_feed[0] != NULL) {
+			feed_data = &store_mpq_audio_feed[0]->audio_info;
+			feed_data->stream_interface =
+				MPQ_ADAPTER_AUDIO0_STREAM_IF;
+		}
+		break;
+	case DMX_PES_AUDIO1:
+		if (store_mpq_audio_feed[1] != NULL) {
+			feed_data = &store_mpq_audio_feed[1]->audio_info;
+			feed_data->stream_interface =
+				MPQ_ADAPTER_AUDIO1_STREAM_IF;
+		}
+		break;
+	case DMX_PES_AUDIO2:
+		if (store_mpq_audio_feed[2] != NULL) {
+			feed_data = &store_mpq_audio_feed[2]->audio_info;
+			feed_data->stream_interface =
+				MPQ_ADAPTER_AUDIO2_STREAM_IF;
+		}
+		break;
+	case DMX_PES_AUDIO3:
+		if (store_mpq_audio_feed[3] != NULL) {
+			feed_data = &store_mpq_audio_feed[3]->audio_info;
+			feed_data->stream_interface =
+				MPQ_ADAPTER_AUDIO3_STREAM_IF;
+		}
+		break;
+	}
+
+	/* Resolve the interface to its adapter stream buffer, if found */
+	if (feed_data != NULL)
+		mpq_adapter_get_stream_if(feed_data->stream_interface,
+					  &streambuffer);
+
+	return streambuffer;
+}
+EXPORT_SYMBOL(consumer_audio_streambuffer);
+
+
+
+/*
+ * Check that PES header is valid and that it is a video PES:
+ * start-code prefix must be 0x000001 and stream_id must be 0xE0..0xEF
+ * (MPEG video stream IDs). Returns 0 if valid, -EINVAL otherwise.
+ */
+static int mpq_dmx_is_valid_video_pes(struct pes_packet_header *pes_header)
+{
+	/* start-code valid? */
+	if ((pes_header->packet_start_code_prefix_1 != 0) ||
+		(pes_header->packet_start_code_prefix_2 != 0) ||
+		(pes_header->packet_start_code_prefix_3 != 1))
+		return -EINVAL;
+
+	/* stream_id is video? */
+	if ((pes_header->stream_id & 0xF0) != 0xE0)
+		return -EINVAL;
+
+	return 0;
+}
+
+/*
+ * Check that PES header is valid and that it is an audio PES:
+ * start-code prefix must be 0x000001 and stream_id must be either the
+ * private-stream-1 ID 0xBD (used for AC3) or an MPEG audio stream ID
+ * (0xC0..0xDF). Returns 0 if valid, -EINVAL otherwise.
+ */
+static int mpq_dmx_is_valid_audio_pes(struct pes_packet_header *pes_header)
+{
+	/* start-code valid? */
+	if ((pes_header->packet_start_code_prefix_1 != 0) ||
+		(pes_header->packet_start_code_prefix_2 != 0) ||
+		(pes_header->packet_start_code_prefix_3 != 1))
+		return -EINVAL;
+
+	/* Note: AC3 stream ID = 0xBD */
+	if (pes_header->stream_id == 0xBD)
+		return 0;
+
+	/* stream_id is audio? */ /* 110x xxxx = Audio Stream IDs */
+	if ((pes_header->stream_id & 0xE0) != 0xC0)
+		return -EINVAL;
+
+	return 0;
+}
+
+/*
+ * Check if a framing pattern is a video frame pattern or a header pattern.
+ *
+ * Return: 1 if @pattern_type marks a frame start for @codec, 0 if it is a
+ * header/other pattern, -EINVAL for an unknown codec.
+ *
+ * Side effect: when video_b_frame_events is enabled, seeing a non-predicted
+ * frame (MPEG2 B-frame / H264 non-IDR B-slice) sets the file-scope flag
+ * non_predicted_video_frame (the flag is consumed elsewhere, not here).
+ */
+static inline int mpq_dmx_is_video_frame(
+	enum dmx_video_codec codec,
+	u64 pattern_type)
+{
+	switch (codec) {
+	case DMX_VIDEO_CODEC_MPEG2:
+		if (video_b_frame_events == 1)
+			if (pattern_type == DMX_IDX_MPEG_B_FRAME_START)
+				non_predicted_video_frame = 1;
+
+		if ((pattern_type == DMX_IDX_MPEG_I_FRAME_START) ||
+			(pattern_type == DMX_IDX_MPEG_P_FRAME_START) ||
+			(pattern_type == DMX_IDX_MPEG_B_FRAME_START))
+			return 1;
+		return 0;
+
+	case DMX_VIDEO_CODEC_H264:
+		/* H264 uses slice-level patterns when B-frame events are on */
+		if (video_b_frame_events == 1) {
+			if (pattern_type == DMX_IDX_H264_NON_IDR_BSLICE_START)
+				non_predicted_video_frame = 1;
+
+			if ((pattern_type == DMX_IDX_H264_IDR_ISLICE_START) ||
+				(pattern_type ==
+				 DMX_IDX_H264_NON_IDR_PSLICE_START) ||
+				(pattern_type == DMX_IDX_H264_NON_IDR_BSLICE_START))
+				return 1;
+		} else {
+			if ((pattern_type == DMX_IDX_H264_IDR_START) ||
+			    (pattern_type == DMX_IDX_H264_NON_IDR_START))
+				return 1;
+		}
+		return 0;
+
+	case DMX_VIDEO_CODEC_VC1:
+		if (pattern_type == DMX_IDX_VC1_FRAME_START)
+			return 1;
+		return 0;
+
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * mpq_dmx_get_pattern_params - Returns the required video
+ * patterns for framing operation based on video codec.
+ *
+ * @video_codec: the video codec.
+ * @patterns: a pointer to the pattern parameters, updated by this function.
+ * @patterns_num: number of patterns, updated by this function.
+ *
+ * Return: 0 on success, -EINVAL for an unsupported codec (in which case
+ * *patterns_num is set to 0).
+ */
+static inline int mpq_dmx_get_pattern_params(
+	enum dmx_video_codec video_codec,
+	const struct dvb_dmx_video_patterns
+		*patterns[DVB_DMX_MAX_SEARCH_PATTERN_NUM],
+	int *patterns_num)
+{
+	switch (video_codec) {
+	case DMX_VIDEO_CODEC_MPEG2:
+		patterns[0] = dvb_dmx_get_pattern(DMX_IDX_MPEG_SEQ_HEADER);
+		patterns[1] = dvb_dmx_get_pattern(DMX_IDX_MPEG_GOP);
+		patterns[2] = dvb_dmx_get_pattern(DMX_IDX_MPEG_I_FRAME_START);
+		patterns[3] = dvb_dmx_get_pattern(DMX_IDX_MPEG_P_FRAME_START);
+		patterns[4] = dvb_dmx_get_pattern(DMX_IDX_MPEG_B_FRAME_START);
+		*patterns_num = 5;
+		break;
+
+	case DMX_VIDEO_CODEC_H264:
+		patterns[0] = dvb_dmx_get_pattern(DMX_IDX_H264_SPS);
+		patterns[1] = dvb_dmx_get_pattern(DMX_IDX_H264_PPS);
+		/* slice-level patterns are used when B-frame events are on */
+		if (video_b_frame_events != 1) {
+			patterns[2] = dvb_dmx_get_pattern
+				(DMX_IDX_H264_IDR_START);
+			patterns[3] = dvb_dmx_get_pattern
+				(DMX_IDX_H264_NON_IDR_START);
+			patterns[4] = dvb_dmx_get_pattern(DMX_IDX_H264_SEI);
+			*patterns_num = 5;
+		} else {
+			patterns[2] = dvb_dmx_get_pattern
+				(DMX_IDX_H264_IDR_ISLICE_START);
+			patterns[3] = dvb_dmx_get_pattern
+				(DMX_IDX_H264_NON_IDR_PSLICE_START);
+			patterns[4] = dvb_dmx_get_pattern
+				(DMX_IDX_H264_NON_IDR_BSLICE_START);
+			patterns[5] = dvb_dmx_get_pattern(DMX_IDX_H264_SEI);
+			*patterns_num = 6;
+		}
+		break;
+
+	case DMX_VIDEO_CODEC_VC1:
+		patterns[0] = dvb_dmx_get_pattern(DMX_IDX_VC1_SEQ_HEADER);
+		patterns[1] = dvb_dmx_get_pattern(DMX_IDX_VC1_ENTRY_POINT);
+		patterns[2] = dvb_dmx_get_pattern(DMX_IDX_VC1_FRAME_START);
+		*patterns_num = 3;
+		break;
+
+	default:
+		MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+		*patterns_num = 0;
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * mpq_dmx_update_decoder_stat -
+ * Update decoder output statistics in debug-fs.
+ *
+ * @mpq_feed: decoder feed object
+ *
+ * Maintains, per stream interface: output count, inter-frame interval
+ * running sum/average and maximum, and the timestamp of the last output.
+ * No-op for feeds that are neither video nor audio, or whose stream
+ * interface is out of the tracked range.
+ */
+void mpq_dmx_update_decoder_stat(struct mpq_feed *mpq_feed)
+{
+	ktime_t curr_time;
+	u32 delta_time_ms;
+	struct mpq_demux *mpq_demux = mpq_feed->mpq_demux;
+	enum mpq_adapter_stream_if idx;
+
+	if (!dvb_dmx_is_video_feed(mpq_feed->dvb_demux_feed) &&
+	    !dvb_dmx_is_audio_feed(mpq_feed->dvb_demux_feed))
+		return;
+
+	/* Map the feed to its decoder-statistics slot */
+	if (dvb_dmx_is_video_feed(mpq_feed->dvb_demux_feed) &&
+	    mpq_feed->video_info.stream_interface <=
+			MPQ_ADAPTER_VIDEO3_STREAM_IF)
+		idx = mpq_feed->video_info.stream_interface;
+	else if (dvb_dmx_is_audio_feed(mpq_feed->dvb_demux_feed) &&
+		 mpq_feed->audio_info.stream_interface <=
+			MPQ_ADAPTER_AUDIO3_STREAM_IF)
+		idx = mpq_feed->audio_info.stream_interface;
+	else
+		return;
+
+	curr_time = ktime_get();
+	/* First output: no interval to measure yet */
+	if (unlikely(!mpq_demux->decoder_stat[idx].out_count)) {
+		mpq_demux->decoder_stat[idx].out_last_time = curr_time;
+		mpq_demux->decoder_stat[idx].out_count++;
+		return;
+	}
+
+	/* calculate time-delta between frame */
+	delta_time_ms = mpq_dmx_calc_time_delta(curr_time,
+		mpq_demux->decoder_stat[idx].out_last_time);
+
+	mpq_demux->decoder_stat[idx].out_interval_sum += delta_time_ms;
+
+	mpq_demux->decoder_stat[idx].out_interval_average =
+		mpq_demux->decoder_stat[idx].out_interval_sum /
+		mpq_demux->decoder_stat[idx].out_count;
+
+	if (delta_time_ms > mpq_demux->decoder_stat[idx].out_interval_max)
+		mpq_demux->decoder_stat[idx].out_interval_max = delta_time_ms;
+
+	mpq_demux->decoder_stat[idx].out_last_time = curr_time;
+	mpq_demux->decoder_stat[idx].out_count++;
+}
+
+/*
+ * mpq_dmx_update_sdmx_stat -
+ * Update SDMX statistics in debug-fs.
+ *
+ * @mpq_demux: mpq_demux object
+ * @bytes_processed: number of bytes processed by sdmx
+ * @process_start_time: time before sdmx process was triggered
+ * @process_end_time: time after sdmx process finished
+ *
+ * Tracks per-process packet counts (sum/average/min) and processing time
+ * (sum/average/max). The "min packets" and "max time" baselines are
+ * (re)initialized on the first recorded process.
+ */
+static inline void mpq_dmx_update_sdmx_stat(struct mpq_demux *mpq_demux,
+	u32 bytes_processed, ktime_t process_start_time,
+	ktime_t process_end_time)
+{
+	u32 packets_num;
+	u32 process_time;
+
+	mpq_demux->sdmx_process_count++;
+	packets_num = bytes_processed / mpq_demux->demux.ts_packet_size;
+	mpq_demux->sdmx_process_packets_sum += packets_num;
+	mpq_demux->sdmx_process_packets_average =
+		mpq_demux->sdmx_process_packets_sum /
+		mpq_demux->sdmx_process_count;
+
+	process_time =
+		mpq_dmx_calc_time_delta(process_end_time, process_start_time);
+
+	mpq_demux->sdmx_process_time_sum += process_time;
+	mpq_demux->sdmx_process_time_average =
+		mpq_demux->sdmx_process_time_sum /
+		mpq_demux->sdmx_process_count;
+
+	if ((mpq_demux->sdmx_process_count == 1) ||
+		(packets_num < mpq_demux->sdmx_process_packets_min))
+		mpq_demux->sdmx_process_packets_min = packets_num;
+
+	if ((mpq_demux->sdmx_process_count == 1) ||
+		(process_time > mpq_demux->sdmx_process_time_max))
+		mpq_demux->sdmx_process_time_max = process_time;
+}
+
+/*
+ * debugfs open handler for the sdmx_log_level file: stash the mpq_demux
+ * pointer (passed as inode private data at file creation) for read/write.
+ */
+static int mpq_sdmx_log_level_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+/*
+ * debugfs read handler for sdmx_log_level: format the current log level
+ * as a decimal string and copy it to the user buffer.
+ */
+static ssize_t mpq_sdmx_log_level_read(struct file *fp,
+		char __user *user_buffer, size_t count, loff_t *position)
+{
+	char user_str[16];
+	struct mpq_demux *mpq_demux = fp->private_data;
+	int ret;
+
+	ret = scnprintf(user_str, 16, "%d", mpq_demux->sdmx_log_level);
+	/* ret+1 copies the terminating NUL along with the digits */
+	ret = simple_read_from_buffer(user_buffer, count, position,
+		user_str, ret+1);
+
+	return ret;
+}
+
+/*
+ * debugfs write handler for sdmx_log_level: parse a decimal level from
+ * user-space, validate the range, store it, and push it to the active
+ * sdmx session (if any) under the demux mutex.
+ *
+ * Returns the number of bytes consumed, or a negative errno.
+ */
+static ssize_t mpq_sdmx_log_level_write(struct file *fp,
+		const char __user *user_buffer, size_t count, loff_t *position)
+{
+	/* Zero-initialized so the buffer is always NUL-terminated for
+	 * kstrtoint(); simple_write_to_buffer() itself does not terminate.
+	 */
+	char user_str[16] = {0};
+	int ret;
+	int ret_count;
+	int level;
+	struct mpq_demux *mpq_demux = fp->private_data;
+
+	if (count >= 16)
+		return -EINVAL;
+
+	/* Cap the copy at sizeof-1 so the final byte stays NUL */
+	ret_count = simple_write_to_buffer(user_str, sizeof(user_str) - 1,
+		position, user_buffer, count);
+	if (ret_count < 0)
+		return ret_count;
+	user_str[ret_count] = '\0';
+
+	ret = kstrtoint(user_str, 0, &level);
+	if (ret)
+		return ret;
+
+	if (level < SDMX_LOG_NO_PRINT || level > SDMX_LOG_VERBOSE)
+		return -EINVAL;
+
+	mutex_lock(&mpq_demux->mutex);
+	mpq_demux->sdmx_log_level = level;
+	if (mpq_demux->sdmx_session_handle != SDMX_INVALID_SESSION_HANDLE) {
+		ret = sdmx_set_log_level(mpq_demux->sdmx_session_handle,
+			mpq_demux->sdmx_log_level);
+		if (ret) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: Could not set sdmx log level. ret = %d\n",
+				__func__, ret);
+			mutex_unlock(&mpq_demux->mutex);
+			return -EINVAL;
+		}
+	}
+
+	mutex_unlock(&mpq_demux->mutex);
+	return ret_count;
+}
+
+/* File operations for the per-demux "sdmx_log_level" debugfs entry */
+static const struct file_operations sdmx_debug_fops = {
+	.open = mpq_sdmx_log_level_open,
+	.read = mpq_sdmx_log_level_read,
+	.write = mpq_sdmx_log_level_write,
+	.owner = THIS_MODULE,
+};
+
+/*
+ * Extend dvb-demux debugfs with common plug-in entries.
+ *
+ * Resets the HW notification counters, then exposes them plus per-decoder
+ * and sdmx statistics as u32 debugfs files under the demux's existing
+ * debugfs directory. Bails out silently if dvb-demux did not create a
+ * debugfs directory.
+ *
+ * @mpq_demux: mpq_demux object whose statistics fields are exported
+ */
+void mpq_dmx_init_debugfs_entries(struct mpq_demux *mpq_demux)
+{
+	int i;
+	char file_name[50];
+	struct dentry *debugfs_decoder_dir;
+
+	/*
+	 * Extend dvb-demux debugfs with HW statistics.
+	 * Note that destruction of debugfs directory is done
+	 * when dvb-demux is terminated.
+	 */
+	mpq_demux->hw_notification_count = 0;
+	mpq_demux->hw_notification_interval = 0;
+	mpq_demux->hw_notification_size = 0;
+	mpq_demux->hw_notification_min_size = 0xFFFFFFFF;
+
+	if (mpq_demux->demux.dmx.debugfs_demux_dir == NULL)
+		return;
+
+	debugfs_create_u32(
+		"hw_notification_interval",
+		0664,
+		mpq_demux->demux.dmx.debugfs_demux_dir,
+		&mpq_demux->hw_notification_interval);
+
+	debugfs_create_u32(
+		"hw_notification_min_interval",
+		0664,
+		mpq_demux->demux.dmx.debugfs_demux_dir,
+		&mpq_demux->hw_notification_min_interval);
+
+	debugfs_create_u32(
+		"hw_notification_count",
+		0664,
+		mpq_demux->demux.dmx.debugfs_demux_dir,
+		&mpq_demux->hw_notification_count);
+
+	debugfs_create_u32(
+		"hw_notification_size",
+		0664,
+		mpq_demux->demux.dmx.debugfs_demux_dir,
+		&mpq_demux->hw_notification_size);
+
+	debugfs_create_u32(
+		"hw_notification_min_size",
+		0664,
+		mpq_demux->demux.dmx.debugfs_demux_dir,
+		&mpq_demux->hw_notification_min_size);
+
+	debugfs_decoder_dir = debugfs_create_dir("decoder",
+		mpq_demux->demux.dmx.debugfs_demux_dir);
+
+	/* Per-decoder counters (read-only, 0444) are created only when the
+	 * "decoder" sub-directory was successfully created.
+	 */
+	for (i = 0;
+		 debugfs_decoder_dir &&
+		 (i < MPQ_ADAPTER_MAX_NUM_OF_INTERFACES);
+		 i++) {
+		snprintf(file_name, 50, "decoder%d_drop_count", i);
+		debugfs_create_u32(
+			file_name,
+			0444,
+			debugfs_decoder_dir,
+			&mpq_demux->decoder_stat[i].drop_count);
+
+		snprintf(file_name, 50, "decoder%d_out_count", i);
+		debugfs_create_u32(
+			file_name,
+			0444,
+			debugfs_decoder_dir,
+			&mpq_demux->decoder_stat[i].out_count);
+
+		snprintf(file_name, 50, "decoder%d_out_interval_sum", i);
+		debugfs_create_u32(
+			file_name,
+			0444,
+			debugfs_decoder_dir,
+			&mpq_demux->decoder_stat[i].out_interval_sum);
+
+		snprintf(file_name, 50, "decoder%d_out_interval_average", i);
+		debugfs_create_u32(
+			file_name,
+			0444,
+			debugfs_decoder_dir,
+			&mpq_demux->decoder_stat[i].out_interval_average);
+
+		snprintf(file_name, 50, "decoder%d_out_interval_max", i);
+		debugfs_create_u32(
+			file_name,
+			0444,
+			debugfs_decoder_dir,
+			&mpq_demux->decoder_stat[i].out_interval_max);
+
+		snprintf(file_name, 50, "decoder%d_ts_errors", i);
+		debugfs_create_u32(
+			file_name,
+			0444,
+			debugfs_decoder_dir,
+			&mpq_demux->decoder_stat[i].ts_errors);
+
+		snprintf(file_name, 50, "decoder%d_cc_errors", i);
+		debugfs_create_u32(
+			file_name,
+			0444,
+			debugfs_decoder_dir,
+			&mpq_demux->decoder_stat[i].cc_errors);
+	}
+
+	debugfs_create_u32(
+		"sdmx_process_count",
+		0664,
+		mpq_demux->demux.dmx.debugfs_demux_dir,
+		&mpq_demux->sdmx_process_count);
+
+	debugfs_create_u32(
+		"sdmx_process_time_sum",
+		0664,
+		mpq_demux->demux.dmx.debugfs_demux_dir,
+		&mpq_demux->sdmx_process_time_sum);
+
+	debugfs_create_u32(
+		"sdmx_process_time_average",
+		0664,
+		mpq_demux->demux.dmx.debugfs_demux_dir,
+		&mpq_demux->sdmx_process_time_average);
+
+	debugfs_create_u32(
+		"sdmx_process_time_max",
+		0664,
+		mpq_demux->demux.dmx.debugfs_demux_dir,
+		&mpq_demux->sdmx_process_time_max);
+
+	debugfs_create_u32(
+		"sdmx_process_packets_sum",
+		0664,
+		mpq_demux->demux.dmx.debugfs_demux_dir,
+		&mpq_demux->sdmx_process_packets_sum);
+
+	debugfs_create_u32(
+		"sdmx_process_packets_average",
+		0664,
+		mpq_demux->demux.dmx.debugfs_demux_dir,
+		&mpq_demux->sdmx_process_packets_average);
+
+	debugfs_create_u32(
+		"sdmx_process_packets_min",
+		0664,
+		mpq_demux->demux.dmx.debugfs_demux_dir,
+		&mpq_demux->sdmx_process_packets_min);
+
+	debugfs_create_file("sdmx_log_level",
+		0664,
+		mpq_demux->demux.dmx.debugfs_demux_dir,
+		mpq_demux,
+		&sdmx_debug_fops);
+}
+
+/*
+ * Update dvb-demux debugfs with HW notification statistics.
+ *
+ * Records the interval since the previous notification and tracks the
+ * minimum non-zero interval seen, then bumps the notification count and
+ * remembers the current timestamp for the next call.
+ */
+void mpq_dmx_update_hw_statistics(struct mpq_demux *mpq_demux)
+{
+	ktime_t curr_time;
+	u32 delta_time_ms;
+
+	curr_time = ktime_get();
+	if (likely(mpq_demux->hw_notification_count)) {
+		/* calculate time-delta between notifications */
+		delta_time_ms = mpq_dmx_calc_time_delta(curr_time,
+			mpq_demux->last_notification_time);
+
+		mpq_demux->hw_notification_interval = delta_time_ms;
+
+		/* Second notification seeds the minimum; afterwards only
+		 * smaller non-zero intervals replace it.
+		 */
+		if ((mpq_demux->hw_notification_count == 1) ||
+			(mpq_demux->hw_notification_interval &&
+			 mpq_demux->hw_notification_interval <
+				mpq_demux->hw_notification_min_interval))
+			mpq_demux->hw_notification_min_interval =
+				mpq_demux->hw_notification_interval;
+	}
+
+	mpq_demux->hw_notification_count++;
+	mpq_demux->last_notification_time = curr_time;
+}
+
+/*
+ * Probe whether the secure demux (sdmx) application is loaded by opening a
+ * trial session, and record the result in
+ * mpq_dmx_info.secure_demux_app_loaded. A major-version mismatch is only
+ * logged; the app is still marked as loaded.
+ */
+static void mpq_sdmx_check_app_loaded(void)
+{
+	int session;
+	u32 version;
+	int ret;
+
+	ret = sdmx_open_session(&session);
+	if (ret != SDMX_SUCCESS) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: Could not initialize session with SDMX. ret = %d\n",
+			__func__, ret);
+		mpq_dmx_info.secure_demux_app_loaded = 0;
+		return;
+	}
+
+	/* Check proper sdmx major version */
+	ret = sdmx_get_version(session, &version);
+	if (ret != SDMX_SUCCESS) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: Could not get sdmx version. ret = %d\n",
+			__func__, ret);
+	} else {
+		/* Major version lives in the upper bits (version >> 8) */
+		if ((version >> 8) != SDMX_MAJOR_VERSION_MATCH)
+			MPQ_DVB_ERR_PRINT(
+				"%s: sdmx major version does not match. expected=%d, actual=%d\n",
+				__func__, SDMX_MAJOR_VERSION_MATCH,
+				(version >> 8));
+		else
+			MPQ_DVB_DBG_PRINT(
+				"%s: sdmx major version is ok = %d\n",
+				__func__, SDMX_MAJOR_VERSION_MATCH);
+	}
+
+	mpq_dmx_info.secure_demux_app_loaded = 1;
+	sdmx_close_session(session);
+}
+
+/*
+ * Common plugin initialization: allocates the array of mpq_demux devices,
+ * creates the shared ION client, then initializes and registers every demux
+ * device via the plugin-specific dmx_init_func callback.
+ *
+ * @dmx_init_func: plugin-provided per-device HW init callback
+ *
+ * Returns 0 on success, negative errno on failure. On any failure after
+ * allocation, mpq_dmx_plugin_exit() tears down whatever was set up.
+ */
+int mpq_dmx_plugin_init(mpq_dmx_init dmx_init_func)
+{
+	int i;
+	int j;
+	int result;
+	struct mpq_demux *mpq_demux;
+	struct dvb_adapter *mpq_adapter;
+	struct mpq_feed *feed;
+
+	MPQ_DVB_DBG_PRINT("%s executed, device num %d\n",
+					  __func__,
+					  mpq_demux_device_num);
+
+	mpq_adapter = mpq_adapter_get();
+
+	if (mpq_adapter == NULL) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: mpq_adapter is not valid\n",
+			__func__);
+		result = -EPERM;
+		goto init_failed;
+	}
+
+	if (mpq_demux_device_num == 0) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: mpq_demux_device_num set to 0\n",
+			__func__);
+
+		result = -EPERM;
+		goto init_failed;
+	}
+
+	mpq_dmx_info.devices = NULL;
+	mpq_dmx_info.ion_client = NULL;
+
+	mpq_dmx_info.secure_demux_app_loaded = 0;
+
+	/* Allocate memory for all MPQ devices */
+	mpq_dmx_info.devices =
+		vzalloc(mpq_demux_device_num*sizeof(struct mpq_demux));
+
+	if (!mpq_dmx_info.devices) {
+		MPQ_DVB_ERR_PRINT(
+				"%s: failed to allocate devices memory\n",
+				__func__);
+
+		result = -ENOMEM;
+		goto init_failed;
+	}
+
+	/*
+	 * Create a new ION client used by demux to allocate memory
+	 * for decoder's buffers.
+	 */
+	mpq_dmx_info.ion_client =
+		msm_ion_client_create("demux_client");
+	if (IS_ERR_OR_NULL(mpq_dmx_info.ion_client)) {
+		MPQ_DVB_ERR_PRINT(
+				"%s: msm_ion_client_create\n",
+				__func__);
+
+		result = PTR_ERR(mpq_dmx_info.ion_client);
+		/* NULL return yields PTR_ERR == 0; map it to -ENOMEM */
+		if (!result)
+			result = -ENOMEM;
+		mpq_dmx_info.ion_client = NULL;
+		goto init_failed_free_demux_devices;
+	}
+
+	/* Initialize and register all demux devices to the system */
+	for (i = 0; i < mpq_demux_device_num; i++) {
+		mpq_demux = mpq_dmx_info.devices+i;
+		mpq_demux->idx = i;
+
+		/* initialize demux source to memory by default */
+		mpq_demux->source = DMX_SOURCE_DVR0 + i;
+
+		/*
+		 * Give the plugin pointer to the ion client so
+		 * that it can allocate memory from ION if it requires so
+		 */
+		mpq_demux->ion_client = mpq_dmx_info.ion_client;
+
+		mutex_init(&mpq_demux->mutex);
+
+		mpq_demux->num_secure_feeds = 0;
+		mpq_demux->num_active_feeds = 0;
+		mpq_demux->sdmx_filter_count = 0;
+		mpq_demux->sdmx_session_handle = SDMX_INVALID_SESSION_HANDLE;
+		mpq_demux->sdmx_eos = 0;
+		mpq_demux->sdmx_log_level = SDMX_LOG_NO_PRINT;
+		mpq_demux->ts_packet_timestamp_source = 0;
+
+		if (mpq_demux->demux.feednum > MPQ_MAX_DMX_FILES) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: err - actual feednum (%d) larger than max, enlarge MPQ_MAX_DMX_FILES!\n",
+				__func__,
+				mpq_demux->demux.feednum);
+			result = -EINVAL;
+			goto init_failed_free_demux_devices;
+		}
+
+		/* Initialize private feed info */
+		for (j = 0; j < MPQ_MAX_DMX_FILES; j++) {
+			feed = &mpq_demux->feeds[j];
+			memset(feed, 0, sizeof(*feed));
+			feed->sdmx_filter_handle = SDMX_INVALID_FILTER_HANDLE;
+			feed->mpq_demux = mpq_demux;
+			feed->session_id = 0;
+		}
+
+		/*
+		 * mpq_demux_plugin_hw_init should be implemented
+		 * by the specific plugin
+		 */
+		result = dmx_init_func(mpq_adapter, mpq_demux);
+		if (result < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: dmx_init_func (errno=%d)\n",
+				__func__,
+				result);
+
+			goto init_failed_free_demux_devices;
+		}
+
+		/* Marks this device eligible for teardown in plugin_exit */
+		mpq_demux->is_initialized = 1;
+
+		/*
+		 * dvb-demux is now initialized,
+		 * update back-pointers of private feeds
+		 */
+		for (j = 0; j < MPQ_MAX_DMX_FILES; j++) {
+			feed = &mpq_demux->feeds[j];
+			feed->dvb_demux_feed = &mpq_demux->demux.feed[j];
+			mpq_demux->demux.feed[j].priv = feed;
+		}
+
+		/*
+		 * Add capability of receiving input from memory.
+		 * Every demux in our system may be connected to memory input,
+		 * or any live input.
+		 */
+		mpq_demux->fe_memory.source = DMX_MEMORY_FE;
+		result =
+			mpq_demux->demux.dmx.add_frontend(
+					&mpq_demux->demux.dmx,
+					&mpq_demux->fe_memory);
+
+		if (result < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: add_frontend (mem) failed (errno=%d)\n",
+				__func__,
+				result);
+
+			goto init_failed_free_demux_devices;
+		}
+	}
+
+	return 0;
+
+init_failed_free_demux_devices:
+	mpq_dmx_plugin_exit();
+init_failed:
+	return result;
+}
+
+/*
+ * Common plugin teardown: destroys the ION client, then for every device
+ * that completed initialization releases the plugin resources, the memory
+ * frontend, any sdmx session, the mutex and the dvb demux objects, and
+ * finally frees the device array. Safe to call on a partially initialized
+ * state (used as the error path of mpq_dmx_plugin_init()).
+ */
+void mpq_dmx_plugin_exit(void)
+{
+	int i;
+	struct mpq_demux *mpq_demux;
+
+	MPQ_DVB_DBG_PRINT("%s executed\n", __func__);
+
+	if (mpq_dmx_info.ion_client != NULL) {
+		ion_client_destroy(mpq_dmx_info.ion_client);
+		mpq_dmx_info.ion_client = NULL;
+	}
+
+	if (mpq_dmx_info.devices != NULL) {
+		for (i = 0; i < mpq_demux_device_num; i++) {
+			mpq_demux = mpq_dmx_info.devices + i;
+
+			/* Devices that never finished init have nothing
+			 * registered to undo.
+			 */
+			if (!mpq_demux->is_initialized)
+				continue;
+
+			if (mpq_demux->mpq_dmx_plugin_release)
+				mpq_demux->mpq_dmx_plugin_release(mpq_demux);
+
+			mpq_demux->demux.dmx.remove_frontend(
+						&mpq_demux->demux.dmx,
+						&mpq_demux->fe_memory);
+
+			if (mpq_dmx_info.secure_demux_app_loaded)
+				mpq_sdmx_close_session(mpq_demux);
+			mutex_destroy(&mpq_demux->mutex);
+			dvb_dmxdev_release(&mpq_demux->dmxdev);
+			dvb_dmx_release(&mpq_demux->demux);
+		}
+
+		vfree(mpq_dmx_info.devices);
+		mpq_dmx_info.devices = NULL;
+	}
+}
+
+/*
+ * Set the input source of a demux device.
+ *
+ * DVR sources must match the demux's own index (demuxN <-> dvrN); any other
+ * (front-end) source may only be claimed by one demux at a time.
+ *
+ * @demux: dvb demux instance
+ * @src: the requested source
+ *
+ * Returns 0 on success, -EINVAL on bad parameters or mismatched DVR,
+ * -EBUSY if another demux already owns the source.
+ */
+int mpq_dmx_set_source(
+		struct dmx_demux *demux,
+		const dmx_source_t *src)
+{
+	int i;
+	int dvr_index;
+	int dmx_index;
+	struct dvb_demux *dvb_demux = demux->priv;
+	struct mpq_demux *mpq_demux;
+
+	if ((mpq_dmx_info.devices == NULL) || (dvb_demux == NULL)) {
+		MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	mpq_demux = dvb_demux->priv;
+	if (mpq_demux == NULL) {
+		MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	/*
+	 * For dvr sources,
+	 * verify that this source is connected to the respective demux
+	 */
+	dmx_index = mpq_demux - mpq_dmx_info.devices;
+
+	if (*src >= DMX_SOURCE_DVR0) {
+		dvr_index = *src - DMX_SOURCE_DVR0;
+
+		if (dvr_index != dmx_index) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: can't connect demux%d to dvr%d\n",
+				__func__,
+				dmx_index,
+				dvr_index);
+			return -EINVAL;
+		}
+	}
+
+	/*
+	 * For front-end sources,
+	 * verify that this source is not already set to different demux
+	 */
+	for (i = 0; i < mpq_demux_device_num; i++) {
+		if ((&mpq_dmx_info.devices[i] != mpq_demux) &&
+			(mpq_dmx_info.devices[i].source == *src)) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: demux%d source can't be set,\n"
+				"demux%d occupies this source already\n",
+				__func__,
+				dmx_index,
+				i);
+			return -EBUSY;
+		}
+	}
+
+	mpq_demux->source = *src;
+	return 0;
+}
+
+/**
+ * Takes an ION allocated buffer's file descriptor and handles the details of
+ * mapping it into kernel memory and obtaining an ION handle for it.
+ * Internal helper function.
+ *
+ * @client: ION client
+ * @handle: ION file descriptor to map
+ * @priv_handle: returned ION handle. Must be freed when no longer needed
+ * @kernel_mem: returned kernel mapped pointer
+ *
+ * Note: mapping might not be possible in secured heaps/buffers, and so NULL
+ * might be returned in kernel_mem
+ *
+ * Return errors status
+ */
+static int mpq_map_buffer_to_kernel(
+	struct ion_client *client,
+	int handle,
+	struct ion_handle **priv_handle,
+	void **kernel_mem)
+{
+	struct ion_handle *ion_handle;
+	unsigned long ionflag = 0;
+	int ret;
+
+	if (client == NULL || priv_handle == NULL || kernel_mem == NULL) {
+		MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	ion_handle = ion_import_dma_buf_fd(client, handle);
+	if (IS_ERR_OR_NULL(ion_handle)) {
+		ret = PTR_ERR(ion_handle);
+		MPQ_DVB_ERR_PRINT("%s: ion_import_dma_buf failed %d\n",
+			__func__, ret);
+		/* NULL return yields PTR_ERR == 0; map it to -ENOMEM */
+		if (!ret)
+			ret = -ENOMEM;
+
+		goto map_buffer_failed;
+	}
+
+	ret = ion_handle_get_flags(client, ion_handle, &ionflag);
+	if (ret) {
+		MPQ_DVB_ERR_PRINT("%s: ion_handle_get_flags failed %d\n",
+			__func__, ret);
+		goto map_buffer_failed_free_buff;
+	}
+
+	/* Secure buffers cannot be mapped into the kernel */
+	if (ionflag & ION_FLAG_SECURE) {
+		MPQ_DVB_DBG_PRINT("%s: secured buffer\n", __func__);
+		*kernel_mem = NULL;
+	} else {
+		size_t tmp;
+		*kernel_mem = ion_map_kernel(client, ion_handle);
+		if (IS_ERR_OR_NULL(*kernel_mem)) {
+			ret = PTR_ERR(*kernel_mem);
+			MPQ_DVB_ERR_PRINT("%s: ion_map_kernel failed, ret=%d\n",
+				__func__, ret);
+			if (!ret)
+				ret = -ENOMEM;
+			goto map_buffer_failed_free_buff;
+		}
+		ion_handle_get_size(client, ion_handle, &tmp);
+		MPQ_DVB_DBG_PRINT(
+			"%s: mapped to address 0x%p, size=%zu\n",
+			__func__, *kernel_mem, tmp);
+	}
+
+	*priv_handle = ion_handle;
+	return 0;
+
+map_buffer_failed_free_buff:
+	ion_free(client, ion_handle);
+map_buffer_failed:
+	return ret;
+}
+
+/*
+ * dmx API wrapper: validate the demux objects and delegate the actual ION
+ * import/mapping of dmx_buffer->handle to mpq_map_buffer_to_kernel().
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int mpq_dmx_map_buffer(struct dmx_demux *demux, struct dmx_buffer *dmx_buffer,
+		void **priv_handle, void **kernel_mem)
+{
+	struct dvb_demux *dvb_demux = demux->priv;
+	struct mpq_demux *mpq_demux;
+
+	if ((mpq_dmx_info.devices == NULL) || (dvb_demux == NULL) ||
+		(priv_handle == NULL) || (kernel_mem == NULL)) {
+		MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	mpq_demux = dvb_demux->priv;
+	if (mpq_demux == NULL) {
+		MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	return mpq_map_buffer_to_kernel(
+		mpq_demux->ion_client,
+		dmx_buffer->handle,
+		(struct ion_handle **)priv_handle, kernel_mem);
+}
+
+/*
+ * dmx API wrapper: undo mpq_dmx_map_buffer(). Unmaps the kernel mapping
+ * (unless the buffer is secure and was never mapped) and releases the ION
+ * handle stored in priv_handle.
+ *
+ * Returns 0 on success or -EINVAL on bad parameters / flag query failure.
+ */
+int mpq_dmx_unmap_buffer(struct dmx_demux *demux,
+		void *priv_handle)
+{
+	struct dvb_demux *dvb_demux = demux->priv;
+	struct ion_handle *ion_handle = priv_handle;
+	struct mpq_demux *mpq_demux;
+	unsigned long ionflag = 0;
+	int ret;
+
+	if ((mpq_dmx_info.devices == NULL) || (dvb_demux == NULL) ||
+		(priv_handle == NULL)) {
+		MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	mpq_demux = dvb_demux->priv;
+	if (mpq_demux == NULL) {
+		MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	ret = ion_handle_get_flags(mpq_demux->ion_client, ion_handle, &ionflag);
+	if (ret) {
+		MPQ_DVB_ERR_PRINT("%s: ion_handle_get_flags failed %d\n",
+			__func__, ret);
+		return -EINVAL;
+	}
+
+	/* Secure buffers were never kernel-mapped, so skip the unmap */
+	if (!(ionflag & ION_FLAG_SECURE))
+		ion_unmap_kernel(mpq_demux->ion_client, ion_handle);
+
+	ion_free(mpq_demux->ion_client, ion_handle);
+
+	return 0;
+}
+
+/*
+ * Return a decoder buffer packet (identified by cookie) back to the stream
+ * buffer so it can be reused. Supports video and audio feeds only.
+ *
+ * Locking: takes the demux mutex, then the per-feed buffer spinlock while
+ * touching the stream buffer - same order used elsewhere in this driver.
+ *
+ * Returns the mpq_streambuffer_pkt_dispose() result, or -EINVAL for a bad
+ * cookie, a released buffer, or a non-decoder feed.
+ */
+int mpq_dmx_reuse_decoder_buffer(struct dvb_demux_feed *feed, int cookie)
+{
+	struct mpq_demux *mpq_demux = feed->demux->priv;
+
+	MPQ_DVB_DBG_PRINT("%s: cookie=%d\n", __func__, cookie);
+
+	if (cookie < 0) {
+		MPQ_DVB_ERR_PRINT("%s: invalid cookie parameter\n", __func__);
+		return -EINVAL;
+	}
+
+	if (dvb_dmx_is_video_feed(feed)) {
+		struct mpq_video_feed_info *feed_data;
+		struct mpq_feed *mpq_feed;
+		struct mpq_streambuffer *stream_buffer;
+		int ret;
+
+		mutex_lock(&mpq_demux->mutex);
+		mpq_feed = feed->priv;
+		feed_data = &mpq_feed->video_info;
+
+		spin_lock(&feed_data->video_buffer_lock);
+		stream_buffer = feed_data->video_buffer;
+		if (stream_buffer == NULL) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: invalid feed, feed_data->video_buffer is NULL\n",
+				__func__);
+			spin_unlock(&feed_data->video_buffer_lock);
+			mutex_unlock(&mpq_demux->mutex);
+			return -EINVAL;
+		}
+
+		ret = mpq_streambuffer_pkt_dispose(stream_buffer, cookie, 1);
+		spin_unlock(&feed_data->video_buffer_lock);
+		mutex_unlock(&mpq_demux->mutex);
+
+		return ret;
+	} else if (dvb_dmx_is_audio_feed(feed)) {
+		struct mpq_audio_feed_info *feed_data;
+		struct mpq_feed *mpq_feed;
+		struct mpq_streambuffer *stream_buffer;
+		int ret;
+
+		mutex_lock(&mpq_demux->mutex);
+		mpq_feed = feed->priv;
+		feed_data = &mpq_feed->audio_info;
+
+		spin_lock(&feed_data->audio_buffer_lock);
+		stream_buffer = feed_data->audio_buffer;
+		if (stream_buffer == NULL) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: invalid feed, feed_data->audio_buffer is NULL\n",
+				__func__);
+			spin_unlock(&feed_data->audio_buffer_lock);
+			mutex_unlock(&mpq_demux->mutex);
+			return -EINVAL;
+		}
+
+		ret = mpq_streambuffer_pkt_dispose(stream_buffer, cookie, 1);
+		spin_unlock(&feed_data->audio_buffer_lock);
+		mutex_unlock(&mpq_demux->mutex);
+
+		return ret;
+	}
+	MPQ_DVB_ERR_PRINT("%s: Invalid feed type %d\n",
+			__func__, feed->pes_type);
+
+	return -EINVAL;
+}
+
+/**
+ * Handles the details of internal decoder buffer allocation via ION.
+ * Internal helper function.
+ * @feed_data: decoder feed object
+ * @dec_buffs: buffer information
+ * @client: ION client
+ *
+ * Allocates a single 4K-aligned ION buffer, maps it into the kernel, and
+ * exports it as an fd recorded in the buffer descriptor. Error paths unwind
+ * in reverse order via gotos.
+ *
+ * Return error status
+ */
+static int mpq_dmx_init_internal_buffers(
+	struct mpq_demux *mpq_demux,
+	struct mpq_video_feed_info *feed_data,
+	struct dmx_decoder_buffers *dec_buffs)
+{
+	struct ion_handle *temp_handle = NULL;
+	void *payload_buffer = NULL;
+	int actual_buffer_size = 0;
+	int ret = 0;
+
+	MPQ_DVB_DBG_PRINT("%s: Internal decoder buffer allocation\n", __func__);
+
+	/* Round the requested size up to a 4K multiple */
+	actual_buffer_size = dec_buffs->buffers_size;
+	actual_buffer_size += (SZ_4K - 1);
+	actual_buffer_size &= ~(SZ_4K - 1);
+
+	temp_handle = ion_alloc(mpq_demux->ion_client,
+		actual_buffer_size, SZ_4K,
+		ION_HEAP(video_secure_ion_heap) |
+		ION_HEAP(video_nonsecure_ion_heap),
+		mpq_demux->decoder_alloc_flags);
+
+	if (IS_ERR_OR_NULL(temp_handle)) {
+		ret = PTR_ERR(temp_handle);
+		MPQ_DVB_ERR_PRINT("%s: FAILED to allocate payload buffer %d\n",
+			__func__, ret);
+		if (!ret)
+			ret = -ENOMEM;
+		goto end;
+	}
+
+	payload_buffer = ion_map_kernel(mpq_demux->ion_client, temp_handle);
+
+	if (IS_ERR_OR_NULL(payload_buffer)) {
+		ret = PTR_ERR(payload_buffer);
+		MPQ_DVB_ERR_PRINT(
+			"%s: FAILED to map payload buffer %d\n",
+			__func__, ret);
+		if (!ret)
+			ret = -ENOMEM;
+		goto init_failed_free_payload_buffer;
+	}
+
+	feed_data->buffer_desc.decoder_buffers_num = 1;
+	feed_data->buffer_desc.ion_handle[0] = temp_handle;
+	feed_data->buffer_desc.desc[0].base = payload_buffer;
+	feed_data->buffer_desc.desc[0].size = actual_buffer_size;
+	feed_data->buffer_desc.desc[0].read_ptr = 0;
+	feed_data->buffer_desc.desc[0].write_ptr = 0;
+	feed_data->buffer_desc.desc[0].handle =
+		ion_share_dma_buf_fd(mpq_demux->ion_client, temp_handle);
+
+	if (feed_data->buffer_desc.desc[0].handle < 0) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: FAILED to share payload buffer %d\n",
+			__func__, ret);
+		ret = -ENOMEM;
+		goto init_failed_unmap_payload_buffer;
+	}
+
+	/* Pin the exported fd's struct file so the buffer outlives any
+	 * user-space close of the fd.
+	 * NOTE(review): fget() return value is not checked here - a NULL
+	 * shared_file is tolerated by the release path; confirm intended.
+	 */
+	feed_data->buffer_desc.shared_file = fget(
+		feed_data->buffer_desc.desc[0].handle);
+
+	return 0;
+
+init_failed_unmap_payload_buffer:
+	ion_unmap_kernel(mpq_demux->ion_client, temp_handle);
+	feed_data->buffer_desc.desc[0].base = NULL;
+init_failed_free_payload_buffer:
+	ion_free(mpq_demux->ion_client, temp_handle);
+	feed_data->buffer_desc.ion_handle[0] = NULL;
+	feed_data->buffer_desc.desc[0].size = 0;
+	feed_data->buffer_desc.decoder_buffers_num = 0;
+	feed_data->buffer_desc.shared_file = NULL;
+end:
+	return ret;
+
+}
+
+/**
+ * Handles the details of external decoder buffers allocated by user.
+ * Each buffer is mapped into kernel memory and an ION handle is obtained, and
+ * decoder feed object is updated with related information.
+ * Internal helper function.
+ * @feed_data: decoder feed object
+ * @dec_buffs: buffer information
+ * @client: ION client
+ *
+ * Ring-buffer mode uses a single buffer; linear mode maps buffers_num
+ * buffers. On any mapping failure, every buffer mapped so far is unwound.
+ *
+ * Return error status
+ */
+static int mpq_dmx_init_external_buffers(
+	struct mpq_video_feed_info *feed_data,
+	struct dmx_decoder_buffers *dec_buffs,
+	struct ion_client *client)
+{
+	struct ion_handle *temp_handle = NULL;
+	void *payload_buffer = NULL;
+	int actual_buffer_size = 0;
+	int ret = 0;
+	int i;
+
+	/*
+	 * Payload buffer was allocated externally (through ION).
+	 * Map the ion handles to kernel memory
+	 */
+	MPQ_DVB_DBG_PRINT("%s: External decoder buffer allocation\n", __func__);
+
+	actual_buffer_size = dec_buffs->buffers_size;
+	if (!dec_buffs->is_linear) {
+		MPQ_DVB_DBG_PRINT("%s: Ex. Ring-buffer\n", __func__);
+		feed_data->buffer_desc.decoder_buffers_num = 1;
+	} else {
+		MPQ_DVB_DBG_PRINT("%s: Ex. Linear\n", __func__);
+		feed_data->buffer_desc.decoder_buffers_num =
+			dec_buffs->buffers_num;
+	}
+
+	for (i = 0; i < feed_data->buffer_desc.decoder_buffers_num; i++) {
+		ret = mpq_map_buffer_to_kernel(
+			client,
+			dec_buffs->handles[i],
+			&temp_handle,
+			&payload_buffer);
+		if (ret < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: Failed mapping buffer %d\n",
+				__func__, i);
+			goto init_failed;
+		}
+		feed_data->buffer_desc.ion_handle[i] = temp_handle;
+		feed_data->buffer_desc.desc[i].base = payload_buffer;
+		feed_data->buffer_desc.desc[i].handle =
+			dec_buffs->handles[i];
+		feed_data->buffer_desc.desc[i].size =
+			dec_buffs->buffers_size;
+		feed_data->buffer_desc.desc[i].read_ptr = 0;
+		feed_data->buffer_desc.desc[i].write_ptr = 0;
+
+		MPQ_DVB_DBG_PRINT(
+			"%s: Buffer #%d: base=0x%p, handle=%d, size=%d\n",
+			__func__, i,
+			feed_data->buffer_desc.desc[i].base,
+			feed_data->buffer_desc.desc[i].handle,
+			feed_data->buffer_desc.desc[i].size);
+	}
+
+	return 0;
+
+init_failed:
+	/* Unwind all buffers mapped before the failure */
+	for (i = 0; i < feed_data->buffer_desc.decoder_buffers_num; i++) {
+		if (feed_data->buffer_desc.ion_handle[i]) {
+			if (feed_data->buffer_desc.desc[i].base) {
+				ion_unmap_kernel(client,
+					feed_data->buffer_desc.ion_handle[i]);
+				feed_data->buffer_desc.desc[i].base = NULL;
+			}
+			ion_free(client, feed_data->buffer_desc.ion_handle[i]);
+			feed_data->buffer_desc.ion_handle[i] = NULL;
+			feed_data->buffer_desc.desc[i].size = 0;
+		}
+	}
+	return ret;
+}
+
+/**
+ * Handles the details of initializing the mpq_streambuffer object according
+ * to the user decoder buffer configuration: External/Internal buffers and
+ * ring/linear buffering mode.
+ * Internal helper function.
+ * @feed: dvb demux feed object, contains the buffers configuration
+ * @feed_data: decoder feed object
+ * @stream_buffer: stream buffer object to initialize
+ *
+ * buffers_num == 0 selects driver-allocated (internal) buffers; otherwise
+ * the user-supplied (external) ION buffers are mapped.
+ *
+ * Return error status
+ */
+static int mpq_dmx_init_streambuffer(
+	struct mpq_feed *feed,
+	struct mpq_video_feed_info *feed_data,
+	struct mpq_streambuffer *stream_buffer)
+{
+	int ret;
+	void *packet_buffer = NULL;
+	struct mpq_demux *mpq_demux = feed->mpq_demux;
+	struct ion_client *client = mpq_demux->ion_client;
+	struct dmx_decoder_buffers *dec_buffs = NULL;
+	enum mpq_streambuffer_mode mode;
+
+	dec_buffs = feed->dvb_demux_feed->feed.ts.decoder_buffers;
+
+	/* Allocate packet buffer holding the meta-data */
+	packet_buffer = vmalloc(VIDEO_META_DATA_BUFFER_SIZE);
+
+	if (packet_buffer == NULL) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: FAILED to allocate packets buffer\n",
+			__func__);
+
+		ret = -ENOMEM;
+		goto end;
+	}
+
+	MPQ_DVB_DBG_PRINT("%s: dec_buffs: num=%d, size=%d, linear=%d\n",
+			__func__,
+			dec_buffs->buffers_num,
+			dec_buffs->buffers_size,
+			dec_buffs->is_linear);
+
+	if (dec_buffs->buffers_num == 0)
+		ret = mpq_dmx_init_internal_buffers(
+			mpq_demux, feed_data, dec_buffs);
+	else
+		ret = mpq_dmx_init_external_buffers(
+			feed_data, dec_buffs, client);
+
+	if (ret != 0)
+		goto init_failed_free_packet_buffer;
+
+	mode = dec_buffs->is_linear ? MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR :
+		MPQ_STREAMBUFFER_BUFFER_MODE_RING;
+	/* On success the streambuffer takes ownership of packet_buffer;
+	 * it is released later via mpq_dmx_release_streambuffer().
+	 */
+	ret = mpq_streambuffer_init(
+			feed_data->video_buffer,
+			mode,
+			feed_data->buffer_desc.desc,
+			feed_data->buffer_desc.decoder_buffers_num,
+			packet_buffer,
+			VIDEO_META_DATA_BUFFER_SIZE);
+
+	if (ret != 0)
+		goto init_failed_free_packet_buffer;
+
+	goto end;
+
+
+init_failed_free_packet_buffer:
+	vfree(packet_buffer);
+end:
+	return ret;
+}
+
+/*
+ * Release everything mpq_dmx_init_streambuffer() set up: unregister the
+ * stream interface, terminate the streambuffer, free the meta-data packet
+ * buffer, and unmap/unshare/free every decoder buffer in the descriptor.
+ */
+static void mpq_dmx_release_streambuffer(
+	struct mpq_feed *feed,
+	struct mpq_video_feed_info *feed_data,
+	struct mpq_streambuffer *video_buffer,
+	struct ion_client *client)
+{
+	int buf_num = 0;
+	int i;
+	struct dmx_decoder_buffers *dec_buffs =
+		feed->dvb_demux_feed->feed.ts.decoder_buffers;
+
+	mpq_adapter_unregister_stream_if(feed_data->stream_interface);
+
+	mpq_streambuffer_terminate(video_buffer);
+
+	/* Meta-data buffer was vmalloc'ed in mpq_dmx_init_streambuffer */
+	vfree(video_buffer->packet_data.data);
+
+	buf_num = feed_data->buffer_desc.decoder_buffers_num;
+
+	for (i = 0; i < buf_num; i++) {
+		if (feed_data->buffer_desc.ion_handle[i]) {
+			if (feed_data->buffer_desc.desc[i].base) {
+				ion_unmap_kernel(client,
+					feed_data->buffer_desc.ion_handle[i]);
+				feed_data->buffer_desc.desc[i].base = NULL;
+			}
+
+			/*
+			 * Un-share the buffer if kernel it the one that
+			 * shared it (internal allocation: buffers_num == 0).
+			 */
+			if (!dec_buffs->buffers_num &&
+				feed_data->buffer_desc.shared_file) {
+				fput(feed_data->buffer_desc.shared_file);
+				feed_data->buffer_desc.shared_file = NULL;
+			}
+
+			ion_free(client, feed_data->buffer_desc.ion_handle[i]);
+			feed_data->buffer_desc.ion_handle[i] = NULL;
+			feed_data->buffer_desc.desc[i].size = 0;
+		}
+	}
+}
+
+/*
+ * Flush the video stream buffer of a feed, discarding pending data and any
+ * partially matched framing pattern. No-op (returns 0) for non-video feeds;
+ * returns -ENODEV if the video buffer was already released.
+ */
+int mpq_dmx_flush_stream_buffer(struct dvb_demux_feed *feed)
+{
+	struct mpq_feed *mpq_feed = feed->priv;
+	struct mpq_video_feed_info *feed_data = &mpq_feed->video_info;
+	struct mpq_streambuffer *sbuff;
+	int ret = 0;
+
+	if (!dvb_dmx_is_video_feed(feed)) {
+		MPQ_DVB_DBG_PRINT("%s: not a video feed, feed type=%d\n",
+			__func__, feed->pes_type);
+		return 0;
+	}
+
+	spin_lock(&feed_data->video_buffer_lock);
+
+	sbuff = feed_data->video_buffer;
+	if (sbuff == NULL) {
+		MPQ_DVB_DBG_PRINT("%s: feed_data->video_buffer is NULL\n",
+			__func__);
+		spin_unlock(&feed_data->video_buffer_lock);
+		return -ENODEV;
+	}
+
+	/* Drop any partially accumulated framing-pattern bytes */
+	feed_data->pending_pattern_len = 0;
+
+	ret = mpq_streambuffer_flush(sbuff);
+	if (ret)
+		MPQ_DVB_ERR_PRINT("%s: mpq_streambuffer_flush failed, ret=%d\n",
+			__func__, ret);
+
+	spin_unlock(&feed_data->video_buffer_lock);
+
+	return ret;
+}
+
+/*
+ * Audio counterpart of mpq_dmx_init_internal_buffers(): allocate a single
+ * 4K-aligned ION buffer from the audio non-secure heap, map it into the
+ * kernel, and export it as an fd recorded in the audio buffer descriptor.
+ *
+ * Returns 0 on success or a negative errno; error paths unwind via gotos.
+ */
+static int mpq_dmx_init_audio_internal_buffers(
+	struct mpq_demux *mpq_demux,
+	struct mpq_audio_feed_info *feed_data,
+	struct dmx_decoder_buffers *dec_buffs)
+{
+	struct ion_handle *temp_handle = NULL;
+	void *payload_buffer = NULL;
+	int actual_buffer_size = 0;
+	int ret = 0;
+
+	MPQ_DVB_DBG_PRINT("%s: Internal audio decoder buffer allocation\n",
+			  __func__);
+
+	/* Round the requested size up to a 4K multiple */
+	actual_buffer_size = dec_buffs->buffers_size;
+	actual_buffer_size += (SZ_4K - 1);
+	actual_buffer_size &= ~(SZ_4K - 1);
+
+	temp_handle = ion_alloc(mpq_demux->ion_client,
+		actual_buffer_size, SZ_4K,
+		ION_HEAP(audio_nonsecure_ion_heap),
+		mpq_demux->decoder_alloc_flags);
+
+	if (IS_ERR_OR_NULL(temp_handle)) {
+		ret = PTR_ERR(temp_handle);
+		MPQ_DVB_ERR_PRINT(
+			"%s: FAILED to allocate audio payload buffer %d\n",
+			__func__, ret);
+		if (!ret)
+			ret = -ENOMEM;
+		goto end;
+	}
+
+	payload_buffer = ion_map_kernel(mpq_demux->ion_client, temp_handle);
+
+	if (IS_ERR_OR_NULL(payload_buffer)) {
+		ret = PTR_ERR(payload_buffer);
+		MPQ_DVB_ERR_PRINT(
+			"%s: FAILED to map audio payload buffer %d\n",
+			__func__, ret);
+		if (!ret)
+			ret = -ENOMEM;
+		goto init_failed_free_payload_buffer;
+	}
+	feed_data->buffer_desc.decoder_buffers_num = 1;
+	feed_data->buffer_desc.ion_handle[0] = temp_handle;
+	feed_data->buffer_desc.desc[0].base = payload_buffer;
+	feed_data->buffer_desc.desc[0].size = actual_buffer_size;
+	feed_data->buffer_desc.desc[0].read_ptr = 0;
+	feed_data->buffer_desc.desc[0].write_ptr = 0;
+	feed_data->buffer_desc.desc[0].handle =
+		ion_share_dma_buf_fd(mpq_demux->ion_client, temp_handle);
+	if (feed_data->buffer_desc.desc[0].handle < 0) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: FAILED to share audio payload buffer %d\n",
+			__func__, ret);
+		ret = -ENOMEM;
+		goto init_failed_unmap_payload_buffer;
+	}
+
+	/* Pin the exported fd's struct file for the buffer's lifetime */
+	feed_data->buffer_desc.shared_file = fget(
+		feed_data->buffer_desc.desc[0].handle);
+
+	return 0;
+
+init_failed_unmap_payload_buffer:
+	ion_unmap_kernel(mpq_demux->ion_client, temp_handle);
+	feed_data->buffer_desc.desc[0].base = NULL;
+init_failed_free_payload_buffer:
+	ion_free(mpq_demux->ion_client, temp_handle);
+	feed_data->buffer_desc.ion_handle[0] = NULL;
+	feed_data->buffer_desc.desc[0].size = 0;
+	feed_data->buffer_desc.decoder_buffers_num = 0;
+	feed_data->buffer_desc.shared_file = NULL;
+end:
+	return ret;
+}
+
+/*
+ * Audio counterpart of mpq_dmx_init_external_buffers(): map user-allocated
+ * ION buffers into the kernel and record them in the audio buffer
+ * descriptor. Ring mode maps one buffer, linear mode maps buffers_num.
+ *
+ * Returns 0 on success or a negative errno; on failure every buffer mapped
+ * so far is unwound.
+ */
+static int mpq_dmx_init_audio_external_buffers(
+	struct mpq_audio_feed_info *feed_data,
+	struct dmx_decoder_buffers *dec_buffs,
+	struct ion_client *client)
+{
+	struct ion_handle *temp_handle = NULL;
+	void *payload_buffer = NULL;
+	int actual_buffer_size = 0;
+	int ret = 0;
+	int i;
+
+	/*
+	 * Payload buffer was allocated externally (through ION).
+	 * Map the ion handles to kernel memory
+	 */
+	MPQ_DVB_DBG_PRINT("%s: External audio decoder buffer allocation\n",
+			  __func__);
+
+	actual_buffer_size = dec_buffs->buffers_size;
+	if (!dec_buffs->is_linear) {
+		MPQ_DVB_DBG_PRINT("%s: Ex. Ring-buffer\n", __func__);
+		feed_data->buffer_desc.decoder_buffers_num = 1;
+	} else {
+		MPQ_DVB_DBG_PRINT("%s: Ex. Linear\n", __func__);
+		feed_data->buffer_desc.decoder_buffers_num =
+			dec_buffs->buffers_num;
+	}
+
+	for (i = 0; i < feed_data->buffer_desc.decoder_buffers_num; i++) {
+		ret = mpq_map_buffer_to_kernel(
+			client,
+			dec_buffs->handles[i],
+			&temp_handle,
+			&payload_buffer);
+		if (ret < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: Failed mapping audio buffer %d\n",
+				__func__, i);
+			goto init_failed;
+		}
+		feed_data->buffer_desc.ion_handle[i] = temp_handle;
+		feed_data->buffer_desc.desc[i].base = payload_buffer;
+		feed_data->buffer_desc.desc[i].handle =
+			dec_buffs->handles[i];
+		feed_data->buffer_desc.desc[i].size =
+			dec_buffs->buffers_size;
+		feed_data->buffer_desc.desc[i].read_ptr = 0;
+		feed_data->buffer_desc.desc[i].write_ptr = 0;
+
+		MPQ_DVB_DBG_PRINT(
+			"%s: Audio Buffer #%d: base=0x%p, handle=%d, size=%d\n",
+			__func__, i,
+			feed_data->buffer_desc.desc[i].base,
+			feed_data->buffer_desc.desc[i].handle,
+			feed_data->buffer_desc.desc[i].size);
+	}
+
+	return 0;
+
+init_failed:
+	/* Unwind all buffers mapped before the failure */
+	for (i = 0; i < feed_data->buffer_desc.decoder_buffers_num; i++) {
+		if (feed_data->buffer_desc.ion_handle[i]) {
+			if (feed_data->buffer_desc.desc[i].base) {
+				ion_unmap_kernel(client,
+					feed_data->buffer_desc.ion_handle[i]);
+				feed_data->buffer_desc.desc[i].base = NULL;
+			}
+			ion_free(client, feed_data->buffer_desc.ion_handle[i]);
+			feed_data->buffer_desc.ion_handle[i] = NULL;
+			feed_data->buffer_desc.desc[i].size = 0;
+		}
+	}
+	return ret;
+}
+
+/*
+ * mpq_dmx_init_audio_streambuffer() - allocate the meta-data packet buffer
+ * and the audio decoder payload buffers (internal or external, depending on
+ * whether the user supplied buffers) and bind them into the feed's
+ * mpq_streambuffer.
+ *
+ * @feed: mpq feed the stream buffer belongs to
+ * @feed_data: audio feed information (holds the buffer descriptors)
+ * @stream_buffer: stream buffer object to initialize
+ *
+ * Return: 0 on success or a negative error code. On failure everything
+ * allocated by this function is released again.
+ */
+static int mpq_dmx_init_audio_streambuffer(
+	struct mpq_feed *feed,
+	struct mpq_audio_feed_info *feed_data,
+	struct mpq_streambuffer *stream_buffer)
+{
+	int ret;
+	int i;
+	void *packet_buffer = NULL;
+	struct mpq_demux *mpq_demux = feed->mpq_demux;
+	struct ion_client *client = mpq_demux->ion_client;
+	struct dmx_decoder_buffers *dec_buffs = NULL;
+	enum mpq_streambuffer_mode mode;
+
+	dec_buffs = feed->dvb_demux_feed->feed.ts.decoder_buffers;
+
+	/* Allocate packet buffer holding the meta-data */
+	packet_buffer = vmalloc(AUDIO_META_DATA_BUFFER_SIZE);
+	if (packet_buffer == NULL) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: FAILED to allocate packets buffer\n", __func__);
+		ret = -ENOMEM;
+		goto end;
+	}
+
+	MPQ_DVB_DBG_PRINT("%s: dec_buffs: num=%d, size=%d, linear=%d\n",
+			__func__, dec_buffs->buffers_num,
+			dec_buffs->buffers_size,
+			dec_buffs->is_linear);
+
+	/* buffers_num == 0 means the kernel allocates the payload buffers */
+	if (dec_buffs->buffers_num == 0)
+		ret = mpq_dmx_init_audio_internal_buffers(
+			mpq_demux, feed_data, dec_buffs);
+	else
+		ret = mpq_dmx_init_audio_external_buffers(
+			feed_data, dec_buffs, client);
+
+	if (ret != 0)
+		goto init_failed_free_packet_buffer;
+
+	mode = dec_buffs->is_linear ? MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR :
+		MPQ_STREAMBUFFER_BUFFER_MODE_RING;
+	ret = mpq_streambuffer_init(
+			feed_data->audio_buffer,
+			mode,
+			feed_data->buffer_desc.desc,
+			feed_data->buffer_desc.decoder_buffers_num,
+			packet_buffer,
+			AUDIO_META_DATA_BUFFER_SIZE);
+	if (ret != 0)
+		goto init_failed_free_decoder_buffers;
+
+	return 0;
+
+init_failed_free_decoder_buffers:
+	/*
+	 * Fix: previously the decoder payload buffers were leaked when
+	 * mpq_streambuffer_init() failed (only the packet buffer was freed).
+	 * Release them here the same way
+	 * mpq_dmx_release_audio_streambuffer() does.
+	 */
+	for (i = 0; i < feed_data->buffer_desc.decoder_buffers_num; i++) {
+		if (!feed_data->buffer_desc.ion_handle[i])
+			continue;
+		if (feed_data->buffer_desc.desc[i].base) {
+			ion_unmap_kernel(client,
+				feed_data->buffer_desc.ion_handle[i]);
+			feed_data->buffer_desc.desc[i].base = NULL;
+		}
+		/* Un-share the buffer if kernel is the one that shared it */
+		if (!dec_buffs->buffers_num &&
+			feed_data->buffer_desc.shared_file) {
+			fput(feed_data->buffer_desc.shared_file);
+			feed_data->buffer_desc.shared_file = NULL;
+		}
+		ion_free(client, feed_data->buffer_desc.ion_handle[i]);
+		feed_data->buffer_desc.ion_handle[i] = NULL;
+		feed_data->buffer_desc.desc[i].size = 0;
+	}
+	feed_data->buffer_desc.decoder_buffers_num = 0;
+init_failed_free_packet_buffer:
+	vfree(packet_buffer);
+end:
+	return ret;
+}
+
+/*
+ * mpq_dmx_release_audio_streambuffer() - tear down an audio stream buffer:
+ * unregister the stream interface, terminate the stream buffer, free the
+ * meta-data packet buffer and unmap/free all decoder payload buffers.
+ *
+ * @feed: mpq feed owning the stream buffer
+ * @feed_data: audio feed information holding the buffer descriptors
+ * @audio_buffer: the stream buffer being released
+ * @client: ION client used for unmapping/freeing
+ */
+static void mpq_dmx_release_audio_streambuffer(
+	struct mpq_feed *feed,
+	struct mpq_audio_feed_info *feed_data,
+	struct mpq_streambuffer *audio_buffer,
+	struct ion_client *client)
+{
+	int buf_num = 0;
+	int i;
+	struct dmx_decoder_buffers *dec_buffs =
+		feed->dvb_demux_feed->feed.ts.decoder_buffers;
+
+	mpq_adapter_unregister_stream_if(feed_data->stream_interface);
+
+	mpq_streambuffer_terminate(audio_buffer);
+
+	/* packet_data.data is the vmalloc'd meta-data buffer from init */
+	vfree(audio_buffer->packet_data.data);
+
+	buf_num = feed_data->buffer_desc.decoder_buffers_num;
+
+	for (i = 0; i < buf_num; i++) {
+		if (feed_data->buffer_desc.ion_handle[i]) {
+			if (feed_data->buffer_desc.desc[i].base) {
+				ion_unmap_kernel(client,
+					feed_data->buffer_desc.ion_handle[i]);
+				feed_data->buffer_desc.desc[i].base = NULL;
+			}
+
+			/*
+			 * Un-share the buffer if kernel is the one that
+			 * shared it.
+			 */
+			if (!dec_buffs->buffers_num &&
+				feed_data->buffer_desc.shared_file) {
+				fput(feed_data->buffer_desc.shared_file);
+				feed_data->buffer_desc.shared_file = NULL;
+			}
+
+			ion_free(client, feed_data->buffer_desc.ion_handle[i]);
+			feed_data->buffer_desc.ion_handle[i] = NULL;
+			feed_data->buffer_desc.desc[i].size = 0;
+		}
+	}
+}
+
+/*
+ * mpq_dmx_flush_audio_stream_buffer() - flush the decoder stream buffer of
+ * an audio feed. No-op (returns 0) for non-audio feeds; returns -ENODEV if
+ * the audio buffer was already released.
+ */
+int mpq_dmx_flush_audio_stream_buffer(struct dvb_demux_feed *feed)
+{
+	struct mpq_feed *mpq_feed = feed->priv;
+	struct mpq_audio_feed_info *feed_data = &mpq_feed->audio_info;
+	struct mpq_streambuffer *sbuff;
+	int ret = 0;
+
+	if (!dvb_dmx_is_audio_feed(feed)) {
+		MPQ_DVB_DBG_PRINT("%s: not a audio feed, feed type=%d\n",
+			__func__, feed->pes_type);
+		return 0;
+	}
+
+	/* Lock guards against concurrent release of audio_buffer */
+	spin_lock(&feed_data->audio_buffer_lock);
+
+	sbuff = feed_data->audio_buffer;
+	if (sbuff == NULL) {
+		MPQ_DVB_DBG_PRINT("%s: feed_data->audio_buffer is NULL\n",
+			__func__);
+		spin_unlock(&feed_data->audio_buffer_lock);
+		return -ENODEV;
+	}
+
+	ret = mpq_streambuffer_flush(sbuff);
+	if (ret)
+		MPQ_DVB_ERR_PRINT("%s: mpq_streambuffer_flush failed, ret=%d\n",
+			__func__, ret);
+
+	spin_unlock(&feed_data->audio_buffer_lock);
+
+	return ret;
+}
+
+/*
+ * mpq_dmx_flush_buffer() - dmx_ts_feed flush callback: reset the feed's PES
+ * state and flush the video/audio decoder stream buffer if applicable.
+ * @length is unused here; the whole buffer is flushed.
+ */
+static int mpq_dmx_flush_buffer(struct dmx_ts_feed *ts_feed, size_t length)
+{
+	struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+	struct dvb_demux *demux = feed->demux;
+	int ret = 0;
+
+	if (mutex_lock_interruptible(&demux->mutex))
+		return -ERESTARTSYS;
+
+	dvbdmx_ts_reset_pes_state(feed);
+
+	if (dvb_dmx_is_video_feed(feed)) {
+		MPQ_DVB_DBG_PRINT("%s: flushing video buffer\n", __func__);
+
+		ret = mpq_dmx_flush_stream_buffer(feed);
+	} else if (dvb_dmx_is_audio_feed(feed)) {
+		MPQ_DVB_DBG_PRINT("%s: flushing audio buffer\n", __func__);
+
+		ret = mpq_dmx_flush_audio_stream_buffer(feed);
+	}
+
+	mutex_unlock(&demux->mutex);
+	return ret;
+}
+
+/**
+ * mpq_dmx_init_video_feed - Initialize video feed information used to pass
+ * data directly to the decoder.
+ *
+ * @mpq_feed: The mpq feed object
+ *
+ * Resolves the stream interface from the PES type, allocates and registers
+ * the decoder stream buffer, and resets all per-feed parsing state and
+ * statistics.
+ *
+ * Return error code.
+ */
+int mpq_dmx_init_video_feed(struct mpq_feed *mpq_feed)
+{
+	int ret;
+	struct mpq_video_feed_info *feed_data = &mpq_feed->video_info;
+	struct mpq_demux *mpq_demux = mpq_feed->mpq_demux;
+	struct mpq_streambuffer *stream_buffer;
+
+	/* get and store framing information if required */
+	if (video_framing) {
+		mpq_dmx_get_pattern_params(
+			mpq_feed->dvb_demux_feed->video_codec,
+			feed_data->patterns, &feed_data->patterns_num);
+		if (!feed_data->patterns_num) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: FAILED to get framing pattern parameters\n",
+				__func__);
+
+			ret = -EINVAL;
+			goto init_failed_free_priv_data;
+		}
+	}
+
+	/* Register the new stream-buffer interface to MPQ adapter */
+	switch (mpq_feed->dvb_demux_feed->pes_type) {
+	case DMX_PES_VIDEO0:
+		store_mpq_video_feed[0] = mpq_feed;
+		feed_data->stream_interface =
+			MPQ_ADAPTER_VIDEO0_STREAM_IF;
+		break;
+
+	case DMX_PES_VIDEO1:
+		store_mpq_video_feed[1] = mpq_feed;
+		feed_data->stream_interface =
+			MPQ_ADAPTER_VIDEO1_STREAM_IF;
+		break;
+
+	case DMX_PES_VIDEO2:
+		store_mpq_video_feed[2] = mpq_feed;
+		feed_data->stream_interface =
+			MPQ_ADAPTER_VIDEO2_STREAM_IF;
+		break;
+
+	case DMX_PES_VIDEO3:
+		store_mpq_video_feed[3] = mpq_feed;
+		feed_data->stream_interface =
+			MPQ_ADAPTER_VIDEO3_STREAM_IF;
+		break;
+
+	default:
+		MPQ_DVB_ERR_PRINT(
+			"%s: Invalid pes type %d\n",
+			__func__,
+			mpq_feed->dvb_demux_feed->pes_type);
+		ret = -EINVAL;
+		goto init_failed_free_priv_data;
+	}
+
+	/* make sure not occupied already */
+	stream_buffer = NULL;
+	mpq_adapter_get_stream_if(
+		feed_data->stream_interface,
+		&stream_buffer);
+	if (stream_buffer != NULL) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: Video interface %d already occupied!\n",
+			__func__,
+			feed_data->stream_interface);
+		ret = -EBUSY;
+		goto init_failed_free_priv_data;
+	}
+
+	feed_data->video_buffer =
+		&mpq_dmx_info.decoder_buffers[feed_data->stream_interface];
+
+	ret = mpq_dmx_init_streambuffer(
+		mpq_feed, feed_data, feed_data->video_buffer);
+	if (ret) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: mpq_dmx_init_streambuffer failed, err = %d\n",
+			__func__, ret);
+		goto init_failed_free_priv_data;
+	}
+
+	ret = mpq_adapter_register_stream_if(
+		feed_data->stream_interface,
+		feed_data->video_buffer);
+
+	if (ret < 0) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: mpq_adapter_register_stream_if failed, err = %d\n",
+			__func__, ret);
+		goto init_failed_free_stream_buffer;
+	}
+
+	spin_lock_init(&feed_data->video_buffer_lock);
+
+	/* Reset PES-header parsing state and framing bookkeeping */
+	feed_data->pes_header_left_bytes = PES_MANDATORY_FIELDS_LEN;
+	feed_data->pes_header_offset = 0;
+	mpq_feed->dvb_demux_feed->pusi_seen = 0;
+	mpq_feed->dvb_demux_feed->peslen = 0;
+	feed_data->fullness_wait_cancel = 0;
+	mpq_streambuffer_get_data_rw_offset(feed_data->video_buffer, NULL,
+		&feed_data->frame_offset);
+	feed_data->last_pattern_offset = 0;
+	feed_data->pending_pattern_len = 0;
+	feed_data->last_framing_match_type = 0;
+	feed_data->found_sequence_header_pattern = 0;
+	memset(&feed_data->prefix_size, 0,
+		sizeof(struct dvb_dmx_video_prefix_size_masks));
+	feed_data->first_prefix_size = 0;
+	feed_data->saved_pts_dts_info.pts_exist = 0;
+	feed_data->saved_pts_dts_info.dts_exist = 0;
+	feed_data->new_pts_dts_info.pts_exist = 0;
+	feed_data->new_pts_dts_info.dts_exist = 0;
+	feed_data->saved_info_used = 1;
+	feed_data->new_info_exists = 0;
+	feed_data->first_pts_dts_copy = 1;
+	feed_data->tei_errs = 0;
+	feed_data->last_continuity = -1;
+	feed_data->continuity_errs = 0;
+	feed_data->ts_packets_num = 0;
+	feed_data->ts_dropped_bytes = 0;
+
+	/* Reset per-interface decoder statistics */
+	mpq_demux->decoder_stat[feed_data->stream_interface].drop_count = 0;
+	mpq_demux->decoder_stat[feed_data->stream_interface].out_count = 0;
+	mpq_demux->decoder_stat[feed_data->stream_interface].
+		out_interval_sum = 0;
+	mpq_demux->decoder_stat[feed_data->stream_interface].
+		out_interval_max = 0;
+	mpq_demux->decoder_stat[feed_data->stream_interface].ts_errors = 0;
+	mpq_demux->decoder_stat[feed_data->stream_interface].cc_errors = 0;
+
+	return 0;
+
+init_failed_free_stream_buffer:
+	mpq_dmx_release_streambuffer(mpq_feed, feed_data,
+		feed_data->video_buffer, mpq_demux->ion_client);
+	mpq_adapter_unregister_stream_if(feed_data->stream_interface);
+init_failed_free_priv_data:
+	feed_data->video_buffer = NULL;
+	return ret;
+}
+
+/**
+ * mpq_dmx_init_audio_feed - Initialize audio feed information used to pass
+ * data directly to the decoder (audio counterpart of
+ * mpq_dmx_init_video_feed).
+ *
+ * @mpq_feed: The mpq feed object
+ *
+ * Return error code.
+ */
+int mpq_dmx_init_audio_feed(struct mpq_feed *mpq_feed)
+{
+	int ret;
+	struct mpq_audio_feed_info *feed_data = &mpq_feed->audio_info;
+	struct mpq_demux *mpq_demux = mpq_feed->mpq_demux;
+	struct mpq_streambuffer *stream_buffer;
+
+	/* Register the new stream-buffer interface to MPQ adapter */
+	switch (mpq_feed->dvb_demux_feed->pes_type) {
+	case DMX_PES_AUDIO0:
+		store_mpq_audio_feed[0] = mpq_feed;
+		feed_data->stream_interface =
+			MPQ_ADAPTER_AUDIO0_STREAM_IF;
+		break;
+
+	case DMX_PES_AUDIO1:
+		store_mpq_audio_feed[1] = mpq_feed;
+		feed_data->stream_interface =
+			MPQ_ADAPTER_AUDIO1_STREAM_IF;
+		break;
+
+	case DMX_PES_AUDIO2:
+		store_mpq_audio_feed[2] = mpq_feed;
+		feed_data->stream_interface =
+			MPQ_ADAPTER_AUDIO2_STREAM_IF;
+		break;
+
+	case DMX_PES_AUDIO3:
+		store_mpq_audio_feed[3] = mpq_feed;
+		feed_data->stream_interface =
+			MPQ_ADAPTER_AUDIO3_STREAM_IF;
+		break;
+
+	default:
+		MPQ_DVB_ERR_PRINT(
+			"%s: Invalid pes type %d\n",
+			__func__,
+			mpq_feed->dvb_demux_feed->pes_type);
+		ret = -EINVAL;
+		goto init_failed_free_priv_data;
+	}
+
+	/* make sure not occupied already */
+	stream_buffer = NULL;
+	mpq_adapter_get_stream_if(
+		feed_data->stream_interface,
+		&stream_buffer);
+	if (stream_buffer != NULL) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: Audio interface %d already occupied!\n",
+			__func__, feed_data->stream_interface);
+		ret = -EBUSY;
+		goto init_failed_free_priv_data;
+	}
+
+	feed_data->audio_buffer =
+		&mpq_dmx_info.decoder_buffers[feed_data->stream_interface];
+
+	ret = mpq_dmx_init_audio_streambuffer(
+		mpq_feed, feed_data, feed_data->audio_buffer);
+	if (ret) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: mpq_dmx_init_streambuffer failed, err = %d\n",
+			__func__, ret);
+		goto init_failed_free_priv_data;
+	}
+
+	ret = mpq_adapter_register_stream_if(
+		feed_data->stream_interface,
+		feed_data->audio_buffer);
+
+	if (ret < 0) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: mpq_adapter_register_stream_if failed, err = %d\n",
+			__func__, ret);
+		goto init_failed_free_stream_buffer;
+	}
+
+	spin_lock_init(&feed_data->audio_buffer_lock);
+
+	/* Reset PES-header parsing state (audio has no framing patterns) */
+	feed_data->pes_header_left_bytes = PES_MANDATORY_FIELDS_LEN;
+	feed_data->pes_header_offset = 0;
+	mpq_feed->dvb_demux_feed->pusi_seen = 0;
+	mpq_feed->dvb_demux_feed->peslen = 0;
+	feed_data->fullness_wait_cancel = 0;
+	mpq_streambuffer_get_data_rw_offset(feed_data->audio_buffer, NULL,
+		&feed_data->frame_offset);
+	feed_data->saved_pts_dts_info.pts_exist = 0;
+	feed_data->saved_pts_dts_info.dts_exist = 0;
+	feed_data->new_pts_dts_info.pts_exist = 0;
+	feed_data->new_pts_dts_info.dts_exist = 0;
+	feed_data->saved_info_used = 1;
+	feed_data->new_info_exists = 0;
+	feed_data->first_pts_dts_copy = 1;
+	feed_data->tei_errs = 0;
+	feed_data->last_continuity = -1;
+	feed_data->continuity_errs = 0;
+	feed_data->ts_packets_num = 0;
+	feed_data->ts_dropped_bytes = 0;
+
+	/* Reset per-interface decoder statistics */
+	mpq_demux->decoder_stat[feed_data->stream_interface].drop_count = 0;
+	mpq_demux->decoder_stat[feed_data->stream_interface].out_count = 0;
+	mpq_demux->decoder_stat[feed_data->stream_interface].
+		out_interval_sum = 0;
+	mpq_demux->decoder_stat[feed_data->stream_interface].
+		out_interval_max = 0;
+	mpq_demux->decoder_stat[feed_data->stream_interface].ts_errors = 0;
+	mpq_demux->decoder_stat[feed_data->stream_interface].cc_errors = 0;
+
+	return 0;
+
+init_failed_free_stream_buffer:
+	mpq_dmx_release_audio_streambuffer(mpq_feed, feed_data,
+		feed_data->audio_buffer, mpq_demux->ion_client);
+	mpq_adapter_unregister_stream_if(feed_data->stream_interface);
+init_failed_free_priv_data:
+	feed_data->audio_buffer = NULL;
+	return ret;
+}
+
+/**
+ * mpq_dmx_terminate_video_feed - terminate video feed information
+ * that was previously initialized in mpq_dmx_init_video_feed
+ *
+ * @mpq_feed: The mpq feed used for the video TS packets
+ *
+ * Return error code.
+ */
+int mpq_dmx_terminate_video_feed(struct mpq_feed *mpq_feed)
+{
+	struct mpq_streambuffer *video_buffer;
+	struct mpq_video_feed_info *feed_data;
+	struct mpq_demux *mpq_demux;
+
+	if (mpq_feed == NULL)
+		return -EINVAL;
+
+	mpq_demux = mpq_feed->mpq_demux;
+	feed_data = &mpq_feed->video_info;
+
+	/*
+	 * Clear video_buffer under the lock so concurrent users see NULL,
+	 * then wake any waiters before releasing the buffer itself.
+	 */
+	spin_lock(&feed_data->video_buffer_lock);
+	video_buffer = feed_data->video_buffer;
+	feed_data->video_buffer = NULL;
+	wake_up_all(&video_buffer->raw_data.queue);
+	spin_unlock(&feed_data->video_buffer_lock);
+
+	mpq_dmx_release_streambuffer(mpq_feed, feed_data,
+		video_buffer, mpq_demux->ion_client);
+
+	return 0;
+}
+
+/*
+ * mpq_dmx_terminate_audio_feed() - audio counterpart of
+ * mpq_dmx_terminate_video_feed: detach the audio stream buffer under the
+ * lock, wake any waiters, then release the buffer resources.
+ */
+int mpq_dmx_terminate_audio_feed(struct mpq_feed *mpq_feed)
+{
+	struct mpq_streambuffer *audio_buffer;
+	struct mpq_audio_feed_info *feed_data;
+	struct mpq_demux *mpq_demux;
+
+	if (mpq_feed == NULL)
+		return -EINVAL;
+
+	mpq_demux = mpq_feed->mpq_demux;
+	feed_data = &mpq_feed->audio_info;
+
+	spin_lock(&feed_data->audio_buffer_lock);
+	audio_buffer = feed_data->audio_buffer;
+	feed_data->audio_buffer = NULL;
+	wake_up_all(&audio_buffer->raw_data.queue);
+	spin_unlock(&feed_data->audio_buffer_lock);
+
+	mpq_dmx_release_audio_streambuffer(mpq_feed, feed_data,
+		audio_buffer, mpq_demux->ion_client);
+
+	return 0;
+}
+
+/*
+ * mpq_dmx_peer_rec_feed() - find another active feed that shares the same
+ * output ring buffer as @feed (i.e. belongs to the same recording filter).
+ *
+ * Return: the peer feed, or NULL if none exists.
+ */
+struct dvb_demux_feed *mpq_dmx_peer_rec_feed(struct dvb_demux_feed *feed)
+{
+	struct dvb_demux_feed *tmp;
+	struct dvb_demux *dvb_demux = feed->demux;
+
+	list_for_each_entry(tmp, &dvb_demux->feed_list, list_head) {
+		if (tmp != feed && tmp->state == DMX_STATE_GO &&
+			tmp->feed.ts.buffer.ringbuff ==
+			feed->feed.ts.buffer.ringbuff) {
+			MPQ_DVB_DBG_PRINT(
+				"%s: main feed pid=%d, secondary feed pid=%d\n",
+				__func__, tmp->pid, feed->pid);
+			return tmp;
+		}
+	}
+
+	return NULL;
+}
+
+/*
+ * mpq_sdmx_alloc_data_buf() - allocate an ION buffer of @size bytes from the
+ * QSECOM heap for secure-demux output, map it into kernel memory and wrap it
+ * in the feed's sdmx ring buffer.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int mpq_sdmx_alloc_data_buf(struct mpq_feed *mpq_feed, size_t size)
+{
+	struct mpq_demux *mpq_demux = mpq_feed->mpq_demux;
+	void *buf_base;
+	int ret;
+
+	mpq_feed->sdmx_buf_handle = ion_alloc(mpq_demux->ion_client,
+		size,
+		SZ_4K,
+		ION_HEAP(ION_QSECOM_HEAP_ID),
+		0);
+	if (IS_ERR_OR_NULL(mpq_feed->sdmx_buf_handle)) {
+		ret = PTR_ERR(mpq_feed->sdmx_buf_handle);
+		mpq_feed->sdmx_buf_handle = NULL;
+		MPQ_DVB_ERR_PRINT(
+			"%s: FAILED to allocate sdmx buffer %d\n",
+			__func__, ret);
+		/* PTR_ERR(NULL) is 0; normalize to a real error code */
+		if (!ret)
+			ret = -ENOMEM;
+		goto end;
+	}
+
+	buf_base = ion_map_kernel(mpq_demux->ion_client,
+		mpq_feed->sdmx_buf_handle);
+	if (IS_ERR_OR_NULL(buf_base)) {
+		ret = PTR_ERR(buf_base);
+		MPQ_DVB_ERR_PRINT(
+			"%s: FAILED to map sdmx buffer %d\n",
+			__func__, ret);
+		if (!ret)
+			ret = -ENOMEM;
+		goto failed_free_buf;
+	}
+
+	dvb_ringbuffer_init(&mpq_feed->sdmx_buf, buf_base, size);
+
+	return 0;
+
+failed_free_buf:
+	ion_free(mpq_demux->ion_client, mpq_feed->sdmx_buf_handle);
+	mpq_feed->sdmx_buf_handle = NULL;
+end:
+	return ret;
+}
+
+/*
+ * mpq_sdmx_free_data_buf() - unmap and free the feed's sdmx ION data buffer
+ * (if allocated) and clear the handle. Always returns 0.
+ */
+static int mpq_sdmx_free_data_buf(struct mpq_feed *mpq_feed)
+{
+	struct mpq_demux *mpq_demux = mpq_feed->mpq_demux;
+
+	if (mpq_feed->sdmx_buf_handle) {
+		ion_unmap_kernel(mpq_demux->ion_client,
+			mpq_feed->sdmx_buf_handle);
+		mpq_feed->sdmx_buf.data = NULL;
+		ion_free(mpq_demux->ion_client,
+			mpq_feed->sdmx_buf_handle);
+		mpq_feed->sdmx_buf_handle = NULL;
+	}
+
+	return 0;
+}
+
+/*
+ * mpq_sdmx_init_metadata_buffer() - allocate the secure-demux metadata
+ * buffer from the QSECOM ION heap, map it into kernel memory, query its
+ * physical address for the sdmx firmware descriptor and wrap it in the
+ * feed's metadata ring buffer.
+ *
+ * @mpq_demux: demux device owning the ION client
+ * @feed: feed the metadata buffer belongs to
+ * @metadata_buff_desc: out-param filled with physical base address and size
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int mpq_sdmx_init_metadata_buffer(struct mpq_demux *mpq_demux,
+	struct mpq_feed *feed, struct sdmx_buff_descr *metadata_buff_desc)
+{
+	void *metadata_buff_base;
+	ion_phys_addr_t temp;
+	int ret;
+	size_t size;
+
+	feed->metadata_buf_handle = ion_alloc(mpq_demux->ion_client,
+		SDMX_METADATA_BUFFER_SIZE,
+		SZ_4K,
+		ION_HEAP(ION_QSECOM_HEAP_ID),
+		0);
+	if (IS_ERR_OR_NULL(feed->metadata_buf_handle)) {
+		ret = PTR_ERR(feed->metadata_buf_handle);
+		feed->metadata_buf_handle = NULL;
+		MPQ_DVB_ERR_PRINT(
+			"%s: FAILED to allocate metadata buffer %d\n",
+			__func__, ret);
+		/* PTR_ERR(NULL) is 0; normalize to a real error code */
+		if (!ret)
+			ret = -ENOMEM;
+		goto end;
+	}
+
+	metadata_buff_base = ion_map_kernel(mpq_demux->ion_client,
+		feed->metadata_buf_handle);
+	if (IS_ERR_OR_NULL(metadata_buff_base)) {
+		ret = PTR_ERR(metadata_buff_base);
+		MPQ_DVB_ERR_PRINT(
+			"%s: FAILED to map metadata buffer %d\n",
+			__func__, ret);
+		if (!ret)
+			ret = -ENOMEM;
+		goto failed_free_metadata_buf;
+	}
+
+	ret = ion_phys(mpq_demux->ion_client,
+		feed->metadata_buf_handle,
+		&temp,
+		&size);
+	if (ret) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: FAILED to get physical address %d\n",
+			__func__, ret);
+		goto failed_unmap_metadata_buf;
+	}
+	metadata_buff_desc->size = size;
+	metadata_buff_desc->base_addr = (u64)temp;
+
+	dvb_ringbuffer_init(&feed->metadata_buf, metadata_buff_base,
+		SDMX_METADATA_BUFFER_SIZE);
+
+	return 0;
+
+failed_unmap_metadata_buf:
+	ion_unmap_kernel(mpq_demux->ion_client, feed->metadata_buf_handle);
+failed_free_metadata_buf:
+	ion_free(mpq_demux->ion_client, feed->metadata_buf_handle);
+	feed->metadata_buf_handle = NULL;
+end:
+	return ret;
+}
+
+/*
+ * mpq_sdmx_terminate_metadata_buffer() - unmap and free the feed's sdmx
+ * metadata ION buffer (if allocated) and clear the handle. Always returns 0.
+ */
+static int mpq_sdmx_terminate_metadata_buffer(struct mpq_feed *mpq_feed)
+{
+	struct mpq_demux *mpq_demux = mpq_feed->mpq_demux;
+
+	if (mpq_feed->metadata_buf_handle) {
+		ion_unmap_kernel(mpq_demux->ion_client,
+			mpq_feed->metadata_buf_handle);
+		mpq_feed->metadata_buf.data = NULL;
+		ion_free(mpq_demux->ion_client,
+			mpq_feed->metadata_buf_handle);
+		mpq_feed->metadata_buf_handle = NULL;
+	}
+
+	return 0;
+}
+
+/*
+ * mpq_dmx_terminate_feed() - tear down a feed: detach it from the secure
+ * demux (remove its raw PID or its whole sdmx filter), terminate any
+ * video/audio decoder buffers, and free the sdmx data/metadata buffers.
+ * Runs under the demux mutex.
+ *
+ * Return: 0 or the last error encountered (teardown continues on errors).
+ */
+int mpq_dmx_terminate_feed(struct dvb_demux_feed *feed)
+{
+	int ret = 0;
+	struct mpq_demux *mpq_demux;
+	struct mpq_feed *mpq_feed;
+	struct mpq_feed *main_rec_feed = NULL;
+	struct dvb_demux_feed *tmp;
+
+	if (feed == NULL)
+		return -EINVAL;
+
+	mpq_demux = feed->demux->priv;
+
+	mutex_lock(&mpq_demux->mutex);
+	mpq_feed = feed->priv;
+
+	if (mpq_feed->sdmx_filter_handle != SDMX_INVALID_FILTER_HANDLE) {
+		/* A raw filter may be shared with a peer recording feed */
+		if (mpq_feed->filter_type == SDMX_RAW_FILTER) {
+			tmp = mpq_dmx_peer_rec_feed(feed);
+			if (tmp)
+				main_rec_feed = tmp->priv;
+		}
+
+		if (main_rec_feed) {
+			/* This feed is part of a recording filter */
+			MPQ_DVB_DBG_PRINT(
+				"%s: Removing raw pid %d from filter %d\n",
+				__func__, feed->pid,
+				mpq_feed->sdmx_filter_handle);
+			ret = sdmx_remove_raw_pid(
+				mpq_demux->sdmx_session_handle,
+				mpq_feed->sdmx_filter_handle, feed->pid);
+			if (ret)
+				MPQ_DVB_ERR_PRINT(
+					"%s: SDMX_remove_raw_pid failed. ret = %d\n",
+					__func__, ret);
+
+			/* If this feed that we are removing was set as primary,
+			 * now other feeds should be set as primary
+			 */
+			if (!mpq_feed->secondary_feed)
+				main_rec_feed->secondary_feed = 0;
+		} else {
+			/* Sole owner of the filter: remove it entirely */
+			MPQ_DVB_DBG_PRINT("%s: Removing filter %d, pid %d\n",
+				__func__, mpq_feed->sdmx_filter_handle,
+				feed->pid);
+			ret = sdmx_remove_filter(mpq_demux->sdmx_session_handle,
+				mpq_feed->sdmx_filter_handle);
+			if (ret) {
+				MPQ_DVB_ERR_PRINT(
+					"%s: SDMX_remove_filter failed. ret = %d\n",
+					__func__, ret);
+			}
+
+			mpq_demux->sdmx_filter_count--;
+			mpq_feed->sdmx_filter_handle =
+				SDMX_INVALID_FILTER_HANDLE;
+		}
+
+		/* Closes the sdmx session when the last filter is gone */
+		mpq_sdmx_close_session(mpq_demux);
+		if (mpq_demux->num_secure_feeds > 0)
+			mpq_demux->num_secure_feeds--;
+		else
+			MPQ_DVB_DBG_PRINT("%s: Invalid secure feed count= %u\n",
+				 __func__, mpq_demux->num_secure_feeds);
+	}
+
+	if (dvb_dmx_is_video_feed(feed)) {
+		ret = mpq_dmx_terminate_video_feed(mpq_feed);
+		if (ret)
+			MPQ_DVB_ERR_PRINT(
+				"%s: mpq_dmx_terminate_video_feed failed. ret = %d\n",
+				__func__, ret);
+	} else if (dvb_dmx_is_audio_feed(feed)) {
+		ret = mpq_dmx_terminate_audio_feed(mpq_feed);
+		if (ret)
+			MPQ_DVB_ERR_PRINT(
+				"%s: mpq_dmx_terminate_audio_feed failed. ret = %d\n",
+				__func__, ret);
+	}
+
+	/* Wake readers blocked on the sdmx buffer before freeing it */
+	if (mpq_feed->sdmx_buf_handle) {
+		wake_up_all(&mpq_feed->sdmx_buf.queue);
+		mpq_sdmx_free_data_buf(mpq_feed);
+	}
+
+	mpq_sdmx_terminate_metadata_buffer(mpq_feed);
+	if (mpq_demux->num_active_feeds > 0)
+		mpq_demux->num_active_feeds--;
+	else
+		MPQ_DVB_DBG_PRINT("%s: Invalid num_active_feeds count = %u\n",
+			__func__, mpq_demux->num_active_feeds);
+
+	mutex_unlock(&mpq_demux->mutex);
+
+	return ret;
+}
+
+/*
+ * mpq_dmx_decoder_fullness_init() - re-arm the decoder fullness wait for a
+ * video or audio feed by clearing its cancel flag.
+ *
+ * Return: 0 for decoder feeds, -EINVAL for any other feed type.
+ */
+int mpq_dmx_decoder_fullness_init(struct dvb_demux_feed *feed)
+{
+	struct mpq_feed *mpq_feed;
+
+	if (dvb_dmx_is_video_feed(feed)) {
+		struct mpq_video_feed_info *feed_data;
+
+		mpq_feed = feed->priv;
+		feed_data = &mpq_feed->video_info;
+		feed_data->fullness_wait_cancel = 0;
+
+		return 0;
+	} else if (dvb_dmx_is_audio_feed(feed)) {
+		struct mpq_audio_feed_info *feed_data;
+
+		mpq_feed = feed->priv;
+		feed_data = &mpq_feed->audio_info;
+		feed_data->fullness_wait_cancel = 0;
+
+		return 0;
+	}
+
+	MPQ_DVB_DBG_PRINT("%s: Invalid feed type %d\n", __func__,
+			feed->pes_type);
+
+	return -EINVAL;
+}
+
+/**
+ * Returns whether the free space of decoder's output
+ * buffer is larger than specific number of bytes.
+ *
+ * @sbuff: MPQ stream buffer used for decoder data.
+ * @required_space: number of required free bytes in the buffer
+ *
+ * Return 1 if required free bytes are available, 0 otherwise.
+ */
+static inline int mpq_dmx_check_video_decoder_fullness(
+	struct mpq_streambuffer *sbuff,
+	size_t required_space)
+{
+	ssize_t free = mpq_streambuffer_data_free(sbuff);
+	ssize_t free_meta = mpq_streambuffer_metadata_free(sbuff);
+
+	/* Verify meta-data buffer can contain at least 1 packet */
+	if (free_meta < VIDEO_META_DATA_PACKET_SIZE)
+		return 0;
+
+	/*
+	 * For linear buffers, verify there's enough space for this TSP
+	 * and an additional buffer is free, as framing might required one
+	 * more buffer to be available.
+	 */
+	if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR)
+		return (free >= required_space &&
+			sbuff->pending_buffers_count < sbuff->buffers_num-1);
+	else
+		/* Ring buffer mode */
+		return (free >= required_space);
+}
+
+/*
+ * mpq_dmx_check_audio_decoder_fullness() - audio counterpart of
+ * mpq_dmx_check_video_decoder_fullness.
+ *
+ * Return 1 if @required_space data bytes plus one meta-data packet fit in
+ * @sbuff, 0 otherwise.
+ */
+static inline int mpq_dmx_check_audio_decoder_fullness(
+	struct mpq_streambuffer *sbuff,
+	size_t required_space)
+{
+	ssize_t free = mpq_streambuffer_data_free(sbuff);
+	ssize_t free_meta = mpq_streambuffer_metadata_free(sbuff);
+
+	/* Verify meta-data buffer can contain at least 1 packet */
+	if (free_meta < AUDIO_META_DATA_PACKET_SIZE)
+		return 0;
+
+	/*
+	 * For linear buffers, verify there's enough space for this TSP
+	 * and an additional buffer is free, as framing might required one
+	 * more buffer to be available.
+	 */
+	if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR)
+		return (free >= required_space &&
+			sbuff->pending_buffers_count < sbuff->buffers_num-1);
+	else
+		return (free >= required_space); /* Ring buffer mode */
+}
+
+/**
+ * Checks whether decoder's output buffer has free space
+ * for specific number of bytes, if not, the function waits
+ * until the amount of free-space is available.
+ *
+ * @feed: decoder's feed object
+ * @required_space: number of required free bytes in the buffer
+ * @lock_feed: indicates whether mutex should be held before
+ * accessing the feed information. If the caller of this function
+ * already holds a mutex then this should be set to 0 and 1 otherwise.
+ *
+ * Return 0 if required space is available and error code
+ * in case waiting on buffer fullness was aborted.
+ */
+static int mpq_dmx_decoder_fullness_check(
+	struct dvb_demux_feed *feed,
+	size_t required_space,
+	int lock_feed)
+{
+	struct mpq_demux *mpq_demux = feed->demux->priv;
+	struct mpq_streambuffer *sbuff = NULL;
+	struct mpq_video_feed_info *feed_data;
+	struct mpq_feed *mpq_feed;
+	int ret = 0;
+
+	if (!dvb_dmx_is_video_feed(feed)) {
+		MPQ_DVB_DBG_PRINT("%s: Invalid feed type %d\n",
+			__func__,
+			feed->pes_type);
+		return -EINVAL;
+	}
+
+	if (lock_feed) {
+		mutex_lock(&mpq_demux->mutex);
+	} else if (!mutex_is_locked(&mpq_demux->mutex)) {
+		MPQ_DVB_ERR_PRINT(
+				"%s: Mutex should have been locked\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	mpq_feed = feed->priv;
+	feed_data = &mpq_feed->video_info;
+
+	sbuff = feed_data->video_buffer;
+	if (sbuff == NULL) {
+		if (lock_feed)
+			mutex_unlock(&mpq_demux->mutex);
+		MPQ_DVB_ERR_PRINT("%s: mpq_streambuffer object is NULL\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if ((feed_data->video_buffer != NULL) &&
+		(!feed_data->fullness_wait_cancel) &&
+		(!mpq_dmx_check_video_decoder_fullness(sbuff,
+			required_space))) {
+		DEFINE_WAIT(__wait);
+
+		/*
+		 * Wait loop: the demux mutex is dropped around schedule(),
+		 * so video_buffer may be released concurrently; the loop
+		 * re-checks feed_data->video_buffer each iteration.
+		 * NOTE(review): the local 'sbuff' is not re-fetched after
+		 * re-acquiring the mutex — presumably safe because the
+		 * stream buffer object itself is statically allocated in
+		 * mpq_dmx_info.decoder_buffers; confirm against release path.
+		 */
+		for (;;) {
+			prepare_to_wait(&sbuff->raw_data.queue,
+				&__wait,
+				TASK_INTERRUPTIBLE);
+			if (!feed_data->video_buffer ||
+				feed_data->fullness_wait_cancel ||
+				mpq_dmx_check_video_decoder_fullness(sbuff,
+					required_space))
+				break;
+
+			if (!signal_pending(current)) {
+				mutex_unlock(&mpq_demux->mutex);
+				schedule();
+				mutex_lock(&mpq_demux->mutex);
+				continue;
+			}
+
+			ret = -ERESTARTSYS;
+			break;
+		}
+		finish_wait(&sbuff->raw_data.queue, &__wait);
+	}
+
+	if (ret < 0) {
+		if (lock_feed)
+			mutex_unlock(&mpq_demux->mutex);
+		return ret;
+	}
+
+	/* Wait may have ended because of cancel or buffer release */
+	if ((feed_data->fullness_wait_cancel) ||
+		(feed_data->video_buffer == NULL)) {
+		if (lock_feed)
+			mutex_unlock(&mpq_demux->mutex);
+		return -EINVAL;
+	}
+
+	if (lock_feed)
+		mutex_unlock(&mpq_demux->mutex);
+	return 0;
+}
+
+/*
+ * mpq_dmx_audio_decoder_fullness_check() - audio counterpart of
+ * mpq_dmx_decoder_fullness_check: wait (interruptibly) until the audio
+ * decoder buffer has @required_space free bytes, the wait is canceled, or
+ * the buffer is released.
+ *
+ * @lock_feed: 1 to take the demux mutex here, 0 if the caller holds it.
+ *
+ * Return 0 when space is available, negative error code otherwise.
+ */
+static int mpq_dmx_audio_decoder_fullness_check(
+	struct dvb_demux_feed *feed,
+	size_t required_space,
+	int lock_feed)
+{
+	struct mpq_demux *mpq_demux = feed->demux->priv;
+	struct mpq_streambuffer *sbuff = NULL;
+	struct mpq_audio_feed_info *feed_data;
+	struct mpq_feed *mpq_feed;
+	int ret = 0;
+
+	if (!dvb_dmx_is_audio_feed(feed)) {
+		MPQ_DVB_DBG_PRINT("%s: Invalid feed type %d\n",
+			__func__,
+			feed->pes_type);
+		return -EINVAL;
+	}
+
+	if (lock_feed) {
+		mutex_lock(&mpq_demux->mutex);
+	} else if (!mutex_is_locked(&mpq_demux->mutex)) {
+		MPQ_DVB_ERR_PRINT(
+				"%s: Mutex should have been locked\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	mpq_feed = feed->priv;
+	feed_data = &mpq_feed->audio_info;
+
+	sbuff = feed_data->audio_buffer;
+	if (sbuff == NULL) {
+		if (lock_feed)
+			mutex_unlock(&mpq_demux->mutex);
+		MPQ_DVB_ERR_PRINT("%s: mpq_streambuffer object is NULL\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if ((feed_data->audio_buffer != NULL) &&
+		(!feed_data->fullness_wait_cancel) &&
+		(!mpq_dmx_check_audio_decoder_fullness(sbuff,
+			required_space))) {
+		DEFINE_WAIT(__wait);
+
+		/*
+		 * Mutex is dropped around schedule(); audio_buffer may be
+		 * released meanwhile, hence the re-check each iteration.
+		 * NOTE(review): 'sbuff' is not re-fetched after re-locking
+		 * — same caveat as the video variant; confirm the stream
+		 * buffer's storage outlives the wait.
+		 */
+		for (;;) {
+			prepare_to_wait(&sbuff->raw_data.queue,
+				&__wait, TASK_INTERRUPTIBLE);
+			if (!feed_data->audio_buffer ||
+				feed_data->fullness_wait_cancel ||
+				mpq_dmx_check_audio_decoder_fullness(sbuff,
+					required_space))
+				break;
+
+			if (!signal_pending(current)) {
+				mutex_unlock(&mpq_demux->mutex);
+				schedule();
+				mutex_lock(&mpq_demux->mutex);
+				continue;
+			}
+
+			ret = -ERESTARTSYS;
+			break;
+		}
+		finish_wait(&sbuff->raw_data.queue, &__wait);
+	}
+
+	if (ret < 0) {
+		if (lock_feed)
+			mutex_unlock(&mpq_demux->mutex);
+		return ret;
+	}
+
+	/* Wait may have ended because of cancel or buffer release */
+	if ((feed_data->fullness_wait_cancel) ||
+		(feed_data->audio_buffer == NULL)) {
+		if (lock_feed)
+			mutex_unlock(&mpq_demux->mutex);
+		return -EINVAL;
+	}
+
+	if (lock_feed)
+		mutex_unlock(&mpq_demux->mutex);
+	return 0;
+}
+
+/*
+ * mpq_dmx_decoder_fullness_wait() - public entry point: block until the
+ * feed's decoder buffer has @required_space free bytes (takes the demux
+ * mutex internally). Returns 0 immediately for non-decoder feeds.
+ */
+int mpq_dmx_decoder_fullness_wait(
+		struct dvb_demux_feed *feed,
+		size_t required_space)
+{
+	if (dvb_dmx_is_video_feed(feed))
+		return mpq_dmx_decoder_fullness_check(feed, required_space, 1);
+	else if (dvb_dmx_is_audio_feed(feed))
+		return mpq_dmx_audio_decoder_fullness_check(feed,
+				required_space, 1);
+
+	return 0;
+}
+
+/*
+ * mpq_dmx_decoder_fullness_abort() - abort a pending decoder fullness wait:
+ * set the cancel flag and wake all waiters on the decoder buffer's queue.
+ *
+ * Return: 0 for decoder feeds (even if the buffer was already released),
+ * -EINVAL for any other feed type.
+ */
+int mpq_dmx_decoder_fullness_abort(struct dvb_demux_feed *feed)
+{
+	if (dvb_dmx_is_video_feed(feed)) {
+		struct mpq_feed *mpq_feed;
+		struct mpq_video_feed_info *feed_data;
+		struct dvb_ringbuffer *video_buff;
+
+		mpq_feed = feed->priv;
+		feed_data = &mpq_feed->video_info;
+
+		feed_data->fullness_wait_cancel = 1;
+
+		/* Lock guards against concurrent release of video_buffer */
+		spin_lock(&feed_data->video_buffer_lock);
+		if (feed_data->video_buffer == NULL) {
+			MPQ_DVB_DBG_PRINT(
+				"%s: video_buffer released\n", __func__);
+			spin_unlock(&feed_data->video_buffer_lock);
+			return 0;
+		}
+
+		video_buff = &feed_data->video_buffer->raw_data;
+		wake_up_all(&video_buff->queue);
+		spin_unlock(&feed_data->video_buffer_lock);
+
+		return 0;
+	} else if (dvb_dmx_is_audio_feed(feed)) {
+		struct mpq_feed *mpq_feed;
+		struct mpq_audio_feed_info *feed_data;
+		struct dvb_ringbuffer *audio_buff;
+
+		mpq_feed = feed->priv;
+		feed_data = &mpq_feed->audio_info;
+
+		feed_data->fullness_wait_cancel = 1;
+
+		spin_lock(&feed_data->audio_buffer_lock);
+		if (feed_data->audio_buffer == NULL) {
+			MPQ_DVB_DBG_PRINT(
+				"%s: audio_buffer released\n", __func__);
+			spin_unlock(&feed_data->audio_buffer_lock);
+			return 0;
+		}
+
+		audio_buff = &feed_data->audio_buffer->raw_data;
+		wake_up_all(&audio_buff->queue);
+		spin_unlock(&feed_data->audio_buffer_lock);
+
+		return 0;
+	}
+
+	MPQ_DVB_ERR_PRINT(
+		"%s: Invalid feed type %d\n", __func__, feed->pes_type);
+
+	return -EINVAL;
+}
+
+/*
+ * mpq_dmx_parse_mandatory_pes_header() - accumulate the first
+ * PES_MANDATORY_FIELDS_LEN bytes of a video PES header across TS packets
+ * and validate the header once complete.
+ *
+ * @ts_payload_offset / @bytes_avail are advanced/consumed in place.
+ *
+ * Return: 0 once the mandatory header is complete and valid; -EINVAL if
+ * more data is still needed or the header is invalid (pusi_seen is reset
+ * in the invalid case so data is dropped until the next PUSI).
+ */
+int mpq_dmx_parse_mandatory_pes_header(
+				struct dvb_demux_feed *feed,
+				struct mpq_video_feed_info *feed_data,
+				struct pes_packet_header *pes_header,
+				const u8 *buf,
+				u32 *ts_payload_offset,
+				int *bytes_avail)
+{
+	int left_size, copy_len;
+
+	if (feed_data->pes_header_offset < PES_MANDATORY_FIELDS_LEN) {
+		left_size =
+			PES_MANDATORY_FIELDS_LEN -
+			feed_data->pes_header_offset;
+
+		copy_len = (left_size > *bytes_avail) ?
+					*bytes_avail :
+					left_size;
+
+		memcpy((u8 *)((u8 *)pes_header + feed_data->pes_header_offset),
+			(buf + *ts_payload_offset),
+			copy_len);
+
+		feed_data->pes_header_offset += copy_len;
+
+		/* Header still incomplete: wait for more TS payload */
+		if (left_size > *bytes_avail)
+			return -EINVAL;
+
+		/* else - we have beginning of PES header */
+		*bytes_avail -= left_size;
+		*ts_payload_offset += left_size;
+
+		/* Make sure the PES packet is valid */
+		if (mpq_dmx_is_valid_video_pes(pes_header) < 0) {
+			/*
+			 * Since the new PES header parsing
+			 * failed, reset pusi_seen to drop all
+			 * data until next PUSI
+			 */
+			feed->pusi_seen = 0;
+			feed_data->pes_header_offset = 0;
+
+			MPQ_DVB_ERR_PRINT(
+				"%s: invalid packet\n",
+				__func__);
+
+			return -EINVAL;
+		}
+
+		feed_data->pes_header_left_bytes =
+			pes_header->pes_header_data_length;
+	}
+
+	return 0;
+}
+
+/*
+ * mpq_dmx_parse_mandatory_audio_pes_header() - audio counterpart of
+ * mpq_dmx_parse_mandatory_pes_header: accumulate and validate the mandatory
+ * part of an audio PES header across TS packets.
+ *
+ * Return: 0 once complete and valid, -EINVAL if more data is needed or the
+ * header is invalid (pusi_seen reset to drop data until next PUSI).
+ */
+int mpq_dmx_parse_mandatory_audio_pes_header(
+				struct dvb_demux_feed *feed,
+				struct mpq_audio_feed_info *feed_data,
+				struct pes_packet_header *pes_header,
+				const u8 *buf,
+				u32 *ts_payload_offset,
+				int *bytes_avail)
+{
+	int left_size, copy_len;
+
+	if (feed_data->pes_header_offset < PES_MANDATORY_FIELDS_LEN) {
+		left_size =
+			PES_MANDATORY_FIELDS_LEN -
+			feed_data->pes_header_offset;
+
+		copy_len = (left_size > *bytes_avail) ?
+					*bytes_avail :
+					left_size;
+
+		memcpy((u8 *)((u8 *)pes_header + feed_data->pes_header_offset),
+			(buf + *ts_payload_offset),
+			copy_len);
+
+		feed_data->pes_header_offset += copy_len;
+
+		/* Header still incomplete: wait for more TS payload */
+		if (left_size > *bytes_avail)
+			return -EINVAL;
+
+		/* else - we have beginning of PES header */
+		*bytes_avail -= left_size;
+		*ts_payload_offset += left_size;
+
+		/* Make sure the PES packet is valid */
+		if (mpq_dmx_is_valid_audio_pes(pes_header) < 0) {
+			/*
+			 * Since the new PES header parsing
+			 * failed, reset pusi_seen to drop all
+			 * data until next PUSI
+			 */
+			feed->pusi_seen = 0;
+			feed_data->pes_header_offset = 0;
+
+			MPQ_DVB_ERR_PRINT(
+				"%s: invalid packet\n",
+				__func__);
+
+			return -EINVAL;
+		}
+
+		feed_data->pes_header_left_bytes =
+			pes_header->pes_header_data_length;
+	}
+
+	return 0;
+}
+
+/*
+ * mpq_dmx_get_pts_dts() - extract the 33-bit PTS/DTS timestamps from a
+ * parsed PES header into feed_data->new_pts_dts_info and flag that new
+ * info exists. pts_dts_flag: 2 = PTS only, 3 = PTS and DTS.
+ */
+static inline void mpq_dmx_get_pts_dts(struct mpq_video_feed_info *feed_data,
+				struct pes_packet_header *pes_header)
+{
+	struct dmx_pts_dts_info *info = &(feed_data->new_pts_dts_info);
+
+	/* Get PTS/DTS information from PES header */
+
+	if ((pes_header->pts_dts_flag == 2) ||
+		(pes_header->pts_dts_flag == 3)) {
+		info->pts_exist = 1;
+
+		/* Reassemble the 33-bit PTS from its five header fields */
+		info->pts =
+			((u64)pes_header->pts_1 << 30) |
+			((u64)pes_header->pts_2 << 22) |
+			((u64)pes_header->pts_3 << 15) |
+			((u64)pes_header->pts_4 << 7) |
+			(u64)pes_header->pts_5;
+	} else {
+		info->pts_exist = 0;
+		info->pts = 0;
+	}
+
+	if (pes_header->pts_dts_flag == 3) {
+		info->dts_exist = 1;
+
+		info->dts =
+			((u64)pes_header->dts_1 << 30) |
+			((u64)pes_header->dts_2 << 22) |
+			((u64)pes_header->dts_3 << 15) |
+			((u64)pes_header->dts_4 << 7) |
+			(u64)pes_header->dts_5;
+	} else {
+		info->dts_exist = 0;
+		info->dts = 0;
+	}
+
+	feed_data->new_info_exists = 1;
+}
+
+static inline void mpq_dmx_get_audio_pts_dts(
+ struct mpq_audio_feed_info *feed_data,
+ struct pes_packet_header *pes_header)
+{
+ struct dmx_pts_dts_info *info = &(feed_data->new_pts_dts_info);
+
+ /* Get PTS/DTS information from PES header */
+
+ if ((pes_header->pts_dts_flag == 2) ||
+ (pes_header->pts_dts_flag == 3)) {
+ info->pts_exist = 1;
+
+ info->pts =
+ ((u64)pes_header->pts_1 << 30) |
+ ((u64)pes_header->pts_2 << 22) |
+ ((u64)pes_header->pts_3 << 15) |
+ ((u64)pes_header->pts_4 << 7) |
+ (u64)pes_header->pts_5;
+ } else {
+ info->pts_exist = 0;
+ info->pts = 0;
+ }
+
+ if (pes_header->pts_dts_flag == 3) {
+ info->dts_exist = 1;
+
+ info->dts =
+ ((u64)pes_header->dts_1 << 30) |
+ ((u64)pes_header->dts_2 << 22) |
+ ((u64)pes_header->dts_3 << 15) |
+ ((u64)pes_header->dts_4 << 7) |
+ (u64)pes_header->dts_5;
+ } else {
+ info->dts_exist = 0;
+ info->dts = 0;
+ }
+
+ feed_data->new_info_exists = 1;
+}
+
/**
 * mpq_dmx_parse_remaining_pes_header() - Continue video PES header parsing
 * past the mandatory fields, capturing the optional PTS/DTS bytes that may
 * be split across several TS packets.
 *
 * @feed: dvb demux feed the packet belongs to
 * @feed_data: video feed parsing state (header offset / bytes left)
 * @pes_header: PES header being incrementally assembled
 * @buf: current TS packet buffer
 * @ts_payload_offset: in/out offset of the remaining payload within @buf
 * @bytes_avail: in/out count of payload bytes still available in @buf
 *
 * Return: 0 when the full PES header has been consumed and @ts_payload_offset
 * points at ES payload data; -EINVAL when the header is not yet complete and
 * more TS packets are required (state is saved for the next call).
 */
int mpq_dmx_parse_remaining_pes_header(
				struct dvb_demux_feed *feed,
				struct mpq_video_feed_info *feed_data,
				struct pes_packet_header *pes_header,
				const u8 *buf,
				u32 *ts_payload_offset,
				int *bytes_avail)
{
	int left_size, copy_len;

	/* Remaining header bytes that need to be processed? */
	if (!feed_data->pes_header_left_bytes)
		return 0;

	/* Did we capture the PTS value (if exists)? */
	if ((*bytes_avail != 0) &&
		(feed_data->pes_header_offset <
		 (PES_MANDATORY_FIELDS_LEN+5)) &&
		((pes_header->pts_dts_flag == 2) ||
		 (pes_header->pts_dts_flag == 3))) {

		/* 5 more bytes should be there */
		left_size =
			PES_MANDATORY_FIELDS_LEN + 5 -
			feed_data->pes_header_offset;

		copy_len = (left_size > *bytes_avail) ?
					*bytes_avail :
					left_size;

		memcpy((u8 *)((u8 *)pes_header + feed_data->pes_header_offset),
			(buf + *ts_payload_offset),
			copy_len);

		feed_data->pes_header_offset += copy_len;
		feed_data->pes_header_left_bytes -= copy_len;

		/* PTS bytes split across packets - wait for more data */
		if (left_size > *bytes_avail)
			return -EINVAL;

		/* else - we have the PTS */
		*bytes_avail -= copy_len;
		*ts_payload_offset += copy_len;
	}

	/* Did we capture the DTS value (if exist)? */
	if ((*bytes_avail != 0) &&
		(feed_data->pes_header_offset <
		 (PES_MANDATORY_FIELDS_LEN+10)) &&
		(pes_header->pts_dts_flag == 3)) {

		/* 5 more bytes should be there */
		left_size =
			PES_MANDATORY_FIELDS_LEN + 10 -
			feed_data->pes_header_offset;

		copy_len = (left_size > *bytes_avail) ?
					*bytes_avail :
					left_size;

		memcpy((u8 *)((u8 *)pes_header + feed_data->pes_header_offset),
			(buf + *ts_payload_offset),
			copy_len);

		feed_data->pes_header_offset += copy_len;
		feed_data->pes_header_left_bytes -= copy_len;

		/* DTS bytes split across packets - wait for more data */
		if (left_size > *bytes_avail)
			return -EINVAL;

		/* else - we have the DTS */
		*bytes_avail -= copy_len;
		*ts_payload_offset += copy_len;
	}

	/* Any more header bytes?! */
	if (feed_data->pes_header_left_bytes >= *bytes_avail) {
		/* Rest of header (stuffing etc.) continues in next packet */
		feed_data->pes_header_left_bytes -= *bytes_avail;
		return -EINVAL;
	}

	/* get PTS/DTS information from PES header to be written later */
	mpq_dmx_get_pts_dts(feed_data, pes_header);

	/* Got PES header, process payload */
	*bytes_avail -= feed_data->pes_header_left_bytes;
	*ts_payload_offset += feed_data->pes_header_left_bytes;
	feed_data->pes_header_left_bytes = 0;

	return 0;
}
+
/**
 * mpq_dmx_parse_remaining_audio_pes_header() - Audio variant of
 * mpq_dmx_parse_remaining_pes_header(): capture the optional PTS/DTS bytes
 * of an audio PES header that may span multiple TS packets.
 *
 * @feed: dvb demux feed the packet belongs to
 * @feed_data: audio feed parsing state (header offset / bytes left)
 * @pes_header: PES header being incrementally assembled
 * @buf: current TS packet buffer
 * @ts_payload_offset: in/out offset of the remaining payload within @buf
 * @bytes_avail: in/out count of payload bytes still available in @buf
 *
 * Return: 0 when the full PES header has been consumed and @ts_payload_offset
 * points at ES payload data; -EINVAL when more TS packets are needed (state
 * is saved for the next call).
 */
int mpq_dmx_parse_remaining_audio_pes_header(
				struct dvb_demux_feed *feed,
				struct mpq_audio_feed_info *feed_data,
				struct pes_packet_header *pes_header,
				const u8 *buf,
				u32 *ts_payload_offset,
				int *bytes_avail)
{
	int left_size, copy_len;

	/* Remaining header bytes that need to be processed? */
	if (!feed_data->pes_header_left_bytes)
		return 0;

	/* Did we capture the PTS value (if exists)? */
	if ((*bytes_avail != 0) &&
		(feed_data->pes_header_offset <
		 (PES_MANDATORY_FIELDS_LEN+5)) &&
		((pes_header->pts_dts_flag == 2) ||
		 (pes_header->pts_dts_flag == 3))) {

		/* 5 more bytes should be there */
		left_size =
			PES_MANDATORY_FIELDS_LEN + 5 -
			feed_data->pes_header_offset;

		copy_len =
			(left_size > *bytes_avail) ? *bytes_avail : left_size;

		memcpy((u8 *)((u8 *)pes_header + feed_data->pes_header_offset),
			(buf + *ts_payload_offset), copy_len);

		feed_data->pes_header_offset += copy_len;
		feed_data->pes_header_left_bytes -= copy_len;

		/* PTS bytes split across packets - wait for more data */
		if (left_size > *bytes_avail)
			return -EINVAL;

		/* else - we have the PTS */
		*bytes_avail -= copy_len;
		*ts_payload_offset += copy_len;
	}

	/* Did we capture the DTS value (if exist)? */
	if ((*bytes_avail != 0) &&
		(feed_data->pes_header_offset <
		 (PES_MANDATORY_FIELDS_LEN+10)) &&
		(pes_header->pts_dts_flag == 3)) {

		/* 5 more bytes should be there */
		left_size =
			PES_MANDATORY_FIELDS_LEN + 10 -
			feed_data->pes_header_offset;

		copy_len = (left_size > *bytes_avail) ?
					*bytes_avail :
					left_size;

		memcpy((u8 *)((u8 *)pes_header + feed_data->pes_header_offset),
			(buf + *ts_payload_offset),
			copy_len);

		feed_data->pes_header_offset += copy_len;
		feed_data->pes_header_left_bytes -= copy_len;

		/* DTS bytes split across packets - wait for more data */
		if (left_size > *bytes_avail)
			return -EINVAL;

		/* else - we have the DTS */
		*bytes_avail -= copy_len;
		*ts_payload_offset += copy_len;
	}

	/* Any more header bytes?! */
	if (feed_data->pes_header_left_bytes >= *bytes_avail) {
		/* Rest of header (stuffing etc.) continues in next packet */
		feed_data->pes_header_left_bytes -= *bytes_avail;
		return -EINVAL;
	}

	/* get PTS/DTS information from PES header to be written later */
	mpq_dmx_get_audio_pts_dts(feed_data, pes_header);

	/* Got PES header, process payload */
	*bytes_avail -= feed_data->pes_header_left_bytes;
	*ts_payload_offset += feed_data->pes_header_left_bytes;
	feed_data->pes_header_left_bytes = 0;

	return 0;
}
+
+static void mpq_dmx_check_continuity(struct mpq_video_feed_info *feed_data,
+ int current_continuity,
+ int discontinuity_indicator)
+{
+ const int max_continuity = 0x0F; /* 4 bits in the TS packet header */
+
+ /* sanity check */
+ if (unlikely((current_continuity < 0) ||
+ (current_continuity > max_continuity))) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: received invalid continuity counter value %d\n",
+ __func__, current_continuity);
+ return;
+ }
+
+ /* reset last continuity */
+ if ((feed_data->last_continuity == -1) ||
+ (discontinuity_indicator)) {
+ feed_data->last_continuity = current_continuity;
+ return;
+ }
+
+ /* check for continuity errors */
+ if (current_continuity !=
+ ((feed_data->last_continuity + 1) & max_continuity))
+ feed_data->continuity_errs++;
+
+ /* save for next time */
+ feed_data->last_continuity = current_continuity;
+}
+
+static void mpq_dmx_check_audio_continuity(
+ struct mpq_audio_feed_info *feed_data,
+ int current_continuity,
+ int discontinuity_indicator)
+{
+ const int max_continuity = 0x0F; /* 4 bits in the TS packet header */
+
+ /* sanity check */
+ if (unlikely((current_continuity < 0) ||
+ (current_continuity > max_continuity))) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: received invalid continuity counter value %d\n",
+ __func__, current_continuity);
+ return;
+ }
+
+ /* reset last continuity */
+ if ((feed_data->last_continuity == -1) || (discontinuity_indicator)) {
+ feed_data->last_continuity = current_continuity;
+ return;
+ }
+
+ /* check for continuity errors */
+ if (current_continuity !=
+ ((feed_data->last_continuity + 1) & max_continuity))
+ feed_data->continuity_errs++;
+
+ /* save for next time */
+ feed_data->last_continuity = current_continuity;
+}
+
+static inline void mpq_dmx_prepare_es_event_data(
+ struct mpq_streambuffer_packet_header *packet,
+ struct mpq_adapter_video_meta_data *meta_data,
+ struct mpq_video_feed_info *feed_data,
+ struct mpq_streambuffer *stream_buffer,
+ struct dmx_data_ready *data,
+ int cookie)
+{
+ struct dmx_pts_dts_info *pts_dts;
+
+ if (meta_data->packet_type == DMX_PES_PACKET) {
+ pts_dts = &meta_data->info.pes.pts_dts_info;
+ data->buf.stc = meta_data->info.pes.stc;
+ } else {
+ pts_dts = &meta_data->info.framing.pts_dts_info;
+ data->buf.stc = meta_data->info.framing.stc;
+ }
+
+ pts_dts = meta_data->packet_type == DMX_PES_PACKET ?
+ &meta_data->info.pes.pts_dts_info :
+ &meta_data->info.framing.pts_dts_info;
+
+ data->data_length = 0;
+ data->buf.handle = packet->raw_data_handle;
+ data->buf.cookie = cookie;
+ data->buf.offset = packet->raw_data_offset;
+ data->buf.len = packet->raw_data_len;
+ data->buf.pts_exists = pts_dts->pts_exist;
+ data->buf.pts = pts_dts->pts;
+ data->buf.dts_exists = pts_dts->dts_exist;
+ data->buf.dts = pts_dts->dts;
+ data->buf.tei_counter = feed_data->tei_errs;
+ data->buf.cont_err_counter = feed_data->continuity_errs;
+ data->buf.ts_packets_num = feed_data->ts_packets_num;
+ data->buf.ts_dropped_bytes = feed_data->ts_dropped_bytes;
+ data->status = DMX_OK_DECODER_BUF;
+
+ MPQ_DVB_DBG_PRINT("%s: cookie=%d\n", __func__, data->buf.cookie);
+
+ /* reset counters */
+ feed_data->ts_packets_num = 0;
+ feed_data->ts_dropped_bytes = 0;
+ feed_data->tei_errs = 0;
+ feed_data->continuity_errs = 0;
+}
+
+static inline void mpq_dmx_prepare_audio_es_event_data(
+ struct mpq_streambuffer_packet_header *packet,
+ struct mpq_adapter_audio_meta_data *meta_data,
+ struct mpq_audio_feed_info *feed_data,
+ struct mpq_streambuffer *stream_buffer,
+ struct dmx_data_ready *data,
+ int cookie)
+{
+ struct dmx_pts_dts_info *pts_dts;
+
+ pts_dts = &meta_data->info.pes.pts_dts_info;
+ data->buf.stc = meta_data->info.pes.stc;
+
+ data->data_length = 0;
+ data->buf.handle = packet->raw_data_handle;
+ data->buf.cookie = cookie;
+ data->buf.offset = packet->raw_data_offset;
+ data->buf.len = packet->raw_data_len;
+ data->buf.pts_exists = pts_dts->pts_exist;
+ data->buf.pts = pts_dts->pts;
+ data->buf.dts_exists = pts_dts->dts_exist;
+ data->buf.dts = pts_dts->dts;
+ data->buf.tei_counter = feed_data->tei_errs;
+ data->buf.cont_err_counter = feed_data->continuity_errs;
+ data->buf.ts_packets_num = feed_data->ts_packets_num;
+ data->buf.ts_dropped_bytes = feed_data->ts_dropped_bytes;
+ data->status = DMX_OK_DECODER_BUF;
+
+ MPQ_DVB_DBG_PRINT("%s: cookie=%d\n", __func__, data->buf.cookie);
+
+ /* reset counters */
+ feed_data->ts_packets_num = 0;
+ feed_data->ts_dropped_bytes = 0;
+ feed_data->tei_errs = 0;
+ feed_data->continuity_errs = 0;
+}
+
+static int mpq_sdmx_dvr_buffer_desc(struct mpq_demux *mpq_demux,
+ struct sdmx_buff_descr *buf_desc)
+{
+ struct dvb_ringbuffer *rbuf = (struct dvb_ringbuffer *)
+ mpq_demux->demux.dmx.dvr_input.ringbuff;
+ struct ion_handle *ion_handle =
+ mpq_demux->demux.dmx.dvr_input.priv_handle;
+ ion_phys_addr_t phys_addr;
+ size_t len;
+ int ret;
+
+ ret = ion_phys(mpq_demux->ion_client, ion_handle, &phys_addr, &len);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: Failed to obtain physical address of input buffer. ret = %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ buf_desc->base_addr = (u64)phys_addr;
+ buf_desc->size = rbuf->size;
+
+ return 0;
+}
+
+static inline int mpq_dmx_notify_overflow(struct dvb_demux_feed *feed)
+{
+ struct dmx_data_ready data;
+
+ data.data_length = 0;
+ data.status = DMX_OVERRUN_ERROR;
+ return feed->data_ready_cb.ts(&feed->feed.ts, &data);
+}
+
+/**
+ * mpq_dmx_decoder_frame_closure - Helper function to handle closing current
+ * pending frame upon reaching EOS.
+ *
+ * @mpq_demux - mpq demux instance
+ * @mpq_feed - mpq feed object
+ */
+static void mpq_dmx_decoder_frame_closure(struct mpq_demux *mpq_demux,
+ struct mpq_feed *mpq_feed)
+{
+ struct mpq_streambuffer_packet_header packet;
+ struct mpq_streambuffer *stream_buffer;
+ struct mpq_adapter_video_meta_data meta_data;
+ struct mpq_video_feed_info *feed_data;
+ struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+ struct dmx_data_ready data;
+ int cookie;
+
+ feed_data = &mpq_feed->video_info;
+
+ /*
+ * spin-lock is taken to protect against manipulation of video
+ * output buffer by the API (terminate video feed, re-use of video
+ * buffers).
+ */
+ spin_lock(&feed_data->video_buffer_lock);
+ stream_buffer = feed_data->video_buffer;
+
+ if (stream_buffer == NULL) {
+ MPQ_DVB_DBG_PRINT("%s: video_buffer released\n", __func__);
+ spin_unlock(&feed_data->video_buffer_lock);
+ return;
+ }
+
+ /* Report last pattern found */
+ if ((feed_data->pending_pattern_len) &&
+ mpq_dmx_is_video_frame(feed->video_codec,
+ feed_data->last_framing_match_type)) {
+ meta_data.packet_type = DMX_FRAMING_INFO_PACKET;
+ mpq_dmx_write_pts_dts(feed_data,
+ &(meta_data.info.framing.pts_dts_info));
+ mpq_dmx_save_pts_dts(feed_data);
+ packet.user_data_len =
+ sizeof(struct mpq_adapter_video_meta_data);
+ packet.raw_data_len = feed_data->pending_pattern_len;
+ packet.raw_data_offset = feed_data->frame_offset;
+ meta_data.info.framing.pattern_type =
+ feed_data->last_framing_match_type;
+ meta_data.info.framing.stc = feed_data->last_framing_match_stc;
+ meta_data.info.framing.continuity_error_counter =
+ feed_data->continuity_errs;
+ meta_data.info.framing.transport_error_indicator_counter =
+ feed_data->tei_errs;
+ meta_data.info.framing.ts_dropped_bytes =
+ feed_data->ts_dropped_bytes;
+ meta_data.info.framing.ts_packets_num =
+ feed_data->ts_packets_num;
+
+ mpq_streambuffer_get_buffer_handle(stream_buffer,
+ 0, /* current write buffer handle */
+ &packet.raw_data_handle);
+
+ mpq_dmx_update_decoder_stat(mpq_feed);
+
+ /* Writing meta-data that includes the framing information */
+ cookie = mpq_streambuffer_pkt_write(stream_buffer, &packet,
+ (u8 *)&meta_data);
+ if (cookie >= 0) {
+ mpq_dmx_prepare_es_event_data(&packet, &meta_data,
+ feed_data, stream_buffer, &data, cookie);
+ feed->data_ready_cb.ts(&feed->feed.ts, &data);
+ } else {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_streambuffer_pkt_write failed, ret=%d\n",
+ __func__, cookie);
+ }
+ }
+
+ spin_unlock(&feed_data->video_buffer_lock);
+}
+
+/**
+ * mpq_dmx_decoder_pes_closure - Helper function to handle closing current PES
+ * upon reaching EOS.
+ *
+ * @mpq_demux - mpq demux instance
+ * @mpq_feed - mpq feed object
+ */
+static void mpq_dmx_decoder_pes_closure(struct mpq_demux *mpq_demux,
+ struct mpq_feed *mpq_feed)
+{
+ struct mpq_streambuffer_packet_header packet;
+ struct mpq_streambuffer *stream_buffer;
+ struct mpq_adapter_video_meta_data meta_data;
+ struct mpq_video_feed_info *feed_data;
+ struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+ struct dmx_data_ready data;
+ int cookie;
+
+ feed_data = &mpq_feed->video_info;
+
+ /*
+ * spin-lock is taken to protect against manipulation of video
+ * output buffer by the API (terminate video feed, re-use of video
+ * buffers).
+ */
+ spin_lock(&feed_data->video_buffer_lock);
+ stream_buffer = feed_data->video_buffer;
+
+ if (stream_buffer == NULL) {
+ MPQ_DVB_DBG_PRINT("%s: video_buffer released\n", __func__);
+ spin_unlock(&feed_data->video_buffer_lock);
+ return;
+ }
+
+ /*
+ * Close previous PES.
+ * Push new packet to the meta-data buffer.
+ */
+ if ((feed->pusi_seen) && (feed_data->pes_header_left_bytes == 0)) {
+ packet.raw_data_len = feed->peslen;
+ mpq_streambuffer_get_buffer_handle(stream_buffer,
+ 0, /* current write buffer handle */
+ &packet.raw_data_handle);
+ packet.raw_data_offset = feed_data->frame_offset;
+ packet.user_data_len =
+ sizeof(struct mpq_adapter_video_meta_data);
+
+ mpq_dmx_write_pts_dts(feed_data,
+ &(meta_data.info.pes.pts_dts_info));
+
+ meta_data.packet_type = DMX_PES_PACKET;
+ meta_data.info.pes.stc = feed_data->prev_stc;
+
+ mpq_dmx_update_decoder_stat(mpq_feed);
+
+ cookie = mpq_streambuffer_pkt_write(stream_buffer, &packet,
+ (u8 *)&meta_data);
+ if (cookie >= 0) {
+ /* Save write offset where new PES will begin */
+ mpq_streambuffer_get_data_rw_offset(stream_buffer, NULL,
+ &feed_data->frame_offset);
+ mpq_dmx_prepare_es_event_data(&packet, &meta_data,
+ feed_data, stream_buffer, &data, cookie);
+ feed->data_ready_cb.ts(&feed->feed.ts, &data);
+ } else {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_streambuffer_pkt_write failed, ret=%d\n",
+ __func__, cookie);
+ }
+ }
+ /* Reset PES info */
+ feed->peslen = 0;
+ feed_data->pes_header_offset = 0;
+ feed_data->pes_header_left_bytes = PES_MANDATORY_FIELDS_LEN;
+
+ spin_unlock(&feed_data->video_buffer_lock);
+}
+
+/*
+ * in audio handling although ES frames are send to decoder, close the
+ * pes packet
+ */
+static void mpq_dmx_decoder_audio_pes_closure(struct mpq_demux *mpq_demux,
+ struct mpq_feed *mpq_feed)
+{
+ struct mpq_streambuffer_packet_header packet;
+ struct mpq_streambuffer *stream_buffer;
+ struct mpq_adapter_audio_meta_data meta_data;
+ struct mpq_audio_feed_info *feed_data;
+ struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+ struct dmx_data_ready data;
+ int cookie;
+
+ feed_data = &mpq_feed->audio_info;
+
+ /*
+ * spin-lock is taken to protect against manipulation of audio
+ * output buffer by the API (terminate audio feed, re-use of audio
+ * buffers).
+ */
+ spin_lock(&feed_data->audio_buffer_lock);
+ stream_buffer = feed_data->audio_buffer;
+
+ if (stream_buffer == NULL) {
+ MPQ_DVB_DBG_PRINT("%s: audio_buffer released\n", __func__);
+ spin_unlock(&feed_data->audio_buffer_lock);
+ return;
+ }
+
+ /*
+ * Close previous PES.
+ * Push new packet to the meta-data buffer.
+ */
+ if ((feed->pusi_seen) && (feed_data->pes_header_left_bytes == 0)) {
+ packet.raw_data_len = feed->peslen;
+ mpq_streambuffer_get_buffer_handle(stream_buffer,
+ 0, /* current write buffer handle */
+ &packet.raw_data_handle);
+ packet.raw_data_offset = feed_data->frame_offset;
+ packet.user_data_len =
+ sizeof(struct mpq_adapter_audio_meta_data);
+
+ mpq_dmx_write_audio_pts_dts(feed_data,
+ &(meta_data.info.pes.pts_dts_info));
+
+ meta_data.packet_type = DMX_PES_PACKET;
+ meta_data.info.pes.stc = feed_data->prev_stc;
+
+ mpq_dmx_update_decoder_stat(mpq_feed);
+
+ cookie = mpq_streambuffer_pkt_write(stream_buffer, &packet,
+ (u8 *)&meta_data);
+ if (cookie >= 0) {
+ /* Save write offset where new PES will begin */
+ mpq_streambuffer_get_data_rw_offset(stream_buffer, NULL,
+ &feed_data->frame_offset);
+ mpq_dmx_prepare_audio_es_event_data(&packet, &meta_data,
+ feed_data, stream_buffer, &data, cookie);
+ feed->data_ready_cb.ts(&feed->feed.ts, &data);
+ } else {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_sb_pkt_write failed, ret=%d\n",
+ __func__, cookie);
+ }
+ }
+ /* Reset PES info */
+ feed->peslen = 0;
+ feed_data->pes_header_offset = 0;
+ feed_data->pes_header_left_bytes = PES_MANDATORY_FIELDS_LEN;
+
+ spin_unlock(&feed_data->audio_buffer_lock);
+}
+
/**
 * mpq_dmx_process_video_packet_framing() - Process one video TS packet for
 * a feed whose decoder requires the demux to perform framing.
 *
 * @feed: the dvb demux feed this packet belongs to
 * @buf: one full TS packet (TS_PACKET_SIZE bytes)
 * @curr_stc: STC value sampled for this packet
 *
 * Parses the TS/PES headers, searches the payload for video framing
 * patterns (sequence headers, frame starts), writes payload data to the
 * decoder stream buffer, and reports one meta-data packet per completed
 * frame via the feed's data-ready callback.
 *
 * Return: always 0 (errors are reported via counters/overflow events).
 */
static int mpq_dmx_process_video_packet_framing(
			struct dvb_demux_feed *feed,
			const u8 *buf,
			u64 curr_stc)
{
	int bytes_avail;
	u32 ts_payload_offset;
	struct mpq_video_feed_info *feed_data;
	const struct ts_packet_header *ts_header;
	struct mpq_streambuffer *stream_buffer;
	struct pes_packet_header *pes_header;
	struct mpq_demux *mpq_demux;
	struct mpq_feed *mpq_feed;

	struct dvb_dmx_video_patterns_results framing_res;
	struct mpq_streambuffer_packet_header packet;
	struct mpq_adapter_video_meta_data meta_data;
	int bytes_written = 0;
	int bytes_to_write = 0;
	int found_patterns = 0;
	int first_pattern = 0;
	int i;
	int is_video_frame = 0;
	int pending_data_len = 0;
	int ret = 0;
	int discontinuity_indicator = 0;
	struct dmx_data_ready data;

	mpq_demux = feed->demux->priv;

	mpq_feed = feed->priv;
	feed_data = &mpq_feed->video_info;

	/*
	 * spin-lock is taken to protect against manipulation of video
	 * output buffer by the API (terminate video feed, re-use of video
	 * buffers). Mutex on the video-feed cannot be held here
	 * since SW demux holds a spin-lock while calling write_to_decoder
	 */
	spin_lock(&feed_data->video_buffer_lock);
	stream_buffer = feed_data->video_buffer;

	if (stream_buffer == NULL) {
		MPQ_DVB_DBG_PRINT(
			"%s: video_buffer released\n",
			__func__);
		spin_unlock(&feed_data->video_buffer_lock);
		return 0;
	}

	ts_header = (const struct ts_packet_header *)buf;

	pes_header = &feed_data->pes_header;

	/* Make sure this TS packet has a payload and not scrambled */
	if ((ts_header->sync_byte != 0x47) ||
		(ts_header->adaptation_field_control == 0) ||
		(ts_header->adaptation_field_control == 2) ||
		(ts_header->transport_scrambling_control)) {
		/* continue to next packet */
		spin_unlock(&feed_data->video_buffer_lock);
		return 0;
	}

	if (ts_header->payload_unit_start_indicator) { /* PUSI? */
		if (feed->pusi_seen) { /* Did we see PUSI before? */
			/*
			 * Double check that we are not in middle of
			 * previous PES header parsing.
			 */
			if (feed_data->pes_header_left_bytes != 0)
				MPQ_DVB_ERR_PRINT(
					"%s: received PUSI while handling PES header of previous PES\n",
					__func__);

			/* A new PES starts here - reset PES parsing state */
			feed->peslen = 0;
			feed_data->pes_header_offset = 0;
			feed_data->pes_header_left_bytes =
				PES_MANDATORY_FIELDS_LEN;
		} else {
			feed->pusi_seen = 1;
		}
	}

	/*
	 * Parse PES data only if PUSI was encountered,
	 * otherwise the data is dropped
	 */
	if (!feed->pusi_seen) {
		spin_unlock(&feed_data->video_buffer_lock);
		return 0; /* drop and wait for next packets */
	}

	ts_payload_offset = sizeof(struct ts_packet_header);

	/*
	 * Skip adaptation field if exists.
	 * Save discontinuity indicator if exists.
	 */
	if (ts_header->adaptation_field_control == 3) {
		const struct ts_adaptation_field *adaptation_field =
			(const struct ts_adaptation_field *)(buf +
				ts_payload_offset);

		discontinuity_indicator =
			adaptation_field->discontinuity_indicator;
		/* first byte of the adaptation field is its length */
		ts_payload_offset += buf[ts_payload_offset] + 1;
	}

	bytes_avail = TS_PACKET_SIZE - ts_payload_offset;

	/* Get the mandatory fields of the video PES header */
	if (mpq_dmx_parse_mandatory_pes_header(feed, feed_data,
						pes_header, buf,
						&ts_payload_offset,
						&bytes_avail)) {
		spin_unlock(&feed_data->video_buffer_lock);
		return 0;
	}

	if (mpq_dmx_parse_remaining_pes_header(feed, feed_data,
						pes_header, buf,
						&ts_payload_offset,
						&bytes_avail)) {
		spin_unlock(&feed_data->video_buffer_lock);
		return 0;
	}

	/*
	 * If we reached here,
	 * then we are now at the PES payload data
	 */
	if (bytes_avail == 0) {
		spin_unlock(&feed_data->video_buffer_lock);
		return 0;
	}

	/*
	 * the decoder requires demux to do framing,
	 * so search for the patterns now.
	 */
	found_patterns = dvb_dmx_video_pattern_search(
				feed_data->patterns,
				feed_data->patterns_num,
				(buf + ts_payload_offset),
				bytes_avail,
				&feed_data->prefix_size,
				&framing_res);

	/* Data before the first sequence header is dropped */
	if (!feed_data->found_sequence_header_pattern) {
		for (i = 0; i < found_patterns; i++) {
			if ((framing_res.info[i].type ==
				DMX_IDX_MPEG_SEQ_HEADER) ||
			    (framing_res.info[i].type ==
				DMX_IDX_H264_SPS) ||
			    (framing_res.info[i].type ==
				DMX_IDX_VC1_SEQ_HEADER)) {

				MPQ_DVB_DBG_PRINT(
					"%s: Found Sequence Pattern, buf %p, i = %d, offset = %d, type = %lld\n",
					__func__, buf, i,
					framing_res.info[i].offset,
					framing_res.info[i].type);

				first_pattern = i;
				feed_data->found_sequence_header_pattern = 1;
				ts_payload_offset +=
					framing_res.info[i].offset;
				bytes_avail -= framing_res.info[i].offset;

				if (framing_res.info[i].used_prefix_size) {
					feed_data->first_prefix_size =
						framing_res.info[i].
							used_prefix_size;
				}
				break;
			}
		}
	}

	/*
	 * If decoder requires demux to do framing,
	 * pass data to decoder only after sequence header
	 * or equivalent is found. Otherwise the data is dropped.
	 */
	if (!feed_data->found_sequence_header_pattern) {
		feed_data->prev_stc = curr_stc;
		spin_unlock(&feed_data->video_buffer_lock);
		return 0;
	}

	/* Update error counters based on TS header */
	feed_data->ts_packets_num++;
	feed_data->tei_errs += ts_header->transport_error_indicator;
	mpq_demux->decoder_stat[feed_data->stream_interface].ts_errors +=
		ts_header->transport_error_indicator;
	mpq_dmx_check_continuity(feed_data,
				ts_header->continuity_counter,
				discontinuity_indicator);
	mpq_demux->decoder_stat[feed_data->stream_interface].cc_errors +=
		feed_data->continuity_errs;

	/* Need to back-up the PTS information of the very first frame */
	if (feed_data->first_pts_dts_copy) {
		for (i = first_pattern; i < found_patterns; i++) {
			is_video_frame = mpq_dmx_is_video_frame(
					feed->video_codec,
					framing_res.info[i].type);

			if (is_video_frame == 1) {
				mpq_dmx_save_pts_dts(feed_data);
				feed_data->first_pts_dts_copy = 0;
				break;
			}
		}
	}

	/*
	 * write prefix used to find first Sequence pattern, if needed.
	 * feed_data->patterns[0]->pattern always contains the sequence
	 * header pattern.
	 */
	if (feed_data->first_prefix_size) {
		ret = mpq_streambuffer_data_write(stream_buffer,
			feed_data->patterns[0]->pattern,
			feed_data->first_prefix_size);
		if (ret < 0) {
			mpq_demux->decoder_stat
				[feed_data->stream_interface].drop_count +=
				feed_data->first_prefix_size;
			feed_data->ts_dropped_bytes +=
				feed_data->first_prefix_size;
			MPQ_DVB_DBG_PRINT("%s: could not write prefix\n",
				__func__);
			if (ret == -ENOSPC)
				mpq_dmx_notify_overflow(feed);
		} else {
			MPQ_DVB_DBG_PRINT(
				"%s: Writing pattern prefix of size %d\n",
				__func__, feed_data->first_prefix_size);
			/*
			 * update the length of the data we report
			 * to include the size of the prefix that was used.
			 */
			feed_data->pending_pattern_len +=
				feed_data->first_prefix_size;
		}
	}

	feed->peslen += bytes_avail;
	pending_data_len += bytes_avail;

	meta_data.packet_type = DMX_FRAMING_INFO_PACKET;
	packet.user_data_len = sizeof(struct mpq_adapter_video_meta_data);

	/*
	 * Go over all the patterns that were found in this packet.
	 * For each pattern found, write the relevant data to the data
	 * buffer, then write the respective meta-data.
	 * Each pattern can only be reported when the next pattern is found
	 * (in order to know the data length).
	 * There are three possible cases for each pattern:
	 * 1. This is the very first pattern we found in any TS packet in this
	 *    feed.
	 * 2. This is the first pattern found in this TS packet, but we've
	 *    already found patterns in previous packets.
	 * 3. This is not the first pattern in this packet, i.e., we've
	 *    already found patterns in this TS packet.
	 */
	for (i = first_pattern; i < found_patterns; i++) {
		if (i == first_pattern) {
			/*
			 * The way to identify the very first pattern:
			 * 1. It's the first pattern found in this packet.
			 * 2. The pending_pattern_len, which indicates the
			 *    data length of the previous pattern that has
			 *    not yet been reported, is usually 0. However,
			 *    it may be larger than 0 if a prefix was used
			 *    to find this pattern (i.e., the pattern was
			 *    split over two TS packets). In that case,
			 *    pending_pattern_len equals first_prefix_size.
			 *    first_prefix_size is set to 0 later in this
			 *    function.
			 */
			if (feed_data->first_prefix_size ==
				feed_data->pending_pattern_len) {
				/*
				 * This is the very first pattern, so no
				 * previous pending frame data exists.
				 * Update frame info and skip to the
				 * next frame.
				 */
				feed_data->last_framing_match_type =
					framing_res.info[i].type;
				feed_data->last_pattern_offset =
					framing_res.info[i].offset;
				if (framing_res.info[i].used_prefix_size)
					feed_data->last_framing_match_stc =
						feed_data->prev_stc;
				else
					feed_data->last_framing_match_stc =
						curr_stc;
				continue;
			}
			/*
			 * This is the first pattern in this
			 * packet and previous frame from
			 * previous packet is pending for report
			 */
			bytes_to_write = framing_res.info[i].offset;
		} else {
			/* Previous pending frame is in the same packet */
			bytes_to_write =
				framing_res.info[i].offset -
				feed_data->last_pattern_offset;
		}

		ret = mpq_streambuffer_data_write(
			stream_buffer,
			(buf + ts_payload_offset + bytes_written),
			bytes_to_write);
		if (ret < 0) {
			mpq_demux->decoder_stat
				[feed_data->stream_interface].drop_count +=
				bytes_to_write;
			feed_data->ts_dropped_bytes += bytes_to_write;
			MPQ_DVB_DBG_PRINT(
				"%s: Couldn't write %d bytes to data buffer, ret=%d\n",
				__func__, bytes_to_write, ret);
			if (ret == -ENOSPC)
				mpq_dmx_notify_overflow(feed);
		} else {
			bytes_written += bytes_to_write;
			pending_data_len -= bytes_to_write;
			feed_data->pending_pattern_len += bytes_to_write;
		}
		/*
		 * NOTE(review): non_predicted_video_frame and
		 * video_b_frame_events (below) are not declared in this
		 * function - presumably file-scope state; confirm against
		 * the rest of the file.
		 */
		non_predicted_video_frame = 0;

		is_video_frame = mpq_dmx_is_video_frame(
				feed->video_codec,
				feed_data->last_framing_match_type);
		if (is_video_frame == 1) {
			mpq_dmx_write_pts_dts(feed_data,
				&(meta_data.info.framing.pts_dts_info));
			mpq_dmx_save_pts_dts(feed_data);

			packet.raw_data_len = feed_data->pending_pattern_len -
				framing_res.info[i].used_prefix_size;
			packet.raw_data_offset = feed_data->frame_offset;
			meta_data.info.framing.pattern_type =
				feed_data->last_framing_match_type;
			meta_data.info.framing.stc =
				feed_data->last_framing_match_stc;
			meta_data.info.framing.continuity_error_counter =
				feed_data->continuity_errs;
			meta_data.info.framing.
				transport_error_indicator_counter =
				 feed_data->tei_errs;
			meta_data.info.framing.ts_dropped_bytes =
				feed_data->ts_dropped_bytes;
			meta_data.info.framing.ts_packets_num =
				feed_data->ts_packets_num;

			mpq_streambuffer_get_buffer_handle(
				stream_buffer,
				0,	/* current write buffer handle */
				&packet.raw_data_handle);

			mpq_dmx_update_decoder_stat(mpq_feed);

			/* Suppress PTS/DTS for predicted frames if enabled */
			if (video_b_frame_events == 1) {
				if (non_predicted_video_frame == 0) {
					struct dmx_pts_dts_info *pts_dts;

					pts_dts =
					&meta_data.info.framing.pts_dts_info;
					pts_dts->pts_exist = 0;
					pts_dts->pts = 0;
					pts_dts->dts_exist = 0;
					pts_dts->dts = 0;
				}
			}
			/*
			 * Write meta-data that includes the framing information
			 */
			ret = mpq_streambuffer_pkt_write(stream_buffer, &packet,
				(u8 *)&meta_data);
			if (ret < 0) {
				MPQ_DVB_ERR_PRINT
					("%s: mpq_sb_pkt_write failed ret=%d\n",
					 __func__, ret);
				if (ret == -ENOSPC)
					mpq_dmx_notify_overflow(feed);
			} else {
				mpq_dmx_prepare_es_event_data(
					&packet, &meta_data, feed_data,
					stream_buffer, &data, ret);

				/* Trigger ES Data Event for VPTS */
				feed->data_ready_cb.ts(&feed->feed.ts, &data);

				if (feed_data->video_buffer->mode ==
					MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR)
					feed_data->frame_offset = 0;
				else
					mpq_streambuffer_get_data_rw_offset(
						feed_data->video_buffer,
						NULL,
						&feed_data->frame_offset);
			}

			/*
			 * In linear buffers, after writing the packet
			 * we switched over to a new linear buffer for the new
			 * frame. In that case, we should re-write the prefix
			 * of the existing frame if any exists.
			 */
			if ((MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR ==
				 feed_data->video_buffer->mode) &&
				framing_res.info[i].used_prefix_size) {
				ret = mpq_streambuffer_data_write(stream_buffer,
					feed_data->prev_pattern +
					 DVB_DMX_MAX_PATTERN_LEN -
					 framing_res.info[i].used_prefix_size,
					framing_res.info[i].used_prefix_size);

				if (ret < 0) {
					feed_data->pending_pattern_len = 0;
					mpq_demux->decoder_stat
						[feed_data->stream_interface].
						drop_count += bytes_avail;
					feed_data->ts_dropped_bytes +=
					 framing_res.info[i].used_prefix_size;
					if (ret == -ENOSPC)
						mpq_dmx_notify_overflow(feed);
				} else {
					feed_data->pending_pattern_len =
					 framing_res.info[i].used_prefix_size;
				}
			} else {
				s32 offset = (s32)feed_data->frame_offset;
				u32 buff_size =
				 feed_data->video_buffer->buffers[0].size;

				/* wrap the offset back if the prefix started
				 * before the ring-buffer start
				 */
				offset -= framing_res.info[i].used_prefix_size;
				offset += (offset < 0) ? buff_size : 0;
				feed_data->pending_pattern_len =
					framing_res.info[i].used_prefix_size;

				if (MPQ_STREAMBUFFER_BUFFER_MODE_RING ==
					feed_data->video_buffer->mode) {
					feed_data->frame_offset = (u32)offset;
				}
			}
		}

		/* save the last match for next time */
		feed_data->last_framing_match_type =
			framing_res.info[i].type;
		feed_data->last_pattern_offset =
			framing_res.info[i].offset;
		if (framing_res.info[i].used_prefix_size)
			feed_data->last_framing_match_stc = feed_data->prev_stc;
		else
			feed_data->last_framing_match_stc = curr_stc;
	}

	feed_data->prev_stc = curr_stc;
	feed_data->first_prefix_size = 0;

	/*
	 * Save the trailing of the TS packet as we might have a pattern
	 * split that we need to re-use when closing the next
	 * video linear buffer.
	 */
	if (MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR ==
		feed_data->video_buffer->mode)
		memcpy(feed_data->prev_pattern,
			buf + TS_PACKET_SIZE - DVB_DMX_MAX_PATTERN_LEN,
			DVB_DMX_MAX_PATTERN_LEN);

	/* Write data remaining after the last reported pattern */
	if (pending_data_len) {
		ret = mpq_streambuffer_data_write(
			stream_buffer,
			(buf + ts_payload_offset + bytes_written),
			pending_data_len);

		if (ret < 0) {
			mpq_demux->decoder_stat
				[feed_data->stream_interface].drop_count +=
				pending_data_len;
			feed_data->ts_dropped_bytes += pending_data_len;
			MPQ_DVB_DBG_PRINT(
				"%s: Couldn't write %d pending bytes to data buffer, ret=%d\n",
				__func__, pending_data_len, ret);
			if (ret == -ENOSPC)
				mpq_dmx_notify_overflow(feed);
		} else {
			feed_data->pending_pattern_len += pending_data_len;
		}
	}

	spin_unlock(&feed_data->video_buffer_lock);
	return 0;
}
+
+/*
+ * mpq_dmx_process_video_packet_no_framing() - Process one video TS packet
+ * without ES pattern (framing) detection.
+ *
+ * On a payload-unit start, the previously accumulated PES is closed: a
+ * packet header plus meta-data (PTS/DTS, STC) is written to the decoder
+ * stream buffer and an ES data-ready event is raised. The PES header of
+ * the new unit is then parsed and any remaining payload bytes are written
+ * to the decoder buffer.
+ *
+ * @feed:	the video feed the packet belongs to
+ * @buf:	a single TS packet, TS_PACKET_SIZE bytes long
+ * @curr_stc:	system time-stamp attached to this packet (27MHz units)
+ *
+ * Return: always 0; errors are reflected in decoder drop/error counters.
+ */
+static int mpq_dmx_process_video_packet_no_framing(
+	struct dvb_demux_feed *feed,
+	const u8 *buf,
+	u64 curr_stc)
+{
+	int bytes_avail;
+	u32 ts_payload_offset;
+	struct mpq_video_feed_info *feed_data;
+	const struct ts_packet_header *ts_header;
+	struct mpq_streambuffer *stream_buffer;
+	struct pes_packet_header *pes_header;
+	struct mpq_demux *mpq_demux;
+	struct mpq_feed *mpq_feed;
+	int discontinuity_indicator = 0;
+	struct dmx_data_ready data;
+	int cookie;
+	int ret;
+
+	mpq_demux = feed->demux->priv;
+	mpq_feed = feed->priv;
+	feed_data = &mpq_feed->video_info;
+
+	/*
+	 * spin-lock is taken to protect against manipulation of video
+	 * output buffer by the API (terminate video feed, re-use of video
+	 * buffers). Mutex on the video-feed cannot be held here
+	 * since SW demux holds a spin-lock while calling write_to_decoder
+	 */
+	spin_lock(&feed_data->video_buffer_lock);
+	stream_buffer = feed_data->video_buffer;
+	if (stream_buffer == NULL) {
+		MPQ_DVB_DBG_PRINT(
+			"%s: video_buffer released\n",
+			__func__);
+		spin_unlock(&feed_data->video_buffer_lock);
+		return 0;
+	}
+
+	ts_header = (const struct ts_packet_header *)buf;
+
+	pes_header = &feed_data->pes_header;
+
+	/* Make sure this TS packet has a payload and not scrambled */
+	if ((ts_header->sync_byte != 0x47) ||
+		(ts_header->adaptation_field_control == 0) ||
+		(ts_header->adaptation_field_control == 2) ||
+		(ts_header->transport_scrambling_control)) {
+		/* continue to next packet */
+		spin_unlock(&feed_data->video_buffer_lock);
+		return 0;
+	}
+
+	if (ts_header->payload_unit_start_indicator) { /* PUSI? */
+		if (feed->pusi_seen) { /* Did we see PUSI before? */
+			struct mpq_streambuffer_packet_header packet;
+			struct mpq_adapter_video_meta_data meta_data;
+
+			/*
+			 * Close previous PES.
+			 * Push new packet to the meta-data buffer.
+			 * Double check that we are not in middle of
+			 * previous PES header parsing.
+			 */
+
+			if (feed_data->pes_header_left_bytes == 0) {
+				packet.raw_data_len = feed->peslen;
+				mpq_streambuffer_get_buffer_handle(
+					stream_buffer,
+					0, /* current write buffer handle */
+					&packet.raw_data_handle);
+				packet.raw_data_offset =
+					feed_data->frame_offset;
+				packet.user_data_len =
+					sizeof(struct
+						mpq_adapter_video_meta_data);
+
+				mpq_dmx_write_pts_dts(feed_data,
+					&(meta_data.info.pes.pts_dts_info));
+
+				/* Mark that we detected start of new PES */
+				feed_data->first_pts_dts_copy = 1;
+
+				meta_data.packet_type = DMX_PES_PACKET;
+				meta_data.info.pes.stc = feed_data->prev_stc;
+
+				mpq_dmx_update_decoder_stat(mpq_feed);
+
+				cookie = mpq_streambuffer_pkt_write(
+					stream_buffer, &packet,
+					(u8 *)&meta_data);
+				if (cookie < 0) {
+					MPQ_DVB_ERR_PRINT
+						("%s: write failed, ret=%d\n",
+						__func__, cookie);
+				} else {
+					/*
+					 * Save write offset where new PES
+					 * will begin
+					 */
+					mpq_streambuffer_get_data_rw_offset(
+						stream_buffer,
+						NULL,
+						&feed_data->frame_offset);
+
+					mpq_dmx_prepare_es_event_data(
+						&packet, &meta_data,
+						feed_data,
+						stream_buffer, &data, cookie);
+
+					feed->data_ready_cb.ts(&feed->feed.ts,
+						&data);
+				}
+			} else {
+				MPQ_DVB_ERR_PRINT(
+					"%s: received PUSI while handling PES header of previous PES\n",
+					__func__);
+			}
+
+			/* Reset PES info */
+			feed->peslen = 0;
+			feed_data->pes_header_offset = 0;
+			feed_data->pes_header_left_bytes =
+				PES_MANDATORY_FIELDS_LEN;
+		} else {
+			feed->pusi_seen = 1;
+		}
+
+		feed_data->prev_stc = curr_stc;
+	}
+
+	/*
+	 * Parse PES data only if PUSI was encountered,
+	 * otherwise the data is dropped
+	 */
+	if (!feed->pusi_seen) {
+		spin_unlock(&feed_data->video_buffer_lock);
+		return 0; /* drop and wait for next packets */
+	}
+
+	ts_payload_offset = sizeof(struct ts_packet_header);
+
+	/*
+	 * Skip adaptation field if exists.
+	 * Save discontinuity indicator if exists.
+	 */
+	if (ts_header->adaptation_field_control == 3) {
+		const struct ts_adaptation_field *adaptation_field =
+			(const struct ts_adaptation_field *)(buf +
+				ts_payload_offset);
+
+		discontinuity_indicator =
+			adaptation_field->discontinuity_indicator;
+		/*
+		 * NOTE(review): adaptation_field_length
+		 * (buf[ts_payload_offset]) is not range-checked here; a
+		 * corrupt stream can push ts_payload_offset past
+		 * TS_PACKET_SIZE, making bytes_avail below negative.
+		 * Confirm the PES header parsers reject that case.
+		 */
+		ts_payload_offset += buf[ts_payload_offset] + 1;
+	}
+
+	bytes_avail = TS_PACKET_SIZE - ts_payload_offset;
+
+	/* Get the mandatory fields of the video PES header */
+	if (mpq_dmx_parse_mandatory_pes_header(feed, feed_data,
+						pes_header, buf,
+						&ts_payload_offset,
+						&bytes_avail)) {
+		spin_unlock(&feed_data->video_buffer_lock);
+		return 0;
+	}
+
+	if (mpq_dmx_parse_remaining_pes_header(feed, feed_data,
+						pes_header, buf,
+						&ts_payload_offset,
+						&bytes_avail)) {
+		spin_unlock(&feed_data->video_buffer_lock);
+		return 0;
+	}
+
+	/*
+	 * If we reached here,
+	 * then we are now at the PES payload data
+	 */
+	if (bytes_avail == 0) {
+		spin_unlock(&feed_data->video_buffer_lock);
+		return 0;
+	}
+
+	/*
+	 * Need to back-up the PTS information
+	 * of the start of new PES
+	 */
+	if (feed_data->first_pts_dts_copy) {
+		mpq_dmx_save_pts_dts(feed_data);
+		feed_data->first_pts_dts_copy = 0;
+	}
+
+	/* Update error counters based on TS header */
+	feed_data->ts_packets_num++;
+	feed_data->tei_errs += ts_header->transport_error_indicator;
+	mpq_demux->decoder_stat[feed_data->stream_interface].ts_errors +=
+		ts_header->transport_error_indicator;
+	mpq_dmx_check_continuity(feed_data,
+			ts_header->continuity_counter,
+			discontinuity_indicator);
+	mpq_demux->decoder_stat[feed_data->stream_interface].cc_errors +=
+		feed_data->continuity_errs;
+
+	/* Write the PES payload itself to the decoder buffer */
+	ret = mpq_streambuffer_data_write(stream_buffer, buf+ts_payload_offset,
+		bytes_avail);
+	if (ret < 0) {
+		mpq_demux->decoder_stat
+			[feed_data->stream_interface].drop_count += bytes_avail;
+		feed_data->ts_dropped_bytes += bytes_avail;
+		if (ret == -ENOSPC)
+			mpq_dmx_notify_overflow(feed);
+	} else {
+		feed->peslen += bytes_avail;
+	}
+
+	spin_unlock(&feed_data->video_buffer_lock);
+
+	return 0;
+}
+
+/*
+ * mpq_dmx_process_audio_packet_no_framing() - Process one audio TS packet:
+ * parse PES headers and send ES packets down to the audio decoder.
+ * Triggers a new ES data event carrying the APTS and QTimer STC for each
+ * completed PES.
+ *
+ * @feed:	the audio feed the packet belongs to
+ * @buf:	a single TS packet, TS_PACKET_SIZE bytes long
+ * @curr_stc:	system time-stamp attached to this packet (27MHz units)
+ *
+ * Return: always 0; errors are reflected in decoder drop/error counters.
+ */
+static int mpq_dmx_process_audio_packet_no_framing(
+	struct dvb_demux_feed *feed,
+	const u8 *buf,
+	u64 curr_stc)
+{
+	int bytes_avail;
+	u32 ts_payload_offset;
+	struct mpq_audio_feed_info *feed_data;
+	const struct ts_packet_header *ts_header;
+	struct mpq_streambuffer *stream_buffer;
+	struct pes_packet_header *pes_header;
+	struct mpq_demux *mpq_demux;
+	struct mpq_feed *mpq_feed;
+	int discontinuity_indicator = 0;
+	struct dmx_data_ready data;
+	int cookie;
+	int ret;
+
+	mpq_demux = feed->demux->priv;
+	mpq_feed = feed->priv;
+	feed_data = &mpq_feed->audio_info;
+
+	/*
+	 * spin-lock is taken to protect against manipulation of audio
+	 * output buffer by the API (terminate audio feed, re-use of audio
+	 * buffers). Mutex on the audio-feed cannot be held here
+	 * since SW demux holds a spin-lock while calling write_to_decoder
+	 */
+	spin_lock(&feed_data->audio_buffer_lock);
+	stream_buffer = feed_data->audio_buffer;
+	if (stream_buffer == NULL) {
+		MPQ_DVB_DBG_PRINT(
+			"%s: audio_buffer released\n",
+			__func__);
+		spin_unlock(&feed_data->audio_buffer_lock);
+		return 0;
+	}
+
+	ts_header = (const struct ts_packet_header *)buf;
+
+	pes_header = &feed_data->pes_header;
+
+	/* Make sure this TS packet has a payload and not scrambled */
+	if ((ts_header->sync_byte != 0x47) ||
+		(ts_header->adaptation_field_control == 0) ||
+		(ts_header->adaptation_field_control == 2) ||
+		(ts_header->transport_scrambling_control)) {
+		/* continue to next packet */
+		spin_unlock(&feed_data->audio_buffer_lock);
+		return 0;
+	}
+
+	if (ts_header->payload_unit_start_indicator) { /* PUSI? */
+		if (feed->pusi_seen) { /* Did we see PUSI before? */
+			struct mpq_streambuffer_packet_header packet;
+			struct mpq_adapter_audio_meta_data meta_data;
+
+			/*
+			 * Close previous PES.
+			 * Push new packet to the meta-data buffer.
+			 * Double check that we are not in middle of
+			 * previous PES header parsing.
+			 */
+
+			if (feed_data->pes_header_left_bytes == 0) {
+				packet.raw_data_len = feed->peslen;
+				mpq_streambuffer_get_buffer_handle(
+					stream_buffer,
+					0, /* current write buffer handle */
+					&packet.raw_data_handle);
+				packet.raw_data_offset =
+					feed_data->frame_offset;
+				packet.user_data_len =
+					sizeof(struct
+						mpq_adapter_audio_meta_data);
+
+				mpq_dmx_write_audio_pts_dts(feed_data,
+					&(meta_data.info.pes.pts_dts_info));
+
+				/* Mark that we detected start of new PES */
+				feed_data->first_pts_dts_copy = 1;
+
+				meta_data.packet_type = DMX_PES_PACKET;
+				meta_data.info.pes.stc = feed_data->prev_stc;
+
+				mpq_dmx_update_decoder_stat(mpq_feed);
+
+				/* actual writing of stream audio headers */
+				cookie = mpq_streambuffer_pkt_write(
+					stream_buffer, &packet,
+					(u8 *)&meta_data);
+				if (cookie < 0) {
+					MPQ_DVB_ERR_PRINT
+						("%s: write failed, ret=%d\n",
+						__func__, cookie);
+				} else {
+					/*
+					 * Save write offset where new PES
+					 * will begin
+					 */
+					mpq_streambuffer_get_data_rw_offset(
+						stream_buffer,
+						NULL,
+						&feed_data->frame_offset);
+
+					mpq_dmx_prepare_audio_es_event_data(
+						&packet, &meta_data,
+						feed_data,
+						stream_buffer, &data, cookie);
+
+					/*
+					 * Trigger ES data event for APTS
+					 * and AFRAME
+					 */
+					feed->data_ready_cb.ts(&feed->feed.ts,
+						&data);
+				}
+			} else {
+				MPQ_DVB_ERR_PRINT(
+					"%s: received PUSI while handling PES header of previous PES\n",
+					__func__);
+			}
+
+			/* Reset PES info */
+			feed->peslen = 0;
+			feed_data->pes_header_offset = 0;
+			feed_data->pes_header_left_bytes =
+				PES_MANDATORY_FIELDS_LEN;
+		} else {
+			feed->pusi_seen = 1;
+		}
+
+		feed_data->prev_stc = curr_stc;
+	}
+
+	/*
+	 * Parse PES data only if PUSI was encountered,
+	 * otherwise the data is dropped
+	 */
+	if (!feed->pusi_seen) {
+		spin_unlock(&feed_data->audio_buffer_lock);
+		return 0; /* drop and wait for next packets */
+	}
+
+	ts_payload_offset = sizeof(struct ts_packet_header);
+
+	/*
+	 * Skip adaptation field if exists.
+	 * Save discontinuity indicator if exists.
+	 */
+	if (ts_header->adaptation_field_control == 3) {
+		const struct ts_adaptation_field *adaptation_field =
+			(const struct ts_adaptation_field *)(buf +
+				ts_payload_offset);
+
+		discontinuity_indicator =
+			adaptation_field->discontinuity_indicator;
+		/*
+		 * NOTE(review): adaptation_field_length
+		 * (buf[ts_payload_offset]) is not range-checked here; a
+		 * corrupt stream can push ts_payload_offset past
+		 * TS_PACKET_SIZE, making bytes_avail below negative.
+		 * Confirm the PES header parsers reject that case.
+		 */
+		ts_payload_offset += buf[ts_payload_offset] + 1;
+	}
+
+	bytes_avail = TS_PACKET_SIZE - ts_payload_offset;
+
+	/* The audio decoder requires ES packets ! */
+
+	/* Get the mandatory fields of the audio PES header */
+	if (mpq_dmx_parse_mandatory_audio_pes_header(feed, feed_data,
+						pes_header, buf,
+						&ts_payload_offset,
+						&bytes_avail)) {
+		spin_unlock(&feed_data->audio_buffer_lock);
+		return 0;
+	}
+
+	if (mpq_dmx_parse_remaining_audio_pes_header(feed, feed_data,
+						pes_header, buf,
+						&ts_payload_offset,
+						&bytes_avail)) {
+		spin_unlock(&feed_data->audio_buffer_lock);
+		return 0;
+	}
+
+	/*
+	 * If we reached here,
+	 * then we are now at the PES payload data
+	 */
+	if (bytes_avail == 0) {
+		spin_unlock(&feed_data->audio_buffer_lock);
+		return 0;
+	}
+
+	/*
+	 * Need to back-up the PTS information
+	 * of the start of new PES
+	 */
+	if (feed_data->first_pts_dts_copy) {
+		mpq_dmx_save_audio_pts_dts(feed_data);
+		feed_data->first_pts_dts_copy = 0;
+	}
+
+	/* Update error counters based on TS header */
+	feed_data->ts_packets_num++;
+	feed_data->tei_errs += ts_header->transport_error_indicator;
+	mpq_demux->decoder_stat[feed_data->stream_interface].ts_errors +=
+		ts_header->transport_error_indicator;
+	mpq_dmx_check_audio_continuity(feed_data,
+			ts_header->continuity_counter,
+			discontinuity_indicator);
+	mpq_demux->decoder_stat[feed_data->stream_interface].cc_errors +=
+		feed_data->continuity_errs;
+
+	/* actual writing of audio data for a stream */
+	ret = mpq_streambuffer_data_write(stream_buffer, buf+ts_payload_offset,
+		bytes_avail);
+	if (ret < 0) {
+		mpq_demux->decoder_stat
+			[feed_data->stream_interface].drop_count += bytes_avail;
+		feed_data->ts_dropped_bytes += bytes_avail;
+		if (ret == -ENOSPC)
+			mpq_dmx_notify_overflow(feed);
+	} else {
+		feed->peslen += bytes_avail;
+	}
+
+	spin_unlock(&feed_data->audio_buffer_lock);
+
+	return 0;
+}
+
+/* function ptr used in several places, handle differently */
+/*
+ * mpq_dmx_decoder_buffer_status() - Report error state, fullness, free
+ * space, total size and read/write offsets of the decoder buffer attached
+ * to a video or audio feed.
+ *
+ * @feed: feed to query (must be a video or audio feed)
+ * @dmx_buffer_status: output status structure
+ *
+ * Return: 0 on success, -EINVAL on wrong feed type or released buffer.
+ */
+int mpq_dmx_decoder_buffer_status(struct dvb_demux_feed *feed,
+	struct dmx_buffer_status *dmx_buffer_status)
+{
+	struct mpq_demux *mpq_demux;
+	struct mpq_feed *mpq_feed;
+	struct mpq_streambuffer *sbuff;
+	int is_video = dvb_dmx_is_video_feed(feed);
+
+	if (!is_video && !dvb_dmx_is_audio_feed(feed)) {
+		MPQ_DVB_ERR_PRINT("%s: Invalid feed type %d\n",
+			__func__, feed->pes_type);
+		return -EINVAL;
+	}
+
+	mpq_demux = feed->demux->priv;
+	mutex_lock(&mpq_demux->mutex);
+
+	mpq_feed = feed->priv;
+	sbuff = is_video ? mpq_feed->video_info.video_buffer :
+		mpq_feed->audio_info.audio_buffer;
+	if (!sbuff) {
+		/* decoder buffer was already released */
+		mutex_unlock(&mpq_demux->mutex);
+		return -EINVAL;
+	}
+
+	dmx_buffer_status->error = sbuff->raw_data.error;
+
+	if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR) {
+		/* linear-group mode: all buffers share one size */
+		u32 unit = sbuff->buffers[0].size;
+
+		dmx_buffer_status->fullness =
+			unit * sbuff->pending_buffers_count;
+		dmx_buffer_status->free_bytes =
+			unit * (sbuff->buffers_num -
+				sbuff->pending_buffers_count);
+		dmx_buffer_status->size = unit * sbuff->buffers_num;
+	} else {
+		/* ring-buffer mode: single buffer accounting */
+		dmx_buffer_status->fullness =
+			mpq_streambuffer_data_avail(sbuff);
+		dmx_buffer_status->free_bytes =
+			mpq_streambuffer_data_free(sbuff);
+		dmx_buffer_status->size = sbuff->buffers[0].size;
+	}
+
+	mpq_streambuffer_get_data_rw_offset(sbuff,
+		&dmx_buffer_status->read_offset,
+		&dmx_buffer_status->write_offset);
+
+	mutex_unlock(&mpq_demux->mutex);
+	return 0;
+}
+
+/*
+ * mpq_dmx_process_video_packet() - Extract the STC attached to a video TS
+ * packet and dispatch the packet to the framing / no-framing handler.
+ *
+ * @feed: video feed the packet belongs to
+ * @buf: one TS packet (timestamp bytes at STC_LOCATION_IDX when present)
+ *
+ * Return: result of the selected packet handler.
+ */
+int mpq_dmx_process_video_packet(
+	struct dvb_demux_feed *feed,
+	const u8 *buf)
+{
+	u64 curr_stc;
+	struct mpq_demux *mpq_demux = feed->demux->priv;
+
+	if ((mpq_demux->source >= DMX_SOURCE_DVR0) &&
+		(mpq_demux->demux.tsp_format != DMX_TSP_FORMAT_192_TAIL)) {
+		/* DVR input without a trailing timestamp - no STC */
+		curr_stc = 0;
+	} else {
+		if (mpq_demux->ts_packet_timestamp_source !=
+			TSIF_TTS_LPASS_TIMER) {
+			/* 24-bit TSIF timestamp */
+			curr_stc = (u64)buf[STC_LOCATION_IDX + 2] << 16;
+			curr_stc += buf[STC_LOCATION_IDX + 1] << 8;
+			curr_stc += buf[STC_LOCATION_IDX];
+			curr_stc *= 256; /* convert from 105.47 KHZ to 27MHz */
+		} else {
+			/*
+			 * 32-bit LPASS timer timestamp. Cast before the
+			 * shift: 'buf[x] << 24' promotes to signed int and
+			 * sign-extends into the u64 when bit 31 is set,
+			 * corrupting timestamps >= 0x80000000.
+			 */
+			curr_stc = (u64)buf[STC_LOCATION_IDX + 3] << 24;
+			curr_stc += buf[STC_LOCATION_IDX + 2] << 16;
+			curr_stc += buf[STC_LOCATION_IDX + 1] << 8;
+			curr_stc += buf[STC_LOCATION_IDX];
+		}
+	}
+
+	if (!video_framing)
+		return mpq_dmx_process_video_packet_no_framing(feed, buf,
+				curr_stc);
+	else
+		return mpq_dmx_process_video_packet_framing(feed, buf,
+				curr_stc);
+}
+
+/*
+ * mpq_dmx_process_audio_packet() - Extract the STC attached to an audio
+ * TS packet and dispatch it to the no-framing handler.
+ *
+ * @feed: audio feed the packet belongs to
+ * @buf: one TS packet (timestamp bytes at STC_LOCATION_IDX when present)
+ *
+ * Return: result of mpq_dmx_process_audio_packet_no_framing().
+ */
+int mpq_dmx_process_audio_packet(
+	struct dvb_demux_feed *feed,
+	const u8 *buf)
+{
+	u64 curr_stc;
+	struct mpq_demux *mpq_demux = feed->demux->priv;
+
+	if ((mpq_demux->source >= DMX_SOURCE_DVR0) &&
+		(mpq_demux->demux.tsp_format != DMX_TSP_FORMAT_192_TAIL)) {
+		/* DVR input without a trailing timestamp - no STC */
+		curr_stc = 0;
+	} else {
+		if (mpq_demux->ts_packet_timestamp_source !=
+			TSIF_TTS_LPASS_TIMER) {
+			/* 24-bit TSIF timestamp */
+			curr_stc = (u64)buf[STC_LOCATION_IDX + 2] << 16;
+			curr_stc += buf[STC_LOCATION_IDX + 1] << 8;
+			curr_stc += buf[STC_LOCATION_IDX];
+			curr_stc *= 256; /* convert from 105.47 KHZ to 27MHz */
+		} else {
+			/*
+			 * 32-bit LPASS timer timestamp. Cast before the
+			 * shift: 'buf[x] << 24' promotes to signed int and
+			 * sign-extends into the u64 when bit 31 is set,
+			 * corrupting timestamps >= 0x80000000.
+			 */
+			curr_stc = (u64)buf[STC_LOCATION_IDX + 3] << 24;
+			curr_stc += buf[STC_LOCATION_IDX + 2] << 16;
+			curr_stc += buf[STC_LOCATION_IDX + 1] << 8;
+			curr_stc += buf[STC_LOCATION_IDX];
+		}
+	}
+
+	return mpq_dmx_process_audio_packet_no_framing(feed, buf, curr_stc);
+}
+
+/*
+ * mpq_dmx_extract_pcr_and_dci() - Extract the PCR value and discontinuity
+ * indicator from a TS packet's adaptation field.
+ *
+ * @buf: TS packet to inspect
+ * @pcr: output, PCR in 27MHz units (base * 300 + extension)
+ * @dci: output, the adaptation field's discontinuity indicator
+ *
+ * Return: 1 when a PCR was extracted, 0 otherwise (NULL arguments, no
+ * adaptation field, transport error indicator set, or no PCR present).
+ */
+int mpq_dmx_extract_pcr_and_dci(const u8 *buf, u64 *pcr, int *dci)
+{
+	const struct ts_packet_header *ts_header;
+	const struct ts_adaptation_field *adaptation_field;
+
+	if (buf == NULL || pcr == NULL || dci == NULL)
+		return 0;
+
+	ts_header = (const struct ts_packet_header *)buf;
+
+	/* Make sure this TS packet has a adaptation field */
+	if ((ts_header->sync_byte != 0x47) ||
+		(ts_header->adaptation_field_control == 0) ||
+		(ts_header->adaptation_field_control == 1) ||
+		ts_header->transport_error_indicator)
+		return 0;
+
+	adaptation_field = (const struct ts_adaptation_field *)
+			(buf + sizeof(struct ts_packet_header));
+
+	if ((!adaptation_field->adaptation_field_length) ||
+		(!adaptation_field->PCR_flag))
+		return 0; /* 0 adaptation field or no PCR */
+
+	/*
+	 * Reassemble the 33-bit PCR base from its five bitfield fragments
+	 * (field widths come from struct ts_adaptation_field, declared
+	 * elsewhere), then convert to 27MHz: base * 300 + 9-bit extension.
+	 */
+	*pcr = ((u64)adaptation_field->program_clock_reference_base_1) << 25;
+	*pcr += ((u64)adaptation_field->program_clock_reference_base_2) << 17;
+	*pcr += ((u64)adaptation_field->program_clock_reference_base_3) << 9;
+	*pcr += ((u64)adaptation_field->program_clock_reference_base_4) << 1;
+	*pcr += adaptation_field->program_clock_reference_base_5;
+	*pcr *= 300;
+	*pcr += (((u64)adaptation_field->program_clock_reference_ext_1) << 8) +
+		adaptation_field->program_clock_reference_ext_2;
+
+	*dci = adaptation_field->discontinuity_indicator;
+
+	return 1;
+}
+
+/*
+ * mpq_dmx_process_pcr_packet() - Extract PCR and discontinuity state from
+ * a TS packet, pair them with the packet's STC, and raise a DMX_OK_PCR
+ * data-ready event.
+ *
+ * @feed: PCR feed
+ * @buf: one TS packet
+ *
+ * Return: always 0; packets without a PCR are silently skipped.
+ */
+int mpq_dmx_process_pcr_packet(
+	struct dvb_demux_feed *feed,
+	const u8 *buf)
+{
+	u64 stc;
+	struct dmx_data_ready data;
+	struct mpq_demux *mpq_demux = feed->demux->priv;
+
+	if (mpq_dmx_extract_pcr_and_dci(buf, &data.pcr.pcr,
+		&data.pcr.disc_indicator_set) == 0)
+		return 0;
+
+	/*
+	 * When we play from front-end, we configure HW
+	 * to output the extra timestamp, if we are playing
+	 * from DVR, we don't have a timestamp if the packet
+	 * format is not 192-tail.
+	 */
+	if ((mpq_demux->source >= DMX_SOURCE_DVR0) &&
+		(mpq_demux->demux.tsp_format != DMX_TSP_FORMAT_192_TAIL)) {
+		stc = 0;
+	} else {
+		if (mpq_demux->ts_packet_timestamp_source !=
+			TSIF_TTS_LPASS_TIMER) {
+			/* 24-bit TSIF timestamp */
+			stc = (u64)buf[STC_LOCATION_IDX + 2] << 16;
+			stc += buf[STC_LOCATION_IDX + 1] << 8;
+			stc += buf[STC_LOCATION_IDX];
+			stc *= 256; /* convert from 105.47 KHZ to 27MHz */
+		} else {
+			/*
+			 * 32-bit LPASS timer timestamp. Cast before the
+			 * shift: 'buf[x] << 24' promotes to signed int and
+			 * sign-extends into the u64 when bit 31 is set,
+			 * corrupting STC values >= 0x80000000.
+			 */
+			stc = (u64)buf[STC_LOCATION_IDX + 3] << 24;
+			stc += buf[STC_LOCATION_IDX + 2] << 16;
+			stc += buf[STC_LOCATION_IDX + 1] << 8;
+			stc += buf[STC_LOCATION_IDX];
+		}
+	}
+
+	data.data_length = 0;
+	data.pcr.stc = stc;
+	data.status = DMX_OK_PCR;
+	feed->data_ready_cb.ts(&feed->feed.ts, &data);
+
+	return 0;
+}
+
+/*
+ * mpq_dmx_decoder_eos_cmd() - Queue an end-of-stream (EOS) out-of-band
+ * packet to the decoder buffer of a video (feed_type == 1) or audio
+ * (feed_type == 2) feed.
+ *
+ * @mpq_feed: feed whose decoder stream buffer receives the EOS packet
+ * @feed_type: 1 for video, 2 for audio; any other value is a no-op
+ *
+ * Return: 0 on success or when there is nothing to do, otherwise the
+ * negative error propagated from mpq_streambuffer_pkt_write().
+ */
+int mpq_dmx_decoder_eos_cmd(struct mpq_feed *mpq_feed, int feed_type)
+{
+	if (feed_type == 1) { /* video feed */
+		struct mpq_video_feed_info *feed_data = &mpq_feed->video_info;
+		struct mpq_streambuffer *stream_buffer;
+		struct mpq_streambuffer_packet_header oob_packet;
+		struct mpq_adapter_video_meta_data oob_meta_data;
+		int ret;
+
+		spin_lock(&feed_data->video_buffer_lock);
+		stream_buffer = feed_data->video_buffer;
+
+		if (stream_buffer == NULL) {
+			MPQ_DVB_DBG_PRINT("%s: video_buffer released\n",
+				__func__);
+			spin_unlock(&feed_data->video_buffer_lock);
+			return 0;
+		}
+
+		memset(&oob_packet, 0, sizeof(oob_packet));
+		/*
+		 * Zero the meta-data too: the whole struct is copied to the
+		 * stream buffer (user_data_len covers all of it), so leaving
+		 * it uninitialized would leak kernel stack bytes.
+		 */
+		memset(&oob_meta_data, 0, sizeof(oob_meta_data));
+		oob_packet.user_data_len = sizeof(oob_meta_data);
+		oob_meta_data.packet_type = DMX_EOS_PACKET;
+
+		ret = mpq_streambuffer_pkt_write(stream_buffer, &oob_packet,
+			(u8 *)&oob_meta_data);
+
+		spin_unlock(&feed_data->video_buffer_lock);
+		return (ret < 0) ? ret : 0;
+
+	} else if (feed_type == 2) { /* audio feed */
+		struct mpq_audio_feed_info *feed_data = &mpq_feed->audio_info;
+		struct mpq_streambuffer *stream_buffer;
+		struct mpq_streambuffer_packet_header oob_packet;
+		struct mpq_adapter_audio_meta_data oob_meta_data;
+		int ret;
+
+		spin_lock(&feed_data->audio_buffer_lock);
+		stream_buffer = feed_data->audio_buffer;
+
+		if (stream_buffer == NULL) {
+			MPQ_DVB_DBG_PRINT("%s: audio_buffer released\n",
+				__func__);
+			spin_unlock(&feed_data->audio_buffer_lock);
+			return 0;
+		}
+
+		memset(&oob_packet, 0, sizeof(oob_packet));
+		/* Same stack-leak prevention as the video branch */
+		memset(&oob_meta_data, 0, sizeof(oob_meta_data));
+		oob_packet.user_data_len = sizeof(oob_meta_data);
+		oob_meta_data.packet_type = DMX_EOS_PACKET;
+
+		ret = mpq_streambuffer_pkt_write(stream_buffer, &oob_packet,
+			(u8 *)&oob_meta_data);
+
+		spin_unlock(&feed_data->audio_buffer_lock);
+		return (ret < 0) ? ret : 0;
+	}
+
+	return 0;
+}
+
+/*
+ * mpq_dmx_convert_tts() - Convert a raw packet timestamp to 27MHz units.
+ *
+ * TSIF timestamps are 24-bit values in 105.47KHz units and are scaled by
+ * 256; LPASS timer timestamps are 32-bit values used as-is.
+ *
+ * @feed: feed used to determine the timestamp source
+ * @timestamp: raw little-endian timestamp bytes (TIMESTAMP_LEN bytes)
+ * @timestampIn27Mhz: output, converted value; ignored when NULL
+ */
+void mpq_dmx_convert_tts(struct dvb_demux_feed *feed,
+		const u8 timestamp[TIMESTAMP_LEN],
+		u64 *timestampIn27Mhz)
+{
+	struct mpq_demux *mpq_demux = feed->demux->priv;
+
+	if (unlikely(!timestampIn27Mhz))
+		return;
+
+	if (mpq_demux->ts_packet_timestamp_source != TSIF_TTS_LPASS_TIMER) {
+		*timestampIn27Mhz = (u64)timestamp[2] << 16;
+		*timestampIn27Mhz += timestamp[1] << 8;
+		*timestampIn27Mhz += timestamp[0];
+		*timestampIn27Mhz *= 256; /* convert from 105.47 KHZ to 27MHz */
+	} else {
+		/*
+		 * Cast before shifting: 'timestamp[3] << 24' promotes to
+		 * signed int and sign-extends into the u64 when bit 31 is
+		 * set, corrupting timestamps >= 0x80000000.
+		 */
+		*timestampIn27Mhz = (u64)timestamp[3] << 24;
+		*timestampIn27Mhz += timestamp[2] << 16;
+		*timestampIn27Mhz += timestamp[1] << 8;
+		*timestampIn27Mhz += timestamp[0];
+	}
+}
+
+/*
+ * mpq_sdmx_open_session() - Open (or take another reference to) the
+ * secure demux (sdmx) session of this demux instance.
+ *
+ * When a session already exists only the reference count is bumped.
+ * Otherwise a new session is opened and configured: processing mode
+ * (push/pull derived from the demux playback mode), input packet format
+ * (188/192-byte), odd/even scrambling-bit values, and log level.
+ * Per-session processing statistics are reset as well.
+ *
+ * @mpq_demux: demux instance whose session is opened
+ *
+ * Return: SDMX_SUCCESS (0) on success, -EINVAL on invalid tsp format or
+ * session-config failure, or the sdmx error from sdmx_open_session().
+ * NOTE(review): 'ret' is declared enum sdmx_status yet -EINVAL is also
+ * returned; callers must treat the value as a plain int.
+ */
+int mpq_sdmx_open_session(struct mpq_demux *mpq_demux)
+{
+	enum sdmx_status ret = SDMX_SUCCESS;
+	enum sdmx_proc_mode proc_mode;
+	enum sdmx_pkt_format pkt_format;
+
+	MPQ_DVB_DBG_PRINT("%s: ref_count %d\n",
+		__func__, mpq_demux->sdmx_session_ref_count);
+
+	if (mpq_demux->sdmx_session_ref_count) {
+		/* session is already open */
+		mpq_demux->sdmx_session_ref_count++;
+		return ret;
+	}
+
+	proc_mode = (mpq_demux->demux.playback_mode == DMX_PB_MODE_PUSH) ?
+		SDMX_PUSH_MODE : SDMX_PULL_MODE;
+	MPQ_DVB_DBG_PRINT(
+		"%s: Proc mode = %s\n",
+		__func__, SDMX_PUSH_MODE == proc_mode ? "Push" : "Pull");
+
+	/* non-DVR (frontend) sources always use 192-byte packets */
+	if (mpq_demux->source < DMX_SOURCE_DVR0) {
+		pkt_format = SDMX_192_BYTE_PKT;
+	} else if (mpq_demux->demux.tsp_format == DMX_TSP_FORMAT_188) {
+		pkt_format = SDMX_188_BYTE_PKT;
+	} else if (mpq_demux->demux.tsp_format == DMX_TSP_FORMAT_192_TAIL) {
+		pkt_format = SDMX_192_BYTE_PKT;
+	} else {
+		MPQ_DVB_ERR_PRINT("%s: invalid tsp format\n", __func__);
+		return -EINVAL;
+	}
+
+	MPQ_DVB_DBG_PRINT("%s: (%s) source, packet format: %d\n",
+		__func__,
+		(mpq_demux->source < DMX_SOURCE_DVR0) ?
+		"frontend" : "DVR", pkt_format);
+
+	/* open session and set configuration */
+	ret = sdmx_open_session(&mpq_demux->sdmx_session_handle);
+	if (ret != SDMX_SUCCESS) {
+		MPQ_DVB_ERR_PRINT("%s: Could not open session. ret=%d\n",
+			__func__, ret);
+		return ret;
+	}
+
+	MPQ_DVB_DBG_PRINT("%s: new session_handle = %d\n",
+		__func__, mpq_demux->sdmx_session_handle);
+
+	ret = sdmx_set_session_cfg(mpq_demux->sdmx_session_handle,
+		proc_mode,
+		SDMX_PKT_ENC_MODE,
+		pkt_format,
+		mpq_sdmx_scramble_odd,
+		mpq_sdmx_scramble_even);
+	if (ret != SDMX_SUCCESS) {
+		MPQ_DVB_ERR_PRINT("%s: Could not set session config. ret=%d\n",
+			__func__, ret);
+		/* roll back the session opened above */
+		sdmx_close_session(mpq_demux->sdmx_session_handle);
+		mpq_demux->sdmx_session_handle = SDMX_INVALID_SESSION_HANDLE;
+		return -EINVAL;
+	}
+
+	ret = sdmx_set_log_level(mpq_demux->sdmx_session_handle,
+		mpq_demux->sdmx_log_level);
+	if (ret != SDMX_SUCCESS) {
+		MPQ_DVB_ERR_PRINT("%s: Could not set log level. ret=%d\n",
+			__func__, ret);
+		/* Don't fail open session if just log level setting failed */
+		ret = 0;
+	}
+
+	/* Reset per-session processing statistics */
+	mpq_demux->sdmx_process_count = 0;
+	mpq_demux->sdmx_process_time_sum = 0;
+	mpq_demux->sdmx_process_time_average = 0;
+	mpq_demux->sdmx_process_time_max = 0;
+	mpq_demux->sdmx_process_packets_sum = 0;
+	mpq_demux->sdmx_process_packets_average = 0;
+	mpq_demux->sdmx_process_packets_min = 0;
+
+	mpq_demux->sdmx_session_ref_count++;
+	return ret;
+}
+
+/*
+ * mpq_sdmx_close_session() - Drop one reference to the secure demux
+ * session; the underlying sdmx session is closed when the last reference
+ * goes away.
+ *
+ * @mpq_demux: demux instance owning the session
+ *
+ * Return: 0 on success, -EINVAL if no session reference is held.
+ */
+int mpq_sdmx_close_session(struct mpq_demux *mpq_demux)
+{
+	enum sdmx_status close_status;
+
+	MPQ_DVB_DBG_PRINT("%s: session_handle = %d, ref_count %d\n",
+		__func__,
+		mpq_demux->sdmx_session_handle,
+		mpq_demux->sdmx_session_ref_count);
+
+	if (mpq_demux->sdmx_session_ref_count == 0)
+		return -EINVAL;
+
+	if (mpq_demux->sdmx_session_ref_count == 1) {
+		/* Last reference - tear down the actual sdmx session */
+		close_status =
+			sdmx_close_session(mpq_demux->sdmx_session_handle);
+		if (close_status != SDMX_SUCCESS)
+			MPQ_DVB_ERR_PRINT("%s: sdmx_close_session failed %d\n",
+				__func__, close_status);
+		mpq_demux->sdmx_eos = 0;
+		mpq_demux->sdmx_session_handle = SDMX_INVALID_SESSION_HANDLE;
+	}
+
+	mpq_demux->sdmx_session_ref_count--;
+
+	return 0;
+}
+
+/*
+ * mpq_sdmx_get_buffer_chunks() - Describe an ION buffer's physical layout
+ * as an array of contiguous chunks for the secure demux.
+ *
+ * @mpq_demux: demux instance holding the ION client
+ * @buff_handle: ION handle of the buffer to describe
+ * @actual_buff_size: number of bytes actually used; chunk sizes are
+ *	clamped so their sum does not exceed this value
+ * @buff_chunks: output array (zeroed first), one entry per scatter entry
+ *
+ * Return: 0 on success; negative errno if the scatter-gather table cannot
+ * be obtained, is empty, or exceeds SDMX_MAX_PHYSICAL_CHUNKS entries.
+ */
+static int mpq_sdmx_get_buffer_chunks(struct mpq_demux *mpq_demux,
+	struct ion_handle *buff_handle,
+	u32 actual_buff_size,
+	struct sdmx_buff_descr buff_chunks[SDMX_MAX_PHYSICAL_CHUNKS])
+{
+	int i;
+	struct sg_table *sg_ptr;
+	struct scatterlist *sg;
+	u32 chunk_size;
+	int ret;
+
+	memset(buff_chunks, 0,
+		sizeof(struct sdmx_buff_descr) * SDMX_MAX_PHYSICAL_CHUNKS);
+
+	sg_ptr = ion_sg_table(mpq_demux->ion_client, buff_handle);
+	if (IS_ERR_OR_NULL(sg_ptr)) {
+		ret = PTR_ERR(sg_ptr);
+		MPQ_DVB_ERR_PRINT("%s: ion_sg_table failed, ret=%d\n",
+			__func__, ret);
+		/* PTR_ERR(NULL) is 0 - make sure an error is reported */
+		if (!ret)
+			ret = -EINVAL;
+		return ret;
+	}
+
+	if (sg_ptr->nents == 0) {
+		MPQ_DVB_ERR_PRINT("%s: num of scattered entries is 0\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if (sg_ptr->nents > SDMX_MAX_PHYSICAL_CHUNKS) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: num of scattered entries %d greater than max supported %d\n",
+			__func__, sg_ptr->nents, SDMX_MAX_PHYSICAL_CHUNKS);
+		return -EINVAL;
+	}
+
+	/* Clamp each chunk so the total never exceeds actual_buff_size */
+	sg = sg_ptr->sgl;
+	for (i = 0; i < sg_ptr->nents; i++) {
+		buff_chunks[i].base_addr = (u64)sg_dma_address(sg);
+
+		if (sg->length > actual_buff_size)
+			chunk_size = actual_buff_size;
+		else
+			chunk_size = sg->length;
+
+		buff_chunks[i].size = chunk_size;
+		sg = sg_next(sg);
+		actual_buff_size -= chunk_size;
+	}
+
+	return 0;
+}
+
+/*
+ * mpq_sdmx_init_data_buffer() - Build the sdmx output data-buffer
+ * descriptor(s) for a feed.
+ *
+ * Video feeds use their decoder buffer set (linear-group mode when more
+ * than one buffer is configured); section/PCR feeds use the feed's
+ * internal sdmx ring buffer; all other feeds use the feed's output ring
+ * buffer. Each buffer is translated into physical chunks via
+ * mpq_sdmx_get_buffer_chunks().
+ *
+ * @mpq_demux: demux instance
+ * @feed: feed whose buffers are described
+ * @num_buffers: output, number of valid entries in @buf_desc
+ * @buf_desc: output descriptor array
+ * @buf_mode: output, SDMX_RING_BUF or SDMX_LINEAR_GROUP_BUF
+ *
+ * Return: 0 on success, -ENOMEM when the feed has no buffer allocated,
+ * or the error from mpq_sdmx_get_buffer_chunks().
+ */
+static int mpq_sdmx_init_data_buffer(struct mpq_demux *mpq_demux,
+	struct mpq_feed *feed, u32 *num_buffers,
+	struct sdmx_data_buff_descr buf_desc[DMX_MAX_DECODER_BUFFER_NUM],
+	enum sdmx_buf_mode *buf_mode)
+{
+	struct dvb_demux_feed *dvbdmx_feed = feed->dvb_demux_feed;
+	struct dvb_ringbuffer *buffer;
+	struct mpq_video_feed_info *feed_data = &feed->video_info;
+	struct ion_handle *sdmx_buff;
+	int ret;
+	int i;
+
+	*buf_mode = SDMX_RING_BUF;
+
+	if (dvb_dmx_is_video_feed(feed->dvb_demux_feed)) {
+		if (feed_data->buffer_desc.decoder_buffers_num > 1)
+			*buf_mode = SDMX_LINEAR_GROUP_BUF;
+		*num_buffers = feed_data->buffer_desc.decoder_buffers_num;
+
+		/* Describe each decoder buffer of the video feed */
+		for (i = 0; i < *num_buffers; i++) {
+			buf_desc[i].length =
+				feed_data->buffer_desc.desc[i].size;
+
+			ret = mpq_sdmx_get_buffer_chunks(mpq_demux,
+					feed_data->buffer_desc.ion_handle[i],
+					buf_desc[i].length,
+					buf_desc[i].buff_chunks);
+			if (ret) {
+				MPQ_DVB_ERR_PRINT(
+					"%s: mpq_sdmx_get_buffer_chunks failed\n",
+					__func__);
+				return ret;
+			}
+		}
+
+		return 0;
+	}
+
+	*num_buffers = 1;
+	if (dvb_dmx_is_sec_feed(dvbdmx_feed) ||
+		dvb_dmx_is_pcr_feed(dvbdmx_feed)) {
+		/* Section/PCR feeds output to the internal sdmx buffer */
+		buffer = &feed->sdmx_buf;
+		sdmx_buff = feed->sdmx_buf_handle;
+	} else {
+		/* Other feeds output directly to the feed's ring buffer */
+		buffer = (struct dvb_ringbuffer *)
+			dvbdmx_feed->feed.ts.buffer.ringbuff;
+		sdmx_buff = dvbdmx_feed->feed.ts.buffer.priv_handle;
+	}
+
+	if (sdmx_buff == NULL) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: Invalid buffer allocation\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	buf_desc[0].length = buffer->size;
+	ret = mpq_sdmx_get_buffer_chunks(mpq_demux, sdmx_buff,
+		buf_desc[0].length,
+		buf_desc[0].buff_chunks);
+	if (ret) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: mpq_sdmx_get_buffer_chunks failed\n",
+			__func__);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int mpq_sdmx_filter_setup(struct mpq_demux *mpq_demux,
+ struct dvb_demux_feed *dvbdmx_feed)
+{
+ int ret = 0;
+ struct mpq_feed *feed;
+ struct mpq_feed *main_rec_feed = NULL;
+ struct dvb_demux_feed *tmp;
+ struct sdmx_buff_descr metadata_buff_desc;
+ struct sdmx_data_buff_descr *data_buff_desc = NULL;
+ u32 data_buf_num = DMX_MAX_DECODER_BUFFER_NUM;
+ enum sdmx_buf_mode buf_mode;
+ enum sdmx_raw_out_format ts_out_format = SDMX_188_OUTPUT;
+ u32 filter_flags = 0;
+
+ feed = dvbdmx_feed->priv;
+
+ if (dvb_dmx_is_sec_feed(dvbdmx_feed)) {
+ feed->filter_type = SDMX_SECTION_FILTER;
+ if (dvbdmx_feed->feed.sec.check_crc)
+ filter_flags |= SDMX_FILTER_FLAG_VERIFY_SECTION_CRC;
+ MPQ_DVB_DBG_PRINT("%s: SDMX_SECTION_FILTER\n", __func__);
+ } else if (dvb_dmx_is_pcr_feed(dvbdmx_feed)) {
+ feed->filter_type = SDMX_PCR_FILTER;
+ MPQ_DVB_DBG_PRINT("%s: SDMX_PCR_FILTER\n", __func__);
+ } else if (dvb_dmx_is_video_feed(dvbdmx_feed)) {
+ feed->filter_type = SDMX_SEPARATED_PES_FILTER;
+ MPQ_DVB_DBG_PRINT("%s: SDMX_SEPARATED_PES_FILTER\n", __func__);
+ } else if (dvb_dmx_is_rec_feed(dvbdmx_feed)) {
+ feed->filter_type = SDMX_RAW_FILTER;
+ switch (dvbdmx_feed->tsp_out_format) {
+ case (DMX_TSP_FORMAT_188):
+ ts_out_format = SDMX_188_OUTPUT;
+ break;
+ case (DMX_TSP_FORMAT_192_HEAD):
+ ts_out_format = SDMX_192_HEAD_OUTPUT;
+ break;
+ case (DMX_TSP_FORMAT_192_TAIL):
+ ts_out_format = SDMX_192_TAIL_OUTPUT;
+ break;
+ default:
+ MPQ_DVB_ERR_PRINT(
+ "%s: Unsupported TS output format %d\n",
+ __func__, dvbdmx_feed->tsp_out_format);
+ return -EINVAL;
+ }
+ MPQ_DVB_DBG_PRINT("%s: SDMX_RAW_FILTER\n", __func__);
+ } else {
+ feed->filter_type = SDMX_PES_FILTER;
+ MPQ_DVB_DBG_PRINT("%s: SDMX_PES_FILTER\n", __func__);
+ }
+
+ data_buff_desc = vmalloc(
+ sizeof(*data_buff_desc)*DMX_MAX_DECODER_BUFFER_NUM);
+ if (!data_buff_desc) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: failed to allocate memory for data buffer\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ /*
+ * Recording feed sdmx filter handle lookup:
+ * In case this is a recording filter with multiple feeds,
+ * this feed is either the first feed of a new recording filter,
+ * or it is another feed of an existing filter for which a filter was
+ * already opened with sdmx. In such case, we need to look up in the
+ * feed pool for a allocated feed with same output buffer (meaning they
+ * belong to the same filter) and to use the already allocated sdmx
+ * filter handle.
+ */
+ if (feed->filter_type == SDMX_RAW_FILTER) {
+ tmp = mpq_dmx_peer_rec_feed(dvbdmx_feed);
+ if (tmp)
+ main_rec_feed = tmp->priv;
+ }
+
+ /*
+ * If this PID is not part of existing recording filter,
+ * configure a new filter to SDMX.
+ */
+ if (!main_rec_feed) {
+ feed->secondary_feed = 0;
+
+ MPQ_DVB_DBG_PRINT(
+ "%s: Adding new sdmx filter, pid %d, flags=0x%X, ts_out_format=%d\n",
+ __func__, dvbdmx_feed->pid, filter_flags,
+ ts_out_format);
+
+ /* Meta-data initialization,
+ * Recording filters do no need meta-data buffers.
+ */
+ if (dvb_dmx_is_rec_feed(dvbdmx_feed)) {
+ metadata_buff_desc.base_addr = 0;
+ metadata_buff_desc.size = 0;
+ } else {
+ ret = mpq_sdmx_init_metadata_buffer(mpq_demux, feed,
+ &metadata_buff_desc);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: Failed to initialize metadata buffer. ret=%d\n",
+ __func__, ret);
+ goto sdmx_filter_setup_failed;
+ }
+ }
+
+ ret = mpq_sdmx_init_data_buffer(mpq_demux, feed, &data_buf_num,
+ data_buff_desc, &buf_mode);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: Failed to initialize data buffer. ret=%d\n",
+ __func__, ret);
+ mpq_sdmx_terminate_metadata_buffer(feed);
+ goto sdmx_filter_setup_failed;
+ }
+ ret = sdmx_add_filter(mpq_demux->sdmx_session_handle,
+ dvbdmx_feed->pid,
+ feed->filter_type,
+ &metadata_buff_desc,
+ buf_mode,
+ data_buf_num,
+ data_buff_desc,
+ &feed->sdmx_filter_handle,
+ ts_out_format,
+ filter_flags);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: SDMX_add_filter failed. ret = %d\n",
+ __func__, ret);
+ ret = -ENODEV;
+ mpq_sdmx_terminate_metadata_buffer(feed);
+ goto sdmx_filter_setup_failed;
+ }
+
+ MPQ_DVB_DBG_PRINT(
+ "%s: feed=0x%p, filter pid=%d, handle=%d, data buffer(s)=%d, size=%d\n",
+ __func__, feed, dvbdmx_feed->pid,
+ feed->sdmx_filter_handle,
+ data_buf_num, data_buff_desc[0].length);
+
+ mpq_demux->sdmx_filter_count++;
+ } else {
+ MPQ_DVB_DBG_PRINT(
+ "%s: Adding RAW pid to sdmx, pid %d\n",
+ __func__, dvbdmx_feed->pid);
+
+ feed->secondary_feed = 1;
+ feed->sdmx_filter_handle = main_rec_feed->sdmx_filter_handle;
+ ret = sdmx_add_raw_pid(mpq_demux->sdmx_session_handle,
+ feed->sdmx_filter_handle, dvbdmx_feed->pid);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: FAILED to add raw pid, ret=%d\n",
+ __func__, ret);
+ ret = -ENODEV;
+ goto sdmx_filter_setup_failed;
+ }
+ }
+
+ /*
+ * If pid has a key ladder id associated, we need to
+ * set it to SDMX.
+ */
+ if (dvbdmx_feed->secure_mode.is_secured &&
+ dvbdmx_feed->cipher_ops.operations_count) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: set key-ladder %d to PID %d\n",
+ __func__,
+ dvbdmx_feed->cipher_ops.operations[0].key_ladder_id,
+ dvbdmx_feed->cipher_ops.pid);
+
+ ret = sdmx_set_kl_ind(mpq_demux->sdmx_session_handle,
+ dvbdmx_feed->cipher_ops.pid,
+ dvbdmx_feed->cipher_ops.operations[0].key_ladder_id);
+
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: FAILED to set key ladder, ret=%d\n",
+ __func__, ret);
+ }
+ }
+
+ vfree(data_buff_desc);
+ return 0;
+
+sdmx_filter_setup_failed:
+ vfree(data_buff_desc);
+ return ret;
+}
+
+/**
+ * mpq_sdmx_init_feed - initialize secure demux related elements of mpq feed
+ *
+ * @mpq_demux: mpq_demux object
+ * @mpq_feed: mpq_feed object
+ *
+ * Opens an sdmx session (if needed), allocates the internal data buffer for
+ * PCR/section feeds, and sets up the sdmx filter. On any failure, everything
+ * acquired so far is released in reverse order.
+ *
+ * Note: the function assumes mpq_demux->mutex locking is done by caller.
+ */
+static int mpq_sdmx_init_feed(struct mpq_demux *mpq_demux,
+	struct mpq_feed *mpq_feed)
+{
+	int rc = mpq_sdmx_open_session(mpq_demux);
+
+	if (rc) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: mpq_sdmx_open_session failed, ret=%d\n",
+			__func__, rc);
+
+		return -ENODEV;
+	}
+
+	/* PCR and sections have internal buffer for SDMX */
+	if (dvb_dmx_is_pcr_feed(mpq_feed->dvb_demux_feed))
+		rc = mpq_sdmx_alloc_data_buf(mpq_feed, SDMX_PCR_BUFFER_SIZE);
+	else if (dvb_dmx_is_sec_feed(mpq_feed->dvb_demux_feed))
+		rc = mpq_sdmx_alloc_data_buf(mpq_feed,
+			SDMX_SECTION_BUFFER_SIZE);
+	else
+		rc = 0;
+
+	if (rc) {
+		MPQ_DVB_ERR_PRINT("%s: init buffer failed, ret=%d\n",
+			__func__, rc);
+		goto close_session;
+	}
+
+	rc = mpq_sdmx_filter_setup(mpq_demux, mpq_feed->dvb_demux_feed);
+	if (rc) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: mpq_sdmx_filter_setup failed, ret=%d\n",
+			__func__, rc);
+		goto free_data_buf;
+	}
+
+	mpq_demux->num_secure_feeds++;
+	return 0;
+
+free_data_buf:
+	mpq_sdmx_free_data_buf(mpq_feed);
+close_session:
+	mpq_sdmx_close_session(mpq_demux);
+	return rc;
+}
+
+/**
+ * mpq_dmx_init_mpq_feed() - initialize the mpq-specific state of a demux feed.
+ *
+ * @feed: dvb demux feed whose ->priv holds the mpq_feed to initialize
+ *
+ * Resets the feed's sdmx handles, installs the flush callback for TS feeds,
+ * performs video/audio specific initialization when applicable, and - only
+ * when sdmx firmware is loaded and the feed is secured (and not a recording
+ * feed) - initializes the secure demux filter for it.
+ *
+ * Return: 0 on success, negative errno otherwise (-ERESTARTSYS if the
+ * mutex wait was interrupted).
+ */
+int mpq_dmx_init_mpq_feed(struct dvb_demux_feed *feed)
+{
+	int ret = 0;
+	struct mpq_demux *mpq_demux = feed->demux->priv;
+	struct mpq_feed *mpq_feed = feed->priv;
+
+	if (mutex_lock_interruptible(&mpq_demux->mutex))
+		return -ERESTARTSYS;
+
+	/* Start from a clean sdmx state; handles are set later if needed */
+	mpq_feed->sdmx_buf_handle = NULL;
+	mpq_feed->metadata_buf_handle = NULL;
+	mpq_feed->sdmx_filter_handle = SDMX_INVALID_FILTER_HANDLE;
+
+	if (feed->type != DMX_TYPE_SEC)
+		feed->feed.ts.flush_buffer = mpq_dmx_flush_buffer;
+
+	if (dvb_dmx_is_video_feed(feed)) {
+		ret = mpq_dmx_init_video_feed(mpq_feed);
+		if (ret) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: mpq_dmx_init_video_feed failed, ret=%d\n",
+				__func__, ret);
+			goto init_mpq_feed_end;
+		}
+	}
+
+	if (dvb_dmx_is_audio_feed(feed)) {
+		ret = mpq_dmx_init_audio_feed(mpq_feed);
+		if (ret) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: mpq_dmx_init_audio_feed failed, ret=%d\n",
+				__func__, ret);
+			goto init_mpq_feed_end;
+		}
+	}
+
+	/*
+	 * sdmx is not relevant for recording filters, which always use
+	 * regular filters (non-sdmx)
+	 */
+	if (!mpq_sdmx_is_loaded() || !feed->secure_mode.is_secured ||
+		dvb_dmx_is_rec_feed(feed)) {
+		if (!mpq_sdmx_is_loaded())
+			mpq_demux->sdmx_session_handle =
+				SDMX_INVALID_SESSION_HANDLE;
+		goto init_mpq_feed_end;
+	}
+
+	/* Initialization of secure demux filters (PES/PCR/Video/Section) */
+	ret = mpq_sdmx_init_feed(mpq_demux, mpq_feed);
+	if (ret) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: mpq_sdmx_init_feed failed, ret=%d\n",
+			__func__, ret);
+		/* Undo the video/audio init done above on sdmx failure */
+		if (dvb_dmx_is_video_feed(feed))
+			mpq_dmx_terminate_video_feed(mpq_feed);
+		else if (dvb_dmx_is_audio_feed(feed))
+			mpq_dmx_terminate_audio_feed(mpq_feed);
+	}
+
+init_mpq_feed_end:
+	if (!ret) {
+		mpq_demux->num_active_feeds++;
+		/* Bump session id so stale sdmx results can be detected */
+		mpq_feed->session_id++;
+	}
+	mutex_unlock(&mpq_demux->mutex);
+	return ret;
+}
+
+/**
+ * mpq_dmx_set_cipher_ops() - set cipher (key-ladder) operations on a feed.
+ *
+ * @feed: dvb demux feed; must be secured
+ * @cipher_ops: requested cipher operations; at most one decrypt operation
+ *              is supported
+ *
+ * When an sdmx session is active and an operation is supplied, forwards the
+ * key-ladder index to sdmx for the given PID.
+ *
+ * Return: 0 on success, -EINVAL on bad arguments, -EPERM for a non-secure
+ * filter, -ENODEV if sdmx rejected the key-ladder update.
+ *
+ * Note: Called only when filter is in "GO" state - after feed has been started.
+ */
+int mpq_dmx_set_cipher_ops(struct dvb_demux_feed *feed,
+		struct dmx_cipher_operations *cipher_ops)
+{
+	struct mpq_feed *mpq_feed;
+	struct mpq_demux *mpq_demux;
+	int ret = 0;
+
+	if (!feed || !feed->priv || !cipher_ops) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: invalid parameters\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	/*
+	 * Do not dereference operations[0] unless at least one operation
+	 * exists - operations_count may legally be 0 here.
+	 */
+	MPQ_DVB_DBG_PRINT("%s(%d, %d, %d)\n",
+		__func__, cipher_ops->pid,
+		cipher_ops->operations_count,
+		cipher_ops->operations_count ?
+		cipher_ops->operations[0].key_ladder_id : 0);
+
+	if ((cipher_ops->operations_count > 1) ||
+		(cipher_ops->operations_count &&
+		 cipher_ops->operations[0].encrypt)) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: Invalid cipher operations, count=%d, encrypt=%d\n",
+			__func__, cipher_ops->operations_count,
+			cipher_ops->operations[0].encrypt);
+		return -EINVAL;
+	}
+
+	if (!feed->secure_mode.is_secured) {
+		/*
+		 * Filter is not configured as secured, setting cipher
+		 * operations is not allowed.
+		 */
+		MPQ_DVB_ERR_PRINT(
+			"%s: Cannot set cipher operations to non-secure filter\n",
+			__func__);
+		return -EPERM;
+	}
+
+	mpq_feed = feed->priv;
+	mpq_demux = mpq_feed->mpq_demux;
+
+	mutex_lock(&mpq_demux->mutex);
+
+	/*
+	 * Feed is running in secure mode, this secure mode request is to
+	 * update the key ladder id
+	 */
+	if ((mpq_demux->sdmx_session_handle != SDMX_INVALID_SESSION_HANDLE) &&
+		cipher_ops->operations_count) {
+		ret = sdmx_set_kl_ind(mpq_demux->sdmx_session_handle,
+			cipher_ops->pid,
+			cipher_ops->operations[0].key_ladder_id);
+		if (ret) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: FAILED to set key ladder, ret=%d\n",
+				__func__, ret);
+			ret = -ENODEV;
+		}
+	}
+
+	mutex_unlock(&mpq_demux->mutex);
+
+	return ret;
+}
+
+/*
+ * mpq_sdmx_invalidate_buffer() - invalidate CPU caches over a feed's output
+ * buffer(s) so data written by sdmx (secure side) is visible to the CPU.
+ *
+ * Non-video feeds have a single ring buffer: the internal sdmx buffer for
+ * section/PCR feeds, or the user-supplied TS buffer otherwise. Video feeds
+ * may have multiple decoder buffers; each non-secured one (base != NULL)
+ * is invalidated individually.
+ *
+ * Returns the last msm_ion_do_cache_op() result (0 on success).
+ */
+static int mpq_sdmx_invalidate_buffer(struct mpq_feed *mpq_feed)
+{
+	struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+	struct mpq_video_feed_info *feed_data;
+	struct dvb_ringbuffer *buffer;
+	struct ion_handle *ion_handle;
+	int ret = 0;
+	int i;
+
+	if (!dvb_dmx_is_video_feed(feed)) {
+		if (dvb_dmx_is_sec_feed(feed) ||
+			dvb_dmx_is_pcr_feed(feed)) {
+			/* Internal sdmx buffer is used for sections/PCR */
+			buffer = (struct dvb_ringbuffer *)
+				&mpq_feed->sdmx_buf;
+			ion_handle = mpq_feed->sdmx_buf_handle;
+		} else {
+			buffer = (struct dvb_ringbuffer *)
+				feed->feed.ts.buffer.ringbuff;
+			ion_handle = feed->feed.ts.buffer.priv_handle;
+		}
+
+		ret = msm_ion_do_cache_op(mpq_feed->mpq_demux->ion_client,
+			ion_handle, buffer->data,
+			buffer->size, ION_IOC_INV_CACHES);
+		if (ret)
+			MPQ_DVB_ERR_PRINT(
+				"%s: msm_ion_do_cache_op failed, ret = %d\n",
+				__func__, ret);
+		return ret;
+	}
+
+	/* Video buffers */
+	feed_data = &mpq_feed->video_info;
+	for (i = 0; i < feed_data->buffer_desc.decoder_buffers_num; i++) {
+		if (feed_data->buffer_desc.desc[i].base) {
+			/* Non-secured buffer */
+			ret = msm_ion_do_cache_op(
+				mpq_feed->mpq_demux->ion_client,
+				feed_data->buffer_desc.ion_handle[i],
+				feed_data->buffer_desc.desc[i].base,
+				feed_data->buffer_desc.desc[i].size,
+				ION_IOC_INV_CACHES);
+			if (ret)
+				MPQ_DVB_ERR_PRINT(
+					"%s: msm_ion_do_cache_op failed, ret = %d\n",
+					__func__, ret);
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * mpq_sdmx_prepare_filter_status() - snapshot a filter's buffer state into
+ * the status record that is handed to sdmx before processing.
+ *
+ * Fills handle, metadata fill/write state, and the data buffer state:
+ * the plain ring buffer for non-video feeds, or the decoder stream buffer
+ * (linear or ring mode) for video feeds. Error/status indicator fields are
+ * cleared; sdmx sets them on output.
+ *
+ * Consistency fix: the video-buffer spinlock is now referenced uniformly
+ * through feed_data (previously mixed with mpq_feed->video_info, the same
+ * object spelled two ways, which obscured the lock/unlock pairing).
+ */
+static void mpq_sdmx_prepare_filter_status(struct mpq_demux *mpq_demux,
+	struct sdmx_filter_status *filter_sts,
+	struct mpq_feed *mpq_feed)
+{
+	struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+	struct mpq_video_feed_info *feed_data;
+	struct mpq_streambuffer *sbuff;
+
+	filter_sts->filter_handle = mpq_feed->sdmx_filter_handle;
+	filter_sts->metadata_fill_count =
+		dvb_ringbuffer_avail(&mpq_feed->metadata_buf);
+	filter_sts->metadata_write_offset = mpq_feed->metadata_buf.pwrite;
+	filter_sts->error_indicators = 0;
+	filter_sts->status_indicators = 0;
+
+	MPQ_DVB_DBG_PRINT(
+		"%s: Filter meta-data buffer status: fill count = %d, write_offset = %d\n",
+		__func__, filter_sts->metadata_fill_count,
+		filter_sts->metadata_write_offset);
+
+	if (!dvb_dmx_is_video_feed(feed)) {
+		struct dvb_ringbuffer *buffer;
+
+		if (dvb_dmx_is_sec_feed(feed) ||
+			dvb_dmx_is_pcr_feed(feed)) {
+			buffer = (struct dvb_ringbuffer *)
+				&mpq_feed->sdmx_buf;
+		} else {
+			buffer = (struct dvb_ringbuffer *)
+				feed->feed.ts.buffer.ringbuff;
+		}
+
+		filter_sts->data_fill_count = dvb_ringbuffer_avail(buffer);
+		filter_sts->data_write_offset = buffer->pwrite;
+
+		MPQ_DVB_DBG_PRINT(
+			"%s: Filter buffers status: fill count = %d, write_offset = %d\n",
+			__func__, filter_sts->data_fill_count,
+			filter_sts->data_write_offset);
+
+		return;
+	}
+
+	/* Video feed - decoder buffers */
+	feed_data = &mpq_feed->video_info;
+
+	spin_lock(&feed_data->video_buffer_lock);
+	sbuff = feed_data->video_buffer;
+	if (sbuff == NULL) {
+		MPQ_DVB_DBG_PRINT(
+			"%s: video_buffer released\n",
+			__func__);
+		spin_unlock(&feed_data->video_buffer_lock);
+		return;
+	}
+
+	if (feed_data->buffer_desc.decoder_buffers_num > 1) {
+		/* linear mode: fill count is whole pending buffers */
+		filter_sts->data_fill_count = sbuff->pending_buffers_count;
+		filter_sts->data_write_offset =
+			sbuff->raw_data.pwrite /
+			sizeof(struct mpq_streambuffer_buffer_desc);
+	} else {
+		/* ring buffer mode: byte-granular fill count */
+		filter_sts->data_fill_count =
+			mpq_streambuffer_data_avail(sbuff);
+		mpq_streambuffer_get_data_rw_offset(sbuff, NULL,
+			&filter_sts->data_write_offset);
+
+	}
+
+	spin_unlock(&feed_data->video_buffer_lock);
+
+	MPQ_DVB_DBG_PRINT(
+		"%s: Decoder buffers filter status: fill count = %d, write_offset = %d\n",
+		__func__, filter_sts->data_fill_count,
+		filter_sts->data_write_offset);
+}
+
+/*
+ * mpq_sdmx_section_filtering() - run one section from the sdmx buffer
+ * through a single dvb demux section filter and deliver it on match.
+ *
+ * @mpq_feed: feed whose sdmx_buf holds the section at pread
+ * @f: the section filter (value/mask/mode tables)
+ * @header: sdmx metadata header describing the section payload length
+ *
+ * Returns 0 on match-delivered or no-match; negative on abort/stop.
+ * Caller must hold mpq_demux->mutex (verified below); in PULL mode the
+ * mutex is dropped around the blocking buffer_ctrl callback.
+ */
+static int mpq_sdmx_section_filtering(struct mpq_feed *mpq_feed,
+	struct dvb_demux_filter *f,
+	struct sdmx_metadata_header *header)
+{
+	struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+	int ret;
+	u8 neq = 0;
+	u8 xor;
+	u8 tmp;
+	int i;
+
+	if (!mutex_is_locked(&mpq_feed->mpq_demux->mutex)) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: Mutex should have been locked\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	/*
+	 * Classic dvb section match: any bit set in maskandmode that
+	 * differs from filter_value rejects; neq accumulates "not-equal"
+	 * bits for the doneq check below.
+	 */
+	for (i = 0; i < DVB_DEMUX_MASK_MAX; i++) {
+		tmp = DVB_RINGBUFFER_PEEK(&mpq_feed->sdmx_buf, i);
+		xor = f->filter.filter_value[i] ^ tmp;
+
+		if (f->maskandmode[i] & xor)
+			return 0;
+
+		neq |= f->maskandnotmode[i] & xor;
+	}
+
+	if (f->doneq && !neq)
+		return 0;
+
+	if (feed->demux->playback_mode == DMX_PB_MODE_PULL) {
+		/* May sleep waiting for output space; drop the mutex */
+		mutex_unlock(&mpq_feed->mpq_demux->mutex);
+
+		ret = feed->demux->buffer_ctrl.sec(&f->filter,
+			header->payload_length, 1);
+
+		mutex_lock(&mpq_feed->mpq_demux->mutex);
+
+		if (ret) {
+			MPQ_DVB_DBG_PRINT(
+				"%s: buffer_ctrl.sec aborted\n",
+				__func__);
+			return ret;
+		}
+
+		/* Filter may have been stopped while the mutex was dropped */
+		if (mpq_feed->sdmx_filter_handle ==
+			SDMX_INVALID_FILTER_HANDLE) {
+			MPQ_DVB_DBG_PRINT("%s: filter was stopped\n",
+				__func__);
+			return -ENODEV;
+		}
+	}
+
+	/* Deliver payload, split in two parts if it wraps the ring buffer */
+	if (mpq_feed->sdmx_buf.pread + header->payload_length <
+		mpq_feed->sdmx_buf.size) {
+		feed->cb.sec(&mpq_feed->sdmx_buf.data[mpq_feed->sdmx_buf.pread],
+			header->payload_length,
+			NULL, 0, &f->filter);
+	} else {
+		int split = mpq_feed->sdmx_buf.size - mpq_feed->sdmx_buf.pread;
+
+		feed->cb.sec(&mpq_feed->sdmx_buf.data[mpq_feed->sdmx_buf.pread],
+			split,
+			&mpq_feed->sdmx_buf.data[0],
+			header->payload_length - split,
+			&f->filter);
+	}
+
+	return 0;
+}
+
+/*
+ * mpq_sdmx_check_ts_stall() - in PULL mode, block until the output buffer
+ * has room for @req bytes (plus the dmxdev event); no-op in PUSH mode.
+ *
+ * Caller must hold mpq_demux->mutex; it is dropped around the blocking
+ * buffer_ctrl callback and re-taken afterwards.
+ */
+static int mpq_sdmx_check_ts_stall(struct mpq_demux *mpq_demux,
+	struct mpq_feed *mpq_feed,
+	struct sdmx_filter_status *sts,
+	size_t req,
+	int events_only)
+{
+	struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+	int ret;
+
+	if (!mutex_is_locked(&mpq_feed->mpq_demux->mutex)) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: Mutex should have been locked\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	/*
+	 * For PULL mode need to verify there is enough space for the dmxdev
+	 * event. Also, if data buffer is full we want to stall until some
+	 * data is removed from it to prevent calling the sdmx when it cannot
+	 * output data to the still full buffer. PUSH mode never stalls.
+	 */
+	if (mpq_demux->demux.playback_mode != DMX_PB_MODE_PULL)
+		return 0;
+
+	MPQ_DVB_DBG_PRINT("%s: Stalling for events and %zu bytes\n",
+		__func__, req);
+
+	mutex_unlock(&mpq_demux->mutex);
+
+	ret = mpq_demux->demux.buffer_ctrl.ts(&feed->feed.ts, req, 1);
+	MPQ_DVB_DBG_PRINT("%s: stall result = %d\n",
+		__func__, ret);
+
+	mutex_lock(&mpq_demux->mutex);
+
+	/* Filter may have been stopped while the mutex was dropped */
+	if (mpq_feed->sdmx_filter_handle == SDMX_INVALID_FILTER_HANDLE) {
+		MPQ_DVB_DBG_PRINT("%s: filter was stopped\n",
+			__func__);
+		return -ENODEV;
+	}
+
+	return ret;
+}
+
+/*
+ * Handle filter results for filters with no extra meta-data.
+ *
+ * Drains the sdmx metadata ring: each PES produces a metadata header
+ * followed by TS counters. For every complete PES, a DMX_OK data event
+ * and a DMX_OK_PES_END event are delivered to the user callback. In PULL
+ * mode the function stalls (possibly aborting) when the output is full;
+ * in PUSH mode a full buffer is reported as overflow instead.
+ */
+static void mpq_sdmx_pes_filter_results(struct mpq_demux *mpq_demux,
+	struct mpq_feed *mpq_feed,
+	struct sdmx_filter_status *sts)
+{
+	int ret;
+	struct sdmx_metadata_header header;
+	struct sdmx_pes_counters counters;
+	struct dmx_data_ready data_event;
+	struct dmx_data_ready pes_event;
+	struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+	struct dvb_ringbuffer *buf = (struct dvb_ringbuffer *)
+				feed->feed.ts.buffer.ringbuff;
+	ssize_t bytes_avail;
+
+	if ((!sts->metadata_fill_count) && (!sts->data_fill_count))
+		goto pes_filter_check_overflow;
+
+	MPQ_DVB_DBG_PRINT(
+		"%s: Meta: fill=%u, write=%u. Data: fill=%u, write=%u\n",
+		__func__, sts->metadata_fill_count, sts->metadata_write_offset,
+		sts->data_fill_count, sts->data_write_offset);
+
+	/* Advance metadata write pointer to what sdmx reported */
+	mpq_feed->metadata_buf.pwrite = sts->metadata_write_offset;
+
+	if ((sts->metadata_fill_count == 0) &&
+		(sts->error_indicators & SDMX_FILTER_ERR_D_BUF_FULL)) {
+		ssize_t free = dvb_ringbuffer_free(buf);
+
+		ret = 0;
+		/*
+		 * Data buffer is full but no metadata: stall for space,
+		 * unless free space already exceeds the max PES size
+		 * (stalling then could never be satisfied).
+		 */
+		if ((free + SZ_2K) < MAX_PES_LENGTH)
+			ret = mpq_sdmx_check_ts_stall(mpq_demux, mpq_feed, sts,
+				free + SZ_2K, 0);
+		else
+			MPQ_DVB_ERR_PRINT(
+				"%s: Cannot stall when free space bigger than max PES size\n",
+				__func__);
+		if (ret) {
+			MPQ_DVB_DBG_PRINT(
+				"%s: mpq_sdmx_check_ts_stall aborted\n",
+				__func__);
+			return;
+		}
+	}
+
+	while (sts->metadata_fill_count) {
+		bytes_avail = dvb_ringbuffer_avail(&mpq_feed->metadata_buf);
+		if (bytes_avail < (sizeof(header) + sizeof(counters))) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: metadata_fill_count is %d less than required %zu bytes\n",
+				__func__,
+				sts->metadata_fill_count,
+				sizeof(header) + sizeof(counters));
+
+			/* clean-up remaining bytes to try to recover */
+			DVB_RINGBUFFER_SKIP(&mpq_feed->metadata_buf,
+				bytes_avail);
+			sts->metadata_fill_count = 0;
+			break;
+		}
+
+		dvb_ringbuffer_read(&mpq_feed->metadata_buf, (u8 *)&header,
+			sizeof(header));
+		MPQ_DVB_DBG_PRINT(
+			"%s: metadata header: start=%u, length=%u\n",
+			__func__, header.payload_start, header.payload_length);
+		sts->metadata_fill_count -= sizeof(header);
+
+		dvb_ringbuffer_read(&mpq_feed->metadata_buf, (u8 *)&counters,
+			sizeof(counters));
+		sts->metadata_fill_count -= sizeof(counters);
+
+		/* Notify new data in buffer */
+		data_event.status = DMX_OK;
+		data_event.data_length = header.payload_length;
+		ret = mpq_sdmx_check_ts_stall(mpq_demux, mpq_feed, sts,
+			data_event.data_length, 0);
+		if (ret) {
+			MPQ_DVB_DBG_PRINT(
+				"%s: mpq_sdmx_check_ts_stall aborted\n",
+				__func__);
+			return;
+		}
+
+		feed->data_ready_cb.ts(&feed->feed.ts, &data_event);
+
+		/* Notify new complete PES */
+		pes_event.status = DMX_OK_PES_END;
+		pes_event.pes_end.actual_length = header.payload_length;
+		pes_event.pes_end.start_gap = 0;
+		pes_event.data_length = 0;
+
+		/* Parse error indicators */
+		if (sts->error_indicators & SDMX_FILTER_ERR_INVALID_PES_LEN)
+			pes_event.pes_end.pes_length_mismatch = 1;
+		else
+			pes_event.pes_end.pes_length_mismatch = 0;
+
+		pes_event.pes_end.disc_indicator_set = 0;
+
+		pes_event.pes_end.stc = 0;
+		pes_event.pes_end.tei_counter = counters.transport_err_count;
+		pes_event.pes_end.cont_err_counter =
+			counters.continuity_err_count;
+		pes_event.pes_end.ts_packets_num =
+			counters.pes_ts_count;
+
+		/* Stall only for the event itself (req = 0) */
+		ret = mpq_sdmx_check_ts_stall(mpq_demux, mpq_feed, sts, 0, 1);
+		if (ret) {
+			MPQ_DVB_DBG_PRINT(
+				"%s: mpq_sdmx_check_ts_stall aborted\n",
+				__func__);
+			return;
+		}
+		feed->data_ready_cb.ts(&feed->feed.ts, &pes_event);
+	}
+
+pes_filter_check_overflow:
+	if ((mpq_demux->demux.playback_mode == DMX_PB_MODE_PUSH) &&
+		(sts->error_indicators & SDMX_FILTER_ERR_D_BUF_FULL)) {
+		MPQ_DVB_ERR_PRINT("%s: DMX_OVERRUN_ERROR\n", __func__);
+		mpq_dmx_notify_overflow(feed);
+	}
+
+	if (sts->status_indicators & SDMX_FILTER_STATUS_EOS) {
+		data_event.data_length = 0;
+		data_event.status = DMX_OK_EOS;
+		feed->data_ready_cb.ts(&feed->feed.ts, &data_event);
+	}
+}
+
+/*
+ * mpq_sdmx_section_filter_results() - deliver sdmx section-filter output.
+ *
+ * Reports CRC and overflow error indicators, then drains the metadata
+ * ring: each entry describes one section in sdmx_buf, which is matched
+ * against every filter chained on the feed (while the section feed is
+ * still filtering) and then skipped from the data ring.
+ */
+static void mpq_sdmx_section_filter_results(struct mpq_demux *mpq_demux,
+	struct mpq_feed *mpq_feed,
+	struct sdmx_filter_status *sts)
+{
+	struct sdmx_metadata_header header;
+	struct dmx_data_ready event;
+	struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+	struct dvb_demux_filter *f;
+	struct dmx_section_feed *sec = &feed->feed.sec;
+	ssize_t bytes_avail;
+
+	/* Parse error indicators */
+	if (sts->error_indicators & SDMX_FILTER_ERR_SEC_VERIF_CRC32_FAIL) {
+		MPQ_DVB_DBG_PRINT("%s: Notify CRC err event\n", __func__);
+		event.status = DMX_CRC_ERROR;
+		event.data_length = 0;
+		dvb_dmx_notify_section_event(feed, &event, 1);
+	}
+
+	if (sts->error_indicators & SDMX_FILTER_ERR_D_BUF_FULL)
+		MPQ_DVB_ERR_PRINT("%s: internal section buffer overflowed!\n",
+			__func__);
+
+	if ((!sts->metadata_fill_count) && (!sts->data_fill_count))
+		goto section_filter_check_eos;
+
+	/* Sync write pointers to what sdmx reported */
+	mpq_feed->metadata_buf.pwrite = sts->metadata_write_offset;
+	mpq_feed->sdmx_buf.pwrite = sts->data_write_offset;
+
+	while (sts->metadata_fill_count) {
+		bytes_avail = dvb_ringbuffer_avail(&mpq_feed->metadata_buf);
+		if (bytes_avail < sizeof(header)) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: metadata_fill_count is %d less than required %zu bytes\n",
+				__func__,
+				sts->metadata_fill_count,
+				sizeof(header));
+
+			/* clean-up remaining bytes to try to recover */
+			DVB_RINGBUFFER_SKIP(&mpq_feed->metadata_buf,
+				bytes_avail);
+			sts->metadata_fill_count = 0;
+			break;
+		}
+
+		dvb_ringbuffer_read(&mpq_feed->metadata_buf, (u8 *) &header,
+			sizeof(header));
+		sts->metadata_fill_count -= sizeof(header);
+		MPQ_DVB_DBG_PRINT(
+			"%s: metadata header: start=%u, length=%u\n",
+			__func__, header.payload_start, header.payload_length);
+
+		/* Run the section through every filter on this feed */
+		f = feed->filter;
+		do {
+			if (mpq_sdmx_section_filtering(mpq_feed, f, &header))
+				return;
+		} while ((f = f->next) && sec->is_filtering);
+
+		/* Consume the section payload from the data ring */
+		DVB_RINGBUFFER_SKIP(&mpq_feed->sdmx_buf, header.payload_length);
+	}
+
+section_filter_check_eos:
+	if (sts->status_indicators & SDMX_FILTER_STATUS_EOS) {
+		event.data_length = 0;
+		event.status = DMX_OK_EOS;
+		dvb_dmx_notify_section_event(feed, &event, 1);
+	}
+}
+
+/*
+ * mpq_sdmx_decoder_filter_results() - deliver sdmx output for a video
+ * (separated-PES) filter into the decoder stream buffer.
+ *
+ * For each metadata entry: read the header + PES counters, then the raw
+ * TS/PES headers into a local buffer, extract PTS/DTS, and under the
+ * video_buffer_lock commit the payload to the stream buffer and notify
+ * an ES event. PULL mode stalls on full decoder buffers before the loop;
+ * PUSH mode reports overflow afterwards. EOS is forwarded both to the
+ * decoder (via the stream buffer) and to the user filter.
+ */
+static void mpq_sdmx_decoder_filter_results(struct mpq_demux *mpq_demux,
+	struct mpq_feed *mpq_feed,
+	struct sdmx_filter_status *sts)
+{
+	struct sdmx_metadata_header header;
+	struct sdmx_pes_counters counters;
+	int pes_header_offset;
+	struct ts_packet_header *ts_header;
+	struct ts_adaptation_field *ts_adapt;
+	struct pes_packet_header *pes_header;
+	u8 metadata_buf[MAX_SDMX_METADATA_LENGTH];
+	struct mpq_streambuffer *sbuf;
+	int ret;
+	struct dmx_data_ready data_event;
+	struct dmx_data_ready data;
+	struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+	ssize_t bytes_avail;
+
+	if ((!sts->metadata_fill_count) && (!sts->data_fill_count))
+		goto decoder_filter_check_flags;
+
+	/* Update meta data buffer write pointer */
+	mpq_feed->metadata_buf.pwrite = sts->metadata_write_offset;
+
+	if ((mpq_demux->demux.playback_mode == DMX_PB_MODE_PULL) &&
+		(sts->error_indicators & SDMX_FILTER_ERR_D_LIN_BUFS_FULL)) {
+		MPQ_DVB_DBG_PRINT("%s: Decoder stall...\n", __func__);
+
+		ret = mpq_dmx_decoder_fullness_check(
+			mpq_feed->dvb_demux_feed, 0, 0);
+		if (ret) {
+			/* we reach here if demuxing was aborted */
+			MPQ_DVB_DBG_PRINT(
+				"%s: mpq_dmx_decoder_fullness_check aborted\n",
+				__func__);
+			return;
+		}
+	}
+
+	while (sts->metadata_fill_count) {
+		struct mpq_streambuffer_packet_header packet;
+		struct mpq_adapter_video_meta_data meta_data;
+
+		bytes_avail = dvb_ringbuffer_avail(&mpq_feed->metadata_buf);
+		if (bytes_avail < (sizeof(header) + sizeof(counters))) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: metadata_fill_count is %d less than required %zu bytes\n",
+				__func__,
+				sts->metadata_fill_count,
+				sizeof(header) + sizeof(counters));
+
+			/* clean-up remaining bytes to try to recover */
+			DVB_RINGBUFFER_SKIP(&mpq_feed->metadata_buf,
+				bytes_avail);
+			sts->metadata_fill_count = 0;
+			break;
+		}
+
+		/* Read metadata header */
+		dvb_ringbuffer_read(&mpq_feed->metadata_buf, (u8 *)&header,
+			sizeof(header));
+		sts->metadata_fill_count -= sizeof(header);
+		MPQ_DVB_DBG_PRINT(
+			"%s: metadata header: start=%u, length=%u, metadata=%u\n",
+			__func__, header.payload_start, header.payload_length,
+			header.metadata_length);
+
+		/* Read metadata - PES counters */
+		dvb_ringbuffer_read(&mpq_feed->metadata_buf, (u8 *)&counters,
+			sizeof(counters));
+		sts->metadata_fill_count -= sizeof(counters);
+
+		/* Read metadata - TS & PES headers */
+		bytes_avail = dvb_ringbuffer_avail(&mpq_feed->metadata_buf);
+		if ((header.metadata_length < MAX_SDMX_METADATA_LENGTH) &&
+			(header.metadata_length >= sizeof(counters)) &&
+			(bytes_avail >=
+			 (header.metadata_length - sizeof(counters)))) {
+			dvb_ringbuffer_read(&mpq_feed->metadata_buf,
+				metadata_buf,
+				header.metadata_length - sizeof(counters));
+		} else {
+			MPQ_DVB_ERR_PRINT(
+				"%s: meta-data size %d larger than available meta-data %zd or max allowed %d\n",
+				__func__, header.metadata_length,
+				bytes_avail,
+				MAX_SDMX_METADATA_LENGTH);
+
+			/* clean-up remaining bytes to try to recover */
+			DVB_RINGBUFFER_SKIP(&mpq_feed->metadata_buf,
+				bytes_avail);
+			sts->metadata_fill_count = 0;
+			break;
+		}
+
+		sts->metadata_fill_count -=
+			(header.metadata_length - sizeof(counters));
+
+		/* Locate the PES header after the TS header (+adaptation) */
+		ts_header = (struct ts_packet_header *)&metadata_buf[0];
+		if (ts_header->adaptation_field_control == 1) {
+			ts_adapt = NULL;
+			pes_header_offset = sizeof(*ts_header);
+		} else {
+			ts_adapt = (struct ts_adaptation_field *)
+				&metadata_buf[sizeof(*ts_header)];
+			pes_header_offset = sizeof(*ts_header) + 1 +
+				ts_adapt->adaptation_field_length;
+		}
+		pes_header = (struct pes_packet_header *)
+			&metadata_buf[pes_header_offset];
+		meta_data.packet_type = DMX_PES_PACKET;
+		/* TODO - set to real STC when SDMX supports it */
+		meta_data.info.pes.stc = 0;
+
+		/* Reassemble the 33-bit PTS from its five split fields */
+		if (pes_header->pts_dts_flag & 0x2) {
+			meta_data.info.pes.pts_dts_info.pts_exist = 1;
+			meta_data.info.pes.pts_dts_info.pts =
+				((u64)pes_header->pts_1 << 30) |
+				((u64)pes_header->pts_2 << 22) |
+				((u64)pes_header->pts_3 << 15) |
+				((u64)pes_header->pts_4 << 7) |
+				(u64)pes_header->pts_5;
+		} else {
+			meta_data.info.pes.pts_dts_info.pts_exist = 0;
+		}
+
+		/* Same reassembly for the DTS when present */
+		if (pes_header->pts_dts_flag & 0x1) {
+			meta_data.info.pes.pts_dts_info.dts_exist = 1;
+			meta_data.info.pes.pts_dts_info.dts =
+				((u64)pes_header->dts_1 << 30) |
+				((u64)pes_header->dts_2 << 22) |
+				((u64)pes_header->dts_3 << 15) |
+				((u64)pes_header->dts_4 << 7) |
+				(u64)pes_header->dts_5;
+		} else {
+			meta_data.info.pes.pts_dts_info.dts_exist = 0;
+		}
+
+		spin_lock(&mpq_feed->video_info.video_buffer_lock);
+
+		mpq_feed->video_info.tei_errs =
+			counters.transport_err_count;
+		mpq_feed->video_info.continuity_errs =
+			counters.continuity_err_count;
+		mpq_feed->video_info.ts_packets_num =
+			counters.pes_ts_count;
+		mpq_feed->video_info.ts_dropped_bytes =
+			counters.drop_count *
+			mpq_demux->demux.ts_packet_size;
+
+		sbuf = mpq_feed->video_info.video_buffer;
+		if (sbuf == NULL) {
+			MPQ_DVB_DBG_PRINT(
+				"%s: video_buffer released\n",
+				__func__);
+			spin_unlock(&mpq_feed->video_info.video_buffer_lock);
+			return;
+		}
+
+		if (!header.payload_length) {
+			MPQ_DVB_DBG_PRINT(
+				"%s: warnning - video frame with 0 length, dropping\n",
+				__func__);
+			spin_unlock(&mpq_feed->video_info.video_buffer_lock);
+			continue;
+		}
+
+		/* Commit the payload sdmx already wrote, then the packet */
+		packet.raw_data_len = header.payload_length;
+		packet.user_data_len = sizeof(meta_data);
+		mpq_streambuffer_get_buffer_handle(sbuf, 0,
+			&packet.raw_data_handle);
+		mpq_streambuffer_get_data_rw_offset(sbuf,
+			NULL, &packet.raw_data_offset);
+		ret = mpq_streambuffer_data_write_deposit(sbuf,
+			header.payload_length);
+		if (ret) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: mpq_streambuffer_data_write_deposit failed. ret=%d\n",
+				__func__, ret);
+		}
+		mpq_dmx_update_decoder_stat(mpq_feed);
+		ret = mpq_streambuffer_pkt_write(sbuf, &packet,
+			(u8 *)&meta_data);
+		if (ret < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: mpq_streambuffer_pkt_write failed, ret=%d\n",
+				__func__, ret);
+		} else {
+			mpq_dmx_prepare_es_event_data(
+				&packet, &meta_data, &mpq_feed->video_info,
+				sbuf, &data, ret);
+			MPQ_DVB_DBG_PRINT("%s: Notify ES Event\n", __func__);
+			feed->data_ready_cb.ts(&feed->feed.ts, &data);
+		}
+
+		spin_unlock(&mpq_feed->video_info.video_buffer_lock);
+	}
+
+decoder_filter_check_flags:
+	if ((mpq_demux->demux.playback_mode == DMX_PB_MODE_PUSH) &&
+		(sts->error_indicators & SDMX_FILTER_ERR_D_LIN_BUFS_FULL)) {
+		MPQ_DVB_ERR_PRINT("%s: DMX_OVERRUN_ERROR\n", __func__);
+		mpq_dmx_notify_overflow(mpq_feed->dvb_demux_feed);
+	}
+
+	if (sts->status_indicators & SDMX_FILTER_STATUS_EOS) {
+		/* Notify decoder via the stream buffer */
+		ret = mpq_dmx_decoder_eos_cmd(mpq_feed, 1);
+		if (ret)
+			MPQ_DVB_ERR_PRINT(
+				"%s: Failed to notify decoder on EOS, ret=%d\n",
+				__func__, ret);
+
+		/* Notify user filter */
+		data_event.data_length = 0;
+		data_event.status = DMX_OK_EOS;
+		mpq_feed->dvb_demux_feed->data_ready_cb.ts(
+			&mpq_feed->dvb_demux_feed->feed.ts, &data_event);
+	}
+}
+
+/*
+ * mpq_sdmx_pcr_filter_results() - deliver PCR packets produced by sdmx.
+ *
+ * Each metadata entry describes one TS packet in the internal PCR ring
+ * buffer; the packet is copied into a stack buffer, the PCR (and, for
+ * 192-byte-tail format, the appended STC) is extracted, and a DMX_OK_PCR
+ * event is pushed to the user callback.
+ *
+ * Fix: header.payload_length comes from the sdmx metadata stream and was
+ * used unchecked as the copy length into the fixed-size stack buffer -
+ * a corrupt/hostile length overflowed the stack. It is now validated
+ * against sizeof(buf) and oversized entries are skipped.
+ */
+static void mpq_sdmx_pcr_filter_results(struct mpq_demux *mpq_demux,
+	struct mpq_feed *mpq_feed,
+	struct sdmx_filter_status *sts)
+{
+	int ret;
+	struct sdmx_metadata_header header;
+	struct dmx_data_ready data;
+	struct dvb_ringbuffer *rbuff = &mpq_feed->sdmx_buf;
+	struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+	u8 buf[TS_PACKET_HEADER_LENGTH + MAX_TSP_ADAPTATION_LENGTH +
+	       TIMESTAMP_LEN];
+	size_t stc_len = 0;
+	ssize_t bytes_avail;
+
+	if (sts->error_indicators & SDMX_FILTER_ERR_D_BUF_FULL)
+		MPQ_DVB_ERR_PRINT("%s: internal PCR buffer overflowed!\n",
+			__func__);
+
+	if ((!sts->metadata_fill_count) && (!sts->data_fill_count))
+		goto pcr_filter_check_eos;
+
+	/* 192-byte-tail TS format carries a 4-byte STC after the packet */
+	if (mpq_demux->demux.tsp_format == DMX_TSP_FORMAT_192_TAIL)
+		stc_len = 4;
+
+	mpq_feed->metadata_buf.pwrite = sts->metadata_write_offset;
+	rbuff->pwrite = sts->data_write_offset;
+
+	while (sts->metadata_fill_count) {
+		bytes_avail = dvb_ringbuffer_avail(&mpq_feed->metadata_buf);
+		if (bytes_avail < sizeof(header)) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: metadata_fill_count is %d less than required %zu bytes\n",
+				__func__,
+				sts->metadata_fill_count,
+				sizeof(header));
+
+			/* clean-up remaining bytes to try to recover */
+			DVB_RINGBUFFER_SKIP(&mpq_feed->metadata_buf,
+				bytes_avail);
+			sts->metadata_fill_count = 0;
+			break;
+		}
+
+		dvb_ringbuffer_read(&mpq_feed->metadata_buf, (u8 *) &header,
+			sizeof(header));
+		MPQ_DVB_DBG_PRINT(
+			"%s: metadata header: start=%u, length=%u\n",
+			__func__, header.payload_start, header.payload_length);
+		sts->metadata_fill_count -= sizeof(header);
+
+		/*
+		 * Bound the copy: a payload larger than the stack buffer
+		 * (or too short to carry the trailing STC) indicates
+		 * corrupt metadata - skip it and keep the ring consistent.
+		 */
+		if ((header.payload_length > sizeof(buf)) ||
+			(stc_len && (header.payload_length < 4))) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: invalid payload length %d, dropping\n",
+				__func__, header.payload_length);
+			DVB_RINGBUFFER_SKIP(rbuff, header.payload_length);
+			continue;
+		}
+
+		dvb_ringbuffer_read(rbuff, buf, header.payload_length);
+
+		if (mpq_dmx_extract_pcr_and_dci(buf, &data.pcr.pcr,
+			&data.pcr.disc_indicator_set)) {
+
+			if (stc_len) {
+				data.pcr.stc =
+					buf[header.payload_length-2] << 16;
+				data.pcr.stc +=
+					buf[header.payload_length-3] << 8;
+				data.pcr.stc += buf[header.payload_length-4];
+				/* convert from 105.47 KHZ to 27MHz */
+				data.pcr.stc *= 256;
+			} else {
+				data.pcr.stc = 0;
+			}
+
+			data.data_length = 0;
+			data.status = DMX_OK_PCR;
+			ret = mpq_sdmx_check_ts_stall(
+				mpq_demux, mpq_feed, sts, 0, 1);
+			if (ret) {
+				MPQ_DVB_DBG_PRINT(
+					"%s: mpq_sdmx_check_ts_stall aborted\n",
+					__func__);
+				return;
+			}
+			feed->data_ready_cb.ts(&feed->feed.ts, &data);
+		}
+	}
+
+pcr_filter_check_eos:
+	if (sts->status_indicators & SDMX_FILTER_STATUS_EOS) {
+		data.data_length = 0;
+		data.status = DMX_OK_EOS;
+		feed->data_ready_cb.ts(&feed->feed.ts, &data);
+	}
+}
+
+/*
+ * mpq_sdmx_raw_filter_results() - report raw (recording) filter output.
+ *
+ * Computes how many bytes sdmx appended to the user ring buffer since the
+ * last report (handling wrap-around), stalls in PULL mode for that amount
+ * plus one TS packet, and notifies the user callback. PUSH-mode overflow
+ * and EOS indicators are handled at the end in either case.
+ */
+static void mpq_sdmx_raw_filter_results(struct mpq_demux *mpq_demux,
+	struct mpq_feed *mpq_feed,
+	struct sdmx_filter_status *sts)
+{
+	struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+	struct dvb_ringbuffer *rbuf = (struct dvb_ringbuffer *)
+				feed->feed.ts.buffer.ringbuff;
+	struct dmx_data_ready data_event;
+	ssize_t fresh_bytes;
+	int ret;
+
+	if (sts->metadata_fill_count || sts->data_fill_count) {
+		/* New-byte count since last report, modulo the ring size */
+		fresh_bytes = sts->data_write_offset - rbuf->pwrite;
+		if (fresh_bytes < 0)
+			fresh_bytes += rbuf->size;
+
+		ret = mpq_sdmx_check_ts_stall(mpq_demux, mpq_feed, sts,
+			fresh_bytes + feed->demux->ts_packet_size, 0);
+		if (ret) {
+			MPQ_DVB_DBG_PRINT(
+				"%s: mpq_sdmx_check_ts_stall aborted\n",
+				__func__);
+			return;
+		}
+
+		data_event.status = DMX_OK;
+		data_event.data_length = fresh_bytes;
+		feed->data_ready_cb.ts(&feed->feed.ts, &data_event);
+		MPQ_DVB_DBG_PRINT("%s: Callback DMX_OK, size=%d\n",
+			__func__, data_event.data_length);
+	}
+
+	if ((mpq_demux->demux.playback_mode == DMX_PB_MODE_PUSH) &&
+		(sts->error_indicators & SDMX_FILTER_ERR_D_BUF_FULL)) {
+		MPQ_DVB_DBG_PRINT("%s: DMX_OVERRUN_ERROR\n", __func__);
+		mpq_dmx_notify_overflow(feed);
+	}
+
+	if (sts->status_indicators & SDMX_FILTER_STATUS_EOS) {
+		data_event.data_length = 0;
+		data_event.status = DMX_OK_EOS;
+		feed->data_ready_cb.ts(&feed->feed.ts, &data_event);
+	}
+
+}
+
+/*
+ * mpq_sdmx_process_results() - dispatch per-filter sdmx results.
+ *
+ * Walks the status array filled by the preceding sdmx process call; for
+ * each entry it validates the feed is still the one the status was taken
+ * for (state, handle, primary feed, session id - the session id guards
+ * against a feed re-initialized while the status was in flight), then
+ * invalidates the output buffer caches and dispatches to the handler
+ * matching the filter type.
+ */
+static void mpq_sdmx_process_results(struct mpq_demux *mpq_demux)
+{
+	int i;
+	int sdmx_filters;
+	struct sdmx_filter_status *sts;
+	struct mpq_feed *mpq_feed;
+	u8 mpq_feed_idx;
+
+	sdmx_filters = mpq_demux->sdmx_filter_count;
+	for (i = 0; i < sdmx_filters; i++) {
+		sts = &mpq_demux->sdmx_filters_state.status[i];
+		MPQ_DVB_DBG_PRINT(
+			"%s: Filter: handle=%d, status=0x%x, errors=0x%x\n",
+			__func__, sts->filter_handle, sts->status_indicators,
+			sts->error_indicators);
+		MPQ_DVB_DBG_PRINT("%s: Metadata fill count=%d (write=%d)\n",
+			__func__, sts->metadata_fill_count,
+			sts->metadata_write_offset);
+		MPQ_DVB_DBG_PRINT("%s: Data fill count=%d (write=%d)\n",
+			__func__, sts->data_fill_count, sts->data_write_offset);
+
+		mpq_feed_idx = mpq_demux->sdmx_filters_state.mpq_feed_idx[i];
+		mpq_feed = &mpq_demux->feeds[mpq_feed_idx];
+		/* Skip stale results for feeds that changed under us */
+		if ((mpq_feed->dvb_demux_feed->state != DMX_STATE_GO) ||
+			(sts->filter_handle != mpq_feed->sdmx_filter_handle) ||
+			mpq_feed->secondary_feed ||
+			(mpq_demux->sdmx_filters_state.session_id[i] !=
+			 mpq_feed->session_id))
+			continue;
+
+		/* Invalidate output buffer before processing the results */
+		mpq_sdmx_invalidate_buffer(mpq_feed);
+
+		if (sts->error_indicators & SDMX_FILTER_ERR_MD_BUF_FULL)
+			MPQ_DVB_ERR_PRINT(
+				"%s: meta-data buff for pid %d overflowed!\n",
+				__func__, mpq_feed->dvb_demux_feed->pid);
+
+		switch (mpq_feed->filter_type) {
+		case SDMX_PCR_FILTER:
+			mpq_sdmx_pcr_filter_results(mpq_demux, mpq_feed, sts);
+			break;
+		case SDMX_PES_FILTER:
+			mpq_sdmx_pes_filter_results(mpq_demux, mpq_feed,
+				sts);
+			break;
+		case SDMX_SEPARATED_PES_FILTER:
+			mpq_sdmx_decoder_filter_results(mpq_demux, mpq_feed,
+				sts);
+			break;
+		case SDMX_SECTION_FILTER:
+			mpq_sdmx_section_filter_results(mpq_demux, mpq_feed,
+				sts);
+			break;
+		case SDMX_RAW_FILTER:
+			mpq_sdmx_raw_filter_results(mpq_demux, mpq_feed, sts);
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+/*
+ * mpq_sdmx_process_buffer() - Run one sdmx_process() pass over the input
+ * buffer and dispatch the produced results.
+ *
+ * @mpq_demux:   demux instance
+ * @input:       sdmx descriptor of the input (DVR) buffer
+ * @fill_count:  number of input bytes available for processing
+ * @read_offset: read offset within the input buffer
+ *
+ * Returns the number of input bytes consumed by sdmx (>= 0), 0 when the
+ * sdmx session was already terminated, or -ERESTART when the rebuilt
+ * filter-status array does not match sdmx_filter_count.
+ *
+ * Takes and releases mpq_demux->mutex around the whole operation.
+ */
+static int mpq_sdmx_process_buffer(struct mpq_demux *mpq_demux,
+	struct sdmx_buff_descr *input,
+	u32 fill_count,
+	u32 read_offset)
+{
+	struct sdmx_filter_status *sts;
+	struct mpq_feed *mpq_feed;
+	u8 flags = 0;
+	u32 errors;
+	u32 status;
+	u32 prev_read_offset;
+	u32 prev_fill_count;
+	enum sdmx_status sdmx_res;
+	int i;
+	int filter_index = 0;
+	int bytes_read;
+	ktime_t process_start_time;
+	ktime_t process_end_time;
+
+	mutex_lock(&mpq_demux->mutex);
+
+	/*
+	 * All active filters may get totally closed and therefore
+	 * sdmx session may get terminated, in such case nothing to process
+	 */
+	if (mpq_demux->sdmx_session_handle == SDMX_INVALID_SESSION_HANDLE) {
+		MPQ_DVB_DBG_PRINT(
+			"%s: sdmx filters aborted, filter-count %d, session %d\n",
+			__func__, mpq_demux->sdmx_filter_count,
+			mpq_demux->sdmx_session_handle);
+		mutex_unlock(&mpq_demux->mutex);
+		return 0;
+	}
+
+	/* Set input flags */
+	if (mpq_demux->sdmx_eos)
+		flags |= SDMX_INPUT_FLAG_EOS;
+	if (mpq_sdmx_debug)
+		flags |= SDMX_INPUT_FLAG_DBG_ENABLE;
+
+	/*
+	 * Build up to date filter status array: one entry per primary
+	 * (non-secondary) feed holding a valid sdmx filter handle. The
+	 * feed index and session_id are snapshotted alongside so that
+	 * mpq_sdmx_process_results() can detect feed restarts.
+	 */
+	for (i = 0; i < MPQ_MAX_DMX_FILES; i++) {
+		mpq_feed = &mpq_demux->feeds[i];
+		if ((mpq_feed->sdmx_filter_handle != SDMX_INVALID_FILTER_HANDLE)
+			&& (!mpq_feed->secondary_feed)) {
+			sts = mpq_demux->sdmx_filters_state.status +
+				filter_index;
+			mpq_sdmx_prepare_filter_status(mpq_demux, sts,
+				mpq_feed);
+			mpq_demux->sdmx_filters_state.mpq_feed_idx[filter_index]
+				= i;
+			mpq_demux->sdmx_filters_state.session_id[filter_index] =
+				mpq_feed->session_id;
+			filter_index++;
+		}
+	}
+
+	/* Sanity check */
+	if (filter_index != mpq_demux->sdmx_filter_count) {
+		mutex_unlock(&mpq_demux->mutex);
+		MPQ_DVB_ERR_PRINT(
+			"%s: Updated %d SDMX filters status but should be %d\n",
+			__func__, filter_index, mpq_demux->sdmx_filter_count);
+		return -ERESTART;
+	}
+
+	MPQ_DVB_DBG_PRINT(
+		"%s: Before SDMX_process: input read_offset=%u, fill count=%u\n",
+		__func__, read_offset, fill_count);
+
+	process_start_time = ktime_get();
+
+	/*
+	 * sdmx_process() updates fill_count/read_offset in place; the
+	 * difference from prev_fill_count yields the bytes consumed.
+	 */
+	prev_read_offset = read_offset;
+	prev_fill_count = fill_count;
+	sdmx_res = sdmx_process(mpq_demux->sdmx_session_handle, flags, input,
+		&fill_count, &read_offset, &errors, &status,
+		mpq_demux->sdmx_filter_count,
+		mpq_demux->sdmx_filters_state.status);
+
+	process_end_time = ktime_get();
+	bytes_read = prev_fill_count - fill_count;
+
+	mpq_dmx_update_sdmx_stat(mpq_demux, bytes_read,
+		process_start_time, process_end_time);
+
+	MPQ_DVB_DBG_PRINT(
+		"%s: SDMX result=%d, input_fill_count=%u, read_offset=%u, read %d bytes from input, status=0x%X, errors=0x%X\n",
+		__func__, sdmx_res, fill_count, read_offset, bytes_read,
+		status, errors);
+
+	/*
+	 * A PULL-mode stall is not an error: partial results are still
+	 * valid, so dispatch them like a normal success.
+	 */
+	if ((sdmx_res == SDMX_SUCCESS) ||
+		(sdmx_res == SDMX_STATUS_STALLED_IN_PULL_MODE)) {
+		if (sdmx_res == SDMX_STATUS_STALLED_IN_PULL_MODE)
+			MPQ_DVB_DBG_PRINT("%s: SDMX stalled for PULL mode\n",
+				__func__);
+
+		mpq_sdmx_process_results(mpq_demux);
+	} else {
+		MPQ_DVB_ERR_PRINT(
+			"%s: SDMX Process returned %d\n",
+			__func__, sdmx_res);
+	}
+
+	mutex_unlock(&mpq_demux->mutex);
+
+	return bytes_read;
+}
+
+/*
+ * mpq_sdmx_process() - Feed the input buffer to secure demux in chunks.
+ *
+ * @mpq_demux:   demux instance
+ * @input:       sdmx descriptor of the input buffer (circular; input->size
+ *               is used to wrap read_offset)
+ * @fill_count:  total bytes available for processing
+ * @read_offset: starting read offset within the input buffer
+ * @tsp_size:    TS packet size in bytes (e.g. 188/192)
+ *
+ * Each iteration processes at most mpq_sdmx_proc_limit TS packets worth of
+ * data. Loops until less than one whole TS packet remains, a chunk makes no
+ * progress (ret == 0), or an error occurs.
+ *
+ * Returns total bytes consumed, or -ENODEV if sw_filter_abort was raised
+ * (demuxing from DVR aborted) mid-processing.
+ */
+int mpq_sdmx_process(struct mpq_demux *mpq_demux,
+	struct sdmx_buff_descr *input,
+	u32 fill_count,
+	u32 read_offset,
+	size_t tsp_size)
+{
+	int ret;
+	int todo;
+	int total_bytes_read = 0;
+	/* Max bytes per mpq_sdmx_process_buffer() call, in whole packets */
+	int limit = mpq_sdmx_proc_limit * tsp_size;
+
+	MPQ_DVB_DBG_PRINT(
+		"\n\n%s: read_offset=%u, fill_count=%u, tsp_size=%zu\n",
+		__func__, read_offset, fill_count, tsp_size);
+
+	while (fill_count >= tsp_size) {
+		todo = fill_count > limit ? limit : fill_count;
+		ret = mpq_sdmx_process_buffer(mpq_demux, input, todo,
+			read_offset);
+
+		if (mpq_demux->demux.sw_filter_abort) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: Demuxing from DVR was aborted\n",
+				__func__);
+			return -ENODEV;
+		}
+
+		if (ret > 0) {
+			total_bytes_read += ret;
+			fill_count -= ret;
+			read_offset += ret;
+			/* wrap around the circular input buffer */
+			if (read_offset >= input->size)
+				read_offset -= input->size;
+		} else {
+			/*
+			 * ret < 0: some error occurred
+			 * ret == 0: not enough data (less than 1 TS packet)
+			 */
+			if (ret < 0)
+				MPQ_DVB_ERR_PRINT(
+					"%s: mpq_sdmx_process_buffer failed, returned %d\n",
+					__func__, ret);
+			break;
+		}
+	}
+
+	return total_bytes_read;
+}
+
<parameter name="command">+/*
+ * mpq_sdmx_write() - Hand DVR input data to the secure demux.
+ *
+ * @mpq_demux:    demux instance
+ * @input_handle: ION handle of the DVR input buffer (validated non-NULL
+ *                only; the handle actually used below is re-read from
+ *                dvr_input.priv_handle)
+ * @buf:          unused here; data is consumed from the DVR ring buffer
+ * @count:        number of bytes to process
+ *
+ * Builds the sdmx input-buffer descriptor, cleans the CPU cache so sdmx
+ * (running on the secure side) sees up-to-date data, then processes
+ * `count` bytes starting at the ring buffer's current read pointer.
+ *
+ * Returns bytes consumed (from mpq_sdmx_process()) or a negative errno.
+ */
+static int mpq_sdmx_write(struct mpq_demux *mpq_demux,
+	struct ion_handle *input_handle,
+	const char *buf,
+	size_t count)
+{
+	struct ion_handle *ion_handle;
+	struct dvb_ringbuffer *rbuf;
+	struct sdmx_buff_descr buf_desc;
+	u32 read_offset;
+	int ret;
+
+	if (mpq_demux == NULL || input_handle == NULL) {
+		MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	ion_handle = mpq_demux->demux.dmx.dvr_input.priv_handle;
+	rbuf = (struct dvb_ringbuffer *)mpq_demux->demux.dmx.dvr_input.ringbuff;
+
+	ret = mpq_sdmx_dvr_buffer_desc(mpq_demux, &buf_desc);
+	if (ret) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: Failed to init input buffer descriptor. ret = %d\n",
+			__func__, ret);
+		return ret;
+	}
+	/* NOTE(review): same object as rbuf->pread - rbuf is assigned from
+	 * dvr_input.ringbuff above; kept as-is to preserve the diff.
+	 */
+	read_offset = mpq_demux->demux.dmx.dvr_input.ringbuff->pread;
+
+
+	/*
+	 * We must flush the buffer before SDMX starts reading from it
+	 * so that it gets a valid data in memory.
+	 */
+	ret = msm_ion_do_cache_op(mpq_demux->ion_client,
+		ion_handle, rbuf->data,
+		rbuf->size, ION_IOC_CLEAN_CACHES);
+	if (ret)
+		/* best-effort: log and continue, sdmx may still succeed */
+		MPQ_DVB_ERR_PRINT(
+			"%s: msm_ion_do_cache_op failed, ret = %d\n",
+			__func__, ret);
+
+	return mpq_sdmx_process(mpq_demux, &buf_desc, count,
+		read_offset, mpq_demux->demux.ts_packet_size);
+}
</parameter>
+
+/*
+ * mpq_dmx_write() - dmx_demux write entry point for DVR input.
+ *
+ * @demux: kernel demux device
+ * @buf:   input data
+ * @count: number of bytes in @buf
+ *
+ * Routes the data through secure demux first (if loaded and any secure
+ * filters exist), then through the software filter for non-secure feeds.
+ * The sw filter consumes the same number of bytes sdmx consumed so both
+ * paths stay in sync; on an sdmx error the whole buffer is sw-filtered.
+ *
+ * Returns bytes processed, or -EINVAL / -EINTR.
+ */
+int mpq_dmx_write(struct dmx_demux *demux, const char *buf, size_t count)
+{
+	struct dvb_demux *dvb_demux;
+	struct mpq_demux *mpq_demux;
+	int ret = count;
+
+	if (demux == NULL)
+		return -EINVAL;
+
+	dvb_demux = demux->priv;
+	mpq_demux = dvb_demux->priv;
+
+	/* Route through secure demux - process secure feeds if any exist */
+	if (mpq_sdmx_is_loaded() && mpq_demux->sdmx_filter_count) {
+		ret = mpq_sdmx_write(mpq_demux,
+			demux->dvr_input.priv_handle,
+			buf,
+			count);
+		if (ret < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: mpq_sdmx_write failed. ret = %d\n",
+				__func__, ret);
+			/* fall back: sw-filter the entire buffer */
+			ret = count;
+		}
+	}
+
+	/*
+	 * Route through sw filter - process non-secure feeds if any exist.
+	 * For sw filter, should process the same amount of bytes the sdmx
+	 * process managed to consume, unless some sdmx error occurred, for
+	 * which should process the whole buffer
+	 */
+	if (mpq_demux->num_active_feeds > mpq_demux->num_secure_feeds)
+		dvb_dmx_swfilter_format(dvb_demux, buf, ret,
+			dvb_demux->tsp_format);
+
+	if (signal_pending(current))
+		return -EINTR;
+
+	return ret;
+}
+
+/*
+ * mpq_sdmx_is_loaded() - Report whether the secure demux TA is available.
+ *
+ * Probes for the secure demux application once (lazily, on first call) and
+ * caches the fact that the probe ran; subsequent calls just return the
+ * cached global state.
+ *
+ * NOTE(review): the one-time check is not atomic - two first-time callers
+ * could both invoke mpq_sdmx_check_app_loaded(). Presumably that function
+ * is idempotent; confirm if this can race.
+ *
+ * Returns non-zero when the secure demux application is loaded.
+ */
+int mpq_sdmx_is_loaded(void)
+{
+	static int sdmx_load_checked;
+
+	if (!sdmx_load_checked) {
+		mpq_sdmx_check_app_loaded();
+		sdmx_load_checked = 1;
+	}
+
+	return mpq_dmx_info.secure_demux_app_loaded;
+}
+
+/*
+ * mpq_dmx_oob_command() - Handle an out-of-band command for a feed.
+ *
+ * @feed: demux feed the command targets
+ * @cmd:  OOB command (EOS or MARKER)
+ *
+ * Only video feeds, PCR feeds and secured feeds are affected; for any
+ * other feed the command is silently accepted (returns 0).
+ *
+ * EOS on a non-secured decoder feed closes the pending PES/frame, pushes
+ * an EOS packet to the decoder and notifies via the data-ready callback.
+ * EOS on a secured feed marks sdmx EOS and triggers one final
+ * sdmx_process pass over whatever is left in the DVR buffer.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int mpq_dmx_oob_command(struct dvb_demux_feed *feed,
+	struct dmx_oob_command *cmd)
+{
+	struct mpq_feed *mpq_feed = feed->priv;
+	struct mpq_demux *mpq_demux = mpq_feed->mpq_demux;
+	struct dmx_data_ready event;
+	int ret = 0;
+
+	mutex_lock(&mpq_demux->mutex);
+	/* re-read under the mutex in case the feed was re-initialized */
+	mpq_feed = feed->priv;
+
+	if (!dvb_dmx_is_video_feed(feed) && !dvb_dmx_is_pcr_feed(feed) &&
+		!feed->secure_mode.is_secured) {
+		mutex_unlock(&mpq_demux->mutex);
+		return 0;
+	}
+
+	event.data_length = 0;
+
+	switch (cmd->type) {
+	case DMX_OOB_CMD_EOS:
+		event.status = DMX_OK_EOS;
+		if (!feed->secure_mode.is_secured) {
+			if (dvb_dmx_is_video_feed(feed)) {
+				/* flush partially-assembled output first */
+				if (!video_framing)
+					mpq_dmx_decoder_pes_closure(mpq_demux,
+						mpq_feed);
+				else
+					mpq_dmx_decoder_frame_closure(mpq_demux,
+						mpq_feed);
+				ret = mpq_dmx_decoder_eos_cmd(mpq_feed, 1);
+				if (ret)
+					MPQ_DVB_ERR_PRINT(
+						"%s: Couldn't write oob eos packet\n",
+						__func__);
+			} else if (dvb_dmx_is_audio_feed(feed)) {
+				mpq_dmx_decoder_audio_pes_closure(mpq_demux,
+					mpq_feed);
+				ret = mpq_dmx_decoder_eos_cmd(mpq_feed, 2);
+				if (ret)
+					MPQ_DVB_ERR_PRINT(
+						"%s: Couldn't write oob eos packet\n",
+						__func__);
+			}
+			ret = feed->data_ready_cb.ts(&feed->feed.ts, &event);
+		} else if (!mpq_demux->sdmx_eos) {
+			struct sdmx_buff_descr buf_desc;
+
+			mpq_demux->sdmx_eos = 1;
+			ret = mpq_sdmx_dvr_buffer_desc(mpq_demux, &buf_desc);
+			if (!ret) {
+				/*
+				 * Must drop the mutex first:
+				 * mpq_sdmx_process_buffer() takes it itself.
+				 */
+				mutex_unlock(&mpq_demux->mutex);
+				mpq_sdmx_process_buffer(mpq_demux, &buf_desc,
+					0, 0);
+				return 0;
+			}
+		}
+		break;
+	case DMX_OOB_CMD_MARKER:
+		event.status = DMX_OK_MARKER;
+		event.marker.id = cmd->params.marker.id;
+
+		if (feed->type == DMX_TYPE_SEC)
+			ret = dvb_dmx_notify_section_event(feed, &event, 1);
+		else
+			/* MPQ_TODO: Notify decoder via the stream buffer */
+			ret = feed->data_ready_cb.ts(&feed->feed.ts, &event);
+		break;
+
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	mutex_unlock(&mpq_demux->mutex);
+	return ret;
+}
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h
new file mode 100644
index 0000000..0c20a89
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h
@@ -0,0 +1,1116 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MPQ_DMX_PLUGIN_COMMON_H
+#define _MPQ_DMX_PLUGIN_COMMON_H
+
+#include <linux/msm_ion.h>
+
+#include "dvbdev.h"
+#include "dmxdev.h"
+#include "demux.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "mpq_adapter.h"
+#include "mpq_sdmx.h"
+
+#define TS_PACKET_SYNC_BYTE (0x47)
+#define TS_PACKET_SIZE (188)
+#define TS_PACKET_HEADER_LENGTH (4)
+
+/* Length of mandatory fields that must exist in header of video PES */
+#define PES_MANDATORY_FIELDS_LEN 9
+
+/*
+ * 500 PES header packets in the meta-data buffer,
+ * should be more than enough
+ */
+#define VIDEO_NUM_OF_PES_PACKETS 500
+
+#define VIDEO_META_DATA_PACKET_SIZE \
+ (DVB_RINGBUFFER_PKTHDRSIZE + \
+ sizeof(struct mpq_streambuffer_packet_header) + \
+ sizeof(struct mpq_adapter_video_meta_data))
+
+#define VIDEO_META_DATA_BUFFER_SIZE \
+ (VIDEO_NUM_OF_PES_PACKETS * VIDEO_META_DATA_PACKET_SIZE)
+
+#define AUDIO_NUM_OF_PES_PACKETS 100
+
+#define AUDIO_META_DATA_PACKET_SIZE \
+ (DVB_RINGBUFFER_PKTHDRSIZE + \
+ sizeof(struct mpq_streambuffer_packet_header) + \
+ sizeof(struct mpq_adapter_audio_meta_data))
+
+#define AUDIO_META_DATA_BUFFER_SIZE \
+ (AUDIO_NUM_OF_PES_PACKETS * AUDIO_META_DATA_PACKET_SIZE)
+
+/* Max number open() request can be done on demux device */
+#define MPQ_MAX_DMX_FILES 128
+
+/* TSIF alias name length */
+#define TSIF_NAME_LENGTH 20
+
+/**
+ * struct ts_packet_header - Transport packet header
+ * as defined in MPEG2 transport stream standard.
+ *
+ * The 13-bit PID is split into pid_msb/pid_lsb across a byte boundary;
+ * the two #if branches declare the same wire layout for each host
+ * bitfield ordering.
+ */
+struct ts_packet_header {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	unsigned sync_byte:8;
+	unsigned transport_error_indicator:1;
+	unsigned payload_unit_start_indicator:1;
+	unsigned transport_priority:1;
+	unsigned pid_msb:5;
+	unsigned pid_lsb:8;
+	unsigned transport_scrambling_control:2;
+	unsigned adaptation_field_control:2;
+	unsigned continuity_counter:4;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+	unsigned sync_byte:8;
+	unsigned pid_msb:5;
+	unsigned transport_priority:1;
+	unsigned payload_unit_start_indicator:1;
+	unsigned transport_error_indicator:1;
+	unsigned pid_lsb:8;
+	unsigned continuity_counter:4;
+	unsigned adaptation_field_control:2;
+	unsigned transport_scrambling_control:2;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+} __packed;
+
+/**
+ * struct ts_adaptation_field - Adaptation field prefix
+ * as defined in MPEG2 transport stream standard.
+ *
+ * Covers the fixed flags byte plus the 33-bit PCR base and 9-bit PCR
+ * extension (split across program_clock_reference_* fields). PCR fields
+ * are meaningful only when PCR_flag is set (per the MPEG-2 systems spec).
+ */
+struct ts_adaptation_field {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	unsigned adaptation_field_length:8;
+	unsigned discontinuity_indicator:1;
+	unsigned random_access_indicator:1;
+	unsigned elementary_stream_priority_indicator:1;
+	unsigned PCR_flag:1;
+	unsigned OPCR_flag:1;
+	unsigned splicing_point_flag:1;
+	unsigned transport_private_data_flag:1;
+	unsigned adaptation_field_extension_flag:1;
+	unsigned program_clock_reference_base_1:8;
+	unsigned program_clock_reference_base_2:8;
+	unsigned program_clock_reference_base_3:8;
+	unsigned program_clock_reference_base_4:8;
+	unsigned program_clock_reference_base_5:1;
+	unsigned reserved:6;
+	unsigned program_clock_reference_ext_1:1;
+	unsigned program_clock_reference_ext_2:8;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+	unsigned adaptation_field_length:8;
+	unsigned adaptation_field_extension_flag:1;
+	unsigned transport_private_data_flag:1;
+	unsigned splicing_point_flag:1;
+	unsigned OPCR_flag:1;
+	unsigned PCR_flag:1;
+	unsigned elementary_stream_priority_indicator:1;
+	unsigned random_access_indicator:1;
+	unsigned discontinuity_indicator:1;
+	unsigned program_clock_reference_base_1:8;
+	unsigned program_clock_reference_base_2:8;
+	unsigned program_clock_reference_base_3:8;
+	unsigned program_clock_reference_base_4:8;
+	unsigned program_clock_reference_ext_1:1;
+	unsigned reserved:6;
+	unsigned program_clock_reference_base_5:1;
+	unsigned program_clock_reference_ext_2:8;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+} __packed;
+
+
+/*
+ * PES packet header containing dts and/or pts values
+ * as defined in MPEG2 transport stream standard.
+ *
+ * The pts_*/dts_* bitfields carry the 33-bit timestamps split across
+ * marker bits; they are meaningful only when pts_dts_flag indicates
+ * their presence (per the MPEG-2 systems spec).
+ */
+struct pes_packet_header {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	unsigned packet_start_code_prefix_1:8;
+	unsigned packet_start_code_prefix_2:8;
+	unsigned packet_start_code_prefix_3:8;
+	unsigned stream_id:8;
+	unsigned pes_packet_length_msb:8;
+	unsigned pes_packet_length_lsb:8;
+	unsigned reserved_bits0:2;
+	unsigned pes_scrambling_control:2;
+	unsigned pes_priority:1;
+	unsigned data_alignment_indicator:1;
+	unsigned copyright:1;
+	unsigned original_or_copy:1;
+	unsigned pts_dts_flag:2;
+	unsigned escr_flag:1;
+	unsigned es_rate_flag:1;
+	unsigned dsm_trick_mode_flag:1;
+	unsigned additional_copy_info_flag:1;
+	unsigned pes_crc_flag:1;
+	unsigned pes_extension_flag:1;
+	unsigned pes_header_data_length:8;
+	unsigned reserved_bits1:4;
+	unsigned pts_1:3;
+	unsigned marker_bit0:1;
+	unsigned pts_2:8;
+	unsigned pts_3:7;
+	unsigned marker_bit1:1;
+	unsigned pts_4:8;
+	unsigned pts_5:7;
+	unsigned marker_bit2:1;
+	unsigned reserved_bits2:4;
+	unsigned dts_1:3;
+	unsigned marker_bit3:1;
+	unsigned dts_2:8;
+	unsigned dts_3:7;
+	unsigned marker_bit4:1;
+	unsigned dts_4:8;
+	unsigned dts_5:7;
+	unsigned marker_bit5:1;
+	unsigned reserved_bits3:4;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+	unsigned packet_start_code_prefix_1:8;
+	unsigned packet_start_code_prefix_2:8;
+	unsigned packet_start_code_prefix_3:8;
+	unsigned stream_id:8;
+	unsigned pes_packet_length_lsb:8;
+	unsigned pes_packet_length_msb:8;
+	unsigned original_or_copy:1;
+	unsigned copyright:1;
+	unsigned data_alignment_indicator:1;
+	unsigned pes_priority:1;
+	unsigned pes_scrambling_control:2;
+	unsigned reserved_bits0:2;
+	unsigned pes_extension_flag:1;
+	unsigned pes_crc_flag:1;
+	unsigned additional_copy_info_flag:1;
+	unsigned dsm_trick_mode_flag:1;
+	unsigned es_rate_flag:1;
+	unsigned escr_flag:1;
+	unsigned pts_dts_flag:2;
+	unsigned pes_header_data_length:8;
+	unsigned marker_bit0:1;
+	unsigned pts_1:3;
+	unsigned reserved_bits1:4;
+	unsigned pts_2:8;
+	unsigned marker_bit1:1;
+	unsigned pts_3:7;
+	unsigned pts_4:8;
+	unsigned marker_bit2:1;
+	unsigned pts_5:7;
+	unsigned marker_bit3:1;
+	unsigned dts_1:3;
+	unsigned reserved_bits2:4;
+	unsigned dts_2:8;
+	unsigned marker_bit4:1;
+	unsigned dts_3:7;
+	unsigned dts_4:8;
+	unsigned marker_bit5:1;
+	unsigned dts_5:7;
+	unsigned reserved_bits3:4;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+} __packed;
+
+/**
+ * struct mpq_decoder_buffers_desc - decoder buffer(s) management information.
+ *
+ * @desc: Array of buffer descriptors as they are passed to mpq_streambuffer
+ * upon its initialization. These descriptors must remain valid as long as
+ * the mpq_streambuffer object is used.
+ * @ion_handle: Array of ION handles, one for each decoder buffer, used for
+ * kernel memory mapping or allocation. Handles are saved in order to release
+ * resources properly later on.
+ * @decoder_buffers_num: number of buffers that are managed, either externally
+ * or internally by the mpq_streambuffer object
+ * @shared_file: File handle of internally allocated video buffer shared
+ * with video consumer.
+ */
+struct mpq_decoder_buffers_desc {
+	struct mpq_streambuffer_buffer_desc desc[DMX_MAX_DECODER_BUFFER_NUM];
+	struct ion_handle *ion_handle[DMX_MAX_DECODER_BUFFER_NUM];
+	u32 decoder_buffers_num;
+	struct file *shared_file;
+};
+
+/*
+ * mpq_video_feed_info - private data used for video feed.
+ *
+ * @video_buffer: Holds the streamer buffer shared with
+ * the decoder for feeds having the data going to the decoder.
+ * @video_buffer_lock: Lock protecting against video output buffer.
+ * The lock protects against API calls to manipulate the output buffer
+ * (initialize, free, re-use buffers) and dvb-sw demux parsing the video
+ * data through mpq_dmx_process_video_packet().
+ * @buffer_desc: Holds decoder buffer(s) information used for stream buffer.
+ * @pes_header: Used for feeds that output data to decoder,
+ * holds PES header of current processed PES.
+ * @pes_header_left_bytes: Used for feeds that output data to decoder,
+ * holds remaining PES header bytes of current processed PES.
+ * @pes_header_offset: Holds the offset within the current processed
+ * pes header.
+ * @fullness_wait_cancel: Flag used to signal to abort waiting for
+ * decoder's fullness.
+ * @stream_interface: The ID of the video stream interface registered
+ * with this stream buffer.
+ * @patterns: pointer to the framing patterns to look for.
+ * @patterns_num: number of framing patterns.
+ * @prev_pattern: holds the trailing data of the last processed video packet.
+ * @frame_offset: Saves data buffer offset to which a new frame will be written
+ * @last_pattern_offset: Holds the previous pattern offset
+ * @pending_pattern_len: Accumulated number of data bytes that will be
+ * reported for this frame.
+ * @last_framing_match_type: Used for saving the type of
+ * the previous pattern match found in this video feed.
+ * @last_framing_match_stc: Used for saving the STC attached to TS packet
+ * of the previous pattern match found in this video feed.
+ * @found_sequence_header_pattern: Flag used to note that an MPEG-2
+ * Sequence Header, H.264 SPS or VC-1 Sequence Header pattern
+ * (whichever is relevant according to the video standard) had already
+ * been found.
+ * @prefix_size: a bit mask representing the size(s) of possible prefixes
+ * to the pattern, already found in the previous buffer. If bit 0 is set,
+ * a prefix of size 1 was found. If bit 1 is set, a prefix of size 2 was
+ * found, etc. This supports a prefix size of up to 32, which is more
+ * than we need. The search function updates prefix_size as needed
+ * for the next buffer search.
+ * @first_prefix_size: used to save the prefix size used to find the first
+ * pattern written to the stream buffer.
+ * @saved_pts_dts_info: used to save PTS/DTS information until it is written.
+ * @new_pts_dts_info: used to store PTS/DTS information from current PES header.
+ * @saved_info_used: indicates if saved PTS/DTS information was used.
+ * @new_info_exists: indicates if new PTS/DTS information exists in
+ * new_pts_dts_info that should be saved to saved_pts_dts_info.
+ * @first_pts_dts_copy: a flag used to indicate if PTS/DTS information needs
+ * to be copied from the currently parsed PES header to the saved_pts_dts_info.
+ * @tei_errs: Transport stream Transport Error Indicator (TEI) counter.
+ * @last_continuity: last continuity counter value found in TS packet header.
+ * Initialized to -1.
+ * @continuity_errs: Transport stream continuity error counter.
+ * @ts_packets_num: TS packets counter.
+ * @ts_dropped_bytes: counts the number of bytes dropped due to insufficient
+ * buffer space.
+ * @prev_stc: STC attached to the previous video TS packet
+ */
+/* Fields are documented in the kernel-doc block above. */
+struct mpq_video_feed_info {
+	struct mpq_streambuffer *video_buffer;
+	spinlock_t video_buffer_lock;
+	struct mpq_decoder_buffers_desc buffer_desc;
+	struct pes_packet_header pes_header;
+	u32 pes_header_left_bytes;
+	u32 pes_header_offset;
+	int fullness_wait_cancel;
+	enum mpq_adapter_stream_if stream_interface;
+	/* kept at column 0 in the original to fit the 80-column limit */
+const struct dvb_dmx_video_patterns *patterns[DVB_DMX_MAX_SEARCH_PATTERN_NUM];
+	int patterns_num;
+	char prev_pattern[DVB_DMX_MAX_PATTERN_LEN];
+	u32 frame_offset;
+	u32 last_pattern_offset;
+	u32 pending_pattern_len;
+	u64 last_framing_match_type;
+	u64 last_framing_match_stc;
+	int found_sequence_header_pattern;
+	struct dvb_dmx_video_prefix_size_masks prefix_size;
+	u32 first_prefix_size;
+	struct dmx_pts_dts_info saved_pts_dts_info;
+	struct dmx_pts_dts_info new_pts_dts_info;
+	int saved_info_used;
+	int new_info_exists;
+	int first_pts_dts_copy;
+	u32 tei_errs;
+	int last_continuity;
+	u32 continuity_errs;
+	u32 ts_packets_num;
+	u32 ts_dropped_bytes;
+	u64 prev_stc;
+};
+
+/*
+ * Bare-minimal audio counterpart of mpq_video_feed_info; fields with the
+ * same name carry the same meaning as documented for the video struct
+ * (no framing-pattern state is needed for audio).
+ */
+struct mpq_audio_feed_info {
+	struct mpq_streambuffer *audio_buffer;
+	spinlock_t audio_buffer_lock;
+	struct mpq_decoder_buffers_desc buffer_desc;
+	struct pes_packet_header pes_header;
+	u32 pes_header_left_bytes;
+	u32 pes_header_offset;
+	int fullness_wait_cancel;
+	enum mpq_adapter_stream_if stream_interface;
+	u32 frame_offset; /* pes frame offset */
+	struct dmx_pts_dts_info saved_pts_dts_info;
+	struct dmx_pts_dts_info new_pts_dts_info;
+	int saved_info_used;
+	int new_info_exists;
+	int first_pts_dts_copy;
+	u32 tei_errs;
+	int last_continuity;
+	u32 continuity_errs;
+	u32 ts_packets_num;
+	u32 ts_dropped_bytes;
+	u64 prev_stc;
+};
+
+/**
+ * mpq feed object - mpq common plugin feed information
+ *
+ * @dvb_demux_feed: Back pointer to dvb demux level feed object
+ * @mpq_demux: Pointer to common mpq demux object
+ * @plugin_priv: Plugin specific private data
+ * @sdmx_filter_handle: Secure demux filter handle. Recording feed may share
+ * same filter handle
+ * @secondary_feed: Specifies if this feed shares filter handle with
+ * other feeds
+ * @filter_type: Secure demux filter type of this feed
+ * @metadata_buf: Ring buffer object for managing the metadata buffer
+ * @metadata_buf_handle: Allocation handle for the metadata buffer
+ * @session_id: Counter that is incremented every time feed is initialized
+ * through mpq_dmx_init_mpq_feed
+ * @sdmx_buf: Ring buffer object for intermediate output data from the sdmx
+ * @sdmx_buf_handle: Allocation handle for the sdmx intermediate data buffer
+ * @video_info: Video feed specific information
+ * @audio_info: Audio feed specific information
+ */
+struct mpq_feed {
+	struct dvb_demux_feed *dvb_demux_feed;
+	struct mpq_demux *mpq_demux;
+	void *plugin_priv;
+
+	/* Secure demux related */
+	int sdmx_filter_handle;
+	int secondary_feed;
+	enum sdmx_filter filter_type;
+	struct dvb_ringbuffer metadata_buf;
+	struct ion_handle *metadata_buf_handle;
+
+	u8 session_id;
+	struct dvb_ringbuffer sdmx_buf;
+	struct ion_handle *sdmx_buf_handle;
+
+	struct mpq_video_feed_info video_info;
+	struct mpq_audio_feed_info audio_info;
+};
+
+/**
+ * struct mpq_demux - mpq demux information
+ * @idx: Instance index
+ * @demux: The dvb_demux instance used by mpq_demux
+ * @dmxdev: The dmxdev instance used by mpq_demux
+ * @fe_memory: Handle of front-end memory source to mpq_demux
+ * @source: The current source connected to the demux
+ * @is_initialized: Indicates whether this demux device was
+ * initialized or not.
+ * @ion_client: ION demux client used to allocate memory from ION.
+ * @mutex: Lock used to protect against private feed data
+ * @feeds: mpq common feed object pool
+ * @num_active_feeds: Number of active mpq feeds
+ * @num_secure_feeds: Number of secure feeds (have a sdmx filter associated)
+ * currently allocated.
+ * Used before each call to sdmx_process() to build up to date state.
+ * @sdmx_session_handle: Secure demux open session handle
+ * @sdmx_session_ref_count: Reference count of the open sdmx session
+ * @sdmx_filter_count: Number of active secure demux filters
+ * @sdmx_eos: End-of-stream indication flag for current sdmx session
+ * @sdmx_filters_state: Array holding buffers status for each secure
+ * demux filter.
+ * @decoder_alloc_flags: ION flags to be used when allocating internally
+ * @plugin_priv: Underlying plugin's own private data
+ * @mpq_dmx_plugin_release: Underlying plugin's release function
+ * @hw_notification_interval: Notification interval in msec,
+ * exposed in debugfs.
+ * @hw_notification_min_interval: Minimum notification internal in msec,
+ * exposed in debugfs.
+ * @hw_notification_count: Notification count, exposed in debugfs.
+ * @hw_notification_size: Notification size in bytes, exposed in debugfs.
+ * @hw_notification_min_size: Minimum notification size in bytes,
+ * exposed in debugfs.
+ * @decoder_stat: Decoder output statistics, exposed in debug-fs.
+ * @sdmx_process_count: Total number of times sdmx_process is called.
+ * @sdmx_process_time_sum: Total time sdmx_process takes.
+ * @sdmx_process_time_average: Average time sdmx_process takes.
+ * @sdmx_process_time_max: Max time sdmx_process takes.
+ * @sdmx_process_packets_sum: Total packets number sdmx_process handled.
+ * @sdmx_process_packets_average: Average packets number sdmx_process handled.
+ * @sdmx_process_packets_min: Minimum packets number sdmx_process handled.
+ * @sdmx_log_level: Log level used for the sdmx session
+ * @last_notification_time: Time of last HW notification.
+ * @ts_packet_timestamp_source: Source used for TS packet timestamps
+ */
+struct mpq_demux {
+	int idx;
+	struct dvb_demux demux;
+	struct dmxdev dmxdev;
+	struct dmx_frontend fe_memory;
+	dmx_source_t source;
+	int is_initialized;
+	struct ion_client *ion_client;
+	struct mutex mutex;
+	struct mpq_feed feeds[MPQ_MAX_DMX_FILES];
+	u32 num_active_feeds;
+	u32 num_secure_feeds;
+	int sdmx_session_handle;
+	int sdmx_session_ref_count;
+	int sdmx_filter_count;
+	int sdmx_eos;
+	struct {
+		/* SDMX filters status */
+		struct sdmx_filter_status status[MPQ_MAX_DMX_FILES];
+
+		/* Index of the feed respective to SDMX filter */
+		u8 mpq_feed_idx[MPQ_MAX_DMX_FILES];
+
+		/*
+		 * Snapshot of session_id of the feed
+		 * when SDMX process was called. This is used
+		 * to identify whether the feed has been
+		 * restarted when processing SDMX results.
+		 * May happen when demux is stalled in playback
+		 * from memory with PULL mode.
+		 */
+		u8 session_id[MPQ_MAX_DMX_FILES];
+	} sdmx_filters_state;
+
+	unsigned int decoder_alloc_flags;
+
+	/* HW plugin specific */
+	void *plugin_priv;
+	int (*mpq_dmx_plugin_release)(struct mpq_demux *mpq_demux);
+
+	/* debug-fs */
+	u32 hw_notification_interval;
+	u32 hw_notification_min_interval;
+	u32 hw_notification_count;
+	u32 hw_notification_size;
+	u32 hw_notification_min_size;
+
+	struct {
+		/*
+		 * Accumulated number of bytes
+		 * dropped due to decoder buffer fullness.
+		 */
+		u32 drop_count;
+
+		/* Counter incremented for each video frame output by demux */
+		u32 out_count;
+
+		/*
+		 * Sum of intervals (msec) holding the time
+		 * between two successive video frames output.
+		 */
+		u32 out_interval_sum;
+
+		/*
+		 * Average interval (msec) between two
+		 * successive video frames output.
+		 */
+		u32 out_interval_average;
+
+		/*
+		 * Max interval (msec) between two
+		 * successive video frames output.
+		 */
+		u32 out_interval_max;
+
+		/* Counter for number of decoder packets with TEI bit set */
+		u32 ts_errors;
+
+		/*
+		 * Counter for number of decoder packets
+		 * with continuity counter errors.
+		 */
+		u32 cc_errors;
+
+		/* Time of last video frame output */
+		ktime_t out_last_time;
+	} decoder_stat[MPQ_ADAPTER_MAX_NUM_OF_INTERFACES];
+
+	u32 sdmx_process_count;
+	u32 sdmx_process_time_sum;
+	u32 sdmx_process_time_average;
+	u32 sdmx_process_time_max;
+	u32 sdmx_process_packets_sum;
+	u32 sdmx_process_packets_average;
+	u32 sdmx_process_packets_min;
+	enum sdmx_log_level sdmx_log_level;
+
+	ktime_t last_notification_time;
+	int ts_packet_timestamp_source;
+};
+
+/**
+ * mpq_dmx_init - initialization and registration function of
+ * single MPQ demux device
+ *
+ * @adapter: The adapter to register mpq_demux to
+ * @mpq_demux: The mpq demux to initialize
+ *
+ * Every HW plug-in needs to provide implementation of such
+ * function that will be called for each demux device on the
+ * module initialization. The function mpq_demux_plugin_init
+ * should be called during the HW plug-in module initialization.
+ */
+typedef int (*mpq_dmx_init)(struct dvb_adapter *mpq_adapter,
+ struct mpq_demux *demux);
+
+/**
+ * mpq_demux_plugin_init - Initialize demux devices and register
+ * them to the dvb adapter.
+ *
+ * @dmx_init_func: Pointer to the function to be used
+ * to initialize demux of the underlying HW plugin.
+ *
+ * Return error code
+ *
+ * Should be called at the HW plugin module initialization.
+ */
+int mpq_dmx_plugin_init(mpq_dmx_init dmx_init_func);
+
+/**
+ * mpq_demux_plugin_exit - terminate demux devices.
+ *
+ * Should be called at the HW plugin module termination.
+ */
+void mpq_dmx_plugin_exit(void);
+
+/**
+ * mpq_dmx_set_source - implementation of set_source routine.
+ *
+ * @demux: The demux device to set its source.
+ * @src: The source to be set.
+ *
+ * Return error code
+ *
+ * Can be used by the underlying plugins to implement kernel
+ * demux API set_source routine.
+ */
+int mpq_dmx_set_source(struct dmx_demux *demux, const dmx_source_t *src);
+
+/**
+ * mpq_dmx_map_buffer - map user-space buffer into kernel space.
+ *
+ * @demux: The demux device.
+ * @dmx_buffer: The demux buffer from user-space, assumes that
+ * buffer handle is ION file-handle.
+ * @priv_handle: Saves ION-handle of the buffer imported by this function.
+ * @kernel_mem: Saves kernel mapped address of the buffer.
+ *
+ * Return error code
+ *
+ * The function maps the buffer into kernel memory only if the buffer
+ * was not allocated with secure flag, otherwise the returned kernel
+ * memory address is set to NULL.
+ */
+int mpq_dmx_map_buffer(struct dmx_demux *demux, struct dmx_buffer *dmx_buffer,
+ void **priv_handle, void **kernel_mem);
+
+/**
+ * mpq_dmx_unmap_buffer - unmap user-space buffer from kernel space memory.
+ *
+ * @demux: The demux device.
+ * @priv_handle: ION-handle of the buffer returned from mpq_dmx_map_buffer.
+ *
+ * Return error code
+ *
+ * The function unmaps the buffer from kernel memory only if the buffer
+ * was not allocated with secure flag.
+ */
+int mpq_dmx_unmap_buffer(struct dmx_demux *demux, void *priv_handle);
+
+/**
+ * mpq_dmx_decoder_fullness_init - Initialize waiting
+ * mechanism on decoder's buffer fullness.
+ *
+ * @feed: The decoder's feed
+ *
+ * Return error code.
+ */
+int mpq_dmx_decoder_fullness_init(struct dvb_demux_feed *feed);
+
+/**
+ * mpq_dmx_decoder_fullness_wait - Checks whether decoder buffer
+ * have free space as required, if not, wait for it.
+ *
+ * @feed: The decoder's feed
+ * @required_space: the required free space to wait for
+ *
+ * Return error code.
+ */
+int mpq_dmx_decoder_fullness_wait(struct dvb_demux_feed *feed,
+ size_t required_space);
+
+/**
+ * mpq_dmx_decoder_fullness_abort - Aborts waiting
+ * on decoder's buffer fullness if any waiting is done
+ * now. After calling this, to wait again the user must
+ * call mpq_dmx_decoder_fullness_init.
+ *
+ * @feed: The decoder's feed
+ *
+ * Return error code.
+ */
+int mpq_dmx_decoder_fullness_abort(struct dvb_demux_feed *feed);
+
+/**
+ * mpq_dmx_decoder_buffer_status - Returns the
+ * status of the decoder's buffer.
+ *
+ * @feed: The decoder's feed
+ * @dmx_buffer_status: Status of decoder's buffer
+ *
+ * Return error code.
+ */
+int mpq_dmx_decoder_buffer_status(struct dvb_demux_feed *feed,
+ struct dmx_buffer_status *dmx_buffer_status);
+
+/**
+ * mpq_dmx_reuse_decoder_buffer - release buffer passed to decoder for reuse
+ * by the stream-buffer.
+ *
+ * @feed: The decoder's feed.
+ * @cookie: stream-buffer handle of the buffer.
+ *
+ * Return error code
+ *
+ * The function releases the buffer provided by the stream-buffer
+ * connected to the decoder back to the stream-buffer for reuse.
+ */
+int mpq_dmx_reuse_decoder_buffer(struct dvb_demux_feed *feed, int cookie);
+
+/**
+ * mpq_dmx_process_video_packet - Assemble PES data and output it
+ * to the stream-buffer connected to the decoder.
+ *
+ * @feed: The feed used for the video TS packets
+ * @buf: The buffer holding video TS packet.
+ *
+ * Return error code.
+ *
+ * The function assumes it receives buffer with single TS packet
+ * of the relevant PID.
+ * If the output buffer is full while assembly, the function drops
+ * the packet and does not write it to the output buffer.
+ * Scrambled packets are bypassed.
+ */
+int mpq_dmx_process_video_packet(struct dvb_demux_feed *feed, const u8 *buf);
+
+/**
+ * mpq_dmx_process_pcr_packet - Extract PCR/STC pairs from
+ * a 192 bytes packet.
+ *
+ * @feed: The feed used for the PCR TS packets
+ * @buf: The buffer holding pcr/stc packet.
+ *
+ * Return error code.
+ *
+ * The function assumes it receives buffer with single TS packet
+ * of the relevant PID, and that it has 4 bytes
+ * suffix as extra timestamp in the following format:
+ *
+ * Byte3: TSIF flags
+ * Byte0-2: TTS, 0..2^24-1 at 105.47 Khz (27*10^6/256).
+ *
+ * The function callbacks dmxdev after extraction of the pcr/stc
+ * pair.
+ */
+int mpq_dmx_process_pcr_packet(struct dvb_demux_feed *feed, const u8 *buf);
+
+/**
+ * mpq_dmx_extract_pcr_and_dci() - Extract the PCR field and discontinuity
+ * indicator from a TS packet buffer.
+ *
+ * @buf: TS packet buffer
+ * @pcr: returned PCR value
+ * @dci: returned discontinuity indicator
+ *
+ * Returns 1 if PCR was extracted, 0 otherwise.
+ */
+int mpq_dmx_extract_pcr_and_dci(const u8 *buf, u64 *pcr, int *dci);
+
+/**
+ * mpq_dmx_init_debugfs_entries -
+ * Extend dvb-demux debugfs with mpq related entries (HW statistics and secure
+ * demux log level).
+ *
+ * @mpq_demux: The mpq_demux device to initialize.
+ */
+void mpq_dmx_init_debugfs_entries(struct mpq_demux *mpq_demux);
+
+/**
+ * mpq_dmx_update_hw_statistics -
+ * Update dvb-demux debugfs with HW notification statistics.
+ *
+ * @mpq_demux: The mpq_demux device to update.
+ */
+void mpq_dmx_update_hw_statistics(struct mpq_demux *mpq_demux);
+
+/**
+ * mpq_dmx_set_cipher_ops - Handles setting of cipher operations
+ *
+ * @feed: The feed to set its cipher operations
+ * @cipher_ops: Cipher operations to be set
+ *
+ * This common function handles only the case when working with
+ * secure-demux. When working with secure demux a single decrypt cipher
+ * operation is allowed.
+ *
+ * Return error code
+ */
+int mpq_dmx_set_cipher_ops(struct dvb_demux_feed *feed,
+ struct dmx_cipher_operations *cipher_ops);
+
+/**
+ * mpq_dmx_convert_tts - Convert timestamp attached by HW to each TS
+ * packet to 27MHz.
+ *
+ * @feed: The feed with TTS attached
+ * @timestamp: Buffer holding the timestamp attached by the HW
+ * @timestampIn27Mhz: Timestamp result in 27MHz
+ *
+ * Return error code
+ */
+void mpq_dmx_convert_tts(struct dvb_demux_feed *feed,
+ const u8 timestamp[TIMESTAMP_LEN],
+ u64 *timestampIn27Mhz);
+
+/**
+ * mpq_sdmx_open_session - Handle the details of opening a new secure demux
+ * session for the specified mpq demux instance. Multiple calls to this
+ * are allowed; reference counting is managed to open it only when needed.
+ *
+ * @mpq_demux: mpq demux instance
+ *
+ * Return error code
+ */
+int mpq_sdmx_open_session(struct mpq_demux *mpq_demux);
+
+/**
+ * mpq_sdmx_close_session - Closes secure demux session. The session
+ * is closed only if reference counter of the session reaches 0.
+ *
+ * @mpq_demux: mpq demux instance
+ *
+ * Return error code
+ */
+int mpq_sdmx_close_session(struct mpq_demux *mpq_demux);
+
+/**
+ * mpq_dmx_init_mpq_feed - Initialize an mpq feed object
+ * The function allocates mpq_feed object and saves in the dvb_demux_feed
+ * priv field.
+ *
+ * @feed: A dvb demux level feed parent object
+ *
+ * Return error code
+ */
+int mpq_dmx_init_mpq_feed(struct dvb_demux_feed *feed);
+
+/**
+ * mpq_dmx_terminate_feed - Destroy an mpq feed object
+ *
+ * @feed: A dvb demux level feed parent object
+ *
+ * Return error code
+ */
+int mpq_dmx_terminate_feed(struct dvb_demux_feed *feed);
+
+/**
+ * mpq_dmx_init_video_feed() - Initializes video related data structures
+ *
+ * @mpq_feed: mpq_feed object to initialize
+ *
+ * Return error code
+ */
+int mpq_dmx_init_video_feed(struct mpq_feed *mpq_feed);
+
+/**
+ * mpq_dmx_terminate_video_feed() - Release video related feed resources
+ *
+ * @mpq_feed: mpq_feed object to terminate
+ *
+ * Return error code
+ */
+int mpq_dmx_terminate_video_feed(struct mpq_feed *mpq_feed);
+
+/**
+ * mpq_dmx_write - demux write() function implementation.
+ *
+ * A wrapper function used for writing new data into the demux via DVR.
+ * It checks where new data should actually go, the secure demux or the normal
+ * dvb demux software demux.
+ *
+ * @demux: demux interface
+ * @buf: input buffer
+ * @count: number of data bytes in input buffer
+ *
+ * Return number of bytes processed or error code
+ */
+int mpq_dmx_write(struct dmx_demux *demux, const char *buf, size_t count);
+
+/**
+ * mpq_sdmx_process - Perform demuxing process on the specified input buffer
+ * in the secure demux instance
+ *
+ * @mpq_demux: mpq demux instance
+ * @input: input buffer descriptor
+ * @fill_count: number of data bytes in input buffer that can be read
+ * @read_offset: offset in buffer for reading
+ * @tsp_size: size of single TS packet
+ *
+ * Return number of bytes read or error code
+ */
+int mpq_sdmx_process(struct mpq_demux *mpq_demux,
+ struct sdmx_buff_descr *input,
+ u32 fill_count,
+ u32 read_offset,
+ size_t tsp_size);
+
+/**
+ * mpq_sdmx_loaded - Returns 1 if secure demux application is loaded,
+ * 0 otherwise. This function should be used to determine whether or not
+ * processing should take place in the SDMX.
+ */
+int mpq_sdmx_is_loaded(void);
+
+/**
+ * mpq_dmx_oob_command - Handles OOB command from dvb-demux.
+ *
+ * OOB marker commands trigger callback to the dmxdev.
+ * Handling of EOS command may trigger current (last on stream) PES/Frame to
+ * be reported, in addition to callback to the dmxdev.
+ * In case secure demux is active for the feed, EOS command is passed to the
+ * secure demux for handling.
+ *
+ * @feed: dvb demux feed object
+ * @cmd: oob command data
+ *
+ * returns 0 on success or error
+ */
+int mpq_dmx_oob_command(struct dvb_demux_feed *feed,
+ struct dmx_oob_command *cmd);
+
+/**
+ * mpq_dmx_peer_rec_feed() - For a recording filter with multiple feeds objects
+ * search for a feed object that shares the same filter as the specified feed
+ * object, and return it.
+ * This can be used to test whether the specified feed object is the first feed
+ * allocated for the recording filter, in which case NULL is returned.
+ *
+ * @feed: dvb demux feed object
+ *
+ * Return the dvb_demux_feed sharing the same filter's buffer or NULL if no
+ * such is found.
+ */
+struct dvb_demux_feed *mpq_dmx_peer_rec_feed(struct dvb_demux_feed *feed);
+
+/**
+ * mpq_dmx_decoder_eos_cmd() - Report EOS event to the mpq_streambuffer
+ *
+ * @mpq_feed: Audio/Video mpq_feed object for notification
+ * @feed_type: Feed type (Audio or Video)
+ *
+ * Return error code
+ */
+int mpq_dmx_decoder_eos_cmd(struct mpq_feed *mpq_feed, int feed_type);
+
+/**
+ * mpq_dmx_parse_mandatory_pes_header() - Parse non-optional PES header fields
+ * from TS packet buffer and save results in the feed object.
+ *
+ * @feed: Video dvb demux feed object
+ * @feed_data: Structure where results will be saved
+ * @pes_header: Saved PES header
+ * @buf: Input buffer containing TS packet with the PES header
+ * @ts_payload_offset: Offset in 'buf' where payload begins
+ * @bytes_avail: Length of actual payload
+ *
+ * Return error code
+ */
+int mpq_dmx_parse_mandatory_pes_header(
+ struct dvb_demux_feed *feed,
+ struct mpq_video_feed_info *feed_data,
+ struct pes_packet_header *pes_header,
+ const u8 *buf,
+ u32 *ts_payload_offset,
+ int *bytes_avail);
+
+/**
+ * mpq_dmx_parse_remaining_pes_header() - Parse optional PES header fields
+ * from TS packet buffer and save results in the feed object.
+ * This function depends on mpq_dmx_parse_mandatory_pes_header being called
+ * first for state to be valid.
+ *
+ * @feed: Video dvb demux feed object
+ * @feed_data: Structure where results will be saved
+ * @pes_header: Saved PES header
+ * @buf: Input buffer containing TS packet with the PES header
+ * @ts_payload_offset: Offset in 'buf' where payload begins
+ * @bytes_avail: Length of actual payload
+ *
+ * Return error code
+ */
+int mpq_dmx_parse_remaining_pes_header(
+ struct dvb_demux_feed *feed,
+ struct mpq_video_feed_info *feed_data,
+ struct pes_packet_header *pes_header,
+ const u8 *buf,
+ u32 *ts_payload_offset,
+ int *bytes_avail);
+
+/**
+ * mpq_dmx_flush_stream_buffer() - Flush video stream buffer object of the
+ * specific video feed, both meta-data packets and data.
+ *
+ * @feed: dvb demux video feed object
+ *
+ * Return error code
+ */
+int mpq_dmx_flush_stream_buffer(struct dvb_demux_feed *feed);
+
+/**
+ * mpq_dmx_save_pts_dts() - Save the current PTS/DTS data
+ *
+ * @feed_data: Video feed structure where PTS/DTS is saved
+ */
+static inline void mpq_dmx_save_pts_dts(struct mpq_video_feed_info *feed_data)
+{
+ if (feed_data->new_info_exists) {
+ feed_data->saved_pts_dts_info.pts_exist =
+ feed_data->new_pts_dts_info.pts_exist;
+ feed_data->saved_pts_dts_info.pts =
+ feed_data->new_pts_dts_info.pts;
+ feed_data->saved_pts_dts_info.dts_exist =
+ feed_data->new_pts_dts_info.dts_exist;
+ feed_data->saved_pts_dts_info.dts =
+ feed_data->new_pts_dts_info.dts;
+
+ feed_data->new_info_exists = 0;
+ feed_data->saved_info_used = 0;
+ }
+}
+
+/**
+ * mpq_dmx_write_pts_dts() - Write out the saved PTS/DTS data and mark as used
+ *
+ * @feed_data: Video feed structure where PTS/DTS was saved
+ * @info: PTS/DTS structure to write to
+ */
+static inline void mpq_dmx_write_pts_dts(struct mpq_video_feed_info *feed_data,
+ struct dmx_pts_dts_info *info)
+{
+ if (!feed_data->saved_info_used) {
+ info->pts_exist = feed_data->saved_pts_dts_info.pts_exist;
+ info->pts = feed_data->saved_pts_dts_info.pts;
+ info->dts_exist = feed_data->saved_pts_dts_info.dts_exist;
+ info->dts = feed_data->saved_pts_dts_info.dts;
+
+ feed_data->saved_info_used = 1;
+ } else {
+ info->pts_exist = 0;
+ info->dts_exist = 0;
+ }
+}
+
+/*
+ * mpq_dmx_calc_time_delta -
+ * Calculate delta in msec between two time snapshots.
+ *
+ * @curr_time: value of current time
+ * @prev_time: value of previous time
+ *
+ * Return time-delta in msec
+ */
+static inline u32 mpq_dmx_calc_time_delta(ktime_t curr_time, ktime_t prev_time)
+{
+ s64 delta_time_ms = ktime_ms_delta(curr_time, prev_time);
+
+ return (u32)delta_time_ms;
+}
+
+void mpq_dmx_update_decoder_stat(struct mpq_feed *mpq_feed);
+
+/* Return the common module parameter tsif_mode */
+int mpq_dmx_get_param_tsif_mode(void);
+
+/* Return the common module parameter clock_inv */
+int mpq_dmx_get_param_clock_inv(void);
+
+/* Return the common module parameter mpq_sdmx_scramble_odd */
+int mpq_dmx_get_param_scramble_odd(void);
+
+/* Return the common module parameter mpq_sdmx_scramble_even */
+int mpq_dmx_get_param_scramble_even(void);
+
+/* Return the common module parameter mpq_sdmx_scramble_default_discard */
+int mpq_dmx_get_param_scramble_default_discard(void);
+
+/* APIs for Audio stream buffers interface -- Added for broadcast use case */
+/*
+ * The Audio/Video drivers (or consumers) require the stream_buffer information
+ * for consuming packet headers and compressed AV data from the
+ * ring buffer filled by demux driver which is the producer
+ */
+struct mpq_streambuffer *consumer_audio_streambuffer(int dmx_ts_pes_audio);
+struct mpq_streambuffer *consumer_video_streambuffer(int dmx_ts_pes_video);
+
+int mpq_dmx_init_audio_feed(struct mpq_feed *mpq_feed);
+
+int mpq_dmx_terminate_audio_feed(struct mpq_feed *mpq_feed);
+
+int mpq_dmx_parse_remaining_audio_pes_header(
+ struct dvb_demux_feed *feed,
+ struct mpq_audio_feed_info *feed_data,
+ struct pes_packet_header *pes_header,
+ const u8 *buf,
+ u32 *ts_payload_offset,
+ int *bytes_avail);
+
+static inline void mpq_dmx_save_audio_pts_dts(
+ struct mpq_audio_feed_info *feed_data)
+{
+ if (feed_data->new_info_exists) {
+ feed_data->saved_pts_dts_info.pts_exist =
+ feed_data->new_pts_dts_info.pts_exist;
+ feed_data->saved_pts_dts_info.pts =
+ feed_data->new_pts_dts_info.pts;
+ feed_data->saved_pts_dts_info.dts_exist =
+ feed_data->new_pts_dts_info.dts_exist;
+ feed_data->saved_pts_dts_info.dts =
+ feed_data->new_pts_dts_info.dts;
+
+ feed_data->new_info_exists = 0;
+ feed_data->saved_info_used = 0;
+ }
+}
+
+/*
+ * mpq_dmx_process_audio_packet - Assemble Audio PES data and output to
+ * stream buffer connected to decoder.
+ */
+int mpq_dmx_process_audio_packet(struct dvb_demux_feed *feed, const u8 *buf);
+
+static inline void mpq_dmx_write_audio_pts_dts(
+ struct mpq_audio_feed_info *feed_data,
+ struct dmx_pts_dts_info *info)
+{
+ if (!feed_data->saved_info_used) {
+ info->pts_exist = feed_data->saved_pts_dts_info.pts_exist;
+ info->pts = feed_data->saved_pts_dts_info.pts;
+ info->dts_exist = feed_data->saved_pts_dts_info.dts_exist;
+ info->dts = feed_data->saved_pts_dts_info.dts;
+
+ feed_data->saved_info_used = 1;
+ } else {
+ info->pts_exist = 0;
+ info->dts_exist = 0;
+ }
+}
+
+#endif /* _MPQ_DMX_PLUGIN_COMMON_H */
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_sw.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_sw.c
new file mode 100644
index 0000000..16e1ba4
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_sw.c
@@ -0,0 +1,280 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include "mpq_dvb_debug.h"
+#include "mpq_dmx_plugin_common.h"
+
+
+static int mpq_sw_dmx_start_filtering(struct dvb_demux_feed *feed)
+{
+ int ret = -EINVAL;
+ struct mpq_demux *mpq_demux = feed->demux->priv;
+
+ MPQ_DVB_DBG_PRINT("%s(pid=%d) executed\n", __func__, feed->pid);
+
+ if (mpq_demux == NULL) {
+ MPQ_DVB_ERR_PRINT("%s: invalid mpq_demux handle\n", __func__);
+ goto out;
+ }
+
+ if (mpq_demux->source < DMX_SOURCE_DVR0) {
+ MPQ_DVB_ERR_PRINT("%s: only DVR source is supported (%d)\n",
+ __func__, mpq_demux->source);
+ goto out;
+ }
+
+ /*
+ * Always feed sections/PES starting from a new one and
+ * do not partial transfer data from older one
+ */
+ feed->pusi_seen = 0;
+
+ ret = mpq_dmx_init_mpq_feed(feed);
+ if (ret)
+ MPQ_DVB_ERR_PRINT("%s: mpq_dmx_init_mpq_feed failed(%d)\n",
+ __func__, ret);
+out:
+ return ret;
+}
+
+static int mpq_sw_dmx_stop_filtering(struct dvb_demux_feed *feed)
+{
+ int ret;
+
+ MPQ_DVB_DBG_PRINT("%s(%d) executed\n", __func__, feed->pid);
+
+ ret = mpq_dmx_terminate_feed(feed);
+ if (ret)
+ MPQ_DVB_ERR_PRINT("%s: mpq_dmx_terminate_feed failed(%d)\n",
+ __func__, ret);
+
+ return ret;
+}
+
+static int mpq_sw_dmx_write_to_decoder(struct dvb_demux_feed *feed,
+ const u8 *buf, size_t len)
+{
+ /*
+ * It is assumed that this function is called once for each
+ * TS packet of the relevant feed.
+ */
+ if (len > (TIMESTAMP_LEN + TS_PACKET_SIZE))
+ MPQ_DVB_DBG_PRINT(
+ "%s: warnning - len larger than one packet\n",
+ __func__);
+
+ if (dvb_dmx_is_video_feed(feed))
+ return mpq_dmx_process_video_packet(feed, buf);
+
+ if (dvb_dmx_is_pcr_feed(feed))
+ return mpq_dmx_process_pcr_packet(feed, buf);
+
+ return 0;
+}
+
+static int mpq_sw_dmx_set_source(struct dmx_demux *demux,
+ const dmx_source_t *src)
+{
+ int ret = -EINVAL;
+
+ if (demux == NULL || demux->priv == NULL || src == NULL) {
+ MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+ goto out;
+ }
+
+ if (*src >= DMX_SOURCE_DVR0 && *src <= DMX_SOURCE_DVR3) {
+ ret = mpq_dmx_set_source(demux, src);
+ if (ret)
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_dmx_set_source(%d) failed, ret=%d\n",
+ __func__, *src, ret);
+ } else {
+ MPQ_DVB_ERR_PRINT("%s: not a DVR source\n", __func__);
+ }
+
+out:
+ return ret;
+}
+
+static int mpq_sw_dmx_get_caps(struct dmx_demux *demux, struct dmx_caps *caps)
+{
+ struct dvb_demux *dvb_demux = demux->priv;
+
+ if (dvb_demux == NULL || caps == NULL) {
+ MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+ return -EINVAL;
+ }
+
+ caps->caps = DMX_CAP_PULL_MODE | DMX_CAP_VIDEO_DECODER_DATA |
+ DMX_CAP_TS_INSERTION | DMX_CAP_VIDEO_INDEXING |
+ DMX_CAP_AUTO_BUFFER_FLUSH;
+ caps->recording_max_video_pids_indexed = 0;
+ caps->num_decoders = MPQ_ADAPTER_MAX_NUM_OF_INTERFACES;
+ caps->num_demux_devices = CONFIG_DVB_MPQ_NUM_DMX_DEVICES;
+ caps->num_pid_filters = MPQ_MAX_DMX_FILES;
+ caps->num_section_filters = dvb_demux->filternum;
+ caps->num_section_filters_per_pid = dvb_demux->filternum;
+ caps->section_filter_length = DMX_FILTER_SIZE;
+ caps->num_demod_inputs = 0;
+ caps->num_memory_inputs = CONFIG_DVB_MPQ_NUM_DMX_DEVICES;
+ caps->max_bitrate = 192;
+ caps->demod_input_max_bitrate = 96;
+ caps->memory_input_max_bitrate = 96;
+ caps->num_cipher_ops = 1;
+
+ /* No STC support */
+ caps->max_stc = 0;
+
+ /* Buffer requirements */
+ caps->section.flags =
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT |
+ DMX_BUFFER_CACHED;
+ caps->section.max_buffer_num = 1;
+ caps->section.max_size = 0xFFFFFFFF;
+ caps->section.size_alignment = 0;
+ caps->pes.flags =
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT |
+ DMX_BUFFER_CACHED;
+ caps->pes.max_buffer_num = 1;
+ caps->pes.max_size = 0xFFFFFFFF;
+ caps->pes.size_alignment = 0;
+ caps->recording_188_tsp.flags =
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT |
+ DMX_BUFFER_CACHED;
+ caps->recording_188_tsp.max_buffer_num = 1;
+ caps->recording_188_tsp.max_size = 0xFFFFFFFF;
+ caps->recording_188_tsp.size_alignment = 0;
+ caps->recording_192_tsp.flags =
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT |
+ DMX_BUFFER_CACHED;
+ caps->recording_192_tsp.max_buffer_num = 1;
+ caps->recording_192_tsp.max_size = 0xFFFFFFFF;
+ caps->recording_192_tsp.size_alignment = 0;
+ caps->playback_188_tsp.flags =
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT |
+ DMX_BUFFER_CACHED;
+ caps->playback_188_tsp.max_buffer_num = 1;
+ caps->playback_188_tsp.max_size = 0xFFFFFFFF;
+ caps->playback_188_tsp.size_alignment = 188;
+ caps->playback_192_tsp.flags =
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT |
+ DMX_BUFFER_CACHED;
+ caps->playback_192_tsp.max_buffer_num = 1;
+ caps->playback_192_tsp.max_size = 0xFFFFFFFF;
+ caps->playback_192_tsp.size_alignment = 192;
+ caps->decoder.flags =
+ DMX_BUFFER_SECURED_IF_DECRYPTED |
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT |
+ DMX_BUFFER_LINEAR_GROUP_SUPPORT |
+ DMX_BUFFER_CACHED;
+ caps->decoder.max_buffer_num = DMX_MAX_DECODER_BUFFER_NUM;
+ caps->decoder.max_size = 0xFFFFFFFF;
+ caps->decoder.size_alignment = SZ_4K;
+
+ return 0;
+}
+
+static int mpq_sw_dmx_init(struct dvb_adapter *mpq_adapter,
+ struct mpq_demux *mpq_demux)
+{
+ int ret;
+ struct dvb_demux *dvb_demux = &mpq_demux->demux;
+
+ /* Set the kernel-demux object capabilities */
+ mpq_demux->demux.dmx.capabilities =
+ DMX_TS_FILTERING |
+ DMX_PES_FILTERING |
+ DMX_SECTION_FILTERING |
+ DMX_MEMORY_BASED_FILTERING |
+ DMX_CRC_CHECKING |
+ DMX_TS_DESCRAMBLING;
+
+ mpq_demux->decoder_alloc_flags = ION_FLAG_CACHED;
+
+ /* Set dvb-demux "virtual" function pointers */
+ dvb_demux->priv = (void *)mpq_demux;
+ dvb_demux->filternum = MPQ_MAX_DMX_FILES;
+ dvb_demux->feednum = MPQ_MAX_DMX_FILES;
+ dvb_demux->start_feed = mpq_sw_dmx_start_filtering;
+ dvb_demux->stop_feed = mpq_sw_dmx_stop_filtering;
+ dvb_demux->write_to_decoder = mpq_sw_dmx_write_to_decoder;
+ dvb_demux->decoder_fullness_init = mpq_dmx_decoder_fullness_init;
+ dvb_demux->decoder_fullness_wait = mpq_dmx_decoder_fullness_wait;
+ dvb_demux->decoder_fullness_abort = mpq_dmx_decoder_fullness_abort;
+ dvb_demux->decoder_buffer_status = mpq_dmx_decoder_buffer_status;
+ dvb_demux->reuse_decoder_buffer = mpq_dmx_reuse_decoder_buffer;
+ dvb_demux->set_cipher_op = mpq_dmx_set_cipher_ops;
+ dvb_demux->oob_command = mpq_dmx_oob_command;
+ dvb_demux->convert_ts = mpq_dmx_convert_tts;
+ dvb_demux->flush_decoder_buffer = NULL;
+
+ /* Initialize dvb_demux object */
+ ret = dvb_dmx_init(dvb_demux);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT("%s: dvb_dmx_init failed, ret=%d\n",
+ __func__, ret);
+ goto init_failed;
+ }
+
+ /* Now initialize the dmx-dev object */
+ mpq_demux->dmxdev.filternum = MPQ_MAX_DMX_FILES;
+ mpq_demux->dmxdev.demux = &mpq_demux->demux.dmx;
+ mpq_demux->dmxdev.capabilities = DMXDEV_CAP_DUPLEX;
+
+ mpq_demux->dmxdev.demux->set_source = mpq_sw_dmx_set_source;
+ mpq_demux->dmxdev.demux->get_stc = NULL;
+ mpq_demux->dmxdev.demux->get_caps = mpq_sw_dmx_get_caps;
+ mpq_demux->dmxdev.demux->map_buffer = mpq_dmx_map_buffer;
+ mpq_demux->dmxdev.demux->unmap_buffer = mpq_dmx_unmap_buffer;
+ mpq_demux->dmxdev.demux->write = mpq_dmx_write;
+ ret = dvb_dmxdev_init(&mpq_demux->dmxdev, mpq_adapter);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT("%s: dvb_dmxdev_init failed, ret=%d\n",
+ __func__, ret);
+ goto init_failed_dmx_release;
+ }
+
+ /* Extend dvb-demux debugfs with mpq demux statistics. */
+ mpq_dmx_init_debugfs_entries(mpq_demux);
+
+ return 0;
+
+init_failed_dmx_release:
+ dvb_dmx_release(dvb_demux);
+init_failed:
+ return ret;
+}
+
+static int __init mpq_dmx_sw_plugin_init(void)
+{
+ return mpq_dmx_plugin_init(mpq_sw_dmx_init);
+}
+
+static void __exit mpq_dmx_sw_plugin_exit(void)
+{
+ mpq_dmx_plugin_exit();
+}
+
+
+module_init(mpq_dmx_sw_plugin_init);
+module_exit(mpq_dmx_sw_plugin_exit);
+
+MODULE_DESCRIPTION("Qualcomm Technologies Inc. demux software plugin");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tspp_v1.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tspp_v1.c
new file mode 100644
index 0000000..a075ed9
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tspp_v1.c
@@ -0,0 +1,2015 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/vmalloc.h>
+#include <linux/dma-buf.h>
+#include <linux/qcom_tspp.h>
+#include "mpq_dvb_debug.h"
+#include "mpq_dmx_plugin_common.h"
+
+#define TSIF_COUNT 2
+
+/* Max number of PID filters */
+#define TSPP_MAX_PID_FILTER_NUM 128
+
+/* Max number of user-defined HW PID filters */
+#define TSPP_MAX_HW_PID_FILTER_NUM 15
+
+/* HW index of the last entry in the TSPP HW filter table */
+#define TSPP_LAST_HW_FILTER_INDEX 15
+
+/* Number of filters required to accept all packets except NULL packets */
+#define TSPP_BLOCK_NULLS_FILTERS_NUM 13
+
+/* Max number of section filters */
+#define TSPP_MAX_SECTION_FILTER_NUM 128
+
+/* For each TSIF we use a single pipe holding the data after PID filtering */
+#define TSPP_CHANNEL 0
+
+/* the channel_id set to TSPP driver based on TSIF number and channel type */
+#define TSPP_CHANNEL_ID(tsif, ch) ((tsif << 1) + ch)
+#define TSPP_GET_TSIF_NUM(ch_id) (ch_id >> 1)
+
+/* mask that set to care for all bits in pid filter */
+#define TSPP_PID_MASK 0x1FFF
+
+/* dvb-demux defines pid 0x2000 as full capture pid */
+#define TSPP_PASS_THROUGH_PID 0x2000
+
+/* NULL packets pid */
+#define TSPP_NULL_PACKETS_PID 0x1FFF
+
+#define TSPP_RAW_TTS_SIZE 192
+#define TSPP_RAW_SIZE 188
+
+#define MAX_BAM_DESCRIPTOR_SIZE (32 * 1024 - 1)
+
+#define MAX_BAM_DESCRIPTOR_COUNT (8 * 1024 - 2)
+
+#define TSPP_BUFFER_SIZE (500 * 1024) /* 500KB */
+
+#define TSPP_DEFAULT_DESCRIPTOR_SIZE (TSPP_RAW_TTS_SIZE)
+
+#define TSPP_BUFFER_COUNT(buffer_size) \
+ ((buffer_size) / tspp_desc_size)
+
+/* When TSPP notifies demux that new packets are received.
+ * Using max descriptor size (170 packets).
+ * Assuming 20MBit/sec stream, with 170 packets
+ * per descriptor there would be about 82 descriptors,
+ * Meaning about 82 notifications per second.
+ */
+#define TSPP_NOTIFICATION_SIZE(desc_size) \
+ (MAX_BAM_DESCRIPTOR_SIZE / (desc_size))
+
+/* Channel timeout in msec */
+#define TSPP_CHANNEL_TIMEOUT 100
+
+enum mem_buffer_allocation_mode {
+ MPQ_DMX_TSPP_INTERNAL_ALLOC = 0,
+ MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC = 1
+};
+
+/* module parameters for load time configuration */
+static int allocation_mode = MPQ_DMX_TSPP_INTERNAL_ALLOC;
+static int tspp_out_buffer_size = TSPP_BUFFER_SIZE;
+static int tspp_desc_size = TSPP_DEFAULT_DESCRIPTOR_SIZE;
+static int tspp_notification_size =
+ TSPP_NOTIFICATION_SIZE(TSPP_DEFAULT_DESCRIPTOR_SIZE);
+static int tspp_channel_timeout = TSPP_CHANNEL_TIMEOUT;
+static int tspp_out_ion_heap = ION_QSECOM_HEAP_ID;
+
+module_param(allocation_mode, int, 0644);
+module_param(tspp_out_buffer_size, int, 0644);
+module_param(tspp_desc_size, int, 0644);
+module_param(tspp_notification_size, int, 0644);
+module_param(tspp_channel_timeout, int, 0644);
+module_param(tspp_out_ion_heap, int, 0644);
+
+/* The following structure hold singleton information
+ * required for dmx implementation on top of TSPP.
+ */
+static struct
+{
+ /* Information for each TSIF input processing */
+ struct {
+ /*
+ * TSPP pipe holding all TS packets after PID filtering.
+ * The following is reference count for number of feeds
+ * allocated on that pipe.
+ */
+ int channel_ref;
+
+ /* Counter for data notifications on the pipe */
+ atomic_t data_cnt;
+
+ /* flag to indicate control operation is in progress */
+ atomic_t control_op;
+
+ /* ION handle used for TSPP data buffer allocation */
+ struct ion_handle *ch_mem_heap_handle;
+
+ /* TSPP data buffer heap virtual base address */
+ void *ch_mem_heap_virt_base;
+
+ /* TSPP data buffer heap physical base address */
+ ion_phys_addr_t ch_mem_heap_phys_base;
+
+ /* Buffer allocation index */
+ int buff_index;
+
+ /* Number of buffers */
+ u32 buffer_count;
+
+ /*
+ * Array holding the IDs of the TSPP buffer descriptors in the
+ * current aggregate, in order to release these descriptors at
+ * the end of processing.
+ */
+ int *aggregate_ids;
+
+ /*
+ * Holds PIDs of allocated filters along with
+ * how many feeds are opened on the same PID. For
+ * TSPP HW filters, holds also the filter table index.
+ * When pid == -1, the entry is free.
+ */
+ struct {
+ int pid;
+ int ref_count;
+ int hw_index;
+ } filters[TSPP_MAX_PID_FILTER_NUM];
+
+ /* Indicates available/allocated filter table indexes */
+ int hw_indexes[TSPP_MAX_HW_PID_FILTER_NUM];
+
+ /* Number of currently allocated PID filters */
+ u16 current_filter_count;
+
+ /*
+ * Flag to indicate whether the user added a filter to accept
+ * NULL packets (PID = 0x1FFF)
+ */
+ int pass_nulls_flag;
+
+ /*
+ * Flag to indicate whether the user added a filter to accept
+ * all packets (PID = 0x2000)
+ */
+ int pass_all_flag;
+
+ /*
+ * Flag to indicate whether the filter that accepts
+ * all packets has already been added and is
+ * currently enabled
+ */
+ int accept_all_filter_exists_flag;
+
+ /* Thread processing TS packets from TSPP */
+ struct task_struct *thread;
+ wait_queue_head_t wait_queue;
+
+ /* TSIF alias */
+ char name[TSIF_NAME_LENGTH];
+
+ /* Pointer to the demux connected to this TSIF */
+ struct mpq_demux *mpq_demux;
+
+ /* Mutex protecting the data-structure */
+ struct mutex mutex;
+
+ /* ion dma buffer mapping structure */
+ struct tspp_ion_dma_buf_info ch_ion_dma_buf;
+
+ } tsif[TSIF_COUNT];
+
+ /* ION client used for TSPP data buffer allocation */
+ struct ion_client *ion_client;
+} mpq_dmx_tspp_info;
+
+static void *tspp_mem_allocator(int channel_id, u32 size,
+ phys_addr_t *phys_base, dma_addr_t *dma_base,
+ void *user)
+{
+ void *virt_addr = NULL;
+ int i = TSPP_GET_TSIF_NUM(channel_id);
+
+ if (mpq_dmx_tspp_info.tsif[i].buff_index ==
+ mpq_dmx_tspp_info.tsif[i].buffer_count)
+ return NULL;
+
+ virt_addr =
+ (mpq_dmx_tspp_info.tsif[i].ch_mem_heap_virt_base +
+ (mpq_dmx_tspp_info.tsif[i].buff_index * size));
+
+ *phys_base =
+ (mpq_dmx_tspp_info.tsif[i].ch_mem_heap_phys_base +
+ (mpq_dmx_tspp_info.tsif[i].buff_index * size));
+
+ *dma_base =
+ (mpq_dmx_tspp_info.tsif[i].ch_ion_dma_buf.dma_map_base +
+ (mpq_dmx_tspp_info.tsif[i].buff_index * size));
+
+ mpq_dmx_tspp_info.tsif[i].buff_index++;
+
+ return virt_addr;
+}
+
+static void tspp_mem_free(int channel_id, u32 size,
+ void *virt_base, phys_addr_t phys_base, void *user)
+{
+ int i = TSPP_GET_TSIF_NUM(channel_id);
+
+ /*
+ * actual buffer heap free is done in mpq_dmx_tspp_plugin_exit().
+ * we update index here, so if this function is called repetitively
+ * for all the buffers, then afterwards tspp_mem_allocator()
+ * can be called again.
+ * Note: it would be incorrect to call tspp_mem_allocator()
+ * a few times, then call tspp_mem_free(), then call
+ * tspp_mem_allocator() again.
+ */
+ if (mpq_dmx_tspp_info.tsif[i].buff_index > 0)
+ mpq_dmx_tspp_info.tsif[i].buff_index--;
+}
+
+/**
+ * Returns a free HW filter index that can be used.
+ *
+ * @tsif: The TSIF to allocate filter from
+ *
+ * Return HW filter index or -ENOMEM if no filters available
+ */
+static int mpq_tspp_allocate_hw_filter_index(int tsif)
+{
+ int i;
+
+ for (i = 0; i < TSPP_MAX_HW_PID_FILTER_NUM; i++) {
+ if (mpq_dmx_tspp_info.tsif[tsif].hw_indexes[i] == 0) {
+ mpq_dmx_tspp_info.tsif[tsif].hw_indexes[i] = 1;
+ return i;
+ }
+ }
+
+ return -ENOMEM;
+}
+
+/**
+ * Releases a HW filter index for future reuse.
+ *
+ * @tsif: The TSIF from which the filter should be released
+ * @hw_index: The HW index to release
+ *
+ */
+static inline void mpq_tspp_release_hw_filter_index(int tsif, int hw_index)
+{
+ if ((hw_index >= 0) && (hw_index < TSPP_MAX_HW_PID_FILTER_NUM))
+ mpq_dmx_tspp_info.tsif[tsif].hw_indexes[hw_index] = 0;
+}
+
+
+/**
+ * Returns a free filter slot that can be used.
+ *
+ * @tsif: The TSIF to allocate filter from
+ *
+ * Return filter index or -ENOMEM if no filters available
+ */
+static int mpq_tspp_get_free_filter_slot(int tsif)
+{
+ int slot;
+
+ for (slot = 0; slot < TSPP_MAX_PID_FILTER_NUM; slot++)
+ if (mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid == -1)
+ return slot;
+
+ return -ENOMEM;
+}
+
+/**
+ * Returns filter index of specific pid.
+ *
+ * @tsif: The TSIF to which the pid is allocated
+ * @pid: The pid to search for
+ *
+ * Return filter index or -1 if no filter available
+ */
+static int mpq_tspp_get_filter_slot(int tsif, int pid)
+{
+ int slot;
+
+ for (slot = 0; slot < TSPP_MAX_PID_FILTER_NUM; slot++)
+ if (mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid == pid)
+ return slot;
+
+ return -EINVAL;
+}
+
+/**
+ * mpq_dmx_tspp_swfilter_desc - helper function
+ *
+ * Feeds a tspp buffer descriptor to the SW demux filter, one TS packet
+ * (payload followed by its timestamp suffix) at a time.
+ *
+ * @mpq_demux - mpq demux object
+ * @tspp_data_desc - tspp buffer descriptor
+ */
+static inline void mpq_dmx_tspp_swfilter_desc(struct mpq_demux *mpq_demux,
+	const struct tspp_data_descriptor *tspp_data_desc)
+{
+	const u8 *base = tspp_data_desc->virt_base;
+	u32 pkt_count = tspp_data_desc->size / TSPP_RAW_TTS_SIZE;
+	u32 idx;
+
+	for (idx = 0; idx < pkt_count; idx++) {
+		const u8 *pkt = base + idx * TSPP_RAW_TTS_SIZE;
+
+		/* packet data first, then the TTS suffix that follows it */
+		dvb_dmx_swfilter_packet(&mpq_demux->demux, pkt,
+					pkt + TSPP_RAW_SIZE);
+	}
+}
+
+/**
+ * Demux TS packets from TSPP by secure-demux.
+ * The function assumes the buffer is physically contiguous
+ * and that TSPP descriptors are continuous in memory.
+ *
+ * Drains every ready descriptor from the channel, optionally runs each
+ * through the SW demux, then hands the whole aggregate to SDMX in one
+ * call and releases the descriptors back to TSPP.
+ *
+ * @tsif: The TSIF interface to process its packets
+ * @channel_id: the TSPP output pipe with the TS packets
+ */
+static void mpq_dmx_tspp_aggregated_process(int tsif, int channel_id)
+{
+ const struct tspp_data_descriptor *tspp_data_desc;
+ struct mpq_demux *mpq_demux = mpq_dmx_tspp_info.tsif[tsif].mpq_demux;
+ struct sdmx_buff_descr input;
+ size_t aggregate_len = 0;
+ size_t aggregate_count = 0;
+ phys_addr_t buff_start_addr_phys;
+ phys_addr_t buff_current_addr_phys = 0;
+ u32 notif_size;
+ int i;
+
+ /*
+ * Collect all filled descriptors; remember the physical address of
+ * the first one so the SDMX read offset can be computed below.
+ * NOTE(review): aggregate_ids is sized for buffer_count entries;
+ * assumes TSPP never hands out more descriptors than that - confirm.
+ */
+ while ((tspp_data_desc = tspp_get_buffer(0, channel_id)) != NULL) {
+ if (aggregate_count == 0)
+ buff_current_addr_phys = tspp_data_desc->phys_base;
+ notif_size = tspp_data_desc->size / TSPP_RAW_TTS_SIZE;
+ mpq_dmx_tspp_info.tsif[tsif].aggregate_ids[aggregate_count] =
+ tspp_data_desc->id;
+ aggregate_len += tspp_data_desc->size;
+ aggregate_count++;
+ mpq_demux->hw_notification_size += notif_size;
+
+ /* Let SW filter process only if it might be relevant */
+ if (mpq_demux->num_active_feeds > mpq_demux->num_secure_feeds)
+ mpq_dmx_tspp_swfilter_desc(mpq_demux, tspp_data_desc);
+
+ }
+
+ /* nothing was pending */
+ if (!aggregate_count)
+ return;
+
+ buff_start_addr_phys =
+ mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_phys_base;
+
+ /* SDMX sees the whole contiguous channel buffer as its input */
+ input.base_addr = (u64)buff_start_addr_phys;
+ input.size = mpq_dmx_tspp_info.tsif[tsif].buffer_count * tspp_desc_size;
+
+ if (mpq_sdmx_is_loaded() && mpq_demux->sdmx_filter_count) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: SDMX Processing %zu descriptors: %zu bytes at start address 0x%llx, read offset %d\n",
+ __func__, aggregate_count, aggregate_len,
+ input.base_addr,
+ (int)(buff_current_addr_phys - buff_start_addr_phys));
+
+ /* read offset = first drained descriptor relative to heap base */
+ mpq_sdmx_process(mpq_demux, &input, aggregate_len,
+ buff_current_addr_phys - buff_start_addr_phys,
+ TSPP_RAW_TTS_SIZE);
+ }
+
+ /* hand all drained descriptors back to TSPP for reuse */
+ for (i = 0; i < aggregate_count; i++)
+ tspp_release_buffer(0, channel_id,
+ mpq_dmx_tspp_info.tsif[tsif].aggregate_ids[i]);
+}
+
+
+/**
+ * Demux thread function handling data from specific TSIF.
+ *
+ * Sleeps until data is pending (and no control operation is in
+ * progress) or the thread is asked to stop, then drains the TSPP
+ * channel either via the SDMX aggregated path or one descriptor at a
+ * time through the SW demux.
+ *
+ * @arg: TSIF number
+ */
+static int mpq_dmx_tspp_thread(void *arg)
+{
+ int tsif = (int)(uintptr_t)arg;
+ struct mpq_demux *mpq_demux;
+ const struct tspp_data_descriptor *tspp_data_desc;
+ atomic_t *data_cnt;
+ u32 notif_size;
+ int channel_id;
+ int ref_count;
+ int ret;
+
+ do {
+ /*
+ * Wake when data is pending and no add/remove-channel control
+ * operation is running, or when the thread should terminate.
+ */
+ ret = wait_event_interruptible(
+ mpq_dmx_tspp_info.tsif[tsif].wait_queue,
+ (atomic_read(&mpq_dmx_tspp_info.tsif[tsif].data_cnt) &&
+ !atomic_read(&mpq_dmx_tspp_info.tsif[tsif].control_op))
+ || kthread_should_stop());
+
+ if ((ret < 0) || kthread_should_stop()) {
+ MPQ_DVB_ERR_PRINT("%s: exit\n", __func__);
+ break;
+ }
+
+ /* Lock against the TSPP filters data-structure */
+ if (mutex_lock_interruptible(
+ &mpq_dmx_tspp_info.tsif[tsif].mutex))
+ return -ERESTARTSYS;
+
+ channel_id = TSPP_CHANNEL_ID(tsif, TSPP_CHANNEL);
+
+ ref_count = mpq_dmx_tspp_info.tsif[tsif].channel_ref;
+ data_cnt = &mpq_dmx_tspp_info.tsif[tsif].data_cnt;
+
+ /* Make sure channel is still active */
+ if (ref_count == 0) {
+ mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
+ continue;
+ }
+
+ /* consume one pending-data notification */
+ atomic_dec(data_cnt);
+
+ mpq_demux = mpq_dmx_tspp_info.tsif[tsif].mpq_demux;
+ mpq_demux->hw_notification_size = 0;
+
+ /* warn once: secure demux needs the contiguous allocation mode */
+ if (allocation_mode != MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC &&
+ mpq_sdmx_is_loaded())
+ pr_err_once(
+ "%s: TSPP Allocation mode does not support secure demux.\n",
+ __func__);
+
+ if (allocation_mode == MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC &&
+ mpq_sdmx_is_loaded()) {
+ mpq_dmx_tspp_aggregated_process(tsif, channel_id);
+ } else {
+ /*
+ * Go through all filled descriptors
+ * and perform demuxing on them
+ */
+ do {
+ if (atomic_read(&mpq_dmx_tspp_info.tsif[tsif].
+ control_op)) {
+ /* restore for next iteration */
+ atomic_inc(data_cnt);
+ break;
+ }
+ tspp_data_desc = tspp_get_buffer(0, channel_id);
+ if (!tspp_data_desc)
+ break;
+
+ notif_size = tspp_data_desc->size /
+ TSPP_RAW_TTS_SIZE;
+ mpq_demux->hw_notification_size += notif_size;
+
+ mpq_dmx_tspp_swfilter_desc(mpq_demux,
+ tspp_data_desc);
+ /*
+ * Notify TSPP that the buffer
+ * is no longer needed
+ */
+ tspp_release_buffer(0, channel_id,
+ tspp_data_desc->id);
+ } while (1);
+ }
+
+ /* track the smallest non-zero notification seen (statistics) */
+ if (mpq_demux->hw_notification_size &&
+ (mpq_demux->hw_notification_size <
+ mpq_demux->hw_notification_min_size))
+ mpq_demux->hw_notification_min_size =
+ mpq_demux->hw_notification_size;
+
+ mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
+ } while (1);
+
+ return 0;
+}
+
+/**
+ * Callback function from TSPP when new data is ready.
+ *
+ * @channel_id: Channel with new TS packets
+ * @user: user-data holding TSIF number
+ */
+static void mpq_tspp_callback(int channel_id, void *user)
+{
+	int tsif = (int)(uintptr_t)user;
+
+	/* Save statistics on TSPP notifications */
+	mpq_dmx_update_hw_statistics(mpq_dmx_tspp_info.tsif[tsif].mpq_demux);
+
+	/* flag pending data and kick the demux thread for this TSIF */
+	atomic_inc(&mpq_dmx_tspp_info.tsif[tsif].data_cnt);
+	wake_up(&mpq_dmx_tspp_info.tsif[tsif].wait_queue);
+}
+
+/**
+ * Free memory of channel output of specific TSIF.
+ *
+ * Safe to call on a partially-initialized channel: each resource is
+ * released only if it was actually acquired, so this doubles as the
+ * error-path cleanup for mpq_dmx_channel_mem_alloc().
+ *
+ * @tsif: The TSIF id to which memory should be freed.
+ */
+static void mpq_dmx_channel_mem_free(int tsif)
+{
+ MPQ_DVB_DBG_PRINT("%s(%d)\n", __func__, tsif);
+
+ /* drop the physical base first so it is never used mid-teardown */
+ mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_phys_base = 0;
+
+ if (!IS_ERR_OR_NULL(mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle)) {
+ /* unmap the kernel mapping only if it was established */
+ if (!IS_ERR_OR_NULL(mpq_dmx_tspp_info.tsif[tsif].
+ ch_mem_heap_virt_base))
+ ion_unmap_kernel(mpq_dmx_tspp_info.ion_client,
+ mpq_dmx_tspp_info.tsif[tsif].
+ ch_mem_heap_handle);
+
+ ion_free(mpq_dmx_tspp_info.ion_client,
+ mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle);
+ }
+
+ mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_virt_base = NULL;
+ mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle = NULL;
+
+ /*
+ * NOTE(review): called unconditionally, even when no dma-buf was
+ * attached - presumably a no-op in that case; confirm against the
+ * tspp driver's tspp_detach_ion_dma_buff() implementation.
+ */
+ tspp_detach_ion_dma_buff(0,
+ &mpq_dmx_tspp_info.tsif[tsif].ch_ion_dma_buf);
+}
+
+/**
+ * Allocate memory for channel output of specific TSIF.
+ *
+ * Allocates one contiguous ION buffer sized buffer_count * tspp_desc_size,
+ * maps it into the kernel, records its physical base, exports it as a
+ * dma-buf and attaches it to the TSPP driver. On any failure all
+ * partially-acquired resources are released via mpq_dmx_channel_mem_free().
+ *
+ * @tsif: The TSIF id to which memory should be allocated.
+ *
+ * Return error status
+ */
+static int mpq_dmx_channel_mem_alloc(int tsif)
+{
+ int result;
+ size_t len;
+
+ MPQ_DVB_DBG_PRINT("%s(%d)\n", __func__, tsif);
+
+ /* one contiguous buffer backing all TSPP descriptors of the channel */
+ mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle =
+ ion_alloc(mpq_dmx_tspp_info.ion_client,
+ (mpq_dmx_tspp_info.tsif[tsif].buffer_count * tspp_desc_size),
+ SZ_4K,
+ ION_HEAP(tspp_out_ion_heap),
+ 0); /* non-cached */
+
+ if (IS_ERR_OR_NULL(mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle)) {
+ MPQ_DVB_ERR_PRINT("%s: ion_alloc() failed\n", __func__);
+ mpq_dmx_channel_mem_free(tsif);
+ return -ENOMEM;
+ }
+
+ /* save virtual base address of heap */
+ mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_virt_base =
+ ion_map_kernel(mpq_dmx_tspp_info.ion_client,
+ mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle);
+ if (IS_ERR_OR_NULL(mpq_dmx_tspp_info.tsif[tsif].
+ ch_mem_heap_virt_base)) {
+ MPQ_DVB_ERR_PRINT("%s: ion_map_kernel() failed\n", __func__);
+ mpq_dmx_channel_mem_free(tsif);
+ return -ENOMEM;
+ }
+
+ /* save physical base address of heap */
+ result = ion_phys(mpq_dmx_tspp_info.ion_client,
+ mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle,
+ &(mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_phys_base), &len);
+ if (result < 0) {
+ MPQ_DVB_ERR_PRINT("%s: ion_phys() failed\n", __func__);
+ mpq_dmx_channel_mem_free(tsif);
+ return -ENOMEM;
+ }
+
+ /* export as a dma-buf so TSPP can attach it for DMA */
+ mpq_dmx_tspp_info.tsif[tsif].ch_ion_dma_buf.dbuf = ion_share_dma_buf(
+ mpq_dmx_tspp_info.ion_client,
+ mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle);
+ if (IS_ERR_OR_NULL(mpq_dmx_tspp_info.tsif[tsif].ch_ion_dma_buf.dbuf)) {
+ MPQ_DVB_ERR_PRINT("%s: ion_share_dma_buf failed\n", __func__);
+ mpq_dmx_channel_mem_free(tsif);
+ return -ENOMEM;
+ }
+
+ result = tspp_attach_ion_dma_buff(0,
+ &mpq_dmx_tspp_info.tsif[tsif].ch_ion_dma_buf);
+ if (result) {
+ MPQ_DVB_ERR_PRINT("%s: tspp_attach_ion_dma_buff failed\n",
+ __func__);
+ mpq_dmx_channel_mem_free(tsif);
+ return result;
+ }
+
+ return 0;
+}
+
+/**
+ * Add a filter to accept all packets as the last entry
+ * of the TSPP HW filter table.
+ *
+ * @channel_id: Channel ID number.
+ * @source: TSPP source.
+ *
+ * Return error status
+ */
+static int mpq_tspp_add_accept_all_filter(int channel_id,
+				enum tspp_source source)
+{
+	struct tspp_filter flt;
+	int tsif = TSPP_GET_TSIF_NUM(channel_id);
+	int ret;
+
+	MPQ_DVB_DBG_PRINT("%s: executed, channel id = %d, source = %d\n",
+		__func__, channel_id, source);
+
+	/* nothing to do if the catch-all entry is already installed */
+	if (mpq_dmx_tspp_info.tsif[tsif].accept_all_filter_exists_flag) {
+		MPQ_DVB_DBG_PRINT("%s: accept all filter already exists\n",
+				__func__);
+		return 0;
+	}
+
+	/* This filter will be the last entry in the table */
+	flt.priority = TSPP_LAST_HW_FILTER_INDEX;
+	/* Pass all pids - set mask to 0 */
+	flt.pid = 0;
+	flt.mask = 0;
+	/*
+	 * Include TTS in RAW packets, if you change this to
+	 * TSPP_MODE_RAW_NO_SUFFIX you must also change TSPP_RAW_TTS_SIZE
+	 * accordingly.
+	 */
+	flt.mode = TSPP_MODE_RAW;
+	flt.source = source;
+	flt.decrypt = 0;
+
+	ret = tspp_add_filter(0, channel_id, &flt);
+	if (ret)
+		return ret;
+
+	mpq_dmx_tspp_info.tsif[tsif].accept_all_filter_exists_flag = 1;
+	MPQ_DVB_DBG_PRINT(
+		"%s: accept all filter added successfully\n",
+		__func__);
+	return 0;
+}
+
+/**
+ * Remove the filter that accepts all packets from the last entry
+ * of the TSPP HW filter table.
+ *
+ * @channel_id: Channel ID number.
+ * @source: TSPP source.
+ *
+ * Return error status
+ */
+static int mpq_tspp_remove_accept_all_filter(int channel_id,
+				enum tspp_source source)
+{
+	struct tspp_filter flt;
+	int tsif = TSPP_GET_TSIF_NUM(channel_id);
+	int ret;
+
+	MPQ_DVB_DBG_PRINT("%s: executed, channel id = %d, source = %d\n",
+		__func__, channel_id, source);
+
+	/* nothing to remove if the catch-all entry was never installed */
+	if (!mpq_dmx_tspp_info.tsif[tsif].accept_all_filter_exists_flag) {
+		MPQ_DVB_DBG_PRINT("%s: accept all filter doesn't exist\n",
+				__func__);
+		return 0;
+	}
+
+	/* the catch-all always occupies the last HW table entry */
+	flt.priority = TSPP_LAST_HW_FILTER_INDEX;
+
+	ret = tspp_remove_filter(0, channel_id, &flt);
+	if (ret)
+		return ret;
+
+	mpq_dmx_tspp_info.tsif[tsif].accept_all_filter_exists_flag = 0;
+	MPQ_DVB_DBG_PRINT(
+		"%s: accept all filter removed successfully\n",
+		__func__);
+	return 0;
+}
+
+/**
+ * Add filters designed to accept all packets except NULL packets, i.e.
+ * packets with PID = 0x1FFF.
+ * This function is called after user-defined filters were removed,
+ * so it assumes that the first 13 HW filters in the TSPP filter
+ * table are free for use.
+ *
+ * @channel_id: Channel ID number.
+ * @source: TSPP source.
+ *
+ * Return 0 on success, -1 otherwise
+ */
+static int mpq_tspp_add_null_blocking_filters(int channel_id,
+				enum tspp_source source)
+{
+ struct tspp_filter tspp_filter;
+ int ret = 0;
+ int i, j;
+ u16 full_pid_mask = 0x1FFF;
+ u8 mask_shift;
+ u8 pid_shift;
+ int tsif = TSPP_GET_TSIF_NUM(channel_id);
+
+ MPQ_DVB_DBG_PRINT("%s: executed, channel id = %d, source = %d\n",
+ __func__, channel_id, source);
+
+ /*
+ * Add a total of 13 filters that will accept packets with
+ * every PID other than 0x1FFF, which is the NULL PID.
+ *
+ * Filter 0: accept all PIDs with bit 12 clear, i.e.
+ * PID = 0x0000 .. 0x0FFF (4096 PIDs in total):
+ * Mask = 0x1000, PID = 0x0000.
+ *
+ * Filter 12: Accept PID 0x1FFE:
+ * Mask = 0x1FFF, PID = 0x1FFE.
+ *
+ * In general: For N = 0 .. 12,
+ * Filter <N>: accept all PIDs with <N> MSBits set and bit <N-1> clear.
+ * Filter <N> Mask = N+1 MSBits set, others clear.
+ * Filter <N> PID = <N> MSBits set, others clear.
+ */
+
+ /*
+ * Include TTS in RAW packets, if you change this to
+ * TSPP_MODE_RAW_NO_SUFFIX you must also change TSPP_RAW_TTS_SIZE
+ * accordingly.
+ */
+ tspp_filter.mode = TSPP_MODE_RAW;
+ tspp_filter.source = source;
+ tspp_filter.decrypt = 0;
+
+ for (i = 0; i < TSPP_BLOCK_NULLS_FILTERS_NUM; i++) {
+ /*
+ * Since the table was emptied beforehand, allocation is
+ * expected to hand out indexes sequentially: 0, 1, 2, ...
+ */
+ tspp_filter.priority = mpq_tspp_allocate_hw_filter_index(tsif);
+ if (tspp_filter.priority != i) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: got unexpected HW index %d, expected %d\n",
+ __func__, tspp_filter.priority, i);
+ ret = -1;
+ break;
+ }
+ /* shift-right-then-left clears the low bits: mask keeps i+1
+ * MSBits of 0x1FFF, pid keeps i MSBits (see table above)
+ */
+ mask_shift = (TSPP_BLOCK_NULLS_FILTERS_NUM - 1 - i);
+ pid_shift = (TSPP_BLOCK_NULLS_FILTERS_NUM - i);
+ tspp_filter.mask =
+ ((full_pid_mask >> mask_shift) << mask_shift);
+ tspp_filter.pid = ((full_pid_mask >> pid_shift) << pid_shift);
+
+ if (tspp_add_filter(0, channel_id, &tspp_filter)) {
+ ret = -1;
+ break;
+ }
+ }
+
+ if (ret) {
+ /* cleanup on failure */
+ /* undo only the filters installed so far (indexes 0..i-1) */
+ for (j = 0; j < i; j++) {
+ tspp_filter.priority = j;
+ mpq_tspp_release_hw_filter_index(tsif, j);
+ tspp_remove_filter(0, channel_id, &tspp_filter);
+ }
+ } else {
+ MPQ_DVB_DBG_PRINT(
+ "%s: NULL blocking filters added successfully\n",
+ __func__);
+ }
+
+ return ret;
+}
+
+/**
+ * Remove filters designed to accept all packets except NULL packets, i.e.
+ * packets with PID = 0x1FFF.
+ *
+ * @channel_id: Channel ID number.
+ *
+ * @source: TSPP source.
+ *
+ * Return 0 on success, -1 otherwise
+ */
+static int mpq_tspp_remove_null_blocking_filters(int channel_id,
+				enum tspp_source source)
+{
+	struct tspp_filter flt;
+	int tsif = TSPP_GET_TSIF_NUM(channel_id);
+	int status = 0;
+	int idx;
+
+	MPQ_DVB_DBG_PRINT("%s: executed, channel id = %d, source = %d\n",
+		__func__, channel_id, source);
+
+	/* tear down the 13 blocking filters and return their HW indexes */
+	for (idx = 0; idx < TSPP_BLOCK_NULLS_FILTERS_NUM; idx++) {
+		flt.priority = idx;
+		if (tspp_remove_filter(0, channel_id, &flt)) {
+			MPQ_DVB_ERR_PRINT("%s: failed to remove filter %d\n",
+				__func__, idx);
+			status = -1;
+		}
+
+		mpq_tspp_release_hw_filter_index(tsif, idx);
+	}
+
+	return status;
+}
+
+/**
+ * Add all current user-defined filters (up to 15) as HW filters
+ *
+ * @channel_id: Channel ID number.
+ *
+ * @source: TSPP source.
+ *
+ * Return 0 on success, -EINVAL otherwise
+ */
+static int mpq_tspp_add_all_user_filters(int channel_id,
+				enum tspp_source source)
+{
+	struct tspp_filter tspp_filter;
+	int tsif = TSPP_GET_TSIF_NUM(channel_id);
+	int hw_index;
+	int slot;
+	u16 added_count = 0;
+	u16 total_filters_count = 0;
+
+	MPQ_DVB_DBG_PRINT("%s: executed\n", __func__);
+
+	/*
+	 * Include TTS in RAW packets, if you change this to
+	 * TSPP_MODE_RAW_NO_SUFFIX you must also change TSPP_RAW_TTS_SIZE
+	 * accordingly.
+	 */
+	tspp_filter.mode = TSPP_MODE_RAW;
+	tspp_filter.source = source;
+	tspp_filter.decrypt = 0;
+
+	for (slot = 0; slot < TSPP_MAX_PID_FILTER_NUM; slot++) {
+		if (mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid == -1)
+			continue;
+
+		/*
+		 * count total number of user filters to verify that it is
+		 * exactly TSPP_MAX_HW_PID_FILTER_NUM as expected.
+		 */
+		total_filters_count++;
+
+		/*
+		 * Fix: was '>', which allowed one extra add attempt with an
+		 * invalid (negative) HW index once the HW table was full.
+		 */
+		if (added_count >= TSPP_MAX_HW_PID_FILTER_NUM)
+			continue;
+
+		/*
+		 * Fix: verify the allocation succeeded instead of passing a
+		 * negative error code to the HW as a filter priority.
+		 */
+		hw_index = mpq_tspp_allocate_hw_filter_index(tsif);
+		if (hw_index < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: mpq_tspp_allocate_hw_filter_index failed\n",
+				__func__);
+			continue;
+		}
+		tspp_filter.priority = hw_index;
+
+		if (mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid ==
+				TSPP_PASS_THROUGH_PID) {
+			/* pass all pids */
+			tspp_filter.pid = 0;
+			tspp_filter.mask = 0;
+		} else {
+			tspp_filter.pid =
+				mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid;
+			tspp_filter.mask = TSPP_PID_MASK;
+		}
+
+		MPQ_DVB_DBG_PRINT(
+			"%s: adding HW filter, PID = %d, mask = 0x%X, index = %d\n",
+			__func__, tspp_filter.pid, tspp_filter.mask,
+			tspp_filter.priority);
+
+		if (!tspp_add_filter(0, channel_id, &tspp_filter)) {
+			mpq_dmx_tspp_info.tsif[tsif].filters[slot].hw_index =
+				tspp_filter.priority;
+			added_count++;
+		} else {
+			/* Fix: return the unused HW index to the pool */
+			mpq_tspp_release_hw_filter_index(tsif, hw_index);
+			MPQ_DVB_ERR_PRINT("%s: tspp_add_filter failed\n",
+					__func__);
+		}
+	}
+
+	/* all user filters must have fit into HW and all must have added */
+	if ((added_count != TSPP_MAX_HW_PID_FILTER_NUM) ||
+		(added_count != total_filters_count))
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
+ * Remove all user-defined HW filters
+ *
+ * @channel_id: Channel ID number.
+ *
+ * @source: TSPP source.
+ *
+ * Return 0 on success, -1 otherwise
+ */
+static int mpq_tspp_remove_all_user_filters(int channel_id,
+				enum tspp_source source)
+{
+	struct tspp_filter flt;
+	int tsif = TSPP_GET_TSIF_NUM(channel_id);
+	int status = 0;
+	int idx;
+
+	MPQ_DVB_DBG_PRINT("%s: executed\n", __func__);
+
+	/* drop every HW entry, free its index and clear the slot mapping */
+	for (idx = 0; idx < TSPP_MAX_HW_PID_FILTER_NUM; idx++) {
+		flt.priority = idx;
+		MPQ_DVB_DBG_PRINT("%s: Removing HW filter %d\n",
+			__func__, flt.priority);
+		if (tspp_remove_filter(0, channel_id, &flt))
+			status = -1;
+
+		mpq_tspp_release_hw_filter_index(tsif, idx);
+		mpq_dmx_tspp_info.tsif[tsif].filters[idx].hw_index = -1;
+	}
+
+	return status;
+}
+
+/**
+ * Configure TSPP channel to filter the PID of new feed.
+ *
+ * @feed: The feed to configure the channel with
+ *
+ * Return error status
+ *
+ * The function checks if the new PID can be added to an already
+ * allocated channel, if not, a new channel is allocated and configured.
+ */
+static int mpq_tspp_dmx_add_channel(struct dvb_demux_feed *feed)
+{
+ struct mpq_demux *mpq_demux = feed->demux->priv;
+ struct tspp_select_source tspp_source;
+ struct tspp_filter tspp_filter;
+ int tsif;
+ int tsif_mode = mpq_dmx_get_param_tsif_mode();
+ int ret = 0;
+ int slot;
+ int channel_id;
+ int *channel_ref_count;
+ u32 buffer_size;
+ int restore_user_filters = 0;
+ int remove_accept_all_filter = 0;
+ int remove_null_blocking_filters = 0;
+ size_t agg_size;
+
+ tspp_source.clk_inverse = mpq_dmx_get_param_clock_inv();
+ tspp_source.data_inverse = 0;
+ tspp_source.sync_inverse = 0;
+ tspp_source.enable_inverse = 0;
+
+ MPQ_DVB_DBG_PRINT("%s: executed, PID = %d\n", __func__, feed->pid);
+
+ switch (tsif_mode) {
+ case 1:
+ tspp_source.mode = TSPP_TSIF_MODE_1;
+ break;
+ case 2:
+ tspp_source.mode = TSPP_TSIF_MODE_2;
+ break;
+ default:
+ tspp_source.mode = TSPP_TSIF_MODE_LOOPBACK;
+ break;
+ }
+
+ /* determine the TSIF we are reading from */
+ if (mpq_demux->source == DMX_SOURCE_FRONT0) {
+ tsif = 0;
+ tspp_source.source = TSPP_SOURCE_TSIF0;
+ } else if (mpq_demux->source == DMX_SOURCE_FRONT1) {
+ tsif = 1;
+ tspp_source.source = TSPP_SOURCE_TSIF1;
+ } else {
+ /* invalid source */
+ MPQ_DVB_ERR_PRINT(
+ "%s: invalid input source (%d)\n",
+ __func__,
+ mpq_demux->source);
+
+ return -EINVAL;
+ }
+
+ /* control_op pauses the demux thread while we change the filters */
+ atomic_inc(&mpq_dmx_tspp_info.tsif[tsif].control_op);
+ if (mutex_lock_interruptible(&mpq_dmx_tspp_info.tsif[tsif].mutex)) {
+ atomic_dec(&mpq_dmx_tspp_info.tsif[tsif].control_op);
+ return -ERESTARTSYS;
+ }
+
+ /*
+ * It is possible that this PID was already requested before.
+ * Can happen if we play and record same PES or PCR
+ * piggypacked on video packet.
+ */
+ slot = mpq_tspp_get_filter_slot(tsif, feed->pid);
+ if (slot >= 0) {
+ /* PID already configured */
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count++;
+ goto out;
+ }
+
+
+ channel_id = TSPP_CHANNEL_ID(tsif, TSPP_CHANNEL);
+ channel_ref_count = &mpq_dmx_tspp_info.tsif[tsif].channel_ref;
+
+ /*
+ * Recalculate 'tspp_notification_size' and buffer count in case
+ * 'tspp_desc_size' or 'tspp_out_buffer_size' parameters have changed.
+ */
+ buffer_size = tspp_desc_size;
+ tspp_notification_size = TSPP_NOTIFICATION_SIZE(tspp_desc_size);
+ mpq_dmx_tspp_info.tsif[tsif].buffer_count =
+ TSPP_BUFFER_COUNT(tspp_out_buffer_size);
+ if (mpq_dmx_tspp_info.tsif[tsif].buffer_count >
+ MAX_BAM_DESCRIPTOR_COUNT)
+ mpq_dmx_tspp_info.tsif[tsif].buffer_count =
+ MAX_BAM_DESCRIPTOR_COUNT;
+
+ /* check if required TSPP pipe is already allocated or not */
+ if (*channel_ref_count == 0) {
+ if (allocation_mode == MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC) {
+ agg_size = mpq_dmx_tspp_info.tsif[tsif].buffer_count *
+ sizeof(int);
+ mpq_dmx_tspp_info.tsif[tsif].aggregate_ids =
+ vzalloc(agg_size);
+ if (!mpq_dmx_tspp_info.tsif[tsif].aggregate_ids) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: Failed to allocate memory for buffer descriptors aggregation\n",
+ __func__);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = mpq_dmx_channel_mem_alloc(tsif);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_dmx_channel_mem_alloc(%d) failed (%d)\n",
+ __func__,
+ channel_id,
+ ret);
+
+ goto add_channel_failed;
+ }
+ }
+
+ ret = tspp_open_channel(0, channel_id);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: tspp_open_channel(%d) failed (%d)\n",
+ __func__,
+ channel_id,
+ ret);
+
+ goto add_channel_failed;
+ }
+
+ /* set TSPP source */
+ ret = tspp_open_stream(0, channel_id, &tspp_source);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: tspp_select_source(%d,%d) failed (%d)\n",
+ __func__,
+ channel_id,
+ tspp_source.source,
+ ret);
+
+ goto add_channel_close_ch;
+ }
+
+ /* register notification on TS packets */
+ tspp_register_notification(0,
+ channel_id,
+ mpq_tspp_callback,
+ (void *)(uintptr_t)tsif,
+ tspp_channel_timeout);
+
+ /*
+ * Register allocator and provide allocation function
+ * that allocates from contiguous memory so that we can have
+ * big notification size, smallest descriptor, and still provide
+ * TZ with single big buffer based on notification size.
+ */
+ if (allocation_mode == MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC) {
+ ret = tspp_allocate_buffers(0, channel_id,
+ mpq_dmx_tspp_info.tsif[tsif].buffer_count,
+ buffer_size, tspp_notification_size,
+ tspp_mem_allocator, tspp_mem_free, NULL);
+ } else {
+ ret = tspp_allocate_buffers(0, channel_id,
+ mpq_dmx_tspp_info.tsif[tsif].buffer_count,
+ buffer_size, tspp_notification_size,
+ NULL, NULL, NULL);
+ }
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: tspp_allocate_buffers(%d) failed (%d)\n",
+ __func__,
+ channel_id,
+ ret);
+
+ goto add_channel_unregister_notif;
+ }
+
+ mpq_dmx_tspp_info.tsif[tsif].mpq_demux = mpq_demux;
+ }
+
+ /* add new PID to the existing pipe */
+ slot = mpq_tspp_get_free_filter_slot(tsif);
+ if (slot < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_tspp_get_free_filter_slot(%d) failed\n",
+ __func__, tsif);
+
+ /* Fix: ret was left 0 here, reporting success on failure */
+ ret = slot;
+ goto add_channel_unregister_notif;
+ }
+
+ if (feed->pid == TSPP_PASS_THROUGH_PID)
+ mpq_dmx_tspp_info.tsif[tsif].pass_all_flag = 1;
+ else if (feed->pid == TSPP_NULL_PACKETS_PID)
+ mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag = 1;
+
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid = feed->pid;
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count++;
+
+ /* -1 marks "no HW index allocated yet" for the cleanup path */
+ tspp_filter.priority = -1;
+
+ if (mpq_dmx_tspp_info.tsif[tsif].current_filter_count <
+ TSPP_MAX_HW_PID_FILTER_NUM) {
+ /* HW filtering mode */
+ tspp_filter.priority = mpq_tspp_allocate_hw_filter_index(tsif);
+ if (tspp_filter.priority < 0) {
+ /* Fix: ret was left 0 here, reporting success */
+ ret = -ENOMEM;
+ goto add_channel_free_filter_slot;
+ }
+
+ if (feed->pid == TSPP_PASS_THROUGH_PID) {
+ /* pass all pids */
+ tspp_filter.pid = 0;
+ tspp_filter.mask = 0;
+ } else {
+ tspp_filter.pid = feed->pid;
+ tspp_filter.mask = TSPP_PID_MASK;
+ }
+
+ /*
+ * Include TTS in RAW packets, if you change this to
+ * TSPP_MODE_RAW_NO_SUFFIX you must also change
+ * TSPP_RAW_TTS_SIZE accordingly.
+ */
+ tspp_filter.mode = TSPP_MODE_RAW;
+ tspp_filter.source = tspp_source.source;
+ tspp_filter.decrypt = 0;
+ ret = tspp_add_filter(0, channel_id, &tspp_filter);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: tspp_add_filter(%d) failed (%d)\n",
+ __func__,
+ channel_id,
+ ret);
+
+ goto add_channel_free_filter_slot;
+ }
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].hw_index =
+ tspp_filter.priority;
+
+ MPQ_DVB_DBG_PRINT(
+ "%s: HW filtering mode: added TSPP HW filter, PID = %d, mask = 0x%X, index = %d\n",
+ __func__, tspp_filter.pid, tspp_filter.mask,
+ tspp_filter.priority);
+ } else if (mpq_dmx_tspp_info.tsif[tsif].current_filter_count ==
+ TSPP_MAX_HW_PID_FILTER_NUM) {
+ /* Crossing the threshold - from HW to SW filtering mode */
+
+ /* Add a temporary filter to accept all packets */
+ ret = mpq_tspp_add_accept_all_filter(channel_id,
+ tspp_source.source);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_tspp_add_accept_all_filter(%d, %d) failed\n",
+ __func__, channel_id, tspp_source.source);
+
+ goto add_channel_free_filter_slot;
+ }
+
+ /* Remove all existing user filters */
+ ret = mpq_tspp_remove_all_user_filters(channel_id,
+ tspp_source.source);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_tspp_remove_all_user_filters(%d, %d) failed\n",
+ __func__, channel_id, tspp_source.source);
+
+ restore_user_filters = 1;
+ remove_accept_all_filter = 1;
+
+ goto add_channel_free_filter_slot;
+ }
+
+ /* Add HW filters to block NULL packets */
+ ret = mpq_tspp_add_null_blocking_filters(channel_id,
+ tspp_source.source);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_tspp_add_null_blocking_filters(%d, %d) failed\n",
+ __func__, channel_id, tspp_source.source);
+
+ restore_user_filters = 1;
+ remove_accept_all_filter = 1;
+
+ goto add_channel_free_filter_slot;
+ }
+
+ /* Remove filters that accepts all packets, if necessary */
+ if ((mpq_dmx_tspp_info.tsif[tsif].pass_all_flag == 0) &&
+ (mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag == 0)) {
+
+ ret = mpq_tspp_remove_accept_all_filter(channel_id,
+ tspp_source.source);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_tspp_remove_accept_all_filter(%d, %d) failed\n",
+ __func__, channel_id,
+ tspp_source.source);
+
+ remove_null_blocking_filters = 1;
+ restore_user_filters = 1;
+ remove_accept_all_filter = 1;
+
+ goto add_channel_free_filter_slot;
+ }
+ }
+ } else {
+ /* Already working in SW filtering mode */
+ if (mpq_dmx_tspp_info.tsif[tsif].pass_all_flag ||
+ mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag) {
+
+ ret = mpq_tspp_add_accept_all_filter(channel_id,
+ tspp_source.source);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_tspp_add_accept_all_filter(%d, %d) failed\n",
+ __func__, channel_id,
+ tspp_source.source);
+
+ goto add_channel_free_filter_slot;
+ }
+ }
+ }
+
+ (*channel_ref_count)++;
+ mpq_dmx_tspp_info.tsif[tsif].current_filter_count++;
+
+ MPQ_DVB_DBG_PRINT("%s: success, current_filter_count = %d\n",
+ __func__, mpq_dmx_tspp_info.tsif[tsif].current_filter_count);
+
+ goto out;
+
+add_channel_free_filter_slot:
+ /* restore internal database state */
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid = -1;
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count--;
+
+ /* release HW index if we allocated one */
+ if (tspp_filter.priority >= 0) {
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].hw_index = -1;
+ mpq_tspp_release_hw_filter_index(tsif, tspp_filter.priority);
+ }
+
+ /* restore HW filter table state if necessary */
+ if (remove_null_blocking_filters)
+ mpq_tspp_remove_null_blocking_filters(channel_id,
+ tspp_source.source);
+
+ if (restore_user_filters)
+ mpq_tspp_add_all_user_filters(channel_id, tspp_source.source);
+
+ if (remove_accept_all_filter)
+ mpq_tspp_remove_accept_all_filter(channel_id,
+ tspp_source.source);
+
+ /* restore flags. we can only get here if we changed the flags. */
+ if (feed->pid == TSPP_PASS_THROUGH_PID)
+ mpq_dmx_tspp_info.tsif[tsif].pass_all_flag = 0;
+ else if (feed->pid == TSPP_NULL_PACKETS_PID)
+ mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag = 0;
+
+add_channel_unregister_notif:
+ /* teardown only if the channel was created by this call */
+ if (*channel_ref_count == 0) {
+ tspp_unregister_notification(0, channel_id);
+ tspp_close_stream(0, channel_id);
+ }
+add_channel_close_ch:
+ if (*channel_ref_count == 0)
+ tspp_close_channel(0, channel_id);
+add_channel_failed:
+ if (*channel_ref_count == 0)
+ if (allocation_mode == MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC) {
+ vfree(mpq_dmx_tspp_info.tsif[tsif].aggregate_ids);
+ mpq_dmx_tspp_info.tsif[tsif].aggregate_ids = NULL;
+ mpq_dmx_channel_mem_free(tsif);
+ }
+
+out:
+ mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
+ atomic_dec(&mpq_dmx_tspp_info.tsif[tsif].control_op);
+ return ret;
+}
+
+/**
+ * Removes filter from TSPP.
+ *
+ * @feed: The feed to remove
+ *
+ * Return error status
+ *
+ * The function checks if this is the only PID allocated within
+ * the channel, if so, the channel is closed as well.
+ */
+static int mpq_tspp_dmx_remove_channel(struct dvb_demux_feed *feed)
+{
+ int tsif;
+ int ret = 0;
+ int channel_id;
+ int slot;
+ atomic_t *data_cnt;
+ int *channel_ref_count;
+ enum tspp_source tspp_source;
+ struct tspp_filter tspp_filter;
+ struct mpq_demux *mpq_demux = feed->demux->priv;
+ int restore_null_blocking_filters = 0;
+ int remove_accept_all_filter = 0;
+ int remove_user_filters = 0;
+ int accept_all_filter_existed = 0;
+
+ MPQ_DVB_DBG_PRINT("%s: executed, PID = %d\n", __func__, feed->pid);
+
+ /* determine the TSIF we are reading from */
+ if (mpq_demux->source == DMX_SOURCE_FRONT0) {
+ tsif = 0;
+ tspp_source = TSPP_SOURCE_TSIF0;
+ } else if (mpq_demux->source == DMX_SOURCE_FRONT1) {
+ tsif = 1;
+ tspp_source = TSPP_SOURCE_TSIF1;
+ } else {
+ /* invalid source */
+ MPQ_DVB_ERR_PRINT(
+ "%s: invalid input source (%d)\n",
+ __func__,
+ mpq_demux->source);
+
+ return -EINVAL;
+ }
+
+ atomic_inc(&mpq_dmx_tspp_info.tsif[tsif].control_op);
+ if (mutex_lock_interruptible(&mpq_dmx_tspp_info.tsif[tsif].mutex)) {
+ atomic_dec(&mpq_dmx_tspp_info.tsif[tsif].control_op);
+ return -ERESTARTSYS;
+ }
+
+ channel_id = TSPP_CHANNEL_ID(tsif, TSPP_CHANNEL);
+ channel_ref_count = &mpq_dmx_tspp_info.tsif[tsif].channel_ref;
+ data_cnt = &mpq_dmx_tspp_info.tsif[tsif].data_cnt;
+
+ /* check if required TSPP pipe is already allocated or not */
+ if (*channel_ref_count == 0) {
+ /* invalid feed provided as the channel is not allocated */
+ MPQ_DVB_ERR_PRINT(
+ "%s: invalid feed (%d)\n",
+ __func__,
+ channel_id);
+
+ ret = -EINVAL;
+ goto out;
+ }
+
+ slot = mpq_tspp_get_filter_slot(tsif, feed->pid);
+
+ if (slot < 0) {
+ /* invalid feed provided as it has no filter allocated */
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_tspp_get_filter_slot failed (%d,%d)\n",
+ __func__,
+ feed->pid,
+ tsif);
+
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* since filter was found, ref_count > 0 so it's ok to decrement it */
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count--;
+
+ if (mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count) {
+ /*
+ * there are still references to this pid, do not
+ * remove the filter yet
+ */
+ goto out;
+ }
+
+ if (feed->pid == TSPP_PASS_THROUGH_PID)
+ mpq_dmx_tspp_info.tsif[tsif].pass_all_flag = 0;
+ else if (feed->pid == TSPP_NULL_PACKETS_PID)
+ mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag = 0;
+
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid = -1;
+
+ if (mpq_dmx_tspp_info.tsif[tsif].current_filter_count <=
+ TSPP_MAX_HW_PID_FILTER_NUM) {
+ /* staying in HW filtering mode */
+ tspp_filter.priority =
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].hw_index;
+ ret = tspp_remove_filter(0, channel_id, &tspp_filter);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: tspp_remove_filter failed (%d,%d)\n",
+ __func__,
+ channel_id,
+ tspp_filter.priority);
+
+ goto remove_channel_failed_restore_count;
+ }
+ mpq_tspp_release_hw_filter_index(tsif, tspp_filter.priority);
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].hw_index = -1;
+
+ MPQ_DVB_DBG_PRINT(
+ "%s: HW filtering mode: Removed TSPP HW filter, PID = %d, index = %d\n",
+ __func__, feed->pid, tspp_filter.priority);
+ } else if (mpq_dmx_tspp_info.tsif[tsif].current_filter_count ==
+ (TSPP_MAX_HW_PID_FILTER_NUM + 1)) {
+ /* Crossing the threshold - from SW to HW filtering mode */
+
+ accept_all_filter_existed =
+ mpq_dmx_tspp_info.tsif[tsif].
+ accept_all_filter_exists_flag;
+
+ /* Add a temporary filter to accept all packets */
+ ret = mpq_tspp_add_accept_all_filter(channel_id,
+ tspp_source);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_tspp_add_accept_all_filter(%d, %d) failed\n",
+ __func__, channel_id, tspp_source);
+
+ goto remove_channel_failed_restore_count;
+ }
+
+ ret = mpq_tspp_remove_null_blocking_filters(channel_id,
+ tspp_source);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_tspp_remove_null_blocking_filters(%d, %d) failed\n",
+ __func__, channel_id, tspp_source);
+
+ restore_null_blocking_filters = 1;
+ if (!accept_all_filter_existed)
+ remove_accept_all_filter = 1;
+
+ goto remove_channel_failed_restore_count;
+ }
+
+ ret = mpq_tspp_add_all_user_filters(channel_id,
+ tspp_source);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_tspp_add_all_user_filters(%d, %d) failed\n",
+ __func__, channel_id, tspp_source);
+
+ remove_user_filters = 1;
+ restore_null_blocking_filters = 1;
+ if (!accept_all_filter_existed)
+ remove_accept_all_filter = 1;
+
+ goto remove_channel_failed_restore_count;
+ }
+
+ ret = mpq_tspp_remove_accept_all_filter(channel_id,
+ tspp_source);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_tspp_remove_accept_all_filter(%d, %d) failed\n",
+ __func__, channel_id, tspp_source);
+
+ remove_user_filters = 1;
+ restore_null_blocking_filters = 1;
+ if (!accept_all_filter_existed)
+ remove_accept_all_filter = 1;
+
+ goto remove_channel_failed_restore_count;
+ }
+ } else {
+ /* staying in SW filtering mode */
+ if ((mpq_dmx_tspp_info.tsif[tsif].pass_all_flag == 0) &&
+ (mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag == 0)) {
+
+ ret = mpq_tspp_remove_accept_all_filter(channel_id,
+ tspp_source);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_tspp_remove_accept_all_filter(%d, %d) failed\n",
+ __func__, channel_id,
+ tspp_source);
+
+ goto remove_channel_failed_restore_count;
+ }
+ }
+ }
+
+ mpq_dmx_tspp_info.tsif[tsif].current_filter_count--;
+ (*channel_ref_count)--;
+
+ MPQ_DVB_DBG_PRINT("%s: success, current_filter_count = %d\n",
+ __func__, mpq_dmx_tspp_info.tsif[tsif].current_filter_count);
+
+ if (*channel_ref_count == 0) {
+ /* channel is not used any more, release it */
+ tspp_unregister_notification(0, channel_id);
+ tspp_close_stream(0, channel_id);
+ tspp_close_channel(0, channel_id);
+ atomic_set(data_cnt, 0);
+
+ if (allocation_mode == MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC) {
+ vfree(mpq_dmx_tspp_info.tsif[tsif].aggregate_ids);
+ mpq_dmx_tspp_info.tsif[tsif].aggregate_ids = NULL;
+ mpq_dmx_channel_mem_free(tsif);
+ }
+ }
+
+ goto out;
+
+remove_channel_failed_restore_count:
+ /* restore internal database state */
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid = feed->pid;
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count++;
+
+ if (remove_user_filters)
+ mpq_tspp_remove_all_user_filters(channel_id, tspp_source);
+
+ if (restore_null_blocking_filters)
+ mpq_tspp_add_null_blocking_filters(channel_id, tspp_source);
+
+ if (remove_accept_all_filter)
+ mpq_tspp_remove_accept_all_filter(channel_id, tspp_source);
+
+ /* restore flags. we can only get here if we changed the flags. */
+ if (feed->pid == TSPP_PASS_THROUGH_PID)
+ mpq_dmx_tspp_info.tsif[tsif].pass_all_flag = 1;
+ else if (feed->pid == TSPP_NULL_PACKETS_PID)
+ mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag = 1;
+
+out:
+ mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
+ atomic_dec(&mpq_dmx_tspp_info.tsif[tsif].control_op);
+ return ret;
+}
+
+/*
+ * dvb-demux start_feed() callback of the TSPP plugin.
+ *
+ * For feeds whose input comes from a live TSIF source a TSPP channel /
+ * PID filter is configured first; memory-fed (DVR) inputs skip that step.
+ * On mpq feed init failure the TSPP channel set up here is rolled back.
+ */
+static int mpq_tspp_dmx_start_filtering(struct dvb_demux_feed *feed)
+{
+	int ret;
+	struct mpq_demux *mpq_demux = feed->demux->priv;
+
+	MPQ_DVB_DBG_PRINT(
+		"%s(pid=%d) executed\n",
+		__func__,
+		feed->pid);
+
+	if (mpq_demux == NULL) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: invalid mpq_demux handle\n",
+			__func__);
+
+		return -EINVAL;
+	}
+
+	if (mpq_demux->source < DMX_SOURCE_DVR0) {
+		/* source from TSPP, need to configure tspp pipe */
+		ret = mpq_tspp_dmx_add_channel(feed);
+
+		if (ret < 0) {
+			MPQ_DVB_DBG_PRINT(
+				"%s: mpq_tspp_dmx_add_channel failed(%d)\n",
+				__func__,
+				ret);
+			return ret;
+		}
+	}
+
+	/*
+	 * Always feed sections/PES starting from a new one and
+	 * do not partial transfer data from older one
+	 */
+	feed->pusi_seen = 0;
+
+	ret = mpq_dmx_init_mpq_feed(feed);
+	if (ret) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: mpq_dmx_init_mpq_feed failed(%d)\n",
+			__func__,
+			ret);
+		/* Undo the channel configuration done above */
+		if (mpq_demux->source < DMX_SOURCE_DVR0)
+			mpq_tspp_dmx_remove_channel(feed);
+
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * dvb-demux stop_feed() callback of the TSPP plugin.
+ *
+ * Terminates the mpq feed and, for live TSIF sources, releases the TSPP
+ * channel that was set up by mpq_tspp_dmx_start_filtering().
+ */
+static int mpq_tspp_dmx_stop_filtering(struct dvb_demux_feed *feed)
+{
+	struct mpq_demux *mpq_demux = feed->demux->priv;
+
+	MPQ_DVB_DBG_PRINT("%s(%d) executed\n", __func__, feed->pid);
+
+	mpq_dmx_terminate_feed(feed);
+
+	/* Memory (DVR) inputs have no TSPP channel to tear down */
+	if (mpq_demux->source >= DMX_SOURCE_DVR0)
+		return 0;
+
+	/* source from TSPP, need to configure tspp pipe */
+	return mpq_tspp_dmx_remove_channel(feed);
+}
+
+/*
+ * dvb-demux write_to_decoder() callback.
+ *
+ * Dispatches a single TS packet of the feed to the matching decoder-data
+ * handler (video / audio / PCR); other feed types are silently accepted.
+ *
+ * @feed: the demux feed the packet belongs to
+ * @buf:  one TS packet (with timestamp), TSPP_RAW_TTS_SIZE bytes
+ * @len:  length of @buf
+ *
+ * Return 0 or the handler's error code.
+ */
+static int mpq_tspp_dmx_write_to_decoder(
+	struct dvb_demux_feed *feed,
+	const u8 *buf,
+	size_t len)
+{
+	/*
+	 * It is assumed that this function is called once for each
+	 * TS packet of the relevant feed.
+	 */
+	if (len > TSPP_RAW_TTS_SIZE)
+		MPQ_DVB_DBG_PRINT(
+			"%s: warning - len larger than one packet\n",
+			__func__);
+
+	if (dvb_dmx_is_video_feed(feed))
+		return mpq_dmx_process_video_packet(feed, buf);
+
+	if (dvb_dmx_is_audio_feed(feed))
+		return mpq_dmx_process_audio_packet(feed, buf);
+
+	if (dvb_dmx_is_pcr_feed(feed))
+		return mpq_dmx_process_pcr_packet(feed, buf);
+
+	return 0;
+}
+
+/**
+ * Returns demux capabilities of TSPPv1 plugin
+ *
+ * @demux: demux device
+ * @caps: Returned capabilities
+ *
+ * Return error code
+ */
+static int mpq_tspp_dmx_get_caps(struct dmx_demux *demux,
+				struct dmx_caps *caps)
+{
+	struct dvb_demux *dvb_demux = demux->priv;
+
+	if ((dvb_demux == NULL) || (caps == NULL)) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: invalid parameters\n",
+			__func__);
+
+		return -EINVAL;
+	}
+
+	/* Feature capabilities of this HW plugin */
+	caps->caps = DMX_CAP_PULL_MODE | DMX_CAP_VIDEO_DECODER_DATA |
+		DMX_CAP_TS_INSERTION | DMX_CAP_VIDEO_INDEXING |
+		DMX_CAP_AUDIO_DECODER_DATA | DMX_CAP_AUTO_BUFFER_FLUSH;
+	caps->recording_max_video_pids_indexed = 0;
+	caps->num_decoders = MPQ_ADAPTER_MAX_NUM_OF_INTERFACES;
+	caps->num_demux_devices = CONFIG_DVB_MPQ_NUM_DMX_DEVICES;
+	caps->num_pid_filters = TSPP_MAX_PID_FILTER_NUM;
+	caps->num_section_filters = dvb_demux->filternum;
+	caps->num_section_filters_per_pid = dvb_demux->filternum;
+	caps->section_filter_length = DMX_FILTER_SIZE;
+	caps->num_demod_inputs = TSIF_COUNT;
+	caps->num_memory_inputs = CONFIG_DVB_MPQ_NUM_DMX_DEVICES;
+	/* Bitrates are in Mbps */
+	caps->max_bitrate = 192;
+	caps->demod_input_max_bitrate = 96;
+	caps->memory_input_max_bitrate = 96;
+	caps->num_cipher_ops = 1;
+
+	/* TSIF reports 3 bytes STC at unit of 27MHz/256 */
+	caps->max_stc = (u64)0xFFFFFF * 256;
+
+	/* Buffer requirements */
+	caps->section.flags =
+		DMX_BUFFER_EXTERNAL_SUPPORT |
+		DMX_BUFFER_INTERNAL_SUPPORT |
+		DMX_BUFFER_CACHED;
+	caps->section.max_buffer_num = 1;
+	caps->section.max_size = 0xFFFFFFFF;
+	caps->section.size_alignment = 0;
+	caps->pes.flags =
+		DMX_BUFFER_EXTERNAL_SUPPORT |
+		DMX_BUFFER_INTERNAL_SUPPORT |
+		DMX_BUFFER_CACHED;
+	caps->pes.max_buffer_num = 1;
+	caps->pes.max_size = 0xFFFFFFFF;
+	caps->pes.size_alignment = 0;
+	caps->recording_188_tsp.flags =
+		DMX_BUFFER_EXTERNAL_SUPPORT |
+		DMX_BUFFER_INTERNAL_SUPPORT |
+		DMX_BUFFER_CACHED;
+	caps->recording_188_tsp.max_buffer_num = 1;
+	caps->recording_188_tsp.max_size = 0xFFFFFFFF;
+	caps->recording_188_tsp.size_alignment = 0;
+	caps->recording_192_tsp.flags =
+		DMX_BUFFER_EXTERNAL_SUPPORT |
+		DMX_BUFFER_INTERNAL_SUPPORT |
+		DMX_BUFFER_CACHED;
+	caps->recording_192_tsp.max_buffer_num = 1;
+	caps->recording_192_tsp.max_size = 0xFFFFFFFF;
+	caps->recording_192_tsp.size_alignment = 0;
+	/* Playback buffers must hold whole TS packets (188/192 bytes) */
+	caps->playback_188_tsp.flags =
+		DMX_BUFFER_EXTERNAL_SUPPORT |
+		DMX_BUFFER_INTERNAL_SUPPORT |
+		DMX_BUFFER_CACHED;
+	caps->playback_188_tsp.max_buffer_num = 1;
+	caps->playback_188_tsp.max_size = 0xFFFFFFFF;
+	caps->playback_188_tsp.size_alignment = 188;
+	caps->playback_192_tsp.flags =
+		DMX_BUFFER_EXTERNAL_SUPPORT |
+		DMX_BUFFER_INTERNAL_SUPPORT |
+		DMX_BUFFER_CACHED;
+	caps->playback_192_tsp.max_buffer_num = 1;
+	caps->playback_192_tsp.max_size = 0xFFFFFFFF;
+	caps->playback_192_tsp.size_alignment = 192;
+	caps->decoder.flags =
+		DMX_BUFFER_SECURED_IF_DECRYPTED |
+		DMX_BUFFER_EXTERNAL_SUPPORT |
+		DMX_BUFFER_INTERNAL_SUPPORT |
+		DMX_BUFFER_LINEAR_GROUP_SUPPORT |
+		DMX_BUFFER_CACHED;
+	caps->decoder.max_buffer_num = DMX_MAX_DECODER_BUFFER_NUM;
+	caps->decoder.max_size = 0xFFFFFFFF;
+	caps->decoder.size_alignment = SZ_4K;
+
+	return 0;
+}
+
+
+/**
+ * Reads TSIF STC from TSPP
+ *
+ * @demux: demux device
+ * @num: STC number. 0 for TSIF0 and 1 for TSIF1.
+ * @stc: STC value
+ * @base: divisor to get 90KHz value
+ *
+ * Return error code
+ */
+static int mpq_tspp_dmx_get_stc(struct dmx_demux *demux, unsigned int num,
+		u64 *stc, unsigned int *base)
+{
+	enum tspp_source source;
+	u32 tcr_counter;
+	u64 avtimer_stc = 0;
+	int tts_source = 0;
+
+	if (!demux || !stc || !base)
+		return -EINVAL;
+
+	if (num == 0)
+		source = TSPP_SOURCE_TSIF0;
+	else if (num == 1)
+		source = TSPP_SOURCE_TSIF1;
+	else
+		return -EINVAL;
+
+	/* Fall back to the TCR reference clock if the query fails */
+	if (tspp_get_tts_source(0, &tts_source) < 0)
+		tts_source = TSIF_TTS_TCR;
+
+	if (tts_source != TSIF_TTS_LPASS_TIMER) {
+		tspp_get_ref_clk_counter(0, source, &tcr_counter);
+		*stc = ((u64)tcr_counter) * 256; /* conversion to 27MHz */
+		*base = 300; /* divisor to get 90KHz clock from stc value */
+	} else {
+		if (tspp_get_lpass_time_counter(0, source, &avtimer_stc) < 0)
+			return -EINVAL;
+		*stc = avtimer_stc;
+		/*
+		 * NOTE(review): *base is left untouched on this path --
+		 * callers receive whatever value it held. Confirm whether
+		 * the LPASS timer STC needs a divisor as well.
+		 */
+	}
+	return 0;
+}
+
+/*
+ * Initializes a single demux device of the TSPP plugin: fills in the
+ * dvb-demux and dmxdev callback tables with this plugin's handlers and
+ * registers both objects with the given DVB adapter.
+ *
+ * @mpq_adapter: DVB adapter to register the demux device on
+ * @mpq_demux:   the mpq demux instance to initialize
+ *
+ * Return error code; on failure everything initialized here is released.
+ */
+static int mpq_tspp_dmx_init(
+	struct dvb_adapter *mpq_adapter,
+	struct mpq_demux *mpq_demux)
+{
+	int result;
+
+	MPQ_DVB_DBG_PRINT("%s executed\n", __func__);
+
+	mpq_dmx_tspp_info.ion_client = mpq_demux->ion_client;
+
+	/* Set the kernel-demux object capabilities */
+	mpq_demux->demux.dmx.capabilities =
+		DMX_TS_FILTERING |
+		DMX_PES_FILTERING |
+		DMX_SECTION_FILTERING |
+		DMX_MEMORY_BASED_FILTERING |
+		DMX_CRC_CHECKING |
+		DMX_TS_DESCRAMBLING;
+
+	mpq_demux->decoder_alloc_flags = ION_FLAG_CACHED;
+
+	/* Set dvb-demux "virtual" function pointers */
+	mpq_demux->demux.priv = (void *)mpq_demux;
+	mpq_demux->demux.filternum = TSPP_MAX_SECTION_FILTER_NUM;
+	mpq_demux->demux.feednum = MPQ_MAX_DMX_FILES;
+	mpq_demux->demux.start_feed = mpq_tspp_dmx_start_filtering;
+	mpq_demux->demux.stop_feed = mpq_tspp_dmx_stop_filtering;
+	mpq_demux->demux.write_to_decoder = mpq_tspp_dmx_write_to_decoder;
+	mpq_demux->demux.decoder_fullness_init = mpq_dmx_decoder_fullness_init;
+	mpq_demux->demux.decoder_fullness_wait = mpq_dmx_decoder_fullness_wait;
+	mpq_demux->demux.decoder_fullness_abort =
+		mpq_dmx_decoder_fullness_abort;
+	mpq_demux->demux.decoder_buffer_status = mpq_dmx_decoder_buffer_status;
+	mpq_demux->demux.reuse_decoder_buffer = mpq_dmx_reuse_decoder_buffer;
+	mpq_demux->demux.set_cipher_op = mpq_dmx_set_cipher_ops;
+	mpq_demux->demux.oob_command = mpq_dmx_oob_command;
+	mpq_demux->demux.convert_ts = mpq_dmx_convert_tts;
+	mpq_demux->demux.flush_decoder_buffer = NULL;
+
+	/* Initialize dvb_demux object */
+	result = dvb_dmx_init(&mpq_demux->demux);
+	if (result < 0) {
+		MPQ_DVB_ERR_PRINT("%s: dvb_dmx_init failed\n", __func__);
+		goto init_failed;
+	}
+
+	/* Now initailize the dmx-dev object */
+	mpq_demux->dmxdev.filternum = MPQ_MAX_DMX_FILES;
+	mpq_demux->dmxdev.demux = &mpq_demux->demux.dmx;
+	mpq_demux->dmxdev.capabilities = DMXDEV_CAP_DUPLEX;
+
+	mpq_demux->dmxdev.demux->set_source = mpq_dmx_set_source;
+	mpq_demux->dmxdev.demux->get_stc = mpq_tspp_dmx_get_stc;
+	mpq_demux->dmxdev.demux->get_caps = mpq_tspp_dmx_get_caps;
+	mpq_demux->dmxdev.demux->map_buffer = mpq_dmx_map_buffer;
+	mpq_demux->dmxdev.demux->unmap_buffer = mpq_dmx_unmap_buffer;
+	mpq_demux->dmxdev.demux->write = mpq_dmx_write;
+	result = dvb_dmxdev_init(&mpq_demux->dmxdev, mpq_adapter);
+	if (result < 0) {
+		MPQ_DVB_ERR_PRINT("%s: dvb_dmxdev_init failed (errno=%d)\n",
+						  __func__,
+						  result);
+		goto init_failed_dmx_release;
+	}
+
+	/* Extend dvb-demux debugfs with TSPP statistics. */
+	mpq_dmx_init_debugfs_entries(mpq_demux);
+
+	/* Get the TSIF TTS info */
+	if (tspp_get_tts_source(0, &mpq_demux->ts_packet_timestamp_source) < 0)
+		mpq_demux->ts_packet_timestamp_source = TSIF_TTS_TCR;
+
+	return 0;
+
+init_failed_dmx_release:
+	dvb_dmx_release(&mpq_demux->demux);
+init_failed:
+	return result;
+}
+
+/*
+ * Module init: initializes the per-TSIF bookkeeping, starts one worker
+ * thread per TSIF, and registers the plugin with the mpq demux core.
+ *
+ * Return 0 on success, negative error code otherwise; on any failure
+ * every thread/mutex created so far is torn down.
+ */
+static int __init mpq_dmx_tspp_plugin_init(void)
+{
+	int i;
+	int j;
+	int ret;
+
+	MPQ_DVB_DBG_PRINT("%s executed\n", __func__);
+
+	for (i = 0; i < TSIF_COUNT; i++) {
+		mpq_dmx_tspp_info.tsif[i].aggregate_ids = NULL;
+		mpq_dmx_tspp_info.tsif[i].channel_ref = 0;
+		mpq_dmx_tspp_info.tsif[i].buff_index = 0;
+		mpq_dmx_tspp_info.tsif[i].ch_mem_heap_handle = NULL;
+		mpq_dmx_tspp_info.tsif[i].ch_mem_heap_virt_base = NULL;
+		mpq_dmx_tspp_info.tsif[i].ch_mem_heap_phys_base = 0;
+		atomic_set(&mpq_dmx_tspp_info.tsif[i].data_cnt, 0);
+		atomic_set(&mpq_dmx_tspp_info.tsif[i].control_op, 0);
+
+		for (j = 0; j < TSPP_MAX_PID_FILTER_NUM; j++) {
+			mpq_dmx_tspp_info.tsif[i].filters[j].pid = -1;
+			mpq_dmx_tspp_info.tsif[i].filters[j].ref_count = 0;
+			mpq_dmx_tspp_info.tsif[i].filters[j].hw_index = -1;
+		}
+
+		for (j = 0; j < TSPP_MAX_HW_PID_FILTER_NUM; j++)
+			mpq_dmx_tspp_info.tsif[i].hw_indexes[j] = 0;
+
+		mpq_dmx_tspp_info.tsif[i].current_filter_count = 0;
+		mpq_dmx_tspp_info.tsif[i].pass_nulls_flag = 0;
+		mpq_dmx_tspp_info.tsif[i].pass_all_flag = 0;
+		mpq_dmx_tspp_info.tsif[i].accept_all_filter_exists_flag = 0;
+
+		snprintf(mpq_dmx_tspp_info.tsif[i].name,
+				TSIF_NAME_LENGTH,
+				"dmx_tsif%d",
+				i);
+
+		init_waitqueue_head(&mpq_dmx_tspp_info.tsif[i].wait_queue);
+
+		/*
+		 * The mutex must be fully initialized before the worker
+		 * thread is started, since the thread may take it as soon
+		 * as it runs.
+		 */
+		mutex_init(&mpq_dmx_tspp_info.tsif[i].mutex);
+
+		mpq_dmx_tspp_info.tsif[i].thread =
+			kthread_run(
+				mpq_dmx_tspp_thread, (void *)(uintptr_t)i,
+				mpq_dmx_tspp_info.tsif[i].name);
+
+		if (IS_ERR(mpq_dmx_tspp_info.tsif[i].thread)) {
+			/* No thread was created for index i - only undo
+			 * its mutex, then tear down all previous entries.
+			 */
+			mutex_destroy(&mpq_dmx_tspp_info.tsif[i].mutex);
+			for (j = 0; j < i; j++) {
+				kthread_stop(mpq_dmx_tspp_info.tsif[j].thread);
+				mutex_destroy(&mpq_dmx_tspp_info.tsif[j].mutex);
+			}
+
+			MPQ_DVB_ERR_PRINT(
+				"%s: kthread_run failed\n",
+				__func__);
+
+			return -ENOMEM;
+		}
+	}
+
+	ret = mpq_dmx_plugin_init(mpq_tspp_dmx_init);
+
+	if (ret < 0) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: mpq_dmx_plugin_init failed (errno=%d)\n",
+			__func__,
+			ret);
+
+		for (i = 0; i < TSIF_COUNT; i++) {
+			kthread_stop(mpq_dmx_tspp_info.tsif[i].thread);
+			mutex_destroy(&mpq_dmx_tspp_info.tsif[i].mutex);
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * Module exit: releases any still-open TSPP channels, stops the per-TSIF
+ * worker threads and unregisters the plugin from the mpq demux core.
+ */
+static void __exit mpq_dmx_tspp_plugin_exit(void)
+{
+	int i;
+
+	MPQ_DVB_DBG_PRINT("%s executed\n", __func__);
+
+	for (i = 0; i < TSIF_COUNT; i++) {
+		mutex_lock(&mpq_dmx_tspp_info.tsif[i].mutex);
+
+		/*
+		 * Note: tspp_close_channel will also free the TSPP buffers
+		 * even if we allocated them ourselves,
+		 * using our free function.
+		 */
+		if (mpq_dmx_tspp_info.tsif[i].channel_ref) {
+			tspp_unregister_notification(0,
+				TSPP_CHANNEL_ID(i, TSPP_CHANNEL));
+			tspp_close_channel(0,
+				TSPP_CHANNEL_ID(i, TSPP_CHANNEL));
+
+			if (allocation_mode ==
+				MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC) {
+				vfree(mpq_dmx_tspp_info.tsif[i].aggregate_ids);
+				mpq_dmx_tspp_info.tsif[i].aggregate_ids = NULL;
+				mpq_dmx_channel_mem_free(i);
+			}
+		}
+
+		/* Stop the worker only after the channel is released */
+		mutex_unlock(&mpq_dmx_tspp_info.tsif[i].mutex);
+		kthread_stop(mpq_dmx_tspp_info.tsif[i].thread);
+		mutex_destroy(&mpq_dmx_tspp_info.tsif[i].mutex);
+	}
+
+	mpq_dmx_plugin_exit();
+}
+
+
+module_init(mpq_dmx_tspp_plugin_init);
+module_exit(mpq_dmx_tspp_plugin_exit);
+
+MODULE_DESCRIPTION("Qualcomm Technologies Inc. demux TSPP version 1 HW Plugin");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_sdmx.c b/drivers/media/platform/msm/dvb/demux/mpq_sdmx.c
new file mode 100644
index 0000000..860c365
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/demux/mpq_sdmx.c
@@ -0,0 +1,1023 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include "qseecom_kernel.h"
+#include "mpq_sdmx.h"
+
+static struct qseecom_handle *sdmx_qseecom_handles[SDMX_MAX_SESSIONS];
+static struct mutex sdmx_lock[SDMX_MAX_SESSIONS];
+
+#define QSEECOM_SBUFF_SIZE SZ_128K
+
+/*
+ * Command identifiers sent to the TZ secure demux application.
+ * NOTE(review): these values are part of the protocol with the TZ app;
+ * the order must match the TZ side and must not be changed.
+ */
+enum sdmx_cmd_id {
+	SDMX_OPEN_SESSION_CMD,
+	SDMX_CLOSE_SESSION_CMD,
+	SDMX_SET_SESSION_CFG_CMD,
+	SDMX_ADD_FILTER_CMD,
+	SDMX_REMOVE_FILTER_CMD,
+	SDMX_SET_KL_IDX_CMD,
+	SDMX_ADD_RAW_PID_CMD,
+	SDMX_REMOVE_RAW_PID_CMD,
+	SDMX_PROCESS_CMD,
+	SDMX_GET_DBG_COUNTERS_CMD,
+	SDMX_RESET_DBG_COUNTERS_CMD,
+	SDMX_GET_VERSION_CMD,
+	SDMX_INVALIDATE_KL_CMD,
+	SDMX_SET_LOG_LEVEL_CMD
+};
+
+#pragma pack(push, sdmx, 1)
+
+/*
+ * Request/response structures exchanged with the TZ secure demux app
+ * through the QSEECom shared buffer. They are byte-packed so their
+ * layout matches the TZ side exactly; do not reorder or resize fields.
+ * Structures ending in a flexible array member carry a variable number
+ * of trailing entries, sized by the preceding count field.
+ */
+struct sdmx_proc_req {
+	enum sdmx_cmd_id cmd_id;
+	u32 session_handle;
+	u8 flags;
+	struct sdmx_buff_descr in_buf_descr;
+	u32 inp_fill_cnt;
+	u32 in_rd_offset;
+	u32 num_filters;
+	struct sdmx_filter_status filters_status[];
+};
+
+struct sdmx_proc_rsp {
+	enum sdmx_status ret;
+	u32 inp_fill_cnt;
+	u32 in_rd_offset;
+	u32 err_indicators;
+	u32 status_indicators;
+};
+
+struct sdmx_open_ses_req {
+	enum sdmx_cmd_id cmd_id;
+};
+
+struct sdmx_open_ses_rsp {
+	enum sdmx_status ret;
+	u32 session_handle;
+};
+
+struct sdmx_close_ses_req {
+	enum sdmx_cmd_id cmd_id;
+	u32 session_handle;
+};
+
+struct sdmx_close_ses_rsp {
+	enum sdmx_status ret;
+};
+
+struct sdmx_ses_cfg_req {
+	enum sdmx_cmd_id cmd_id;
+	u32 session_handle;
+	enum sdmx_proc_mode process_mode;
+	enum sdmx_inp_mode input_mode;
+	enum sdmx_pkt_format packet_len;
+	u8 odd_scramble_bits;
+	u8 even_scramble_bits;
+};
+
+struct sdmx_ses_cfg_rsp {
+	enum sdmx_status ret;
+};
+
+struct sdmx_set_kl_ind_req {
+	enum sdmx_cmd_id cmd_id;
+	u32 session_handle;
+	u32 pid;
+	u32 kl_index;
+};
+
+struct sdmx_set_kl_ind_rsp {
+	enum sdmx_status ret;
+};
+
+struct sdmx_add_filt_req {
+	enum sdmx_cmd_id cmd_id;
+	u32 session_handle;
+	u32 pid;
+	enum sdmx_filter filter_type;
+	struct sdmx_buff_descr meta_data_buf;
+	enum sdmx_buf_mode buffer_mode;
+	enum sdmx_raw_out_format ts_out_format;
+	u32 flags;
+	u32 num_data_bufs;
+	struct sdmx_data_buff_descr data_bufs[];
+};
+
+struct sdmx_add_filt_rsp {
+	enum sdmx_status ret;
+	u32 filter_handle;
+};
+
+struct sdmx_rem_filt_req {
+	enum sdmx_cmd_id cmd_id;
+	u32 session_handle;
+	u32 filter_handle;
+};
+
+struct sdmx_rem_filt_rsp {
+	enum sdmx_status ret;
+};
+
+struct sdmx_add_raw_req {
+	enum sdmx_cmd_id cmd_id;
+	u32 session_handle;
+	u32 filter_handle;
+	u32 pid;
+};
+
+struct sdmx_add_raw_rsp {
+	enum sdmx_status ret;
+};
+
+struct sdmx_rem_raw_req {
+	enum sdmx_cmd_id cmd_id;
+	u32 session_handle;
+	u32 filter_handle;
+	u32 pid;
+};
+
+struct sdmx_rem_raw_rsp {
+	enum sdmx_status ret;
+};
+
+struct sdmx_get_counters_req {
+	enum sdmx_cmd_id cmd_id;
+	u32 session_handle;
+	u32 num_filters;
+};
+
+struct sdmx_get_counters_rsp {
+	enum sdmx_status ret;
+	struct sdmx_session_dbg_counters session_counters;
+	u32 num_filters;
+	struct sdmx_filter_dbg_counters filter_counters[];
+};
+
+struct sdmx_rst_counters_req {
+	enum sdmx_cmd_id cmd_id;
+	u32 session_handle;
+};
+
+struct sdmx_rst_counters_rsp {
+	enum sdmx_status ret;
+};
+
+struct sdmx_get_version_req {
+	enum sdmx_cmd_id cmd_id;
+};
+
+struct sdmx_get_version_rsp {
+	enum sdmx_status ret;
+	int32_t version;
+};
+
+struct sdmx_set_log_level_req {
+	enum sdmx_cmd_id cmd_id;
+	enum sdmx_log_level level;
+	u32 session_handle;
+};
+
+struct sdmx_set_log_level_rsp {
+	enum sdmx_status ret;
+};
+
+#pragma pack(pop, sdmx)
+
+/*
+ * Carves the session's QSEECom shared buffer into a command region
+ * followed by a response region. Both lengths are rounded up to the
+ * QSEECom alignment; fails if the two regions do not fit into the
+ * shared buffer.
+ */
+static int get_cmd_rsp_buffers(int handle_index,
+	void **cmd,
+	int *cmd_len,
+	void **rsp,
+	int *rsp_len)
+{
+	struct qseecom_handle *handle = sdmx_qseecom_handles[handle_index];
+
+	if (*cmd_len & QSEECOM_ALIGN_MASK)
+		*cmd_len = QSEECOM_ALIGN(*cmd_len);
+
+	if (*rsp_len & QSEECOM_ALIGN_MASK)
+		*rsp_len = QSEECOM_ALIGN(*rsp_len);
+
+	if ((*cmd_len + *rsp_len) > QSEECOM_SBUFF_SIZE) {
+		pr_err("%s: shared buffer too small to hold cmd=%d and rsp=%d\n",
+			__func__, *cmd_len, *rsp_len);
+		return SDMX_STATUS_OUT_OF_MEM;
+	}
+
+	/* The response region starts right after the aligned command */
+	*cmd = handle->sbuf;
+	*rsp = handle->sbuf + *cmd_len;
+	return SDMX_SUCCESS;
+}
+
+/*
+ * Returns the version of the secure-demux TZ application.
+ *
+ * @session_handle: handle of an open secure demux instance.
+ * @version: returned TZ application version. Must not be NULL.
+ * Return error code
+ */
+int sdmx_get_version(int session_handle, int32_t *version)
+{
+	int res, cmd_len, rsp_len;
+	struct sdmx_get_version_req *cmd;
+	struct sdmx_get_version_rsp *rsp;
+	enum sdmx_status ret;
+
+	if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS) ||
+		(version == NULL))
+		return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+	cmd_len = sizeof(struct sdmx_get_version_req);
+	rsp_len = sizeof(struct sdmx_get_version_rsp);
+
+	/* Lock shared memory */
+	mutex_lock(&sdmx_lock[session_handle]);
+
+	/* Get command and response buffers */
+	ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+		(void **)&rsp, &rsp_len);
+	if (ret)
+		goto out;
+
+	/* Populate command struct */
+	cmd->cmd_id = SDMX_GET_VERSION_CMD;
+
+	/* Issue QSEECom command */
+	res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+		(void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+	if (res < 0) {
+		mutex_unlock(&sdmx_lock[session_handle]);
+		return SDMX_STATUS_GENERAL_FAILURE;
+	}
+
+	ret = rsp->ret;
+	*version = rsp->version;
+out:
+	mutex_unlock(&sdmx_lock[session_handle]);
+
+	return ret;
+
+}
+EXPORT_SYMBOL(sdmx_get_version);
+
+/*
+ * Initializes a new secure demux instance and returns a handle of the
+ * instance.
+ *
+ * @session_handle: Returned instance handle. Must not be NULL.
+ * Return error code
+ */
+int sdmx_open_session(int *session_handle)
+{
+	int res, cmd_len, rsp_len;
+	enum sdmx_status ret, version_ret;
+	struct sdmx_open_ses_req *cmd;
+	struct sdmx_open_ses_rsp *rsp;
+	struct qseecom_handle *qseecom_handle = NULL;
+	int32_t version;
+
+	/* Input validation */
+	if (session_handle == NULL)
+		return SDMX_STATUS_GENERAL_FAILURE;
+
+	/* Start the TZ app */
+	res = qseecom_start_app(&qseecom_handle, "securemm",
+		QSEECOM_SBUFF_SIZE);
+
+	if (res < 0)
+		return SDMX_STATUS_GENERAL_FAILURE;
+
+	cmd_len = sizeof(struct sdmx_open_ses_req);
+	rsp_len = sizeof(struct sdmx_open_ses_rsp);
+
+	if (cmd_len & QSEECOM_ALIGN_MASK)
+		cmd_len = QSEECOM_ALIGN(cmd_len);
+
+	if (rsp_len & QSEECOM_ALIGN_MASK)
+		rsp_len = QSEECOM_ALIGN(rsp_len);
+
+	/*
+	 * Get command and response buffers. The response follows the
+	 * aligned command in the shared buffer; the offset must be applied
+	 * to the byte pointer, not to the response-struct pointer.
+	 */
+	cmd = (struct sdmx_open_ses_req *)qseecom_handle->sbuf;
+	rsp = (struct sdmx_open_ses_rsp *)(qseecom_handle->sbuf + cmd_len);
+
+	/* Will be later overridden by SDMX response */
+	*session_handle = SDMX_INVALID_SESSION_HANDLE;
+
+	/* Populate command struct */
+	cmd->cmd_id = SDMX_OPEN_SESSION_CMD;
+
+	/* Issue QSEECom command */
+	res = qseecom_send_command(qseecom_handle, (void *)cmd, cmd_len,
+		(void *)rsp, rsp_len);
+
+	if (res < 0) {
+		qseecom_shutdown_app(&qseecom_handle);
+		return SDMX_STATUS_GENERAL_FAILURE;
+	}
+
+	/*
+	 * Validate the returned handle before using it to index the
+	 * session tables - a failed/garbage response must not cause an
+	 * out-of-bounds write.
+	 */
+	if (rsp->session_handle >= SDMX_MAX_SESSIONS) {
+		qseecom_shutdown_app(&qseecom_handle);
+		return SDMX_STATUS_GENERAL_FAILURE;
+	}
+
+	/* Parse response struct */
+	*session_handle = rsp->session_handle;
+
+	/* Initialize handle and mutex */
+	sdmx_qseecom_handles[*session_handle] = qseecom_handle;
+	mutex_init(&sdmx_lock[*session_handle]);
+	ret = rsp->ret;
+
+	/* Get and print the app version */
+	version_ret = sdmx_get_version(*session_handle, &version);
+	if (version_ret == SDMX_SUCCESS)
+		pr_info("TZ SDMX version is %x.%x\n", version >> 8,
+			version & 0xFF);
+	else
+		pr_err("Error reading TZ SDMX version\n");
+
+	return ret;
+}
+EXPORT_SYMBOL(sdmx_open_session);
+
+/*
+ * Closes a secure demux instance.
+ *
+ * @session_handle: handle of a secure demux instance to close.
+ * Return error code
+ */
+int sdmx_close_session(int session_handle)
+{
+	int res, cmd_len, rsp_len;
+	struct sdmx_close_ses_req *cmd;
+	struct sdmx_close_ses_rsp *rsp;
+	enum sdmx_status ret;
+
+	if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS))
+		return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+	cmd_len = sizeof(struct sdmx_close_ses_req);
+	rsp_len = sizeof(struct sdmx_close_ses_rsp);
+
+	/* Lock shared memory */
+	mutex_lock(&sdmx_lock[session_handle]);
+
+	/* Get command and response buffers */
+	ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+		(void **)&rsp, &rsp_len);
+	if (ret)
+		goto out;
+
+	/* Populate command struct */
+	cmd->cmd_id = SDMX_CLOSE_SESSION_CMD;
+	cmd->session_handle = session_handle;
+
+	/*
+	 * Issue QSEECom command.
+	 * NOTE(review): on a send failure the session handle remains
+	 * registered (the app is not shut down here) - confirm callers
+	 * retry the close in that case.
+	 */
+	res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+		(void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+	if (res < 0) {
+		mutex_unlock(&sdmx_lock[session_handle]);
+		return SDMX_STATUS_GENERAL_FAILURE;
+	}
+
+	ret = rsp->ret;
+
+	/* Shutdown the TZ app (or at least free the current handle) */
+	res = qseecom_shutdown_app(&sdmx_qseecom_handles[session_handle]);
+	if (res < 0) {
+		mutex_unlock(&sdmx_lock[session_handle]);
+		return SDMX_STATUS_GENERAL_FAILURE;
+	}
+
+	/* Mark the slot free so the handle cannot be reused after close */
+	sdmx_qseecom_handles[session_handle] = NULL;
+out:
+	mutex_unlock(&sdmx_lock[session_handle]);
+
+	return ret;
+}
+EXPORT_SYMBOL(sdmx_close_session);
+
+/*
+ * Configures an open secure demux instance.
+ *
+ * @session_handle: secure demux instance
+ * @proc_mode: Defines secure demux's behavior in case of output
+ * buffer overflow.
+ * @inp_mode: Defines the input encryption settings.
+ * @pkt_format: TS packet length in input buffer.
+ * @odd_scramble_bits: Value of the scramble bits indicating the ODD key.
+ * @even_scramble_bits: Value of the scramble bits indicating the EVEN key.
+ * Return error code
+ */
+int sdmx_set_session_cfg(int session_handle,
+	enum sdmx_proc_mode proc_mode,
+	enum sdmx_inp_mode inp_mode,
+	enum sdmx_pkt_format pkt_format,
+	u8 odd_scramble_bits,
+	u8 even_scramble_bits)
+{
+	struct sdmx_ses_cfg_req *req;
+	struct sdmx_ses_cfg_rsp *rsp;
+	enum sdmx_status ret;
+	int res;
+	int req_len = sizeof(struct sdmx_ses_cfg_req);
+	int rsp_len = sizeof(struct sdmx_ses_cfg_rsp);
+
+	if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS))
+		return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+	/* Serialize access to this session's shared buffer */
+	mutex_lock(&sdmx_lock[session_handle]);
+
+	ret = get_cmd_rsp_buffers(session_handle, (void **)&req, &req_len,
+		(void **)&rsp, &rsp_len);
+	if (ret)
+		goto unlock;
+
+	/* Build the configuration command */
+	req->cmd_id = SDMX_SET_SESSION_CFG_CMD;
+	req->session_handle = session_handle;
+	req->process_mode = proc_mode;
+	req->input_mode = inp_mode;
+	req->packet_len = pkt_format;
+	req->odd_scramble_bits = odd_scramble_bits;
+	req->even_scramble_bits = even_scramble_bits;
+
+	/* Hand the command to the TZ app */
+	res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+		(void *)req, req_len, (void *)rsp, rsp_len);
+	if (res < 0) {
+		ret = SDMX_STATUS_GENERAL_FAILURE;
+		goto unlock;
+	}
+
+	ret = rsp->ret;
+unlock:
+	mutex_unlock(&sdmx_lock[session_handle]);
+
+	return ret;
+}
+EXPORT_SYMBOL(sdmx_set_session_cfg);
+
+/*
+ * Creates a new secure demux filter and returns a filter handle
+ *
+ * @session_handle: secure demux instance
+ * @pid: pid to filter
+ * @filter_type: type of filtering
+ * @meta_data_buf: meta data buffer descriptor
+ * @data_buf_mode: data buffer mode (ring/linear)
+ * @num_data_bufs: number of data buffers (use 1 for a ring buffer)
+ * @data_bufs: data buffers descriptors array
+ * @filter_handle: returned filter handle
+ * @ts_out_format: output format for raw filters
+ * @flags: optional flags for filter
+ * (currently only clear section CRC verification is supported)
+ *
+ * Return error code
+ */
+int sdmx_add_filter(int session_handle,
+	u16 pid,
+	enum sdmx_filter filterype,
+	struct sdmx_buff_descr *meta_data_buf,
+	enum sdmx_buf_mode d_buf_mode,
+	u32 num_data_bufs,
+	struct sdmx_data_buff_descr *data_bufs,
+	int *filter_handle,
+	enum sdmx_raw_out_format ts_out_format,
+	u32 flags)
+{
+	int res, cmd_len, rsp_len;
+	struct sdmx_add_filt_req *cmd;
+	struct sdmx_add_filt_rsp *rsp;
+	enum sdmx_status ret;
+
+	if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS) ||
+		(filter_handle == NULL))
+		return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+	/*
+	 * Request carries num_data_bufs trailing descriptors (flexible
+	 * array member of sdmx_add_filt_req).
+	 * NOTE(review): num_data_bufs is caller-controlled and unchecked
+	 * here; a huge value would overflow cmd_len. Confirm callers bound
+	 * it before exposing this path to untrusted input.
+	 */
+	cmd_len = sizeof(struct sdmx_add_filt_req)
+		+ num_data_bufs * sizeof(struct sdmx_data_buff_descr);
+	rsp_len = sizeof(struct sdmx_add_filt_rsp);
+
+	/* Will be later overridden by SDMX response */
+	*filter_handle = SDMX_INVALID_FILTER_HANDLE;
+
+	/* Lock shared memory */
+	mutex_lock(&sdmx_lock[session_handle]);
+
+	/* Get command and response buffers */
+	ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+		(void **)&rsp, &rsp_len);
+	if (ret)
+		goto out;
+
+	/* Populate command struct */
+	cmd->cmd_id = SDMX_ADD_FILTER_CMD;
+	cmd->session_handle = session_handle;
+	cmd->pid = (u32)pid;
+	cmd->filter_type = filterype;
+	cmd->ts_out_format = ts_out_format;
+	cmd->flags = flags;
+	if (meta_data_buf != NULL)
+		memcpy(&(cmd->meta_data_buf), meta_data_buf,
+				sizeof(struct sdmx_buff_descr));
+	else
+		memset(&(cmd->meta_data_buf), 0, sizeof(cmd->meta_data_buf));
+
+	cmd->buffer_mode = d_buf_mode;
+	cmd->num_data_bufs = num_data_bufs;
+	/*
+	 * NOTE(review): data_bufs is dereferenced unconditionally; callers
+	 * must pass a valid array whenever num_data_bufs > 0.
+	 */
+	memcpy(cmd->data_bufs, data_bufs,
+			num_data_bufs * sizeof(struct sdmx_data_buff_descr));
+
+	/* Issue QSEECom command */
+	res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+		(void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+	if (res < 0) {
+		mutex_unlock(&sdmx_lock[session_handle]);
+		return SDMX_STATUS_GENERAL_FAILURE;
+	}
+
+	/* Parse response struct */
+	*filter_handle = rsp->filter_handle;
+	ret = rsp->ret;
+out:
+	mutex_unlock(&sdmx_lock[session_handle]);
+
+	return ret;
+}
+EXPORT_SYMBOL(sdmx_add_filter);
+
+/*
+ * Removes a secure demux filter
+ *
+ * @session_handle: secure demux instance
+ * @filter_handle: filter handle to remove
+ *
+ * Return error code
+ */
+int sdmx_remove_filter(int session_handle, int filter_handle)
+{
+	struct sdmx_rem_filt_req *req;
+	struct sdmx_rem_filt_rsp *rsp;
+	enum sdmx_status ret;
+	int res;
+	int req_len = sizeof(struct sdmx_rem_filt_req);
+	int rsp_len = sizeof(struct sdmx_rem_filt_rsp);
+
+	if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS))
+		return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+	/* Serialize access to this session's shared buffer */
+	mutex_lock(&sdmx_lock[session_handle]);
+
+	ret = get_cmd_rsp_buffers(session_handle, (void **)&req, &req_len,
+		(void **)&rsp, &rsp_len);
+	if (ret)
+		goto unlock;
+
+	/* Build the remove-filter command */
+	req->cmd_id = SDMX_REMOVE_FILTER_CMD;
+	req->session_handle = session_handle;
+	req->filter_handle = filter_handle;
+
+	/* Hand the command to the TZ app */
+	res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+		(void *)req, req_len, (void *)rsp, rsp_len);
+	if (res < 0) {
+		ret = SDMX_STATUS_GENERAL_FAILURE;
+		goto unlock;
+	}
+
+	ret = rsp->ret;
+unlock:
+	mutex_unlock(&sdmx_lock[session_handle]);
+
+	return ret;
+}
+EXPORT_SYMBOL(sdmx_remove_filter);
+
+/*
+ * Associates a key ladder index for the specified pid
+ *
+ * @session_handle: secure demux instance
+ * @pid: pid
+ * @key_ladder_index: key ladder index to associate to the pid
+ *
+ * Return error code
+ *
+ * Note: if pid already has some key ladder index associated, it will be
+ * overridden.
+ */
+int sdmx_set_kl_ind(int session_handle, u16 pid, u32 key_ladder_index)
+{
+	int res, cmd_len, rsp_len;
+	struct sdmx_set_kl_ind_req *cmd;
+	struct sdmx_set_kl_ind_rsp *rsp;
+	enum sdmx_status ret;
+
+	if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS))
+		return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+	cmd_len = sizeof(struct sdmx_set_kl_ind_req);
+	rsp_len = sizeof(struct sdmx_set_kl_ind_rsp);
+
+	/* Lock shared memory */
+	mutex_lock(&sdmx_lock[session_handle]);
+
+	/* Get command and response buffers */
+	ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+		(void **)&rsp, &rsp_len);
+	if (ret)
+		goto out;
+
+	/* Populate command struct; PID is widened to the wire format */
+	cmd->cmd_id = SDMX_SET_KL_IDX_CMD;
+	cmd->session_handle = session_handle;
+	cmd->pid = (u32)pid;
+	cmd->kl_index = key_ladder_index;
+
+	/* Issue QSEECom command */
+	res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+		(void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+	if (res < 0) {
+		mutex_unlock(&sdmx_lock[session_handle]);
+		return SDMX_STATUS_GENERAL_FAILURE;
+	}
+
+	ret = rsp->ret;
+out:
+	mutex_unlock(&sdmx_lock[session_handle]);
+
+	return ret;
+}
+EXPORT_SYMBOL(sdmx_set_kl_ind);
+
+/*
+ * Adds the specified pid to an existing raw (recording) filter
+ *
+ * @session_handle: secure demux instance
+ * @filter_handle: raw filter handle
+ * @pid: pid
+ *
+ * Return error code
+ */
+int sdmx_add_raw_pid(int session_handle, int filter_handle, u16 pid)
+{
+	struct sdmx_add_raw_req *req;
+	struct sdmx_add_raw_rsp *rsp;
+	enum sdmx_status ret;
+	int res;
+	int req_len = sizeof(struct sdmx_add_raw_req);
+	int rsp_len = sizeof(struct sdmx_add_raw_rsp);
+
+	if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS))
+		return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+	/* Serialize access to this session's shared buffer */
+	mutex_lock(&sdmx_lock[session_handle]);
+
+	ret = get_cmd_rsp_buffers(session_handle, (void **)&req, &req_len,
+		(void **)&rsp, &rsp_len);
+	if (ret)
+		goto unlock;
+
+	/* Build the add-raw-pid command */
+	req->cmd_id = SDMX_ADD_RAW_PID_CMD;
+	req->session_handle = session_handle;
+	req->filter_handle = filter_handle;
+	req->pid = (u32)pid;
+
+	/* Hand the command to the TZ app */
+	res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+		(void *)req, req_len, (void *)rsp, rsp_len);
+	if (res < 0) {
+		ret = SDMX_STATUS_GENERAL_FAILURE;
+		goto unlock;
+	}
+
+	ret = rsp->ret;
+unlock:
+	mutex_unlock(&sdmx_lock[session_handle]);
+
+	return ret;
+}
+EXPORT_SYMBOL(sdmx_add_raw_pid);
+
+/*
+ * Removes the specified pid from a raw (recording) filter
+ *
+ * @session_handle: secure demux instance
+ * @filter_handle: raw filter handle
+ * @pid: pid
+ *
+ * Return error code
+ */
+int sdmx_remove_raw_pid(int session_handle, int filter_handle, u16 pid)
+{
+ int res, cmd_len, rsp_len;
+ struct sdmx_rem_raw_req *cmd;
+ struct sdmx_rem_raw_rsp *rsp;
+ enum sdmx_status ret;
+
+ if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS))
+ return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+ cmd_len = sizeof(struct sdmx_rem_raw_req);
+ rsp_len = sizeof(struct sdmx_rem_raw_rsp);
+
+ /* Lock shared memory */
+ mutex_lock(&sdmx_lock[session_handle]);
+
+ /* Get command and response buffers */
+ ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+ (void **)&rsp, &rsp_len);
+ if (ret)
+ goto out;
+
+ /* Populate command struct */
+ cmd->cmd_id = SDMX_REMOVE_RAW_PID_CMD;
+ cmd->session_handle = session_handle;
+ cmd->filter_handle = filter_handle;
+ cmd->pid = (u32)pid;
+
+ /* Issue QSEECom command */
+ res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (res < 0) {
+ mutex_unlock(&sdmx_lock[session_handle]);
+ return SDMX_STATUS_GENERAL_FAILURE;
+ }
+
+ ret = rsp->ret;
+out:
+ mutex_unlock(&sdmx_lock[session_handle]);
+
+ return ret;
+}
+EXPORT_SYMBOL(sdmx_remove_raw_pid);
+
+/*
+ * Call secure demux to perform processing on the specified input buffer
+ *
+ * @session_handle: secure demux instance
+ * @flags: input flags, a bitmask of SDMX_INPUT_FLAG_* values (EOS, debug).
+ * @input_buf_desc: input buffer descriptor
+ * @input_fill_count: number of bytes available in input buffer
+ * @input_read_offset: offset inside input buffer where data starts
+ * @error_indicators: returned general error indicators
+ * @status_indicators: returned general status indicators
+ * @num_filters: number of filters in filter status array
+ * @filter_status: filter status descriptor array
+ *
+ * Return error code
+ */
+int sdmx_process(int session_handle, u8 flags,
+ struct sdmx_buff_descr *input_buf_desc,
+ u32 *input_fill_count,
+ u32 *input_read_offset,
+ u32 *error_indicators,
+ u32 *status_indicators,
+ u32 num_filters,
+ struct sdmx_filter_status *filter_status)
+{
+ int res, cmd_len, rsp_len;
+ struct sdmx_proc_req *cmd;
+ struct sdmx_proc_rsp *rsp;
+ enum sdmx_status ret;
+
+ if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS) ||
+ (input_buf_desc == NULL) ||
+ (input_fill_count == NULL) || (input_read_offset == NULL) ||
+ (error_indicators == NULL) || (status_indicators == NULL) ||
+ (filter_status == NULL))
+ return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+ cmd_len = sizeof(struct sdmx_proc_req)
+ + num_filters * sizeof(struct sdmx_filter_status);
+ rsp_len = sizeof(struct sdmx_proc_rsp);
+
+ /* Lock shared memory */
+ mutex_lock(&sdmx_lock[session_handle]);
+
+ /* Get command and response buffers */
+ ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+ (void **)&rsp, &rsp_len);
+ if (ret)
+ goto out;
+
+ /* Populate command struct */
+ cmd->cmd_id = SDMX_PROCESS_CMD;
+ cmd->session_handle = session_handle;
+ cmd->flags = flags;
+ cmd->in_buf_descr.base_addr = input_buf_desc->base_addr;
+ cmd->in_buf_descr.size = input_buf_desc->size;
+ cmd->inp_fill_cnt = *input_fill_count;
+ cmd->in_rd_offset = *input_read_offset;
+ cmd->num_filters = num_filters;
+ memcpy(cmd->filters_status, filter_status,
+ num_filters * sizeof(struct sdmx_filter_status));
+
+ /* Issue QSEECom command */
+ res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (res < 0) {
+ mutex_unlock(&sdmx_lock[session_handle]);
+ return SDMX_STATUS_GENERAL_FAILURE;
+ }
+
+ /* Parse response struct */
+ *input_fill_count = rsp->inp_fill_cnt;
+ *input_read_offset = rsp->in_rd_offset;
+ *error_indicators = rsp->err_indicators;
+ *status_indicators = rsp->status_indicators;
+ memcpy(filter_status, cmd->filters_status,
+ num_filters * sizeof(struct sdmx_filter_status));
+ ret = rsp->ret;
+out:
+ mutex_unlock(&sdmx_lock[session_handle]);
+
+ return ret;
+}
+EXPORT_SYMBOL(sdmx_process);
+
+/*
+ * Returns session-level & filter-level debug counters
+ *
+ * @session_handle: secure demux instance
+ * @session_counters: returned session-level debug counters
+ * @num_filters: in/out - capacity of filter_counters on input, number of filters actually reported on output
+ * @filter_counters: returned filter-level debug counters array
+ *
+ * Return error code
+ */
+int sdmx_get_dbg_counters(int session_handle,
+ struct sdmx_session_dbg_counters *session_counters,
+ u32 *num_filters,
+ struct sdmx_filter_dbg_counters *filter_counters)
+{
+ int res, cmd_len, rsp_len;
+ struct sdmx_get_counters_req *cmd;
+ struct sdmx_get_counters_rsp *rsp;
+ enum sdmx_status ret;
+
+ if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS) ||
+ (session_counters == NULL) || (num_filters == NULL) ||
+ (filter_counters == NULL))
+ return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+ cmd_len = sizeof(struct sdmx_get_counters_req);
+ rsp_len = sizeof(struct sdmx_get_counters_rsp)
+ + *num_filters * sizeof(struct sdmx_filter_dbg_counters);
+
+ /* Lock shared memory */
+ mutex_lock(&sdmx_lock[session_handle]);
+
+ /* Get command and response buffers */
+ ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+ (void **)&rsp, &rsp_len);
+ if (ret)
+ goto out;
+
+ /* Populate command struct */
+ cmd->cmd_id = SDMX_GET_DBG_COUNTERS_CMD;
+ cmd->session_handle = session_handle;
+ cmd->num_filters = *num_filters;
+
+ /* Issue QSEECom command */
+ res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (res < 0) {
+ mutex_unlock(&sdmx_lock[session_handle]);
+ return SDMX_STATUS_GENERAL_FAILURE;
+ }
+
+	/* Parse response struct. NOTE(review): rsp->num_filters from the secure side bounds the memcpy below - confirm it cannot exceed the caller-supplied capacity (*num_filters on entry). */
+ *session_counters = rsp->session_counters;
+ *num_filters = rsp->num_filters;
+ memcpy(filter_counters, rsp->filter_counters,
+ *num_filters * sizeof(struct sdmx_filter_dbg_counters));
+ ret = rsp->ret;
+out:
+ mutex_unlock(&sdmx_lock[session_handle]);
+
+ return ret;
+}
+EXPORT_SYMBOL(sdmx_get_dbg_counters);
+
+/*
+ * Reset debug counters
+ *
+ * @session_handle: secure demux instance
+ *
+ * Return error code
+ */
+int sdmx_reset_dbg_counters(int session_handle)
+{
+ int res, cmd_len, rsp_len;
+ struct sdmx_rst_counters_req *cmd;
+ struct sdmx_rst_counters_rsp *rsp;
+ enum sdmx_status ret;
+
+ if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS))
+ return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+ cmd_len = sizeof(struct sdmx_rst_counters_req);
+ rsp_len = sizeof(struct sdmx_rst_counters_rsp);
+
+ /* Lock shared memory */
+ mutex_lock(&sdmx_lock[session_handle]);
+
+ /* Get command and response buffers */
+ ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+ (void **)&rsp, &rsp_len);
+ if (ret)
+ goto out;
+
+ /* Populate command struct */
+ cmd->cmd_id = SDMX_RESET_DBG_COUNTERS_CMD;
+ cmd->session_handle = session_handle;
+
+ /* Issue QSEECom command */
+ res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (res < 0) {
+ mutex_unlock(&sdmx_lock[session_handle]);
+ return SDMX_STATUS_GENERAL_FAILURE;
+ }
+
+ ret = rsp->ret;
+out:
+ mutex_unlock(&sdmx_lock[session_handle]);
+
+ return ret;
+}
+EXPORT_SYMBOL(sdmx_reset_dbg_counters);
+
+/*
+ * Set debug log verbosity level
+ *
+ * @session_handle: secure demux instance
+ * @level: requested log level
+ *
+ * Return error code
+ */
+int sdmx_set_log_level(int session_handle, enum sdmx_log_level level)
+{
+ int res, cmd_len, rsp_len;
+ struct sdmx_set_log_level_req *cmd;
+ struct sdmx_set_log_level_rsp *rsp;
+ enum sdmx_status ret;
+
+ cmd_len = sizeof(struct sdmx_set_log_level_req);
+ rsp_len = sizeof(struct sdmx_set_log_level_rsp);
+
+	/* Lock shared memory. NOTE(review): unlike every other sdmx_* API, session_handle is not validated against [0, SDMX_MAX_SESSIONS) before indexing sdmx_lock[] and sdmx_qseecom_handles[] - confirm all callers guarantee a valid handle, or add the range check. */
+ mutex_lock(&sdmx_lock[session_handle]);
+
+ /* Get command and response buffers */
+ ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+ (void **)&rsp, &rsp_len);
+ if (ret)
+ goto out;
+
+ /* Populate command struct */
+ cmd->cmd_id = SDMX_SET_LOG_LEVEL_CMD;
+ cmd->session_handle = session_handle;
+ cmd->level = level;
+
+ /* Issue QSEECom command */
+ res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+ if (res < 0) {
+ mutex_unlock(&sdmx_lock[session_handle]);
+ return SDMX_STATUS_GENERAL_FAILURE;
+ }
+ ret = rsp->ret;
+out:
+ /* Unlock */
+ mutex_unlock(&sdmx_lock[session_handle]);
+ return ret;
+}
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_sdmx.h b/drivers/media/platform/msm/dvb/demux/mpq_sdmx.h
new file mode 100644
index 0000000..9be26ae5
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/demux/mpq_sdmx.h
@@ -0,0 +1,368 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MPQ_SDMX_H
+#define _MPQ_SDMX_H
+
+#include <linux/types.h>
+
+/* Constant declarations */
+#define SDMX_MAX_SESSIONS (4)
+#define SDMX_LOOPBACK_PID (0x2000)
+
+#define SDMX_MAX_PHYSICAL_CHUNKS (256)
+
+/* Filter-level error indicators */
+#define SDMX_FILTER_SUCCESS (0)
+#define SDMX_FILTER_ERR_MD_BUF_FULL BIT(0)
+#define SDMX_FILTER_ERR_D_BUF_FULL BIT(1)
+#define SDMX_FILTER_ERR_D_LIN_BUFS_FULL BIT(2)
+#define SDMX_FILTER_ERR_INVALID_SCRAMBLE_BITS BIT(3)
+#define SDMX_FILTER_ERR_KL_IND_NOT_SET BIT(4)
+#define SDMX_FILTER_ERR_CAS_DECRYPT_ERROR BIT(5)
+#define SDMX_FILTER_ERR_SEC_VERIF_CRC32_FAIL BIT(6)
+#define SDMX_FILTER_ERR_SEC_INTERNAL_MALLOC_FAIL BIT(7)
+#define SDMX_FILTER_ERR_SEC_LEN_INVALID BIT(8)
+#define SDMX_FILTER_ERR_SEC_PUSI_PTR_INVALID BIT(9)
+#define SDMX_FILTER_ERR_TS_SYNC_BYTE_INVALID BIT(10)
+#define SDMX_FILTER_ERR_TS_TRANSPORT_ERR BIT(11)
+#define SDMX_FILTER_ERR_CONT_CNT_INVALID BIT(12)
+#define SDMX_FILTER_ERR_CONT_CNT_DUPLICATE BIT(13)
+#define SDMX_FILTER_ERR_INVALID_PES_HDR BIT(14)
+#define SDMX_FILTER_ERR_INVALID_PES_LEN BIT(15)
+#define SDMX_FILTER_ERR_INVALID_PES_ENCRYPTION BIT(16)
+#define SDMX_FILTER_ERR_SECURITY_FAULT BIT(17)
+#define SDMX_FILTER_ERR_IN_NS_BUFFER BIT(18)
+
+/* Filter-level status indicators */
+#define SDMX_FILTER_STATUS_EOS BIT(0)
+#define SDMX_FILTER_STATUS_WR_PTR_CHANGED BIT(1)
+
+/* Filter-level flags */
+#define SDMX_FILTER_FLAG_VERIFY_SECTION_CRC BIT(0)
+
+#define SDMX_INVALID_SESSION_HANDLE (-1)
+#define SDMX_INVALID_FILTER_HANDLE (-1)
+
+/* Input flags */
+#define SDMX_INPUT_FLAG_EOS BIT(0)
+#define SDMX_INPUT_FLAG_DBG_ENABLE BIT(1)
+
+
+enum sdmx_buf_mode {
+ SDMX_RING_BUF,
+ SDMX_LINEAR_GROUP_BUF,
+};
+
+enum sdmx_proc_mode {
+ SDMX_PUSH_MODE,
+ SDMX_PULL_MODE,
+};
+
+enum sdmx_inp_mode {
+ SDMX_PKT_ENC_MODE,
+ SDMX_BULK_ENC_MODE,
+ SDMX_CLEAR_MODE,
+};
+
+enum sdmx_pkt_format {
+ SDMX_188_BYTE_PKT = 188,
+ SDMX_192_BYTE_PKT = 192,
+ SDMX_195_BYTE_PKT = 195,
+};
+
+enum sdmx_log_level {
+ SDMX_LOG_NO_PRINT,
+ SDMX_LOG_MSG_ERROR,
+ SDMX_LOG_DEBUG,
+ SDMX_LOG_VERBOSE
+};
+
+enum sdmx_status {
+ SDMX_SUCCESS = 0,
+ SDMX_STATUS_GENERAL_FAILURE = -1,
+ SDMX_STATUS_MAX_OPEN_SESSIONS_REACHED = -2,
+ SDMX_STATUS_INVALID_SESSION_HANDLE = -3,
+ SDMX_STATUS_INVALID_INPUT_PARAMS = -4,
+ SDMX_STATUS_UNSUPPORTED_MODE = -5,
+ SDMX_STATUS_INVALID_PID = -6,
+ SDMX_STATUS_OUT_OF_MEM = -7,
+ SDMX_STATUS_FILTER_EXISTS = -8,
+ SDMX_STATUS_INVALID_FILTER_HANDLE = -9,
+ SDMX_STATUS_MAX_RAW_PIDS_REACHED = -10,
+ SDMX_STATUS_SINGLE_PID_RAW_FILTER = -11,
+ SDMX_STATUS_INP_BUF_INVALID_PARAMS = -12,
+ SDMX_STATUS_INVALID_FILTER_CFG = -13,
+ SDMX_STATUS_STALLED_IN_PULL_MODE = -14,
+ SDMX_STATUS_SECURITY_FAULT = -15,
+ SDMX_STATUS_NS_BUFFER_ERROR = -16,
+};
+
+enum sdmx_filter {
+ SDMX_PES_FILTER, /* Other PES */
+ SDMX_SEPARATED_PES_FILTER, /* Separated PES (for decoder) */
+ SDMX_SECTION_FILTER, /* Section */
+ SDMX_PCR_FILTER, /* PCR */
+ SDMX_RAW_FILTER, /* Recording */
+};
+
+enum sdmx_raw_out_format {
+ SDMX_188_OUTPUT,
+ SDMX_192_HEAD_OUTPUT,
+ SDMX_192_TAIL_OUTPUT
+};
+
+#pragma pack(push, sdmx, 1)
+
+struct sdmx_session_dbg_counters {
+ /* Total number of TS-packets input to SDMX. */
+ u32 ts_pkt_in;
+
+ /* Total number of TS-packets filtered out by SDMX. */
+ u32 ts_pkt_out;
+};
+
+struct sdmx_filter_dbg_counters {
+ int filter_handle;
+
+ /* Number of TS-packets filtered. */
+ u32 ts_pkt_count;
+
+ /* Number of TS-packets with adaptation field only (no payload). */
+ u32 ts_pkt_no_payload;
+
+ /* Number of TS-packets with the discontinuity indicator set. */
+ u32 ts_pkt_discont;
+
+ /* Number of duplicate TS-packets detected. */
+ u32 ts_pkt_dup;
+
+ /* Number of packets not decrypted because the key wasn't ready. */
+ u32 ts_pkt_key_not_ready;
+};
+
+struct sdmx_pes_counters {
+ /* Number of TS packets with the TEI flag set */
+ u32 transport_err_count;
+
+ /* Number of TS packets with continuity counter errors */
+ u32 continuity_err_count;
+
+ /* Number of TS packets composing this PES frame */
+ u32 pes_ts_count;
+
+ /* Number of TS packets dropped due to full buffer */
+ u32 drop_count;
+};
+
+struct sdmx_buff_descr {
+ /* Physical address where buffer starts */
+ u64 base_addr;
+
+ /* Size of buffer */
+ u32 size;
+};
+
+struct sdmx_data_buff_descr {
+ /* Physical chunks of the buffer */
+ struct sdmx_buff_descr buff_chunks[SDMX_MAX_PHYSICAL_CHUNKS];
+
+ /* Length of buffer */
+ u32 length;
+};
+
+/*
+ * Data payload residing in the data buffers is described using this meta-data
+ * header. The meta data header specifies where the payload is located in the
+ * data buffer and how big it is.
+ * The meta data header optionally carries additional relevant meta data
+ * immediately following the meta-data header.
+ */
+struct sdmx_metadata_header {
+ /*
+ * Payload start offset inside data buffer. In case data is managed
+ * as a linear buffer group, this specifies buffer index.
+ */
+ u32 payload_start;
+
+ /* Payload length */
+ u32 payload_length;
+
+ /* Number of meta data bytes immediately following this header */
+ u32 metadata_length;
+};
+
+
+struct sdmx_filter_status {
+ /* Secure demux filter handle */
+ int filter_handle;
+
+ /*
+ * Number of pending bytes in filter's output data buffer.
+ * For linear buffer mode, this is number of buffers pending.
+ */
+ u32 data_fill_count;
+
+ /*
+ * Offset in data buffer for next data payload to be written.
+ * For linear buffer mode, this is a buffer index.
+ */
+ u32 data_write_offset;
+
+ /* Number of pending bytes in filter's output meta data buffer */
+ u32 metadata_fill_count;
+
+ /* Offset in meta data buffer for next metadata header to be written */
+ u32 metadata_write_offset;
+
+ /* Errors (bitmap) reported by secure demux for this filter */
+ u32 error_indicators;
+
+ /* General status (bitmap) reported by secure demux for this filter */
+ u32 status_indicators;
+};
+#pragma pack(pop, sdmx)
+
+#ifdef CONFIG_QSEECOM
+
+int sdmx_open_session(int *session_handle);
+
+int sdmx_close_session(int session_handle);
+
+int sdmx_get_version(int session_handle, int32_t *version);
+
+int sdmx_set_session_cfg(int session_handle, enum sdmx_proc_mode proc_mode,
+ enum sdmx_inp_mode inp_mode, enum sdmx_pkt_format pkt_format,
+ u8 odd_scramble_bits, u8 even_scramble_bits);
+
+int sdmx_add_filter(int session_handle, u16 pid, enum sdmx_filter filter_type,
+ struct sdmx_buff_descr *meta_data_buf, enum sdmx_buf_mode data_buf_mode,
+ u32 num_data_bufs, struct sdmx_data_buff_descr *data_bufs,
+ int *filter_handle, enum sdmx_raw_out_format ts_out_format, u32 flags);
+
+int sdmx_remove_filter(int session_handle, int filter_handle);
+
+int sdmx_set_kl_ind(int session_handle, u16 pid, u32 key_ladder_index);
+
+int sdmx_add_raw_pid(int session_handle, int filter_handle, u16 pid);
+
+int sdmx_remove_raw_pid(int session_handle, int filter_handle, u16 pid);
+
+int sdmx_process(int session_handle, u8 flags,
+ struct sdmx_buff_descr *input_buf_desc,
+ u32 *input_fill_count, u32 *input_read_offset,
+ u32 *error_indicators,
+ u32 *status_indicators,
+ u32 num_filters,
+ struct sdmx_filter_status *filter_status);
+
+int sdmx_get_dbg_counters(int session_handle,
+ struct sdmx_session_dbg_counters *session_counters,
+ u32 *num_filters,
+ struct sdmx_filter_dbg_counters *filter_counters);
+
+int sdmx_reset_dbg_counters(int session_handle);
+
+int sdmx_set_log_level(int session_handle, enum sdmx_log_level level);
+
+#else
+
+static inline int sdmx_open_session(int *session_handle)
+{
+ return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_close_session(int session_handle)
+{
+ return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_get_version(int session_handle, int32_t *version)
+{
+ return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_set_session_cfg(int session_handle,
+ enum sdmx_proc_mode proc_mode,
+ enum sdmx_inp_mode inp_mode, enum sdmx_pkt_format pkt_format,
+ u8 odd_scramble_bits, u8 even_scramble_bits)
+{
+ return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_add_filter(int session_handle, u16 pid,
+ enum sdmx_filter filter_type,
+ struct sdmx_buff_descr *meta_data_buf, enum sdmx_buf_mode data_buf_mode,
+ u32 num_data_bufs, struct sdmx_data_buff_descr *data_bufs,
+ int *filter_handle, enum sdmx_raw_out_format ts_out_format, u32 flags)
+{
+ return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_remove_filter(int session_handle, int filter_handle)
+{
+ return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_set_kl_ind(int session_handle, u16 pid,
+ u32 key_ladder_index)
+{
+ return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_add_raw_pid(int session_handle, int filter_handle,
+ u16 pid)
+{
+ return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_remove_raw_pid(int session_handle, int filter_handle,
+ u16 pid)
+{
+ return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_process(int session_handle, u8 flags,
+ struct sdmx_buff_descr *input_buf_desc,
+ u32 *input_fill_count, u32 *input_read_offset,
+ u32 *error_indicators,
+ u32 *status_indicators,
+ u32 num_filters,
+ struct sdmx_filter_status *filter_status)
+{
+ *status_indicators = 0;
+ *error_indicators = 0;
+ return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_get_dbg_counters(int session_handle,
+ struct sdmx_session_dbg_counters *session_counters,
+ u32 *num_filters,
+ struct sdmx_filter_dbg_counters *filter_counters)
+{
+ return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_reset_dbg_counters(int session_handle)
+{
+ return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_set_log_level(int session_handle,
+ enum sdmx_log_level level)
+{
+ return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+#endif
+
+#endif /* _MPQ_SDMX_H */
diff --git a/drivers/media/platform/msm/dvb/include/mpq_adapter.h b/drivers/media/platform/msm/dvb/include/mpq_adapter.h
new file mode 100644
index 0000000..c55a5aa
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/include/mpq_adapter.h
@@ -0,0 +1,222 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MPQ_ADAPTER_H
+#define _MPQ_ADAPTER_H
+
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "mpq_stream_buffer.h"
+
+
+
+/** IDs of interfaces holding stream-buffers */
+enum mpq_adapter_stream_if {
+ /** Interface holding stream-buffer for video0 stream */
+ MPQ_ADAPTER_VIDEO0_STREAM_IF = 0,
+
+ /** Interface holding stream-buffer for video1 stream */
+ MPQ_ADAPTER_VIDEO1_STREAM_IF = 1,
+
+ /** Interface holding stream-buffer for video2 stream */
+ MPQ_ADAPTER_VIDEO2_STREAM_IF = 2,
+
+ /** Interface holding stream-buffer for video3 stream */
+ MPQ_ADAPTER_VIDEO3_STREAM_IF = 3,
+
+ /** Interface holding stream-buffer for audio0 stream */
+ MPQ_ADAPTER_AUDIO0_STREAM_IF = 4,
+
+ /** Interface holding stream-buffer for audio1 stream */
+ MPQ_ADAPTER_AUDIO1_STREAM_IF = 5,
+
+ /** Interface holding stream-buffer for audio2 stream */
+ MPQ_ADAPTER_AUDIO2_STREAM_IF = 6,
+
+ /** Interface holding stream-buffer for audio3 stream */
+ MPQ_ADAPTER_AUDIO3_STREAM_IF = 7,
+
+ /** Maximum number of interfaces holding stream-buffers */
+ MPQ_ADAPTER_MAX_NUM_OF_INTERFACES,
+};
+
+enum dmx_packet_type {
+ DMX_PES_PACKET,
+ DMX_FRAMING_INFO_PACKET,
+ DMX_EOS_PACKET,
+ DMX_MARKER_PACKET
+};
+
+struct dmx_pts_dts_info {
+ /** Indication whether PTS exist */
+ int pts_exist;
+
+ /** Indication whether DTS exist */
+ int dts_exist;
+
+ /** PTS value associated with the PES data if any */
+ u64 pts;
+
+ /** DTS value associated with the PES data if any */
+ u64 dts;
+};
+
+struct dmx_framing_packet_info {
+ /** framing pattern type, one of DMX_IDX_* definitions */
+ u64 pattern_type;
+
+ /** PTS/DTS information */
+ struct dmx_pts_dts_info pts_dts_info;
+
+ /** STC value attached to first TS packet holding the pattern */
+ u64 stc;
+
+ /*
+ * Number of TS packets with Transport Error Indicator (TEI)
+ * found while constructing the frame.
+ */
+ __u32 transport_error_indicator_counter;
+
+ /* Number of continuity errors found while constructing the frame */
+ __u32 continuity_error_counter;
+
+ /*
+ * Number of dropped bytes due to insufficient buffer space,
+ * since last reported frame.
+ */
+ __u32 ts_dropped_bytes;
+
+ /* Total number of TS packets holding the frame */
+ __u32 ts_packets_num;
+};
+
+struct dmx_pes_packet_info {
+ /** PTS/DTS information */
+ struct dmx_pts_dts_info pts_dts_info;
+
+ /** STC value attached to first TS packet holding the PES */
+ u64 stc;
+};
+
+struct dmx_marker_info {
+ /* marker id */
+ u64 id;
+};
+
+/** The meta-data used for video interface */
+struct mpq_adapter_video_meta_data {
+ /** meta-data packet type */
+ enum dmx_packet_type packet_type;
+
+ /** packet-type specific information */
+ union {
+ struct dmx_framing_packet_info framing;
+ struct dmx_pes_packet_info pes;
+ struct dmx_marker_info marker;
+ } info;
+} __packed;
+
+/** The meta-data used for audio interface */
+struct mpq_adapter_audio_meta_data {
+ /** meta-data packet type */
+ enum dmx_packet_type packet_type;
+
+ /** packet-type specific information */
+ union {
+ struct dmx_pes_packet_info pes;
+ struct dmx_marker_info marker;
+ } info;
+} __packed;
+
+/** Callback function to notify on registrations of specific interfaces */
+typedef void (*mpq_adapter_stream_if_callback)(
+ enum mpq_adapter_stream_if interface_id,
+ void *user_param);
+
+
+/**
+ * mpq_adapter_get - Returns pointer to Qualcomm Technologies Inc. DVB adapter
+ *
+ * Return the DVB adapter, or NULL if it does not exist.
+ */
+struct dvb_adapter *mpq_adapter_get(void);
+
+
+/**
+ * mpq_adapter_register_stream_if - Register a stream interface.
+ *
+ * @interface_id: The interface id
+ * @stream_buffer: The buffer used for the interface
+ *
+ * Return error status
+ *
+ * A stream interface is used to connect two units in tunneling
+ * mode using mpq_streambuffer implementation.
+ * The producer of the interface should register the new interface,
+ * consumer may get the interface using mpq_adapter_get_stream_if.
+ *
+ * Note that the function holds a pointer to this interface,
+ * stream_buffer pointer assumed to be valid as long as interface
+ * is active.
+ */
+int mpq_adapter_register_stream_if(
+ enum mpq_adapter_stream_if interface_id,
+ struct mpq_streambuffer *stream_buffer);
+
+
+/**
+ * mpq_adapter_unregister_stream_if - Un-register a stream interface.
+ *
+ * @interface_id: The interface id
+ *
+ * Return error status
+ */
+int mpq_adapter_unregister_stream_if(
+ enum mpq_adapter_stream_if interface_id);
+
+
+/**
+ * mpq_adapter_get_stream_if - Get buffer used for a stream interface.
+ *
+ * @interface_id: The interface id
+ * @stream_buffer: The returned stream buffer
+ *
+ * Return error status
+ */
+int mpq_adapter_get_stream_if(
+ enum mpq_adapter_stream_if interface_id,
+ struct mpq_streambuffer **stream_buffer);
+
+
+/**
+ * mpq_adapter_notify_stream_if - Register notification
+ * to be triggered when a stream interface is registered.
+ *
+ * @interface_id: The interface id
+ * @callback: The callback to be triggered when the interface is registered
+ * @user_param: A parameter that is passed back to the callback function
+ * when triggered.
+ *
+ * Return error status
+ *
+ * Producer may use this to register notification when desired
+ * interface registered in the system and query its information
+ * afterwards using mpq_adapter_get_stream_if.
+ * To remove the callback, this function should be called with NULL
+ * value in callback parameter.
+ */
+int mpq_adapter_notify_stream_if(
+ enum mpq_adapter_stream_if interface_id,
+ mpq_adapter_stream_if_callback callback,
+ void *user_param);
+
+#endif /* _MPQ_ADAPTER_H */
diff --git a/drivers/media/platform/msm/dvb/include/mpq_dvb_debug.h b/drivers/media/platform/msm/dvb/include/mpq_dvb_debug.h
new file mode 100644
index 0000000..6550ddd
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/include/mpq_dvb_debug.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MPQ_DVB_DEBUG_H
+#define _MPQ_DVB_DEBUG_H
+
+/* Enable this line if you want to output debug printouts */
+#define MPG_DVB_DEBUG_ENABLE
+
+#undef MPQ_DVB_DBG_PRINT /* undef it, just in case */
+
+#ifdef MPG_DVB_DEBUG_ENABLE
+#define MPQ_DVB_ERR_PRINT(fmt, args...) pr_err(fmt, ## args)
+#define MPQ_DVB_WARN_PRINT(fmt, args...) pr_warn(fmt, ## args)
+#define MPQ_DVB_NOTICE_PRINT(fmt, args...) pr_notice(fmt, ## args)
+#define MPQ_DVB_DBG_PRINT(fmt, args...) pr_debug(fmt, ## args)
+#else /* MPG_DVB_DEBUG_ENABLE */
+#define MPQ_DVB_ERR_PRINT(fmt, args...)
+#define MPQ_DVB_WARN_PRINT(fmt, args...)
+#define MPQ_DVB_NOTICE_PRINT(fmt, args...)
+#define MPQ_DVB_DBG_PRINT(fmt, args...)
+#endif /* MPG_DVB_DEBUG_ENABLE */
+
+
+/*
+ * The following can be used to disable specific printout
+ * by adding a letter to the end of MPQ_DVB_DBG_PRINT
+ */
+#undef MPQ_DVB_DBG_PRINTT
+#define MPQ_DVB_DBG_PRINTT(fmt, args...)
+
+#endif /* _MPQ_DVB_DEBUG_H */
diff --git a/drivers/media/platform/msm/dvb/include/mpq_stream_buffer.h b/drivers/media/platform/msm/dvb/include/mpq_stream_buffer.h
new file mode 100644
index 0000000..6240451
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/include/mpq_stream_buffer.h
@@ -0,0 +1,494 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MPQ_STREAM_BUFFER_H
+#define _MPQ_STREAM_BUFFER_H
+
+#include "dvb_ringbuffer.h"
+
+/**
+ * DOC: MPQ Stream Buffer
+ *
+ * A stream buffer implementation is used to transfer data between two units
+ * such as demux and decoders. The implementation relies on dvb_ringbuffer
+ * implementation. Refer to dvb_ringbuffer.h for details.
+ *
+ * The implementation uses two dvb_ringbuffers, one to pass the
+ * raw-data (PES payload for example) and the other to pass
+ * meta-data (information from PES header for example).
+ *
+ * The meta-data uses dvb_ringbuffer packet interface. Each meta-data
+ * packet points to the data buffer, and includes the offset to the data in the
+ * buffer, the size of raw-data described by the meta-data packet, and also the
+ * size of user's own parameters if any required.
+ *
+ * Data can be managed in two ways: ring-buffer & linear buffers, as specified
+ * in initialization when calling the mpq_streambuffer_init function.
+ * For managing data as a ring buffer exactly 1 data buffer descriptor must be
+ * specified in initialization. For this mode, dvb_ringbuffer is used "as-is".
+ * For managing data in several linear buffers, an array of buffer descriptors
+ * must be passed.
+ * For both modes, data descriptor(s) must remain valid throughout the life
+ * span of the mpq_streambuffer object.
+ * Apart from initialization API remains the same for both modes.
+ *
+ * Contrary to dvb_ringbuffer implementation, this API makes sure there's
+ * enough data to read/write when making read/write operations.
+ * Users interested to flush/reset specific buffer, check for bytes
+ * ready or space available for write should use the respective services
+ * in dvb_ringbuffer (dvb_ringbuffer_avail, dvb_ringbuffer_free,
+ * dvb_ringbuffer_reset, dvb_ringbuffer_flush,
+ * dvb_ringbuffer_flush_spinlock_wakeup).
+ *
+ * Concurrency protection is handled in the same manner as in
+ * dvb_ringbuffer implementation.
+ *
+ * Typical call flow from producer:
+ *
+ * - Start writing the raw-data of new packet, the following call is
+ * repeated until end of data of the specific packet
+ *
+ * mpq_streambuffer_data_write(...)
+ *
+ * - Now write a new packet describing the new available raw-data
+ * mpq_streambuffer_pkt_write(...)
+ *
+ * For linear buffer mode, writing a new packet with data size > 0, causes the
+ * current buffer to be marked as pending for reading, and triggers moving to
+ * the next available buffer, that shall now be the current write buffer.
+ *
+ * Typical call flow from consumer:
+ *
+ * - Poll for next available packet:
+ * mpq_streambuffer_pkt_next(&streambuff,-1,&len)
+ *
+ * In different approach, consumer can wait on event for new data and then
+ * call mpq_streambuffer_pkt_next, waiting for data can be done as follows:
+ *
+ * wait_event_interruptible(
+ * streambuff->packet_data.queue,
+ * !dvb_ringbuffer_empty(&streambuff->packet_data) ||
+ * (streambuff->packet_data.error != 0));
+ *
+ * - Get the new packet information:
+ * mpq_streambuffer_pkt_read(..)
+ *
+ * - Read the raw-data of the new packet. Here you can use two methods:
+ *
+ * 1. Read the data to a user supplied buffer:
+ * mpq_streambuffer_data_read()
+ *
+ * In this case memory copy is done, read pointer is updated in the raw
+ * data buffer, the amount of raw-data is provided part of the
+ * packet's information. User should then call mpq_streambuffer_pkt_dispose
+ * with dispose_data set to 0 as the raw-data was already disposed.
+ * Note that secure buffer cannot be accessed directly and an error will
+ * occur.
+ *
+ * 2. Access the data directly using the raw-data address. The address
+ * of the raw data is provided part of the packet's information. User
+ * then should call mpq_streambuffer_pkt_dispose with dispose_data set
+ * to 1 to dispose the packet along with it's raw-data.
+ *
+ * - Disposal of packets:
+ * mpq_streambuffer_pkt_dispose(...)
+ *
+ * For linear buffer mode, disposing of a packet with data size > 0,
+ * regardless of the 'dispose_data' parameter, causes the current buffer's
+ * data to be disposed and marked as free for writing, and triggers moving to
+ * the next available buffer, that shall now be the current read buffer.
+ */
+
+struct mpq_streambuffer;
+struct mpq_streambuffer_packet_header;
+
+typedef void (*mpq_streambuffer_dispose_cb) (
+ struct mpq_streambuffer *sbuff,
+ u32 offset,
+ size_t len,
+ void *user_data);
+
+enum mpq_streambuffer_mode {
+ MPQ_STREAMBUFFER_BUFFER_MODE_RING,
+ MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR
+};
+
+/**
+ * struct mpq_streambuffer - mpq stream buffer representation
+ *
+ * @raw_data: The buffer used to hold raw-data, or linear buffer descriptors
+ * @packet_data: The buffer used to hold the meta-data
+ * @buffers: array of buffer descriptor(s) holding buffer initial & dynamic
+ * buffer information
+ * @mode: mpq_streambuffer buffer management work mode - Ring-buffer or Linear
+ * buffers
+ * @buffers_num: number of data buffers to manage
+ * @pending_buffers_count: for linear buffer management, counts the number of
+ * buffers that have been marked as pending for reading
+ */
+struct mpq_streambuffer {
+ struct dvb_ringbuffer raw_data;
+ struct dvb_ringbuffer packet_data;
+ struct mpq_streambuffer_buffer_desc *buffers;
+ enum mpq_streambuffer_mode mode;
+ u32 buffers_num;
+ u32 pending_buffers_count;
+ mpq_streambuffer_dispose_cb cb;
+ void *cb_user_data;
+};
+
+/**
+ * mpq_streambuffer_buffer_desc
+ * @handle: ION handle's file descriptor of buffer
+ * @base: kernel mapped address to start of buffer.
+ * Can be NULL for secured buffers
+ * @size: size of buffer
+ * @read_ptr: initial read pointer value (should normally be 0)
+ * @write_ptr: initial write pointer value (should normally be 0)
+ */
+struct mpq_streambuffer_buffer_desc {
+ int handle;
+ void *base;
+ u32 size;
+ u32 read_ptr;
+ u32 write_ptr;
+};
+
+/**
+ * struct mpq_streambuffer_packet_header - packet header saved in packet buffer
+ * @user_data_len: length of private user (meta) data
+ * @raw_data_handle: ION handle's file descriptor of raw-data buffer
+ * @raw_data_offset: offset of raw-data from start of buffer (0 for linear)
+ * @raw_data_len: size of raw-data in the raw-data buffer (can be 0)
+ *
+ * The packet structure that is saved in each packet-buffer:
+ * user_data_len
+ * raw_data_handle
+ * raw_data_offset
+ * raw_data_len
+ * private user-data bytes
+ */
+struct mpq_streambuffer_packet_header {
+ u32 user_data_len;
+ int raw_data_handle;
+ u32 raw_data_offset;
+ u32 raw_data_len;
+} __packed;
+
+/**
+ * mpq_streambuffer_init - Initialize a new stream buffer
+ *
+ * @sbuff: The buffer to initialize
+ * @data_buffers: array of data buffer descriptor(s).
+ * Data descriptor(s) must remain valid throughout the life
+ * span of the mpq_streambuffer object
+ * @data_buff_num: number of data buffer in array
+ * @packet_buff: The buffer holding meta-data
+ * @packet_buff_size: Size of meta-data buffer
+ *
+ * Return Error status, -EINVAL if any of the arguments are invalid
+ *
+ * Note:
+ * for data_buff_num > 1, mpq_streambuffer object manages these buffers as a
+ * separated set of linear buffers. A linear buffer cannot wrap-around and one
+ * can only write as many data bytes as the buffer's size. Data will not be
+ * written to the next free buffer.
+ */
+int mpq_streambuffer_init(
+ struct mpq_streambuffer *sbuff,
+ enum mpq_streambuffer_mode mode,
+ struct mpq_streambuffer_buffer_desc *data_buffers,
+ u32 data_buff_num,
+ void *packet_buff,
+ size_t packet_buff_size);
+
+/**
+ * mpq_streambuffer_terminate - Terminate stream buffer
+ *
+ * @sbuff: The buffer to terminate
+ *
+ * The function sets the buffers' error flags to ENODEV
+ * and wakeup any waiting threads on the buffer queues.
+ * Threads waiting on the buffer queues should check if
+ * error was set.
+ */
+void mpq_streambuffer_terminate(struct mpq_streambuffer *sbuff);
+
+/**
+ * mpq_streambuffer_packet_next - Returns index of next available packet.
+ *
+ * @sbuff: The stream buffer
+ * @idx: Previous packet index or -1 to return index of the first
+ * available packet.
+ * @pktlen: The length of the ready packet
+ *
+ * Return index to the packet-buffer, -1 if buffer is empty
+ *
+ * After getting the index, the user of this function can either
+ * access the packet buffer directly using the returned index
+ * or ask to read the data back from the buffer using mpq_ringbuffer_pkt_read
+ */
+ssize_t mpq_streambuffer_pkt_next(
+ struct mpq_streambuffer *sbuff,
+ ssize_t idx, size_t *pktlen);
+
+/**
+ * mpq_streambuffer_pkt_read - Reads out the packet from the provided index.
+ *
+ * @sbuff: The stream buffer
+ * @idx: The index of the packet to be read
+ * @packet: The read packet's header
+ * @user_data: The read private user data
+ *
+ * Return The actual number of bytes read, -EINVAL if the packet is
+ * already disposed or the packet-data is invalid.
+ *
+ * The packet is not disposed after this function is called, to dispose it
+ * along with the raw-data it points to use mpq_streambuffer_pkt_dispose.
+ * If there are no private user-data, the user-data pointer can be NULL.
+ * The caller of this function must make sure that the private user-data
+ * buffer has enough space for the private user-data length
+ */
+ssize_t mpq_streambuffer_pkt_read(
+ struct mpq_streambuffer *sbuff,
+ size_t idx,
+ struct mpq_streambuffer_packet_header *packet,
+ u8 *user_data);
+
+/**
+ * mpq_streambuffer_pkt_dispose - Disposes a packet from the packet buffer
+ *
+ * @sbuff: The stream buffer
+ * @idx: The index of the packet to be disposed
+ * @dispose_data: Indicates whether to update the read pointer inside the
+ * raw-data buffer for the respective data pointed by the packet.
+ *
+ * Return error status, -EINVAL if the packet-data is invalid
+ *
+ * The function updates the read pointer inside the raw-data buffer
+ * for the respective data pointed by the packet if dispose_data is set.
+ */
+int mpq_streambuffer_pkt_dispose(
+ struct mpq_streambuffer *sbuff,
+ size_t idx,
+ int dispose_data);
+
+/**
+ * mpq_streambuffer_pkt_write - Write a new packet to the packet buffer.
+ *
+ * @sbuff: The stream buffer
+ * @packet: The packet header to write
+ * @user_data: The private user-data to be written
+ *
+ * Return error status, -ENOSPC if there's no space to write the packet
+ */
+int mpq_streambuffer_pkt_write(
+ struct mpq_streambuffer *sbuff,
+ struct mpq_streambuffer_packet_header *packet,
+ u8 *user_data);
+
+/**
+ * mpq_streambuffer_data_write - Write data to raw-data buffer
+ *
+ * @sbuff: The stream buffer
+ * @buf: The buffer holding the data to be written
+ * @len: The length of the data buffer
+ *
+ * Return The actual number of bytes written or -ENOSPC if
+ * no space to write the data
+ */
+ssize_t mpq_streambuffer_data_write(
+ struct mpq_streambuffer *sbuff,
+ const u8 *buf, size_t len);
+
+/**
+ * mpq_streambuffer_data_write_deposit - Advances the raw-buffer write pointer.
+ * Assumes the raw-data was written by the user directly
+ *
+ * @sbuff: The stream buffer
+ * @len: The length of the raw-data that was already written
+ *
+ * Return error status
+ */
+int mpq_streambuffer_data_write_deposit(
+ struct mpq_streambuffer *sbuff,
+ size_t len);
+
+/**
+ * mpq_streambuffer_data_read - Reads out raw-data to the provided buffer.
+ *
+ * @sbuff: The stream buffer
+ * @buf: The buffer to read the raw-data data to
+ * @len: The length of the buffer that will hold the raw-data
+ *
+ * Return The actual number of bytes read or error code
+ *
+ * This function copies the data from the ring-buffer to the
+ * provided buf parameter. The user can save the extra copy by accessing
+ * the data pointer directly and reading from it, then update the
+ * read pointer by the amount of data that was read using
+ * mpq_streambuffer_data_read_dispose
+ */
+ssize_t mpq_streambuffer_data_read(
+ struct mpq_streambuffer *sbuff,
+ u8 *buf, size_t len);
+
+/**
+ * mpq_streambuffer_data_read_user
+ *
+ * Same as mpq_streambuffer_data_read except data can be copied to user-space
+ * buffer.
+ */
+ssize_t mpq_streambuffer_data_read_user(
+ struct mpq_streambuffer *sbuff,
+ u8 __user *buf, size_t len);
+
+/**
+ * mpq_streambuffer_data_read_dispose - Advances the raw-buffer read pointer.
+ * Assumes the raw-data was read by the user directly.
+ *
+ * @sbuff: The stream buffer
+ * @len: The length of the raw-data to be disposed
+ *
+ * Return error status, -EINVAL if there's not enough data to
+ * be disposed
+ *
+ * The user can instead dispose a packet along with the data in the
+ * raw-data buffer using mpq_streambuffer_pkt_dispose.
+ */
+int mpq_streambuffer_data_read_dispose(
+ struct mpq_streambuffer *sbuff,
+ size_t len);
+/**
+ * mpq_streambuffer_get_buffer_handle - Returns the current linear buffer
+ * ION handle.
+ * @sbuff: The stream buffer
+ * @read_buffer: specifies if a read buffer handle is requested (when set),
+ * or a write buffer handle is requested.
+ * For linear buffer mode read & write buffers may be different
+ * buffers. For ring buffer mode, the same (single) buffer handle
+ * is returned.
+ *
+ * @handle: returned handle
+ *
+ * Return error status
+ * -EINVAL is arguments are invalid.
+ * -EPERM if stream buffer specified was not initialized with linear support.
+ */
+int mpq_streambuffer_get_buffer_handle(
+ struct mpq_streambuffer *sbuff,
+ int read_buffer,
+ int *handle);
+
+/**
+ * mpq_streambuffer_data_free - Returns number of free bytes in data buffer.
+ * @sbuff: The stream buffer object
+ *
+ * Note: for linear buffer management this return number of free bytes in the
+ * current write buffer only.
+ */
+ssize_t mpq_streambuffer_data_free(
+ struct mpq_streambuffer *sbuff);
+
+/**
+ * mpq_streambuffer_data_avail - Returns number of bytes in data buffer that
+ * can be read.
+ * @sbuff: The stream buffer object
+ *
+ * Note: for linear buffer management this return number of data bytes in the
+ * current read buffer only.
+ */
+ssize_t mpq_streambuffer_data_avail(
+ struct mpq_streambuffer *sbuff);
+
+/**
+ * mpq_streambuffer_register_data_dispose - Registers a callback to notify on
+ * packet disposal events.
+ *
+ * @sbuff: The stream buffer object
+ * @cb_func: user callback function
+ * @user_data: user data to be passed to callback function.
+ *
+ * Returns error status
+ * -EINVAL if arguments are invalid
+ */
+int mpq_streambuffer_register_data_dispose(
+ struct mpq_streambuffer *sbuff,
+ mpq_streambuffer_dispose_cb cb_func,
+ void *user_data);
+
+/**
+ * mpq_streambuffer_get_data_rw_offset - returns read/write offsets of current data
+ * buffer.
+ * @sbuff: The stream buffer object
+ * @read_offset: returned read offset
+ * @write_offset: returned write offset
+ *
+ * Note: read offset or write offset may be NULL if not required.
+ * Returns error status
+ * -EINVAL if arguments are invalid
+ */
+int mpq_streambuffer_get_data_rw_offset(
+ struct mpq_streambuffer *sbuff,
+ u32 *read_offset,
+ u32 *write_offset);
+
+/**
+ * mpq_streambuffer_metadata_free - returns number of free bytes in the meta
+ * data buffer, or error status.
+ * @sbuff: the stream buffer object
+ */
+ssize_t mpq_streambuffer_metadata_free(struct mpq_streambuffer *sbuff);
+
+/**
+ * mpq_streambuffer_flush - flush both pending packets and data in buffer
+ *
+ * @sbuff: the stream buffer object
+ *
+ * Returns error status
+ */
+int mpq_streambuffer_flush(struct mpq_streambuffer *sbuff);
+
+/*
+ * ------------------------------------------------------
+ * Consumer or AV Decoder Stream Interface to Ring Buffer
+ * ------------------------------------------------------
+ * Producer is Demux Driver
+ * ------------------------
+ *
+ * call from Audio/Video Decoder Driver to find Audio/Video
+ * streambuffer AV handles, "DMX_PES_AUDIO0 through 3" or
+ * DMX_PES_VIDEO0 through 3" interfaces corresponding to 4 programs.
+ */
+
+/* call from Audio/Video Decoder Driver via POLLING to consume
+ * Headers and Compressed data from ring buffer using streambuffer handle.
+ * hdrdata[] and cdata[] buffers have to be malloc'd by consumer
+ *
+ * --------------------------
+ * Consumer Calling Sequence
+ * --------------------------
+ * Find the streambuffer corresponding to a DMX TS PES stream instance.
+ * 1. consumer_audio_streambuffer() or consumer_video_streambuffer()
+ * Process the packet headers if required.
+ * 2. mpq_read_new_packet_hdr_data()
+ * Process the compressed data by forwarding to AV decoder.
+ * 3. mpq_read_new_packet_compressed_data()
+ * Dispose the packet.
+ * 4. mpq_dispose_new_packet_read()
+ *
+ * The Audio/Video drivers (or consumers) require the stream_buffer information
+ * for consuming packet headers and compressed AV data from the
+ * ring buffer filled by demux driver which is the producer
+ */
+
+#endif /* _MPQ_STREAM_BUFFER_H */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c
index 15b8a2d..ae01baf 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c
@@ -283,6 +283,59 @@
return;
}
+/*
+ * sde_mdp_set_vbif_memtype - set memtype output for the given xin port
+ * @mdata: pointer to global rotator data
+ * @xin_id: xin identifier
+ * @memtype: memtype output configuration
+ * return: none
+ */
+static void sde_mdp_set_vbif_memtype(struct sde_rot_data_type *mdata,
+ u32 xin_id, u32 memtype)
+{
+ u32 reg_off;
+ u32 bit_off;
+ u32 reg_val;
+
+ /*
+ * Assume 4 bits per bit field, 8 fields per 32-bit register.
+ */
+ if (xin_id >= 8)
+ return;
+
+ reg_off = MMSS_VBIF_NRT_VBIF_OUT_AXI_AMEMTYPE_CONF0;
+
+ bit_off = (xin_id & 0x7) * 4;
+ reg_val = SDE_VBIF_READ(mdata, reg_off);
+ reg_val &= ~(0x7 << bit_off);
+ reg_val |= (memtype & 0x7) << bit_off;
+ SDE_VBIF_WRITE(mdata, reg_off, reg_val);
+}
+
+/*
+ * sde_mdp_init_vbif - initialize static vbif configuration
+ * return: 0 if success; error code otherwise
+ */
+int sde_mdp_init_vbif(void)
+{
+ struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+ int i;
+
+ if (!mdata)
+ return -EINVAL;
+
+ if (mdata->vbif_memtype_count && mdata->vbif_memtype) {
+ for (i = 0; i < mdata->vbif_memtype_count; i++)
+ sde_mdp_set_vbif_memtype(mdata, i,
+ mdata->vbif_memtype[i]);
+
+ SDEROT_DBG("amemtype=0x%x\n", SDE_VBIF_READ(mdata,
+ MMSS_VBIF_NRT_VBIF_OUT_AXI_AMEMTYPE_CONF0));
+ }
+
+ return 0;
+}
+
struct reg_bus_client *sde_reg_bus_vote_client_create(char *client_name)
{
struct reg_bus_client *client;
@@ -398,6 +451,32 @@
return len;
}
+static void sde_mdp_parse_vbif_memtype(struct platform_device *pdev,
+ struct sde_rot_data_type *mdata)
+{
+ int rc;
+
+ mdata->vbif_memtype_count = sde_mdp_parse_dt_prop_len(pdev,
+ "qcom,mdss-rot-vbif-memtype");
+ mdata->vbif_memtype = kzalloc(sizeof(u32) *
+ mdata->vbif_memtype_count, GFP_KERNEL);
+ if (!mdata->vbif_memtype) {
+ mdata->vbif_memtype_count = 0;
+ return;
+ }
+
+ rc = sde_mdp_parse_dt_handler(pdev,
+ "qcom,mdss-rot-vbif-memtype", mdata->vbif_memtype,
+ mdata->vbif_memtype_count);
+ if (rc) {
+ SDEROT_DBG("vbif memtype not found\n");
+ kfree(mdata->vbif_memtype);
+ mdata->vbif_memtype = NULL;
+ mdata->vbif_memtype_count = 0;
+ return;
+ }
+}
+
static void sde_mdp_parse_vbif_qos(struct platform_device *pdev,
struct sde_rot_data_type *mdata)
{
@@ -409,14 +488,19 @@
"qcom,mdss-rot-vbif-qos-setting");
mdata->vbif_nrt_qos = kzalloc(sizeof(u32) *
mdata->npriority_lvl, GFP_KERNEL);
- if (!mdata->vbif_nrt_qos)
+ if (!mdata->vbif_nrt_qos) {
+ mdata->npriority_lvl = 0;
return;
+ }
rc = sde_mdp_parse_dt_handler(pdev,
"qcom,mdss-rot-vbif-qos-setting", mdata->vbif_nrt_qos,
mdata->npriority_lvl);
if (rc) {
SDEROT_DBG("vbif setting not found\n");
+ kfree(mdata->vbif_nrt_qos);
+ mdata->vbif_nrt_qos = NULL;
+ mdata->npriority_lvl = 0;
return;
}
}
@@ -579,6 +663,8 @@
sde_mdp_parse_vbif_qos(pdev, mdata);
+ sde_mdp_parse_vbif_memtype(pdev, mdata);
+
sde_mdp_parse_rot_lut_setting(pdev, mdata);
sde_mdp_parse_inline_rot_lut_setting(pdev, mdata);
@@ -588,6 +674,17 @@
return 0;
}
+static void sde_mdp_destroy_dt_misc(struct platform_device *pdev,
+ struct sde_rot_data_type *mdata)
+{
+ kfree(mdata->vbif_memtype);
+ mdata->vbif_memtype = NULL;
+ kfree(mdata->vbif_rt_qos);
+ mdata->vbif_rt_qos = NULL;
+ kfree(mdata->vbif_nrt_qos);
+ mdata->vbif_nrt_qos = NULL;
+}
+
#define MDP_REG_BUS_VECTOR_ENTRY(ab_val, ib_val) \
{ \
.src = MSM_BUS_MASTER_AMPSS_M0, \
@@ -742,6 +839,7 @@
sde_rot_res = NULL;
sde_mdp_bus_scale_unregister(mdata);
+ sde_mdp_destroy_dt_misc(pdev, mdata);
sde_rot_iounmap(&mdata->vbif_nrt_io);
sde_rot_iounmap(&mdata->sde_io);
devm_kfree(&pdev->dev, mdata);
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
index 313c709..b1438d5 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
@@ -225,6 +225,9 @@
u32 *vbif_nrt_qos;
u32 npriority_lvl;
+ u32 vbif_memtype_count;
+ u32 *vbif_memtype;
+
int iommu_attached;
int iommu_ref_cnt;
@@ -271,6 +274,8 @@
void sde_mdp_set_ot_limit(struct sde_mdp_set_ot_params *params);
+int sde_mdp_init_vbif(void);
+
#define SDE_VBIF_WRITE(mdata, offset, value) \
(sde_reg_w(&mdata->vbif_nrt_io, offset, value, 0))
#define SDE_VBIF_READ(mdata, offset) \
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
index a850bc0..5db8de7 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
@@ -424,6 +424,9 @@
if (ret)
goto error_rot_sub;
+ /* reinitialize static vbif setting */
+ sde_mdp_init_vbif();
+
/* Active+Sleep */
msm_bus_scale_client_update_context(
mgr->data_bus.bus_hdl, false,
@@ -2583,6 +2586,7 @@
{
int ret = 0, i;
int usecases;
+ struct device_node *node;
mgr->data_bus.bus_scale_pdata = msm_bus_cl_get_pdata(dev);
if (IS_ERR_OR_NULL(mgr->data_bus.bus_scale_pdata)) {
@@ -2594,12 +2598,27 @@
}
}
- mgr->reg_bus.bus_scale_pdata = &rot_reg_bus_scale_table;
- usecases = mgr->reg_bus.bus_scale_pdata->num_usecases;
- for (i = 0; i < usecases; i++) {
- rot_reg_bus_usecases[i].num_paths = 1;
- rot_reg_bus_usecases[i].vectors =
- &rot_reg_bus_vectors[i];
+ node = of_get_child_by_name(dev->dev.of_node, "qcom,rot-reg-bus");
+ if (node) {
+ mgr->reg_bus.bus_scale_pdata
+ = msm_bus_pdata_from_node(dev, node);
+ if (IS_ERR_OR_NULL(mgr->reg_bus.bus_scale_pdata)) {
+ SDEROT_ERR("reg bus pdata parsing failed\n");
+ ret = PTR_ERR(mgr->reg_bus.bus_scale_pdata);
+ if (!mgr->reg_bus.bus_scale_pdata)
+ ret = -EINVAL;
+ mgr->reg_bus.bus_scale_pdata = NULL;
+ }
+ } else {
+ SDEROT_DBG(
+ "no DT entries, configuring default reg bus table\n");
+ mgr->reg_bus.bus_scale_pdata = &rot_reg_bus_scale_table;
+ usecases = mgr->reg_bus.bus_scale_pdata->num_usecases;
+ for (i = 0; i < usecases; i++) {
+ rot_reg_bus_usecases[i].num_paths = 1;
+ rot_reg_bus_usecases[i].vectors =
+ &rot_reg_bus_vectors[i];
+ }
}
return ret;
@@ -2958,7 +2977,7 @@
*pmgr = mgr;
ret = sde_rotator_footswitch_ctrl(mgr, true);
if (ret) {
- SDEROT_ERR("res_init failed %d\n", ret);
+ SDEROT_INFO("res_init failed %d, use probe defer\n", ret);
ret = -EPROBE_DEFER;
goto error_fs_en_fail;
}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
index 3e686e9..da36e38 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
@@ -1136,6 +1136,9 @@
if (sscanf(buf, "%5x %x", &off, &cnt) < 2)
return -EINVAL;
+ if (off % sizeof(u32))
+ return -EINVAL;
+
if (off > dbg->max_offset)
return -EINVAL;
@@ -1204,6 +1207,9 @@
if (cnt < 2)
return -EFAULT;
+ if (off % sizeof(u32))
+ return -EFAULT;
+
if (off >= dbg->max_offset)
return -EFAULT;
@@ -1252,6 +1258,9 @@
goto debug_read_error;
}
+ if (dbg->off % sizeof(u32))
+ return -EFAULT;
+
ptr = dbg->base + dbg->off;
tot = 0;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
index f2778b0..8727535 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
@@ -486,7 +486,7 @@
struct sde_rotator_vbinfo *vbinfo =
&ctx->vbinfo_cap[i];
- if (vbinfo->fence && vbinfo->fd < 0) {
+ if (vbinfo->fence) {
/* fence is not used */
SDEDEV_DBG(rot_dev->dev,
"put fence s:%d t:%d i:%d\n",
@@ -2158,7 +2158,7 @@
&& (buf->index < ctx->nbuf_cap)) {
int idx = buf->index;
- if (ctx->vbinfo_cap[idx].fence && ctx->vbinfo_cap[idx].fd < 0) {
+ if (ctx->vbinfo_cap[idx].fence) {
/* fence is not used */
SDEDEV_DBG(ctx->rot_dev->dev, "put fence s:%d i:%d\n",
ctx->session_id, idx);
@@ -2487,6 +2487,7 @@
struct msm_sde_rotator_fence *fence = arg;
struct msm_sde_rotator_comp_ratio *comp_ratio = arg;
struct sde_rotator_vbinfo *vbinfo;
+ int ret;
switch (cmd) {
case VIDIOC_S_SDE_ROTATOR_FENCE:
@@ -2545,17 +2546,37 @@
vbinfo = &ctx->vbinfo_cap[fence->index];
- if (vbinfo->fence == NULL) {
- vbinfo->fd = -1;
- } else {
- vbinfo->fd =
- sde_rotator_get_sync_fence_fd(vbinfo->fence);
- if (vbinfo->fd < 0) {
+ if (!vbinfo)
+ return -EINVAL;
+
+ if (vbinfo->fence) {
+ ret = sde_rotator_get_sync_fence_fd(vbinfo->fence);
+ if (ret < 0) {
SDEDEV_ERR(rot_dev->dev,
- "fail get fence fd s:%d\n",
- ctx->session_id);
- return vbinfo->fd;
+ "fail get fence fd s:%d\n",
+ ctx->session_id);
+ return ret;
}
+
+ /**
+ * Lose any reference to sync fence once we pass
+ * it to user. Driver does not clean up user
+ * unclosed fence descriptors.
+ */
+ vbinfo->fence = NULL;
+
+ /**
+ * Cache fence descriptor in case user calls this
+ * ioctl multiple times. Cached value would be stale
+ * if user duplicated and closed old descriptor.
+ */
+ vbinfo->fd = ret;
+ } else if (!sde_rotator_get_fd_sync_fence(vbinfo->fd)) {
+ /**
+ * User has closed cached fence descriptor.
+ * Invalidate descriptor cache.
+ */
+ vbinfo->fd = -1;
}
fence->fd = vbinfo->fd;
@@ -3291,7 +3312,10 @@
ret = sde_rotator_core_init(&rot_dev->mgr, pdev);
if (ret < 0) {
- SDEDEV_ERR(&pdev->dev, "fail init core %d\n", ret);
+ if (ret == -EPROBE_DEFER)
+ SDEDEV_INFO(&pdev->dev, "probe defer for core init\n");
+ else
+ SDEDEV_ERR(&pdev->dev, "fail init core %d\n", ret);
goto error_rotator_core_init;
}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c
index 573e0a8..27e9ba6 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c
@@ -783,10 +783,15 @@
if (!fmt_found) {
for (i = 0; i < ARRAY_SIZE(sde_mdp_format_ubwc_map); i++) {
fmt = &sde_mdp_format_ubwc_map[i].mdp_format;
- if (format == fmt->format)
+ if (format == fmt->format) {
+ fmt_found = true;
break;
+ }
}
}
+ /* If format not supported then return NULL */
+ if (!fmt_found)
+ fmt = NULL;
return fmt;
}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_hwio.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_hwio.h
index de448a4..5593919 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_hwio.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_hwio.h
@@ -65,6 +65,7 @@
#define MMSS_VBIF_NRT_VBIF_IN_WR_LIM_CONF2 0x00C8
#define MMSS_VBIF_NRT_VBIF_OUT_RD_LIM_CONF0 0x00D0
#define MMSS_VBIF_NRT_VBIF_OUT_WR_LIM_CONF0 0x00D4
+#define MMSS_VBIF_NRT_VBIF_OUT_AXI_AMEMTYPE_CONF0 0x0160
#define MMSS_VBIF_NRT_VBIF_QOS_RP_REMAP_000 0x0550
#define MMSS_VBIF_NRT_VBIF_QOS_LVL_REMAP_000 0x0590
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index aa6c522..205eeef 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -57,6 +57,9 @@
#define DEFAULT_MAXLINEWIDTH 4096
+/* stride alignment requirement for avoiding partial writes */
+#define PARTIAL_WRITE_ALIGNMENT 0x1F
+
/* Macro for constructing the REGDMA command */
#define SDE_REGDMA_WRITE(p, off, data) \
do { \
@@ -869,6 +872,8 @@
SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_SIZE, 0x00010001);
SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_IMG_SIZE, 0x00010001);
SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_XY, 0);
+ SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG,
+ (ctx->rot->highest_bank & 0x3) << 8);
SDE_REGDMA_WRITE(wrptr, ROTTOP_DNSC, 0);
SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, 1);
SDE_REGDMA_MODIFY(wrptr, REGDMA_TIMESTAMP_REG, mask, swts);
@@ -1270,7 +1275,7 @@
u32 *wrptr;
u32 pack = 0;
u32 dst_format = 0;
- u32 partial_write = 0;
+ u32 no_partial_writes = 0;
int i;
wrptr = sde_hw_rotator_get_regdma_segment(ctx);
@@ -1355,12 +1360,34 @@
(cfg->h_downscale_factor << 16));
/* partial write check */
- if (test_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map) &&
- !sde_mdp_is_ubwc_format(fmt))
- partial_write = BIT(10);
+ if (test_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map)) {
+ no_partial_writes = BIT(10);
+
+ /*
+ * For simplicity, don't disable partial writes if
+ * the ROI does not span the entire width of the
+ * output image, and require the total stride to
+ * also be properly aligned.
+ *
+ * This avoids having to determine the memory access
+ * alignment of the actual horizontal ROI on a per
+ * color format basis.
+ */
+ if (sde_mdp_is_ubwc_format(fmt)) {
+ no_partial_writes = 0x0;
+ } else if (cfg->dst_rect->x ||
+ cfg->dst_rect->w != cfg->img_width) {
+ no_partial_writes = 0x0;
+ } else {
+ for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
+ if (cfg->dst_plane.ystride[i] &
+ PARTIAL_WRITE_ALIGNMENT)
+ no_partial_writes = 0x0;
+ }
+ }
/* write config setup for bank configuration */
- SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG, partial_write |
+ SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG, no_partial_writes |
(ctx->rot->highest_bank & 0x3) << 8);
if (test_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map))
@@ -1419,6 +1446,9 @@
if (bw > 0xFF)
bw = 0xFF;
+ else if (bw == 0)
+ bw = 1;
+
SDE_REGDMA_WRITE(wrptr, ROT_WB_TRAFFIC_SHAPER_WR_CLIENT,
BIT(31) | (cfg->prefill_bw ? BIT(27) : 0) | bw);
SDEROT_DBG("Enable ROT_WB Traffic Shaper:%d\n", bw);
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
index e209192..9e47187 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
@@ -519,7 +519,8 @@
char name[MAX_CLIENT_NAME_LEN];
if (!mdata) {
- SDEROT_ERR("probe failed as mdata is not initialized\n");
+ SDEROT_INFO(
+ "probe failed as mdata is not initializedi, probe defer\n");
return -EPROBE_DEFER;
}
diff --git a/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c b/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c
index d329a8b..a93f054 100644
--- a/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c
+++ b/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c
@@ -330,7 +330,9 @@
*/
/* Decoder parameters */
int width, height, lcu_size, dpb_bpp, opb_bpp, fps, opb_factor;
- bool unified_dpb_opb, dpb_compression_enabled, opb_compression_enabled;
+ bool unified_dpb_opb, dpb_compression_enabled, opb_compression_enabled,
+ llc_ref_read_l2_cache_enabled = false,
+ llc_vpss_ds_line_buf_enabled = false;
fp_t dpb_opb_scaling_ratio, dpb_read_compression_factor,
dpb_write_compression_factor, opb_compression_factor,
qsmmu_bw_overhead_factor, height_ratio;
@@ -342,7 +344,8 @@
fp_t bins_to_bit_factor, dpb_write_factor, ten_bpc_packing_factor,
ten_bpc_bpp_factor, vsp_read_factor, vsp_write_factor,
bw_for_1x_8bpc, dpb_bw_for_1x,
- motion_vector_complexity = 0, row_cache_penalty = 0, opb_bw = 0;
+ motion_vector_complexity = 0, row_cache_penalty = 0, opb_bw = 0,
+ dpb_total = 0;
/* Output parameters */
struct {
@@ -352,6 +355,10 @@
total;
} ddr = {0};
+ struct {
+ fp_t dpb_read, opb_read, total;
+ } llc = {0};
+
unsigned long ret = 0;
unsigned int integer_part, frac_part;
@@ -387,16 +394,31 @@
integer_part = d->compression_ratio >> 16;
frac_part =
- ((d->compression_ratio - (integer_part * 65536)) * 100) >> 16;
+ ((d->compression_ratio - (integer_part << 16)) * 100) >> 16;
dpb_read_compression_factor = FP(integer_part, frac_part, 100);
+ integer_part = d->complexity_factor >> 16;
+ frac_part =
+ ((d->complexity_factor - (integer_part << 16)) * 100) >> 16;
+
+ motion_vector_complexity = FP(integer_part, frac_part, 100);
+
dpb_write_compression_factor = !dpb_compression_enabled ? FP_ONE :
__compression_ratio(__lut(width, height, fps), opb_bpp);
+ dpb_write_compression_factor = d->use_dpb_read ?
+ dpb_read_compression_factor :
+ dpb_write_compression_factor;
+
opb_compression_factor = !opb_compression_enabled ? FP_ONE :
__compression_ratio(__lut(width, height, fps), opb_bpp);
+ llc_ref_read_l2_cache_enabled = llc_vpss_ds_line_buf_enabled = false;
+ if (d->use_sys_cache) {
+ llc_ref_read_l2_cache_enabled = true;
+ llc_vpss_ds_line_buf_enabled = true;
+ }
/* Derived parameters setup */
lcu_per_frame = DIV_ROUND_UP(width, lcu_size) *
@@ -437,8 +459,6 @@
lcu_per_frame * fps / bps(1));
ddr.line_buffer_write = ddr.line_buffer_read;
- motion_vector_complexity = FP_INT(4);
-
bw_for_1x_8bpc = fp_div(FP_INT(width * height), FP_INT(32 * 8));
bw_for_1x_8bpc = fp_mult(bw_for_1x_8bpc,
@@ -454,6 +474,12 @@
ddr.dpb_write = fp_div(fp_mult(dpb_bw_for_1x, dpb_write_factor),
dpb_write_compression_factor);
+ dpb_total = ddr.dpb_read + ddr.dpb_write;
+ if (llc_ref_read_l2_cache_enabled) {
+ row_cache_penalty = FP(1, 30, 100);
+ ddr.dpb_read = fp_div(ddr.dpb_read, row_cache_penalty);
+ llc.dpb_read = dpb_total - ddr.dpb_read;
+ }
opb_factor = dpb_bpp == 8 ? 8 : 4;
@@ -465,6 +491,11 @@
FP(1, 50, 100)), dpb_opb_scaling_ratio),
opb_compression_factor);
+ if (llc_vpss_ds_line_buf_enabled) {
+ llc.opb_read = ddr.opb_read;
+ ddr.opb_write -= ddr.opb_read;
+ ddr.opb_read = 0;
+ }
ddr.total = ddr.vsp_read + ddr.vsp_write +
ddr.collocated_read + ddr.collocated_write +
ddr.opb_read + ddr.opb_write +
@@ -473,6 +504,7 @@
qsmmu_bw_overhead_factor = FP(1, 3, 100);
ddr.total = fp_mult(ddr.total, qsmmu_bw_overhead_factor);
+ llc.total = llc.dpb_read + llc.opb_read;
/* Dump all the variables for easier debugging */
if (debug) {
@@ -513,6 +545,8 @@
{"DERIVED PARAMETERS (2)", "", DUMP_HEADER_MAGIC},
{"MV complexity", DUMP_FP_FMT, motion_vector_complexity},
{"row cache penalty", DUMP_FP_FMT, row_cache_penalty},
+ {"qsmmu_bw_overhead_factor", DUMP_FP_FMT,
+ qsmmu_bw_overhead_factor},
{"OPB B/W (single instance)", DUMP_FP_FMT, opb_bw},
{"INTERMEDIATE DDR B/W", "", DUMP_HEADER_MAGIC},
@@ -528,6 +562,8 @@
{"OPB write", DUMP_FP_FMT, ddr.opb_write},
{"DPB read", DUMP_FP_FMT, ddr.dpb_read},
{"DPB write", DUMP_FP_FMT, ddr.dpb_write},
+ {"LLC DPB read", DUMP_FP_FMT, llc.dpb_read},
+ {"LLC OPB read", DUMP_FP_FMT, llc.opb_read},
};
__dump(dump, ARRAY_SIZE(dump));
@@ -538,7 +574,7 @@
ret = kbps(fp_round(ddr.total));
break;
case GOVERNOR_LLCC:
- dprintk(VIDC_PROF, "LLCC Voting not supported yet\n");
+ ret = kbps(fp_round(llc.total));
break;
default:
dprintk(VIDC_ERR, "%s - Unknown governor\n", __func__);
@@ -557,32 +593,25 @@
*/
/* Encoder Parameters */
- enum hal_video_codec standard;
- int width, height, fps;
- enum hal_uncompressed_format dpb_color_format;
- enum hal_uncompressed_format original_color_format;
+ int width, height, fps, dpb_bpp, lcu_per_frame, lcu_size,
+ vertical_tile_width, colocated_bytes_per_lcu, bitrate,
+ ref_overlap_bw_factor;
+ enum hal_uncompressed_format dpb_color_format, original_color_format;
bool dpb_compression_enabled, original_compression_enabled,
- two_stage_encoding, low_power, rotation, cropping_or_scaling;
+ work_mode_1, low_power, rotation, cropping_or_scaling,
+ b_frames_enabled = false,
+ llc_dual_core_ref_read_buf_enabled = false,
+ llc_top_line_buf_enabled = false,
+ llc_ref_chroma_cache_enabled = false;
fp_t dpb_compression_factor, original_compression_factor,
- qsmmu_bw_overhead_factor;
- bool b_frames_enabled;
-
- /* Derived Parameters */
- int lcu_size;
- enum gop {
- GOP_IBBP,
- GOP_IPPP,
- } gop;
- unsigned long bitrate;
- fp_t bins_to_bit_factor, chroma_luma_factor_dpb, one_frame_bw_dpb,
- chroma_luma_factor_original, one_frame_bw_original,
- line_buffer_size_per_lcu, line_buffer_size, line_buffer_bw,
- bw_increase_p, bw_increase_b;
- int collocated_mv_per_lcu, max_transaction_size,
- search_window_size_vertical_p, search_window_factor_p,
- search_window_factor_bw_p,
- search_window_size_vertical_b, search_window_factor_b,
- search_window_factor_bw_b;
+ input_compression_factor, qsmmu_bw_overhead_factor,
+ ref_y_bw_factor, ref_cb_cr_bw_factor, ten_bpc_bpp_factor,
+ bw_for_1x_8bpc, dpb_bw_for_1x, ref_cb_cr_read,
+ bins_to_bit_factor, ref_y_read, ten_bpc_packing_factor,
+ dpb_write_factor, ref_overlap_bw, llc_ref_y_read,
+ llc_ref_cb_cr_read;
+ fp_t integer_part, frac_part;
+ unsigned long ret = 0;
/* Output paramaters */
struct {
@@ -591,27 +620,49 @@
original_write, dpb_read, dpb_write, total;
} ddr = {0};
- unsigned long ret = 0;
- fp_t integer_part, frac_part;
+ struct {
+ fp_t dpb_read, line_buffer, total;
+ } llc = {0};
/* Encoder Parameters setup */
+ ten_bpc_packing_factor = FP(1, 67, 1000);
+ ten_bpc_bpp_factor = FP(1, 1, 4);
+ rotation = false;
+ cropping_or_scaling = false;
+ vertical_tile_width = 960;
+ ref_y_bw_factor = FP(1, 30, 100);
+ ref_cb_cr_bw_factor = FP(1, 50, 100);
+ dpb_write_factor = FP(1, 8, 100);
- standard = d->codec;
+
+ /* Derived Parameters */
+ lcu_size = d->lcu_size;
+ fps = d->fps;
+ b_frames_enabled = d->b_frames_enabled;
width = max(d->input_width, BASELINE_DIMENSIONS.width);
height = max(d->input_height, BASELINE_DIMENSIONS.height);
+ bitrate = __lut(width, height, fps)->bitrate;
+ lcu_per_frame = DIV_ROUND_UP(width, lcu_size) *
+ DIV_ROUND_UP(height, lcu_size);
dpb_color_format = HAL_COLOR_FORMAT_NV12_UBWC;
original_color_format = d->num_formats >= 1 ?
d->color_formats[0] : HAL_UNUSED_COLOR;
- fps = d->fps;
+ dpb_bpp = d->num_formats >= 1 ? __bpp(d->color_formats[0]) : INT_MAX;
dpb_compression_enabled = __ubwc(dpb_color_format);
original_compression_enabled = __ubwc(original_color_format);
- two_stage_encoding = false;
+ work_mode_1 = d->work_mode == VIDC_WORK_MODE_1;
low_power = d->power_mode == VIDC_POWER_LOW;
- b_frames_enabled = false;
+ bins_to_bit_factor = work_mode_1 ?
+ FP_INT(0) : FP_INT(4);
+
+ if (d->use_sys_cache) {
+ llc_dual_core_ref_read_buf_enabled = true;
+ llc_ref_chroma_cache_enabled = true;
+ }
/*
* Convert Q16 number into Integer and Fractional part upto 2 places.
@@ -628,96 +679,106 @@
dpb_compression_factor = FP(integer_part, frac_part, 100);
- original_compression_factor = dpb_compression_factor;
+ integer_part = d->input_cr >> 16;
+ frac_part =
+ ((d->input_cr - (integer_part * 65536)) * 100) >> 16;
- rotation = false;
- cropping_or_scaling = false;
+ input_compression_factor = FP(integer_part, frac_part, 100);
- /* Derived Parameters */
- lcu_size = 16;
- gop = b_frames_enabled ? GOP_IBBP : GOP_IPPP;
- bitrate = __lut(width, height, fps)->bitrate;
- bins_to_bit_factor = FP(1, 6, 10);
+ original_compression_factor =
+ original_compression_enabled ? d->use_dpb_read ?
+ dpb_compression_factor : input_compression_factor :
+ FP_ONE;
- /*
- * FIXME: Minor color format related hack: a lot of the derived params
- * depend on the YUV bitdepth as a variable. However, we don't have
- * appropriate enums defined yet (hence no support). As a result omit
- * a lot of the checks (which should look like the snippet below) in
- * favour of hardcoding.
- * dpb_color_format == YUV420 ? 0.5 :
- * dpb_color_format == YUV422 ? 1.0 : 2.0
- * Similar hacks are annotated inline in code with the string "CF hack"
- * for documentation purposes.
- */
- chroma_luma_factor_dpb = FP(0, 1, 2);
- one_frame_bw_dpb = fp_mult(FP_ONE + chroma_luma_factor_dpb,
- fp_div(FP_INT(width * height * fps),
- FP_INT(1000 * 1000)));
-
- chroma_luma_factor_original = FP(0, 1, 2); /* XXX: CF hack */
- one_frame_bw_original = fp_mult(FP_ONE + chroma_luma_factor_original,
- fp_div(FP_INT(width * height * fps),
- FP_INT(1000 * 1000)));
-
- line_buffer_size_per_lcu = FP_ZERO;
- if (lcu_size == 16)
- line_buffer_size_per_lcu = FP_INT(128) + fp_mult(FP_INT(256),
- FP_ONE /*XXX: CF hack */);
- else
- line_buffer_size_per_lcu = FP_INT(192) + fp_mult(FP_INT(512),
- FP_ONE /*XXX: CF hack */);
-
- line_buffer_size = fp_div(
- fp_mult(FP_INT(width / lcu_size),
- line_buffer_size_per_lcu),
- FP_INT(1024));
- line_buffer_bw = fp_mult(line_buffer_size,
- fp_div(FP_INT((height / lcu_size /
- (two_stage_encoding ? 2 : 1) - 1) * fps),
- FP_INT(1000)));
-
- collocated_mv_per_lcu = lcu_size == 16 ? 16 : 64;
- max_transaction_size = 256;
-
- search_window_size_vertical_p = low_power ? 32 :
- b_frames_enabled ? 80 :
- width > 2048 ? 64 : 48;
- search_window_factor_p = search_window_size_vertical_p * 2 / lcu_size;
- search_window_factor_bw_p = !two_stage_encoding ?
- search_window_size_vertical_p * 2 / lcu_size + 1 :
- (search_window_size_vertical_p * 2 / lcu_size + 2) / 2;
- bw_increase_p = fp_mult(one_frame_bw_dpb,
- FP_INT(search_window_factor_bw_p - 1) / 3);
-
- search_window_size_vertical_b = 48;
- search_window_factor_b = search_window_size_vertical_b * 2 / lcu_size;
- search_window_factor_bw_b = !two_stage_encoding ?
- search_window_size_vertical_b * 2 / lcu_size + 1 :
- (search_window_size_vertical_b * 2 / lcu_size + 2) / 2;
- bw_increase_b = fp_mult(one_frame_bw_dpb,
- FP_INT((search_window_factor_bw_b - 1) / 3));
-
- /* Output parameters for DDR */
ddr.vsp_read = fp_mult(fp_div(FP_INT(bitrate), FP_INT(8)),
bins_to_bit_factor);
ddr.vsp_write = ddr.vsp_read + fp_div(FP_INT(bitrate), FP_INT(8));
- ddr.collocated_read = fp_div(FP_INT(DIV_ROUND_UP(width, lcu_size) *
- DIV_ROUND_UP(height, lcu_size) *
- collocated_mv_per_lcu * fps), FP_INT(1000 * 1000));
+ colocated_bytes_per_lcu = lcu_size == 16 ? 16 :
+ lcu_size == 32 ? 64 : 256;
+
+ ddr.collocated_read = FP_INT(lcu_per_frame *
+ colocated_bytes_per_lcu * fps / bps(1));
+
ddr.collocated_write = ddr.collocated_read;
+ ddr.line_buffer_read = FP_INT(16 * lcu_per_frame * fps / bps(1));
+
ddr.line_buffer_write = ddr.line_buffer_read;
- ddr.original_read = fp_div(one_frame_bw_original,
- original_compression_factor);
+ llc.line_buffer = ddr.line_buffer_read + ddr.line_buffer_write;
+ if (llc_top_line_buf_enabled)
+ ddr.line_buffer_read = ddr.line_buffer_write = FP_INT(0);
+
+ llc.line_buffer -= (ddr.line_buffer_read + ddr.line_buffer_write);
+
+ bw_for_1x_8bpc = fp_div(FP_INT(width * height), FP_INT(32 * 8));
+
+ bw_for_1x_8bpc = fp_mult(bw_for_1x_8bpc,
+ fp_div(FP_INT(256 * 30), FP_INT(1000 * 1000)));
+
+ dpb_bw_for_1x = dpb_bpp == 8 ? bw_for_1x_8bpc :
+ fp_mult(bw_for_1x_8bpc, fp_mult(ten_bpc_packing_factor,
+ ten_bpc_bpp_factor));
+
+ ddr.original_read = fp_div(fp_mult(FP(1, 50, 100), dpb_bw_for_1x),
+ input_compression_factor);
+
ddr.original_write = FP_ZERO;
- ddr.dpb_read = FP_ZERO;
+ ref_y_bw_factor =
+ width == vertical_tile_width ? FP_INT(1) : ref_y_bw_factor;
- ddr.dpb_read = fp_div(ddr.dpb_read, dpb_compression_factor);
- ddr.dpb_write = fp_div(one_frame_bw_dpb, dpb_compression_factor);
+ ref_y_read = fp_mult(ref_y_bw_factor, dpb_bw_for_1x);
+
+ ref_y_read = fp_div(ref_y_read, dpb_compression_factor);
+
+ ref_y_read =
+ b_frames_enabled ? fp_mult(ref_y_read, FP_INT(2)) : ref_y_read;
+
+ llc_ref_y_read = ref_y_read;
+ if (llc_dual_core_ref_read_buf_enabled)
+ ref_y_read = fp_div(ref_y_read, FP_INT(2));
+
+ llc_ref_y_read -= ref_y_read;
+
+ ref_cb_cr_read = fp_mult(ref_cb_cr_bw_factor, dpb_bw_for_1x);
+
+ ref_cb_cr_read = fp_div(ref_cb_cr_read, dpb_compression_factor);
+
+ ref_cb_cr_read =
+ b_frames_enabled ? fp_mult(ref_cb_cr_read, FP_INT(2)) :
+ ref_cb_cr_read;
+
+ llc_ref_cb_cr_read = ref_cb_cr_read;
+
+ if (llc_ref_chroma_cache_enabled)
+ ref_cb_cr_read = fp_div(ref_cb_cr_read, ref_cb_cr_bw_factor);
+
+ if (llc_dual_core_ref_read_buf_enabled)
+ ref_cb_cr_read = fp_div(ref_cb_cr_read, FP_INT(2));
+
+ llc_ref_cb_cr_read -= ref_cb_cr_read;
+
+ ddr.dpb_write = fp_mult(dpb_write_factor, dpb_bw_for_1x);
+
+ ddr.dpb_write = fp_mult(ddr.dpb_write, FP(1, 50, 100));
+
+ ddr.dpb_write = fp_div(ddr.dpb_write, input_compression_factor);
+
+ ref_overlap_bw_factor =
+ width <= vertical_tile_width ? FP_INT(0) : FP_INT(1);
+
+ ref_overlap_bw = fp_mult(ddr.dpb_write, ref_overlap_bw_factor);
+
+ ref_overlap_bw = fp_div(ref_overlap_bw, dpb_write_factor);
+
+ ref_overlap_bw = fp_mult(ref_overlap_bw,
+ (dpb_write_factor - FP_INT(1)));
+
+ ddr.dpb_read = ref_y_read + ref_cb_cr_read + ref_overlap_bw;
+
+ llc.dpb_read = llc_ref_y_read + llc_ref_cb_cr_read;
ddr.total = ddr.vsp_read + ddr.vsp_write +
ddr.collocated_read + ddr.collocated_write +
@@ -725,13 +786,14 @@
ddr.original_read + ddr.original_write +
ddr.dpb_read + ddr.dpb_write;
+ llc.total = llc.dpb_read + llc.line_buffer;
+
qsmmu_bw_overhead_factor = FP(1, 3, 100);
ddr.total = fp_mult(ddr.total, qsmmu_bw_overhead_factor);
if (debug) {
struct dump dump[] = {
{"ENCODER PARAMETERS", "", DUMP_HEADER_MAGIC},
- {"standard", "%#x", standard},
{"width", "%d", width},
{"height", "%d", height},
{"DPB format", "%#x", dpb_color_format},
@@ -740,8 +802,8 @@
{"DPB compression enable", "%d", dpb_compression_enabled},
{"original compression enable", "%d",
original_compression_enabled},
- {"two stage encoding", "%d", two_stage_encoding},
{"low power mode", "%d", low_power},
+ {"Work Mode", "%d", work_mode_1},
{"DPB compression factor", DUMP_FP_FMT,
dpb_compression_factor},
{"original compression factor", DUMP_FP_FMT,
@@ -751,46 +813,27 @@
{"DERIVED PARAMETERS", "", DUMP_HEADER_MAGIC},
{"LCU size", "%d", lcu_size},
- {"GOB pattern", "%d", gop},
{"bitrate (Mbit/sec)", "%lu", bitrate},
{"bins to bit factor", DUMP_FP_FMT, bins_to_bit_factor},
- {"B-frames enabled", "%d", b_frames_enabled},
- {"search window size vertical (B)", "%d",
- search_window_size_vertical_b},
- {"search window factor (B)", "%d", search_window_factor_b},
- {"search window factor BW (B)", "%d",
- search_window_factor_bw_b},
- {"bw increase (MB/s) (B)", DUMP_FP_FMT, bw_increase_b},
- {"search window size vertical (P)", "%d",
- search_window_size_vertical_p},
- {"search window factor (P)", "%d", search_window_factor_p},
- {"search window factor BW (P)", "%d",
- search_window_factor_bw_p},
- {"bw increase (MB/s) (P)", DUMP_FP_FMT, bw_increase_p},
- {"chroma/luma factor DPB", DUMP_FP_FMT,
- chroma_luma_factor_dpb},
- {"one frame BW DPB (MB/s)", DUMP_FP_FMT, one_frame_bw_dpb},
- {"chroma/Luma factor original", DUMP_FP_FMT,
- chroma_luma_factor_original},
- {"one frame BW original (MB/s)", DUMP_FP_FMT,
- one_frame_bw_original},
- {"line buffer size per LCU", DUMP_FP_FMT,
- line_buffer_size_per_lcu},
- {"line buffer size (KB)", DUMP_FP_FMT, line_buffer_size},
- {"line buffer BW (MB/s)", DUMP_FP_FMT, line_buffer_bw},
- {"collocated MVs per LCU", "%d", collocated_mv_per_lcu},
+ {"qsmmu_bw_overhead_factor",
+ DUMP_FP_FMT, qsmmu_bw_overhead_factor},
{"INTERMEDIATE B/W DDR", "", DUMP_HEADER_MAGIC},
+ {"ref_y_read", DUMP_FP_FMT, ref_y_read},
+ {"ref_cb_cr_read", DUMP_FP_FMT, ref_cb_cr_read},
+ {"ref_overlap_bw", DUMP_FP_FMT, ref_overlap_bw},
{"VSP read", DUMP_FP_FMT, ddr.vsp_read},
- {"VSP read", DUMP_FP_FMT, ddr.vsp_write},
+ {"VSP write", DUMP_FP_FMT, ddr.vsp_write},
{"collocated read", DUMP_FP_FMT, ddr.collocated_read},
- {"collocated read", DUMP_FP_FMT, ddr.collocated_write},
+ {"collocated write", DUMP_FP_FMT, ddr.collocated_write},
{"line buffer read", DUMP_FP_FMT, ddr.line_buffer_read},
- {"line buffer read", DUMP_FP_FMT, ddr.line_buffer_write},
+ {"line buffer write", DUMP_FP_FMT, ddr.line_buffer_write},
{"original read", DUMP_FP_FMT, ddr.original_read},
- {"original read", DUMP_FP_FMT, ddr.original_write},
+ {"original write", DUMP_FP_FMT, ddr.original_write},
{"DPB read", DUMP_FP_FMT, ddr.dpb_read},
{"DPB write", DUMP_FP_FMT, ddr.dpb_write},
+ {"LLC DPB read", DUMP_FP_FMT, llc.dpb_read},
+ {"LLC Line buffer", DUMP_FP_FMT, llc.line_buffer},
};
__dump(dump, ARRAY_SIZE(dump));
}
@@ -800,7 +843,7 @@
ret = kbps(fp_round(ddr.total));
break;
case GOVERNOR_LLCC:
- dprintk(VIDC_PROF, "LLCC Voting not supported yet\n");
+ ret = kbps(fp_round(llc.total));
break;
default:
dprintk(VIDC_ERR, "%s - Unknown governor\n", __func__);
@@ -819,6 +862,11 @@
[HAL_VIDEO_DOMAIN_DECODER] = __calculate_decoder,
};
+ if (d->domain >= ARRAY_SIZE(calc)) {
+ dprintk(VIDC_ERR, "%s: invalid domain %d\n",
+ __func__, d->domain);
+ return 0;
+ }
return calc[d->domain](d, gm);
}
@@ -833,10 +881,6 @@
if (!dev || !freq)
return -EINVAL;
- /* Start with highest frequecy and decide correct one later*/
-
- ab_kbps = INT_MAX;
-
gov = container_of(dev->governor,
struct governor, devfreq_gov);
dev->profile->get_dev_status(dev->dev.parent, &stats);
@@ -847,11 +891,11 @@
for (c = 0; c < vidc_data->data_count; ++c) {
if (vidc_data->data->power_mode == VIDC_POWER_TURBO) {
+ ab_kbps = INT_MAX;
goto exit;
}
}
- ab_kbps = 0;
for (c = 0; c < vidc_data->data_count; ++c)
ab_kbps += __calculate(&vidc_data->data[c], gov->mode);
@@ -871,6 +915,7 @@
switch (event) {
case DEVFREQ_GOV_START:
case DEVFREQ_GOV_RESUME:
+ case DEVFREQ_GOV_SUSPEND:
mutex_lock(&devfreq->lock);
rc = update_devfreq(devfreq);
mutex_unlock(&devfreq->lock);
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index 40c306d..e49ea72 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -616,6 +616,9 @@
case HAL_EXTRADATA_VPX_COLORSPACE:
ret = HFI_PROPERTY_PARAM_VDEC_VPX_COLORSPACE_EXTRADATA;
break;
+ case HAL_EXTRADATA_UBWC_CR_STATS_INFO:
+ ret = HFI_PROPERTY_PARAM_VDEC_UBWC_CR_STAT_INFO_EXTRADATA;
+ break;
default:
dprintk(VIDC_WARN, "Extradata index not found: %d\n", index);
break;
@@ -1235,10 +1238,8 @@
pkt->rg_property_data[1] = HFI_RATE_CONTROL_CBR_VFR;
break;
case HAL_RATE_CONTROL_VBR_CFR:
- pkt->rg_property_data[1] = HFI_RATE_CONTROL_VBR_CFR;
- break;
case HAL_RATE_CONTROL_VBR_VFR:
- pkt->rg_property_data[1] = HFI_RATE_CONTROL_VBR_VFR;
+ pkt->rg_property_data[1] = HFI_RATE_CONTROL_VBR_CFR;
break;
case HAL_RATE_CONTROL_MBR_CFR:
pkt->rg_property_data[1] = HFI_RATE_CONTROL_MBR_CFR;
@@ -1290,7 +1291,7 @@
struct hal_quantization *hal_quant =
(struct hal_quantization *) pdata;
pkt->rg_property_data[0] =
- HFI_PROPERTY_CONFIG_VENC_SESSION_QP;
+ HFI_PROPERTY_CONFIG_VENC_FRAME_QP;
hfi = (struct hfi_quantization *) &pkt->rg_property_data[1];
hfi->qp_packed = hal_quant->qpi | hal_quant->qpp << 8 |
hal_quant->qpb << 16;
@@ -1357,11 +1358,15 @@
pkt->rg_property_data[0] =
HFI_PROPERTY_PARAM_VDEC_CONCEAL_COLOR;
hfi = (struct hfi_conceal_color *) &pkt->rg_property_data[1];
- if (hfi)
- hfi->conceal_color =
+ if (hfi) {
+ hfi->conceal_color_8bit =
((struct hfi_conceal_color *) pdata)->
- conceal_color;
- pkt->size += sizeof(u32) * 2;
+ conceal_color_8bit;
+ hfi->conceal_color_10bit =
+ ((struct hfi_conceal_color *) pdata)->
+ conceal_color_10bit;
+ }
+ pkt->size += sizeof(u32) + sizeof(struct hfi_conceal_color);
break;
}
case HAL_PARAM_VPE_ROTATION:
diff --git a/drivers/media/platform/msm/vidc/hfi_response_handler.c b/drivers/media/platform/msm/vidc/hfi_response_handler.c
index c2a93a96..efe4ca3 100644
--- a/drivers/media/platform/msm/vidc/hfi_response_handler.c
+++ b/drivers/media/platform/msm/vidc/hfi_response_handler.c
@@ -337,26 +337,26 @@
cmd_done.device_id = device_id;
cmd_done.session_id = (void *)(uintptr_t)pkt->session_id;
cmd_done.status = hfi_map_err_status(pkt->event_data1);
+ info->response.cmd = cmd_done;
dprintk(VIDC_INFO, "Received: SESSION_ERROR with event id : %#x %#x\n",
pkt->event_data1, pkt->event_data2);
switch (pkt->event_data1) {
+ /* Ignore below errors */
case HFI_ERR_SESSION_INVALID_SCALE_FACTOR:
- case HFI_ERR_SESSION_UNSUPPORT_BUFFERTYPE:
- case HFI_ERR_SESSION_UNSUPPORTED_SETTING:
case HFI_ERR_SESSION_UPSCALE_NOT_SUPPORTED:
- cmd_done.status = VIDC_ERR_NONE;
dprintk(VIDC_INFO, "Non Fatal: HFI_EVENT_SESSION_ERROR\n");
info->response_type = HAL_RESPONSE_UNUSED;
- info->response.cmd = cmd_done;
- return 0;
+ break;
default:
+ /* All other errors are not expected and treated as sys error */
dprintk(VIDC_ERR,
- "HFI_EVENT_SESSION_ERROR: data1 %#x, data2 %#x\n",
- pkt->event_data1, pkt->event_data2);
- info->response_type = HAL_SESSION_ERROR;
- info->response.cmd = cmd_done;
- return 0;
+ "%s: data1 %#x, data2 %#x, treat as sys error\n",
+ __func__, pkt->event_data1, pkt->event_data2);
+ info->response_type = HAL_SYS_ERROR;
+ break;
}
+
+ return 0;
}
static int hfi_process_event_notify(u32 device_id,
@@ -775,6 +775,47 @@
return size;
}
+static int copy_profile_caps_to_sessions(struct hfi_profile_level *prof,
+ u32 profile_count, struct msm_vidc_capability *capabilities,
+ u32 num_sessions, u32 codecs, u32 domain)
+{
+ u32 i = 0, j = 0;
+ struct msm_vidc_capability *capability;
+ u32 sess_codec;
+ u32 sess_domain;
+
+ /*
+ * iterate over num_sessions and copy all the profile capabilities
+ * to matching sessions.
+ */
+ for (i = 0; i < num_sessions; i++) {
+ sess_codec = 0;
+ sess_domain = 0;
+ capability = &capabilities[i];
+
+ if (capability->codec)
+ sess_codec =
+ vidc_get_hfi_codec(capability->codec);
+ if (capability->domain)
+ sess_domain =
+ vidc_get_hfi_domain(capability->domain);
+
+ if (!(sess_codec & codecs && sess_domain & domain))
+ continue;
+
+ capability->profile_level.profile_count = profile_count;
+ for (j = 0; j < profile_count; j++) {
+ /* HFI and HAL follow same enums, hence no conversion */
+ capability->profile_level.profile_level[j].profile =
+ prof[j].profile;
+ capability->profile_level.profile_level[j].level =
+ prof[j].level;
+ }
+ }
+
+ return 0;
+}
+
static int copy_caps_to_sessions(struct hfi_capability_supported *cap,
u32 num_caps, struct msm_vidc_capability *capabilities,
u32 num_sessions, u32 codecs, u32 domain)
@@ -914,38 +955,25 @@
}
case HFI_PROPERTY_PARAM_PROFILE_LEVEL_SUPPORTED:
{
- struct msm_vidc_capability capability;
- char *ptr = NULL;
- u32 count = 0;
- u32 prof_count = 0;
- struct hfi_profile_level *prof_level;
struct hfi_profile_level_supported *prop =
(struct hfi_profile_level_supported *)
(data_ptr + next_offset);
- ptr = (char *) &prop->rg_profile_level[0];
- prof_count = prop->profile_count;
- next_offset += sizeof(u32);
+ next_offset += sizeof(u32) +
+ prop->profile_count *
+ sizeof(struct hfi_profile_level);
- if (prof_count > MAX_PROFILE_COUNT) {
- prof_count = MAX_PROFILE_COUNT;
+ if (prop->profile_count > MAX_PROFILE_COUNT) {
+ prop->profile_count = MAX_PROFILE_COUNT;
dprintk(VIDC_WARN,
"prop count exceeds max profile count\n");
break;
}
- while (prof_count) {
- prof_level = (struct hfi_profile_level *)ptr;
- capability.
- profile_level.profile_level[count].profile
- = prof_level->profile;
- capability.
- profile_level.profile_level[count].level
- = prof_level->level;
- prof_count--;
- count++;
- ptr += sizeof(struct hfi_profile_level);
- next_offset += sizeof(struct hfi_profile_level);
- }
+
+ copy_profile_caps_to_sessions(
+ &prop->rg_profile_level[0],
+ prop->profile_count, capabilities,
+ num_sessions, codecs, domain);
num_properties--;
break;
}
@@ -1140,6 +1168,12 @@
buffreq->buffer[10].buffer_type =
HAL_BUFFER_INTERNAL_PERSIST_1;
break;
+ case HFI_BUFFER_COMMON_INTERNAL_RECON:
+ memcpy(&buffreq->buffer[11], hfi_buf_req,
+ sizeof(struct hfi_buffer_requirements));
+ buffreq->buffer[11].buffer_type =
+ HAL_BUFFER_INTERNAL_RECON;
+ break;
default:
dprintk(VIDC_ERR,
"hal_process_sess_get_prop_buf_req: bad_buffer_type: %d\n",
diff --git a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
index 8c63469..54766a2 100644
--- a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
@@ -347,6 +347,14 @@
}
INIT_DELAYED_WORK(&core->fw_unload_work, msm_vidc_fw_unload_handler);
+
+ mutex_lock(&core->lock);
+ core->vote_data = kcalloc(MAX_SUPPORTED_INSTANCES,
+ sizeof(*core->vote_data), GFP_KERNEL);
+ if (!core->vote_data)
+ dprintk(VIDC_ERR, "%s: failed to allocate memory\n", __func__);
+ mutex_unlock(&core->lock);
+
return rc;
}
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index 554e89a..988f79c 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -24,7 +24,8 @@
#define MIN_NUM_THUMBNAIL_MODE_CAPTURE_BUFFERS MIN_NUM_CAPTURE_BUFFERS
#define MIN_NUM_DEC_OUTPUT_BUFFERS 4
#define MIN_NUM_DEC_CAPTURE_BUFFERS 4
-#define DEFAULT_VIDEO_CONCEAL_COLOR_BLACK 0x8010
+// Y=16(0-9bits), Cb(10-19bits)=Cr(20-29bits)=128, black by default
+#define DEFAULT_VIDEO_CONCEAL_COLOR_BLACK 0x8020010
#define MB_SIZE_IN_PIXEL (16 * 16)
#define OPERATING_FRAME_RATE_STEP (1 << 16)
@@ -152,7 +153,7 @@
.name = "Extradata Type",
.type = V4L2_CTRL_TYPE_MENU,
.minimum = V4L2_MPEG_VIDC_EXTRADATA_NONE,
- .maximum = V4L2_MPEG_VIDC_EXTRADATA_VPX_COLORSPACE,
+ .maximum = V4L2_MPEG_VIDC_EXTRADATA_UBWC_CR_STATS_INFO,
.default_value = V4L2_MPEG_VIDC_EXTRADATA_NONE,
.menu_skip_mask = ~(
(1 << V4L2_MPEG_VIDC_EXTRADATA_NONE) |
@@ -179,7 +180,8 @@
(1 <<
V4L2_MPEG_VIDC_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI) |
(1 << V4L2_MPEG_VIDC_EXTRADATA_VUI_DISPLAY) |
- (1 << V4L2_MPEG_VIDC_EXTRADATA_VPX_COLORSPACE)
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_VPX_COLORSPACE) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_UBWC_CR_STATS_INFO)
),
.qmenu = mpeg_video_vidc_extradata,
},
@@ -263,11 +265,20 @@
.flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
},
{
- .id = V4L2_CID_MPEG_VIDC_VIDEO_CONCEAL_COLOR,
- .name = "Picture concealed color",
+ .id = V4L2_CID_MPEG_VIDC_VIDEO_CONCEAL_COLOR_8BIT,
+ .name = "Picture concealed color 8bit",
.type = V4L2_CTRL_TYPE_INTEGER,
.minimum = 0x0,
- .maximum = 0xffffff,
+ .maximum = 0xff3fcff,
+ .default_value = DEFAULT_VIDEO_CONCEAL_COLOR_BLACK,
+ .step = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDC_VIDEO_CONCEAL_COLOR_10BIT,
+ .name = "Picture concealed color 10bit",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0x0,
+ .maximum = 0x3fffffff,
.default_value = DEFAULT_VIDEO_CONCEAL_COLOR_BLACK,
.step = 1,
},
@@ -803,6 +814,10 @@
}
bufreq->buffer_count_min =
MIN_NUM_THUMBNAIL_MODE_OUTPUT_BUFFERS;
+ bufreq->buffer_count_min_host =
+ MIN_NUM_THUMBNAIL_MODE_OUTPUT_BUFFERS;
+ bufreq->buffer_count_actual =
+ MIN_NUM_THUMBNAIL_MODE_OUTPUT_BUFFERS;
if (msm_comm_get_stream_output_mode(inst) ==
HAL_VIDEO_DECODER_SECONDARY) {
@@ -818,6 +833,10 @@
bufreq->buffer_count_min =
MIN_NUM_THUMBNAIL_MODE_CAPTURE_BUFFERS;
+ bufreq->buffer_count_min_host =
+ MIN_NUM_THUMBNAIL_MODE_OUTPUT_BUFFERS;
+ bufreq->buffer_count_actual =
+ MIN_NUM_THUMBNAIL_MODE_OUTPUT_BUFFERS;
bufreq = get_buff_req_buffer(inst,
HAL_BUFFER_OUTPUT2);
@@ -830,6 +849,11 @@
bufreq->buffer_count_min =
MIN_NUM_THUMBNAIL_MODE_CAPTURE_BUFFERS;
+ bufreq->buffer_count_min_host =
+ MIN_NUM_THUMBNAIL_MODE_OUTPUT_BUFFERS;
+ bufreq->buffer_count_actual =
+ MIN_NUM_THUMBNAIL_MODE_OUTPUT_BUFFERS;
+
} else {
bufreq = get_buff_req_buffer(inst,
@@ -842,6 +866,10 @@
}
bufreq->buffer_count_min =
MIN_NUM_THUMBNAIL_MODE_CAPTURE_BUFFERS;
+ bufreq->buffer_count_min_host =
+ MIN_NUM_THUMBNAIL_MODE_OUTPUT_BUFFERS;
+ bufreq->buffer_count_actual =
+ MIN_NUM_THUMBNAIL_MODE_OUTPUT_BUFFERS;
}
@@ -877,6 +905,7 @@
case V4L2_MPEG_VIDC_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI:
case V4L2_MPEG_VIDC_EXTRADATA_VUI_DISPLAY:
case V4L2_MPEG_VIDC_EXTRADATA_VPX_COLORSPACE:
+ case V4L2_MPEG_VIDC_EXTRADATA_UBWC_CR_STATS_INFO:
inst->bufq[CAPTURE_PORT].num_planes = 2;
inst->bufq[CAPTURE_PORT].plane_sizes[EXTRADATA_IDX(2)] =
VENUS_EXTRADATA_SIZE(
@@ -972,11 +1001,6 @@
break;
}
break;
- case V4L2_CID_MPEG_VIDC_VIDEO_CONCEAL_COLOR:
- property_id = HAL_PARAM_VDEC_CONCEAL_COLOR;
- property_val = ctrl->val;
- pdata = &property_val;
- break;
case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_H264_LEVEL);
property_id =
@@ -1037,9 +1061,6 @@
inst, inst->clk_data.operating_rate >> 16,
ctrl->val >> 16);
inst->clk_data.operating_rate = ctrl->val;
-
- msm_vidc_update_operating_rate(inst);
-
break;
default:
break;
@@ -1065,29 +1086,37 @@
int rc = 0, i = 0, fourcc = 0;
struct v4l2_ext_control *ext_control;
struct v4l2_control control;
+ struct hal_conceal_color conceal_color = {0};
+ struct hfi_device *hdev;
- if (!inst || !inst->core || !ctrl) {
+ if (!inst || !inst->core || !inst->core->device || !ctrl) {
dprintk(VIDC_ERR,
"%s invalid parameters\n", __func__);
return -EINVAL;
}
+ hdev = inst->core->device;
+
+ v4l2_try_ext_ctrls(&inst->ctrl_handler, ctrl);
+
ext_control = ctrl->controls;
- control.id =
- V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_MODE;
for (i = 0; i < ctrl->count; i++) {
switch (ext_control[i].id) {
case V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_MODE:
control.value = ext_control[i].value;
-
+ control.id =
+ V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_MODE;
rc = msm_comm_s_ctrl(inst, &control);
if (rc)
dprintk(VIDC_ERR,
"%s Failed setting stream output mode : %d\n",
__func__, rc);
+ rc = msm_vidc_update_host_buff_counts(inst);
break;
case V4L2_CID_MPEG_VIDC_VIDEO_DPB_COLOR_FORMAT:
+ control.id =
+ V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_MODE;
switch (ext_control[i].value) {
case V4L2_MPEG_VIDC_VIDEO_DPB_COLOR_FMT_NONE:
if (!msm_comm_g_ctrl_for_id(inst, control.id)) {
@@ -1115,7 +1144,15 @@
__func__, rc);
break;
}
+ rc = msm_comm_try_get_bufreqs(inst);
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "%s Failed to get buffer requirements : %d\n",
+ __func__, rc);
+ break;
+ }
}
+ rc = msm_vidc_update_host_buff_counts(inst);
inst->clk_data.dpb_fourcc = fourcc;
break;
default:
@@ -1126,6 +1163,36 @@
break;
}
break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_CONCEAL_COLOR_8BIT:
+ conceal_color.conceal_color_8bit = ext_control[i].value;
+ i++;
+ switch (ext_control[i].id) {
+ case V4L2_CID_MPEG_VIDC_VIDEO_CONCEAL_COLOR_10BIT:
+ conceal_color.conceal_color_10bit =
+ ext_control[i].value;
+ dprintk(VIDC_DBG,
+ "conceal color: 8bit=0x%x 10bit=0x%x",
+ conceal_color.conceal_color_8bit,
+ conceal_color.conceal_color_10bit);
+ rc = call_hfi_op(hdev, session_set_property,
+ inst->session,
+ HAL_PARAM_VDEC_CONCEAL_COLOR,
+ &conceal_color);
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "%s Failed setting conceal color",
+ __func__);
+ }
+ break;
+ default:
+ dprintk(VIDC_ERR,
+ "%s Could not find CONCEAL_COLOR_10BIT ext_control",
+ __func__);
+ rc = -ENOTSUPP;
+ break;
+ }
+
+ break;
default:
dprintk(VIDC_ERR
, "%s Unsupported set control %d",
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index e2ea2bc..69070d5 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -1553,23 +1553,25 @@
break;
}
- buff_req_buffer = get_buff_req_buffer(inst,
- HAL_BUFFER_EXTRADATA_INPUT);
-
extra_idx = EXTRADATA_IDX(inst->bufq[OUTPUT_PORT].num_planes);
+ if (extra_idx && (extra_idx < VIDEO_MAX_PLANES)) {
+ buff_req_buffer = get_buff_req_buffer(inst,
+ HAL_BUFFER_EXTRADATA_INPUT);
- inst->bufq[OUTPUT_PORT].plane_sizes[extra_idx] =
- buff_req_buffer ?
- buff_req_buffer->buffer_size : 0;
-
- buff_req_buffer = get_buff_req_buffer(inst,
- HAL_BUFFER_EXTRADATA_OUTPUT);
+ inst->bufq[OUTPUT_PORT].plane_sizes[extra_idx] =
+ buff_req_buffer ?
+ buff_req_buffer->buffer_size : 0;
+ }
extra_idx = EXTRADATA_IDX(inst->bufq[CAPTURE_PORT].num_planes);
- inst->bufq[CAPTURE_PORT].plane_sizes[extra_idx] =
- buff_req_buffer ?
- buff_req_buffer->buffer_size : 0;
+ if (extra_idx && (extra_idx < VIDEO_MAX_PLANES)) {
+ buff_req_buffer = get_buff_req_buffer(inst,
+ HAL_BUFFER_EXTRADATA_OUTPUT);
+ inst->bufq[CAPTURE_PORT].plane_sizes[extra_idx] =
+ buff_req_buffer ?
+ buff_req_buffer->buffer_size : 0;
+ }
property_id = 0;
}
break;
@@ -1597,7 +1599,7 @@
break;
case V4L2_CID_MPEG_VIDC_VIDEO_USELTRFRAME:
property_id = HAL_CONFIG_VENC_USELTRFRAME;
- use_ltr.ref_ltr = ctrl->val;
+ use_ltr.ref_ltr = 0x1 << ctrl->val;
use_ltr.use_constraint = false;
use_ltr.frames = 0;
pdata = &use_ltr;
@@ -1748,8 +1750,6 @@
ctrl->val >> 16);
inst->clk_data.operating_rate = ctrl->val;
- msm_vidc_update_operating_rate(inst);
-
break;
case V4L2_CID_MPEG_VIDC_VIDEO_VENC_BITRATE_TYPE:
{
@@ -1897,6 +1897,23 @@
vui_timing_info.time_scale = NSEC_PER_SEC;
break;
}
+ case V4L2_CID_MPEG_VIDC_VIDEO_LTRMODE:
+ case V4L2_CID_MPEG_VIDC_VIDEO_LTRCOUNT:
+ case V4L2_CID_MPEG_VIDC_VENC_PARAM_SAR_WIDTH:
+ case V4L2_CID_MPEG_VIDC_VENC_PARAM_SAR_HEIGHT:
+ case V4L2_CID_MPEG_VIDC_VIDEO_BLUR_WIDTH:
+ case V4L2_CID_MPEG_VIDC_VIDEO_BLUR_HEIGHT:
+ case V4L2_CID_MPEG_VIDC_VIDEO_LAYER_ID:
+ case V4L2_CID_MPEG_VIDC_VENC_PARAM_LAYER_BITRATE:
+ case V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP_MIN:
+ case V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP_MIN:
+ case V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP_MIN:
+ case V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP_MAX:
+ case V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP_MAX:
+ case V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP_MAX:
+ dprintk(VIDC_DBG, "Set the control : %#x using ext ctrl\n",
+ ctrl->id);
+ break;
default:
dprintk(VIDC_ERR, "Unsupported index: %x\n", ctrl->id);
rc = -ENOTSUPP;
@@ -2345,6 +2362,7 @@
rc = -EINVAL;
goto exit;
}
+ inst->clk_data.opb_fourcc = f->fmt.pix_mp.pixelformat;
memcpy(&inst->fmts[fmt->type], fmt,
sizeof(struct msm_vidc_format));
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 2ca3e8d..971e57a 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -140,7 +140,8 @@
int msm_vidc_query_ctrl(void *instance, struct v4l2_queryctrl *ctrl)
{
struct msm_vidc_inst *inst = instance;
- int rc = 0;
+ struct hal_profile_level_supported *prof_level_supported;
+ int rc = 0, i = 0, profile_mask = 0, v4l2_prof_value = 0, max_level = 0;
if (!inst || !ctrl)
return -EINVAL;
@@ -178,6 +179,43 @@
case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES:
msm_vidc_ctrl_get_range(ctrl, &inst->capability.slice_bytes);
break;
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ case V4L2_CID_MPEG_VIDC_VIDEO_HEVC_PROFILE:
+ case V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_PROFILE:
+ {
+ prof_level_supported = &inst->capability.profile_level;
+ for (i = 0; i < prof_level_supported->profile_count; i++) {
+ v4l2_prof_value = msm_comm_hal_to_v4l2(ctrl->id,
+ prof_level_supported->profile_level[i].profile);
+ if (v4l2_prof_value == -EINVAL) {
+ dprintk(VIDC_WARN, "Invalid profile");
+ rc = -EINVAL;
+ }
+ profile_mask |= (1 << v4l2_prof_value);
+ }
+ ctrl->flags = profile_mask;
+ break;
+ }
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ case V4L2_CID_MPEG_VIDC_VIDEO_VP8_PROFILE_LEVEL:
+ case V4L2_CID_MPEG_VIDC_VIDEO_HEVC_TIER_LEVEL:
+ case V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_LEVEL:
+ {
+ prof_level_supported = &inst->capability.profile_level;
+ for (i = 0; i < prof_level_supported->profile_count; i++) {
+ if (max_level < prof_level_supported->
+ profile_level[i].level) {
+ max_level = prof_level_supported->
+ profile_level[i].level;
+ }
+ }
+ ctrl->maximum = msm_comm_hal_to_v4l2(ctrl->id, max_level);
+ if (ctrl->maximum == -EINVAL) {
+ dprintk(VIDC_WARN, "Invalid max level");
+ rc = -EINVAL;
+ }
+ break;
+ }
default:
rc = -EINVAL;
}
@@ -426,10 +464,16 @@
if (vb2->type != type || vb2->index != index)
continue;
+ if (mbuf->flags & MSM_VIDC_FLAG_RBR_PENDING) {
+ print_vidc_buffer(VIDC_DBG,
+ "skip rel buf (rbr pending)", inst, mbuf);
+ continue;
+ }
+
print_vidc_buffer(VIDC_DBG, "release buf", inst, mbuf);
msm_comm_unmap_vidc_buffer(inst, mbuf);
list_del(&mbuf->list);
- kfree(mbuf);
+ kref_put_mbuf(mbuf);
}
mutex_unlock(&inst->registeredbufs.lock);
@@ -442,6 +486,7 @@
struct msm_vidc_inst *inst = instance;
int rc = 0, i = 0;
struct buf_queue *q = NULL;
+ u32 cr = 0;
if (!inst || !inst->core || !b || !valid_v4l2_buffer(b, inst)) {
dprintk(VIDC_ERR, "%s: invalid params, inst %pK\n",
@@ -453,8 +498,16 @@
b->m.planes[i].m.fd = b->m.planes[i].reserved[0];
b->m.planes[i].data_offset = b->m.planes[i].reserved[1];
}
+
msm_comm_qbuf_cache_operations(inst, b);
+ /* Compression ratio is valid only for Encoder YUV buffers. */
+ if (inst->session_type == MSM_VIDC_ENCODER &&
+ b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ cr = b->m.planes[0].reserved[2];
+ msm_comm_update_input_cr(inst, b->index, cr);
+ }
+
q = msm_comm_get_vb2q(inst, b->type);
if (!q) {
dprintk(VIDC_ERR,
@@ -709,10 +762,9 @@
sizes[i] = inst->bufq[OUTPUT_PORT].plane_sizes[i];
bufreq->buffer_count_actual = *num_buffers;
- rc = set_buffer_count(inst, bufreq->buffer_count_actual,
- *num_buffers, HAL_BUFFER_INPUT);
+ rc = set_buffer_count(inst, bufreq->buffer_count_min_host,
+ bufreq->buffer_count_actual, HAL_BUFFER_INPUT);
}
-
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: {
buffer_type = msm_comm_get_hal_output_buffer(inst);
@@ -743,8 +795,8 @@
sizes[i] = inst->bufq[CAPTURE_PORT].plane_sizes[i];
bufreq->buffer_count_actual = *num_buffers;
- rc = set_buffer_count(inst, bufreq->buffer_count_actual,
- *num_buffers, buffer_type);
+ rc = set_buffer_count(inst, bufreq->buffer_count_min_host,
+ bufreq->buffer_count_actual, buffer_type);
}
break;
default:
@@ -837,13 +889,13 @@
b.buffer_type = HAL_BUFFER_OUTPUT;
}
+ rc = msm_comm_try_get_bufreqs(inst);
+
b.buffer_size = inst->bufq[CAPTURE_PORT].plane_sizes[0];
rc = call_hfi_op(hdev, session_set_property,
inst->session, HAL_PARAM_BUFFER_SIZE_MINIMUM,
&b);
- rc = msm_comm_try_get_bufreqs(inst);
-
/* Verify if buffer counts are correct */
rc = msm_vidc_verify_buffer_counts(inst);
if (rc) {
@@ -919,22 +971,8 @@
fail_start:
if (rc) {
- struct msm_vidc_buffer *temp, *next;
-
- mutex_lock(&inst->registeredbufs.lock);
- list_for_each_entry_safe(temp, next,
- &inst->registeredbufs.list, list) {
- struct vb2_buffer *vb;
-
- print_vidc_buffer(VIDC_ERR, "return buf", inst, temp);
- vb = msm_comm_get_vb_using_vidc_buffer(inst, temp);
- if (vb)
- vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED);
- msm_comm_unmap_vidc_buffer(inst, temp);
- list_del(&temp->list);
- kfree(temp);
- }
- mutex_unlock(&inst->registeredbufs.lock);
+ dprintk(VIDC_ERR, "%s: kill session %pK\n", __func__, inst);
+ msm_comm_kill_session(inst);
}
return rc;
}
@@ -987,6 +1025,35 @@
}
stream_start_failed:
+ if (rc) {
+ struct msm_vidc_buffer *temp, *next;
+ struct vb2_buffer *vb;
+
+ mutex_lock(&inst->registeredbufs.lock);
+ list_for_each_entry_safe(temp, next, &inst->registeredbufs.list,
+ list) {
+ if (temp->vvb.vb2_buf.type != q->type)
+ continue;
+ /*
+ * queued_list lock is already acquired before
+ * vb2_stream so no need to acquire it again.
+ */
+ list_for_each_entry(vb, &q->queued_list, queued_entry) {
+ if (msm_comm_compare_vb2_planes(inst, temp,
+ vb)) {
+ print_vb2_buffer(VIDC_ERR, "return vb",
+ inst, vb);
+ vb2_buffer_done(vb,
+ VB2_BUF_STATE_QUEUED);
+ break;
+ }
+ }
+ msm_comm_unmap_vidc_buffer(inst, temp);
+ list_del(&temp->list);
+ kref_put_mbuf(temp);
+ }
+ mutex_unlock(&inst->registeredbufs.lock);
+ }
return rc;
}
@@ -1060,10 +1127,16 @@
inst, vb2);
return;
}
+ if (!kref_get_mbuf(inst, mbuf)) {
+ dprintk(VIDC_ERR, "%s: mbuf not found\n", __func__);
+ return;
+ }
rc = msm_comm_qbuf(inst, mbuf);
if (rc)
print_vidc_buffer(VIDC_ERR, "failed qbuf", inst, mbuf);
+
+ kref_put_mbuf(mbuf);
}
static const struct vb2_ops msm_vidc_vb2q_ops = {
@@ -1095,7 +1168,7 @@
q->mem_ops = &msm_vidc_vb2_mem_ops;
q->drv_priv = inst;
- q->allow_zero_bytesused = 1;
+ q->allow_zero_bytesused = !V4L2_TYPE_IS_OUTPUT(type);
q->copy_timestamp = 1;
return vb2_queue_init(q);
}
@@ -1223,29 +1296,6 @@
return rc;
}
-static int set_actual_buffer_count(struct msm_vidc_inst *inst,
- int count, enum hal_buffer type)
-{
- int rc = 0;
- struct hfi_device *hdev;
- struct hal_buffer_count_actual buf_count;
-
- hdev = inst->core->device;
-
- buf_count.buffer_type = type;
- buf_count.buffer_count_min_host = count;
- buf_count.buffer_count_actual = count;
- rc = call_hfi_op(hdev, session_set_property,
- inst->session, HAL_PARAM_BUFFER_COUNT_ACTUAL,
- &buf_count);
- if (rc)
- dprintk(VIDC_ERR,
- "Failed to set actual count %d for buffer type %d\n",
- count, type);
- return rc;
-}
-
-
static int msm_vidc_get_count(struct msm_vidc_inst *inst,
struct v4l2_ctrl *ctrl)
{
@@ -1270,13 +1320,20 @@
"Buffer count Host changed from %d to %d\n",
bufreq->buffer_count_min_host,
ctrl->val);
- bufreq->buffer_count_min_host = ctrl->val;
+ bufreq->buffer_count_actual =
+ bufreq->buffer_count_min =
+ bufreq->buffer_count_min_host =
+ ctrl->val;
} else {
ctrl->val = bufreq->buffer_count_min_host;
}
- rc = set_actual_buffer_count(inst,
- bufreq->buffer_count_min_host,
+ rc = set_buffer_count(inst,
+ bufreq->buffer_count_min_host,
+ bufreq->buffer_count_actual,
HAL_BUFFER_INPUT);
+
+ msm_vidc_update_host_buff_counts(inst);
+ ctrl->val = bufreq->buffer_count_min_host;
return rc;
} else if (ctrl->id == V4L2_CID_MIN_BUFFERS_FOR_CAPTURE) {
@@ -1297,31 +1354,37 @@
return 0;
}
-
- if (inst->in_reconfig) {
- ctrl->val = bufreq->buffer_count_min;
- }
if (inst->session_type == MSM_VIDC_DECODER &&
!inst->in_reconfig &&
inst->state < MSM_VIDC_LOAD_RESOURCES_DONE) {
dprintk(VIDC_DBG,
"Clients updates Buffer count from %d to %d\n",
bufreq->buffer_count_min_host, ctrl->val);
- bufreq->buffer_count_min_host = ctrl->val;
+ bufreq->buffer_count_actual =
+ bufreq->buffer_count_min =
+ bufreq->buffer_count_min_host =
+ ctrl->val;
}
if (ctrl->val > bufreq->buffer_count_min_host) {
dprintk(VIDC_DBG,
"Buffer count Host changed from %d to %d\n",
bufreq->buffer_count_min_host,
ctrl->val);
- bufreq->buffer_count_min_host = ctrl->val;
+ bufreq->buffer_count_actual =
+ bufreq->buffer_count_min =
+ bufreq->buffer_count_min_host =
+ ctrl->val;
} else {
ctrl->val = bufreq->buffer_count_min_host;
}
- rc = set_actual_buffer_count(inst,
- bufreq->buffer_count_min_host,
+ rc = set_buffer_count(inst,
+ bufreq->buffer_count_min_host,
+ bufreq->buffer_count_actual,
HAL_BUFFER_OUTPUT);
+ msm_vidc_update_host_buff_counts(inst);
+ ctrl->val = bufreq->buffer_count_min_host;
+
return rc;
}
return -EINVAL;
@@ -1367,6 +1430,8 @@
break;
case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
+ if (inst->in_reconfig)
+ msm_vidc_update_host_buff_counts(inst);
buffer_type = msm_comm_get_hal_output_buffer(inst);
bufreq = get_buff_req_buffer(inst,
buffer_type);
@@ -1481,11 +1546,13 @@
INIT_MSM_VIDC_LIST(&inst->scratchbufs);
INIT_MSM_VIDC_LIST(&inst->freqs);
+ INIT_MSM_VIDC_LIST(&inst->input_crs);
INIT_MSM_VIDC_LIST(&inst->persistbufs);
INIT_MSM_VIDC_LIST(&inst->pending_getpropq);
INIT_MSM_VIDC_LIST(&inst->outputbufs);
INIT_MSM_VIDC_LIST(&inst->registeredbufs);
INIT_MSM_VIDC_LIST(&inst->reconbufs);
+ INIT_MSM_VIDC_LIST(&inst->eosbufs);
kref_init(&inst->kref);
@@ -1590,6 +1657,9 @@
DEINIT_MSM_VIDC_LIST(&inst->pending_getpropq);
DEINIT_MSM_VIDC_LIST(&inst->outputbufs);
DEINIT_MSM_VIDC_LIST(&inst->registeredbufs);
+ DEINIT_MSM_VIDC_LIST(&inst->eosbufs);
+ DEINIT_MSM_VIDC_LIST(&inst->freqs);
+ DEINIT_MSM_VIDC_LIST(&inst->input_crs);
kfree(inst);
inst = NULL;
@@ -1598,15 +1668,29 @@
}
EXPORT_SYMBOL(msm_vidc_open);
-static void cleanup_instance(struct msm_vidc_inst *inst)
+static void msm_vidc_cleanup_instance(struct msm_vidc_inst *inst)
{
+ struct msm_vidc_buffer *temp, *dummy;
+
if (!inst) {
dprintk(VIDC_ERR, "%s: invalid params\n", __func__);
return;
}
+ mutex_lock(&inst->registeredbufs.lock);
+ list_for_each_entry_safe(temp, dummy, &inst->registeredbufs.list,
+ list) {
+ print_vidc_buffer(VIDC_ERR, "undequeud buf", inst, temp);
+ msm_comm_unmap_vidc_buffer(inst, temp);
+ list_del(&temp->list);
+ kref_put_mbuf(temp);
+ }
+ mutex_unlock(&inst->registeredbufs.lock);
+
msm_comm_free_freq_table(inst);
+ msm_comm_free_input_cr_table(inst);
+
if (msm_comm_release_scratch_buffers(inst, false))
dprintk(VIDC_ERR,
"Failed to release scratch buffers\n");
@@ -1625,6 +1709,8 @@
*/
msm_comm_validate_output_buffers(inst);
+ msm_comm_release_eos_buffers(inst);
+
if (msm_comm_release_output_buffers(inst, true))
dprintk(VIDC_ERR,
"Failed to release output buffers\n");
@@ -1658,18 +1744,21 @@
msm_comm_ctrl_deinit(inst);
- DEINIT_MSM_VIDC_LIST(&inst->scratchbufs);
- DEINIT_MSM_VIDC_LIST(&inst->persistbufs);
- DEINIT_MSM_VIDC_LIST(&inst->pending_getpropq);
- DEINIT_MSM_VIDC_LIST(&inst->outputbufs);
- DEINIT_MSM_VIDC_LIST(&inst->registeredbufs);
-
v4l2_fh_del(&inst->event_handler);
v4l2_fh_exit(&inst->event_handler);
for (i = 0; i < MAX_PORT_NUM; i++)
vb2_queue_release(&inst->bufq[i].vb2_bufq);
+ DEINIT_MSM_VIDC_LIST(&inst->scratchbufs);
+ DEINIT_MSM_VIDC_LIST(&inst->persistbufs);
+ DEINIT_MSM_VIDC_LIST(&inst->pending_getpropq);
+ DEINIT_MSM_VIDC_LIST(&inst->outputbufs);
+ DEINIT_MSM_VIDC_LIST(&inst->registeredbufs);
+ DEINIT_MSM_VIDC_LIST(&inst->eosbufs);
+ DEINIT_MSM_VIDC_LIST(&inst->freqs);
+ DEINIT_MSM_VIDC_LIST(&inst->input_crs);
+
mutex_destroy(&inst->sync_lock);
mutex_destroy(&inst->bufq[CAPTURE_PORT].lock);
mutex_destroy(&inst->bufq[OUTPUT_PORT].lock);
@@ -1692,7 +1781,6 @@
int msm_vidc_close(void *instance)
{
struct msm_vidc_inst *inst = instance;
- struct msm_vidc_buffer *temp, *dummy;
int rc = 0;
if (!inst || !inst->core) {
@@ -1704,30 +1792,20 @@
* Make sure that HW stop working on these buffers that
* we are going to free.
*/
- if (inst->state != MSM_VIDC_CORE_INVALID &&
- inst->core->state != VIDC_CORE_INVALID)
- rc = msm_comm_try_state(inst,
- MSM_VIDC_RELEASE_RESOURCES_DONE);
-
- mutex_lock(&inst->registeredbufs.lock);
- list_for_each_entry_safe(temp, dummy, &inst->registeredbufs.list,
- list) {
- print_vidc_buffer(VIDC_ERR, "undequeud buf", inst, temp);
- msm_comm_unmap_vidc_buffer(inst, temp);
- list_del(&temp->list);
- kfree(temp);
- }
- mutex_unlock(&inst->registeredbufs.lock);
-
- cleanup_instance(inst);
- if (inst->state != MSM_VIDC_CORE_INVALID &&
- inst->core->state != VIDC_CORE_INVALID)
- rc = msm_comm_try_state(inst, MSM_VIDC_CORE_UNINIT);
- else
- rc = msm_comm_force_cleanup(inst);
+ rc = msm_comm_try_state(inst, MSM_VIDC_RELEASE_RESOURCES_DONE);
if (rc)
dprintk(VIDC_ERR,
- "Failed to move video instance to uninit state\n");
+ "Failed to move inst %pK to rel resource done state\n",
+ inst);
+
+ msm_vidc_cleanup_instance(inst);
+
+ rc = msm_comm_try_state(inst, MSM_VIDC_CORE_UNINIT);
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "Failed to move inst %pK to uninit state\n", inst);
+ rc = msm_comm_force_cleanup(inst);
+ }
msm_comm_session_clean(inst);
msm_smem_delete_client(inst->mem_client);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
index 5e366d0..51023f0 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
@@ -16,6 +16,12 @@
#include "msm_vidc_debug.h"
#include "msm_vidc_clocks.h"
+#define MSM_VIDC_MIN_UBWC_COMPLEXITY_FACTOR (1 << 16)
+#define MSM_VIDC_MAX_UBWC_COMPLEXITY_FACTOR (4 << 16)
+
+#define MSM_VIDC_MIN_UBWC_COMPRESSION_RATIO (1 << 16)
+#define MSM_VIDC_MAX_UBWC_COMPRESSION_RATIO (5 << 16)
+
static inline unsigned long int get_ubwc_compression_ratio(
struct ubwc_cr_stats_info_type ubwc_stats_info)
{
@@ -87,20 +93,58 @@
mutex_unlock(&inst->reconbufs.lock);
}
-static int fill_recon_stats(struct msm_vidc_inst *inst,
+static int fill_dynamic_stats(struct msm_vidc_inst *inst,
struct vidc_bus_vote_data *vote_data)
{
- struct recon_buf *binfo;
- u32 CR = 0, CF = 0;
+ struct recon_buf *binfo, *nextb;
+ struct vidc_input_cr_data *temp, *next;
+ u32 min_cf = 0, max_cf = 0;
+ u32 min_input_cr = 0, max_input_cr = 0, min_cr = 0, max_cr = 0;
mutex_lock(&inst->reconbufs.lock);
- list_for_each_entry(binfo, &inst->reconbufs.list, list) {
- CR = max(CR, binfo->CR);
- CF = max(CF, binfo->CF);
+ list_for_each_entry_safe(binfo, nextb, &inst->reconbufs.list, list) {
+ min_cr = min(min_cr, binfo->CR);
+ max_cr = max(max_cr, binfo->CR);
+ min_cf = min(min_cf, binfo->CF);
+ max_cf = max(max_cf, binfo->CF);
}
mutex_unlock(&inst->reconbufs.lock);
- vote_data->complexity_factor = CF;
- vote_data->compression_ratio = CR;
+
+ mutex_lock(&inst->input_crs.lock);
+ list_for_each_entry_safe(temp, next, &inst->input_crs.list, list) {
+ min_input_cr = min(min_input_cr, temp->input_cr);
+ max_input_cr = max(max_input_cr, temp->input_cr);
+ }
+ mutex_unlock(&inst->input_crs.lock);
+
+ /* Sanitize CF values from HW . */
+ max_cf = min_t(u32, max_cf, MSM_VIDC_MAX_UBWC_COMPLEXITY_FACTOR);
+ min_cf = max_t(u32, min_cf, MSM_VIDC_MIN_UBWC_COMPLEXITY_FACTOR);
+ max_cr = min_t(u32, max_cr, MSM_VIDC_MAX_UBWC_COMPRESSION_RATIO);
+ min_cr = max_t(u32, min_cr, MSM_VIDC_MIN_UBWC_COMPRESSION_RATIO);
+ max_input_cr = min_t(u32,
+ max_input_cr, MSM_VIDC_MAX_UBWC_COMPRESSION_RATIO);
+ min_input_cr = max_t(u32,
+ min_input_cr, MSM_VIDC_MIN_UBWC_COMPRESSION_RATIO);
+
+ vote_data->compression_ratio = min_cr;
+ vote_data->complexity_factor = max_cf;
+ vote_data->input_cr = min_input_cr;
+ vote_data->use_dpb_read = false;
+
+ /* Check if driver can vote for lower bus BW */
+ if (inst->clk_data.load <= inst->clk_data.load_norm) {
+ vote_data->compression_ratio = max_cr;
+ vote_data->complexity_factor = min_cf;
+ vote_data->input_cr = max_input_cr;
+ vote_data->use_dpb_read = true;
+ }
+
+ dprintk(VIDC_PROF,
+ "Input CR = %d Recon CR = %d Complexity Factor = %d\n",
+ vote_data->input_cr, vote_data->compression_ratio,
+ vote_data->complexity_factor);
+
return 0;
}
@@ -118,17 +162,46 @@
hdev = core->device;
+ mutex_lock(&core->lock);
vote_data = core->vote_data;
if (!vote_data) {
dprintk(VIDC_PROF,
"Failed to get vote_data for inst %pK\n",
inst);
+ mutex_unlock(&core->lock);
return -EINVAL;
}
- mutex_lock(&core->lock);
list_for_each_entry(inst, &core->instances, list) {
int codec = 0;
+ struct msm_vidc_buffer *temp, *next;
+ u32 filled_len = 0;
+ u32 device_addr = 0;
+
+ if (!inst) {
+ dprintk(VIDC_ERR, "%s Invalid args\n",
+ __func__);
+ mutex_unlock(&core->lock);
+ return -EINVAL;
+ }
+
+ mutex_lock(&inst->registeredbufs.lock);
+ list_for_each_entry_safe(temp, next,
+ &inst->registeredbufs.list, list) {
+ if (temp->vvb.vb2_buf.type ==
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
+ temp->flags & MSM_VIDC_FLAG_DEFERRED) {
+ filled_len = max(filled_len,
+ temp->vvb.vb2_buf.planes[0].bytesused);
+ device_addr = temp->smem[0].device_addr;
+ }
+ }
+ mutex_unlock(&inst->registeredbufs.lock);
+
+ if (!filled_len || !device_addr) {
+ dprintk(VIDC_DBG, "%s No ETBs\n", __func__);
+ continue;
+ }
++vote_data_count;
@@ -136,6 +209,8 @@
inst->fmts[OUTPUT_PORT].fourcc :
inst->fmts[CAPTURE_PORT].fourcc;
+ memset(&(vote_data[i]), 0x0, sizeof(struct vidc_bus_vote_data));
+
vote_data[i].domain = get_hal_domain(inst->session_type);
vote_data[i].codec = get_hal_codec(codec);
vote_data[i].input_width = max(inst->prop.width[OUTPUT_PORT],
@@ -148,6 +223,9 @@
max(inst->prop.height[CAPTURE_PORT],
inst->prop.height[OUTPUT_PORT]);
vote_data[i].lcu_size = codec == V4L2_PIX_FMT_HEVC ? 32 : 16;
+ vote_data[i].b_frames_enabled =
+ msm_comm_g_ctrl_for_id(inst,
+ V4L2_CID_MPEG_VIDC_VIDEO_NUM_B_FRAMES) != 0;
if (inst->clk_data.operating_rate)
vote_data[i].fps =
@@ -177,7 +255,11 @@
vote_data[i].num_formats = 2;
}
vote_data[i].work_mode = inst->clk_data.work_mode;
- fill_recon_stats(inst, &vote_data[i]);
+ fill_dynamic_stats(inst, &vote_data[i]);
+
+ if (core->resources.sys_cache_res_set)
+ vote_data[i].use_sys_cache = true;
+
i++;
}
mutex_unlock(&core->lock);
@@ -213,6 +295,7 @@
int rc = 0;
int fw_pending_bufs = 0;
int total_output_buf = 0;
+ int min_output_buf = 0;
int buffers_outside_fw = 0;
struct msm_vidc_core *core;
struct hal_buffer_requirements *output_buf_req;
@@ -247,19 +330,40 @@
/* Total number of output buffers */
total_output_buf = output_buf_req->buffer_count_actual;
+ min_output_buf = output_buf_req->buffer_count_min;
+
/* Buffers outside FW are with display */
buffers_outside_fw = total_output_buf - fw_pending_bufs;
- dprintk(VIDC_DBG,
- "Counts : total_output_buf = %d fw_pending_bufs = %d buffers_outside_fw = %d\n",
- total_output_buf, fw_pending_bufs, buffers_outside_fw);
+ dprintk(VIDC_PROF,
+ "Counts : total_output_buf = %d Min buffers = %d fw_pending_bufs = %d buffers_outside_fw = %d\n",
+ total_output_buf, min_output_buf, fw_pending_bufs,
+ buffers_outside_fw);
- if (buffers_outside_fw >= dcvs->min_threshold &&
- dcvs->load > dcvs->load_low) {
- dcvs->load = dcvs->load_low;
- } else if (buffers_outside_fw < dcvs->min_threshold &&
- dcvs->load == dcvs->load_low) {
+ /*
+ * PMS decides clock level based on below algo
+
+ * Limits :
+ * max_threshold : Client extra allocated buffers. Client
+ * reserves these buffers for it's smooth flow.
+ * min_output_buf : HW requested buffers for it's smooth
+ * flow of buffers.
+ * min_threshold : Driver requested extra buffers for PMS.
+
+ * 1) When buffers outside FW are reaching client's extra buffers,
+ * FW is slow and will impact pipeline, Increase clock.
+ * 2) When pending buffers with FW are same as FW requested,
+ * pipeline has cushion to absorb FW slowness, Decrease clocks.
+ * 3) When none of 1) or 2) FW is just fast enough to maintain
+ * pipeline, request Right Clocks.
+ */
+
+ if (buffers_outside_fw <= dcvs->max_threshold)
dcvs->load = dcvs->load_high;
- }
+ else if (fw_pending_bufs <= min_output_buf)
+ dcvs->load = dcvs->load_low;
+ else
+ dcvs->load = dcvs->load_norm;
+
return rc;
}
@@ -280,15 +384,18 @@
if (!found) {
temp = kzalloc(sizeof(*temp), GFP_KERNEL);
+ if (!temp) {
+ dprintk(VIDC_WARN, "%s: malloc failure.\n", __func__);
+ goto exit;
+ }
temp->freq = freq;
temp->device_addr = device_addr;
list_add_tail(&temp->list, &inst->freqs.list);
}
+exit:
mutex_unlock(&inst->freqs.lock);
}
-// TODO this needs to be removed later and use queued_list
-
void msm_vidc_clear_freq_entry(struct msm_vidc_inst *inst,
u32 device_addr)
{
@@ -318,9 +425,8 @@
/* If current requirement is within DCVS limits, try DCVS. */
- if (freq < inst->clk_data.load_high) {
+ if (freq < inst->clk_data.load_norm) {
dprintk(VIDC_DBG, "Calling DCVS now\n");
- // TODO calling DCVS here may reduce the residency. Re-visit.
msm_dcvs_scale_clocks(inst);
freq = inst->clk_data.load;
}
@@ -342,6 +448,60 @@
mutex_unlock(&inst->freqs.lock);
}
+void msm_comm_free_input_cr_table(struct msm_vidc_inst *inst)
+{
+ struct vidc_input_cr_data *temp, *next;
+
+ mutex_lock(&inst->input_crs.lock);
+ list_for_each_entry_safe(temp, next, &inst->input_crs.list, list) {
+ list_del(&temp->list);
+ kfree(temp);
+ }
+ INIT_LIST_HEAD(&inst->input_crs.list);
+ mutex_unlock(&inst->input_crs.lock);
+}
+
+void msm_comm_update_input_cr(struct msm_vidc_inst *inst,
+ u32 index, u32 cr)
+{
+ struct vidc_input_cr_data *temp, *next;
+ bool found = false;
+
+ mutex_lock(&inst->input_crs.lock);
+ list_for_each_entry_safe(temp, next, &inst->input_crs.list, list) {
+ if (temp->index == index) {
+ temp->input_cr = cr;
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ temp = kzalloc(sizeof(*temp), GFP_KERNEL);
+ if (!temp) {
+ dprintk(VIDC_WARN, "%s: malloc failure.\n", __func__);
+ goto exit;
+ }
+ temp->index = index;
+ temp->input_cr = cr;
+ list_add_tail(&temp->list, &inst->input_crs.list);
+ }
+exit:
+ mutex_unlock(&inst->input_crs.lock);
+}
+
+static unsigned long msm_vidc_max_freq(struct msm_vidc_core *core)
+{
+ struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
+ unsigned long freq = 0;
+
+ allowed_clks_tbl = core->resources.allowed_clks_tbl;
+ freq = allowed_clks_tbl[0].clock_rate;
+ dprintk(VIDC_PROF, "Max rate = %lu\n", freq);
+
+ return freq;
+}
+
static unsigned long msm_vidc_calc_freq(struct msm_vidc_inst *inst,
u32 filled_len)
{
@@ -373,17 +533,17 @@
vsp_cycles = mbs_per_second * inst->clk_data.entry->vsp_cycles;
/* 10 / 7 is overhead factor */
- vsp_cycles += (inst->prop.fps * filled_len * 8 * 10) / 7;
+ vsp_cycles += ((inst->prop.fps * filled_len * 8) / 7) * 10;
} else {
- // TODO return Min or Max ?
dprintk(VIDC_ERR, "Unknown session type = %s\n", __func__);
- return freq;
+ return msm_vidc_max_freq(inst->core);
}
freq = max(vpp_cycles, vsp_cycles);
- dprintk(VIDC_PROF, "%s Inst %pK : Freq = %lu\n", __func__, inst, freq);
+ dprintk(VIDC_PROF, "%s Inst %pK : Filled Len = %d Freq = %lu\n",
+ __func__, inst, filled_len, freq);
return freq;
}
@@ -425,18 +585,6 @@
return rc;
}
-static unsigned long msm_vidc_max_freq(struct msm_vidc_core *core)
-{
- struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
- unsigned long freq = 0;
-
- allowed_clks_tbl = core->resources.allowed_clks_tbl;
- freq = allowed_clks_tbl[0].clock_rate;
- dprintk(VIDC_PROF, "Max rate = %lu", freq);
-
- return freq;
-}
-
int msm_vidc_update_operating_rate(struct msm_vidc_inst *inst)
{
struct v4l2_ctrl *ctrl = NULL;
@@ -528,7 +676,7 @@
list_for_each_entry_safe(temp, next, &inst->registeredbufs.list, list) {
if (temp->vvb.vb2_buf.type ==
V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
- temp->deferred) {
+ temp->flags & MSM_VIDC_FLAG_DEFERRED) {
filled_len = max(filled_len,
temp->vvb.vb2_buf.planes[0].bytesused);
device_addr = temp->smem[0].device_addr;
@@ -537,8 +685,8 @@
mutex_unlock(&inst->registeredbufs.lock);
if (!filled_len || !device_addr) {
- dprintk(VIDC_PROF, "No Change in frequency\n");
- goto decision_done;
+ dprintk(VIDC_DBG, "%s No ETBs\n", __func__);
+ goto no_clock_change;
}
freq = msm_vidc_calc_freq(inst, filled_len);
@@ -555,8 +703,9 @@
else
inst->clk_data.curr_freq = freq;
-decision_done:
msm_vidc_set_clocks(inst->core);
+
+no_clock_change:
return 0;
}
@@ -603,7 +752,6 @@
}
inst->clk_data.dcvs_mode = true;
- // TODO : Update with proper number based on on-target tuning.
inst->clk_data.extra_capture_buffer_count =
DCVS_DEC_EXTRA_OUTPUT_BUFFERS;
inst->clk_data.extra_output_buffer_count =
@@ -641,12 +789,13 @@
static inline void msm_dcvs_print_dcvs_stats(struct clock_data *dcvs)
{
- dprintk(VIDC_DBG,
- "DCVS: Load_Low %d, Load High %d\n",
+ dprintk(VIDC_PROF,
+ "DCVS: Load_Low %d, Load Norm %d, Load High %d\n",
dcvs->load_low,
+ dcvs->load_norm,
dcvs->load_high);
- dprintk(VIDC_DBG,
+ dprintk(VIDC_PROF,
"DCVS: min_threshold %d, max_threshold %d\n",
dcvs->min_threshold, dcvs->max_threshold);
}
@@ -659,6 +808,7 @@
u64 total_freq = 0, rate = 0, load;
int cycles;
struct clock_data *dcvs;
+ struct hal_buffer_requirements *output_buf_req;
dprintk(VIDC_DBG, "Init DCVS Load\n");
@@ -679,12 +829,22 @@
cycles;
dcvs->buffer_type = HAL_BUFFER_INPUT;
- // TODO : Update with proper no based on Buffer counts change.
- dcvs->min_threshold = 7;
+ dcvs->min_threshold =
+ msm_vidc_get_extra_buff_count(inst, HAL_BUFFER_INPUT);
} else if (inst->session_type == MSM_VIDC_DECODER) {
dcvs->buffer_type = msm_comm_get_hal_output_buffer(inst);
- // TODO : Update with proper no based on Buffer counts change.
- dcvs->min_threshold = 4;
+ output_buf_req = get_buff_req_buffer(inst,
+ dcvs->buffer_type);
+ if (!output_buf_req) {
+ dprintk(VIDC_ERR,
+ "%s: No bufer req for buffer type %x\n",
+ __func__, dcvs->buffer_type);
+ return;
+ }
+ dcvs->max_threshold = output_buf_req->buffer_count_actual -
+ output_buf_req->buffer_count_min_host + 1;
+ dcvs->min_threshold =
+ msm_vidc_get_extra_buff_count(inst, dcvs->buffer_type);
} else {
return;
}
@@ -697,15 +857,17 @@
break;
}
- dcvs->load = dcvs->load_high = rate;
- dcvs->load_low = allowed_clks_tbl[i+1].clock_rate;
+ dcvs->load = dcvs->load_norm = rate;
+
+ dcvs->load_low = i < (core->resources.allowed_clks_tbl_size - 1) ?
+ allowed_clks_tbl[i+1].clock_rate : dcvs->load_norm;
+ dcvs->load_high = i > 0 ? allowed_clks_tbl[i-1].clock_rate :
+ dcvs->load_norm;
inst->clk_data.buffer_counter = 0;
msm_dcvs_print_dcvs_stats(dcvs);
- msm_vidc_update_operating_rate(inst);
-
rc = msm_comm_scale_clocks_and_bus(inst);
if (rc)
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.h b/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
index e1226e4..707f034 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
@@ -15,21 +15,11 @@
#define _MSM_VIDC_CLOCKS_H_
#include "msm_vidc_internal.h"
-/* Low threshold for encoder dcvs */
-#define DCVS_ENC_LOW_THR 4
-/* High threshold for encoder dcvs */
-#define DCVS_ENC_HIGH_THR 9
/* extra o/p buffers in case of encoder dcvs */
#define DCVS_ENC_EXTRA_OUTPUT_BUFFERS 2
+
/* extra o/p buffers in case of decoder dcvs */
#define DCVS_DEC_EXTRA_OUTPUT_BUFFERS 4
-/* Default threshold to reduce the core frequency */
-#define DCVS_NOMINAL_THRESHOLD 8
-/* Default threshold to increase the core frequency */
-#define DCVS_TURBO_THRESHOLD 4
-
-/* Considering one safeguard buffer */
-#define DCVS_BUFFER_SAFEGUARD (DCVS_DEC_EXTRA_OUTPUT_BUFFERS - 1)
void msm_clock_data_reset(struct msm_vidc_inst *inst);
int msm_vidc_update_operating_rate(struct msm_vidc_inst *inst);
@@ -43,6 +33,9 @@
int msm_vidc_decide_core_and_power_mode(struct msm_vidc_inst *inst);
void msm_vidc_clear_freq_entry(struct msm_vidc_inst *inst,
u32 device_addr);
+void msm_comm_free_input_cr_table(struct msm_vidc_inst *inst);
+void msm_comm_update_input_cr(struct msm_vidc_inst *inst, u32 index,
+ u32 cr);
void update_recon_stats(struct msm_vidc_inst *inst,
struct recon_stats_type *recon_stats);
#endif
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index ac69ab8..e72c099 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -35,9 +35,6 @@
V4L2_EVENT_MSM_VIDC_RELEASE_BUFFER_REFERENCE
#define L_MODE V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY
-#define MAX_SUPPORTED_INSTANCES 16
-static int msm_vidc_update_host_buff_counts(struct msm_vidc_inst *inst);
-
const char *const mpeg_video_vidc_extradata[] = {
"Extradata none",
"Extradata MB Quantization",
@@ -70,6 +67,7 @@
"Extradata PQ Info",
"Extradata display VUI",
"Extradata vpx color space",
+ "Extradata UBWC CR stats info",
};
struct getprop_buf {
@@ -81,7 +79,6 @@
static void msm_comm_generate_sys_error(struct msm_vidc_inst *inst);
static void handle_session_error(enum hal_command_response cmd, void *data);
static void msm_vidc_print_running_insts(struct msm_vidc_core *core);
-static void msm_comm_print_debug_info(struct msm_vidc_inst *inst);
bool msm_comm_turbo_session(struct msm_vidc_inst *inst)
{
@@ -131,8 +128,10 @@
struct v4l2_ctrl **cluster = kmalloc(sizeof(struct v4l2_ctrl *) *
num_ctrls, GFP_KERNEL);
- if (!cluster || !inst)
+ if (!cluster || !inst) {
+ kfree(cluster);
return NULL;
+ }
for (c = 0; c < num_ctrls; c++)
cluster[c] = inst->ctrls[c];
@@ -955,8 +954,8 @@
/* This should come from sys_init_done */
core->resources.max_inst_count =
sys_init_msg->max_sessions_supported ?
- sys_init_msg->max_sessions_supported :
- MAX_SUPPORTED_INSTANCES;
+ min_t(u32, sys_init_msg->max_sessions_supported,
+ MAX_SUPPORTED_INSTANCES) : MAX_SUPPORTED_INSTANCES;
core->resources.max_secure_inst_count =
core->resources.max_secure_inst_count ?
@@ -977,24 +976,19 @@
__func__, core->codec_count, core->enc_codec_supported,
core->dec_codec_supported);
- core->vote_data = kcalloc(MAX_SUPPORTED_INSTANCES,
- sizeof(core->vote_data), GFP_KERNEL);
- if (!core->vote_data)
- dprintk(VIDC_ERR, "%s: failed to allocate memory\n", __func__);
-
complete(&(core->completions[index]));
}
+static void put_inst_helper(struct kref *kref)
+{
+ struct msm_vidc_inst *inst = container_of(kref,
+ struct msm_vidc_inst, kref);
+
+ msm_vidc_destroy(inst);
+}
+
static void put_inst(struct msm_vidc_inst *inst)
{
- void put_inst_helper(struct kref *kref)
- {
- struct msm_vidc_inst *inst = container_of(kref,
- struct msm_vidc_inst, kref);
-
- msm_vidc_destroy(inst);
- }
-
if (!inst)
return;
@@ -1173,11 +1167,6 @@
if (!rc) {
dprintk(VIDC_ERR, "Wait interrupted or timed out: %d\n",
SESSION_MSG_INDEX(cmd));
- call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
- dprintk(VIDC_ERR,
- "sess resp timeout can potentially crash the system\n");
- msm_comm_print_debug_info(inst);
- msm_vidc_handle_hw_error(inst->core);
msm_comm_kill_session(inst);
rc = -EIO;
} else {
@@ -1246,7 +1235,7 @@
if (ctrl) {
v4l2_ctrl_modify_range(ctrl, capability->min,
capability->max, ctrl->step,
- capability->min);
+ ctrl->default_value);
dprintk(VIDC_DBG,
"%s: Updated Range = %lld --> %lld Def value = %lld\n",
ctrl->name, ctrl->minimum, ctrl->maximum,
@@ -1437,6 +1426,15 @@
print_cap("max_work_modes", &inst->capability.max_work_modes);
print_cap("ubwc_cr_stats", &inst->capability.ubwc_cr_stats);
+ dprintk(VIDC_DBG, "profile count : %u",
+ inst->capability.profile_level.profile_count);
+ for (i = 0; i < inst->capability.profile_level.profile_count; i++) {
+ dprintk(VIDC_DBG, "profile : %u ", inst->capability.
+ profile_level.profile_level[i].profile);
+ dprintk(VIDC_DBG, "level : %u ", inst->capability.
+ profile_level.profile_level[i].level);
+ }
+
signal_session_msg_receipt(cmd, inst);
/*
@@ -1495,6 +1493,7 @@
break;
case HAL_EVENT_RELEASE_BUFFER_REFERENCE:
{
+ struct msm_vidc_buffer *mbuf;
u32 planes[VIDEO_MAX_PLANES] = {0};
dprintk(VIDC_DBG,
@@ -1504,8 +1503,15 @@
planes[0] = event_notify->packet_buffer;
planes[1] = event_notify->extra_data_buffer;
- handle_release_buffer_reference(inst, planes);
-
+ mbuf = msm_comm_get_buffer_using_device_planes(inst, planes);
+ if (!mbuf || !kref_get_mbuf(inst, mbuf)) {
+ dprintk(VIDC_ERR,
+ "%s: data_addr %x, extradata_addr %x not found\n",
+ __func__, planes[0], planes[1]);
+ } else {
+ handle_release_buffer_reference(inst, mbuf);
+ kref_put_mbuf(mbuf);
+ }
goto err_bad_event;
}
default:
@@ -1604,10 +1610,8 @@
return;
}
bufreq->buffer_count_min = event_notify->capture_buf_count;
-
}
- msm_vidc_update_host_buff_counts(inst);
mutex_unlock(&inst->lock);
if (event == V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT) {
@@ -1785,6 +1789,12 @@
return;
}
mutex_lock(&inst->outputbufs.lock);
+ if (list_empty(&inst->outputbufs.list)) {
+ dprintk(VIDC_DBG, "%s: no OUTPUT buffers allocated\n",
+ __func__);
+ mutex_unlock(&inst->outputbufs.lock);
+ return;
+ }
list_for_each_entry(binfo, &inst->outputbufs.list, list) {
if (binfo->buffer_ownership != DRIVER) {
dprintk(VIDC_DBG,
@@ -1945,7 +1955,6 @@
hdev = inst->core->device;
dprintk(VIDC_WARN, "Session error received for session %pK\n", inst);
- change_inst_state(inst, MSM_VIDC_CORE_INVALID);
if (response->status == VIDC_ERR_MAX_CLIENTS) {
dprintk(VIDC_WARN, "Too many clients, rejecting %pK", inst);
@@ -1983,7 +1992,6 @@
dprintk(VIDC_WARN, "%s: Core %pK\n", __func__, core);
mutex_lock(&core->lock);
- core->state = VIDC_CORE_INVALID;
list_for_each_entry(inst, &core->instances, list) {
mutex_lock(&inst->lock);
@@ -2020,52 +2028,37 @@
}
mutex_lock(&core->lock);
- if (core->state == VIDC_CORE_INVALID ||
- core->state == VIDC_CORE_UNINIT) {
+ if (core->state == VIDC_CORE_UNINIT) {
dprintk(VIDC_ERR,
- "%s: Core already moved to state %d\n",
- __func__, core->state);
+ "%s: Core %pK already moved to state %d\n",
+ __func__, core, core->state);
mutex_unlock(&core->lock);
return;
}
- mutex_unlock(&core->lock);
- dprintk(VIDC_WARN, "SYS_ERROR %d received for core %pK\n", cmd, core);
- msm_comm_clean_notify_client(core);
-
- hdev = core->device;
- mutex_lock(&core->lock);
- if (core->state == VIDC_CORE_INVALID) {
- dprintk(VIDC_DBG, "Calling core_release\n");
- rc = call_hfi_op(hdev, core_release,
- hdev->hfi_device_data);
- if (rc) {
- dprintk(VIDC_ERR, "core_release failed\n");
- mutex_unlock(&core->lock);
- return;
- }
- core->state = VIDC_CORE_UNINIT;
- }
- mutex_unlock(&core->lock);
-
- msm_vidc_print_running_insts(core);
+ dprintk(VIDC_WARN, "SYS_ERROR received for core %pK\n", core);
call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
+ list_for_each_entry(inst, &core->instances, list) {
+ dprintk(VIDC_WARN,
+ "%s: Send sys error for inst %pK\n", __func__, inst);
+ change_inst_state(inst, MSM_VIDC_CORE_INVALID);
+ msm_vidc_queue_v4l2_event(inst, V4L2_EVENT_MSM_VIDC_SYS_ERROR);
+ msm_comm_print_inst_info(inst);
+ }
+ hdev = core->device;
+ dprintk(VIDC_DBG, "Calling core_release\n");
+ rc = call_hfi_op(hdev, core_release, hdev->hfi_device_data);
+ if (rc) {
+ dprintk(VIDC_ERR, "core_release failed\n");
+ mutex_unlock(&core->lock);
+ return;
+ }
+ core->state = VIDC_CORE_UNINIT;
+ mutex_unlock(&core->lock);
+
dprintk(VIDC_ERR,
"SYS_ERROR can potentially crash the system\n");
- /*
- * For SYS_ERROR, there will not be any inst pointer.
- * Just grab one of the inst from instances list and
- * use it.
- */
-
- mutex_lock(&core->lock);
- inst = list_first_entry_or_null(&core->instances,
- struct msm_vidc_inst, list);
- mutex_unlock(&core->lock);
-
- msm_comm_print_debug_info(inst);
-
msm_vidc_handle_hw_error(core);
}
@@ -2078,19 +2071,22 @@
dprintk(VIDC_ERR, "%s invalid params\n", __func__);
return;
}
+ if (!inst->session) {
+ dprintk(VIDC_DBG, "%s: inst %pK session already cleaned\n",
+ __func__, inst);
+ return;
+ }
hdev = inst->core->device;
mutex_lock(&inst->lock);
- if (hdev && inst->session) {
- dprintk(VIDC_DBG, "cleaning up instance: %pK\n", inst);
- rc = call_hfi_op(hdev, session_clean,
- (void *)inst->session);
- if (rc) {
- dprintk(VIDC_ERR,
- "Session clean failed :%pK\n", inst);
- }
- inst->session = NULL;
+ dprintk(VIDC_DBG, "%s: inst %pK\n", __func__, inst);
+ rc = call_hfi_op(hdev, session_clean,
+ (void *)inst->session);
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "Session clean failed :%pK\n", inst);
}
+ inst->session = NULL;
mutex_unlock(&inst->lock);
}
@@ -2185,11 +2181,31 @@
return 0;
}
+static bool is_eos_buffer(struct msm_vidc_inst *inst, u32 device_addr)
+{
+ struct eos_buf *temp, *next;
+ bool found = false;
+
+ mutex_lock(&inst->eosbufs.lock);
+ list_for_each_entry_safe(temp, next, &inst->eosbufs.list, list) {
+ if (temp->smem.device_addr == device_addr) {
+ found = true;
+ list_del(&temp->list);
+ msm_comm_smem_free(inst, &temp->smem);
+ kfree(temp);
+ break;
+ }
+ }
+ mutex_unlock(&inst->eosbufs.lock);
+
+ return found;
+}
+
static void handle_ebd(enum hal_command_response cmd, void *data)
{
struct msm_vidc_cb_data_done *response = data;
struct msm_vidc_buffer *mbuf;
- struct vb2_buffer *vb;
+ struct vb2_buffer *vb, *vb2;
struct msm_vidc_inst *inst;
struct vidc_hal_ebd *empty_buf_done;
struct vb2_v4l2_buffer *vbuf;
@@ -2209,22 +2225,42 @@
}
empty_buf_done = (struct vidc_hal_ebd *)&response->input_done;
+ /* If this is internal EOS buffer, handle it in driver */
+ if (is_eos_buffer(inst, empty_buf_done->packet_buffer)) {
+ dprintk(VIDC_DBG, "Received EOS buffer %pK\n",
+ (void *)(u64)empty_buf_done->packet_buffer);
+ goto exit;
+ }
+
planes[0] = empty_buf_done->packet_buffer;
planes[1] = empty_buf_done->extra_data_buffer;
mbuf = msm_comm_get_buffer_using_device_planes(inst, planes);
- if (!mbuf) {
+ if (!mbuf || !kref_get_mbuf(inst, mbuf)) {
dprintk(VIDC_ERR,
"%s: data_addr %x, extradata_addr %x not found\n",
__func__, planes[0], planes[1]);
goto exit;
}
+ vb2 = msm_comm_get_vb_using_vidc_buffer(inst, mbuf);
+
+ /*
+ * take registeredbufs.lock to update mbuf & vb2 variables together
+ * so that both are in sync else if mbuf and vb2 variables are not
+ * in sync msm_comm_compare_vb2_planes() returns false for the
+ * right buffer due to data_offset field mismatch.
+ */
+ mutex_lock(&inst->registeredbufs.lock);
vb = &mbuf->vvb.vb2_buf;
vb->planes[0].bytesused = response->input_done.filled_len;
if (vb->planes[0].bytesused > vb->planes[0].length)
dprintk(VIDC_INFO, "bytesused overflow length\n");
+ vb->planes[0].data_offset = response->input_done.offset;
+ if (vb->planes[0].data_offset > vb->planes[0].length)
+ dprintk(VIDC_INFO, "data_offset overflow length\n");
+
if (empty_buf_done->status == VIDC_ERR_NOT_SUPPORTED) {
dprintk(VIDC_INFO, "Failed : Unsupported input stream\n");
mbuf->vvb.flags |= V4L2_QCOM_BUF_INPUT_UNSUPPORTED;
@@ -2241,26 +2277,28 @@
if (extra_idx && extra_idx < VIDEO_MAX_PLANES)
vb->planes[extra_idx].bytesused = vb->planes[extra_idx].length;
+ if (vb2) {
+ vbuf = to_vb2_v4l2_buffer(vb2);
+ vbuf->flags |= mbuf->vvb.flags;
+ for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) {
+ vb2->planes[i].bytesused =
+ mbuf->vvb.vb2_buf.planes[i].bytesused;
+ vb2->planes[i].data_offset =
+ mbuf->vvb.vb2_buf.planes[i].data_offset;
+ }
+ }
+ mutex_unlock(&inst->registeredbufs.lock);
+
update_recon_stats(inst, &empty_buf_done->recon_stats);
msm_vidc_clear_freq_entry(inst, mbuf->smem[0].device_addr);
-
- vb = msm_comm_get_vb_using_vidc_buffer(inst, mbuf);
- if (vb) {
- vbuf = to_vb2_v4l2_buffer(vb);
- vbuf->flags |= mbuf->vvb.flags;
- for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++)
- vb->planes[i].bytesused =
- mbuf->vvb.vb2_buf.planes[i].bytesused;
- }
/*
* put_buffer should be done before vb2_buffer_done else
* client might queue the same buffer before it is unmapped
- * in put_buffer. also don't use mbuf after put_buffer
- * as it may be freed in put_buffer.
+ * in put_buffer.
*/
msm_comm_put_vidc_buffer(inst, mbuf);
- msm_comm_vb2_buffer_done(inst, vb);
-
+ msm_comm_vb2_buffer_done(inst, vb2);
+ kref_put_mbuf(mbuf);
exit:
put_inst(inst);
}
@@ -2312,7 +2350,7 @@
struct msm_vidc_cb_data_done *response = data;
struct msm_vidc_buffer *mbuf;
struct msm_vidc_inst *inst;
- struct vb2_buffer *vb = NULL;
+ struct vb2_buffer *vb, *vb2;
struct vidc_hal_fbd *fill_buf_done;
struct vb2_v4l2_buffer *vbuf;
enum hal_buffer buffer_type;
@@ -2339,12 +2377,13 @@
buffer_type = msm_comm_get_hal_output_buffer(inst);
if (fill_buf_done->buffer_type == buffer_type) {
mbuf = msm_comm_get_buffer_using_device_planes(inst, planes);
- if (!mbuf) {
+ if (!mbuf || !kref_get_mbuf(inst, mbuf)) {
dprintk(VIDC_ERR,
"%s: data_addr %x, extradata_addr %x not found\n",
__func__, planes[0], planes[1]);
goto exit;
}
+ vb2 = msm_comm_get_vb_using_vidc_buffer(inst, mbuf);
} else {
if (handle_multi_stream_buffers(inst,
fill_buf_done->packet_buffer1))
@@ -2353,6 +2392,14 @@
&fill_buf_done->packet_buffer1);
goto exit;
}
+
+ /*
+ * take registeredbufs.lock to update mbuf & vb2 variables together
+ * so that both are in sync else if mbuf and vb2 variables are not
+ * in sync msm_comm_compare_vb2_planes() returns false for the
+ * right buffer due to data_offset field mismatch.
+ */
+ mutex_lock(&inst->registeredbufs.lock);
vb = &mbuf->vvb.vb2_buf;
if (fill_buf_done->flags1 & HAL_BUFFERFLAG_DROP_FRAME ||
@@ -2364,10 +2411,12 @@
"fbd:Overflow bytesused = %d; length = %d\n",
vb->planes[0].bytesused,
vb->planes[0].length);
- if (vb->planes[0].data_offset != fill_buf_done->offset1)
- dprintk(VIDC_ERR, "%s: data_offset %d vs %d\n",
- __func__, vb->planes[0].data_offset,
- fill_buf_done->offset1);
+ vb->planes[0].data_offset = fill_buf_done->offset1;
+ if (vb->planes[0].data_offset > vb->planes[0].length)
+ dprintk(VIDC_INFO,
+ "fbd:Overflow data_offset = %d; length = %d\n",
+ vb->planes[0].data_offset,
+ vb->planes[0].length);
if (!(fill_buf_done->flags1 & HAL_BUFFERFLAG_TIMESTAMPINVALID)) {
time_usec = fill_buf_done->timestamp_hi;
time_usec = (time_usec << 32) | fill_buf_done->timestamp_lo;
@@ -2425,23 +2474,27 @@
break;
}
- vb = msm_comm_get_vb_using_vidc_buffer(inst, mbuf);
- if (vb) {
- vbuf = to_vb2_v4l2_buffer(vb);
+ if (vb2) {
+ vbuf = to_vb2_v4l2_buffer(vb2);
vbuf->flags = mbuf->vvb.flags;
- vb->timestamp = mbuf->vvb.vb2_buf.timestamp;
- for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++)
- vb->planes[i].bytesused =
+ vb2->timestamp = mbuf->vvb.vb2_buf.timestamp;
+ for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) {
+ vb2->planes[i].bytesused =
mbuf->vvb.vb2_buf.planes[i].bytesused;
+ vb2->planes[i].data_offset =
+ mbuf->vvb.vb2_buf.planes[i].data_offset;
+ }
}
+ mutex_unlock(&inst->registeredbufs.lock);
+
/*
* put_buffer should be done before vb2_buffer_done else
* client might queue the same buffer before it is unmapped
- * in put_buffer. also don't use mbuf after put_buffer
- * as it may be freed in put_buffer.
+ * in put_buffer.
*/
msm_comm_put_vidc_buffer(inst, mbuf);
- msm_comm_vb2_buffer_done(inst, vb);
+ msm_comm_vb2_buffer_done(inst, vb2);
+ kref_put_mbuf(mbuf);
exit:
put_inst(inst);
@@ -2580,31 +2633,26 @@
hdev = inst->core->device;
abort_completion = SESSION_MSG_INDEX(HAL_SESSION_ABORT_DONE);
+ dprintk(VIDC_WARN, "%s: inst %pK\n", __func__, inst);
rc = call_hfi_op(hdev, session_abort, (void *)inst->session);
if (rc) {
dprintk(VIDC_ERR,
"%s session_abort failed rc: %d\n", __func__, rc);
- return rc;
+ goto exit;
}
rc = wait_for_completion_timeout(
&inst->completions[abort_completion],
msecs_to_jiffies(
inst->core->resources.msm_vidc_hw_rsp_timeout));
if (!rc) {
- dprintk(VIDC_ERR,
- "%s: Wait interrupted or timed out [%pK]: %d\n",
- __func__, inst, abort_completion);
- call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
- dprintk(VIDC_ERR,
- "ABORT timeout can potentially crash the system\n");
- msm_comm_print_debug_info(inst);
-
- msm_vidc_handle_hw_error(inst->core);
+ dprintk(VIDC_ERR, "%s: inst %pK abort timed out\n",
+ __func__, inst);
+ msm_comm_generate_sys_error(inst);
rc = -EBUSY;
} else {
rc = 0;
}
- msm_comm_session_clean(inst);
+exit:
return rc;
}
@@ -2668,8 +2716,6 @@
int msm_comm_check_core_init(struct msm_vidc_core *core)
{
int rc = 0;
- struct hfi_device *hdev;
- struct msm_vidc_inst *inst = NULL;
mutex_lock(&core->lock);
if (core->state >= VIDC_CORE_INIT_DONE) {
@@ -2678,29 +2724,12 @@
goto exit;
}
dprintk(VIDC_DBG, "Waiting for SYS_INIT_DONE\n");
- hdev = (struct hfi_device *)core->device;
rc = wait_for_completion_timeout(
&core->completions[SYS_MSG_INDEX(HAL_SYS_INIT_DONE)],
msecs_to_jiffies(core->resources.msm_vidc_hw_rsp_timeout));
if (!rc) {
dprintk(VIDC_ERR, "%s: Wait interrupted or timed out: %d\n",
__func__, SYS_MSG_INDEX(HAL_SYS_INIT_DONE));
- call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
- dprintk(VIDC_ERR,
- "SYS_INIT timeout can potentially crash the system\n");
- /*
- * For SYS_INIT, there will not be any inst pointer.
- * Just grab one of the inst from instances list and
- * use it.
- */
- inst = list_first_entry(&core->instances,
- struct msm_vidc_inst, list);
-
- mutex_unlock(&core->lock);
- msm_comm_print_debug_info(inst);
- mutex_lock(&core->lock);
-
- msm_vidc_handle_hw_error(core);
rc = -EIO;
goto exit;
} else {
@@ -2759,7 +2788,7 @@
"%s: capabilities memory is expected to be freed\n",
__func__);
}
-
+ dprintk(VIDC_DBG, "%s: core %pK\n", __func__, core);
rc = call_hfi_op(hdev, core_init, hdev->hfi_device_data);
if (rc) {
dprintk(VIDC_ERR, "Failed to init core, id = %d\n",
@@ -2819,13 +2848,12 @@
* e.g. thumbnail generation.
*/
schedule_delayed_work(&core->fw_unload_work,
- msecs_to_jiffies(core->state == VIDC_CORE_INVALID ?
- 0 :
- core->resources.msm_vidc_firmware_unload_delay));
+ msecs_to_jiffies(core->state == VIDC_CORE_INIT_DONE ?
+ core->resources.msm_vidc_firmware_unload_delay : 0));
dprintk(VIDC_DBG, "firmware unload delayed by %u ms\n",
- core->state == VIDC_CORE_INVALID ?
- 0 : core->resources.msm_vidc_firmware_unload_delay);
+ core->state == VIDC_CORE_INIT_DONE ?
+ core->resources.msm_vidc_firmware_unload_delay : 0);
}
core_already_uninited:
@@ -2869,6 +2897,7 @@
msm_comm_init_clocks_and_bus_data(inst);
+ dprintk(VIDC_DBG, "%s: inst %pK\n", __func__, inst);
rc = call_hfi_op(hdev, session_init, hdev->hfi_device_data,
inst, get_hal_domain(inst->session_type),
get_hal_codec(fourcc),
@@ -2934,19 +2963,17 @@
dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
return -EINVAL;
}
-
- core = inst->core;
- if (core->state == VIDC_CORE_INVALID) {
- dprintk(VIDC_ERR,
- "Core is in bad state can't do load res\n");
- return -EINVAL;
- }
-
if (inst->state == MSM_VIDC_CORE_INVALID) {
dprintk(VIDC_ERR,
- "Instance is in invalid state can't do load res\n");
+ "%s: inst %pK is in invalid state\n", __func__, inst);
return -EINVAL;
}
+ if (IS_ALREADY_IN_STATE(flipped_state, MSM_VIDC_LOAD_RESOURCES)) {
+ dprintk(VIDC_INFO, "inst: %pK is already in state: %d\n",
+ inst, inst->state);
+ goto exit;
+ }
+ core = inst->core;
num_mbs_per_sec =
msm_comm_get_load(core, MSM_VIDC_DECODER, quirks) +
@@ -2959,18 +2986,12 @@
dprintk(VIDC_ERR, "HW is overloaded, needed: %d max: %d\n",
num_mbs_per_sec, max_load_adj);
msm_vidc_print_running_insts(core);
- inst->state = MSM_VIDC_CORE_INVALID;
msm_comm_kill_session(inst);
return -EBUSY;
}
hdev = core->device;
- if (IS_ALREADY_IN_STATE(flipped_state, MSM_VIDC_LOAD_RESOURCES)) {
- dprintk(VIDC_INFO, "inst: %pK is already in state: %d\n",
- inst, inst->state);
- goto exit;
- }
-
+ dprintk(VIDC_DBG, "%s: inst %pK\n", __func__, inst);
rc = call_hfi_op(hdev, session_load_res, (void *) inst->session);
if (rc) {
dprintk(VIDC_ERR,
@@ -2991,21 +3012,19 @@
dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
return -EINVAL;
}
- if (inst->state == MSM_VIDC_CORE_INVALID ||
- inst->core->state == VIDC_CORE_INVALID) {
+ if (inst->state == MSM_VIDC_CORE_INVALID) {
dprintk(VIDC_ERR,
- "Core is in bad state can't do start\n");
+ "%s: inst %pK is in invalid\n", __func__, inst);
return -EINVAL;
}
-
- hdev = inst->core->device;
-
if (IS_ALREADY_IN_STATE(flipped_state, MSM_VIDC_START)) {
dprintk(VIDC_INFO,
"inst: %pK is already in state: %d\n",
inst, inst->state);
goto exit;
}
+ hdev = inst->core->device;
+ dprintk(VIDC_DBG, "%s: inst %pK\n", __func__, inst);
rc = call_hfi_op(hdev, session_start, (void *) inst->session);
if (rc) {
dprintk(VIDC_ERR,
@@ -3026,18 +3045,23 @@
dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
return -EINVAL;
}
- hdev = inst->core->device;
-
+ if (inst->state == MSM_VIDC_CORE_INVALID) {
+ dprintk(VIDC_ERR,
+ "%s: inst %pK is in invalid state\n", __func__, inst);
+ return -EINVAL;
+ }
if (IS_ALREADY_IN_STATE(flipped_state, MSM_VIDC_STOP)) {
dprintk(VIDC_INFO,
"inst: %pK is already in state: %d\n",
inst, inst->state);
goto exit;
}
- dprintk(VIDC_DBG, "Send Stop to hal\n");
+ hdev = inst->core->device;
+ dprintk(VIDC_DBG, "%s: inst %pK\n", __func__, inst);
rc = call_hfi_op(hdev, session_stop, (void *) inst->session);
if (rc) {
- dprintk(VIDC_ERR, "Failed to send stop\n");
+ dprintk(VIDC_ERR, "%s: inst %pK session_stop failed\n",
+ __func__, inst);
goto exit;
}
change_inst_state(inst, MSM_VIDC_STOP);
@@ -3054,16 +3078,19 @@
dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
return -EINVAL;
}
- hdev = inst->core->device;
-
+ if (inst->state == MSM_VIDC_CORE_INVALID) {
+ dprintk(VIDC_ERR,
+ "%s: inst %pK is in invalid state\n", __func__, inst);
+ return -EINVAL;
+ }
if (IS_ALREADY_IN_STATE(flipped_state, MSM_VIDC_RELEASE_RESOURCES)) {
dprintk(VIDC_INFO,
"inst: %pK is already in state: %d\n",
inst, inst->state);
goto exit;
}
- dprintk(VIDC_DBG,
- "Send release res to hal\n");
+ hdev = inst->core->device;
+ dprintk(VIDC_DBG, "%s: inst %pK\n", __func__, inst);
rc = call_hfi_op(hdev, session_release_res, (void *) inst->session);
if (rc) {
dprintk(VIDC_ERR,
@@ -3085,15 +3112,14 @@
dprintk(VIDC_ERR, "%s invalid params\n", __func__);
return -EINVAL;
}
- hdev = inst->core->device;
if (IS_ALREADY_IN_STATE(flipped_state, MSM_VIDC_CLOSE)) {
dprintk(VIDC_INFO,
"inst: %pK is already in state: %d\n",
inst, inst->state);
goto exit;
}
- dprintk(VIDC_DBG,
- "Send session close to hal\n");
+ hdev = inst->core->device;
+ dprintk(VIDC_DBG, "%s: inst %pK\n", __func__, inst);
rc = call_hfi_op(hdev, session_end, (void *) inst->session);
if (rc) {
dprintk(VIDC_ERR,
@@ -3126,7 +3152,7 @@
}
mutex_lock(&core->lock);
- if (core->state == VIDC_CORE_INVALID) {
+ if (core->state != VIDC_CORE_INIT_DONE) {
dprintk(VIDC_ERR,
"%s - fw is not in proper state, skip suspend\n",
__func__);
@@ -3195,6 +3221,11 @@
buffer_type);
return 0;
}
+
+ /* For DPB buffers, Always use FW count */
+ output_buf->buffer_count_actual = output_buf->buffer_count_min_host =
+ output_buf->buffer_count_min;
+
dprintk(VIDC_DBG,
"output: num = %d, size = %d\n",
output_buf->buffer_count_actual,
@@ -3484,30 +3515,23 @@
{
int rc = 0;
int flipped_state;
- struct msm_vidc_core *core;
if (!inst) {
- dprintk(VIDC_ERR,
- "Invalid instance pointer = %pK\n", inst);
+ dprintk(VIDC_ERR, "%s: invalid params %pK", __func__, inst);
return -EINVAL;
}
dprintk(VIDC_DBG,
"Trying to move inst: %pK from: %#x to %#x\n",
inst, inst->state, state);
- core = inst->core;
- if (!core) {
- dprintk(VIDC_ERR,
- "Invalid core pointer = %pK\n", inst);
+
+ mutex_lock(&inst->sync_lock);
+ if (inst->state == MSM_VIDC_CORE_INVALID) {
+ dprintk(VIDC_ERR, "%s: inst %pK is in invalid\n",
+ __func__, inst);
+ mutex_unlock(&inst->sync_lock);
return -EINVAL;
}
- mutex_lock(&inst->sync_lock);
- if (inst->state == MSM_VIDC_CORE_INVALID ||
- core->state == VIDC_CORE_INVALID) {
- dprintk(VIDC_ERR,
- "Core is in bad state can't change the state\n");
- rc = -EINVAL;
- goto exit;
- }
+
flipped_state = get_flipped_state(inst->state, state);
dprintk(VIDC_DBG,
"flipped_state = %#x\n", flipped_state);
@@ -3587,15 +3611,17 @@
rc = -EINVAL;
break;
}
-exit:
mutex_unlock(&inst->sync_lock);
- if (rc)
+
+ if (rc) {
dprintk(VIDC_ERR,
"Failed to move from state: %d to %d\n",
inst->state, state);
- else
+ msm_comm_kill_session(inst);
+ } else {
trace_msm_vidc_common_state_change((void *)inst,
inst->state, state);
+ }
return rc;
}
@@ -3625,14 +3651,6 @@
switch (which_cmd) {
case V4L2_QCOM_CMD_FLUSH:
- if (core->state != VIDC_CORE_INVALID &&
- inst->state == MSM_VIDC_CORE_INVALID) {
- rc = msm_comm_kill_session(inst);
- if (rc)
- dprintk(VIDC_ERR,
- "Fail to clean session: %d\n",
- rc);
- }
rc = msm_comm_flush(inst, flags);
if (rc) {
dprintk(VIDC_ERR,
@@ -3677,6 +3695,64 @@
rc = msm_comm_session_continue(inst);
break;
}
+ case V4L2_DEC_CMD_STOP:
+ {
+ struct vidc_frame_data data = {0};
+ struct hfi_device *hdev;
+ struct eos_buf *binfo;
+ u32 smem_flags = 0;
+
+ get_inst(inst->core, inst);
+ if (inst->session_type != MSM_VIDC_DECODER) {
+ dprintk(VIDC_DBG,
+ "Non-Decoder session. DEC_STOP is not valid\n");
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
+ if (!binfo) {
+ dprintk(VIDC_ERR, "%s: Out of memory\n", __func__);
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ if (inst->flags & VIDC_SECURE)
+ smem_flags |= SMEM_SECURE;
+
+ rc = msm_comm_smem_alloc(inst,
+ SZ_4K, 1, smem_flags,
+ HAL_BUFFER_INPUT, 0, &binfo->smem);
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "Failed to allocate output memory\n");
+ goto exit;
+ }
+
+ mutex_lock(&inst->eosbufs.lock);
+ list_add_tail(&binfo->list, &inst->eosbufs.list);
+ mutex_unlock(&inst->eosbufs.lock);
+
+ data.alloc_len = binfo->smem.size;
+ data.device_addr = binfo->smem.device_addr;
+ data.clnt_data = data.device_addr;
+ data.buffer_type = HAL_BUFFER_INPUT;
+ data.filled_len = 0;
+ data.offset = 0;
+ data.flags = HAL_BUFFERFLAG_EOS;
+ data.timestamp = LLONG_MAX;
+ data.extradata_addr = data.device_addr;
+ data.extradata_size = 0;
+ dprintk(VIDC_DBG, "Queueing EOS buffer %pK\n",
+ (void *)(u64)data.device_addr);
+ hdev = inst->core->device;
+
+ rc = call_hfi_op(hdev, session_etb, inst->session,
+ &data);
+exit:
+ put_inst(inst);
+ break;
+ }
default:
dprintk(VIDC_ERR, "Unknown Command %d\n", which_cmd);
rc = -ENOTSUPP;
@@ -3763,7 +3839,7 @@
continue;
/* count only deferred buffers */
- if (!mbuf->deferred)
+ if (!(mbuf->flags & MSM_VIDC_FLAG_DEFERRED))
continue;
++count;
@@ -3791,7 +3867,7 @@
continue;
/* count only deferred buffers */
- if (!mbuf->deferred)
+ if (!(mbuf->flags & MSM_VIDC_FLAG_DEFERRED))
continue;
++count;
@@ -3839,6 +3915,41 @@
}
}
+static int msm_comm_qbuf_rbr(struct msm_vidc_inst *inst,
+ struct msm_vidc_buffer *mbuf)
+{
+ int rc = 0;
+ struct hfi_device *hdev;
+ struct vidc_frame_data frame_data = {0};
+
+ if (!inst || !inst->core || !inst->core->device || !mbuf) {
+ dprintk(VIDC_ERR, "%s: Invalid arguments\n", __func__);
+ return -EINVAL;
+ }
+
+ hdev = inst->core->device;
+
+ if (inst->state == MSM_VIDC_CORE_INVALID) {
+ dprintk(VIDC_ERR, "%s: inst is in bad state\n", __func__);
+ return -EINVAL;
+ }
+
+ rc = msm_comm_scale_clocks_and_bus(inst);
+ populate_frame_data(&frame_data, mbuf, inst);
+
+ rc = call_hfi_op(hdev, session_ftb, inst->session, &frame_data);
+ if (rc) {
+ dprintk(VIDC_ERR, "Failed to issue ftb: %d\n", rc);
+ goto err_bad_input;
+ }
+
+ log_frame(inst, &frame_data, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+
+err_bad_input:
+ return rc;
+}
+
+
/*
* Attempts to queue `vb` to hardware. If, for various reasons, the buffer
* cannot be queued to hardware, the buffer will be staged for commit in the
@@ -3870,10 +3981,6 @@
return -EINVAL;
}
- /* initially assume every buffer is going to be deferred */
- if (mbuf)
- mbuf->deferred = true;
-
batch_mode = msm_comm_g_ctrl_for_id(inst, V4L2_CID_VIDC_QBUF_MODE)
== V4L2_VIDC_QBUF_BATCHED;
capture_count = (batch_mode ? &count_single_batch : &count_buffers)
@@ -3901,7 +4008,7 @@
if (defer) {
if (mbuf) {
- mbuf->deferred = true;
+ mbuf->flags |= MSM_VIDC_FLAG_DEFERRED;
print_vidc_buffer(VIDC_DBG, "deferred qbuf",
inst, mbuf);
}
@@ -3942,7 +4049,7 @@
list_for_each_entry_safe(temp, next, &inst->registeredbufs.list, list) {
struct vidc_frame_data *frame_data = NULL;
- if (!temp->deferred)
+ if (!(temp->flags & MSM_VIDC_FLAG_DEFERRED))
continue;
switch (temp->vvb.vb2_buf.type) {
@@ -3964,7 +4071,7 @@
populate_frame_data(frame_data, temp, inst);
/* this buffer going to be queued (not deferred) */
- temp->deferred = false;
+ temp->flags &= ~MSM_VIDC_FLAG_DEFERRED;
print_vidc_buffer(VIDC_DBG, "qbuf", inst, temp);
}
@@ -4044,7 +4151,7 @@
return rc;
}
-static int msm_vidc_update_host_buff_counts(struct msm_vidc_inst *inst)
+int msm_vidc_update_host_buff_counts(struct msm_vidc_inst *inst)
{
int extra_buffers;
struct hal_buffer_requirements *bufreq;
@@ -4058,9 +4165,14 @@
return -EINVAL;
}
extra_buffers = msm_vidc_get_extra_buff_count(inst, HAL_BUFFER_INPUT);
-
bufreq->buffer_count_min_host = bufreq->buffer_count_min +
extra_buffers;
+ bufreq = get_buff_req_buffer(inst, HAL_BUFFER_EXTRADATA_INPUT);
+ if (bufreq) {
+ if (bufreq->buffer_count_min)
+ bufreq->buffer_count_min_host =
+ bufreq->buffer_count_min + extra_buffers;
+ }
if (msm_comm_get_stream_output_mode(inst) ==
HAL_VIDEO_DECODER_SECONDARY) {
@@ -4075,7 +4187,6 @@
}
/* For DPB buffers, no need to add Extra buffers */
-
bufreq->buffer_count_min_host = bufreq->buffer_count_actual =
bufreq->buffer_count_min;
@@ -4093,6 +4204,20 @@
bufreq->buffer_count_min_host = bufreq->buffer_count_actual =
bufreq->buffer_count_min + extra_buffers;
+
+ bufreq = get_buff_req_buffer(inst,
+ HAL_BUFFER_EXTRADATA_OUTPUT2);
+ if (!bufreq) {
+ dprintk(VIDC_DBG,
+ "No buffer requirements : %x\n",
+ HAL_BUFFER_EXTRADATA_OUTPUT2);
+ } else {
+ if (bufreq->buffer_count_min) {
+ bufreq->buffer_count_min_host =
+ bufreq->buffer_count_actual =
+ bufreq->buffer_count_min + extra_buffers;
+ }
+ }
} else {
bufreq = get_buff_req_buffer(inst,
@@ -4109,6 +4234,20 @@
bufreq->buffer_count_min_host = bufreq->buffer_count_actual =
bufreq->buffer_count_min + extra_buffers;
+
+ bufreq = get_buff_req_buffer(inst,
+ HAL_BUFFER_EXTRADATA_OUTPUT);
+ if (!bufreq) {
+ dprintk(VIDC_DBG,
+ "No buffer requirements : %x\n",
+ HAL_BUFFER_EXTRADATA_OUTPUT);
+ } else {
+ if (bufreq->buffer_count_min) {
+ bufreq->buffer_count_min_host =
+ bufreq->buffer_count_actual =
+ bufreq->buffer_count_min + extra_buffers;
+ }
+ }
}
return 0;
@@ -4143,8 +4282,8 @@
req.buffer_count_min, req.buffer_size);
}
}
-
- rc = msm_vidc_update_host_buff_counts(inst);
+ if (inst->session_type == MSM_VIDC_ENCODER)
+ rc = msm_vidc_update_host_buff_counts(inst);
dprintk(VIDC_DBG, "Buffer requirements host adjusted:\n");
dprintk(VIDC_DBG, "%15s %8s %8s %8s %8s\n",
@@ -4219,13 +4358,6 @@
"%s: Wait interrupted or timed out [%pK]: %d\n",
__func__, inst,
SESSION_MSG_INDEX(HAL_SESSION_PROPERTY_INFO));
- inst->state = MSM_VIDC_CORE_INVALID;
- call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
- dprintk(VIDC_ERR,
- "SESS_PROP timeout can potentially crash the system\n");
- msm_comm_print_debug_info(inst);
-
- msm_vidc_handle_hw_error(inst->core);
msm_comm_kill_session(inst);
rc = -ETIMEDOUT;
goto exit;
@@ -4300,9 +4432,7 @@
buffer_info.num_buffers = 1;
buffer_info.align_device_addr = handle->device_addr;
if (inst->buffer_mode_set[CAPTURE_PORT] ==
- HAL_BUFFER_MODE_STATIC &&
- inst->state != MSM_VIDC_CORE_INVALID &&
- core->state != VIDC_CORE_INVALID) {
+ HAL_BUFFER_MODE_STATIC) {
buffer_info.response_required = false;
rc = call_hfi_op(hdev, session_release_buffers,
(void *)inst->session, &buffer_info);
@@ -4408,26 +4538,23 @@
buffer_info.buffer_type = buf->buffer_type;
buffer_info.num_buffers = 1;
buffer_info.align_device_addr = handle->device_addr;
- if (inst->state != MSM_VIDC_CORE_INVALID &&
- core->state != VIDC_CORE_INVALID) {
- buffer_info.response_required = true;
- rc = call_hfi_op(hdev, session_release_buffers,
+ buffer_info.response_required = true;
+ rc = call_hfi_op(hdev, session_release_buffers,
(void *)inst->session, &buffer_info);
- if (rc) {
- dprintk(VIDC_WARN,
- "Rel scrtch buf fail:%x, %d\n",
- buffer_info.align_device_addr,
- buffer_info.buffer_size);
- }
+ if (!rc) {
mutex_unlock(&inst->scratchbufs.lock);
rc = wait_for_sess_signal_receipt(inst,
HAL_SESSION_RELEASE_BUFFER_DONE);
- if (rc) {
- change_inst_state(inst,
- MSM_VIDC_CORE_INVALID);
- msm_comm_kill_session(inst);
- }
+ if (rc)
+ dprintk(VIDC_WARN,
+ "%s: wait for signal failed, rc %d\n",
+ __func__, rc);
mutex_lock(&inst->scratchbufs.lock);
+ } else {
+ dprintk(VIDC_WARN,
+ "Rel scrtch buf fail:%x, %d\n",
+ buffer_info.align_device_addr,
+ buffer_info.buffer_size);
}
/*If scratch buffers can be reused, do not free the buffers*/
@@ -4443,6 +4570,26 @@
return rc;
}
+void msm_comm_release_eos_buffers(struct msm_vidc_inst *inst)
+{
+ struct eos_buf *buf, *next;
+
+ if (!inst) {
+ dprintk(VIDC_ERR,
+ "Invalid instance pointer = %pK\n", inst);
+ return;
+ }
+
+ mutex_lock(&inst->eosbufs.lock);
+ list_for_each_entry_safe(buf, next, &inst->eosbufs.list, list) {
+ list_del(&buf->list);
+ kfree(buf);
+ }
+ INIT_LIST_HEAD(&inst->eosbufs.list);
+ mutex_unlock(&inst->eosbufs.lock);
+}
+
+
int msm_comm_release_recon_buffers(struct msm_vidc_inst *inst)
{
struct recon_buf *buf, *next;
@@ -4499,25 +4646,23 @@
buffer_info.buffer_type = buf->buffer_type;
buffer_info.num_buffers = 1;
buffer_info.align_device_addr = handle->device_addr;
- if (inst->state != MSM_VIDC_CORE_INVALID &&
- core->state != VIDC_CORE_INVALID) {
- buffer_info.response_required = true;
- rc = call_hfi_op(hdev, session_release_buffers,
+ buffer_info.response_required = true;
+ rc = call_hfi_op(hdev, session_release_buffers,
(void *)inst->session, &buffer_info);
- if (rc) {
- dprintk(VIDC_WARN,
- "Rel prst buf fail:%x, %d\n",
- buffer_info.align_device_addr,
- buffer_info.buffer_size);
- }
+ if (!rc) {
mutex_unlock(&inst->persistbufs.lock);
rc = wait_for_sess_signal_receipt(inst,
HAL_SESSION_RELEASE_BUFFER_DONE);
- if (rc) {
- change_inst_state(inst, MSM_VIDC_CORE_INVALID);
- msm_comm_kill_session(inst);
- }
+ if (rc)
+ dprintk(VIDC_WARN,
+ "%s: wait for signal failed, rc %d\n",
+ __func__, rc);
mutex_lock(&inst->persistbufs.lock);
+ } else {
+ dprintk(VIDC_WARN,
+ "Rel prst buf fail:%x, %d\n",
+ buffer_info.align_device_addr,
+ buffer_info.buffer_size);
}
list_del(&buf->list);
msm_comm_smem_free(inst, handle);
@@ -4644,8 +4789,7 @@
return 0;
}
- if (!list_empty(&inst->reconbufs.list))
- msm_comm_release_recon_buffers(inst);
+ msm_comm_release_recon_buffers(inst);
for (i = 0; i < internal_buf->buffer_count_actual; i++) {
binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
@@ -4703,10 +4847,16 @@
&inst->bufq[port].vb2_bufq.queued_list) {
struct vb2_buffer *vb = container_of(ptr,
struct vb2_buffer, queued_entry);
- vb->planes[0].bytesused = 0;
- print_vb2_buffer(VIDC_ERR, "flush in invalid",
- inst, vb);
- vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+ if (vb->state == VB2_BUF_STATE_ACTIVE) {
+ vb->planes[0].bytesused = 0;
+ print_vb2_buffer(VIDC_ERR, "flush in invalid",
+ inst, vb);
+ vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+ } else {
+ dprintk(VIDC_WARN,
+ "%s VB is in state %d not in ACTIVE state\n"
+ , __func__, vb->state);
+ }
}
mutex_unlock(&inst->bufq[port].lock);
}
@@ -4716,7 +4866,7 @@
int msm_comm_flush(struct msm_vidc_inst *inst, u32 flags)
{
- int rc = 0;
+ int i, rc = 0;
bool ip_flush = false;
bool op_flush = false;
struct msm_vidc_buffer *mbuf, *next;
@@ -4741,8 +4891,6 @@
return 0;
}
- /* Finish FLUSH As Soon As Possible. */
-
msm_clock_data_reset(inst);
if (inst->state == MSM_VIDC_CORE_INVALID) {
@@ -4755,23 +4903,41 @@
mutex_lock(&inst->registeredbufs.lock);
list_for_each_entry_safe(mbuf, next, &inst->registeredbufs.list, list) {
- /* flush only deferred buffers (which are not queued yet) */
- if (!mbuf->deferred)
- continue;
-
- /* don't flush input buffers if flush not requested on it */
+ /* don't flush input buffers if input flush is not requested */
if (!ip_flush && mbuf->vvb.vb2_buf.type ==
V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
continue;
+ /* flush only deferred or rbr pending buffers */
+ if (!(mbuf->flags & MSM_VIDC_FLAG_DEFERRED ||
+ mbuf->flags & MSM_VIDC_FLAG_RBR_PENDING))
+ continue;
+
+ /*
+ * flush buffers which are queued by client already,
+ * the refcount will be two or more for those buffers.
+ */
+ if (!(mbuf->smem[0].refcount >= 2))
+ continue;
+
print_vidc_buffer(VIDC_DBG, "flush buf", inst, mbuf);
msm_comm_flush_vidc_buffer(inst, mbuf);
- msm_comm_unmap_vidc_buffer(inst, mbuf);
- /* remove from list */
- list_del(&mbuf->list);
- kfree(mbuf);
- mbuf = NULL;
+ for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) {
+ if (msm_smem_unmap_dma_buf(inst, &mbuf->smem[i]))
+ print_vidc_buffer(VIDC_ERR,
+ "dqbuf: unmap failed.", inst, mbuf);
+ if (msm_smem_unmap_dma_buf(inst, &mbuf->smem[i]))
+ print_vidc_buffer(VIDC_ERR,
+ "dqbuf: unmap failed..", inst, mbuf);
+ }
+ if (!mbuf->smem[0].refcount) {
+ list_del(&mbuf->list);
+ kref_put_mbuf(mbuf);
+ } else {
+ /* buffer is no longer a deferred buffer */
+ mbuf->flags &= ~MSM_VIDC_FLAG_DEFERRED;
+ }
}
mutex_unlock(&inst->registeredbufs.lock);
@@ -4893,6 +5059,9 @@
case V4L2_MPEG_VIDC_EXTRADATA_VPX_COLORSPACE:
ret = HAL_EXTRADATA_VPX_COLORSPACE;
break;
+ case V4L2_MPEG_VIDC_EXTRADATA_UBWC_CR_STATS_INFO:
+ ret = HAL_EXTRADATA_UBWC_CR_STATS_INFO;
+ break;
default:
dprintk(VIDC_WARN, "Extradata not found: %d\n", index);
break;
@@ -4958,6 +5127,7 @@
{
u32 x_min, x_max, y_min, y_max;
u32 input_height, input_width, output_height, output_width;
+ u32 rotation;
input_height = inst->prop.height[OUTPUT_PORT];
input_width = inst->prop.width[OUTPUT_PORT];
@@ -4993,6 +5163,20 @@
return 0;
}
+ rotation = msm_comm_g_ctrl_for_id(inst,
+ V4L2_CID_MPEG_VIDC_VIDEO_ROTATION);
+
+ if ((output_width != output_height) &&
+ (rotation == V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_90 ||
+ rotation == V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_270)) {
+
+ output_width = inst->prop.height[CAPTURE_PORT];
+ output_height = inst->prop.width[CAPTURE_PORT];
+ dprintk(VIDC_DBG,
+ "Rotation=%u Swapped Output W=%u H=%u to check scaling",
+ rotation, output_width, output_height);
+ }
+
x_min = (1<<16)/inst->capability.scale_x.min;
y_min = (1<<16)/inst->capability.scale_y.min;
x_max = inst->capability.scale_x.max >> 16;
@@ -5047,8 +5231,6 @@
core = inst->core;
rc = msm_vidc_load_supported(inst);
if (rc) {
- change_inst_state(inst, MSM_VIDC_CORE_INVALID);
- msm_comm_kill_session(inst);
dprintk(VIDC_WARN,
"%s: Hardware is overloaded\n", __func__);
return rc;
@@ -5075,7 +5257,7 @@
if (!rc && inst->prop.width[CAPTURE_PORT] >
capability->width.max) {
dprintk(VIDC_ERR,
- "Unsupported width = %u supported max width = %u",
+ "Unsupported width = %u supported max width = %u\n",
inst->prop.width[CAPTURE_PORT],
capability->width.max);
rc = -ENOTSUPP;
@@ -5097,8 +5279,6 @@
}
}
if (rc) {
- change_inst_state(inst, MSM_VIDC_CORE_INVALID);
- msm_comm_kill_session(inst);
dprintk(VIDC_ERR,
"%s: Resolution unsupported\n", __func__);
}
@@ -5110,12 +5290,11 @@
enum hal_command_response cmd = HAL_SESSION_ERROR;
struct msm_vidc_cb_cmd_done response = {0};
- dprintk(VIDC_WARN, "msm_comm_generate_session_error\n");
if (!inst || !inst->core) {
dprintk(VIDC_ERR, "%s: invalid input parameters\n", __func__);
return;
}
-
+ dprintk(VIDC_WARN, "%s: inst %pK\n", __func__, inst);
response.session_id = inst;
response.status = VIDC_ERR_FAIL;
handle_session_error(cmd, (void *)&response);
@@ -5131,6 +5310,7 @@
dprintk(VIDC_ERR, "%s: invalid input parameters\n", __func__);
return;
}
+ dprintk(VIDC_WARN, "%s: inst %pK\n", __func__, inst);
core = inst->core;
response.device_id = (u32) core->id;
handle_sys_error(cmd, (void *) &response);
@@ -5145,10 +5325,13 @@
dprintk(VIDC_ERR, "%s: invalid input parameters\n", __func__);
return -EINVAL;
} else if (!inst->session) {
- /* There's no hfi session to kill */
+ dprintk(VIDC_ERR, "%s: no session to kill for inst %pK\n",
+ __func__, inst);
return 0;
}
+ dprintk(VIDC_WARN, "%s: inst %pK, state %d\n", __func__,
+ inst, inst->state);
/*
* We're internally forcibly killing the session, if fw is aware of
* the session send session_abort to firmware to clean up and release
@@ -5157,20 +5340,18 @@
if ((inst->state >= MSM_VIDC_OPEN_DONE &&
inst->state < MSM_VIDC_CLOSE_DONE) ||
inst->state == MSM_VIDC_CORE_INVALID) {
- if (msm_comm_session_abort(inst)) {
- msm_comm_generate_sys_error(inst);
- return 0;
+ rc = msm_comm_session_abort(inst);
+ if (rc) {
+ dprintk(VIDC_WARN, "%s: inst %pK abort failed\n",
+ __func__, inst);
+ change_inst_state(inst, MSM_VIDC_CORE_INVALID);
}
- change_inst_state(inst, MSM_VIDC_CLOSE_DONE);
- msm_comm_generate_session_error(inst);
- } else {
- dprintk(VIDC_WARN,
- "Inactive session %pK, triggering an internal session error\n",
- inst);
- msm_comm_generate_session_error(inst);
-
}
+ change_inst_state(inst, MSM_VIDC_CLOSE_DONE);
+ msm_comm_session_clean(inst);
+
+ dprintk(VIDC_WARN, "%s: inst %pK handled\n", __func__, inst);
return rc;
}
@@ -5317,8 +5498,8 @@
}
} else if (inst->session_type == MSM_VIDC_ENCODER) {
if (b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
- if (!i) /* yuv */
- skip = true;
+ /* yuv and extradata */
+ skip = true;
} else if (b->type ==
V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
if (!i) /* bitstream */
@@ -5553,32 +5734,6 @@
mutex_unlock(&inst->outputbufs.lock);
}
-static void msm_comm_print_debug_info(struct msm_vidc_inst *inst)
-{
- struct msm_vidc_core *core = NULL;
- struct msm_vidc_inst *temp = NULL;
-
- if (!inst || !inst->core) {
- dprintk(VIDC_ERR, "%s - invalid param %pK %pK\n",
- __func__, inst, core);
- return;
- }
- core = inst->core;
-
- dprintk(VIDC_ERR, "Venus core frequency = %lu", core->curr_freq);
- mutex_lock(&core->lock);
- dprintk(VIDC_ERR, "Printing instance info that caused Error\n");
- msm_comm_print_inst_info(inst);
- dprintk(VIDC_ERR, "Printing remaining instances info\n");
- list_for_each_entry(temp, &core->instances, list) {
- /* inst already printed above. Hence don't repeat.*/
- if (temp == inst)
- continue;
- msm_comm_print_inst_info(temp);
- }
- mutex_unlock(&core->lock);
-}
-
int msm_comm_session_continue(void *instance)
{
struct msm_vidc_inst *inst = instance;
@@ -5590,8 +5745,7 @@
hdev = inst->core->device;
mutex_lock(&inst->lock);
if (inst->state >= MSM_VIDC_RELEASE_RESOURCES_DONE ||
- inst->state < MSM_VIDC_START_DONE ||
- inst->core->state == VIDC_CORE_INVALID) {
+ inst->state < MSM_VIDC_START_DONE) {
dprintk(VIDC_DBG,
"Inst %pK : Not in valid state to call %s\n",
inst, __func__);
@@ -5659,27 +5813,27 @@
if (vb2->num_planes == 1)
dprintk(tag,
- "%s: %s: %x : idx %2d fd %d off %d daddr %x size %d filled %d flags 0x%x ts %lld refcnt %d\n",
+ "%s: %s: %x : idx %2d fd %d off %d daddr %x size %d filled %d flags 0x%x ts %lld refcnt %d mflags 0x%x\n",
str, vb2->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
"OUTPUT" : "CAPTURE", hash32_ptr(inst->session),
vb2->index, vb2->planes[0].m.fd,
vb2->planes[0].data_offset, mbuf->smem[0].device_addr,
vb2->planes[0].length, vb2->planes[0].bytesused,
mbuf->vvb.flags, mbuf->vvb.vb2_buf.timestamp,
- mbuf->smem[0].refcount);
+ mbuf->smem[0].refcount, mbuf->flags);
else
dprintk(tag,
- "%s: %s: %x : idx %2d fd %d off %d daddr %x size %d filled %d flags 0x%x ts %lld refcnt %d, extradata: fd %d off %d daddr %x size %d filled %d refcnt %d\n",
+ "%s: %s: %x : idx %2d fd %d off %d daddr %x size %d filled %d flags 0x%x ts %lld refcnt %d mflags 0x%x, extradata: fd %d off %d daddr %x size %d filled %d refcnt %d\n",
str, vb2->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
"OUTPUT" : "CAPTURE", hash32_ptr(inst->session),
vb2->index, vb2->planes[0].m.fd,
vb2->planes[0].data_offset, mbuf->smem[0].device_addr,
vb2->planes[0].length, vb2->planes[0].bytesused,
mbuf->vvb.flags, mbuf->vvb.vb2_buf.timestamp,
- mbuf->smem[0].refcount, vb2->planes[1].m.fd,
- vb2->planes[1].data_offset, mbuf->smem[1].device_addr,
- vb2->planes[1].length, vb2->planes[1].bytesused,
- mbuf->smem[1].refcount);
+ mbuf->smem[0].refcount, mbuf->flags,
+ vb2->planes[1].m.fd, vb2->planes[1].data_offset,
+ mbuf->smem[1].device_addr, vb2->planes[1].length,
+ vb2->planes[1].bytesused, mbuf->smem[1].refcount);
}
void print_vb2_buffer(u32 tag, const char *str, struct msm_vidc_inst *inst,
@@ -5698,13 +5852,14 @@
vb2->planes[0].bytesused);
else
dprintk(tag,
- "%s: %s: %x : idx %2d fd %d off %d size %d filled %d, extradata: fd %d off %d size %d\n",
+ "%s: %s: %x : idx %2d fd %d off %d size %d filled %d, extradata: fd %d off %d size %d filled %d\n",
str, vb2->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
"OUTPUT" : "CAPTURE", hash32_ptr(inst->session),
vb2->index, vb2->planes[0].m.fd,
vb2->planes[0].data_offset, vb2->planes[0].length,
vb2->planes[0].bytesused, vb2->planes[1].m.fd,
- vb2->planes[1].data_offset, vb2->planes[1].length);
+ vb2->planes[1].data_offset, vb2->planes[1].length,
+ vb2->planes[1].bytesused);
}
void print_v4l2_buffer(u32 tag, const char *str, struct msm_vidc_inst *inst,
@@ -5724,7 +5879,7 @@
v4l2->m.planes[0].bytesused);
else
dprintk(tag,
- "%s: %s: %x : idx %2d fd %d off %d size %d filled %d, extradata: fd %d off %d size %d\n",
+ "%s: %s: %x : idx %2d fd %d off %d size %d filled %d, extradata: fd %d off %d size %d filled %d\n",
str, v4l2->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
"OUTPUT" : "CAPTURE", hash32_ptr(inst->session),
v4l2->index, v4l2->m.planes[0].m.fd,
@@ -5733,7 +5888,8 @@
v4l2->m.planes[0].bytesused,
v4l2->m.planes[1].m.fd,
v4l2->m.planes[1].data_offset,
- v4l2->m.planes[1].length);
+ v4l2->m.planes[1].length,
+ v4l2->m.planes[1].bytesused);
}
bool msm_comm_compare_vb2_plane(struct msm_vidc_inst *inst,
@@ -5924,8 +6080,11 @@
* always compare dma_buf addresses which is guaranteed
* to be same across the processes (duplicate fds).
*/
- dma_planes[i] = (unsigned long)dma_buf_get(vb2->planes[i].m.fd);
- dma_buf_put((struct dma_buf *)dma_planes[i]);
+ dma_planes[i] = (unsigned long)msm_smem_get_dma_buf(
+ vb2->planes[i].m.fd);
+ if (!dma_planes[i])
+ return NULL;
+ msm_smem_put_dma_buf((struct dma_buf *)dma_planes[i]);
}
mutex_lock(&inst->registeredbufs.lock);
@@ -5945,8 +6104,12 @@
rc = -ENOMEM;
goto exit;
}
+ kref_init(&mbuf->kref);
}
+ /* Initially assume all the buffers are going to be deferred */
+ mbuf->flags |= MSM_VIDC_FLAG_DEFERRED;
+
vbuf = to_vb2_v4l2_buffer(vb2);
memcpy(&mbuf->vvb, vbuf, sizeof(struct vb2_v4l2_buffer));
vb = &mbuf->vvb.vb2_buf;
@@ -5993,6 +6156,16 @@
if (found_plane0)
rc = -EEXIST;
}
+ /*
+ * If RBR pending on this buffer then enable RBR_PENDING flag
+ * and clear the DEFERRED flag to avoid this buffer getting
+ * queued to video hardware in msm_comm_qbuf() which tries to
+ * queue all the DEFERRED buffers.
+ */
+ if (rc == -EEXIST) {
+ mbuf->flags |= MSM_VIDC_FLAG_RBR_PENDING;
+ mbuf->flags &= ~MSM_VIDC_FLAG_DEFERRED;
+ }
}
/* add the new buffer to list */
@@ -6008,11 +6181,11 @@
return mbuf;
exit:
- mutex_unlock(&inst->registeredbufs.lock);
dprintk(VIDC_ERR, "%s: rc %d\n", __func__, rc);
msm_comm_unmap_vidc_buffer(inst, mbuf);
if (!found)
- kfree(mbuf);
+ kref_put_mbuf(mbuf);
+ mutex_unlock(&inst->registeredbufs.lock);
return ERR_PTR(rc);
}
@@ -6064,33 +6237,39 @@
*/
if (!mbuf->smem[0].refcount) {
list_del(&mbuf->list);
- kfree(mbuf);
- mbuf = NULL;
+ kref_put_mbuf(mbuf);
}
unlock:
mutex_unlock(&inst->registeredbufs.lock);
}
-void handle_release_buffer_reference(struct msm_vidc_inst *inst, u32 *planes)
+void handle_release_buffer_reference(struct msm_vidc_inst *inst,
+ struct msm_vidc_buffer *mbuf)
{
int rc = 0;
- struct msm_vidc_buffer *mbuf = NULL;
+ struct msm_vidc_buffer *temp;
bool found = false;
int i = 0;
mutex_lock(&inst->registeredbufs.lock);
found = false;
- list_for_each_entry(mbuf, &inst->registeredbufs.list, list) {
- if (msm_comm_compare_device_planes(mbuf, planes)) {
+ /* ensure mbuf was not removed from the list in the meantime */
+ list_for_each_entry(temp, &inst->registeredbufs.list, list) {
+ if (msm_comm_compare_vb2_planes(inst, mbuf,
+ &temp->vvb.vb2_buf)) {
found = true;
break;
}
}
if (found) {
+ /* send RBR event to client */
msm_vidc_queue_rbr_event(inst,
mbuf->vvb.vb2_buf.planes[0].m.fd,
mbuf->vvb.vb2_buf.planes[0].data_offset);
+ /* clear RBR_PENDING flag */
+ mbuf->flags &= ~MSM_VIDC_FLAG_RBR_PENDING;
+
for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) {
if (msm_smem_unmap_dma_buf(inst, &mbuf->smem[i]))
print_vidc_buffer(VIDC_ERR,
@@ -6099,13 +6278,10 @@
/* refcount is not zero if client queued the same buffer */
if (!mbuf->smem[0].refcount) {
list_del(&mbuf->list);
- kfree(mbuf);
- mbuf = NULL;
+ kref_put_mbuf(mbuf);
}
} else {
- dprintk(VIDC_ERR,
- "%s: data_addr %x extradata_addr %x not found\n",
- __func__, planes[0], planes[1]);
+ print_vidc_buffer(VIDC_ERR, "mbuf not found", inst, mbuf);
goto unlock;
}
@@ -6119,8 +6295,9 @@
* and if found queue it to video hw (if not flushing).
*/
found = false;
- list_for_each_entry(mbuf, &inst->registeredbufs.list, list) {
- if (msm_comm_compare_device_plane(mbuf, planes, 0)) {
+ list_for_each_entry(temp, &inst->registeredbufs.list, list) {
+ if (msm_comm_compare_vb2_plane(inst, mbuf,
+ &temp->vvb.vb2_buf, 0)) {
found = true;
break;
}
@@ -6128,25 +6305,28 @@
if (!found)
goto unlock;
- /* found means client queued the buffer already */
+ /* buffer found means client queued the buffer already */
if (inst->in_reconfig || inst->in_flush) {
print_vidc_buffer(VIDC_DBG, "rbr flush buf", inst, mbuf);
msm_comm_flush_vidc_buffer(inst, mbuf);
msm_comm_unmap_vidc_buffer(inst, mbuf);
/* remove from list */
list_del(&mbuf->list);
- kfree(mbuf);
- mbuf = NULL;
+ kref_put_mbuf(mbuf);
/* don't queue the buffer */
found = false;
}
+ /* clear DEFERRED flag, if any, as the buffer is going to be queued */
+ if (found)
+ mbuf->flags &= ~MSM_VIDC_FLAG_DEFERRED;
+
unlock:
mutex_unlock(&inst->registeredbufs.lock);
if (found) {
print_vidc_buffer(VIDC_DBG, "rbr qbuf", inst, mbuf);
- rc = msm_comm_qbuf(inst, mbuf);
+ rc = msm_comm_qbuf_rbr(inst, mbuf);
if (rc)
print_vidc_buffer(VIDC_ERR,
"rbr qbuf failed", inst, mbuf);
@@ -6183,3 +6363,41 @@
return rc;
}
+static void kref_free_mbuf(struct kref *kref)
+{
+ struct msm_vidc_buffer *mbuf = container_of(kref,
+ struct msm_vidc_buffer, kref);
+
+ kfree(mbuf);
+}
+
+void kref_put_mbuf(struct msm_vidc_buffer *mbuf)
+{
+ if (!mbuf)
+ return;
+
+ kref_put(&mbuf->kref, kref_free_mbuf);
+}
+
+bool kref_get_mbuf(struct msm_vidc_inst *inst, struct msm_vidc_buffer *mbuf)
+{
+ struct msm_vidc_buffer *temp;
+ bool matches = false;
+ bool ret = false;
+
+ if (!inst || !mbuf)
+ return false;
+
+ mutex_lock(&inst->registeredbufs.lock);
+ list_for_each_entry(temp, &inst->registeredbufs.list, list) {
+ if (temp == mbuf) {
+ matches = true;
+ break;
+ }
+ }
+ ret = (matches && kref_get_unless_zero(&mbuf->kref)) ? true : false;
+ mutex_unlock(&inst->registeredbufs.lock);
+
+ return ret;
+}
+
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.h b/drivers/media/platform/msm/vidc/msm_vidc_common.h
index 5c653f5..4a06f19 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.h
@@ -56,11 +56,13 @@
bool check_for_reuse);
int msm_comm_release_persist_buffers(struct msm_vidc_inst *inst);
int msm_comm_release_recon_buffers(struct msm_vidc_inst *inst);
+void msm_comm_release_eos_buffers(struct msm_vidc_inst *inst);
int msm_comm_release_output_buffers(struct msm_vidc_inst *inst,
bool force_release);
void msm_comm_validate_output_buffers(struct msm_vidc_inst *inst);
int msm_comm_force_cleanup(struct msm_vidc_inst *inst);
int msm_comm_suspend(int core_id);
+int msm_vidc_update_host_buff_counts(struct msm_vidc_inst *inst);
enum hal_extradata_id msm_comm_get_hal_extradata_index(
enum v4l2_mpeg_vidc_extradata index);
struct hal_buffer_requirements *get_buff_req_buffer(
@@ -107,7 +109,6 @@
u32 get_frame_size_rgba(int plane, u32 height, u32 width);
u32 get_frame_size_nv21(int plane, u32 height, u32 width);
u32 get_frame_size_tp10_ubwc(int plane, u32 height, u32 width);
-void msm_comm_set_use_sys_cache(struct msm_vidc_inst *inst);
struct vb2_buffer *msm_comm_get_vb_using_vidc_buffer(
struct msm_vidc_inst *inst, struct msm_vidc_buffer *mbuf);
struct msm_vidc_buffer *msm_comm_get_buffer_using_device_planes(
@@ -116,7 +117,8 @@
struct vb2_buffer *vb2);
void msm_comm_put_vidc_buffer(struct msm_vidc_inst *inst,
struct msm_vidc_buffer *mbuf);
-void handle_release_buffer_reference(struct msm_vidc_inst *inst, u32 *planes);
+void handle_release_buffer_reference(struct msm_vidc_inst *inst,
+ struct msm_vidc_buffer *mbuf);
int msm_comm_vb2_buffer_done(struct msm_vidc_inst *inst,
struct vb2_buffer *vb);
int msm_comm_flush_vidc_buffer(struct msm_vidc_inst *inst,
@@ -145,4 +147,7 @@
struct vb2_buffer *vb2);
void print_v4l2_buffer(u32 tag, const char *str, struct msm_vidc_inst *inst,
struct v4l2_buffer *v4l2);
+void kref_put_mbuf(struct msm_vidc_buffer *mbuf);
+bool kref_get_mbuf(struct msm_vidc_inst *inst, struct msm_vidc_buffer *mbuf);
+
#endif
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.c b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
index 58c3b0f..5be1ee2 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
@@ -430,7 +430,8 @@
case MSM_VIDC_DEBUGFS_EVENT_FBD:
inst->count.fbd++;
inst->debug.samples++;
- if (inst->count.ebd && inst->count.fbd == inst->count.ftb) {
+ if (inst->count.fbd &&
+ inst->count.fbd == inst->count.ftb) {
toc(inst, FRAME_PROCESSING);
dprintk(VIDC_PROF, "FBD: FW needs output buffers\n");
}
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
index 22772ef..195410d 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
@@ -49,6 +49,7 @@
#define MAX_NUM_OUTPUT_BUFFERS VIDEO_MAX_FRAME // same as VB2_MAX_FRAME
#define MAX_NUM_CAPTURE_BUFFERS VIDEO_MAX_FRAME // same as VB2_MAX_FRAME
+#define MAX_SUPPORTED_INSTANCES 16
/* Maintains the number of FTB's between each FBD over a window */
#define DCVS_FTB_WINDOW 16
@@ -83,7 +84,6 @@
VIDC_CORE_UNINIT = 0,
VIDC_CORE_INIT,
VIDC_CORE_INIT_DONE,
- VIDC_CORE_INVALID
};
/*
@@ -144,6 +144,12 @@
unsigned long freq;
};
+struct vidc_input_cr_data {
+ struct list_head list;
+ u32 index;
+ u32 input_cr;
+};
+
struct recon_buf {
struct list_head list;
u32 buffer_index;
@@ -151,6 +157,11 @@
u32 CF;
};
+struct eos_buf {
+ struct list_head list;
+ struct msm_smem smem;
+};
+
struct internal_buf {
struct list_head list;
enum hal_buffer buffer_type;
@@ -243,6 +254,7 @@
int buffer_counter;
int load;
int load_low;
+ int load_norm;
int load_high;
int min_threshold;
int max_threshold;
@@ -260,7 +272,6 @@
u32 opb_fourcc;
enum hal_work_mode work_mode;
bool low_latency_mode;
- bool use_sys_cache;
};
struct profile_data {
@@ -322,11 +333,13 @@
struct msm_vidc_format fmts[MAX_PORT_NUM];
struct buf_queue bufq[MAX_PORT_NUM];
struct msm_vidc_list freqs;
+ struct msm_vidc_list input_crs;
struct msm_vidc_list scratchbufs;
struct msm_vidc_list persistbufs;
struct msm_vidc_list pending_getpropq;
struct msm_vidc_list outputbufs;
struct msm_vidc_list reconbufs;
+ struct msm_vidc_list eosbufs;
struct msm_vidc_list registeredbufs;
struct buffer_requirements buff_req;
struct smem_client *mem_client;
@@ -386,11 +399,17 @@
int msm_vidc_check_scaling_supported(struct msm_vidc_inst *inst);
void msm_vidc_queue_v4l2_event(struct msm_vidc_inst *inst, int event_type);
+enum msm_vidc_flags {
+ MSM_VIDC_FLAG_DEFERRED = BIT(0),
+ MSM_VIDC_FLAG_RBR_PENDING = BIT(1),
+};
+
struct msm_vidc_buffer {
struct list_head list;
+ struct kref kref;
struct msm_smem smem[VIDEO_MAX_PLANES];
struct vb2_v4l2_buffer vvb;
- bool deferred;
+ enum msm_vidc_flags flags;
};
void msm_comm_handle_thermal_event(void);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_platform.c b/drivers/media/platform/msm/vidc/msm_vidc_platform.c
index 25f22c7..8a701cb 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_platform.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_platform.c
@@ -53,14 +53,14 @@
static struct msm_vidc_common_data default_common_data[] = {
{
- .key = "qcon,never-unload-fw",
+ .key = "qcom,never-unload-fw",
.value = 1,
},
};
static struct msm_vidc_common_data sdm845_common_data[] = {
{
- .key = "qcon,never-unload-fw",
+ .key = "qcom,never-unload-fw",
.value = 1,
},
{
@@ -97,7 +97,7 @@
},
{
.key = "qcom,hw-resp-timeout",
- .value = 2000,
+ .value = 250,
},
};
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
index 062795f..039b457 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
@@ -27,6 +27,8 @@
CLOCK_PROP_HAS_MEM_RETENTION = 1 << 1,
};
+#define PERF_GOV "performance"
+
static inline struct device *msm_iommu_get_ctx(const char *ctx_name)
{
return NULL;
@@ -274,12 +276,12 @@
"cache-slice-names", c, &vsc->name);
}
- res->sys_cache_enabled = true;
+ res->sys_cache_present = true;
return 0;
err_load_subcache_table_fail:
- res->sys_cache_enabled = false;
+ res->sys_cache_present = false;
subcaches->count = 0;
subcaches->subcache_tbl = NULL;
@@ -369,20 +371,20 @@
return 0;
}
+/* A comparator to compare loads (needed later on) */
+static int cmp(const void *a, const void *b)
+{
+ /* want to sort in reverse so flip the comparison */
+ return ((struct allowed_clock_rates_table *)b)->clock_rate -
+ ((struct allowed_clock_rates_table *)a)->clock_rate;
+}
+
static int msm_vidc_load_allowed_clocks_table(
struct msm_vidc_platform_resources *res)
{
int rc = 0;
struct platform_device *pdev = res->pdev;
- /* A comparator to compare loads (needed later on) */
- int cmp(const void *a, const void *b)
- {
- /* want to sort in reverse so flip the comparison */
- return ((struct allowed_clock_rates_table *)b)->clock_rate -
- ((struct allowed_clock_rates_table *)a)->clock_rate;
- }
-
if (!of_find_property(pdev->dev.of_node,
"qcom,allowed-clock-rates", NULL)) {
dprintk(VIDC_DBG, "qcom,allowed-clock-rates not found\n");
@@ -426,6 +428,8 @@
buses->bus_tbl = temp_table;
bus = &buses->bus_tbl[buses->count];
+ memset(bus, 0x0, sizeof(struct bus_info));
+
rc = of_property_read_string(dev->of_node, "label", &temp_name);
if (rc) {
dprintk(VIDC_ERR, "'label' not found in node\n");
@@ -457,9 +461,12 @@
rc = 0;
dprintk(VIDC_DBG,
"'qcom,bus-governor' not found, default to performance governor\n");
- bus->governor = "performance";
+ bus->governor = PERF_GOV;
}
+ if (!strcmp(bus->governor, PERF_GOV))
+ bus->is_prfm_gov_used = true;
+
rc = of_property_read_u32_array(dev->of_node, "qcom,bus-range-kbps",
range, ARRAY_SIZE(range));
if (rc) {
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_resources.h b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
index dda5e80..8888980 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_resources.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
@@ -112,6 +112,7 @@
struct devfreq_dev_profile devfreq_prof;
struct devfreq *devfreq;
struct msm_bus_client_handle *client;
+ bool is_prfm_gov_used;
};
struct bus_set {
@@ -160,7 +161,7 @@
uint32_t dcvs_tbl_size;
struct dcvs_limit *dcvs_limit;
bool sys_cache_present;
- bool sys_cache_enabled;
+ bool sys_cache_res_set;
struct subcache_set subcache_set;
struct reg_set reg_set;
struct addr_set qdss_addr_set;
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index f8d8842..8064f4c 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -90,6 +90,8 @@
static int __initialize_packetization(struct venus_hfi_device *device);
static struct hal_session *__get_session(struct venus_hfi_device *device,
u32 session_id);
+static bool __is_session_valid(struct venus_hfi_device *device,
+ struct hal_session *session, const char *func);
static int __set_clocks(struct venus_hfi_device *device, u32 freq);
static int __iface_cmdq_write(struct venus_hfi_device *device,
void *pkt);
@@ -97,9 +99,10 @@
static void __unload_fw(struct venus_hfi_device *device);
static int __tzbsp_set_video_state(enum tzbsp_video_state state);
static int __enable_subcaches(struct venus_hfi_device *device);
+static int __set_subcaches(struct venus_hfi_device *device);
+static int __release_subcaches(struct venus_hfi_device *device);
static int __disable_subcaches(struct venus_hfi_device *device);
-
/**
* Utility function to enforce some of our assumptions. Spam calls to this
* in hotspots in code to double check some of the assumptions that we hold.
@@ -809,21 +812,22 @@
int rc = 0;
struct bus_info *bus = NULL;
+ kfree(device->bus_vote.data);
+ device->bus_vote.data = NULL;
+ device->bus_vote.data_count = 0;
+
venus_hfi_for_each_bus(device, bus) {
- int local_rc = 0;
unsigned long zero = 0;
- rc = devfreq_suspend_device(bus->devfreq);
+ if (!bus->is_prfm_gov_used)
+ rc = devfreq_suspend_device(bus->devfreq);
+ else
+ rc = __devfreq_target(bus->dev, &zero, 0);
+
if (rc)
goto err_unknown_device;
-
- local_rc = __devfreq_target(bus->dev, &zero, 0);
- rc = rc ?: local_rc;
}
- if (rc)
- dprintk(VIDC_WARN, "Failed to unvote some buses\n");
-
err_unknown_device:
return rc;
}
@@ -857,15 +861,14 @@
venus_hfi_for_each_bus(device, bus) {
if (bus && bus->devfreq) {
- /* NOP if already resume */
- rc = devfreq_resume_device(bus->devfreq);
- if (rc)
- goto err_no_mem;
-
- /* Kick devfreq awake incase _resume() didn't do it */
-
- bus->devfreq->nb.notifier_call(
- &bus->devfreq->nb, 0, NULL);
+ if (!bus->is_prfm_gov_used) {
+ rc = devfreq_resume_device(bus->devfreq);
+ if (rc)
+ goto err_no_mem;
+ } else {
+ bus->devfreq->nb.notifier_call(
+ &bus->devfreq->nb, 0, NULL);
+ }
}
}
@@ -1668,11 +1671,9 @@
static int venus_hfi_core_init(void *device)
{
+ int rc = 0;
struct hfi_cmd_sys_init_packet pkt;
struct hfi_cmd_sys_get_property_packet version_pkt;
- int rc = 0;
- struct list_head *ptr, *next;
- struct hal_session *session = NULL;
struct venus_hfi_device *dev;
if (!device) {
@@ -1681,8 +1682,16 @@
}
dev = device;
+
+ dprintk(VIDC_DBG, "Core initializing\n");
+
mutex_lock(&dev->lock);
+ dev->bus_vote.data =
+ kzalloc(sizeof(struct vidc_bus_vote_data), GFP_KERNEL);
+ dev->bus_vote.data_count = 1;
+ dev->bus_vote.data->power_mode = VIDC_POWER_TURBO;
+
rc = __load_fw(dev);
if (rc) {
dprintk(VIDC_ERR, "Failed to load Venus FW\n");
@@ -1691,20 +1700,6 @@
__set_state(dev, VENUS_STATE_INIT);
- list_for_each_safe(ptr, next, &dev->sess_head) {
- /*
- * This means that session list is not empty. Kick stale
- * sessions out of our valid instance list, but keep the
- * list_head inited so that list_del (in the future, called
- * by session_clean()) will be valid. When client doesn't close
- * them, then it is a genuine leak which driver can't fix.
- */
- session = list_entry(ptr, struct hal_session, list);
- list_del_init(&session->list);
- }
-
- INIT_LIST_HEAD(&dev->sess_head);
-
if (!dev->hal_client) {
dev->hal_client = msm_smem_new_client(
SMEM_ION, dev->res, MSM_VIDC_UNKNOWN);
@@ -1753,10 +1748,8 @@
dprintk(VIDC_WARN, "Failed to send image version pkt to f/w\n");
rc = __enable_subcaches(device);
- if (rc) {
- dprintk(VIDC_WARN,
- "Failed to enable subcaches, err = %d\n", rc);
- }
+ if (!rc)
+ __set_subcaches(device);
if (dev->res->pm_qos_latency_us) {
#ifdef CONFIG_SMP
@@ -1766,21 +1759,23 @@
pm_qos_add_request(&dev->qos, PM_QOS_CPU_DMA_LATENCY,
dev->res->pm_qos_latency_us);
}
-
+ dprintk(VIDC_DBG, "Core inited successfully\n");
mutex_unlock(&dev->lock);
return rc;
err_core_init:
__set_state(dev, VENUS_STATE_DEINIT);
__unload_fw(dev);
err_load_fw:
+ dprintk(VIDC_ERR, "Core init failed\n");
mutex_unlock(&dev->lock);
return rc;
}
static int venus_hfi_core_release(void *dev)
{
- struct venus_hfi_device *device = dev;
int rc = 0;
+ struct venus_hfi_device *device = dev;
+ struct hal_session *session, *next;
if (!device) {
dprintk(VIDC_ERR, "invalid device\n");
@@ -1788,7 +1783,7 @@
}
mutex_lock(&device->lock);
-
+ dprintk(VIDC_DBG, "Core releasing\n");
if (device->res->pm_qos_latency_us &&
pm_qos_request_active(&device->qos))
pm_qos_remove_request(&device->qos);
@@ -1797,6 +1792,11 @@
__set_state(device, VENUS_STATE_DEINIT);
__unload_fw(device);
+ /* unlink all sessions from device */
+ list_for_each_entry_safe(session, next, &device->sess_head, list)
+ list_del(&session->list);
+
+ dprintk(VIDC_DBG, "Core released successfully\n");
mutex_unlock(&device->lock);
return rc;
@@ -1937,6 +1937,10 @@
mutex_lock(&device->lock);
dprintk(VIDC_INFO, "in set_prop,with prop id: %#x\n", ptype);
+ if (!__is_session_valid(device, session, __func__)) {
+ rc = -EINVAL;
+ goto err_set_prop;
+ }
rc = call_hfi_pkt_op(device, session_set_property,
pkt, session, ptype, pdata);
@@ -1979,6 +1983,10 @@
mutex_lock(&device->lock);
dprintk(VIDC_INFO, "%s: property id: %d\n", __func__, ptype);
+ if (!__is_session_valid(device, session, __func__)) {
+ rc = -EINVAL;
+ goto err_create_pkt;
+ }
rc = call_hfi_pkt_op(device, session_get_property,
&pkt, session, ptype);
@@ -2010,8 +2018,25 @@
static void __session_clean(struct hal_session *session)
{
+ struct hal_session *temp, *next;
+ struct venus_hfi_device *device;
+
+ if (!session || !session->device) {
+ dprintk(VIDC_WARN, "%s: invalid params\n", __func__);
+ return;
+ }
+ device = session->device;
dprintk(VIDC_DBG, "deleted the session: %pK\n", session);
- list_del(&session->list);
+ /*
+ * session might have been removed from the device list in
+ * core_release, so check and remove if it is in the list
+ */
+ list_for_each_entry_safe(temp, next, &device->sess_head, list) {
+ if (session == temp) {
+ list_del(&session->list);
+ break;
+ }
+ }
/* Poison the session handle with zeros */
*session = (struct hal_session){ {0} };
kfree(session);
@@ -2105,6 +2130,9 @@
int rc = 0;
struct venus_hfi_device *device = session->device;
+ if (!__is_session_valid(device, session, __func__))
+ return -EINVAL;
+
rc = call_hfi_pkt_op(device, session_cmd,
&pkt, pkt_type, session);
if (rc == -EPERM)
@@ -2190,6 +2218,10 @@
device = session->device;
mutex_lock(&device->lock);
+ if (!__is_session_valid(device, session, __func__)) {
+ rc = -EINVAL;
+ goto err_create_pkt;
+ }
if (buffer_info->buffer_type == HAL_BUFFER_INPUT) {
/*
* Hardware doesn't care about input buffers being
@@ -2234,6 +2266,10 @@
device = session->device;
mutex_lock(&device->lock);
+ if (!__is_session_valid(device, session, __func__)) {
+ rc = -EINVAL;
+ goto err_create_pkt;
+ }
if (buffer_info->buffer_type == HAL_BUFFER_INPUT) {
rc = 0;
goto err_create_pkt;
@@ -2368,6 +2404,9 @@
int rc = 0;
struct venus_hfi_device *device = session->device;
+ if (!__is_session_valid(device, session, __func__))
+ return -EINVAL;
+
if (session->is_decoder) {
struct hfi_cmd_session_empty_buffer_compressed_packet pkt;
@@ -2437,6 +2476,9 @@
struct venus_hfi_device *device = session->device;
struct hfi_cmd_session_fill_buffer_packet pkt;
+ if (!__is_session_valid(device, session, __func__))
+ return -EINVAL;
+
rc = call_hfi_pkt_op(device, session_ftb,
&pkt, session, output_frame);
if (rc) {
@@ -2490,6 +2532,12 @@
device = session->device;
mutex_lock(&device->lock);
+
+ if (!__is_session_valid(device, session, __func__)) {
+ rc = -EINVAL;
+ goto err_etbs_and_ftbs;
+ }
+
for (c = 0; c < num_ftbs; ++c) {
rc = __session_ftb(session, &ftbs[c], true);
if (rc) {
@@ -2537,6 +2585,10 @@
device = session->device;
mutex_lock(&device->lock);
+ if (!__is_session_valid(device, session, __func__)) {
+ rc = -EINVAL;
+ goto err_create_pkt;
+ }
rc = call_hfi_pkt_op(device, session_get_buf_req,
&pkt, session);
if (rc) {
@@ -2567,6 +2619,10 @@
device = session->device;
mutex_lock(&device->lock);
+ if (!__is_session_valid(device, session, __func__)) {
+ rc = -EINVAL;
+ goto err_create_pkt;
+ }
rc = call_hfi_pkt_op(device, session_flush,
&pkt, session, flush_mode);
if (rc) {
@@ -2747,9 +2803,11 @@
}
}
+ __flush_debug_queue(device, device->raw_packet);
+
rc = __suspend(device);
if (rc)
- dprintk(VIDC_ERR, "Failed venus power off\n");
+ dprintk(VIDC_ERR, "Failed __suspend\n");
/* Cancel pending delayed works if any */
cancel_delayed_work(&venus_hfi_pm_work);
@@ -2844,6 +2902,24 @@
kfree(packet);
}
+static bool __is_session_valid(struct venus_hfi_device *device,
+ struct hal_session *session, const char *func)
+{
+ struct hal_session *temp = NULL;
+
+ if (!device || !session)
+ goto invalid;
+
+ list_for_each_entry(temp, &device->sess_head, list)
+ if (session == temp)
+ return true;
+
+invalid:
+ dprintk(VIDC_WARN, "%s: device %pK, invalid session %pK\n",
+ func, device, session);
+ return false;
+}
+
static struct hal_session *__get_session(struct venus_hfi_device *device,
u32 session_id)
{
@@ -3330,6 +3406,15 @@
.exit = NULL,
};
+ if (!strcmp(bus->governor, "msm-vidc-llcc")) {
+ if (msm_vidc_syscache_disable) {
+ dprintk(VIDC_DBG,
+ "Skipping LLC bus init %s: %s\n",
+ bus->name, bus->governor);
+ continue;
+ }
+ }
+
/*
* This is stupid, but there's no other easy way to ahold
* of struct bus_info in venus_hfi_devfreq_*()
@@ -3688,39 +3773,64 @@
int rc = 0;
u32 c = 0;
struct subcache_info *sinfo;
- u32 resource[VIDC_MAX_SUBCACHE_SIZE];
- struct hfi_resource_syscache_info_type *sc_res_info;
- struct hfi_resource_subcache_type *sc_res;
- struct vidc_resource_hdr rhdr;
if (msm_vidc_syscache_disable || !is_sys_cache_present(device))
return 0;
- memset((void *)resource, 0x0, (sizeof(u32) * VIDC_MAX_SUBCACHE_SIZE));
-
- sc_res_info = (struct hfi_resource_syscache_info_type *)resource;
- sc_res = &(sc_res_info->rg_subcache_entries[0]);
-
/* Activate subcaches */
venus_hfi_for_each_subcache(device, sinfo) {
rc = llcc_slice_activate(sinfo->subcache);
if (rc) {
dprintk(VIDC_ERR, "Failed to activate %s: %d\n",
sinfo->name, rc);
- continue;
+ goto err_activate_fail;
}
sinfo->isactive = true;
-
- /* Update the entry */
- sc_res[c].size = sinfo->subcache->llcc_slice_size;
- sc_res[c].sc_id = sinfo->subcache->llcc_slice_id;
- dprintk(VIDC_DBG, "Activate subcache %s\n", sinfo->name);
+ dprintk(VIDC_DBG, "Activated subcache %s\n", sinfo->name);
c++;
}
+ dprintk(VIDC_DBG, "Activated %d Subcaches to Venus\n", c);
+
+ return 0;
+
+err_activate_fail:
+ __release_subcaches(device);
+ __disable_subcaches(device);
+ return -EINVAL;
+}
+
+static int __set_subcaches(struct venus_hfi_device *device)
+{
+ int rc = 0;
+ u32 c = 0;
+ struct subcache_info *sinfo;
+ u32 resource[VIDC_MAX_SUBCACHE_SIZE];
+ struct hfi_resource_syscache_info_type *sc_res_info;
+ struct hfi_resource_subcache_type *sc_res;
+ struct vidc_resource_hdr rhdr;
+
+ if (device->res->sys_cache_res_set) {
+ dprintk(VIDC_DBG, "Subcaches already set to Venus\n");
+ return 0;
+ }
+
+ memset((void *)resource, 0x0, (sizeof(u32) * VIDC_MAX_SUBCACHE_SIZE));
+
+ sc_res_info = (struct hfi_resource_syscache_info_type *)resource;
+ sc_res = &(sc_res_info->rg_subcache_entries[0]);
+
+ venus_hfi_for_each_subcache(device, sinfo) {
+ if (sinfo->isactive == true) {
+ sc_res[c].size = sinfo->subcache->llcc_slice_size;
+ sc_res[c].sc_id = sinfo->subcache->llcc_slice_id;
+ c++;
+ }
+ }
+
/* Set resource to Venus for activated subcaches */
if (c) {
- dprintk(VIDC_DBG, "Setting Subcaches\n");
+ dprintk(VIDC_DBG, "Setting %d Subcaches\n", c);
rhdr.resource_handle = sc_res_info; /* cookie */
rhdr.resource_id = VIDC_RESOURCE_SYSCACHE;
@@ -3739,9 +3849,8 @@
sinfo->isset = true;
}
- dprintk(VIDC_DBG, "Activated & Set Subcaches to Venus\n");
-
- device->res->sys_cache_enabled = true;
+ dprintk(VIDC_DBG, "Set Subcaches done to Venus\n");
+ device->res->sys_cache_res_set = true;
return 0;
@@ -3751,7 +3860,7 @@
return rc;
}
-static int __disable_subcaches(struct venus_hfi_device *device)
+static int __release_subcaches(struct venus_hfi_device *device)
{
struct subcache_info *sinfo;
int rc = 0;
@@ -3764,8 +3873,6 @@
if (msm_vidc_syscache_disable || !is_sys_cache_present(device))
return 0;
- dprintk(VIDC_DBG, "Disabling Subcaches\n");
-
memset((void *)resource, 0x0, (sizeof(u32) * VIDC_MAX_SUBCACHE_SIZE));
sc_res_info = (struct hfi_resource_syscache_info_type *)resource;
@@ -3783,16 +3890,29 @@
}
if (c > 0) {
+ dprintk(VIDC_DBG, "Releasing %d subcaches\n", c);
rhdr.resource_handle = sc_res_info; /* cookie */
rhdr.resource_id = VIDC_RESOURCE_SYSCACHE;
rc = __core_release_resource(device, &rhdr);
if (rc)
- dprintk(VIDC_ERR, "Failed to release subcaches\n");
-
- dprintk(VIDC_DBG, "Release %d subcaches\n", c);
+ dprintk(VIDC_ERR,
+ "Failed to release %d subcaches\n", c);
}
+ device->res->sys_cache_res_set = false;
+
+ return rc;
+}
+
+static int __disable_subcaches(struct venus_hfi_device *device)
+{
+ struct subcache_info *sinfo;
+ int rc = 0;
+
+ if (msm_vidc_syscache_disable || !is_sys_cache_present(device))
+ return 0;
+
/* De-activate subcaches */
venus_hfi_for_each_subcache_reverse(device, sinfo) {
if (sinfo->isactive == true) {
@@ -3808,8 +3928,6 @@
}
}
- device->res->sys_cache_enabled = false;
-
return rc;
}
@@ -3909,10 +4027,7 @@
return 0;
}
- dprintk(VIDC_PROF, "Entering power collapse\n");
-
- if (__disable_subcaches(device))
- dprintk(VIDC_ERR, "Failed to disable subcaches\n");
+ dprintk(VIDC_PROF, "Entering suspend\n");
if (device->res->pm_qos_latency_us &&
pm_qos_request_active(&device->qos))
@@ -3924,8 +4039,10 @@
goto err_tzbsp_suspend;
}
+ __disable_subcaches(device);
+
__venus_power_off(device);
- dprintk(VIDC_PROF, "Venus power collapsed\n");
+ dprintk(VIDC_PROF, "Venus power off\n");
return rc;
err_tzbsp_suspend:
@@ -3986,10 +4103,8 @@
__sys_set_debug(device, msm_vidc_fw_debug);
rc = __enable_subcaches(device);
- if (rc) {
- dprintk(VIDC_WARN,
- "Failed to enable subcaches, err = %d\n", rc);
- }
+ if (!rc)
+ __set_subcaches(device);
dprintk(VIDC_PROF, "Resumed from power collapse\n");
exit:
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi.h b/drivers/media/platform/msm/vidc/vidc_hfi.h
index 5601f1b..8e9e51f 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi.h
@@ -188,6 +188,12 @@
(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x001E)
#define HFI_PROPERTY_PARAM_VDEC_CONTENT_LIGHT_LEVEL_SEI_EXTRADATA \
(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x001F)
+#define HFI_PROPERTY_PARAM_VDEC_COLOUR_REMAPPING_INFO_SEI_EXTRADATA \
+ (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x0020)
+#define HFI_PROPERTY_PARAM_VDEC_DOWN_SCALAR \
+ (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x0021)
+#define HFI_PROPERTY_PARAM_VDEC_UBWC_CR_STAT_INFO_EXTRADATA \
+ (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x0022)
#define HFI_PROPERTY_CONFIG_VDEC_OX_START \
(HFI_DOMAIN_BASE_VDEC + HFI_ARCH_OX_OFFSET + 0x4000)
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index 47ce0ba..e854b43 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -125,6 +125,7 @@
HAL_EXTRADATA_PQ_INFO,
HAL_EXTRADATA_VUI_DISPLAY_INFO,
HAL_EXTRADATA_VPX_COLORSPACE,
+ HAL_EXTRADATA_UBWC_CR_STATS_INFO,
};
enum hal_property {
@@ -995,6 +996,11 @@
struct hal_buffer_requirements buffer[HAL_BUFFER_MAX];
};
+struct hal_conceal_color {
+ u32 conceal_color_8bit;
+ u32 conceal_color_10bit;
+};
+
union hal_get_property {
struct hal_frame_rate frame_rate;
struct hal_uncompressed_format_select format_select;
@@ -1044,6 +1050,7 @@
struct hal_buffer_alloc_mode buffer_alloc_mode;
struct buffer_requirements buf_req;
enum hal_h264_entropy h264_entropy;
+ struct hal_conceal_color conceal_color;
};
/* HAL Response */
@@ -1338,12 +1345,13 @@
int output_height, output_width;
int compression_ratio;
int complexity_factor;
+ int input_cr;
+ bool use_dpb_read;
unsigned int lcu_size;
enum msm_vidc_power_mode power_mode;
- struct imem_ab_table *imem_ab_tbl;
enum hal_work_mode work_mode;
- unsigned long bitrate;
- u32 imem_ab_tbl_size;
+ bool use_sys_cache;
+ bool b_frames_enabled;
};
struct vidc_clk_scale_data {
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index 616fc09..6a239b3 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -81,6 +81,7 @@
#define HFI_VIDEO_CODEC_VP8 0x00001000
#define HFI_VIDEO_CODEC_HEVC 0x00002000
#define HFI_VIDEO_CODEC_VP9 0x00004000
+#define HFI_VIDEO_CODEC_TME 0x00008000
#define HFI_PROFILE_UNKNOWN 0x00000000
#define HFI_H264_PROFILE_BASELINE 0x00000001
@@ -150,6 +151,13 @@
#define HFI_HEVC_TIER_MAIN 0x1
#define HFI_HEVC_TIER_HIGH0 0x2
+#define HFI_TME_PROFILE_DEFAULT 0x00000001
+#define HFI_TME_PROFILE_FRC 0x00000002
+#define HFI_TME_PROFILE_ASW 0x00000004
+#define HFI_TME_PROFILE_DFS_BOKEH 0x00000008
+
+#define HFI_TME_LEVEL_INTEGER 0x00000001
+
#define HFI_BUFFER_INPUT (HFI_COMMON_BASE + 0x1)
#define HFI_BUFFER_OUTPUT (HFI_COMMON_BASE + 0x2)
#define HFI_BUFFER_OUTPUT2 (HFI_COMMON_BASE + 0x3)
@@ -229,6 +237,8 @@
(HFI_PROPERTY_PARAM_COMMON_START + 0x011)
#define HFI_PROPERTY_PARAM_WORK_MODE \
(HFI_PROPERTY_PARAM_COMMON_START + 0x015)
+#define HFI_PROPERTY_TME_VERSION_SUPPORTED \
+ (HFI_PROPERTY_PARAM_COMMON_START + 0x016)
#define HFI_PROPERTY_CONFIG_COMMON_START \
(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x2000)
@@ -339,7 +349,9 @@
(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x00E)
#define HFI_PROPERTY_CONFIG_VENC_BASELAYER_PRIORITYID \
(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x00F)
-#define HFI_PROPERTY_CONFIG_VENC_SESSION_QP \
+#define HFI_PROPERTY_CONFIG_VENC_BLUR_FRAME_SIZE \
+ (HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x010)
+#define HFI_PROPERTY_CONFIG_VENC_FRAME_QP \
(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x012)
#define HFI_PROPERTY_PARAM_VPE_COMMON_START \
@@ -349,8 +361,6 @@
#define HFI_PROPERTY_CONFIG_VPE_COMMON_START \
(HFI_DOMAIN_BASE_VPE + HFI_ARCH_COMMON_OFFSET + 0x8000)
-#define HFI_PROPERTY_CONFIG_VENC_BLUR_FRAME_SIZE \
- (HFI_PROPERTY_CONFIG_COMMON_START + 0x010)
struct hfi_pic_struct {
u32 progressive_only;
@@ -478,7 +488,8 @@
};
struct hfi_conceal_color {
- u32 conceal_color;
+ u32 conceal_color_8bit;
+ u32 conceal_color_10bit;
};
struct hfi_intra_period {
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index 52dc794..1da2c94 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -1099,10 +1099,10 @@
struct s5p_jpeg_ctx *ctx)
{
int c, components = 0, notfound, n_dht = 0, n_dqt = 0;
- unsigned int height, width, word, subsampling = 0, sos = 0, sof = 0,
- sof_len = 0;
- unsigned int dht[S5P_JPEG_MAX_MARKER], dht_len[S5P_JPEG_MAX_MARKER],
- dqt[S5P_JPEG_MAX_MARKER], dqt_len[S5P_JPEG_MAX_MARKER];
+ unsigned int height = 0, width = 0, word, subsampling = 0;
+ unsigned int sos = 0, sof = 0, sof_len = 0;
+ unsigned int dht[S5P_JPEG_MAX_MARKER], dht_len[S5P_JPEG_MAX_MARKER];
+ unsigned int dqt[S5P_JPEG_MAX_MARKER], dqt_len[S5P_JPEG_MAX_MARKER];
long length;
struct s5p_jpeg_buffer jpeg_buffer;
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 86cc70fe25..2d4b836 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -1629,7 +1629,7 @@
if (kc == KEY_KEYBOARD && !ictx->release_code) {
ictx->last_keycode = kc;
if (!nomouse) {
- ictx->pad_mouse = ~(ictx->pad_mouse) & 0x1;
+ ictx->pad_mouse = !ictx->pad_mouse;
dev_dbg(dev, "toggling to %s mode\n",
ictx->pad_mouse ? "mouse" : "keyboard");
spin_unlock_irqrestore(&ictx->kc_lock, flags);
diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c
index e6e5e90..b3659448 100644
--- a/drivers/media/tuners/xc5000.c
+++ b/drivers/media/tuners/xc5000.c
@@ -931,7 +931,7 @@
static int xc5000_set_tv_freq(struct dvb_frontend *fe)
{
struct xc5000_priv *priv = fe->tuner_priv;
- u16 pll_lock_status;
+ u16 pll_lock_status = 0;
int ret;
tune_channel:
@@ -1040,7 +1040,6 @@
return 0;
}
-
static int xc5000_set_params(struct dvb_frontend *fe)
{
struct xc5000_priv *priv = fe->tuner_priv;
@@ -1133,7 +1132,7 @@
const struct xc5000_fw_cfg *desired_fw = xc5000_assign_firmware(priv->chip_id);
const struct firmware *fw;
int ret, i;
- u16 pll_lock_status;
+ u16 pll_lock_status = 0;
u16 fw_ck;
cancel_delayed_work(&priv->timer_sleep);
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
index 283495c..aab8eee 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
@@ -320,7 +320,7 @@
static int mxl111sf_i2c_send_data(struct mxl111sf_state *state,
u8 index, u8 *wdata)
{
- int ret = mxl111sf_ctrl_msg(state->d, wdata[0],
+ int ret = mxl111sf_ctrl_msg(state, wdata[0],
&wdata[1], 25, NULL, 0);
mxl_fail(ret);
@@ -330,7 +330,7 @@
static int mxl111sf_i2c_get_data(struct mxl111sf_state *state,
u8 index, u8 *wdata, u8 *rdata)
{
- int ret = mxl111sf_ctrl_msg(state->d, wdata[0],
+ int ret = mxl111sf_ctrl_msg(state, wdata[0],
&wdata[1], 25, rdata, 24);
mxl_fail(ret);
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
index 5d676b5..f1f4486 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
@@ -24,9 +24,6 @@
#include "lgdt3305.h"
#include "lg2160.h"
-/* Max transfer size done by I2C transfer functions */
-#define MAX_XFER_SIZE 64
-
int dvb_usb_mxl111sf_debug;
module_param_named(debug, dvb_usb_mxl111sf_debug, int, 0644);
MODULE_PARM_DESC(debug, "set debugging level "
@@ -56,27 +53,34 @@
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
-int mxl111sf_ctrl_msg(struct dvb_usb_device *d,
+int mxl111sf_ctrl_msg(struct mxl111sf_state *state,
u8 cmd, u8 *wbuf, int wlen, u8 *rbuf, int rlen)
{
+ struct dvb_usb_device *d = state->d;
int wo = (rbuf == NULL || rlen == 0); /* write-only */
int ret;
- u8 sndbuf[MAX_XFER_SIZE];
- if (1 + wlen > sizeof(sndbuf)) {
+ if (1 + wlen > MXL_MAX_XFER_SIZE) {
pr_warn("%s: len=%d is too big!\n", __func__, wlen);
return -EOPNOTSUPP;
}
pr_debug("%s(wlen = %d, rlen = %d)\n", __func__, wlen, rlen);
- memset(sndbuf, 0, 1+wlen);
+ mutex_lock(&state->msg_lock);
+ memset(state->sndbuf, 0, 1+wlen);
+ memset(state->rcvbuf, 0, rlen);
- sndbuf[0] = cmd;
- memcpy(&sndbuf[1], wbuf, wlen);
+ state->sndbuf[0] = cmd;
+ memcpy(&state->sndbuf[1], wbuf, wlen);
- ret = (wo) ? dvb_usbv2_generic_write(d, sndbuf, 1+wlen) :
- dvb_usbv2_generic_rw(d, sndbuf, 1+wlen, rbuf, rlen);
+ ret = (wo) ? dvb_usbv2_generic_write(d, state->sndbuf, 1+wlen) :
+ dvb_usbv2_generic_rw(d, state->sndbuf, 1+wlen, state->rcvbuf,
+ rlen);
+
+ memcpy(rbuf, state->rcvbuf, rlen);
+ mutex_unlock(&state->msg_lock);
+
mxl_fail(ret);
return ret;
@@ -92,7 +96,7 @@
u8 buf[2];
int ret;
- ret = mxl111sf_ctrl_msg(state->d, MXL_CMD_REG_READ, &addr, 1, buf, 2);
+ ret = mxl111sf_ctrl_msg(state, MXL_CMD_REG_READ, &addr, 1, buf, 2);
if (mxl_fail(ret)) {
mxl_debug("error reading reg: 0x%02x", addr);
goto fail;
@@ -118,7 +122,7 @@
pr_debug("W: (0x%02x, 0x%02x)\n", addr, data);
- ret = mxl111sf_ctrl_msg(state->d, MXL_CMD_REG_WRITE, buf, 2, NULL, 0);
+ ret = mxl111sf_ctrl_msg(state, MXL_CMD_REG_WRITE, buf, 2, NULL, 0);
if (mxl_fail(ret))
pr_err("error writing reg: 0x%02x, val: 0x%02x", addr, data);
return ret;
@@ -922,6 +926,8 @@
static u8 eeprom[256];
struct i2c_client c;
+ mutex_init(&state->msg_lock);
+
ret = get_chip_info(state);
if (mxl_fail(ret))
pr_err("failed to get chip info during probe");
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.h b/drivers/media/usb/dvb-usb-v2/mxl111sf.h
index 846260e..3e6f588 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.h
@@ -19,6 +19,9 @@
#include <media/tveeprom.h>
#include <media/media-entity.h>
+/* Max transfer size done by I2C transfer functions */
+#define MXL_MAX_XFER_SIZE 64
+
#define MXL_EP1_REG_READ 1
#define MXL_EP2_REG_WRITE 2
#define MXL_EP3_INTERRUPT 3
@@ -86,6 +89,9 @@
struct mutex fe_lock;
u8 num_frontends;
struct mxl111sf_adap_state adap_state[3];
+ u8 sndbuf[MXL_MAX_XFER_SIZE];
+ u8 rcvbuf[MXL_MAX_XFER_SIZE];
+ struct mutex msg_lock;
#ifdef CONFIG_MEDIA_CONTROLLER_DVB
struct media_entity tuner;
struct media_pad tuner_pads[2];
@@ -108,7 +114,7 @@
/* needed for hardware i2c functions in mxl111sf-i2c.c:
* mxl111sf_i2c_send_data / mxl111sf_i2c_get_data */
-int mxl111sf_ctrl_msg(struct dvb_usb_device *d,
+int mxl111sf_ctrl_msg(struct mxl111sf_state *state,
u8 cmd, u8 *wbuf, int wlen, u8 *rbuf, int rlen);
#define mxl_printk(kern, fmt, arg...) \
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c b/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c
index e1907cd0c..7613d1f 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c
@@ -123,15 +123,10 @@
memset(&tvdata,0,sizeof(tvdata));
eeprom = pvr2_eeprom_fetch(hdw);
- if (!eeprom) return -EINVAL;
+ if (!eeprom)
+ return -EINVAL;
- {
- struct i2c_client fake_client;
- /* Newer version expects a useless client interface */
- fake_client.addr = hdw->eeprom_addr;
- fake_client.adapter = &hdw->i2c_adap;
- tveeprom_hauppauge_analog(&fake_client,&tvdata,eeprom);
- }
+ tveeprom_hauppauge_analog(NULL, &tvdata, eeprom);
trace_eeprom("eeprom assumed v4l tveeprom module");
trace_eeprom("eeprom direct call results:");
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index c2ee6e3..252ab99 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -1992,6 +1992,9 @@
if (!found)
return -ENOENT;
+ if (ctrl->info.size < mapping->size)
+ return -EINVAL;
+
if (mutex_lock_interruptible(&chain->ctrl_mutex))
return -ERESTARTSYS;
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 21900202..9ccf7f5 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -868,7 +868,7 @@
void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
{
- if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
+ if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
return NULL;
return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv);
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 1239e68..71341a7 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -8,7 +8,7 @@
config MFD_CORE
tristate
select IRQ_DOMAIN
- default n
+ default y
config MFD_CS5535
tristate "AMD CS5535 and CS5536 southbridge core functions"
@@ -1636,61 +1636,19 @@
power supply enable or disable. This driver acts as interface
between codec and regulator framework.
-config WCD9XXX_CODEC_UTIL
- tristate "WCD9XXX Codec Utils"
- select MFD_CORE
- help
- WCD9XXX Util driver provides APIs for WCD drivers to reset,
- suspend/resume, regmap bus callback functions and read/write
- functions. This driver also hides the underlying bus related
- functionalities.
-
-config WCD9330_CODEC
- tristate "WCD9330 Codec"
- select SLIMBUS
- select MFD_CORE
- select WCD9XXX_CODEC_UTIL
- select MSM_CDC_SUPPLY
- select REGMAP_ALLOW_WRITE_DEBUGFS
- help
- Enables the WCD9xxx codec core driver. The core driver provides
- read/write capability to registers which are part of the
- WCD9330 core and gives the ability to use the WCD9330 codec.
- The WCD9330 codec support either I2C/I2S or Slimbus for
- control and data exchnage with master processor.
-
-config WCD9335_CODEC
- tristate "WCD9335 Codec"
+config WCD9XXX_CODEC_CORE
+ tristate "WCD9XXX Codec Core"
select SLIMBUS
select SOUNDWIRE_WCD_CTRL
- select MFD_CORE
- select WCD9XXX_CODEC_UTIL
- select MSM_CDC_SUPPLY
- select MSM_CDC_PINCTRL
- select REGMAP_ALLOW_WRITE_DEBUGFS
- help
- Enables the WCD9xxx codec core driver. The core driver provides
- read/write capability to registers which are part of the
- WCD9335 core and gives the ability to use the WCD9335 codec.
- The WCD9335 codec support either I2C/I2S or Slimbus for
- control and data exchnage with master processor.
-
-config WCD934X_CODEC
- tristate "WCD934X Codec"
- depends on SLIMBUS
- select SOUNDWIRE_WCD_CTRL
- select MFD_CORE
- select WCD9XXX_CODEC_UTIL
select MSM_CDC_SUPPLY
select MSM_CDC_PINCTRL
select REGMAP_ALLOW_WRITE_DEBUGFS
select PINCTRL_WCD
help
- Enables the WCD9xxx codec core driver. The core driver provides
- read/write capability to registers which are part of the
- WCD934X core and gives the ability to use the WCD934X codec.
- The WCD934X codec supports either I2C/I2S or Slimbus for
- control and data exchange with master processor.
+ WCD9XXX Core driver provides APIs for WCD drivers to reset,
+ suspend/resume, regmap bus callback functions and read/write
+ functions. This driver also hides the underlying bus related
+ functionalities.
menu "Multimedia Capabilities Port drivers"
depends on ARCH_SA1100
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index b2fe74b..0ce70f3 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -205,16 +205,13 @@
obj-$(CONFIG_MFD_DLN2) += dln2.o
obj-$(CONFIG_MFD_RT5033) += rt5033.o
obj-$(CONFIG_MFD_SKY81452) += sky81452.o
-obj-$(CONFIG_MSM_CDC_PINCTRL) += msm-cdc-pinctrl.o
-obj-$(CONFIG_MSM_CDC_SUPPLY) += msm-cdc-supply.o
-obj-$(CONFIG_WCD9XXX_CODEC_UTIL) += wcd9xxx-utils.o
-obj-$(CONFIG_WCD9330_CODEC) += wcd9xxx-core.o wcd9xxx-irq.o wcd9xxx-slimslave.o\
- wcd9330-regmap.o
-obj-$(CONFIG_WCD9335_CODEC) += wcd9xxx-core.o wcd9xxx-irq.o wcd9xxx-slimslave.o\
- wcd9335-regmap.o wcd9335-tables.o
-obj-$(CONFIG_WCD934X_CODEC) += wcd9xxx-core.o wcd9xxx-irq.o wcd9xxx-slimslave.o\
- wcd934x-regmap.o wcd934x-tables.o
-
+wcd-core-objs := wcd9xxx-rst.o wcd9xxx-core-init.o \
+ wcd9xxx-core.o wcd9xxx-irq.o \
+ wcd9xxx-slimslave.o wcd9xxx-utils.o \
+ wcd934x-regmap.o wcd934x-tables.o \
+ wcd9335-regmap.o wcd9335-tables.o \
+ msm-cdc-pinctrl.o msm-cdc-supply.o
+obj-$(CONFIG_WCD9XXX_CODEC_CORE) += wcd-core.o
intel-soc-pmic-objs := intel_soc_pmic_core.o intel_soc_pmic_crc.o
intel-soc-pmic-$(CONFIG_INTEL_PMC_IPC) += intel_soc_pmic_bxtwc.o
obj-$(CONFIG_INTEL_SOC_PMIC) += intel-soc-pmic.o
diff --git a/drivers/mfd/msm-cdc-pinctrl.c b/drivers/mfd/msm-cdc-pinctrl.c
index 9622256..859a75f 100644
--- a/drivers/mfd/msm-cdc-pinctrl.c
+++ b/drivers/mfd/msm-cdc-pinctrl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -239,7 +239,15 @@
.probe = msm_cdc_pinctrl_probe,
.remove = msm_cdc_pinctrl_remove,
};
-module_platform_driver(msm_cdc_pinctrl_driver);
+int msm_cdc_pinctrl_drv_init(void)
+{
+ return platform_driver_register(&msm_cdc_pinctrl_driver);
+}
+
+void msm_cdc_pinctrl_drv_exit(void)
+{
+ platform_driver_unregister(&msm_cdc_pinctrl_driver);
+}
MODULE_DESCRIPTION("MSM CODEC pin control platform driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c
index 1aa74c4..9d167c9 100644
--- a/drivers/mfd/omap-usb-tll.c
+++ b/drivers/mfd/omap-usb-tll.c
@@ -377,8 +377,8 @@
* and use SDR Mode
*/
reg &= ~(OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE
- | OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF
| OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE);
+ reg |= OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF;
} else if (pdata->port_mode[i] ==
OMAP_EHCI_PORT_MODE_HSIC) {
/*
diff --git a/drivers/mfd/qcom-spmi-pmic.c b/drivers/mfd/qcom-spmi-pmic.c
index 2280770..0977563 100644
--- a/drivers/mfd/qcom-spmi-pmic.c
+++ b/drivers/mfd/qcom-spmi-pmic.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -118,12 +118,23 @@
.fast_io = true,
};
+static const struct regmap_config spmi_regmap_can_sleep_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .max_register = 0xffff,
+ .fast_io = false,
+};
+
static int pmic_spmi_probe(struct spmi_device *sdev)
{
struct device_node *root = sdev->dev.of_node;
struct regmap *regmap;
- regmap = devm_regmap_init_spmi_ext(sdev, &spmi_regmap_config);
+ if (of_property_read_bool(root, "qcom,can-sleep"))
+ regmap = devm_regmap_init_spmi_ext(sdev,
+ &spmi_regmap_can_sleep_config);
+ else
+ regmap = devm_regmap_init_spmi_ext(sdev, &spmi_regmap_config);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
diff --git a/drivers/mfd/wcd9330-regmap.c b/drivers/mfd/wcd9330-regmap.c
deleted file mode 100644
index 878ea72..0000000
--- a/drivers/mfd/wcd9330-regmap.c
+++ /dev/null
@@ -1,990 +0,0 @@
-/*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/mfd/wcd9xxx/core.h>
-#include <linux/mfd/wcd9xxx/wcd9330_registers.h>
-#include <linux/regmap.h>
-#include <linux/device.h>
-#include "wcd9xxx-regmap.h"
-
-static struct reg_default wcd9330_defaults[] = {
- { TOMTOM_A_CHIP_CTL, TOMTOM_A_CHIP_CTL__POR },
- { TOMTOM_A_CHIP_STATUS, TOMTOM_A_CHIP_STATUS__POR },
- { TOMTOM_A_CHIP_ID_BYTE_0, TOMTOM_A_CHIP_ID_BYTE_0__POR },
- { TOMTOM_A_CHIP_ID_BYTE_1, TOMTOM_A_CHIP_ID_BYTE_1__POR },
- { TOMTOM_A_CHIP_ID_BYTE_2, TOMTOM_A_CHIP_ID_BYTE_2__POR },
- { TOMTOM_A_CHIP_ID_BYTE_3, TOMTOM_A_CHIP_ID_BYTE_3__POR },
- { TOMTOM_A_CHIP_I2C_SLAVE_ID, TOMTOM_A_CHIP_I2C_SLAVE_ID__POR },
- { TOMTOM_A_SLAVE_ID_1, TOMTOM_A_SLAVE_ID_1__POR },
- { TOMTOM_A_SLAVE_ID_2, TOMTOM_A_SLAVE_ID_2__POR },
- { TOMTOM_A_SLAVE_ID_3, TOMTOM_A_SLAVE_ID_3__POR },
- { TOMTOM_A_PIN_CTL_OE0, TOMTOM_A_PIN_CTL_OE0__POR },
- { TOMTOM_A_PIN_CTL_OE1, TOMTOM_A_PIN_CTL_OE1__POR },
- { TOMTOM_A_PIN_CTL_OE2, TOMTOM_A_PIN_CTL_OE2__POR },
- { TOMTOM_A_PIN_CTL_DATA0, TOMTOM_A_PIN_CTL_DATA0__POR },
- { TOMTOM_A_PIN_CTL_DATA1, TOMTOM_A_PIN_CTL_DATA1__POR },
- { TOMTOM_A_PIN_CTL_DATA2, TOMTOM_A_PIN_CTL_DATA2__POR },
- { TOMTOM_A_HDRIVE_GENERIC, TOMTOM_A_HDRIVE_GENERIC__POR },
- { TOMTOM_A_HDRIVE_OVERRIDE, TOMTOM_A_HDRIVE_OVERRIDE__POR },
- { TOMTOM_A_ANA_CSR_WAIT_STATE, TOMTOM_A_ANA_CSR_WAIT_STATE__POR },
- { TOMTOM_A_PROCESS_MONITOR_CTL0, TOMTOM_A_PROCESS_MONITOR_CTL0__POR },
- { TOMTOM_A_PROCESS_MONITOR_CTL1, TOMTOM_A_PROCESS_MONITOR_CTL1__POR },
- { TOMTOM_A_PROCESS_MONITOR_CTL2, TOMTOM_A_PROCESS_MONITOR_CTL2__POR },
- { TOMTOM_A_PROCESS_MONITOR_CTL3, TOMTOM_A_PROCESS_MONITOR_CTL3__POR },
- { TOMTOM_A_QFUSE_CTL, TOMTOM_A_QFUSE_CTL__POR },
- { TOMTOM_A_QFUSE_STATUS, TOMTOM_A_QFUSE_STATUS__POR },
- { TOMTOM_A_QFUSE_DATA_OUT0, TOMTOM_A_QFUSE_DATA_OUT0__POR },
- { TOMTOM_A_QFUSE_DATA_OUT1, TOMTOM_A_QFUSE_DATA_OUT1__POR },
- { TOMTOM_A_QFUSE_DATA_OUT2, TOMTOM_A_QFUSE_DATA_OUT2__POR },
- { TOMTOM_A_QFUSE_DATA_OUT3, TOMTOM_A_QFUSE_DATA_OUT3__POR },
- { TOMTOM_A_QFUSE_DATA_OUT4, TOMTOM_A_QFUSE_DATA_OUT4__POR },
- { TOMTOM_A_QFUSE_DATA_OUT5, TOMTOM_A_QFUSE_DATA_OUT5__POR },
- { TOMTOM_A_QFUSE_DATA_OUT6, TOMTOM_A_QFUSE_DATA_OUT6__POR },
- { TOMTOM_A_QFUSE_DATA_OUT7, TOMTOM_A_QFUSE_DATA_OUT7__POR },
- { TOMTOM_A_CDC_CTL, TOMTOM_A_CDC_CTL__POR },
- { TOMTOM_A_LEAKAGE_CTL, TOMTOM_A_LEAKAGE_CTL__POR },
- { TOMTOM_A_SVASS_MEM_PTR0, TOMTOM_A_SVASS_MEM_PTR0__POR },
- { TOMTOM_A_SVASS_MEM_PTR1, TOMTOM_A_SVASS_MEM_PTR1__POR },
- { TOMTOM_A_SVASS_MEM_PTR2, TOMTOM_A_SVASS_MEM_PTR2__POR },
- { TOMTOM_A_SVASS_MEM_CTL, TOMTOM_A_SVASS_MEM_CTL__POR },
- { TOMTOM_A_SVASS_MEM_BANK, TOMTOM_A_SVASS_MEM_BANK__POR },
- { TOMTOM_A_DMIC_B1_CTL, TOMTOM_A_DMIC_B1_CTL__POR },
- { TOMTOM_A_DMIC_B2_CTL, TOMTOM_A_DMIC_B2_CTL__POR },
- { TOMTOM_A_SVASS_CLKRST_CTL, TOMTOM_A_SVASS_CLKRST_CTL__POR },
- { TOMTOM_A_SVASS_CPAR_CFG, TOMTOM_A_SVASS_CPAR_CFG__POR },
- { TOMTOM_A_SVASS_BUF_RDY_INT_PERIOD,
- TOMTOM_A_SVASS_BUF_RDY_INT_PERIOD__POR },
- { TOMTOM_A_SVASS_CPAR_WDOG_CFG, TOMTOM_A_SVASS_CPAR_WDOG_CFG__POR },
- { TOMTOM_A_SVASS_CFG, TOMTOM_A_SVASS_CFG__POR },
- { TOMTOM_A_SVASS_SPE_CFG, TOMTOM_A_SVASS_SPE_CFG__POR },
- { TOMTOM_A_SVASS_STATUS, TOMTOM_A_SVASS_STATUS__POR },
- { TOMTOM_A_SVASS_INT_MASK, TOMTOM_A_SVASS_INT_MASK__POR },
- { TOMTOM_A_SVASS_INT_STATUS, TOMTOM_A_SVASS_INT_STATUS__POR },
- { TOMTOM_A_SVASS_INT_CLR, TOMTOM_A_SVASS_INT_CLR__POR },
- { TOMTOM_A_SVASS_DEBUG, TOMTOM_A_SVASS_DEBUG__POR },
- { TOMTOM_A_SVASS_SPE_BKUP_INT, TOMTOM_A_SVASS_SPE_BKUP_INT__POR },
- { TOMTOM_A_SVASS_MEM_ACC, TOMTOM_A_SVASS_MEM_ACC__POR },
- { TOMTOM_A_MEM_LEAKAGE_CTL, TOMTOM_A_MEM_LEAKAGE_CTL__POR },
- { TOMTOM_A_SVASS_SPE_INBOX_TRG, TOMTOM_A_SVASS_SPE_INBOX_TRG__POR },
- { TOMTOM_A_SVASS_SPE_INBOX_0, TOMTOM_A_SVASS_SPE_INBOX_0__POR },
- { TOMTOM_A_SVASS_SPE_INBOX_1, TOMTOM_A_SVASS_SPE_INBOX_1__POR },
- { TOMTOM_A_SVASS_SPE_INBOX_2, TOMTOM_A_SVASS_SPE_INBOX_2__POR },
- { TOMTOM_A_SVASS_SPE_INBOX_3, TOMTOM_A_SVASS_SPE_INBOX_3__POR },
- { TOMTOM_A_SVASS_SPE_INBOX_4, TOMTOM_A_SVASS_SPE_INBOX_4__POR },
- { TOMTOM_A_SVASS_SPE_INBOX_5, TOMTOM_A_SVASS_SPE_INBOX_5__POR },
- { TOMTOM_A_SVASS_SPE_INBOX_6, TOMTOM_A_SVASS_SPE_INBOX_6__POR },
- { TOMTOM_A_SVASS_SPE_INBOX_7, TOMTOM_A_SVASS_SPE_INBOX_7__POR },
- { TOMTOM_A_SVASS_SPE_INBOX_8, TOMTOM_A_SVASS_SPE_INBOX_8__POR },
- { TOMTOM_A_SVASS_SPE_INBOX_9, TOMTOM_A_SVASS_SPE_INBOX_9__POR },
- { TOMTOM_A_SVASS_SPE_INBOX_10, TOMTOM_A_SVASS_SPE_INBOX_10__POR },
- { TOMTOM_A_SVASS_SPE_INBOX_11, TOMTOM_A_SVASS_SPE_INBOX_11__POR },
- { TOMTOM_A_SVASS_SPE_OUTBOX_0, TOMTOM_A_SVASS_SPE_OUTBOX_0__POR },
- { TOMTOM_A_SVASS_SPE_OUTBOX_1, TOMTOM_A_SVASS_SPE_OUTBOX_1__POR },
- { TOMTOM_A_SVASS_SPE_OUTBOX_2, TOMTOM_A_SVASS_SPE_OUTBOX_2__POR },
- { TOMTOM_A_SVASS_SPE_OUTBOX_3, TOMTOM_A_SVASS_SPE_OUTBOX_3__POR },
- { TOMTOM_A_SVASS_SPE_OUTBOX_4, TOMTOM_A_SVASS_SPE_OUTBOX_4__POR },
- { TOMTOM_A_SVASS_SPE_OUTBOX_5, TOMTOM_A_SVASS_SPE_OUTBOX_5__POR },
- { TOMTOM_A_SVASS_SPE_OUTBOX_6, TOMTOM_A_SVASS_SPE_OUTBOX_6__POR },
- { TOMTOM_A_SVASS_SPE_OUTBOX_7, TOMTOM_A_SVASS_SPE_OUTBOX_7__POR },
- { TOMTOM_A_SVASS_SPE_OUTBOX_8, TOMTOM_A_SVASS_SPE_OUTBOX_8__POR },
- { TOMTOM_A_SVASS_SPE_OUTBOX_9, TOMTOM_A_SVASS_SPE_OUTBOX_9__POR },
- { TOMTOM_A_SVASS_SPE_OUTBOX_10, TOMTOM_A_SVASS_SPE_OUTBOX_10__POR },
- { TOMTOM_A_SVASS_SPE_OUTBOX_11, TOMTOM_A_SVASS_SPE_OUTBOX_11__POR },
- { TOMTOM_A_INTR_MODE, TOMTOM_A_INTR_MODE__POR },
- { TOMTOM_A_INTR1_MASK0, TOMTOM_A_INTR1_MASK0__POR },
- { TOMTOM_A_INTR1_MASK1, TOMTOM_A_INTR1_MASK1__POR },
- { TOMTOM_A_INTR1_MASK2, TOMTOM_A_INTR1_MASK2__POR },
- { TOMTOM_A_INTR1_MASK3, TOMTOM_A_INTR1_MASK3__POR },
- { TOMTOM_A_INTR1_STATUS0, TOMTOM_A_INTR1_STATUS0__POR },
- { TOMTOM_A_INTR1_STATUS1, TOMTOM_A_INTR1_STATUS1__POR },
- { TOMTOM_A_INTR1_STATUS2, TOMTOM_A_INTR1_STATUS2__POR },
- { TOMTOM_A_INTR1_STATUS3, TOMTOM_A_INTR1_STATUS3__POR },
- { TOMTOM_A_INTR1_CLEAR0, TOMTOM_A_INTR1_CLEAR0__POR },
- { TOMTOM_A_INTR1_CLEAR1, TOMTOM_A_INTR1_CLEAR1__POR },
- { TOMTOM_A_INTR1_CLEAR2, TOMTOM_A_INTR1_CLEAR2__POR },
- { TOMTOM_A_INTR1_CLEAR3, TOMTOM_A_INTR1_CLEAR3__POR },
- { TOMTOM_A_INTR1_LEVEL0, TOMTOM_A_INTR1_LEVEL0__POR },
- { TOMTOM_A_INTR1_LEVEL1, TOMTOM_A_INTR1_LEVEL1__POR },
- { TOMTOM_A_INTR1_LEVEL2, TOMTOM_A_INTR1_LEVEL2__POR },
- { TOMTOM_A_INTR1_LEVEL3, TOMTOM_A_INTR1_LEVEL3__POR },
- { TOMTOM_A_INTR1_TEST0, TOMTOM_A_INTR1_TEST0__POR },
- { TOMTOM_A_INTR1_TEST1, TOMTOM_A_INTR1_TEST1__POR },
- { TOMTOM_A_INTR1_TEST2, TOMTOM_A_INTR1_TEST2__POR },
- { TOMTOM_A_INTR1_TEST3, TOMTOM_A_INTR1_TEST3__POR },
- { TOMTOM_A_INTR1_SET0, TOMTOM_A_INTR1_SET0__POR },
- { TOMTOM_A_INTR1_SET1, TOMTOM_A_INTR1_SET1__POR },
- { TOMTOM_A_INTR1_SET2, TOMTOM_A_INTR1_SET2__POR },
- { TOMTOM_A_INTR1_SET3, TOMTOM_A_INTR1_SET3__POR },
- { TOMTOM_A_INTR2_MASK0, TOMTOM_A_INTR2_MASK0__POR },
- { TOMTOM_A_INTR2_STATUS0, TOMTOM_A_INTR2_STATUS0__POR },
- { TOMTOM_A_INTR2_CLEAR0, TOMTOM_A_INTR2_CLEAR0__POR },
- { TOMTOM_A_INTR2_LEVEL0, TOMTOM_A_INTR2_LEVEL0__POR },
- { TOMTOM_A_INTR2_TEST0, TOMTOM_A_INTR2_TEST0__POR },
- { TOMTOM_A_INTR2_SET0, TOMTOM_A_INTR2_SET0__POR },
- { TOMTOM_A_CDC_TX_I2S_SCK_MODE, TOMTOM_A_CDC_TX_I2S_SCK_MODE__POR },
- { TOMTOM_A_CDC_TX_I2S_WS_MODE, TOMTOM_A_CDC_TX_I2S_WS_MODE__POR },
- { TOMTOM_A_CDC_DMIC_DATA0_MODE, TOMTOM_A_CDC_DMIC_DATA0_MODE__POR },
- { TOMTOM_A_CDC_DMIC_CLK0_MODE, TOMTOM_A_CDC_DMIC_CLK0_MODE__POR },
- { TOMTOM_A_CDC_DMIC_DATA1_MODE, TOMTOM_A_CDC_DMIC_DATA1_MODE__POR },
- { TOMTOM_A_CDC_DMIC_CLK1_MODE, TOMTOM_A_CDC_DMIC_CLK1_MODE__POR },
- { TOMTOM_A_CDC_RX_I2S_SCK_MODE, TOMTOM_A_CDC_RX_I2S_SCK_MODE__POR },
- { TOMTOM_A_CDC_RX_I2S_WS_MODE, TOMTOM_A_CDC_RX_I2S_WS_MODE__POR },
- { TOMTOM_A_CDC_DMIC_DATA2_MODE, TOMTOM_A_CDC_DMIC_DATA2_MODE__POR },
- { TOMTOM_A_CDC_DMIC_CLK2_MODE, TOMTOM_A_CDC_DMIC_CLK2_MODE__POR },
- { TOMTOM_A_CDC_INTR1_MODE, TOMTOM_A_CDC_INTR1_MODE__POR },
- { TOMTOM_A_CDC_SB_NRZ_SEL_MODE, TOMTOM_A_CDC_SB_NRZ_SEL_MODE__POR },
- { TOMTOM_A_CDC_INTR2_MODE, TOMTOM_A_CDC_INTR2_MODE__POR },
- { TOMTOM_A_CDC_RF_PA_ON_MODE, TOMTOM_A_CDC_RF_PA_ON_MODE__POR },
- { TOMTOM_A_CDC_BOOST_MODE, TOMTOM_A_CDC_BOOST_MODE__POR },
- { TOMTOM_A_CDC_JTCK_MODE, TOMTOM_A_CDC_JTCK_MODE__POR },
- { TOMTOM_A_CDC_JTDI_MODE, TOMTOM_A_CDC_JTDI_MODE__POR },
- { TOMTOM_A_CDC_JTMS_MODE, TOMTOM_A_CDC_JTMS_MODE__POR },
- { TOMTOM_A_CDC_JTDO_MODE, TOMTOM_A_CDC_JTDO_MODE__POR },
- { TOMTOM_A_CDC_JTRST_MODE, TOMTOM_A_CDC_JTRST_MODE__POR },
- { TOMTOM_A_CDC_BIST_MODE_MODE, TOMTOM_A_CDC_BIST_MODE_MODE__POR },
- { TOMTOM_A_CDC_MAD_MAIN_CTL_1, TOMTOM_A_CDC_MAD_MAIN_CTL_1__POR },
- { TOMTOM_A_CDC_MAD_MAIN_CTL_2, TOMTOM_A_CDC_MAD_MAIN_CTL_2__POR },
- { TOMTOM_A_CDC_MAD_AUDIO_CTL_1, TOMTOM_A_CDC_MAD_AUDIO_CTL_1__POR },
- { TOMTOM_A_CDC_MAD_AUDIO_CTL_2, TOMTOM_A_CDC_MAD_AUDIO_CTL_2__POR },
- { TOMTOM_A_CDC_MAD_AUDIO_CTL_3, TOMTOM_A_CDC_MAD_AUDIO_CTL_3__POR },
- { TOMTOM_A_CDC_MAD_AUDIO_CTL_4, TOMTOM_A_CDC_MAD_AUDIO_CTL_4__POR },
- { TOMTOM_A_CDC_MAD_AUDIO_CTL_5, TOMTOM_A_CDC_MAD_AUDIO_CTL_5__POR },
- { TOMTOM_A_CDC_MAD_AUDIO_CTL_6, TOMTOM_A_CDC_MAD_AUDIO_CTL_6__POR },
- { TOMTOM_A_CDC_MAD_AUDIO_CTL_7, TOMTOM_A_CDC_MAD_AUDIO_CTL_7__POR },
- { TOMTOM_A_CDC_MAD_AUDIO_CTL_8, TOMTOM_A_CDC_MAD_AUDIO_CTL_8__POR },
- { TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_PTR,
- TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_PTR__POR },
- { TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_VAL,
- TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_VAL__POR },
- { TOMTOM_A_CDC_MAD_ULTR_CTL_1, TOMTOM_A_CDC_MAD_ULTR_CTL_1__POR },
- { TOMTOM_A_CDC_MAD_ULTR_CTL_2, TOMTOM_A_CDC_MAD_ULTR_CTL_2__POR },
- { TOMTOM_A_CDC_MAD_ULTR_CTL_3, TOMTOM_A_CDC_MAD_ULTR_CTL_3__POR },
- { TOMTOM_A_CDC_MAD_ULTR_CTL_4, TOMTOM_A_CDC_MAD_ULTR_CTL_4__POR },
- { TOMTOM_A_CDC_MAD_ULTR_CTL_5, TOMTOM_A_CDC_MAD_ULTR_CTL_5__POR },
- { TOMTOM_A_CDC_MAD_ULTR_CTL_6, TOMTOM_A_CDC_MAD_ULTR_CTL_6__POR },
- { TOMTOM_A_CDC_MAD_ULTR_CTL_7, TOMTOM_A_CDC_MAD_ULTR_CTL_7__POR },
- { TOMTOM_A_CDC_MAD_BEACON_CTL_1, TOMTOM_A_CDC_MAD_BEACON_CTL_1__POR },
- { TOMTOM_A_CDC_MAD_BEACON_CTL_2, TOMTOM_A_CDC_MAD_BEACON_CTL_2__POR },
- { TOMTOM_A_CDC_MAD_BEACON_CTL_3, TOMTOM_A_CDC_MAD_BEACON_CTL_3__POR },
- { TOMTOM_A_CDC_MAD_BEACON_CTL_4, TOMTOM_A_CDC_MAD_BEACON_CTL_4__POR },
- { TOMTOM_A_CDC_MAD_BEACON_CTL_5, TOMTOM_A_CDC_MAD_BEACON_CTL_5__POR },
- { TOMTOM_A_CDC_MAD_BEACON_CTL_6, TOMTOM_A_CDC_MAD_BEACON_CTL_6__POR },
- { TOMTOM_A_CDC_MAD_BEACON_CTL_7, TOMTOM_A_CDC_MAD_BEACON_CTL_7__POR },
- { TOMTOM_A_CDC_MAD_BEACON_CTL_8, TOMTOM_A_CDC_MAD_BEACON_CTL_8__POR },
- { TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_PTR,
- TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_PTR__POR },
- { TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_VAL,
- TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_VAL__POR },
- { TOMTOM_A_CDC_MAD_INP_SEL, TOMTOM_A_CDC_MAD_INP_SEL__POR },
- { TOMTOM_A_BIAS_REF_CTL, TOMTOM_A_BIAS_REF_CTL__POR },
- { TOMTOM_A_BIAS_CENTRAL_BG_CTL, TOMTOM_A_BIAS_CENTRAL_BG_CTL__POR },
- { TOMTOM_A_BIAS_PRECHRG_CTL, TOMTOM_A_BIAS_PRECHRG_CTL__POR },
- { TOMTOM_A_BIAS_CURR_CTL_1, TOMTOM_A_BIAS_CURR_CTL_1__POR },
- { TOMTOM_A_BIAS_CURR_CTL_2, TOMTOM_A_BIAS_CURR_CTL_2__POR },
- { TOMTOM_A_BIAS_OSC_BG_CTL, TOMTOM_A_BIAS_OSC_BG_CTL__POR },
- { TOMTOM_A_CLK_BUFF_EN1, TOMTOM_A_CLK_BUFF_EN1__POR },
- { TOMTOM_A_CLK_BUFF_EN2, TOMTOM_A_CLK_BUFF_EN2__POR },
- { TOMTOM_A_LDO_L_MODE_1, TOMTOM_A_LDO_L_MODE_1__POR },
- { TOMTOM_A_LDO_L_MODE_2, TOMTOM_A_LDO_L_MODE_2__POR },
- { TOMTOM_A_LDO_L_CTRL_1, TOMTOM_A_LDO_L_CTRL_1__POR },
- { TOMTOM_A_LDO_L_CTRL_2, TOMTOM_A_LDO_L_CTRL_2__POR },
- { TOMTOM_A_LDO_L_CTRL_3, TOMTOM_A_LDO_L_CTRL_3__POR },
- { TOMTOM_A_LDO_L_CTRL_4, TOMTOM_A_LDO_L_CTRL_4__POR },
- { TOMTOM_A_LDO_H_MODE_1, TOMTOM_A_LDO_H_MODE_1__POR },
- { TOMTOM_A_LDO_H_MODE_2, TOMTOM_A_LDO_H_MODE_2__POR },
- { TOMTOM_A_LDO_H_LOOP_CTL, TOMTOM_A_LDO_H_LOOP_CTL__POR },
- { TOMTOM_A_LDO_H_COMP_1, TOMTOM_A_LDO_H_COMP_1__POR },
- { TOMTOM_A_LDO_H_COMP_2, TOMTOM_A_LDO_H_COMP_2__POR },
- { TOMTOM_A_LDO_H_BIAS_1, TOMTOM_A_LDO_H_BIAS_1__POR },
- { TOMTOM_A_LDO_H_BIAS_2, TOMTOM_A_LDO_H_BIAS_2__POR },
- { TOMTOM_A_LDO_H_BIAS_3, TOMTOM_A_LDO_H_BIAS_3__POR },
- { TOMTOM_A_VBAT_CLK, TOMTOM_A_VBAT_CLK__POR },
- { TOMTOM_A_VBAT_LOOP, TOMTOM_A_VBAT_LOOP__POR },
- { TOMTOM_A_VBAT_REF, TOMTOM_A_VBAT_REF__POR },
- { TOMTOM_A_VBAT_ADC_TEST, TOMTOM_A_VBAT_ADC_TEST__POR },
- { TOMTOM_A_VBAT_FE, TOMTOM_A_VBAT_FE__POR },
- { TOMTOM_A_VBAT_BIAS_1, TOMTOM_A_VBAT_BIAS_1__POR },
- { TOMTOM_A_VBAT_BIAS_2, TOMTOM_A_VBAT_BIAS_2__POR },
- { TOMTOM_A_VBAT_ADC_DATA_MSB, TOMTOM_A_VBAT_ADC_DATA_MSB__POR },
- { TOMTOM_A_VBAT_ADC_DATA_LSB, TOMTOM_A_VBAT_ADC_DATA_LSB__POR },
- { TOMTOM_A_FLL_NREF, TOMTOM_A_FLL_NREF__POR },
- { TOMTOM_A_FLL_KDCO_TUNE, TOMTOM_A_FLL_KDCO_TUNE__POR },
- { TOMTOM_A_FLL_LOCK_THRESH, TOMTOM_A_FLL_LOCK_THRESH__POR },
- { TOMTOM_A_FLL_LOCK_DET_COUNT, TOMTOM_A_FLL_LOCK_DET_COUNT__POR },
- { TOMTOM_A_FLL_DAC_THRESHOLD, TOMTOM_A_FLL_DAC_THRESHOLD__POR },
- { TOMTOM_A_FLL_TEST_DCO_FREERUN, TOMTOM_A_FLL_TEST_DCO_FREERUN__POR },
- { TOMTOM_A_FLL_TEST_ENABLE, TOMTOM_A_FLL_TEST_ENABLE__POR },
- { TOMTOM_A_MICB_CFILT_1_CTL, TOMTOM_A_MICB_CFILT_1_CTL__POR },
- { TOMTOM_A_MICB_CFILT_1_VAL, TOMTOM_A_MICB_CFILT_1_VAL__POR },
- { TOMTOM_A_MICB_CFILT_1_PRECHRG, TOMTOM_A_MICB_CFILT_1_PRECHRG__POR },
- { TOMTOM_A_MICB_1_CTL, TOMTOM_A_MICB_1_CTL__POR },
- { TOMTOM_A_MICB_1_INT_RBIAS, TOMTOM_A_MICB_1_INT_RBIAS__POR },
- { TOMTOM_A_MICB_1_MBHC, TOMTOM_A_MICB_1_MBHC__POR },
- { TOMTOM_A_MICB_CFILT_2_CTL, TOMTOM_A_MICB_CFILT_2_CTL__POR },
- { TOMTOM_A_MICB_CFILT_2_VAL, TOMTOM_A_MICB_CFILT_2_VAL__POR },
- { TOMTOM_A_MICB_CFILT_2_PRECHRG, TOMTOM_A_MICB_CFILT_2_PRECHRG__POR },
- { TOMTOM_A_MICB_2_CTL, TOMTOM_A_MICB_2_CTL__POR },
- { TOMTOM_A_MICB_2_INT_RBIAS, TOMTOM_A_MICB_2_INT_RBIAS__POR },
- { TOMTOM_A_MICB_2_MBHC, TOMTOM_A_MICB_2_MBHC__POR },
- { TOMTOM_A_MICB_CFILT_3_CTL, TOMTOM_A_MICB_CFILT_3_CTL__POR },
- { TOMTOM_A_MICB_CFILT_3_VAL, TOMTOM_A_MICB_CFILT_3_VAL__POR },
- { TOMTOM_A_MICB_CFILT_3_PRECHRG, TOMTOM_A_MICB_CFILT_3_PRECHRG__POR },
- { TOMTOM_A_MICB_3_CTL, TOMTOM_A_MICB_3_CTL__POR },
- { TOMTOM_A_MICB_3_INT_RBIAS, TOMTOM_A_MICB_3_INT_RBIAS__POR },
- { TOMTOM_A_MICB_3_MBHC, TOMTOM_A_MICB_3_MBHC__POR },
- { TOMTOM_A_MICB_4_CTL, TOMTOM_A_MICB_4_CTL__POR },
- { TOMTOM_A_MICB_4_INT_RBIAS, TOMTOM_A_MICB_4_INT_RBIAS__POR },
- { TOMTOM_A_MICB_4_MBHC, TOMTOM_A_MICB_4_MBHC__POR },
- { TOMTOM_A_SPKR_DRV2_EN, TOMTOM_A_SPKR_DRV2_EN__POR },
- { TOMTOM_A_SPKR_DRV2_GAIN, TOMTOM_A_SPKR_DRV2_GAIN__POR },
- { TOMTOM_A_SPKR_DRV2_DAC_CTL, TOMTOM_A_SPKR_DRV2_DAC_CTL__POR },
- { TOMTOM_A_SPKR_DRV2_OCP_CTL, TOMTOM_A_SPKR_DRV2_OCP_CTL__POR },
- { TOMTOM_A_SPKR_DRV2_CLIP_DET, TOMTOM_A_SPKR_DRV2_CLIP_DET__POR },
- { TOMTOM_A_SPKR_DRV2_DBG_DAC, TOMTOM_A_SPKR_DRV2_DBG_DAC__POR },
- { TOMTOM_A_SPKR_DRV2_DBG_PA, TOMTOM_A_SPKR_DRV2_DBG_PA__POR },
- { TOMTOM_A_SPKR_DRV2_DBG_PWRSTG, TOMTOM_A_SPKR_DRV2_DBG_PWRSTG__POR },
- { TOMTOM_A_SPKR_DRV2_BIAS_LDO, TOMTOM_A_SPKR_DRV2_BIAS_LDO__POR },
- { TOMTOM_A_SPKR_DRV2_BIAS_INT, TOMTOM_A_SPKR_DRV2_BIAS_INT__POR },
- { TOMTOM_A_SPKR_DRV2_BIAS_PA, TOMTOM_A_SPKR_DRV2_BIAS_PA__POR },
- { TOMTOM_A_SPKR_DRV2_STATUS_OCP, TOMTOM_A_SPKR_DRV2_STATUS_OCP__POR },
- { TOMTOM_A_SPKR_DRV2_STATUS_PA, TOMTOM_A_SPKR_DRV2_STATUS_PA__POR },
- { TOMTOM_A_MBHC_INSERT_DETECT, TOMTOM_A_MBHC_INSERT_DETECT__POR },
- { TOMTOM_A_MBHC_INSERT_DET_STATUS,
- TOMTOM_A_MBHC_INSERT_DET_STATUS__POR },
- { TOMTOM_A_TX_COM_BIAS, TOMTOM_A_TX_COM_BIAS__POR },
- { TOMTOM_A_MBHC_INSERT_DETECT2, TOMTOM_A_MBHC_INSERT_DETECT2__POR },
- { TOMTOM_A_MBHC_SCALING_MUX_1, TOMTOM_A_MBHC_SCALING_MUX_1__POR },
- { TOMTOM_A_MBHC_SCALING_MUX_2, TOMTOM_A_MBHC_SCALING_MUX_2__POR },
- { TOMTOM_A_MAD_ANA_CTRL, TOMTOM_A_MAD_ANA_CTRL__POR },
- { TOMTOM_A_TX_SUP_SWITCH_CTRL_1, TOMTOM_A_TX_SUP_SWITCH_CTRL_1__POR },
- { TOMTOM_A_TX_SUP_SWITCH_CTRL_2, TOMTOM_A_TX_SUP_SWITCH_CTRL_2__POR },
- { TOMTOM_A_TX_1_GAIN, TOMTOM_A_TX_1_GAIN__POR },
- { TOMTOM_A_TX_1_2_TEST_EN, TOMTOM_A_TX_1_2_TEST_EN__POR },
- { TOMTOM_A_TX_2_GAIN, TOMTOM_A_TX_2_GAIN__POR },
- { TOMTOM_A_TX_1_2_ADC_IB, TOMTOM_A_TX_1_2_ADC_IB__POR },
- { TOMTOM_A_TX_1_2_ATEST_REFCTRL, TOMTOM_A_TX_1_2_ATEST_REFCTRL__POR },
- { TOMTOM_A_TX_1_2_TEST_CTL, TOMTOM_A_TX_1_2_TEST_CTL__POR },
- { TOMTOM_A_TX_1_2_TEST_BLOCK_EN, TOMTOM_A_TX_1_2_TEST_BLOCK_EN__POR },
- { TOMTOM_A_TX_1_2_TXFE_CLKDIV, TOMTOM_A_TX_1_2_TXFE_CLKDIV__POR },
- { TOMTOM_A_TX_1_2_SAR_ERR_CH1, TOMTOM_A_TX_1_2_SAR_ERR_CH1__POR },
- { TOMTOM_A_TX_1_2_SAR_ERR_CH2, TOMTOM_A_TX_1_2_SAR_ERR_CH2__POR },
- { TOMTOM_A_TX_3_GAIN, TOMTOM_A_TX_3_GAIN__POR },
- { TOMTOM_A_TX_3_4_TEST_EN, TOMTOM_A_TX_3_4_TEST_EN__POR },
- { TOMTOM_A_TX_4_GAIN, TOMTOM_A_TX_4_GAIN__POR },
- { TOMTOM_A_TX_3_4_ADC_IB, TOMTOM_A_TX_3_4_ADC_IB__POR },
- { TOMTOM_A_TX_3_4_ATEST_REFCTRL, TOMTOM_A_TX_3_4_ATEST_REFCTRL__POR },
- { TOMTOM_A_TX_3_4_TEST_CTL, TOMTOM_A_TX_3_4_TEST_CTL__POR },
- { TOMTOM_A_TX_3_4_TEST_BLOCK_EN, TOMTOM_A_TX_3_4_TEST_BLOCK_EN__POR },
- { TOMTOM_A_TX_3_4_TXFE_CKDIV, TOMTOM_A_TX_3_4_TXFE_CKDIV__POR },
- { TOMTOM_A_TX_3_4_SAR_ERR_CH3, TOMTOM_A_TX_3_4_SAR_ERR_CH3__POR },
- { TOMTOM_A_TX_3_4_SAR_ERR_CH4, TOMTOM_A_TX_3_4_SAR_ERR_CH4__POR },
- { TOMTOM_A_TX_5_GAIN, TOMTOM_A_TX_5_GAIN__POR },
- { TOMTOM_A_TX_5_6_TEST_EN, TOMTOM_A_TX_5_6_TEST_EN__POR },
- { TOMTOM_A_TX_6_GAIN, TOMTOM_A_TX_6_GAIN__POR },
- { TOMTOM_A_TX_5_6_ADC_IB, TOMTOM_A_TX_5_6_ADC_IB__POR },
- { TOMTOM_A_TX_5_6_ATEST_REFCTRL, TOMTOM_A_TX_5_6_ATEST_REFCTRL__POR },
- { TOMTOM_A_TX_5_6_TEST_CTL, TOMTOM_A_TX_5_6_TEST_CTL__POR },
- { TOMTOM_A_TX_5_6_TEST_BLOCK_EN, TOMTOM_A_TX_5_6_TEST_BLOCK_EN__POR },
- { TOMTOM_A_TX_5_6_TXFE_CKDIV, TOMTOM_A_TX_5_6_TXFE_CKDIV__POR },
- { TOMTOM_A_TX_5_6_SAR_ERR_CH5, TOMTOM_A_TX_5_6_SAR_ERR_CH5__POR },
- { TOMTOM_A_TX_5_6_SAR_ERR_CH6, TOMTOM_A_TX_5_6_SAR_ERR_CH6__POR },
- { TOMTOM_A_TX_7_MBHC_EN, TOMTOM_A_TX_7_MBHC_EN__POR },
- { TOMTOM_A_TX_7_MBHC_ATEST_REFCTRL,
- TOMTOM_A_TX_7_MBHC_ATEST_REFCTRL__POR },
- { TOMTOM_A_TX_7_MBHC_ADC, TOMTOM_A_TX_7_MBHC_ADC__POR },
- { TOMTOM_A_TX_7_MBHC_TEST_CTL, TOMTOM_A_TX_7_MBHC_TEST_CTL__POR },
- { TOMTOM_A_TX_7_MBHC_SAR_ERR, TOMTOM_A_TX_7_MBHC_SAR_ERR__POR },
- { TOMTOM_A_TX_7_TXFE_CLKDIV, TOMTOM_A_TX_7_TXFE_CLKDIV__POR },
- { TOMTOM_A_RCO_CTRL, TOMTOM_A_RCO_CTRL__POR },
- { TOMTOM_A_RCO_CALIBRATION_CTRL1, TOMTOM_A_RCO_CALIBRATION_CTRL1__POR },
- { TOMTOM_A_RCO_CALIBRATION_CTRL2, TOMTOM_A_RCO_CALIBRATION_CTRL2__POR },
- { TOMTOM_A_RCO_CALIBRATION_CTRL3, TOMTOM_A_RCO_CALIBRATION_CTRL3__POR },
- { TOMTOM_A_RCO_TEST_CTRL, TOMTOM_A_RCO_TEST_CTRL__POR },
- { TOMTOM_A_RCO_CALIBRATION_RESULT1,
- TOMTOM_A_RCO_CALIBRATION_RESULT1__POR },
- { TOMTOM_A_RCO_CALIBRATION_RESULT2,
- TOMTOM_A_RCO_CALIBRATION_RESULT2__POR },
- { TOMTOM_A_BUCK_MODE_1, TOMTOM_A_BUCK_MODE_1__POR },
- { TOMTOM_A_BUCK_MODE_2, TOMTOM_A_BUCK_MODE_2__POR },
- { TOMTOM_A_BUCK_MODE_3, TOMTOM_A_BUCK_MODE_3__POR },
- { TOMTOM_A_BUCK_MODE_4, TOMTOM_A_BUCK_MODE_4__POR },
- { TOMTOM_A_BUCK_MODE_5, TOMTOM_A_BUCK_MODE_5__POR },
- { TOMTOM_A_BUCK_CTRL_VCL_1, TOMTOM_A_BUCK_CTRL_VCL_1__POR },
- { TOMTOM_A_BUCK_CTRL_VCL_2, TOMTOM_A_BUCK_CTRL_VCL_2__POR },
- { TOMTOM_A_BUCK_CTRL_VCL_3, TOMTOM_A_BUCK_CTRL_VCL_3__POR },
- { TOMTOM_A_BUCK_CTRL_CCL_1, TOMTOM_A_BUCK_CTRL_CCL_1__POR },
- { TOMTOM_A_BUCK_CTRL_CCL_2, TOMTOM_A_BUCK_CTRL_CCL_2__POR },
- { TOMTOM_A_BUCK_CTRL_CCL_3, TOMTOM_A_BUCK_CTRL_CCL_3__POR },
- { TOMTOM_A_BUCK_CTRL_CCL_4, TOMTOM_A_BUCK_CTRL_CCL_4__POR },
- { TOMTOM_A_BUCK_CTRL_PWM_DRVR_1, TOMTOM_A_BUCK_CTRL_PWM_DRVR_1__POR },
- { TOMTOM_A_BUCK_CTRL_PWM_DRVR_2, TOMTOM_A_BUCK_CTRL_PWM_DRVR_2__POR },
- { TOMTOM_A_BUCK_CTRL_PWM_DRVR_3, TOMTOM_A_BUCK_CTRL_PWM_DRVR_3__POR },
- { TOMTOM_A_BUCK_TMUX_A_D, TOMTOM_A_BUCK_TMUX_A_D__POR },
- { TOMTOM_A_NCP_BUCKREF, TOMTOM_A_NCP_BUCKREF__POR },
- { TOMTOM_A_NCP_EN, TOMTOM_A_NCP_EN__POR },
- { TOMTOM_A_NCP_CLK, TOMTOM_A_NCP_CLK__POR },
- { TOMTOM_A_NCP_STATIC, TOMTOM_A_NCP_STATIC__POR },
- { TOMTOM_A_NCP_VTH_LOW, TOMTOM_A_NCP_VTH_LOW__POR },
- { TOMTOM_A_NCP_VTH_HIGH, TOMTOM_A_NCP_VTH_HIGH__POR },
- { TOMTOM_A_NCP_ATEST, TOMTOM_A_NCP_ATEST__POR },
- { TOMTOM_A_NCP_DTEST, TOMTOM_A_NCP_DTEST__POR },
- { TOMTOM_A_NCP_DLY1, TOMTOM_A_NCP_DLY1__POR },
- { TOMTOM_A_NCP_DLY2, TOMTOM_A_NCP_DLY2__POR },
- { TOMTOM_A_RX_AUX_SW_CTL, TOMTOM_A_RX_AUX_SW_CTL__POR },
- { TOMTOM_A_RX_PA_AUX_IN_CONN, TOMTOM_A_RX_PA_AUX_IN_CONN__POR },
- { TOMTOM_A_RX_COM_TIMER_DIV, TOMTOM_A_RX_COM_TIMER_DIV__POR },
- { TOMTOM_A_RX_COM_OCP_CTL, TOMTOM_A_RX_COM_OCP_CTL__POR },
- { TOMTOM_A_RX_COM_OCP_COUNT, TOMTOM_A_RX_COM_OCP_COUNT__POR },
- { TOMTOM_A_RX_COM_DAC_CTL, TOMTOM_A_RX_COM_DAC_CTL__POR },
- { TOMTOM_A_RX_COM_BIAS, TOMTOM_A_RX_COM_BIAS__POR },
- { TOMTOM_A_RX_HPH_AUTO_CHOP, TOMTOM_A_RX_HPH_AUTO_CHOP__POR },
- { TOMTOM_A_RX_HPH_CHOP_CTL, TOMTOM_A_RX_HPH_CHOP_CTL__POR },
- { TOMTOM_A_RX_HPH_BIAS_PA, TOMTOM_A_RX_HPH_BIAS_PA__POR },
- { TOMTOM_A_RX_HPH_BIAS_LDO, TOMTOM_A_RX_HPH_BIAS_LDO__POR },
- { TOMTOM_A_RX_HPH_BIAS_CNP, TOMTOM_A_RX_HPH_BIAS_CNP__POR },
- { TOMTOM_A_RX_HPH_BIAS_WG_OCP, TOMTOM_A_RX_HPH_BIAS_WG_OCP__POR },
- { TOMTOM_A_RX_HPH_OCP_CTL, TOMTOM_A_RX_HPH_OCP_CTL__POR },
- { TOMTOM_A_RX_HPH_CNP_EN, TOMTOM_A_RX_HPH_CNP_EN__POR },
- { TOMTOM_A_RX_HPH_CNP_WG_CTL, TOMTOM_A_RX_HPH_CNP_WG_CTL__POR },
- { TOMTOM_A_RX_HPH_CNP_WG_TIME, TOMTOM_A_RX_HPH_CNP_WG_TIME__POR },
- { TOMTOM_A_RX_HPH_L_GAIN, TOMTOM_A_RX_HPH_L_GAIN__POR },
- { TOMTOM_A_RX_HPH_L_TEST, TOMTOM_A_RX_HPH_L_TEST__POR },
- { TOMTOM_A_RX_HPH_L_PA_CTL, TOMTOM_A_RX_HPH_L_PA_CTL__POR },
- { TOMTOM_A_RX_HPH_L_DAC_CTL, TOMTOM_A_RX_HPH_L_DAC_CTL__POR },
- { TOMTOM_A_RX_HPH_L_ATEST, TOMTOM_A_RX_HPH_L_ATEST__POR },
- { TOMTOM_A_RX_HPH_L_STATUS, TOMTOM_A_RX_HPH_L_STATUS__POR },
- { TOMTOM_A_RX_HPH_R_GAIN, TOMTOM_A_RX_HPH_R_GAIN__POR },
- { TOMTOM_A_RX_HPH_R_TEST, TOMTOM_A_RX_HPH_R_TEST__POR },
- { TOMTOM_A_RX_HPH_R_PA_CTL, TOMTOM_A_RX_HPH_R_PA_CTL__POR },
- { TOMTOM_A_RX_HPH_R_DAC_CTL, TOMTOM_A_RX_HPH_R_DAC_CTL__POR },
- { TOMTOM_A_RX_HPH_R_ATEST, TOMTOM_A_RX_HPH_R_ATEST__POR },
- { TOMTOM_A_RX_HPH_R_STATUS, TOMTOM_A_RX_HPH_R_STATUS__POR },
- { TOMTOM_A_RX_EAR_BIAS_PA, TOMTOM_A_RX_EAR_BIAS_PA__POR },
- { TOMTOM_A_RX_EAR_BIAS_CMBUFF, TOMTOM_A_RX_EAR_BIAS_CMBUFF__POR },
- { TOMTOM_A_RX_EAR_EN, TOMTOM_A_RX_EAR_EN__POR },
- { TOMTOM_A_RX_EAR_GAIN, TOMTOM_A_RX_EAR_GAIN__POR },
- { TOMTOM_A_RX_EAR_CMBUFF, TOMTOM_A_RX_EAR_CMBUFF__POR },
- { TOMTOM_A_RX_EAR_ICTL, TOMTOM_A_RX_EAR_ICTL__POR },
- { TOMTOM_A_RX_EAR_CCOMP, TOMTOM_A_RX_EAR_CCOMP__POR },
- { TOMTOM_A_RX_EAR_VCM, TOMTOM_A_RX_EAR_VCM__POR },
- { TOMTOM_A_RX_EAR_CNP, TOMTOM_A_RX_EAR_CNP__POR },
- { TOMTOM_A_RX_EAR_DAC_CTL_ATEST, TOMTOM_A_RX_EAR_DAC_CTL_ATEST__POR },
- { TOMTOM_A_RX_EAR_STATUS, TOMTOM_A_RX_EAR_STATUS__POR },
- { TOMTOM_A_RX_LINE_BIAS_PA, TOMTOM_A_RX_LINE_BIAS_PA__POR },
- { TOMTOM_A_RX_BUCK_BIAS1, TOMTOM_A_RX_BUCK_BIAS1__POR },
- { TOMTOM_A_RX_BUCK_BIAS2, TOMTOM_A_RX_BUCK_BIAS2__POR },
- { TOMTOM_A_RX_LINE_COM, TOMTOM_A_RX_LINE_COM__POR },
- { TOMTOM_A_RX_LINE_CNP_EN, TOMTOM_A_RX_LINE_CNP_EN__POR },
- { TOMTOM_A_RX_LINE_CNP_WG_CTL, TOMTOM_A_RX_LINE_CNP_WG_CTL__POR },
- { TOMTOM_A_RX_LINE_CNP_WG_TIME, TOMTOM_A_RX_LINE_CNP_WG_TIME__POR },
- { TOMTOM_A_RX_LINE_1_GAIN, TOMTOM_A_RX_LINE_1_GAIN__POR },
- { TOMTOM_A_RX_LINE_1_TEST, TOMTOM_A_RX_LINE_1_TEST__POR },
- { TOMTOM_A_RX_LINE_1_DAC_CTL, TOMTOM_A_RX_LINE_1_DAC_CTL__POR },
- { TOMTOM_A_RX_LINE_1_STATUS, TOMTOM_A_RX_LINE_1_STATUS__POR },
- { TOMTOM_A_RX_LINE_2_GAIN, TOMTOM_A_RX_LINE_2_GAIN__POR },
- { TOMTOM_A_RX_LINE_2_TEST, TOMTOM_A_RX_LINE_2_TEST__POR },
- { TOMTOM_A_RX_LINE_2_DAC_CTL, TOMTOM_A_RX_LINE_2_DAC_CTL__POR },
- { TOMTOM_A_RX_LINE_2_STATUS, TOMTOM_A_RX_LINE_2_STATUS__POR },
- { TOMTOM_A_RX_LINE_3_GAIN, TOMTOM_A_RX_LINE_3_GAIN__POR },
- { TOMTOM_A_RX_LINE_3_TEST, TOMTOM_A_RX_LINE_3_TEST__POR },
- { TOMTOM_A_RX_LINE_3_DAC_CTL, TOMTOM_A_RX_LINE_3_DAC_CTL__POR },
- { TOMTOM_A_RX_LINE_3_STATUS, TOMTOM_A_RX_LINE_3_STATUS__POR },
- { TOMTOM_A_RX_LINE_4_GAIN, TOMTOM_A_RX_LINE_4_GAIN__POR },
- { TOMTOM_A_RX_LINE_4_TEST, TOMTOM_A_RX_LINE_4_TEST__POR },
- { TOMTOM_A_RX_LINE_4_DAC_CTL, TOMTOM_A_RX_LINE_4_DAC_CTL__POR },
- { TOMTOM_A_RX_LINE_4_STATUS, TOMTOM_A_RX_LINE_4_STATUS__POR },
- { TOMTOM_A_RX_LINE_CNP_DBG, TOMTOM_A_RX_LINE_CNP_DBG__POR },
- { TOMTOM_A_SPKR_DRV1_EN, TOMTOM_A_SPKR_DRV1_EN__POR },
- { TOMTOM_A_SPKR_DRV1_GAIN, TOMTOM_A_SPKR_DRV1_GAIN__POR },
- { TOMTOM_A_SPKR_DRV1_DAC_CTL, TOMTOM_A_SPKR_DRV1_DAC_CTL__POR },
- { TOMTOM_A_SPKR_DRV1_OCP_CTL, TOMTOM_A_SPKR_DRV1_OCP_CTL__POR },
- { TOMTOM_A_SPKR_DRV1_CLIP_DET, TOMTOM_A_SPKR_DRV1_CLIP_DET__POR },
- { TOMTOM_A_SPKR_DRV1_IEC, TOMTOM_A_SPKR_DRV1_IEC__POR },
- { TOMTOM_A_SPKR_DRV1_DBG_DAC, TOMTOM_A_SPKR_DRV1_DBG_DAC__POR },
- { TOMTOM_A_SPKR_DRV1_DBG_PA, TOMTOM_A_SPKR_DRV1_DBG_PA__POR },
- { TOMTOM_A_SPKR_DRV1_DBG_PWRSTG, TOMTOM_A_SPKR_DRV1_DBG_PWRSTG__POR },
- { TOMTOM_A_SPKR_DRV1_BIAS_LDO, TOMTOM_A_SPKR_DRV1_BIAS_LDO__POR },
- { TOMTOM_A_SPKR_DRV1_BIAS_INT, TOMTOM_A_SPKR_DRV1_BIAS_INT__POR },
- { TOMTOM_A_SPKR_DRV1_BIAS_PA, TOMTOM_A_SPKR_DRV1_BIAS_PA__POR },
- { TOMTOM_A_SPKR_DRV1_STATUS_OCP, TOMTOM_A_SPKR_DRV1_STATUS_OCP__POR },
- { TOMTOM_A_SPKR_DRV1_STATUS_PA, TOMTOM_A_SPKR_DRV1_STATUS_PA__POR },
- { TOMTOM_A_SPKR1_PROT_EN, TOMTOM_A_SPKR1_PROT_EN__POR },
- { TOMTOM_A_SPKR1_PROT_ADC_TEST_EN,
- TOMTOM_A_SPKR1_PROT_ADC_TEST_EN__POR },
- { TOMTOM_A_SPKR1_PROT_ATEST, TOMTOM_A_SPKR1_PROT_ATEST__POR },
- { TOMTOM_A_SPKR1_PROT_LDO_CTRL, TOMTOM_A_SPKR1_PROT_LDO_CTRL__POR },
- { TOMTOM_A_SPKR1_PROT_ISENSE_CTRL,
- TOMTOM_A_SPKR1_PROT_ISENSE_CTRL__POR },
- { TOMTOM_A_SPKR1_PROT_VSENSE_CTRL,
- TOMTOM_A_SPKR1_PROT_VSENSE_CTRL__POR },
- { TOMTOM_A_SPKR2_PROT_EN, TOMTOM_A_SPKR2_PROT_EN__POR },
- { TOMTOM_A_SPKR2_PROT_ADC_TEST_EN,
- TOMTOM_A_SPKR2_PROT_ADC_TEST_EN__POR },
- { TOMTOM_A_SPKR2_PROT_ATEST, TOMTOM_A_SPKR2_PROT_ATEST__POR },
- { TOMTOM_A_SPKR2_PROT_LDO_CTRL, TOMTOM_A_SPKR2_PROT_LDO_CTRL__POR },
- { TOMTOM_A_SPKR2_PROT_ISENSE_CTRL,
- TOMTOM_A_SPKR2_PROT_ISENSE_CTRL__POR },
- { TOMTOM_A_SPKR2_PROT_VSENSE_CTRL,
- TOMTOM_A_SPKR2_PROT_VSENSE_CTRL__POR },
- { TOMTOM_A_MBHC_HPH, TOMTOM_A_MBHC_HPH__POR },
- { TOMTOM_A_CDC_ANC1_B1_CTL, TOMTOM_A_CDC_ANC1_B1_CTL__POR },
- { TOMTOM_A_CDC_ANC2_B1_CTL, TOMTOM_A_CDC_ANC2_B1_CTL__POR },
- { TOMTOM_A_CDC_ANC1_SHIFT, TOMTOM_A_CDC_ANC1_SHIFT__POR },
- { TOMTOM_A_CDC_ANC2_SHIFT, TOMTOM_A_CDC_ANC2_SHIFT__POR },
- { TOMTOM_A_CDC_ANC1_IIR_B1_CTL, TOMTOM_A_CDC_ANC1_IIR_B1_CTL__POR },
- { TOMTOM_A_CDC_ANC2_IIR_B1_CTL, TOMTOM_A_CDC_ANC2_IIR_B1_CTL__POR },
- { TOMTOM_A_CDC_ANC1_IIR_B2_CTL, TOMTOM_A_CDC_ANC1_IIR_B2_CTL__POR },
- { TOMTOM_A_CDC_ANC2_IIR_B2_CTL, TOMTOM_A_CDC_ANC2_IIR_B2_CTL__POR },
- { TOMTOM_A_CDC_ANC1_IIR_B3_CTL, TOMTOM_A_CDC_ANC1_IIR_B3_CTL__POR },
- { TOMTOM_A_CDC_ANC2_IIR_B3_CTL, TOMTOM_A_CDC_ANC2_IIR_B3_CTL__POR },
- { TOMTOM_A_CDC_ANC1_LPF_B1_CTL, TOMTOM_A_CDC_ANC1_LPF_B1_CTL__POR },
- { TOMTOM_A_CDC_ANC2_LPF_B1_CTL, TOMTOM_A_CDC_ANC2_LPF_B1_CTL__POR },
- { TOMTOM_A_CDC_ANC1_LPF_B2_CTL, TOMTOM_A_CDC_ANC1_LPF_B2_CTL__POR },
- { TOMTOM_A_CDC_ANC2_LPF_B2_CTL, TOMTOM_A_CDC_ANC2_LPF_B2_CTL__POR },
- { TOMTOM_A_CDC_ANC1_SPARE, TOMTOM_A_CDC_ANC1_SPARE__POR },
- { TOMTOM_A_CDC_ANC2_SPARE, TOMTOM_A_CDC_ANC2_SPARE__POR },
- { TOMTOM_A_CDC_ANC1_SMLPF_CTL, TOMTOM_A_CDC_ANC1_SMLPF_CTL__POR },
- { TOMTOM_A_CDC_ANC2_SMLPF_CTL, TOMTOM_A_CDC_ANC2_SMLPF_CTL__POR },
- { TOMTOM_A_CDC_ANC1_DCFLT_CTL, TOMTOM_A_CDC_ANC1_DCFLT_CTL__POR },
- { TOMTOM_A_CDC_ANC2_DCFLT_CTL, TOMTOM_A_CDC_ANC2_DCFLT_CTL__POR },
- { TOMTOM_A_CDC_ANC1_GAIN_CTL, TOMTOM_A_CDC_ANC1_GAIN_CTL__POR },
- { TOMTOM_A_CDC_ANC2_GAIN_CTL, TOMTOM_A_CDC_ANC2_GAIN_CTL__POR },
- { TOMTOM_A_CDC_ANC1_B2_CTL, TOMTOM_A_CDC_ANC1_B2_CTL__POR },
- { TOMTOM_A_CDC_ANC2_B2_CTL, TOMTOM_A_CDC_ANC2_B2_CTL__POR },
- { TOMTOM_A_CDC_TX1_VOL_CTL_TIMER, TOMTOM_A_CDC_TX1_VOL_CTL_TIMER__POR },
- { TOMTOM_A_CDC_TX2_VOL_CTL_TIMER, TOMTOM_A_CDC_TX2_VOL_CTL_TIMER__POR },
- { TOMTOM_A_CDC_TX3_VOL_CTL_TIMER, TOMTOM_A_CDC_TX3_VOL_CTL_TIMER__POR },
- { TOMTOM_A_CDC_TX4_VOL_CTL_TIMER, TOMTOM_A_CDC_TX4_VOL_CTL_TIMER__POR },
- { TOMTOM_A_CDC_TX5_VOL_CTL_TIMER, TOMTOM_A_CDC_TX5_VOL_CTL_TIMER__POR },
- { TOMTOM_A_CDC_TX6_VOL_CTL_TIMER, TOMTOM_A_CDC_TX6_VOL_CTL_TIMER__POR },
- { TOMTOM_A_CDC_TX7_VOL_CTL_TIMER, TOMTOM_A_CDC_TX7_VOL_CTL_TIMER__POR },
- { TOMTOM_A_CDC_TX8_VOL_CTL_TIMER, TOMTOM_A_CDC_TX8_VOL_CTL_TIMER__POR },
- { TOMTOM_A_CDC_TX9_VOL_CTL_TIMER, TOMTOM_A_CDC_TX9_VOL_CTL_TIMER__POR },
- { TOMTOM_A_CDC_TX10_VOL_CTL_TIMER,
- TOMTOM_A_CDC_TX10_VOL_CTL_TIMER__POR },
- { TOMTOM_A_CDC_TX1_VOL_CTL_GAIN, TOMTOM_A_CDC_TX1_VOL_CTL_GAIN__POR },
- { TOMTOM_A_CDC_TX2_VOL_CTL_GAIN, TOMTOM_A_CDC_TX2_VOL_CTL_GAIN__POR },
- { TOMTOM_A_CDC_TX3_VOL_CTL_GAIN, TOMTOM_A_CDC_TX3_VOL_CTL_GAIN__POR },
- { TOMTOM_A_CDC_TX4_VOL_CTL_GAIN, TOMTOM_A_CDC_TX4_VOL_CTL_GAIN__POR },
- { TOMTOM_A_CDC_TX5_VOL_CTL_GAIN, TOMTOM_A_CDC_TX5_VOL_CTL_GAIN__POR },
- { TOMTOM_A_CDC_TX6_VOL_CTL_GAIN, TOMTOM_A_CDC_TX6_VOL_CTL_GAIN__POR },
- { TOMTOM_A_CDC_TX7_VOL_CTL_GAIN, TOMTOM_A_CDC_TX7_VOL_CTL_GAIN__POR },
- { TOMTOM_A_CDC_TX8_VOL_CTL_GAIN, TOMTOM_A_CDC_TX8_VOL_CTL_GAIN__POR },
- { TOMTOM_A_CDC_TX9_VOL_CTL_GAIN, TOMTOM_A_CDC_TX9_VOL_CTL_GAIN__POR },
- { TOMTOM_A_CDC_TX10_VOL_CTL_GAIN, TOMTOM_A_CDC_TX10_VOL_CTL_GAIN__POR },
- { TOMTOM_A_CDC_TX1_VOL_CTL_CFG, TOMTOM_A_CDC_TX1_VOL_CTL_CFG__POR },
- { TOMTOM_A_CDC_TX2_VOL_CTL_CFG, TOMTOM_A_CDC_TX2_VOL_CTL_CFG__POR },
- { TOMTOM_A_CDC_TX3_VOL_CTL_CFG, TOMTOM_A_CDC_TX3_VOL_CTL_CFG__POR },
- { TOMTOM_A_CDC_TX4_VOL_CTL_CFG, TOMTOM_A_CDC_TX4_VOL_CTL_CFG__POR },
- { TOMTOM_A_CDC_TX5_VOL_CTL_CFG, TOMTOM_A_CDC_TX5_VOL_CTL_CFG__POR },
- { TOMTOM_A_CDC_TX6_VOL_CTL_CFG, TOMTOM_A_CDC_TX6_VOL_CTL_CFG__POR },
- { TOMTOM_A_CDC_TX7_VOL_CTL_CFG, TOMTOM_A_CDC_TX7_VOL_CTL_CFG__POR },
- { TOMTOM_A_CDC_TX8_VOL_CTL_CFG, TOMTOM_A_CDC_TX8_VOL_CTL_CFG__POR },
- { TOMTOM_A_CDC_TX9_VOL_CTL_CFG, TOMTOM_A_CDC_TX9_VOL_CTL_CFG__POR },
- { TOMTOM_A_CDC_TX10_VOL_CTL_CFG, TOMTOM_A_CDC_TX10_VOL_CTL_CFG__POR },
- { TOMTOM_A_CDC_TX1_MUX_CTL, TOMTOM_A_CDC_TX1_MUX_CTL__POR },
- { TOMTOM_A_CDC_TX2_MUX_CTL, TOMTOM_A_CDC_TX2_MUX_CTL__POR },
- { TOMTOM_A_CDC_TX3_MUX_CTL, TOMTOM_A_CDC_TX3_MUX_CTL__POR },
- { TOMTOM_A_CDC_TX4_MUX_CTL, TOMTOM_A_CDC_TX4_MUX_CTL__POR },
- { TOMTOM_A_CDC_TX5_MUX_CTL, TOMTOM_A_CDC_TX5_MUX_CTL__POR },
- { TOMTOM_A_CDC_TX6_MUX_CTL, TOMTOM_A_CDC_TX6_MUX_CTL__POR },
- { TOMTOM_A_CDC_TX7_MUX_CTL, TOMTOM_A_CDC_TX7_MUX_CTL__POR },
- { TOMTOM_A_CDC_TX8_MUX_CTL, TOMTOM_A_CDC_TX8_MUX_CTL__POR },
- { TOMTOM_A_CDC_TX9_MUX_CTL, TOMTOM_A_CDC_TX9_MUX_CTL__POR },
- { TOMTOM_A_CDC_TX10_MUX_CTL, TOMTOM_A_CDC_TX10_MUX_CTL__POR },
- { TOMTOM_A_CDC_TX1_CLK_FS_CTL, TOMTOM_A_CDC_TX1_CLK_FS_CTL__POR },
- { TOMTOM_A_CDC_TX2_CLK_FS_CTL, TOMTOM_A_CDC_TX2_CLK_FS_CTL__POR },
- { TOMTOM_A_CDC_TX3_CLK_FS_CTL, TOMTOM_A_CDC_TX3_CLK_FS_CTL__POR },
- { TOMTOM_A_CDC_TX4_CLK_FS_CTL, TOMTOM_A_CDC_TX4_CLK_FS_CTL__POR },
- { TOMTOM_A_CDC_TX5_CLK_FS_CTL, TOMTOM_A_CDC_TX5_CLK_FS_CTL__POR },
- { TOMTOM_A_CDC_TX6_CLK_FS_CTL, TOMTOM_A_CDC_TX6_CLK_FS_CTL__POR },
- { TOMTOM_A_CDC_TX7_CLK_FS_CTL, TOMTOM_A_CDC_TX7_CLK_FS_CTL__POR },
- { TOMTOM_A_CDC_TX8_CLK_FS_CTL, TOMTOM_A_CDC_TX8_CLK_FS_CTL__POR },
- { TOMTOM_A_CDC_TX9_CLK_FS_CTL, TOMTOM_A_CDC_TX9_CLK_FS_CTL__POR },
- { TOMTOM_A_CDC_TX10_CLK_FS_CTL, TOMTOM_A_CDC_TX10_CLK_FS_CTL__POR },
- { TOMTOM_A_CDC_TX1_DMIC_CTL, TOMTOM_A_CDC_TX1_DMIC_CTL__POR },
- { TOMTOM_A_CDC_TX2_DMIC_CTL, TOMTOM_A_CDC_TX2_DMIC_CTL__POR },
- { TOMTOM_A_CDC_TX3_DMIC_CTL, TOMTOM_A_CDC_TX3_DMIC_CTL__POR },
- { TOMTOM_A_CDC_TX4_DMIC_CTL, TOMTOM_A_CDC_TX4_DMIC_CTL__POR },
- { TOMTOM_A_CDC_TX5_DMIC_CTL, TOMTOM_A_CDC_TX5_DMIC_CTL__POR },
- { TOMTOM_A_CDC_TX6_DMIC_CTL, TOMTOM_A_CDC_TX6_DMIC_CTL__POR },
- { TOMTOM_A_CDC_TX7_DMIC_CTL, TOMTOM_A_CDC_TX7_DMIC_CTL__POR },
- { TOMTOM_A_CDC_TX8_DMIC_CTL, TOMTOM_A_CDC_TX8_DMIC_CTL__POR },
- { TOMTOM_A_CDC_TX9_DMIC_CTL, TOMTOM_A_CDC_TX9_DMIC_CTL__POR },
- { TOMTOM_A_CDC_TX10_DMIC_CTL, TOMTOM_A_CDC_TX10_DMIC_CTL__POR },
- { TOMTOM_A_CDC_SPKR_CLIPDET_VAL0, TOMTOM_A_CDC_SPKR_CLIPDET_VAL0__POR },
- { TOMTOM_A_CDC_SPKR_CLIPDET_VAL1, TOMTOM_A_CDC_SPKR_CLIPDET_VAL1__POR },
- { TOMTOM_A_CDC_SPKR_CLIPDET_VAL2, TOMTOM_A_CDC_SPKR_CLIPDET_VAL2__POR },
- { TOMTOM_A_CDC_SPKR_CLIPDET_VAL3, TOMTOM_A_CDC_SPKR_CLIPDET_VAL3__POR },
- { TOMTOM_A_CDC_SPKR_CLIPDET_VAL4, TOMTOM_A_CDC_SPKR_CLIPDET_VAL4__POR },
- { TOMTOM_A_CDC_SPKR_CLIPDET_VAL5, TOMTOM_A_CDC_SPKR_CLIPDET_VAL5__POR },
- { TOMTOM_A_CDC_SPKR_CLIPDET_VAL6, TOMTOM_A_CDC_SPKR_CLIPDET_VAL6__POR },
- { TOMTOM_A_CDC_SPKR_CLIPDET_VAL7, TOMTOM_A_CDC_SPKR_CLIPDET_VAL7__POR },
- { TOMTOM_A_CDC_DEBUG_B1_CTL, TOMTOM_A_CDC_DEBUG_B1_CTL__POR },
- { TOMTOM_A_CDC_DEBUG_B2_CTL, TOMTOM_A_CDC_DEBUG_B2_CTL__POR },
- { TOMTOM_A_CDC_DEBUG_B3_CTL, TOMTOM_A_CDC_DEBUG_B3_CTL__POR },
- { TOMTOM_A_CDC_DEBUG_B4_CTL, TOMTOM_A_CDC_DEBUG_B4_CTL__POR },
- { TOMTOM_A_CDC_DEBUG_B5_CTL, TOMTOM_A_CDC_DEBUG_B5_CTL__POR },
- { TOMTOM_A_CDC_DEBUG_B6_CTL, TOMTOM_A_CDC_DEBUG_B6_CTL__POR },
- { TOMTOM_A_CDC_DEBUG_B7_CTL, TOMTOM_A_CDC_DEBUG_B7_CTL__POR },
- { TOMTOM_A_CDC_SRC1_PDA_CFG, TOMTOM_A_CDC_SRC1_PDA_CFG__POR },
- { TOMTOM_A_CDC_SRC2_PDA_CFG, TOMTOM_A_CDC_SRC2_PDA_CFG__POR },
- { TOMTOM_A_CDC_SRC1_FS_CTL, TOMTOM_A_CDC_SRC1_FS_CTL__POR },
- { TOMTOM_A_CDC_SRC2_FS_CTL, TOMTOM_A_CDC_SRC2_FS_CTL__POR },
- { TOMTOM_A_CDC_RX1_B1_CTL, TOMTOM_A_CDC_RX1_B1_CTL__POR },
- { TOMTOM_A_CDC_RX2_B1_CTL, TOMTOM_A_CDC_RX2_B1_CTL__POR },
- { TOMTOM_A_CDC_RX3_B1_CTL, TOMTOM_A_CDC_RX3_B1_CTL__POR },
- { TOMTOM_A_CDC_RX4_B1_CTL, TOMTOM_A_CDC_RX4_B1_CTL__POR },
- { TOMTOM_A_CDC_RX5_B1_CTL, TOMTOM_A_CDC_RX5_B1_CTL__POR },
- { TOMTOM_A_CDC_RX6_B1_CTL, TOMTOM_A_CDC_RX6_B1_CTL__POR },
- { TOMTOM_A_CDC_RX7_B1_CTL, TOMTOM_A_CDC_RX7_B1_CTL__POR },
- { TOMTOM_A_CDC_RX1_B2_CTL, TOMTOM_A_CDC_RX1_B2_CTL__POR },
- { TOMTOM_A_CDC_RX2_B2_CTL, TOMTOM_A_CDC_RX2_B2_CTL__POR },
- { TOMTOM_A_CDC_RX3_B2_CTL, TOMTOM_A_CDC_RX3_B2_CTL__POR },
- { TOMTOM_A_CDC_RX4_B2_CTL, TOMTOM_A_CDC_RX4_B2_CTL__POR },
- { TOMTOM_A_CDC_RX5_B2_CTL, TOMTOM_A_CDC_RX5_B2_CTL__POR },
- { TOMTOM_A_CDC_RX6_B2_CTL, TOMTOM_A_CDC_RX6_B2_CTL__POR },
- { TOMTOM_A_CDC_RX7_B2_CTL, TOMTOM_A_CDC_RX7_B2_CTL__POR },
- { TOMTOM_A_CDC_RX1_B3_CTL, TOMTOM_A_CDC_RX1_B3_CTL__POR },
- { TOMTOM_A_CDC_RX2_B3_CTL, TOMTOM_A_CDC_RX2_B3_CTL__POR },
- { TOMTOM_A_CDC_RX3_B3_CTL, TOMTOM_A_CDC_RX3_B3_CTL__POR },
- { TOMTOM_A_CDC_RX4_B3_CTL, TOMTOM_A_CDC_RX4_B3_CTL__POR },
- { TOMTOM_A_CDC_RX5_B3_CTL, TOMTOM_A_CDC_RX5_B3_CTL__POR },
- { TOMTOM_A_CDC_RX6_B3_CTL, TOMTOM_A_CDC_RX6_B3_CTL__POR },
- { TOMTOM_A_CDC_RX7_B3_CTL, TOMTOM_A_CDC_RX7_B3_CTL__POR },
- { TOMTOM_A_CDC_RX1_B4_CTL, TOMTOM_A_CDC_RX1_B4_CTL__POR },
- { TOMTOM_A_CDC_RX2_B4_CTL, TOMTOM_A_CDC_RX2_B4_CTL__POR },
- { TOMTOM_A_CDC_RX3_B4_CTL, TOMTOM_A_CDC_RX3_B4_CTL__POR },
- { TOMTOM_A_CDC_RX4_B4_CTL, TOMTOM_A_CDC_RX4_B4_CTL__POR },
- { TOMTOM_A_CDC_RX5_B4_CTL, TOMTOM_A_CDC_RX5_B4_CTL__POR },
- { TOMTOM_A_CDC_RX6_B4_CTL, TOMTOM_A_CDC_RX6_B4_CTL__POR },
- { TOMTOM_A_CDC_RX7_B4_CTL, TOMTOM_A_CDC_RX7_B4_CTL__POR },
- { TOMTOM_A_CDC_RX1_B5_CTL, TOMTOM_A_CDC_RX1_B5_CTL__POR },
- { TOMTOM_A_CDC_RX2_B5_CTL, TOMTOM_A_CDC_RX2_B5_CTL__POR },
- { TOMTOM_A_CDC_RX3_B5_CTL, TOMTOM_A_CDC_RX3_B5_CTL__POR },
- { TOMTOM_A_CDC_RX4_B5_CTL, TOMTOM_A_CDC_RX4_B5_CTL__POR },
- { TOMTOM_A_CDC_RX5_B5_CTL, TOMTOM_A_CDC_RX5_B5_CTL__POR },
- { TOMTOM_A_CDC_RX6_B5_CTL, TOMTOM_A_CDC_RX6_B5_CTL__POR },
- { TOMTOM_A_CDC_RX7_B5_CTL, TOMTOM_A_CDC_RX7_B5_CTL__POR },
- { TOMTOM_A_CDC_RX1_B6_CTL, TOMTOM_A_CDC_RX1_B6_CTL__POR },
- { TOMTOM_A_CDC_RX2_B6_CTL, TOMTOM_A_CDC_RX2_B6_CTL__POR },
- { TOMTOM_A_CDC_RX3_B6_CTL, TOMTOM_A_CDC_RX3_B6_CTL__POR },
- { TOMTOM_A_CDC_RX4_B6_CTL, TOMTOM_A_CDC_RX4_B6_CTL__POR },
- { TOMTOM_A_CDC_RX5_B6_CTL, TOMTOM_A_CDC_RX5_B6_CTL__POR },
- { TOMTOM_A_CDC_RX6_B6_CTL, TOMTOM_A_CDC_RX6_B6_CTL__POR },
- { TOMTOM_A_CDC_RX7_B6_CTL, TOMTOM_A_CDC_RX7_B6_CTL__POR },
- { TOMTOM_A_CDC_RX1_VOL_CTL_B1_CTL,
- TOMTOM_A_CDC_RX1_VOL_CTL_B1_CTL__POR },
- { TOMTOM_A_CDC_RX2_VOL_CTL_B1_CTL,
- TOMTOM_A_CDC_RX2_VOL_CTL_B1_CTL__POR },
- { TOMTOM_A_CDC_RX3_VOL_CTL_B1_CTL,
- TOMTOM_A_CDC_RX3_VOL_CTL_B1_CTL__POR },
- { TOMTOM_A_CDC_RX4_VOL_CTL_B1_CTL,
- TOMTOM_A_CDC_RX4_VOL_CTL_B1_CTL__POR },
- { TOMTOM_A_CDC_RX5_VOL_CTL_B1_CTL,
- TOMTOM_A_CDC_RX5_VOL_CTL_B1_CTL__POR },
- { TOMTOM_A_CDC_RX6_VOL_CTL_B1_CTL,
- TOMTOM_A_CDC_RX6_VOL_CTL_B1_CTL__POR },
- { TOMTOM_A_CDC_RX7_VOL_CTL_B1_CTL,
- TOMTOM_A_CDC_RX7_VOL_CTL_B1_CTL__POR },
- { TOMTOM_A_CDC_RX1_VOL_CTL_B2_CTL,
- TOMTOM_A_CDC_RX1_VOL_CTL_B2_CTL__POR },
- { TOMTOM_A_CDC_RX2_VOL_CTL_B2_CTL,
- TOMTOM_A_CDC_RX2_VOL_CTL_B2_CTL__POR },
- { TOMTOM_A_CDC_RX3_VOL_CTL_B2_CTL,
- TOMTOM_A_CDC_RX3_VOL_CTL_B2_CTL__POR },
- { TOMTOM_A_CDC_RX4_VOL_CTL_B2_CTL,
- TOMTOM_A_CDC_RX4_VOL_CTL_B2_CTL__POR },
- { TOMTOM_A_CDC_RX5_VOL_CTL_B2_CTL,
- TOMTOM_A_CDC_RX5_VOL_CTL_B2_CTL__POR },
- { TOMTOM_A_CDC_RX6_VOL_CTL_B2_CTL,
- TOMTOM_A_CDC_RX6_VOL_CTL_B2_CTL__POR },
- { TOMTOM_A_CDC_RX7_VOL_CTL_B2_CTL,
- TOMTOM_A_CDC_RX7_VOL_CTL_B2_CTL__POR },
- { TOMTOM_A_CDC_VBAT_CFG, TOMTOM_A_CDC_VBAT_CFG__POR },
- { TOMTOM_A_CDC_VBAT_ADC_CAL1, TOMTOM_A_CDC_VBAT_ADC_CAL1__POR },
- { TOMTOM_A_CDC_VBAT_ADC_CAL2, TOMTOM_A_CDC_VBAT_ADC_CAL2__POR },
- { TOMTOM_A_CDC_VBAT_ADC_CAL3, TOMTOM_A_CDC_VBAT_ADC_CAL3__POR },
- { TOMTOM_A_CDC_VBAT_PK_EST1, TOMTOM_A_CDC_VBAT_PK_EST1__POR },
- { TOMTOM_A_CDC_VBAT_PK_EST2, TOMTOM_A_CDC_VBAT_PK_EST2__POR },
- { TOMTOM_A_CDC_VBAT_PK_EST3, TOMTOM_A_CDC_VBAT_PK_EST3__POR },
- { TOMTOM_A_CDC_VBAT_RF_PROC1, TOMTOM_A_CDC_VBAT_RF_PROC1__POR },
- { TOMTOM_A_CDC_VBAT_RF_PROC2, TOMTOM_A_CDC_VBAT_RF_PROC2__POR },
- { TOMTOM_A_CDC_VBAT_TAC1, TOMTOM_A_CDC_VBAT_TAC1__POR },
- { TOMTOM_A_CDC_VBAT_TAC2, TOMTOM_A_CDC_VBAT_TAC2__POR },
- { TOMTOM_A_CDC_VBAT_TAC3, TOMTOM_A_CDC_VBAT_TAC3__POR },
- { TOMTOM_A_CDC_VBAT_TAC4, TOMTOM_A_CDC_VBAT_TAC4__POR },
- { TOMTOM_A_CDC_VBAT_GAIN_UPD1, TOMTOM_A_CDC_VBAT_GAIN_UPD1__POR },
- { TOMTOM_A_CDC_VBAT_GAIN_UPD2, TOMTOM_A_CDC_VBAT_GAIN_UPD2__POR },
- { TOMTOM_A_CDC_VBAT_GAIN_UPD3, TOMTOM_A_CDC_VBAT_GAIN_UPD3__POR },
- { TOMTOM_A_CDC_VBAT_GAIN_UPD4, TOMTOM_A_CDC_VBAT_GAIN_UPD4__POR },
- { TOMTOM_A_CDC_VBAT_DEBUG1, TOMTOM_A_CDC_VBAT_DEBUG1__POR },
- { TOMTOM_A_CDC_VBAT_GAIN_UPD_MON, TOMTOM_A_CDC_VBAT_GAIN_UPD_MON__POR },
- { TOMTOM_A_CDC_VBAT_GAIN_MON_VAL, TOMTOM_A_CDC_VBAT_GAIN_MON_VAL__POR },
- { TOMTOM_A_CDC_CLK_ANC_RESET_CTL, TOMTOM_A_CDC_CLK_ANC_RESET_CTL__POR },
- { TOMTOM_A_CDC_CLK_RX_RESET_CTL, TOMTOM_A_CDC_CLK_RX_RESET_CTL__POR },
- { TOMTOM_A_CDC_CLK_TX_RESET_B1_CTL,
- TOMTOM_A_CDC_CLK_TX_RESET_B1_CTL__POR },
- { TOMTOM_A_CDC_CLK_TX_RESET_B2_CTL,
- TOMTOM_A_CDC_CLK_TX_RESET_B2_CTL__POR },
- { TOMTOM_A_CDC_CLK_RX_I2S_CTL, TOMTOM_A_CDC_CLK_RX_I2S_CTL__POR },
- { TOMTOM_A_CDC_CLK_TX_I2S_CTL, TOMTOM_A_CDC_CLK_TX_I2S_CTL__POR },
- { TOMTOM_A_CDC_CLK_OTHR_RESET_B1_CTL,
- TOMTOM_A_CDC_CLK_OTHR_RESET_B1_CTL__POR },
- { TOMTOM_A_CDC_CLK_OTHR_RESET_B2_CTL,
- TOMTOM_A_CDC_CLK_OTHR_RESET_B2_CTL__POR },
- { TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL,
- TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL__POR },
- { TOMTOM_A_CDC_CLK_TX_CLK_EN_B2_CTL,
- TOMTOM_A_CDC_CLK_TX_CLK_EN_B2_CTL__POR },
- { TOMTOM_A_CDC_CLK_OTHR_CTL, TOMTOM_A_CDC_CLK_OTHR_CTL__POR },
- { TOMTOM_A_CDC_CLK_ANC_CLK_EN_CTL,
- TOMTOM_A_CDC_CLK_ANC_CLK_EN_CTL__POR },
- { TOMTOM_A_CDC_CLK_RX_B1_CTL, TOMTOM_A_CDC_CLK_RX_B1_CTL__POR },
- { TOMTOM_A_CDC_CLK_RX_B2_CTL, TOMTOM_A_CDC_CLK_RX_B2_CTL__POR },
- { TOMTOM_A_CDC_CLK_MCLK_CTL, TOMTOM_A_CDC_CLK_MCLK_CTL__POR },
- { TOMTOM_A_CDC_CLK_PDM_CTL, TOMTOM_A_CDC_CLK_PDM_CTL__POR },
- { TOMTOM_A_CDC_CLK_SD_CTL, TOMTOM_A_CDC_CLK_SD_CTL__POR },
- { TOMTOM_A_CDC_CLSH_B1_CTL, TOMTOM_A_CDC_CLSH_B1_CTL__POR },
- { TOMTOM_A_CDC_CLSH_B2_CTL, TOMTOM_A_CDC_CLSH_B2_CTL__POR },
- { TOMTOM_A_CDC_CLSH_B3_CTL, TOMTOM_A_CDC_CLSH_B3_CTL__POR },
- { TOMTOM_A_CDC_CLSH_BUCK_NCP_VARS,
- TOMTOM_A_CDC_CLSH_BUCK_NCP_VARS__POR },
- { TOMTOM_A_CDC_CLSH_IDLE_HPH_THSD,
- TOMTOM_A_CDC_CLSH_IDLE_HPH_THSD__POR },
- { TOMTOM_A_CDC_CLSH_IDLE_EAR_THSD,
- TOMTOM_A_CDC_CLSH_IDLE_EAR_THSD__POR },
- { TOMTOM_A_CDC_CLSH_FCLKONLY_HPH_THSD,
- TOMTOM_A_CDC_CLSH_FCLKONLY_HPH_THSD__POR },
- { TOMTOM_A_CDC_CLSH_FCLKONLY_EAR_THSD,
- TOMTOM_A_CDC_CLSH_FCLKONLY_EAR_THSD__POR },
- { TOMTOM_A_CDC_CLSH_K_ADDR, TOMTOM_A_CDC_CLSH_K_ADDR__POR },
- { TOMTOM_A_CDC_CLSH_K_DATA, TOMTOM_A_CDC_CLSH_K_DATA__POR },
- { TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_L,
- TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_L__POR },
- { TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_U,
- TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_U__POR },
- { TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_L,
- TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_L__POR },
- { TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_U,
- TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_U__POR },
- { TOMTOM_A_CDC_CLSH_V_PA_HD_EAR, TOMTOM_A_CDC_CLSH_V_PA_HD_EAR__POR },
- { TOMTOM_A_CDC_CLSH_V_PA_HD_HPH, TOMTOM_A_CDC_CLSH_V_PA_HD_HPH__POR },
- { TOMTOM_A_CDC_CLSH_V_PA_MIN_EAR, TOMTOM_A_CDC_CLSH_V_PA_MIN_EAR__POR },
- { TOMTOM_A_CDC_CLSH_V_PA_MIN_HPH, TOMTOM_A_CDC_CLSH_V_PA_MIN_HPH__POR },
- { TOMTOM_A_CDC_IIR1_GAIN_B1_CTL, TOMTOM_A_CDC_IIR1_GAIN_B1_CTL__POR },
- { TOMTOM_A_CDC_IIR2_GAIN_B1_CTL, TOMTOM_A_CDC_IIR2_GAIN_B1_CTL__POR },
- { TOMTOM_A_CDC_IIR1_GAIN_B2_CTL, TOMTOM_A_CDC_IIR1_GAIN_B2_CTL__POR },
- { TOMTOM_A_CDC_IIR2_GAIN_B2_CTL, TOMTOM_A_CDC_IIR2_GAIN_B2_CTL__POR },
- { TOMTOM_A_CDC_IIR1_GAIN_B3_CTL, TOMTOM_A_CDC_IIR1_GAIN_B3_CTL__POR },
- { TOMTOM_A_CDC_IIR2_GAIN_B3_CTL, TOMTOM_A_CDC_IIR2_GAIN_B3_CTL__POR },
- { TOMTOM_A_CDC_IIR1_GAIN_B4_CTL, TOMTOM_A_CDC_IIR1_GAIN_B4_CTL__POR },
- { TOMTOM_A_CDC_IIR2_GAIN_B4_CTL, TOMTOM_A_CDC_IIR2_GAIN_B4_CTL__POR },
- { TOMTOM_A_CDC_IIR1_GAIN_B5_CTL, TOMTOM_A_CDC_IIR1_GAIN_B5_CTL__POR },
- { TOMTOM_A_CDC_IIR2_GAIN_B5_CTL, TOMTOM_A_CDC_IIR2_GAIN_B5_CTL__POR },
- { TOMTOM_A_CDC_IIR1_GAIN_B6_CTL, TOMTOM_A_CDC_IIR1_GAIN_B6_CTL__POR },
- { TOMTOM_A_CDC_IIR2_GAIN_B6_CTL, TOMTOM_A_CDC_IIR2_GAIN_B6_CTL__POR },
- { TOMTOM_A_CDC_IIR1_GAIN_B7_CTL, TOMTOM_A_CDC_IIR1_GAIN_B7_CTL__POR },
- { TOMTOM_A_CDC_IIR2_GAIN_B7_CTL, TOMTOM_A_CDC_IIR2_GAIN_B7_CTL__POR },
- { TOMTOM_A_CDC_IIR1_GAIN_B8_CTL, TOMTOM_A_CDC_IIR1_GAIN_B8_CTL__POR },
- { TOMTOM_A_CDC_IIR2_GAIN_B8_CTL, TOMTOM_A_CDC_IIR2_GAIN_B8_CTL__POR },
- { TOMTOM_A_CDC_IIR1_CTL, TOMTOM_A_CDC_IIR1_CTL__POR },
- { TOMTOM_A_CDC_IIR2_CTL, TOMTOM_A_CDC_IIR2_CTL__POR },
- { TOMTOM_A_CDC_IIR1_GAIN_TIMER_CTL,
- TOMTOM_A_CDC_IIR1_GAIN_TIMER_CTL__POR },
- { TOMTOM_A_CDC_IIR2_GAIN_TIMER_CTL,
- TOMTOM_A_CDC_IIR2_GAIN_TIMER_CTL__POR },
- { TOMTOM_A_CDC_IIR1_COEF_B1_CTL, TOMTOM_A_CDC_IIR1_COEF_B1_CTL__POR },
- { TOMTOM_A_CDC_IIR2_COEF_B1_CTL, TOMTOM_A_CDC_IIR2_COEF_B1_CTL__POR },
- { TOMTOM_A_CDC_IIR1_COEF_B2_CTL, TOMTOM_A_CDC_IIR1_COEF_B2_CTL__POR },
- { TOMTOM_A_CDC_IIR2_COEF_B2_CTL, TOMTOM_A_CDC_IIR2_COEF_B2_CTL__POR },
- { TOMTOM_A_CDC_TOP_GAIN_UPDATE, TOMTOM_A_CDC_TOP_GAIN_UPDATE__POR },
- { TOMTOM_A_CDC_PA_RAMP_B1_CTL, TOMTOM_A_CDC_PA_RAMP_B1_CTL__POR },
- { TOMTOM_A_CDC_PA_RAMP_B2_CTL, TOMTOM_A_CDC_PA_RAMP_B2_CTL__POR },
- { TOMTOM_A_CDC_PA_RAMP_B3_CTL, TOMTOM_A_CDC_PA_RAMP_B3_CTL__POR },
- { TOMTOM_A_CDC_PA_RAMP_B4_CTL, TOMTOM_A_CDC_PA_RAMP_B4_CTL__POR },
- { TOMTOM_A_CDC_SPKR_CLIPDET_B1_CTL,
- TOMTOM_A_CDC_SPKR_CLIPDET_B1_CTL__POR },
- { TOMTOM_A_CDC_SPKR2_CLIPDET_B1_CTL,
- TOMTOM_A_CDC_SPKR2_CLIPDET_B1_CTL__POR },
- { TOMTOM_A_CDC_COMP0_B1_CTL, TOMTOM_A_CDC_COMP0_B1_CTL__POR },
- { TOMTOM_A_CDC_COMP1_B1_CTL, TOMTOM_A_CDC_COMP1_B1_CTL__POR },
- { TOMTOM_A_CDC_COMP2_B1_CTL, TOMTOM_A_CDC_COMP2_B1_CTL__POR },
- { TOMTOM_A_CDC_COMP0_B2_CTL, TOMTOM_A_CDC_COMP0_B2_CTL__POR },
- { TOMTOM_A_CDC_COMP1_B2_CTL, TOMTOM_A_CDC_COMP1_B2_CTL__POR },
- { TOMTOM_A_CDC_COMP2_B2_CTL, TOMTOM_A_CDC_COMP2_B2_CTL__POR },
- { TOMTOM_A_CDC_COMP0_B3_CTL, TOMTOM_A_CDC_COMP0_B3_CTL__POR },
- { TOMTOM_A_CDC_COMP1_B3_CTL, TOMTOM_A_CDC_COMP1_B3_CTL__POR },
- { TOMTOM_A_CDC_COMP2_B3_CTL, TOMTOM_A_CDC_COMP2_B3_CTL__POR },
- { TOMTOM_A_CDC_COMP0_B4_CTL, TOMTOM_A_CDC_COMP0_B4_CTL__POR },
- { TOMTOM_A_CDC_COMP1_B4_CTL, TOMTOM_A_CDC_COMP1_B4_CTL__POR },
- { TOMTOM_A_CDC_COMP2_B4_CTL, TOMTOM_A_CDC_COMP2_B4_CTL__POR },
- { TOMTOM_A_CDC_COMP0_B5_CTL, TOMTOM_A_CDC_COMP0_B5_CTL__POR },
- { TOMTOM_A_CDC_COMP1_B5_CTL, TOMTOM_A_CDC_COMP1_B5_CTL__POR },
- { TOMTOM_A_CDC_COMP2_B5_CTL, TOMTOM_A_CDC_COMP2_B5_CTL__POR },
- { TOMTOM_A_CDC_COMP0_B6_CTL, TOMTOM_A_CDC_COMP0_B6_CTL__POR },
- { TOMTOM_A_CDC_COMP1_B6_CTL, TOMTOM_A_CDC_COMP1_B6_CTL__POR },
- { TOMTOM_A_CDC_COMP2_B6_CTL, TOMTOM_A_CDC_COMP2_B6_CTL__POR },
- { TOMTOM_A_CDC_COMP0_SHUT_DOWN_STATUS,
- TOMTOM_A_CDC_COMP0_SHUT_DOWN_STATUS__POR },
- { TOMTOM_A_CDC_COMP1_SHUT_DOWN_STATUS,
- TOMTOM_A_CDC_COMP1_SHUT_DOWN_STATUS__POR },
- { TOMTOM_A_CDC_COMP2_SHUT_DOWN_STATUS,
- TOMTOM_A_CDC_COMP2_SHUT_DOWN_STATUS__POR },
- { TOMTOM_A_CDC_COMP0_FS_CFG, TOMTOM_A_CDC_COMP0_FS_CFG__POR },
- { TOMTOM_A_CDC_COMP1_FS_CFG, TOMTOM_A_CDC_COMP1_FS_CFG__POR },
- { TOMTOM_A_CDC_COMP2_FS_CFG, TOMTOM_A_CDC_COMP2_FS_CFG__POR },
- { TOMTOM_A_CDC_CONN_RX1_B1_CTL, TOMTOM_A_CDC_CONN_RX1_B1_CTL__POR },
- { TOMTOM_A_CDC_CONN_RX1_B2_CTL, TOMTOM_A_CDC_CONN_RX1_B2_CTL__POR },
- { TOMTOM_A_CDC_CONN_RX1_B3_CTL, TOMTOM_A_CDC_CONN_RX1_B3_CTL__POR },
- { TOMTOM_A_CDC_CONN_RX2_B1_CTL, TOMTOM_A_CDC_CONN_RX2_B1_CTL__POR },
- { TOMTOM_A_CDC_CONN_RX2_B2_CTL, TOMTOM_A_CDC_CONN_RX2_B2_CTL__POR },
- { TOMTOM_A_CDC_CONN_RX2_B3_CTL, TOMTOM_A_CDC_CONN_RX2_B3_CTL__POR },
- { TOMTOM_A_CDC_CONN_RX3_B1_CTL, TOMTOM_A_CDC_CONN_RX3_B1_CTL__POR },
- { TOMTOM_A_CDC_CONN_RX3_B2_CTL, TOMTOM_A_CDC_CONN_RX3_B2_CTL__POR },
- { TOMTOM_A_CDC_CONN_RX4_B1_CTL, TOMTOM_A_CDC_CONN_RX4_B1_CTL__POR },
- { TOMTOM_A_CDC_CONN_RX4_B2_CTL, TOMTOM_A_CDC_CONN_RX4_B2_CTL__POR },
- { TOMTOM_A_CDC_CONN_RX5_B1_CTL, TOMTOM_A_CDC_CONN_RX5_B1_CTL__POR },
- { TOMTOM_A_CDC_CONN_RX5_B2_CTL, TOMTOM_A_CDC_CONN_RX5_B2_CTL__POR },
- { TOMTOM_A_CDC_CONN_RX6_B1_CTL, TOMTOM_A_CDC_CONN_RX6_B1_CTL__POR },
- { TOMTOM_A_CDC_CONN_RX6_B2_CTL, TOMTOM_A_CDC_CONN_RX6_B2_CTL__POR },
- { TOMTOM_A_CDC_CONN_RX7_B1_CTL, TOMTOM_A_CDC_CONN_RX7_B1_CTL__POR },
- { TOMTOM_A_CDC_CONN_RX7_B2_CTL, TOMTOM_A_CDC_CONN_RX7_B2_CTL__POR },
- { TOMTOM_A_CDC_CONN_RX7_B3_CTL, TOMTOM_A_CDC_CONN_RX7_B3_CTL__POR },
- { TOMTOM_A_CDC_CONN_ANC_B1_CTL, TOMTOM_A_CDC_CONN_ANC_B1_CTL__POR },
- { TOMTOM_A_CDC_CONN_ANC_B2_CTL, TOMTOM_A_CDC_CONN_ANC_B2_CTL__POR },
- { TOMTOM_A_CDC_CONN_TX_B1_CTL, TOMTOM_A_CDC_CONN_TX_B1_CTL__POR },
- { TOMTOM_A_CDC_CONN_TX_B2_CTL, TOMTOM_A_CDC_CONN_TX_B2_CTL__POR },
- { TOMTOM_A_CDC_CONN_TX_B3_CTL, TOMTOM_A_CDC_CONN_TX_B3_CTL__POR },
- { TOMTOM_A_CDC_CONN_TX_B4_CTL, TOMTOM_A_CDC_CONN_TX_B4_CTL__POR },
- { TOMTOM_A_CDC_CONN_EQ1_B1_CTL, TOMTOM_A_CDC_CONN_EQ1_B1_CTL__POR },
- { TOMTOM_A_CDC_CONN_EQ1_B2_CTL, TOMTOM_A_CDC_CONN_EQ1_B2_CTL__POR },
- { TOMTOM_A_CDC_CONN_EQ1_B3_CTL, TOMTOM_A_CDC_CONN_EQ1_B3_CTL__POR },
- { TOMTOM_A_CDC_CONN_EQ1_B4_CTL, TOMTOM_A_CDC_CONN_EQ1_B4_CTL__POR },
- { TOMTOM_A_CDC_CONN_EQ2_B1_CTL, TOMTOM_A_CDC_CONN_EQ2_B1_CTL__POR },
- { TOMTOM_A_CDC_CONN_EQ2_B2_CTL, TOMTOM_A_CDC_CONN_EQ2_B2_CTL__POR },
- { TOMTOM_A_CDC_CONN_EQ2_B3_CTL, TOMTOM_A_CDC_CONN_EQ2_B3_CTL__POR },
- { TOMTOM_A_CDC_CONN_EQ2_B4_CTL, TOMTOM_A_CDC_CONN_EQ2_B4_CTL__POR },
- { TOMTOM_A_CDC_CONN_SRC1_B1_CTL, TOMTOM_A_CDC_CONN_SRC1_B1_CTL__POR },
- { TOMTOM_A_CDC_CONN_SRC1_B2_CTL, TOMTOM_A_CDC_CONN_SRC1_B2_CTL__POR },
- { TOMTOM_A_CDC_CONN_SRC2_B1_CTL, TOMTOM_A_CDC_CONN_SRC2_B1_CTL__POR },
- { TOMTOM_A_CDC_CONN_SRC2_B2_CTL, TOMTOM_A_CDC_CONN_SRC2_B2_CTL__POR },
- { TOMTOM_A_CDC_CONN_TX_SB_B1_CTL, TOMTOM_A_CDC_CONN_TX_SB_B1_CTL__POR },
- { TOMTOM_A_CDC_CONN_TX_SB_B2_CTL, TOMTOM_A_CDC_CONN_TX_SB_B2_CTL__POR },
- { TOMTOM_A_CDC_CONN_TX_SB_B3_CTL, TOMTOM_A_CDC_CONN_TX_SB_B3_CTL__POR },
- { TOMTOM_A_CDC_CONN_TX_SB_B4_CTL, TOMTOM_A_CDC_CONN_TX_SB_B4_CTL__POR },
- { TOMTOM_A_CDC_CONN_TX_SB_B5_CTL, TOMTOM_A_CDC_CONN_TX_SB_B5_CTL__POR },
- { TOMTOM_A_CDC_CONN_TX_SB_B6_CTL, TOMTOM_A_CDC_CONN_TX_SB_B6_CTL__POR },
- { TOMTOM_A_CDC_CONN_TX_SB_B7_CTL, TOMTOM_A_CDC_CONN_TX_SB_B7_CTL__POR },
- { TOMTOM_A_CDC_CONN_TX_SB_B8_CTL, TOMTOM_A_CDC_CONN_TX_SB_B8_CTL__POR },
- { TOMTOM_A_CDC_CONN_TX_SB_B9_CTL, TOMTOM_A_CDC_CONN_TX_SB_B9_CTL__POR },
- { TOMTOM_A_CDC_CONN_TX_SB_B10_CTL,
- TOMTOM_A_CDC_CONN_TX_SB_B10_CTL__POR },
- { TOMTOM_A_CDC_CONN_TX_SB_B11_CTL,
- TOMTOM_A_CDC_CONN_TX_SB_B11_CTL__POR },
- { TOMTOM_A_CDC_CONN_RX_SB_B1_CTL, TOMTOM_A_CDC_CONN_RX_SB_B1_CTL__POR },
- { TOMTOM_A_CDC_CONN_RX_SB_B2_CTL, TOMTOM_A_CDC_CONN_RX_SB_B2_CTL__POR },
- { TOMTOM_A_CDC_CONN_CLSH_CTL, TOMTOM_A_CDC_CONN_CLSH_CTL__POR },
- { TOMTOM_A_CDC_CONN_MISC, TOMTOM_A_CDC_CONN_MISC__POR },
- { TOMTOM_A_CDC_CONN_RX8_B1_CTL, TOMTOM_A_CDC_CONN_RX8_B1_CTL__POR },
- { TOMTOM_A_CDC_CLIP_ADJ_SPKR_B1_CTL,
- TOMTOM_A_CDC_CLIP_ADJ_SPKR_B1_CTL__POR },
- { TOMTOM_A_CDC_CLIP_ADJ_SPKR_CLIP_LEVEL_ADJUST,
- TOMTOM_A_CDC_CLIP_ADJ_SPKR_CLIP_LEVEL_ADJUST__POR },
- { TOMTOM_A_CDC_CLIP_ADJ_SPKR_MIN_CLIP_THRESHOLD,
- TOMTOM_A_CDC_CLIP_ADJ_SPKR_MIN_CLIP_THRESHOLD__POR },
- { TOMTOM_A_CDC_CLIP_ADJ_SPKR_THRESHOLD_STATUS,
- TOMTOM_A_CDC_CLIP_ADJ_SPKR_THRESHOLD_STATUS__POR },
- { TOMTOM_A_CDC_CLIP_ADJ_SPKR_SAMPLE_MARK,
- TOMTOM_A_CDC_CLIP_ADJ_SPKR_SAMPLE_MARK__POR },
- { TOMTOM_A_CDC_CLIP_ADJ_SPKR_BOOST_GATING,
- TOMTOM_A_CDC_CLIP_ADJ_SPKR_BOOST_GATING__POR },
- { TOMTOM_A_CDC_CLIP_ADJ_SPKR2_B1_CTL,
- TOMTOM_A_CDC_CLIP_ADJ_SPKR2_B1_CTL__POR },
- { TOMTOM_A_CDC_CLIP_ADJ_SPKR2_CLIP_LEVEL_ADJUST,
- TOMTOM_A_CDC_CLIP_ADJ_SPKR2_CLIP_LEVEL_ADJUST__POR },
- { TOMTOM_A_CDC_CLIP_ADJ_SPKR2_MIN_CLIP_THRESHOLD,
- TOMTOM_A_CDC_CLIP_ADJ_SPKR2_MIN_CLIP_THRESHOLD__POR },
- { TOMTOM_A_CDC_CLIP_ADJ_SPKR2_THRESHOLD_STATUS,
- TOMTOM_A_CDC_CLIP_ADJ_SPKR2_THRESHOLD_STATUS__POR },
- { TOMTOM_A_CDC_CLIP_ADJ_SPKR2_SAMPLE_MARK,
- TOMTOM_A_CDC_CLIP_ADJ_SPKR2_SAMPLE_MARK__POR },
- { TOMTOM_A_CDC_CLIP_ADJ_SPKR2_BOOST_GATING,
- TOMTOM_A_CDC_CLIP_ADJ_SPKR2_BOOST_GATING__POR },
- { TOMTOM_A_CDC_MBHC_EN_CTL, TOMTOM_A_CDC_MBHC_EN_CTL__POR },
- { TOMTOM_A_CDC_MBHC_FIR_B1_CFG, TOMTOM_A_CDC_MBHC_FIR_B1_CFG__POR },
- { TOMTOM_A_CDC_MBHC_FIR_B2_CFG, TOMTOM_A_CDC_MBHC_FIR_B2_CFG__POR },
- { TOMTOM_A_CDC_MBHC_TIMER_B1_CTL, TOMTOM_A_CDC_MBHC_TIMER_B1_CTL__POR },
- { TOMTOM_A_CDC_MBHC_TIMER_B2_CTL, TOMTOM_A_CDC_MBHC_TIMER_B2_CTL__POR },
- { TOMTOM_A_CDC_MBHC_TIMER_B3_CTL, TOMTOM_A_CDC_MBHC_TIMER_B3_CTL__POR },
- { TOMTOM_A_CDC_MBHC_TIMER_B4_CTL, TOMTOM_A_CDC_MBHC_TIMER_B4_CTL__POR },
- { TOMTOM_A_CDC_MBHC_TIMER_B5_CTL, TOMTOM_A_CDC_MBHC_TIMER_B5_CTL__POR },
- { TOMTOM_A_CDC_MBHC_TIMER_B6_CTL, TOMTOM_A_CDC_MBHC_TIMER_B6_CTL__POR },
- { TOMTOM_A_CDC_MBHC_B1_STATUS, TOMTOM_A_CDC_MBHC_B1_STATUS__POR },
- { TOMTOM_A_CDC_MBHC_B2_STATUS, TOMTOM_A_CDC_MBHC_B2_STATUS__POR },
- { TOMTOM_A_CDC_MBHC_B3_STATUS, TOMTOM_A_CDC_MBHC_B3_STATUS__POR },
- { TOMTOM_A_CDC_MBHC_B4_STATUS, TOMTOM_A_CDC_MBHC_B4_STATUS__POR },
- { TOMTOM_A_CDC_MBHC_B5_STATUS, TOMTOM_A_CDC_MBHC_B5_STATUS__POR },
- { TOMTOM_A_CDC_MBHC_B1_CTL, TOMTOM_A_CDC_MBHC_B1_CTL__POR },
- { TOMTOM_A_CDC_MBHC_B2_CTL, TOMTOM_A_CDC_MBHC_B2_CTL__POR },
- { TOMTOM_A_CDC_MBHC_VOLT_B1_CTL, TOMTOM_A_CDC_MBHC_VOLT_B1_CTL__POR },
- { TOMTOM_A_CDC_MBHC_VOLT_B2_CTL, TOMTOM_A_CDC_MBHC_VOLT_B2_CTL__POR },
- { TOMTOM_A_CDC_MBHC_VOLT_B3_CTL, TOMTOM_A_CDC_MBHC_VOLT_B3_CTL__POR },
- { TOMTOM_A_CDC_MBHC_VOLT_B4_CTL, TOMTOM_A_CDC_MBHC_VOLT_B4_CTL__POR },
- { TOMTOM_A_CDC_MBHC_VOLT_B5_CTL, TOMTOM_A_CDC_MBHC_VOLT_B5_CTL__POR },
- { TOMTOM_A_CDC_MBHC_VOLT_B6_CTL, TOMTOM_A_CDC_MBHC_VOLT_B6_CTL__POR },
- { TOMTOM_A_CDC_MBHC_VOLT_B7_CTL, TOMTOM_A_CDC_MBHC_VOLT_B7_CTL__POR },
- { TOMTOM_A_CDC_MBHC_VOLT_B8_CTL, TOMTOM_A_CDC_MBHC_VOLT_B8_CTL__POR },
- { TOMTOM_A_CDC_MBHC_VOLT_B9_CTL, TOMTOM_A_CDC_MBHC_VOLT_B9_CTL__POR },
- { TOMTOM_A_CDC_MBHC_VOLT_B10_CTL, TOMTOM_A_CDC_MBHC_VOLT_B10_CTL__POR },
- { TOMTOM_A_CDC_MBHC_VOLT_B11_CTL, TOMTOM_A_CDC_MBHC_VOLT_B11_CTL__POR },
- { TOMTOM_A_CDC_MBHC_VOLT_B12_CTL, TOMTOM_A_CDC_MBHC_VOLT_B12_CTL__POR },
- { TOMTOM_A_CDC_MBHC_CLK_CTL, TOMTOM_A_CDC_MBHC_CLK_CTL__POR },
- { TOMTOM_A_CDC_MBHC_INT_CTL, TOMTOM_A_CDC_MBHC_INT_CTL__POR },
- { TOMTOM_A_CDC_MBHC_DEBUG_CTL, TOMTOM_A_CDC_MBHC_DEBUG_CTL__POR },
- { TOMTOM_A_CDC_MBHC_SPARE, TOMTOM_A_CDC_MBHC_SPARE__POR },
- { TOMTOM_A_CDC_RX8_B1_CTL, TOMTOM_A_CDC_RX8_B1_CTL__POR },
- { TOMTOM_A_CDC_RX8_B2_CTL, TOMTOM_A_CDC_RX8_B2_CTL__POR },
- { TOMTOM_A_CDC_RX8_B3_CTL, TOMTOM_A_CDC_RX8_B3_CTL__POR },
- { TOMTOM_A_CDC_RX8_B4_CTL, TOMTOM_A_CDC_RX8_B4_CTL__POR },
- { TOMTOM_A_CDC_RX8_B5_CTL, TOMTOM_A_CDC_RX8_B5_CTL__POR },
- { TOMTOM_A_CDC_RX8_B6_CTL, TOMTOM_A_CDC_RX8_B6_CTL__POR },
- { TOMTOM_A_CDC_RX8_VOL_CTL_B1_CTL,
- TOMTOM_A_CDC_RX8_VOL_CTL_B1_CTL__POR },
- { TOMTOM_A_CDC_RX8_VOL_CTL_B2_CTL,
- TOMTOM_A_CDC_RX8_VOL_CTL_B2_CTL__POR },
- { TOMTOM_A_CDC_SPKR2_CLIPDET_VAL0,
- TOMTOM_A_CDC_SPKR2_CLIPDET_VAL0__POR },
- { TOMTOM_A_CDC_SPKR2_CLIPDET_VAL1,
- TOMTOM_A_CDC_SPKR2_CLIPDET_VAL1__POR },
- { TOMTOM_A_CDC_SPKR2_CLIPDET_VAL2,
- TOMTOM_A_CDC_SPKR2_CLIPDET_VAL2__POR },
- { TOMTOM_A_CDC_SPKR2_CLIPDET_VAL3,
- TOMTOM_A_CDC_SPKR2_CLIPDET_VAL3__POR },
- { TOMTOM_A_CDC_SPKR2_CLIPDET_VAL4,
- TOMTOM_A_CDC_SPKR2_CLIPDET_VAL4__POR },
- { TOMTOM_A_CDC_SPKR2_CLIPDET_VAL5,
- TOMTOM_A_CDC_SPKR2_CLIPDET_VAL5__POR },
- { TOMTOM_A_CDC_SPKR2_CLIPDET_VAL6,
- TOMTOM_A_CDC_SPKR2_CLIPDET_VAL6__POR },
- { TOMTOM_A_CDC_SPKR2_CLIPDET_VAL7,
- TOMTOM_A_CDC_SPKR2_CLIPDET_VAL7__POR },
- { TOMTOM_A_CDC_BOOST_MODE_CTL, TOMTOM_A_CDC_BOOST_MODE_CTL__POR },
- { TOMTOM_A_CDC_BOOST_THRESHOLD, TOMTOM_A_CDC_BOOST_THRESHOLD__POR },
- { TOMTOM_A_CDC_BOOST_TAP_SEL, TOMTOM_A_CDC_BOOST_TAP_SEL__POR },
- { TOMTOM_A_CDC_BOOST_HOLD_TIME, TOMTOM_A_CDC_BOOST_HOLD_TIME__POR },
- { TOMTOM_A_CDC_BOOST_TRGR_EN, TOMTOM_A_CDC_BOOST_TRGR_EN__POR },
-};
-
-static bool wcd9330_is_readable_register(struct device *dev, unsigned int reg)
-{
- return tomtom_reg_readable[reg];
-}
-
-static bool tomtom_is_digital_gain_register(unsigned int reg)
-{
- bool rtn = false;
-
- switch (reg) {
- case TOMTOM_A_CDC_RX1_VOL_CTL_B2_CTL:
- case TOMTOM_A_CDC_RX2_VOL_CTL_B2_CTL:
- case TOMTOM_A_CDC_RX3_VOL_CTL_B2_CTL:
- case TOMTOM_A_CDC_RX4_VOL_CTL_B2_CTL:
- case TOMTOM_A_CDC_RX5_VOL_CTL_B2_CTL:
- case TOMTOM_A_CDC_RX6_VOL_CTL_B2_CTL:
- case TOMTOM_A_CDC_RX7_VOL_CTL_B2_CTL:
- case TOMTOM_A_CDC_RX8_VOL_CTL_B2_CTL:
- case TOMTOM_A_CDC_TX1_VOL_CTL_GAIN:
- case TOMTOM_A_CDC_TX2_VOL_CTL_GAIN:
- case TOMTOM_A_CDC_TX3_VOL_CTL_GAIN:
- case TOMTOM_A_CDC_TX4_VOL_CTL_GAIN:
- case TOMTOM_A_CDC_TX5_VOL_CTL_GAIN:
- case TOMTOM_A_CDC_TX6_VOL_CTL_GAIN:
- case TOMTOM_A_CDC_TX7_VOL_CTL_GAIN:
- case TOMTOM_A_CDC_TX8_VOL_CTL_GAIN:
- case TOMTOM_A_CDC_TX9_VOL_CTL_GAIN:
- case TOMTOM_A_CDC_TX10_VOL_CTL_GAIN:
- rtn = true;
- break;
- default:
- break;
- }
- return rtn;
-}
-
-static bool wcd9330_is_volatile_register(struct device *dev, unsigned int reg)
-{
-
- if ((reg >= TOMTOM_A_CDC_MBHC_EN_CTL) || (reg < 0x100))
- return true;
-
- /* IIR Coeff registers are not cacheable */
- if ((reg >= TOMTOM_A_CDC_IIR1_COEF_B1_CTL) &&
- (reg <= TOMTOM_A_CDC_IIR2_COEF_B2_CTL))
- return true;
-
- /* ANC filter registers are not cacheable */
- if ((reg >= TOMTOM_A_CDC_ANC1_IIR_B1_CTL) &&
- (reg <= TOMTOM_A_CDC_ANC1_LPF_B2_CTL))
- return true;
-
- if ((reg >= TOMTOM_A_CDC_ANC2_IIR_B1_CTL) &&
- (reg <= TOMTOM_A_CDC_ANC2_LPF_B2_CTL))
- return true;
-
- if (((reg >= TOMTOM_A_CDC_SPKR_CLIPDET_VAL0 &&
- reg <= TOMTOM_A_CDC_SPKR_CLIPDET_VAL7)) ||
- ((reg >= TOMTOM_A_CDC_SPKR2_CLIPDET_VAL0) &&
- (reg <= TOMTOM_A_CDC_SPKR2_CLIPDET_VAL7)))
- return true;
-
- /* Digital gain register is not cacheable so we have to write
- * the setting even it is the same
- */
- if (tomtom_is_digital_gain_register(reg))
- return true;
-
- switch (reg) {
- case TOMTOM_A_RX_HPH_L_STATUS:
- case TOMTOM_A_RX_HPH_R_STATUS:
- case TOMTOM_A_MBHC_INSERT_DET_STATUS:
- case TOMTOM_A_RX_HPH_CNP_EN:
- case TOMTOM_A_CDC_VBAT_GAIN_MON_VAL:
- case TOMTOM_A_CDC_MAD_MAIN_CTL_1:
- case TOMTOM_A_CDC_MAD_AUDIO_CTL_3:
- case TOMTOM_A_CDC_MAD_AUDIO_CTL_4:
- case TOMTOM_A_INTR_MODE:
- case TOMTOM_A_INTR2_MASK0:
- case TOMTOM_A_INTR2_STATUS0:
- case TOMTOM_A_INTR2_CLEAR0:
- case TOMTOM_SB_PGD_PORT_TX_BASE:
- case TOMTOM_SB_PGD_PORT_RX_BASE:
- case TOMTOM_A_CDC_ANC1_IIR_B1_CTL:
- case TOMTOM_A_CDC_ANC1_GAIN_CTL:
- case TOMTOM_A_SVASS_SPE_INBOX_TRG:
- return true;
- default:
- return false;
- }
-}
-
-struct regmap_config wcd9330_regmap_config = {
- .reg_bits = 16,
- .val_bits = 8,
- .cache_type = REGCACHE_RBTREE,
- .reg_defaults = wcd9330_defaults,
- .num_reg_defaults = ARRAY_SIZE(wcd9330_defaults),
- .max_register = WCD9330_MAX_REGISTER,
- .volatile_reg = wcd9330_is_volatile_register,
- .readable_reg = wcd9330_is_readable_register,
-};
diff --git a/drivers/mfd/wcd9xxx-core-init.c b/drivers/mfd/wcd9xxx-core-init.c
new file mode 100644
index 0000000..7f93399
--- /dev/null
+++ b/drivers/mfd/wcd9xxx-core-init.c
@@ -0,0 +1,55 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/mfd/msm-cdc-pinctrl.h>
+#include <linux/mfd/wcd9xxx/wcd9xxx-irq.h>
+#include <linux/mfd/wcd9xxx/core.h>
+
+#define NUM_DRIVERS_REG_RET 3
+
+static int __init wcd9xxx_core_init(void)
+{
+ int ret[NUM_DRIVERS_REG_RET] = {0};
+ int i = 0;
+
+ ret[0] = msm_cdc_pinctrl_drv_init();
+ if (ret[0])
+ pr_err("%s: Failed init pinctrl drv: %d\n", __func__, ret[0]);
+
+ ret[1] = wcd9xxx_irq_drv_init();
+ if (ret[1])
+ pr_err("%s: Failed init irq drv: %d\n", __func__, ret[1]);
+
+ ret[2] = wcd9xxx_init();
+ if (ret[2])
+ pr_err("%s: Failed wcd core drv: %d\n", __func__, ret[2]);
+
+ for (i = 0; i < NUM_DRIVERS_REG_RET; i++) {
+ if (ret[i])
+ return ret[i];
+ }
+
+ return 0;
+}
+module_init(wcd9xxx_core_init);
+
+static void __exit wcd9xxx_core_exit(void)
+{
+ wcd9xxx_exit();
+ wcd9xxx_irq_drv_exit();
+ msm_cdc_pinctrl_drv_exit();
+}
+module_exit(wcd9xxx_core_exit);
+
+MODULE_DESCRIPTION("WCD9XXX CODEC core init driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/wcd9xxx-core.c b/drivers/mfd/wcd9xxx-core.c
index d143536..a12d5ca 100644
--- a/drivers/mfd/wcd9xxx-core.c
+++ b/drivers/mfd/wcd9xxx-core.c
@@ -228,7 +228,7 @@
if (!wcd9xxx->dev_up) {
dev_dbg_ratelimited(
- wcd9xxx->dev, "%s: No read allowed. dev_up = %d\n",
+ wcd9xxx->dev, "%s: No read allowed. dev_up = %lu\n",
__func__, wcd9xxx->dev_up);
return 0;
}
@@ -268,7 +268,7 @@
if (!wcd9xxx->dev_up) {
dev_dbg_ratelimited(
- wcd9xxx->dev, "%s: No write allowed. dev_up = %d\n",
+ wcd9xxx->dev, "%s: No write allowed. dev_up = %lu\n",
__func__, wcd9xxx->dev_up);
return 0;
}
@@ -345,7 +345,7 @@
if (!wcd9xxx->dev_up) {
dev_dbg_ratelimited(
- wcd9xxx->dev, "%s: No write allowed. dev_up = %d\n",
+ wcd9xxx->dev, "%s: No write allowed. dev_up = %lu\n",
__func__, wcd9xxx->dev_up);
ret = 0;
goto done;
@@ -426,7 +426,7 @@
if (!wcd9xxx->dev_up) {
dev_dbg_ratelimited(
- wcd9xxx->dev, "%s: No write allowed. dev_up = %d\n",
+ wcd9xxx->dev, "%s: No write allowed. dev_up = %lu\n",
__func__, wcd9xxx->dev_up);
return 0;
}
@@ -1280,6 +1280,10 @@
ret = -EINVAL;
goto err_codec;
}
+
+ if (pdata->has_buck_vsel_gpio)
+ msm_cdc_pinctrl_select_active_state(pdata->buck_vsel_ctl_np);
+
device_id = slim_get_device_id(slim);
if (!device_id) {
dev_err(&slim->dev, "%s: Error, no device id\n", __func__);
@@ -1479,12 +1483,27 @@
return -EINVAL;
}
- dev_info(wcd9xxx->dev, "%s: device reset, dev_up = %d\n",
- __func__, wcd9xxx->dev_up);
- if (wcd9xxx->dev_up)
- return 0;
+ /*
+ * Wait for 500 ms for device down to complete. Observed delay
+ * of ~200ms for device down to complete after being called,
+ * due to context switch issue.
+ */
+ ret = wait_on_bit_timeout(&wcd9xxx->dev_up, 0,
+ TASK_INTERRUPTIBLE,
+ msecs_to_jiffies(500));
+ if (ret)
+ pr_err("%s: slim device down not complete in 500 msec\n",
+ __func__);
mutex_lock(&wcd9xxx->reset_lock);
+
+ dev_info(wcd9xxx->dev, "%s: device reset, dev_up = %lu\n",
+ __func__, wcd9xxx->dev_up);
+ if (wcd9xxx->dev_up) {
+ mutex_unlock(&wcd9xxx->reset_lock);
+ return 0;
+ }
+
ret = wcd9xxx_reset(wcd9xxx->dev);
if (ret)
dev_err(wcd9xxx->dev, "%s: Resetting Codec failed\n", __func__);
@@ -1502,8 +1521,8 @@
pr_err("%s: wcd9xxx is NULL\n", __func__);
return -EINVAL;
}
- dev_info(wcd9xxx->dev, "%s: slim device up, dev_up = %d\n",
- __func__, wcd9xxx->dev_up);
+ dev_info(wcd9xxx->dev, "%s: slim device up, dev_up = %lu\n",
+ __func__, wcd9xxx->dev_up);
if (wcd9xxx->dev_up)
return 0;
@@ -1525,18 +1544,20 @@
return -EINVAL;
}
- dev_info(wcd9xxx->dev, "%s: device down, dev_up = %d\n",
- __func__, wcd9xxx->dev_up);
- if (!wcd9xxx->dev_up)
- return 0;
-
- wcd9xxx->dev_up = false;
-
mutex_lock(&wcd9xxx->reset_lock);
+
+ dev_info(wcd9xxx->dev, "%s: device down, dev_up = %lu\n",
+ __func__, wcd9xxx->dev_up);
+ if (!wcd9xxx->dev_up) {
+ mutex_unlock(&wcd9xxx->reset_lock);
+ return 0;
+ }
+
if (wcd9xxx->dev_down)
wcd9xxx->dev_down(wcd9xxx);
wcd9xxx_irq_exit(&wcd9xxx->core_res);
wcd9xxx_reset_low(wcd9xxx->dev);
+ wcd9xxx->dev_up = false;
mutex_unlock(&wcd9xxx->reset_lock);
return 0;
@@ -1665,7 +1686,7 @@
.remove = wcd9xxx_i2c_remove,
};
-static int __init wcd9xxx_init(void)
+int wcd9xxx_init(void)
{
int ret[NUM_WCD9XXX_REG_RET] = {0};
int i = 0;
@@ -1699,9 +1720,8 @@
return 0;
}
-module_init(wcd9xxx_init);
-static void __exit wcd9xxx_exit(void)
+void wcd9xxx_exit(void)
{
wcd9xxx_set_intf_type(WCD9XXX_INTERFACE_TYPE_PROBING);
@@ -1710,7 +1730,6 @@
i2c_del_driver(&wcd9335_i2c_driver);
slim_driver_unregister(&wcd_slim_driver);
}
-module_exit(wcd9xxx_exit);
MODULE_DESCRIPTION("Codec core driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/wcd9xxx-irq.c b/drivers/mfd/wcd9xxx-irq.c
index 0502e39d..092f446 100644
--- a/drivers/mfd/wcd9xxx-irq.c
+++ b/drivers/mfd/wcd9xxx-irq.c
@@ -406,30 +406,63 @@
return IRQ_NONE;
}
+/**
+ * wcd9xxx_free_irq
+ *
+ * @wcd9xxx_res: pointer to core resource
+ * irq: irq number
+ * @data: data pointer
+ *
+ */
void wcd9xxx_free_irq(struct wcd9xxx_core_resource *wcd9xxx_res,
int irq, void *data)
{
free_irq(phyirq_to_virq(wcd9xxx_res, irq), data);
}
+EXPORT_SYMBOL(wcd9xxx_free_irq);
+/**
+ * wcd9xxx_enable_irq
+ *
+ * @wcd9xxx_res: pointer to core resource
+ * irq: irq number
+ *
+ */
void wcd9xxx_enable_irq(struct wcd9xxx_core_resource *wcd9xxx_res, int irq)
{
if (wcd9xxx_res->irq)
enable_irq(phyirq_to_virq(wcd9xxx_res, irq));
}
+EXPORT_SYMBOL(wcd9xxx_enable_irq);
+/**
+ * wcd9xxx_disable_irq
+ *
+ * @wcd9xxx_res: pointer to core resource
+ * irq: irq number
+ *
+ */
void wcd9xxx_disable_irq(struct wcd9xxx_core_resource *wcd9xxx_res, int irq)
{
if (wcd9xxx_res->irq)
disable_irq_nosync(phyirq_to_virq(wcd9xxx_res, irq));
}
+EXPORT_SYMBOL(wcd9xxx_disable_irq);
+/**
+ * wcd9xxx_disable_irq_sync
+ *
+ * @wcd9xxx_res: pointer to core resource
+ * irq: irq number
+ *
+ */
void wcd9xxx_disable_irq_sync(
struct wcd9xxx_core_resource *wcd9xxx_res, int irq)
{
if (wcd9xxx_res->irq)
disable_irq(phyirq_to_virq(wcd9xxx_res, irq));
}
+EXPORT_SYMBOL(wcd9xxx_disable_irq_sync);
static int wcd9xxx_irq_setup_downstream_irq(
struct wcd9xxx_core_resource *wcd9xxx_res)
@@ -470,6 +503,13 @@
return 0;
}
+/**
+ * wcd9xxx_irq_init
+ *
+ * @wcd9xxx_res: pointer to core resource
+ *
+ * Returns 0 on success, appropriate error code otherwise
+ */
int wcd9xxx_irq_init(struct wcd9xxx_core_resource *wcd9xxx_res)
{
int i, ret;
@@ -568,6 +608,7 @@
mutex_destroy(&wcd9xxx_res->nested_irq_lock);
return ret;
}
+EXPORT_SYMBOL(wcd9xxx_irq_init);
int wcd9xxx_request_irq(struct wcd9xxx_core_resource *wcd9xxx_res,
int irq, irq_handler_t handler,
@@ -580,6 +621,7 @@
return request_threaded_irq(virq, NULL, handler, IRQF_TRIGGER_RISING,
name, data);
}
+EXPORT_SYMBOL(wcd9xxx_request_irq);
void wcd9xxx_irq_exit(struct wcd9xxx_core_resource *wcd9xxx_res)
{
@@ -799,15 +841,13 @@
},
};
-static int wcd9xxx_irq_drv_init(void)
+int wcd9xxx_irq_drv_init(void)
{
return platform_driver_register(&wcd9xxx_irq_driver);
}
-subsys_initcall(wcd9xxx_irq_drv_init);
-static void wcd9xxx_irq_drv_exit(void)
+void wcd9xxx_irq_drv_exit(void)
{
platform_driver_unregister(&wcd9xxx_irq_driver);
}
-module_exit(wcd9xxx_irq_drv_exit);
#endif /* CONFIG_OF */
diff --git a/drivers/mfd/wcd9xxx-regmap.h b/drivers/mfd/wcd9xxx-regmap.h
index 6db8fc5..f44e8b1 100644
--- a/drivers/mfd/wcd9xxx-regmap.h
+++ b/drivers/mfd/wcd9xxx-regmap.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -19,42 +19,25 @@
typedef int (*regmap_patch_fptr)(struct regmap *, int);
-#ifdef CONFIG_WCD934X_CODEC
extern struct regmap_config wcd934x_regmap_config;
extern int wcd934x_regmap_register_patch(struct regmap *regmap,
int version);
-#endif
-#ifdef CONFIG_WCD9335_CODEC
extern struct regmap_config wcd9335_regmap_config;
extern int wcd9335_regmap_register_patch(struct regmap *regmap,
int version);
-#endif
-
-#ifdef CONFIG_WCD9330_CODEC
-extern struct regmap_config wcd9330_regmap_config;
-#endif
static inline struct regmap_config *wcd9xxx_get_regmap_config(int type)
{
struct regmap_config *regmap_config;
switch (type) {
-#ifdef CONFIG_WCD934X_CODEC
case WCD934X:
regmap_config = &wcd934x_regmap_config;
break;
-#endif
-#ifdef CONFIG_WCD9335_CODEC
case WCD9335:
regmap_config = &wcd9335_regmap_config;
break;
-#endif
-#ifdef CONFIG_WCD9330_CODEC
- case WCD9330:
- regmap_config = &wcd9330_regmap_config;
- break;
-#endif
default:
regmap_config = NULL;
break;
@@ -68,16 +51,12 @@
regmap_patch_fptr apply_patch;
switch (type) {
-#ifdef CONFIG_WCD9335_CODEC
case WCD9335:
apply_patch = wcd9335_regmap_register_patch;
break;
-#endif
-#ifdef CONFIG_WCD934X_CODEC
case WCD934X:
apply_patch = wcd934x_regmap_register_patch;
break;
-#endif
default:
apply_patch = NULL;
break;
diff --git a/drivers/mfd/wcd9xxx-rst.c b/drivers/mfd/wcd9xxx-rst.c
new file mode 100644
index 0000000..c8e0b34
--- /dev/null
+++ b/drivers/mfd/wcd9xxx-rst.c
@@ -0,0 +1,443 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+#include <linux/mfd/wcd9xxx/pdata.h>
+#include <linux/mfd/wcd9xxx/core.h>
+#include <linux/mfd/wcd9xxx/wcd9xxx-utils.h>
+#include <linux/mfd/wcd9335/registers.h>
+#include <linux/mfd/wcd934x/registers.h>
+#include <linux/mfd/wcd9335/irq.h>
+#include <linux/mfd/wcd934x/irq.h>
+
+/* wcd9335 interrupt table */
+static const struct intr_data wcd9335_intr_table[] = {
+ {WCD9XXX_IRQ_SLIMBUS, false},
+ {WCD9335_IRQ_MBHC_SW_DET, true},
+ {WCD9335_IRQ_MBHC_BUTTON_PRESS_DET, true},
+ {WCD9335_IRQ_MBHC_BUTTON_RELEASE_DET, true},
+ {WCD9335_IRQ_MBHC_ELECT_INS_REM_DET, true},
+ {WCD9335_IRQ_MBHC_ELECT_INS_REM_LEG_DET, true},
+ {WCD9335_IRQ_FLL_LOCK_LOSS, false},
+ {WCD9335_IRQ_HPH_PA_CNPL_COMPLETE, false},
+ {WCD9335_IRQ_HPH_PA_CNPR_COMPLETE, false},
+ {WCD9335_IRQ_EAR_PA_CNP_COMPLETE, false},
+ {WCD9335_IRQ_LINE_PA1_CNP_COMPLETE, false},
+ {WCD9335_IRQ_LINE_PA2_CNP_COMPLETE, false},
+ {WCD9335_IRQ_LINE_PA3_CNP_COMPLETE, false},
+ {WCD9335_IRQ_LINE_PA4_CNP_COMPLETE, false},
+ {WCD9335_IRQ_HPH_PA_OCPL_FAULT, false},
+ {WCD9335_IRQ_HPH_PA_OCPR_FAULT, false},
+ {WCD9335_IRQ_EAR_PA_OCP_FAULT, false},
+ {WCD9335_IRQ_SOUNDWIRE, false},
+ {WCD9335_IRQ_VDD_DIG_RAMP_COMPLETE, false},
+ {WCD9335_IRQ_RCO_ERROR, false},
+ {WCD9335_IRQ_SVA_ERROR, false},
+ {WCD9335_IRQ_MAD_AUDIO, false},
+ {WCD9335_IRQ_MAD_BEACON, false},
+ {WCD9335_IRQ_SVA_OUTBOX1, true},
+ {WCD9335_IRQ_SVA_OUTBOX2, true},
+ {WCD9335_IRQ_MAD_ULTRASOUND, false},
+ {WCD9335_IRQ_VBAT_ATTACK, false},
+ {WCD9335_IRQ_VBAT_RESTORE, false},
+};
+
+static const struct intr_data wcd934x_intr_table[] = {
+ {WCD9XXX_IRQ_SLIMBUS, false},
+ {WCD934X_IRQ_MBHC_SW_DET, true},
+ {WCD934X_IRQ_MBHC_BUTTON_PRESS_DET, true},
+ {WCD934X_IRQ_MBHC_BUTTON_RELEASE_DET, true},
+ {WCD934X_IRQ_MBHC_ELECT_INS_REM_DET, true},
+ {WCD934X_IRQ_MBHC_ELECT_INS_REM_LEG_DET, true},
+ {WCD934X_IRQ_MISC, false},
+ {WCD934X_IRQ_HPH_PA_CNPL_COMPLETE, false},
+ {WCD934X_IRQ_HPH_PA_CNPR_COMPLETE, false},
+ {WCD934X_IRQ_EAR_PA_CNP_COMPLETE, false},
+ {WCD934X_IRQ_LINE_PA1_CNP_COMPLETE, false},
+ {WCD934X_IRQ_LINE_PA2_CNP_COMPLETE, false},
+ {WCD934X_IRQ_SLNQ_ANALOG_ERROR, false},
+ {WCD934X_IRQ_RESERVED_3, false},
+ {WCD934X_IRQ_HPH_PA_OCPL_FAULT, false},
+ {WCD934X_IRQ_HPH_PA_OCPR_FAULT, false},
+ {WCD934X_IRQ_EAR_PA_OCP_FAULT, false},
+ {WCD934X_IRQ_SOUNDWIRE, false},
+ {WCD934X_IRQ_VDD_DIG_RAMP_COMPLETE, false},
+ {WCD934X_IRQ_RCO_ERROR, false},
+ {WCD934X_IRQ_CPE_ERROR, false},
+ {WCD934X_IRQ_MAD_AUDIO, false},
+ {WCD934X_IRQ_MAD_BEACON, false},
+ {WCD934X_IRQ_CPE1_INTR, true},
+ {WCD934X_IRQ_RESERVED_4, false},
+ {WCD934X_IRQ_MAD_ULTRASOUND, false},
+ {WCD934X_IRQ_VBAT_ATTACK, false},
+ {WCD934X_IRQ_VBAT_RESTORE, false},
+};
+
+/*
+ * wcd9335_bring_down: Bringdown WCD Codec
+ *
+ * @wcd9xxx: Pointer to wcd9xxx structure
+ *
+ * Returns 0 for success or negative error code for failure
+ */
+static int wcd9335_bring_down(struct wcd9xxx *wcd9xxx)
+{
+ if (!wcd9xxx || !wcd9xxx->regmap)
+ return -EINVAL;
+
+ regmap_write(wcd9xxx->regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
+ 0x04);
+
+ return 0;
+}
+
+/*
+ * wcd9335_bring_up: Bringup WCD Codec
+ *
+ * @wcd9xxx: Pointer to the wcd9xxx structure
+ *
+ * Returns 0 for success or negative error code for failure
+ */
+static int wcd9335_bring_up(struct wcd9xxx *wcd9xxx)
+{
+ int ret = 0;
+ int val, byte0;
+ struct regmap *wcd_regmap;
+
+ if (!wcd9xxx)
+ return -EINVAL;
+
+ if (!wcd9xxx->regmap) {
+ dev_err(wcd9xxx->dev, "%s: wcd9xxx regmap is null!\n",
+ __func__);
+ return -EINVAL;
+ }
+ wcd_regmap = wcd9xxx->regmap;
+
+ regmap_read(wcd_regmap, WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT0, &val);
+ regmap_read(wcd_regmap, WCD9335_CHIP_TIER_CTRL_CHIP_ID_BYTE0, &byte0);
+
+ if ((val < 0) || (byte0 < 0)) {
+ dev_err(wcd9xxx->dev, "%s: tasha codec version detection fail!\n",
+ __func__);
+ return -EINVAL;
+ }
+ if ((val & 0x80) && (byte0 == 0x0)) {
+ dev_info(wcd9xxx->dev, "%s: wcd9335 codec version is v1.1\n",
+ __func__);
+ regmap_write(wcd_regmap, WCD9335_CODEC_RPM_RST_CTL, 0x01);
+ regmap_write(wcd_regmap, WCD9335_SIDO_SIDO_CCL_2, 0xFC);
+ regmap_write(wcd_regmap, WCD9335_SIDO_SIDO_CCL_4, 0x21);
+ regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
+ 0x5);
+ regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
+ 0x7);
+ regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
+ 0x3);
+ regmap_write(wcd_regmap, WCD9335_CODEC_RPM_RST_CTL, 0x3);
+ } else if (byte0 == 0x1) {
+ dev_info(wcd9xxx->dev, "%s: wcd9335 codec version is v2.0\n",
+ __func__);
+ regmap_write(wcd_regmap, WCD9335_CODEC_RPM_RST_CTL, 0x01);
+ regmap_write(wcd_regmap, WCD9335_SIDO_SIDO_TEST_2, 0x00);
+ regmap_write(wcd_regmap, WCD9335_SIDO_SIDO_CCL_8, 0x6F);
+ regmap_write(wcd_regmap, WCD9335_BIAS_VBG_FINE_ADJ, 0x65);
+ regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
+ 0x5);
+ regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
+ 0x7);
+ regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
+ 0x3);
+ regmap_write(wcd_regmap, WCD9335_CODEC_RPM_RST_CTL, 0x3);
+ } else if ((byte0 == 0) && (!(val & 0x80))) {
+ dev_info(wcd9xxx->dev, "%s: wcd9335 codec version is v1.0\n",
+ __func__);
+ regmap_write(wcd_regmap, WCD9335_CODEC_RPM_RST_CTL, 0x01);
+ regmap_write(wcd_regmap, WCD9335_SIDO_SIDO_CCL_2, 0xFC);
+ regmap_write(wcd_regmap, WCD9335_SIDO_SIDO_CCL_4, 0x21);
+ regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
+ 0x3);
+ regmap_write(wcd_regmap, WCD9335_CODEC_RPM_RST_CTL, 0x3);
+ } else {
+ dev_err(wcd9xxx->dev, "%s: tasha codec version unknown\n",
+ __func__);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/*
+ * wcd9335_get_cdc_info: Get codec specific information
+ *
+ * @wcd9xxx: pointer to wcd9xxx structure
+ * @wcd_type: pointer to wcd9xxx_codec_type structure
+ *
+ * Returns 0 for success or negative error code for failure
+ */
+static int wcd9335_get_cdc_info(struct wcd9xxx *wcd9xxx,
+ struct wcd9xxx_codec_type *wcd_type)
+{
+ u16 id_minor, id_major;
+ struct regmap *wcd_regmap;
+ int rc, val, version = 0;
+
+ if (!wcd9xxx || !wcd_type)
+ return -EINVAL;
+
+ if (!wcd9xxx->regmap) {
+ dev_err(wcd9xxx->dev, "%s: wcd9xxx regmap is null!\n",
+ __func__);
+ return -EINVAL;
+ }
+ wcd_regmap = wcd9xxx->regmap;
+
+ rc = regmap_bulk_read(wcd_regmap, WCD9335_CHIP_TIER_CTRL_CHIP_ID_BYTE0,
+ (u8 *)&id_minor, sizeof(u16));
+ if (rc)
+ return -EINVAL;
+
+ rc = regmap_bulk_read(wcd_regmap, WCD9335_CHIP_TIER_CTRL_CHIP_ID_BYTE2,
+ (u8 *)&id_major, sizeof(u16));
+ if (rc)
+ return -EINVAL;
+
+ dev_info(wcd9xxx->dev, "%s: wcd9xxx chip id major 0x%x, minor 0x%x\n",
+ __func__, id_major, id_minor);
+
+ /* Version detection */
+ if (id_major == TASHA_MAJOR) {
+ regmap_read(wcd_regmap, WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT0,
+ &val);
+ version = ((u8)val & 0x80) >> 7;
+ } else if (id_major == TASHA2P0_MAJOR)
+ version = 2;
+ else
+ dev_err(wcd9xxx->dev, "%s: wcd9335 version unknown (major 0x%x, minor 0x%x)\n",
+ __func__, id_major, id_minor);
+
+ /* Fill codec type info */
+ wcd_type->id_major = id_major;
+ wcd_type->id_minor = id_minor;
+ wcd_type->num_irqs = WCD9335_NUM_IRQS;
+ wcd_type->version = version;
+ wcd_type->slim_slave_type = WCD9XXX_SLIM_SLAVE_ADDR_TYPE_1;
+ wcd_type->i2c_chip_status = 0x01;
+ wcd_type->intr_tbl = wcd9335_intr_table;
+ wcd_type->intr_tbl_size = ARRAY_SIZE(wcd9335_intr_table);
+
+ wcd_type->intr_reg[WCD9XXX_INTR_STATUS_BASE] =
+ WCD9335_INTR_PIN1_STATUS0;
+ wcd_type->intr_reg[WCD9XXX_INTR_CLEAR_BASE] =
+ WCD9335_INTR_PIN1_CLEAR0;
+ wcd_type->intr_reg[WCD9XXX_INTR_MASK_BASE] =
+ WCD9335_INTR_PIN1_MASK0;
+ wcd_type->intr_reg[WCD9XXX_INTR_LEVEL_BASE] =
+ WCD9335_INTR_LEVEL0;
+ wcd_type->intr_reg[WCD9XXX_INTR_CLR_COMMIT] =
+ WCD9335_INTR_CLR_COMMIT;
+
+ return rc;
+}
+
+/*
+ * wcd934x_bring_down: Bringdown WCD Codec
+ *
+ * @wcd9xxx: Pointer to wcd9xxx structure
+ *
+ * Returns 0 for success or negative error code for failure
+ */
+static int wcd934x_bring_down(struct wcd9xxx *wcd9xxx)
+{
+ if (!wcd9xxx || !wcd9xxx->regmap)
+ return -EINVAL;
+
+ regmap_write(wcd9xxx->regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
+ 0x04);
+
+ return 0;
+}
+
+/*
+ * wcd934x_bring_up: Bringup WCD Codec
+ *
+ * @wcd9xxx: Pointer to the wcd9xxx structure
+ *
+ * Returns 0 for success or negative error code for failure
+ */
+static int wcd934x_bring_up(struct wcd9xxx *wcd9xxx)
+{
+ struct regmap *wcd_regmap;
+
+ if (!wcd9xxx)
+ return -EINVAL;
+
+ if (!wcd9xxx->regmap) {
+ dev_err(wcd9xxx->dev, "%s: wcd9xxx regmap is null!\n",
+ __func__);
+ return -EINVAL;
+ }
+ wcd_regmap = wcd9xxx->regmap;
+
+ regmap_write(wcd_regmap, WCD934X_CODEC_RPM_RST_CTL, 0x01);
+ regmap_write(wcd_regmap, WCD934X_SIDO_NEW_VOUT_A_STARTUP, 0x19);
+ regmap_write(wcd_regmap, WCD934X_SIDO_NEW_VOUT_D_STARTUP, 0x15);
+ /* Add 1msec delay for VOUT to settle */
+ usleep_range(1000, 1100);
+ regmap_write(wcd_regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x5);
+ regmap_write(wcd_regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x7);
+ regmap_write(wcd_regmap, WCD934X_CODEC_RPM_RST_CTL, 0x3);
+ regmap_write(wcd_regmap, WCD934X_CODEC_RPM_RST_CTL, 0x7);
+ regmap_write(wcd_regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x3);
+
+ return 0;
+}
+
+/*
+ * wcd934x_get_cdc_info: Get codec specific information
+ *
+ * @wcd9xxx: pointer to wcd9xxx structure
+ * @wcd_type: pointer to wcd9xxx_codec_type structure
+ *
+ * Returns 0 for success or negative error code for failure
+ */
+static int wcd934x_get_cdc_info(struct wcd9xxx *wcd9xxx,
+ struct wcd9xxx_codec_type *wcd_type)
+{
+ u16 id_minor, id_major;
+ struct regmap *wcd_regmap;
+ int rc, version = -1;
+
+ if (!wcd9xxx || !wcd_type)
+ return -EINVAL;
+
+ if (!wcd9xxx->regmap) {
+ dev_err(wcd9xxx->dev, "%s: wcd9xxx regmap is null\n", __func__);
+ return -EINVAL;
+ }
+ wcd_regmap = wcd9xxx->regmap;
+
+ rc = regmap_bulk_read(wcd_regmap, WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE0,
+ (u8 *)&id_minor, sizeof(u16));
+ if (rc)
+ return -EINVAL;
+
+ rc = regmap_bulk_read(wcd_regmap, WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE2,
+ (u8 *)&id_major, sizeof(u16));
+ if (rc)
+ return -EINVAL;
+
+ dev_info(wcd9xxx->dev, "%s: wcd9xxx chip id major 0x%x, minor 0x%x\n",
+ __func__, id_major, id_minor);
+
+ if (id_major != TAVIL_MAJOR)
+ goto version_unknown;
+
+	/*
+	 * Fine-grained version info cannot be retrieved before tavil probe,
+	 * so assign coarse versions here for possible use before the probe.
+	 */
+ if (id_minor == cpu_to_le16(0))
+ version = TAVIL_VERSION_1_0;
+ else if (id_minor == cpu_to_le16(0x01))
+ version = TAVIL_VERSION_1_1;
+
+version_unknown:
+ if (version < 0)
+ dev_err(wcd9xxx->dev, "%s: wcd934x version unknown\n",
+ __func__);
+
+ /* Fill codec type info */
+ wcd_type->id_major = id_major;
+ wcd_type->id_minor = id_minor;
+ wcd_type->num_irqs = WCD934X_NUM_IRQS;
+ wcd_type->version = version;
+ wcd_type->slim_slave_type = WCD9XXX_SLIM_SLAVE_ADDR_TYPE_1;
+ wcd_type->i2c_chip_status = 0x01;
+ wcd_type->intr_tbl = wcd934x_intr_table;
+ wcd_type->intr_tbl_size = ARRAY_SIZE(wcd934x_intr_table);
+
+ wcd_type->intr_reg[WCD9XXX_INTR_STATUS_BASE] =
+ WCD934X_INTR_PIN1_STATUS0;
+ wcd_type->intr_reg[WCD9XXX_INTR_CLEAR_BASE] =
+ WCD934X_INTR_PIN1_CLEAR0;
+ wcd_type->intr_reg[WCD9XXX_INTR_MASK_BASE] =
+ WCD934X_INTR_PIN1_MASK0;
+ wcd_type->intr_reg[WCD9XXX_INTR_LEVEL_BASE] =
+ WCD934X_INTR_LEVEL0;
+ wcd_type->intr_reg[WCD9XXX_INTR_CLR_COMMIT] =
+ WCD934X_INTR_CLR_COMMIT;
+
+ return rc;
+}
+
+codec_bringdown_fn wcd9xxx_bringdown_fn(int type)
+{
+ codec_bringdown_fn cdc_bdown_fn;
+
+ switch (type) {
+ case WCD934X:
+ cdc_bdown_fn = wcd934x_bring_down;
+ break;
+ case WCD9335:
+ cdc_bdown_fn = wcd9335_bring_down;
+ break;
+ default:
+ cdc_bdown_fn = NULL;
+ break;
+ }
+
+ return cdc_bdown_fn;
+}
+
+codec_bringup_fn wcd9xxx_bringup_fn(int type)
+{
+ codec_bringup_fn cdc_bup_fn;
+
+ switch (type) {
+ case WCD934X:
+ cdc_bup_fn = wcd934x_bring_up;
+ break;
+ case WCD9335:
+ cdc_bup_fn = wcd9335_bring_up;
+ break;
+ default:
+ cdc_bup_fn = NULL;
+ break;
+ }
+
+ return cdc_bup_fn;
+}
+
+codec_type_fn wcd9xxx_get_codec_info_fn(int type)
+{
+ codec_type_fn cdc_type_fn;
+
+ switch (type) {
+ case WCD934X:
+ cdc_type_fn = wcd934x_get_cdc_info;
+ break;
+ case WCD9335:
+ cdc_type_fn = wcd9335_get_cdc_info;
+ break;
+ default:
+ cdc_type_fn = NULL;
+ break;
+ }
+
+ return cdc_type_fn;
+}
+
diff --git a/drivers/mfd/wcd9xxx-slimslave.c b/drivers/mfd/wcd9xxx-slimslave.c
index 8bf1404..a99ad5a 100644
--- a/drivers/mfd/wcd9xxx-slimslave.c
+++ b/drivers/mfd/wcd9xxx-slimslave.c
@@ -47,7 +47,18 @@
return 0;
}
-
+/**
+ * wcd9xxx_init_slimslave
+ *
+ * @wcd9xxx: pointer to wcd9xxx struct
+ * @wcd9xxx_pgd_la: pgd_la value
+ * @tx_num: tx number
+ * @rx_num: rx number
+ * @tx_slot: pointer to tx slot
+ * @rx_slot: pointer to rx slot
+ *
+ * Returns 0 on success, appropriate error code otherwise
+ */
int wcd9xxx_init_slimslave(struct wcd9xxx *wcd9xxx, u8 wcd9xxx_pgd_la,
unsigned int tx_num, unsigned int *tx_slot,
unsigned int rx_num, unsigned int *rx_slot)
@@ -117,6 +128,7 @@
err:
return ret;
}
+EXPORT_SYMBOL(wcd9xxx_init_slimslave);
int wcd9xxx_deinit_slimslave(struct wcd9xxx *wcd9xxx)
{
diff --git a/drivers/mfd/wcd9xxx-utils.c b/drivers/mfd/wcd9xxx-utils.c
index 8d3d4ad..f72585a 100644
--- a/drivers/mfd/wcd9xxx-utils.c
+++ b/drivers/mfd/wcd9xxx-utils.c
@@ -342,6 +342,19 @@
goto err_parse_dt_prop;
}
+ pdata->has_buck_vsel_gpio = of_property_read_bool(dev->of_node,
+ "qcom,has-buck-vsel-gpio");
+ if (pdata->has_buck_vsel_gpio) {
+ pdata->buck_vsel_ctl_np = of_parse_phandle(dev->of_node,
+ "qcom,buck-vsel-gpio-node", 0);
+ if (!pdata->buck_vsel_ctl_np) {
+ dev_err(dev, "%s No entry for %s property in node %s\n",
+ __func__, "qcom,buck-vsel-gpio-node",
+ dev->of_node->full_name);
+ goto err_parse_dt_prop;
+ }
+ }
+
if (!(wcd9xxx_read_of_property_u32(dev, "qcom,cdc-mclk-clk-rate",
&prop_val)))
pdata->mclk_rate = prop_val;
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index e203ba6..2e5da54 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -479,6 +479,15 @@
the genalloc API. It is supposed to be used for small on-chip SRAM
areas found on many SoCs.
+config HDCP_QSEECOM
+ tristate "QTI High-Bandwidth Digital Content Protection Module"
+ help
+ This module implements HDCP 2.2 features over external interfaces
+ such as the DisplayPort interface. It exposes APIs for the interface
+ driver to communicate with QTI Secure Execution Environment (QSEE)
+ via the QSEECOM Driver and also communicates with the Receiver via
+ APIs exposed by the interface driver.
+
config QSEECOM
tristate "QTI Secure Execution Communicator driver"
help
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index e1c6ae1..cfea4a5 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -49,6 +49,7 @@
obj-$(CONFIG_SRAM) += sram.o
obj-y += mic/
obj-$(CONFIG_GENWQE) += genwqe/
+obj-$(CONFIG_HDCP_QSEECOM) += hdcp.o
obj-$(CONFIG_QSEECOM) += qseecom.o
obj-$(CONFIG_ECHO) += echo/
obj-$(CONFIG_VEXPRESS_SYSCFG) += vexpress-syscfg.o
diff --git a/drivers/misc/c2port/c2port-duramar2150.c b/drivers/misc/c2port/c2port-duramar2150.c
index 5484301..3dc61ea 100644
--- a/drivers/misc/c2port/c2port-duramar2150.c
+++ b/drivers/misc/c2port/c2port-duramar2150.c
@@ -129,8 +129,8 @@
duramar2150_c2port_dev = c2port_device_register("uc",
&duramar2150_c2port_ops, NULL);
- if (!duramar2150_c2port_dev) {
- ret = -ENODEV;
+ if (IS_ERR(duramar2150_c2port_dev)) {
+ ret = PTR_ERR(duramar2150_c2port_dev);
goto free_region;
}
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index 65fed71..cc91f7b 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -375,6 +375,7 @@
struct device *dev)
{
struct enclosure_component *cdev;
+ int err;
if (!edev || component >= edev->components)
return -EINVAL;
@@ -384,12 +385,17 @@
if (cdev->dev == dev)
return -EEXIST;
- if (cdev->dev)
+ if (cdev->dev) {
enclosure_remove_links(cdev);
-
- put_device(cdev->dev);
+ put_device(cdev->dev);
+ }
cdev->dev = get_device(dev);
- return enclosure_add_links(cdev);
+ err = enclosure_add_links(cdev);
+ if (err) {
+ put_device(cdev->dev);
+ cdev->dev = NULL;
+ }
+ return err;
}
EXPORT_SYMBOL_GPL(enclosure_add_device);
diff --git a/drivers/misc/hdcp.c b/drivers/misc/hdcp.c
new file mode 100644
index 0000000..eab93cc
--- /dev/null
+++ b/drivers/misc/hdcp.c
@@ -0,0 +1,2549 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[hdcp-lib] %s: " fmt, __func__
+
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/uaccess.h>
+#include <linux/cdev.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/io.h>
+#include <linux/ion.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/errno.h>
+#include <linux/hdcp_qseecom.h>
+#include <linux/kthread.h>
+#include <linux/of.h>
+#include <video/msm_hdmi_hdcp_mgr.h>
+
+#include "qseecom_kernel.h"
+
+#define CLASS_NAME "hdcp"
+#define DRIVER_NAME "msm_hdcp"
+#define TZAPP_NAME "hdcp2p2"
+#define HDCP1_APP_NAME "hdcp1"
+#define QSEECOM_SBUFF_SIZE 0x1000
+
+#define MAX_TX_MESSAGE_SIZE 129
+#define MAX_RX_MESSAGE_SIZE 534
+#define MAX_TOPOLOGY_ELEMS 32
+#define HDCP1_AKSV_SIZE 8
+
+/* parameters related to LC_Init message */
+#define MESSAGE_ID_SIZE 1
+#define LC_INIT_MESSAGE_SIZE (MESSAGE_ID_SIZE+BITS_64_IN_BYTES)
+
+/* parameters related to SKE_Send_EKS message */
+#define SKE_SEND_EKS_MESSAGE_SIZE \
+ (MESSAGE_ID_SIZE+BITS_128_IN_BYTES+BITS_64_IN_BYTES)
+
+/* all message IDs */
+#define INVALID_MESSAGE_ID 0
+#define AKE_INIT_MESSAGE_ID 2
+#define AKE_SEND_CERT_MESSAGE_ID 3
+#define AKE_NO_STORED_KM_MESSAGE_ID 4
+#define AKE_STORED_KM_MESSAGE_ID 5
+#define AKE_SEND_H_PRIME_MESSAGE_ID 7
+#define AKE_SEND_PAIRING_INFO_MESSAGE_ID 8
+#define LC_INIT_MESSAGE_ID 9
+#define LC_SEND_L_PRIME_MESSAGE_ID 10
+#define SKE_SEND_EKS_MESSAGE_ID 11
+#define REPEATER_AUTH_SEND_RECEIVERID_LIST_MESSAGE_ID 12
+#define REPEATER_AUTH_SEND_ACK_MESSAGE_ID 15
+#define REPEATER_AUTH_STREAM_MANAGE_MESSAGE_ID 16
+#define REPEATER_AUTH_STREAM_READY_MESSAGE_ID 17
+#define SKE_SEND_TYPE_ID 18
+#define HDCP2P2_MAX_MESSAGES 19
+
+#define HDCP1_SET_KEY_MESSAGE_ID 202
+#define HDCP1_SET_ENC_MESSAGE_ID 205
+
+#define BITS_40_IN_BYTES 5
+#define BITS_64_IN_BYTES 8
+#define BITS_128_IN_BYTES 16
+#define RXCAPS_SIZE 3
+#define RXINFO_SIZE 2
+#define SEQ_NUM_V_SIZE 3
+
+#define RCVR_ID_SIZE BITS_40_IN_BYTES
+#define MAX_RCVR_IDS_ALLOWED_IN_LIST 31
+#define MAX_RCVR_ID_LIST_SIZE \
+ (RCVR_ID_SIZE * MAX_RCVR_IDS_ALLOWED_IN_LIST)
+/*
+ * Minimum wait as per standard is 200 ms. Keep it 220 ms
+ * to be on safe side.
+ */
+#define SLEEP_SET_HW_KEY_MS 220
+
+/* hdcp command status */
+#define HDCP_SUCCESS 0
+
+/* flags set by tz in response message */
+#define HDCP_TXMTR_SUBSTATE_WAITING_FOR_RECIEVERID_LIST 1
+
+#define HDCP_TXMTR_SERVICE_ID 0x0001000
+#define SERVICE_CREATE_CMD(x) (HDCP_TXMTR_SERVICE_ID | x)
+
+#define HDCP_TXMTR_INIT SERVICE_CREATE_CMD(1)
+#define HDCP_TXMTR_DEINIT SERVICE_CREATE_CMD(2)
+#define HDCP_TXMTR_PROCESS_RECEIVED_MESSAGE SERVICE_CREATE_CMD(3)
+#define HDCP_TXMTR_SEND_MESSAGE_TIMEOUT SERVICE_CREATE_CMD(4)
+#define HDCP_TXMTR_SET_HW_KEY SERVICE_CREATE_CMD(5)
+#define HDCP_TXMTR_QUERY_STREAM_TYPE SERVICE_CREATE_CMD(6)
+#define HDCP_LIB_INIT SERVICE_CREATE_CMD(11)
+#define HDCP_LIB_DEINIT SERVICE_CREATE_CMD(12)
+#define HDCP_TXMTR_GET_VERSION SERVICE_CREATE_CMD(14)
+#define HDCP_TXMTR_VERIFY_KEY SERVICE_CREATE_CMD(15)
+#define HDCP_SESSION_INIT SERVICE_CREATE_CMD(16)
+#define HDCP_SESSION_DEINIT SERVICE_CREATE_CMD(17)
+#define HDCP_TXMTR_START_AUTHENTICATE SERVICE_CREATE_CMD(18)
+
+#define HCDP_TXMTR_GET_MAJOR_VERSION(v) (((v) >> 16) & 0xFF)
+#define HCDP_TXMTR_GET_MINOR_VERSION(v) (((v) >> 8) & 0xFF)
+#define HCDP_TXMTR_GET_PATCH_VERSION(v) ((v) & 0xFF)
+
+#define HDCP_CLIENT_MAJOR_VERSION 2
+#define HDCP_CLIENT_MINOR_VERSION 1
+#define HDCP_CLIENT_PATCH_VERSION 0
+#define HDCP_CLIENT_MAKE_VERSION(maj, min, patch) \
+ ((((maj) & 0xFF) << 16) | (((min) & 0xFF) << 8) | ((patch) & 0xFF))
+
+#define REAUTH_REQ BIT(3)
+#define LINK_INTEGRITY_FAILURE BIT(4)
+
+#define HDCP_LIB_EXECUTE(x) {\
+ kthread_queue_work(&handle->worker, &handle->wk_##x);\
+}
+
+static const struct hdcp_msg_data hdcp_msg_lookup[HDCP2P2_MAX_MESSAGES] = {
+ [AKE_INIT_MESSAGE_ID] = { 2,
+ { {"rtx", 0x69000, 8}, {"TxCaps", 0x69008, 3} },
+ 0 },
+ [AKE_SEND_CERT_MESSAGE_ID] = { 3,
+ { {"cert-rx", 0x6900B, 522}, {"rrx", 0x69215, 8},
+ {"RxCaps", 0x6921D, 3} },
+ 0 },
+ [AKE_NO_STORED_KM_MESSAGE_ID] = { 1,
+ { {"Ekpub_km", 0x69220, 128} },
+ 0 },
+ [AKE_STORED_KM_MESSAGE_ID] = { 2,
+ { {"Ekh_km", 0x692A0, 16}, {"m", 0x692B0, 16} },
+ 0 },
+ [AKE_SEND_H_PRIME_MESSAGE_ID] = { 1,
+ { {"H'", 0x692C0, 32} },
+ (1 << 1) },
+ [AKE_SEND_PAIRING_INFO_MESSAGE_ID] = { 1,
+ { {"Ekh_km", 0x692E0, 16} },
+ (1 << 2) },
+ [LC_INIT_MESSAGE_ID] = { 1,
+ { {"rn", 0x692F0, 8} },
+ 0 },
+ [LC_SEND_L_PRIME_MESSAGE_ID] = { 1,
+ { {"L'", 0x692F8, 32} },
+ 0 },
+ [SKE_SEND_EKS_MESSAGE_ID] = { 2,
+ { {"Edkey_ks", 0x69318, 16}, {"riv", 0x69328, 8} },
+ 0 },
+ [SKE_SEND_TYPE_ID] = { 1,
+ { {"type", 0x69494, 1} },
+ 0 },
+ [REPEATER_AUTH_SEND_RECEIVERID_LIST_MESSAGE_ID] = { 4,
+ { {"RxInfo", 0x69330, 2}, {"seq_num_V", 0x69332, 3},
+ {"V'", 0x69335, 16}, {"ridlist", 0x69345, 155} },
+ (1 << 0) },
+ [REPEATER_AUTH_SEND_ACK_MESSAGE_ID] = { 1,
+ { {"V", 0x693E0, 16} },
+ 0 },
+ [REPEATER_AUTH_STREAM_MANAGE_MESSAGE_ID] = { 3,
+ { {"seq_num_M", 0x693F0, 3}, {"k", 0x693F3, 2},
+ {"streamID_Type", 0x693F5, 126} },
+ 0 },
+ [REPEATER_AUTH_STREAM_READY_MESSAGE_ID] = { 1,
+ { {"M'", 0x69473, 32} },
+ 0 }
+};
+
+enum hdcp_state {
+ HDCP_STATE_INIT = 0x00,
+ HDCP_STATE_APP_LOADED = 0x01,
+ HDCP_STATE_SESSION_INIT = 0x02,
+ HDCP_STATE_TXMTR_INIT = 0x04,
+ HDCP_STATE_AUTHENTICATED = 0x08,
+ HDCP_STATE_ERROR = 0x10
+};
+
+enum hdcp_element {
+ HDCP_TYPE_UNKNOWN,
+ HDCP_TYPE_RECEIVER,
+ HDCP_TYPE_REPEATER,
+};
+
+enum hdcp_version {
+ HDCP_VERSION_UNKNOWN,
+ HDCP_VERSION_2_2,
+ HDCP_VERSION_1_4
+};
+
+struct receiver_info {
+ unsigned char rcvrInfo[RCVR_ID_SIZE];
+ enum hdcp_element elem_type;
+ enum hdcp_version hdcp_version;
+};
+
+struct topology_info {
+ unsigned int nNumRcvrs;
+ struct receiver_info rcvinfo[MAX_TOPOLOGY_ELEMS];
+};
+
+struct __attribute__ ((__packed__)) hdcp1_key_set_req {
+ uint32_t commandid;
+};
+
+struct __attribute__ ((__packed__)) hdcp1_key_set_rsp {
+ uint32_t commandid;
+ uint32_t ret;
+ uint8_t ksv[HDCP1_AKSV_SIZE];
+};
+
+struct __attribute__ ((__packed__)) hdcp_version_req {
+ uint32_t commandid;
+};
+
+struct __attribute__ ((__packed__)) hdcp_version_rsp {
+ uint32_t commandid;
+ uint32_t commandId;
+ uint32_t appversion;
+};
+
+struct __attribute__ ((__packed__)) hdcp_verify_key_req {
+ uint32_t commandid;
+};
+
+struct __attribute__ ((__packed__)) hdcp_verify_key_rsp {
+ uint32_t status;
+ uint32_t commandId;
+};
+
+struct __attribute__ ((__packed__)) hdcp_lib_init_req_v1 {
+ uint32_t commandid;
+};
+
+struct __attribute__ ((__packed__)) hdcp_lib_init_rsp_v1 {
+ uint32_t status;
+ uint32_t commandid;
+ uint32_t ctxhandle;
+ uint32_t timeout;
+ uint32_t msglen;
+ uint8_t message[MAX_TX_MESSAGE_SIZE];
+};
+
+struct __attribute__ ((__packed__)) hdcp_lib_init_req {
+ uint32_t commandid;
+ uint32_t clientversion;
+};
+
+struct __attribute__ ((__packed__)) hdcp_lib_init_rsp {
+ uint32_t status;
+ uint32_t commandid;
+ uint32_t appversion;
+};
+
+struct __attribute__ ((__packed__)) hdcp_lib_deinit_req {
+ uint32_t commandid;
+};
+
+struct __attribute__ ((__packed__)) hdcp_lib_deinit_rsp {
+ uint32_t status;
+ uint32_t commandid;
+};
+
+struct __attribute__ ((__packed__)) hdcp_lib_session_init_req {
+ uint32_t commandid;
+ uint32_t deviceid;
+};
+
+struct __attribute__ ((__packed__)) hdcp_lib_session_init_rsp {
+ uint32_t status;
+ uint32_t commandid;
+ uint32_t sessionid;
+};
+
+struct __attribute__ ((__packed__)) hdcp_lib_session_deinit_req {
+ uint32_t commandid;
+ uint32_t sessionid;
+};
+
+struct __attribute__ ((__packed__)) hdcp_lib_session_deinit_rsp {
+ uint32_t status;
+ uint32_t commandid;
+};
+
+struct __attribute__ ((__packed__)) hdcp_tx_init_req_v1 {
+ uint32_t commandid;
+};
+
+struct __attribute__ ((__packed__)) hdcp_tx_init_rsp_v1 {
+ uint32_t status;
+ uint32_t commandid;
+ uint32_t ctxhandle;
+ uint32_t timeout;
+ uint32_t msglen;
+ uint8_t message[MAX_TX_MESSAGE_SIZE];
+};
+
+struct __attribute__ ((__packed__)) hdcp_tx_init_req {
+ uint32_t commandid;
+ uint32_t sessionid;
+};
+
+struct __attribute__ ((__packed__)) hdcp_tx_init_rsp {
+ uint32_t status;
+ uint32_t commandid;
+ uint32_t ctxhandle;
+};
+
+struct __attribute__ ((__packed__)) hdcp_deinit_req {
+ uint32_t commandid;
+ uint32_t ctxhandle;
+};
+
+struct __attribute__ ((__packed__)) hdcp_deinit_rsp {
+ uint32_t status;
+ uint32_t commandid;
+};
+
+struct __attribute__ ((__packed__)) hdcp_rcvd_msg_req {
+ uint32_t commandid;
+ uint32_t ctxhandle;
+ uint32_t msglen;
+ uint8_t msg[MAX_RX_MESSAGE_SIZE];
+};
+
+struct __attribute__ ((__packed__)) hdcp_rcvd_msg_rsp {
+ uint32_t status;
+ uint32_t commandid;
+ uint32_t state;
+ uint32_t timeout;
+ uint32_t flag;
+ uint32_t msglen;
+ uint8_t msg[MAX_TX_MESSAGE_SIZE];
+};
+
+struct __attribute__ ((__packed__)) hdcp_set_hw_key_req {
+ uint32_t commandid;
+ uint32_t ctxhandle;
+};
+
+struct __attribute__ ((__packed__)) hdcp_set_hw_key_rsp {
+ uint32_t status;
+ uint32_t commandid;
+};
+
+struct __attribute__ ((__packed__)) hdcp_send_timeout_req {
+ uint32_t commandid;
+ uint32_t ctxhandle;
+};
+
+struct __attribute__ ((__packed__)) hdcp_send_timeout_rsp {
+ uint32_t status;
+ uint32_t commandid;
+ uint32_t timeout;
+ uint32_t msglen;
+ uint8_t message[MAX_TX_MESSAGE_SIZE];
+};
+
+struct __attribute__ ((__packed__)) hdcp_query_stream_type_req {
+ uint32_t commandid;
+ uint32_t ctxhandle;
+};
+
+struct __attribute__ ((__packed__)) hdcp_query_stream_type_rsp {
+ uint32_t status;
+ uint32_t commandid;
+ uint32_t timeout;
+ uint32_t msglen;
+ uint8_t msg[MAX_TX_MESSAGE_SIZE];
+};
+
+struct __attribute__ ((__packed__)) hdcp_set_stream_type_req {
+ uint32_t commandid;
+ uint32_t ctxhandle;
+ uint8_t streamtype;
+};
+
+struct __attribute__ ((__packed__)) hdcp_set_stream_type_rsp {
+ uint32_t status;
+ uint32_t commandid;
+ uint32_t timeout;
+ uint32_t msglen;
+ uint8_t message[MAX_TX_MESSAGE_SIZE];
+};
+
+struct __attribute__ ((__packed__)) hdcp_update_srm_req {
+ uint32_t commandid;
+ uint32_t ctxhandle;
+ uint32_t srmoffset;
+ uint32_t srmlength;
+};
+
+struct __attribute__ ((__packed__)) hdcp_update_srm_rsp {
+ uint32_t status;
+ uint32_t commandid;
+};
+
+struct __attribute__ ((__packed__)) hdcp_get_topology_req {
+ uint32_t commandid;
+ uint32_t ctxhandle;
+};
+
+struct __attribute__ ((__packed__)) hdcp_get_topology_rsp {
+ uint32_t status;
+ uint32_t commandid;
+ struct topology_info topologyinfo;
+};
+
+struct __attribute__ ((__packed__)) rxvr_info_struct {
+ uint8_t rcvrCert[522];
+ uint8_t rrx[BITS_64_IN_BYTES];
+ uint8_t rxcaps[RXCAPS_SIZE];
+ bool repeater;
+};
+
+struct __attribute__ ((__packed__)) repeater_info_struct {
+ uint8_t RxInfo[RXINFO_SIZE];
+ uint8_t seq_num_V[SEQ_NUM_V_SIZE];
+ bool seq_num_V_Rollover_flag;
+ uint8_t ReceiverIDList[MAX_RCVR_ID_LIST_SIZE];
+ uint32_t ReceiverIDListLen;
+};
+
+struct __attribute__ ((__packed__)) hdcp1_set_enc_req {
+ uint32_t commandid;
+ uint32_t enable;
+};
+
+struct __attribute__ ((__packed__)) hdcp1_set_enc_rsp {
+ uint32_t commandid;
+ uint32_t ret;
+};
+
+struct __attribute__ ((__packed__)) hdcp_start_auth_req {
+ uint32_t commandid;
+ uint32_t ctxHandle;
+};
+
+struct __attribute__ ((__packed__)) hdcp_start_auth_rsp {
+ uint32_t status;
+ uint32_t commandid;
+ uint32_t ctxhandle;
+ uint32_t timeout;
+ uint32_t msglen;
+ uint8_t message[MAX_TX_MESSAGE_SIZE];
+};
+
+/*
+ * Per-client state for one HDCP 2.2 library session.
+ * Client entry points are serialized by wakeup_mutex; the last received
+ * message buffer is protected by msg_lock.  All protocol work runs on the
+ * dedicated kthread worker below.
+ */
+struct hdcp_lib_handle {
+	unsigned char *listener_buf;	/* staging buffer for outgoing msgs */
+	uint32_t msglen;		/* valid bytes in listener_buf */
+	uint32_t tz_ctxhandle;		/* TZ transmitter context handle */
+	uint32_t hdcp_timeout;		/* timeout advertised by TZ */
+	uint32_t timeout_left;		/* remaining timeout from client */
+	uint32_t wait_timeout;		/* jiffies to arm wk_wait; 0 = skip */
+	bool no_stored_km_flag;		/* receiver has no stored km */
+	bool feature_supported;		/* cached HDCP2p2 key-check result */
+	bool authenticated;		/* link reached authenticated state */
+	void *client_ctx;		/* opaque client context for wakeups */
+	struct hdcp_client_ops *client_ops;
+	struct mutex msg_lock;		/* guards last_msg_recvd_buf/len */
+	struct mutex wakeup_mutex;	/* serializes hdcp_lib_wakeup_thread */
+	enum hdcp_state hdcp_state;	/* bitmask of HDCP_STATE_* flags */
+	enum hdcp_lib_wakeup_cmd wakeup_cmd;	/* command being processed */
+	bool repeater_flag;		/* downstream device is a repeater */
+	bool update_stream;		/* stream-manage pending after ack */
+	struct qseecom_handle *qseecom_handle;	/* TZ app channel + sbuf */
+	int last_msg_sent;		/* id of last message handed to client */
+	int last_msg;			/* id of last protocol message */
+	char *last_msg_recvd_buf;	/* copy of last message from client */
+	uint32_t last_msg_recvd_len;
+	atomic_t hdcp_off;		/* nonzero while tearing down */
+	uint32_t session_id;		/* TZ session id */
+	enum hdcp_device_type device_type;
+
+	struct task_struct *thread;	/* kthread backing the worker */
+	struct completion poll_wait;	/* wakes wk_wait early */
+
+	/* One work item per wakeup command; see hdcp_lib_wakeup_thread(). */
+	struct kthread_worker worker;
+	struct kthread_work wk_init;
+	struct kthread_work wk_msg_sent;
+	struct kthread_work wk_msg_recvd;
+	struct kthread_work wk_timeout;
+	struct kthread_work wk_clean;
+	struct kthread_work wk_wait;
+	struct kthread_work wk_stream;
+
+	/* Indirect init hooks, bound in hdcp_lib_library_load(). */
+	int (*hdcp_app_init)(struct hdcp_lib_handle *handle);
+	int (*hdcp_txmtr_init)(struct hdcp_lib_handle *handle);
+};
+
+/* Maps an HDCP 2.2 message id to a printable name (for logging only). */
+struct hdcp_lib_message_map {
+	int msg_id;
+	const char *msg_name;
+};
+
+/* Driver-level state: character device plus the registered library handle. */
+struct msm_hdcp_mgr {
+	struct platform_device *pdev;
+	dev_t dev_num;
+	struct cdev cdev;
+	struct class *class;
+	struct device *device;
+	struct HDCP_V2V1_MSG_TOPOLOGY cached_tp;	/* last topology seen */
+	u32 tp_msgid;
+	void *client_ctx;
+	struct hdcp_lib_handle *handle;
+};
+
+/* Singleton driver/management state. */
+static struct msm_hdcp_mgr *hdcp_drv_mgr;
+static struct hdcp_lib_handle *drv_client_handle;
+
+/* Forward declarations for the kthread work handlers defined below. */
+static void hdcp_lib_clean(struct hdcp_lib_handle *handle);
+static void hdcp_lib_init(struct hdcp_lib_handle *handle);
+static void hdcp_lib_msg_sent(struct hdcp_lib_handle *handle);
+static void hdcp_lib_msg_recvd(struct hdcp_lib_handle *handle);
+static void hdcp_lib_timeout(struct hdcp_lib_handle *handle);
+static void hdcp_lib_stream(struct hdcp_lib_handle *handle);
+static int hdcp_lib_txmtr_init(struct hdcp_lib_handle *handle);
+
+/* HDCP 1.x state (separate TZ app). */
+static struct qseecom_handle *hdcp1_handle;
+static bool hdcp1_supported = true;
+static bool hdcp1_enc_enabled;
+/*
+ * NOTE(review): no DEFINE_MUTEX()/mutex_init() for this lock is visible in
+ * this chunk -- confirm it is initialized before first use.
+ */
+static struct mutex hdcp1_ta_cmd_lock;
+
+/*
+ * Return a printable name for an HDCP 2.2 protocol message id,
+ * or "UNKNOWN" if the id is not in the map.  Used for logging only.
+ */
+static const char *hdcp_lib_message_name(int msg_id)
+{
+	/*
+	 * Message ID map. The first number indicates the message number
+	 * assigned to the message by the HDCP 2.2 spec. This is also the first
+	 * byte of every HDCP 2.2 authentication protocol message.
+	 */
+	static struct hdcp_lib_message_map hdcp_lib_msg_map[] = {
+		{2, "AKE_INIT"},
+		{3, "AKE_SEND_CERT"},
+		{4, "AKE_NO_STORED_KM"},
+		{5, "AKE_STORED_KM"},
+		{7, "AKE_SEND_H_PRIME"},
+		{8, "AKE_SEND_PAIRING_INFO"},
+		{9, "LC_INIT"},
+		{10, "LC_SEND_L_PRIME"},
+		{11, "SKE_SEND_EKS"},
+		{12, "REPEATER_AUTH_SEND_RECEIVERID_LIST"},
+		{15, "REPEATER_AUTH_SEND_ACK"},
+		{16, "REPEATER_AUTH_STREAM_MANAGE"},
+		{17, "REPEATER_AUTH_STREAM_READY"},
+		{18, "SKE_SEND_TYPE_ID"},
+	};
+	int i;
+
+	/* Linear scan is fine: the table is tiny and this is a debug path. */
+	for (i = 0; i < ARRAY_SIZE(hdcp_lib_msg_map); i++) {
+		if (msg_id == hdcp_lib_msg_map[i].msg_id)
+			return hdcp_lib_msg_map[i].msg_name;
+	}
+	return "UNKNOWN";
+}
+
+/*
+ * Given the last protocol message exchanged, return the id of the next
+ * message expected by the HDCP 2.2 state machine, or INVALID_MESSAGE_ID
+ * when the exchange is complete, or -EINVAL on an unrecognized state.
+ *
+ * For repeaters, data->cmd distinguishes the stream-manage path (we are
+ * about to send) from the receiver-id-list path (we are about to receive).
+ */
+static int hdcp_lib_get_next_message(struct hdcp_lib_handle *handle,
+				     struct hdcp_wakeup_data *data)
+{
+	switch (handle->last_msg) {
+	case INVALID_MESSAGE_ID:
+		return AKE_INIT_MESSAGE_ID;
+	case AKE_INIT_MESSAGE_ID:
+		return AKE_SEND_CERT_MESSAGE_ID;
+	case AKE_SEND_CERT_MESSAGE_ID:
+		if (handle->no_stored_km_flag)
+			return AKE_NO_STORED_KM_MESSAGE_ID;
+		else
+			return AKE_STORED_KM_MESSAGE_ID;
+	case AKE_STORED_KM_MESSAGE_ID:
+	case AKE_NO_STORED_KM_MESSAGE_ID:
+		return AKE_SEND_H_PRIME_MESSAGE_ID;
+	case AKE_SEND_H_PRIME_MESSAGE_ID:
+		if (handle->no_stored_km_flag)
+			return AKE_SEND_PAIRING_INFO_MESSAGE_ID;
+		else
+			return LC_INIT_MESSAGE_ID;
+	case AKE_SEND_PAIRING_INFO_MESSAGE_ID:
+		return LC_INIT_MESSAGE_ID;
+	case LC_INIT_MESSAGE_ID:
+		return LC_SEND_L_PRIME_MESSAGE_ID;
+	case LC_SEND_L_PRIME_MESSAGE_ID:
+		return SKE_SEND_EKS_MESSAGE_ID;
+	case SKE_SEND_EKS_MESSAGE_ID:
+		if (!handle->repeater_flag)
+			return SKE_SEND_TYPE_ID;
+		/* fallthrough: a repeater continues with the repeater flow */
+	case SKE_SEND_TYPE_ID:
+	case REPEATER_AUTH_STREAM_READY_MESSAGE_ID:
+	case REPEATER_AUTH_SEND_ACK_MESSAGE_ID:
+		if (!handle->repeater_flag)
+			return INVALID_MESSAGE_ID;
+
+		if (data->cmd == HDCP_WKUP_CMD_SEND_MESSAGE)
+			return REPEATER_AUTH_STREAM_MANAGE_MESSAGE_ID;
+		else
+			return REPEATER_AUTH_SEND_RECEIVERID_LIST_MESSAGE_ID;
+	case REPEATER_AUTH_SEND_RECEIVERID_LIST_MESSAGE_ID:
+		return REPEATER_AUTH_SEND_ACK_MESSAGE_ID;
+	case REPEATER_AUTH_STREAM_MANAGE_MESSAGE_ID:
+		return REPEATER_AUTH_STREAM_READY_MESSAGE_ID;
+	default:
+		/* fix: "Uknown" typo and missing newline in the log */
+		pr_err("Unknown message ID (%d)\n", handle->last_msg);
+		return -EINVAL;
+	}
+}
+
+/*
+ * Arm the wk_wait watchdog after handing a message to the client, with a
+ * per-message timeout (in jiffies).  A zero timeout means no watchdog is
+ * needed for that message.
+ */
+static void hdcp_lib_wait_for_response(struct hdcp_lib_handle *handle,
+				       struct hdcp_wakeup_data *data)
+{
+	switch (handle->last_msg) {
+	case AKE_SEND_H_PRIME_MESSAGE_ID:
+		/* no-stored-km exchanges are allowed a longer round trip */
+		if (handle->no_stored_km_flag)
+			handle->wait_timeout = HZ;
+		else
+			handle->wait_timeout = HZ / 4;
+		break;
+	case AKE_SEND_PAIRING_INFO_MESSAGE_ID:
+		handle->wait_timeout = HZ / 4;
+		break;
+	case REPEATER_AUTH_SEND_RECEIVERID_LIST_MESSAGE_ID:
+		/* only watchdog the list during initial authentication */
+		if (!handle->authenticated)
+			handle->wait_timeout = HZ * 3;
+		else
+			handle->wait_timeout = 0;
+		break;
+	default:
+		handle->wait_timeout = 0;
+	}
+
+	if (handle->wait_timeout)
+		kthread_queue_work(&handle->worker, &handle->wk_wait);
+}
+
+/*
+ * Deliver a wakeup command to the client (display driver), advancing
+ * last_msg first for receive/poll commands, attaching the message layout
+ * for protocol messages, and finally arming the response watchdog.
+ * Silently ignores invalid handles/commands.
+ */
+static void hdcp_lib_wakeup_client(struct hdcp_lib_handle *handle,
+				   struct hdcp_wakeup_data *data)
+{
+	int rc = 0, i;
+
+	if (!handle || !handle->client_ops || !handle->client_ops->wakeup ||
+	    !data || (data->cmd == HDCP_WKUP_CMD_INVALID))
+		return;
+
+	data->abort_mask = REAUTH_REQ | LINK_INTEGRITY_FAILURE;
+
+	/* about to receive or poll: compute which message comes next */
+	if (data->cmd == HDCP_WKUP_CMD_RECV_MESSAGE ||
+	    data->cmd == HDCP_WKUP_CMD_LINK_POLL)
+		handle->last_msg = hdcp_lib_get_next_message(handle, data);
+
+	if (handle->last_msg != INVALID_MESSAGE_ID &&
+	    data->cmd != HDCP_WKUP_CMD_STATUS_SUCCESS &&
+	    data->cmd != HDCP_WKUP_CMD_STATUS_FAILED) {
+		u32 msg_num, rx_status;
+		const struct hdcp_msg_part *msg;
+
+		pr_debug("lib->client: %s (%s)\n",
+			 hdcp_cmd_to_str(data->cmd),
+			 hdcp_lib_message_name(handle->last_msg));
+
+		/* attach the DDC layout of the message for the client */
+		data->message_data = &hdcp_msg_lookup[handle->last_msg];
+
+		msg_num = data->message_data->num_messages;
+		msg = data->message_data->messages;
+		rx_status = data->message_data->rx_status;
+
+		pr_debug("%10s | %6s | %4s\n", "name", "offset", "len");
+
+		for (i = 0; i < msg_num; i++)
+			pr_debug("%10s | %6x | %4d\n",
+				 msg[i].name, msg[i].offset,
+				 msg[i].length);
+	} else {
+		pr_debug("lib->client: %s\n", hdcp_cmd_to_str(data->cmd));
+	}
+
+	rc = handle->client_ops->wakeup(data);
+	if (rc)
+		pr_err("error sending %s to client\n",
+		       hdcp_cmd_to_str(data->cmd));
+
+	hdcp_lib_wait_for_response(handle, data);
+}
+
+/*
+ * Hand the staged message in listener_buf to the client for transmission,
+ * hex-dumping it first for debugging.  The message id is the first byte of
+ * the buffer.
+ */
+static inline void hdcp_lib_send_message(struct hdcp_lib_handle *handle)
+{
+	char msg_name[50];
+	struct hdcp_wakeup_data cdata = {
+		HDCP_WKUP_CMD_SEND_MESSAGE
+	};
+
+	cdata.context = handle->client_ctx;
+	cdata.send_msg_buf = handle->listener_buf;
+	cdata.send_msg_len = handle->msglen;
+	cdata.timeout = handle->hdcp_timeout;
+
+	snprintf(msg_name, sizeof(msg_name), "%s: ",
+		 hdcp_lib_message_name((int)cdata.send_msg_buf[0]));
+
+	print_hex_dump(KERN_DEBUG, msg_name,
+		       DUMP_PREFIX_NONE, 16, 1, cdata.send_msg_buf,
+		       cdata.send_msg_len, false);
+
+	hdcp_lib_wakeup_client(handle, &cdata);
+}
+
+/*
+ * Ask TZ to program the HDCP hardware key, moving the session to the
+ * authenticated state.  On failure, schedules the clean work unless HDCP
+ * is already being torn down.  Returns 0 on success, -EINVAL on failure.
+ */
+static int hdcp_lib_enable_encryption(struct hdcp_lib_handle *handle)
+{
+	int rc = 0;
+	struct hdcp_set_hw_key_req *req_buf;
+	struct hdcp_set_hw_key_rsp *rsp_buf;
+
+	if (!handle || !handle->qseecom_handle ||
+	    !handle->qseecom_handle->sbuf) {
+		pr_err("invalid handle\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	/*
+	 * wait at least 200ms before enabling encryption
+	 * as per hdcp2p2 specifications.
+	 */
+	msleep(SLEEP_SET_HW_KEY_MS);
+
+	/* request and response share the qseecom scratch buffer */
+	req_buf = (struct hdcp_set_hw_key_req *)(handle->qseecom_handle->sbuf);
+	req_buf->commandid = HDCP_TXMTR_SET_HW_KEY;
+	req_buf->ctxhandle = handle->tz_ctxhandle;
+
+	rsp_buf = (struct hdcp_set_hw_key_rsp *)
+	    (handle->qseecom_handle->sbuf +
+	     QSEECOM_ALIGN(sizeof(struct hdcp_set_hw_key_req)));
+
+	rc = qseecom_send_command(handle->qseecom_handle, req_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct hdcp_set_hw_key_req)),
+				  rsp_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct hdcp_set_hw_key_rsp)));
+
+	/*
+	 * NOTE(review): if hdcp_set_hw_key_rsp.status is unsigned (other rsp
+	 * structs here use uint32_t), "status < 0" can never fire -- confirm
+	 * the field type or compare against HDCP_SUCCESS instead.
+	 */
+	if ((rc < 0) || (rsp_buf->status < 0)) {
+		pr_err("qseecom cmd failed with err = %d status = %d\n",
+		       rc, rsp_buf->status);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	/* reached an authenticated state */
+	handle->hdcp_state |= HDCP_STATE_AUTHENTICATED;
+
+	pr_debug("success\n");
+	return 0;
+error:
+	if (handle && !atomic_read(&handle->hdcp_off))
+		HDCP_LIB_EXECUTE(clean);
+
+	return rc;
+}
+
+/*
+ * Query the TZ hdcp2p2 app for its version and log it.
+ * Requires the app to be loaded (HDCP_STATE_APP_LOADED).
+ * Returns 0 on success, negative errno on failure.
+ */
+static int hdcp_lib_get_version(struct hdcp_lib_handle *handle)
+{
+	int rc = 0;
+	struct hdcp_version_req *req_buf;
+	struct hdcp_version_rsp *rsp_buf;
+	uint32_t app_major_version = 0;
+
+	/* also guard the sbuf dereference, as the sibling helpers do */
+	if (!handle || !handle->qseecom_handle ||
+	    !handle->qseecom_handle->sbuf) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+		pr_err("library not loaded\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	/* get the TZ hdcp2p2 app version */
+	req_buf = (struct hdcp_version_req *)handle->qseecom_handle->sbuf;
+	req_buf->commandid = HDCP_TXMTR_GET_VERSION;
+
+	rsp_buf = (struct hdcp_version_rsp *)
+	    (handle->qseecom_handle->sbuf +
+	     QSEECOM_ALIGN(sizeof(struct hdcp_version_req)));
+
+	/*
+	 * fix: pass the sizes of the version req/rsp structs; the original
+	 * passed sizeof(hdcp_lib_init_req/rsp), which does not match the
+	 * buffers laid out above.
+	 */
+	rc = qseecom_send_command(handle->qseecom_handle,
+				  req_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct hdcp_version_req)),
+				  rsp_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct hdcp_version_rsp)));
+
+	if (rc < 0) {
+		pr_err("qseecom cmd failed err = %d\n", rc);
+		goto exit;
+	}
+
+	app_major_version = HCDP_TXMTR_GET_MAJOR_VERSION(rsp_buf->appversion);
+
+	pr_debug("hdcp2p2 app major version %d, app version %d\n",
+		 app_major_version, rsp_buf->appversion);
+
+exit:
+	return rc;
+}
+
+/*
+ * Ask TZ to verify the provisioned HDCP 2.2 keys.
+ * Returns the TZ status code (0 meaning keys are valid) on a successful
+ * command, or a negative errno on failure.
+ */
+static int hdcp_lib_verify_keys(struct hdcp_lib_handle *handle)
+{
+	int rc = -EINVAL;
+	struct hdcp_verify_key_req *req_buf;
+	struct hdcp_verify_key_rsp *rsp_buf;
+
+	if (!handle) {
+		pr_err("invalid input\n");
+		goto exit;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+		pr_err("app not loaded\n");
+		goto exit;
+	}
+
+	/* request and response share the qseecom scratch buffer */
+	req_buf = (struct hdcp_verify_key_req *)handle->qseecom_handle->sbuf;
+	req_buf->commandid = HDCP_TXMTR_VERIFY_KEY;
+
+	rsp_buf = (struct hdcp_verify_key_rsp *)
+	    (handle->qseecom_handle->sbuf +
+	     QSEECOM_ALIGN(sizeof(struct hdcp_verify_key_req)));
+
+	rc = qseecom_send_command(handle->qseecom_handle,
+				  req_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct hdcp_verify_key_req)),
+				  rsp_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct hdcp_verify_key_rsp)));
+
+	if (rc < 0) {
+		pr_err("qseecom cmd failed err = %d\n", rc);
+		goto exit;
+	}
+
+	/* propagate the TZ verdict to the caller */
+	return rsp_buf->status;
+exit:
+	return rc;
+}
+
+/*
+ * Send HDCP_LIB_INIT to the (already started) TZ app, exchanging client
+ * and app versions.  Fails if the app's minor version does not match the
+ * client's.  Returns 0 on success, negative errno on failure.
+ */
+static int hdcp_app_init(struct hdcp_lib_handle *handle)
+{
+	int rc = 0;
+	struct hdcp_lib_init_req *req_buf;
+	struct hdcp_lib_init_rsp *rsp_buf;
+	uint32_t app_minor_version = 0;
+
+	if (!handle) {
+		pr_err("invalid input\n");
+		/* fix: was falling through with rc == 0 (silent success) */
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+		pr_err("library not loaded\n");
+		/* fix: was falling through with rc == 0 (silent success) */
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	/* now load the app by sending hdcp_lib_init */
+	req_buf = (struct hdcp_lib_init_req *)handle->qseecom_handle->sbuf;
+	req_buf->commandid = HDCP_LIB_INIT;
+	req_buf->clientversion =
+	    HDCP_CLIENT_MAKE_VERSION(HDCP_CLIENT_MAJOR_VERSION,
+				     HDCP_CLIENT_MINOR_VERSION,
+				     HDCP_CLIENT_PATCH_VERSION);
+	rsp_buf = (struct hdcp_lib_init_rsp *)
+	    (handle->qseecom_handle->sbuf +
+	     QSEECOM_ALIGN(sizeof(struct hdcp_lib_init_req)));
+
+	rc = qseecom_send_command(handle->qseecom_handle,
+				  req_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct hdcp_lib_init_req)),
+				  rsp_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct hdcp_lib_init_rsp)));
+
+	if (rc < 0) {
+		pr_err("qseecom cmd failed err = %d\n", rc);
+		goto exit;
+	}
+
+	app_minor_version = HCDP_TXMTR_GET_MINOR_VERSION(rsp_buf->appversion);
+	if (app_minor_version != HDCP_CLIENT_MINOR_VERSION) {
+		pr_err
+		    ("client-app minor version mismatch app(%d), client(%d)\n",
+		     app_minor_version, HDCP_CLIENT_MINOR_VERSION);
+		/* fix: return a proper errno instead of the bare -1 */
+		rc = -EINVAL;
+		goto exit;
+	}
+	pr_debug("success\n");
+	pr_debug("client version major(%d), minor(%d), patch(%d)\n",
+		 HDCP_CLIENT_MAJOR_VERSION, HDCP_CLIENT_MINOR_VERSION,
+		 HDCP_CLIENT_PATCH_VERSION);
+	pr_debug("app version major(%d), minor(%d), patch(%d)\n",
+		 HCDP_TXMTR_GET_MAJOR_VERSION(rsp_buf->appversion),
+		 HCDP_TXMTR_GET_MINOR_VERSION(rsp_buf->appversion),
+		 HCDP_TXMTR_GET_PATCH_VERSION(rsp_buf->appversion));
+
+exit:
+	return rc;
+}
+
+/*
+ * Start the TZ hdcp2p2 app, query its version, bind the init hooks on the
+ * handle, and run the app-level init.  On success HDCP_STATE_APP_LOADED
+ * is set.  Returns 0 on success, negative errno on failure.
+ */
+static int hdcp_lib_library_load(struct hdcp_lib_handle *handle)
+{
+	int rc = 0;
+
+	if (!handle) {
+		pr_err("invalid input\n");
+		/* fix: was returning 0 (success) on a NULL handle */
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (handle->hdcp_state & HDCP_STATE_APP_LOADED) {
+		pr_err("library already loaded\n");
+		goto exit;
+	}
+
+	/*
+	 * allocating resource for qseecom handle
+	 * the app is not loaded here
+	 */
+	rc = qseecom_start_app(&(handle->qseecom_handle),
+			       TZAPP_NAME, QSEECOM_SBUFF_SIZE);
+	if (rc) {
+		pr_err("qseecom_start_app failed %d\n", rc);
+		goto exit;
+	}
+
+	handle->hdcp_state |= HDCP_STATE_APP_LOADED;
+	pr_debug("qseecom_start_app success\n");
+
+	rc = hdcp_lib_get_version(handle);
+	if (rc) {
+		pr_err("library get version failed\n");
+		goto exit;
+	}
+
+	handle->hdcp_app_init = hdcp_app_init;
+	handle->hdcp_txmtr_init = hdcp_lib_txmtr_init;
+
+	/* defensive check of the hook bound just above */
+	if (handle->hdcp_app_init == NULL) {
+		pr_err("invalid app init function pointer\n");
+		/* fix: was returning 0 (success) despite the error */
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	rc = handle->hdcp_app_init(handle);
+	if (rc) {
+		pr_err("app init failed\n");
+		goto exit;
+	}
+exit:
+	return rc;
+}
+
+/*
+ * Deinitialize and shut down the TZ hdcp2p2 app, clearing
+ * HDCP_STATE_APP_LOADED.  A no-op (returning 0) if the app is not loaded.
+ * Returns 0 on success, negative errno on failure.
+ */
+static int hdcp_lib_library_unload(struct hdcp_lib_handle *handle)
+{
+	int rc = 0;
+	struct hdcp_lib_deinit_req *req_buf;
+	struct hdcp_lib_deinit_rsp *rsp_buf;
+
+	if (!handle || !handle->qseecom_handle ||
+	    !handle->qseecom_handle->sbuf) {
+		pr_err("invalid handle\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+		pr_err("library not loaded\n");
+		return rc;
+	}
+
+	/* unloading app by sending hdcp_lib_deinit cmd */
+	req_buf = (struct hdcp_lib_deinit_req *)handle->qseecom_handle->sbuf;
+	req_buf->commandid = HDCP_LIB_DEINIT;
+	rsp_buf = (struct hdcp_lib_deinit_rsp *)
+	    (handle->qseecom_handle->sbuf +
+	     QSEECOM_ALIGN(sizeof(struct hdcp_lib_deinit_req)));
+
+	rc = qseecom_send_command(handle->qseecom_handle,
+				  req_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct hdcp_lib_deinit_req)),
+				  rsp_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct hdcp_lib_deinit_rsp)));
+
+	if (rc < 0) {
+		pr_err("qseecom cmd failed err = %d\n", rc);
+		goto exit;
+	}
+
+	/* deallocate the resources for qseecom handle */
+	rc = qseecom_shutdown_app(&handle->qseecom_handle);
+	if (rc) {
+		pr_err("qseecom_shutdown_app failed err: %d\n", rc);
+		goto exit;
+	}
+
+	handle->hdcp_state &= ~HDCP_STATE_APP_LOADED;
+	pr_debug("success\n");
+exit:
+	return rc;
+}
+
+/*
+ * Send HDCP_SESSION_INIT to TZ for this handle's device type and record
+ * the returned session id, setting HDCP_STATE_SESSION_INIT.
+ * Requires the app to be loaded and no session to exist yet (those
+ * precondition failures log an error but return 0).
+ * Returns 0 on success, -EINVAL on a failed TZ command.
+ */
+static int hdcp_lib_session_init(struct hdcp_lib_handle *handle)
+{
+	int rc = 0;
+	struct hdcp_lib_session_init_req *req_buf;
+	struct hdcp_lib_session_init_rsp *rsp_buf;
+
+	if (!handle || !handle->qseecom_handle ||
+	    !handle->qseecom_handle->sbuf) {
+		pr_err("invalid handle\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+		pr_err("app not loaded\n");
+		goto exit;
+	}
+
+	if (handle->hdcp_state & HDCP_STATE_SESSION_INIT) {
+		pr_err("session already initialized\n");
+		goto exit;
+	}
+
+	/* send HDCP_Session_Init command to TZ */
+	req_buf =
+	    (struct hdcp_lib_session_init_req *)handle->qseecom_handle->sbuf;
+	req_buf->commandid = HDCP_SESSION_INIT;
+	req_buf->deviceid = handle->device_type;
+	rsp_buf = (struct hdcp_lib_session_init_rsp *)
+	    (handle->qseecom_handle->sbuf +
+	     QSEECOM_ALIGN(sizeof(struct hdcp_lib_session_init_req)));
+
+	rc = qseecom_send_command(handle->qseecom_handle, req_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct
+						 hdcp_lib_session_init_req)),
+				  rsp_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct
+						 hdcp_lib_session_init_rsp)));
+
+	/* response must match the command and report HDCP_SUCCESS */
+	if ((rc < 0) || (rsp_buf->status != HDCP_SUCCESS) ||
+	    (rsp_buf->commandid != HDCP_SESSION_INIT)) {
+		pr_err("qseecom cmd failed with err = %d, status = %d\n",
+		       rc, rsp_buf->status);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	pr_debug("session id %d\n", rsp_buf->sessionid);
+
+	handle->session_id = rsp_buf->sessionid;
+	handle->hdcp_state |= HDCP_STATE_SESSION_INIT;
+
+	pr_debug("success\n");
+exit:
+	return rc;
+}
+
+/*
+ * Send HDCP_SESSION_DEINIT to TZ for the current session id and clear
+ * HDCP_STATE_SESSION_INIT.  Precondition failures (app not loaded, no
+ * session) log an error but return 0 so teardown can proceed.
+ * Returns 0 on success, -EINVAL on a failed TZ command.
+ */
+static int hdcp_lib_session_deinit(struct hdcp_lib_handle *handle)
+{
+	int rc = 0;
+	struct hdcp_lib_session_deinit_req *req_buf;
+	struct hdcp_lib_session_deinit_rsp *rsp_buf;
+
+	if (!handle || !handle->qseecom_handle ||
+	    !handle->qseecom_handle->sbuf) {
+		pr_err("invalid handle\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+		pr_err("app not loaded\n");
+		goto exit;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_SESSION_INIT)) {
+		/* unload library here */
+		pr_err("session not initialized\n");
+		goto exit;
+	}
+
+	/* send command to TZ */
+	req_buf =
+	    (struct hdcp_lib_session_deinit_req *)handle->qseecom_handle->sbuf;
+	req_buf->commandid = HDCP_SESSION_DEINIT;
+	req_buf->sessionid = handle->session_id;
+	rsp_buf = (struct hdcp_lib_session_deinit_rsp *)
+	    (handle->qseecom_handle->sbuf +
+	     QSEECOM_ALIGN(sizeof(struct hdcp_lib_session_deinit_req)));
+
+	rc = qseecom_send_command(handle->qseecom_handle, req_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct
+						 hdcp_lib_session_deinit_req)),
+				  rsp_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct
+						 hdcp_lib_session_deinit_rsp)));
+
+	if ((rc < 0) || (rsp_buf->status < 0) ||
+	    (rsp_buf->commandid != HDCP_SESSION_DEINIT)) {
+		pr_err("qseecom cmd failed with err = %d status = %d\n",
+		       rc, rsp_buf->status);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	handle->hdcp_state &= ~HDCP_STATE_SESSION_INIT;
+	pr_debug("success\n");
+exit:
+	return rc;
+}
+
+/*
+ * Send HDCP_TXMTR_INIT to TZ for the current session, recording the
+ * returned transmitter context handle and setting HDCP_STATE_TXMTR_INIT.
+ * Precondition failures (no session, app not loaded) log but return 0.
+ * Returns 0 on success, -EINVAL on a failed TZ command.
+ */
+static int hdcp_lib_txmtr_init(struct hdcp_lib_handle *handle)
+{
+	int rc = 0;
+	struct hdcp_tx_init_req *req_buf;
+	struct hdcp_tx_init_rsp *rsp_buf;
+
+	if (!handle || !handle->qseecom_handle ||
+	    !handle->qseecom_handle->sbuf) {
+		pr_err("invalid handle\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_SESSION_INIT)) {
+		pr_err("session not initialized\n");
+		goto exit;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+		pr_err("library not loaded\n");
+		goto exit;
+	}
+
+	/* send HDCP_Txmtr_Init command to TZ */
+	req_buf = (struct hdcp_tx_init_req *)handle->qseecom_handle->sbuf;
+	req_buf->commandid = HDCP_TXMTR_INIT;
+	req_buf->sessionid = handle->session_id;
+	rsp_buf = (struct hdcp_tx_init_rsp *)
+	    (handle->qseecom_handle->sbuf +
+	     QSEECOM_ALIGN(sizeof(struct hdcp_tx_init_req)));
+
+	rc = qseecom_send_command(handle->qseecom_handle, req_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct hdcp_tx_init_req)),
+				  rsp_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct hdcp_tx_init_rsp)));
+
+	if ((rc < 0) || (rsp_buf->status != HDCP_SUCCESS) ||
+	    (rsp_buf->commandid != HDCP_TXMTR_INIT)) {
+		pr_err("qseecom cmd failed with err = %d, status = %d\n",
+		       rc, rsp_buf->status);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	/* keep the TZ context handle for all subsequent transmitter cmds */
+	handle->tz_ctxhandle = rsp_buf->ctxhandle;
+	handle->hdcp_state |= HDCP_STATE_TXMTR_INIT;
+
+	pr_debug("success\n");
+exit:
+	return rc;
+}
+
+/*
+ * Send HDCP_TXMTR_DEINIT to TZ for the current transmitter context and
+ * clear HDCP_STATE_TXMTR_INIT.  Precondition failures log but return 0
+ * so teardown can proceed.
+ * Returns 0 on success, -EINVAL on a failed TZ command.
+ */
+static int hdcp_lib_txmtr_deinit(struct hdcp_lib_handle *handle)
+{
+	int rc = 0;
+	struct hdcp_deinit_req *req_buf;
+	struct hdcp_deinit_rsp *rsp_buf;
+
+	if (!handle || !handle->qseecom_handle ||
+	    !handle->qseecom_handle->sbuf) {
+		pr_err("invalid handle\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+		pr_err("app not loaded\n");
+		goto exit;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_TXMTR_INIT)) {
+		/* unload library here */
+		pr_err("txmtr not initialized\n");
+		goto exit;
+	}
+
+	/* send command to TZ */
+	req_buf = (struct hdcp_deinit_req *)handle->qseecom_handle->sbuf;
+	req_buf->commandid = HDCP_TXMTR_DEINIT;
+	req_buf->ctxhandle = handle->tz_ctxhandle;
+	rsp_buf = (struct hdcp_deinit_rsp *)
+	    (handle->qseecom_handle->sbuf +
+	     QSEECOM_ALIGN(sizeof(struct hdcp_deinit_req)));
+
+	rc = qseecom_send_command(handle->qseecom_handle, req_buf,
+				  QSEECOM_ALIGN(sizeof(struct hdcp_deinit_req)),
+				  rsp_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct hdcp_deinit_rsp)));
+
+	if ((rc < 0) || (rsp_buf->status < 0) ||
+	    (rsp_buf->commandid != HDCP_TXMTR_DEINIT)) {
+		pr_err("qseecom cmd failed with err = %d status = %d\n",
+		       rc, rsp_buf->status);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	handle->hdcp_state &= ~HDCP_STATE_TXMTR_INIT;
+	pr_debug("success\n");
+exit:
+	return rc;
+}
+
+/*
+ * Send HDCP_TXMTR_START_AUTHENTICATE to TZ and stage the first protocol
+ * message (returned by TZ) into listener_buf for the client to transmit.
+ * Requires session and transmitter to be initialized.
+ * Returns 0 on success, -EINVAL on a failed or malformed TZ response.
+ */
+static int hdcp_lib_start_auth(struct hdcp_lib_handle *handle)
+{
+	int rc = 0;
+	struct hdcp_start_auth_req *req_buf;
+	struct hdcp_start_auth_rsp *rsp_buf;
+
+	if (!handle || !handle->qseecom_handle ||
+	    !handle->qseecom_handle->sbuf) {
+		pr_err("invalid handle\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_SESSION_INIT)) {
+		pr_err("session not initialized\n");
+		goto exit;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_TXMTR_INIT)) {
+		pr_err("txmtr not initialized\n");
+		goto exit;
+	}
+
+	/* send HDCP_Txmtr_Start_Auth command to TZ */
+	req_buf = (struct hdcp_start_auth_req *)handle->qseecom_handle->sbuf;
+	req_buf->commandid = HDCP_TXMTR_START_AUTHENTICATE;
+	req_buf->ctxHandle = handle->tz_ctxhandle;
+	rsp_buf = (struct hdcp_start_auth_rsp *)
+	    (handle->qseecom_handle->sbuf +
+	     QSEECOM_ALIGN(sizeof(struct hdcp_start_auth_req)));
+
+	rc = qseecom_send_command(handle->qseecom_handle, req_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct hdcp_start_auth_req)),
+				  rsp_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct hdcp_start_auth_rsp)));
+
+	/*
+	 * fix: drop the always-false "message == NULL" test (message is an
+	 * array member, never NULL) and instead bound msglen so the memcpy
+	 * below cannot overflow listener_buf on a malformed TZ response.
+	 */
+	if ((rc < 0) || (rsp_buf->status != HDCP_SUCCESS) ||
+	    (rsp_buf->commandid != HDCP_TXMTR_START_AUTHENTICATE) ||
+	    (rsp_buf->msglen == 0) ||
+	    (rsp_buf->msglen > MAX_TX_MESSAGE_SIZE)) {
+		pr_err("qseecom cmd failed with err = %d, status = %d\n",
+		       rc, rsp_buf->status);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	pr_debug("recvd %s from TZ at %dms\n",
+		 hdcp_lib_message_name((int)rsp_buf->message[0]),
+		 jiffies_to_msecs(jiffies));
+
+	handle->last_msg = (int)rsp_buf->message[0];
+
+	/* send the response to HDMI driver */
+	memset(handle->listener_buf, 0, MAX_TX_MESSAGE_SIZE);
+	memcpy(handle->listener_buf, (unsigned char *)rsp_buf->message,
+	       rsp_buf->msglen);
+	handle->msglen = rsp_buf->msglen;
+	handle->hdcp_timeout = rsp_buf->timeout;
+
+	handle->tz_ctxhandle = rsp_buf->ctxhandle;
+
+	pr_debug("success\n");
+exit:
+	return rc;
+}
+
+/*
+ * Repeater-only: query TZ for the stream type message and, on success,
+ * stage it in listener_buf and hand it to the client for transmission.
+ * Silently returns if HDCP is off or the device is not a repeater.
+ */
+static void hdcp_lib_stream(struct hdcp_lib_handle *handle)
+{
+	int rc = 0;
+	struct hdcp_query_stream_type_req *req_buf;
+	struct hdcp_query_stream_type_rsp *rsp_buf;
+
+	if (!handle || !handle->qseecom_handle ||
+	    !handle->qseecom_handle->sbuf) {
+		pr_err("invalid handle\n");
+		return;
+	}
+
+	if (atomic_read(&handle->hdcp_off)) {
+		pr_debug("invalid state, hdcp off\n");
+		return;
+	}
+
+	if (!handle->repeater_flag) {
+		pr_debug("invalid state, not a repeater\n");
+		return;
+	}
+
+	/* send command to TZ */
+	req_buf =
+	    (struct hdcp_query_stream_type_req *)handle->qseecom_handle->sbuf;
+	req_buf->commandid = HDCP_TXMTR_QUERY_STREAM_TYPE;
+	req_buf->ctxhandle = handle->tz_ctxhandle;
+	rsp_buf = (struct hdcp_query_stream_type_rsp *)
+	    (handle->qseecom_handle->sbuf +
+	     QSEECOM_ALIGN(sizeof(struct hdcp_query_stream_type_req)));
+
+	rc = qseecom_send_command(handle->qseecom_handle, req_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct
+						 hdcp_query_stream_type_req)),
+				  rsp_buf,
+				  QSEECOM_ALIGN(sizeof
+						(struct
+						 hdcp_query_stream_type_rsp)));
+
+	/*
+	 * NOTE(review): if rsp_buf->msg is an array member (like
+	 * hdcp_start_auth_rsp.message), the "msg == NULL" test can never
+	 * fire -- confirm the struct layout.
+	 */
+	if ((rc < 0) || (rsp_buf->status < 0) || (rsp_buf->msglen <= 0) ||
+	    (rsp_buf->commandid != HDCP_TXMTR_QUERY_STREAM_TYPE) ||
+	    (rsp_buf->msg == NULL)) {
+		pr_err("qseecom cmd failed with err=%d status=%d\n",
+		       rc, rsp_buf->status);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	pr_debug("message received from TZ: %s\n",
+		 hdcp_lib_message_name((int)rsp_buf->msg[0]));
+
+	handle->last_msg = (int)rsp_buf->msg[0];
+
+	/* stage the TZ message for transmission by the client */
+	memset(handle->listener_buf, 0, MAX_TX_MESSAGE_SIZE);
+	memcpy(handle->listener_buf, (unsigned char *)rsp_buf->msg,
+	       rsp_buf->msglen);
+	handle->hdcp_timeout = rsp_buf->timeout;
+	handle->msglen = rsp_buf->msglen;
+exit:
+	if (!rc && !atomic_read(&handle->hdcp_off))
+		hdcp_lib_send_message(handle);
+}
+
+/* kthread work wrapper: runs hdcp_lib_stream() for the owning handle. */
+static void hdcp_lib_query_stream_work(struct kthread_work *work)
+{
+	struct hdcp_lib_handle *handle = container_of(work,
+						      struct hdcp_lib_handle,
+						      wk_stream);
+
+	hdcp_lib_stream(handle);
+}
+
+/*
+ * Client API: report whether HDCP 2.2 is supported on this device.
+ * On first call, temporarily loads the TZ app to verify the provisioned
+ * keys and caches the result in handle->feature_supported.
+ */
+static bool hdcp_lib_client_feature_supported(void *phdcpcontext)
+{
+	int rc = 0;
+	bool supported = false;
+	struct hdcp_lib_handle *handle = phdcpcontext;
+
+	if (!handle) {
+		pr_err("invalid input\n");
+		goto exit;
+	}
+
+	/* cached positive result from a previous check */
+	if (handle->feature_supported) {
+		supported = true;
+		goto exit;
+	}
+
+	rc = hdcp_lib_library_load(handle);
+	if (!rc) {
+		if (!hdcp_lib_verify_keys(handle)) {
+			pr_debug("HDCP2p2 supported\n");
+			handle->feature_supported = true;
+			supported = true;
+		}
+		/* probe only: unload regardless of the verdict */
+		hdcp_lib_library_unload(handle);
+	}
+exit:
+	return supported;
+}
+
+/*
+ * Debug helper: log which of the handle's work items are queued and which
+ * one (if any) the worker is currently executing.  No side effects.
+ */
+static void hdcp_lib_check_worker_status(struct hdcp_lib_handle *handle)
+{
+	if (!list_empty(&handle->wk_init.node))
+		pr_debug("init work queued\n");
+
+	if (handle->worker.current_work == &handle->wk_init)
+		pr_debug("init work executing\n");
+
+	if (!list_empty(&handle->wk_msg_sent.node))
+		pr_debug("msg_sent work queued\n");
+
+	if (handle->worker.current_work == &handle->wk_msg_sent)
+		pr_debug("msg_sent work executing\n");
+
+	if (!list_empty(&handle->wk_msg_recvd.node))
+		pr_debug("msg_recvd work queued\n");
+
+	if (handle->worker.current_work == &handle->wk_msg_recvd)
+		pr_debug("msg_recvd work executing\n");
+
+	if (!list_empty(&handle->wk_timeout.node))
+		pr_debug("timeout work queued\n");
+
+	if (handle->worker.current_work == &handle->wk_timeout)
+		pr_debug("timeout work executing\n");
+
+	if (!list_empty(&handle->wk_clean.node))
+		pr_debug("clean work queued\n");
+
+	if (handle->worker.current_work == &handle->wk_clean)
+		pr_debug("clean work executing\n");
+
+	if (!list_empty(&handle->wk_wait.node))
+		pr_debug("wait work queued\n");
+
+	if (handle->worker.current_work == &handle->wk_wait)
+		pr_debug("wait work executing\n");
+
+	if (!list_empty(&handle->wk_stream.node))
+		pr_debug("stream work queued\n");
+
+	if (handle->worker.current_work == &handle->wk_stream)
+		pr_debug("stream work executing\n");
+}
+
+/*
+ * Validate the handle state for the wakeup command about to run:
+ * - START requires an idle worker queue and no loaded app (-EBUSY if not);
+ * - all other commands require HDCP not to be tearing down and the app to
+ *   be loaded (those cases return 0 but the caller bails via logging).
+ */
+static int hdcp_lib_check_valid_state(struct hdcp_lib_handle *handle)
+{
+	int rc = 0;
+
+	if (!list_empty(&handle->worker.work_list))
+		hdcp_lib_check_worker_status(handle);
+
+	if (handle->wakeup_cmd == HDCP_LIB_WKUP_CMD_START) {
+		if (!list_empty(&handle->worker.work_list)) {
+			pr_debug("error: queue not empty\n");
+			rc = -EBUSY;
+			goto exit;
+		}
+
+		if (handle->hdcp_state & HDCP_STATE_APP_LOADED) {
+			pr_debug("library already loaded\n");
+			rc = -EBUSY;
+			goto exit;
+		}
+	} else {
+		if (atomic_read(&handle->hdcp_off)) {
+			pr_debug("hdcp2.2 session tearing down\n");
+			goto exit;
+		}
+
+		if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+			pr_debug("hdcp 2.2 app not loaded\n");
+			goto exit;
+		}
+	}
+exit:
+	return rc;
+}
+
+/*
+ * Client entry point: accept a wakeup command, stash any received message,
+ * and queue the matching work item on the handle's kthread worker.
+ * Serialized by wakeup_mutex; the message copy is guarded by msg_lock.
+ * Returns 0 on success or a negative errno.
+ */
+static int hdcp_lib_wakeup_thread(struct hdcp_lib_wakeup_data *data)
+{
+	struct hdcp_lib_handle *handle;
+	int rc = 0;
+
+	if (!data)
+		return -EINVAL;
+
+	handle = data->context;
+	if (!handle)
+		return -EINVAL;
+
+	mutex_lock(&handle->wakeup_mutex);
+
+	handle->wakeup_cmd = data->cmd;
+	handle->timeout_left = data->timeout;
+
+	pr_debug("client->lib: %s (%s)\n",
+		 hdcp_lib_cmd_to_str(data->cmd),
+		 hdcp_lib_message_name(handle->last_msg));
+
+	rc = hdcp_lib_check_valid_state(handle);
+	if (rc)
+		goto exit;
+
+	/* replace the previously received message with a fresh copy */
+	mutex_lock(&handle->msg_lock);
+	if (data->recvd_msg_len) {
+		kzfree(handle->last_msg_recvd_buf);
+
+		/*
+		 * NOTE(review): len is set before the allocation is checked;
+		 * on -ENOMEM the handle is left with a nonzero length and a
+		 * NULL buffer -- confirm consumers tolerate that pairing.
+		 */
+		handle->last_msg_recvd_len = data->recvd_msg_len;
+		handle->last_msg_recvd_buf = kzalloc(data->recvd_msg_len,
+						     GFP_KERNEL);
+		if (!handle->last_msg_recvd_buf) {
+			rc = -ENOMEM;
+			mutex_unlock(&handle->msg_lock);
+			goto exit;
+		}
+
+		memcpy(handle->last_msg_recvd_buf, data->recvd_msg_buf,
+		       data->recvd_msg_len);
+	}
+	mutex_unlock(&handle->msg_lock);
+
+	/* release anyone parked in the wk_wait watchdog */
+	if (!completion_done(&handle->poll_wait))
+		complete_all(&handle->poll_wait);
+
+	switch (handle->wakeup_cmd) {
+	case HDCP_LIB_WKUP_CMD_START:
+		/* reset per-session protocol state before kicking off init */
+		handle->no_stored_km_flag = 0;
+		handle->repeater_flag = false;
+		handle->update_stream = false;
+		handle->last_msg_sent = 0;
+		handle->last_msg = INVALID_MESSAGE_ID;
+		handle->hdcp_timeout = 0;
+		handle->timeout_left = 0;
+		atomic_set(&handle->hdcp_off, 0);
+		handle->hdcp_state = HDCP_STATE_INIT;
+
+		HDCP_LIB_EXECUTE(init);
+		break;
+	case HDCP_LIB_WKUP_CMD_STOP:
+		atomic_set(&handle->hdcp_off, 1);
+
+		HDCP_LIB_EXECUTE(clean);
+		break;
+	case HDCP_LIB_WKUP_CMD_MSG_SEND_SUCCESS:
+		/* remember which message the client just transmitted */
+		handle->last_msg_sent = handle->listener_buf[0];
+
+		HDCP_LIB_EXECUTE(msg_sent);
+		break;
+	case HDCP_LIB_WKUP_CMD_MSG_SEND_FAILED:
+	case HDCP_LIB_WKUP_CMD_MSG_RECV_FAILED:
+	case HDCP_LIB_WKUP_CMD_LINK_FAILED:
+		handle->hdcp_state |= HDCP_STATE_ERROR;
+		HDCP_LIB_EXECUTE(clean);
+		break;
+	case HDCP_LIB_WKUP_CMD_MSG_RECV_SUCCESS:
+		HDCP_LIB_EXECUTE(msg_recvd);
+		break;
+	case HDCP_LIB_WKUP_CMD_MSG_RECV_TIMEOUT:
+		HDCP_LIB_EXECUTE(timeout);
+		break;
+	case HDCP_LIB_WKUP_CMD_QUERY_STREAM_TYPE:
+		HDCP_LIB_EXECUTE(stream);
+		break;
+	default:
+		pr_err("invalid wakeup command %d\n", handle->wakeup_cmd);
+	}
+exit:
+	mutex_unlock(&handle->wakeup_mutex);
+
+	return rc;
+}
+
+/*
+ * React to the client reporting a successful message transmission: decide
+ * the next step (enable encryption, send SKE_SEND_TYPE_ID, run the stream
+ * manage flow, poll the link, or wait for the next received message) and
+ * wake the client with it.
+ */
+static void hdcp_lib_msg_sent(struct hdcp_lib_handle *handle)
+{
+	struct hdcp_wakeup_data cdata = { HDCP_WKUP_CMD_INVALID };
+
+	if (!handle) {
+		pr_err("invalid handle\n");
+		return;
+	}
+
+	cdata.context = handle->client_ctx;
+
+	switch (handle->last_msg_sent) {
+	case SKE_SEND_TYPE_ID:
+		/* final message sent: program the HW key, report success */
+		if (!hdcp_lib_enable_encryption(handle)) {
+			handle->authenticated = true;
+
+			cdata.cmd = HDCP_WKUP_CMD_STATUS_SUCCESS;
+			hdcp_lib_wakeup_client(handle, &cdata);
+		}
+
+		/* poll for link check */
+		cdata.cmd = HDCP_WKUP_CMD_LINK_POLL;
+		break;
+	case SKE_SEND_EKS_MESSAGE_ID:
+		if (handle->repeater_flag) {
+			/* poll for link check */
+			cdata.cmd = HDCP_WKUP_CMD_LINK_POLL;
+		} else {
+			/* non-repeater: stage SKE_SEND_TYPE_ID ourselves */
+			memset(handle->listener_buf, 0, MAX_TX_MESSAGE_SIZE);
+			handle->listener_buf[0] = SKE_SEND_TYPE_ID;
+			handle->msglen = 2;
+			cdata.cmd = HDCP_WKUP_CMD_SEND_MESSAGE;
+			cdata.send_msg_buf = handle->listener_buf;
+			cdata.send_msg_len = handle->msglen;
+			handle->last_msg = hdcp_lib_get_next_message(handle,
+								     &cdata);
+		}
+		break;
+	case REPEATER_AUTH_SEND_ACK_MESSAGE_ID:
+		pr_debug("Repeater authentication successful\n");
+
+		if (handle->update_stream) {
+			HDCP_LIB_EXECUTE(stream);
+			handle->update_stream = false;
+		} else {
+			cdata.cmd = HDCP_WKUP_CMD_LINK_POLL;
+		}
+		break;
+	default:
+		/* mid-handshake: wait for the receiver's next message */
+		cdata.cmd = HDCP_WKUP_CMD_RECV_MESSAGE;
+		cdata.timeout = handle->timeout_left;
+	}
+
+	hdcp_lib_wakeup_client(handle, &cdata);
+}
+
+/*
+ * kthread work wrapper for hdcp_lib_msg_sent(); bails out if the pending
+ * wakeup command is not MSG_SEND_SUCCESS.
+ */
+static void hdcp_lib_msg_sent_work(struct kthread_work *work)
+{
+	struct hdcp_lib_handle *handle = container_of(work,
+						      struct hdcp_lib_handle,
+						      wk_msg_sent);
+
+	if (handle->wakeup_cmd != HDCP_LIB_WKUP_CMD_MSG_SEND_SUCCESS) {
+		pr_err("invalid wakeup command %d\n", handle->wakeup_cmd);
+		return;
+	}
+
+	hdcp_lib_msg_sent(handle);
+}
+
+/*
+ * Start a new HDCP 2.2 session: load the TZ app, create a session and a
+ * transmitter context, start authentication, and hand the first protocol
+ * message to the client.  Any failure schedules the clean work so partial
+ * state is torn down.
+ */
+static void hdcp_lib_init(struct hdcp_lib_handle *handle)
+{
+	int rc = 0;
+
+	if (!handle) {
+		pr_err("invalid handle\n");
+		return;
+	}
+
+	if (handle->wakeup_cmd != HDCP_LIB_WKUP_CMD_START) {
+		pr_err("invalid wakeup command %d\n", handle->wakeup_cmd);
+		return;
+	}
+
+	rc = hdcp_lib_library_load(handle);
+	if (rc)
+		goto exit;
+
+	rc = hdcp_lib_session_init(handle);
+	if (rc)
+		goto exit;
+
+	if (handle->hdcp_txmtr_init == NULL) {
+		pr_err("invalid txmtr init function pointer\n");
+		/*
+		 * fix: was a bare return, which leaked the app/session
+		 * state initialized above; go through cleanup instead.
+		 */
+		goto exit;
+	}
+
+	rc = handle->hdcp_txmtr_init(handle);
+	if (rc)
+		goto exit;
+
+	rc = hdcp_lib_start_auth(handle);
+	if (rc)
+		goto exit;
+
+	hdcp_lib_send_message(handle);
+
+	return;
+exit:
+	HDCP_LIB_EXECUTE(clean);
+}
+
+/* kthread work wrapper: runs hdcp_lib_init() for the owning handle. */
+static void hdcp_lib_init_work(struct kthread_work *work)
+{
+	struct hdcp_lib_handle *handle = container_of(work,
+						      struct hdcp_lib_handle,
+						      wk_init);
+
+	hdcp_lib_init(handle);
+}
+
+static void hdcp_lib_timeout(struct hdcp_lib_handle *handle)
+{
+ int rc = 0;
+ struct hdcp_send_timeout_req *req_buf;
+ struct hdcp_send_timeout_rsp *rsp_buf;
+
+ if (!handle || !handle->qseecom_handle ||
+ !handle->qseecom_handle->sbuf) {
+ pr_debug("invalid handle\n");
+ return;
+ }
+
+ if (atomic_read(&handle->hdcp_off)) {
+ pr_debug("invalid state, hdcp off\n");
+ return;
+ }
+
+ req_buf = (struct hdcp_send_timeout_req *)
+ (handle->qseecom_handle->sbuf);
+ req_buf->commandid = HDCP_TXMTR_SEND_MESSAGE_TIMEOUT;
+ req_buf->ctxhandle = handle->tz_ctxhandle;
+
+ rsp_buf = (struct hdcp_send_timeout_rsp *)
+ (handle->qseecom_handle->sbuf +
+ QSEECOM_ALIGN(sizeof(struct hdcp_send_timeout_req)));
+
+ rc = qseecom_send_command(handle->qseecom_handle, req_buf,
+ QSEECOM_ALIGN(sizeof
+ (struct hdcp_send_timeout_req)),
+ rsp_buf,
+ QSEECOM_ALIGN(sizeof
+ (struct
+ hdcp_send_timeout_rsp)));
+
+ if ((rc < 0) || (rsp_buf->status != HDCP_SUCCESS)) {
+ pr_err("qseecom cmd failed for with err = %d status = %d\n",
+ rc, rsp_buf->status);
+ rc = -EINVAL;
+ goto error;
+ }
+
+ if (rsp_buf->commandid == HDCP_TXMTR_SEND_MESSAGE_TIMEOUT) {
+ pr_err("HDCP_TXMTR_SEND_MESSAGE_TIMEOUT\n");
+ rc = -EINVAL;
+ goto error;
+ }
+
+	/*
+	 * If the response contains an LC_Init message,
+	 * send the message again to TZ.
+	 */
+ if ((rsp_buf->commandid == HDCP_TXMTR_PROCESS_RECEIVED_MESSAGE) &&
+ ((int)rsp_buf->message[0] == LC_INIT_MESSAGE_ID) &&
+ (rsp_buf->msglen == LC_INIT_MESSAGE_SIZE)) {
+ if (!atomic_read(&handle->hdcp_off)) {
+ /* keep local copy of TZ response */
+ memset(handle->listener_buf, 0, MAX_TX_MESSAGE_SIZE);
+ memcpy(handle->listener_buf,
+ (unsigned char *)rsp_buf->message,
+ rsp_buf->msglen);
+ handle->hdcp_timeout = rsp_buf->timeout;
+ handle->msglen = rsp_buf->msglen;
+
+ hdcp_lib_send_message(handle);
+ }
+ }
+
+ return;
+error:
+ if (!atomic_read(&handle->hdcp_off))
+ HDCP_LIB_EXECUTE(clean);
+}
+
+static void hdcp_lib_manage_timeout_work(struct kthread_work *work)
+{
+ struct hdcp_lib_handle *handle = container_of(work,
+ struct hdcp_lib_handle,
+ wk_timeout);
+
+ hdcp_lib_timeout(handle);
+}
+
+static void hdcp_lib_clean(struct hdcp_lib_handle *handle)
+{
+ struct hdcp_wakeup_data cdata = { HDCP_WKUP_CMD_INVALID };
+
+ if (!handle) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ handle->authenticated = false;
+
+ hdcp_lib_txmtr_deinit(handle);
+ hdcp_lib_session_deinit(handle);
+ hdcp_lib_library_unload(handle);
+
+ cdata.context = handle->client_ctx;
+ cdata.cmd = HDCP_WKUP_CMD_STATUS_FAILED;
+
+ if (!atomic_read(&handle->hdcp_off))
+ hdcp_lib_wakeup_client(handle, &cdata);
+
+ atomic_set(&handle->hdcp_off, 1);
+}
+
+static void hdcp_lib_cleanup_work(struct kthread_work *work)
+{
+
+ struct hdcp_lib_handle *handle = container_of(work,
+ struct hdcp_lib_handle,
+ wk_clean);
+
+ hdcp_lib_clean(handle);
+}
+
+static void hdcp_lib_msg_recvd(struct hdcp_lib_handle *handle)
+{
+ int rc = 0;
+ struct hdcp_wakeup_data cdata = { HDCP_WKUP_CMD_INVALID };
+ struct hdcp_rcvd_msg_req *req_buf;
+ struct hdcp_rcvd_msg_rsp *rsp_buf;
+ uint32_t msglen;
+ char *msg = NULL;
+ char msg_name[50];
+ uint32_t message_id_bytes = 0;
+
+ if (!handle || !handle->qseecom_handle ||
+ !handle->qseecom_handle->sbuf) {
+ pr_err("invalid handle\n");
+ return;
+ }
+
+ if (atomic_read(&handle->hdcp_off)) {
+ pr_debug("invalid state, hdcp off\n");
+ return;
+ }
+
+ cdata.context = handle->client_ctx;
+
+ mutex_lock(&handle->msg_lock);
+ msglen = handle->last_msg_recvd_len;
+
+ if (msglen <= 0) {
+ pr_err("invalid msg len\n");
+ mutex_unlock(&handle->msg_lock);
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ /* If the client is DP then allocate extra byte for message ID. */
+ if (handle->device_type == HDCP_TXMTR_DP)
+ message_id_bytes = 1;
+
+ msglen += message_id_bytes;
+
+ msg = kzalloc(msglen, GFP_KERNEL);
+ if (!msg) {
+ mutex_unlock(&handle->msg_lock);
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ /* copy the message id if needed */
+ if (message_id_bytes)
+ memcpy(msg, &handle->last_msg, message_id_bytes);
+
+ memcpy(msg + message_id_bytes,
+ handle->last_msg_recvd_buf,
+ handle->last_msg_recvd_len);
+
+ mutex_unlock(&handle->msg_lock);
+
+ snprintf(msg_name, sizeof(msg_name), "%s: ",
+ hdcp_lib_message_name((int)msg[0]));
+
+ print_hex_dump(KERN_DEBUG, msg_name,
+ DUMP_PREFIX_NONE, 16, 1, msg, msglen, false);
+
+ /* send the message to QSEECOM */
+ req_buf = (struct hdcp_rcvd_msg_req *)(handle->qseecom_handle->sbuf);
+ req_buf->commandid = HDCP_TXMTR_PROCESS_RECEIVED_MESSAGE;
+ memcpy(req_buf->msg, msg, msglen);
+ req_buf->msglen = msglen;
+ req_buf->ctxhandle = handle->tz_ctxhandle;
+
+ rsp_buf =
+ (struct hdcp_rcvd_msg_rsp *)(handle->qseecom_handle->sbuf +
+ QSEECOM_ALIGN(sizeof
+ (struct
+ hdcp_rcvd_msg_req)));
+
+ pr_debug("writing %s to TZ at %dms\n",
+ hdcp_lib_message_name((int)msg[0]), jiffies_to_msecs(jiffies));
+
+ rc = qseecom_send_command(handle->qseecom_handle, req_buf,
+ QSEECOM_ALIGN(sizeof
+ (struct hdcp_rcvd_msg_req)),
+ rsp_buf,
+ QSEECOM_ALIGN(sizeof
+ (struct hdcp_rcvd_msg_rsp)));
+
+	/* get next message from sink if we receive H PRIME with no stored km */
+ if ((msg[0] == AKE_SEND_H_PRIME_MESSAGE_ID) &&
+ handle->no_stored_km_flag) {
+ handle->hdcp_timeout = rsp_buf->timeout;
+
+ cdata.cmd = HDCP_WKUP_CMD_RECV_MESSAGE;
+ cdata.timeout = handle->hdcp_timeout;
+
+ goto exit;
+ }
+
+ if ((msg[0] == REPEATER_AUTH_STREAM_READY_MESSAGE_ID) &&
+ (rc == 0) && (rsp_buf->status == 0)) {
+ pr_debug("Got Auth_Stream_Ready, nothing sent to rx\n");
+
+ if (!handle->authenticated &&
+ !hdcp_lib_enable_encryption(handle)) {
+ handle->authenticated = true;
+
+ cdata.cmd = HDCP_WKUP_CMD_STATUS_SUCCESS;
+ hdcp_lib_wakeup_client(handle, &cdata);
+ }
+
+ cdata.cmd = HDCP_WKUP_CMD_LINK_POLL;
+ goto exit;
+ }
+
+ if ((rc < 0) || (rsp_buf->status != 0) || (rsp_buf->msglen <= 0) ||
+ (rsp_buf->commandid != HDCP_TXMTR_PROCESS_RECEIVED_MESSAGE) ||
+ (rsp_buf->msg == NULL)) {
+ pr_err("qseecom cmd failed with err=%d status=%d\n",
+ rc, rsp_buf->status);
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ pr_debug("recvd %s from TZ at %dms\n",
+ hdcp_lib_message_name((int)rsp_buf->msg[0]),
+ jiffies_to_msecs(jiffies));
+
+ handle->last_msg = (int)rsp_buf->msg[0];
+
+ /* set the flag if response is AKE_No_Stored_km */
+ if (((int)rsp_buf->msg[0] == AKE_NO_STORED_KM_MESSAGE_ID)) {
+ pr_debug("Setting no_stored_km_flag\n");
+ handle->no_stored_km_flag = 1;
+ } else {
+ handle->no_stored_km_flag = 0;
+ }
+
+ /* check if it's a repeater */
+ if ((rsp_buf->msg[0] == SKE_SEND_EKS_MESSAGE_ID) &&
+ (rsp_buf->msglen == SKE_SEND_EKS_MESSAGE_SIZE)) {
+ if ((rsp_buf->flag ==
+ HDCP_TXMTR_SUBSTATE_WAITING_FOR_RECIEVERID_LIST) &&
+ (rsp_buf->timeout > 0))
+ handle->repeater_flag = true;
+ handle->update_stream = true;
+ }
+
+ memset(handle->listener_buf, 0, MAX_TX_MESSAGE_SIZE);
+ memcpy(handle->listener_buf, (unsigned char *)rsp_buf->msg,
+ rsp_buf->msglen);
+ handle->hdcp_timeout = rsp_buf->timeout;
+ handle->msglen = rsp_buf->msglen;
+
+ if (!atomic_read(&handle->hdcp_off))
+ hdcp_lib_send_message(handle);
+exit:
+ kzfree(msg);
+
+ hdcp_lib_wakeup_client(handle, &cdata);
+
+ if (rc && !atomic_read(&handle->hdcp_off))
+ HDCP_LIB_EXECUTE(clean);
+}
+
+static void hdcp_lib_msg_recvd_work(struct kthread_work *work)
+{
+ struct hdcp_lib_handle *handle = container_of(work,
+ struct hdcp_lib_handle,
+ wk_msg_recvd);
+
+ hdcp_lib_msg_recvd(handle);
+}
+
+static void hdcp_lib_wait_work(struct kthread_work *work)
+{
+ u32 timeout;
+ struct hdcp_lib_handle *handle = container_of(work,
+ struct hdcp_lib_handle, wk_wait);
+
+ if (!handle) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ if (atomic_read(&handle->hdcp_off)) {
+ pr_debug("invalid state: hdcp off\n");
+ return;
+ }
+
+ if (handle->hdcp_state & HDCP_STATE_ERROR) {
+ pr_debug("invalid state: hdcp error\n");
+ return;
+ }
+
+ reinit_completion(&handle->poll_wait);
+ timeout = wait_for_completion_timeout(&handle->poll_wait,
+ handle->wait_timeout);
+ if (!timeout) {
+ pr_err("wait timeout\n");
+
+ if (!atomic_read(&handle->hdcp_off))
+ HDCP_LIB_EXECUTE(clean);
+ }
+
+ handle->wait_timeout = 0;
+}
+
+bool hdcp1_check_if_supported_load_app(void)
+{
+ int rc = 0;
+
+ /* start hdcp1 app */
+ if (hdcp1_supported && !hdcp1_handle) {
+ rc = qseecom_start_app(&hdcp1_handle, HDCP1_APP_NAME,
+ QSEECOM_SBUFF_SIZE);
+ if (rc) {
+ pr_err("qseecom_start_app failed %d\n", rc);
+ hdcp1_supported = false;
+ } else {
+ mutex_init(&hdcp1_ta_cmd_lock);
+ }
+ }
+
+ pr_debug("hdcp1 app %s loaded\n",
+ hdcp1_supported ? "successfully" : "not");
+
+ return hdcp1_supported;
+}
+
+/* APIs exposed to all clients */
+int hdcp1_set_keys(uint32_t *aksv_msb, uint32_t *aksv_lsb)
+{
+ int rc = 0;
+ struct hdcp1_key_set_req *key_set_req;
+ struct hdcp1_key_set_rsp *key_set_rsp;
+
+ if (aksv_msb == NULL || aksv_lsb == NULL)
+ return -EINVAL;
+
+ if (!hdcp1_supported || !hdcp1_handle)
+ return -EINVAL;
+
+ /* set keys and request aksv */
+ key_set_req = (struct hdcp1_key_set_req *)hdcp1_handle->sbuf;
+ key_set_req->commandid = HDCP1_SET_KEY_MESSAGE_ID;
+ key_set_rsp = (struct hdcp1_key_set_rsp *)(hdcp1_handle->sbuf +
+ QSEECOM_ALIGN(sizeof(struct hdcp1_key_set_req)));
+ rc = qseecom_send_command(hdcp1_handle, key_set_req,
+ QSEECOM_ALIGN(sizeof
+ (struct hdcp1_key_set_req)),
+ key_set_rsp,
+ QSEECOM_ALIGN(sizeof
+ (struct hdcp1_key_set_rsp)));
+
+ if (rc < 0) {
+ pr_err("qseecom cmd failed err=%d\n", rc);
+ return -ENOKEY;
+ }
+
+ rc = key_set_rsp->ret;
+ if (rc) {
+ pr_err("set key cmd failed, rsp=%d\n", key_set_rsp->ret);
+ return -ENOKEY;
+ }
+
+ /* copy bytes into msb and lsb */
+ *aksv_msb = key_set_rsp->ksv[0] << 24;
+ *aksv_msb |= key_set_rsp->ksv[1] << 16;
+ *aksv_msb |= key_set_rsp->ksv[2] << 8;
+ *aksv_msb |= key_set_rsp->ksv[3];
+ *aksv_lsb = key_set_rsp->ksv[4] << 24;
+ *aksv_lsb |= key_set_rsp->ksv[5] << 16;
+ *aksv_lsb |= key_set_rsp->ksv[6] << 8;
+ *aksv_lsb |= key_set_rsp->ksv[7];
+
+ return 0;
+}
+
+int hdcp1_set_enc(bool enable)
+{
+ int rc = 0;
+ struct hdcp1_set_enc_req *set_enc_req;
+ struct hdcp1_set_enc_rsp *set_enc_rsp;
+
+ mutex_lock(&hdcp1_ta_cmd_lock);
+
+ if (!hdcp1_supported || !hdcp1_handle) {
+ rc = -EINVAL;
+ goto end;
+ }
+
+ if (hdcp1_enc_enabled == enable) {
+ pr_info("already %s\n", enable ? "enabled" : "disabled");
+ goto end;
+ }
+
+ /* set keys and request aksv */
+ set_enc_req = (struct hdcp1_set_enc_req *)hdcp1_handle->sbuf;
+ set_enc_req->commandid = HDCP1_SET_ENC_MESSAGE_ID;
+ set_enc_req->enable = enable;
+ set_enc_rsp = (struct hdcp1_set_enc_rsp *)(hdcp1_handle->sbuf +
+ QSEECOM_ALIGN(sizeof(struct hdcp1_set_enc_req)));
+ rc = qseecom_send_command(hdcp1_handle, set_enc_req,
+ QSEECOM_ALIGN(sizeof
+ (struct hdcp1_set_enc_req)),
+ set_enc_rsp,
+ QSEECOM_ALIGN(sizeof
+ (struct hdcp1_set_enc_rsp)));
+
+ if (rc < 0) {
+ pr_err("qseecom cmd failed err=%d\n", rc);
+ goto end;
+ }
+
+ rc = set_enc_rsp->ret;
+ if (rc) {
+ pr_err("enc cmd failed, rsp=%d\n", set_enc_rsp->ret);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ hdcp1_enc_enabled = enable;
+ pr_info("%s success\n", enable ? "enable" : "disable");
+end:
+ mutex_unlock(&hdcp1_ta_cmd_lock);
+ return rc;
+}
+
+int hdcp_library_register(struct hdcp_register_data *data)
+{
+ int rc = 0;
+ struct hdcp_lib_handle *handle = NULL;
+
+ if (!data) {
+ pr_err("invalid input\n");
+ return -EINVAL;
+ }
+
+ if (!data->txmtr_ops) {
+ pr_err("invalid input: txmtr context\n");
+ return -EINVAL;
+ }
+
+ if (!data->client_ops) {
+ pr_err("invalid input: client_ops\n");
+ return -EINVAL;
+ }
+
+ if (!data->hdcp_ctx) {
+ pr_err("invalid input: hdcp_ctx\n");
+ return -EINVAL;
+ }
+
+ /* populate ops to be called by client */
+ data->txmtr_ops->feature_supported = hdcp_lib_client_feature_supported;
+ data->txmtr_ops->wakeup = hdcp_lib_wakeup_thread;
+
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+ if (!handle) {
+ rc = -ENOMEM;
+ goto unlock;
+ }
+
+ handle->client_ctx = data->client_ctx;
+ handle->client_ops = data->client_ops;
+ handle->hdcp_app_init = NULL;
+ handle->hdcp_txmtr_init = NULL;
+ handle->device_type = data->device_type;
+
+ atomic_set(&handle->hdcp_off, 0);
+
+ mutex_init(&handle->msg_lock);
+ mutex_init(&handle->wakeup_mutex);
+
+ kthread_init_worker(&handle->worker);
+
+ kthread_init_work(&handle->wk_init, hdcp_lib_init_work);
+ kthread_init_work(&handle->wk_msg_sent, hdcp_lib_msg_sent_work);
+ kthread_init_work(&handle->wk_msg_recvd, hdcp_lib_msg_recvd_work);
+ kthread_init_work(&handle->wk_timeout, hdcp_lib_manage_timeout_work);
+ kthread_init_work(&handle->wk_clean, hdcp_lib_cleanup_work);
+ kthread_init_work(&handle->wk_wait, hdcp_lib_wait_work);
+ kthread_init_work(&handle->wk_stream, hdcp_lib_query_stream_work);
+
+ init_completion(&handle->poll_wait);
+
+ handle->listener_buf = kzalloc(MAX_TX_MESSAGE_SIZE, GFP_KERNEL);
+ if (!(handle->listener_buf)) {
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ *data->hdcp_ctx = handle;
+	/* Cache the client context for later use.
+	 * The HDCP driver probes earlier than the
+	 * SDE driver, so the handle is cached here
+	 * to be retrieved at SDE probe time.
+	 */
+
+ drv_client_handle = handle;
+ handle->thread = kthread_run(kthread_worker_fn,
+ &handle->worker, "hdcp_tz_lib");
+
+ if (IS_ERR(handle->thread)) {
+ pr_err("unable to start lib thread\n");
+ rc = PTR_ERR(handle->thread);
+ handle->thread = NULL;
+ goto error;
+ }
+
+ return 0;
+error:
+ kzfree(handle->listener_buf);
+ handle->listener_buf = NULL;
+ kzfree(handle);
+ handle = NULL;
+unlock:
+ return rc;
+}
+EXPORT_SYMBOL(hdcp_library_register);
+
+void hdcp_library_deregister(void *phdcpcontext)
+{
+ struct hdcp_lib_handle *handle = phdcpcontext;
+
+ if (!handle)
+ return;
+
+ kthread_stop(handle->thread);
+
+ kzfree(handle->qseecom_handle);
+ kzfree(handle->last_msg_recvd_buf);
+
+ mutex_destroy(&handle->wakeup_mutex);
+
+ kzfree(handle->listener_buf);
+ kzfree(handle);
+}
+EXPORT_SYMBOL(hdcp_library_deregister);
+
+void hdcp1_notify_topology(void)
+{
+ char *envp[4];
+ char *a;
+ char *b;
+
+ if (!hdcp_drv_mgr) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ a = kzalloc(SZ_16, GFP_KERNEL);
+
+ if (!a)
+ return;
+
+ b = kzalloc(SZ_16, GFP_KERNEL);
+
+ if (!b) {
+ kfree(a);
+ return;
+ }
+
+ envp[0] = "HDCP_MGR_EVENT=MSG_READY";
+ envp[1] = a;
+ envp[2] = b;
+ envp[3] = NULL;
+
+ snprintf(envp[1], 16, "%d", (int)DOWN_CHECK_TOPOLOGY);
+ snprintf(envp[2], 16, "%d", (int)HDCP_V1_TX);
+
+ kobject_uevent_env(&hdcp_drv_mgr->device->kobj, KOBJ_CHANGE, envp);
+ kfree(a);
+ kfree(b);
+}
+
+static ssize_t msm_hdcp_1x_sysfs_rda_tp(struct device *dev,
+struct device_attribute *attr, char *buf)
+{
+ ssize_t ret = 0;
+
+ if (!hdcp_drv_mgr) {
+ pr_err("invalid input\n");
+ return -EINVAL;
+ }
+
+ switch (hdcp_drv_mgr->tp_msgid) {
+ case DOWN_CHECK_TOPOLOGY:
+ case DOWN_REQUEST_TOPOLOGY:
+ buf[MSG_ID_IDX] = hdcp_drv_mgr->tp_msgid;
+ buf[RET_CODE_IDX] = HDCP_AUTHED;
+ ret = HEADER_LEN;
+
+ memcpy(buf + HEADER_LEN, &hdcp_drv_mgr->cached_tp,
+ sizeof(struct HDCP_V2V1_MSG_TOPOLOGY));
+
+ ret += sizeof(struct HDCP_V2V1_MSG_TOPOLOGY);
+
+		/* clear the flag once data is read back to user space */
+ hdcp_drv_mgr->tp_msgid = -1;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static ssize_t msm_hdcp_1x_sysfs_wta_tp(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int msgid = 0;
+ ssize_t ret = count;
+
+ if (!hdcp_drv_mgr || !buf) {
+ pr_err("invalid input\n");
+ return -EINVAL;
+ }
+
+ msgid = buf[0];
+
+ switch (msgid) {
+ case DOWN_CHECK_TOPOLOGY:
+ case DOWN_REQUEST_TOPOLOGY:
+ hdcp_drv_mgr->tp_msgid = msgid;
+ break;
+ /* more cases added here */
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static ssize_t hdcp2p2_sysfs_wta_min_level_change(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int rc;
+ int min_enc_lvl;
+ struct hdcp_lib_handle *handle;
+ ssize_t ret = count;
+
+ if (!hdcp_drv_mgr) {
+ pr_err("invalid input\n");
+ return -EINVAL;
+ }
+
+ handle = hdcp_drv_mgr->handle;
+
+ rc = kstrtoint(buf, 10, &min_enc_lvl);
+ if (rc) {
+ pr_err("%s: kstrtoint failed. rc=%d\n", __func__, rc);
+ return -EINVAL;
+ }
+
+ if (handle && handle->client_ops->notify_lvl_change) {
+ handle->client_ops->notify_lvl_change(handle->client_ctx,
+ min_enc_lvl);
+ }
+
+ return ret;
+}
+
+static DEVICE_ATTR(tp, 0644, msm_hdcp_1x_sysfs_rda_tp,
+ msm_hdcp_1x_sysfs_wta_tp);
+
+static DEVICE_ATTR(min_level_change, 0200, NULL,
+ hdcp2p2_sysfs_wta_min_level_change);
+
+void hdcp1_cache_repeater_topology(void *hdcp1_cached_tp)
+{
+ if (!hdcp_drv_mgr) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ memcpy((void *)&hdcp_drv_mgr->cached_tp,
+ hdcp1_cached_tp,
+ sizeof(struct HDCP_V2V1_MSG_TOPOLOGY));
+}
+
+static struct attribute *msm_hdcp_fs_attrs[] = {
+ &dev_attr_tp.attr,
+ &dev_attr_min_level_change.attr,
+ NULL
+};
+
+static struct attribute_group msm_hdcp_fs_attr_group = {
+ .attrs = msm_hdcp_fs_attrs
+};
+
+static int msm_hdcp_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static int msm_hdcp_close(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static const struct file_operations msm_hdcp_fops = {
+ .owner = THIS_MODULE,
+ .open = msm_hdcp_open,
+ .release = msm_hdcp_close,
+};
+
+static const struct of_device_id msm_hdcp_dt_match[] = {
+ { .compatible = "qcom,msm-hdcp",},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, msm_hdcp_dt_match);
+
+static int msm_hdcp_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ hdcp_drv_mgr = devm_kzalloc(&pdev->dev, sizeof(struct msm_hdcp_mgr),
+ GFP_KERNEL);
+ if (!hdcp_drv_mgr)
+ return -ENOMEM;
+
+ hdcp_drv_mgr->pdev = pdev;
+
+ platform_set_drvdata(pdev, hdcp_drv_mgr);
+
+ ret = alloc_chrdev_region(&hdcp_drv_mgr->dev_num, 0, 1, DRIVER_NAME);
+ if (ret < 0) {
+ pr_err("alloc_chrdev_region failed ret = %d\n", ret);
+ goto error_get_dev_num;
+ }
+
+ hdcp_drv_mgr->class = class_create(THIS_MODULE, CLASS_NAME);
+ if (IS_ERR(hdcp_drv_mgr->class)) {
+ ret = PTR_ERR(hdcp_drv_mgr->class);
+ pr_err("couldn't create class rc = %d\n", ret);
+ goto error_class_create;
+ }
+
+ hdcp_drv_mgr->device = device_create(hdcp_drv_mgr->class, NULL,
+ hdcp_drv_mgr->dev_num, NULL, DRIVER_NAME);
+ if (IS_ERR(hdcp_drv_mgr->device)) {
+ ret = PTR_ERR(hdcp_drv_mgr->device);
+ pr_err("device_create failed %d\n", ret);
+ goto error_class_device_create;
+ }
+
+ cdev_init(&hdcp_drv_mgr->cdev, &msm_hdcp_fops);
+ ret = cdev_add(&hdcp_drv_mgr->cdev,
+ MKDEV(MAJOR(hdcp_drv_mgr->dev_num), 0), 1);
+ if (ret < 0) {
+ pr_err("cdev_add failed %d\n", ret);
+ goto error_cdev_add;
+ }
+
+ ret = sysfs_create_group(&hdcp_drv_mgr->device->kobj,
+ &msm_hdcp_fs_attr_group);
+ if (ret)
+ pr_err("unable to register rotator sysfs nodes\n");
+
+ /* Store the handle in the hdcp drv mgr
+ * to be used for the sysfs notifications
+ */
+ hdcp_drv_mgr->handle = drv_client_handle;
+
+ return 0;
+error_cdev_add:
+ device_destroy(hdcp_drv_mgr->class, hdcp_drv_mgr->dev_num);
+error_class_device_create:
+ class_destroy(hdcp_drv_mgr->class);
+error_class_create:
+ unregister_chrdev_region(hdcp_drv_mgr->dev_num, 1);
+error_get_dev_num:
+ devm_kfree(&pdev->dev, hdcp_drv_mgr);
+ hdcp_drv_mgr = NULL;
+ return ret;
+}
+
+static int msm_hdcp_remove(struct platform_device *pdev)
+{
+ struct msm_hdcp_mgr *mgr;
+
+ mgr = (struct msm_hdcp_mgr *)platform_get_drvdata(pdev);
+ if (!mgr)
+ return -ENODEV;
+
+ sysfs_remove_group(&hdcp_drv_mgr->device->kobj,
+ &msm_hdcp_fs_attr_group);
+ cdev_del(&hdcp_drv_mgr->cdev);
+ device_destroy(hdcp_drv_mgr->class, hdcp_drv_mgr->dev_num);
+ class_destroy(hdcp_drv_mgr->class);
+ unregister_chrdev_region(hdcp_drv_mgr->dev_num, 1);
+
+ devm_kfree(&pdev->dev, hdcp_drv_mgr);
+ hdcp_drv_mgr = NULL;
+ return 0;
+}
+
+static struct platform_driver msm_hdcp_driver = {
+ .probe = msm_hdcp_probe,
+ .remove = msm_hdcp_remove,
+ .driver = {
+ .name = "msm_hdcp",
+ .of_match_table = msm_hdcp_dt_match,
+ .pm = NULL,
+ }
+};
+
+static int __init msm_hdcp_init(void)
+{
+ return platform_driver_register(&msm_hdcp_driver);
+}
+
+static void __exit msm_hdcp_exit(void)
+{
+ return platform_driver_unregister(&msm_hdcp_driver);
+}
+
+module_init(msm_hdcp_init);
+module_exit(msm_hdcp_exit);
+
+MODULE_DESCRIPTION("MSM HDCP driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/lkdtm.h b/drivers/misc/lkdtm.h
index fdf954c..cfa1039 100644
--- a/drivers/misc/lkdtm.h
+++ b/drivers/misc/lkdtm.h
@@ -21,6 +21,8 @@
void lkdtm_HUNG_TASK(void);
void lkdtm_ATOMIC_UNDERFLOW(void);
void lkdtm_ATOMIC_OVERFLOW(void);
+void lkdtm_CORRUPT_LIST_ADD(void);
+void lkdtm_CORRUPT_LIST_DEL(void);
/* lkdtm_heap.c */
void lkdtm_OVERWRITE_ALLOCATION(void);
diff --git a/drivers/misc/lkdtm_bugs.c b/drivers/misc/lkdtm_bugs.c
index 182ae18..bb3bb8e 100644
--- a/drivers/misc/lkdtm_bugs.c
+++ b/drivers/misc/lkdtm_bugs.c
@@ -5,8 +5,13 @@
* test source files.
*/
#include "lkdtm.h"
+#include <linux/list.h>
#include <linux/sched.h>
+struct lkdtm_list {
+ struct list_head node;
+};
+
/*
* Make sure our attempts to over run the kernel stack doesn't trigger
* a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
@@ -75,12 +80,18 @@
(void) recursive_loop(recur_count);
}
+static noinline void __lkdtm_CORRUPT_STACK(void *stack)
+{
+ memset(stack, 'a', 64);
+}
+
noinline void lkdtm_CORRUPT_STACK(void)
{
/* Use default char array length that triggers stack protection. */
char data[8];
+ __lkdtm_CORRUPT_STACK(&data);
- memset((void *)data, 0, 64);
+ pr_info("Corrupted stack with '%16s'...\n", data);
}
void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
@@ -146,3 +157,66 @@
pr_info("attempting bad atomic overflow\n");
atomic_inc(&over);
}
+
+void lkdtm_CORRUPT_LIST_ADD(void)
+{
+ /*
+ * Initially, an empty list via LIST_HEAD:
+ * test_head.next = &test_head
+ * test_head.prev = &test_head
+ */
+ LIST_HEAD(test_head);
+ struct lkdtm_list good, bad;
+ void *target[2] = { };
+ void *redirection = ⌖
+
+ pr_info("attempting good list addition\n");
+
+ /*
+ * Adding to the list performs these actions:
+ * test_head.next->prev = &good.node
+ * good.node.next = test_head.next
+ * good.node.prev = test_head
+ * test_head.next = good.node
+ */
+ list_add(&good.node, &test_head);
+
+ pr_info("attempting corrupted list addition\n");
+ /*
+ * In simulating this "write what where" primitive, the "what" is
+ * the address of &bad.node, and the "where" is the address held
+ * by "redirection".
+ */
+ test_head.next = redirection;
+ list_add(&bad.node, &test_head);
+
+ if (target[0] == NULL && target[1] == NULL)
+ pr_err("Overwrite did not happen, but no BUG?!\n");
+ else
+ pr_err("list_add() corruption not detected!\n");
+}
+
+void lkdtm_CORRUPT_LIST_DEL(void)
+{
+ LIST_HEAD(test_head);
+ struct lkdtm_list item;
+ void *target[2] = { };
+ void *redirection = ⌖
+
+ list_add(&item.node, &test_head);
+
+ pr_info("attempting good list removal\n");
+ list_del(&item.node);
+
+ pr_info("attempting corrupted list removal\n");
+ list_add(&item.node, &test_head);
+
+ /* As with the list_add() test above, this corrupts "next". */
+ item.node.next = redirection;
+ list_del(&item.node);
+
+ if (target[0] == NULL && target[1] == NULL)
+ pr_err("Overwrite did not happen, but no BUG?!\n");
+ else
+ pr_err("list_del() corruption not detected!\n");
+}
diff --git a/drivers/misc/lkdtm_core.c b/drivers/misc/lkdtm_core.c
index f9154b8..7eeb71a 100644
--- a/drivers/misc/lkdtm_core.c
+++ b/drivers/misc/lkdtm_core.c
@@ -197,6 +197,8 @@
CRASHTYPE(EXCEPTION),
CRASHTYPE(LOOP),
CRASHTYPE(OVERFLOW),
+ CRASHTYPE(CORRUPT_LIST_ADD),
+ CRASHTYPE(CORRUPT_LIST_DEL),
CRASHTYPE(CORRUPT_STACK),
CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
CRASHTYPE(OVERWRITE_ALLOCATION),
diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c
index 88e4523..fed992e 100644
--- a/drivers/misc/mic/vop/vop_vringh.c
+++ b/drivers/misc/mic/vop/vop_vringh.c
@@ -292,7 +292,6 @@
if (ret) {
dev_err(vop_dev(vdev), "%s %d err %d\n",
__func__, __LINE__, ret);
- kfree(vdev);
return ret;
}
diff --git a/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c b/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c
index 80f6e57..2f927bd 100644
--- a/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c
+++ b/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c
@@ -26,11 +26,14 @@
#include <linux/debugfs.h>
#include <linux/msm_audio_ion.h>
#include <linux/compat.h>
+#include <linux/mutex.h>
#include "audio_utils_aio.h"
#ifdef CONFIG_USE_DEV_CTRL_VOLUME
#include <linux/qdsp6v2/audio_dev_ctl.h>
#endif /*CONFIG_USE_DEV_CTRL_VOLUME*/
+static DEFINE_MUTEX(lock);
#ifdef CONFIG_DEBUG_FS
+
int audio_aio_debug_open(struct inode *inode, struct file *file)
{
file->private_data = inode->i_private;
@@ -43,29 +46,37 @@
const int debug_bufmax = 4096;
static char buffer[4096];
int n = 0;
- struct q6audio_aio *audio = file->private_data;
+ struct q6audio_aio *audio;
- mutex_lock(&audio->lock);
- n = scnprintf(buffer, debug_bufmax, "opened %d\n", audio->opened);
- n += scnprintf(buffer + n, debug_bufmax - n,
- "enabled %d\n", audio->enabled);
- n += scnprintf(buffer + n, debug_bufmax - n,
- "stopped %d\n", audio->stopped);
- n += scnprintf(buffer + n, debug_bufmax - n,
- "feedback %d\n", audio->feedback);
- mutex_unlock(&audio->lock);
- /* Following variables are only useful for debugging when
- * when playback halts unexpectedly. Thus, no mutual exclusion
- * enforced
- */
- n += scnprintf(buffer + n, debug_bufmax - n,
- "wflush %d\n", audio->wflush);
- n += scnprintf(buffer + n, debug_bufmax - n,
- "rflush %d\n", audio->rflush);
- n += scnprintf(buffer + n, debug_bufmax - n,
- "inqueue empty %d\n", list_empty(&audio->in_queue));
- n += scnprintf(buffer + n, debug_bufmax - n,
- "outqueue empty %d\n", list_empty(&audio->out_queue));
+ mutex_lock(&lock);
+ if (file->private_data != NULL) {
+ audio = file->private_data;
+ mutex_lock(&audio->lock);
+ n = scnprintf(buffer, debug_bufmax, "opened %d\n",
+ audio->opened);
+ n += scnprintf(buffer + n, debug_bufmax - n,
+ "enabled %d\n", audio->enabled);
+ n += scnprintf(buffer + n, debug_bufmax - n,
+ "stopped %d\n", audio->stopped);
+ n += scnprintf(buffer + n, debug_bufmax - n,
+ "feedback %d\n", audio->feedback);
+ mutex_unlock(&audio->lock);
+		/* Following variables are only useful for debugging
+ * when playback halts unexpectedly. Thus, no mutual exclusion
+ * enforced
+ */
+ n += scnprintf(buffer + n, debug_bufmax - n,
+ "wflush %d\n", audio->wflush);
+ n += scnprintf(buffer + n, debug_bufmax - n,
+ "rflush %d\n", audio->rflush);
+ n += scnprintf(buffer + n, debug_bufmax - n,
+ "inqueue empty %d\n",
+ list_empty(&audio->in_queue));
+ n += scnprintf(buffer + n, debug_bufmax - n,
+ "outqueue empty %d\n",
+ list_empty(&audio->out_queue));
+ }
+ mutex_unlock(&lock);
buffer[n] = 0;
return simple_read_from_buffer(buf, count, ppos, buffer, n);
}
@@ -580,6 +591,7 @@
struct q6audio_aio *audio = file->private_data;
pr_debug("%s[%pK]\n", __func__, audio);
+ mutex_lock(&lock);
mutex_lock(&audio->lock);
mutex_lock(&audio->read_lock);
mutex_lock(&audio->write_lock);
@@ -622,6 +634,8 @@
#endif
kfree(audio->codec_cfg);
kfree(audio);
+ file->private_data = NULL;
+ mutex_unlock(&lock);
return 0;
}
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index bd51c6c..7077b30 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -281,6 +281,7 @@
wait_queue_head_t app_block_wq;
atomic_t qseecom_state;
int is_apps_region_protected;
+ bool smcinvoke_support;
};
struct qseecom_sec_buf_fd_info {
@@ -580,10 +581,12 @@
desc.args[1] = req_64bit->sb_ptr;
desc.args[2] = req_64bit->sb_len;
}
+ qseecom.smcinvoke_support = true;
smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
ret = scm_call2(smc_id, &desc);
if (ret) {
+ qseecom.smcinvoke_support = false;
smc_id = TZ_OS_REGISTER_LISTENER_ID;
__qseecom_reentrancy_check_if_no_app_blocked(
smc_id);
@@ -1012,10 +1015,14 @@
struct qseecom_continue_blocked_request_ireq *req =
(struct qseecom_continue_blocked_request_ireq *)
req_buf;
- smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
+ if (qseecom.smcinvoke_support)
+ smc_id =
+ TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID;
+ else
+ smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
desc.arginfo =
TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
- desc.args[0] = req->app_id;
+ desc.args[0] = req->app_or_session_id;
ret = scm_call2(smc_id, &desc);
break;
}
@@ -1842,7 +1849,7 @@
return ret;
}
-int __qseecom_process_reentrancy_blocked_on_listener(
+static int __qseecom_process_blocked_on_listener_legacy(
struct qseecom_command_scm_resp *resp,
struct qseecom_registered_app_list *ptr_app,
struct qseecom_dev_handle *data)
@@ -1851,9 +1858,8 @@
int ret = 0;
struct qseecom_continue_blocked_request_ireq ireq;
struct qseecom_command_scm_resp continue_resp;
- sigset_t new_sigset, old_sigset;
- unsigned long flags;
bool found_app = false;
+ unsigned long flags;
if (!resp || !data) {
pr_err("invalid resp or data pointer\n");
@@ -1893,32 +1899,30 @@
pr_debug("lsntr %d in_use = %d\n",
resp->data, list_ptr->listener_in_use);
ptr_app->blocked_on_listener_id = resp->data;
+
/* sleep until listener is available */
- do {
- qseecom.app_block_ref_cnt++;
- ptr_app->app_blocked = true;
- sigfillset(&new_sigset);
- sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
- mutex_unlock(&app_access_lock);
- do {
- if (!wait_event_freezable(
- list_ptr->listener_block_app_wq,
- !list_ptr->listener_in_use)) {
- break;
- }
- } while (1);
- mutex_lock(&app_access_lock);
- sigprocmask(SIG_SETMASK, &old_sigset, NULL);
- ptr_app->app_blocked = false;
- qseecom.app_block_ref_cnt--;
- } while (list_ptr->listener_in_use == true);
+ qseecom.app_block_ref_cnt++;
+ ptr_app->app_blocked = true;
+ mutex_unlock(&app_access_lock);
+ if (wait_event_freezable(
+ list_ptr->listener_block_app_wq,
+ !list_ptr->listener_in_use)) {
+ pr_err("Interrupted: listener_id %d, app_id %d\n",
+ resp->data, ptr_app->app_id);
+ ret = -ERESTARTSYS;
+ goto exit;
+ }
+ mutex_lock(&app_access_lock);
+ ptr_app->app_blocked = false;
+ qseecom.app_block_ref_cnt--;
+
ptr_app->blocked_on_listener_id = 0;
/* notify the blocked app that listener is available */
pr_warn("Lsntr %d is available, unblock app(%d) %s in TZ\n",
resp->data, data->client.app_id,
data->client.app_name);
ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
- ireq.app_id = data->client.app_id;
+ ireq.app_or_session_id = data->client.app_id;
ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
&ireq, sizeof(ireq),
&continue_resp, sizeof(continue_resp));
@@ -1937,6 +1941,73 @@
return ret;
}
+static int __qseecom_process_blocked_on_listener_smcinvoke(
+ struct qseecom_command_scm_resp *resp)
+{
+ struct qseecom_registered_listener_list *list_ptr;
+ int ret = 0;
+ struct qseecom_continue_blocked_request_ireq ireq;
+ struct qseecom_command_scm_resp continue_resp;
+ unsigned int session_id;
+
+ if (!resp) {
+ pr_err("invalid resp pointer\n");
+ ret = -EINVAL;
+ goto exit;
+ }
+ session_id = resp->resp_type;
+ list_ptr = __qseecom_find_svc(resp->data);
+ if (!list_ptr) {
+ pr_err("Invalid listener ID\n");
+ ret = -ENODATA;
+ goto exit;
+ }
+ pr_debug("lsntr %d in_use = %d\n",
+ resp->data, list_ptr->listener_in_use);
+ /* sleep until listener is available */
+ qseecom.app_block_ref_cnt++;
+ mutex_unlock(&app_access_lock);
+ if (wait_event_freezable(
+ list_ptr->listener_block_app_wq,
+ !list_ptr->listener_in_use)) {
+ pr_err("Interrupted: listener_id %d, session_id %d\n",
+ resp->data, session_id);
+ ret = -ERESTARTSYS;
+ goto exit;
+ }
+ mutex_lock(&app_access_lock);
+ qseecom.app_block_ref_cnt--;
+
+ /* notify TZ that listener is available */
+ pr_warn("Lsntr %d is available, unblock session(%d) in TZ\n",
+ resp->data, session_id);
+ ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
+ ireq.app_or_session_id = session_id;
+ ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+ &ireq, sizeof(ireq),
+ &continue_resp, sizeof(continue_resp));
+ if (ret) {
+ pr_err("scm_call for continue blocked req for session %d failed, ret %d\n",
+ session_id, ret);
+ goto exit;
+ }
+ resp->result = QSEOS_RESULT_INCOMPLETE;
+exit:
+ return ret;
+}
+
+static int __qseecom_process_reentrancy_blocked_on_listener(
+ struct qseecom_command_scm_resp *resp,
+ struct qseecom_registered_app_list *ptr_app,
+ struct qseecom_dev_handle *data)
+{
+ if (!qseecom.smcinvoke_support)
+ return __qseecom_process_blocked_on_listener_legacy(
+ resp, ptr_app, data);
+ else
+ return __qseecom_process_blocked_on_listener_smcinvoke(
+ resp);
+}
static int __qseecom_reentrancy_process_incomplete_cmd(
struct qseecom_dev_handle *data,
struct qseecom_command_scm_resp *resp)
@@ -2912,7 +2983,11 @@
}
if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) {
pr_warn("RPMB key status is 0x%x\n", resp.result);
- *(uint32_t *)req.resp_buf = resp.result;
+ if (put_user(resp.result,
+ (uint32_t __user *)req.resp_buf)) {
+ ret = -EINVAL;
+ goto exit;
+ }
ret = 0;
}
break;
@@ -4699,18 +4774,15 @@
}
resp.result = desc->ret[0]; /*req_cmd*/
- resp.resp_type = desc->ret[1]; /*app_id*/
+ resp.resp_type = desc->ret[1]; /*incomplete:unused;blocked:session_id*/
resp.data = desc->ret[2]; /*listener_id*/
- dummy_private_data.client.app_id = desc->ret[1];
- dummy_app_entry.app_id = desc->ret[1];
-
mutex_lock(&app_access_lock);
ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
&dummy_private_data);
mutex_unlock(&app_access_lock);
if (ret)
- pr_err("Failed to req cmd %d lsnr %d on app %d, ret = %d\n",
+ pr_err("Failed on cmd %d for lsnr %d session %d, ret = %d\n",
(int)desc->ret[0], (int)desc->ret[2],
(int)desc->ret[1], ret);
desc->ret[0] = resp.result;
@@ -6507,11 +6579,16 @@
void *cmd_buf = NULL;
size_t cmd_len;
struct sglist_info *table = data->sglistinfo_ptr;
+ void *req_ptr = NULL;
+ void *resp_ptr = NULL;
ret = __qseecom_qteec_validate_msg(data, req);
if (ret)
return ret;
+ req_ptr = req->req_ptr;
+ resp_ptr = req->resp_ptr;
+
/* find app_id & img_name from list */
spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
@@ -6529,6 +6606,11 @@
return -ENOENT;
}
+ req->req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
+ (uintptr_t)req->req_ptr);
+ req->resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
+ (uintptr_t)req->resp_ptr);
+
if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
ret = __qseecom_update_qteec_req_buf(
@@ -6540,10 +6622,10 @@
if (qseecom.qsee_version < QSEE_VERSION_40) {
ireq.app_id = data->client.app_id;
ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
- (uintptr_t)req->req_ptr);
+ (uintptr_t)req_ptr);
ireq.req_len = req->req_len;
ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
- (uintptr_t)req->resp_ptr);
+ (uintptr_t)resp_ptr);
ireq.resp_len = req->resp_len;
ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
@@ -6554,10 +6636,10 @@
} else {
ireq_64bit.app_id = data->client.app_id;
ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
- (uintptr_t)req->req_ptr);
+ (uintptr_t)req_ptr);
ireq_64bit.req_len = req->req_len;
ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
- (uintptr_t)req->resp_ptr);
+ (uintptr_t)resp_ptr);
ireq_64bit.resp_len = req->resp_len;
if ((data->client.app_arch == ELFCLASS32) &&
((ireq_64bit.req_ptr >=
diff --git a/drivers/misc/uid_sys_stats.c b/drivers/misc/uid_sys_stats.c
index 871040e..8bf4c57 100644
--- a/drivers/misc/uid_sys_stats.c
+++ b/drivers/misc/uid_sys_stats.c
@@ -95,7 +95,7 @@
static int uid_cputime_show(struct seq_file *m, void *v)
{
- struct uid_entry *uid_entry;
+ struct uid_entry *uid_entry = NULL;
struct task_struct *task, *temp;
struct user_namespace *user_ns = current_user_ns();
cputime_t utime;
@@ -113,7 +113,8 @@
read_lock(&tasklist_lock);
do_each_thread(temp, task) {
uid = from_kuid_munged(user_ns, task_uid(task));
- uid_entry = find_or_register_uid(uid);
+ if (!uid_entry || uid_entry->uid != uid)
+ uid_entry = find_or_register_uid(uid);
if (!uid_entry) {
read_unlock(&tasklist_lock);
rt_mutex_unlock(&uid_lock);
@@ -252,7 +253,7 @@
static void update_io_stats_all_locked(void)
{
- struct uid_entry *uid_entry;
+ struct uid_entry *uid_entry = NULL;
struct task_struct *task, *temp;
struct user_namespace *user_ns = current_user_ns();
unsigned long bkt;
@@ -265,7 +266,8 @@
rcu_read_lock();
do_each_thread(temp, task) {
uid = from_kuid_munged(user_ns, task_uid(task));
- uid_entry = find_or_register_uid(uid);
+ if (!uid_entry || uid_entry->uid != uid)
+ uid_entry = find_or_register_uid(uid);
if (!uid_entry)
continue;
add_uid_io_stats(uid_entry, task, UID_STATE_TOTAL_CURR);
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index f300435..120fd54 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1718,6 +1718,8 @@
/* We couldn't get a response from the card. Give up. */
if (err) {
+ if (card->err_in_sdr104)
+ return ERR_RETRY;
/* Check if the card is removed */
if (mmc_detect_card_removed(card->host))
return ERR_NOMEDIUM;
@@ -2208,7 +2210,8 @@
brq->data.error == -ETIMEDOUT ||
brq->cmd.error == -EILSEQ ||
brq->cmd.error == -EIO ||
- brq->cmd.error == -ETIMEDOUT))
+ brq->cmd.error == -ETIMEDOUT ||
+ brq->sbc.error))
card->err_in_sdr104 = true;
/*
@@ -4586,6 +4589,10 @@
dev_set_drvdata(&card->dev, md);
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+ mmc_set_bus_resume_policy(card->host, 1);
+#endif
+
if (mmc_add_disk(md))
goto out;
@@ -4629,6 +4636,9 @@
pm_runtime_put_noidle(&card->dev);
mmc_blk_remove_req(md);
dev_set_drvdata(&card->dev, NULL);
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+ mmc_set_bus_resume_policy(card->host, 0);
+#endif
}
static int _mmc_blk_suspend(struct mmc_card *card, bool wait)
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index e3696c5..a531cb4 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -398,8 +398,6 @@
mmc_hostname(card->host), __func__, ret);
}
- device_enable_async_suspend(&card->dev);
-
ret = device_add(&card->dev);
if (ret)
return ret;
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 787779c..978dd9a 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -456,6 +456,22 @@
}
EXPORT_SYMBOL(mmc_clk_update_freq);
+void mmc_recovery_fallback_lower_speed(struct mmc_host *host)
+{
+ if (!host->card)
+ return;
+
+ if (host->sdr104_wa && mmc_card_sd(host->card) &&
+ (host->ios.timing == MMC_TIMING_UHS_SDR104) &&
+ !host->card->sdr104_blocked) {
+ pr_err("%s: %s: blocked SDR104, lower the bus-speed (SDR50 / DDR50)\n",
+ mmc_hostname(host), __func__);
+ mmc_host_clear_sdr104(host);
+ mmc_hw_reset(host);
+ host->card->sdr104_blocked = true;
+ }
+}
+
static int mmc_devfreq_set_target(struct device *dev,
unsigned long *freq, u32 devfreq_flags)
{
@@ -463,22 +479,22 @@
struct mmc_devfeq_clk_scaling *clk_scaling;
int err = 0;
int abort;
+ unsigned long pflags = current->flags;
+
+ /* Ensure scaling would happen even in memory pressure conditions */
+ current->flags |= PF_MEMALLOC;
if (!(host && freq)) {
pr_err("%s: unexpected host/freq parameter\n", __func__);
err = -EINVAL;
goto out;
}
+
clk_scaling = &host->clk_scaling;
if (!clk_scaling->enable)
goto out;
- if (*freq == UINT_MAX)
- *freq = clk_scaling->freq_table[1];
- else
- *freq = clk_scaling->freq_table[0];
-
pr_debug("%s: target freq = %lu (%s)\n", mmc_hostname(host),
*freq, current->comm);
@@ -507,6 +523,9 @@
if (abort)
goto out;
+ if (mmc_card_sd(host->card) && host->card->sdr104_blocked)
+ goto rel_host;
+
/*
* In case we were able to claim host there is no need to
* defer the frequency change. It will be done now
@@ -515,17 +534,21 @@
mmc_host_clk_hold(host);
err = mmc_clk_update_freq(host, *freq, clk_scaling->state);
- if (err && err != -EAGAIN)
+ if (err && err != -EAGAIN) {
pr_err("%s: clock scale to %lu failed with error %d\n",
mmc_hostname(host), *freq, err);
- else
+ mmc_recovery_fallback_lower_speed(host);
+ } else {
pr_debug("%s: clock change to %lu finished successfully (%s)\n",
mmc_hostname(host), *freq, current->comm);
+ }
mmc_host_clk_release(host);
+rel_host:
mmc_release_host(host);
out:
+ tsk_restore_flags(current, pflags, PF_MEMALLOC);
return err;
}
@@ -544,6 +567,9 @@
if (!host->clk_scaling.enable)
return;
+ if (mmc_card_sd(host->card) && host->card->sdr104_blocked)
+ return;
+
spin_lock_bh(&host->clk_scaling.lock);
if (host->clk_scaling.clk_scaling_in_progress ||
@@ -564,13 +590,15 @@
err = mmc_clk_update_freq(host, target_freq,
host->clk_scaling.state);
- if (err && err != -EAGAIN)
+ if (err && err != -EAGAIN) {
pr_err("%s: failed on deferred scale clocks (%d)\n",
mmc_hostname(host), err);
- else
+ mmc_recovery_fallback_lower_speed(host);
+ } else {
pr_debug("%s: clocks were successfully scaled to %lu (%s)\n",
mmc_hostname(host),
target_freq, current->comm);
+ }
host->clk_scaling.clk_scaling_in_progress = false;
atomic_dec(&host->clk_scaling.devfreq_abort);
}
@@ -1571,8 +1599,13 @@
}
}
if (!cmd->error || !cmd->retries ||
- mmc_card_removed(host->card))
+ mmc_card_removed(host->card)) {
+ if (cmd->error && !cmd->retries &&
+ cmd->opcode != MMC_SEND_STATUS &&
+ cmd->opcode != MMC_SEND_TUNING_BLOCK)
+ mmc_recovery_fallback_lower_speed(host);
break;
+ }
mmc_retune_recheck(host);
@@ -2252,6 +2285,7 @@
int claimed_host = 0;
unsigned long flags;
int retry_cnt = delay_ms/10;
+ bool pm = false;
do {
spin_lock_irqsave(&host->lock, flags);
@@ -2260,11 +2294,17 @@
host->claimer = current;
host->claim_cnt += 1;
claimed_host = 1;
+ if (host->claim_cnt == 1)
+ pm = true;
}
spin_unlock_irqrestore(&host->lock, flags);
if (!claimed_host)
mmc_delay(10);
} while (!claimed_host && retry_cnt--);
+
+ if (pm)
+ pm_runtime_get_sync(mmc_dev(host));
+
if (host->ops->enable && claimed_host && host->claim_cnt == 1)
host->ops->enable(host);
return claimed_host;
@@ -4257,12 +4297,18 @@
}
if (ret) {
- mmc_card_set_removed(host->card);
- if (host->card->sdr104_blocked) {
- mmc_host_set_sdr104(host);
- host->card->sdr104_blocked = false;
+ if (host->ops->get_cd && host->ops->get_cd(host)) {
+ mmc_recovery_fallback_lower_speed(host);
+ ret = 0;
+ } else {
+ mmc_card_set_removed(host->card);
+ if (host->card->sdr104_blocked) {
+ mmc_host_set_sdr104(host);
+ host->card->sdr104_blocked = false;
+ }
+ pr_debug("%s: card remove detected\n",
+ mmc_hostname(host));
}
- pr_debug("%s: card remove detected\n", mmc_hostname(host));
}
return ret;
@@ -4426,7 +4472,7 @@
BUG_ON(host->card);
- mmc_register_extcon(host);
+ mmc_unregister_extcon(host);
mmc_claim_host(host);
mmc_power_off(host);
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index c92ea77..127ab0f 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -224,7 +224,7 @@
host->clk_requests--;
if (mmc_host_may_gate_card(host->card) &&
!host->clk_requests)
- schedule_delayed_work(&host->clk_gate_work,
+ queue_delayed_work(host->clk_gate_wq, &host->clk_gate_work,
msecs_to_jiffies(host->clkgate_delay));
spin_unlock_irqrestore(&host->clk_lock, flags);
}
@@ -283,6 +283,8 @@
mmc_host_clk_gate_delayed(host);
if (host->clk_gated)
mmc_host_clk_hold(host);
+ if (host->clk_gate_wq)
+ destroy_workqueue(host->clk_gate_wq);
/* There should be only one user now */
WARN_ON(host->clk_requests > 1);
}
@@ -298,6 +300,42 @@
pr_err("%s: Failed to create clkgate_delay sysfs entry\n",
mmc_hostname(host));
}
+
+static inline bool mmc_host_clk_gate_wq_init(struct mmc_host *host)
+{
+ char *wq = NULL;
+ int wq_nl;
+ bool ret = true;
+
+ wq_nl = sizeof("mmc_clk_gate/") + sizeof(mmc_hostname(host)) + 1;
+
+ wq = kzalloc(wq_nl, GFP_KERNEL);
+ if (!wq) {
+ ret = false;
+ goto out;
+ }
+
+ snprintf(wq, wq_nl, "mmc_clk_gate/%s", mmc_hostname(host));
+
+ /*
+ * Create a work queue with flag WQ_MEM_RECLAIM set for
+ * mmc clock gate work. Because mmc thread is created with
+ * flag PF_MEMALLOC set, kernel will check for work queue
+ * flag WQ_MEM_RECLAIM when flush the work queue. If work
+ * queue flag WQ_MEM_RECLAIM is not set, kernel warning
+ * will be triggered.
+ */
+ host->clk_gate_wq = create_workqueue(wq);
+ if (!host->clk_gate_wq) {
+ ret = false;
+ dev_err(host->parent,
+ "failed to create clock gate work queue\n");
+ }
+
+ kfree(wq);
+out:
+ return ret;
+}
#else
static inline void mmc_host_clk_init(struct mmc_host *host)
@@ -316,6 +354,11 @@
{
return false;
}
+
+static inline bool mmc_host_clk_gate_wq_init(struct mmc_host *host)
+{
+ return true;
+}
#endif
void mmc_retune_enable(struct mmc_host *host)
@@ -644,6 +687,11 @@
return NULL;
}
+ if (!mmc_host_clk_gate_wq_init(host)) {
+ kfree(host);
+ return NULL;
+ }
+
mmc_host_clk_init(host);
spin_lock_init(&host->lock);
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 3184dcd..409718b 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -2944,6 +2944,7 @@
return -EBUSY;
}
+ MMC_TRACE(host, "%s\n", __func__);
err = _mmc_suspend(host, true);
if (err)
pr_err("%s: error %d doing aggressive suspend\n",
@@ -2962,6 +2963,7 @@
int err;
ktime_t start = ktime_get();
+ MMC_TRACE(host, "%s\n", __func__);
err = _mmc_resume(host);
if (err && err != -ENOMEDIUM)
pr_err("%s: error %d doing runtime resume\n",
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 1499d53..e32ed3d 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -333,7 +333,6 @@
sdio_set_of_node(func);
sdio_acpi_set_handle(func);
- device_enable_async_suspend(&func->dev);
ret = device_add(&func->dev);
if (ret == 0)
sdio_func_set_present(func);
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index b5c81e4..91ad946 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -77,6 +77,15 @@
int mmc_gpio_get_cd(struct mmc_host *host)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
+ int ret;
+
+ if (host->extcon) {
+ ret = extcon_get_state(host->extcon, EXTCON_MECHANICAL);
+ if (ret < 0)
+ dev_err(mmc_dev(host), "%s: Extcon failed to check card state, ret=%d\n",
+ __func__, ret);
+ return ret;
+ }
if (!ctx || !ctx->cd_gpio)
return -ENOSYS;
diff --git a/drivers/mmc/host/cmdq_hci.c b/drivers/mmc/host/cmdq_hci.c
index bfe1242..7c3638c 100644
--- a/drivers/mmc/host/cmdq_hci.c
+++ b/drivers/mmc/host/cmdq_hci.c
@@ -729,6 +729,15 @@
goto ring_doorbell;
}
+ if (cq_host->ops->crypto_cfg) {
+ err = cq_host->ops->crypto_cfg(mmc, mrq, tag);
+ if (err) {
+ pr_err("%s: failed to configure crypto: err %d tag %d\n",
+ mmc_hostname(mmc), err, tag);
+ goto out;
+ }
+ }
+
task_desc = (__le64 __force *)get_desc(cq_host, tag);
cmdq_prep_task_desc(mrq, &data, 1,
@@ -778,6 +787,8 @@
CMDQ_SEND_STATUS_TRIGGER, CQ_VENDOR_CFG);
cmdq_runtime_pm_put(cq_host);
+ if (cq_host->ops->crypto_cfg_reset)
+ cq_host->ops->crypto_cfg_reset(mmc, tag);
mrq->done(mrq);
}
diff --git a/drivers/mmc/host/cmdq_hci.h b/drivers/mmc/host/cmdq_hci.h
index 5347b3ab..8e9f765 100644
--- a/drivers/mmc/host/cmdq_hci.h
+++ b/drivers/mmc/host/cmdq_hci.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -208,6 +208,9 @@
void (*enhanced_strobe_mask)(struct mmc_host *mmc, bool set);
int (*reset)(struct mmc_host *mmc);
void (*post_cqe_halt)(struct mmc_host *mmc);
+ int (*crypto_cfg)(struct mmc_host *mmc, struct mmc_request *mrq,
+ u32 slot);
+ void (*crypto_cfg_reset)(struct mmc_host *mmc, unsigned int slot);
};
static inline void cmdq_writel(struct cmdq_host *host, u32 val, int reg)
diff --git a/drivers/mmc/host/sdhci-msm-ice.c b/drivers/mmc/host/sdhci-msm-ice.c
index ba6e51c..d624b48 100644
--- a/drivers/mmc/host/sdhci-msm-ice.c
+++ b/drivers/mmc/host/sdhci-msm-ice.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015, 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -13,19 +13,6 @@
#include "sdhci-msm-ice.h"
-static void sdhci_msm_ice_success_cb(void *host_ctrl,
- enum ice_event_completion evt)
-{
- struct sdhci_msm_host *msm_host = (struct sdhci_msm_host *)host_ctrl;
-
- if ((msm_host->ice.state == SDHCI_MSM_ICE_STATE_DISABLED &&
- evt == ICE_INIT_COMPLETION) || (msm_host->ice.state ==
- SDHCI_MSM_ICE_STATE_SUSPENDED && evt == ICE_RESUME_COMPLETION))
- msm_host->ice.state = SDHCI_MSM_ICE_STATE_ACTIVE;
-
- complete(&msm_host->ice.async_done);
-}
-
static void sdhci_msm_ice_error_cb(void *host_ctrl, u32 error)
{
struct sdhci_msm_host *msm_host = (struct sdhci_msm_host *)host_ctrl;
@@ -35,8 +22,6 @@
if (msm_host->ice.state == SDHCI_MSM_ICE_STATE_ACTIVE)
msm_host->ice.state = SDHCI_MSM_ICE_STATE_DISABLED;
-
- complete(&msm_host->ice.async_done);
}
static struct platform_device *sdhci_msm_ice_get_pdevice(struct device *dev)
@@ -194,89 +179,79 @@
struct sdhci_msm_host *msm_host = pltfm_host->priv;
int err = 0;
- init_completion(&msm_host->ice.async_done);
- if (msm_host->ice.vops->config) {
+ if (msm_host->ice.vops->init) {
+ err = sdhci_msm_ice_pltfm_init(msm_host);
+ if (err)
+ goto out;
+
+ if (msm_host->ice_hci_support)
+ sdhci_msm_enable_ice_hci(host, true);
+
err = msm_host->ice.vops->init(msm_host->ice.pdev,
msm_host,
- sdhci_msm_ice_success_cb,
sdhci_msm_ice_error_cb);
if (err) {
pr_err("%s: ice init err %d\n",
mmc_hostname(host->mmc), err);
+ sdhci_msm_ice_print_regs(host);
+ if (msm_host->ice_hci_support)
+ sdhci_msm_enable_ice_hci(host, false);
+ goto out;
+ }
+ msm_host->ice.state = SDHCI_MSM_ICE_STATE_ACTIVE;
+ }
+
+out:
+ return err;
+}
+
+void sdhci_msm_ice_cfg_reset(struct sdhci_host *host, u32 slot)
+{
+ writel_relaxed(SDHCI_MSM_ICE_ENABLE_BYPASS,
+ host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL_INFO_3_n + 16 * slot);
+}
+
+static
+int sdhci_msm_ice_get_cfg(struct sdhci_msm_host *msm_host, struct request *req,
+ unsigned int *bypass, short *key_index)
+{
+ int err = 0;
+ struct ice_data_setting ice_set;
+
+ memset(&ice_set, 0, sizeof(struct ice_data_setting));
+ if (msm_host->ice.vops->config_start) {
+ err = msm_host->ice.vops->config_start(
+ msm_host->ice.pdev,
+ req, &ice_set, false);
+ if (err) {
+ pr_err("%s: ice config failed %d\n",
+ mmc_hostname(msm_host->mmc), err);
return err;
}
}
-
- if (!wait_for_completion_timeout(&msm_host->ice.async_done,
- msecs_to_jiffies(SDHCI_MSM_ICE_COMPLETION_TIMEOUT_MS))) {
- pr_err("%s: ice init timedout after %d ms\n",
- mmc_hostname(host->mmc),
- SDHCI_MSM_ICE_COMPLETION_TIMEOUT_MS);
- sdhci_msm_ice_print_regs(host);
- return -ETIMEDOUT;
- }
-
- if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) {
- pr_err("%s: ice is in invalid state %d\n",
- mmc_hostname(host->mmc), msm_host->ice.state);
- return -EINVAL;
- }
- return 0;
+ /* if writing data command */
+ if (rq_data_dir(req) == WRITE)
+ *bypass = ice_set.encr_bypass ?
+ SDHCI_MSM_ICE_ENABLE_BYPASS :
+ SDHCI_MSM_ICE_DISABLE_BYPASS;
+ /* if reading data command */
+ else if (rq_data_dir(req) == READ)
+ *bypass = ice_set.decr_bypass ?
+ SDHCI_MSM_ICE_ENABLE_BYPASS :
+ SDHCI_MSM_ICE_DISABLE_BYPASS;
+ *key_index = ice_set.crypto_data.key_index;
+ return err;
}
-int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq,
- u32 slot)
+static
+void sdhci_msm_ice_update_cfg(struct sdhci_host *host, u64 lba,
+ u32 slot, unsigned int bypass, short key_index)
{
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_msm_host *msm_host = pltfm_host->priv;
- int err = 0;
- struct ice_data_setting ice_set;
- sector_t lba = 0;
unsigned int ctrl_info_val = 0;
- unsigned int bypass = SDHCI_MSM_ICE_ENABLE_BYPASS;
- struct request *req;
-
- if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) {
- pr_err("%s: ice is in invalid state %d\n",
- mmc_hostname(host->mmc), msm_host->ice.state);
- return -EINVAL;
- }
-
- BUG_ON(!mrq);
- memset(&ice_set, 0, sizeof(struct ice_data_setting));
- req = mrq->req;
- if (req) {
- lba = req->__sector;
- if (msm_host->ice.vops->config) {
- err = msm_host->ice.vops->config(msm_host->ice.pdev,
- req, &ice_set);
- if (err) {
- pr_err("%s: ice config failed %d\n",
- mmc_hostname(host->mmc), err);
- return err;
- }
- }
- /* if writing data command */
- if (rq_data_dir(req) == WRITE)
- bypass = ice_set.encr_bypass ?
- SDHCI_MSM_ICE_ENABLE_BYPASS :
- SDHCI_MSM_ICE_DISABLE_BYPASS;
- /* if reading data command */
- else if (rq_data_dir(req) == READ)
- bypass = ice_set.decr_bypass ?
- SDHCI_MSM_ICE_ENABLE_BYPASS :
- SDHCI_MSM_ICE_DISABLE_BYPASS;
- pr_debug("%s: %s: slot %d encr_bypass %d bypass %d decr_bypass %d key_index %d\n",
- mmc_hostname(host->mmc),
- (rq_data_dir(req) == WRITE) ? "WRITE" : "READ",
- slot, ice_set.encr_bypass, bypass,
- ice_set.decr_bypass,
- ice_set.crypto_data.key_index);
- }
/* Configure ICE index */
ctrl_info_val =
- (ice_set.crypto_data.key_index &
+ (key_index &
MASK_SDHCI_MSM_ICE_CTRL_INFO_KEY_INDEX)
<< OFFSET_SDHCI_MSM_ICE_CTRL_INFO_KEY_INDEX;
@@ -297,9 +272,43 @@
host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL_INFO_2_n + 16 * slot);
writel_relaxed(ctrl_info_val,
host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL_INFO_3_n + 16 * slot);
-
/* Ensure ICE registers are configured before issuing SDHCI request */
mb();
+}
+
+int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq,
+ u32 slot)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int err = 0;
+ short key_index = 0;
+ sector_t lba = 0;
+ unsigned int bypass = SDHCI_MSM_ICE_ENABLE_BYPASS;
+ struct request *req;
+
+ if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) {
+ pr_err("%s: ice is in invalid state %d\n",
+ mmc_hostname(host->mmc), msm_host->ice.state);
+ return -EINVAL;
+ }
+
+ WARN_ON(!mrq);
+ if (!mrq)
+ return -EINVAL;
+ req = mrq->req;
+ if (req) {
+ lba = req->__sector;
+ err = sdhci_msm_ice_get_cfg(msm_host, req, &bypass, &key_index);
+ if (err)
+ return err;
+ pr_debug("%s: %s: slot %d bypass %d key_index %d\n",
+ mmc_hostname(host->mmc),
+ (rq_data_dir(req) == WRITE) ? "WRITE" : "READ",
+ slot, bypass, key_index);
+ }
+
+ sdhci_msm_ice_update_cfg(host, lba, slot, bypass, key_index);
return 0;
}
@@ -315,25 +324,19 @@
return -EINVAL;
}
- init_completion(&msm_host->ice.async_done);
-
if (msm_host->ice.vops->reset) {
err = msm_host->ice.vops->reset(msm_host->ice.pdev);
if (err) {
pr_err("%s: ice reset failed %d\n",
mmc_hostname(host->mmc), err);
+ sdhci_msm_ice_print_regs(host);
return err;
}
}
- if (!wait_for_completion_timeout(&msm_host->ice.async_done,
- msecs_to_jiffies(SDHCI_MSM_ICE_COMPLETION_TIMEOUT_MS))) {
- pr_err("%s: ice reset timedout after %d ms\n",
- mmc_hostname(host->mmc),
- SDHCI_MSM_ICE_COMPLETION_TIMEOUT_MS);
- sdhci_msm_ice_print_regs(host);
- return -ETIMEDOUT;
- }
+ /* If ICE HCI support is present then re-enable it */
+ if (msm_host->ice_hci_support)
+ sdhci_msm_enable_ice_hci(host, true);
if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) {
pr_err("%s: ice is in invalid state after reset %d\n",
@@ -356,8 +359,6 @@
return -EINVAL;
}
- init_completion(&msm_host->ice.async_done);
-
if (msm_host->ice.vops->resume) {
err = msm_host->ice.vops->resume(msm_host->ice.pdev);
if (err) {
@@ -367,20 +368,7 @@
}
}
- if (!wait_for_completion_timeout(&msm_host->ice.async_done,
- msecs_to_jiffies(SDHCI_MSM_ICE_COMPLETION_TIMEOUT_MS))) {
- pr_err("%s: ice resume timedout after %d ms\n",
- mmc_hostname(host->mmc),
- SDHCI_MSM_ICE_COMPLETION_TIMEOUT_MS);
- sdhci_msm_ice_print_regs(host);
- return -ETIMEDOUT;
- }
-
- if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) {
- pr_err("%s: ice is in invalid state after resume %d\n",
- mmc_hostname(host->mmc), msm_host->ice.state);
- return -EINVAL;
- }
+ msm_host->ice.state = SDHCI_MSM_ICE_STATE_ACTIVE;
return 0;
}
diff --git a/drivers/mmc/host/sdhci-msm-ice.h b/drivers/mmc/host/sdhci-msm-ice.h
index 88ef0e2..23922cf 100644
--- a/drivers/mmc/host/sdhci-msm-ice.h
+++ b/drivers/mmc/host/sdhci-msm-ice.h
@@ -17,7 +17,6 @@
#include <linux/io.h>
#include <linux/of.h>
-#include <linux/async.h>
#include <linux/blkdev.h>
#include <crypto/ice.h>
@@ -97,6 +96,7 @@
#ifdef CONFIG_MMC_SDHCI_MSM_ICE
int sdhci_msm_ice_get_dev(struct sdhci_host *host);
int sdhci_msm_ice_init(struct sdhci_host *host);
+void sdhci_msm_ice_cfg_reset(struct sdhci_host *host, u32 slot);
int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq,
u32 slot);
int sdhci_msm_ice_reset(struct sdhci_host *host);
@@ -120,6 +120,11 @@
{
return 0;
}
+
+inline void sdhci_msm_ice_cfg_reset(struct sdhci_host *host, u32 slot)
+{
+}
+
inline int sdhci_msm_ice_cfg(struct sdhci_host *host,
struct mmc_request *mrq, u32 slot)
{
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 1d9cf34..1e25b31 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -42,6 +42,7 @@
#include <trace/events/mmc.h>
#include "sdhci-msm.h"
+#include "sdhci-msm-ice.h"
#include "cmdq_hci.h"
#define QOS_REMOVE_DELAY_MS 10
@@ -690,7 +691,7 @@
mclk_freq = 5;
else if (host->clock <= 187000000)
mclk_freq = 6;
- else if (host->clock <= 200000000)
+ else if (host->clock <= 208000000)
mclk_freq = 7;
writel_relaxed(((readl_relaxed(host->ioaddr +
@@ -1814,6 +1815,8 @@
int len, i;
int clk_table_len;
u32 *clk_table = NULL;
+ int ice_clk_table_len;
+ u32 *ice_clk_table = NULL;
enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
const char *lower_bus_speed = NULL;
@@ -1824,7 +1827,7 @@
}
pdata->status_gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags);
- if (gpio_is_valid(pdata->status_gpio) & !(flags & OF_GPIO_ACTIVE_LOW))
+ if (gpio_is_valid(pdata->status_gpio) && !(flags & OF_GPIO_ACTIVE_LOW))
pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
of_property_read_u32(np, "qcom,bus-width", &bus_width);
@@ -1871,6 +1874,20 @@
pdata->sup_clk_table = clk_table;
pdata->sup_clk_cnt = clk_table_len;
+ if (msm_host->ice.pdev) {
+ if (sdhci_msm_dt_get_array(dev, "qcom,ice-clk-rates",
+ &ice_clk_table, &ice_clk_table_len, 0)) {
+ dev_err(dev, "failed parsing supported ice clock rates\n");
+ goto out;
+ }
+ if (!ice_clk_table || !ice_clk_table_len) {
+ dev_err(dev, "Invalid clock table\n");
+ goto out;
+ }
+ pdata->sup_ice_clk_table = ice_clk_table;
+ pdata->sup_ice_clk_cnt = ice_clk_table_len;
+ }
+
pdata->vreg_data = devm_kzalloc(dev, sizeof(struct
sdhci_msm_slot_reg_data),
GFP_KERNEL);
@@ -1940,6 +1957,8 @@
msm_host->core_3_0v_support = true;
pdata->sdr104_wa = of_property_read_bool(np, "qcom,sdr104-wa");
+ msm_host->regs_restore.is_supported =
+ of_property_read_bool(np, "qcom,restore-after-cx-collapse");
return pdata;
out:
@@ -2345,21 +2364,6 @@
return ret;
}
-/*
- * Reset vreg by ensuring it is off during probe. A call
- * to enable vreg is needed to balance disable vreg
- */
-static int sdhci_msm_vreg_reset(struct sdhci_msm_pltfm_data *pdata)
-{
- int ret;
-
- ret = sdhci_msm_setup_vreg(pdata, 1, true);
- if (ret)
- return ret;
- ret = sdhci_msm_setup_vreg(pdata, 0, true);
- return ret;
-}
-
/* This init function should be called only once for each SDHC slot */
static int sdhci_msm_vreg_init(struct device *dev,
struct sdhci_msm_pltfm_data *pdata,
@@ -2394,7 +2398,7 @@
if (ret)
goto vdd_reg_deinit;
}
- ret = sdhci_msm_vreg_reset(pdata);
+
if (ret)
dev_err(dev, "vreg reset failed (%d)\n", ret);
goto out;
@@ -2488,15 +2492,31 @@
struct sdhci_msm_host *msm_host = pltfm_host->priv;
const struct sdhci_msm_offset *msm_host_offset =
msm_host->offset;
+ unsigned int irq_flags = 0;
+ struct irq_desc *pwr_irq_desc = irq_to_desc(msm_host->pwr_irq);
- pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n",
+ if (pwr_irq_desc)
+ irq_flags = ACCESS_PRIVATE(pwr_irq_desc->irq_data.common,
+ state_use_accessors);
+
+ pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x, pwr isr state=0x%x\n",
mmc_hostname(host->mmc),
sdhci_msm_readl_relaxed(host,
msm_host_offset->CORE_PWRCTL_STATUS),
sdhci_msm_readl_relaxed(host,
msm_host_offset->CORE_PWRCTL_MASK),
sdhci_msm_readl_relaxed(host,
- msm_host_offset->CORE_PWRCTL_CTL));
+ msm_host_offset->CORE_PWRCTL_CTL), irq_flags);
+
+ MMC_TRACE(host->mmc,
+ "%s: Sts: 0x%08x | Mask: 0x%08x | Ctrl: 0x%08x, pwr isr state=0x%x\n",
+ __func__,
+ sdhci_msm_readb_relaxed(host,
+ msm_host_offset->CORE_PWRCTL_STATUS),
+ sdhci_msm_readb_relaxed(host,
+ msm_host_offset->CORE_PWRCTL_MASK),
+ sdhci_msm_readb_relaxed(host,
+ msm_host_offset->CORE_PWRCTL_CTL), irq_flags);
}
static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
@@ -2571,7 +2591,9 @@
io_level = REQ_IO_HIGH;
}
if (irq_status & CORE_PWRCTL_BUS_OFF) {
- ret = sdhci_msm_setup_vreg(msm_host->pdata, false, false);
+ if (msm_host->pltfm_init_done)
+ ret = sdhci_msm_setup_vreg(msm_host->pdata,
+ false, false);
if (!ret) {
ret = sdhci_msm_setup_pins(msm_host->pdata, false);
ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
@@ -2766,10 +2788,14 @@
if (done)
init_completion(&msm_host->pwr_irq_completion);
else if (!wait_for_completion_timeout(&msm_host->pwr_irq_completion,
- msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS)))
+ msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS))) {
__WARN_printf("%s: request(%d) timed out waiting for pwr_irq\n",
mmc_hostname(host->mmc), req_type);
-
+ MMC_TRACE(host->mmc,
+ "%s: request(%d) timed out waiting for pwr_irq\n",
+ __func__, req_type);
+ sdhci_msm_dump_pwr_ctrl_regs(host);
+ }
pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
__func__, req_type);
}
@@ -2844,6 +2870,103 @@
return sel_clk;
}
+static void sdhci_msm_registers_save(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+
+ if (!msm_host->regs_restore.is_supported)
+ return;
+
+ msm_host->regs_restore.vendor_func = readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
+ msm_host->regs_restore.vendor_pwrctl_mask =
+ readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_PWRCTL_MASK);
+ msm_host->regs_restore.vendor_func2 =
+ readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
+ msm_host->regs_restore.vendor_func3 =
+ readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC3);
+ msm_host->regs_restore.hc_2c_2e =
+ sdhci_readl(host, SDHCI_CLOCK_CONTROL);
+ msm_host->regs_restore.hc_3c_3e =
+ sdhci_readl(host, SDHCI_AUTO_CMD_ERR);
+ msm_host->regs_restore.vendor_pwrctl_ctl =
+ readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_PWRCTL_CTL);
+ msm_host->regs_restore.hc_38_3a =
+ sdhci_readl(host, SDHCI_SIGNAL_ENABLE);
+ msm_host->regs_restore.hc_34_36 =
+ sdhci_readl(host, SDHCI_INT_ENABLE);
+ msm_host->regs_restore.hc_28_2a =
+ sdhci_readl(host, SDHCI_HOST_CONTROL);
+ msm_host->regs_restore.vendor_caps_0 =
+ readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
+ msm_host->regs_restore.hc_caps_1 =
+ sdhci_readl(host, SDHCI_CAPABILITIES_1);
+ msm_host->regs_restore.testbus_config = readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_TESTBUS_CONFIG);
+ msm_host->regs_restore.is_valid = true;
+
+ pr_debug("%s: %s: registers saved. PWRCTL_MASK = 0x%x\n",
+ mmc_hostname(host->mmc), __func__,
+ readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_PWRCTL_MASK));
+}
+
+static void sdhci_msm_registers_restore(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+
+ if (!msm_host->regs_restore.is_supported ||
+ !msm_host->regs_restore.is_valid)
+ return;
+
+ writel_relaxed(msm_host->regs_restore.vendor_func, host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
+ writel_relaxed(msm_host->regs_restore.vendor_pwrctl_mask,
+ host->ioaddr + msm_host_offset->CORE_PWRCTL_MASK);
+ writel_relaxed(msm_host->regs_restore.vendor_func2,
+ host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
+ writel_relaxed(msm_host->regs_restore.vendor_func3,
+ host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC3);
+ sdhci_writel(host, msm_host->regs_restore.hc_2c_2e,
+ SDHCI_CLOCK_CONTROL);
+ sdhci_writel(host, msm_host->regs_restore.hc_3c_3e,
+ SDHCI_AUTO_CMD_ERR);
+ writel_relaxed(msm_host->regs_restore.vendor_pwrctl_ctl,
+ host->ioaddr + msm_host_offset->CORE_PWRCTL_CTL);
+ sdhci_writel(host, msm_host->regs_restore.hc_38_3a,
+ SDHCI_SIGNAL_ENABLE);
+ sdhci_writel(host, msm_host->regs_restore.hc_34_36,
+ SDHCI_INT_ENABLE);
+ sdhci_writel(host, msm_host->regs_restore.hc_28_2a,
+ SDHCI_HOST_CONTROL);
+ writel_relaxed(msm_host->regs_restore.vendor_caps_0,
+ host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
+ sdhci_writel(host, msm_host->regs_restore.hc_caps_1,
+ SDHCI_CAPABILITIES_1);
+ writel_relaxed(msm_host->regs_restore.testbus_config, host->ioaddr +
+ msm_host_offset->CORE_TESTBUS_CONFIG);
+ msm_host->regs_restore.is_valid = false;
+
+ pr_debug("%s: %s: registers restored. PWRCTL_MASK = 0x%x\n",
+ mmc_hostname(host->mmc), __func__,
+ readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_PWRCTL_MASK));
+}
+
static int sdhci_msm_enable_controller_clock(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -2871,11 +2994,23 @@
goto disable_pclk;
}
+ if (!IS_ERR(msm_host->ice_clk)) {
+ rc = clk_prepare_enable(msm_host->ice_clk);
+ if (rc) {
+ pr_err("%s: %s: failed to enable the ice-clk with error %d\n",
+ mmc_hostname(host->mmc), __func__, rc);
+ goto disable_host_clk;
+ }
+ }
atomic_set(&msm_host->controller_clock, 1);
pr_debug("%s: %s: enabled controller clock\n",
mmc_hostname(host->mmc), __func__);
+ sdhci_msm_registers_restore(host);
goto out;
+disable_host_clk:
+ if (!IS_ERR(msm_host->clk))
+ clk_disable_unprepare(msm_host->clk);
disable_pclk:
if (!IS_ERR(msm_host->pclk))
clk_disable_unprepare(msm_host->pclk);
@@ -2892,12 +3027,13 @@
struct sdhci_msm_host *msm_host = pltfm_host->priv;
if (atomic_read(&msm_host->controller_clock)) {
+ sdhci_msm_registers_save(host);
if (!IS_ERR(msm_host->clk))
clk_disable_unprepare(msm_host->clk);
- if (!IS_ERR(msm_host->pclk))
- clk_disable_unprepare(msm_host->pclk);
if (!IS_ERR(msm_host->ice_clk))
clk_disable_unprepare(msm_host->ice_clk);
+ if (!IS_ERR(msm_host->pclk))
+ clk_disable_unprepare(msm_host->pclk);
sdhci_msm_bus_voting(host, 0);
atomic_set(&msm_host->controller_clock, 0);
pr_debug("%s: %s: disabled controller clock\n",
@@ -2970,14 +3106,9 @@
clk_disable_unprepare(msm_host->sleep_clk);
if (!IS_ERR_OR_NULL(msm_host->ff_clk))
clk_disable_unprepare(msm_host->ff_clk);
- clk_disable_unprepare(msm_host->clk);
- if (!IS_ERR(msm_host->pclk))
- clk_disable_unprepare(msm_host->pclk);
if (!IS_ERR_OR_NULL(msm_host->bus_clk))
clk_disable_unprepare(msm_host->bus_clk);
-
- atomic_set(&msm_host->controller_clock, 0);
- sdhci_msm_bus_voting(host, 0);
+ sdhci_msm_disable_controller_clock(host);
}
atomic_set(&msm_host->clks_on, enable);
goto out;
@@ -2990,6 +3121,8 @@
disable_controller_clk:
if (!IS_ERR_OR_NULL(msm_host->clk))
clk_disable_unprepare(msm_host->clk);
+ if (!IS_ERR(msm_host->ice_clk))
+ clk_disable_unprepare(msm_host->ice_clk);
if (!IS_ERR_OR_NULL(msm_host->pclk))
clk_disable_unprepare(msm_host->pclk);
atomic_set(&msm_host->controller_clock, 0);
@@ -3295,6 +3428,7 @@
int i, index = 0;
u32 test_bus_val = 0;
u32 debug_reg[MAX_TEST_BUS] = {0};
+ u32 sts = 0;
sdhci_msm_cache_debug_data(host);
pr_info("----------- VENDOR REGISTER DUMP -----------\n");
@@ -3357,6 +3491,29 @@
pr_info(" Test bus[%d to %d]: 0x%08x 0x%08x 0x%08x 0x%08x\n",
i, i + 3, debug_reg[i], debug_reg[i+1],
debug_reg[i+2], debug_reg[i+3]);
+ if (host->is_crypto_en) {
+ sdhci_msm_ice_get_status(host, &sts);
+ pr_info("%s: ICE status %x\n", mmc_hostname(host->mmc), sts);
+ sdhci_msm_ice_print_regs(host);
+ }
+}
+
+static void sdhci_msm_reset(struct sdhci_host *host, u8 mask)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ /* Set ICE core to be reset in sync with SDHC core */
+ if (msm_host->ice.pdev) {
+ if (msm_host->ice_hci_support)
+ writel_relaxed(1, host->ioaddr +
+ HC_VENDOR_SPECIFIC_ICE_CTRL);
+ else
+ writel_relaxed(1,
+ host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL);
+ }
+
+ sdhci_reset(host, mask);
}
/*
@@ -3939,6 +4096,8 @@
}
static struct sdhci_ops sdhci_msm_ops = {
+ .crypto_engine_cfg = sdhci_msm_ice_cfg,
+ .crypto_engine_reset = sdhci_msm_ice_reset,
.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
.check_power_status = sdhci_msm_check_power_status,
.platform_execute_tuning = sdhci_msm_execute_tuning,
@@ -3952,7 +4111,7 @@
.config_auto_tuning_cmd = sdhci_msm_config_auto_tuning_cmd,
.enable_controller_clock = sdhci_msm_enable_controller_clock,
.set_bus_width = sdhci_set_bus_width,
- .reset = sdhci_reset,
+ .reset = sdhci_msm_reset,
.clear_set_dumpregs = sdhci_msm_clear_set_dumpregs,
.enhanced_strobe_mask = sdhci_msm_enhanced_strobe_mask,
.reset_workaround = sdhci_msm_reset_workaround,
@@ -4151,7 +4310,7 @@
host = sdhci_pltfm_init(pdev, &msm_host->sdhci_msm_pdata, 0);
if (IS_ERR(host)) {
ret = PTR_ERR(host);
- goto out;
+ goto out_host_free;
}
pltfm_host = sdhci_priv(host);
@@ -4159,6 +4318,31 @@
msm_host->mmc = host->mmc;
msm_host->pdev = pdev;
+ /* get the ice device vops if present */
+ ret = sdhci_msm_ice_get_dev(host);
+ if (ret == -EPROBE_DEFER) {
+ /*
+ * SDHCI driver might be probed before ICE driver does.
+ * In that case we would like to return EPROBE_DEFER code
+ * in order to delay its probing.
+ */
+ dev_err(&pdev->dev, "%s: required ICE device not probed yet err = %d\n",
+ __func__, ret);
+ goto out_host_free;
+
+ } else if (ret == -ENODEV) {
+ /*
+ * ICE device is not enabled in DTS file. No need for further
+ * initialization of ICE driver.
+ */
+ dev_warn(&pdev->dev, "%s: ICE device is not enabled",
+ __func__);
+ } else if (ret) {
+ dev_err(&pdev->dev, "%s: sdhci_msm_ice_get_dev failed %d\n",
+ __func__, ret);
+ goto out_host_free;
+ }
+
/* Extract platform data */
if (pdev->dev.of_node) {
ret = of_alias_get_id(pdev->dev.of_node, "sdhc");
@@ -4218,6 +4402,28 @@
}
atomic_set(&msm_host->controller_clock, 1);
+ if (msm_host->ice.pdev) {
+ /* Setup SDC ICE clock */
+ msm_host->ice_clk = devm_clk_get(&pdev->dev, "ice_core_clk");
+ if (!IS_ERR(msm_host->ice_clk)) {
+ /* ICE core has only one clock frequency for now */
+ ret = clk_set_rate(msm_host->ice_clk,
+ msm_host->pdata->sup_ice_clk_table[0]);
+ if (ret) {
+ dev_err(&pdev->dev, "ICE_CLK rate set failed (%d) for %u\n",
+ ret,
+ msm_host->pdata->sup_ice_clk_table[0]);
+ goto pclk_disable;
+ }
+ ret = clk_prepare_enable(msm_host->ice_clk);
+ if (ret)
+ goto pclk_disable;
+
+ msm_host->ice_clk_rate =
+ msm_host->pdata->sup_clk_table[0];
+ }
+ }
+
/* Setup SDC MMC clock */
msm_host->clk = devm_clk_get(&pdev->dev, "core_clk");
if (IS_ERR(msm_host->clk)) {
@@ -4444,6 +4650,21 @@
msm_host->mmc->sdr104_wa = msm_host->pdata->sdr104_wa;
+ /* Initialize ICE if present */
+ if (msm_host->ice.pdev) {
+ ret = sdhci_msm_ice_init(host);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: SDHCi ICE init failed (%d)\n",
+ mmc_hostname(host->mmc), ret);
+ ret = -EINVAL;
+ goto vreg_deinit;
+ }
+ host->is_crypto_en = true;
+ /* Packed commands cannot be encrypted/decrypted using ICE */
+ msm_host->mmc->caps2 &= ~(MMC_CAP2_PACKED_WR |
+ MMC_CAP2_PACKED_WR_CONTROL);
+ }
+
init_completion(&msm_host->pwr_irq_completion);
if (gpio_is_valid(msm_host->pdata->status_gpio)) {
@@ -4512,6 +4733,8 @@
goto vreg_deinit;
}
+ msm_host->pltfm_init_done = true;
+
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_AUTOSUSPEND_DELAY_MS);
@@ -4581,6 +4804,8 @@
clk_disable_unprepare(msm_host->bus_clk);
pltfm_free:
sdhci_pltfm_free(pdev);
+out_host_free:
+ devm_kfree(&pdev->dev, msm_host);
out:
pr_debug("%s: Exit %s\n", dev_name(&pdev->dev), __func__);
return ret;
@@ -4670,6 +4895,7 @@
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = pltfm_host->priv;
ktime_t start = ktime_get();
+ int ret;
if (host->mmc->card && mmc_card_sdio(host->mmc->card))
goto defer_disable_host_irq;
@@ -4691,6 +4917,12 @@
trace_sdhci_msm_runtime_suspend(mmc_hostname(host->mmc), 0,
ktime_to_us(ktime_sub(ktime_get(), start)));
+ if (host->is_crypto_en) {
+ ret = sdhci_msm_ice_suspend(host);
+ if (ret < 0)
+ pr_err("%s: failed to suspend crypto engine %d\n",
+ mmc_hostname(host->mmc), ret);
+ }
return 0;
}
@@ -4700,6 +4932,21 @@
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = pltfm_host->priv;
ktime_t start = ktime_get();
+ int ret;
+
+ if (host->is_crypto_en) {
+ ret = sdhci_msm_enable_controller_clock(host);
+ if (ret) {
+ pr_err("%s: Failed to enable reqd clocks\n",
+ mmc_hostname(host->mmc));
+ goto skip_ice_resume;
+ }
+ ret = sdhci_msm_ice_resume(host);
+ if (ret)
+ pr_err("%s: failed to resume crypto engine %d\n",
+ mmc_hostname(host->mmc), ret);
+ }
+skip_ice_resume:
if (host->mmc->card && mmc_card_sdio(host->mmc->card))
goto defer_enable_host_irq;
diff --git a/drivers/mmc/host/sdhci-msm.h b/drivers/mmc/host/sdhci-msm.h
index 2b907e9..cdbaaa9 100644
--- a/drivers/mmc/host/sdhci-msm.h
+++ b/drivers/mmc/host/sdhci-msm.h
@@ -165,11 +165,28 @@
struct sdhci_msm_ice_data {
struct qcom_ice_variant_ops *vops;
- struct completion async_done;
struct platform_device *pdev;
int state;
};
+struct sdhci_msm_regs_restore {
+ bool is_supported;
+ bool is_valid;
+ u32 vendor_pwrctl_mask;
+ u32 vendor_pwrctl_ctl;
+ u32 vendor_caps_0;
+ u32 vendor_func;
+ u32 vendor_func2;
+ u32 vendor_func3;
+ u32 hc_2c_2e;
+ u32 hc_28_2a;
+ u32 hc_34_36;
+ u32 hc_38_3a;
+ u32 hc_3c_3e;
+ u32 hc_caps_1;
+ u32 testbus_config;
+};
+
struct sdhci_msm_debug_data {
struct mmc_host copy_mmc;
struct mmc_card copy_card;
@@ -225,6 +242,8 @@
bool mci_removed;
const struct sdhci_msm_offset *offset;
bool core_3_0v_support;
+ bool pltfm_init_done;
+ struct sdhci_msm_regs_restore regs_restore;
};
extern char *saved_command_line;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 8fbcdae..68e49bb 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -245,6 +245,8 @@
if (timeout == 0) {
pr_err("%s: Reset 0x%x never completed.\n",
mmc_hostname(host->mmc), (int)mask);
+ MMC_TRACE(host->mmc, "%s: Reset 0x%x never completed\n",
+ __func__, (int)mask);
if ((host->quirks2 & SDHCI_QUIRK2_USE_RESET_WORKAROUND)
&& host->ops->reset_workaround) {
if (!host->reset_wa_applied) {
@@ -305,6 +307,8 @@
/* Resetting the controller clears many */
host->preset_enabled = false;
}
+ if (host->is_crypto_en)
+ host->crypto_reset_reqd = true;
}
static void sdhci_init(struct sdhci_host *host, int soft)
@@ -1242,6 +1246,9 @@
if (timeout == 0) {
pr_err("%s: Controller never released inhibit bit(s).\n",
mmc_hostname(host->mmc));
+ MMC_TRACE(host->mmc,
+ "%s :Controller never released inhibit bit(s)\n",
+ __func__);
sdhci_dumpregs(host);
cmd->error = -EIO;
sdhci_finish_mrq(host, cmd->mrq);
@@ -1300,12 +1307,12 @@
if (cmd->data)
host->data_start_time = ktime_get();
trace_mmc_cmd_rw_start(cmd->opcode, cmd->arg, cmd->flags);
+ sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
MMC_TRACE(host->mmc,
"%s: updated 0x8=0x%08x 0xC=0x%08x 0xE=0x%08x\n", __func__,
sdhci_readl(host, SDHCI_ARGUMENT),
sdhci_readw(host, SDHCI_TRANSFER_MODE),
sdhci_readw(host, SDHCI_COMMAND));
- sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);
@@ -1531,6 +1538,8 @@
if (timeout == 0) {
pr_err("%s: Internal clock never stabilised.\n",
mmc_hostname(host->mmc));
+ MMC_TRACE(host->mmc,
+ "%s: Internal clock never stabilised.\n", __func__);
sdhci_dumpregs(host);
return;
}
@@ -1749,6 +1758,33 @@
return MMC_SEND_TUNING_BLOCK;
}
+static int sdhci_crypto_cfg(struct sdhci_host *host, struct mmc_request *mrq,
+ u32 slot)
+{
+ int err = 0;
+
+ if (host->crypto_reset_reqd && host->ops->crypto_engine_reset) {
+ err = host->ops->crypto_engine_reset(host);
+ if (err) {
+ pr_err("%s: crypto reset failed\n",
+ mmc_hostname(host->mmc));
+ goto out;
+ }
+ host->crypto_reset_reqd = false;
+ }
+
+ if (host->ops->crypto_engine_cfg) {
+ err = host->ops->crypto_engine_cfg(host, mrq, slot);
+ if (err) {
+ pr_err("%s: failed to configure crypto\n",
+ mmc_hostname(host->mmc));
+ goto out;
+ }
+ }
+out:
+ return err;
+}
+
static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct sdhci_host *host;
@@ -1815,6 +1851,13 @@
sdhci_get_tuning_cmd(host));
}
+ if (host->is_crypto_en) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ if (sdhci_crypto_cfg(host, mrq, 0))
+ goto end_req;
+ spin_lock_irqsave(&host->lock, flags);
+ }
+
if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
sdhci_send_command(host, mrq->sbc);
else
@@ -1823,6 +1866,12 @@
mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
+ return;
+end_req:
+ mrq->cmd->error = -EIO;
+ if (mrq->data)
+ mrq->data->error = -EIO;
+ mmc_request_done(host->mmc, mrq);
}
void sdhci_set_bus_width(struct sdhci_host *host, int width)
@@ -2875,6 +2924,7 @@
(host->cmd && sdhci_data_line_cmd(host->cmd))) {
pr_err("%s: Timeout waiting for hardware interrupt.\n",
mmc_hostname(host->mmc));
+ MMC_TRACE(host->mmc, "Timeout waiting for h/w interrupt\n");
sdhci_dumpregs(host);
if (host->data) {
@@ -2917,6 +2967,9 @@
return;
pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
mmc_hostname(host->mmc), (unsigned)intmask);
+ MMC_TRACE(host->mmc,
+ "Got command interrupt 0x%08x even though no command operation was in progress.\n",
+ (unsigned int)intmask);
sdhci_dumpregs(host);
return;
}
@@ -3069,6 +3122,9 @@
pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
mmc_hostname(host->mmc), (unsigned)intmask);
+ MMC_TRACE(host->mmc,
+ "Got data interrupt 0x%08x even though no data operation was in progress.\n",
+ (unsigned int)intmask);
sdhci_dumpregs(host);
return;
@@ -3104,6 +3160,11 @@
mmc_hostname(host->mmc), intmask,
host->data->error, ktime_to_ms(ktime_sub(
ktime_get(), host->data_start_time)));
+ MMC_TRACE(host->mmc,
+ "data txfr (0x%08x) error: %d after %lld ms\n",
+ intmask, host->data->error,
+ ktime_to_ms(ktime_sub(ktime_get(),
+ host->data_start_time)));
if (!host->mmc->sdr104_wa ||
(host->mmc->ios.timing != MMC_TIMING_UHS_SDR104))
@@ -3357,6 +3418,8 @@
if (unexpected) {
pr_err("%s: Unexpected interrupt 0x%08x.\n",
mmc_hostname(host->mmc), unexpected);
+ MMC_TRACE(host->mmc, "Unexpected interrupt 0x%08x.\n",
+ unexpected);
sdhci_dumpregs(host);
}
@@ -3704,6 +3767,27 @@
SDHCI_INT_RESPONSE, SDHCI_INT_ENABLE);
sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS);
}
+static int sdhci_cmdq_crypto_cfg(struct mmc_host *mmc,
+ struct mmc_request *mrq, u32 slot)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ if (!host->is_crypto_en)
+ return 0;
+
+ return sdhci_crypto_cfg(host, mrq, slot);
+}
+
+static void sdhci_cmdq_crypto_cfg_reset(struct mmc_host *mmc, unsigned int slot)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ if (!host->is_crypto_en)
+ return;
+
+ if (host->ops->crypto_cfg_reset)
+ host->ops->crypto_cfg_reset(host, slot);
+}
#else
static void sdhci_cmdq_set_transfer_params(struct mmc_host *mmc)
{
@@ -3747,6 +3831,18 @@
static void sdhci_cmdq_post_cqe_halt(struct mmc_host *mmc)
{
+
+}
+
+static int sdhci_cmdq_crypto_cfg(struct mmc_host *mmc,
+ struct mmc_request *mrq, u32 slot)
+{
+ return 0;
+}
+
+static void sdhci_cmdq_crypto_cfg_reset(struct mmc_host *mmc, unsigned int slot)
+{
+
}
#endif
@@ -3759,6 +3855,8 @@
.enhanced_strobe_mask = sdhci_enhanced_strobe_mask,
.post_cqe_halt = sdhci_cmdq_post_cqe_halt,
.set_transfer_params = sdhci_cmdq_set_transfer_params,
+ .crypto_cfg = sdhci_cmdq_crypto_cfg,
+ .crypto_cfg_reset = sdhci_cmdq_crypto_cfg_reset,
};
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index d9e656a..04e806c 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -628,6 +628,8 @@
enum sdhci_power_policy power_policy;
bool sdio_irq_async_status;
+ bool is_crypto_en;
+ bool crypto_reset_reqd;
u32 auto_cmd_err_sts;
struct ratelimit_state dbg_dump_rs;
@@ -666,6 +668,10 @@
unsigned int (*get_ro)(struct sdhci_host *host);
void (*reset)(struct sdhci_host *host, u8 mask);
int (*platform_execute_tuning)(struct sdhci_host *host, u32 opcode);
+ int (*crypto_engine_cfg)(struct sdhci_host *host,
+ struct mmc_request *mrq, u32 slot);
+ int (*crypto_engine_reset)(struct sdhci_host *host);
+ void (*crypto_cfg_reset)(struct sdhci_host *host, unsigned int slot);
void (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
void (*hw_reset)(struct sdhci_host *host);
void (*adma_workaround)(struct sdhci_host *host, u32 intmask);
diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
index 9d2424b..d9fab22 100644
--- a/drivers/mtd/nand/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/brcmnand/brcmnand.c
@@ -101,6 +101,9 @@
#define BRCMNAND_MIN_BLOCKSIZE (8 * 1024)
#define BRCMNAND_MIN_DEVSIZE (4ULL * 1024 * 1024)
+#define NAND_CTRL_RDY (INTFC_CTLR_READY | INTFC_FLASH_READY)
+#define NAND_POLL_STATUS_TIMEOUT_MS 100
+
/* Controller feature flags */
enum {
BRCMNAND_HAS_1K_SECTORS = BIT(0),
@@ -765,6 +768,31 @@
CS_SELECT_AUTO_DEVICE_ID_CFG = BIT(30),
};
+static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl,
+ u32 mask, u32 expected_val,
+ unsigned long timeout_ms)
+{
+ unsigned long limit;
+ u32 val;
+
+ if (!timeout_ms)
+ timeout_ms = NAND_POLL_STATUS_TIMEOUT_MS;
+
+ limit = jiffies + msecs_to_jiffies(timeout_ms);
+ do {
+ val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
+ if ((val & mask) == expected_val)
+ return 0;
+
+ cpu_relax();
+ } while (time_after(limit, jiffies));
+
+ dev_warn(ctrl->dev, "timeout on status poll (expected %x got %x)\n",
+ expected_val, val & mask);
+
+ return -ETIMEDOUT;
+}
+
static inline void brcmnand_set_wp(struct brcmnand_controller *ctrl, bool en)
{
u32 val = en ? CS_SELECT_NAND_WP : 0;
@@ -1024,12 +1052,39 @@
if ((ctrl->features & BRCMNAND_HAS_WP) && wp_on == 1) {
static int old_wp = -1;
+ int ret;
if (old_wp != wp) {
dev_dbg(ctrl->dev, "WP %s\n", wp ? "on" : "off");
old_wp = wp;
}
+
+ /*
+ * make sure ctrl/flash ready before and after
+ * changing state of #WP pin
+ */
+ ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY |
+ NAND_STATUS_READY,
+ NAND_CTRL_RDY |
+ NAND_STATUS_READY, 0);
+ if (ret)
+ return;
+
brcmnand_set_wp(ctrl, wp);
+ chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ /* NAND_STATUS_WP 0x00 = protected, 0x80 = not protected */
+ ret = bcmnand_ctrl_poll_status(ctrl,
+ NAND_CTRL_RDY |
+ NAND_STATUS_READY |
+ NAND_STATUS_WP,
+ NAND_CTRL_RDY |
+ NAND_STATUS_READY |
+ (wp ? 0 : NAND_STATUS_WP), 0);
+
+ if (ret)
+ dev_err_ratelimited(&host->pdev->dev,
+ "nand #WP expected %s\n",
+ wp ? "on" : "off");
}
}
@@ -1157,15 +1212,15 @@
static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd)
{
struct brcmnand_controller *ctrl = host->ctrl;
- u32 intfc;
+ int ret;
dev_dbg(ctrl->dev, "send native cmd %d addr_lo 0x%x\n", cmd,
brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS));
BUG_ON(ctrl->cmd_pending != 0);
ctrl->cmd_pending = cmd;
- intfc = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
- WARN_ON(!(intfc & INTFC_CTLR_READY));
+ ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0);
+ WARN_ON(ret);
mb(); /* flush previous writes */
brcmnand_write_reg(ctrl, BRCMNAND_CMD_START,
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index d0fc165..21dde52 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -1255,6 +1255,13 @@
return -EINVAL;
}
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret) {
+ dev_err(nor->dev,
+ "timeout while writing configuration register\n");
+ return ret;
+ }
+
/* read back and check it */
ret = read_cr(nor);
if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 7ab24c5..05369dc 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -265,6 +265,8 @@
sizeof(*dm),
1000);
+ kfree(dm);
+
return rc;
}
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 9d91f96..8cc7467 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -112,7 +112,6 @@
source "drivers/net/ethernet/micrel/Kconfig"
source "drivers/net/ethernet/microchip/Kconfig"
source "drivers/net/ethernet/moxa/Kconfig"
-source "drivers/net/ethernet/msm/Kconfig"
source "drivers/net/ethernet/myricom/Kconfig"
config FEALNX
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index b31cbc2..a09423d 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -52,7 +52,6 @@
obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/
obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/
obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/
-obj-$(CONFIG_ARCH_QCOM) += msm/
obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/
obj-$(CONFIG_FEALNX) += fealnx.o
obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 8af2c88..45bb0fe 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -1153,6 +1153,12 @@
if (skb == NULL)
break;
np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(np->pci_dev,
+ np->rx_info[i].mapping)) {
+ dev_kfree_skb(skb);
+ np->rx_info[i].skb = NULL;
+ break;
+ }
/* Grrr, we cannot offset to correctly align the IP header. */
np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
}
@@ -1183,8 +1189,9 @@
{
struct netdev_private *np = netdev_priv(dev);
unsigned int entry;
+ unsigned int prev_tx;
u32 status;
- int i;
+ int i, j;
/*
* be cautious here, wrapping the queue has weird semantics
@@ -1202,6 +1209,7 @@
}
#endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
+ prev_tx = np->cur_tx;
entry = np->cur_tx % TX_RING_SIZE;
for (i = 0; i < skb_num_frags(skb); i++) {
int wrap_ring = 0;
@@ -1235,6 +1243,11 @@
skb_frag_size(this_frag),
PCI_DMA_TODEVICE);
}
+ if (pci_dma_mapping_error(np->pci_dev,
+ np->tx_info[entry].mapping)) {
+ dev->stats.tx_dropped++;
+ goto err_out;
+ }
np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
np->tx_ring[entry].status = cpu_to_le32(status);
@@ -1269,8 +1282,30 @@
netif_stop_queue(dev);
return NETDEV_TX_OK;
-}
+err_out:
+ entry = prev_tx % TX_RING_SIZE;
+ np->tx_info[entry].skb = NULL;
+ if (i > 0) {
+ pci_unmap_single(np->pci_dev,
+ np->tx_info[entry].mapping,
+ skb_first_frag_len(skb),
+ PCI_DMA_TODEVICE);
+ np->tx_info[entry].mapping = 0;
+ entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
+ for (j = 1; j < i; j++) {
+ pci_unmap_single(np->pci_dev,
+ np->tx_info[entry].mapping,
+ skb_frag_size(
+ &skb_shinfo(skb)->frags[j-1]),
+ PCI_DMA_TODEVICE);
+ entry++;
+ }
+ }
+ dev_kfree_skb_any(skb);
+ np->cur_tx = prev_tx;
+ return NETDEV_TX_OK;
+}
/* The interrupt handler does all of the Rx thread work and cleans up
after the Tx thread. */
@@ -1570,6 +1605,12 @@
break; /* Better luck next round. */
np->rx_info[entry].mapping =
pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(np->pci_dev,
+ np->rx_info[entry].mapping)) {
+ dev_kfree_skb(skb);
+ np->rx_info[entry].skb = NULL;
+ break;
+ }
np->rx_ring[entry].rxaddr =
cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index ca106d4..3424435 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -2825,8 +2825,10 @@
/* Flush Tx queues */
ret = xgbe_flush_tx_queues(pdata);
- if (ret)
+ if (ret) {
+ netdev_err(pdata->netdev, "error flushing TX queues\n");
return ret;
+ }
/*
* Initialize DMA related features
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 0f0f3014..1e4e8b2 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -877,7 +877,9 @@
DBGPR("-->xgbe_start\n");
- hw_if->init(pdata);
+ ret = hw_if->init(pdata);
+ if (ret)
+ return ret;
ret = phy_if->phy_start(pdata);
if (ret)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 48ee411..20e569b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1097,7 +1097,7 @@
{
#ifdef CONFIG_INET
struct tcphdr *th;
- int len, nw_off, tcp_opt_len;
+ int len, nw_off, tcp_opt_len = 0;
if (tcp_ts)
tcp_opt_len = 12;
@@ -1499,6 +1499,7 @@
netdev_warn(bp->dev, "Link speed %d no longer supported\n",
speed);
}
+ set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
/* fall thru */
}
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
@@ -5110,6 +5111,7 @@
struct hwrm_port_phy_qcfg_input req = {0};
struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
u8 link_up = link_info->link_up;
+ u16 diff;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
@@ -5197,6 +5199,18 @@
link_info->link_up = 0;
}
mutex_unlock(&bp->hwrm_cmd_lock);
+
+ diff = link_info->support_auto_speeds ^ link_info->advertising;
+ if ((link_info->support_auto_speeds | diff) !=
+ link_info->support_auto_speeds) {
+ /* An advertised speed is no longer supported, so we need to
+ * update the advertisement settings. Caller holds RTNL
+ * so we can modify link settings.
+ */
+ link_info->advertising = link_info->support_auto_speeds;
+ if (link_info->autoneg & BNXT_AUTONEG_SPEED)
+ bnxt_hwrm_set_link_setting(bp, true, false);
+ }
return 0;
}
@@ -6080,29 +6094,37 @@
mod_timer(&bp->timer, jiffies + bp->current_interval);
}
-/* Only called from bnxt_sp_task() */
-static void bnxt_reset(struct bnxt *bp, bool silent)
+static void bnxt_rtnl_lock_sp(struct bnxt *bp)
{
- /* bnxt_reset_task() calls bnxt_close_nic() which waits
- * for BNXT_STATE_IN_SP_TASK to clear.
- * If there is a parallel dev_close(), bnxt_close() may be holding
+ /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
+ * set. If the device is being closed, bnxt_close() may be holding
* rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
* must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
*/
clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
rtnl_lock();
- if (test_bit(BNXT_STATE_OPEN, &bp->state))
- bnxt_reset_task(bp, silent);
+}
+
+static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
+{
set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
rtnl_unlock();
}
+/* Only called from bnxt_sp_task() */
+static void bnxt_reset(struct bnxt *bp, bool silent)
+{
+ bnxt_rtnl_lock_sp(bp);
+ if (test_bit(BNXT_STATE_OPEN, &bp->state))
+ bnxt_reset_task(bp, silent);
+ bnxt_rtnl_unlock_sp(bp);
+}
+
static void bnxt_cfg_ntp_filters(struct bnxt *);
static void bnxt_sp_task(struct work_struct *work)
{
struct bnxt *bp = container_of(work, struct bnxt, sp_task);
- int rc;
set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
smp_mb__after_atomic();
@@ -6116,12 +6138,6 @@
if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
bnxt_cfg_ntp_filters(bp);
- if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
- rc = bnxt_update_link(bp, true);
- if (rc)
- netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
- rc);
- }
if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
bnxt_hwrm_exec_fwd_req(bp);
if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
@@ -6142,18 +6158,39 @@
bnxt_hwrm_tunnel_dst_port_free(
bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
}
+ if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
+ bnxt_hwrm_port_qstats(bp);
+
+ /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
+ * must be the last functions to be called before exiting.
+ */
+ if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
+ int rc = 0;
+
+ if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
+ &bp->sp_event))
+ bnxt_hwrm_phy_qcaps(bp);
+
+ bnxt_rtnl_lock_sp(bp);
+ if (test_bit(BNXT_STATE_OPEN, &bp->state))
+ rc = bnxt_update_link(bp, true);
+ bnxt_rtnl_unlock_sp(bp);
+ if (rc)
+ netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
+ rc);
+ }
+ if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
+ bnxt_rtnl_lock_sp(bp);
+ if (test_bit(BNXT_STATE_OPEN, &bp->state))
+ bnxt_get_port_module_status(bp);
+ bnxt_rtnl_unlock_sp(bp);
+ }
if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
bnxt_reset(bp, false);
if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
bnxt_reset(bp, true);
- if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event))
- bnxt_get_port_module_status(bp);
-
- if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
- bnxt_hwrm_port_qstats(bp);
-
smp_mb__before_atomic();
clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 51b164a..666bc06 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1089,6 +1089,7 @@
#define BNXT_RESET_TASK_SILENT_SP_EVENT 11
#define BNXT_GENEVE_ADD_PORT_SP_EVENT 12
#define BNXT_GENEVE_DEL_PORT_SP_EVENT 13
+#define BNXT_LINK_SPEED_CHNG_SP_EVENT 14
struct bnxt_pf_info pf;
#ifdef CONFIG_BNXT_SRIOV
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
index 380a641..258bc8d 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -230,7 +230,7 @@
/* Wait for 100ms as Octeon resets. */
mdelay(100);
- if (octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1) == 0x1234ULL) {
+ if (octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1)) {
dev_err(&oct->pci_dev->dev, "OCTEON[%d]: Soft reset failed\n",
oct->octeon_id);
return 1;
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
index e779af8..cda32d5 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
@@ -48,7 +48,7 @@
/* Wait for 10ms as Octeon resets. */
mdelay(100);
- if (octeon_read_csr64(oct, CN6XXX_SLI_SCRATCH1) == 0x1234ULL) {
+ if (octeon_read_csr64(oct, CN6XXX_SLI_SCRATCH1)) {
dev_err(&oct->pci_dev->dev, "Soft reset failed\n");
return 1;
}
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 050e21f..e858b1a 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -31,6 +31,7 @@
u8 lmac_type;
u8 lane_to_sds;
bool use_training;
+ bool autoneg;
bool link_up;
int lmacid; /* ID within BGX */
int lmacid_bd; /* ID on board */
@@ -47,8 +48,9 @@
struct bgx {
u8 bgx_id;
struct lmac lmac[MAX_LMAC_PER_BGX];
- int lmac_count;
+ u8 lmac_count;
u8 max_lmac;
+ u8 acpi_lmac_idx;
void __iomem *reg_base;
struct pci_dev *pdev;
bool is_dlm;
@@ -418,7 +420,17 @@
/* power down, reset autoneg, autoneg enable */
cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
cfg &= ~PCS_MRX_CTL_PWR_DN;
- cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN);
+ cfg |= PCS_MRX_CTL_RST_AN;
+ if (lmac->phydev) {
+ cfg |= PCS_MRX_CTL_AN_EN;
+ } else {
+ /* In scenarios where PHY driver is not present or it's a
+ * non-standard PHY, FW sets AN_EN to inform Linux driver
+ * to do auto-neg and link polling or not.
+ */
+ if (cfg & PCS_MRX_CTL_AN_EN)
+ lmac->autoneg = true;
+ }
bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);
if (lmac->lmac_type == BGX_MODE_QSGMII) {
@@ -429,7 +441,7 @@
return 0;
}
- if (lmac->lmac_type == BGX_MODE_SGMII) {
+ if ((lmac->lmac_type == BGX_MODE_SGMII) && lmac->phydev) {
if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
PCS_MRX_STATUS_AN_CPT, false)) {
dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n");
@@ -623,12 +635,71 @@
return -1;
}
+static void bgx_poll_for_sgmii_link(struct lmac *lmac)
+{
+ u64 pcs_link, an_result;
+ u8 speed;
+
+ pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid,
+ BGX_GMP_PCS_MRX_STATUS);
+
+ /*Link state bit is sticky, read it again*/
+ if (!(pcs_link & PCS_MRX_STATUS_LINK))
+ pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid,
+ BGX_GMP_PCS_MRX_STATUS);
+
+ if (bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_GMP_PCS_MRX_STATUS,
+ PCS_MRX_STATUS_AN_CPT, false)) {
+ lmac->link_up = false;
+ lmac->last_speed = SPEED_UNKNOWN;
+ lmac->last_duplex = DUPLEX_UNKNOWN;
+ goto next_poll;
+ }
+
+ lmac->link_up = ((pcs_link & PCS_MRX_STATUS_LINK) != 0) ? true : false;
+ an_result = bgx_reg_read(lmac->bgx, lmac->lmacid,
+ BGX_GMP_PCS_ANX_AN_RESULTS);
+
+ speed = (an_result >> 3) & 0x3;
+ lmac->last_duplex = (an_result >> 1) & 0x1;
+ switch (speed) {
+ case 0:
+ lmac->last_speed = 10;
+ break;
+ case 1:
+ lmac->last_speed = 100;
+ break;
+ case 2:
+ lmac->last_speed = 1000;
+ break;
+ default:
+ lmac->link_up = false;
+ lmac->last_speed = SPEED_UNKNOWN;
+ lmac->last_duplex = DUPLEX_UNKNOWN;
+ break;
+ }
+
+next_poll:
+
+ if (lmac->last_link != lmac->link_up) {
+ if (lmac->link_up)
+ bgx_sgmii_change_link_state(lmac);
+ lmac->last_link = lmac->link_up;
+ }
+
+ queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 3);
+}
+
static void bgx_poll_for_link(struct work_struct *work)
{
struct lmac *lmac;
u64 spu_link, smu_link;
lmac = container_of(work, struct lmac, dwork.work);
+ if (lmac->is_sgmii) {
+ bgx_poll_for_sgmii_link(lmac);
+ return;
+ }
/* Receive link is latching low. Force it high and verify it */
bgx_reg_modify(lmac->bgx, lmac->lmacid,
@@ -720,9 +791,21 @@
(lmac->lmac_type != BGX_MODE_XLAUI) &&
(lmac->lmac_type != BGX_MODE_40G_KR) &&
(lmac->lmac_type != BGX_MODE_10G_KR)) {
- if (!lmac->phydev)
- return -ENODEV;
-
+ if (!lmac->phydev) {
+ if (lmac->autoneg) {
+ bgx_reg_write(bgx, lmacid,
+ BGX_GMP_PCS_LINKX_TIMER,
+ PCS_LINKX_TIMER_COUNT);
+ goto poll;
+ } else {
+ /* Default to below link speed and duplex */
+ lmac->link_up = true;
+ lmac->last_speed = 1000;
+ lmac->last_duplex = 1;
+ bgx_sgmii_change_link_state(lmac);
+ return 0;
+ }
+ }
lmac->phydev->dev_flags = 0;
if (phy_connect_direct(&lmac->netdev, lmac->phydev,
@@ -731,15 +814,17 @@
return -ENODEV;
phy_start_aneg(lmac->phydev);
- } else {
- lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
- WQ_MEM_RECLAIM, 1);
- if (!lmac->check_link)
- return -ENOMEM;
- INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
- queue_delayed_work(lmac->check_link, &lmac->dwork, 0);
+ return 0;
}
+poll:
+ lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
+ WQ_MEM_RECLAIM, 1);
+ if (!lmac->check_link)
+ return -ENOMEM;
+ INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
+ queue_delayed_work(lmac->check_link, &lmac->dwork, 0);
+
return 0;
}
@@ -1075,13 +1160,13 @@
if (acpi_bus_get_device(handle, &adev))
goto out;
- acpi_get_mac_address(dev, adev, bgx->lmac[bgx->lmac_count].mac);
+ acpi_get_mac_address(dev, adev, bgx->lmac[bgx->acpi_lmac_idx].mac);
- SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, dev);
+ SET_NETDEV_DEV(&bgx->lmac[bgx->acpi_lmac_idx].netdev, dev);
- bgx->lmac[bgx->lmac_count].lmacid = bgx->lmac_count;
+ bgx->lmac[bgx->acpi_lmac_idx].lmacid = bgx->acpi_lmac_idx;
+ bgx->acpi_lmac_idx++; /* move to next LMAC */
out:
- bgx->lmac_count++;
return AE_OK;
}
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index 01cc7c8..1143e957 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -144,10 +144,15 @@
#define PCS_MRX_CTL_LOOPBACK1 BIT_ULL(14)
#define PCS_MRX_CTL_RESET BIT_ULL(15)
#define BGX_GMP_PCS_MRX_STATUS 0x30008
+#define PCS_MRX_STATUS_LINK BIT_ULL(2)
#define PCS_MRX_STATUS_AN_CPT BIT_ULL(5)
+#define BGX_GMP_PCS_ANX_ADV 0x30010
#define BGX_GMP_PCS_ANX_AN_RESULTS 0x30020
+#define BGX_GMP_PCS_LINKX_TIMER 0x30040
+#define PCS_LINKX_TIMER_COUNT 0x1E84
#define BGX_GMP_PCS_SGM_AN_ADV 0x30068
#define BGX_GMP_PCS_MISCX_CTL 0x30078
+#define PCS_MISC_CTL_MODE BIT_ULL(8)
#define PCS_MISC_CTL_DISP_EN BIT_ULL(13)
#define PCS_MISC_CTL_GMX_ENO BIT_ULL(11)
#define PCS_MISC_CTL_SAMP_PT_MASK 0x7Full
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index f9c2feb..0c2a32a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2201,9 +2201,10 @@
{
int err;
+ mutex_lock(&uld_mutex);
err = setup_sge_queues(adap);
if (err)
- goto out;
+ goto rel_lock;
err = setup_rss(adap);
if (err)
goto freeq;
@@ -2227,7 +2228,6 @@
goto irq_err;
}
- mutex_lock(&uld_mutex);
enable_rx(adap);
t4_sge_start(adap);
t4_intr_enable(adap);
@@ -2240,13 +2240,15 @@
#endif
/* Initialize hash mac addr list*/
INIT_LIST_HEAD(&adap->mac_hlist);
- out:
return err;
+
irq_err:
dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
freeq:
t4_free_sge_resources(adap);
- goto out;
+ rel_lock:
+ mutex_unlock(&uld_mutex);
+ return err;
}
static void cxgb_down(struct adapter *adapter)
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 0e74529..30e8550 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1118,7 +1118,7 @@
err:
mutex_unlock(&adapter->mcc_lock);
- if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
+ if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST)
status = -EPERM;
return status;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 9711ca4..b3c9cbe 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -319,6 +319,13 @@
if (ether_addr_equal(addr->sa_data, adapter->dev_mac))
return 0;
+ /* BE3 VFs without FILTMGMT privilege are not allowed to set its MAC
+ * address
+ */
+ if (BEx_chip(adapter) && be_virtfn(adapter) &&
+ !check_privilege(adapter, BE_PRIV_FILTMGMT))
+ return -EPERM;
+
/* if device is not running, copy MAC to netdev->dev_addr */
if (!netif_running(netdev))
goto done;
@@ -3630,7 +3637,11 @@
static void be_disable_if_filters(struct be_adapter *adapter)
{
- be_dev_mac_del(adapter, adapter->pmac_id[0]);
+ /* Don't delete MAC on BE3 VFs without FILTMGMT privilege */
+ if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
+ check_privilege(adapter, BE_PRIV_FILTMGMT))
+ be_dev_mac_del(adapter, adapter->pmac_id[0]);
+
be_clear_uc_list(adapter);
be_clear_mc_list(adapter);
@@ -3783,8 +3794,9 @@
if (status)
return status;
- /* For BE3 VFs, the PF programs the initial MAC address */
- if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
+ /* Don't add MAC on BE3 VFs without FILTMGMT privilege */
+ if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
+ check_privilege(adapter, BE_PRIV_FILTMGMT)) {
status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
if (status)
return status;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 9061c2f..3f4e711 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2007,8 +2007,8 @@
if (!rxb->page)
continue;
- dma_unmap_single(rx_queue->dev, rxb->dma,
- PAGE_SIZE, DMA_FROM_DEVICE);
+ dma_unmap_page(rx_queue->dev, rxb->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
__free_page(rxb->page);
rxb->page = NULL;
@@ -2951,7 +2951,7 @@
}
/* try reuse page */
- if (unlikely(page_count(page) != 1))
+ if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
return false;
/* change offset to the other half */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index dff7b60..c06845b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -304,8 +304,8 @@
struct hns_nic_ring_data *ring_data)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
- struct device *dev = priv->dev;
struct hnae_ring *ring = ring_data->ring;
+ struct device *dev = ring_to_dev(ring);
struct netdev_queue *dev_queue;
struct skb_frag_struct *frag;
int buf_num;
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 03dca73..b375ae9 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1604,8 +1604,11 @@
netdev->netdev_ops = &ibmveth_netdev_ops;
netdev->ethtool_ops = &netdev_ethtool_ops;
SET_NETDEV_DEV(netdev, &dev->dev);
- netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
- NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ netdev->hw_features = NETIF_F_SG;
+ if (vio_get_attribute(dev, "ibm,illan-options", NULL) != NULL) {
+ netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM;
+ }
netdev->features |= netdev->hw_features;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 0fbf686..9f2184b 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -189,9 +189,10 @@
}
ltb->map_id = adapter->map_id;
adapter->map_id++;
+
+ init_completion(&adapter->fw_done);
send_request_map(adapter, ltb->addr,
ltb->size, ltb->map_id);
- init_completion(&adapter->fw_done);
wait_for_completion(&adapter->fw_done);
return 0;
}
@@ -505,7 +506,7 @@
adapter->rx_pool = NULL;
rx_pool_arr_alloc_failed:
for (i = 0; i < adapter->req_rx_queues; i++)
- napi_enable(&adapter->napi[i]);
+ napi_disable(&adapter->napi[i]);
alloc_napi_failed:
return -ENOMEM;
}
@@ -1133,10 +1134,10 @@
crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
crq.request_statistics.len =
cpu_to_be32(sizeof(struct ibmvnic_statistics));
- ibmvnic_send_crq(adapter, &crq);
/* Wait for data to be written */
init_completion(&adapter->stats_done);
+ ibmvnic_send_crq(adapter, &crq);
wait_for_completion(&adapter->stats_done);
for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
@@ -2197,12 +2198,12 @@
if (!found) {
dev_err(dev, "Couldn't find error id %x\n",
- crq->request_error_rsp.error_id);
+ be32_to_cpu(crq->request_error_rsp.error_id));
return;
}
dev_err(dev, "Detailed info for error id %x:",
- crq->request_error_rsp.error_id);
+ be32_to_cpu(crq->request_error_rsp.error_id));
for (i = 0; i < error_buff->len; i++) {
pr_cont("%02x", (int)error_buff->buff[i]);
@@ -2281,8 +2282,8 @@
dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
crq->error_indication.
flags & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
- crq->error_indication.error_id,
- crq->error_indication.error_cause);
+ be32_to_cpu(crq->error_indication.error_id),
+ be16_to_cpu(crq->error_indication.error_cause));
error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
if (!error_buff)
@@ -2400,10 +2401,10 @@
case PARTIALSUCCESS:
dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
*req_value,
- (long int)be32_to_cpu(crq->request_capability_rsp.
+ (long int)be64_to_cpu(crq->request_capability_rsp.
number), name);
release_sub_crqs_no_irqs(adapter);
- *req_value = be32_to_cpu(crq->request_capability_rsp.number);
+ *req_value = be64_to_cpu(crq->request_capability_rsp.number);
init_sub_crqs(adapter, 1);
return;
default:
@@ -2809,9 +2810,9 @@
crq.collect_fw_trace.correlator = adapter->ras_comps[num].correlator;
crq.collect_fw_trace.ioba = cpu_to_be32(trace_tok);
crq.collect_fw_trace.len = adapter->ras_comps[num].trace_buff_size;
- ibmvnic_send_crq(adapter, &crq);
init_completion(&adapter->fw_done);
+ ibmvnic_send_crq(adapter, &crq);
wait_for_completion(&adapter->fw_done);
if (*ppos + len > be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
@@ -3591,9 +3592,9 @@
memset(&crq, 0, sizeof(crq));
crq.request_dump_size.first = IBMVNIC_CRQ_CMD;
crq.request_dump_size.cmd = REQUEST_DUMP_SIZE;
- ibmvnic_send_crq(adapter, &crq);
init_completion(&adapter->fw_done);
+ ibmvnic_send_crq(adapter, &crq);
wait_for_completion(&adapter->fw_done);
seq_write(seq, adapter->dump_data, adapter->dump_data_size);
@@ -3639,8 +3640,8 @@
}
}
- send_version_xchg(adapter);
reinit_completion(&adapter->init_done);
+ send_version_xchg(adapter);
if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
dev_err(dev, "Passive init timeout\n");
goto task_failed;
@@ -3650,9 +3651,9 @@
if (adapter->renegotiate) {
adapter->renegotiate = false;
release_sub_crqs_no_irqs(adapter);
- send_cap_queries(adapter);
reinit_completion(&adapter->init_done);
+ send_cap_queries(adapter);
if (!wait_for_completion_timeout(&adapter->init_done,
timeout)) {
dev_err(dev, "Passive init timeout\n");
@@ -3780,9 +3781,9 @@
adapter->debugfs_dump = ent;
}
}
- ibmvnic_send_crq_init(adapter);
init_completion(&adapter->init_done);
+ ibmvnic_send_crq_init(adapter);
if (!wait_for_completion_timeout(&adapter->init_done, timeout))
return 0;
@@ -3790,9 +3791,9 @@
if (adapter->renegotiate) {
adapter->renegotiate = false;
release_sub_crqs_no_irqs(adapter);
- send_cap_queries(adapter);
reinit_completion(&adapter->init_done);
+ send_cap_queries(adapter);
if (!wait_for_completion_timeout(&adapter->init_done,
timeout))
return 0;
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index a61447f..1264a36 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -246,6 +246,7 @@
E1000_STATUS_FUNC_SHIFT;
/* Set phy->phy_addr and phy->id. */
+ igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, 0);
ret_val = igb_get_phy_id_82575(hw);
if (ret_val)
return ret_val;
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index 1799fe1..c051987 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -900,10 +900,10 @@
DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR,
&lp->rx_dma_regs->dmasm);
- korina_free_ring(dev);
-
napi_disable(&lp->napi);
+ korina_free_ring(dev);
+
if (korina_init(dev) < 0) {
printk(KERN_ERR "%s: cannot restart device\n", dev->name);
return;
@@ -1064,12 +1064,12 @@
tmp = tmp | DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR;
writel(tmp, &lp->rx_dma_regs->dmasm);
- korina_free_ring(dev);
-
napi_disable(&lp->napi);
cancel_work_sync(&lp->restart_task);
+ korina_free_ring(dev);
+
free_irq(lp->rx_irq, dev);
free_irq(lp->tx_irq, dev);
free_irq(lp->ovr_irq, dev);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 86a89cb..4832223 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -2518,7 +2518,7 @@
}
const struct of_device_id of_mtk_match[] = {
- { .compatible = "mediatek,mt7623-eth" },
+ { .compatible = "mediatek,mt2701-eth" },
{},
};
MODULE_DEVICE_TABLE(of, of_mtk_match);
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index c7e9399..53daa6c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -158,7 +158,7 @@
return -ETIMEDOUT;
}
-static int mlx4_comm_internal_err(u32 slave_read)
+int mlx4_comm_internal_err(u32 slave_read)
{
return (u32)COMM_CHAN_EVENT_INTERNAL_ERR ==
(slave_read & (u32)COMM_CHAN_EVENT_INTERNAL_ERR) ? 1 : 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index cd3638e..0509996 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -554,8 +554,9 @@
break;
case MLX4_EVENT_TYPE_SRQ_LIMIT:
- mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
- __func__);
+ mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT. srq_no=0x%x, eq 0x%x\n",
+ __func__, be32_to_cpu(eqe->event.srq.srqn),
+ eq->eqn);
case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
if (mlx4_is_master(dev)) {
/* forward only to slave owning the SRQ */
@@ -570,15 +571,19 @@
eq->eqn, eq->cons_index, ret);
break;
}
- mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
- __func__, slave,
- be32_to_cpu(eqe->event.srq.srqn),
- eqe->type, eqe->subtype);
+ if (eqe->type ==
+ MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
+ mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
+ __func__, slave,
+ be32_to_cpu(eqe->event.srq.srqn),
+ eqe->type, eqe->subtype);
if (!ret && slave != dev->caps.function) {
- mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
- __func__, eqe->type,
- eqe->subtype, slave);
+ if (eqe->type ==
+ MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
+ mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
+ __func__, eqe->type,
+ eqe->subtype, slave);
mlx4_slave_event(dev, slave, eqe);
break;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index 0e8b7c4..8258d08 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -222,6 +222,18 @@
return;
mlx4_stop_catas_poll(dev);
+ if (dev->persist->interface_state & MLX4_INTERFACE_STATE_DELETION &&
+ mlx4_is_slave(dev)) {
+ /* In mlx4_remove_one on a VF */
+ u32 slave_read =
+ swab32(readl(&mlx4_priv(dev)->mfunc.comm->slave_read));
+
+ if (mlx4_comm_internal_err(slave_read)) {
+ mlx4_dbg(dev, "%s: comm channel is down, entering error state.\n",
+ __func__);
+ mlx4_enter_error_state(dev->persist);
+ }
+ }
mutex_lock(&intf_mutex);
list_for_each_entry(intf, &intf_list, list)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 88ee7d8..086920b6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -1220,6 +1220,7 @@
void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
void mlx4_enter_error_state(struct mlx4_dev_persistent *persist);
+int mlx4_comm_internal_err(u32 slave_read);
int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
enum mlx4_port_type *type);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 21ce0b7..6180ad4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -283,13 +283,15 @@
struct mlx5e_rx_am_stats {
int ppms; /* packets per msec */
+ int bpms; /* bytes per msec */
int epms; /* events per msec */
};
struct mlx5e_rx_am_sample {
- ktime_t time;
- unsigned int pkt_ctr;
- u16 event_ctr;
+ ktime_t time;
+ u32 pkt_ctr;
+ u32 byte_ctr;
+ u16 event_ctr;
};
struct mlx5e_rx_am { /* Adaptive Moderation */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 3744e2f..da1d73f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1183,11 +1183,11 @@
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
- info->tx_types = (BIT(1) << HWTSTAMP_TX_OFF) |
- (BIT(1) << HWTSTAMP_TX_ON);
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
- info->rx_filters = (BIT(1) << HWTSTAMP_FILTER_NONE) |
- (BIT(1) << HWTSTAMP_FILTER_ALL);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_ALL);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 36fbc6b..8cd7227 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -1081,7 +1081,7 @@
MLX5_FLOW_NAMESPACE_KERNEL);
if (!priv->fs.ns)
- return -EINVAL;
+ return -EOPNOTSUPP;
err = mlx5e_arfs_create_tables(priv);
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index d4fa851..9d37229 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2671,8 +2671,6 @@
PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
- stats->tx_carrier_errors =
- PPORT_802_3_GET(pstats, a_symbol_error_during_carrier);
stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
stats->rx_frame_errors;
stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;
@@ -3846,7 +3844,8 @@
return netdev;
err_cleanup_nic:
- profile->cleanup(priv);
+ if (profile->cleanup)
+ profile->cleanup(priv);
free_netdev(netdev);
return NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
index cbfac06..23ccec4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
@@ -183,28 +183,27 @@
mlx5e_am_step(am);
}
+#define IS_SIGNIFICANT_DIFF(val, ref) \
+ (((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */
+
static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr,
struct mlx5e_rx_am_stats *prev)
{
- int diff;
-
- if (!prev->ppms)
- return curr->ppms ? MLX5E_AM_STATS_BETTER :
+ if (!prev->bpms)
+ return curr->bpms ? MLX5E_AM_STATS_BETTER :
MLX5E_AM_STATS_SAME;
- diff = curr->ppms - prev->ppms;
- if (((100 * abs(diff)) / prev->ppms) > 10) /* more than 10% diff */
- return (diff > 0) ? MLX5E_AM_STATS_BETTER :
- MLX5E_AM_STATS_WORSE;
+ if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms))
+ return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER :
+ MLX5E_AM_STATS_WORSE;
- if (!prev->epms)
- return curr->epms ? MLX5E_AM_STATS_WORSE :
- MLX5E_AM_STATS_SAME;
+ if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
+ return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER :
+ MLX5E_AM_STATS_WORSE;
- diff = curr->epms - prev->epms;
- if (((100 * abs(diff)) / prev->epms) > 10) /* more than 10% diff */
- return (diff < 0) ? MLX5E_AM_STATS_BETTER :
- MLX5E_AM_STATS_WORSE;
+ if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
+ return (curr->epms < prev->epms) ? MLX5E_AM_STATS_BETTER :
+ MLX5E_AM_STATS_WORSE;
return MLX5E_AM_STATS_SAME;
}
@@ -266,10 +265,13 @@
{
s->time = ktime_get();
s->pkt_ctr = rq->stats.packets;
+ s->byte_ctr = rq->stats.bytes;
s->event_ctr = rq->cq.event_ctr;
}
#define MLX5E_AM_NEVENTS 64
+#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
+#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1))
static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start,
struct mlx5e_rx_am_sample *end,
@@ -277,13 +279,17 @@
{
/* u32 holds up to 71 minutes, should be enough */
u32 delta_us = ktime_us_delta(end->time, start->time);
- unsigned int npkts = end->pkt_ctr - start->pkt_ctr;
+ u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr);
+ u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr,
+ start->byte_ctr);
if (!delta_us)
return;
- curr_stats->ppms = (npkts * USEC_PER_MSEC) / delta_us;
- curr_stats->epms = (MLX5E_AM_NEVENTS * USEC_PER_MSEC) / delta_us;
+ curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us);
+ curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us);
+ curr_stats->epms = DIV_ROUND_UP(MLX5E_AM_NEVENTS * USEC_PER_MSEC,
+ delta_us);
}
void mlx5e_rx_am_work(struct work_struct *work)
@@ -308,7 +314,8 @@
switch (am->state) {
case MLX5E_AM_MEASURE_IN_PROGRESS:
- nevents = rq->cq.event_ctr - am->start_sample.event_ctr;
+ nevents = BIT_GAP(BITS_PER_TYPE(u16), rq->cq.event_ctr,
+ am->start_sample.event_ctr);
if (nevents < MLX5E_AM_NEVENTS)
break;
mlx5e_am_sample(rq, &end_sample);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index c7011ef..a8966e6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -352,7 +352,7 @@
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
if (!root_ns) {
esw_warn(dev, "Failed to get FDB flow namespace\n");
- return -ENOMEM;
+ return -EOPNOTSUPP;
}
flow_group_in = mlx5_vzalloc(inlen);
@@ -961,7 +961,7 @@
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
if (!root_ns) {
esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
- return -EIO;
+ return -EOPNOTSUPP;
}
flow_group_in = mlx5_vzalloc(inlen);
@@ -1078,7 +1078,7 @@
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
if (!root_ns) {
esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
- return -EIO;
+ return -EOPNOTSUPP;
}
flow_group_in = mlx5_vzalloc(inlen);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index d239f5d..b08b9e2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -414,6 +414,7 @@
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
if (!root_ns) {
esw_warn(dev, "Failed to get FDB flow namespace\n");
+ err = -EOPNOTSUPP;
goto ns_err;
}
@@ -520,7 +521,7 @@
ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
if (!ns) {
esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
- return -ENOMEM;
+ return -EOPNOTSUPP;
}
ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0);
@@ -639,7 +640,7 @@
esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
if (err1)
- esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err);
+ esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
}
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 7e20e4b..4de3c28 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1678,7 +1678,7 @@
struct mlx5_flow_table *ft;
ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
- if (!ns)
+ if (WARN_ON(!ns))
return -EINVAL;
ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL);
if (IS_ERR(ft)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 2115c8a..8beecd6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -67,6 +67,7 @@
enum {
MLX5_DROP_NEW_HEALTH_WORK,
+ MLX5_DROP_NEW_RECOVERY_WORK,
};
static u8 get_nic_state(struct mlx5_core_dev *dev)
@@ -193,7 +194,7 @@
mlx5_handle_bad_state(dev);
spin_lock(&health->wq_lock);
- if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
+ if (!test_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags))
schedule_delayed_work(&health->recover_work, recover_delay);
else
dev_err(&dev->pdev->dev,
@@ -328,6 +329,7 @@
init_timer(&health->timer);
health->sick = 0;
clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
+ clear_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
health->health = &dev->iseg->health;
health->health_counter = &dev->iseg->health_counter;
@@ -350,11 +352,22 @@
spin_lock(&health->wq_lock);
set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
+ set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
spin_unlock(&health->wq_lock);
cancel_delayed_work_sync(&health->recover_work);
cancel_work_sync(&health->work);
}
+void mlx5_drain_health_recovery(struct mlx5_core_dev *dev)
+{
+ struct mlx5_core_health *health = &dev->priv.health;
+
+ spin_lock(&health->wq_lock);
+ set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
+ spin_unlock(&health->wq_lock);
+ cancel_delayed_work_sync(&dev->priv.health.recover_work);
+}
+
void mlx5_health_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index d776db7..b3309f2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -155,8 +155,9 @@
},
};
-#define FW_INIT_TIMEOUT_MILI 2000
-#define FW_INIT_WAIT_MS 2
+#define FW_INIT_TIMEOUT_MILI 2000
+#define FW_INIT_WAIT_MS 2
+#define FW_PRE_INIT_TIMEOUT_MILI 10000
static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
{
@@ -956,6 +957,15 @@
*/
dev->state = MLX5_DEVICE_STATE_UP;
+ /* wait for firmware to accept initialization segments configurations
+ */
+ err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI);
+ if (err) {
+ dev_err(&dev->pdev->dev, "Firmware over %d MS in pre-initializing state, aborting\n",
+ FW_PRE_INIT_TIMEOUT_MILI);
+ goto out;
+ }
+
err = mlx5_cmd_init(dev);
if (err) {
dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");
@@ -1159,7 +1169,7 @@
int err = 0;
if (cleanup)
- mlx5_drain_health_wq(dev);
+ mlx5_drain_health_recovery(dev);
mutex_lock(&dev->intf_state_mutex);
if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 6905630..9e31a33 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -1178,7 +1178,8 @@
static int
mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_nexthop_group *nh_grp)
+ struct mlxsw_sp_nexthop_group *nh_grp,
+ bool reallocate)
{
u32 adj_index = nh_grp->adj_index; /* base */
struct mlxsw_sp_nexthop *nh;
@@ -1193,7 +1194,7 @@
continue;
}
- if (nh->update) {
+ if (nh->update || reallocate) {
err = mlxsw_sp_nexthop_mac_update(mlxsw_sp,
adj_index, nh);
if (err)
@@ -1254,7 +1255,8 @@
/* Nothing was added or removed, so no need to reallocate. Just
* update MAC on existing adjacency indexes.
*/
- err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp);
+ err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp,
+ false);
if (err) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
goto set_trap;
@@ -1282,7 +1284,7 @@
nh_grp->adj_index_valid = 1;
nh_grp->adj_index = adj_index;
nh_grp->ecmp_size = ecmp_size;
- err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp);
+ err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp, true);
if (err) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
goto set_trap;
diff --git a/drivers/net/ethernet/msm/Kconfig b/drivers/net/ethernet/msm/Kconfig
deleted file mode 100644
index 586e03e..0000000
--- a/drivers/net/ethernet/msm/Kconfig
+++ /dev/null
@@ -1,22 +0,0 @@
-#
-# msm network device configuration
-#
-
-config ECM_IPA
- tristate "STD ECM LAN Driver support"
- depends on IPA || IPA3
- help
- Enables LAN between applications processor and a tethered
- host using the STD ECM protocol.
- This Network interface is aimed to allow data path go through
- IPA core while using STD ECM protocol.
-
-config RNDIS_IPA
- tristate "RNDIS_IPA Network Interface Driver support"
- depends on IPA || IPA3
- help
- Enables LAN between applications processor and a tethered
- host using the RNDIS protocol.
- This Network interface is aimed to allow data path go through
- IPA core while using RNDIS protocol.
-
diff --git a/drivers/net/ethernet/msm/Makefile b/drivers/net/ethernet/msm/Makefile
deleted file mode 100644
index ec2699a..0000000
--- a/drivers/net/ethernet/msm/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-#
-# Makefile for the msm networking support.
-#
-
-obj-$(CONFIG_ECM_IPA) += ecm_ipa.o
-obj-$(CONFIG_RNDIS_IPA) += rndis_ipa.o
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 862f18e..11623aa 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -179,6 +179,49 @@
.get_mdio_data = ravb_get_mdio_data,
};
+/* Free TX skb function for AVB-IP */
+static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &priv->stats[q];
+ struct ravb_tx_desc *desc;
+ int free_num = 0;
+ int entry;
+ u32 size;
+
+ for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
+ bool txed;
+
+ entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
+ NUM_TX_DESC);
+ desc = &priv->tx_ring[q][entry];
+ txed = desc->die_dt == DT_FEMPTY;
+ if (free_txed_only && !txed)
+ break;
+ /* Descriptor type must be checked before all other reads */
+ dma_rmb();
+ size = le16_to_cpu(desc->ds_tagl) & TX_DS;
+ /* Free the original skb. */
+ if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
+ dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
+ size, DMA_TO_DEVICE);
+ /* Last packet descriptor? */
+ if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
+ entry /= NUM_TX_DESC;
+ dev_kfree_skb_any(priv->tx_skb[q][entry]);
+ priv->tx_skb[q][entry] = NULL;
+ if (txed)
+ stats->tx_packets++;
+ }
+ free_num++;
+ }
+ if (txed)
+ stats->tx_bytes += size;
+ desc->die_dt = DT_EEMPTY;
+ }
+ return free_num;
+}
+
/* Free skb's and DMA buffers for Ethernet AVB */
static void ravb_ring_free(struct net_device *ndev, int q)
{
@@ -186,6 +229,34 @@
int ring_size;
int i;
+ if (priv->rx_ring[q]) {
+ for (i = 0; i < priv->num_rx_ring[q]; i++) {
+ struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
+
+ if (!dma_mapping_error(ndev->dev.parent,
+ le32_to_cpu(desc->dptr)))
+ dma_unmap_single(ndev->dev.parent,
+ le32_to_cpu(desc->dptr),
+ PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
+ }
+ ring_size = sizeof(struct ravb_ex_rx_desc) *
+ (priv->num_rx_ring[q] + 1);
+ dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
+ priv->rx_desc_dma[q]);
+ priv->rx_ring[q] = NULL;
+ }
+
+ if (priv->tx_ring[q]) {
+ ravb_tx_free(ndev, q, false);
+
+ ring_size = sizeof(struct ravb_tx_desc) *
+ (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
+ dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
+ priv->tx_desc_dma[q]);
+ priv->tx_ring[q] = NULL;
+ }
+
/* Free RX skb ringbuffer */
if (priv->rx_skb[q]) {
for (i = 0; i < priv->num_rx_ring[q]; i++)
@@ -194,33 +265,15 @@
kfree(priv->rx_skb[q]);
priv->rx_skb[q] = NULL;
- /* Free TX skb ringbuffer */
- if (priv->tx_skb[q]) {
- for (i = 0; i < priv->num_tx_ring[q]; i++)
- dev_kfree_skb(priv->tx_skb[q][i]);
- }
- kfree(priv->tx_skb[q]);
- priv->tx_skb[q] = NULL;
-
/* Free aligned TX buffers */
kfree(priv->tx_align[q]);
priv->tx_align[q] = NULL;
- if (priv->rx_ring[q]) {
- ring_size = sizeof(struct ravb_ex_rx_desc) *
- (priv->num_rx_ring[q] + 1);
- dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
- priv->rx_desc_dma[q]);
- priv->rx_ring[q] = NULL;
- }
-
- if (priv->tx_ring[q]) {
- ring_size = sizeof(struct ravb_tx_desc) *
- (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
- dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
- priv->tx_desc_dma[q]);
- priv->tx_ring[q] = NULL;
- }
+ /* Free TX skb ringbuffer.
+ * SKBs are freed by ravb_tx_free() call above.
+ */
+ kfree(priv->tx_skb[q]);
+ priv->tx_skb[q] = NULL;
}
/* Format skb and descriptor buffer for Ethernet AVB */
@@ -431,44 +484,6 @@
return 0;
}
-/* Free TX skb function for AVB-IP */
-static int ravb_tx_free(struct net_device *ndev, int q)
-{
- struct ravb_private *priv = netdev_priv(ndev);
- struct net_device_stats *stats = &priv->stats[q];
- struct ravb_tx_desc *desc;
- int free_num = 0;
- int entry;
- u32 size;
-
- for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
- entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
- NUM_TX_DESC);
- desc = &priv->tx_ring[q][entry];
- if (desc->die_dt != DT_FEMPTY)
- break;
- /* Descriptor type must be checked before all other reads */
- dma_rmb();
- size = le16_to_cpu(desc->ds_tagl) & TX_DS;
- /* Free the original skb. */
- if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
- dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
- size, DMA_TO_DEVICE);
- /* Last packet descriptor? */
- if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
- entry /= NUM_TX_DESC;
- dev_kfree_skb_any(priv->tx_skb[q][entry]);
- priv->tx_skb[q][entry] = NULL;
- stats->tx_packets++;
- }
- free_num++;
- }
- stats->tx_bytes += size;
- desc->die_dt = DT_EEMPTY;
- }
- return free_num;
-}
-
static void ravb_get_tx_tstamp(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
@@ -902,7 +917,7 @@
spin_lock_irqsave(&priv->lock, flags);
/* Clear TX interrupt */
ravb_write(ndev, ~mask, TIS);
- ravb_tx_free(ndev, q);
+ ravb_tx_free(ndev, q, true);
netif_wake_subqueue(ndev, q);
mmiowb();
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1571,7 +1586,8 @@
priv->cur_tx[q] += NUM_TX_DESC;
if (priv->cur_tx[q] - priv->dirty_tx[q] >
- (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q))
+ (priv->num_tx_ring[q] - 1) * NUM_TX_DESC &&
+ !ravb_tx_free(ndev, q, true))
netif_stop_subqueue(ndev, q);
exit:
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 4ca4613..b1af7cd 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -1505,8 +1505,8 @@
*index = entry->index;
resolved = false;
} else if (removing) {
- ofdpa_neigh_del(trans, found);
*index = found->index;
+ ofdpa_neigh_del(trans, found);
} else if (updating) {
ofdpa_neigh_update(found, trans, NULL, false);
resolved = !is_zero_ether_addr(found->eth_dst);
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 00279da..c4ada72 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -4399,12 +4399,9 @@
struct efx_ef10_filter_table *table = efx->filter_state;
struct net_device *net_dev = efx->net_dev;
struct netdev_hw_addr *uc;
- int addr_count;
unsigned int i;
- addr_count = netdev_uc_count(net_dev);
table->uc_promisc = !!(net_dev->flags & IFF_PROMISC);
- table->dev_uc_count = 1 + addr_count;
ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
i = 1;
netdev_for_each_uc_addr(uc, net_dev) {
@@ -4415,6 +4412,8 @@
ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
i++;
}
+
+ table->dev_uc_count = i;
}
static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx)
@@ -4422,11 +4421,10 @@
struct efx_ef10_filter_table *table = efx->filter_state;
struct net_device *net_dev = efx->net_dev;
struct netdev_hw_addr *mc;
- unsigned int i, addr_count;
+ unsigned int i;
table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI));
- addr_count = netdev_mc_count(net_dev);
i = 0;
netdev_for_each_mc_addr(mc, net_dev) {
if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 1a70926..1bfb214 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -2801,6 +2801,11 @@
.timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
.offload_features = NETIF_F_IP_CSUM,
.mcdi_max_ver = -1,
+#ifdef CONFIG_SFC_SRIOV
+ .vswitching_probe = efx_port_dummy_op_int,
+ .vswitching_restore = efx_port_dummy_op_int,
+ .vswitching_remove = efx_port_dummy_op_void,
+#endif
};
const struct efx_nic_type falcon_b0_nic_type = {
@@ -2902,4 +2907,9 @@
.offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
.mcdi_max_ver = -1,
.max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
+#ifdef CONFIG_SFC_SRIOV
+ .vswitching_probe = efx_port_dummy_op_int,
+ .vswitching_restore = efx_port_dummy_op_int,
+ .vswitching_remove = efx_port_dummy_op_void,
+#endif
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index ac3d39c..890e4b0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -346,6 +346,7 @@
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
of_node_put(plat->phy_node);
+ of_node_put(plat->mdio_node);
}
#else
struct plat_stmmacenet_data *
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 93dc10b..aa02a03 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -100,6 +100,14 @@
/* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */
#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32) adr)) % ALIGNMENT)
+#ifdef __BIG_ENDIAN
+#define xemaclite_readl ioread32be
+#define xemaclite_writel iowrite32be
+#else
+#define xemaclite_readl ioread32
+#define xemaclite_writel iowrite32
+#endif
+
/**
* struct net_local - Our private per device data
* @ndev: instance of the network device
@@ -156,15 +164,15 @@
u32 reg_data;
/* Enable the Tx interrupts for the first Buffer */
- reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET);
- __raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK,
- drvdata->base_addr + XEL_TSR_OFFSET);
+ reg_data = xemaclite_readl(drvdata->base_addr + XEL_TSR_OFFSET);
+ xemaclite_writel(reg_data | XEL_TSR_XMIT_IE_MASK,
+ drvdata->base_addr + XEL_TSR_OFFSET);
/* Enable the Rx interrupts for the first buffer */
- __raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET);
+ xemaclite_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET);
/* Enable the Global Interrupt Enable */
- __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
+ xemaclite_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
}
/**
@@ -179,17 +187,17 @@
u32 reg_data;
/* Disable the Global Interrupt Enable */
- __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
+ xemaclite_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
/* Disable the Tx interrupts for the first buffer */
- reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET);
- __raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK),
- drvdata->base_addr + XEL_TSR_OFFSET);
+ reg_data = xemaclite_readl(drvdata->base_addr + XEL_TSR_OFFSET);
+ xemaclite_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK),
+ drvdata->base_addr + XEL_TSR_OFFSET);
/* Disable the Rx interrupts for the first buffer */
- reg_data = __raw_readl(drvdata->base_addr + XEL_RSR_OFFSET);
- __raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK),
- drvdata->base_addr + XEL_RSR_OFFSET);
+ reg_data = xemaclite_readl(drvdata->base_addr + XEL_RSR_OFFSET);
+ xemaclite_writel(reg_data & (~XEL_RSR_RECV_IE_MASK),
+ drvdata->base_addr + XEL_RSR_OFFSET);
}
/**
@@ -321,7 +329,7 @@
byte_count = ETH_FRAME_LEN;
/* Check if the expected buffer is available */
- reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
+ reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
XEL_TSR_XMIT_ACTIVE_MASK)) == 0) {
@@ -334,7 +342,7 @@
addr = (void __iomem __force *)((u32 __force)addr ^
XEL_BUFFER_OFFSET);
- reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
+ reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
XEL_TSR_XMIT_ACTIVE_MASK)) != 0)
@@ -345,16 +353,16 @@
/* Write the frame to the buffer */
xemaclite_aligned_write(data, (u32 __force *) addr, byte_count);
- __raw_writel((byte_count & XEL_TPLR_LENGTH_MASK),
- addr + XEL_TPLR_OFFSET);
+ xemaclite_writel((byte_count & XEL_TPLR_LENGTH_MASK),
+ addr + XEL_TPLR_OFFSET);
/* Update the Tx Status Register to indicate that there is a
* frame to send. Set the XEL_TSR_XMIT_ACTIVE_MASK flag which
* is used by the interrupt handler to check whether a frame
* has been transmitted */
- reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
+ reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK);
- __raw_writel(reg_data, addr + XEL_TSR_OFFSET);
+ xemaclite_writel(reg_data, addr + XEL_TSR_OFFSET);
return 0;
}
@@ -369,7 +377,7 @@
*
* Return: Total number of bytes received
*/
-static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
+static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
{
void __iomem *addr;
u16 length, proto_type;
@@ -379,7 +387,7 @@
addr = (drvdata->base_addr + drvdata->next_rx_buf_to_use);
/* Verify which buffer has valid data */
- reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
+ reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
if ((reg_data & XEL_RSR_RECV_DONE_MASK) == XEL_RSR_RECV_DONE_MASK) {
if (drvdata->rx_ping_pong != 0)
@@ -396,27 +404,28 @@
return 0; /* No data was available */
/* Verify that buffer has valid data */
- reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
+ reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
if ((reg_data & XEL_RSR_RECV_DONE_MASK) !=
XEL_RSR_RECV_DONE_MASK)
return 0; /* No data was available */
}
/* Get the protocol type of the ethernet frame that arrived */
- proto_type = ((ntohl(__raw_readl(addr + XEL_HEADER_OFFSET +
+ proto_type = ((ntohl(xemaclite_readl(addr + XEL_HEADER_OFFSET +
XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) &
XEL_RPLR_LENGTH_MASK);
/* Check if received ethernet frame is a raw ethernet frame
* or an IP packet or an ARP packet */
- if (proto_type > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
+ if (proto_type > ETH_DATA_LEN) {
if (proto_type == ETH_P_IP) {
- length = ((ntohl(__raw_readl(addr +
+ length = ((ntohl(xemaclite_readl(addr +
XEL_HEADER_IP_LENGTH_OFFSET +
XEL_RXBUFF_OFFSET)) >>
XEL_HEADER_SHIFT) &
XEL_RPLR_LENGTH_MASK);
+ length = min_t(u16, length, ETH_DATA_LEN);
length += ETH_HLEN + ETH_FCS_LEN;
} else if (proto_type == ETH_P_ARP)
@@ -429,14 +438,17 @@
/* Use the length in the frame, plus the header and trailer */
length = proto_type + ETH_HLEN + ETH_FCS_LEN;
+ if (WARN_ON(length > maxlen))
+ length = maxlen;
+
/* Read from the EmacLite device */
xemaclite_aligned_read((u32 __force *) (addr + XEL_RXBUFF_OFFSET),
data, length);
/* Acknowledge the frame */
- reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
+ reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
reg_data &= ~XEL_RSR_RECV_DONE_MASK;
- __raw_writel(reg_data, addr + XEL_RSR_OFFSET);
+ xemaclite_writel(reg_data, addr + XEL_RSR_OFFSET);
return length;
}
@@ -463,14 +475,14 @@
xemaclite_aligned_write(address_ptr, (u32 __force *) addr, ETH_ALEN);
- __raw_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET);
+ xemaclite_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET);
/* Update the MAC address in the EmacLite */
- reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
- __raw_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET);
+ reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
+ xemaclite_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET);
/* Wait for EmacLite to finish with the MAC address update */
- while ((__raw_readl(addr + XEL_TSR_OFFSET) &
+ while ((xemaclite_readl(addr + XEL_TSR_OFFSET) &
XEL_TSR_PROG_MAC_ADDR) != 0)
;
}
@@ -603,7 +615,7 @@
skb_reserve(skb, 2);
- len = xemaclite_recv_data(lp, (u8 *) skb->data);
+ len = xemaclite_recv_data(lp, (u8 *) skb->data, len);
if (!len) {
dev->stats.rx_errors++;
@@ -640,32 +652,32 @@
u32 tx_status;
/* Check if there is Rx Data available */
- if ((__raw_readl(base_addr + XEL_RSR_OFFSET) &
+ if ((xemaclite_readl(base_addr + XEL_RSR_OFFSET) &
XEL_RSR_RECV_DONE_MASK) ||
- (__raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET)
+ (xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET)
& XEL_RSR_RECV_DONE_MASK))
xemaclite_rx_handler(dev);
/* Check if the Transmission for the first buffer is completed */
- tx_status = __raw_readl(base_addr + XEL_TSR_OFFSET);
+ tx_status = xemaclite_readl(base_addr + XEL_TSR_OFFSET);
if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
(tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
- __raw_writel(tx_status, base_addr + XEL_TSR_OFFSET);
+ xemaclite_writel(tx_status, base_addr + XEL_TSR_OFFSET);
tx_complete = true;
}
/* Check if the Transmission for the second buffer is completed */
- tx_status = __raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
+ tx_status = xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
(tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
- __raw_writel(tx_status, base_addr + XEL_BUFFER_OFFSET +
- XEL_TSR_OFFSET);
+ xemaclite_writel(tx_status, base_addr + XEL_BUFFER_OFFSET +
+ XEL_TSR_OFFSET);
tx_complete = true;
}
@@ -698,7 +710,7 @@
/* wait for the MDIO interface to not be busy or timeout
after some time.
*/
- while (__raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
+ while (xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
XEL_MDIOCTRL_MDIOSTS_MASK) {
if (time_before_eq(end, jiffies)) {
WARN_ON(1);
@@ -734,17 +746,17 @@
* MDIO Address register. Set the Status bit in the MDIO Control
* register to start a MDIO read transaction.
*/
- ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
- __raw_writel(XEL_MDIOADDR_OP_MASK |
- ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
- lp->base_addr + XEL_MDIOADDR_OFFSET);
- __raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
- lp->base_addr + XEL_MDIOCTRL_OFFSET);
+ ctrl_reg = xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
+ xemaclite_writel(XEL_MDIOADDR_OP_MASK |
+ ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
+ lp->base_addr + XEL_MDIOADDR_OFFSET);
+ xemaclite_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
+ lp->base_addr + XEL_MDIOCTRL_OFFSET);
if (xemaclite_mdio_wait(lp))
return -ETIMEDOUT;
- rc = __raw_readl(lp->base_addr + XEL_MDIORD_OFFSET);
+ rc = xemaclite_readl(lp->base_addr + XEL_MDIORD_OFFSET);
dev_dbg(&lp->ndev->dev,
"xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n",
@@ -781,13 +793,13 @@
* Data register. Finally, set the Status bit in the MDIO Control
* register to start a MDIO write transaction.
*/
- ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
- __raw_writel(~XEL_MDIOADDR_OP_MASK &
- ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
- lp->base_addr + XEL_MDIOADDR_OFFSET);
- __raw_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET);
- __raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
- lp->base_addr + XEL_MDIOCTRL_OFFSET);
+ ctrl_reg = xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
+ xemaclite_writel(~XEL_MDIOADDR_OP_MASK &
+ ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
+ lp->base_addr + XEL_MDIOADDR_OFFSET);
+ xemaclite_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET);
+ xemaclite_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
+ lp->base_addr + XEL_MDIOCTRL_OFFSET);
return 0;
}
@@ -834,8 +846,8 @@
/* Enable the MDIO bus by asserting the enable bit in MDIO Control
* register.
*/
- __raw_writel(XEL_MDIOCTRL_MDIOEN_MASK,
- lp->base_addr + XEL_MDIOCTRL_OFFSET);
+ xemaclite_writel(XEL_MDIOCTRL_MDIOEN_MASK,
+ lp->base_addr + XEL_MDIOCTRL_OFFSET);
bus = mdiobus_alloc();
if (!bus) {
@@ -1140,8 +1152,8 @@
}
/* Clear the Tx CSR's in case this is a restart */
- __raw_writel(0, lp->base_addr + XEL_TSR_OFFSET);
- __raw_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
+ xemaclite_writel(0, lp->base_addr + XEL_TSR_OFFSET);
+ xemaclite_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
/* Set the MAC address in the EmacLite device */
xemaclite_update_address(lp, ndev->dev_addr);
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 97e0cbc..cebde07 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -1372,3 +1372,4 @@
MODULE_AUTHOR("Harald Welte <hwelte@sysmocom.de>");
MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("gtp");
+MODULE_ALIAS_GENL_FAMILY("gtp");
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 1dfe230..e0a6b1a 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -648,8 +648,8 @@
{
/* Finish setting up the DEVICE info. */
dev->mtu = AX_MTU;
- dev->hard_header_len = 0;
- dev->addr_len = 0;
+ dev->hard_header_len = AX25_MAX_HEADER_LEN;
+ dev->addr_len = AX25_ADDR_LEN;
dev->type = ARPHRD_AX25;
dev->tx_queue_len = 10;
dev->header_ops = &ax25_header_ops;
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 4865221..b88f7d6 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -908,7 +908,7 @@
if (overflow) {
pr_debug("tx timestamp queue overflow, count %d\n", overflow);
while (skb) {
- skb_complete_tx_timestamp(skb, NULL);
+ kfree_skb(skb);
skb = skb_dequeue(&dp83640->tx_queue);
}
return;
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
index 800b39f..a10d0e7 100644
--- a/drivers/net/phy/dp83848.c
+++ b/drivers/net/phy/dp83848.c
@@ -17,6 +17,7 @@
#include <linux/phy.h>
#define TI_DP83848C_PHY_ID 0x20005ca0
+#define TI_DP83620_PHY_ID 0x20005ce0
#define NS_DP83848C_PHY_ID 0x20005c90
#define TLK10X_PHY_ID 0x2000a210
#define TI_DP83822_PHY_ID 0x2000a240
@@ -77,6 +78,7 @@
static struct mdio_device_id __maybe_unused dp83848_tbl[] = {
{ TI_DP83848C_PHY_ID, 0xfffffff0 },
{ NS_DP83848C_PHY_ID, 0xfffffff0 },
+ { TI_DP83620_PHY_ID, 0xfffffff0 },
{ TLK10X_PHY_ID, 0xfffffff0 },
{ TI_DP83822_PHY_ID, 0xfffffff0 },
{ }
@@ -106,6 +108,7 @@
static struct phy_driver dp83848_driver[] = {
DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"),
DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY"),
+ DP83848_PHY_DRIVER(TI_DP83620_PHY_ID, "TI DP83620 10/100 Mbps PHY"),
DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY"),
DP83848_PHY_DRIVER(TI_DP83822_PHY_ID, "TI DP83822 10/100 Mbps PHY"),
};
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 91177a4..4cad955 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -113,12 +113,16 @@
ret = of_property_read_u32(of_node, "ti,rx-internal-delay",
&dp83867->rx_id_delay);
- if (ret)
+ if (ret &&
+ (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID))
return ret;
ret = of_property_read_u32(of_node, "ti,tx-internal-delay",
&dp83867->tx_id_delay);
- if (ret)
+ if (ret &&
+ (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID))
return ret;
return of_property_read_u32(of_node, "ti,fifo-depth",
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index d6a541b..c60c147 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1114,8 +1114,6 @@
if (adv < 0)
return adv;
- lpa &= adv;
-
if (status & MII_M1011_PHY_STATUS_FULLDUPLEX)
phydev->duplex = DUPLEX_FULL;
else
@@ -1202,7 +1200,8 @@
int err;
/* Check the fiber mode first */
- if (phydev->supported & SUPPORTED_FIBRE) {
+ if (phydev->supported & SUPPORTED_FIBRE &&
+ phydev->interface != PHY_INTERFACE_MODE_SGMII) {
err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_FIBER);
if (err < 0)
goto error;
diff --git a/drivers/net/phy/mdio-bcm-iproc.c b/drivers/net/phy/mdio-bcm-iproc.c
index c0b4e65..46fe1ae 100644
--- a/drivers/net/phy/mdio-bcm-iproc.c
+++ b/drivers/net/phy/mdio-bcm-iproc.c
@@ -81,8 +81,6 @@
if (rc)
return rc;
- iproc_mdio_config_clk(priv->base);
-
/* Prepare the read operation */
cmd = (MII_DATA_TA_VAL << MII_DATA_TA_SHIFT) |
(reg << MII_DATA_RA_SHIFT) |
@@ -112,8 +110,6 @@
if (rc)
return rc;
- iproc_mdio_config_clk(priv->base);
-
/* Prepare the write operation */
cmd = (MII_DATA_TA_VAL << MII_DATA_TA_SHIFT) |
(reg << MII_DATA_RA_SHIFT) |
@@ -163,6 +159,8 @@
bus->read = iproc_mdio_read;
bus->write = iproc_mdio_write;
+ iproc_mdio_config_clk(priv->base);
+
rc = of_mdiobus_register(bus, pdev->dev.of_node);
if (rc) {
dev_err(&pdev->dev, "MDIO bus registration failed\n");
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index ea92d52..2229188 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -622,6 +622,8 @@
if ((regval & 0xFF) == 0xFF) {
phy_init_hw(phydev);
phydev->link = 0;
+ if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev))
+ phydev->drv->config_intr(phydev);
}
return 0;
@@ -1014,6 +1016,20 @@
.get_stats = kszphy_get_stats,
.suspend = genphy_suspend,
.resume = genphy_resume,
+}, {
+ .phy_id = PHY_ID_KSZ8795,
+ .phy_id_mask = MICREL_PHY_ID_MASK,
+ .name = "Micrel KSZ8795",
+ .features = (SUPPORTED_Pause | SUPPORTED_Asym_Pause),
+ .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .config_init = kszphy_config_init,
+ .config_aneg = ksz8873mll_config_aneg,
+ .read_status = ksz8873mll_read_status,
+ .get_sset_count = kszphy_get_sset_count,
+ .get_strings = kszphy_get_strings,
+ .get_stats = kszphy_get_stats,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
} };
module_phy_driver(ksphy_driver);
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index a9be26f..edd30eb 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -1384,6 +1384,9 @@
{
int val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
+ /* Mask prohibited EEE modes */
+ val &= ~phydev->eee_broken_modes;
+
phy_write_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN, val);
return 0;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index c4ceb08..32b555a 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -860,6 +860,7 @@
struct module *ndev_owner = dev->dev.parent->driver->owner;
struct mii_bus *bus = phydev->mdio.bus;
struct device *d = &phydev->mdio.dev;
+ bool using_genphy = false;
int err;
/* For Ethernet device drivers that register their own MDIO bus, we
@@ -885,12 +886,22 @@
d->driver =
&genphy_driver[GENPHY_DRV_1G].mdiodrv.driver;
+ using_genphy = true;
+ }
+
+ if (!try_module_get(d->driver->owner)) {
+ dev_err(&dev->dev, "failed to get the device driver module\n");
+ err = -EIO;
+ goto error_put_device;
+ }
+
+ if (using_genphy) {
err = d->driver->probe(d);
if (err >= 0)
err = device_bind_driver(d);
if (err)
- goto error;
+ goto error_module_put;
}
if (phydev->attached_dev) {
@@ -926,6 +937,13 @@
return err;
error:
+ /* phy_detach() does all of the cleanup below */
+ phy_detach(phydev);
+ return err;
+
+error_module_put:
+ module_put(d->driver->owner);
+error_put_device:
put_device(d);
if (ndev_owner != bus->owner)
module_put(bus->owner);
@@ -987,6 +1005,8 @@
phydev->attached_dev = NULL;
phy_suspend(phydev);
+ module_put(phydev->mdio.dev.driver->owner);
+
/* If the device had no specific driver before (i.e. - it
* was using the generic driver), we unbind the device
* from the generic driver so that there's a chance a
@@ -1126,6 +1146,43 @@
}
/**
+ * genphy_config_eee_advert - disable unwanted eee mode advertisement
+ * @phydev: target phy_device struct
+ *
+ * Description: Writes MDIO_AN_EEE_ADV after disabling unsupported energy
+ * efficient ethernet modes. Returns 0 if the PHY's advertisement hasn't
+ * changed, and 1 if it has changed.
+ */
+static int genphy_config_eee_advert(struct phy_device *phydev)
+{
+ int broken = phydev->eee_broken_modes;
+ int old_adv, adv;
+
+ /* Nothing to disable */
+ if (!broken)
+ return 0;
+
+ /* If the following call fails, we assume that EEE is not
+ * supported by the phy. If we read 0, EEE is not advertised
+ * In either case, we don't need to continue
+ */
+ adv = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN);
+ if (adv <= 0)
+ return 0;
+
+ old_adv = adv;
+ adv &= ~broken;
+
+ /* Advertising remains unchanged with the broken mask */
+ if (old_adv == adv)
+ return 0;
+
+ phy_write_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN, adv);
+
+ return 1;
+}
+
+/**
* genphy_setup_forced - configures/forces speed/duplex from @phydev
* @phydev: target phy_device struct
*
@@ -1183,15 +1240,20 @@
*/
int genphy_config_aneg(struct phy_device *phydev)
{
- int result;
+ int err, changed;
+
+ changed = genphy_config_eee_advert(phydev);
if (AUTONEG_ENABLE != phydev->autoneg)
return genphy_setup_forced(phydev);
- result = genphy_config_advert(phydev);
- if (result < 0) /* error */
- return result;
- if (result == 0) {
+ err = genphy_config_advert(phydev);
+ if (err < 0) /* error */
+ return err;
+
+ changed |= err;
+
+ if (changed == 0) {
/* Advertisement hasn't changed, but maybe aneg was never on to
* begin with? Or maybe phy was isolated?
*/
@@ -1201,16 +1263,16 @@
return ctl;
if (!(ctl & BMCR_ANENABLE) || (ctl & BMCR_ISOLATE))
- result = 1; /* do restart aneg */
+ changed = 1; /* do restart aneg */
}
/* Only restart aneg if we are advertising something different
* than we were before.
*/
- if (result > 0)
- result = genphy_restart_aneg(phydev);
+ if (changed > 0)
+ return genphy_restart_aneg(phydev);
- return result;
+ return 0;
}
EXPORT_SYMBOL(genphy_config_aneg);
@@ -1568,6 +1630,33 @@
__set_phy_supported(phydev, max_speed);
}
+static void of_set_phy_eee_broken(struct phy_device *phydev)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ u32 broken = 0;
+
+ if (!IS_ENABLED(CONFIG_OF_MDIO))
+ return;
+
+ if (!node)
+ return;
+
+ if (of_property_read_bool(node, "eee-broken-100tx"))
+ broken |= MDIO_EEE_100TX;
+ if (of_property_read_bool(node, "eee-broken-1000t"))
+ broken |= MDIO_EEE_1000T;
+ if (of_property_read_bool(node, "eee-broken-10gt"))
+ broken |= MDIO_EEE_10GT;
+ if (of_property_read_bool(node, "eee-broken-1000kx"))
+ broken |= MDIO_EEE_1000KX;
+ if (of_property_read_bool(node, "eee-broken-10gkx4"))
+ broken |= MDIO_EEE_10GKX4;
+ if (of_property_read_bool(node, "eee-broken-10gkr"))
+ broken |= MDIO_EEE_10GKR;
+
+ phydev->eee_broken_modes = broken;
+}
+
/**
* phy_probe - probe and init a PHY device
* @dev: device to probe and init
@@ -1605,6 +1694,11 @@
of_set_phy_supported(phydev);
phydev->advertising = phydev->supported;
+ /* Get the EEE modes we want to prohibit. We will ask
+ * the PHY to stop advertising these modes later on
+ */
+ of_set_phy_eee_broken(phydev);
+
/* Set the state to READY by default */
phydev->state = PHY_READY;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 4b7a363..35aa28b 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1276,6 +1276,10 @@
return -EINVAL;
}
+ if (!(tun->flags & IFF_NO_PI))
+ if (pi.flags & htons(CHECKSUM_UNNECESSARY))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
switch (tun->flags & TUN_TYPE_MASK) {
case IFF_TUN:
if (tun->flags & IFF_NO_PI) {
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 90b426c..afb953a 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -32,7 +32,7 @@
#define NETNEXT_VERSION "08"
/* Information for net */
-#define NET_VERSION "7"
+#define NET_VERSION "8"
#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -1936,6 +1936,9 @@
napi_complete(napi);
if (!list_empty(&tp->rx_done))
napi_schedule(napi);
+ else if (!skb_queue_empty(&tp->tx_queue) &&
+ !list_empty(&tp->tx_free))
+ napi_schedule(napi);
}
return work_done;
@@ -3155,10 +3158,13 @@
if (!netif_carrier_ok(netdev)) {
tp->rtl_ops.enable(tp);
set_bit(RTL8152_SET_RX_MODE, &tp->flags);
+ netif_stop_queue(netdev);
napi_disable(&tp->napi);
netif_carrier_on(netdev);
rtl_start_rx(tp);
napi_enable(&tp->napi);
+ netif_wake_queue(netdev);
+ netif_info(tp, link, netdev, "carrier on\n");
}
} else {
if (netif_carrier_ok(netdev)) {
@@ -3166,6 +3172,7 @@
napi_disable(&tp->napi);
tp->rtl_ops.disable(tp);
napi_enable(&tp->napi);
+ netif_info(tp, link, netdev, "carrier off\n");
}
}
}
@@ -3515,12 +3522,12 @@
if (!netif_running(netdev))
return 0;
+ netif_stop_queue(netdev);
napi_disable(&tp->napi);
clear_bit(WORK_ENABLE, &tp->flags);
usb_kill_urb(tp->intr_urb);
cancel_delayed_work_sync(&tp->schedule);
if (netif_carrier_ok(netdev)) {
- netif_stop_queue(netdev);
mutex_lock(&tp->control);
tp->rtl_ops.disable(tp);
mutex_unlock(&tp->control);
@@ -3545,12 +3552,17 @@
if (netif_carrier_ok(netdev)) {
mutex_lock(&tp->control);
tp->rtl_ops.enable(tp);
+ rtl_start_rx(tp);
rtl8152_set_rx_mode(netdev);
mutex_unlock(&tp->control);
- netif_wake_queue(netdev);
}
napi_enable(&tp->napi);
+ netif_wake_queue(netdev);
+ usb_submit_urb(tp->intr_urb, GFP_KERNEL);
+
+ if (!list_empty(&tp->rx_done))
+ napi_schedule(&tp->napi);
return 0;
}
@@ -3583,10 +3595,15 @@
struct net_device *netdev = tp->netdev;
int ret = 0;
+ set_bit(SELECTIVE_SUSPEND, &tp->flags);
+ smp_mb__after_atomic();
+
if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
u32 rcr = 0;
if (delay_autosuspend(tp)) {
+ clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+ smp_mb__after_atomic();
ret = -EBUSY;
goto out1;
}
@@ -3603,6 +3620,8 @@
if (!(ocp_data & RXFIFO_EMPTY)) {
rxdy_gated_en(tp, false);
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr);
+ clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+ smp_mb__after_atomic();
ret = -EBUSY;
goto out1;
}
@@ -3622,8 +3641,6 @@
}
}
- set_bit(SELECTIVE_SUSPEND, &tp->flags);
-
out1:
return ret;
}
@@ -3679,12 +3696,15 @@
if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) {
if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
tp->rtl_ops.autosuspend_en(tp, false);
- clear_bit(SELECTIVE_SUSPEND, &tp->flags);
napi_disable(&tp->napi);
set_bit(WORK_ENABLE, &tp->flags);
if (netif_carrier_ok(tp->netdev))
rtl_start_rx(tp);
napi_enable(&tp->napi);
+ clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+ smp_mb__after_atomic();
+ if (!list_empty(&tp->rx_done))
+ napi_schedule(&tp->napi);
} else {
tp->rtl_ops.up(tp);
netif_carrier_off(tp->netdev);
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index a2515887..0b5a84c 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -73,8 +73,6 @@
/* Private data structure */
struct sierra_net_data {
- u8 ethr_hdr_tmpl[ETH_HLEN]; /* ethernet header template for rx'd pkts */
-
u16 link_up; /* air link up or down */
u8 tx_hdr_template[4]; /* part of HIP hdr for tx'd packets */
@@ -122,6 +120,7 @@
/* LSI Protocol types */
#define SIERRA_NET_PROTOCOL_UMTS 0x01
+#define SIERRA_NET_PROTOCOL_UMTS_DS 0x04
/* LSI Coverage */
#define SIERRA_NET_COVERAGE_NONE 0x00
#define SIERRA_NET_COVERAGE_NOPACKET 0x01
@@ -129,7 +128,8 @@
/* LSI Session */
#define SIERRA_NET_SESSION_IDLE 0x00
/* LSI Link types */
-#define SIERRA_NET_AS_LINK_TYPE_IPv4 0x00
+#define SIERRA_NET_AS_LINK_TYPE_IPV4 0x00
+#define SIERRA_NET_AS_LINK_TYPE_IPV6 0x02
struct lsi_umts {
u8 protocol;
@@ -137,9 +137,14 @@
__be16 length;
/* eventually use a union for the rest - assume umts for now */
u8 coverage;
- u8 unused2[41];
+ u8 network_len; /* network name len */
+ u8 network[40]; /* network name (UCS2, bigendian) */
u8 session_state;
u8 unused3[33];
+} __packed;
+
+struct lsi_umts_single {
+ struct lsi_umts lsi;
u8 link_type;
u8 pdp_addr_len; /* NW-supplied PDP address len */
u8 pdp_addr[16]; /* NW-supplied PDP address (bigendian)) */
@@ -158,10 +163,31 @@
u8 reserved[8];
} __packed;
+struct lsi_umts_dual {
+ struct lsi_umts lsi;
+ u8 pdp_addr4_len; /* NW-supplied PDP IPv4 address len */
+ u8 pdp_addr4[4]; /* NW-supplied PDP IPv4 address (bigendian)) */
+ u8 pdp_addr6_len; /* NW-supplied PDP IPv6 address len */
+ u8 pdp_addr6[16]; /* NW-supplied PDP IPv6 address (bigendian)) */
+ u8 unused4[23];
+ u8 dns1_addr4_len; /* NW-supplied 1st DNS v4 address len (bigendian) */
+ u8 dns1_addr4[4]; /* NW-supplied 1st DNS v4 address */
+ u8 dns1_addr6_len; /* NW-supplied 1st DNS v6 address len */
+ u8 dns1_addr6[16]; /* NW-supplied 1st DNS v6 address (bigendian)*/
+ u8 dns2_addr4_len; /* NW-supplied 2nd DNS v4 address len (bigendian) */
+ u8 dns2_addr4[4]; /* NW-supplied 2nd DNS v4 address */
+ u8 dns2_addr6_len; /* NW-supplied 2nd DNS v6 address len */
+ u8 dns2_addr6[16]; /* NW-supplied 2nd DNS v6 address (bigendian)*/
+ u8 unused5[68];
+} __packed;
+
#define SIERRA_NET_LSI_COMMON_LEN 4
-#define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts))
+#define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts_single))
#define SIERRA_NET_LSI_UMTS_STATUS_LEN \
(SIERRA_NET_LSI_UMTS_LEN - SIERRA_NET_LSI_COMMON_LEN)
+#define SIERRA_NET_LSI_UMTS_DS_LEN (sizeof(struct lsi_umts_dual))
+#define SIERRA_NET_LSI_UMTS_DS_STATUS_LEN \
+ (SIERRA_NET_LSI_UMTS_DS_LEN - SIERRA_NET_LSI_COMMON_LEN)
/* Forward definitions */
static void sierra_sync_timer(unsigned long syncdata);
@@ -191,10 +217,11 @@
dev->data[0] = (unsigned long)priv;
}
-/* is packet IPv4 */
+/* is packet IPv4/IPv6 */
static inline int is_ip(struct sk_buff *skb)
{
- return skb->protocol == cpu_to_be16(ETH_P_IP);
+ return skb->protocol == cpu_to_be16(ETH_P_IP) ||
+ skb->protocol == cpu_to_be16(ETH_P_IPV6);
}
/*
@@ -350,46 +377,51 @@
static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen)
{
struct lsi_umts *lsi = (struct lsi_umts *)data;
+ u32 expected_length;
- if (datalen < sizeof(struct lsi_umts)) {
- netdev_err(dev->net, "%s: Data length %d, exp %Zu\n",
- __func__, datalen,
- sizeof(struct lsi_umts));
+ if (datalen < sizeof(struct lsi_umts_single)) {
+ netdev_err(dev->net, "%s: Data length %d, exp >= %Zu\n",
+ __func__, datalen, sizeof(struct lsi_umts_single));
return -1;
}
- if (lsi->length != cpu_to_be16(SIERRA_NET_LSI_UMTS_STATUS_LEN)) {
- netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n",
- __func__, be16_to_cpu(lsi->length),
- (u32)SIERRA_NET_LSI_UMTS_STATUS_LEN);
- return -1;
- }
-
- /* Validate the protocol - only support UMTS for now */
- if (lsi->protocol != SIERRA_NET_PROTOCOL_UMTS) {
- netdev_err(dev->net, "Protocol unsupported, 0x%02x\n",
- lsi->protocol);
- return -1;
- }
-
- /* Validate the link type */
- if (lsi->link_type != SIERRA_NET_AS_LINK_TYPE_IPv4) {
- netdev_err(dev->net, "Link type unsupported: 0x%02x\n",
- lsi->link_type);
- return -1;
- }
-
- /* Validate the coverage */
- if (lsi->coverage == SIERRA_NET_COVERAGE_NONE
- || lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) {
- netdev_err(dev->net, "No coverage, 0x%02x\n", lsi->coverage);
- return 0;
- }
-
/* Validate the session state */
if (lsi->session_state == SIERRA_NET_SESSION_IDLE) {
netdev_err(dev->net, "Session idle, 0x%02x\n",
- lsi->session_state);
+ lsi->session_state);
+ return 0;
+ }
+
+ /* Validate the protocol - only support UMTS for now */
+ if (lsi->protocol == SIERRA_NET_PROTOCOL_UMTS) {
+ struct lsi_umts_single *single = (struct lsi_umts_single *)lsi;
+
+ /* Validate the link type */
+ if (single->link_type != SIERRA_NET_AS_LINK_TYPE_IPV4 &&
+ single->link_type != SIERRA_NET_AS_LINK_TYPE_IPV6) {
+ netdev_err(dev->net, "Link type unsupported: 0x%02x\n",
+ single->link_type);
+ return -1;
+ }
+ expected_length = SIERRA_NET_LSI_UMTS_STATUS_LEN;
+ } else if (lsi->protocol == SIERRA_NET_PROTOCOL_UMTS_DS) {
+ expected_length = SIERRA_NET_LSI_UMTS_DS_STATUS_LEN;
+ } else {
+ netdev_err(dev->net, "Protocol unsupported, 0x%02x\n",
+ lsi->protocol);
+ return -1;
+ }
+
+ if (be16_to_cpu(lsi->length) != expected_length) {
+ netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n",
+ __func__, be16_to_cpu(lsi->length), expected_length);
+ return -1;
+ }
+
+ /* Validate the coverage */
+ if (lsi->coverage == SIERRA_NET_COVERAGE_NONE ||
+ lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) {
+ netdev_err(dev->net, "No coverage, 0x%02x\n", lsi->coverage);
return 0;
}
@@ -662,7 +694,6 @@
u8 numendpoints;
u16 fwattr = 0;
int status;
- struct ethhdr *eth;
struct sierra_net_data *priv;
static const u8 sync_tmplate[sizeof(priv->sync_msg)] = {
0x00, 0x00, SIERRA_NET_HIP_MSYNC_ID, 0x00};
@@ -700,11 +731,6 @@
dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
- /* we will have to manufacture ethernet headers, prepare template */
- eth = (struct ethhdr *)priv->ethr_hdr_tmpl;
- memcpy(ð->h_dest, dev->net->dev_addr, ETH_ALEN);
- eth->h_proto = cpu_to_be16(ETH_P_IP);
-
/* prepare shutdown message template */
memcpy(priv->shdwn_msg, shdwn_tmplate, sizeof(priv->shdwn_msg));
/* set context index initially to 0 - prepares tx hdr template */
@@ -833,9 +859,14 @@
skb_pull(skb, hh.hdrlen);
- /* We are going to accept this packet, prepare it */
- memcpy(skb->data, sierra_net_get_private(dev)->ethr_hdr_tmpl,
- ETH_HLEN);
+ /* We are going to accept this packet, prepare it.
+ * In case protocol is IPv6, keep it, otherwise force IPv4.
+ */
+ skb_reset_mac_header(skb);
+ if (eth_hdr(skb)->h_proto != cpu_to_be16(ETH_P_IPV6))
+ eth_hdr(skb)->h_proto = cpu_to_be16(ETH_P_IP);
+ eth_zero_addr(eth_hdr(skb)->h_source);
+ memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN);
/* Last packet in batch handled by usbnet */
if (hh.payload_len.word == skb->len)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 7ca9989..1568aed 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -47,8 +47,16 @@
*/
DECLARE_EWMA(pkt_len, 1, 64)
+/* With mergeable buffers we align buffer address and use the low bits to
+ * encode its true size. Buffer size is up to 1 page so we need to align to
+ * square root of page size to ensure we reserve enough bits to encode the true
+ * size.
+ */
+#define MERGEABLE_BUFFER_MIN_ALIGN_SHIFT ((PAGE_SHIFT + 1) / 2)
+
/* Minimum alignment for mergeable packet buffers. */
-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
+#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, \
+ 1 << MERGEABLE_BUFFER_MIN_ALIGN_SHIFT)
#define VIRTNET_DRIVER_VERSION "1.0.0"
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index ee02605..578bd50 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -36,12 +36,14 @@
#include <net/addrconf.h>
#include <net/l3mdev.h>
#include <net/fib_rules.h>
+#include <net/netns/generic.h>
#define DRV_NAME "vrf"
#define DRV_VERSION "1.0"
#define FIB_RULE_PREF 1000 /* default preference for FIB rules */
-static bool add_fib_rules = true;
+
+static unsigned int vrf_net_id;
struct net_vrf {
struct rtable __rcu *rth;
@@ -785,15 +787,10 @@
static void vrf_dev_uninit(struct net_device *dev)
{
struct net_vrf *vrf = netdev_priv(dev);
- struct net_device *port_dev;
- struct list_head *iter;
vrf_rtable_release(dev, vrf);
vrf_rt6_release(dev, vrf);
- netdev_for_each_lower_dev(dev, port_dev, iter)
- vrf_del_slave(dev, port_dev);
-
free_percpu(dev->dstats);
dev->dstats = NULL;
}
@@ -1230,6 +1227,12 @@
static void vrf_dellink(struct net_device *dev, struct list_head *head)
{
+ struct net_device *port_dev;
+ struct list_head *iter;
+
+ netdev_for_each_lower_dev(dev, port_dev, iter)
+ vrf_del_slave(dev, port_dev);
+
unregister_netdevice_queue(dev, head);
}
@@ -1237,6 +1240,8 @@
struct nlattr *tb[], struct nlattr *data[])
{
struct net_vrf *vrf = netdev_priv(dev);
+ bool *add_fib_rules;
+ struct net *net;
int err;
if (!data || !data[IFLA_VRF_TABLE])
@@ -1252,13 +1257,15 @@
if (err)
goto out;
- if (add_fib_rules) {
+ net = dev_net(dev);
+ add_fib_rules = net_generic(net, vrf_net_id);
+ if (*add_fib_rules) {
err = vrf_add_fib_rules(dev);
if (err) {
unregister_netdevice(dev);
goto out;
}
- add_fib_rules = false;
+ *add_fib_rules = false;
}
out:
@@ -1341,16 +1348,38 @@
.notifier_call = vrf_device_event,
};
+/* Initialize per network namespace state */
+static int __net_init vrf_netns_init(struct net *net)
+{
+ bool *add_fib_rules = net_generic(net, vrf_net_id);
+
+ *add_fib_rules = true;
+
+ return 0;
+}
+
+static struct pernet_operations vrf_net_ops __net_initdata = {
+ .init = vrf_netns_init,
+ .id = &vrf_net_id,
+ .size = sizeof(bool),
+};
+
static int __init vrf_init_module(void)
{
int rc;
register_netdevice_notifier(&vrf_notifier_block);
- rc = rtnl_link_register(&vrf_link_ops);
+ rc = register_pernet_subsys(&vrf_net_ops);
if (rc < 0)
goto error;
+ rc = rtnl_link_register(&vrf_link_ops);
+ if (rc < 0) {
+ unregister_pernet_subsys(&vrf_net_ops);
+ goto error;
+ }
+
return 0;
error:
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 55c4408..983e941 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -227,15 +227,15 @@
static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, __be32 vni)
{
- struct vxlan_dev *vxlan;
+ struct vxlan_dev_node *node;
/* For flow based devices, map all packets to VNI 0 */
if (vs->flags & VXLAN_F_COLLECT_METADATA)
vni = 0;
- hlist_for_each_entry_rcu(vxlan, vni_head(vs, vni), hlist) {
- if (vxlan->default_dst.remote_vni == vni)
- return vxlan;
+ hlist_for_each_entry_rcu(node, vni_head(vs, vni), hlist) {
+ if (node->vxlan->default_dst.remote_vni == vni)
+ return node->vxlan;
}
return NULL;
@@ -2285,7 +2285,7 @@
= container_of(p, struct vxlan_fdb, hlist);
unsigned long timeout;
- if (f->state & NUD_PERMANENT)
+ if (f->state & (NUD_PERMANENT | NUD_NOARP))
continue;
timeout = f->used + vxlan->cfg.age_interval * HZ;
@@ -2309,17 +2309,22 @@
struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
spin_lock(&vn->sock_lock);
- hlist_del_init_rcu(&vxlan->hlist);
+ hlist_del_init_rcu(&vxlan->hlist4.hlist);
+#if IS_ENABLED(CONFIG_IPV6)
+ hlist_del_init_rcu(&vxlan->hlist6.hlist);
+#endif
spin_unlock(&vn->sock_lock);
}
-static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
+static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan,
+ struct vxlan_dev_node *node)
{
struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
__be32 vni = vxlan->default_dst.remote_vni;
+ node->vxlan = vxlan;
spin_lock(&vn->sock_lock);
- hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
+ hlist_add_head_rcu(&node->hlist, vni_head(vs, vni));
spin_unlock(&vn->sock_lock);
}
@@ -2778,6 +2783,7 @@
{
struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
struct vxlan_sock *vs = NULL;
+ struct vxlan_dev_node *node;
if (!vxlan->cfg.no_share) {
spin_lock(&vn->sock_lock);
@@ -2795,12 +2801,16 @@
if (IS_ERR(vs))
return PTR_ERR(vs);
#if IS_ENABLED(CONFIG_IPV6)
- if (ipv6)
+ if (ipv6) {
rcu_assign_pointer(vxlan->vn6_sock, vs);
- else
+ node = &vxlan->hlist6;
+ } else
#endif
+ {
rcu_assign_pointer(vxlan->vn4_sock, vs);
- vxlan_vs_add_dev(vs, vxlan);
+ node = &vxlan->hlist4;
+ }
+ vxlan_vs_add_dev(vs, vxlan, node);
return 0;
}
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 030d849..d092d34 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -121,4 +121,6 @@
Select Y to compile the driver in order to have WLAN functionality
support.
+source "drivers/net/wireless/cnss_utils/Kconfig"
+
endif # WLAN
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 91594de..005523c 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -27,3 +27,5 @@
obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o
obj-$(CONFIG_WCNSS_MEM_PRE_ALLOC) += cnss_prealloc/
+
+obj-$(CONFIG_CNSS_UTILS) += cnss_utils/
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 0457e31..410bcda 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -1647,6 +1647,8 @@
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
+ napi_enable(&ar->napi);
+
ath10k_pci_irq_enable(ar);
ath10k_pci_rx_post(ar);
@@ -2531,7 +2533,6 @@
ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
goto err_ce;
}
- napi_enable(&ar->napi);
return 0;
@@ -3132,7 +3133,7 @@
setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
(unsigned long)ar);
- if (QCA_REV_6174(ar))
+ if (QCA_REV_6174(ar) || QCA_REV_9377(ar))
ath10k_pci_override_ce_config(ar);
ret = ath10k_pci_alloc_pipes(ar);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index ae304355..fe5102c 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -1821,8 +1821,6 @@
static void ar9003_hw_tx99_start(struct ath_hw *ah, u32 qnum)
{
REG_SET_BIT(ah, AR_PHY_TEST, PHY_AGC_CLR);
- REG_SET_BIT(ah, 0x9864, 0x7f000);
- REG_SET_BIT(ah, 0x9924, 0x7f00fe);
REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
REG_WRITE(ah, AR_CR, AR_CR_RXD);
REG_WRITE(ah, AR_DLCL_IFS(qnum), 0);
diff --git a/drivers/net/wireless/ath/ath9k/rng.c b/drivers/net/wireless/ath/ath9k/rng.c
index d38e50f..e0374eb 100644
--- a/drivers/net/wireless/ath/ath9k/rng.c
+++ b/drivers/net/wireless/ath/ath9k/rng.c
@@ -120,6 +120,8 @@
void ath9k_rng_stop(struct ath_softc *sc)
{
- if (sc->rng_task)
+ if (sc->rng_task) {
kthread_stop(sc->rng_task);
+ sc->rng_task = NULL;
+ }
}
diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c
index 16aca9e..1fa7f84 100644
--- a/drivers/net/wireless/ath/ath9k/tx99.c
+++ b/drivers/net/wireless/ath/ath9k/tx99.c
@@ -189,22 +189,27 @@
if (strtobool(buf, &start))
return -EINVAL;
+ mutex_lock(&sc->mutex);
+
if (start == sc->tx99_state) {
if (!start)
- return count;
+ goto out;
ath_dbg(common, XMIT, "Resetting TX99\n");
ath9k_tx99_deinit(sc);
}
if (!start) {
ath9k_tx99_deinit(sc);
- return count;
+ goto out;
}
r = ath9k_tx99_init(sc);
- if (r)
+ if (r) {
+ mutex_unlock(&sc->mutex);
return r;
-
+ }
+out:
+ mutex_unlock(&sc->mutex);
return count;
}
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 9afd6f2..70379c8 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -46,6 +46,12 @@
/* channel 4 not supported yet */
};
+#ifdef CONFIG_PM
+static struct wiphy_wowlan_support wil_wowlan_support = {
+ .flags = WIPHY_WOWLAN_ANY | WIPHY_WOWLAN_DISCONNECT,
+};
+#endif
+
/* Vendor id to be used in vendor specific command and events
* to user space.
* NOTE: The authoritative place for definition of QCA_NL80211_VENDOR_ID,
@@ -959,6 +965,9 @@
wil_hex_dump_misc("mgmt tx frame ", DUMP_PREFIX_OFFSET, 16, 1, buf,
len, true);
+ if (len < sizeof(struct ieee80211_hdr_3addr))
+ return -EINVAL;
+
cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL);
if (!cmd) {
rc = -ENOMEM;
@@ -1883,6 +1892,10 @@
wiphy->regulatory_flags |= REGULATORY_DISABLE_BEACON_HINTS;
wiphy->regulatory_flags |= REGULATORY_COUNTRY_IE_IGNORE;
}
+
+#ifdef CONFIG_PM
+ wiphy->wowlan = &wil_wowlan_support;
+#endif
}
struct wireless_dev *wil_cfg80211_init(struct device *dev)
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 0ac657d..a0a676b 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -799,8 +799,12 @@
struct wireless_dev *wdev = wil_to_wdev(wil);
struct cfg80211_mgmt_tx_params params;
int rc;
- void *frame = kmalloc(len, GFP_KERNEL);
+ void *frame;
+ if (!len)
+ return -EINVAL;
+
+ frame = kmalloc(len, GFP_KERNEL);
if (!frame)
return -ENOMEM;
diff --git a/drivers/net/wireless/ath/wil6210/sysfs.c b/drivers/net/wireless/ath/wil6210/sysfs.c
index b4c4d09..b91bf51 100644
--- a/drivers/net/wireless/ath/wil6210/sysfs.c
+++ b/drivers/net/wireless/ath/wil6210/sysfs.c
@@ -291,6 +291,8 @@
return err;
}
+ kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+
return 0;
}
@@ -299,4 +301,5 @@
struct device *dev = wil_to_dev(wil);
sysfs_remove_group(&dev->kobj, &wil6210_attribute_group);
+ kobject_uevent(&dev->kobj, KOBJ_CHANGE);
}
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index ae0952f..3028f18 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -592,7 +592,8 @@
cfg80211_connect_bss(ndev, evt->bssid, wil->bss,
assoc_req_ie, assoc_req_ielen,
assoc_resp_ie, assoc_resp_ielen,
- WLAN_STATUS_SUCCESS, GFP_KERNEL);
+ WLAN_STATUS_SUCCESS, GFP_KERNEL,
+ NL80211_TIMEOUT_UNSPECIFIED);
}
wil->bss = NULL;
} else if ((wdev->iftype == NL80211_IFTYPE_AP) ||
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index 72139b5..746f8c9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -705,7 +705,7 @@
int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
struct sk_buff_head *pktq, uint totlen)
{
- struct sk_buff *glom_skb;
+ struct sk_buff *glom_skb = NULL;
struct sk_buff *skb;
u32 addr = sdiodev->sbwad;
int err = 0;
@@ -726,10 +726,8 @@
return -ENOMEM;
err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
glom_skb);
- if (err) {
- brcmu_pkt_buf_free_skb(glom_skb);
+ if (err)
goto done;
- }
skb_queue_walk(pktq, skb) {
memcpy(skb->data, glom_skb->data, skb->len);
@@ -740,6 +738,7 @@
pktq);
done:
+ brcmu_pkt_buf_free_skb(glom_skb);
return err;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 78d9966..b85398c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -414,23 +414,24 @@
struct brcmf_cfg80211_vif *vif,
enum nl80211_iftype new_type)
{
- int iftype_num[NUM_NL80211_IFTYPES];
struct brcmf_cfg80211_vif *pos;
bool check_combos = false;
int ret = 0;
+ struct iface_combination_params params = {
+ .num_different_channels = 1,
+ };
- memset(&iftype_num[0], 0, sizeof(iftype_num));
list_for_each_entry(pos, &cfg->vif_list, list)
if (pos == vif) {
- iftype_num[new_type]++;
+ params.iftype_num[new_type]++;
} else {
/* concurrent interfaces so need check combinations */
check_combos = true;
- iftype_num[pos->wdev.iftype]++;
+ params.iftype_num[pos->wdev.iftype]++;
}
if (check_combos)
- ret = cfg80211_check_combinations(cfg->wiphy, 1, 0, iftype_num);
+ ret = cfg80211_check_combinations(cfg->wiphy, ¶ms);
return ret;
}
@@ -438,15 +439,16 @@
static int brcmf_vif_add_validate(struct brcmf_cfg80211_info *cfg,
enum nl80211_iftype new_type)
{
- int iftype_num[NUM_NL80211_IFTYPES];
struct brcmf_cfg80211_vif *pos;
+ struct iface_combination_params params = {
+ .num_different_channels = 1,
+ };
- memset(&iftype_num[0], 0, sizeof(iftype_num));
list_for_each_entry(pos, &cfg->vif_list, list)
- iftype_num[pos->wdev.iftype]++;
+ params.iftype_num[pos->wdev.iftype]++;
- iftype_num[new_type]++;
- return cfg80211_check_combinations(cfg->wiphy, 1, 0, iftype_num);
+ params.iftype_num[new_type]++;
+ return cfg80211_check_combinations(cfg->wiphy, ¶ms);
}
static void convert_key_from_CPU(struct brcmf_wsec_key *key,
@@ -4928,6 +4930,11 @@
cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true,
GFP_KERNEL);
} else if (ieee80211_is_action(mgmt->frame_control)) {
+ if (len > BRCMF_FIL_ACTION_FRAME_SIZE + DOT11_MGMT_HDR_LEN) {
+ brcmf_err("invalid action frame length\n");
+ err = -EINVAL;
+ goto exit;
+ }
af_params = kzalloc(sizeof(*af_params), GFP_KERNEL);
if (af_params == NULL) {
brcmf_err("unable to allocate frame\n");
@@ -5913,7 +5920,6 @@
u32 i, j;
u32 total;
u32 chaninfo;
- u32 index;
pbuf = kzalloc(BRCMF_DCMD_MEDLEN, GFP_KERNEL);
@@ -5961,33 +5967,36 @@
ch.bw == BRCMU_CHAN_BW_80)
continue;
- channel = band->channels;
- index = band->n_channels;
+ channel = NULL;
for (j = 0; j < band->n_channels; j++) {
- if (channel[j].hw_value == ch.control_ch_num) {
- index = j;
+ if (band->channels[j].hw_value == ch.control_ch_num) {
+ channel = &band->channels[j];
break;
}
}
- channel[index].center_freq =
- ieee80211_channel_to_frequency(ch.control_ch_num,
- band->band);
- channel[index].hw_value = ch.control_ch_num;
+ if (!channel) {
+ /* It seems firmware supports some channel we never
+ * considered. Something new in IEEE standard?
+ */
+ brcmf_err("Ignoring unexpected firmware channel %d\n",
+ ch.control_ch_num);
+ continue;
+ }
/* assuming the chanspecs order is HT20,
* HT40 upper, HT40 lower, and VHT80.
*/
if (ch.bw == BRCMU_CHAN_BW_80) {
- channel[index].flags &= ~IEEE80211_CHAN_NO_80MHZ;
+ channel->flags &= ~IEEE80211_CHAN_NO_80MHZ;
} else if (ch.bw == BRCMU_CHAN_BW_40) {
- brcmf_update_bw40_channel_flag(&channel[index], &ch);
+ brcmf_update_bw40_channel_flag(channel, &ch);
} else {
/* enable the channel and disable other bandwidths
* for now as mentioned order assure they are enabled
* for subsequent chanspecs.
*/
- channel[index].flags = IEEE80211_CHAN_NO_HT40 |
- IEEE80211_CHAN_NO_80MHZ;
+ channel->flags = IEEE80211_CHAN_NO_HT40 |
+ IEEE80211_CHAN_NO_80MHZ;
ch.bw = BRCMU_CHAN_BW_20;
cfg->d11inf.encchspec(&ch);
chaninfo = ch.chspec;
@@ -5995,11 +6004,11 @@
&chaninfo);
if (!err) {
if (chaninfo & WL_CHAN_RADAR)
- channel[index].flags |=
+ channel->flags |=
(IEEE80211_CHAN_RADAR |
IEEE80211_CHAN_NO_IR);
if (chaninfo & WL_CHAN_PASSIVE)
- channel[index].flags |=
+ channel->flags |=
IEEE80211_CHAN_NO_IR;
}
}
@@ -6869,7 +6878,7 @@
wiphy = wiphy_new(ops, sizeof(struct brcmf_cfg80211_info));
if (!wiphy) {
brcmf_err("Could not allocate wiphy device\n");
- return NULL;
+ goto ops_out;
}
memcpy(wiphy->perm_addr, drvr->mac, ETH_ALEN);
set_wiphy_dev(wiphy, busdev);
@@ -7003,6 +7012,7 @@
ifp->vif = NULL;
wiphy_out:
brcmf_free_wiphy(wiphy);
+ops_out:
kfree(ops);
return NULL;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
index c7c1e99..d231042 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -442,7 +442,7 @@
const char *nvram_name;
u16 domain_nr;
u16 bus_nr;
- void (*done)(struct device *dev, const struct firmware *fw,
+ void (*done)(struct device *dev, int err, const struct firmware *fw,
void *nvram_image, u32 nvram_len);
};
@@ -477,52 +477,51 @@
if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
goto fail;
- fwctx->done(fwctx->dev, fwctx->code, nvram, nvram_length);
+ fwctx->done(fwctx->dev, 0, fwctx->code, nvram, nvram_length);
kfree(fwctx);
return;
fail:
brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
release_firmware(fwctx->code);
- device_release_driver(fwctx->dev);
+ fwctx->done(fwctx->dev, -ENOENT, NULL, NULL, 0);
kfree(fwctx);
}
static void brcmf_fw_request_code_done(const struct firmware *fw, void *ctx)
{
struct brcmf_fw *fwctx = ctx;
- int ret;
+ int ret = 0;
brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev));
- if (!fw)
+ if (!fw) {
+ ret = -ENOENT;
goto fail;
-
- /* only requested code so done here */
- if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM)) {
- fwctx->done(fwctx->dev, fw, NULL, 0);
- kfree(fwctx);
- return;
}
+ /* only requested code so done here */
+ if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM))
+ goto done;
+
fwctx->code = fw;
ret = request_firmware_nowait(THIS_MODULE, true, fwctx->nvram_name,
fwctx->dev, GFP_KERNEL, fwctx,
brcmf_fw_request_nvram_done);
- if (!ret)
- return;
-
- brcmf_fw_request_nvram_done(NULL, fwctx);
+ /* pass NULL to nvram callback for bcm47xx fallback */
+ if (ret)
+ brcmf_fw_request_nvram_done(NULL, fwctx);
return;
fail:
brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
- device_release_driver(fwctx->dev);
+done:
+ fwctx->done(fwctx->dev, ret, fw, NULL, 0);
kfree(fwctx);
}
int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
const char *code, const char *nvram,
- void (*fw_cb)(struct device *dev,
+ void (*fw_cb)(struct device *dev, int err,
const struct firmware *fw,
void *nvram_image, u32 nvram_len),
u16 domain_nr, u16 bus_nr)
@@ -555,7 +554,7 @@
int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
const char *code, const char *nvram,
- void (*fw_cb)(struct device *dev,
+ void (*fw_cb)(struct device *dev, int err,
const struct firmware *fw,
void *nvram_image, u32 nvram_len))
{
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
index d3c9f0d..8fa4b7e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
@@ -73,13 +73,13 @@
*/
int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
const char *code, const char *nvram,
- void (*fw_cb)(struct device *dev,
+ void (*fw_cb)(struct device *dev, int err,
const struct firmware *fw,
void *nvram_image, u32 nvram_len),
u16 domain_nr, u16 bus_nr);
int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
const char *code, const char *nvram,
- void (*fw_cb)(struct device *dev,
+ void (*fw_cb)(struct device *dev, int err,
const struct firmware *fw,
void *nvram_image, u32 nvram_len));
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 3deba90..d3d7921 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -1618,16 +1618,23 @@
.write32 = brcmf_pcie_buscore_write32,
};
-static void brcmf_pcie_setup(struct device *dev, const struct firmware *fw,
+static void brcmf_pcie_setup(struct device *dev, int ret,
+ const struct firmware *fw,
void *nvram, u32 nvram_len)
{
- struct brcmf_bus *bus = dev_get_drvdata(dev);
- struct brcmf_pciedev *pcie_bus_dev = bus->bus_priv.pcie;
- struct brcmf_pciedev_info *devinfo = pcie_bus_dev->devinfo;
+ struct brcmf_bus *bus;
+ struct brcmf_pciedev *pcie_bus_dev;
+ struct brcmf_pciedev_info *devinfo;
struct brcmf_commonring **flowrings;
- int ret;
u32 i;
+ /* check firmware loading result */
+ if (ret)
+ goto fail;
+
+ bus = dev_get_drvdata(dev);
+ pcie_bus_dev = bus->bus_priv.pcie;
+ devinfo = pcie_bus_dev->devinfo;
brcmf_pcie_attach(devinfo);
/* Some of the firmwares have the size of the memory of the device
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 2458e6e..8744b9b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -3975,21 +3975,26 @@
.get_memdump = brcmf_sdio_bus_get_memdump,
};
-static void brcmf_sdio_firmware_callback(struct device *dev,
+static void brcmf_sdio_firmware_callback(struct device *dev, int err,
const struct firmware *code,
void *nvram, u32 nvram_len)
{
- struct brcmf_bus *bus_if = dev_get_drvdata(dev);
- struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
- struct brcmf_sdio *bus = sdiodev->bus;
- int err = 0;
+ struct brcmf_bus *bus_if;
+ struct brcmf_sdio_dev *sdiodev;
+ struct brcmf_sdio *bus;
u8 saveclk;
- brcmf_dbg(TRACE, "Enter: dev=%s\n", dev_name(dev));
+ brcmf_dbg(TRACE, "Enter: dev=%s, err=%d\n", dev_name(dev), err);
+ bus_if = dev_get_drvdata(dev);
+ sdiodev = bus_if->bus_priv.sdio;
+ if (err)
+ goto fail;
if (!bus_if->drvr)
return;
+ bus = sdiodev->bus;
+
/* try to download image and nvram to the dongle */
bus->alp_only = true;
err = brcmf_sdio_download_firmware(bus, code, nvram, nvram_len);
@@ -4076,6 +4081,7 @@
fail:
brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err);
device_release_driver(dev);
+ device_release_driver(&sdiodev->func[2]->dev);
}
struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index 2f978a3..053f3b5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -1158,17 +1158,18 @@
return ret;
}
-static void brcmf_usb_probe_phase2(struct device *dev,
+static void brcmf_usb_probe_phase2(struct device *dev, int ret,
const struct firmware *fw,
void *nvram, u32 nvlen)
{
struct brcmf_bus *bus = dev_get_drvdata(dev);
- struct brcmf_usbdev_info *devinfo;
- int ret;
+ struct brcmf_usbdev_info *devinfo = bus->bus_priv.usb->devinfo;
+
+ if (ret)
+ goto error;
brcmf_dbg(USB, "Start fw downloading\n");
- devinfo = bus->bus_priv.usb->devinfo;
ret = check_file(fw->data);
if (ret < 0) {
brcmf_err("invalid firmware\n");
diff --git a/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c b/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c
index 3c89a73..c09e61f 100644
--- a/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c
+++ b/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c
@@ -11,9 +11,14 @@
*/
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/stacktrace.h>
#include <linux/spinlock.h>
+#include <linux/debugfs.h>
+#ifdef CONFIG_WCNSS_SKB_PRE_ALLOC
+#include <linux/skbuff.h>
+#endif
static DEFINE_SPINLOCK(alloc_lock);
@@ -21,6 +26,11 @@
#define WCNSS_MAX_STACK_TRACE 64
#endif
+#define PRE_ALLOC_DEBUGFS_DIR "cnss-prealloc"
+#define PRE_ALLOC_DEBUGFS_FILE_OBJ "status"
+
+static struct dentry *debug_base;
+
struct wcnss_prealloc {
int occupied;
unsigned int size;
@@ -228,14 +238,89 @@
}
EXPORT_SYMBOL(wcnss_pre_alloc_reset);
+static int prealloc_memory_stats_show(struct seq_file *fp, void *data)
+{
+ int i = 0;
+ int used_slots = 0, free_slots = 0;
+ unsigned int tsize = 0, tused = 0, size = 0;
+
+ seq_puts(fp, "\nSlot_Size(Kb)\t\t[Used : Free]\n");
+ for (i = 0; i < ARRAY_SIZE(wcnss_allocs); i++) {
+ tsize += wcnss_allocs[i].size;
+ if (size != wcnss_allocs[i].size) {
+ if (size) {
+ seq_printf(
+ fp, "[%d : %d]\n",
+ used_slots, free_slots);
+ }
+
+ size = wcnss_allocs[i].size;
+ used_slots = 0;
+ free_slots = 0;
+ seq_printf(fp, "%d Kb\t\t\t", size / 1024);
+ }
+
+ if (wcnss_allocs[i].occupied) {
+ tused += wcnss_allocs[i].size;
+ ++used_slots;
+ } else {
+ ++free_slots;
+ }
+ }
+ seq_printf(fp, "[%d : %d]\n", used_slots, free_slots);
+
+ /* Convert byte to Kb */
+ if (tsize)
+ tsize = tsize / 1024;
+ if (tused)
+ tused = tused / 1024;
+ seq_printf(fp, "\nMemory Status:\nTotal Memory: %dKb\n", tsize);
+ seq_printf(fp, "Used: %dKb\nFree: %dKb\n", tused, tsize - tused);
+
+ return 0;
+}
+
+static int prealloc_memory_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, prealloc_memory_stats_show, NULL);
+}
+
+static const struct file_operations prealloc_memory_stats_fops = {
+ .owner = THIS_MODULE,
+ .open = prealloc_memory_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static int __init wcnss_pre_alloc_init(void)
{
- return wcnss_prealloc_init();
+ int ret;
+
+ ret = wcnss_prealloc_init();
+ if (ret) {
+ pr_err("%s: Failed to init the prealloc pool\n", __func__);
+ return ret;
+ }
+
+ debug_base = debugfs_create_dir(PRE_ALLOC_DEBUGFS_DIR, NULL);
+ if (IS_ERR_OR_NULL(debug_base)) {
+ pr_err("%s: Failed to create debugfs dir\n", __func__);
+ } else if (IS_ERR_OR_NULL(debugfs_create_file(
+ PRE_ALLOC_DEBUGFS_FILE_OBJ,
+ 0644, debug_base, NULL,
+ &prealloc_memory_stats_fops))) {
+ pr_err("%s: Failed to create debugfs file\n", __func__);
+ debugfs_remove_recursive(debug_base);
+ }
+
+ return ret;
}
static void __exit wcnss_pre_alloc_exit(void)
{
wcnss_prealloc_deinit();
+ debugfs_remove_recursive(debug_base);
}
module_init(wcnss_pre_alloc_init);
diff --git a/drivers/net/wireless/cnss_utils/Kconfig b/drivers/net/wireless/cnss_utils/Kconfig
new file mode 100644
index 0000000..5f43e48
--- /dev/null
+++ b/drivers/net/wireless/cnss_utils/Kconfig
@@ -0,0 +1,6 @@
+config CNSS_UTILS
+ bool "CNSS utilities support"
+ ---help---
+ Add CNSS utilities support for the WLAN driver module.
+ This feature enables the wlan driver to use CNSS utilities APIs to set
+ and get wlan related information.
\ No newline at end of file
diff --git a/drivers/net/wireless/cnss_utils/Makefile b/drivers/net/wireless/cnss_utils/Makefile
new file mode 100644
index 0000000..0d1ed7a
--- /dev/null
+++ b/drivers/net/wireless/cnss_utils/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_CNSS_UTILS) += cnss_utils.o
diff --git a/drivers/net/wireless/cnss_utils/cnss_utils.c b/drivers/net/wireless/cnss_utils/cnss_utils.c
new file mode 100644
index 0000000..d73846e
--- /dev/null
+++ b/drivers/net/wireless/cnss_utils/cnss_utils.c
@@ -0,0 +1,310 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "cnss_utils: " fmt
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/etherdevice.h>
+#include <net/cnss_utils.h>
+
+#define CNSS_MAX_CH_NUM 45
+struct cnss_unsafe_channel_list {
+ u16 unsafe_ch_count;
+ u16 unsafe_ch_list[CNSS_MAX_CH_NUM];
+};
+
+struct cnss_dfs_nol_info {
+ void *dfs_nol_info;
+ u16 dfs_nol_info_len;
+};
+
+#define MAX_NO_OF_MAC_ADDR 4
+struct cnss_wlan_mac_addr {
+ u8 mac_addr[MAX_NO_OF_MAC_ADDR][ETH_ALEN];
+ u32 no_of_mac_addr_set;
+};
+
+static struct cnss_utils_priv {
+ struct cnss_unsafe_channel_list unsafe_channel_list;
+ struct cnss_dfs_nol_info dfs_nol_info;
+ /* generic mutex for unsafe channel */
+ struct mutex unsafe_channel_list_lock;
+ /* generic spin-lock for dfs_nol info */
+ spinlock_t dfs_nol_info_lock;
+ int driver_load_cnt;
+ bool is_wlan_mac_set;
+ struct cnss_wlan_mac_addr wlan_mac_addr;
+ enum cnss_utils_cc_src cc_source;
+} *cnss_utils_priv;
+
+int cnss_utils_set_wlan_unsafe_channel(struct device *dev,
+ u16 *unsafe_ch_list, u16 ch_count)
+{
+ struct cnss_utils_priv *priv = cnss_utils_priv;
+
+ if (!priv)
+ return -EINVAL;
+
+ mutex_lock(&priv->unsafe_channel_list_lock);
+ if ((!unsafe_ch_list) || (ch_count > CNSS_MAX_CH_NUM)) {
+ mutex_unlock(&priv->unsafe_channel_list_lock);
+ return -EINVAL;
+ }
+
+ priv->unsafe_channel_list.unsafe_ch_count = ch_count;
+
+ if (ch_count == 0)
+ goto end;
+
+ memcpy(priv->unsafe_channel_list.unsafe_ch_list,
+ unsafe_ch_list, ch_count * sizeof(u16));
+
+end:
+ mutex_unlock(&priv->unsafe_channel_list_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(cnss_utils_set_wlan_unsafe_channel);
+
+int cnss_utils_get_wlan_unsafe_channel(struct device *dev,
+ u16 *unsafe_ch_list,
+ u16 *ch_count, u16 buf_len)
+{
+ struct cnss_utils_priv *priv = cnss_utils_priv;
+
+ if (!priv)
+ return -EINVAL;
+
+ mutex_lock(&priv->unsafe_channel_list_lock);
+ if (!unsafe_ch_list || !ch_count) {
+ mutex_unlock(&priv->unsafe_channel_list_lock);
+ return -EINVAL;
+ }
+
+ if (buf_len <
+ (priv->unsafe_channel_list.unsafe_ch_count * sizeof(u16))) {
+ mutex_unlock(&priv->unsafe_channel_list_lock);
+ return -ENOMEM;
+ }
+
+ *ch_count = priv->unsafe_channel_list.unsafe_ch_count;
+ memcpy(unsafe_ch_list, priv->unsafe_channel_list.unsafe_ch_list,
+ priv->unsafe_channel_list.unsafe_ch_count * sizeof(u16));
+ mutex_unlock(&priv->unsafe_channel_list_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(cnss_utils_get_wlan_unsafe_channel);
+
+int cnss_utils_wlan_set_dfs_nol(struct device *dev,
+ const void *info, u16 info_len)
+{
+ void *temp;
+ void *old_nol_info;
+ struct cnss_dfs_nol_info *dfs_info;
+ struct cnss_utils_priv *priv = cnss_utils_priv;
+
+ if (!priv)
+ return -EINVAL;
+
+ if (!info || !info_len)
+ return -EINVAL;
+
+ temp = kmalloc(info_len, GFP_ATOMIC);
+ if (!temp)
+ return -ENOMEM;
+
+ memcpy(temp, info, info_len);
+ spin_lock_bh(&priv->dfs_nol_info_lock);
+ dfs_info = &priv->dfs_nol_info;
+ old_nol_info = dfs_info->dfs_nol_info;
+ dfs_info->dfs_nol_info = temp;
+ dfs_info->dfs_nol_info_len = info_len;
+ spin_unlock_bh(&priv->dfs_nol_info_lock);
+ kfree(old_nol_info);
+
+ return 0;
+}
+EXPORT_SYMBOL(cnss_utils_wlan_set_dfs_nol);
+
+int cnss_utils_wlan_get_dfs_nol(struct device *dev,
+ void *info, u16 info_len)
+{
+ int len;
+ struct cnss_dfs_nol_info *dfs_info;
+ struct cnss_utils_priv *priv = cnss_utils_priv;
+
+ if (!priv)
+ return -EINVAL;
+
+ if (!info || !info_len)
+ return -EINVAL;
+
+ spin_lock_bh(&priv->dfs_nol_info_lock);
+
+ dfs_info = &priv->dfs_nol_info;
+ if (!dfs_info->dfs_nol_info ||
+ dfs_info->dfs_nol_info_len == 0) {
+ spin_unlock_bh(&priv->dfs_nol_info_lock);
+ return -ENOENT;
+ }
+
+ len = min(info_len, dfs_info->dfs_nol_info_len);
+ memcpy(info, dfs_info->dfs_nol_info, len);
+ spin_unlock_bh(&priv->dfs_nol_info_lock);
+
+ return len;
+}
+EXPORT_SYMBOL(cnss_utils_wlan_get_dfs_nol);
+
+void cnss_utils_increment_driver_load_cnt(struct device *dev)
+{
+ struct cnss_utils_priv *priv = cnss_utils_priv;
+
+ if (!priv)
+ return;
+
+ ++(priv->driver_load_cnt);
+}
+EXPORT_SYMBOL(cnss_utils_increment_driver_load_cnt);
+
+int cnss_utils_get_driver_load_cnt(struct device *dev)
+{
+ struct cnss_utils_priv *priv = cnss_utils_priv;
+
+ if (!priv)
+ return -EINVAL;
+
+ return priv->driver_load_cnt;
+}
+EXPORT_SYMBOL(cnss_utils_get_driver_load_cnt);
+
+int cnss_utils_set_wlan_mac_address(const u8 *in, const uint32_t len)
+{
+ struct cnss_utils_priv *priv = cnss_utils_priv;
+ u32 no_of_mac_addr;
+ struct cnss_wlan_mac_addr *addr = NULL;
+ int iter;
+ u8 *temp = NULL;
+
+ if (!priv)
+ return -EINVAL;
+
+ if (priv->is_wlan_mac_set) {
+ pr_debug("WLAN MAC address is already set\n");
+ return 0;
+ }
+
+ if (len == 0 || (len % ETH_ALEN) != 0) {
+ pr_err("Invalid length %u\n", len);
+ return -EINVAL;
+ }
+
+ no_of_mac_addr = len / ETH_ALEN;
+ if (no_of_mac_addr > MAX_NO_OF_MAC_ADDR) {
+ pr_err("Exceed maximum supported MAC address %u %u\n",
+ MAX_NO_OF_MAC_ADDR, no_of_mac_addr);
+ return -EINVAL;
+ }
+
+ priv->is_wlan_mac_set = true;
+ addr = &priv->wlan_mac_addr;
+ addr->no_of_mac_addr_set = no_of_mac_addr;
+ temp = &addr->mac_addr[0][0];
+
+ for (iter = 0; iter < no_of_mac_addr;
+ ++iter, temp += ETH_ALEN, in += ETH_ALEN) {
+ ether_addr_copy(temp, in);
+ pr_debug("MAC_ADDR:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ temp[0], temp[1], temp[2],
+ temp[3], temp[4], temp[5]);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(cnss_utils_set_wlan_mac_address);
+
+u8 *cnss_utils_get_wlan_mac_address(struct device *dev, uint32_t *num)
+{
+ struct cnss_utils_priv *priv = cnss_utils_priv;
+ struct cnss_wlan_mac_addr *addr = NULL;
+
+ if (!priv)
+ goto out;
+
+ if (!priv->is_wlan_mac_set) {
+ pr_debug("WLAN MAC address is not set\n");
+ goto out;
+ }
+
+ addr = &priv->wlan_mac_addr;
+ *num = addr->no_of_mac_addr_set;
+ return &addr->mac_addr[0][0];
+out:
+ *num = 0;
+ return NULL;
+}
+EXPORT_SYMBOL(cnss_utils_get_wlan_mac_address);
+
+void cnss_utils_set_cc_source(struct device *dev,
+ enum cnss_utils_cc_src cc_source)
+{
+ struct cnss_utils_priv *priv = cnss_utils_priv;
+
+ if (!priv)
+ return;
+
+ priv->cc_source = cc_source;
+}
+EXPORT_SYMBOL(cnss_utils_set_cc_source);
+
+enum cnss_utils_cc_src cnss_utils_get_cc_source(struct device *dev)
+{
+ struct cnss_utils_priv *priv = cnss_utils_priv;
+
+ if (!priv)
+ return -EINVAL;
+
+ return priv->cc_source;
+}
+EXPORT_SYMBOL(cnss_utils_get_cc_source);
+
+static int __init cnss_utils_init(void)
+{
+ struct cnss_utils_priv *priv = NULL;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->cc_source = CNSS_UTILS_SOURCE_CORE;
+
+ mutex_init(&priv->unsafe_channel_list_lock);
+ spin_lock_init(&priv->dfs_nol_info_lock);
+
+ cnss_utils_priv = priv;
+
+ return 0;
+}
+
+static void __exit cnss_utils_exit(void)
+{
+ kfree(cnss_utils_priv);
+ cnss_utils_priv = NULL;
+}
+
+module_init(cnss_utils_init);
+module_exit(cnss_utils_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CNSS Utilities Driver");
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index 63a051b..bec7d9c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -843,8 +843,10 @@
return;
IWL_DEBUG_TEMP(mvm, "Thermal zone device unregister\n");
- thermal_zone_device_unregister(mvm->tz_device.tzone);
- mvm->tz_device.tzone = NULL;
+ if (mvm->tz_device.tzone) {
+ thermal_zone_device_unregister(mvm->tz_device.tzone);
+ mvm->tz_device.tzone = NULL;
+ }
}
static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm)
@@ -853,8 +855,10 @@
return;
IWL_DEBUG_TEMP(mvm, "Cooling device unregister\n");
- thermal_cooling_device_unregister(mvm->cooling_dev.cdev);
- mvm->cooling_dev.cdev = NULL;
+ if (mvm->cooling_dev.cdev) {
+ thermal_cooling_device_unregister(mvm->cooling_dev.cdev);
+ mvm->cooling_dev.cdev = NULL;
+ }
}
#endif /* CONFIG_THERMAL */
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index d3bad57..0fd7d7e 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2662,7 +2662,7 @@
tasklet_hrtimer_init(&data->beacon_timer,
mac80211_hwsim_beacon,
- CLOCK_MONOTONIC_RAW, HRTIMER_MODE_ABS);
+ CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
spin_lock_bh(&hwsim_radio_lock);
list_add_tail(&data->list, &hwsim_radios);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
index b7273be..c8d9075 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
@@ -1422,21 +1422,6 @@
cancel_work_sync(&rt2x00dev->intf_work);
cancel_delayed_work_sync(&rt2x00dev->autowakeup_work);
cancel_work_sync(&rt2x00dev->sleep_work);
-#if IS_ENABLED(CONFIG_RT2X00_LIB_USB)
- if (rt2x00_is_usb(rt2x00dev)) {
- usb_kill_anchored_urbs(rt2x00dev->anchor);
- hrtimer_cancel(&rt2x00dev->txstatus_timer);
- cancel_work_sync(&rt2x00dev->rxdone_work);
- cancel_work_sync(&rt2x00dev->txdone_work);
- }
-#endif
- if (rt2x00dev->workqueue)
- destroy_workqueue(rt2x00dev->workqueue);
-
- /*
- * Free the tx status fifo.
- */
- kfifo_free(&rt2x00dev->txstatus_fifo);
/*
* Kill the tx status tasklet.
@@ -1452,6 +1437,14 @@
*/
rt2x00lib_uninitialize(rt2x00dev);
+ if (rt2x00dev->workqueue)
+ destroy_workqueue(rt2x00dev->workqueue);
+
+ /*
+ * Free the tx status fifo.
+ */
+ kfifo_free(&rt2x00dev->txstatus_fifo);
+
/*
* Free extra components
*/
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
index 662705e..631df69 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
@@ -740,6 +740,11 @@
{
struct data_queue *queue;
+ usb_kill_anchored_urbs(rt2x00dev->anchor);
+ hrtimer_cancel(&rt2x00dev->txstatus_timer);
+ cancel_work_sync(&rt2x00dev->rxdone_work);
+ cancel_work_sync(&rt2x00dev->txdone_work);
+
queue_for_each(rt2x00dev, queue)
rt2x00usb_free_entries(queue);
}
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index f949ad2b..fa3547e 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -70,10 +70,10 @@
#define WSPI_MAX_CHUNK_SIZE 4092
/*
- * wl18xx driver aggregation buffer size is (13 * PAGE_SIZE) compared to
- * (4 * PAGE_SIZE) for wl12xx, so use the larger buffer needed for wl18xx
+ * wl18xx driver aggregation buffer size is (13 * 4K) compared to
+ * (4 * 4K) for wl12xx, so use the larger buffer needed for wl18xx
*/
-#define SPI_AGGR_BUFFER_SIZE (13 * PAGE_SIZE)
+#define SPI_AGGR_BUFFER_SIZE (13 * SZ_4K)
/* Maximum number of SPI write chunks */
#define WSPI_MAX_NUM_OF_CHUNKS \
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 74dc2bf..b009d79 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -221,18 +221,18 @@
{
struct xenvif *vif = netdev_priv(dev);
struct xenvif_queue *queue = NULL;
- unsigned int num_queues = vif->num_queues;
unsigned long rx_bytes = 0;
unsigned long rx_packets = 0;
unsigned long tx_bytes = 0;
unsigned long tx_packets = 0;
unsigned int index;
+ spin_lock(&vif->lock);
if (vif->queues == NULL)
goto out;
/* Aggregate tx and rx stats from each queue */
- for (index = 0; index < num_queues; ++index) {
+ for (index = 0; index < vif->num_queues; ++index) {
queue = &vif->queues[index];
rx_bytes += queue->stats.rx_bytes;
rx_packets += queue->stats.rx_packets;
@@ -241,6 +241,8 @@
}
out:
+ spin_unlock(&vif->lock);
+
vif->dev->stats.rx_bytes = rx_bytes;
vif->dev->stats.rx_packets = rx_packets;
vif->dev->stats.tx_bytes = tx_bytes;
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 8674e18..b44f37f 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -493,11 +493,22 @@
static void backend_disconnect(struct backend_info *be)
{
if (be->vif) {
+ unsigned int queue_index;
+
xen_unregister_watchers(be->vif);
#ifdef CONFIG_DEBUG_FS
xenvif_debugfs_delif(be->vif);
#endif /* CONFIG_DEBUG_FS */
xenvif_disconnect_data(be->vif);
+ for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index)
+ xenvif_deinit_queue(&be->vif->queues[queue_index]);
+
+ spin_lock(&be->vif->lock);
+ vfree(be->vif->queues);
+ be->vif->num_queues = 0;
+ be->vif->queues = NULL;
+ spin_unlock(&be->vif->lock);
+
xenvif_disconnect_ctrl(be->vif);
}
}
@@ -1040,6 +1051,8 @@
err:
if (be->vif->num_queues > 0)
xenvif_disconnect_data(be->vif); /* Clean up existing queues */
+ for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index)
+ xenvif_deinit_queue(&be->vif->queues[queue_index]);
vfree(be->vif->queues);
be->vif->queues = NULL;
be->vif->num_queues = 0;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 0cdcb21..cd442e4 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -281,6 +281,7 @@
{
RING_IDX req_prod = queue->rx.req_prod_pvt;
int notify;
+ int err = 0;
if (unlikely(!netif_carrier_ok(queue->info->netdev)))
return;
@@ -295,8 +296,10 @@
struct xen_netif_rx_request *req;
skb = xennet_alloc_one_rx_buffer(queue);
- if (!skb)
+ if (!skb) {
+ err = -ENOMEM;
break;
+ }
id = xennet_rxidx(req_prod);
@@ -320,8 +323,13 @@
queue->rx.req_prod_pvt = req_prod;
- /* Not enough requests? Try again later. */
- if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) {
+ /* Try again later if there are not enough requests or skb allocation
+ * failed.
+ * Enough requests is quantified as the sum of newly created slots and
+ * the unconsumed slots at the backend.
+ */
+ if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
+ unlikely(err)) {
mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
return;
}
diff --git a/drivers/nfc/nfcmrvl/fw_dnld.c b/drivers/nfc/nfcmrvl/fw_dnld.c
index f8dcdf4..af62c4c 100644
--- a/drivers/nfc/nfcmrvl/fw_dnld.c
+++ b/drivers/nfc/nfcmrvl/fw_dnld.c
@@ -459,7 +459,7 @@
INIT_WORK(&priv->fw_dnld.rx_work, fw_dnld_rx_work);
snprintf(name, sizeof(name), "%s_nfcmrvl_fw_dnld_rx_wq",
- dev_name(priv->dev));
+ dev_name(&priv->ndev->nfc_dev->dev));
priv->fw_dnld.rx_wq = create_singlethread_workqueue(name);
if (!priv->fw_dnld.rx_wq)
return -ENOMEM;
@@ -496,6 +496,7 @@
{
struct nfcmrvl_private *priv = nci_get_drvdata(ndev);
struct nfcmrvl_fw_dnld *fw_dnld = &priv->fw_dnld;
+ int res;
if (!priv->support_fw_dnld)
return -ENOTSUPP;
@@ -511,7 +512,9 @@
*/
/* Retrieve FW binary */
- if (request_firmware(&fw_dnld->fw, firmware_name, priv->dev) < 0) {
+ res = request_firmware(&fw_dnld->fw, firmware_name,
+ &ndev->nfc_dev->dev);
+ if (res < 0) {
nfc_err(priv->dev, "failed to retrieve FW %s", firmware_name);
return -ENOENT;
}
diff --git a/drivers/nfc/nfcmrvl/main.c b/drivers/nfc/nfcmrvl/main.c
index 51c8240..a446590 100644
--- a/drivers/nfc/nfcmrvl/main.c
+++ b/drivers/nfc/nfcmrvl/main.c
@@ -124,12 +124,13 @@
memcpy(&priv->config, pdata, sizeof(*pdata));
if (priv->config.reset_n_io) {
- rc = devm_gpio_request_one(dev,
- priv->config.reset_n_io,
- GPIOF_OUT_INIT_LOW,
- "nfcmrvl_reset_n");
- if (rc < 0)
+ rc = gpio_request_one(priv->config.reset_n_io,
+ GPIOF_OUT_INIT_LOW,
+ "nfcmrvl_reset_n");
+ if (rc < 0) {
+ priv->config.reset_n_io = 0;
nfc_err(dev, "failed to request reset_n io\n");
+ }
}
if (phy == NFCMRVL_PHY_SPI) {
@@ -154,32 +155,36 @@
if (!priv->ndev) {
nfc_err(dev, "nci_allocate_device failed\n");
rc = -ENOMEM;
- goto error;
+ goto error_free_gpio;
}
- nci_set_drvdata(priv->ndev, priv);
-
- rc = nci_register_device(priv->ndev);
- if (rc) {
- nfc_err(dev, "nci_register_device failed %d\n", rc);
- goto error_free_dev;
- }
-
- /* Ensure that controller is powered off */
- nfcmrvl_chip_halt(priv);
-
rc = nfcmrvl_fw_dnld_init(priv);
if (rc) {
nfc_err(dev, "failed to initialize FW download %d\n", rc);
goto error_free_dev;
}
+ nci_set_drvdata(priv->ndev, priv);
+
+ rc = nci_register_device(priv->ndev);
+ if (rc) {
+ nfc_err(dev, "nci_register_device failed %d\n", rc);
+ goto error_fw_dnld_deinit;
+ }
+
+ /* Ensure that controller is powered off */
+ nfcmrvl_chip_halt(priv);
+
nfc_info(dev, "registered with nci successfully\n");
return priv;
+error_fw_dnld_deinit:
+ nfcmrvl_fw_dnld_deinit(priv);
error_free_dev:
nci_free_device(priv->ndev);
-error:
+error_free_gpio:
+ if (priv->config.reset_n_io)
+ gpio_free(priv->config.reset_n_io);
kfree(priv);
return ERR_PTR(rc);
}
@@ -195,7 +200,7 @@
nfcmrvl_fw_dnld_deinit(priv);
if (priv->config.reset_n_io)
- devm_gpio_free(priv->dev, priv->config.reset_n_io);
+ gpio_free(priv->config.reset_n_io);
nci_unregister_device(ndev);
nci_free_device(ndev);
diff --git a/drivers/nfc/nfcmrvl/uart.c b/drivers/nfc/nfcmrvl/uart.c
index 83a99e3..6c0c301 100644
--- a/drivers/nfc/nfcmrvl/uart.c
+++ b/drivers/nfc/nfcmrvl/uart.c
@@ -109,6 +109,7 @@
struct nfcmrvl_private *priv;
struct nfcmrvl_platform_data *pdata = NULL;
struct nfcmrvl_platform_data config;
+ struct device *dev = nu->tty->dev;
/*
* Platform data cannot be used here since usually it is already used
@@ -116,9 +117,8 @@
* and check if DT entries were added.
*/
- if (nu->tty->dev->parent && nu->tty->dev->parent->of_node)
- if (nfcmrvl_uart_parse_dt(nu->tty->dev->parent->of_node,
- &config) == 0)
+ if (dev && dev->parent && dev->parent->of_node)
+ if (nfcmrvl_uart_parse_dt(dev->parent->of_node, &config) == 0)
pdata = &config;
if (!pdata) {
@@ -131,7 +131,7 @@
}
priv = nfcmrvl_nci_register_dev(NFCMRVL_PHY_UART, nu, &uart_ops,
- nu->tty->dev, pdata);
+ dev, pdata);
if (IS_ERR(priv))
return PTR_ERR(priv);
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 368795a..94733f7 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1203,10 +1203,13 @@
struct page *page, bool is_write)
{
struct btt *btt = bdev->bd_disk->private_data;
+ int rc;
- btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, is_write, sector);
- page_endio(page, is_write, 0);
- return 0;
+ rc = btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, is_write, sector);
+ if (rc == 0)
+ page_endio(page, is_write, 0);
+
+ return rc;
}
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index 7ceba08..18a0bea 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -450,14 +450,15 @@
static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)
{
const unsigned int sector_size = 512;
- sector_t start_sector;
+ sector_t start_sector, end_sector;
u64 num_sectors;
u32 rem;
start_sector = div_u64(ns_offset, sector_size);
- num_sectors = div_u64_rem(len, sector_size, &rem);
+ end_sector = div_u64_rem(ns_offset + len, sector_size, &rem);
if (rem)
- num_sectors++;
+ end_sector++;
+ num_sectors = end_sector - start_sector;
if (unlikely(num_sectors > (u64)INT_MAX)) {
u64 remaining = num_sectors;
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index b480859..3456f53 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -87,7 +87,9 @@
rc = memcpy_from_pmem(mem + off, pmem_addr, len);
kunmap_atomic(mem);
- return rc;
+ if (rc)
+ return -EIO;
+ return 0;
}
static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 3222f3e..286fda4 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -88,7 +88,7 @@
struct nvme_rdma_queue {
struct nvme_rdma_qe *rsp_ring;
- u8 sig_count;
+ atomic_t sig_count;
int queue_size;
size_t cmnd_capsule_len;
struct nvme_rdma_ctrl *ctrl;
@@ -555,6 +555,7 @@
queue->cmnd_capsule_len = sizeof(struct nvme_command);
queue->queue_size = queue_size;
+ atomic_set(&queue->sig_count, 0);
queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue,
RDMA_PS_TCP, IB_QPT_RC);
@@ -1011,17 +1012,16 @@
nvme_rdma_wr_error(cq, wc, "SEND");
}
-static inline int nvme_rdma_queue_sig_limit(struct nvme_rdma_queue *queue)
+/*
+ * We want to signal completion at least every queue depth/2. This returns the
+ * largest power of two that is not above half of (queue size + 1) to optimize
+ * (avoid divisions).
+ */
+static inline bool nvme_rdma_queue_sig_limit(struct nvme_rdma_queue *queue)
{
- int sig_limit;
+ int limit = 1 << ilog2((queue->queue_size + 1) / 2);
- /*
- * We signal completion every queue depth/2 and also handle the
- * degenerated case of a device with queue_depth=1, where we
- * would need to signal every message.
- */
- sig_limit = max(queue->queue_size / 2, 1);
- return (++queue->sig_count % sig_limit) == 0;
+ return (atomic_inc_return(&queue->sig_count) & (limit - 1)) == 0;
}
static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 005ef5d..ca8ddc3 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -438,6 +438,10 @@
{
struct ib_recv_wr *bad_wr;
+ ib_dma_sync_single_for_device(ndev->device,
+ cmd->sge[0].addr, cmd->sge[0].length,
+ DMA_FROM_DEVICE);
+
if (ndev->srq)
return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr);
return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr);
@@ -538,6 +542,11 @@
first_wr = &rsp->send_wr;
nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);
+
+ ib_dma_sync_single_for_device(rsp->queue->dev->device,
+ rsp->send_sge.addr, rsp->send_sge.length,
+ DMA_TO_DEVICE);
+
if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) {
pr_err("sending cmd response failed\n");
nvmet_rdma_release_rsp(rsp);
@@ -698,6 +707,14 @@
cmd->n_rdma = 0;
cmd->req.port = queue->port;
+
+ ib_dma_sync_single_for_cpu(queue->dev->device,
+ cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
+ DMA_FROM_DEVICE);
+ ib_dma_sync_single_for_cpu(queue->dev->device,
+ cmd->send_sge.addr, cmd->send_sge.length,
+ DMA_TO_DEVICE);
+
if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
&queue->nvme_sq, &nvmet_rdma_ops))
return;
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 965911d..1b4d93e 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -488,21 +488,24 @@
rval = device_add(&nvmem->dev);
if (rval)
- goto out;
+ goto err_put_device;
if (config->compat) {
rval = nvmem_setup_compat(nvmem, config);
if (rval)
- goto out;
+ goto err_device_del;
}
if (config->cells)
nvmem_add_cells(nvmem, config);
return nvmem;
-out:
- ida_simple_remove(&nvmem_ida, nvmem->id);
- kfree(nvmem);
+
+err_device_del:
+ device_del(&nvmem->dev);
+err_put_device:
+ put_device(&nvmem->dev);
+
return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
diff --git a/drivers/of/device.c b/drivers/of/device.c
index fd5cfad..f7a9701 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -225,6 +225,7 @@
return tsize;
}
+EXPORT_SYMBOL_GPL(of_device_get_modalias);
/**
* of_device_uevent - Display OF related uevent information
@@ -287,3 +288,4 @@
return 0;
}
+EXPORT_SYMBOL_GPL(of_device_uevent_modalias);
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 66af185..c0914fb 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -741,9 +741,12 @@
const char *pathp;
int offset, rc = 0, depth = -1;
- for (offset = fdt_next_node(blob, -1, &depth);
- offset >= 0 && depth >= 0 && !rc;
- offset = fdt_next_node(blob, offset, &depth)) {
+ if (!blob)
+ return 0;
+
+ for (offset = fdt_next_node(blob, -1, &depth);
+ offset >= 0 && depth >= 0 && !rc;
+ offset = fdt_next_node(blob, offset, &depth)) {
pathp = fdt_get_name(blob, offset, NULL);
if (*pathp == '/')
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 3ed6238..c4953ec 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -741,6 +741,8 @@
BUG_ON(!dev);
ioc = GET_IOC(dev);
+ if (!ioc)
+ return DMA_ERROR_CODE;
BUG_ON(size <= 0);
@@ -814,6 +816,10 @@
BUG_ON(!dev);
ioc = GET_IOC(dev);
+ if (!ioc) {
+ WARN_ON(!ioc);
+ return;
+ }
DBG_RUN("%s() iovp 0x%lx/%x\n",
__func__, (long)iova, size);
@@ -918,6 +924,8 @@
BUG_ON(!dev);
ioc = GET_IOC(dev);
+ if (!ioc)
+ return 0;
DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
@@ -990,6 +998,10 @@
BUG_ON(!dev);
ioc = GET_IOC(dev);
+ if (!ioc) {
+ WARN_ON(!ioc);
+ return;
+ }
DBG_RUN_SG("%s() START %d entries, %p,%x\n",
__func__, nents, sg_virt(sglist), sglist->length);
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index 1133b5c..5c63b92 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -154,7 +154,10 @@
};
/* Looks nice and keeps the compiler happy */
-#define DINO_DEV(d) ((struct dino_device *) d)
+#define DINO_DEV(d) ({ \
+ void *__pdata = d; \
+ BUG_ON(!__pdata); \
+ (struct dino_device *)__pdata; })
/*
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index 2ec2aef..bc286cb 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -111,8 +111,10 @@
/* Looks nice and keeps the compiler happy */
-#define LBA_DEV(d) ((struct lba_device *) (d))
-
+#define LBA_DEV(d) ({ \
+ void *__pdata = d; \
+ BUG_ON(!__pdata); \
+ (struct lba_device *)__pdata; })
/*
** Only allow 8 subsidiary busses per LBA
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index 151b86b..56918d1 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -691,6 +691,8 @@
return 0;
ioc = GET_IOC(dev);
+ if (!ioc)
+ return 0;
/*
* check if mask is >= than the current max IO Virt Address
@@ -722,6 +724,8 @@
int pide;
ioc = GET_IOC(dev);
+ if (!ioc)
+ return DMA_ERROR_CODE;
/* save offset bits */
offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;
@@ -813,6 +817,10 @@
DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);
ioc = GET_IOC(dev);
+ if (!ioc) {
+ WARN_ON(!ioc);
+ return;
+ }
offset = iova & ~IOVP_MASK;
iova ^= offset; /* clear offset bits */
size += offset;
@@ -952,6 +960,8 @@
DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
ioc = GET_IOC(dev);
+ if (!ioc)
+ return 0;
/* Fast path single entry scatterlists. */
if (nents == 1) {
@@ -1037,6 +1047,10 @@
__func__, nents, sg_virt(sglist), sglist->length);
ioc = GET_IOC(dev);
+ if (!ioc) {
+ WARN_ON(!ioc);
+ return;
+ }
#ifdef SBA_COLLECT_STATS
ioc->usg_calls++;
diff --git a/drivers/parport/parport_gsc.c b/drivers/parport/parport_gsc.c
index 6e3a60c..50f3bb0 100644
--- a/drivers/parport/parport_gsc.c
+++ b/drivers/parport/parport_gsc.c
@@ -293,7 +293,7 @@
p->irq = PARPORT_IRQ_NONE;
}
if (p->irq != PARPORT_IRQ_NONE) {
- printk(", irq %d", p->irq);
+ pr_cont(", irq %d", p->irq);
if (p->dma == PARPORT_DMA_AUTO) {
p->dma = PARPORT_DMA_NONE;
@@ -303,8 +303,8 @@
is mandatory (see above) */
p->dma = PARPORT_DMA_NONE;
- printk(" [");
-#define printmode(x) {if(p->modes&PARPORT_MODE_##x){printk("%s%s",f?",":"",#x);f++;}}
+ pr_cont(" [");
+#define printmode(x) {if(p->modes&PARPORT_MODE_##x){pr_cont("%s%s",f?",":"",#x);f++;}}
{
int f = 0;
printmode(PCSPP);
@@ -315,7 +315,7 @@
// printmode(DMA);
}
#undef printmode
- printk("]\n");
+ pr_cont("]\n");
if (p->irq != PARPORT_IRQ_NONE) {
if (request_irq (p->irq, parport_irq_handler,
diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c
index 361d7dd0..0491a86 100644
--- a/drivers/pci/host/pci-msm.c
+++ b/drivers/pci/host/pci-msm.c
@@ -4926,9 +4926,8 @@
{
struct iommu_domain *domain = iommu_get_domain_for_dev(&pdev->dev);
struct iommu_domain_geometry geometry;
- int ret, fastmap_en = 0, bypass_en = 0;
- dma_addr_t iova;
- phys_addr_t gicm_db_offset;
+ int fastmap_en = 0, bypass_en = 0;
+ dma_addr_t iova, addr;
msg->address_hi = 0;
msg->address_lo = dev->msi_gicm_addr;
@@ -4970,18 +4969,15 @@
iova = rounddown(pcie_base_addr, PAGE_SIZE);
}
- ret = iommu_map(domain, iova, rounddown(dev->msi_gicm_addr, PAGE_SIZE),
- PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
- if (ret < 0) {
- PCIE_ERR(dev,
- "PCIe: RC%d: ret: %d: Could not do iommu map for QGIC address\n",
- dev->rc_idx, ret);
- return -ENOMEM;
+ addr = dma_map_resource(&pdev->dev, dev->msi_gicm_addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL, 0);
+ if (dma_mapping_error(&pdev->dev, addr)) {
+ PCIE_ERR(dev, "PCIe: RC%d: failed to map QGIC address",
+ dev->rc_idx);
+ return -EIO;
}
- gicm_db_offset = dev->msi_gicm_addr -
- rounddown(dev->msi_gicm_addr, PAGE_SIZE);
- msg->address_lo = iova + gicm_db_offset;
+ msg->address_lo = iova + addr;
return 0;
}
diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c
index 3452983..03ebfd5 100644
--- a/drivers/pci/host/pcie-rockchip.c
+++ b/drivers/pci/host/pcie-rockchip.c
@@ -131,6 +131,7 @@
PCIE_CORE_INT_CT | PCIE_CORE_INT_UTC | \
PCIE_CORE_INT_MMVC)
+#define PCIE_RC_CONFIG_NORMAL_BASE 0x800000
#define PCIE_RC_CONFIG_BASE 0xa00000
#define PCIE_RC_CONFIG_VENDOR (PCIE_RC_CONFIG_BASE + 0x00)
#define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08)
@@ -267,7 +268,9 @@
static int rockchip_pcie_rd_own_conf(struct rockchip_pcie *rockchip,
int where, int size, u32 *val)
{
- void __iomem *addr = rockchip->apb_base + PCIE_RC_CONFIG_BASE + where;
+ void __iomem *addr;
+
+ addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + where;
if (!IS_ALIGNED((uintptr_t)addr, size)) {
*val = 0;
@@ -291,11 +294,13 @@
int where, int size, u32 val)
{
u32 mask, tmp, offset;
+ void __iomem *addr;
offset = where & ~0x3;
+ addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + offset;
if (size == 4) {
- writel(val, rockchip->apb_base + PCIE_RC_CONFIG_BASE + offset);
+ writel(val, addr);
return PCIBIOS_SUCCESSFUL;
}
@@ -306,9 +311,9 @@
* corrupt RW1C bits in adjacent registers. But the hardware
* doesn't support smaller writes.
*/
- tmp = readl(rockchip->apb_base + PCIE_RC_CONFIG_BASE + offset) & mask;
+ tmp = readl(addr) & mask;
tmp |= val << ((where & 0x3) * 8);
- writel(tmp, rockchip->apb_base + PCIE_RC_CONFIG_BASE + offset);
+ writel(tmp, addr);
return PCIBIOS_SUCCESSFUL;
}
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 1ccce1c..8a68e2b 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -954,6 +954,7 @@
return pci_legacy_resume_early(dev);
pci_update_current_state(pci_dev, PCI_D0);
+ pci_restore_state(pci_dev);
if (drv && drv->pm && drv->pm->thaw_noirq)
error = drv->pm->thaw_noirq(dev);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 579c494..e7d4048 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -2142,7 +2142,8 @@
if (!pm_runtime_suspended(dev)
|| pci_target_state(pci_dev) != pci_dev->current_state
- || platform_pci_need_resume(pci_dev))
+ || platform_pci_need_resume(pci_dev)
+ || (pci_dev->dev_flags & PCI_DEV_FLAGS_NEEDS_RESUME))
return false;
/*
diff --git a/drivers/phy/phy-qcom-ufs-qmp-v3.h b/drivers/phy/phy-qcom-ufs-qmp-v3.h
index f731aac..4d00878 100644
--- a/drivers/phy/phy-qcom-ufs-qmp-v3.h
+++ b/drivers/phy/phy-qcom-ufs-qmp-v3.h
@@ -150,11 +150,15 @@
#define UFS_PHY_MULTI_LANE_CTRL1 PHY_OFF(0x1C4)
/* UFS PHY TX registers */
+#define QSERDES_TX0_RES_CODE_LANE_OFFSET_TX TX_OFF(0, 0x44)
+#define QSERDES_TX0_RES_CODE_LANE_OFFSET_RX TX_OFF(0, 0x48)
#define QSERDES_TX0_TRANSCEIVER_BIAS_EN TX_OFF(0, 0x5C)
#define QSERDES_TX0_LANE_MODE_1 TX_OFF(0, 0x8C)
#define QSERDES_TX0_LANE_MODE_2 TX_OFF(0, 0x90)
#define QSERDES_TX0_LANE_MODE_3 TX_OFF(0, 0x94)
+#define QSERDES_TX1_RES_CODE_LANE_OFFSET_TX TX_OFF(1, 0x44)
+#define QSERDES_TX1_RES_CODE_LANE_OFFSET_RX TX_OFF(1, 0x48)
#define QSERDES_TX1_LANE_MODE_1 TX_OFF(1, 0x8C)
@@ -177,6 +181,7 @@
#define QSERDES_RX0_SIGDET_LVL RX_OFF(0, 0x108)
#define QSERDES_RX0_SIGDET_DEGLITCH_CNTRL RX_OFF(0, 0x10C)
#define QSERDES_RX0_RX_INTERFACE_MODE RX_OFF(0, 0x11C)
+#define QSERDES_RX0_RX_MODE_00 RX_OFF(0, 0x164)
#define QSERDES_RX1_UCDR_SVS_SO_GAIN_HALF RX_OFF(1, 0x24)
#define QSERDES_RX1_UCDR_SVS_SO_GAIN_QUARTER RX_OFF(1, 0x28)
@@ -193,6 +198,7 @@
#define QSERDES_RX1_SIGDET_LVL RX_OFF(1, 0x108)
#define QSERDES_RX1_SIGDET_DEGLITCH_CNTRL RX_OFF(1, 0x10C)
#define QSERDES_RX1_RX_INTERFACE_MODE RX_OFF(1, 0x11C)
+#define QSERDES_RX1_RX_MODE_00 RX_OFF(1, 0x164)
#define UFS_PHY_RX_LINECFG_DISABLE_BIT BIT(1)
@@ -223,7 +229,7 @@
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORE_CLK_EN, 0x00),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x04),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SVS_MODE_CLK_SEL, 0x05),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IVCO, 0x0F),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IVCO, 0x07),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_INITVAL1, 0xFF),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_INITVAL2, 0x00),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE0, 0x82),
@@ -255,13 +261,16 @@
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_TERM_BW, 0x5B),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL2, 0x06),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL3, 0x04),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL4, 0x1D),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL4, 0x1B),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_SVS_SO_GAIN_HALF, 0x04),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_SVS_SO_GAIN_QUARTER, 0x04),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_SVS_SO_GAIN, 0x04),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_SO_SATURATION_AND_ENABLE, 0x4B),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_PI_CONTROLS, 0x81),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_FASTLOCK_COUNT_LOW, 0x80),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX0_RES_CODE_LANE_OFFSET_TX, 0x04),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX0_RES_CODE_LANE_OFFSET_RX, 0x07),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_00, 0x59),
UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL2, 0x6E),
UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_LARGE_AMP_DRV_LVL, 0x0A),
UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_SMALL_AMP_DRV_LVL, 0x02),
@@ -281,7 +290,7 @@
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_TERM_BW, 0x5B),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_EQU_ADAPTOR_CNTRL2, 0x06),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_EQU_ADAPTOR_CNTRL3, 0x04),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_EQU_ADAPTOR_CNTRL4, 0x1D),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_EQU_ADAPTOR_CNTRL4, 0x1B),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_SVS_SO_GAIN_HALF, 0x04),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_SVS_SO_GAIN_QUARTER, 0x04),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_SVS_SO_GAIN, 0x04),
@@ -289,6 +298,9 @@
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_PI_CONTROLS, 0x81),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_FASTLOCK_COUNT_LOW, 0x80),
UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_MULTI_LANE_CTRL1, 0x02),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX1_RES_CODE_LANE_OFFSET_TX, 0x04),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX1_RES_CODE_LANE_OFFSET_RX, 0x07),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_00, 0x59),
};
static struct ufs_qcom_phy_calibration phy_cal_table_rate_B[] = {
diff --git a/drivers/pinctrl/berlin/berlin-bg4ct.c b/drivers/pinctrl/berlin/berlin-bg4ct.c
index 0917204..c617ec4 100644
--- a/drivers/pinctrl/berlin/berlin-bg4ct.c
+++ b/drivers/pinctrl/berlin/berlin-bg4ct.c
@@ -217,7 +217,7 @@
BERLIN_PINCTRL_GROUP("SCRD0_CRD_PRES", 0xc, 0x3, 0x15,
BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO20 */
BERLIN_PINCTRL_FUNCTION(0x1, "scrd0"), /* crd pres */
- BERLIN_PINCTRL_FUNCTION(0x1, "sd1a")), /* DAT3 */
+ BERLIN_PINCTRL_FUNCTION(0x3, "sd1a")), /* DAT3 */
BERLIN_PINCTRL_GROUP("SPI1_SS0n", 0xc, 0x3, 0x18,
BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS0n */
BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO37 */
diff --git a/drivers/pinctrl/freescale/pinctrl-mxs.c b/drivers/pinctrl/freescale/pinctrl-mxs.c
index 41b5b07..6852010 100644
--- a/drivers/pinctrl/freescale/pinctrl-mxs.c
+++ b/drivers/pinctrl/freescale/pinctrl-mxs.c
@@ -194,6 +194,16 @@
return 0;
}
+static void mxs_pinctrl_rmwl(u32 value, u32 mask, u8 shift, void __iomem *reg)
+{
+ u32 tmp;
+
+ tmp = readl(reg);
+ tmp &= ~(mask << shift);
+ tmp |= value << shift;
+ writel(tmp, reg);
+}
+
static int mxs_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned selector,
unsigned group)
{
@@ -211,8 +221,7 @@
reg += bank * 0x20 + pin / 16 * 0x10;
shift = pin % 16 * 2;
- writel(0x3 << shift, reg + CLR);
- writel(g->muxsel[i] << shift, reg + SET);
+ mxs_pinctrl_rmwl(g->muxsel[i], 0x3, shift, reg);
}
return 0;
@@ -279,8 +288,7 @@
/* mA */
if (config & MA_PRESENT) {
shift = pin % 8 * 4;
- writel(0x3 << shift, reg + CLR);
- writel(ma << shift, reg + SET);
+ mxs_pinctrl_rmwl(ma, 0x3, shift, reg);
}
/* vol */
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 583ae3f..5419de8 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -1250,10 +1250,12 @@
debounce = readl(db_reg);
debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
+ if (arg)
+ conf |= BYT_DEBOUNCE_EN;
+ else
+ conf &= ~BYT_DEBOUNCE_EN;
+
switch (arg) {
- case 0:
- conf &= BYT_DEBOUNCE_EN;
- break;
case 375:
debounce |= BYT_DEBOUNCE_PULSE_375US;
break;
@@ -1276,7 +1278,9 @@
debounce |= BYT_DEBOUNCE_PULSE_24MS;
break;
default:
- ret = -EINVAL;
+ if (arg)
+ ret = -EINVAL;
+ break;
}
if (!ret)
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index c43b1e9..0d34d8a4 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -13,6 +13,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/dmi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -1524,10 +1525,32 @@
chained_irq_exit(chip, desc);
}
+/*
+ * Certain machines seem to hardcode Linux IRQ numbers in their ACPI
+ * tables. Since we leave GPIOs that are not capable of generating
+ * interrupts out of the irqdomain the numbering will be different and
+ * cause devices using the hardcoded IRQ numbers fail. In order not to
+ * break such machines we will only mask pins from irqdomain if the machine
+ * is not listed below.
+ */
+static const struct dmi_system_id chv_no_valid_mask[] = {
+ {
+ /* See https://bugzilla.kernel.org/show_bug.cgi?id=194945 */
+ .ident = "Acer Chromebook (CYAN)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Edgar"),
+ DMI_MATCH(DMI_BIOS_DATE, "05/21/2016"),
+ },
+ },
+ {}
+};
+
static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
{
const struct chv_gpio_pinrange *range;
struct gpio_chip *chip = &pctrl->chip;
+ bool need_valid_mask = !dmi_check_system(chv_no_valid_mask);
int ret, i, offset;
*chip = chv_gpio_chip;
@@ -1536,7 +1559,7 @@
chip->label = dev_name(pctrl->dev);
chip->parent = pctrl->dev;
chip->base = -1;
- chip->irq_need_valid_mask = true;
+ chip->irq_need_valid_mask = need_valid_mask;
ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl);
if (ret) {
@@ -1567,7 +1590,7 @@
intsel &= CHV_PADCTRL0_INTSEL_MASK;
intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
- if (intsel >= pctrl->community->nirqs)
+ if (need_valid_mask && intsel >= pctrl->community->nirqs)
clear_bit(i, chip->irq_valid_mask);
}
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 0144376..b40a074 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -353,6 +353,21 @@
return 0;
}
+static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input)
+{
+ u32 value;
+
+ value = readl(padcfg0);
+ if (input) {
+ value &= ~PADCFG0_GPIORXDIS;
+ value |= PADCFG0_GPIOTXDIS;
+ } else {
+ value &= ~PADCFG0_GPIOTXDIS;
+ value |= PADCFG0_GPIORXDIS;
+ }
+ writel(value, padcfg0);
+}
+
static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
unsigned pin)
@@ -375,11 +390,11 @@
/* Disable SCI/SMI/NMI generation */
value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI);
- /* Disable TX buffer and enable RX (this will be input) */
- value &= ~PADCFG0_GPIORXDIS;
- value |= PADCFG0_GPIOTXDIS;
writel(value, padcfg0);
+ /* Disable TX buffer and enable RX (this will be input) */
+ __intel_gpio_set_direction(padcfg0, true);
+
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return 0;
@@ -392,18 +407,11 @@
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
void __iomem *padcfg0;
unsigned long flags;
- u32 value;
raw_spin_lock_irqsave(&pctrl->lock, flags);
padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
-
- value = readl(padcfg0);
- if (input)
- value |= PADCFG0_GPIOTXDIS;
- else
- value &= ~PADCFG0_GPIOTXDIS;
- writel(value, padcfg0);
+ __intel_gpio_set_direction(padcfg0, input);
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
index 76f077f..f87ef5a 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
@@ -739,8 +739,8 @@
static const char * const nand_groups[] = {
"nand_io", "nand_io_ce0", "nand_io_ce1",
"nand_io_rb0", "nand_ale", "nand_cle",
- "nand_wen_clk", "nand_ren_clk", "nand_dqs0",
- "nand_dqs1"
+ "nand_wen_clk", "nand_ren_clk", "nand_dqs_0",
+ "nand_dqs_1"
};
static const char * const nor_groups[] = {
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 40ee647..02b28bd 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -150,7 +150,7 @@
config PINCTRL_WCD
tristate "Qualcomm Technologies, Inc WCD pin controller driver"
- depends on WCD934X_CODEC
+ depends on WCD9XXX_CODEC_CORE
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
WCD gpio controller block.
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq4019.c b/drivers/pinctrl/qcom/pinctrl-ipq4019.c
index b68ae42..743d1f4 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq4019.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq4019.c
@@ -405,6 +405,36 @@
PINGROUP(67, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(68, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(69, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(70, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(71, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(72, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(73, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(74, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(75, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(76, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(77, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(78, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(79, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(80, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(81, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(82, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(83, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(84, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(85, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(86, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(87, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(88, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(89, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(90, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(91, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(92, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(93, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(94, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(95, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(96, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(97, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(98, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(99, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
};
static const struct msm_pinctrl_soc_data ipq4019_pinctrl = {
diff --git a/drivers/pinctrl/qcom/pinctrl-lpi.c b/drivers/pinctrl/qcom/pinctrl-lpi.c
index fedd5f0..39c35b7 100644
--- a/drivers/pinctrl/qcom/pinctrl-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-lpi.c
@@ -26,7 +26,7 @@
#include "../core.h"
#include "../pinctrl-utils.h"
-#define LPI_ADDRESS_SIZE 0xC000
+#define LPI_ADDRESS_SIZE 0x20000
#define LPI_GPIO_REG_VAL_CTL 0x00
#define LPI_GPIO_REG_DIR_CTL 0x04
@@ -109,35 +109,35 @@
0x00000000,
0x00001000,
0x00002000,
- 0x00002010,
0x00003000,
- 0x00003010,
0x00004000,
- 0x00004010,
0x00005000,
- 0x00005010,
- 0x00005020,
- 0x00005030,
0x00006000,
- 0x00006010,
0x00007000,
- 0x00007010,
- 0x00005040,
- 0x00005050,
0x00008000,
- 0x00008010,
- 0x00008020,
- 0x00008030,
- 0x00008040,
- 0x00008050,
- 0x00008060,
- 0x00008070,
0x00009000,
- 0x00009010,
0x0000A000,
- 0x0000A010,
0x0000B000,
- 0x0000B010,
+ 0x0000C000,
+ 0x0000D000,
+ 0x0000E000,
+ 0x0000F000,
+ 0x00010000,
+ 0x00011000,
+ 0x00012000,
+ 0x00013000,
+ 0x00014000,
+ 0x00015000,
+ 0x00016000,
+ 0x00017000,
+ 0x00018000,
+ 0x00019000,
+ 0x0001A000,
+ 0x0001B000,
+ 0x0001C000,
+ 0x0001D000,
+ 0x0001E000,
+ 0x0001F000,
};
static const char *const lpi_gpio_functions[] = {
@@ -448,6 +448,7 @@
unsigned int offset,
unsigned int gpio)
{
+ struct lpi_gpio_state *state = gpiochip_get_data(chip);
struct pinctrl_pin_desc pindesc;
struct lpi_gpio_pad *pad;
unsigned int func;
@@ -463,7 +464,7 @@
"pull up"
};
- pctldev = pctldev ? : to_gpio_state(chip)->ctrl;
+ pctldev = pctldev ? : state->ctrl;
pindesc = pctldev->desc->pins[offset];
pad = pctldev->desc->pins[offset].drv_data;
ctl_reg = lpi_gpio_read(pad, LPI_GPIO_REG_DIR_CTL);
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 9520166..c6a5d29 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -40,6 +40,7 @@
#define MAX_NR_GPIO 300
#define PS_HOLD_OFFSET 0x820
+#define STATUS_OFFSET 0x10
/**
* struct msm_pinctrl - state for a pinctrl-msm device
@@ -74,6 +75,35 @@
static struct msm_pinctrl *msm_pinctrl_data;
+static u32 msm_pinctrl_find_base(const struct msm_pinctrl_soc_data *soc_data,
+ u32 gpio_id)
+{
+ int i;
+ u32 val;
+
+ if (gpio_id >= soc_data->ngpios || !soc_data->pin_base)
+ return 0;
+
+ if (soc_data->pin_base[gpio_id])
+ return soc_data->pin_base[gpio_id];
+
+ for (i = 0; i < soc_data->n_tile_offsets; i++) {
+ val = readl_relaxed(msm_pinctrl_data->regs +
+ soc_data->tile_offsets[i] + STATUS_OFFSET
+ + soc_data->reg_size * gpio_id);
+ if (val) {
+ soc_data->pin_base[gpio_id] = soc_data->tile_offsets[i];
+ return soc_data->tile_offsets[i];
+ }
+ }
+
+ /* In the case that the soc_data does not support dynamic base
+ * detection, we return 0 here.
+ */
+ WARN_ONCE(1, "%s:Dynamic base detection is not supported\n", __func__);
+ return 0;
+}
+
static int msm_get_groups_count(struct pinctrl_dev *pctldev)
{
struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
@@ -143,10 +173,11 @@
struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
const struct msm_pingroup *g;
unsigned long flags;
- u32 val, mask;
+ u32 val, mask, base;
int i;
g = &pctrl->soc->groups[group];
+ base = msm_pinctrl_find_base(pctrl->soc, group);
mask = GENMASK(g->mux_bit + order_base_2(g->nfuncs) - 1, g->mux_bit);
for (i = 0; i < g->nfuncs; i++) {
@@ -159,10 +190,10 @@
spin_lock_irqsave(&pctrl->lock, flags);
- val = readl(pctrl->regs + g->ctl_reg);
+ val = readl(pctrl->regs + base + g->ctl_reg);
val &= ~mask;
val |= i << g->mux_bit;
- writel(val, pctrl->regs + g->ctl_reg);
+ writel(val, pctrl->regs + base + g->ctl_reg);
spin_unlock_irqrestore(&pctrl->lock, flags);
@@ -227,15 +258,16 @@
unsigned arg;
unsigned bit;
int ret;
- u32 val;
+ u32 val, base;
g = &pctrl->soc->groups[group];
+ base = msm_pinctrl_find_base(pctrl->soc, group);
ret = msm_config_reg(pctrl, g, param, &mask, &bit);
if (ret < 0)
return ret;
- val = readl(pctrl->regs + g->ctl_reg);
+ val = readl(pctrl->regs + base + g->ctl_reg);
arg = (val >> bit) & mask;
/* Convert register value to pinconf value */
@@ -260,7 +292,7 @@
if (!arg)
return -EINVAL;
- val = readl(pctrl->regs + g->io_reg);
+ val = readl(pctrl->regs + base + g->io_reg);
arg = !!(val & BIT(g->in_bit));
break;
case PIN_CONFIG_INPUT_ENABLE:
@@ -291,11 +323,12 @@
unsigned arg;
unsigned bit;
int ret;
- u32 val;
+ u32 val, base;
int i;
g = &pctrl->soc->groups[group];
+ base = msm_pinctrl_find_base(pctrl->soc, group);
for (i = 0; i < num_configs; i++) {
param = pinconf_to_config_param(configs[i]);
arg = pinconf_to_config_argument(configs[i]);
@@ -328,12 +361,12 @@
case PIN_CONFIG_OUTPUT:
/* set output value */
spin_lock_irqsave(&pctrl->lock, flags);
- val = readl(pctrl->regs + g->io_reg);
+ val = readl(pctrl->regs + base + g->io_reg);
if (arg)
val |= BIT(g->out_bit);
else
val &= ~BIT(g->out_bit);
- writel(val, pctrl->regs + g->io_reg);
+ writel(val, pctrl->regs + base + g->io_reg);
spin_unlock_irqrestore(&pctrl->lock, flags);
/* enable output */
@@ -356,10 +389,10 @@
}
spin_lock_irqsave(&pctrl->lock, flags);
- val = readl(pctrl->regs + g->ctl_reg);
+ val = readl(pctrl->regs + base + g->ctl_reg);
val &= ~(mask << bit);
val |= arg << bit;
- writel(val, pctrl->regs + g->ctl_reg);
+ writel(val, pctrl->regs + base + g->ctl_reg);
spin_unlock_irqrestore(&pctrl->lock, flags);
}
@@ -384,15 +417,16 @@
const struct msm_pingroup *g;
struct msm_pinctrl *pctrl = gpiochip_get_data(chip);
unsigned long flags;
- u32 val;
+ u32 val, base;
g = &pctrl->soc->groups[offset];
+ base = msm_pinctrl_find_base(pctrl->soc, offset);
spin_lock_irqsave(&pctrl->lock, flags);
- val = readl(pctrl->regs + g->ctl_reg);
+ val = readl(pctrl->regs + base + g->ctl_reg);
val &= ~BIT(g->oe_bit);
- writel(val, pctrl->regs + g->ctl_reg);
+ writel(val, pctrl->regs + base + g->ctl_reg);
spin_unlock_irqrestore(&pctrl->lock, flags);
@@ -404,22 +438,23 @@
const struct msm_pingroup *g;
struct msm_pinctrl *pctrl = gpiochip_get_data(chip);
unsigned long flags;
- u32 val;
+ u32 val, base;
g = &pctrl->soc->groups[offset];
+ base = msm_pinctrl_find_base(pctrl->soc, offset);
spin_lock_irqsave(&pctrl->lock, flags);
- val = readl(pctrl->regs + g->io_reg);
+ val = readl(pctrl->regs + base + g->io_reg);
if (value)
val |= BIT(g->out_bit);
else
val &= ~BIT(g->out_bit);
- writel(val, pctrl->regs + g->io_reg);
+ writel(val, pctrl->regs + base + g->io_reg);
- val = readl(pctrl->regs + g->ctl_reg);
+ val = readl(pctrl->regs + base + g->ctl_reg);
val |= BIT(g->oe_bit);
- writel(val, pctrl->regs + g->ctl_reg);
+ writel(val, pctrl->regs + base + g->ctl_reg);
spin_unlock_irqrestore(&pctrl->lock, flags);
@@ -430,11 +465,12 @@
{
const struct msm_pingroup *g;
struct msm_pinctrl *pctrl = gpiochip_get_data(chip);
- u32 val;
+ u32 val, base;
g = &pctrl->soc->groups[offset];
+ base = msm_pinctrl_find_base(pctrl->soc, offset);
- val = readl(pctrl->regs + g->io_reg);
+ val = readl(pctrl->regs + base + g->io_reg);
return !!(val & BIT(g->in_bit));
}
@@ -443,18 +479,19 @@
const struct msm_pingroup *g;
struct msm_pinctrl *pctrl = gpiochip_get_data(chip);
unsigned long flags;
- u32 val;
+ u32 val, base;
g = &pctrl->soc->groups[offset];
+ base = msm_pinctrl_find_base(pctrl->soc, offset);
spin_lock_irqsave(&pctrl->lock, flags);
- val = readl(pctrl->regs + g->io_reg);
+ val = readl(pctrl->regs + base + g->io_reg);
if (value)
val |= BIT(g->out_bit);
else
val &= ~BIT(g->out_bit);
- writel(val, pctrl->regs + g->io_reg);
+ writel(val, pctrl->regs + base + g->io_reg);
spin_unlock_irqrestore(&pctrl->lock, flags);
}
@@ -474,7 +511,7 @@
int is_out;
int drive;
int pull;
- u32 ctl_reg;
+ u32 ctl_reg, base;
static const char * const pulls[] = {
"no pull",
@@ -484,7 +521,9 @@
};
g = &pctrl->soc->groups[offset];
- ctl_reg = readl(pctrl->regs + g->ctl_reg);
+ base = msm_pinctrl_find_base(pctrl->soc, offset);
+
+ ctl_reg = readl(pctrl->regs + base + g->ctl_reg);
is_out = !!(ctl_reg & BIT(g->oe_bit));
func = (ctl_reg >> g->mux_bit) & 7;
@@ -543,21 +582,21 @@
*/
static void msm_gpio_update_dual_edge_pos(struct msm_pinctrl *pctrl,
const struct msm_pingroup *g,
- struct irq_data *d)
+ struct irq_data *d, u32 base)
{
int loop_limit = 100;
unsigned val, val2, intstat;
unsigned pol;
do {
- val = readl(pctrl->regs + g->io_reg) & BIT(g->in_bit);
+ val = readl(pctrl->regs + base + g->io_reg) & BIT(g->in_bit);
- pol = readl(pctrl->regs + g->intr_cfg_reg);
+ pol = readl(pctrl->regs + base + g->intr_cfg_reg);
pol ^= BIT(g->intr_polarity_bit);
- writel(pol, pctrl->regs + g->intr_cfg_reg);
+ writel(pol, pctrl->regs + base + g->intr_cfg_reg);
- val2 = readl(pctrl->regs + g->io_reg) & BIT(g->in_bit);
- intstat = readl(pctrl->regs + g->intr_status_reg);
+ val2 = readl(pctrl->regs + base + g->io_reg) & BIT(g->in_bit);
+ intstat = readl(pctrl->regs + base + g->intr_status_reg);
if (intstat || (val == val2))
return;
} while (loop_limit-- > 0);
@@ -571,15 +610,16 @@
struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
const struct msm_pingroup *g;
unsigned long flags;
- u32 val;
+ u32 val, base;
g = &pctrl->soc->groups[d->hwirq];
+ base = msm_pinctrl_find_base(pctrl->soc, d->hwirq);
spin_lock_irqsave(&pctrl->lock, flags);
- val = readl(pctrl->regs + g->intr_cfg_reg);
+ val = readl(pctrl->regs + base + g->intr_cfg_reg);
val &= ~BIT(g->intr_enable_bit);
- writel(val, pctrl->regs + g->intr_cfg_reg);
+ writel(val, pctrl->regs + base + g->intr_cfg_reg);
clear_bit(d->hwirq, pctrl->enabled_irqs);
@@ -592,15 +632,16 @@
struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
const struct msm_pingroup *g;
unsigned long flags;
- u32 val;
+ u32 val, base;
g = &pctrl->soc->groups[d->hwirq];
+ base = msm_pinctrl_find_base(pctrl->soc, d->hwirq);
spin_lock_irqsave(&pctrl->lock, flags);
- val = readl(pctrl->regs + g->intr_cfg_reg);
+ val = readl(pctrl->regs + base + g->intr_cfg_reg);
val |= BIT(g->intr_enable_bit);
- writel(val, pctrl->regs + g->intr_cfg_reg);
+ writel(val, pctrl->regs + base + g->intr_cfg_reg);
set_bit(d->hwirq, pctrl->enabled_irqs);
@@ -613,21 +654,22 @@
struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
const struct msm_pingroup *g;
unsigned long flags;
- u32 val;
+ u32 val, base;
g = &pctrl->soc->groups[d->hwirq];
+ base = msm_pinctrl_find_base(pctrl->soc, d->hwirq);
spin_lock_irqsave(&pctrl->lock, flags);
- val = readl(pctrl->regs + g->intr_status_reg);
+ val = readl(pctrl->regs + base + g->intr_status_reg);
if (g->intr_ack_high)
val |= BIT(g->intr_status_bit);
else
val &= ~BIT(g->intr_status_bit);
- writel(val, pctrl->regs + g->intr_status_reg);
+ writel(val, pctrl->regs + base + g->intr_status_reg);
if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
- msm_gpio_update_dual_edge_pos(pctrl, g, d);
+ msm_gpio_update_dual_edge_pos(pctrl, g, d, base);
spin_unlock_irqrestore(&pctrl->lock, flags);
}
@@ -638,10 +680,10 @@
struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
const struct msm_pingroup *g;
unsigned long flags;
- u32 val;
+ u32 val, base;
g = &pctrl->soc->groups[d->hwirq];
-
+ base = msm_pinctrl_find_base(pctrl->soc, d->hwirq);
spin_lock_irqsave(&pctrl->lock, flags);
/*
@@ -653,17 +695,17 @@
clear_bit(d->hwirq, pctrl->dual_edge_irqs);
/* Route interrupts to application cpu */
- val = readl(pctrl->regs + g->intr_target_reg);
+ val = readl(pctrl->regs + base + g->intr_target_reg);
val &= ~(7 << g->intr_target_bit);
val |= g->intr_target_kpss_val << g->intr_target_bit;
- writel(val, pctrl->regs + g->intr_target_reg);
+ writel(val, pctrl->regs + base + g->intr_target_reg);
/* Update configuration for gpio.
* RAW_STATUS_EN is left on for all gpio irqs. Due to the
* internal circuitry of TLMM, toggling the RAW_STATUS
* could cause the INTR_STATUS to be set for EDGE interrupts.
*/
- val = readl(pctrl->regs + g->intr_cfg_reg);
+ val = readl(pctrl->regs + base + g->intr_cfg_reg);
val |= BIT(g->intr_raw_status_bit);
if (g->intr_detection_width == 2) {
val &= ~(3 << g->intr_detection_bit);
@@ -711,10 +753,10 @@
} else {
BUG();
}
- writel(val, pctrl->regs + g->intr_cfg_reg);
+ writel(val, pctrl->regs + base + g->intr_cfg_reg);
if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
- msm_gpio_update_dual_edge_pos(pctrl, g, d);
+ msm_gpio_update_dual_edge_pos(pctrl, g, d, base);
spin_unlock_irqrestore(&pctrl->lock, flags);
@@ -843,7 +885,7 @@
struct irq_chip *chip = irq_desc_get_chip(desc);
int irq_pin;
int handled = 0;
- u32 val;
+ u32 val, base;
int i;
chained_irq_enter(chip, desc);
@@ -854,7 +896,8 @@
*/
for_each_set_bit(i, pctrl->enabled_irqs, pctrl->chip.ngpio) {
g = &pctrl->soc->groups[i];
- val = readl(pctrl->regs + g->intr_status_reg);
+ base = msm_pinctrl_find_base(pctrl->soc, i);
+ val = readl(pctrl->regs + base + g->intr_status_reg);
if (val & BIT(g->intr_status_bit)) {
irq_pin = irq_find_mapping(gc->irqdomain, i);
generic_handle_irq(irq_pin);
@@ -951,7 +994,7 @@
ret = gpiochip_irqchip_add(chip,
&msm_gpio_irq_chip,
0,
- handle_edge_irq,
+ handle_fasteoi_irq,
IRQ_TYPE_NONE);
if (ret) {
dev_err(pctrl->dev, "Failed to add irqchip to gpiochip\n");
@@ -1016,6 +1059,7 @@
const struct msm_pingroup *g;
const char *name = "null";
struct msm_pinctrl *pctrl = msm_pinctrl_data;
+ u32 base;
if (!msm_show_resume_irq_mask)
return;
@@ -1023,7 +1067,8 @@
spin_lock_irqsave(&pctrl->lock, flags);
for_each_set_bit(i, pctrl->enabled_irqs, pctrl->chip.ngpio) {
g = &pctrl->soc->groups[i];
- val = readl_relaxed(pctrl->regs + g->intr_status_reg);
+ base = msm_pinctrl_find_base(pctrl->soc, i);
+ val = readl_relaxed(pctrl->regs + base + g->intr_status_reg);
if (val & BIT(g->intr_status_bit)) {
irq = irq_find_mapping(pctrl->chip.irqdomain, i);
desc = irq_to_desc(irq);
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h
index 0e223e0..375a962 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.h
+++ b/drivers/pinctrl/qcom/pinctrl-msm.h
@@ -129,6 +129,10 @@
unsigned ngpios;
const struct msm_dir_conn *dir_conn;
unsigned int n_dir_conns;
+ const u32 *tile_offsets;
+ unsigned int n_tile_offsets;
+ u32 *pin_base;
+ unsigned int reg_size;
};
int msm_pinctrl_probe(struct platform_device *pdev,
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm670.c b/drivers/pinctrl/qcom/pinctrl-sdm670.c
index c93628e..b454cc442 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm670.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm670.c
@@ -25,11 +25,8 @@
.ngroups = ARRAY_SIZE(fname##_groups), \
}
-#define NORTH 0x00500000
-#define SOUTH 0x00900000
-#define WEST 0x00100000
#define REG_SIZE 0x1000
-#define PINGROUP(id, base, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
{ \
.name = "gpio" #id, \
.pins = gpio##id##_pins, \
@@ -47,11 +44,11 @@
msm_mux_##f9 \
}, \
.nfuncs = 10, \
- .ctl_reg = base + REG_SIZE * id, \
- .io_reg = base + 0x4 + REG_SIZE * id, \
- .intr_cfg_reg = base + 0x8 + REG_SIZE * id, \
- .intr_status_reg = base + 0xc + REG_SIZE * id, \
- .intr_target_reg = base + 0x8 + REG_SIZE * id, \
+ .ctl_reg = REG_SIZE * id, \
+ .io_reg = 0x4 + REG_SIZE * id, \
+ .intr_cfg_reg = 0x8 + REG_SIZE * id, \
+ .intr_status_reg = 0xc + REG_SIZE * id, \
+ .intr_target_reg = 0x8 + REG_SIZE * id, \
.mux_bit = 2, \
.pull_bit = 0, \
.drv_bit = 6, \
@@ -117,6 +114,10 @@
.intr_detection_bit = -1, \
.intr_detection_width = -1, \
}
+
+static const u32 sdm670_tile_offsets[] = {0x100000, 0x500000, 0x900000};
+static u32 sdm670_pin_base[150];
+
static const struct pinctrl_pin_desc sdm670_pins[] = {
PINCTRL_PIN(0, "GPIO_0"),
PINCTRL_PIN(1, "GPIO_1"),
@@ -254,12 +255,14 @@
PINCTRL_PIN(147, "GPIO_147"),
PINCTRL_PIN(148, "GPIO_148"),
PINCTRL_PIN(149, "GPIO_149"),
- PINCTRL_PIN(150, "SDC1_CLK"),
- PINCTRL_PIN(151, "SDC1_CMD"),
- PINCTRL_PIN(152, "SDC1_DATA"),
- PINCTRL_PIN(153, "SDC2_CLK"),
- PINCTRL_PIN(154, "SDC2_CMD"),
- PINCTRL_PIN(155, "SDC2_DATA"),
+ PINCTRL_PIN(150, "SDC1_RCLK"),
+ PINCTRL_PIN(151, "SDC1_CLK"),
+ PINCTRL_PIN(152, "SDC1_CMD"),
+ PINCTRL_PIN(153, "SDC1_DATA"),
+ PINCTRL_PIN(154, "SDC2_CLK"),
+ PINCTRL_PIN(155, "SDC2_CMD"),
+ PINCTRL_PIN(156, "SDC2_DATA"),
+ PINCTRL_PIN(157, "UFS_RESET"),
};
#define DECLARE_MSM_GPIO_PINS(pin) \
@@ -322,10 +325,23 @@
DECLARE_MSM_GPIO_PINS(55);
DECLARE_MSM_GPIO_PINS(56);
DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
DECLARE_MSM_GPIO_PINS(65);
DECLARE_MSM_GPIO_PINS(66);
DECLARE_MSM_GPIO_PINS(67);
DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
DECLARE_MSM_GPIO_PINS(75);
DECLARE_MSM_GPIO_PINS(76);
DECLARE_MSM_GPIO_PINS(77);
@@ -355,6 +371,7 @@
DECLARE_MSM_GPIO_PINS(101);
DECLARE_MSM_GPIO_PINS(102);
DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
DECLARE_MSM_GPIO_PINS(105);
DECLARE_MSM_GPIO_PINS(106);
DECLARE_MSM_GPIO_PINS(107);
@@ -401,243 +418,128 @@
DECLARE_MSM_GPIO_PINS(148);
DECLARE_MSM_GPIO_PINS(149);
-static const unsigned int sdc1_clk_pins[] = { 150 };
-static const unsigned int sdc1_cmd_pins[] = { 151 };
-static const unsigned int sdc1_data_pins[] = { 152 };
-static const unsigned int sdc2_clk_pins[] = { 153 };
-static const unsigned int sdc2_cmd_pins[] = { 154 };
-static const unsigned int sdc2_data_pins[] = { 155 };
+static const unsigned int sdc1_rclk_pins[] = { 150 };
+static const unsigned int sdc1_clk_pins[] = { 151 };
+static const unsigned int sdc1_cmd_pins[] = { 152 };
+static const unsigned int sdc1_data_pins[] = { 153 };
+static const unsigned int sdc2_clk_pins[] = { 154 };
+static const unsigned int sdc2_cmd_pins[] = { 155 };
+static const unsigned int sdc2_data_pins[] = { 156 };
+static const unsigned int ufs_reset_pins[] = { 157 };
enum sdm670_functions {
msm_mux_qup0,
msm_mux_gpio,
- msm_mux_reserved0,
- msm_mux_reserved1,
- msm_mux_reserved2,
- msm_mux_reserved3,
msm_mux_qup9,
msm_mux_qdss_cti,
- msm_mux_reserved4,
- msm_mux_reserved5,
msm_mux_ddr_pxi0,
- msm_mux_reserved6,
msm_mux_ddr_bist,
msm_mux_atest_tsens2,
msm_mux_vsense_trigger,
msm_mux_atest_usb1,
- msm_mux_reserved7,
msm_mux_qup_l4,
msm_mux_GP_PDM1,
- msm_mux_reserved8,
msm_mux_qup_l5,
- msm_mux_reserved9,
msm_mux_mdp_vsync,
msm_mux_qup_l6,
msm_mux_wlan2_adc1,
msm_mux_atest_usb11,
msm_mux_ddr_pxi2,
- msm_mux_reserved10,
msm_mux_edp_lcd,
msm_mux_dbg_out,
msm_mux_wlan2_adc0,
msm_mux_atest_usb10,
- msm_mux_reserved11,
msm_mux_m_voc,
msm_mux_tsif1_sync,
msm_mux_ddr_pxi3,
- msm_mux_reserved12,
msm_mux_cam_mclk,
msm_mux_pll_bypassnl,
msm_mux_qdss_gpio0,
- msm_mux_reserved13,
msm_mux_pll_reset,
msm_mux_qdss_gpio1,
- msm_mux_reserved14,
msm_mux_qdss_gpio2,
- msm_mux_reserved15,
msm_mux_qdss_gpio3,
- msm_mux_reserved16,
msm_mux_cci_i2c,
msm_mux_qup1,
msm_mux_qdss_gpio4,
- msm_mux_reserved17,
msm_mux_qdss_gpio5,
- msm_mux_reserved18,
msm_mux_qdss_gpio6,
- msm_mux_reserved19,
msm_mux_qdss_gpio7,
- msm_mux_reserved20,
msm_mux_cci_timer0,
msm_mux_gcc_gp2,
msm_mux_qdss_gpio8,
- msm_mux_reserved21,
msm_mux_cci_timer1,
msm_mux_gcc_gp3,
msm_mux_qdss_gpio,
- msm_mux_reserved22,
msm_mux_cci_timer2,
msm_mux_qdss_gpio9,
- msm_mux_reserved23,
msm_mux_cci_timer3,
msm_mux_cci_async,
msm_mux_qdss_gpio10,
- msm_mux_reserved24,
msm_mux_cci_timer4,
msm_mux_qdss_gpio11,
- msm_mux_reserved25,
msm_mux_qdss_gpio12,
msm_mux_JITTER_BIST,
- msm_mux_reserved26,
msm_mux_qup2,
msm_mux_qdss_gpio13,
msm_mux_PLL_BIST,
- msm_mux_reserved27,
msm_mux_qdss_gpio14,
msm_mux_AGERA_PLL,
- msm_mux_reserved28,
msm_mux_phase_flag1,
msm_mux_qdss_gpio15,
msm_mux_atest_tsens,
- msm_mux_reserved29,
msm_mux_phase_flag2,
- msm_mux_reserved30,
msm_mux_qup11,
msm_mux_qup14,
- msm_mux_reserved31,
- msm_mux_reserved32,
- msm_mux_reserved33,
- msm_mux_reserved34,
msm_mux_pci_e0,
msm_mux_QUP_L4,
- msm_mux_reserved35,
msm_mux_QUP_L5,
- msm_mux_reserved36,
msm_mux_QUP_L6,
- msm_mux_reserved37,
msm_mux_usb_phy,
- msm_mux_reserved38,
msm_mux_lpass_slimbus,
- msm_mux_reserved39,
msm_mux_sd_write,
msm_mux_tsif1_error,
- msm_mux_reserved40,
msm_mux_qup3,
- msm_mux_reserved41,
- msm_mux_reserved42,
- msm_mux_reserved43,
- msm_mux_reserved44,
- msm_mux_bt_reset,
msm_mux_qup6,
- msm_mux_reserved45,
- msm_mux_reserved46,
- msm_mux_reserved47,
- msm_mux_reserved124,
- msm_mux_reserved125,
- msm_mux_reserved126,
- msm_mux_reserved127,
- msm_mux_reserved128,
- msm_mux_reserved129,
- msm_mux_qlink_request,
- msm_mux_reserved130,
- msm_mux_qlink_enable,
- msm_mux_reserved131,
- msm_mux_reserved132,
- msm_mux_reserved133,
- msm_mux_reserved134,
- msm_mux_pa_indicator,
- msm_mux_reserved135,
- msm_mux_reserved136,
- msm_mux_phase_flag26,
- msm_mux_reserved137,
- msm_mux_phase_flag27,
- msm_mux_reserved138,
- msm_mux_phase_flag28,
- msm_mux_reserved139,
- msm_mux_phase_flag6,
- msm_mux_reserved140,
- msm_mux_phase_flag29,
- msm_mux_reserved141,
- msm_mux_phase_flag30,
- msm_mux_reserved142,
- msm_mux_phase_flag31,
- msm_mux_reserved143,
- msm_mux_mss_lte,
- msm_mux_reserved144,
- msm_mux_reserved145,
- msm_mux_reserved146,
- msm_mux_reserved147,
- msm_mux_reserved148,
- msm_mux_reserved149,
- msm_mux_reserved48,
msm_mux_qup12,
- msm_mux_reserved49,
- msm_mux_reserved50,
- msm_mux_reserved51,
msm_mux_phase_flag16,
- msm_mux_reserved52,
msm_mux_qup10,
msm_mux_phase_flag11,
- msm_mux_reserved53,
msm_mux_GP_PDM0,
msm_mux_phase_flag12,
msm_mux_wlan1_adc1,
msm_mux_atest_usb13,
msm_mux_ddr_pxi1,
- msm_mux_reserved54,
msm_mux_phase_flag13,
msm_mux_wlan1_adc0,
msm_mux_atest_usb12,
- msm_mux_reserved55,
msm_mux_phase_flag17,
- msm_mux_reserved56,
msm_mux_qua_mi2s,
msm_mux_gcc_gp1,
msm_mux_phase_flag18,
- msm_mux_reserved57,
msm_mux_pri_mi2s,
msm_mux_qup8,
msm_mux_wsa_clk,
- msm_mux_reserved65,
msm_mux_pri_mi2s_ws,
msm_mux_wsa_data,
- msm_mux_reserved66,
- msm_mux_wsa_en,
msm_mux_atest_usb2,
- msm_mux_reserved67,
msm_mux_atest_usb23,
- msm_mux_reserved68,
msm_mux_ter_mi2s,
msm_mux_phase_flag8,
msm_mux_atest_usb22,
- msm_mux_reserved75,
msm_mux_phase_flag9,
msm_mux_atest_usb21,
- msm_mux_reserved76,
msm_mux_phase_flag4,
msm_mux_atest_usb20,
- msm_mux_reserved77,
- msm_mux_ssc_irq,
- msm_mux_reserved78,
msm_mux_sec_mi2s,
msm_mux_GP_PDM2,
- msm_mux_reserved79,
- msm_mux_reserved80,
msm_mux_qup15,
- msm_mux_reserved81,
- msm_mux_reserved82,
- msm_mux_reserved83,
- msm_mux_reserved84,
msm_mux_qup5,
- msm_mux_reserved85,
msm_mux_copy_gp,
- msm_mux_reserved86,
- msm_mux_reserved87,
- msm_mux_reserved88,
msm_mux_tsif1_clk,
msm_mux_qup4,
msm_mux_tgu_ch3,
msm_mux_phase_flag10,
- msm_mux_reserved89,
msm_mux_tsif1_en,
msm_mux_mdp_vsync0,
msm_mux_mdp_vsync1,
@@ -645,83 +547,61 @@
msm_mux_mdp_vsync3,
msm_mux_tgu_ch0,
msm_mux_phase_flag0,
- msm_mux_reserved90,
msm_mux_tsif1_data,
msm_mux_sdc4_cmd,
msm_mux_tgu_ch1,
- msm_mux_reserved91,
msm_mux_tsif2_error,
msm_mux_sdc43,
msm_mux_vfr_1,
msm_mux_tgu_ch2,
- msm_mux_reserved92,
msm_mux_tsif2_clk,
msm_mux_sdc4_clk,
msm_mux_qup7,
- msm_mux_reserved93,
msm_mux_tsif2_en,
msm_mux_sdc42,
- msm_mux_reserved94,
msm_mux_tsif2_data,
msm_mux_sdc41,
- msm_mux_reserved95,
msm_mux_tsif2_sync,
msm_mux_sdc40,
msm_mux_phase_flag3,
- msm_mux_reserved96,
msm_mux_ldo_en,
- msm_mux_reserved97,
msm_mux_ldo_update,
- msm_mux_reserved98,
msm_mux_phase_flag14,
msm_mux_prng_rosc,
- msm_mux_reserved99,
msm_mux_phase_flag15,
- msm_mux_reserved100,
msm_mux_phase_flag5,
- msm_mux_reserved101,
msm_mux_pci_e1,
- msm_mux_reserved102,
msm_mux_COPY_PHASE,
- msm_mux_reserved103,
msm_mux_uim2_data,
msm_mux_qup13,
- msm_mux_reserved105,
msm_mux_uim2_clk,
- msm_mux_reserved106,
msm_mux_uim2_reset,
- msm_mux_reserved107,
msm_mux_uim2_present,
- msm_mux_reserved108,
msm_mux_uim1_data,
- msm_mux_reserved109,
msm_mux_uim1_clk,
- msm_mux_reserved110,
msm_mux_uim1_reset,
- msm_mux_reserved111,
msm_mux_uim1_present,
- msm_mux_reserved112,
msm_mux_uim_batt,
msm_mux_edp_hot,
- msm_mux_reserved113,
msm_mux_NAV_PPS,
msm_mux_GPS_TX,
- msm_mux_reserved114,
- msm_mux_reserved115,
- msm_mux_reserved116,
msm_mux_atest_char,
- msm_mux_reserved117,
msm_mux_adsp_ext,
msm_mux_atest_char3,
- msm_mux_reserved118,
msm_mux_atest_char2,
- msm_mux_reserved119,
msm_mux_atest_char1,
- msm_mux_reserved120,
msm_mux_atest_char0,
- msm_mux_reserved121,
- msm_mux_reserved122,
- msm_mux_reserved123,
+ msm_mux_qlink_request,
+ msm_mux_qlink_enable,
+ msm_mux_pa_indicator,
+ msm_mux_phase_flag26,
+ msm_mux_phase_flag27,
+ msm_mux_phase_flag28,
+ msm_mux_phase_flag6,
+ msm_mux_phase_flag29,
+ msm_mux_phase_flag30,
+ msm_mux_phase_flag31,
+ msm_mux_mss_lte,
msm_mux_NA,
};
@@ -735,31 +615,21 @@
"gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
"gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
"gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
- "gpio43", "gpio44", "gpio46", "gpio47", "gpio48", "gpio49", "gpio50",
- "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56", "gpio57",
- "gpio65", "gpio66", "gpio75", "gpio76", "gpio77", "gpio81", "gpio82",
- "gpio83", "gpio84", "gpio85", "gpio86", "gpio87", "gpio88", "gpio89",
- "gpio90", "gpio91", "gpio92", "gpio93", "gpio94", "gpio95", "gpio96",
- "gpio97", "gpio98", "gpio99", "gpio100", "gpio101", "gpio102",
- "gpio103", "gpio105", "gpio106", "gpio107", "gpio108", "gpio109",
- "gpio110", "gpio111", "gpio112", "gpio113", "gpio114", "gpio115",
- "gpio116", "gpio126", "gpio127", "gpio128", "gpio129", "gpio130",
- "gpio131", "gpio132", "gpio133", "gpio134", "gpio135", "gpio136",
- "gpio137", "gpio138", "gpio139", "gpio140", "gpio141", "gpio142",
- "gpio143", "gpio144", "gpio145", "gpio146", "gpio147", "gpio148",
- "gpio149",
-};
-static const char * const reserved0_groups[] = {
- "gpio0",
-};
-static const char * const reserved1_groups[] = {
- "gpio1",
-};
-static const char * const reserved2_groups[] = {
- "gpio2",
-};
-static const char * const reserved3_groups[] = {
- "gpio3",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio65", "gpio66", "gpio67", "gpio68", "gpio75", "gpio76",
+ "gpio77", "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83",
+ "gpio84", "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90",
+ "gpio91", "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97",
+ "gpio98", "gpio99", "gpio100", "gpio101", "gpio102", "gpio103",
+ "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+ "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116",
+ "gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122",
+ "gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128",
+ "gpio129", "gpio130", "gpio131", "gpio132", "gpio133", "gpio134",
+ "gpio135", "gpio136", "gpio137", "gpio138", "gpio139", "gpio140",
+ "gpio141", "gpio142", "gpio143", "gpio144", "gpio145", "gpio146",
+ "gpio147", "gpio148", "gpio149",
};
static const char * const qup9_groups[] = {
"gpio4", "gpio5", "gpio6", "gpio7",
@@ -767,18 +637,9 @@
static const char * const qdss_cti_groups[] = {
"gpio4", "gpio5", "gpio51", "gpio52", "gpio90", "gpio91",
};
-static const char * const reserved4_groups[] = {
- "gpio4",
-};
-static const char * const reserved5_groups[] = {
- "gpio5",
-};
static const char * const ddr_pxi0_groups[] = {
"gpio6", "gpio7",
};
-static const char * const reserved6_groups[] = {
- "gpio6",
-};
static const char * const ddr_bist_groups[] = {
"gpio7", "gpio8", "gpio9", "gpio10",
};
@@ -791,24 +652,15 @@
static const char * const atest_usb1_groups[] = {
"gpio7",
};
-static const char * const reserved7_groups[] = {
- "gpio7",
-};
static const char * const qup_l4_groups[] = {
"gpio8", "gpio105", "gpio123",
};
static const char * const GP_PDM1_groups[] = {
"gpio8", "gpio66",
};
-static const char * const reserved8_groups[] = {
- "gpio8",
-};
static const char * const qup_l5_groups[] = {
"gpio9", "gpio106", "gpio124",
};
-static const char * const reserved9_groups[] = {
- "gpio9",
-};
static const char * const mdp_vsync_groups[] = {
"gpio10", "gpio11", "gpio12", "gpio97", "gpio98",
};
@@ -824,9 +676,6 @@
static const char * const ddr_pxi2_groups[] = {
"gpio10", "gpio11",
};
-static const char * const reserved10_groups[] = {
- "gpio10",
-};
static const char * const edp_lcd_groups[] = {
"gpio11",
};
@@ -839,9 +688,6 @@
static const char * const atest_usb10_groups[] = {
"gpio11",
};
-static const char * const reserved11_groups[] = {
- "gpio11",
-};
static const char * const m_voc_groups[] = {
"gpio12",
};
@@ -851,9 +697,6 @@
static const char * const ddr_pxi3_groups[] = {
"gpio12", "gpio13",
};
-static const char * const reserved12_groups[] = {
- "gpio12",
-};
static const char * const cam_mclk_groups[] = {
"gpio13", "gpio14", "gpio15", "gpio16",
};
@@ -863,30 +706,18 @@
static const char * const qdss_gpio0_groups[] = {
"gpio13", "gpio117",
};
-static const char * const reserved13_groups[] = {
- "gpio13",
-};
static const char * const pll_reset_groups[] = {
"gpio14",
};
static const char * const qdss_gpio1_groups[] = {
"gpio14", "gpio118",
};
-static const char * const reserved14_groups[] = {
- "gpio14",
-};
static const char * const qdss_gpio2_groups[] = {
"gpio15", "gpio119",
};
-static const char * const reserved15_groups[] = {
- "gpio15",
-};
static const char * const qdss_gpio3_groups[] = {
"gpio16", "gpio120",
};
-static const char * const reserved16_groups[] = {
- "gpio16",
-};
static const char * const cci_i2c_groups[] = {
"gpio17", "gpio18", "gpio19", "gpio20",
};
@@ -896,27 +727,15 @@
static const char * const qdss_gpio4_groups[] = {
"gpio17", "gpio121",
};
-static const char * const reserved17_groups[] = {
- "gpio17",
-};
static const char * const qdss_gpio5_groups[] = {
"gpio18", "gpio122",
};
-static const char * const reserved18_groups[] = {
- "gpio18",
-};
static const char * const qdss_gpio6_groups[] = {
"gpio19", "gpio41",
};
-static const char * const reserved19_groups[] = {
- "gpio19",
-};
static const char * const qdss_gpio7_groups[] = {
"gpio20", "gpio42",
};
-static const char * const reserved20_groups[] = {
- "gpio20",
-};
static const char * const cci_timer0_groups[] = {
"gpio21",
};
@@ -926,9 +745,6 @@
static const char * const qdss_gpio8_groups[] = {
"gpio21", "gpio75",
};
-static const char * const reserved21_groups[] = {
- "gpio21",
-};
static const char * const cci_timer1_groups[] = {
"gpio22",
};
@@ -938,18 +754,12 @@
static const char * const qdss_gpio_groups[] = {
"gpio22", "gpio30", "gpio123", "gpio124",
};
-static const char * const reserved22_groups[] = {
- "gpio22",
-};
static const char * const cci_timer2_groups[] = {
"gpio23",
};
static const char * const qdss_gpio9_groups[] = {
"gpio23", "gpio76",
};
-static const char * const reserved23_groups[] = {
- "gpio23",
-};
static const char * const cci_timer3_groups[] = {
"gpio24",
};
@@ -959,27 +769,18 @@
static const char * const qdss_gpio10_groups[] = {
"gpio24", "gpio77",
};
-static const char * const reserved24_groups[] = {
- "gpio24",
-};
static const char * const cci_timer4_groups[] = {
"gpio25",
};
static const char * const qdss_gpio11_groups[] = {
"gpio25", "gpio79",
};
-static const char * const reserved25_groups[] = {
- "gpio25",
-};
static const char * const qdss_gpio12_groups[] = {
"gpio26", "gpio80",
};
static const char * const JITTER_BIST_groups[] = {
"gpio26", "gpio35",
};
-static const char * const reserved26_groups[] = {
- "gpio26",
-};
static const char * const qup2_groups[] = {
"gpio27", "gpio28", "gpio29", "gpio30",
};
@@ -989,18 +790,12 @@
static const char * const PLL_BIST_groups[] = {
"gpio27", "gpio36",
};
-static const char * const reserved27_groups[] = {
- "gpio27",
-};
static const char * const qdss_gpio14_groups[] = {
"gpio28", "gpio43",
};
static const char * const AGERA_PLL_groups[] = {
"gpio28", "gpio37",
};
-static const char * const reserved28_groups[] = {
- "gpio28",
-};
static const char * const phase_flag1_groups[] = {
"gpio29",
};
@@ -1010,246 +805,57 @@
static const char * const atest_tsens_groups[] = {
"gpio29",
};
-static const char * const reserved29_groups[] = {
- "gpio29",
-};
static const char * const phase_flag2_groups[] = {
"gpio30",
};
-static const char * const reserved30_groups[] = {
- "gpio30",
-};
static const char * const qup11_groups[] = {
"gpio31", "gpio32", "gpio33", "gpio34",
};
static const char * const qup14_groups[] = {
"gpio31", "gpio32", "gpio33", "gpio34",
};
-static const char * const reserved31_groups[] = {
- "gpio31",
-};
-static const char * const reserved32_groups[] = {
- "gpio32",
-};
-static const char * const reserved33_groups[] = {
- "gpio33",
-};
-static const char * const reserved34_groups[] = {
- "gpio34",
-};
static const char * const pci_e0_groups[] = {
"gpio35", "gpio36",
};
static const char * const QUP_L4_groups[] = {
"gpio35", "gpio75",
};
-static const char * const reserved35_groups[] = {
- "gpio35",
-};
static const char * const QUP_L5_groups[] = {
"gpio36", "gpio76",
};
-static const char * const reserved36_groups[] = {
- "gpio36",
-};
static const char * const QUP_L6_groups[] = {
"gpio37", "gpio77",
};
-static const char * const reserved37_groups[] = {
- "gpio37",
-};
static const char * const usb_phy_groups[] = {
"gpio38",
};
-static const char * const reserved38_groups[] = {
- "gpio38",
-};
static const char * const lpass_slimbus_groups[] = {
"gpio39",
};
-static const char * const reserved39_groups[] = {
- "gpio39",
-};
static const char * const sd_write_groups[] = {
"gpio40",
};
static const char * const tsif1_error_groups[] = {
"gpio40",
};
-static const char * const reserved40_groups[] = {
- "gpio40",
-};
static const char * const qup3_groups[] = {
"gpio41", "gpio42", "gpio43", "gpio44",
};
-static const char * const reserved41_groups[] = {
- "gpio41",
-};
-static const char * const reserved42_groups[] = {
- "gpio42",
-};
-static const char * const reserved43_groups[] = {
- "gpio43",
-};
-static const char * const reserved44_groups[] = {
- "gpio44",
-};
-static const char * const bt_reset_groups[] = {
- "gpio45",
-};
static const char * const qup6_groups[] = {
"gpio45", "gpio46", "gpio47", "gpio48",
};
-static const char * const reserved45_groups[] = {
- "gpio45",
-};
-static const char * const reserved46_groups[] = {
- "gpio46",
-};
-static const char * const reserved47_groups[] = {
- "gpio47",
-};
-static const char * const reserved124_groups[] = {
- "gpio124",
-};
-static const char * const reserved125_groups[] = {
- "gpio125",
-};
-static const char * const reserved126_groups[] = {
- "gpio126",
-};
-static const char * const reserved127_groups[] = {
- "gpio127",
-};
-static const char * const reserved128_groups[] = {
- "gpio128",
-};
-static const char * const reserved129_groups[] = {
- "gpio129",
-};
-static const char * const qlink_request_groups[] = {
- "gpio130",
-};
-static const char * const reserved130_groups[] = {
- "gpio130",
-};
-static const char * const qlink_enable_groups[] = {
- "gpio131",
-};
-static const char * const reserved131_groups[] = {
- "gpio131",
-};
-static const char * const reserved132_groups[] = {
- "gpio132",
-};
-static const char * const reserved133_groups[] = {
- "gpio133",
-};
-static const char * const reserved134_groups[] = {
- "gpio134",
-};
-static const char * const pa_indicator_groups[] = {
- "gpio135",
-};
-static const char * const reserved135_groups[] = {
- "gpio135",
-};
-static const char * const reserved136_groups[] = {
- "gpio136",
-};
-static const char * const phase_flag26_groups[] = {
- "gpio137",
-};
-static const char * const reserved137_groups[] = {
- "gpio137",
-};
-static const char * const phase_flag27_groups[] = {
- "gpio138",
-};
-static const char * const reserved138_groups[] = {
- "gpio138",
-};
-static const char * const phase_flag28_groups[] = {
- "gpio139",
-};
-static const char * const reserved139_groups[] = {
- "gpio139",
-};
-static const char * const phase_flag6_groups[] = {
- "gpio140",
-};
-static const char * const reserved140_groups[] = {
- "gpio140",
-};
-static const char * const phase_flag29_groups[] = {
- "gpio141",
-};
-static const char * const reserved141_groups[] = {
- "gpio141",
-};
-static const char * const phase_flag30_groups[] = {
- "gpio142",
-};
-static const char * const reserved142_groups[] = {
- "gpio142",
-};
-static const char * const phase_flag31_groups[] = {
- "gpio143",
-};
-static const char * const reserved143_groups[] = {
- "gpio143",
-};
-static const char * const mss_lte_groups[] = {
- "gpio144", "gpio145",
-};
-static const char * const reserved144_groups[] = {
- "gpio144",
-};
-static const char * const reserved145_groups[] = {
- "gpio145",
-};
-static const char * const reserved146_groups[] = {
- "gpio146",
-};
-static const char * const reserved147_groups[] = {
- "gpio147",
-};
-static const char * const reserved148_groups[] = {
- "gpio148",
-};
-static const char * const reserved149_groups[] = {
- "gpio149", "gpio149",
-};
-static const char * const reserved48_groups[] = {
- "gpio48",
-};
static const char * const qup12_groups[] = {
"gpio49", "gpio50", "gpio51", "gpio52",
};
-static const char * const reserved49_groups[] = {
- "gpio49",
-};
-static const char * const reserved50_groups[] = {
- "gpio50",
-};
-static const char * const reserved51_groups[] = {
- "gpio51",
-};
static const char * const phase_flag16_groups[] = {
"gpio52",
};
-static const char * const reserved52_groups[] = {
- "gpio52",
-};
static const char * const qup10_groups[] = {
"gpio53", "gpio54", "gpio55", "gpio56",
};
static const char * const phase_flag11_groups[] = {
"gpio53",
};
-static const char * const reserved53_groups[] = {
- "gpio53",
-};
static const char * const GP_PDM0_groups[] = {
"gpio54", "gpio95",
};
@@ -1265,9 +871,6 @@
static const char * const ddr_pxi1_groups[] = {
"gpio54", "gpio55",
};
-static const char * const reserved54_groups[] = {
- "gpio54",
-};
static const char * const phase_flag13_groups[] = {
"gpio55",
};
@@ -1277,15 +880,9 @@
static const char * const atest_usb12_groups[] = {
"gpio55",
};
-static const char * const reserved55_groups[] = {
- "gpio55",
-};
static const char * const phase_flag17_groups[] = {
"gpio56",
};
-static const char * const reserved56_groups[] = {
- "gpio56",
-};
static const char * const qua_mi2s_groups[] = {
"gpio57",
};
@@ -1295,9 +892,6 @@
static const char * const phase_flag18_groups[] = {
"gpio57",
};
-static const char * const reserved57_groups[] = {
- "gpio57",
-};
static const char * const pri_mi2s_groups[] = {
"gpio65", "gpio67", "gpio68",
};
@@ -1307,33 +901,18 @@
static const char * const wsa_clk_groups[] = {
"gpio65",
};
-static const char * const reserved65_groups[] = {
- "gpio65",
-};
static const char * const pri_mi2s_ws_groups[] = {
"gpio66",
};
static const char * const wsa_data_groups[] = {
"gpio66",
};
-static const char * const reserved66_groups[] = {
- "gpio66",
-};
-static const char * const wsa_en_groups[] = {
- "gpio67", "gpio68",
-};
static const char * const atest_usb2_groups[] = {
"gpio67",
};
-static const char * const reserved67_groups[] = {
- "gpio67",
-};
static const char * const atest_usb23_groups[] = {
"gpio68",
};
-static const char * const reserved68_groups[] = {
- "gpio68",
-};
static const char * const ter_mi2s_groups[] = {
"gpio75", "gpio76", "gpio77", "gpio78",
};
@@ -1343,79 +922,33 @@
static const char * const atest_usb22_groups[] = {
"gpio75",
};
-static const char * const reserved75_groups[] = {
- "gpio75",
-};
static const char * const phase_flag9_groups[] = {
"gpio76",
};
static const char * const atest_usb21_groups[] = {
"gpio76",
};
-static const char * const reserved76_groups[] = {
- "gpio76",
-};
static const char * const phase_flag4_groups[] = {
"gpio77",
};
static const char * const atest_usb20_groups[] = {
"gpio77",
};
-static const char * const reserved77_groups[] = {
- "gpio77",
-};
-static const char * const ssc_irq_groups[] = {
- "gpio78", "gpio79", "gpio80", "gpio117", "gpio118", "gpio119",
- "gpio120", "gpio121", "gpio122", "gpio123", "gpio124", "gpio125",
-};
-static const char * const reserved78_groups[] = {
- "gpio78",
-};
static const char * const sec_mi2s_groups[] = {
"gpio79", "gpio80", "gpio81", "gpio82", "gpio83",
};
static const char * const GP_PDM2_groups[] = {
"gpio79",
};
-static const char * const reserved79_groups[] = {
- "gpio79",
-};
-static const char * const reserved80_groups[] = {
- "gpio80",
-};
static const char * const qup15_groups[] = {
"gpio81", "gpio82", "gpio83", "gpio84",
};
-static const char * const reserved81_groups[] = {
- "gpio81",
-};
-static const char * const reserved82_groups[] = {
- "gpio82",
-};
-static const char * const reserved83_groups[] = {
- "gpio83",
-};
-static const char * const reserved84_groups[] = {
- "gpio84",
-};
static const char * const qup5_groups[] = {
"gpio85", "gpio86", "gpio87", "gpio88",
};
-static const char * const reserved85_groups[] = {
- "gpio85",
-};
static const char * const copy_gp_groups[] = {
"gpio86",
};
-static const char * const reserved86_groups[] = {
- "gpio86",
-};
-static const char * const reserved87_groups[] = {
- "gpio87",
-};
-static const char * const reserved88_groups[] = {
- "gpio88",
-};
static const char * const tsif1_clk_groups[] = {
"gpio89",
};
@@ -1428,9 +961,6 @@
static const char * const phase_flag10_groups[] = {
"gpio89",
};
-static const char * const reserved89_groups[] = {
- "gpio89",
-};
static const char * const tsif1_en_groups[] = {
"gpio90",
};
@@ -1452,9 +982,6 @@
static const char * const phase_flag0_groups[] = {
"gpio90",
};
-static const char * const reserved90_groups[] = {
- "gpio90",
-};
static const char * const tsif1_data_groups[] = {
"gpio91",
};
@@ -1464,9 +991,6 @@
static const char * const tgu_ch1_groups[] = {
"gpio91",
};
-static const char * const reserved91_groups[] = {
- "gpio91",
-};
static const char * const tsif2_error_groups[] = {
"gpio92",
};
@@ -1479,9 +1003,6 @@
static const char * const tgu_ch2_groups[] = {
"gpio92",
};
-static const char * const reserved92_groups[] = {
- "gpio92",
-};
static const char * const tsif2_clk_groups[] = {
"gpio93",
};
@@ -1491,27 +1012,18 @@
static const char * const qup7_groups[] = {
"gpio93", "gpio94", "gpio95", "gpio96",
};
-static const char * const reserved93_groups[] = {
- "gpio93",
-};
static const char * const tsif2_en_groups[] = {
"gpio94",
};
static const char * const sdc42_groups[] = {
"gpio94",
};
-static const char * const reserved94_groups[] = {
- "gpio94",
-};
static const char * const tsif2_data_groups[] = {
"gpio95",
};
static const char * const sdc41_groups[] = {
"gpio95",
};
-static const char * const reserved95_groups[] = {
- "gpio95",
-};
static const char * const tsif2_sync_groups[] = {
"gpio96",
};
@@ -1521,114 +1033,63 @@
static const char * const phase_flag3_groups[] = {
"gpio96",
};
-static const char * const reserved96_groups[] = {
- "gpio96",
-};
static const char * const ldo_en_groups[] = {
"gpio97",
};
-static const char * const reserved97_groups[] = {
- "gpio97",
-};
static const char * const ldo_update_groups[] = {
"gpio98",
};
-static const char * const reserved98_groups[] = {
- "gpio98",
-};
static const char * const phase_flag14_groups[] = {
"gpio99",
};
static const char * const prng_rosc_groups[] = {
"gpio99", "gpio102",
};
-static const char * const reserved99_groups[] = {
- "gpio99",
-};
static const char * const phase_flag15_groups[] = {
"gpio100",
};
-static const char * const reserved100_groups[] = {
- "gpio100",
-};
static const char * const phase_flag5_groups[] = {
"gpio101",
};
-static const char * const reserved101_groups[] = {
- "gpio101",
-};
static const char * const pci_e1_groups[] = {
"gpio102", "gpio103",
};
-static const char * const reserved102_groups[] = {
- "gpio102",
-};
static const char * const COPY_PHASE_groups[] = {
"gpio103",
};
-static const char * const reserved103_groups[] = {
- "gpio103",
-};
static const char * const uim2_data_groups[] = {
"gpio105",
};
static const char * const qup13_groups[] = {
"gpio105", "gpio106", "gpio107", "gpio108",
};
-static const char * const reserved105_groups[] = {
- "gpio105",
-};
static const char * const uim2_clk_groups[] = {
"gpio106",
};
-static const char * const reserved106_groups[] = {
- "gpio106",
-};
static const char * const uim2_reset_groups[] = {
"gpio107",
};
-static const char * const reserved107_groups[] = {
- "gpio107",
-};
static const char * const uim2_present_groups[] = {
"gpio108",
};
-static const char * const reserved108_groups[] = {
- "gpio108",
-};
static const char * const uim1_data_groups[] = {
"gpio109",
};
-static const char * const reserved109_groups[] = {
- "gpio109",
-};
static const char * const uim1_clk_groups[] = {
"gpio110",
};
-static const char * const reserved110_groups[] = {
- "gpio110",
-};
static const char * const uim1_reset_groups[] = {
"gpio111",
};
-static const char * const reserved111_groups[] = {
- "gpio111",
-};
static const char * const uim1_present_groups[] = {
"gpio112",
};
-static const char * const reserved112_groups[] = {
- "gpio112",
-};
static const char * const uim_batt_groups[] = {
"gpio113",
};
static const char * const edp_hot_groups[] = {
"gpio113",
};
-static const char * const reserved113_groups[] = {
- "gpio113",
-};
static const char * const NAV_PPS_groups[] = {
"gpio114", "gpio114", "gpio115", "gpio115", "gpio128", "gpio128",
"gpio129", "gpio129", "gpio143", "gpio143",
@@ -1636,285 +1097,171 @@
static const char * const GPS_TX_groups[] = {
"gpio114", "gpio115", "gpio128", "gpio129", "gpio143", "gpio145",
};
-static const char * const reserved114_groups[] = {
- "gpio114",
-};
-static const char * const reserved115_groups[] = {
- "gpio115",
-};
-static const char * const reserved116_groups[] = {
- "gpio116",
-};
static const char * const atest_char_groups[] = {
"gpio117",
};
-static const char * const reserved117_groups[] = {
- "gpio117",
-};
static const char * const adsp_ext_groups[] = {
"gpio118",
};
static const char * const atest_char3_groups[] = {
"gpio118",
};
-static const char * const reserved118_groups[] = {
- "gpio118",
-};
static const char * const atest_char2_groups[] = {
"gpio119",
};
-static const char * const reserved119_groups[] = {
- "gpio119",
-};
static const char * const atest_char1_groups[] = {
"gpio120",
};
-static const char * const reserved120_groups[] = {
- "gpio120",
-};
static const char * const atest_char0_groups[] = {
"gpio121",
};
-static const char * const reserved121_groups[] = {
- "gpio121",
+static const char * const qlink_request_groups[] = {
+ "gpio130",
};
-static const char * const reserved122_groups[] = {
- "gpio122",
+static const char * const qlink_enable_groups[] = {
+ "gpio131",
};
-static const char * const reserved123_groups[] = {
- "gpio123",
+static const char * const pa_indicator_groups[] = {
+ "gpio135",
+};
+static const char * const phase_flag26_groups[] = {
+ "gpio137",
+};
+static const char * const phase_flag27_groups[] = {
+ "gpio138",
+};
+static const char * const phase_flag28_groups[] = {
+ "gpio139",
+};
+static const char * const phase_flag6_groups[] = {
+ "gpio140",
+};
+static const char * const phase_flag29_groups[] = {
+ "gpio141",
+};
+static const char * const phase_flag30_groups[] = {
+ "gpio142",
+};
+static const char * const phase_flag31_groups[] = {
+ "gpio143",
+};
+static const char * const mss_lte_groups[] = {
+ "gpio144", "gpio145",
};
static const struct msm_function sdm670_functions[] = {
FUNCTION(qup0),
FUNCTION(gpio),
- FUNCTION(reserved0),
- FUNCTION(reserved1),
- FUNCTION(reserved2),
- FUNCTION(reserved3),
FUNCTION(qup9),
FUNCTION(qdss_cti),
- FUNCTION(reserved4),
- FUNCTION(reserved5),
FUNCTION(ddr_pxi0),
- FUNCTION(reserved6),
FUNCTION(ddr_bist),
FUNCTION(atest_tsens2),
FUNCTION(vsense_trigger),
FUNCTION(atest_usb1),
- FUNCTION(reserved7),
FUNCTION(qup_l4),
FUNCTION(GP_PDM1),
- FUNCTION(reserved8),
FUNCTION(qup_l5),
- FUNCTION(reserved9),
FUNCTION(mdp_vsync),
FUNCTION(qup_l6),
FUNCTION(wlan2_adc1),
FUNCTION(atest_usb11),
FUNCTION(ddr_pxi2),
- FUNCTION(reserved10),
FUNCTION(edp_lcd),
FUNCTION(dbg_out),
FUNCTION(wlan2_adc0),
FUNCTION(atest_usb10),
- FUNCTION(reserved11),
FUNCTION(m_voc),
FUNCTION(tsif1_sync),
FUNCTION(ddr_pxi3),
- FUNCTION(reserved12),
FUNCTION(cam_mclk),
FUNCTION(pll_bypassnl),
FUNCTION(qdss_gpio0),
- FUNCTION(reserved13),
FUNCTION(pll_reset),
FUNCTION(qdss_gpio1),
- FUNCTION(reserved14),
FUNCTION(qdss_gpio2),
- FUNCTION(reserved15),
FUNCTION(qdss_gpio3),
- FUNCTION(reserved16),
FUNCTION(cci_i2c),
FUNCTION(qup1),
FUNCTION(qdss_gpio4),
- FUNCTION(reserved17),
FUNCTION(qdss_gpio5),
- FUNCTION(reserved18),
FUNCTION(qdss_gpio6),
- FUNCTION(reserved19),
FUNCTION(qdss_gpio7),
- FUNCTION(reserved20),
FUNCTION(cci_timer0),
FUNCTION(gcc_gp2),
FUNCTION(qdss_gpio8),
- FUNCTION(reserved21),
FUNCTION(cci_timer1),
FUNCTION(gcc_gp3),
FUNCTION(qdss_gpio),
- FUNCTION(reserved22),
FUNCTION(cci_timer2),
FUNCTION(qdss_gpio9),
- FUNCTION(reserved23),
FUNCTION(cci_timer3),
FUNCTION(cci_async),
FUNCTION(qdss_gpio10),
- FUNCTION(reserved24),
FUNCTION(cci_timer4),
FUNCTION(qdss_gpio11),
- FUNCTION(reserved25),
FUNCTION(qdss_gpio12),
FUNCTION(JITTER_BIST),
- FUNCTION(reserved26),
FUNCTION(qup2),
FUNCTION(qdss_gpio13),
FUNCTION(PLL_BIST),
- FUNCTION(reserved27),
FUNCTION(qdss_gpio14),
FUNCTION(AGERA_PLL),
- FUNCTION(reserved28),
FUNCTION(phase_flag1),
FUNCTION(qdss_gpio15),
FUNCTION(atest_tsens),
- FUNCTION(reserved29),
FUNCTION(phase_flag2),
- FUNCTION(reserved30),
FUNCTION(qup11),
FUNCTION(qup14),
- FUNCTION(reserved31),
- FUNCTION(reserved32),
- FUNCTION(reserved33),
- FUNCTION(reserved34),
FUNCTION(pci_e0),
FUNCTION(QUP_L4),
- FUNCTION(reserved35),
FUNCTION(QUP_L5),
- FUNCTION(reserved36),
FUNCTION(QUP_L6),
- FUNCTION(reserved37),
FUNCTION(usb_phy),
- FUNCTION(reserved38),
FUNCTION(lpass_slimbus),
- FUNCTION(reserved39),
FUNCTION(sd_write),
FUNCTION(tsif1_error),
- FUNCTION(reserved40),
FUNCTION(qup3),
- FUNCTION(reserved41),
- FUNCTION(reserved42),
- FUNCTION(reserved43),
- FUNCTION(reserved44),
- FUNCTION(bt_reset),
FUNCTION(qup6),
- FUNCTION(reserved45),
- FUNCTION(reserved46),
- FUNCTION(reserved47),
- FUNCTION(reserved124),
- FUNCTION(reserved125),
- FUNCTION(reserved126),
- FUNCTION(reserved127),
- FUNCTION(reserved128),
- FUNCTION(reserved129),
- FUNCTION(qlink_request),
- FUNCTION(reserved130),
- FUNCTION(qlink_enable),
- FUNCTION(reserved131),
- FUNCTION(reserved132),
- FUNCTION(reserved133),
- FUNCTION(reserved134),
- FUNCTION(pa_indicator),
- FUNCTION(reserved135),
- FUNCTION(reserved136),
- FUNCTION(phase_flag26),
- FUNCTION(reserved137),
- FUNCTION(phase_flag27),
- FUNCTION(reserved138),
- FUNCTION(phase_flag28),
- FUNCTION(reserved139),
- FUNCTION(phase_flag6),
- FUNCTION(reserved140),
- FUNCTION(phase_flag29),
- FUNCTION(reserved141),
- FUNCTION(phase_flag30),
- FUNCTION(reserved142),
- FUNCTION(phase_flag31),
- FUNCTION(reserved143),
- FUNCTION(mss_lte),
- FUNCTION(reserved144),
- FUNCTION(reserved145),
- FUNCTION(reserved146),
- FUNCTION(reserved147),
- FUNCTION(reserved148),
- FUNCTION(reserved149),
- FUNCTION(reserved48),
FUNCTION(qup12),
- FUNCTION(reserved49),
- FUNCTION(reserved50),
- FUNCTION(reserved51),
FUNCTION(phase_flag16),
- FUNCTION(reserved52),
FUNCTION(qup10),
FUNCTION(phase_flag11),
- FUNCTION(reserved53),
FUNCTION(GP_PDM0),
FUNCTION(phase_flag12),
FUNCTION(wlan1_adc1),
FUNCTION(atest_usb13),
FUNCTION(ddr_pxi1),
- FUNCTION(reserved54),
FUNCTION(phase_flag13),
FUNCTION(wlan1_adc0),
FUNCTION(atest_usb12),
- FUNCTION(reserved55),
FUNCTION(phase_flag17),
- FUNCTION(reserved56),
FUNCTION(qua_mi2s),
FUNCTION(gcc_gp1),
FUNCTION(phase_flag18),
- FUNCTION(reserved57),
FUNCTION(pri_mi2s),
FUNCTION(qup8),
FUNCTION(wsa_clk),
- FUNCTION(reserved65),
FUNCTION(pri_mi2s_ws),
FUNCTION(wsa_data),
- FUNCTION(reserved66),
- FUNCTION(wsa_en),
FUNCTION(atest_usb2),
- FUNCTION(reserved67),
FUNCTION(atest_usb23),
- FUNCTION(reserved68),
FUNCTION(ter_mi2s),
FUNCTION(phase_flag8),
FUNCTION(atest_usb22),
- FUNCTION(reserved75),
FUNCTION(phase_flag9),
FUNCTION(atest_usb21),
- FUNCTION(reserved76),
FUNCTION(phase_flag4),
FUNCTION(atest_usb20),
- FUNCTION(reserved77),
- FUNCTION(ssc_irq),
- FUNCTION(reserved78),
FUNCTION(sec_mi2s),
FUNCTION(GP_PDM2),
- FUNCTION(reserved79),
- FUNCTION(reserved80),
FUNCTION(qup15),
- FUNCTION(reserved81),
- FUNCTION(reserved82),
- FUNCTION(reserved83),
- FUNCTION(reserved84),
FUNCTION(qup5),
- FUNCTION(reserved85),
FUNCTION(copy_gp),
- FUNCTION(reserved86),
- FUNCTION(reserved87),
- FUNCTION(reserved88),
FUNCTION(tsif1_clk),
FUNCTION(qup4),
FUNCTION(tgu_ch3),
FUNCTION(phase_flag10),
- FUNCTION(reserved89),
FUNCTION(tsif1_en),
FUNCTION(mdp_vsync0),
FUNCTION(mdp_vsync1),
@@ -1922,326 +1269,388 @@
FUNCTION(mdp_vsync3),
FUNCTION(tgu_ch0),
FUNCTION(phase_flag0),
- FUNCTION(reserved90),
FUNCTION(tsif1_data),
FUNCTION(sdc4_cmd),
FUNCTION(tgu_ch1),
- FUNCTION(reserved91),
FUNCTION(tsif2_error),
FUNCTION(sdc43),
FUNCTION(vfr_1),
FUNCTION(tgu_ch2),
- FUNCTION(reserved92),
FUNCTION(tsif2_clk),
FUNCTION(sdc4_clk),
FUNCTION(qup7),
- FUNCTION(reserved93),
FUNCTION(tsif2_en),
FUNCTION(sdc42),
- FUNCTION(reserved94),
FUNCTION(tsif2_data),
FUNCTION(sdc41),
- FUNCTION(reserved95),
FUNCTION(tsif2_sync),
FUNCTION(sdc40),
FUNCTION(phase_flag3),
- FUNCTION(reserved96),
FUNCTION(ldo_en),
- FUNCTION(reserved97),
FUNCTION(ldo_update),
- FUNCTION(reserved98),
FUNCTION(phase_flag14),
FUNCTION(prng_rosc),
- FUNCTION(reserved99),
FUNCTION(phase_flag15),
- FUNCTION(reserved100),
FUNCTION(phase_flag5),
- FUNCTION(reserved101),
FUNCTION(pci_e1),
- FUNCTION(reserved102),
FUNCTION(COPY_PHASE),
- FUNCTION(reserved103),
FUNCTION(uim2_data),
FUNCTION(qup13),
- FUNCTION(reserved105),
FUNCTION(uim2_clk),
- FUNCTION(reserved106),
FUNCTION(uim2_reset),
- FUNCTION(reserved107),
FUNCTION(uim2_present),
- FUNCTION(reserved108),
FUNCTION(uim1_data),
- FUNCTION(reserved109),
FUNCTION(uim1_clk),
- FUNCTION(reserved110),
FUNCTION(uim1_reset),
- FUNCTION(reserved111),
FUNCTION(uim1_present),
- FUNCTION(reserved112),
FUNCTION(uim_batt),
FUNCTION(edp_hot),
- FUNCTION(reserved113),
FUNCTION(NAV_PPS),
FUNCTION(GPS_TX),
- FUNCTION(reserved114),
- FUNCTION(reserved115),
- FUNCTION(reserved116),
FUNCTION(atest_char),
- FUNCTION(reserved117),
FUNCTION(adsp_ext),
FUNCTION(atest_char3),
- FUNCTION(reserved118),
FUNCTION(atest_char2),
- FUNCTION(reserved119),
FUNCTION(atest_char1),
- FUNCTION(reserved120),
FUNCTION(atest_char0),
- FUNCTION(reserved121),
- FUNCTION(reserved122),
- FUNCTION(reserved123),
+ FUNCTION(qlink_request),
+ FUNCTION(qlink_enable),
+ FUNCTION(pa_indicator),
+ FUNCTION(phase_flag26),
+ FUNCTION(phase_flag27),
+ FUNCTION(phase_flag28),
+ FUNCTION(phase_flag6),
+ FUNCTION(phase_flag29),
+ FUNCTION(phase_flag30),
+ FUNCTION(phase_flag31),
+ FUNCTION(mss_lte),
};
+/* Every pin is maintained as a single group, and missing or non-existing pin
+ * would be maintained as dummy group to synchronize pin group index with
+ * pin descriptor registered with pinctrl core.
+ * Clients would not be able to request these dummy pin groups.
+ */
static const struct msm_pingroup sdm670_groups[] = {
- PINGROUP(0, SOUTH, qup0, NA, reserved0, NA, NA, NA, NA, NA, NA),
- PINGROUP(1, SOUTH, qup0, NA, reserved1, NA, NA, NA, NA, NA, NA),
- PINGROUP(2, SOUTH, qup0, NA, reserved2, NA, NA, NA, NA, NA, NA),
- PINGROUP(3, SOUTH, qup0, NA, reserved3, NA, NA, NA, NA, NA, NA),
- PINGROUP(4, NORTH, qup9, qdss_cti, reserved4, NA, NA, NA, NA, NA, NA),
- PINGROUP(5, NORTH, qup9, qdss_cti, reserved5, NA, NA, NA, NA, NA, NA),
- PINGROUP(6, NORTH, qup9, NA, ddr_pxi0, reserved6, NA, NA, NA, NA, NA),
- PINGROUP(7, NORTH, qup9, ddr_bist, NA, atest_tsens2, vsense_trigger,
- atest_usb1, ddr_pxi0, reserved7, NA),
- PINGROUP(8, WEST, qup_l4, GP_PDM1, ddr_bist, NA, reserved8, NA, NA, NA,
- NA),
- PINGROUP(9, WEST, qup_l5, ddr_bist, reserved9, NA, NA, NA, NA, NA, NA),
- PINGROUP(10, NORTH, mdp_vsync, qup_l6, ddr_bist, wlan2_adc1,
- atest_usb11, ddr_pxi2, reserved10, NA, NA),
- PINGROUP(11, NORTH, mdp_vsync, edp_lcd, dbg_out, wlan2_adc0,
- atest_usb10, ddr_pxi2, reserved11, NA, NA),
- PINGROUP(12, SOUTH, mdp_vsync, m_voc, tsif1_sync, ddr_pxi3, reserved12,
- NA, NA, NA, NA),
- PINGROUP(13, WEST, cam_mclk, pll_bypassnl, qdss_gpio0, ddr_pxi3,
- reserved13, NA, NA, NA, NA),
- PINGROUP(14, WEST, cam_mclk, pll_reset, qdss_gpio1, reserved14, NA, NA,
- NA, NA, NA),
- PINGROUP(15, WEST, cam_mclk, qdss_gpio2, reserved15, NA, NA, NA, NA,
- NA, NA),
- PINGROUP(16, WEST, cam_mclk, qdss_gpio3, reserved16, NA, NA, NA, NA,
- NA, NA),
- PINGROUP(17, WEST, cci_i2c, qup1, qdss_gpio4, reserved17, NA, NA, NA,
- NA, NA),
- PINGROUP(18, WEST, cci_i2c, qup1, NA, qdss_gpio5, reserved18, NA, NA,
- NA, NA),
- PINGROUP(19, WEST, cci_i2c, qup1, NA, qdss_gpio6, reserved19, NA, NA,
- NA, NA),
- PINGROUP(20, WEST, cci_i2c, qup1, NA, qdss_gpio7, reserved20, NA, NA,
- NA, NA),
- PINGROUP(21, WEST, cci_timer0, gcc_gp2, qdss_gpio8, NA, reserved21, NA,
- NA, NA, NA),
- PINGROUP(22, WEST, cci_timer1, gcc_gp3, qdss_gpio, NA, reserved22, NA,
- NA, NA, NA),
- PINGROUP(23, WEST, cci_timer2, qdss_gpio9, NA, reserved23, NA, NA, NA,
- NA, NA),
- PINGROUP(24, WEST, cci_timer3, cci_async, qdss_gpio10, reserved24, NA,
- NA, NA, NA, NA),
- PINGROUP(25, WEST, cci_timer4, cci_async, qdss_gpio11, NA, reserved25,
- NA, NA, NA, NA),
- PINGROUP(26, WEST, cci_async, qdss_gpio12, JITTER_BIST, NA, reserved26,
- NA, NA, NA, NA),
- PINGROUP(27, WEST, qup2, qdss_gpio13, PLL_BIST, NA, reserved27, NA, NA,
- NA, NA),
- PINGROUP(28, WEST, qup2, qdss_gpio14, AGERA_PLL, NA, reserved28, NA,
- NA, NA, NA),
- PINGROUP(29, WEST, qup2, NA, phase_flag1, qdss_gpio15, atest_tsens,
- reserved29, NA, NA, NA),
- PINGROUP(30, WEST, qup2, phase_flag2, qdss_gpio, reserved30, NA, NA,
- NA, NA, NA),
- PINGROUP(31, WEST, qup11, qup14, reserved31, NA, NA, NA, NA, NA, NA),
- PINGROUP(32, WEST, qup11, qup14, NA, reserved32, NA, NA, NA, NA, NA),
- PINGROUP(33, WEST, qup11, qup14, NA, reserved33, NA, NA, NA, NA, NA),
- PINGROUP(34, WEST, qup11, qup14, NA, reserved34, NA, NA, NA, NA, NA),
- PINGROUP(35, NORTH, pci_e0, QUP_L4, JITTER_BIST, NA, reserved35, NA,
- NA, NA, NA),
- PINGROUP(36, NORTH, pci_e0, QUP_L5, PLL_BIST, NA, reserved36, NA, NA,
- NA, NA),
- PINGROUP(37, NORTH, QUP_L6, AGERA_PLL, NA, reserved37, NA, NA, NA, NA,
- NA),
- PINGROUP(38, NORTH, usb_phy, NA, reserved38, NA, NA, NA, NA, NA, NA),
- PINGROUP(39, NORTH, lpass_slimbus, NA, reserved39, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(40, NORTH, sd_write, tsif1_error, NA, reserved40, NA, NA, NA,
- NA, NA),
- PINGROUP(41, SOUTH, qup3, NA, qdss_gpio6, reserved41, NA, NA, NA, NA,
- NA),
- PINGROUP(42, SOUTH, qup3, NA, qdss_gpio7, reserved42, NA, NA, NA, NA,
- NA),
- PINGROUP(43, SOUTH, qup3, NA, qdss_gpio14, reserved43, NA, NA, NA, NA,
- NA),
- PINGROUP(44, SOUTH, qup3, NA, qdss_gpio15, reserved44, NA, NA, NA, NA,
- NA),
- PINGROUP(45, SOUTH, qup6, NA, reserved45, NA, NA, NA, NA, NA, NA),
- PINGROUP(46, SOUTH, qup6, NA, reserved46, NA, NA, NA, NA, NA, NA),
- PINGROUP(47, SOUTH, qup6, reserved47, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(48, SOUTH, qup6, reserved48, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(49, NORTH, qup12, reserved49, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(50, NORTH, qup12, reserved50, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(51, NORTH, qup12, qdss_cti, reserved51, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(52, NORTH, qup12, phase_flag16, qdss_cti, reserved52, NA, NA,
- NA, NA, NA),
- PINGROUP(53, NORTH, qup10, phase_flag11, reserved53, NA, NA, NA, NA,
- NA, NA),
- PINGROUP(54, NORTH, qup10, GP_PDM0, phase_flag12, NA, wlan1_adc1,
- atest_usb13, ddr_pxi1, reserved54, NA),
- PINGROUP(55, NORTH, qup10, phase_flag13, NA, wlan1_adc0, atest_usb12,
- ddr_pxi1, reserved55, NA, NA),
- PINGROUP(56, NORTH, qup10, phase_flag17, reserved56, NA, NA, NA, NA,
- NA, NA),
- PINGROUP(57, NORTH, qua_mi2s, gcc_gp1, phase_flag18, reserved57, NA,
- NA, NA, NA, NA),
- PINGROUP(65, NORTH, pri_mi2s, qup8, wsa_clk, NA, reserved65, NA, NA,
- NA, NA),
- PINGROUP(66, NORTH, pri_mi2s_ws, qup8, wsa_data, GP_PDM1, NA,
- reserved66, NA, NA, NA),
- PINGROUP(67, NORTH, pri_mi2s, qup8, NA, atest_usb2, reserved67, NA, NA,
- NA, NA),
- PINGROUP(68, NORTH, pri_mi2s, qup8, NA, atest_usb23, reserved68, NA,
- NA, NA, NA),
- PINGROUP(75, NORTH, ter_mi2s, phase_flag8, qdss_gpio8, atest_usb22,
- QUP_L4, reserved75, NA, NA, NA),
- PINGROUP(76, NORTH, ter_mi2s, phase_flag9, qdss_gpio9, atest_usb21,
- QUP_L5, reserved76, NA, NA, NA),
- PINGROUP(77, NORTH, ter_mi2s, phase_flag4, qdss_gpio10, atest_usb20,
- QUP_L6, reserved77, NA, NA, NA),
- PINGROUP(78, NORTH, ter_mi2s, gcc_gp1, NA, reserved78, NA, NA, NA, NA,
- NA),
- PINGROUP(79, NORTH, sec_mi2s, GP_PDM2, NA, qdss_gpio11, NA, reserved79,
- NA, NA, NA),
- PINGROUP(80, NORTH, sec_mi2s, NA, qdss_gpio12, reserved80, NA, NA, NA,
- NA, NA),
- PINGROUP(81, NORTH, sec_mi2s, qup15, NA, reserved81, NA, NA, NA, NA,
- NA),
- PINGROUP(82, NORTH, sec_mi2s, qup15, NA, reserved82, NA, NA, NA, NA,
- NA),
- PINGROUP(83, NORTH, sec_mi2s, qup15, NA, reserved83, NA, NA, NA, NA,
- NA),
- PINGROUP(84, NORTH, qup15, NA, reserved84, NA, NA, NA, NA, NA, NA),
- PINGROUP(85, SOUTH, qup5, NA, reserved85, NA, NA, NA, NA, NA, NA),
- PINGROUP(86, SOUTH, qup5, copy_gp, NA, reserved86, NA, NA, NA, NA, NA),
- PINGROUP(87, SOUTH, qup5, NA, reserved87, NA, NA, NA, NA, NA, NA),
- PINGROUP(88, SOUTH, qup5, NA, reserved88, NA, NA, NA, NA, NA, NA),
- PINGROUP(89, SOUTH, tsif1_clk, qup4, tgu_ch3, phase_flag10, reserved89,
- NA, NA, NA, NA),
- PINGROUP(90, SOUTH, tsif1_en, mdp_vsync0, qup4, mdp_vsync1, mdp_vsync2,
- mdp_vsync3, tgu_ch0, phase_flag0, qdss_cti),
- PINGROUP(91, SOUTH, tsif1_data, sdc4_cmd, qup4, tgu_ch1, NA, qdss_cti,
- reserved91, NA, NA),
- PINGROUP(92, SOUTH, tsif2_error, sdc43, qup4, vfr_1, tgu_ch2, NA,
- reserved92, NA, NA),
- PINGROUP(93, SOUTH, tsif2_clk, sdc4_clk, qup7, NA, qdss_gpio13,
- reserved93, NA, NA, NA),
- PINGROUP(94, SOUTH, tsif2_en, sdc42, qup7, NA, reserved94, NA, NA, NA,
- NA),
- PINGROUP(95, SOUTH, tsif2_data, sdc41, qup7, GP_PDM0, NA, reserved95,
- NA, NA, NA),
- PINGROUP(96, SOUTH, tsif2_sync, sdc40, qup7, phase_flag3, reserved96,
- NA, NA, NA, NA),
- PINGROUP(97, WEST, NA, NA, mdp_vsync, ldo_en, reserved97, NA, NA, NA,
- NA),
- PINGROUP(98, WEST, NA, mdp_vsync, ldo_update, reserved98, NA, NA, NA,
- NA, NA),
- PINGROUP(99, NORTH, phase_flag14, prng_rosc, reserved99, NA, NA, NA,
- NA, NA, NA),
- PINGROUP(100, WEST, phase_flag15, reserved100, NA, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(101, WEST, NA, phase_flag5, reserved101, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(102, WEST, pci_e1, prng_rosc, reserved102, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(103, WEST, pci_e1, COPY_PHASE, reserved103, NA, NA, NA, NA,
- NA, NA),
- PINGROUP(105, NORTH, uim2_data, qup13, qup_l4, NA, reserved105, NA, NA,
- NA, NA),
- PINGROUP(106, NORTH, uim2_clk, qup13, qup_l5, NA, reserved106, NA, NA,
- NA, NA),
- PINGROUP(107, NORTH, uim2_reset, qup13, qup_l6, reserved107, NA, NA,
- NA, NA, NA),
- PINGROUP(108, NORTH, uim2_present, qup13, reserved108, NA, NA, NA, NA,
- NA, NA),
- PINGROUP(109, NORTH, uim1_data, reserved109, NA, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(110, NORTH, uim1_clk, reserved110, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(111, NORTH, uim1_reset, reserved111, NA, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(112, NORTH, uim1_present, reserved112, NA, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(113, NORTH, uim_batt, edp_hot, reserved113, NA, NA, NA, NA,
- NA, NA),
- PINGROUP(114, WEST, NA, NAV_PPS, NAV_PPS, GPS_TX, NA, reserved114, NA,
- NA, NA),
- PINGROUP(115, WEST, NA, NAV_PPS, NAV_PPS, GPS_TX, NA, reserved115, NA,
- NA, NA),
- PINGROUP(116, SOUTH, NA, reserved116, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(117, NORTH, NA, qdss_gpio0, atest_char, reserved117, NA, NA,
- NA, NA, NA),
- PINGROUP(118, NORTH, adsp_ext, NA, qdss_gpio1, atest_char3,
- reserved118, NA, NA, NA, NA),
- PINGROUP(119, NORTH, NA, qdss_gpio2, atest_char2, reserved119, NA, NA,
- NA, NA, NA),
- PINGROUP(120, NORTH, NA, qdss_gpio3, atest_char1, reserved120, NA, NA,
- NA, NA, NA),
- PINGROUP(121, NORTH, NA, qdss_gpio4, atest_char0, reserved121, NA, NA,
- NA, NA, NA),
- PINGROUP(122, NORTH, NA, qdss_gpio5, reserved122, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(123, NORTH, qup_l4, NA, qdss_gpio, reserved123, NA, NA, NA,
- NA, NA),
- PINGROUP(124, NORTH, qup_l5, NA, qdss_gpio, reserved124, NA, NA, NA,
- NA, NA),
- PINGROUP(125, NORTH, qup_l6, NA, reserved125, NA, NA, NA, NA, NA, NA),
- PINGROUP(126, NORTH, NA, reserved126, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(127, WEST, NA, NA, reserved127, NA, NA, NA, NA, NA, NA),
- PINGROUP(128, WEST, NAV_PPS, NAV_PPS, GPS_TX, NA, reserved128, NA, NA,
- NA, NA),
- PINGROUP(129, WEST, NAV_PPS, NAV_PPS, GPS_TX, NA, reserved129, NA, NA,
- NA, NA),
- PINGROUP(130, WEST, qlink_request, NA, reserved130, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(131, WEST, qlink_enable, NA, reserved131, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(132, WEST, NA, NA, reserved132, NA, NA, NA, NA, NA, NA),
- PINGROUP(133, NORTH, NA, reserved133, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(134, NORTH, NA, reserved134, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(135, WEST, NA, pa_indicator, NA, reserved135, NA, NA, NA, NA,
- NA),
- PINGROUP(136, WEST, NA, NA, reserved136, NA, NA, NA, NA, NA, NA),
- PINGROUP(137, WEST, NA, NA, phase_flag26, reserved137, NA, NA, NA, NA,
- NA),
- PINGROUP(138, WEST, NA, NA, phase_flag27, reserved138, NA, NA, NA, NA,
- NA),
- PINGROUP(139, WEST, NA, phase_flag28, reserved139, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(140, WEST, NA, NA, phase_flag6, reserved140, NA, NA, NA, NA,
- NA),
- PINGROUP(141, WEST, NA, phase_flag29, reserved141, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(142, WEST, NA, phase_flag30, reserved142, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(143, WEST, NA, NAV_PPS, NAV_PPS, GPS_TX, phase_flag31,
- reserved143, NA, NA, NA),
- PINGROUP(144, SOUTH, mss_lte, reserved144, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(145, SOUTH, mss_lte, GPS_TX, reserved145, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(146, WEST, NA, NA, reserved146, NA, NA, NA, NA, NA, NA),
- PINGROUP(147, WEST, NA, NA, reserved147, NA, NA, NA, NA, NA, NA),
- PINGROUP(148, WEST, NA, reserved148, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(149, WEST, NA, reserved149, NA, NA, NA, NA, NA, NA, NA),
- SDC_QDSD_PINGROUP(sdc1_clk, 0x599000, 13, 6),
- SDC_QDSD_PINGROUP(sdc1_cmd, 0x599000, 11, 3),
- SDC_QDSD_PINGROUP(sdc1_data, 0x599000, 9, 0),
- SDC_QDSD_PINGROUP(sdc2_clk, 0x99a000, 14, 6),
- SDC_QDSD_PINGROUP(sdc2_cmd, 0x99a000, 11, 3),
- SDC_QDSD_PINGROUP(sdc2_data, 0x99a000, 9, 0),
+ [0] = PINGROUP(0, qup0, NA, NA, NA, NA, NA, NA, NA, NA),
+ [1] = PINGROUP(1, qup0, NA, NA, NA, NA, NA, NA, NA, NA),
+ [2] = PINGROUP(2, qup0, NA, NA, NA, NA, NA, NA, NA, NA),
+ [3] = PINGROUP(3, qup0, NA, NA, NA, NA, NA, NA, NA, NA),
+ [4] = PINGROUP(4, qup9, qdss_cti, NA, NA, NA, NA, NA, NA, NA),
+ [5] = PINGROUP(5, qup9, qdss_cti, NA, NA, NA, NA, NA, NA, NA),
+ [6] = PINGROUP(6, qup9, NA, ddr_pxi0, NA, NA, NA, NA, NA, NA),
+ [7] = PINGROUP(7, qup9, ddr_bist, NA, atest_tsens2,
+ vsense_trigger, atest_usb1, ddr_pxi0, NA, NA),
+ [8] = PINGROUP(8, qup_l4, GP_PDM1, ddr_bist, NA, NA, NA, NA, NA,
+ NA),
+ [9] = PINGROUP(9, qup_l5, ddr_bist, NA, NA, NA, NA, NA, NA, NA),
+ [10] = PINGROUP(10, mdp_vsync, qup_l6, ddr_bist, wlan2_adc1,
+ atest_usb11, ddr_pxi2, NA, NA, NA),
+ [11] = PINGROUP(11, mdp_vsync, edp_lcd, dbg_out, wlan2_adc0,
+ atest_usb10, ddr_pxi2, NA, NA, NA),
+ [12] = PINGROUP(12, mdp_vsync, m_voc, tsif1_sync, ddr_pxi3, NA,
+ NA, NA, NA, NA),
+ [13] = PINGROUP(13, cam_mclk, pll_bypassnl, qdss_gpio0, ddr_pxi3,
+ NA, NA, NA, NA, NA),
+ [14] = PINGROUP(14, cam_mclk, pll_reset, qdss_gpio1, NA, NA, NA,
+ NA, NA, NA),
+ [15] = PINGROUP(15, cam_mclk, qdss_gpio2, NA, NA, NA, NA, NA, NA,
+ NA),
+ [16] = PINGROUP(16, cam_mclk, qdss_gpio3, NA, NA, NA, NA, NA, NA,
+ NA),
+ [17] = PINGROUP(17, cci_i2c, qup1, qdss_gpio4, NA, NA, NA, NA,
+ NA, NA),
+ [18] = PINGROUP(18, cci_i2c, qup1, NA, qdss_gpio5, NA, NA, NA,
+ NA, NA),
+ [19] = PINGROUP(19, cci_i2c, qup1, NA, qdss_gpio6, NA, NA, NA,
+ NA, NA),
+ [20] = PINGROUP(20, cci_i2c, qup1, NA, qdss_gpio7, NA, NA, NA,
+ NA, NA),
+ [21] = PINGROUP(21, cci_timer0, gcc_gp2, qdss_gpio8, NA, NA, NA,
+ NA, NA, NA),
+ [22] = PINGROUP(22, cci_timer1, gcc_gp3, qdss_gpio, NA, NA, NA,
+ NA, NA, NA),
+ [23] = PINGROUP(23, cci_timer2, qdss_gpio9, NA, NA, NA, NA, NA,
+ NA, NA),
+ [24] = PINGROUP(24, cci_timer3, cci_async, qdss_gpio10, NA, NA,
+ NA, NA, NA, NA),
+ [25] = PINGROUP(25, cci_timer4, cci_async, qdss_gpio11, NA, NA,
+ NA, NA, NA, NA),
+ [26] = PINGROUP(26, cci_async, qdss_gpio12, JITTER_BIST, NA, NA,
+ NA, NA, NA, NA),
+ [27] = PINGROUP(27, qup2, qdss_gpio13, PLL_BIST, NA, NA, NA, NA,
+ NA, NA),
+ [28] = PINGROUP(28, qup2, qdss_gpio14, AGERA_PLL, NA, NA, NA, NA,
+ NA, NA),
+ [29] = PINGROUP(29, qup2, NA, phase_flag1, qdss_gpio15,
+ atest_tsens, NA, NA, NA, NA),
+ [30] = PINGROUP(30, qup2, phase_flag2, qdss_gpio, NA, NA, NA, NA,
+ NA, NA),
+ [31] = PINGROUP(31, qup11, qup14, NA, NA, NA, NA, NA, NA, NA),
+ [32] = PINGROUP(32, qup11, qup14, NA, NA, NA, NA, NA, NA, NA),
+ [33] = PINGROUP(33, qup11, qup14, NA, NA, NA, NA, NA, NA, NA),
+ [34] = PINGROUP(34, qup11, qup14, NA, NA, NA, NA, NA, NA, NA),
+ [35] = PINGROUP(35, pci_e0, QUP_L4, JITTER_BIST, NA, NA, NA, NA,
+ NA, NA),
+ [36] = PINGROUP(36, pci_e0, QUP_L5, PLL_BIST, NA, NA, NA, NA,
+ NA, NA),
+ [37] = PINGROUP(37, QUP_L6, AGERA_PLL, NA, NA, NA, NA, NA, NA,
+ NA),
+ [38] = PINGROUP(38, usb_phy, NA, NA, NA, NA, NA, NA, NA, NA),
+ [39] = PINGROUP(39, lpass_slimbus, NA, NA, NA, NA, NA, NA, NA,
+ NA),
+ [40] = PINGROUP(40, sd_write, tsif1_error, NA, NA, NA, NA, NA,
+ NA, NA),
+ [41] = PINGROUP(41, qup3, NA, qdss_gpio6, NA, NA, NA, NA, NA,
+ NA),
+ [42] = PINGROUP(42, qup3, NA, qdss_gpio7, NA, NA, NA, NA, NA,
+ NA),
+ [43] = PINGROUP(43, qup3, NA, qdss_gpio14, NA, NA, NA, NA, NA,
+ NA),
+ [44] = PINGROUP(44, qup3, NA, qdss_gpio15, NA, NA, NA, NA, NA,
+ NA),
+ [45] = PINGROUP(45, qup6, NA, NA, NA, NA, NA, NA, NA, NA),
+ [46] = PINGROUP(46, qup6, NA, NA, NA, NA, NA, NA, NA, NA),
+ [47] = PINGROUP(47, qup6, NA, NA, NA, NA, NA, NA, NA, NA),
+ [48] = PINGROUP(48, qup6, NA, NA, NA, NA, NA, NA, NA, NA),
+ [49] = PINGROUP(49, qup12, NA, NA, NA, NA, NA, NA, NA, NA),
+ [50] = PINGROUP(50, qup12, NA, NA, NA, NA, NA, NA, NA, NA),
+ [51] = PINGROUP(51, qup12, qdss_cti, NA, NA, NA, NA, NA, NA, NA),
+ [52] = PINGROUP(52, qup12, phase_flag16, qdss_cti, NA, NA, NA,
+ NA, NA, NA),
+ [53] = PINGROUP(53, qup10, phase_flag11, NA, NA, NA, NA, NA, NA,
+ NA),
+ [54] = PINGROUP(54, qup10, GP_PDM0, phase_flag12, NA,
+ wlan1_adc1, atest_usb13, ddr_pxi1, NA, NA),
+ [55] = PINGROUP(55, qup10, phase_flag13, NA, wlan1_adc0,
+ atest_usb12, ddr_pxi1, NA, NA, NA),
+ [56] = PINGROUP(56, qup10, phase_flag17, NA, NA, NA, NA, NA, NA,
+ NA),
+ [57] = PINGROUP(57, qua_mi2s, gcc_gp1, phase_flag18, NA, NA, NA,
+ NA, NA, NA),
+ [58] = PINGROUP(58, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [59] = PINGROUP(59, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [60] = PINGROUP(60, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [61] = PINGROUP(61, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [62] = PINGROUP(62, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [63] = PINGROUP(63, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [64] = PINGROUP(64, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [65] = PINGROUP(65, pri_mi2s, qup8, wsa_clk, NA, NA, NA, NA, NA,
+ NA),
+ [66] = PINGROUP(66, pri_mi2s_ws, qup8, wsa_data, GP_PDM1, NA,
+ NA, NA, NA, NA),
+ [67] = PINGROUP(67, pri_mi2s, qup8, NA, atest_usb2, NA, NA, NA,
+ NA, NA),
+ [68] = PINGROUP(68, pri_mi2s, qup8, NA, atest_usb23, NA, NA, NA,
+ NA, NA),
+ [69] = PINGROUP(69, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [70] = PINGROUP(70, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [71] = PINGROUP(71, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [72] = PINGROUP(72, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [73] = PINGROUP(73, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [74] = PINGROUP(74, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [75] = PINGROUP(75, ter_mi2s, phase_flag8, qdss_gpio8,
+ atest_usb22, QUP_L4, NA, NA, NA, NA),
+ [76] = PINGROUP(76, ter_mi2s, phase_flag9, qdss_gpio9,
+ atest_usb21, QUP_L5, NA, NA, NA, NA),
+ [77] = PINGROUP(77, ter_mi2s, phase_flag4, qdss_gpio10,
+ atest_usb20, QUP_L6, NA, NA, NA, NA),
+ [78] = PINGROUP(78, ter_mi2s, gcc_gp1, NA, NA, NA, NA, NA, NA,
+ NA),
+ [79] = PINGROUP(79, sec_mi2s, GP_PDM2, NA, qdss_gpio11, NA, NA,
+ NA, NA, NA),
+ [80] = PINGROUP(80, sec_mi2s, NA, qdss_gpio12, NA, NA, NA, NA,
+ NA, NA),
+ [81] = PINGROUP(81, sec_mi2s, qup15, NA, NA, NA, NA, NA, NA, NA),
+ [82] = PINGROUP(82, sec_mi2s, qup15, NA, NA, NA, NA, NA, NA, NA),
+ [83] = PINGROUP(83, sec_mi2s, qup15, NA, NA, NA, NA, NA, NA, NA),
+ [84] = PINGROUP(84, qup15, NA, NA, NA, NA, NA, NA, NA, NA),
+ [85] = PINGROUP(85, qup5, NA, NA, NA, NA, NA, NA, NA, NA),
+ [86] = PINGROUP(86, qup5, copy_gp, NA, NA, NA, NA, NA, NA, NA),
+ [87] = PINGROUP(87, qup5, NA, NA, NA, NA, NA, NA, NA, NA),
+ [88] = PINGROUP(88, qup5, NA, NA, NA, NA, NA, NA, NA, NA),
+ [89] = PINGROUP(89, tsif1_clk, qup4, tgu_ch3, phase_flag10, NA,
+ NA, NA, NA, NA),
+ [90] = PINGROUP(90, tsif1_en, mdp_vsync0, qup4, mdp_vsync1,
+ mdp_vsync2, mdp_vsync3, tgu_ch0, phase_flag0, qdss_cti),
+ [91] = PINGROUP(91, tsif1_data, sdc4_cmd, qup4, tgu_ch1, NA,
+ qdss_cti, NA, NA, NA),
+ [92] = PINGROUP(92, tsif2_error, sdc43, qup4, vfr_1, tgu_ch2,
+ NA, NA, NA, NA),
+ [93] = PINGROUP(93, tsif2_clk, sdc4_clk, qup7, NA, qdss_gpio13,
+ NA, NA, NA, NA),
+ [94] = PINGROUP(94, tsif2_en, sdc42, qup7, NA, NA, NA, NA, NA,
+ NA),
+ [95] = PINGROUP(95, tsif2_data, sdc41, qup7, GP_PDM0, NA, NA,
+ NA, NA, NA),
+ [96] = PINGROUP(96, tsif2_sync, sdc40, qup7, phase_flag3, NA,
+ NA, NA, NA, NA),
+ [97] = PINGROUP(97, NA, NA, mdp_vsync, ldo_en, NA, NA, NA, NA,
+ NA),
+ [98] = PINGROUP(98, NA, mdp_vsync, ldo_update, NA, NA, NA, NA,
+ NA, NA),
+ [99] = PINGROUP(99, phase_flag14, prng_rosc, NA, NA, NA, NA, NA,
+ NA, NA),
+ [100] = PINGROUP(100, phase_flag15, NA, NA, NA, NA, NA, NA, NA,
+ NA),
+ [101] = PINGROUP(101, NA, phase_flag5, NA, NA, NA, NA, NA, NA,
+ NA),
+ [102] = PINGROUP(102, pci_e1, prng_rosc, NA, NA, NA, NA, NA, NA,
+ NA),
+ [103] = PINGROUP(103, pci_e1, COPY_PHASE, NA, NA, NA, NA, NA, NA,
+ NA),
+ [104] = PINGROUP(104, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [105] = PINGROUP(105, uim2_data, qup13, qup_l4, NA, NA, NA, NA,
+ NA, NA),
+ [106] = PINGROUP(106, uim2_clk, qup13, qup_l5, NA, NA, NA, NA,
+ NA, NA),
+ [107] = PINGROUP(107, uim2_reset, qup13, qup_l6, NA, NA, NA, NA,
+ NA, NA),
+ [108] = PINGROUP(108, uim2_present, qup13, NA, NA, NA, NA, NA,
+ NA, NA),
+ [109] = PINGROUP(109, uim1_data, NA, NA, NA, NA, NA, NA, NA, NA),
+ [110] = PINGROUP(110, uim1_clk, NA, NA, NA, NA, NA, NA, NA, NA),
+ [111] = PINGROUP(111, uim1_reset, NA, NA, NA, NA, NA, NA, NA,
+ NA),
+ [112] = PINGROUP(112, uim1_present, NA, NA, NA, NA, NA, NA, NA,
+ NA),
+ [113] = PINGROUP(113, uim_batt, edp_hot, NA, NA, NA, NA, NA, NA,
+ NA),
+ [114] = PINGROUP(114, NA, NAV_PPS, NAV_PPS, GPS_TX, NA, NA, NA,
+ NA, NA),
+ [115] = PINGROUP(115, NA, NAV_PPS, NAV_PPS, GPS_TX, NA, NA, NA,
+ NA, NA),
+ [116] = PINGROUP(116, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [117] = PINGROUP(117, NA, qdss_gpio0, atest_char, NA, NA, NA,
+ NA, NA, NA),
+ [118] = PINGROUP(118, adsp_ext, NA, qdss_gpio1, atest_char3, NA,
+ NA, NA, NA, NA),
+ [119] = PINGROUP(119, NA, qdss_gpio2, atest_char2, NA, NA, NA,
+ NA, NA, NA),
+ [120] = PINGROUP(120, NA, qdss_gpio3, atest_char1, NA, NA, NA,
+ NA, NA, NA),
+ [121] = PINGROUP(121, NA, qdss_gpio4, atest_char0, NA, NA, NA,
+ NA, NA, NA),
+ [122] = PINGROUP(122, NA, qdss_gpio5, NA, NA, NA, NA, NA, NA,
+ NA),
+ [123] = PINGROUP(123, qup_l4, NA, qdss_gpio, NA, NA, NA, NA, NA,
+ NA),
+ [124] = PINGROUP(124, qup_l5, NA, qdss_gpio, NA, NA, NA, NA, NA,
+ NA),
+ [125] = PINGROUP(125, qup_l6, NA, NA, NA, NA, NA, NA, NA, NA),
+ [126] = PINGROUP(126, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [127] = PINGROUP(127, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [128] = PINGROUP(128, NAV_PPS, NAV_PPS, GPS_TX, NA, NA, NA, NA,
+ NA, NA),
+ [129] = PINGROUP(129, NAV_PPS, NAV_PPS, GPS_TX, NA, NA, NA, NA,
+ NA, NA),
+ [130] = PINGROUP(130, qlink_request, NA, NA, NA, NA, NA, NA, NA,
+ NA),
+ [131] = PINGROUP(131, qlink_enable, NA, NA, NA, NA, NA, NA, NA,
+ NA),
+ [132] = PINGROUP(132, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [133] = PINGROUP(133, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [134] = PINGROUP(134, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [135] = PINGROUP(135, NA, pa_indicator, NA, NA, NA, NA, NA, NA,
+ NA),
+ [136] = PINGROUP(136, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [137] = PINGROUP(137, NA, NA, phase_flag26, NA, NA, NA, NA, NA,
+ NA),
+ [138] = PINGROUP(138, NA, NA, phase_flag27, NA, NA, NA, NA, NA,
+ NA),
+ [139] = PINGROUP(139, NA, phase_flag28, NA, NA, NA, NA, NA, NA,
+ NA),
+ [140] = PINGROUP(140, NA, NA, phase_flag6, NA, NA, NA, NA, NA,
+ NA),
+ [141] = PINGROUP(141, NA, phase_flag29, NA, NA, NA, NA, NA, NA,
+ NA),
+ [142] = PINGROUP(142, NA, phase_flag30, NA, NA, NA, NA, NA, NA,
+ NA),
+ [143] = PINGROUP(143, NA, NAV_PPS, NAV_PPS, GPS_TX, phase_flag31,
+ NA, NA, NA, NA),
+ [144] = PINGROUP(144, mss_lte, NA, NA, NA, NA, NA, NA, NA, NA),
+ [145] = PINGROUP(145, mss_lte, GPS_TX, NA, NA, NA, NA, NA, NA,
+ NA),
+ [146] = PINGROUP(146, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [147] = PINGROUP(147, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [148] = PINGROUP(148, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [149] = PINGROUP(149, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [150] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x599000, 15, 0),
+ [151] = SDC_QDSD_PINGROUP(sdc1_clk, 0x599000, 13, 6),
+ [152] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x599000, 11, 3),
+ [153] = SDC_QDSD_PINGROUP(sdc1_data, 0x599000, 9, 0),
+ [154] = SDC_QDSD_PINGROUP(sdc2_clk, 0x99a000, 14, 6),
+ [155] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x99a000, 11, 3),
+ [156] = SDC_QDSD_PINGROUP(sdc2_data, 0x99a000, 9, 0),
+ [157] = UFS_RESET(ufs_reset, 0x99f000),
+};
+
+static const struct msm_dir_conn sdm670_dir_conn[] = {
+ {1, 510},
+ {3, 511},
+ {5, 512},
+ {10, 513},
+ {11, 514},
+ {20, 515},
+ {22, 516},
+ {24, 517},
+ {26, 518},
+ {30, 519},
+ {31, 639},
+ {32, 521},
+ {34, 522},
+ {36, 523},
+ {37, 524},
+ {38, 525},
+ {39, 526},
+ {40, 527},
+ {41, 637},
+ {43, 529},
+ {44, 530},
+ {46, 531},
+ {48, 532},
+ {49, 640},
+ {52, 534},
+ {53, 535},
+ {54, 536},
+ {56, 537},
+ {57, 538},
+ {66, 546},
+ {68, 547},
+ {77, 550},
+ {78, 551},
+ {79, 552},
+ {80, 553},
+ {84, 554},
+ {85, 555},
+ {86, 556},
+ {88, 557},
+ {89, 638},
+ {91, 559},
+ {92, 560},
+ {95, 561},
+ {96, 562},
+ {97, 563},
+ {101, 564},
+ {103, 565},
+ {115, 570},
+ {116, 571},
+ {117, 572},
+ {118, 573},
+ {119, 609},
+ {120, 610},
+ {121, 611},
+ {122, 612},
+ {123, 613},
+ {124, 614},
+ {125, 615},
+ {127, 617},
+ {128, 618},
+ {129, 619},
+ {130, 620},
+ {132, 621},
+ {133, 622},
+ {145, 623},
};
static const struct msm_pinctrl_soc_data sdm670_pinctrl = {
@@ -2251,7 +1660,13 @@
.nfunctions = ARRAY_SIZE(sdm670_functions),
.groups = sdm670_groups,
.ngroups = ARRAY_SIZE(sdm670_groups),
- .ngpios = 136,
+ .ngpios = 150,
+ .dir_conn = sdm670_dir_conn,
+ .n_dir_conns = ARRAY_SIZE(sdm670_dir_conn),
+ .tile_offsets = sdm670_tile_offsets,
+ .n_tile_offsets = ARRAY_SIZE(sdm670_tile_offsets),
+ .pin_base = sdm670_pin_base,
+ .reg_size = REG_SIZE,
};
static int sdm670_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm845.c b/drivers/pinctrl/qcom/pinctrl-sdm845.c
index 7d125eb..8faabb0 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm845.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm845.c
@@ -25,10 +25,8 @@
.ngroups = ARRAY_SIZE(fname##_groups), \
}
-#define NORTH 0x00500000
-#define SOUTH 0x00900000
#define REG_SIZE 0x1000
-#define PINGROUP(id, base, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
{ \
.name = "gpio" #id, \
.pins = gpio##id##_pins, \
@@ -46,11 +44,11 @@
msm_mux_##f9 \
}, \
.nfuncs = 10, \
- .ctl_reg = base + REG_SIZE * id, \
- .io_reg = base + 0x4 + REG_SIZE * id, \
- .intr_cfg_reg = base + 0x8 + REG_SIZE * id, \
- .intr_status_reg = base + 0xc + REG_SIZE * id, \
- .intr_target_reg = base + 0x8 + REG_SIZE * id, \
+ .ctl_reg = REG_SIZE * id, \
+ .io_reg = 0x4 + REG_SIZE * id, \
+ .intr_cfg_reg = 0x8 + REG_SIZE * id, \
+ .intr_status_reg = 0xc + REG_SIZE * id, \
+ .intr_target_reg = 0x8 + REG_SIZE * id, \
.mux_bit = 2, \
.pull_bit = 0, \
.drv_bit = 6, \
@@ -116,6 +114,10 @@
.intr_detection_bit = -1, \
.intr_detection_width = -1, \
}
+
+static const u32 sdm845_tile_offsets[] = {0x500000, 0x900000, 0x100000};
+static u32 sdm845_pin_base[150];
+
static const struct pinctrl_pin_desc sdm845_pins[] = {
PINCTRL_PIN(0, "GPIO_0"),
PINCTRL_PIN(1, "GPIO_1"),
@@ -432,306 +434,99 @@
static const unsigned int ufs_reset_pins[] = { 153 };
enum sdm845_functions {
- msm_mux_gpio,
- msm_mux_qup0,
- msm_mux_reserved0,
- msm_mux_reserved1,
- msm_mux_reserved2,
- msm_mux_reserved3,
- msm_mux_qup9,
- msm_mux_qdss_cti,
- msm_mux_reserved4,
- msm_mux_reserved5,
- msm_mux_ddr_pxi0,
- msm_mux_reserved6,
- msm_mux_ddr_bist,
- msm_mux_atest_tsens2,
- msm_mux_vsense_trigger,
- msm_mux_atest_usb1,
- msm_mux_reserved7,
- msm_mux_qup_l4,
- msm_mux_wlan1_adc1,
- msm_mux_atest_usb13,
- msm_mux_ddr_pxi1,
- msm_mux_reserved8,
- msm_mux_qup_l5,
- msm_mux_wlan1_adc0,
- msm_mux_atest_usb12,
- msm_mux_reserved9,
- msm_mux_mdp_vsync,
- msm_mux_qup_l6,
- msm_mux_wlan2_adc1,
- msm_mux_atest_usb11,
- msm_mux_ddr_pxi2,
- msm_mux_reserved10,
- msm_mux_edp_lcd,
- msm_mux_dbg_out,
- msm_mux_wlan2_adc0,
- msm_mux_atest_usb10,
- msm_mux_reserved11,
- msm_mux_m_voc,
- msm_mux_tsif1_sync,
msm_mux_ddr_pxi3,
- msm_mux_reserved12,
msm_mux_cam_mclk,
msm_mux_pll_bypassnl,
msm_mux_qdss_gpio0,
- msm_mux_reserved13,
msm_mux_pll_reset,
msm_mux_qdss_gpio1,
- msm_mux_reserved14,
msm_mux_qdss_gpio2,
- msm_mux_reserved15,
msm_mux_qdss_gpio3,
- msm_mux_reserved16,
msm_mux_cci_i2c,
msm_mux_qup1,
msm_mux_qdss_gpio4,
- msm_mux_reserved17,
msm_mux_qdss_gpio5,
- msm_mux_reserved18,
msm_mux_qdss_gpio6,
- msm_mux_reserved19,
msm_mux_qdss_gpio7,
- msm_mux_reserved20,
msm_mux_cci_timer0,
msm_mux_gcc_gp2,
msm_mux_qdss_gpio8,
- msm_mux_reserved21,
msm_mux_cci_timer1,
msm_mux_gcc_gp3,
msm_mux_qdss_gpio,
- msm_mux_reserved22,
msm_mux_cci_timer2,
msm_mux_qdss_gpio9,
- msm_mux_reserved23,
msm_mux_cci_timer3,
msm_mux_cci_async,
msm_mux_qdss_gpio10,
- msm_mux_reserved24,
msm_mux_cci_timer4,
msm_mux_qdss_gpio11,
- msm_mux_reserved25,
msm_mux_qdss_gpio12,
- msm_mux_reserved26,
msm_mux_qup2,
msm_mux_qdss_gpio13,
- msm_mux_reserved27,
msm_mux_qdss_gpio14,
- msm_mux_reserved28,
msm_mux_phase_flag1,
msm_mux_qdss_gpio15,
- msm_mux_reserved29,
msm_mux_phase_flag2,
- msm_mux_reserved30,
msm_mux_qup11,
msm_mux_qup14,
- msm_mux_reserved96,
- msm_mux_ldo_en,
- msm_mux_reserved97,
- msm_mux_ldo_update,
- msm_mux_reserved98,
- msm_mux_phase_flag14,
- msm_mux_reserved99,
- msm_mux_phase_flag15,
- msm_mux_reserved100,
- msm_mux_reserved101,
- msm_mux_pci_e1,
- msm_mux_prng_rosc,
- msm_mux_reserved102,
- msm_mux_phase_flag5,
- msm_mux_reserved103,
- msm_mux_reserved104,
- msm_mux_uim2_data,
- msm_mux_qup13,
- msm_mux_reserved105,
- msm_mux_uim2_clk,
- msm_mux_reserved106,
- msm_mux_uim2_reset,
- msm_mux_reserved107,
- msm_mux_uim2_present,
- msm_mux_reserved108,
- msm_mux_uim1_data,
- msm_mux_reserved109,
- msm_mux_uim1_clk,
- msm_mux_reserved110,
- msm_mux_uim1_reset,
- msm_mux_reserved111,
- msm_mux_uim1_present,
- msm_mux_reserved112,
- msm_mux_uim_batt,
- msm_mux_edp_hot,
- msm_mux_reserved113,
- msm_mux_nav_pps,
- msm_mux_reserved114,
- msm_mux_reserved115,
- msm_mux_reserved116,
- msm_mux_atest_char,
- msm_mux_reserved117,
- msm_mux_adsp_ext,
- msm_mux_atest_char3,
- msm_mux_reserved118,
- msm_mux_atest_char2,
- msm_mux_reserved119,
- msm_mux_atest_char1,
- msm_mux_reserved120,
- msm_mux_atest_char0,
- msm_mux_reserved121,
- msm_mux_reserved122,
- msm_mux_reserved123,
- msm_mux_reserved124,
- msm_mux_reserved125,
- msm_mux_reserved126,
- msm_mux_reserved127,
- msm_mux_reserved128,
- msm_mux_reserved129,
- msm_mux_qlink_request,
- msm_mux_reserved130,
- msm_mux_qlink_enable,
- msm_mux_reserved131,
- msm_mux_reserved132,
- msm_mux_reserved133,
- msm_mux_reserved134,
- msm_mux_pa_indicator,
- msm_mux_reserved135,
- msm_mux_reserved136,
- msm_mux_phase_flag26,
- msm_mux_reserved137,
- msm_mux_phase_flag27,
- msm_mux_reserved138,
- msm_mux_phase_flag28,
- msm_mux_reserved139,
- msm_mux_phase_flag6,
- msm_mux_reserved140,
- msm_mux_phase_flag29,
- msm_mux_reserved141,
- msm_mux_phase_flag30,
- msm_mux_reserved142,
- msm_mux_phase_flag31,
- msm_mux_reserved143,
- msm_mux_mss_lte,
- msm_mux_reserved144,
- msm_mux_reserved145,
- msm_mux_reserved146,
- msm_mux_reserved147,
- msm_mux_reserved148,
- msm_mux_reserved149,
- msm_mux_reserved31,
- msm_mux_reserved32,
- msm_mux_reserved33,
- msm_mux_reserved34,
msm_mux_pci_e0,
msm_mux_jitter_bist,
- msm_mux_reserved35,
msm_mux_pll_bist,
msm_mux_atest_tsens,
- msm_mux_reserved36,
msm_mux_agera_pll,
- msm_mux_reserved37,
msm_mux_usb_phy,
- msm_mux_reserved38,
msm_mux_lpass_slimbus,
- msm_mux_reserved39,
msm_mux_sd_write,
msm_mux_tsif1_error,
- msm_mux_reserved40,
msm_mux_qup3,
- msm_mux_reserved41,
- msm_mux_reserved42,
- msm_mux_reserved43,
- msm_mux_reserved44,
msm_mux_qup6,
- msm_mux_reserved45,
- msm_mux_reserved46,
- msm_mux_reserved47,
- msm_mux_reserved48,
msm_mux_qup12,
- msm_mux_reserved49,
- msm_mux_reserved50,
- msm_mux_reserved51,
msm_mux_phase_flag16,
- msm_mux_reserved52,
msm_mux_qup10,
msm_mux_phase_flag11,
- msm_mux_reserved53,
msm_mux_phase_flag12,
- msm_mux_reserved54,
msm_mux_phase_flag13,
- msm_mux_reserved55,
msm_mux_phase_flag17,
- msm_mux_reserved56,
msm_mux_qua_mi2s,
msm_mux_gcc_gp1,
msm_mux_phase_flag18,
- msm_mux_reserved57,
msm_mux_phase_flag19,
- msm_mux_reserved58,
msm_mux_phase_flag20,
- msm_mux_reserved59,
msm_mux_cri_trng0,
msm_mux_phase_flag21,
- msm_mux_reserved60,
msm_mux_cri_trng1,
msm_mux_phase_flag22,
- msm_mux_reserved61,
msm_mux_cri_trng,
msm_mux_phase_flag23,
- msm_mux_reserved62,
msm_mux_phase_flag24,
- msm_mux_reserved63,
msm_mux_pri_mi2s,
msm_mux_sp_cmu,
msm_mux_phase_flag25,
- msm_mux_reserved64,
msm_mux_qup8,
- msm_mux_reserved65,
msm_mux_pri_mi2s_ws,
- msm_mux_reserved66,
- msm_mux_reserved67,
- msm_mux_reserved68,
msm_mux_spkr_i2s,
msm_mux_audio_ref,
- msm_mux_reserved69,
- msm_mux_reserved70,
msm_mux_tsense_pwm1,
msm_mux_tsense_pwm2,
- msm_mux_reserved71,
- msm_mux_reserved72,
msm_mux_btfm_slimbus,
msm_mux_atest_usb2,
- msm_mux_reserved73,
msm_mux_ter_mi2s,
msm_mux_phase_flag7,
msm_mux_atest_usb23,
- msm_mux_reserved74,
msm_mux_phase_flag8,
msm_mux_atest_usb22,
- msm_mux_reserved75,
msm_mux_phase_flag9,
msm_mux_atest_usb21,
- msm_mux_reserved76,
msm_mux_phase_flag4,
msm_mux_atest_usb20,
- msm_mux_reserved77,
- msm_mux_reserved78,
msm_mux_sec_mi2s,
- msm_mux_reserved79,
- msm_mux_reserved80,
msm_mux_qup15,
- msm_mux_reserved81,
- msm_mux_reserved82,
- msm_mux_reserved83,
- msm_mux_reserved84,
msm_mux_qup5,
- msm_mux_reserved85,
- msm_mux_reserved86,
- msm_mux_reserved87,
- msm_mux_reserved88,
msm_mux_tsif1_clk,
msm_mux_qup4,
msm_mux_tgu_ch3,
msm_mux_phase_flag10,
- msm_mux_reserved89,
msm_mux_tsif1_en,
msm_mux_mdp_vsync0,
msm_mux_mdp_vsync1,
@@ -739,32 +534,552 @@
msm_mux_mdp_vsync3,
msm_mux_tgu_ch0,
msm_mux_phase_flag0,
- msm_mux_reserved90,
msm_mux_tsif1_data,
msm_mux_sdc4_cmd,
msm_mux_tgu_ch1,
- msm_mux_reserved91,
msm_mux_tsif2_error,
msm_mux_sdc43,
msm_mux_vfr_1,
msm_mux_tgu_ch2,
- msm_mux_reserved92,
msm_mux_tsif2_clk,
msm_mux_sdc4_clk,
msm_mux_qup7,
- msm_mux_reserved93,
msm_mux_tsif2_en,
msm_mux_sdc42,
- msm_mux_reserved94,
msm_mux_tsif2_data,
msm_mux_sdc41,
- msm_mux_reserved95,
msm_mux_tsif2_sync,
msm_mux_sdc40,
msm_mux_phase_flag3,
+ msm_mux_ldo_en,
+ msm_mux_ldo_update,
+ msm_mux_phase_flag14,
+ msm_mux_phase_flag15,
+ msm_mux_pci_e1,
+ msm_mux_prng_rosc,
+ msm_mux_phase_flag5,
+ msm_mux_uim2_data,
+ msm_mux_qup13,
+ msm_mux_uim2_clk,
+ msm_mux_uim2_reset,
+ msm_mux_uim2_present,
+ msm_mux_uim1_data,
+ msm_mux_uim1_clk,
+ msm_mux_uim1_reset,
+ msm_mux_uim1_present,
+ msm_mux_uim_batt,
+ msm_mux_edp_hot,
+ msm_mux_nav_pps,
+ msm_mux_atest_char,
+ msm_mux_adsp_ext,
+ msm_mux_atest_char3,
+ msm_mux_atest_char2,
+ msm_mux_atest_char1,
+ msm_mux_atest_char0,
+ msm_mux_qlink_request,
+ msm_mux_qlink_enable,
+ msm_mux_pa_indicator,
+ msm_mux_phase_flag26,
+ msm_mux_phase_flag27,
+ msm_mux_phase_flag28,
+ msm_mux_phase_flag6,
+ msm_mux_phase_flag29,
+ msm_mux_phase_flag30,
+ msm_mux_phase_flag31,
+ msm_mux_mss_lte,
+ msm_mux_qup0,
+ msm_mux_gpio,
+ msm_mux_qup9,
+ msm_mux_qdss_cti,
+ msm_mux_ddr_pxi0,
+ msm_mux_ddr_bist,
+ msm_mux_atest_tsens2,
+ msm_mux_vsense_trigger,
+ msm_mux_atest_usb1,
+ msm_mux_qup_l4,
+ msm_mux_wlan1_adc1,
+ msm_mux_atest_usb13,
+ msm_mux_ddr_pxi1,
+ msm_mux_qup_l5,
+ msm_mux_wlan1_adc0,
+ msm_mux_atest_usb12,
+ msm_mux_mdp_vsync,
+ msm_mux_qup_l6,
+ msm_mux_wlan2_adc1,
+ msm_mux_atest_usb11,
+ msm_mux_ddr_pxi2,
+ msm_mux_edp_lcd,
+ msm_mux_dbg_out,
+ msm_mux_wlan2_adc0,
+ msm_mux_atest_usb10,
+ msm_mux_m_voc,
+ msm_mux_tsif1_sync,
msm_mux_NA,
};
+static const char * const ddr_pxi3_groups[] = {
+ "gpio12", "gpio13",
+};
+static const char * const cam_mclk_groups[] = {
+ "gpio13", "gpio14", "gpio15", "gpio16",
+};
+static const char * const pll_bypassnl_groups[] = {
+ "gpio13",
+};
+static const char * const qdss_gpio0_groups[] = {
+ "gpio13", "gpio117",
+};
+static const char * const pll_reset_groups[] = {
+ "gpio14",
+};
+static const char * const qdss_gpio1_groups[] = {
+ "gpio14", "gpio118",
+};
+static const char * const qdss_gpio2_groups[] = {
+ "gpio15", "gpio119",
+};
+static const char * const qdss_gpio3_groups[] = {
+ "gpio16", "gpio120",
+};
+static const char * const cci_i2c_groups[] = {
+ "gpio17", "gpio18", "gpio19", "gpio20",
+};
+static const char * const qup1_groups[] = {
+ "gpio17", "gpio18", "gpio19", "gpio20",
+};
+static const char * const qdss_gpio4_groups[] = {
+ "gpio17", "gpio121",
+};
+static const char * const qdss_gpio5_groups[] = {
+ "gpio18", "gpio122",
+};
+static const char * const qdss_gpio6_groups[] = {
+ "gpio19", "gpio41",
+};
+static const char * const qdss_gpio7_groups[] = {
+ "gpio20", "gpio42",
+};
+static const char * const cci_timer0_groups[] = {
+ "gpio21",
+};
+static const char * const gcc_gp2_groups[] = {
+ "gpio21", "gpio58",
+};
+static const char * const qdss_gpio8_groups[] = {
+ "gpio21", "gpio75",
+};
+static const char * const cci_timer1_groups[] = {
+ "gpio22",
+};
+static const char * const gcc_gp3_groups[] = {
+ "gpio22", "gpio59",
+};
+static const char * const qdss_gpio_groups[] = {
+ "gpio22", "gpio30", "gpio123", "gpio124",
+};
+static const char * const cci_timer2_groups[] = {
+ "gpio23",
+};
+static const char * const qdss_gpio9_groups[] = {
+ "gpio23", "gpio76",
+};
+static const char * const cci_timer3_groups[] = {
+ "gpio24",
+};
+static const char * const cci_async_groups[] = {
+ "gpio24", "gpio25", "gpio26",
+};
+static const char * const qdss_gpio10_groups[] = {
+ "gpio24", "gpio77",
+};
+static const char * const cci_timer4_groups[] = {
+ "gpio25",
+};
+static const char * const qdss_gpio11_groups[] = {
+ "gpio25", "gpio79",
+};
+static const char * const qdss_gpio12_groups[] = {
+ "gpio26", "gpio80",
+};
+static const char * const qup2_groups[] = {
+ "gpio27", "gpio28", "gpio29", "gpio30",
+};
+static const char * const qdss_gpio13_groups[] = {
+ "gpio27", "gpio93",
+};
+static const char * const qdss_gpio14_groups[] = {
+ "gpio28", "gpio43",
+};
+static const char * const phase_flag1_groups[] = {
+ "gpio29",
+};
+static const char * const qdss_gpio15_groups[] = {
+ "gpio29", "gpio44",
+};
+static const char * const phase_flag2_groups[] = {
+ "gpio30",
+};
+static const char * const qup11_groups[] = {
+ "gpio31", "gpio32", "gpio33", "gpio34",
+};
+static const char * const qup14_groups[] = {
+ "gpio31", "gpio32", "gpio33", "gpio34",
+};
+static const char * const pci_e0_groups[] = {
+ "gpio35", "gpio36",
+};
+static const char * const jitter_bist_groups[] = {
+ "gpio35",
+};
+static const char * const pll_bist_groups[] = {
+ "gpio36",
+};
+static const char * const atest_tsens_groups[] = {
+ "gpio36",
+};
+static const char * const agera_pll_groups[] = {
+ "gpio37",
+};
+static const char * const usb_phy_groups[] = {
+ "gpio38",
+};
+static const char * const lpass_slimbus_groups[] = {
+ "gpio39", "gpio70", "gpio71", "gpio72",
+};
+static const char * const sd_write_groups[] = {
+ "gpio40",
+};
+static const char * const tsif1_error_groups[] = {
+ "gpio40",
+};
+static const char * const qup3_groups[] = {
+ "gpio41", "gpio42", "gpio43", "gpio44",
+};
+static const char * const qup6_groups[] = {
+ "gpio45", "gpio46", "gpio47", "gpio48",
+};
+static const char * const qup12_groups[] = {
+ "gpio49", "gpio50", "gpio51", "gpio52",
+};
+static const char * const phase_flag16_groups[] = {
+ "gpio52",
+};
+static const char * const qup10_groups[] = {
+ "gpio53", "gpio54", "gpio55", "gpio56",
+};
+static const char * const phase_flag11_groups[] = {
+ "gpio53",
+};
+static const char * const phase_flag12_groups[] = {
+ "gpio54",
+};
+static const char * const phase_flag13_groups[] = {
+ "gpio55",
+};
+static const char * const phase_flag17_groups[] = {
+ "gpio56",
+};
+static const char * const qua_mi2s_groups[] = {
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+};
+static const char * const gcc_gp1_groups[] = {
+ "gpio57", "gpio78",
+};
+static const char * const phase_flag18_groups[] = {
+ "gpio57",
+};
+static const char * const phase_flag19_groups[] = {
+ "gpio58",
+};
+static const char * const phase_flag20_groups[] = {
+ "gpio59",
+};
+static const char * const cri_trng0_groups[] = {
+ "gpio60",
+};
+static const char * const phase_flag21_groups[] = {
+ "gpio60",
+};
+static const char * const cri_trng1_groups[] = {
+ "gpio61",
+};
+static const char * const phase_flag22_groups[] = {
+ "gpio61",
+};
+static const char * const cri_trng_groups[] = {
+ "gpio62",
+};
+static const char * const phase_flag23_groups[] = {
+ "gpio62",
+};
+static const char * const phase_flag24_groups[] = {
+ "gpio63",
+};
+static const char * const pri_mi2s_groups[] = {
+ "gpio64", "gpio65", "gpio67", "gpio68",
+};
+static const char * const sp_cmu_groups[] = {
+ "gpio64",
+};
+static const char * const phase_flag25_groups[] = {
+ "gpio64",
+};
+static const char * const qup8_groups[] = {
+ "gpio65", "gpio66", "gpio67", "gpio68",
+};
+static const char * const pri_mi2s_ws_groups[] = {
+ "gpio66",
+};
+static const char * const spkr_i2s_groups[] = {
+ "gpio69", "gpio70", "gpio71", "gpio72",
+};
+static const char * const audio_ref_groups[] = {
+ "gpio69",
+};
+static const char * const tsense_pwm1_groups[] = {
+ "gpio71",
+};
+static const char * const tsense_pwm2_groups[] = {
+ "gpio71",
+};
+static const char * const btfm_slimbus_groups[] = {
+ "gpio73", "gpio74",
+};
+static const char * const atest_usb2_groups[] = {
+ "gpio73",
+};
+static const char * const ter_mi2s_groups[] = {
+ "gpio74", "gpio75", "gpio76", "gpio77", "gpio78",
+};
+static const char * const phase_flag7_groups[] = {
+ "gpio74",
+};
+static const char * const atest_usb23_groups[] = {
+ "gpio74",
+};
+static const char * const phase_flag8_groups[] = {
+ "gpio75",
+};
+static const char * const atest_usb22_groups[] = {
+ "gpio75",
+};
+static const char * const phase_flag9_groups[] = {
+ "gpio76",
+};
+static const char * const atest_usb21_groups[] = {
+ "gpio76",
+};
+static const char * const phase_flag4_groups[] = {
+ "gpio77",
+};
+static const char * const atest_usb20_groups[] = {
+ "gpio77",
+};
+static const char * const sec_mi2s_groups[] = {
+ "gpio79", "gpio80", "gpio81", "gpio82", "gpio83",
+};
+static const char * const qup15_groups[] = {
+ "gpio81", "gpio82", "gpio83", "gpio84",
+};
+static const char * const qup5_groups[] = {
+ "gpio85", "gpio86", "gpio87", "gpio88",
+};
+static const char * const tsif1_clk_groups[] = {
+ "gpio89",
+};
+static const char * const qup4_groups[] = {
+ "gpio89", "gpio90", "gpio91", "gpio92",
+};
+static const char * const tgu_ch3_groups[] = {
+ "gpio89",
+};
+static const char * const phase_flag10_groups[] = {
+ "gpio89",
+};
+static const char * const tsif1_en_groups[] = {
+ "gpio90",
+};
+static const char * const mdp_vsync0_groups[] = {
+ "gpio90",
+};
+static const char * const mdp_vsync1_groups[] = {
+ "gpio90",
+};
+static const char * const mdp_vsync2_groups[] = {
+ "gpio90",
+};
+static const char * const mdp_vsync3_groups[] = {
+ "gpio90",
+};
+static const char * const tgu_ch0_groups[] = {
+ "gpio90",
+};
+static const char * const phase_flag0_groups[] = {
+ "gpio90",
+};
+static const char * const tsif1_data_groups[] = {
+ "gpio91",
+};
+static const char * const sdc4_cmd_groups[] = {
+ "gpio91",
+};
+static const char * const tgu_ch1_groups[] = {
+ "gpio91",
+};
+static const char * const tsif2_error_groups[] = {
+ "gpio92",
+};
+static const char * const sdc43_groups[] = {
+ "gpio92",
+};
+static const char * const vfr_1_groups[] = {
+ "gpio92",
+};
+static const char * const tgu_ch2_groups[] = {
+ "gpio92",
+};
+static const char * const tsif2_clk_groups[] = {
+ "gpio93",
+};
+static const char * const sdc4_clk_groups[] = {
+ "gpio93",
+};
+static const char * const qup7_groups[] = {
+ "gpio93", "gpio94", "gpio95", "gpio96",
+};
+static const char * const tsif2_en_groups[] = {
+ "gpio94",
+};
+static const char * const sdc42_groups[] = {
+ "gpio94",
+};
+static const char * const tsif2_data_groups[] = {
+ "gpio95",
+};
+static const char * const sdc41_groups[] = {
+ "gpio95",
+};
+static const char * const tsif2_sync_groups[] = {
+ "gpio96",
+};
+static const char * const sdc40_groups[] = {
+ "gpio96",
+};
+static const char * const phase_flag3_groups[] = {
+ "gpio96",
+};
+static const char * const ldo_en_groups[] = {
+ "gpio97",
+};
+static const char * const ldo_update_groups[] = {
+ "gpio98",
+};
+static const char * const phase_flag14_groups[] = {
+ "gpio99",
+};
+static const char * const phase_flag15_groups[] = {
+ "gpio100",
+};
+static const char * const pci_e1_groups[] = {
+ "gpio102", "gpio103",
+};
+static const char * const prng_rosc_groups[] = {
+ "gpio102",
+};
+static const char * const phase_flag5_groups[] = {
+ "gpio103",
+};
+static const char * const uim2_data_groups[] = {
+ "gpio105",
+};
+static const char * const qup13_groups[] = {
+ "gpio105", "gpio106", "gpio107", "gpio108",
+};
+static const char * const uim2_clk_groups[] = {
+ "gpio106",
+};
+static const char * const uim2_reset_groups[] = {
+ "gpio107",
+};
+static const char * const uim2_present_groups[] = {
+ "gpio108",
+};
+static const char * const uim1_data_groups[] = {
+ "gpio109",
+};
+static const char * const uim1_clk_groups[] = {
+ "gpio110",
+};
+static const char * const uim1_reset_groups[] = {
+ "gpio111",
+};
+static const char * const uim1_present_groups[] = {
+ "gpio112",
+};
+static const char * const uim_batt_groups[] = {
+ "gpio113",
+};
+static const char * const edp_hot_groups[] = {
+ "gpio113",
+};
+static const char * const nav_pps_groups[] = {
+ "gpio114", "gpio114", "gpio115", "gpio115", "gpio128", "gpio128",
+ "gpio129", "gpio129", "gpio143", "gpio143",
+};
+static const char * const atest_char_groups[] = {
+ "gpio117",
+};
+static const char * const adsp_ext_groups[] = {
+ "gpio118",
+};
+static const char * const atest_char3_groups[] = {
+ "gpio118",
+};
+static const char * const atest_char2_groups[] = {
+ "gpio119",
+};
+static const char * const atest_char1_groups[] = {
+ "gpio120",
+};
+static const char * const atest_char0_groups[] = {
+ "gpio121",
+};
+static const char * const qlink_request_groups[] = {
+ "gpio130",
+};
+static const char * const qlink_enable_groups[] = {
+ "gpio131",
+};
+static const char * const pa_indicator_groups[] = {
+ "gpio135",
+};
+static const char * const phase_flag26_groups[] = {
+ "gpio137",
+};
+static const char * const phase_flag27_groups[] = {
+ "gpio138",
+};
+static const char * const phase_flag28_groups[] = {
+ "gpio139",
+};
+static const char * const phase_flag6_groups[] = {
+ "gpio140",
+};
+static const char * const phase_flag29_groups[] = {
+ "gpio141",
+};
+static const char * const phase_flag30_groups[] = {
+ "gpio142",
+};
+static const char * const phase_flag31_groups[] = {
+ "gpio143",
+};
+static const char * const mss_lte_groups[] = {
+ "gpio144", "gpio145",
+};
+static const char * const qup0_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
static const char * const gpio_groups[] = {
"gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
"gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
@@ -790,21 +1105,6 @@
"gpio141", "gpio142", "gpio143", "gpio144", "gpio145", "gpio146",
"gpio147", "gpio148", "gpio149",
};
-static const char * const qup0_groups[] = {
- "gpio0", "gpio1", "gpio2", "gpio3",
-};
-static const char * const reserved0_groups[] = {
- "gpio0",
-};
-static const char * const reserved1_groups[] = {
- "gpio1",
-};
-static const char * const reserved2_groups[] = {
- "gpio2",
-};
-static const char * const reserved3_groups[] = {
- "gpio3",
-};
static const char * const qup9_groups[] = {
"gpio4", "gpio5", "gpio6", "gpio7",
};
@@ -812,18 +1112,9 @@
"gpio4", "gpio5", "gpio51", "gpio52", "gpio62", "gpio63", "gpio90",
"gpio91",
};
-static const char * const reserved4_groups[] = {
- "gpio4",
-};
-static const char * const reserved5_groups[] = {
- "gpio5",
-};
static const char * const ddr_pxi0_groups[] = {
"gpio6", "gpio7",
};
-static const char * const reserved6_groups[] = {
- "gpio6",
-};
static const char * const ddr_bist_groups[] = {
"gpio7", "gpio8", "gpio9", "gpio10",
};
@@ -836,9 +1127,6 @@
static const char * const atest_usb1_groups[] = {
"gpio7",
};
-static const char * const reserved7_groups[] = {
- "gpio7",
-};
static const char * const qup_l4_groups[] = {
"gpio8", "gpio35", "gpio105", "gpio123",
};
@@ -851,9 +1139,6 @@
static const char * const ddr_pxi1_groups[] = {
"gpio8", "gpio9",
};
-static const char * const reserved8_groups[] = {
- "gpio8",
-};
static const char * const qup_l5_groups[] = {
"gpio9", "gpio36", "gpio106", "gpio124",
};
@@ -863,9 +1148,6 @@
static const char * const atest_usb12_groups[] = {
"gpio9",
};
-static const char * const reserved9_groups[] = {
- "gpio9",
-};
static const char * const mdp_vsync_groups[] = {
"gpio10", "gpio11", "gpio12", "gpio97", "gpio98",
};
@@ -881,9 +1163,6 @@
static const char * const ddr_pxi2_groups[] = {
"gpio10", "gpio11",
};
-static const char * const reserved10_groups[] = {
- "gpio10",
-};
static const char * const edp_lcd_groups[] = {
"gpio11",
};
@@ -896,1191 +1175,107 @@
static const char * const atest_usb10_groups[] = {
"gpio11",
};
-static const char * const reserved11_groups[] = {
- "gpio11",
-};
static const char * const m_voc_groups[] = {
"gpio12",
};
static const char * const tsif1_sync_groups[] = {
"gpio12",
};
-static const char * const ddr_pxi3_groups[] = {
- "gpio12", "gpio13",
-};
-static const char * const reserved12_groups[] = {
- "gpio12",
-};
-static const char * const cam_mclk_groups[] = {
- "gpio13", "gpio14", "gpio15", "gpio16",
-};
-static const char * const pll_bypassnl_groups[] = {
- "gpio13",
-};
-static const char * const qdss_gpio0_groups[] = {
- "gpio13", "gpio117",
-};
-static const char * const reserved13_groups[] = {
- "gpio13",
-};
-static const char * const pll_reset_groups[] = {
- "gpio14",
-};
-static const char * const qdss_gpio1_groups[] = {
- "gpio14", "gpio118",
-};
-static const char * const reserved14_groups[] = {
- "gpio14",
-};
-static const char * const qdss_gpio2_groups[] = {
- "gpio15", "gpio119",
-};
-static const char * const reserved15_groups[] = {
- "gpio15",
-};
-static const char * const qdss_gpio3_groups[] = {
- "gpio16", "gpio120",
-};
-static const char * const reserved16_groups[] = {
- "gpio16",
-};
-static const char * const cci_i2c_groups[] = {
- "gpio17", "gpio18", "gpio19", "gpio20",
-};
-static const char * const qup1_groups[] = {
- "gpio17", "gpio18", "gpio19", "gpio20",
-};
-static const char * const qdss_gpio4_groups[] = {
- "gpio17", "gpio121",
-};
-static const char * const reserved17_groups[] = {
- "gpio17",
-};
-static const char * const qdss_gpio5_groups[] = {
- "gpio18", "gpio122",
-};
-static const char * const reserved18_groups[] = {
- "gpio18",
-};
-static const char * const qdss_gpio6_groups[] = {
- "gpio19", "gpio41",
-};
-static const char * const reserved19_groups[] = {
- "gpio19",
-};
-static const char * const qdss_gpio7_groups[] = {
- "gpio20", "gpio42",
-};
-static const char * const reserved20_groups[] = {
- "gpio20",
-};
-static const char * const cci_timer0_groups[] = {
- "gpio21",
-};
-static const char * const gcc_gp2_groups[] = {
- "gpio21", "gpio58",
-};
-static const char * const qdss_gpio8_groups[] = {
- "gpio21", "gpio75",
-};
-static const char * const reserved21_groups[] = {
- "gpio21",
-};
-static const char * const cci_timer1_groups[] = {
- "gpio22",
-};
-static const char * const gcc_gp3_groups[] = {
- "gpio22", "gpio59",
-};
-static const char * const qdss_gpio_groups[] = {
- "gpio22", "gpio30", "gpio123", "gpio124",
-};
-static const char * const reserved22_groups[] = {
- "gpio22",
-};
-static const char * const cci_timer2_groups[] = {
- "gpio23",
-};
-static const char * const qdss_gpio9_groups[] = {
- "gpio23", "gpio76",
-};
-static const char * const reserved23_groups[] = {
- "gpio23",
-};
-static const char * const cci_timer3_groups[] = {
- "gpio24",
-};
-static const char * const cci_async_groups[] = {
- "gpio24", "gpio25", "gpio26",
-};
-static const char * const qdss_gpio10_groups[] = {
- "gpio24", "gpio77",
-};
-static const char * const reserved24_groups[] = {
- "gpio24",
-};
-static const char * const cci_timer4_groups[] = {
- "gpio25",
-};
-static const char * const qdss_gpio11_groups[] = {
- "gpio25", "gpio79",
-};
-static const char * const reserved25_groups[] = {
- "gpio25",
-};
-static const char * const qdss_gpio12_groups[] = {
- "gpio26", "gpio80",
-};
-static const char * const reserved26_groups[] = {
- "gpio26",
-};
-static const char * const qup2_groups[] = {
- "gpio27", "gpio28", "gpio29", "gpio30",
-};
-static const char * const qdss_gpio13_groups[] = {
- "gpio27", "gpio93",
-};
-static const char * const reserved27_groups[] = {
- "gpio27",
-};
-static const char * const qdss_gpio14_groups[] = {
- "gpio28", "gpio43",
-};
-static const char * const reserved28_groups[] = {
- "gpio28",
-};
-static const char * const phase_flag1_groups[] = {
- "gpio29",
-};
-static const char * const qdss_gpio15_groups[] = {
- "gpio29", "gpio44",
-};
-static const char * const reserved29_groups[] = {
- "gpio29",
-};
-static const char * const phase_flag2_groups[] = {
- "gpio30",
-};
-static const char * const reserved30_groups[] = {
- "gpio30",
-};
-static const char * const qup11_groups[] = {
- "gpio31", "gpio32", "gpio33", "gpio34",
-};
-static const char * const qup14_groups[] = {
- "gpio31", "gpio32", "gpio33", "gpio34",
-};
-static const char * const reserved96_groups[] = {
- "gpio96",
-};
-static const char * const ldo_en_groups[] = {
- "gpio97",
-};
-static const char * const reserved97_groups[] = {
- "gpio97",
-};
-static const char * const ldo_update_groups[] = {
- "gpio98",
-};
-static const char * const reserved98_groups[] = {
- "gpio98",
-};
-static const char * const phase_flag14_groups[] = {
- "gpio99",
-};
-static const char * const reserved99_groups[] = {
- "gpio99",
-};
-static const char * const phase_flag15_groups[] = {
- "gpio100",
-};
-static const char * const reserved100_groups[] = {
- "gpio100",
-};
-static const char * const reserved101_groups[] = {
- "gpio101",
-};
-static const char * const pci_e1_groups[] = {
- "gpio102", "gpio103",
-};
-static const char * const prng_rosc_groups[] = {
- "gpio102",
-};
-static const char * const reserved102_groups[] = {
- "gpio102",
-};
-static const char * const phase_flag5_groups[] = {
- "gpio103",
-};
-static const char * const reserved103_groups[] = {
- "gpio103",
-};
-static const char * const reserved104_groups[] = {
- "gpio104",
-};
-static const char * const uim2_data_groups[] = {
- "gpio105",
-};
-static const char * const qup13_groups[] = {
- "gpio105", "gpio106", "gpio107", "gpio108",
-};
-static const char * const reserved105_groups[] = {
- "gpio105",
-};
-static const char * const uim2_clk_groups[] = {
- "gpio106",
-};
-static const char * const reserved106_groups[] = {
- "gpio106",
-};
-static const char * const uim2_reset_groups[] = {
- "gpio107",
-};
-static const char * const reserved107_groups[] = {
- "gpio107",
-};
-static const char * const uim2_present_groups[] = {
- "gpio108",
-};
-static const char * const reserved108_groups[] = {
- "gpio108",
-};
-static const char * const uim1_data_groups[] = {
- "gpio109",
-};
-static const char * const reserved109_groups[] = {
- "gpio109",
-};
-static const char * const uim1_clk_groups[] = {
- "gpio110",
-};
-static const char * const reserved110_groups[] = {
- "gpio110",
-};
-static const char * const uim1_reset_groups[] = {
- "gpio111",
-};
-static const char * const reserved111_groups[] = {
- "gpio111",
-};
-static const char * const uim1_present_groups[] = {
- "gpio112",
-};
-static const char * const reserved112_groups[] = {
- "gpio112",
-};
-static const char * const uim_batt_groups[] = {
- "gpio113",
-};
-static const char * const edp_hot_groups[] = {
- "gpio113",
-};
-static const char * const reserved113_groups[] = {
- "gpio113",
-};
-static const char * const nav_pps_groups[] = {
- "gpio114", "gpio114", "gpio115", "gpio115", "gpio128", "gpio128",
- "gpio129", "gpio129", "gpio143", "gpio143",
-};
-static const char * const reserved114_groups[] = {
- "gpio114",
-};
-static const char * const reserved115_groups[] = {
- "gpio115",
-};
-static const char * const reserved116_groups[] = {
- "gpio116",
-};
-static const char * const atest_char_groups[] = {
- "gpio117",
-};
-static const char * const reserved117_groups[] = {
- "gpio117",
-};
-static const char * const adsp_ext_groups[] = {
- "gpio118",
-};
-static const char * const atest_char3_groups[] = {
- "gpio118",
-};
-static const char * const reserved118_groups[] = {
- "gpio118",
-};
-static const char * const atest_char2_groups[] = {
- "gpio119",
-};
-static const char * const reserved119_groups[] = {
- "gpio119",
-};
-static const char * const atest_char1_groups[] = {
- "gpio120",
-};
-static const char * const reserved120_groups[] = {
- "gpio120",
-};
-static const char * const atest_char0_groups[] = {
- "gpio121",
-};
-static const char * const reserved121_groups[] = {
- "gpio121",
-};
-static const char * const reserved122_groups[] = {
- "gpio122",
-};
-static const char * const reserved123_groups[] = {
- "gpio123",
-};
-static const char * const reserved124_groups[] = {
- "gpio124",
-};
-static const char * const reserved125_groups[] = {
- "gpio125",
-};
-static const char * const reserved126_groups[] = {
- "gpio126",
-};
-static const char * const reserved127_groups[] = {
- "gpio127",
-};
-static const char * const reserved128_groups[] = {
- "gpio128",
-};
-static const char * const reserved129_groups[] = {
- "gpio129",
-};
-static const char * const qlink_request_groups[] = {
- "gpio130",
-};
-static const char * const reserved130_groups[] = {
- "gpio130",
-};
-static const char * const qlink_enable_groups[] = {
- "gpio131",
-};
-static const char * const reserved131_groups[] = {
- "gpio131",
-};
-static const char * const reserved132_groups[] = {
- "gpio132",
-};
-static const char * const reserved133_groups[] = {
- "gpio133",
-};
-static const char * const reserved134_groups[] = {
- "gpio134",
-};
-static const char * const pa_indicator_groups[] = {
- "gpio135",
-};
-static const char * const reserved135_groups[] = {
- "gpio135",
-};
-static const char * const reserved136_groups[] = {
- "gpio136",
-};
-static const char * const phase_flag26_groups[] = {
- "gpio137",
-};
-static const char * const reserved137_groups[] = {
- "gpio137",
-};
-static const char * const phase_flag27_groups[] = {
- "gpio138",
-};
-static const char * const reserved138_groups[] = {
- "gpio138",
-};
-static const char * const phase_flag28_groups[] = {
- "gpio139",
-};
-static const char * const reserved139_groups[] = {
- "gpio139",
-};
-static const char * const phase_flag6_groups[] = {
- "gpio140",
-};
-static const char * const reserved140_groups[] = {
- "gpio140",
-};
-static const char * const phase_flag29_groups[] = {
- "gpio141",
-};
-static const char * const reserved141_groups[] = {
- "gpio141",
-};
-static const char * const phase_flag30_groups[] = {
- "gpio142",
-};
-static const char * const reserved142_groups[] = {
- "gpio142",
-};
-static const char * const phase_flag31_groups[] = {
- "gpio143",
-};
-static const char * const reserved143_groups[] = {
- "gpio143",
-};
-static const char * const mss_lte_groups[] = {
- "gpio144", "gpio145",
-};
-static const char * const reserved144_groups[] = {
- "gpio144",
-};
-static const char * const reserved145_groups[] = {
- "gpio145",
-};
-static const char * const reserved146_groups[] = {
- "gpio146",
-};
-static const char * const reserved147_groups[] = {
- "gpio147",
-};
-static const char * const reserved148_groups[] = {
- "gpio148",
-};
-static const char * const reserved149_groups[] = {
- "gpio149", "gpio149",
-};
-static const char * const reserved31_groups[] = {
- "gpio31",
-};
-static const char * const reserved32_groups[] = {
- "gpio32",
-};
-static const char * const reserved33_groups[] = {
- "gpio33",
-};
-static const char * const reserved34_groups[] = {
- "gpio34",
-};
-static const char * const pci_e0_groups[] = {
- "gpio35", "gpio36",
-};
-static const char * const jitter_bist_groups[] = {
- "gpio35",
-};
-static const char * const reserved35_groups[] = {
- "gpio35",
-};
-static const char * const pll_bist_groups[] = {
- "gpio36",
-};
-static const char * const atest_tsens_groups[] = {
- "gpio36",
-};
-static const char * const reserved36_groups[] = {
- "gpio36",
-};
-static const char * const agera_pll_groups[] = {
- "gpio37",
-};
-static const char * const reserved37_groups[] = {
- "gpio37",
-};
-static const char * const usb_phy_groups[] = {
- "gpio38",
-};
-static const char * const reserved38_groups[] = {
- "gpio38",
-};
-static const char * const lpass_slimbus_groups[] = {
- "gpio39", "gpio70", "gpio71", "gpio72",
-};
-static const char * const reserved39_groups[] = {
- "gpio39",
-};
-static const char * const sd_write_groups[] = {
- "gpio40",
-};
-static const char * const tsif1_error_groups[] = {
- "gpio40",
-};
-static const char * const reserved40_groups[] = {
- "gpio40",
-};
-static const char * const qup3_groups[] = {
- "gpio41", "gpio42", "gpio43", "gpio44",
-};
-static const char * const reserved41_groups[] = {
- "gpio41",
-};
-static const char * const reserved42_groups[] = {
- "gpio42",
-};
-static const char * const reserved43_groups[] = {
- "gpio43",
-};
-static const char * const reserved44_groups[] = {
- "gpio44",
-};
-static const char * const qup6_groups[] = {
- "gpio45", "gpio46", "gpio47", "gpio48",
-};
-static const char * const reserved45_groups[] = {
- "gpio45",
-};
-static const char * const reserved46_groups[] = {
- "gpio46",
-};
-static const char * const reserved47_groups[] = {
- "gpio47",
-};
-static const char * const reserved48_groups[] = {
- "gpio48",
-};
-static const char * const qup12_groups[] = {
- "gpio49", "gpio50", "gpio51", "gpio52",
-};
-static const char * const reserved49_groups[] = {
- "gpio49",
-};
-static const char * const reserved50_groups[] = {
- "gpio50",
-};
-static const char * const reserved51_groups[] = {
- "gpio51",
-};
-static const char * const phase_flag16_groups[] = {
- "gpio52",
-};
-static const char * const reserved52_groups[] = {
- "gpio52",
-};
-static const char * const qup10_groups[] = {
- "gpio53", "gpio54", "gpio55", "gpio56",
-};
-static const char * const phase_flag11_groups[] = {
- "gpio53",
-};
-static const char * const reserved53_groups[] = {
- "gpio53",
-};
-static const char * const phase_flag12_groups[] = {
- "gpio54",
-};
-static const char * const reserved54_groups[] = {
- "gpio54",
-};
-static const char * const phase_flag13_groups[] = {
- "gpio55",
-};
-static const char * const reserved55_groups[] = {
- "gpio55",
-};
-static const char * const phase_flag17_groups[] = {
- "gpio56",
-};
-static const char * const reserved56_groups[] = {
- "gpio56",
-};
-static const char * const qua_mi2s_groups[] = {
- "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
-};
-static const char * const gcc_gp1_groups[] = {
- "gpio57", "gpio78",
-};
-static const char * const phase_flag18_groups[] = {
- "gpio57",
-};
-static const char * const reserved57_groups[] = {
- "gpio57",
-};
-static const char * const phase_flag19_groups[] = {
- "gpio58",
-};
-static const char * const reserved58_groups[] = {
- "gpio58",
-};
-static const char * const phase_flag20_groups[] = {
- "gpio59",
-};
-static const char * const reserved59_groups[] = {
- "gpio59",
-};
-static const char * const cri_trng0_groups[] = {
- "gpio60",
-};
-static const char * const phase_flag21_groups[] = {
- "gpio60",
-};
-static const char * const reserved60_groups[] = {
- "gpio60",
-};
-static const char * const cri_trng1_groups[] = {
- "gpio61",
-};
-static const char * const phase_flag22_groups[] = {
- "gpio61",
-};
-static const char * const reserved61_groups[] = {
- "gpio61",
-};
-static const char * const cri_trng_groups[] = {
- "gpio62",
-};
-static const char * const phase_flag23_groups[] = {
- "gpio62",
-};
-static const char * const reserved62_groups[] = {
- "gpio62",
-};
-static const char * const phase_flag24_groups[] = {
- "gpio63",
-};
-static const char * const reserved63_groups[] = {
- "gpio63",
-};
-static const char * const pri_mi2s_groups[] = {
- "gpio64", "gpio65", "gpio67", "gpio68",
-};
-static const char * const sp_cmu_groups[] = {
- "gpio64",
-};
-static const char * const phase_flag25_groups[] = {
- "gpio64",
-};
-static const char * const reserved64_groups[] = {
- "gpio64",
-};
-static const char * const qup8_groups[] = {
- "gpio65", "gpio66", "gpio67", "gpio68",
-};
-static const char * const reserved65_groups[] = {
- "gpio65",
-};
-static const char * const pri_mi2s_ws_groups[] = {
- "gpio66",
-};
-static const char * const reserved66_groups[] = {
- "gpio66",
-};
-static const char * const reserved67_groups[] = {
- "gpio67",
-};
-static const char * const reserved68_groups[] = {
- "gpio68",
-};
-static const char * const spkr_i2s_groups[] = {
- "gpio69", "gpio70", "gpio71", "gpio72",
-};
-static const char * const audio_ref_groups[] = {
- "gpio69",
-};
-static const char * const reserved69_groups[] = {
- "gpio69",
-};
-static const char * const reserved70_groups[] = {
- "gpio70",
-};
-static const char * const tsense_pwm1_groups[] = {
- "gpio71",
-};
-static const char * const tsense_pwm2_groups[] = {
- "gpio71",
-};
-static const char * const reserved71_groups[] = {
- "gpio71",
-};
-static const char * const reserved72_groups[] = {
- "gpio72",
-};
-static const char * const btfm_slimbus_groups[] = {
- "gpio73", "gpio74",
-};
-static const char * const atest_usb2_groups[] = {
- "gpio73",
-};
-static const char * const reserved73_groups[] = {
- "gpio73",
-};
-static const char * const ter_mi2s_groups[] = {
- "gpio74", "gpio75", "gpio76", "gpio77", "gpio78",
-};
-static const char * const phase_flag7_groups[] = {
- "gpio74",
-};
-static const char * const atest_usb23_groups[] = {
- "gpio74",
-};
-static const char * const reserved74_groups[] = {
- "gpio74",
-};
-static const char * const phase_flag8_groups[] = {
- "gpio75",
-};
-static const char * const atest_usb22_groups[] = {
- "gpio75",
-};
-static const char * const reserved75_groups[] = {
- "gpio75",
-};
-static const char * const phase_flag9_groups[] = {
- "gpio76",
-};
-static const char * const atest_usb21_groups[] = {
- "gpio76",
-};
-static const char * const reserved76_groups[] = {
- "gpio76",
-};
-static const char * const phase_flag4_groups[] = {
- "gpio77",
-};
-static const char * const atest_usb20_groups[] = {
- "gpio77",
-};
-static const char * const reserved77_groups[] = {
- "gpio77",
-};
-static const char * const reserved78_groups[] = {
- "gpio78",
-};
-static const char * const sec_mi2s_groups[] = {
- "gpio79", "gpio80", "gpio81", "gpio82", "gpio83",
-};
-static const char * const reserved79_groups[] = {
- "gpio79",
-};
-static const char * const reserved80_groups[] = {
- "gpio80",
-};
-static const char * const qup15_groups[] = {
- "gpio81", "gpio82", "gpio83", "gpio84",
-};
-static const char * const reserved81_groups[] = {
- "gpio81",
-};
-static const char * const reserved82_groups[] = {
- "gpio82",
-};
-static const char * const reserved83_groups[] = {
- "gpio83",
-};
-static const char * const reserved84_groups[] = {
- "gpio84",
-};
-static const char * const qup5_groups[] = {
- "gpio85", "gpio86", "gpio87", "gpio88",
-};
-static const char * const reserved85_groups[] = {
- "gpio85",
-};
-static const char * const reserved86_groups[] = {
- "gpio86",
-};
-static const char * const reserved87_groups[] = {
- "gpio87",
-};
-static const char * const reserved88_groups[] = {
- "gpio88",
-};
-static const char * const tsif1_clk_groups[] = {
- "gpio89",
-};
-static const char * const qup4_groups[] = {
- "gpio89", "gpio90", "gpio91", "gpio92",
-};
-static const char * const tgu_ch3_groups[] = {
- "gpio89",
-};
-static const char * const phase_flag10_groups[] = {
- "gpio89",
-};
-static const char * const reserved89_groups[] = {
- "gpio89",
-};
-static const char * const tsif1_en_groups[] = {
- "gpio90",
-};
-static const char * const mdp_vsync0_groups[] = {
- "gpio90",
-};
-static const char * const mdp_vsync1_groups[] = {
- "gpio90",
-};
-static const char * const mdp_vsync2_groups[] = {
- "gpio90",
-};
-static const char * const mdp_vsync3_groups[] = {
- "gpio90",
-};
-static const char * const tgu_ch0_groups[] = {
- "gpio90",
-};
-static const char * const phase_flag0_groups[] = {
- "gpio90",
-};
-static const char * const reserved90_groups[] = {
- "gpio90",
-};
-static const char * const tsif1_data_groups[] = {
- "gpio91",
-};
-static const char * const sdc4_cmd_groups[] = {
- "gpio91",
-};
-static const char * const tgu_ch1_groups[] = {
- "gpio91",
-};
-static const char * const reserved91_groups[] = {
- "gpio91",
-};
-static const char * const tsif2_error_groups[] = {
- "gpio92",
-};
-static const char * const sdc43_groups[] = {
- "gpio92",
-};
-static const char * const vfr_1_groups[] = {
- "gpio92",
-};
-static const char * const tgu_ch2_groups[] = {
- "gpio92",
-};
-static const char * const reserved92_groups[] = {
- "gpio92",
-};
-static const char * const tsif2_clk_groups[] = {
- "gpio93",
-};
-static const char * const sdc4_clk_groups[] = {
- "gpio93",
-};
-static const char * const qup7_groups[] = {
- "gpio93", "gpio94", "gpio95", "gpio96",
-};
-static const char * const reserved93_groups[] = {
- "gpio93",
-};
-static const char * const tsif2_en_groups[] = {
- "gpio94",
-};
-static const char * const sdc42_groups[] = {
- "gpio94",
-};
-static const char * const reserved94_groups[] = {
- "gpio94",
-};
-static const char * const tsif2_data_groups[] = {
- "gpio95",
-};
-static const char * const sdc41_groups[] = {
- "gpio95",
-};
-static const char * const reserved95_groups[] = {
- "gpio95",
-};
-static const char * const tsif2_sync_groups[] = {
- "gpio96",
-};
-static const char * const sdc40_groups[] = {
- "gpio96",
-};
-static const char * const phase_flag3_groups[] = {
- "gpio96",
-};
static const struct msm_function sdm845_functions[] = {
- FUNCTION(gpio),
- FUNCTION(qup0),
- FUNCTION(reserved0),
- FUNCTION(reserved1),
- FUNCTION(reserved2),
- FUNCTION(reserved3),
- FUNCTION(qup9),
- FUNCTION(qdss_cti),
- FUNCTION(reserved4),
- FUNCTION(reserved5),
- FUNCTION(ddr_pxi0),
- FUNCTION(reserved6),
- FUNCTION(ddr_bist),
- FUNCTION(atest_tsens2),
- FUNCTION(vsense_trigger),
- FUNCTION(atest_usb1),
- FUNCTION(reserved7),
- FUNCTION(qup_l4),
- FUNCTION(wlan1_adc1),
- FUNCTION(atest_usb13),
- FUNCTION(ddr_pxi1),
- FUNCTION(reserved8),
- FUNCTION(qup_l5),
- FUNCTION(wlan1_adc0),
- FUNCTION(atest_usb12),
- FUNCTION(reserved9),
- FUNCTION(mdp_vsync),
- FUNCTION(qup_l6),
- FUNCTION(wlan2_adc1),
- FUNCTION(atest_usb11),
- FUNCTION(ddr_pxi2),
- FUNCTION(reserved10),
- FUNCTION(edp_lcd),
- FUNCTION(dbg_out),
- FUNCTION(wlan2_adc0),
- FUNCTION(atest_usb10),
- FUNCTION(reserved11),
- FUNCTION(m_voc),
- FUNCTION(tsif1_sync),
FUNCTION(ddr_pxi3),
- FUNCTION(reserved12),
FUNCTION(cam_mclk),
FUNCTION(pll_bypassnl),
FUNCTION(qdss_gpio0),
- FUNCTION(reserved13),
FUNCTION(pll_reset),
FUNCTION(qdss_gpio1),
- FUNCTION(reserved14),
FUNCTION(qdss_gpio2),
- FUNCTION(reserved15),
FUNCTION(qdss_gpio3),
- FUNCTION(reserved16),
FUNCTION(cci_i2c),
FUNCTION(qup1),
FUNCTION(qdss_gpio4),
- FUNCTION(reserved17),
FUNCTION(qdss_gpio5),
- FUNCTION(reserved18),
FUNCTION(qdss_gpio6),
- FUNCTION(reserved19),
FUNCTION(qdss_gpio7),
- FUNCTION(reserved20),
FUNCTION(cci_timer0),
FUNCTION(gcc_gp2),
FUNCTION(qdss_gpio8),
- FUNCTION(reserved21),
FUNCTION(cci_timer1),
FUNCTION(gcc_gp3),
FUNCTION(qdss_gpio),
- FUNCTION(reserved22),
FUNCTION(cci_timer2),
FUNCTION(qdss_gpio9),
- FUNCTION(reserved23),
FUNCTION(cci_timer3),
FUNCTION(cci_async),
FUNCTION(qdss_gpio10),
- FUNCTION(reserved24),
FUNCTION(cci_timer4),
FUNCTION(qdss_gpio11),
- FUNCTION(reserved25),
FUNCTION(qdss_gpio12),
- FUNCTION(reserved26),
FUNCTION(qup2),
FUNCTION(qdss_gpio13),
- FUNCTION(reserved27),
FUNCTION(qdss_gpio14),
- FUNCTION(reserved28),
FUNCTION(phase_flag1),
FUNCTION(qdss_gpio15),
- FUNCTION(reserved29),
FUNCTION(phase_flag2),
- FUNCTION(reserved30),
FUNCTION(qup11),
FUNCTION(qup14),
- FUNCTION(reserved96),
- FUNCTION(ldo_en),
- FUNCTION(reserved97),
- FUNCTION(ldo_update),
- FUNCTION(reserved98),
- FUNCTION(phase_flag14),
- FUNCTION(reserved99),
- FUNCTION(phase_flag15),
- FUNCTION(reserved100),
- FUNCTION(reserved101),
- FUNCTION(pci_e1),
- FUNCTION(prng_rosc),
- FUNCTION(reserved102),
- FUNCTION(phase_flag5),
- FUNCTION(reserved103),
- FUNCTION(reserved104),
- FUNCTION(uim2_data),
- FUNCTION(qup13),
- FUNCTION(reserved105),
- FUNCTION(uim2_clk),
- FUNCTION(reserved106),
- FUNCTION(uim2_reset),
- FUNCTION(reserved107),
- FUNCTION(uim2_present),
- FUNCTION(reserved108),
- FUNCTION(uim1_data),
- FUNCTION(reserved109),
- FUNCTION(uim1_clk),
- FUNCTION(reserved110),
- FUNCTION(uim1_reset),
- FUNCTION(reserved111),
- FUNCTION(uim1_present),
- FUNCTION(reserved112),
- FUNCTION(uim_batt),
- FUNCTION(edp_hot),
- FUNCTION(reserved113),
- FUNCTION(nav_pps),
- FUNCTION(reserved114),
- FUNCTION(reserved115),
- FUNCTION(reserved116),
- FUNCTION(atest_char),
- FUNCTION(reserved117),
- FUNCTION(adsp_ext),
- FUNCTION(atest_char3),
- FUNCTION(reserved118),
- FUNCTION(atest_char2),
- FUNCTION(reserved119),
- FUNCTION(atest_char1),
- FUNCTION(reserved120),
- FUNCTION(atest_char0),
- FUNCTION(reserved121),
- FUNCTION(reserved122),
- FUNCTION(reserved123),
- FUNCTION(reserved124),
- FUNCTION(reserved125),
- FUNCTION(reserved126),
- FUNCTION(reserved127),
- FUNCTION(reserved128),
- FUNCTION(reserved129),
- FUNCTION(qlink_request),
- FUNCTION(reserved130),
- FUNCTION(qlink_enable),
- FUNCTION(reserved131),
- FUNCTION(reserved132),
- FUNCTION(reserved133),
- FUNCTION(reserved134),
- FUNCTION(pa_indicator),
- FUNCTION(reserved135),
- FUNCTION(reserved136),
- FUNCTION(phase_flag26),
- FUNCTION(reserved137),
- FUNCTION(phase_flag27),
- FUNCTION(reserved138),
- FUNCTION(phase_flag28),
- FUNCTION(reserved139),
- FUNCTION(phase_flag6),
- FUNCTION(reserved140),
- FUNCTION(phase_flag29),
- FUNCTION(reserved141),
- FUNCTION(phase_flag30),
- FUNCTION(reserved142),
- FUNCTION(phase_flag31),
- FUNCTION(reserved143),
- FUNCTION(mss_lte),
- FUNCTION(reserved144),
- FUNCTION(reserved145),
- FUNCTION(reserved146),
- FUNCTION(reserved147),
- FUNCTION(reserved148),
- FUNCTION(reserved149),
- FUNCTION(reserved31),
- FUNCTION(reserved32),
- FUNCTION(reserved33),
- FUNCTION(reserved34),
FUNCTION(pci_e0),
FUNCTION(jitter_bist),
- FUNCTION(reserved35),
FUNCTION(pll_bist),
FUNCTION(atest_tsens),
- FUNCTION(reserved36),
FUNCTION(agera_pll),
- FUNCTION(reserved37),
FUNCTION(usb_phy),
- FUNCTION(reserved38),
FUNCTION(lpass_slimbus),
- FUNCTION(reserved39),
FUNCTION(sd_write),
FUNCTION(tsif1_error),
- FUNCTION(reserved40),
FUNCTION(qup3),
- FUNCTION(reserved41),
- FUNCTION(reserved42),
- FUNCTION(reserved43),
- FUNCTION(reserved44),
FUNCTION(qup6),
- FUNCTION(reserved45),
- FUNCTION(reserved46),
- FUNCTION(reserved47),
- FUNCTION(reserved48),
FUNCTION(qup12),
- FUNCTION(reserved49),
- FUNCTION(reserved50),
- FUNCTION(reserved51),
FUNCTION(phase_flag16),
- FUNCTION(reserved52),
FUNCTION(qup10),
FUNCTION(phase_flag11),
- FUNCTION(reserved53),
FUNCTION(phase_flag12),
- FUNCTION(reserved54),
FUNCTION(phase_flag13),
- FUNCTION(reserved55),
FUNCTION(phase_flag17),
- FUNCTION(reserved56),
FUNCTION(qua_mi2s),
FUNCTION(gcc_gp1),
FUNCTION(phase_flag18),
- FUNCTION(reserved57),
FUNCTION(phase_flag19),
- FUNCTION(reserved58),
FUNCTION(phase_flag20),
- FUNCTION(reserved59),
FUNCTION(cri_trng0),
FUNCTION(phase_flag21),
- FUNCTION(reserved60),
FUNCTION(cri_trng1),
FUNCTION(phase_flag22),
- FUNCTION(reserved61),
FUNCTION(cri_trng),
FUNCTION(phase_flag23),
- FUNCTION(reserved62),
FUNCTION(phase_flag24),
- FUNCTION(reserved63),
FUNCTION(pri_mi2s),
FUNCTION(sp_cmu),
FUNCTION(phase_flag25),
- FUNCTION(reserved64),
FUNCTION(qup8),
- FUNCTION(reserved65),
FUNCTION(pri_mi2s_ws),
- FUNCTION(reserved66),
- FUNCTION(reserved67),
- FUNCTION(reserved68),
FUNCTION(spkr_i2s),
FUNCTION(audio_ref),
- FUNCTION(reserved69),
- FUNCTION(reserved70),
FUNCTION(tsense_pwm1),
FUNCTION(tsense_pwm2),
- FUNCTION(reserved71),
- FUNCTION(reserved72),
FUNCTION(btfm_slimbus),
FUNCTION(atest_usb2),
- FUNCTION(reserved73),
FUNCTION(ter_mi2s),
FUNCTION(phase_flag7),
FUNCTION(atest_usb23),
- FUNCTION(reserved74),
FUNCTION(phase_flag8),
FUNCTION(atest_usb22),
- FUNCTION(reserved75),
FUNCTION(phase_flag9),
FUNCTION(atest_usb21),
- FUNCTION(reserved76),
FUNCTION(phase_flag4),
FUNCTION(atest_usb20),
- FUNCTION(reserved77),
- FUNCTION(reserved78),
FUNCTION(sec_mi2s),
- FUNCTION(reserved79),
- FUNCTION(reserved80),
FUNCTION(qup15),
- FUNCTION(reserved81),
- FUNCTION(reserved82),
- FUNCTION(reserved83),
- FUNCTION(reserved84),
FUNCTION(qup5),
- FUNCTION(reserved85),
- FUNCTION(reserved86),
- FUNCTION(reserved87),
- FUNCTION(reserved88),
FUNCTION(tsif1_clk),
FUNCTION(qup4),
FUNCTION(tgu_ch3),
FUNCTION(phase_flag10),
- FUNCTION(reserved89),
FUNCTION(tsif1_en),
FUNCTION(mdp_vsync0),
FUNCTION(mdp_vsync1),
@@ -2088,293 +1283,347 @@
FUNCTION(mdp_vsync3),
FUNCTION(tgu_ch0),
FUNCTION(phase_flag0),
- FUNCTION(reserved90),
FUNCTION(tsif1_data),
FUNCTION(sdc4_cmd),
FUNCTION(tgu_ch1),
- FUNCTION(reserved91),
FUNCTION(tsif2_error),
FUNCTION(sdc43),
FUNCTION(vfr_1),
FUNCTION(tgu_ch2),
- FUNCTION(reserved92),
FUNCTION(tsif2_clk),
FUNCTION(sdc4_clk),
FUNCTION(qup7),
- FUNCTION(reserved93),
FUNCTION(tsif2_en),
FUNCTION(sdc42),
- FUNCTION(reserved94),
FUNCTION(tsif2_data),
FUNCTION(sdc41),
- FUNCTION(reserved95),
FUNCTION(tsif2_sync),
FUNCTION(sdc40),
FUNCTION(phase_flag3),
+ FUNCTION(ldo_en),
+ FUNCTION(ldo_update),
+ FUNCTION(phase_flag14),
+ FUNCTION(phase_flag15),
+ FUNCTION(pci_e1),
+ FUNCTION(prng_rosc),
+ FUNCTION(phase_flag5),
+ FUNCTION(uim2_data),
+ FUNCTION(qup13),
+ FUNCTION(uim2_clk),
+ FUNCTION(uim2_reset),
+ FUNCTION(uim2_present),
+ FUNCTION(uim1_data),
+ FUNCTION(uim1_clk),
+ FUNCTION(uim1_reset),
+ FUNCTION(uim1_present),
+ FUNCTION(uim_batt),
+ FUNCTION(edp_hot),
+ FUNCTION(nav_pps),
+ FUNCTION(atest_char),
+ FUNCTION(adsp_ext),
+ FUNCTION(atest_char3),
+ FUNCTION(atest_char2),
+ FUNCTION(atest_char1),
+ FUNCTION(atest_char0),
+ FUNCTION(qlink_request),
+ FUNCTION(qlink_enable),
+ FUNCTION(pa_indicator),
+ FUNCTION(phase_flag26),
+ FUNCTION(phase_flag27),
+ FUNCTION(phase_flag28),
+ FUNCTION(phase_flag6),
+ FUNCTION(phase_flag29),
+ FUNCTION(phase_flag30),
+ FUNCTION(phase_flag31),
+ FUNCTION(mss_lte),
+ FUNCTION(qup0),
+ FUNCTION(gpio),
+ FUNCTION(qup9),
+ FUNCTION(qdss_cti),
+ FUNCTION(ddr_pxi0),
+ FUNCTION(ddr_bist),
+ FUNCTION(atest_tsens2),
+ FUNCTION(vsense_trigger),
+ FUNCTION(atest_usb1),
+ FUNCTION(qup_l4),
+ FUNCTION(wlan1_adc1),
+ FUNCTION(atest_usb13),
+ FUNCTION(ddr_pxi1),
+ FUNCTION(qup_l5),
+ FUNCTION(wlan1_adc0),
+ FUNCTION(atest_usb12),
+ FUNCTION(mdp_vsync),
+ FUNCTION(qup_l6),
+ FUNCTION(wlan2_adc1),
+ FUNCTION(atest_usb11),
+ FUNCTION(ddr_pxi2),
+ FUNCTION(edp_lcd),
+ FUNCTION(dbg_out),
+ FUNCTION(wlan2_adc0),
+ FUNCTION(atest_usb10),
+ FUNCTION(m_voc),
+ FUNCTION(tsif1_sync),
};
+/* Every pin is maintained as a single group, and missing or non-existing pin
+ * would be maintained as dummy group to synchronize pin group index with
+ * pin descriptor registered with pinctrl core.
+ * Clients would not be able to request these dummy pin groups.
+ */
static const struct msm_pingroup sdm845_groups[] = {
- PINGROUP(0, NORTH, qup0, NA, reserved0, NA, NA, NA, NA, NA, NA),
- PINGROUP(1, NORTH, qup0, NA, reserved1, NA, NA, NA, NA, NA, NA),
- PINGROUP(2, NORTH, qup0, NA, reserved2, NA, NA, NA, NA, NA, NA),
- PINGROUP(3, NORTH, qup0, NA, reserved3, NA, NA, NA, NA, NA, NA),
- PINGROUP(4, NORTH, qup9, qdss_cti, reserved4, NA, NA, NA, NA, NA, NA),
- PINGROUP(5, NORTH, qup9, qdss_cti, reserved5, NA, NA, NA, NA, NA, NA),
- PINGROUP(6, NORTH, qup9, NA, ddr_pxi0, reserved6, NA, NA, NA, NA, NA),
- PINGROUP(7, NORTH, qup9, ddr_bist, NA, atest_tsens2, vsense_trigger,
- atest_usb1, ddr_pxi0, reserved7, NA),
- PINGROUP(8, NORTH, qup_l4, NA, ddr_bist, NA, NA, wlan1_adc1,
- atest_usb13, ddr_pxi1, reserved8),
- PINGROUP(9, NORTH, qup_l5, ddr_bist, NA, wlan1_adc0, atest_usb12,
- ddr_pxi1, reserved9, NA, NA),
- PINGROUP(10, NORTH, mdp_vsync, qup_l6, ddr_bist, wlan2_adc1,
- atest_usb11, ddr_pxi2, reserved10, NA, NA),
- PINGROUP(11, NORTH, mdp_vsync, edp_lcd, dbg_out, wlan2_adc0,
- atest_usb10, ddr_pxi2, reserved11, NA, NA),
- PINGROUP(12, SOUTH, mdp_vsync, m_voc, tsif1_sync, ddr_pxi3, reserved12,
- NA, NA, NA, NA),
- PINGROUP(13, SOUTH, cam_mclk, pll_bypassnl, qdss_gpio0, ddr_pxi3,
- reserved13, NA, NA, NA, NA),
- PINGROUP(14, SOUTH, cam_mclk, pll_reset, qdss_gpio1, reserved14, NA,
- NA, NA, NA, NA),
- PINGROUP(15, SOUTH, cam_mclk, qdss_gpio2, reserved15, NA, NA, NA, NA,
- NA, NA),
- PINGROUP(16, SOUTH, cam_mclk, qdss_gpio3, reserved16, NA, NA, NA, NA,
- NA, NA),
- PINGROUP(17, SOUTH, cci_i2c, qup1, qdss_gpio4, reserved17, NA, NA, NA,
- NA, NA),
- PINGROUP(18, SOUTH, cci_i2c, qup1, NA, qdss_gpio5, reserved18, NA, NA,
- NA, NA),
- PINGROUP(19, SOUTH, cci_i2c, qup1, NA, qdss_gpio6, reserved19, NA, NA,
- NA, NA),
- PINGROUP(20, SOUTH, cci_i2c, qup1, NA, qdss_gpio7, reserved20, NA, NA,
- NA, NA),
- PINGROUP(21, SOUTH, cci_timer0, gcc_gp2, qdss_gpio8, reserved21, NA,
- NA, NA, NA, NA),
- PINGROUP(22, SOUTH, cci_timer1, gcc_gp3, qdss_gpio, reserved22, NA, NA,
- NA, NA, NA),
- PINGROUP(23, SOUTH, cci_timer2, qdss_gpio9, reserved23, NA, NA, NA, NA,
- NA, NA),
- PINGROUP(24, SOUTH, cci_timer3, cci_async, qdss_gpio10, reserved24, NA,
- NA, NA, NA, NA),
- PINGROUP(25, SOUTH, cci_timer4, cci_async, qdss_gpio11, reserved25, NA,
- NA, NA, NA, NA),
- PINGROUP(26, SOUTH, cci_async, qdss_gpio12, reserved26, NA, NA, NA, NA,
- NA, NA),
- PINGROUP(27, NORTH, qup2, qdss_gpio13, reserved27, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(28, NORTH, qup2, qdss_gpio14, reserved28, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(29, NORTH, qup2, NA, phase_flag1, qdss_gpio15, reserved29, NA,
- NA, NA, NA),
- PINGROUP(30, NORTH, qup2, phase_flag2, qdss_gpio, reserved30, NA, NA,
- NA, NA, NA),
- PINGROUP(31, NORTH, qup11, qup14, reserved31, NA, NA, NA, NA, NA, NA),
- PINGROUP(32, NORTH, qup11, qup14, NA, reserved32, NA, NA, NA, NA, NA),
- PINGROUP(33, NORTH, qup11, qup14, NA, reserved33, NA, NA, NA, NA, NA),
- PINGROUP(34, NORTH, qup11, qup14, NA, reserved34, NA, NA, NA, NA, NA),
- PINGROUP(35, SOUTH, pci_e0, qup_l4, jitter_bist, NA, reserved35, NA,
- NA, NA, NA),
- PINGROUP(36, SOUTH, pci_e0, qup_l5, pll_bist, NA, atest_tsens,
- reserved36, NA, NA, NA),
- PINGROUP(37, SOUTH, qup_l6, agera_pll, NA, reserved37, NA, NA, NA, NA,
- NA),
- PINGROUP(38, NORTH, usb_phy, NA, reserved38, NA, NA, NA, NA, NA, NA),
- PINGROUP(39, NORTH, lpass_slimbus, NA, reserved39, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(40, SOUTH, sd_write, tsif1_error, NA, reserved40, NA, NA, NA,
- NA, NA),
- PINGROUP(41, SOUTH, qup3, NA, qdss_gpio6, reserved41, NA, NA, NA, NA,
- NA),
- PINGROUP(42, SOUTH, qup3, NA, qdss_gpio7, reserved42, NA, NA, NA, NA,
- NA),
- PINGROUP(43, SOUTH, qup3, NA, qdss_gpio14, reserved43, NA, NA, NA, NA,
- NA),
- PINGROUP(44, SOUTH, qup3, NA, qdss_gpio15, reserved44, NA, NA, NA, NA,
- NA),
- PINGROUP(45, NORTH, qup6, NA, reserved45, NA, NA, NA, NA, NA, NA),
- PINGROUP(46, NORTH, qup6, NA, reserved46, NA, NA, NA, NA, NA, NA),
- PINGROUP(47, NORTH, qup6, reserved47, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(48, NORTH, qup6, reserved48, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(49, NORTH, qup12, reserved49, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(50, NORTH, qup12, reserved50, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(51, NORTH, qup12, qdss_cti, reserved51, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(52, NORTH, qup12, phase_flag16, qdss_cti, reserved52, NA, NA,
- NA, NA, NA),
- PINGROUP(53, NORTH, qup10, phase_flag11, reserved53, NA, NA, NA, NA,
- NA, NA),
- PINGROUP(54, NORTH, qup10, NA, phase_flag12, reserved54, NA, NA, NA,
- NA, NA),
- PINGROUP(55, NORTH, qup10, phase_flag13, reserved55, NA, NA, NA, NA,
- NA, NA),
- PINGROUP(56, NORTH, qup10, phase_flag17, reserved56, NA, NA, NA, NA,
- NA, NA),
- PINGROUP(57, NORTH, qua_mi2s, gcc_gp1, phase_flag18, reserved57, NA,
- NA, NA, NA, NA),
- PINGROUP(58, NORTH, qua_mi2s, gcc_gp2, phase_flag19, reserved58, NA,
- NA, NA, NA, NA),
- PINGROUP(59, NORTH, qua_mi2s, gcc_gp3, phase_flag20, reserved59, NA,
- NA, NA, NA, NA),
- PINGROUP(60, NORTH, qua_mi2s, cri_trng0, phase_flag21, reserved60, NA,
- NA, NA, NA, NA),
- PINGROUP(61, NORTH, qua_mi2s, cri_trng1, phase_flag22, reserved61, NA,
- NA, NA, NA, NA),
- PINGROUP(62, NORTH, qua_mi2s, cri_trng, phase_flag23, qdss_cti,
- reserved62, NA, NA, NA, NA),
- PINGROUP(63, NORTH, qua_mi2s, NA, phase_flag24, qdss_cti, reserved63,
- NA, NA, NA, NA),
- PINGROUP(64, NORTH, pri_mi2s, sp_cmu, phase_flag25, reserved64, NA, NA,
- NA, NA, NA),
- PINGROUP(65, NORTH, pri_mi2s, qup8, reserved65, NA, NA, NA, NA, NA, NA),
- PINGROUP(66, NORTH, pri_mi2s_ws, qup8, reserved66, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(67, NORTH, pri_mi2s, qup8, reserved67, NA, NA, NA, NA, NA, NA),
- PINGROUP(68, NORTH, pri_mi2s, qup8, reserved68, NA, NA, NA, NA, NA, NA),
- PINGROUP(69, NORTH, spkr_i2s, audio_ref, reserved69, NA, NA, NA, NA,
- NA, NA),
- PINGROUP(70, NORTH, lpass_slimbus, spkr_i2s, reserved70, NA, NA, NA,
- NA, NA, NA),
- PINGROUP(71, NORTH, lpass_slimbus, spkr_i2s, tsense_pwm1, tsense_pwm2,
- reserved71, NA, NA, NA, NA),
- PINGROUP(72, NORTH, lpass_slimbus, spkr_i2s, reserved72, NA, NA, NA,
- NA, NA, NA),
- PINGROUP(73, NORTH, btfm_slimbus, atest_usb2, reserved73, NA, NA, NA,
- NA, NA, NA),
- PINGROUP(74, NORTH, btfm_slimbus, ter_mi2s, phase_flag7, atest_usb23,
- reserved74, NA, NA, NA, NA),
- PINGROUP(75, NORTH, ter_mi2s, phase_flag8, qdss_gpio8, atest_usb22,
- reserved75, NA, NA, NA, NA),
- PINGROUP(76, NORTH, ter_mi2s, phase_flag9, qdss_gpio9, atest_usb21,
- reserved76, NA, NA, NA, NA),
- PINGROUP(77, NORTH, ter_mi2s, phase_flag4, qdss_gpio10, atest_usb20,
- reserved77, NA, NA, NA, NA),
- PINGROUP(78, NORTH, ter_mi2s, gcc_gp1, reserved78, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(79, NORTH, sec_mi2s, NA, NA, qdss_gpio11, reserved79, NA, NA,
- NA, NA),
- PINGROUP(80, NORTH, sec_mi2s, NA, qdss_gpio12, reserved80, NA, NA, NA,
- NA, NA),
- PINGROUP(81, NORTH, sec_mi2s, qup15, NA, reserved81, NA, NA, NA, NA,
- NA),
- PINGROUP(82, NORTH, sec_mi2s, qup15, NA, reserved82, NA, NA, NA, NA,
- NA),
- PINGROUP(83, NORTH, sec_mi2s, qup15, NA, reserved83, NA, NA, NA, NA,
- NA),
- PINGROUP(84, NORTH, qup15, NA, reserved84, NA, NA, NA, NA, NA, NA),
- PINGROUP(85, SOUTH, qup5, NA, reserved85, NA, NA, NA, NA, NA, NA),
- PINGROUP(86, SOUTH, qup5, NA, NA, reserved86, NA, NA, NA, NA, NA),
- PINGROUP(87, SOUTH, qup5, NA, reserved87, NA, NA, NA, NA, NA, NA),
- PINGROUP(88, SOUTH, qup5, NA, reserved88, NA, NA, NA, NA, NA, NA),
- PINGROUP(89, SOUTH, tsif1_clk, qup4, tgu_ch3, phase_flag10, reserved89,
- NA, NA, NA, NA),
- PINGROUP(90, SOUTH, tsif1_en, mdp_vsync0, qup4, mdp_vsync1, mdp_vsync2,
- mdp_vsync3, tgu_ch0, phase_flag0, qdss_cti),
- PINGROUP(91, SOUTH, tsif1_data, sdc4_cmd, qup4, tgu_ch1, NA, qdss_cti,
- reserved91, NA, NA),
- PINGROUP(92, SOUTH, tsif2_error, sdc43, qup4, vfr_1, tgu_ch2, NA,
- reserved92, NA, NA),
- PINGROUP(93, SOUTH, tsif2_clk, sdc4_clk, qup7, NA, qdss_gpio13,
- reserved93, NA, NA, NA),
- PINGROUP(94, SOUTH, tsif2_en, sdc42, qup7, NA, reserved94, NA, NA, NA,
- NA),
- PINGROUP(95, SOUTH, tsif2_data, sdc41, qup7, NA, NA, reserved95, NA,
- NA, NA),
- PINGROUP(96, SOUTH, tsif2_sync, sdc40, qup7, phase_flag3, reserved96,
- NA, NA, NA, NA),
- PINGROUP(97, NORTH, NA, NA, mdp_vsync, ldo_en, reserved97, NA, NA, NA,
- NA),
- PINGROUP(98, NORTH, NA, mdp_vsync, ldo_update, reserved98, NA, NA, NA,
- NA, NA),
- PINGROUP(99, NORTH, phase_flag14, reserved99, NA, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(100, NORTH, phase_flag15, reserved100, NA, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(101, NORTH, NA, reserved101, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(102, NORTH, pci_e1, prng_rosc, reserved102, NA, NA, NA, NA,
- NA, NA),
- PINGROUP(103, NORTH, pci_e1, phase_flag5, reserved103, NA, NA, NA, NA,
- NA, NA),
- PINGROUP(104, NORTH, NA, reserved104, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(105, NORTH, uim2_data, qup13, qup_l4, NA, reserved105, NA, NA,
- NA, NA),
- PINGROUP(106, NORTH, uim2_clk, qup13, qup_l5, NA, reserved106, NA, NA,
- NA, NA),
- PINGROUP(107, NORTH, uim2_reset, qup13, qup_l6, reserved107, NA, NA,
- NA, NA, NA),
- PINGROUP(108, NORTH, uim2_present, qup13, reserved108, NA, NA, NA, NA,
- NA, NA),
- PINGROUP(109, NORTH, uim1_data, reserved109, NA, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(110, NORTH, uim1_clk, reserved110, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(111, NORTH, uim1_reset, reserved111, NA, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(112, NORTH, uim1_present, reserved112, NA, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(113, NORTH, uim_batt, edp_hot, reserved113, NA, NA, NA, NA,
- NA, NA),
- PINGROUP(114, NORTH, NA, nav_pps, nav_pps, NA, NA, reserved114, NA, NA,
- NA),
- PINGROUP(115, NORTH, NA, nav_pps, nav_pps, NA, NA, reserved115, NA, NA,
- NA),
- PINGROUP(116, NORTH, NA, reserved116, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(117, NORTH, NA, qdss_gpio0, atest_char, reserved117, NA, NA,
- NA, NA, NA),
- PINGROUP(118, NORTH, adsp_ext, NA, qdss_gpio1, atest_char3,
- reserved118, NA, NA, NA, NA),
- PINGROUP(119, NORTH, NA, qdss_gpio2, atest_char2, reserved119, NA, NA,
- NA, NA, NA),
- PINGROUP(120, NORTH, NA, qdss_gpio3, atest_char1, reserved120, NA, NA,
- NA, NA, NA),
- PINGROUP(121, NORTH, NA, qdss_gpio4, atest_char0, reserved121, NA, NA,
- NA, NA, NA),
- PINGROUP(122, NORTH, NA, qdss_gpio5, reserved122, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(123, NORTH, qup_l4, NA, qdss_gpio, reserved123, NA, NA, NA,
- NA, NA),
- PINGROUP(124, NORTH, qup_l5, NA, qdss_gpio, reserved124, NA, NA, NA,
- NA, NA),
- PINGROUP(125, NORTH, qup_l6, NA, reserved125, NA, NA, NA, NA, NA, NA),
- PINGROUP(126, NORTH, NA, reserved126, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(127, NORTH, NA, reserved127, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(128, NORTH, nav_pps, nav_pps, NA, NA, reserved128, NA, NA, NA,
- NA),
- PINGROUP(129, NORTH, nav_pps, nav_pps, NA, NA, reserved129, NA, NA, NA,
- NA),
- PINGROUP(130, NORTH, qlink_request, NA, reserved130, NA, NA, NA, NA,
- NA, NA),
- PINGROUP(131, NORTH, qlink_enable, NA, reserved131, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(132, NORTH, NA, NA, reserved132, NA, NA, NA, NA, NA, NA),
- PINGROUP(133, NORTH, NA, reserved133, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(134, NORTH, NA, reserved134, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(135, NORTH, NA, pa_indicator, NA, reserved135, NA, NA, NA, NA,
- NA),
- PINGROUP(136, NORTH, NA, reserved136, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(137, NORTH, NA, NA, phase_flag26, reserved137, NA, NA, NA, NA,
- NA),
- PINGROUP(138, NORTH, NA, NA, phase_flag27, reserved138, NA, NA, NA, NA,
- NA),
- PINGROUP(139, NORTH, NA, phase_flag28, reserved139, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(140, NORTH, NA, NA, phase_flag6, reserved140, NA, NA, NA, NA,
- NA),
- PINGROUP(141, NORTH, NA, phase_flag29, reserved141, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(142, NORTH, NA, phase_flag30, reserved142, NA, NA, NA, NA, NA,
- NA),
- PINGROUP(143, NORTH, NA, nav_pps, nav_pps, NA, phase_flag31,
- reserved143, NA, NA, NA),
- PINGROUP(144, NORTH, mss_lte, reserved144, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(145, NORTH, mss_lte, NA, reserved145, NA, NA, NA, NA, NA, NA),
- PINGROUP(146, NORTH, NA, NA, reserved146, NA, NA, NA, NA, NA, NA),
- PINGROUP(147, NORTH, NA, NA, reserved147, NA, NA, NA, NA, NA, NA),
- PINGROUP(148, NORTH, NA, reserved148, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(149, NORTH, NA, reserved149, NA, NA, NA, NA, NA, NA, NA),
- SDC_QDSD_PINGROUP(sdc2_clk, 0x99a000, 14, 6),
- SDC_QDSD_PINGROUP(sdc2_cmd, 0x99a000, 11, 3),
- SDC_QDSD_PINGROUP(sdc2_data, 0x99a000, 9, 0),
- UFS_RESET(ufs_reset, 0x99f000),
+ [0] = PINGROUP(0, qup0, NA, NA, NA, NA, NA, NA, NA, NA),
+ [1] = PINGROUP(1, qup0, NA, NA, NA, NA, NA, NA, NA, NA),
+ [2] = PINGROUP(2, qup0, NA, NA, NA, NA, NA, NA, NA, NA),
+ [3] = PINGROUP(3, qup0, NA, NA, NA, NA, NA, NA, NA, NA),
+ [4] = PINGROUP(4, qup9, qdss_cti, NA, NA, NA, NA, NA, NA, NA),
+ [5] = PINGROUP(5, qup9, qdss_cti, NA, NA, NA, NA, NA, NA, NA),
+ [6] = PINGROUP(6, qup9, NA, ddr_pxi0, NA, NA, NA, NA, NA, NA),
+ [7] = PINGROUP(7, qup9, ddr_bist, NA, atest_tsens2,
+ vsense_trigger, atest_usb1, ddr_pxi0, NA, NA),
+ [8] = PINGROUP(8, qup_l4, NA, ddr_bist, NA, NA, wlan1_adc1,
+ atest_usb13, ddr_pxi1, NA),
+ [9] = PINGROUP(9, qup_l5, ddr_bist, NA, wlan1_adc0, atest_usb12,
+ ddr_pxi1, NA, NA, NA),
+ [10] = PINGROUP(10, mdp_vsync, qup_l6, ddr_bist, wlan2_adc1,
+ atest_usb11, ddr_pxi2, NA, NA, NA),
+ [11] = PINGROUP(11, mdp_vsync, edp_lcd, dbg_out, wlan2_adc0,
+ atest_usb10, ddr_pxi2, NA, NA, NA),
+ [12] = PINGROUP(12, mdp_vsync, m_voc, tsif1_sync, ddr_pxi3, NA,
+ NA, NA, NA, NA),
+ [13] = PINGROUP(13, cam_mclk, pll_bypassnl, qdss_gpio0,
+ ddr_pxi3, NA, NA, NA, NA, NA),
+ [14] = PINGROUP(14, cam_mclk, pll_reset, qdss_gpio1, NA, NA, NA,
+ NA, NA, NA),
+ [15] = PINGROUP(15, cam_mclk, qdss_gpio2, NA, NA, NA, NA, NA,
+ NA, NA),
+ [16] = PINGROUP(16, cam_mclk, qdss_gpio3, NA, NA, NA, NA, NA,
+ NA, NA),
+ [17] = PINGROUP(17, cci_i2c, qup1, qdss_gpio4, NA, NA, NA, NA,
+ NA, NA),
+ [18] = PINGROUP(18, cci_i2c, qup1, NA, qdss_gpio5, NA, NA, NA,
+ NA, NA),
+ [19] = PINGROUP(19, cci_i2c, qup1, NA, qdss_gpio6, NA, NA, NA,
+ NA, NA),
+ [20] = PINGROUP(20, cci_i2c, qup1, NA, qdss_gpio7, NA, NA, NA,
+ NA, NA),
+ [21] = PINGROUP(21, cci_timer0, gcc_gp2, qdss_gpio8, NA, NA, NA,
+ NA, NA, NA),
+ [22] = PINGROUP(22, cci_timer1, gcc_gp3, qdss_gpio, NA, NA, NA,
+ NA, NA, NA),
+ [23] = PINGROUP(23, cci_timer2, qdss_gpio9, NA, NA, NA, NA, NA,
+ NA, NA),
+ [24] = PINGROUP(24, cci_timer3, cci_async, qdss_gpio10, NA, NA,
+ NA, NA, NA, NA),
+ [25] = PINGROUP(25, cci_timer4, cci_async, qdss_gpio11, NA, NA,
+ NA, NA, NA, NA),
+ [26] = PINGROUP(26, cci_async, qdss_gpio12, NA, NA, NA, NA, NA,
+ NA, NA),
+ [27] = PINGROUP(27, qup2, qdss_gpio13, NA, NA, NA, NA, NA, NA,
+ NA),
+ [28] = PINGROUP(28, qup2, qdss_gpio14, NA, NA, NA, NA, NA, NA,
+ NA),
+ [29] = PINGROUP(29, qup2, NA, phase_flag1, qdss_gpio15, NA, NA,
+ NA, NA, NA),
+ [30] = PINGROUP(30, qup2, phase_flag2, qdss_gpio, NA, NA, NA, NA,
+ NA, NA),
+ [31] = PINGROUP(31, qup11, qup14, NA, NA, NA, NA, NA, NA, NA),
+ [32] = PINGROUP(32, qup11, qup14, NA, NA, NA, NA, NA, NA, NA),
+ [33] = PINGROUP(33, qup11, qup14, NA, NA, NA, NA, NA, NA, NA),
+ [34] = PINGROUP(34, qup11, qup14, NA, NA, NA, NA, NA, NA, NA),
+ [35] = PINGROUP(35, pci_e0, qup_l4, jitter_bist, NA, NA, NA, NA,
+ NA, NA),
+ [36] = PINGROUP(36, pci_e0, qup_l5, pll_bist, NA, atest_tsens,
+ NA, NA, NA, NA),
+ [37] = PINGROUP(37, qup_l6, agera_pll, NA, NA, NA, NA, NA, NA,
+ NA),
+ [38] = PINGROUP(38, usb_phy, NA, NA, NA, NA, NA, NA, NA, NA),
+ [39] = PINGROUP(39, lpass_slimbus, NA, NA, NA, NA, NA, NA, NA,
+ NA),
+ [40] = PINGROUP(40, sd_write, tsif1_error, NA, NA, NA, NA, NA,
+ NA, NA),
+ [41] = PINGROUP(41, qup3, NA, qdss_gpio6, NA, NA, NA, NA, NA, NA),
+ [42] = PINGROUP(42, qup3, NA, qdss_gpio7, NA, NA, NA, NA, NA, NA),
+ [43] = PINGROUP(43, qup3, NA, qdss_gpio14, NA, NA, NA, NA, NA,
+ NA),
+ [44] = PINGROUP(44, qup3, NA, qdss_gpio15, NA, NA, NA, NA, NA,
+ NA),
+ [45] = PINGROUP(45, qup6, NA, NA, NA, NA, NA, NA, NA, NA),
+ [46] = PINGROUP(46, qup6, NA, NA, NA, NA, NA, NA, NA, NA),
+ [47] = PINGROUP(47, qup6, NA, NA, NA, NA, NA, NA, NA, NA),
+ [48] = PINGROUP(48, qup6, NA, NA, NA, NA, NA, NA, NA, NA),
+ [49] = PINGROUP(49, qup12, NA, NA, NA, NA, NA, NA, NA, NA),
+ [50] = PINGROUP(50, qup12, NA, NA, NA, NA, NA, NA, NA, NA),
+ [51] = PINGROUP(51, qup12, qdss_cti, NA, NA, NA, NA, NA, NA, NA),
+ [52] = PINGROUP(52, qup12, phase_flag16, qdss_cti, NA, NA, NA,
+ NA, NA, NA),
+ [53] = PINGROUP(53, qup10, phase_flag11, NA, NA, NA, NA, NA, NA,
+ NA),
+ [54] = PINGROUP(54, qup10, NA, phase_flag12, NA, NA, NA, NA, NA,
+ NA),
+ [55] = PINGROUP(55, qup10, phase_flag13, NA, NA, NA, NA, NA, NA,
+ NA),
+ [56] = PINGROUP(56, qup10, phase_flag17, NA, NA, NA, NA, NA, NA,
+ NA),
+ [57] = PINGROUP(57, qua_mi2s, gcc_gp1, phase_flag18, NA, NA, NA,
+ NA, NA, NA),
+ [58] = PINGROUP(58, qua_mi2s, gcc_gp2, phase_flag19, NA, NA, NA,
+ NA, NA, NA),
+ [59] = PINGROUP(59, qua_mi2s, gcc_gp3, phase_flag20, NA, NA, NA,
+ NA, NA, NA),
+ [60] = PINGROUP(60, qua_mi2s, cri_trng0, phase_flag21, NA, NA,
+ NA, NA, NA, NA),
+ [61] = PINGROUP(61, qua_mi2s, cri_trng1, phase_flag22, NA, NA,
+ NA, NA, NA, NA),
+ [62] = PINGROUP(62, qua_mi2s, cri_trng, phase_flag23, qdss_cti,
+ NA, NA, NA, NA, NA),
+ [63] = PINGROUP(63, qua_mi2s, NA, phase_flag24, qdss_cti, NA,
+ NA, NA, NA, NA),
+ [64] = PINGROUP(64, pri_mi2s, sp_cmu, phase_flag25, NA, NA, NA,
+ NA, NA, NA),
+ [65] = PINGROUP(65, pri_mi2s, qup8, NA, NA, NA, NA, NA, NA, NA),
+ [66] = PINGROUP(66, pri_mi2s_ws, qup8, NA, NA, NA, NA, NA, NA,
+ NA),
+ [67] = PINGROUP(67, pri_mi2s, qup8, NA, NA, NA, NA, NA, NA, NA),
+ [68] = PINGROUP(68, pri_mi2s, qup8, NA, NA, NA, NA, NA, NA, NA),
+ [69] = PINGROUP(69, spkr_i2s, audio_ref, NA, NA, NA, NA, NA, NA,
+ NA),
+ [70] = PINGROUP(70, lpass_slimbus, spkr_i2s, NA, NA, NA, NA, NA,
+ NA, NA),
+ [71] = PINGROUP(71, lpass_slimbus, spkr_i2s, tsense_pwm1,
+ tsense_pwm2, NA, NA, NA, NA, NA),
+ [72] = PINGROUP(72, lpass_slimbus, spkr_i2s, NA, NA, NA, NA, NA,
+ NA, NA),
+ [73] = PINGROUP(73, btfm_slimbus, atest_usb2, NA, NA, NA, NA, NA,
+ NA, NA),
+ [74] = PINGROUP(74, btfm_slimbus, ter_mi2s, phase_flag7,
+ atest_usb23, NA, NA, NA, NA, NA),
+ [75] = PINGROUP(75, ter_mi2s, phase_flag8, qdss_gpio8,
+ atest_usb22, NA, NA, NA, NA, NA),
+ [76] = PINGROUP(76, ter_mi2s, phase_flag9, qdss_gpio9,
+ atest_usb21, NA, NA, NA, NA, NA),
+ [77] = PINGROUP(77, ter_mi2s, phase_flag4, qdss_gpio10,
+ atest_usb20, NA, NA, NA, NA, NA),
+ [78] = PINGROUP(78, ter_mi2s, gcc_gp1, NA, NA, NA, NA, NA, NA,
+ NA),
+ [79] = PINGROUP(79, sec_mi2s, NA, NA, qdss_gpio11, NA, NA, NA,
+ NA, NA),
+ [80] = PINGROUP(80, sec_mi2s, NA, qdss_gpio12, NA, NA, NA, NA,
+ NA, NA),
+ [81] = PINGROUP(81, sec_mi2s, qup15, NA, NA, NA, NA, NA, NA, NA),
+ [82] = PINGROUP(82, sec_mi2s, qup15, NA, NA, NA, NA, NA, NA, NA),
+ [83] = PINGROUP(83, sec_mi2s, qup15, NA, NA, NA, NA, NA, NA, NA),
+ [84] = PINGROUP(84, qup15, NA, NA, NA, NA, NA, NA, NA, NA),
+ [85] = PINGROUP(85, qup5, NA, NA, NA, NA, NA, NA, NA, NA),
+ [86] = PINGROUP(86, qup5, NA, NA, NA, NA, NA, NA, NA, NA),
+ [87] = PINGROUP(87, qup5, NA, NA, NA, NA, NA, NA, NA, NA),
+ [88] = PINGROUP(88, qup5, NA, NA, NA, NA, NA, NA, NA, NA),
+ [89] = PINGROUP(89, tsif1_clk, qup4, tgu_ch3, phase_flag10, NA,
+ NA, NA, NA, NA),
+ [90] = PINGROUP(90, tsif1_en, mdp_vsync0, qup4, mdp_vsync1,
+ mdp_vsync2, mdp_vsync3, tgu_ch0, phase_flag0, qdss_cti),
+ [91] = PINGROUP(91, tsif1_data, sdc4_cmd, qup4, tgu_ch1, NA,
+ qdss_cti, NA, NA, NA),
+ [92] = PINGROUP(92, tsif2_error, sdc43, qup4, vfr_1, tgu_ch2,
+ NA, NA, NA, NA),
+ [93] = PINGROUP(93, tsif2_clk, sdc4_clk, qup7, NA, qdss_gpio13,
+ NA, NA, NA, NA),
+ [94] = PINGROUP(94, tsif2_en, sdc42, qup7, NA, NA, NA, NA, NA,
+ NA),
+ [95] = PINGROUP(95, tsif2_data, sdc41, qup7, NA, NA, NA, NA, NA,
+ NA),
+ [96] = PINGROUP(96, tsif2_sync, sdc40, qup7, phase_flag3, NA,
+ NA, NA, NA, NA),
+ [97] = PINGROUP(97, NA, NA, mdp_vsync, ldo_en, NA, NA, NA, NA,
+ NA),
+ [98] = PINGROUP(98, NA, mdp_vsync, ldo_update, NA, NA, NA, NA,
+ NA, NA),
+ [99] = PINGROUP(99, phase_flag14, NA, NA, NA, NA, NA, NA, NA,
+ NA),
+ [100] = PINGROUP(100, phase_flag15, NA, NA, NA, NA, NA, NA, NA,
+ NA),
+ [101] = PINGROUP(101, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [102] = PINGROUP(102, pci_e1, prng_rosc, NA, NA, NA, NA, NA, NA,
+ NA),
+ [103] = PINGROUP(103, pci_e1, phase_flag5, NA, NA, NA, NA, NA,
+ NA, NA),
+ [104] = PINGROUP(104, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [105] = PINGROUP(105, uim2_data, qup13, qup_l4, NA, NA, NA, NA,
+ NA, NA),
+ [106] = PINGROUP(106, uim2_clk, qup13, qup_l5, NA, NA, NA, NA,
+ NA, NA),
+ [107] = PINGROUP(107, uim2_reset, qup13, qup_l6, NA, NA, NA, NA,
+ NA, NA),
+ [108] = PINGROUP(108, uim2_present, qup13, NA, NA, NA, NA, NA,
+ NA, NA),
+ [109] = PINGROUP(109, uim1_data, NA, NA, NA, NA, NA, NA, NA, NA),
+ [110] = PINGROUP(110, uim1_clk, NA, NA, NA, NA, NA, NA, NA, NA),
+ [111] = PINGROUP(111, uim1_reset, NA, NA, NA, NA, NA, NA, NA,
+ NA),
+ [112] = PINGROUP(112, uim1_present, NA, NA, NA, NA, NA, NA, NA,
+ NA),
+ [113] = PINGROUP(113, uim_batt, edp_hot, NA, NA, NA, NA, NA, NA,
+ NA),
+ [114] = PINGROUP(114, NA, nav_pps, nav_pps, NA, NA, NA, NA, NA,
+ NA),
+ [115] = PINGROUP(115, NA, nav_pps, nav_pps, NA, NA, NA, NA, NA,
+ NA),
+ [116] = PINGROUP(116, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [117] = PINGROUP(117, NA, qdss_gpio0, atest_char, NA, NA, NA,
+ NA, NA, NA),
+ [118] = PINGROUP(118, adsp_ext, NA, qdss_gpio1, atest_char3, NA,
+ NA, NA, NA, NA),
+ [119] = PINGROUP(119, NA, qdss_gpio2, atest_char2, NA, NA, NA,
+ NA, NA, NA),
+ [120] = PINGROUP(120, NA, qdss_gpio3, atest_char1, NA, NA, NA,
+ NA, NA, NA),
+ [121] = PINGROUP(121, NA, qdss_gpio4, atest_char0, NA, NA, NA,
+ NA, NA, NA),
+ [122] = PINGROUP(122, NA, qdss_gpio5, NA, NA, NA, NA, NA, NA, NA),
+ [123] = PINGROUP(123, qup_l4, NA, qdss_gpio, NA, NA, NA, NA, NA,
+ NA),
+ [124] = PINGROUP(124, qup_l5, NA, qdss_gpio, NA, NA, NA, NA, NA,
+ NA),
+ [125] = PINGROUP(125, qup_l6, NA, NA, NA, NA, NA, NA, NA, NA),
+ [126] = PINGROUP(126, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [127] = PINGROUP(127, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [128] = PINGROUP(128, nav_pps, nav_pps, NA, NA, NA, NA, NA, NA,
+ NA),
+ [129] = PINGROUP(129, nav_pps, nav_pps, NA, NA, NA, NA, NA, NA,
+ NA),
+ [130] = PINGROUP(130, qlink_request, NA, NA, NA, NA, NA, NA, NA,
+ NA),
+ [131] = PINGROUP(131, qlink_enable, NA, NA, NA, NA, NA, NA, NA,
+ NA),
+ [132] = PINGROUP(132, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [133] = PINGROUP(133, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [134] = PINGROUP(134, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [135] = PINGROUP(135, NA, pa_indicator, NA, NA, NA, NA, NA, NA,
+ NA),
+ [136] = PINGROUP(136, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [137] = PINGROUP(137, NA, NA, phase_flag26, NA, NA, NA, NA, NA,
+ NA),
+ [138] = PINGROUP(138, NA, NA, phase_flag27, NA, NA, NA, NA, NA,
+ NA),
+ [139] = PINGROUP(139, NA, phase_flag28, NA, NA, NA, NA, NA, NA,
+ NA),
+ [140] = PINGROUP(140, NA, NA, phase_flag6, NA, NA, NA, NA, NA,
+ NA),
+ [141] = PINGROUP(141, NA, phase_flag29, NA, NA, NA, NA, NA, NA,
+ NA),
+ [142] = PINGROUP(142, NA, phase_flag30, NA, NA, NA, NA, NA, NA,
+ NA),
+ [143] = PINGROUP(143, NA, nav_pps, nav_pps, NA, phase_flag31,
+ NA, NA, NA, NA),
+ [144] = PINGROUP(144, mss_lte, NA, NA, NA, NA, NA, NA, NA, NA),
+ [145] = PINGROUP(145, mss_lte, NA, NA, NA, NA, NA, NA, NA, NA),
+ [146] = PINGROUP(146, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [147] = PINGROUP(147, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [148] = PINGROUP(148, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [149] = PINGROUP(149, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ [150] = SDC_QDSD_PINGROUP(sdc2_clk, 0x99a000, 14, 6),
+ [151] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x99a000, 11, 3),
+ [152] = SDC_QDSD_PINGROUP(sdc2_data, 0x99a000, 9, 0),
+ [153] = UFS_RESET(ufs_reset, 0x99f000),
};
static const struct msm_dir_conn sdm845_dir_conn[] = {
@@ -2465,6 +1714,10 @@
.ngpios = 150,
.dir_conn = sdm845_dir_conn,
.n_dir_conns = ARRAY_SIZE(sdm845_dir_conn),
+ .tile_offsets = sdm845_tile_offsets,
+ .n_tile_offsets = ARRAY_SIZE(sdm845_tile_offsets),
+ .pin_base = sdm845_pin_base,
+ .reg_size = REG_SIZE,
};
static int sdm845_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index cf80ce1..4a5a0fe 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -586,6 +586,9 @@
ret = info->ops->init(pfc);
if (ret < 0)
return ret;
+
+ /* .init() may have overridden pfc->info */
+ info = pfc->info;
}
/* Enable dummy states for those platforms without pinctrl support */
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
index 7ca37c3..baa98d7 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
@@ -203,7 +203,7 @@
/* IPSR6 */
FN_AUDIO_CLKB, FN_STP_OPWM_0_B, FN_MSIOF1_SCK_B,
- FN_SCIF_CLK, FN_BPFCLK_E,
+ FN_SCIF_CLK, FN_DVC_MUTE, FN_BPFCLK_E,
FN_AUDIO_CLKC, FN_SCIFB0_SCK_C, FN_MSIOF1_SYNC_B, FN_RX2,
FN_SCIFA2_RXD, FN_FMIN_E,
FN_AUDIO_CLKOUT, FN_MSIOF1_SS1_B, FN_TX2, FN_SCIFA2_TXD,
@@ -573,7 +573,7 @@
/* IPSR6 */
AUDIO_CLKB_MARK, STP_OPWM_0_B_MARK, MSIOF1_SCK_B_MARK,
- SCIF_CLK_MARK, BPFCLK_E_MARK,
+ SCIF_CLK_MARK, DVC_MUTE_MARK, BPFCLK_E_MARK,
AUDIO_CLKC_MARK, SCIFB0_SCK_C_MARK, MSIOF1_SYNC_B_MARK, RX2_MARK,
SCIFA2_RXD_MARK, FMIN_E_MARK,
AUDIO_CLKOUT_MARK, MSIOF1_SS1_B_MARK, TX2_MARK, SCIFA2_TXD_MARK,
@@ -1010,14 +1010,17 @@
PINMUX_IPSR_MSEL(IP4_12_10, SCL2, SEL_IIC2_0),
PINMUX_IPSR_MSEL(IP4_12_10, GPS_CLK_B, SEL_GPS_1),
PINMUX_IPSR_MSEL(IP4_12_10, GLO_Q0_D, SEL_GPS_3),
+ PINMUX_IPSR_MSEL(IP4_12_10, HSCK1_E, SEL_HSCIF1_4),
PINMUX_IPSR_GPSR(IP4_15_13, SSI_WS2),
PINMUX_IPSR_MSEL(IP4_15_13, SDA2, SEL_IIC2_0),
PINMUX_IPSR_MSEL(IP4_15_13, GPS_SIGN_B, SEL_GPS_1),
PINMUX_IPSR_MSEL(IP4_15_13, RX2_E, SEL_SCIF2_4),
PINMUX_IPSR_MSEL(IP4_15_13, GLO_Q1_D, SEL_GPS_3),
+ PINMUX_IPSR_MSEL(IP4_15_13, HCTS1_N_E, SEL_HSCIF1_4),
PINMUX_IPSR_GPSR(IP4_18_16, SSI_SDATA2),
PINMUX_IPSR_MSEL(IP4_18_16, GPS_MAG_B, SEL_GPS_1),
PINMUX_IPSR_MSEL(IP4_18_16, TX2_E, SEL_SCIF2_4),
+ PINMUX_IPSR_MSEL(IP4_18_16, HRTS1_N_E, SEL_HSCIF1_4),
PINMUX_IPSR_GPSR(IP4_19, SSI_SCK34),
PINMUX_IPSR_GPSR(IP4_20, SSI_WS34),
PINMUX_IPSR_GPSR(IP4_21, SSI_SDATA3),
@@ -1090,6 +1093,7 @@
PINMUX_IPSR_MSEL(IP6_2_0, STP_OPWM_0_B, SEL_SSP_1),
PINMUX_IPSR_MSEL(IP6_2_0, MSIOF1_SCK_B, SEL_SOF1_1),
PINMUX_IPSR_MSEL(IP6_2_0, SCIF_CLK, SEL_SCIF_0),
+ PINMUX_IPSR_GPSR(IP6_2_0, DVC_MUTE),
PINMUX_IPSR_MSEL(IP6_2_0, BPFCLK_E, SEL_FM_4),
PINMUX_IPSR_GPSR(IP6_5_3, AUDIO_CLKC),
PINMUX_IPSR_MSEL(IP6_5_3, SCIFB0_SCK_C, SEL_SCIFB_2),
@@ -1099,7 +1103,7 @@
PINMUX_IPSR_MSEL(IP6_5_3, FMIN_E, SEL_FM_4),
PINMUX_IPSR_GPSR(IP6_7_6, AUDIO_CLKOUT),
PINMUX_IPSR_MSEL(IP6_7_6, MSIOF1_SS1_B, SEL_SOF1_1),
- PINMUX_IPSR_MSEL(IP6_5_3, TX2, SEL_SCIF2_0),
+ PINMUX_IPSR_MSEL(IP6_7_6, TX2, SEL_SCIF2_0),
PINMUX_IPSR_MSEL(IP6_7_6, SCIFA2_TXD, SEL_SCIFA2_0),
PINMUX_IPSR_GPSR(IP6_9_8, IRQ0),
PINMUX_IPSR_MSEL(IP6_9_8, SCIFB1_RXD_D, SEL_SCIFB1_3),
@@ -5810,7 +5814,7 @@
0, 0,
/* IP6_2_0 [3] */
FN_AUDIO_CLKB, FN_STP_OPWM_0_B, FN_MSIOF1_SCK_B,
- FN_SCIF_CLK, 0, FN_BPFCLK_E,
+ FN_SCIF_CLK, FN_DVC_MUTE, FN_BPFCLK_E,
0, 0, }
},
{ PINMUX_CFG_REG_VAR("IPSR7", 0xE606003C, 32,
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
index ed734f56..ef093ac 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
@@ -281,8 +281,8 @@
FN_AVB_AVTP_CAPTURE, FN_ETH_CRS_DV_B, FN_SSI_WS1, FN_SCIF1_TXD_B,
FN_IIC1_SDA_C, FN_VI1_DATA0, FN_CAN0_TX_D, FN_AVB_AVTP_MATCH,
FN_ETH_RX_ER_B, FN_SSI_SDATA1, FN_HSCIF1_HRX_B, FN_SDATA, FN_VI1_DATA1,
- FN_ATAG0_N, FN_ETH_RXD0_B, FN_SSI_SCK2, FN_HSCIF1_HTX_B, FN_VI1_DATA2,
- FN_MDATA, FN_ATAWR0_N, FN_ETH_RXD1_B,
+ FN_ATAWR0_N, FN_ETH_RXD0_B, FN_SSI_SCK2, FN_HSCIF1_HTX_B, FN_VI1_DATA2,
+ FN_MDATA, FN_ATAG0_N, FN_ETH_RXD1_B,
/* IPSR13 */
FN_SSI_WS2, FN_HSCIF1_HCTS_N_B, FN_SCIFA0_RXD_D, FN_VI1_DATA3, FN_SCKZ,
@@ -575,8 +575,8 @@
ETH_CRS_DV_B_MARK, SSI_WS1_MARK, SCIF1_TXD_B_MARK, IIC1_SDA_C_MARK,
VI1_DATA0_MARK, CAN0_TX_D_MARK, AVB_AVTP_MATCH_MARK, ETH_RX_ER_B_MARK,
SSI_SDATA1_MARK, HSCIF1_HRX_B_MARK, VI1_DATA1_MARK, SDATA_MARK,
- ATAG0_N_MARK, ETH_RXD0_B_MARK, SSI_SCK2_MARK, HSCIF1_HTX_B_MARK,
- VI1_DATA2_MARK, MDATA_MARK, ATAWR0_N_MARK, ETH_RXD1_B_MARK,
+ ATAWR0_N_MARK, ETH_RXD0_B_MARK, SSI_SCK2_MARK, HSCIF1_HTX_B_MARK,
+ VI1_DATA2_MARK, MDATA_MARK, ATAG0_N_MARK, ETH_RXD1_B_MARK,
/* IPSR13 */
SSI_WS2_MARK, HSCIF1_HCTS_N_B_MARK, SCIFA0_RXD_D_MARK, VI1_DATA3_MARK,
@@ -1413,13 +1413,13 @@
PINMUX_IPSR_MSEL(IP12_26_24, HSCIF1_HRX_B, SEL_HSCIF1_1),
PINMUX_IPSR_GPSR(IP12_26_24, VI1_DATA1),
PINMUX_IPSR_MSEL(IP12_26_24, SDATA, SEL_FSN_0),
- PINMUX_IPSR_GPSR(IP12_26_24, ATAG0_N),
+ PINMUX_IPSR_GPSR(IP12_26_24, ATAWR0_N),
PINMUX_IPSR_MSEL(IP12_26_24, ETH_RXD0_B, SEL_ETH_1),
PINMUX_IPSR_MSEL(IP12_29_27, SSI_SCK2, SEL_SSI2_0),
PINMUX_IPSR_MSEL(IP12_29_27, HSCIF1_HTX_B, SEL_HSCIF1_1),
PINMUX_IPSR_GPSR(IP12_29_27, VI1_DATA2),
PINMUX_IPSR_MSEL(IP12_29_27, MDATA, SEL_FSN_0),
- PINMUX_IPSR_GPSR(IP12_29_27, ATAWR0_N),
+ PINMUX_IPSR_GPSR(IP12_29_27, ATAG0_N),
PINMUX_IPSR_MSEL(IP12_29_27, ETH_RXD1_B, SEL_ETH_1),
/* IPSR13 */
@@ -4938,10 +4938,10 @@
0, 0, 0, 0,
/* IP12_29_27 [3] */
FN_SSI_SCK2, FN_HSCIF1_HTX_B, FN_VI1_DATA2, FN_MDATA,
- FN_ATAWR0_N, FN_ETH_RXD1_B, 0, 0,
+ FN_ATAG0_N, FN_ETH_RXD1_B, 0, 0,
/* IP12_26_24 [3] */
FN_SSI_SDATA1, FN_HSCIF1_HRX_B, FN_VI1_DATA1, FN_SDATA,
- FN_ATAG0_N, FN_ETH_RXD0_B, 0, 0,
+ FN_ATAWR0_N, FN_ETH_RXD0_B, 0, 0,
/* IP12_23_21 [3] */
FN_SSI_WS1, FN_SCIF1_TXD_B, FN_IIC1_SDA_C, FN_VI1_DATA0,
FN_CAN0_TX_D, FN_AVB_AVTP_MATCH, FN_ETH_RX_ER_B, 0,
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
index 84cee66..0acb0a7 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
@@ -2056,7 +2056,7 @@
RCAR_GP_PIN(6, 21),
};
static const unsigned int hscif2_clk_b_mux[] = {
- HSCK1_B_MARK,
+ HSCK2_B_MARK,
};
static const unsigned int hscif2_ctrl_b_pins[] = {
/* RTS, CTS */
@@ -2129,7 +2129,7 @@
RCAR_GP_PIN(1, 15), RCAR_GP_PIN(1, 14),
};
static const unsigned int hscif4_ctrl_mux[] = {
- HRTS4_N_MARK, HCTS3_N_MARK,
+ HRTS4_N_MARK, HCTS4_N_MARK,
};
static const unsigned int hscif4_data_b_pins[] = {
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index efc4371..a36fd4b 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -798,7 +798,7 @@
break;
case PIN_CONFIG_OUTPUT:
__stm32_gpio_set(bank, offset, arg);
- ret = stm32_pmx_gpio_set_direction(pctldev, NULL, pin, false);
+ ret = stm32_pmx_gpio_set_direction(pctldev, range, pin, false);
break;
default:
ret = -EINVAL;
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
index 90b973e..a7c81e9 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
@@ -394,7 +394,7 @@
SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 18),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x3, "owa")), /* DOUT */
+ SUNXI_FUNCTION(0x3, "spdif")), /* DOUT */
SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 19),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out")),
diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig
index 1946204..e5fe6ba 100644
--- a/drivers/platform/msm/Kconfig
+++ b/drivers/platform/msm/Kconfig
@@ -56,6 +56,24 @@
for RmNet Data Driver and also exchange of QMI messages between
A7 and Q6 IPA-driver.
+config ECM_IPA
+ tristate "STD ECM LAN Driver support"
+ depends on IPA || IPA3
+ help
+ Enables LAN between applications processor and a tethered
+ host using the STD ECM protocol.
+ This Network interface is aimed to allow data path go through
+ IPA core while using STD ECM protocol.
+
+config RNDIS_IPA
+ tristate "RNDIS_IPA Network Interface Driver support"
+ depends on IPA || IPA3
+ help
+ Enables LAN between applications processor and a tethered
+ host using the RNDIS protocol.
+ This Network interface is aimed to allow data path go through
+ IPA core while using RNDIS protocol.
+
config IPA_UT
tristate "IPA Unit-Test Framework and Test Suites"
depends on IPA3 && DEBUG_FS
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index 38264d9..4a9232e 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -172,6 +172,7 @@
__stringify(IPA_CLIENT_TEST3_CONS),
__stringify(IPA_CLIENT_TEST4_PROD),
__stringify(IPA_CLIENT_TEST4_CONS),
+ __stringify(IPA_CLIENT_DUMMY_CONS),
};
/**
@@ -421,7 +422,7 @@
/**
* ipa_cfg_ep_nat() - IPA end-point NAT configuration
* @clnt_hdl: [in] opaque client handle assigned by IPA to client
- * @ipa_ep_cfg: [in] IPA end-point configuration params
+ * @ep_nat: [in] IPA NAT end-point configuration params
*
* Returns: 0 on success, negative on failure
*
@@ -438,6 +439,27 @@
EXPORT_SYMBOL(ipa_cfg_ep_nat);
/**
+* ipa_cfg_ep_conn_track() - IPA end-point IPv6CT configuration
+* @clnt_hdl: [in] opaque client handle assigned by IPA to client
+* @ep_conn_track: [in] IPA IPv6CT end-point configuration params
+*
+* Returns: 0 on success, negative on failure
+*
+* Note: Should not be called from atomic context
+*/
+int ipa_cfg_ep_conn_track(u32 clnt_hdl,
+ const struct ipa_ep_cfg_conn_track *ep_conn_track)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_cfg_ep_conn_track, clnt_hdl,
+ ep_conn_track);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_conn_track);
+
+/**
* ipa_cfg_ep_hdr() - IPA end-point header configuration
* @clnt_hdl: [in] opaque client handle assigned by IPA to client
* @ipa_ep_cfg: [in] IPA end-point configuration params
@@ -2560,6 +2582,21 @@
EXPORT_SYMBOL(ipa_stop_gsi_channel);
/**
 + * ipa_start_gsi_channel() - Starts a GSI channel in IPA
+ *
+ * Return value: 0 on success, negative otherwise
+ */
+int ipa_start_gsi_channel(u32 clnt_hdl)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_start_gsi_channel, clnt_hdl);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_start_gsi_channel);
+
+/**
* ipa_get_version_string() - Get string representation of IPA version
* @ver: IPA version
*
@@ -2949,6 +2986,25 @@
}
EXPORT_SYMBOL(ipa_get_pdev);
+int ipa_ntn_uc_reg_rdyCB(void (*ipauc_ready_cb)(void *user_data),
+ void *user_data)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_ntn_uc_reg_rdyCB,
+ ipauc_ready_cb, user_data);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_ntn_uc_reg_rdyCB);
+
+void ipa_ntn_uc_dereg_rdyCB(void)
+{
+ IPA_API_DISPATCH(ipa_ntn_uc_dereg_rdyCB);
+}
+EXPORT_SYMBOL(ipa_ntn_uc_dereg_rdyCB);
+
+
static const struct dev_pm_ops ipa_pm_ops = {
.suspend_noirq = ipa_ap_suspend,
.resume_noirq = ipa_ap_resume,
diff --git a/drivers/platform/msm/ipa/ipa_api.h b/drivers/platform/msm/ipa/ipa_api.h
index bfe1608..20471eb 100644
--- a/drivers/platform/msm/ipa/ipa_api.h
+++ b/drivers/platform/msm/ipa/ipa_api.h
@@ -34,6 +34,9 @@
int (*ipa_cfg_ep_nat)(u32 clnt_hdl,
const struct ipa_ep_cfg_nat *ipa_ep_cfg);
+ int (*ipa_cfg_ep_conn_track)(u32 clnt_hdl,
+ const struct ipa_ep_cfg_conn_track *ipa_ep_cfg);
+
int (*ipa_cfg_ep_hdr)(u32 clnt_hdl,
const struct ipa_ep_cfg_hdr *ipa_ep_cfg);
@@ -321,6 +324,8 @@
int (*ipa_stop_gsi_channel)(u32 clnt_hdl);
+ int (*ipa_start_gsi_channel)(u32 clnt_hdl);
+
struct iommu_domain *(*ipa_get_smmu_domain)(void);
int (*ipa_disable_apps_wan_cons_deaggr)(uint32_t agg_size,
@@ -375,6 +380,11 @@
int ipa_ep_idx_dl);
struct device *(*ipa_get_pdev)(void);
+
+ int (*ipa_ntn_uc_reg_rdyCB)(void (*ipauc_ready_cb)(void *user_data),
+ void *user_data);
+
+ void (*ipa_ntn_uc_dereg_rdyCB)(void);
};
#ifdef CONFIG_IPA
diff --git a/drivers/platform/msm/ipa/ipa_clients/Makefile b/drivers/platform/msm/ipa/ipa_clients/Makefile
index 61cef2d..61625f5 100644
--- a/drivers/platform/msm/ipa/ipa_clients/Makefile
+++ b/drivers/platform/msm/ipa/ipa_clients/Makefile
@@ -1,2 +1,4 @@
obj-$(CONFIG_IPA3) += ipa_usb.o odu_bridge.o ipa_mhi_client.o ipa_uc_offload.o
obj-$(CONFIG_IPA) += odu_bridge.o ipa_mhi_client.o ipa_uc_offload.o
+obj-$(CONFIG_ECM_IPA) += ecm_ipa.o
+obj-$(CONFIG_RNDIS_IPA) += rndis_ipa.o
diff --git a/drivers/net/ethernet/msm/ecm_ipa.c b/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c
similarity index 100%
rename from drivers/net/ethernet/msm/ecm_ipa.c
rename to drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
index 5aa39b6..9b3b53d 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
@@ -2046,6 +2046,8 @@
if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI)
ipa_mhi_update_host_ch_state(true);
+ return 0;
+
fail_stop_event_update_dl_channel:
ipa_mhi_resume_channels(true,
ipa_mhi_client_ctx->dl_channels);
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
index ae06d54..a15a9d8 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
@@ -58,7 +58,6 @@
IPA_UC_OFFLOAD_STATE_INVALID,
IPA_UC_OFFLOAD_STATE_INITIALIZED,
IPA_UC_OFFLOAD_STATE_UP,
- IPA_UC_OFFLOAD_STATE_DOWN,
};
struct ipa_uc_offload_ctx {
@@ -413,8 +412,7 @@
return -EINVAL;
}
- if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED &&
- offload_ctx->state != IPA_UC_OFFLOAD_STATE_DOWN) {
+ if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED) {
IPA_UC_OFFLOAD_ERR("Invalid state %d\n", offload_ctx->state);
return -EPERM;
}
@@ -471,7 +469,7 @@
int ipa_ep_idx_ul, ipa_ep_idx_dl;
int ret = 0;
- ntn_ctx->state = IPA_UC_OFFLOAD_STATE_DOWN;
+ ntn_ctx->state = IPA_UC_OFFLOAD_STATE_INITIALIZED;
ret = ipa_rm_release_resource(IPA_RM_RESOURCE_ETHERNET_PROD);
if (ret) {
@@ -597,7 +595,7 @@
return -EINVAL;
}
- if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_DOWN) {
+ if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED) {
IPA_UC_OFFLOAD_ERR("Invalid State %d\n", offload_ctx->state);
return -EINVAL;
}
@@ -622,3 +620,41 @@
return ret;
}
EXPORT_SYMBOL(ipa_uc_offload_cleanup);
+
+/**
 + * ipa_uc_offload_reg_rdyCB() - To register uC ready CB if uC not
 + * ready
 + * @inp: [in/out] input/output parameters
+ * from/to client
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ */
+int ipa_uc_offload_reg_rdyCB(struct ipa_uc_ready_params *inp)
+{
+ int ret = 0;
+
+ if (!inp) {
+ IPA_UC_OFFLOAD_ERR("Invalid input\n");
+ return -EINVAL;
+ }
+
+ if (inp->proto == IPA_UC_NTN)
+ ret = ipa_ntn_uc_reg_rdyCB(inp->notify, inp->priv);
+
+ if (ret == -EEXIST) {
+ inp->is_uC_ready = true;
+ ret = 0;
+ } else
+ inp->is_uC_ready = false;
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_uc_offload_reg_rdyCB);
+
+void ipa_uc_offload_dereg_rdyCB(enum ipa_uc_offload_proto proto)
+{
+ if (proto == IPA_UC_NTN)
+ ipa_ntn_uc_dereg_rdyCB();
+}
+EXPORT_SYMBOL(ipa_uc_offload_dereg_rdyCB);
diff --git a/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c b/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c
index 79da63e..a623d0b 100644
--- a/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c
+++ b/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -114,6 +114,7 @@
* @send_dl_skb: client callback for sending skb in downlink direction
* @stats: statistics, how many packets were transmitted using the SW bridge
* @is_conencted: is bridge connected ?
+ * @is_suspended: is bridge suspended ?
* @mode: ODU mode (router/bridge)
* @lock: for the initialization, connect and disconnect synchronization
* @llv6_addr: link local IPv6 address of ODU network interface
@@ -122,6 +123,8 @@
* @odu_prod_hdl: handle for IPA_CLIENT_ODU_PROD pipe
* @odu_emb_cons_hdl: handle for IPA_CLIENT_ODU_EMB_CONS pipe
* @odu_teth_cons_hdl: handle for IPA_CLIENT_ODU_TETH_CONS pipe
+ * @rm_comp: completion object for IP RM
+ * @wakeup_request: client callback to wakeup
*/
struct odu_bridge_ctx {
struct class *class;
@@ -135,6 +138,7 @@
int (*send_dl_skb)(void *priv, struct sk_buff *skb);
struct stats stats;
bool is_connected;
+ bool is_suspended;
enum odu_bridge_mode mode;
struct mutex lock;
struct in6_addr llv6_addr;
@@ -146,6 +150,8 @@
u32 ipa_sys_desc_size;
void *logbuf;
void *logbuf_low;
+ struct completion rm_comp;
+ void (*wakeup_request)(void *);
};
static struct odu_bridge_ctx *odu_bridge_ctx;
@@ -1246,6 +1252,288 @@
}
EXPORT_SYMBOL(odu_bridge_cleanup);
+/* IPA Bridge implementation */
+#ifdef CONFIG_IPA3
+
+static void ipa_br_rm_notify(void *user_data, enum ipa_rm_event event,
+ unsigned long data)
+{
+ if (event == IPA_RM_RESOURCE_GRANTED)
+ complete(&odu_bridge_ctx->rm_comp);
+}
+
+static int ipa_br_request_prod(void)
+{
+ int res;
+
+ ODU_BRIDGE_FUNC_ENTRY();
+
+ reinit_completion(&odu_bridge_ctx->rm_comp);
+ ODU_BRIDGE_DBG("requesting odu prod\n");
+ res = ipa_rm_request_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
+ if (res) {
+ if (res != -EINPROGRESS) {
+ ODU_BRIDGE_ERR("failed to request prod %d\n", res);
+ return res;
+ }
+ wait_for_completion(&odu_bridge_ctx->rm_comp);
+ }
+
+ ODU_BRIDGE_FUNC_EXIT();
+ return 0;
+
+}
+
+static int ipa_br_release_prod(void)
+{
+ int res;
+
+ ODU_BRIDGE_FUNC_ENTRY();
+
+ reinit_completion(&odu_bridge_ctx->rm_comp);
+ ODU_BRIDGE_DBG("requesting odu prod\n");
+ res = ipa_rm_release_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
+ if (res) {
+ ODU_BRIDGE_ERR("failed to release prod %d\n", res);
+ return res;
+ }
+
+ ODU_BRIDGE_FUNC_EXIT();
+ return 0;
+
+}
+
+static int ipa_br_cons_request(void)
+{
+ ODU_BRIDGE_FUNC_ENTRY();
+ if (odu_bridge_ctx->is_suspended)
+ odu_bridge_ctx->wakeup_request(odu_bridge_ctx->priv);
+ ODU_BRIDGE_FUNC_EXIT();
+ return 0;
+}
+
+static int ipa_br_cons_release(void)
+{
+ ODU_BRIDGE_FUNC_ENTRY();
+ ODU_BRIDGE_FUNC_EXIT();
+ return 0;
+}
+
 +/* IPA Bridge API is the new API which will replace the old odu_bridge API */
+int ipa_bridge_init(struct ipa_bridge_init_params *params, u32 *hdl)
+{
+ int ret;
+ struct ipa_rm_create_params create_params;
+
+ if (!params || !params->wakeup_request || !hdl) {
+ ODU_BRIDGE_ERR("NULL arg\n");
+ return -EINVAL;
+ }
+
+
+ ret = odu_bridge_init(¶ms->info);
+ if (ret)
+ return ret;
+
+ odu_bridge_ctx->wakeup_request = params->wakeup_request;
+
+ /* create IPA RM resources for power management */
+ init_completion(&odu_bridge_ctx->rm_comp);
+ memset(&create_params, 0, sizeof(create_params));
+ create_params.name = IPA_RM_RESOURCE_ODU_ADAPT_PROD;
+ create_params.reg_params.user_data = odu_bridge_ctx;
+ create_params.reg_params.notify_cb = ipa_br_rm_notify;
+ create_params.floor_voltage = IPA_VOLTAGE_SVS;
+ ret = ipa_rm_create_resource(&create_params);
+ if (ret) {
+ ODU_BRIDGE_ERR("failed to create RM prod %d\n", ret);
+ goto fail_rm_prod;
+ }
+
+ ret = ipa_rm_add_dependency_sync(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+ IPA_RM_RESOURCE_APPS_CONS);
+ if (ret) {
+ ODU_BRIDGE_ERR("failed to add ODU->APPS dependency %d\n", ret);
+ goto fail_add_dep;
+ }
+
+ memset(&create_params, 0, sizeof(create_params));
+ create_params.name = IPA_RM_RESOURCE_ODU_ADAPT_CONS;
+ create_params.request_resource = ipa_br_cons_request;
+ create_params.release_resource = ipa_br_cons_release;
+ create_params.floor_voltage = IPA_VOLTAGE_SVS;
+ ret = ipa_rm_create_resource(&create_params);
+ if (ret) {
+ ODU_BRIDGE_ERR("failed to create RM cons %d\n", ret);
+ goto fail_rm_cons;
+ }
+
+ /* handle is ignored for now */
+ *hdl = 0;
+
+ return 0;
+
+fail_rm_cons:
+ ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+ IPA_RM_RESOURCE_APPS_CONS);
+fail_add_dep:
+ ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
+fail_rm_prod:
+ odu_bridge_cleanup();
+ return ret;
+}
+EXPORT_SYMBOL(ipa_bridge_init);
+
+int ipa_bridge_connect(u32 hdl)
+{
+ int ret;
+
+ if (!odu_bridge_ctx) {
+ ODU_BRIDGE_ERR("Not initialized\n");
+ return -EFAULT;
+ }
+
+ if (odu_bridge_ctx->is_connected) {
+ ODU_BRIDGE_ERR("already connected\n");
+ return -EFAULT;
+ }
+
+ ret = ipa_br_request_prod();
+ if (ret)
+ return ret;
+
+ return odu_bridge_connect();
+}
+EXPORT_SYMBOL(ipa_bridge_connect);
+
+int ipa_bridge_set_perf_profile(u32 hdl, u32 bandwidth)
+{
+ struct ipa_rm_perf_profile profile = {0};
+ int ret;
+
+ profile.max_supported_bandwidth_mbps = bandwidth;
+ ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_ODU_ADAPT_PROD, &profile);
+ if (ret) {
+ ODU_BRIDGE_ERR("failed to set perf profile to prod %d\n", ret);
+ return ret;
+ }
+
+ ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_ODU_ADAPT_CONS, &profile);
+ if (ret) {
+ ODU_BRIDGE_ERR("failed to set perf profile to cons %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa_bridge_set_perf_profile);
+
+int ipa_bridge_disconnect(u32 hdl)
+{
+ int ret;
+
+ ret = odu_bridge_disconnect();
+ if (ret)
+ return ret;
+
+ ret = ipa_br_release_prod();
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa_bridge_disconnect);
+
+int ipa_bridge_suspend(u32 hdl)
+{
+ int ret;
+
+ if (!odu_bridge_ctx) {
+ ODU_BRIDGE_ERR("Not initialized\n");
+ return -EFAULT;
+ }
+
+ if (!odu_bridge_ctx->is_connected) {
+ ODU_BRIDGE_ERR("bridge is disconnected\n");
+ return -EFAULT;
+ }
+
+ if (odu_bridge_ctx->is_suspended) {
+ ODU_BRIDGE_ERR("bridge is already suspended\n");
+ return -EFAULT;
+ }
+
+ /* stop cons channel to prevent downlink data during suspend */
+ ret = ipa_stop_gsi_channel(odu_bridge_ctx->odu_emb_cons_hdl);
+ if (ret) {
+ ODU_BRIDGE_ERR("failed to stop CONS channel %d\n", ret);
+ return ret;
+ }
+
+ ret = ipa_br_release_prod();
+ if (ret) {
+ ODU_BRIDGE_ERR("failed to release prod %d\n", ret);
+ ipa_start_gsi_channel(odu_bridge_ctx->odu_emb_cons_hdl);
+ return ret;
+ }
+ odu_bridge_ctx->is_suspended = true;
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa_bridge_suspend);
+
+int ipa_bridge_resume(u32 hdl)
+{
+ int ret;
+
+ if (!odu_bridge_ctx) {
+ ODU_BRIDGE_ERR("Not initialized\n");
+ return -EFAULT;
+ }
+
+ if (!odu_bridge_ctx->is_connected) {
+ ODU_BRIDGE_ERR("bridge is disconnected\n");
+ return -EFAULT;
+ }
+
+ if (!odu_bridge_ctx->is_suspended) {
+ ODU_BRIDGE_ERR("bridge is not suspended\n");
+ return -EFAULT;
+ }
+
+ ret = ipa_br_request_prod();
+ if (ret)
+ return ret;
+
+ ret = ipa_start_gsi_channel(odu_bridge_ctx->odu_emb_cons_hdl);
+ if (ret) {
+ ODU_BRIDGE_ERR("failed to start CONS channel %d\n", ret);
+ return ret;
+ }
+ odu_bridge_ctx->is_suspended = false;
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa_bridge_resume);
+
+int ipa_bridge_tx_dp(u32 hdl, struct sk_buff *skb,
+ struct ipa_tx_meta *metadata)
+{
+ return odu_bridge_tx_dp(skb, metadata);
+}
+EXPORT_SYMBOL(ipa_bridge_tx_dp);
+
+int ipa_bridge_cleanup(u32 hdl)
+{
+ ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+ IPA_RM_RESOURCE_APPS_CONS);
+ ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
+ ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_CONS);
+ return odu_bridge_cleanup();
+}
+EXPORT_SYMBOL(ipa_bridge_cleanup);
+
+#endif /* CONFIG_IPA3 */
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("ODU bridge driver");
diff --git a/drivers/net/ethernet/msm/rndis_ipa.c b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c
similarity index 100%
rename from drivers/net/ethernet/msm/rndis_ipa.c
rename to drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c
diff --git a/drivers/net/ethernet/msm/rndis_ipa_trace.h b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa_trace.h
similarity index 100%
rename from drivers/net/ethernet/msm/rndis_ipa_trace.h
rename to drivers/platform/msm/ipa/ipa_clients/rndis_ipa_trace.h
diff --git a/drivers/platform/msm/ipa/ipa_common_i.h b/drivers/platform/msm/ipa/ipa_common_i.h
index 50804ee..32c8b25 100644
--- a/drivers/platform/msm/ipa/ipa_common_i.h
+++ b/drivers/platform/msm/ipa/ipa_common_i.h
@@ -26,7 +26,8 @@
log_info.file = __FILENAME__; \
log_info.line = __LINE__; \
log_info.type = EP; \
- log_info.id_string = ipa_clients_strings[client]
+ log_info.id_string = (client < 0 || client >= IPA_CLIENT_MAX) \
+ ? "Invalid Client" : ipa_clients_strings[client]
#define IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info) \
log_info.file = __FILENAME__; \
@@ -373,13 +374,16 @@
struct ipa_ntn_conn_out_params *outp);
int ipa_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl);
-
u8 *ipa_write_64(u64 w, u8 *dest);
u8 *ipa_write_32(u32 w, u8 *dest);
u8 *ipa_write_16(u16 hw, u8 *dest);
u8 *ipa_write_8(u8 b, u8 *dest);
u8 *ipa_pad_to_64(u8 *dest);
u8 *ipa_pad_to_32(u8 *dest);
+int ipa_ntn_uc_reg_rdyCB(void (*ipauc_ready_cb)(void *user_data),
+ void *user_data);
+void ipa_ntn_uc_dereg_rdyCB(void);
const char *ipa_get_version_string(enum ipa_hw_type ver);
+int ipa_start_gsi_channel(u32 clnt_hdl);
#endif /* _IPA_COMMON_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_uc_offload_common_i.h b/drivers/platform/msm/ipa/ipa_uc_offload_common_i.h
index ae6cfc4..0bc4b76 100644
--- a/drivers/platform/msm/ipa/ipa_uc_offload_common_i.h
+++ b/drivers/platform/msm/ipa/ipa_uc_offload_common_i.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -21,4 +21,7 @@
struct ipa_ntn_conn_out_params *outp);
int ipa_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl);
+int ipa_ntn_uc_reg_rdyCB(void (*ipauc_ready_cb)(void *user_data),
+ void *user_data);
+void ipa_ntn_uc_dereg_rdyCB(void);
#endif /* _IPA_UC_OFFLOAD_COMMON_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c
index 53ab299..bfd0446 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c
@@ -641,7 +641,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_nat_dma_cmd *)param)->entries
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_nat_dma_cmd *)param)->entries,
pre_entry);
retval = -EFAULT;
@@ -688,7 +688,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_add_hdr *)param)->num_hdrs
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_add_hdr *)param)->num_hdrs,
pre_entry);
retval = -EFAULT;
@@ -727,7 +727,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_del_hdr *)param)->num_hdls
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_del_hdr *)param)->num_hdls,
pre_entry);
retval = -EFAULT;
@@ -767,7 +767,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_add_rt_rule *)param)->num_rules
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_add_rt_rule *)param)->
num_rules,
pre_entry);
@@ -807,7 +807,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_mdfy_rt_rule *)param)->num_rules
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_mdfy_rt_rule *)param)->
num_rules,
pre_entry);
@@ -847,7 +847,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_del_rt_rule *)param)->num_hdls
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_del_rt_rule *)param)->num_hdls,
pre_entry);
retval = -EFAULT;
@@ -886,7 +886,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_add_flt_rule *)param)->num_rules
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_add_flt_rule *)param)->
num_rules,
pre_entry);
@@ -926,7 +926,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_del_flt_rule *)param)->num_hdls
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_del_flt_rule *)param)->
num_hdls,
pre_entry);
@@ -966,7 +966,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_mdfy_flt_rule *)param)->num_rules
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_mdfy_flt_rule *)param)->
num_rules,
pre_entry);
@@ -1104,7 +1104,7 @@
if (unlikely(((struct ipa_ioc_query_intf_tx_props *)
param)->num_tx_props
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_query_intf_tx_props *)
param)->num_tx_props, pre_entry);
retval = -EFAULT;
@@ -1149,7 +1149,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_query_intf_rx_props *)
param)->num_rx_props != pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_query_intf_rx_props *)
param)->num_rx_props, pre_entry);
retval = -EFAULT;
@@ -1194,7 +1194,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_query_intf_ext_props *)
param)->num_ext_props != pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_query_intf_ext_props *)
param)->num_ext_props, pre_entry);
retval = -EFAULT;
@@ -1232,7 +1232,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_msg_meta *)param)->msg_len
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_msg_meta *)param)->msg_len,
pre_entry);
retval = -EFAULT;
@@ -1372,7 +1372,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_add_hdr_proc_ctx *)
param)->num_proc_ctxs != pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_add_hdr_proc_ctx *)
param)->num_proc_ctxs, pre_entry);
retval = -EFAULT;
@@ -1411,7 +1411,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_del_hdr_proc_ctx *)
param)->num_hdls != pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_del_hdr_proc_ctx *)param)->
num_hdls,
pre_entry);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
index e8710a6..2b517a1 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
@@ -83,6 +83,10 @@
__stringify(IPA_QUOTA_REACH),
__stringify(IPA_SSR_BEFORE_SHUTDOWN),
__stringify(IPA_SSR_AFTER_POWERUP),
+ __stringify(ADD_VLAN_IFACE),
+ __stringify(DEL_VLAN_IFACE),
+ __stringify(ADD_L2TP_VLAN_MAPPING),
+ __stringify(DEL_L2TP_VLAN_MAPPING)
};
const char *ipa_hdr_l2_type_name[] = {
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
index 3418896..f7b0864 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
@@ -1015,25 +1015,25 @@
if (rule->action != IPA_PASS_TO_EXCEPTION) {
if (!rule->eq_attrib_type) {
if (!rule->rt_tbl_hdl) {
- IPAERR("invalid RT tbl\n");
+ IPAERR_RL("invalid RT tbl\n");
goto error;
}
rt_tbl = ipa_id_find(rule->rt_tbl_hdl);
if (rt_tbl == NULL) {
- IPAERR("RT tbl not found\n");
+ IPAERR_RL("RT tbl not found\n");
goto error;
}
- if (rt_tbl->cookie != IPA_COOKIE) {
- IPAERR("RT table cookie is invalid\n");
+ if (rt_tbl->cookie != IPA_RT_TBL_COOKIE) {
+ IPAERR_RL("RT table cookie is invalid\n");
goto error;
}
} else {
if (rule->rt_tbl_idx > ((ip == IPA_IP_v4) ?
IPA_MEM_PART(v4_modem_rt_index_hi) :
IPA_MEM_PART(v6_modem_rt_index_hi))) {
- IPAERR("invalid RT tbl\n");
+ IPAERR_RL("invalid RT tbl\n");
goto error;
}
}
@@ -1046,7 +1046,7 @@
}
INIT_LIST_HEAD(&entry->link);
entry->rule = *rule;
- entry->cookie = IPA_COOKIE;
+ entry->cookie = IPA_FLT_COOKIE;
entry->rt_tbl = rt_tbl;
entry->tbl = tbl;
if (add_rear) {
@@ -1065,13 +1065,19 @@
if (id < 0) {
IPAERR("failed to add to tree\n");
WARN_ON(1);
+ goto ipa_insert_failed;
}
*rule_hdl = id;
entry->id = id;
IPADBG("add flt rule rule_cnt=%d\n", tbl->rule_cnt);
return 0;
-
+ipa_insert_failed:
+ tbl->rule_cnt--;
+ if (entry->rt_tbl)
+ entry->rt_tbl->ref_cnt--;
+ list_del(&entry->link);
+ kmem_cache_free(ipa_ctx->flt_rule_cache, entry);
error:
return -EPERM;
}
@@ -1083,12 +1089,12 @@
entry = ipa_id_find(rule_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
return -EINVAL;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("bad params\n");
+ if (entry->cookie != IPA_FLT_COOKIE) {
+ IPAERR_RL("bad params\n");
return -EINVAL;
}
id = entry->id;
@@ -1115,12 +1121,12 @@
entry = ipa_id_find(frule->rule_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
goto error;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("bad params\n");
+ if (entry->cookie != IPA_FLT_COOKIE) {
+ IPAERR_RL("bad params\n");
goto error;
}
@@ -1130,25 +1136,25 @@
if (frule->rule.action != IPA_PASS_TO_EXCEPTION) {
if (!frule->rule.eq_attrib_type) {
if (!frule->rule.rt_tbl_hdl) {
- IPAERR("invalid RT tbl\n");
+ IPAERR_RL("invalid RT tbl\n");
goto error;
}
rt_tbl = ipa_id_find(frule->rule.rt_tbl_hdl);
if (rt_tbl == NULL) {
- IPAERR("RT tbl not found\n");
+ IPAERR_RL("RT tbl not found\n");
goto error;
}
- if (rt_tbl->cookie != IPA_COOKIE) {
- IPAERR("RT table cookie is invalid\n");
+ if (rt_tbl->cookie != IPA_RT_TBL_COOKIE) {
+ IPAERR_RL("RT table cookie is invalid\n");
goto error;
}
} else {
if (frule->rule.rt_tbl_idx > ((ip == IPA_IP_v4) ?
IPA_MEM_PART(v4_modem_rt_index_hi) :
IPA_MEM_PART(v6_modem_rt_index_hi))) {
- IPAERR("invalid RT tbl\n");
+ IPAERR_RL("invalid RT tbl\n");
goto error;
}
}
@@ -1172,7 +1178,7 @@
struct ipa_flt_tbl *tbl;
if (rule == NULL || rule_hdl == NULL) {
- IPAERR("bad parms rule=%p rule_hdl=%p\n", rule, rule_hdl);
+ IPAERR_RL("bad parms rule=%p rule_hdl=%p\n", rule, rule_hdl);
return -EINVAL;
}
@@ -1191,14 +1197,14 @@
int ipa_ep_idx;
if (rule == NULL || rule_hdl == NULL || ep >= IPA_CLIENT_MAX) {
- IPAERR("bad parms rule=%p rule_hdl=%p ep=%d\n", rule,
+ IPAERR_RL("bad parms rule=%p rule_hdl=%p ep=%d\n", rule,
rule_hdl, ep);
return -EINVAL;
}
ipa_ep_idx = ipa2_get_ep_mapping(ep);
if (ipa_ep_idx == IPA_FLT_TABLE_INDEX_NOT_FOUND) {
- IPAERR("ep not valid ep=%d\n", ep);
+ IPAERR_RL("ep not valid ep=%d\n", ep);
return -EINVAL;
}
if (ipa_ctx->ep[ipa_ep_idx].valid == 0)
@@ -1225,7 +1231,7 @@
if (rules == NULL || rules->num_rules == 0 ||
rules->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1243,7 +1249,7 @@
rules->rules[i].at_rear,
&rules->rules[i].flt_rule_hdl);
if (result) {
- IPAERR("failed to add flt rule %d\n", i);
+ IPAERR_RL("failed to add flt rule %d\n", i);
rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
} else {
rules->rules[i].status = 0;
@@ -1276,14 +1282,14 @@
int result;
if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa_ctx->lock);
for (i = 0; i < hdls->num_hdls; i++) {
if (__ipa_del_flt_rule(hdls->hdl[i].hdl)) {
- IPAERR("failed to del rt rule %i\n", i);
+ IPAERR_RL("failed to del rt rule %i\n", i);
hdls->hdl[i].status = IPA_FLT_STATUS_OF_DEL_FAILED;
} else {
hdls->hdl[i].status = 0;
@@ -1316,14 +1322,14 @@
int result;
if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa_ctx->lock);
for (i = 0; i < hdls->num_rules; i++) {
if (__ipa_mdfy_flt_rule(&hdls->rules[i], hdls->ip)) {
- IPAERR("failed to mdfy rt rule %i\n", i);
+ IPAERR_RL("failed to mdfy rt rule %i\n", i);
hdls->rules[i].status = IPA_FLT_STATUS_OF_MDFY_FAILED;
} else {
hdls->rules[i].status = 0;
@@ -1357,7 +1363,7 @@
int result;
if (ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1393,7 +1399,7 @@
int id;
if (ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
index bb6f8ec..d4e39d7 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
@@ -547,7 +547,7 @@
{
struct ipa_hdr_entry *hdr_entry;
struct ipa_hdr_proc_ctx_entry *entry;
- struct ipa_hdr_proc_ctx_offset_entry *offset;
+ struct ipa_hdr_proc_ctx_offset_entry *offset = NULL;
u32 bin;
struct ipa_hdr_proc_ctx_tbl *htbl = &ipa_ctx->hdr_proc_ctx_tbl;
int id;
@@ -558,13 +558,13 @@
proc_ctx->type, proc_ctx->hdr_hdl);
if (!HDR_PROC_TYPE_IS_VALID(proc_ctx->type)) {
- IPAERR("invalid processing type %d\n", proc_ctx->type);
+ IPAERR_RL("invalid processing type %d\n", proc_ctx->type);
return -EINVAL;
}
hdr_entry = ipa_id_find(proc_ctx->hdr_hdl);
- if (!hdr_entry || (hdr_entry->cookie != IPA_COOKIE)) {
- IPAERR("hdr_hdl is invalid\n");
+ if (!hdr_entry || (hdr_entry->cookie != IPA_HDR_COOKIE)) {
+ IPAERR_RL("hdr_hdl is invalid\n");
return -EINVAL;
}
@@ -580,7 +580,7 @@
entry->hdr = hdr_entry;
if (add_ref_hdr)
hdr_entry->ref_cnt++;
- entry->cookie = IPA_COOKIE;
+ entry->cookie = IPA_PROC_HDR_COOKIE;
needed_len = (proc_ctx->type == IPA_HDR_PROC_NONE) ?
sizeof(struct ipa_hdr_proc_ctx_add_hdr_seq) :
@@ -592,7 +592,7 @@
ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN1]) {
bin = IPA_HDR_PROC_CTX_BIN1;
} else {
- IPAERR("unexpected needed len %d\n", needed_len);
+ IPAERR_RL("unexpected needed len %d\n", needed_len);
WARN_ON(1);
goto bad_len;
}
@@ -602,7 +602,7 @@
IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
if (list_empty(&htbl->head_free_offset_list[bin])) {
if (htbl->end + ipa_hdr_proc_ctx_bin_sz[bin] > mem_size) {
- IPAERR("hdr proc ctx table overflow\n");
+ IPAERR_RL("hdr proc ctx table overflow\n");
goto bad_len;
}
@@ -640,6 +640,7 @@
if (id < 0) {
IPAERR("failed to alloc id\n");
WARN_ON(1);
+ goto ipa_insert_failed;
}
entry->id = id;
proc_ctx->proc_ctx_hdl = id;
@@ -647,6 +648,13 @@
return 0;
+ipa_insert_failed:
+ list_move(&offset->link,
+ &htbl->head_free_offset_list[offset->bin]);
+ entry->offset_entry = NULL;
+ list_del(&entry->link);
+ htbl->proc_ctx_cnt--;
+
bad_len:
if (add_ref_hdr)
hdr_entry->ref_cnt--;
@@ -659,7 +667,7 @@
static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
{
struct ipa_hdr_entry *entry;
- struct ipa_hdr_offset_entry *offset;
+ struct ipa_hdr_offset_entry *offset = NULL;
u32 bin;
struct ipa_hdr_tbl *htbl = &ipa_ctx->hdr_tbl;
int id;
@@ -667,12 +675,12 @@
gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
if (hdr->hdr_len == 0 || hdr->hdr_len > IPA_HDR_MAX_SIZE) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
goto error;
}
if (!HDR_TYPE_IS_VALID(hdr->type)) {
- IPAERR("invalid hdr type %d\n", hdr->type);
+ IPAERR_RL("invalid hdr type %d\n", hdr->type);
goto error;
}
@@ -691,7 +699,7 @@
entry->type = hdr->type;
entry->is_eth2_ofst_valid = hdr->is_eth2_ofst_valid;
entry->eth2_ofst = hdr->eth2_ofst;
- entry->cookie = IPA_COOKIE;
+ entry->cookie = IPA_HDR_COOKIE;
if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0])
bin = IPA_HDR_BIN0;
@@ -704,7 +712,7 @@
else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN4])
bin = IPA_HDR_BIN4;
else {
- IPAERR("unexpected hdr len %d\n", hdr->hdr_len);
+ IPAERR_RL("unexpected hdr len %d\n", hdr->hdr_len);
goto bad_hdr_len;
}
@@ -780,6 +788,7 @@
if (id < 0) {
IPAERR("failed to alloc id\n");
WARN_ON(1);
+ goto ipa_insert_failed;
}
entry->id = id;
hdr->hdr_hdl = id;
@@ -804,10 +813,19 @@
entry->ref_cnt--;
hdr->hdr_hdl = 0;
ipa_id_remove(id);
+ipa_insert_failed:
+ if (entry->is_hdr_proc_ctx) {
+ dma_unmap_single(ipa_ctx->pdev, entry->phys_base,
+ entry->hdr_len, DMA_TO_DEVICE);
+ } else {
+ if (offset)
+ list_move(&offset->link,
+ &htbl->head_free_offset_list[offset->bin]);
+ entry->offset_entry = NULL;
+ }
htbl->hdr_cnt--;
list_del(&entry->link);
- dma_unmap_single(ipa_ctx->pdev, entry->phys_base,
- entry->hdr_len, DMA_TO_DEVICE);
+
fail_dma_mapping:
entry->is_hdr_proc_ctx = false;
bad_hdr_len:
@@ -824,8 +842,8 @@
struct ipa_hdr_proc_ctx_tbl *htbl = &ipa_ctx->hdr_proc_ctx_tbl;
entry = ipa_id_find(proc_ctx_hdl);
- if (!entry || (entry->cookie != IPA_COOKIE)) {
- IPAERR("bad parm\n");
+ if (!entry || (entry->cookie != IPA_PROC_HDR_COOKIE)) {
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -833,7 +851,7 @@
htbl->proc_ctx_cnt, entry->offset_entry->offset);
if (by_user && entry->user_deleted) {
- IPAERR("proc_ctx already deleted by user\n");
+ IPAERR_RL("proc_ctx already deleted by user\n");
return -EINVAL;
}
@@ -871,12 +889,12 @@
entry = ipa_id_find(hdr_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
return -EINVAL;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("bad parm\n");
+ if (entry->cookie != IPA_HDR_COOKIE) {
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -888,7 +906,7 @@
htbl->hdr_cnt, entry->offset_entry->offset);
if (by_user && entry->user_deleted) {
- IPAERR("hdr already deleted by user\n");
+ IPAERR_RL("hdr already deleted by user\n");
return -EINVAL;
}
@@ -937,12 +955,12 @@
int result = -EFAULT;
if (unlikely(!ipa_ctx)) {
- IPAERR("IPA driver was not initialized\n");
+ IPAERR_RL("IPA driver was not initialized\n");
return -EINVAL;
}
if (hdrs == NULL || hdrs->num_hdrs == 0) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -951,7 +969,7 @@
hdrs->num_hdrs);
for (i = 0; i < hdrs->num_hdrs; i++) {
if (__ipa_add_hdr(&hdrs->hdr[i])) {
- IPAERR("failed to add hdr %d\n", i);
+ IPAERR_RL("failed to add hdr %d\n", i);
hdrs->hdr[i].status = -1;
} else {
hdrs->hdr[i].status = 0;
@@ -992,14 +1010,14 @@
}
if (hdls == NULL || hdls->num_hdls == 0) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa_ctx->lock);
for (i = 0; i < hdls->num_hdls; i++) {
if (__ipa_del_hdr(hdls->hdl[i].hdl, by_user)) {
- IPAERR("failed to del hdr %i\n", i);
+ IPAERR_RL("failed to del hdr %i\n", i);
hdls->hdl[i].status = -1;
} else {
hdls->hdl[i].status = 0;
@@ -1048,13 +1066,13 @@
if (ipa_ctx->ipa_hw_type <= IPA_HW_v2_0 ||
ipa_ctx->ipa_hw_type == IPA_HW_v2_6L) {
- IPAERR("Processing context not supported on IPA HW %d\n",
+ IPAERR_RL("Processing context not supported on IPA HW %d\n",
ipa_ctx->ipa_hw_type);
return -EFAULT;
}
if (proc_ctxs == NULL || proc_ctxs->num_proc_ctxs == 0) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1063,7 +1081,7 @@
proc_ctxs->num_proc_ctxs);
for (i = 0; i < proc_ctxs->num_proc_ctxs; i++) {
if (__ipa_add_hdr_proc_ctx(&proc_ctxs->proc_ctx[i], true)) {
- IPAERR("failed to add hdr pric ctx %d\n", i);
+ IPAERR_RL("failed to add hdr pric ctx %d\n", i);
proc_ctxs->proc_ctx[i].status = -1;
} else {
proc_ctxs->proc_ctx[i].status = 0;
@@ -1108,14 +1126,14 @@
}
if (hdls == NULL || hdls->num_hdls == 0) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa_ctx->lock);
for (i = 0; i < hdls->num_hdls; i++) {
if (__ipa_del_hdr_proc_ctx(hdls->hdl[i].hdl, true, by_user)) {
- IPAERR("failed to del hdr %i\n", i);
+ IPAERR_RL("failed to del hdr %i\n", i);
hdls->hdl[i].status = -1;
} else {
hdls->hdl[i].status = 0;
@@ -1352,7 +1370,7 @@
}
if (lookup == NULL) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa_ctx->lock);
@@ -1439,13 +1457,13 @@
entry = ipa_id_find(hdr_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
result = -EINVAL;
goto bail;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("invalid header entry\n");
+ if (entry->cookie != IPA_HDR_COOKIE) {
+ IPAERR_RL("invalid header entry\n");
result = -EINVAL;
goto bail;
}
@@ -1474,7 +1492,7 @@
int result = -EFAULT;
if (copy == NULL) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa_ctx->lock);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
index 5568f8b..141bff1 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
@@ -37,7 +37,15 @@
#define DRV_NAME "ipa"
#define NAT_DEV_NAME "ipaNatTable"
+
#define IPA_COOKIE 0x57831603
+#define IPA_RT_RULE_COOKIE 0x57831604
+#define IPA_RT_TBL_COOKIE 0x57831605
+#define IPA_FLT_COOKIE 0x57831606
+#define IPA_HDR_COOKIE 0x57831607
+#define IPA_PROC_HDR_COOKIE 0x57831608
+
+
#define MTU_BYTE 1500
#define IPA_MAX_NUM_PIPES 0x14
@@ -60,6 +68,18 @@
#define IPAERR(fmt, args...) \
pr_err(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+#define IPAERR_RL(fmt, args...) \
+ do { \
+ pr_err_ratelimited(DRV_NAME " %s:%d " fmt, __func__, \
+ __LINE__, ## args);\
+ if (ipa_ctx) { \
+ IPA_IPC_LOGGING(ipa_ctx->logbuf, \
+ DRV_NAME " %s:%d " fmt, ## args); \
+ IPA_IPC_LOGGING(ipa_ctx->logbuf_low, \
+ DRV_NAME " %s:%d " fmt, ## args); \
+ } \
+ } while (0)
+
#define WLAN_AMPDU_TX_EP 15
#define WLAN_PROD_TX_EP 19
#define WLAN1_CONS_RX_EP 14
@@ -196,8 +216,8 @@
*/
struct ipa_flt_entry {
struct list_head link;
- struct ipa_flt_rule rule;
u32 cookie;
+ struct ipa_flt_rule rule;
struct ipa_flt_tbl *tbl;
struct ipa_rt_tbl *rt_tbl;
u32 hw_len;
@@ -222,13 +242,13 @@
*/
struct ipa_rt_tbl {
struct list_head link;
+ u32 cookie;
struct list_head head_rt_rule_list;
char name[IPA_RESOURCE_NAME_MAX];
u32 idx;
u32 rule_cnt;
u32 ref_cnt;
struct ipa_rt_tbl_set *set;
- u32 cookie;
bool in_sys;
u32 sz;
struct ipa_mem_buffer curr_mem;
@@ -259,6 +279,7 @@
*/
struct ipa_hdr_entry {
struct list_head link;
+ u32 cookie;
u8 hdr[IPA_HDR_MAX_SIZE];
u32 hdr_len;
char name[IPA_RESOURCE_NAME_MAX];
@@ -268,7 +289,6 @@
dma_addr_t phys_base;
struct ipa_hdr_proc_ctx_entry *proc_ctx;
struct ipa_hdr_offset_entry *offset_entry;
- u32 cookie;
u32 ref_cnt;
int id;
u8 is_eth2_ofst_valid;
@@ -341,10 +361,10 @@
*/
struct ipa_hdr_proc_ctx_entry {
struct list_head link;
+ u32 cookie;
enum ipa_hdr_proc_type type;
struct ipa_hdr_proc_ctx_offset_entry *offset_entry;
struct ipa_hdr_entry *hdr;
- u32 cookie;
u32 ref_cnt;
int id;
bool user_deleted;
@@ -400,8 +420,8 @@
*/
struct ipa_rt_entry {
struct list_head link;
- struct ipa_rt_rule rule;
u32 cookie;
+ struct ipa_rt_rule rule;
struct ipa_rt_tbl *tbl;
struct ipa_hdr_entry *hdr;
struct ipa_hdr_proc_ctx_entry *proc_ctx;
@@ -1512,6 +1532,8 @@
ipa_notify_cb notify, void *priv, u8 hdr_len,
struct ipa_ntn_conn_out_params *outp);
int ipa2_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl);
+int ipa2_ntn_uc_reg_rdyCB(void (*ipauc_ready_cb)(void *), void *priv);
+void ipa2_ntn_uc_dereg_rdyCB(void);
/*
* To retrieve doorbell physical address of
@@ -1720,9 +1742,6 @@
iowrite32(val, base + offset);
}
-int ipa_bridge_init(void);
-void ipa_bridge_cleanup(void);
-
ssize_t ipa_read(struct file *filp, char __user *buf, size_t count,
loff_t *f_pos);
int ipa_pull_msg(struct ipa_msg_meta *meta, char *buff, size_t count);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c
index 4b62927..e6954b7 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c
@@ -271,6 +271,14 @@
mutex_lock(&ipa_ctx->lock);
list_for_each_entry(entry, &ipa_ctx->intf_list, link) {
if (!strcmp(entry->name, tx->name)) {
+ /* add the entry check */
+ if (entry->num_tx_props != tx->num_tx_props) {
+ IPAERR("invalid entry number(%u %u)\n",
+ entry->num_tx_props,
+ tx->num_tx_props);
+ mutex_unlock(&ipa_ctx->lock);
+ return result;
+ }
memcpy(tx->tx, entry->tx, entry->num_tx_props *
sizeof(struct ipa_ioc_tx_intf_prop));
result = 0;
@@ -304,6 +312,14 @@
mutex_lock(&ipa_ctx->lock);
list_for_each_entry(entry, &ipa_ctx->intf_list, link) {
if (!strcmp(entry->name, rx->name)) {
+ /* add the entry check */
+ if (entry->num_rx_props != rx->num_rx_props) {
+ IPAERR("invalid entry number(%u %u)\n",
+ entry->num_rx_props,
+ rx->num_rx_props);
+ mutex_unlock(&ipa_ctx->lock);
+ return result;
+ }
memcpy(rx->rx, entry->rx, entry->num_rx_props *
sizeof(struct ipa_ioc_rx_intf_prop));
result = 0;
@@ -337,6 +353,14 @@
mutex_lock(&ipa_ctx->lock);
list_for_each_entry(entry, &ipa_ctx->intf_list, link) {
if (!strcmp(entry->name, ext->name)) {
+ /* add the entry check */
+ if (entry->num_ext_props != ext->num_ext_props) {
+ IPAERR("invalid entry number(%u %u)\n",
+ entry->num_ext_props,
+ ext->num_ext_props);
+ mutex_unlock(&ipa_ctx->lock);
+ return result;
+ }
memcpy(ext->ext, entry->ext, entry->num_ext_props *
sizeof(struct ipa_ioc_ext_intf_prop));
result = 0;
@@ -380,13 +404,13 @@
if (meta == NULL || (buff == NULL && callback != NULL) ||
(buff != NULL && callback == NULL)) {
- IPAERR("invalid param meta=%p buff=%p, callback=%p\n",
+ IPAERR_RL("invalid param meta=%p buff=%p, callback=%p\n",
meta, buff, callback);
return -EINVAL;
}
if (meta->msg_type >= IPA_EVENT_MAX_NUM) {
- IPAERR("unsupported message type %d\n", meta->msg_type);
+ IPAERR_RL("unsupported message type %d\n", meta->msg_type);
return -EINVAL;
}
@@ -516,15 +540,15 @@
char __user *start;
struct ipa_push_msg *msg = NULL;
int ret;
- DEFINE_WAIT(wait);
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
int locked;
start = buf;
+ add_wait_queue(&ipa_ctx->msg_waitq, &wait);
while (1) {
mutex_lock(&ipa_ctx->msg_lock);
locked = 1;
- prepare_to_wait(&ipa_ctx->msg_waitq, &wait, TASK_INTERRUPTIBLE);
if (!list_empty(&ipa_ctx->msg_list)) {
msg = list_first_entry(&ipa_ctx->msg_list,
struct ipa_push_msg, link);
@@ -576,10 +600,10 @@
locked = 0;
mutex_unlock(&ipa_ctx->msg_lock);
- schedule();
+ wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
- finish_wait(&ipa_ctx->msg_waitq, &wait);
+ remove_wait_queue(&ipa_ctx->msg_waitq, &wait);
if (start != buf && ret != -EFAULT)
ret = buf - start;
@@ -609,7 +633,7 @@
int result = -EINVAL;
if (meta == NULL || buff == NULL || !count) {
- IPAERR("invalid param name=%p buff=%p count=%zu\n",
+ IPAERR_RL("invalid param name=%p buff=%p count=%zu\n",
meta, buff, count);
return result;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
index a7f983e..50b2706 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
@@ -252,8 +252,8 @@
mutex_lock(&nat_ctx->lock);
if (strcmp(mem->dev_name, NAT_DEV_NAME)) {
- IPAERR("Nat device name mismatch\n");
- IPAERR("Expect: %s Recv: %s\n", NAT_DEV_NAME, mem->dev_name);
+ IPAERR_RL("Nat device name mismatch\n");
+ IPAERR_RL("Expect: %s Recv: %s\n", NAT_DEV_NAME, mem->dev_name);
result = -EPERM;
goto bail;
}
@@ -272,7 +272,7 @@
if (mem->size <= 0 ||
nat_ctx->is_dev_init == true) {
- IPAERR("Invalid Parameters or device is already init\n");
+ IPAERR_RL("Invalid Parameters or device is already init\n");
result = -EPERM;
goto bail;
}
@@ -335,8 +335,8 @@
/* check for integer overflow */
if (init->ipv4_rules_offset >
- UINT_MAX - (TBL_ENTRY_SIZE * (init->table_entries + 1))) {
- IPAERR("Detected overflow\n");
+ (UINT_MAX - (TBL_ENTRY_SIZE * (init->table_entries + 1)))) {
+ IPAERR_RL("Detected overflow\n");
return -EPERM;
}
/* Check Table Entry offset is not
@@ -345,8 +345,8 @@
tmp = init->ipv4_rules_offset +
(TBL_ENTRY_SIZE * (init->table_entries + 1));
if (tmp > ipa_ctx->nat_mem.size) {
- IPAERR("Table rules offset not valid\n");
- IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+ IPAERR_RL("Table rules offset not valid\n");
+ IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n",
init->ipv4_rules_offset, (init->table_entries + 1),
tmp, ipa_ctx->nat_mem.size);
return -EPERM;
@@ -355,7 +355,7 @@
/* check for integer overflow */
if (init->expn_rules_offset >
UINT_MAX - (TBL_ENTRY_SIZE * init->expn_table_entries)) {
- IPAERR("Detected overflow\n");
+ IPAERR_RL("Detected overflow\n");
return -EPERM;
}
/* Check Expn Table Entry offset is not
@@ -364,8 +364,8 @@
tmp = init->expn_rules_offset +
(TBL_ENTRY_SIZE * init->expn_table_entries);
if (tmp > ipa_ctx->nat_mem.size) {
- IPAERR("Expn Table rules offset not valid\n");
- IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+ IPAERR_RL("Expn Table rules offset not valid\n");
+ IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n",
init->expn_rules_offset, init->expn_table_entries,
tmp, ipa_ctx->nat_mem.size);
return -EPERM;
@@ -374,7 +374,7 @@
/* check for integer overflow */
if (init->index_offset >
UINT_MAX - (INDX_TBL_ENTRY_SIZE * (init->table_entries + 1))) {
- IPAERR("Detected overflow\n");
+ IPAERR_RL("Detected overflow\n");
return -EPERM;
}
/* Check Indx Table Entry offset is not
@@ -383,8 +383,8 @@
tmp = init->index_offset +
(INDX_TBL_ENTRY_SIZE * (init->table_entries + 1));
if (tmp > ipa_ctx->nat_mem.size) {
- IPAERR("Indx Table rules offset not valid\n");
- IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+ IPAERR_RL("Indx Table rules offset not valid\n");
+ IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n",
init->index_offset, (init->table_entries + 1),
tmp, ipa_ctx->nat_mem.size);
return -EPERM;
@@ -392,8 +392,8 @@
/* check for integer overflow */
if (init->index_expn_offset >
- UINT_MAX - (INDX_TBL_ENTRY_SIZE * init->expn_table_entries)) {
- IPAERR("Detected overflow\n");
+ (UINT_MAX - (INDX_TBL_ENTRY_SIZE * init->expn_table_entries))) {
+ IPAERR_RL("Detected overflow\n");
return -EPERM;
}
/* Check Expn Table entry offset is not
@@ -402,8 +402,8 @@
tmp = init->index_expn_offset +
(INDX_TBL_ENTRY_SIZE * init->expn_table_entries);
if (tmp > ipa_ctx->nat_mem.size) {
- IPAERR("Indx Expn Table rules offset not valid\n");
- IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+ IPAERR_RL("Indx Expn Table rules offset not valid\n");
+ IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n",
init->index_expn_offset, init->expn_table_entries,
tmp, ipa_ctx->nat_mem.size);
return -EPERM;
@@ -448,16 +448,16 @@
(init->expn_rules_offset > offset) ||
(init->index_offset > offset) ||
(init->index_expn_offset > offset)) {
- IPAERR("Failed due to integer overflow\n");
- IPAERR("nat.mem.dma_handle: 0x%pa\n",
+ IPAERR_RL("Failed due to integer overflow\n");
+ IPAERR_RL("nat.mem.dma_handle: 0x%pa\n",
&ipa_ctx->nat_mem.dma_handle);
- IPAERR("ipv4_rules_offset: 0x%x\n",
+ IPAERR_RL("ipv4_rules_offset: 0x%x\n",
init->ipv4_rules_offset);
- IPAERR("expn_rules_offset: 0x%x\n",
+ IPAERR_RL("expn_rules_offset: 0x%x\n",
init->expn_rules_offset);
- IPAERR("index_offset: 0x%x\n",
+ IPAERR_RL("index_offset: 0x%x\n",
init->index_offset);
- IPAERR("index_expn_offset: 0x%x\n",
+ IPAERR_RL("index_expn_offset: 0x%x\n",
init->index_expn_offset);
result = -EPERM;
goto free_mem;
@@ -513,7 +513,7 @@
desc[1].len = size;
IPADBG("posting v4 init command\n");
if (ipa_send_cmd(2, desc)) {
- IPAERR("Fail to send immediate command\n");
+ IPAERR_RL("Fail to send immediate command\n");
result = -EPERM;
goto free_mem;
}
@@ -578,7 +578,7 @@
IPADBG("\n");
if (dma->entries <= 0) {
- IPAERR("Invalid number of commands %d\n",
+ IPAERR_RL("Invalid number of commands %d\n",
dma->entries);
ret = -EPERM;
goto bail;
@@ -586,7 +586,7 @@
for (cnt = 0; cnt < dma->entries; cnt++) {
if (dma->dma[cnt].table_index >= 1) {
- IPAERR("Invalid table index %d\n",
+ IPAERR_RL("Invalid table index %d\n",
dma->dma[cnt].table_index);
ret = -EPERM;
goto bail;
@@ -597,7 +597,7 @@
if (dma->dma[cnt].offset >=
(ipa_ctx->nat_mem.size_base_tables + 1) *
NAT_TABLE_ENTRY_SIZE_BYTE) {
- IPAERR("Invalid offset %d\n",
+ IPAERR_RL("Invalid offset %d\n",
dma->dma[cnt].offset);
ret = -EPERM;
goto bail;
@@ -609,7 +609,7 @@
if (dma->dma[cnt].offset >=
ipa_ctx->nat_mem.size_expansion_tables *
NAT_TABLE_ENTRY_SIZE_BYTE) {
- IPAERR("Invalid offset %d\n",
+ IPAERR_RL("Invalid offset %d\n",
dma->dma[cnt].offset);
ret = -EPERM;
goto bail;
@@ -621,7 +621,7 @@
if (dma->dma[cnt].offset >=
(ipa_ctx->nat_mem.size_base_tables + 1) *
NAT_INTEX_TABLE_ENTRY_SIZE_BYTE) {
- IPAERR("Invalid offset %d\n",
+ IPAERR_RL("Invalid offset %d\n",
dma->dma[cnt].offset);
ret = -EPERM;
goto bail;
@@ -633,7 +633,7 @@
if (dma->dma[cnt].offset >=
ipa_ctx->nat_mem.size_expansion_tables *
NAT_INTEX_TABLE_ENTRY_SIZE_BYTE) {
- IPAERR("Invalid offset %d\n",
+ IPAERR_RL("Invalid offset %d\n",
dma->dma[cnt].offset);
ret = -EPERM;
goto bail;
@@ -642,7 +642,7 @@
break;
default:
- IPAERR("Invalid base_addr %d\n",
+ IPAERR_RL("Invalid base_addr %d\n",
dma->dma[cnt].base_addr);
ret = -EPERM;
goto bail;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
index 5b70853..50930d3 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
@@ -850,16 +850,20 @@
struct ipa_rt_tbl *entry;
if (in->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
+ mutex_lock(&ipa_ctx->lock);
/* check if this table exists */
entry = __ipa_find_rt_tbl(in->ip, in->name);
- if (!entry)
+ if (!entry) {
+ mutex_unlock(&ipa_ctx->lock);
return -EFAULT;
+ }
in->idx = entry->idx;
+ mutex_unlock(&ipa_ctx->lock);
return 0;
}
@@ -902,7 +906,7 @@
INIT_LIST_HEAD(&entry->link);
strlcpy(entry->name, name, IPA_RESOURCE_NAME_MAX);
entry->set = set;
- entry->cookie = IPA_COOKIE;
+ entry->cookie = IPA_RT_TBL_COOKIE;
entry->in_sys = (ip == IPA_IP_v4) ?
!ipa_ctx->ip4_rt_tbl_lcl : !ipa_ctx->ip6_rt_tbl_lcl;
set->tbl_cnt++;
@@ -915,12 +919,16 @@
if (id < 0) {
IPAERR("failed to add to tree\n");
WARN_ON(1);
+ goto ipa_insert_failed;
}
entry->id = id;
}
return entry;
+ipa_insert_failed:
+ set->tbl_cnt--;
+ list_del(&entry->link);
fail_rt_idx_alloc:
entry->cookie = 0;
kmem_cache_free(ipa_ctx->rt_tbl_cache, entry);
@@ -933,13 +941,13 @@
enum ipa_ip_type ip = IPA_IP_MAX;
u32 id;
- if (entry == NULL || (entry->cookie != IPA_COOKIE)) {
- IPAERR("bad parms\n");
+ if (entry == NULL || (entry->cookie != IPA_RT_TBL_COOKIE)) {
+ IPAERR_RL("bad parms\n");
return -EINVAL;
}
id = entry->id;
if (ipa_id_find(id) == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
return -EPERM;
}
@@ -947,8 +955,11 @@
ip = IPA_IP_v4;
else if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v6])
ip = IPA_IP_v6;
- else
+ else {
WARN_ON(1);
+ return -EPERM;
+ }
+
if (!entry->in_sys) {
list_del(&entry->link);
@@ -987,13 +998,14 @@
if (rule->hdr_hdl) {
hdr = ipa_id_find(rule->hdr_hdl);
- if ((hdr == NULL) || (hdr->cookie != IPA_COOKIE)) {
+ if ((hdr == NULL) || (hdr->cookie != IPA_HDR_COOKIE)) {
IPAERR("rt rule does not point to valid hdr\n");
goto error;
}
} else if (rule->hdr_proc_ctx_hdl) {
proc_ctx = ipa_id_find(rule->hdr_proc_ctx_hdl);
- if ((proc_ctx == NULL) || (proc_ctx->cookie != IPA_COOKIE)) {
+ if ((proc_ctx == NULL) ||
+ (proc_ctx->cookie != IPA_PROC_HDR_COOKIE)) {
IPAERR("rt rule does not point to valid proc ctx\n");
goto error;
}
@@ -1001,7 +1013,7 @@
tbl = __ipa_add_rt_tbl(ip, name);
- if (tbl == NULL || (tbl->cookie != IPA_COOKIE)) {
+ if (tbl == NULL || (tbl->cookie != IPA_RT_TBL_COOKIE)) {
IPAERR("bad params\n");
goto error;
}
@@ -1022,7 +1034,7 @@
goto error;
}
INIT_LIST_HEAD(&entry->link);
- entry->cookie = IPA_COOKIE;
+ entry->cookie = IPA_RT_RULE_COOKIE;
entry->rule = *rule;
entry->tbl = tbl;
entry->hdr = hdr;
@@ -1074,7 +1086,7 @@
int ret;
if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1084,7 +1096,7 @@
&rules->rules[i].rule,
rules->rules[i].at_rear,
&rules->rules[i].rt_rule_hdl)) {
- IPAERR("failed to add rt rule %d\n", i);
+ IPAERR_RL("failed to add rt rule %d\n", i);
rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
} else {
rules->rules[i].status = 0;
@@ -1111,12 +1123,12 @@
entry = ipa_id_find(rule_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
return -EINVAL;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("bad params\n");
+ if (entry->cookie != IPA_RT_RULE_COOKIE) {
+ IPAERR_RL("bad params\n");
return -EINVAL;
}
@@ -1130,7 +1142,7 @@
entry->tbl->rule_cnt);
if (entry->tbl->rule_cnt == 0 && entry->tbl->ref_cnt == 0) {
if (__ipa_del_rt_tbl(entry->tbl))
- IPAERR("fail to del RT tbl\n");
+ IPAERR_RL("fail to del RT tbl\n");
}
entry->cookie = 0;
id = entry->id;
@@ -1157,14 +1169,14 @@
int ret;
if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa_ctx->lock);
for (i = 0; i < hdls->num_hdls; i++) {
if (__ipa_del_rt_rule(hdls->hdl[i].hdl)) {
- IPAERR("failed to del rt rule %i\n", i);
+ IPAERR_RL("failed to del rt rule %i\n", i);
hdls->hdl[i].status = IPA_RT_STATUS_OF_DEL_FAILED;
} else {
hdls->hdl[i].status = 0;
@@ -1197,7 +1209,7 @@
int ret;
if (ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1241,7 +1253,7 @@
int id;
if (ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1259,7 +1271,7 @@
* filtering rules point to routing tables
*/
if (ipa2_reset_flt(ip))
- IPAERR("fail to reset flt ip=%d\n", ip);
+ IPAERR_RL("fail to reset flt ip=%d\n", ip);
set = &ipa_ctx->rt_tbl_set[ip];
rset = &ipa_ctx->reap_rt_tbl_set[ip];
@@ -1345,21 +1357,27 @@
int result = -EFAULT;
if (lookup == NULL || lookup->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa_ctx->lock);
entry = __ipa_find_rt_tbl(lookup->ip, lookup->name);
- if (entry && entry->cookie == IPA_COOKIE) {
+ if (entry && entry->cookie == IPA_RT_TBL_COOKIE) {
+ if (entry->ref_cnt == U32_MAX) {
+ IPAERR("fail: ref count crossed limit\n");
+ goto ret;
+ }
entry->ref_cnt++;
lookup->hdl = entry->id;
/* commit for get */
if (ipa_ctx->ctrl->ipa_commit_rt(lookup->ip))
- IPAERR("fail to commit RT tbl\n");
+ IPAERR_RL("fail to commit RT tbl\n");
result = 0;
}
+
+ret:
mutex_unlock(&ipa_ctx->lock);
return result;
@@ -1377,18 +1395,18 @@
{
struct ipa_rt_tbl *entry;
enum ipa_ip_type ip = IPA_IP_MAX;
- int result;
+ int result = 0;
mutex_lock(&ipa_ctx->lock);
entry = ipa_id_find(rt_tbl_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
result = -EINVAL;
goto ret;
}
- if ((entry->cookie != IPA_COOKIE) || entry->ref_cnt == 0) {
- IPAERR("bad parms\n");
+ if ((entry->cookie != IPA_RT_TBL_COOKIE) || entry->ref_cnt == 0) {
+ IPAERR_RL("bad parms\n");
result = -EINVAL;
goto ret;
}
@@ -1397,16 +1415,19 @@
ip = IPA_IP_v4;
else if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v6])
ip = IPA_IP_v6;
- else
+ else {
WARN_ON(1);
+ result = -EINVAL;
+ goto ret;
+ }
entry->ref_cnt--;
if (entry->ref_cnt == 0 && entry->rule_cnt == 0) {
if (__ipa_del_rt_tbl(entry))
- IPAERR("fail to del RT tbl\n");
+ IPAERR_RL("fail to del RT tbl\n");
/* commit for put */
if (ipa_ctx->ctrl->ipa_commit_rt(ip))
- IPAERR("fail to commit RT tbl\n");
+ IPAERR_RL("fail to commit RT tbl\n");
}
result = 0;
@@ -1425,20 +1446,20 @@
if (rtrule->rule.hdr_hdl) {
hdr = ipa_id_find(rtrule->rule.hdr_hdl);
- if ((hdr == NULL) || (hdr->cookie != IPA_COOKIE)) {
- IPAERR("rt rule does not point to valid hdr\n");
+ if ((hdr == NULL) || (hdr->cookie != IPA_HDR_COOKIE)) {
+ IPAERR_RL("rt rule does not point to valid hdr\n");
goto error;
}
}
entry = ipa_id_find(rtrule->rt_rule_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
goto error;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("bad params\n");
+ if (entry->cookie != IPA_RT_RULE_COOKIE) {
+ IPAERR_RL("bad params\n");
goto error;
}
@@ -1471,14 +1492,14 @@
int result;
if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa_ctx->lock);
for (i = 0; i < hdls->num_rules; i++) {
if (__ipa_mdfy_rt_rule(&hdls->rules[i])) {
- IPAERR("failed to mdfy rt rule %i\n", i);
+ IPAERR_RL("failed to mdfy rt rule %i\n", i);
hdls->rules[i].status = IPA_RT_STATUS_OF_MDFY_FAILED;
} else {
hdls->rules[i].status = 0;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c
index 6f59ebd..d4116eb 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -165,6 +165,17 @@
return -EEXIST;
}
+int ipa2_ntn_uc_reg_rdyCB(void (*ipauc_ready_cb)(void *), void *priv)
+{
+ return ipa2_register_ipa_ready_cb(ipauc_ready_cb, priv);
+}
+
+void ipa2_ntn_uc_dereg_rdyCB(void)
+{
+ ipa_ctx->uc_ntn_ctx.uc_ready_cb = NULL;
+ ipa_ctx->uc_ntn_ctx.priv = NULL;
+}
+
static void ipa_uc_ntn_loaded_handler(void)
{
if (!ipa_ctx) {
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
index b7815cb..a454382 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
@@ -1673,7 +1673,7 @@
if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
ipa_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm, %d\n", clnt_hdl);
+ IPAERR_RL("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
@@ -1686,7 +1686,7 @@
ep = &ipa_ctx->ep[clnt_hdl];
if (!(ep->uc_offload_state & IPA_WDI_CONNECTED)) {
- IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+ IPAERR_RL("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
index 4652fc8..2c88244 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
@@ -847,7 +847,7 @@
}
if (client >= IPA_CLIENT_MAX || client < 0) {
- IPAERR("Bad client number! client =%d\n", client);
+ IPAERR_RL("Bad client number! client =%d\n", client);
return INVALID_EP_MAPPING_INDEX;
}
@@ -1700,7 +1700,7 @@
if (attrib->attrib_mask & IPA_FLT_NEXT_HDR ||
attrib->attrib_mask & IPA_FLT_TC || attrib->attrib_mask &
IPA_FLT_FLOW_LABEL) {
- IPAERR("v6 attrib's specified for v4 rule\n");
+ IPAERR_RL("v6 attrib's specified for v4 rule\n");
return -EPERM;
}
@@ -1712,7 +1712,7 @@
if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
if (ipa_ofst_meq32[ofst_meq32] == -1) {
- IPAERR("ran out of meq32 eq\n");
+ IPAERR_RL("ran out of meq32 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq32[ofst_meq32];
@@ -1732,7 +1732,7 @@
if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
if (ipa_ofst_meq32[ofst_meq32] == -1) {
- IPAERR("ran out of meq32 eq\n");
+ IPAERR_RL("ran out of meq32 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq32[ofst_meq32];
@@ -1746,7 +1746,7 @@
if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
if (ipa_ofst_meq32[ofst_meq32] == -1) {
- IPAERR("ran out of meq32 eq\n");
+ IPAERR_RL("ran out of meq32 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq32[ofst_meq32];
@@ -1760,11 +1760,11 @@
if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
+ IPAERR_RL("ran out of ihl_rng16 eq\n");
return -EPERM;
}
if (attrib->src_port_hi < attrib->src_port_lo) {
- IPAERR("bad src port range param\n");
+ IPAERR_RL("bad src port range param\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
@@ -1778,11 +1778,11 @@
if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
+ IPAERR_RL("ran out of ihl_rng16 eq\n");
return -EPERM;
}
if (attrib->dst_port_hi < attrib->dst_port_lo) {
- IPAERR("bad dst port range param\n");
+ IPAERR_RL("bad dst port range param\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
@@ -1796,7 +1796,7 @@
if (attrib->attrib_mask & IPA_FLT_TYPE) {
if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
- IPAERR("ran out of ihl_meq32 eq\n");
+ IPAERR_RL("ran out of ihl_meq32 eq\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
@@ -1809,7 +1809,7 @@
if (attrib->attrib_mask & IPA_FLT_CODE) {
if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
- IPAERR("ran out of ihl_meq32 eq\n");
+ IPAERR_RL("ran out of ihl_meq32 eq\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
@@ -1822,7 +1822,7 @@
if (attrib->attrib_mask & IPA_FLT_SPI) {
if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
- IPAERR("ran out of ihl_meq32 eq\n");
+ IPAERR_RL("ran out of ihl_meq32 eq\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
@@ -1836,7 +1836,7 @@
if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
+ IPAERR_RL("ran out of ihl_rng16 eq\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
@@ -1850,7 +1850,7 @@
if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
+ IPAERR_RL("ran out of ihl_rng16 eq\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
@@ -1877,7 +1877,7 @@
if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -1892,7 +1892,7 @@
if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -1907,7 +1907,7 @@
if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -1922,7 +1922,7 @@
if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -1937,7 +1937,7 @@
if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
if (ipa_ofst_meq32[ofst_meq32] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq32[ofst_meq32];
@@ -1955,7 +1955,7 @@
/* error check */
if (attrib->attrib_mask & IPA_FLT_TOS ||
attrib->attrib_mask & IPA_FLT_PROTOCOL) {
- IPAERR("v4 attrib's specified for v6 rule\n");
+ IPAERR_RL("v4 attrib's specified for v6 rule\n");
return -EPERM;
}
@@ -1967,7 +1967,7 @@
if (attrib->attrib_mask & IPA_FLT_TYPE) {
if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
- IPAERR("ran out of ihl_meq32 eq\n");
+ IPAERR_RL("ran out of ihl_meq32 eq\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
@@ -1980,7 +1980,7 @@
if (attrib->attrib_mask & IPA_FLT_CODE) {
if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
- IPAERR("ran out of ihl_meq32 eq\n");
+ IPAERR_RL("ran out of ihl_meq32 eq\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
@@ -1993,7 +1993,7 @@
if (attrib->attrib_mask & IPA_FLT_SPI) {
if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
- IPAERR("ran out of ihl_meq32 eq\n");
+ IPAERR_RL("ran out of ihl_meq32 eq\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
@@ -2007,7 +2007,7 @@
if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
+ IPAERR_RL("ran out of ihl_rng16 eq\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
@@ -2021,7 +2021,7 @@
if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
+ IPAERR_RL("ran out of ihl_rng16 eq\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
@@ -2035,11 +2035,11 @@
if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
+ IPAERR_RL("ran out of ihl_rng16 eq\n");
return -EPERM;
}
if (attrib->src_port_hi < attrib->src_port_lo) {
- IPAERR("bad src port range param\n");
+ IPAERR_RL("bad src port range param\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
@@ -2053,11 +2053,11 @@
if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
+ IPAERR_RL("ran out of ihl_rng16 eq\n");
return -EPERM;
}
if (attrib->dst_port_hi < attrib->dst_port_lo) {
- IPAERR("bad dst port range param\n");
+ IPAERR_RL("bad dst port range param\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
@@ -2071,7 +2071,7 @@
if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -2097,7 +2097,7 @@
if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -2129,7 +2129,7 @@
if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -2174,7 +2174,7 @@
if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -2189,7 +2189,7 @@
if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -2204,7 +2204,7 @@
if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -2219,7 +2219,7 @@
if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -2234,7 +2234,7 @@
if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
if (ipa_ofst_meq32[ofst_meq32] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq32[ofst_meq32];
@@ -2247,7 +2247,7 @@
}
} else {
- IPAERR("unsupported ip %d\n", ip);
+ IPAERR_RL("unsupported ip %d\n", ip);
return -EPERM;
}
@@ -2257,7 +2257,7 @@
*/
if (attrib->attrib_mask == 0) {
if (ipa_ofst_meq32[ofst_meq32] == -1) {
- IPAERR("ran out of meq32 eq\n");
+ IPAERR_RL("ran out of meq32 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq32[ofst_meq32];
@@ -3548,19 +3548,19 @@
}
if (param_in->client >= IPA_CLIENT_MAX) {
- IPAERR("bad parm client:%d\n", param_in->client);
+ IPAERR_RL("bad parm client:%d\n", param_in->client);
goto fail;
}
ipa_ep_idx = ipa2_get_ep_mapping(param_in->client);
if (ipa_ep_idx == -1) {
- IPAERR("Invalid client.\n");
+ IPAERR_RL("Invalid client.\n");
goto fail;
}
ep = &ipa_ctx->ep[ipa_ep_idx];
if (!ep->valid) {
- IPAERR("EP not allocated.\n");
+ IPAERR_RL("EP not allocated.\n");
goto fail;
}
@@ -3574,7 +3574,7 @@
ipa_ctx->ep[ipa_ep_idx].cfg.meta = meta;
result = ipa_write_qmapid_wdi_pipe(ipa_ep_idx, meta.qmap_id);
if (result)
- IPAERR("qmap_id %d write failed on ep=%d\n",
+ IPAERR_RL("qmap_id %d write failed on ep=%d\n",
meta.qmap_id, ipa_ep_idx);
result = 0;
}
@@ -5086,6 +5086,8 @@
api_ctrl->ipa_tear_down_uc_offload_pipes =
ipa2_tear_down_uc_offload_pipes;
api_ctrl->ipa_get_pdev = ipa2_get_pdev;
+ api_ctrl->ipa_ntn_uc_reg_rdyCB = ipa2_ntn_uc_reg_rdyCB;
+ api_ctrl->ipa_ntn_uc_dereg_rdyCB = ipa2_ntn_uc_dereg_rdyCB;
return 0;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index 29766fb..9c75202 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -1877,7 +1877,9 @@
if (ret < 0)
IPAWANERR("Error deleting resource %d, ret=%d\n",
IPA_RM_RESOURCE_Q6_PROD, ret);
- destroy_workqueue(ipa_rm_q6_workqueue);
+
+ if (ipa_rm_q6_workqueue)
+ destroy_workqueue(ipa_rm_q6_workqueue);
}
static void wake_tx_queue(struct work_struct *work)
@@ -2186,7 +2188,10 @@
IPAWANERR("Error deleting resource %d, ret=%d\n",
IPA_RM_RESOURCE_WWAN_0_PROD, ret);
create_rsrc_err:
- q6_deinitialize_rm();
+
+ if (!atomic_read(&is_ssr))
+ q6_deinitialize_rm();
+
q6_init_err:
free_netdev(ipa_netdevs[0]);
ipa_netdevs[0] = NULL;
@@ -2616,6 +2621,9 @@
if (!data->set_quota)
ipa_qmi_stop_data_qouta();
+ /* prevent string buffer overflows */
+ data->interface_name[IFNAMSIZ-1] = '\0';
+
index = find_vchannel_name_index(data->interface_name);
IPAWANERR("iface name %s, quota %lu\n",
data->interface_name,
@@ -2853,6 +2861,10 @@
kfree(req);
kfree(resp);
return rc;
+ } else if (data == NULL) {
+ kfree(req);
+ kfree(resp);
+ return 0;
}
if (resp->dl_dst_pipe_stats_list_valid) {
@@ -3034,8 +3046,11 @@
int rmnet_ipa_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data)
{
enum ipa_upstream_type upstream_type;
+ struct wan_ioctl_query_tether_stats tether_stats;
int rc = 0;
+ memset(&tether_stats, 0, sizeof(struct wan_ioctl_query_tether_stats));
+
/* get IPA backhaul type */
upstream_type = find_upstream_type(data->upstreamIface);
@@ -3053,7 +3068,7 @@
} else {
IPAWANDBG(" reset modem-backhaul stats\n");
rc = rmnet_ipa_query_tethering_stats_modem(
- NULL, true);
+ &tether_stats, true);
if (rc) {
IPAWANERR("reset MODEM stats failed\n");
return rc;
diff --git a/drivers/platform/msm/ipa/ipa_v3/Makefile b/drivers/platform/msm/ipa/ipa_v3/Makefile
index a4faaea..e3f8d45 100644
--- a/drivers/platform/msm/ipa/ipa_v3/Makefile
+++ b/drivers/platform/msm/ipa/ipa_v3/Makefile
@@ -3,6 +3,7 @@
obj-$(CONFIG_IPA3) += ipat.o
ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \
ipa_utils.o ipa_nat.o ipa_intf.o teth_bridge.o ipa_interrupts.o \
- ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o ipa_uc_ntn.o
+ ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o ipa_uc_ntn.o \
+ ipa_hw_stats.o
obj-$(CONFIG_RMNET_IPA3) += rmnet_ipa.o ipa_qmi_service_v01.o ipa_qmi_service.o rmnet_ipa_fd_ioctl.o
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 85bfe95..4d4e993 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -235,6 +235,10 @@
static void ipa3_post_init_wq(struct work_struct *work);
static DECLARE_WORK(ipa3_post_init_work, ipa3_post_init_wq);
+static void ipa_dec_clients_disable_clks_on_wq(struct work_struct *work);
+static DECLARE_WORK(ipa_dec_clients_disable_clks_on_wq_work,
+ ipa_dec_clients_disable_clks_on_wq);
+
static struct ipa3_plat_drv_res ipa3_res = {0, };
struct msm_bus_scale_pdata *ipa3_bus_scale_table;
@@ -262,7 +266,9 @@
int cnt = 0;
int start_idx;
int end_idx;
+ unsigned long flags;
+ spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
start_idx = (ipa3_ctx->ipa3_active_clients_logging.log_tail + 1) %
IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
end_idx = ipa3_ctx->ipa3_active_clients_logging.log_head;
@@ -273,6 +279,8 @@
.log_buffer[i]);
cnt += nbytes;
}
+ spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
+ flags);
return cnt;
}
@@ -282,7 +290,9 @@
int i;
struct ipa3_active_client_htable_entry *iterator;
int cnt = 0;
+ unsigned long flags;
+ spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
cnt = scnprintf(buf, size, "\n---- Active Clients Table ----\n");
hash_for_each(ipa3_ctx->ipa3_active_clients_logging.htable, i,
iterator, list) {
@@ -314,7 +324,9 @@
}
cnt += scnprintf(buf + cnt, size - cnt,
"\nTotal active clients count: %d\n",
- ipa3_ctx->ipa3_active_clients.cnt);
+ atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
+ spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
+ flags);
return cnt;
}
@@ -322,11 +334,11 @@
static int ipa3_active_clients_panic_notifier(struct notifier_block *this,
unsigned long event, void *ptr)
{
- ipa3_active_clients_lock();
+ mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
ipa3_active_clients_log_print_table(active_clients_table_buf,
IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE);
IPAERR("%s", active_clients_table_buf);
- ipa3_active_clients_unlock();
+ mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
return NOTIFY_DONE;
}
@@ -364,6 +376,7 @@
{
int i;
+ spin_lock_init(&ipa3_ctx->ipa3_active_clients_logging.lock);
ipa3_ctx->ipa3_active_clients_logging.log_buffer[0] = kzalloc(
IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES *
sizeof(char[IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN]),
@@ -395,20 +408,28 @@
void ipa3_active_clients_log_clear(void)
{
- ipa3_active_clients_lock();
+ unsigned long flags;
+
+ spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
ipa3_ctx->ipa3_active_clients_logging.log_tail =
IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
- ipa3_active_clients_unlock();
+ spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
+ flags);
}
static void ipa3_active_clients_log_destroy(void)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
ipa3_ctx->ipa3_active_clients_logging.log_rdy = 0;
kfree(ipa3_ctx->ipa3_active_clients_logging.log_buffer[0]);
ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
ipa3_ctx->ipa3_active_clients_logging.log_tail =
IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
+ spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
+ flags);
}
enum ipa_smmu_cb_type {
@@ -538,7 +559,7 @@
msg_meta.msg_len = sizeof(struct ipa_wan_msg);
retval = ipa3_send_msg(&msg_meta, wan_msg, ipa3_wan_msg_free_cb);
if (retval) {
- IPAERR("ipa3_send_msg failed: %d\n", retval);
+ IPAERR_RL("ipa3_send_msg failed: %d\n", retval);
kfree(wan_msg);
return retval;
}
@@ -546,6 +567,90 @@
return 0;
}
+static void ipa3_vlan_l2tp_msg_free_cb(void *buff, u32 len, u32 type)
+{
+ if (!buff) {
+ IPAERR("Null buffer\n");
+ return;
+ }
+
+ if (type != ADD_VLAN_IFACE &&
+ type != DEL_VLAN_IFACE &&
+ type != ADD_L2TP_VLAN_MAPPING &&
+ type != DEL_L2TP_VLAN_MAPPING) {
+ IPAERR("Wrong type given. buff %pK type %d\n", buff, type);
+ return;
+ }
+
+ kfree(buff);
+}
+
+static int ipa3_send_vlan_l2tp_msg(unsigned long usr_param, uint8_t msg_type)
+{
+ int retval;
+ struct ipa_ioc_vlan_iface_info *vlan_info;
+ struct ipa_ioc_l2tp_vlan_mapping_info *mapping_info;
+ struct ipa_msg_meta msg_meta;
+
+ if (msg_type == ADD_VLAN_IFACE ||
+ msg_type == DEL_VLAN_IFACE) {
+ vlan_info = kzalloc(sizeof(struct ipa_ioc_vlan_iface_info),
+ GFP_KERNEL);
+ if (!vlan_info) {
+ IPAERR("no memory\n");
+ return -ENOMEM;
+ }
+
+ if (copy_from_user((u8 *)vlan_info, (void __user *)usr_param,
+ sizeof(struct ipa_ioc_vlan_iface_info))) {
+ kfree(vlan_info);
+ return -EFAULT;
+ }
+
+ memset(&msg_meta, 0, sizeof(msg_meta));
+ msg_meta.msg_type = msg_type;
+ msg_meta.msg_len = sizeof(struct ipa_ioc_vlan_iface_info);
+ retval = ipa3_send_msg(&msg_meta, vlan_info,
+ ipa3_vlan_l2tp_msg_free_cb);
+ if (retval) {
+ IPAERR("ipa3_send_msg failed: %d\n", retval);
+ kfree(vlan_info);
+ return retval;
+ }
+ } else if (msg_type == ADD_L2TP_VLAN_MAPPING ||
+ msg_type == DEL_L2TP_VLAN_MAPPING) {
+ mapping_info = kzalloc(sizeof(struct
+ ipa_ioc_l2tp_vlan_mapping_info), GFP_KERNEL);
+ if (!mapping_info) {
+ IPAERR("no memory\n");
+ return -ENOMEM;
+ }
+
+ if (copy_from_user((u8 *)mapping_info,
+ (void __user *)usr_param,
+ sizeof(struct ipa_ioc_l2tp_vlan_mapping_info))) {
+ kfree(mapping_info);
+ return -EFAULT;
+ }
+
+ memset(&msg_meta, 0, sizeof(msg_meta));
+ msg_meta.msg_type = msg_type;
+ msg_meta.msg_len = sizeof(struct
+ ipa_ioc_l2tp_vlan_mapping_info);
+ retval = ipa3_send_msg(&msg_meta, mapping_info,
+ ipa3_vlan_l2tp_msg_free_cb);
+ if (retval) {
+ IPAERR("ipa3_send_msg failed: %d\n", retval);
+ kfree(mapping_info);
+ return retval;
+ }
+ } else {
+ IPAERR("Unexpected event\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
@@ -631,7 +736,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_nat_dma_cmd *)param)->entries
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_nat_dma_cmd *)param)->entries,
pre_entry);
retval = -EFAULT;
@@ -661,7 +766,7 @@
retval = -EFAULT;
break;
}
- if (ipa4_nat_mdfy_pdn(&mdfy_pdn)) {
+ if (ipa3_nat_mdfy_pdn(&mdfy_pdn)) {
retval = -EFAULT;
break;
}
@@ -690,7 +795,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_add_hdr *)param)->num_hdrs
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_add_hdr *)param)->num_hdrs,
pre_entry);
retval = -EFAULT;
@@ -729,7 +834,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_del_hdr *)param)->num_hdls
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_del_hdr *)param)->num_hdls,
pre_entry);
retval = -EFAULT;
@@ -769,7 +874,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_add_rt_rule *)param)->num_rules
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_add_rt_rule *)param)->
num_rules,
pre_entry);
@@ -809,7 +914,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_add_rt_rule_after *)param)->
num_rules != pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_add_rt_rule_after *)param)->
num_rules,
pre_entry);
@@ -851,7 +956,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_mdfy_rt_rule *)param)->num_rules
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_mdfy_rt_rule *)param)->
num_rules,
pre_entry);
@@ -891,7 +996,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_del_rt_rule *)param)->num_hdls
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_del_rt_rule *)param)->num_hdls,
pre_entry);
retval = -EFAULT;
@@ -930,7 +1035,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_add_flt_rule *)param)->num_rules
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_add_flt_rule *)param)->
num_rules,
pre_entry);
@@ -972,7 +1077,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_add_flt_rule_after *)param)->
num_rules != pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_add_flt_rule_after *)param)->
num_rules,
pre_entry);
@@ -1013,7 +1118,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_del_flt_rule *)param)->num_hdls
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_del_flt_rule *)param)->
num_hdls,
pre_entry);
@@ -1053,7 +1158,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_mdfy_flt_rule *)param)->num_rules
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_mdfy_flt_rule *)param)->
num_rules,
pre_entry);
@@ -1191,7 +1296,7 @@
if (unlikely(((struct ipa_ioc_query_intf_tx_props *)
param)->num_tx_props
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_query_intf_tx_props *)
param)->num_tx_props, pre_entry);
retval = -EFAULT;
@@ -1236,7 +1341,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_query_intf_rx_props *)
param)->num_rx_props != pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_query_intf_rx_props *)
param)->num_rx_props, pre_entry);
retval = -EFAULT;
@@ -1281,7 +1386,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_query_intf_ext_props *)
param)->num_ext_props != pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_query_intf_ext_props *)
param)->num_ext_props, pre_entry);
retval = -EFAULT;
@@ -1319,7 +1424,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_msg_meta *)param)->msg_len
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_msg_meta *)param)->msg_len,
pre_entry);
retval = -EFAULT;
@@ -1459,7 +1564,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_add_hdr_proc_ctx *)
param)->num_proc_ctxs != pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_add_hdr_proc_ctx *)
param)->num_proc_ctxs, pre_entry);
retval = -EFAULT;
@@ -1498,7 +1603,7 @@
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_del_hdr_proc_ctx *)
param)->num_hdls != pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_del_hdr_proc_ctx *)param)->
num_hdls,
pre_entry);
@@ -1530,6 +1635,34 @@
}
break;
+ case IPA_IOC_ADD_VLAN_IFACE:
+ if (ipa3_send_vlan_l2tp_msg(arg, ADD_VLAN_IFACE)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_DEL_VLAN_IFACE:
+ if (ipa3_send_vlan_l2tp_msg(arg, DEL_VLAN_IFACE)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_ADD_L2TP_VLAN_MAPPING:
+ if (ipa3_send_vlan_l2tp_msg(arg, ADD_L2TP_VLAN_MAPPING)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_DEL_L2TP_VLAN_MAPPING:
+ if (ipa3_send_vlan_l2tp_msg(arg, DEL_L2TP_VLAN_MAPPING)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
default: /* redundant, as cmd was checked against MAXNR */
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return -ENOTTY;
@@ -3155,7 +3288,6 @@
}
ipa3_uc_notify_clk_state(true);
- ipa3_suspend_apps_pipes(false);
}
static unsigned int ipa3_get_bus_vote(void)
@@ -3287,7 +3419,10 @@
struct ipa3_active_client_htable_entry *hfound;
u32 hkey;
char str_to_hash[IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN];
+ unsigned long flags;
+ spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
+ int_ctx = true;
hfound = NULL;
memset(str_to_hash, 0, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
strlcpy(str_to_hash, id->id_string, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
@@ -3307,6 +3442,9 @@
int_ctx ? GFP_ATOMIC : GFP_KERNEL);
if (hentry == NULL) {
IPAERR("failed allocating active clients hash entry");
+ spin_unlock_irqrestore(
+ &ipa3_ctx->ipa3_active_clients_logging.lock,
+ flags);
return;
}
hentry->type = id->type;
@@ -3331,6 +3469,8 @@
id->id_string, id->file, id->line);
ipa3_active_clients_log_insert(temp_str);
}
+ spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
+ flags);
}
void ipa3_active_clients_log_dec(struct ipa_active_client_logging_info *id,
@@ -3354,13 +3494,33 @@
*/
void ipa3_inc_client_enable_clks(struct ipa_active_client_logging_info *id)
{
- ipa3_active_clients_lock();
+ int ret;
+
ipa3_active_clients_log_inc(id, false);
- ipa3_ctx->ipa3_active_clients.cnt++;
- if (ipa3_ctx->ipa3_active_clients.cnt == 1)
- ipa3_enable_clks();
- IPADBG_LOW("active clients = %d\n", ipa3_ctx->ipa3_active_clients.cnt);
- ipa3_active_clients_unlock();
+ ret = atomic_inc_not_zero(&ipa3_ctx->ipa3_active_clients.cnt);
+ if (ret) {
+ IPADBG_LOW("active clients = %d\n",
+ atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
+ return;
+ }
+
+ mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
+
+ /* somebody might voted to clocks meanwhile */
+ ret = atomic_inc_not_zero(&ipa3_ctx->ipa3_active_clients.cnt);
+ if (ret) {
+ mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
+ IPADBG_LOW("active clients = %d\n",
+ atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
+ return;
+ }
+
+ ipa3_enable_clks();
+ atomic_inc(&ipa3_ctx->ipa3_active_clients.cnt);
+ IPADBG_LOW("active clients = %d\n",
+ atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
+ ipa3_suspend_apps_pipes(false);
+ mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
}
/**
@@ -3374,23 +3534,57 @@
int ipa3_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info
*id)
{
- int res = 0;
- unsigned long flags;
+ int ret;
- if (ipa3_active_clients_trylock(&flags) == 0)
- return -EPERM;
-
- if (ipa3_ctx->ipa3_active_clients.cnt == 0) {
- res = -EPERM;
- goto bail;
+ ret = atomic_inc_not_zero(&ipa3_ctx->ipa3_active_clients.cnt);
+ if (ret) {
+ ipa3_active_clients_log_inc(id, true);
+ IPADBG_LOW("active clients = %d\n",
+ atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
+ return 0;
}
- ipa3_active_clients_log_inc(id, true);
- ipa3_ctx->ipa3_active_clients.cnt++;
- IPADBG_LOW("active clients = %d\n", ipa3_ctx->ipa3_active_clients.cnt);
-bail:
- ipa3_active_clients_trylock_unlock(&flags);
- return res;
+ return -EPERM;
+}
+
+static void __ipa3_dec_client_disable_clks(void)
+{
+ int ret;
+
+ if (!atomic_read(&ipa3_ctx->ipa3_active_clients.cnt)) {
+ IPAERR("trying to disable clocks with refcnt is 0!\n");
+ ipa_assert();
+ return;
+ }
+
+ ret = atomic_add_unless(&ipa3_ctx->ipa3_active_clients.cnt, -1, 1);
+ if (ret)
+ goto bail;
+
+ /* seems like this is the only client holding the clocks */
+ mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
+ if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) == 1 &&
+ ipa3_ctx->tag_process_before_gating) {
+ ipa3_ctx->tag_process_before_gating = false;
+ /*
+ * When TAG process ends, active clients will be
+ * decreased
+ */
+ queue_work(ipa3_ctx->power_mgmt_wq, &ipa3_tag_work);
+ goto unlock_mutex;
+ }
+
+ /* a different context might increase the clock reference meanwhile */
+ ret = atomic_sub_return(1, &ipa3_ctx->ipa3_active_clients.cnt);
+ if (ret > 0)
+ goto unlock_mutex;
+ ipa3_disable_clks();
+
+unlock_mutex:
+ mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
+bail:
+ IPADBG_LOW("active clients = %d\n",
+ atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
}
/**
@@ -3406,29 +3600,39 @@
*/
void ipa3_dec_client_disable_clks(struct ipa_active_client_logging_info *id)
{
- struct ipa_active_client_logging_info log_info;
-
- ipa3_active_clients_lock();
ipa3_active_clients_log_dec(id, false);
- ipa3_ctx->ipa3_active_clients.cnt--;
- IPADBG_LOW("active clients = %d\n", ipa3_ctx->ipa3_active_clients.cnt);
- if (ipa3_ctx->ipa3_active_clients.cnt == 0) {
- if (ipa3_ctx->tag_process_before_gating) {
- ipa3_ctx->tag_process_before_gating = false;
- /*
- * When TAG process ends, active clients will be
- * decreased
- */
- IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info,
- "TAG_PROCESS");
- ipa3_active_clients_log_inc(&log_info, false);
- ipa3_ctx->ipa3_active_clients.cnt = 1;
- queue_work(ipa3_ctx->power_mgmt_wq, &ipa3_tag_work);
- } else {
- ipa3_disable_clks();
- }
+ __ipa3_dec_client_disable_clks();
+}
+
+static void ipa_dec_clients_disable_clks_on_wq(struct work_struct *work)
+{
+ __ipa3_dec_client_disable_clks();
+}
+
+/**
+ * ipa3_dec_client_disable_clks_no_block() - Decrease active clients counter
+ * if possible without blocking. If this is the last client then the desrease
+ * will happen from work queue context.
+ *
+ * Return codes:
+ * None
+ */
+void ipa3_dec_client_disable_clks_no_block(
+ struct ipa_active_client_logging_info *id)
+{
+ int ret;
+
+ ipa3_active_clients_log_dec(id, true);
+ ret = atomic_add_unless(&ipa3_ctx->ipa3_active_clients.cnt, -1, 1);
+ if (ret) {
+ IPADBG_LOW("active clients = %d\n",
+ atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
+ return;
}
- ipa3_active_clients_unlock();
+
+ /* seems like this is the only client holding the clocks */
+ queue_work(ipa3_ctx->power_mgmt_wq,
+ &ipa_dec_clients_disable_clks_on_wq_work);
}
/**
@@ -3524,34 +3728,20 @@
return 0;
}
- ipa3_active_clients_lock();
+ /* Hold the mutex to avoid race conditions with ipa3_enable_clocks() */
+ mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
ipa3_ctx->curr_ipa_clk_rate = clk_rate;
IPADBG_LOW("setting clock rate to %u\n", ipa3_ctx->curr_ipa_clk_rate);
- if (ipa3_ctx->ipa3_active_clients.cnt > 0) {
- struct ipa_active_client_logging_info log_info;
-
- /*
- * clk_set_rate should be called with unlocked lock to allow
- * clients to get a reference to IPA clock synchronously.
- * Hold a reference to IPA clock here to make sure clock
- * state does not change during set_rate.
- */
- IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info);
- ipa3_ctx->ipa3_active_clients.cnt++;
- ipa3_active_clients_log_inc(&log_info, false);
- ipa3_active_clients_unlock();
-
+ if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) > 0) {
if (ipa3_clk)
clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate);
if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl,
- ipa3_get_bus_vote()))
+ ipa3_get_bus_vote()))
WARN_ON(1);
- /* remove the vote added here */
- IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
} else {
IPADBG_LOW("clocks are gated, not setting rate\n");
- ipa3_active_clients_unlock();
}
+ mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
IPADBG_LOW("Done\n");
return 0;
@@ -3757,14 +3947,17 @@
int i;
struct ipa3_flt_tbl *flt_tbl;
+ idr_destroy(&ipa3_ctx->flt_rule_ids[IPA_IP_v4]);
+ idr_destroy(&ipa3_ctx->flt_rule_ids[IPA_IP_v6]);
+
for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
if (!ipa_is_ep_support_flt(i))
continue;
flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v4];
- idr_destroy(&flt_tbl->rule_ids);
+ flt_tbl->rule_ids = NULL;
flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v6];
- idr_destroy(&flt_tbl->rule_ids);
+ flt_tbl->rule_ids = NULL;
}
}
@@ -3939,6 +4132,7 @@
struct ipa3_uc_hdlrs uc_hdlrs = { 0 };
struct ipa3_flt_tbl *flt_tbl;
int i;
+ struct idr *idr;
if (ipa3_ctx == NULL) {
IPADBG("IPA driver haven't initialized\n");
@@ -3962,6 +4156,11 @@
/* Assign resource limitation to each group */
ipa3_set_resorce_groups_min_max_limits();
+ idr = &(ipa3_ctx->flt_rule_ids[IPA_IP_v4]);
+ idr_init(idr);
+ idr = &(ipa3_ctx->flt_rule_ids[IPA_IP_v6]);
+ idr_init(idr);
+
for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
if (!ipa_is_ep_support_flt(i))
continue;
@@ -3972,7 +4171,7 @@
!ipa3_ctx->ip4_flt_tbl_hash_lcl;
flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] =
!ipa3_ctx->ip4_flt_tbl_nhash_lcl;
- idr_init(&flt_tbl->rule_ids);
+ flt_tbl->rule_ids = &ipa3_ctx->flt_rule_ids[IPA_IP_v4];
flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v6];
INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
@@ -3980,7 +4179,7 @@
!ipa3_ctx->ip6_flt_tbl_hash_lcl;
flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] =
!ipa3_ctx->ip6_flt_tbl_nhash_lcl;
- idr_init(&flt_tbl->rule_ids);
+ flt_tbl->rule_ids = &ipa3_ctx->flt_rule_ids[IPA_IP_v6];
}
if (!ipa3_ctx->apply_rg10_wa) {
@@ -3998,7 +4197,8 @@
* IPAv3.5 and above requires to disable prefetch for USB in order
* to allow MBIM to work, currently MBIM is not needed in MHI mode.
*/
- if ((ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5) &&
+ if ((ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5
+ && ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) &&
(!ipa3_ctx->ipa_config_is_mhi))
ipa3_disable_prefetch(IPA_CLIENT_USB_CONS);
@@ -4064,6 +4264,12 @@
else
IPADBG(":ntn init ok\n");
+ result = ipa_hw_stats_init();
+ if (result)
+ IPAERR("fail to init stats %d\n", result);
+ else
+ IPADBG(":stats init ok\n");
+
ipa3_register_panic_hdlr();
ipa3_ctx->q6_proxy_clk_vote_valid = true;
@@ -4512,10 +4718,9 @@
}
mutex_init(&ipa3_ctx->ipa3_active_clients.mutex);
- spin_lock_init(&ipa3_ctx->ipa3_active_clients.spinlock);
IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "PROXY_CLK_VOTE");
ipa3_active_clients_log_inc(&log_info, false);
- ipa3_ctx->ipa3_active_clients.cnt = 1;
+ atomic_set(&ipa3_ctx->ipa3_active_clients.cnt, 1);
/* Create workqueues for power management */
ipa3_ctx->power_mgmt_wq =
@@ -4625,12 +4830,16 @@
hdr_proc_ctx_tbl.head_free_offset_list[i]);
}
INIT_LIST_HEAD(&ipa3_ctx->rt_tbl_set[IPA_IP_v4].head_rt_tbl_list);
+ idr_init(&ipa3_ctx->rt_tbl_set[IPA_IP_v4].rule_ids);
INIT_LIST_HEAD(&ipa3_ctx->rt_tbl_set[IPA_IP_v6].head_rt_tbl_list);
+ idr_init(&ipa3_ctx->rt_tbl_set[IPA_IP_v6].rule_ids);
rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v4];
INIT_LIST_HEAD(&rset->head_rt_tbl_list);
+ idr_init(&rset->rule_ids);
rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v6];
INIT_LIST_HEAD(&rset->head_rt_tbl_list);
+ idr_init(&rset->rule_ids);
INIT_LIST_HEAD(&ipa3_ctx->intf_list);
INIT_LIST_HEAD(&ipa3_ctx->msg_list);
@@ -4750,6 +4959,12 @@
fail_device_create:
unregister_chrdev_region(ipa3_ctx->dev_num, 1);
fail_alloc_chrdev_region:
+ rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v6];
+ idr_destroy(&rset->rule_ids);
+ rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v4];
+ idr_destroy(&rset->rule_ids);
+ idr_destroy(&ipa3_ctx->rt_tbl_set[IPA_IP_v6].rule_ids);
+ idr_destroy(&ipa3_ctx->rt_tbl_set[IPA_IP_v4].rule_ids);
ipa3_free_dma_task_for_gsi();
fail_dma_task:
idr_destroy(&ipa3_ctx->ipa_idr);
@@ -5405,6 +5620,10 @@
struct device_node *node = dev->of_node;
int res;
+ if (ipa3_ctx == NULL) {
+ IPAERR("ipa3_ctx was not initialized\n");
+ return -ENXIO;
+ }
IPADBG("node->name=%s\n", node->name);
if (strcmp("qcom,smp2pgpio_map_ipa_1_out", node->name) == 0) {
res = of_get_gpio(node, 0);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
index 2d08767..1af968e 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
@@ -644,6 +644,12 @@
}
gsi_ep_cfg_ptr = ipa3_get_gsi_ep_info(ep->client);
+ if (gsi_ep_cfg_ptr == NULL) {
+ IPAERR("Error ipa3_get_gsi_ep_info ret NULL\n");
+ result = -EFAULT;
+ goto write_evt_scratch_fail;
+ }
+
params->chan_params.evt_ring_hdl = ep->gsi_evt_ring_hdl;
params->chan_params.ch_id = gsi_ep_cfg_ptr->ipa_gsi_chan_num;
gsi_res = gsi_alloc_channel(¶ms->chan_params, gsi_dev_hdl,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index 2a7b977..9486b0a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -64,6 +64,10 @@
__stringify(IPA_QUOTA_REACH),
__stringify(IPA_SSR_BEFORE_SHUTDOWN),
__stringify(IPA_SSR_AFTER_POWERUP),
+ __stringify(ADD_VLAN_IFACE),
+ __stringify(DEL_VLAN_IFACE),
+ __stringify(ADD_L2TP_VLAN_MAPPING),
+ __stringify(DEL_L2TP_VLAN_MAPPING)
};
const char *ipa3_hdr_l2_type_name[] = {
@@ -78,6 +82,8 @@
__stringify(IPA_HDR_PROC_ETHII_TO_802_3),
__stringify(IPA_HDR_PROC_802_3_TO_ETHII),
__stringify(IPA_HDR_PROC_802_3_TO_802_3),
+ __stringify(IPA_HDR_PROC_L2TP_HEADER_ADD),
+ __stringify(IPA_HDR_PROC_L2TP_HEADER_REMOVE),
};
static struct dentry *dent;
@@ -258,6 +264,7 @@
return scnprintf(
dbg_buff, IPA_MAX_MSG_LEN,
"IPA_ENDP_INIT_NAT_%u=0x%x\n"
+ "IPA_ENDP_INIT_CONN_TRACK_n%u=0x%x\n"
"IPA_ENDP_INIT_HDR_%u=0x%x\n"
"IPA_ENDP_INIT_HDR_EXT_%u=0x%x\n"
"IPA_ENDP_INIT_MODE_%u=0x%x\n"
@@ -268,6 +275,7 @@
"IPA_ENDP_INIT_DEAGGR_%u=0x%x\n"
"IPA_ENDP_INIT_CFG_%u=0x%x\n",
pipe, ipahal_read_reg_n(IPA_ENDP_INIT_NAT_n, pipe),
+ pipe, ipahal_read_reg_n(IPA_ENDP_INIT_CONN_TRACK_n, pipe),
pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HDR_n, pipe),
pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HDR_EXT_n, pipe),
pipe, ipahal_read_reg_n(IPA_ENDP_INIT_MODE_n, pipe),
@@ -355,14 +363,14 @@
{
int nbytes;
- ipa3_active_clients_lock();
- if (ipa3_ctx->ipa3_active_clients.cnt)
+ mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
+ if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt))
nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
"IPA APPS power state is ON\n");
else
nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
"IPA APPS power state is OFF\n");
- ipa3_active_clients_unlock();
+ mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
}
@@ -524,7 +532,8 @@
}
if ((attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) ||
- (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3)) {
+ (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) ||
+ (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP)) {
pr_err("dst_mac_addr:%pM ", attrib->dst_mac_addr);
}
@@ -1055,7 +1064,7 @@
ipa3_ctx->stats.stat_compl,
ipa3_ctx->stats.aggr_close,
ipa3_ctx->stats.wan_aggr_close,
- ipa3_ctx->ipa3_active_clients.cnt,
+ atomic_read(&ipa3_ctx->ipa3_active_clients.cnt),
connect,
ipa3_ctx->stats.wan_rx_empty,
ipa3_ctx->stats.wan_repl_rx_empty,
@@ -1248,8 +1257,6 @@
if (!ipa3_get_ntn_stats(&stats)) {
nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
"TX num_pkts_processed=%u\n"
- "TX tail_ptr_val=%u\n"
- "TX num_db_fired=%u\n"
"TX ringFull=%u\n"
"TX ringEmpty=%u\n"
"TX ringUsageHigh=%u\n"
@@ -1261,27 +1268,25 @@
"TX bamFifoUsageLow=%u\n"
"TX bamUtilCount=%u\n"
"TX num_db=%u\n"
- "TX num_qmb_int_handled=%u\n",
+ "TX num_qmb_int_handled=%u\n"
+ "TX ipa_pipe_number=%u\n",
TX_STATS(num_pkts_processed),
- TX_STATS(tail_ptr_val),
- TX_STATS(num_db_fired),
- TX_STATS(tx_comp_ring_stats.ringFull),
- TX_STATS(tx_comp_ring_stats.ringEmpty),
- TX_STATS(tx_comp_ring_stats.ringUsageHigh),
- TX_STATS(tx_comp_ring_stats.ringUsageLow),
- TX_STATS(tx_comp_ring_stats.RingUtilCount),
- TX_STATS(bam_stats.bamFifoFull),
- TX_STATS(bam_stats.bamFifoEmpty),
- TX_STATS(bam_stats.bamFifoUsageHigh),
- TX_STATS(bam_stats.bamFifoUsageLow),
- TX_STATS(bam_stats.bamUtilCount),
+ TX_STATS(ring_stats.ringFull),
+ TX_STATS(ring_stats.ringEmpty),
+ TX_STATS(ring_stats.ringUsageHigh),
+ TX_STATS(ring_stats.ringUsageLow),
+ TX_STATS(ring_stats.RingUtilCount),
+ TX_STATS(gsi_stats.bamFifoFull),
+ TX_STATS(gsi_stats.bamFifoEmpty),
+ TX_STATS(gsi_stats.bamFifoUsageHigh),
+ TX_STATS(gsi_stats.bamFifoUsageLow),
+ TX_STATS(gsi_stats.bamUtilCount),
TX_STATS(num_db),
- TX_STATS(num_qmb_int_handled));
+ TX_STATS(num_qmb_int_handled),
+ TX_STATS(ipa_pipe_number));
cnt += nbytes;
nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
- "RX max_outstanding_pkts=%u\n"
"RX num_pkts_processed=%u\n"
- "RX rx_ring_rp_value=%u\n"
"RX ringFull=%u\n"
"RX ringEmpty=%u\n"
"RX ringUsageHigh=%u\n"
@@ -1292,21 +1297,23 @@
"RX bamFifoUsageHigh=%u\n"
"RX bamFifoUsageLow=%u\n"
"RX bamUtilCount=%u\n"
- "RX num_db=%u\n",
- RX_STATS(max_outstanding_pkts),
+ "RX num_db=%u\n"
+ "RX num_qmb_int_handled=%u\n"
+ "RX ipa_pipe_number=%u\n",
RX_STATS(num_pkts_processed),
- RX_STATS(rx_ring_rp_value),
- RX_STATS(rx_ind_ring_stats.ringFull),
- RX_STATS(rx_ind_ring_stats.ringEmpty),
- RX_STATS(rx_ind_ring_stats.ringUsageHigh),
- RX_STATS(rx_ind_ring_stats.ringUsageLow),
- RX_STATS(rx_ind_ring_stats.RingUtilCount),
- RX_STATS(bam_stats.bamFifoFull),
- RX_STATS(bam_stats.bamFifoEmpty),
- RX_STATS(bam_stats.bamFifoUsageHigh),
- RX_STATS(bam_stats.bamFifoUsageLow),
- RX_STATS(bam_stats.bamUtilCount),
- RX_STATS(num_db));
+ RX_STATS(ring_stats.ringFull),
+ RX_STATS(ring_stats.ringEmpty),
+ RX_STATS(ring_stats.ringUsageHigh),
+ RX_STATS(ring_stats.ringUsageLow),
+ RX_STATS(ring_stats.RingUtilCount),
+ RX_STATS(gsi_stats.bamFifoFull),
+ RX_STATS(gsi_stats.bamFifoEmpty),
+ RX_STATS(gsi_stats.bamFifoUsageHigh),
+ RX_STATS(gsi_stats.bamFifoUsageLow),
+ RX_STATS(gsi_stats.bamUtilCount),
+ RX_STATS(num_db),
+ RX_STATS(num_qmb_int_handled),
+ RX_STATS(ipa_pipe_number));
cnt += nbytes;
} else {
nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
@@ -1776,12 +1783,12 @@
return 0;
}
memset(active_clients_buf, 0, IPA_DBG_ACTIVE_CLIENT_BUF_SIZE);
- ipa3_active_clients_lock();
+ mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
cnt = ipa3_active_clients_log_print_buffer(active_clients_buf,
IPA_DBG_ACTIVE_CLIENT_BUF_SIZE - IPA_MAX_MSG_LEN);
table_size = ipa3_active_clients_log_print_table(active_clients_buf
+ cnt, IPA_MAX_MSG_LEN);
- ipa3_active_clients_unlock();
+ mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
return simple_read_from_buffer(ubuf, count, ppos,
active_clients_buf, cnt + table_size);
@@ -2162,6 +2169,8 @@
goto fail;
}
+ ipa_debugfs_init_stats(dent);
+
return;
fail:
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 915f2b8..02c5991 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -759,6 +759,15 @@
trace_idle_sleep_enter3(sys->ep->client);
usleep_range(POLLING_MIN_SLEEP_RX, POLLING_MAX_SLEEP_RX);
trace_idle_sleep_exit3(sys->ep->client);
+
+ /*
+ * if pipe is out of buffers there is no point polling for
+ * completed descs; release the worker so delayed work can
+ * run in a timely manner
+ */
+ if (sys->len - sys->len_pending_xfer == 0)
+ break;
+
} while (inactive_cycles <= POLLING_INACTIVITY_RX);
trace_poll_to_intr3(sys->ep->client);
@@ -775,8 +784,8 @@
sys = container_of(dwork, struct ipa3_sys_context, switch_to_intr_work);
if (sys->ep->napi_enabled) {
- ipa3_rx_switch_to_intr_mode(sys);
- IPA_ACTIVE_CLIENTS_DEC_SPECIAL("NAPI");
+ /* interrupt mode is done in ipa3_rx_poll context */
+ ipa_assert();
} else
ipa3_handle_rx(sys);
}
@@ -1549,6 +1558,8 @@
struct ipa3_rx_pkt_wrapper *rx_pkt;
struct ipa3_rx_pkt_wrapper *tmp;
+ spin_lock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
+
list_for_each_entry_safe(rx_pkt, tmp,
&ipa3_ctx->wc_memb.wlan_comm_desc_list, link) {
list_del(&rx_pkt->link);
@@ -1569,6 +1580,8 @@
IPAERR("wlan comm buff total cnt: %d\n",
ipa3_ctx->wc_memb.wlan_comm_total_cnt);
+ spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
+
}
static void ipa3_alloc_wlan_rx_common_cache(u32 size)
@@ -1606,11 +1619,13 @@
goto fail_dma_mapping;
}
+ spin_lock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
list_add_tail(&rx_pkt->link,
&ipa3_ctx->wc_memb.wlan_comm_desc_list);
rx_len_cached = ++ipa3_ctx->wc_memb.wlan_comm_total_cnt;
ipa3_ctx->wc_memb.wlan_comm_free_cnt++;
+ spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
}
@@ -1836,6 +1851,8 @@
struct gsi_xfer_elem gsi_xfer_elem_one;
u32 curr;
+ spin_lock_bh(&sys->spinlock);
+
rx_len_cached = sys->len;
curr = atomic_read(&sys->repl.head_idx);
@@ -1878,6 +1895,7 @@
mb();
atomic_set(&sys->repl.head_idx, curr);
}
+ spin_unlock_bh(&sys->spinlock);
queue_work(sys->repl_wq, &sys->repl_work);
@@ -2556,6 +2574,7 @@
WARN_ON(1);
return;
}
+ spin_lock_bh(&sys->spinlock);
rx_pkt_expected = list_first_entry(&sys->head_desc_list,
struct ipa3_rx_pkt_wrapper,
link);
@@ -2563,6 +2582,7 @@
sys->len--;
if (size)
rx_pkt_expected->len = size;
+ spin_unlock_bh(&sys->spinlock);
rx_skb = rx_pkt_expected->data.skb;
dma_unmap_single(ipa3_ctx->pdev, rx_pkt_expected->data.dma_addr,
sys->rx_buff_sz, DMA_FROM_DEVICE);
@@ -3252,6 +3272,7 @@
{
struct ipa3_sys_context *sys;
struct ipa3_rx_pkt_wrapper *rx_pkt_expected, *rx_pkt_rcvd;
+ int clk_off;
if (!notify) {
IPAERR("gsi notify is NULL.\n");
@@ -3283,7 +3304,20 @@
GSI_CHAN_MODE_POLL);
ipa3_inc_acquire_wakelock();
atomic_set(&sys->curr_polling_state, 1);
- queue_work(sys->wq, &sys->work);
+ if (sys->ep->napi_enabled) {
+ struct ipa_active_client_logging_info log;
+
+ IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log, "NAPI");
+ clk_off = ipa3_inc_client_enable_clks_no_block(
+ &log);
+ if (!clk_off)
+ sys->ep->client_notify(sys->ep->priv,
+ IPA_CLIENT_START_POLL, 0);
+ else
+ queue_work(sys->wq, &sys->work);
+ } else {
+ queue_work(sys->wq, &sys->work);
+ }
}
break;
default:
@@ -3651,6 +3685,9 @@
int cnt = 0;
struct ipa_mem_buffer mem_info = {0};
static int total_cnt;
+ struct ipa_active_client_logging_info log;
+
+ IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log, "NAPI");
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
@@ -3663,6 +3700,7 @@
while (cnt < weight &&
atomic_read(&ep->sys->curr_polling_state)) {
+ atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
ret = ipa_poll_gsi_pkt(ep->sys, &mem_info);
if (ret)
break;
@@ -3680,7 +3718,8 @@
if (cnt < weight) {
ep->client_notify(ep->priv, IPA_CLIENT_COMP_NAPI, 0);
- queue_work(ep->sys->wq, &ep->sys->switch_to_intr_work.work);
+ ipa3_rx_switch_to_intr_mode(ep->sys);
+ ipa3_dec_client_disable_clks_no_block(&log);
}
return cnt;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
index bfcaa2b..beca549 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
@@ -741,7 +741,7 @@
goto error;
}
- if ((*rt_tbl)->cookie != IPA_COOKIE) {
+ if ((*rt_tbl)->cookie != IPA_RT_TBL_COOKIE) {
IPAERR("RT table cookie is invalid\n");
goto error;
}
@@ -800,13 +800,13 @@
}
INIT_LIST_HEAD(&((*entry)->link));
(*entry)->rule = *rule;
- (*entry)->cookie = IPA_COOKIE;
+ (*entry)->cookie = IPA_FLT_COOKIE;
(*entry)->rt_tbl = rt_tbl;
(*entry)->tbl = tbl;
if (rule->rule_id) {
id = rule->rule_id;
} else {
- id = ipa3_alloc_rule_id(&tbl->rule_ids);
+ id = ipa3_alloc_rule_id(tbl->rule_ids);
if (id < 0) {
IPAERR("failed to allocate rule id\n");
WARN_ON(1);
@@ -835,12 +835,18 @@
if (id < 0) {
IPAERR("failed to add to tree\n");
WARN_ON(1);
+ goto ipa_insert_failed;
}
*rule_hdl = id;
entry->id = id;
IPADBG_LOW("add flt rule rule_cnt=%d\n", tbl->rule_cnt);
return 0;
+ipa_insert_failed:
+ if (entry->rt_tbl)
+ entry->rt_tbl->ref_cnt--;
+ tbl->rule_cnt--;
+ return -EPERM;
}
static int __ipa_add_flt_rule(struct ipa3_flt_tbl *tbl, enum ipa_ip_type ip,
@@ -866,9 +872,16 @@
list_add(&entry->link, &tbl->head_flt_rule_list);
}
- __ipa_finish_flt_rule_add(tbl, entry, rule_hdl);
+ if (__ipa_finish_flt_rule_add(tbl, entry, rule_hdl))
+ goto ipa_insert_failed;
return 0;
+ipa_insert_failed:
+ list_del(&entry->link);
+ /* if rule id was allocated from idr, remove it */
+ if (!(entry->rule_id & ipahal_get_rule_id_hi_bit()))
+ idr_remove(entry->tbl->rule_ids, entry->rule_id);
+ kmem_cache_free(ipa3_ctx->flt_rule_cache, entry);
error:
return -EPERM;
@@ -887,7 +900,7 @@
goto error;
if (rule == NULL || rule_hdl == NULL) {
- IPAERR("bad parms rule=%p rule_hdl=%p\n", rule,
+ IPAERR_RL("bad parms rule=%p rule_hdl=%p\n", rule,
rule_hdl);
goto error;
}
@@ -900,7 +913,8 @@
list_add(&entry->link, &((*add_after_entry)->link));
- __ipa_finish_flt_rule_add(tbl, entry, rule_hdl);
+ if (__ipa_finish_flt_rule_add(tbl, entry, rule_hdl))
+ goto ipa_insert_failed;
/*
* prepare for next insertion
@@ -909,6 +923,13 @@
return 0;
+ipa_insert_failed:
+ list_del(&entry->link);
+ /* if rule id was allocated from idr, remove it */
+ if (!(entry->rule_id & ipahal_get_rule_id_hi_bit()))
+ idr_remove(entry->tbl->rule_ids, entry->rule_id);
+ kmem_cache_free(ipa3_ctx->flt_rule_cache, entry);
+
error:
*add_after_entry = NULL;
return -EPERM;
@@ -921,12 +942,12 @@
entry = ipa3_id_find(rule_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
return -EINVAL;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("bad params\n");
+ if (entry->cookie != IPA_FLT_COOKIE) {
+ IPAERR_RL("bad params\n");
return -EINVAL;
}
id = entry->id;
@@ -940,7 +961,7 @@
entry->cookie = 0;
/* if rule id was allocated from idr, remove it */
if (!(entry->rule_id & ipahal_get_rule_id_hi_bit()))
- idr_remove(&entry->tbl->rule_ids, entry->rule_id);
+ idr_remove(entry->tbl->rule_ids, entry->rule_id);
kmem_cache_free(ipa3_ctx->flt_rule_cache, entry);
@@ -958,12 +979,12 @@
entry = ipa3_id_find(frule->rule_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
goto error;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("bad params\n");
+ if (entry->cookie != IPA_FLT_COOKIE) {
+ IPAERR_RL("bad params\n");
goto error;
}
@@ -973,25 +994,25 @@
if (frule->rule.action != IPA_PASS_TO_EXCEPTION) {
if (!frule->rule.eq_attrib_type) {
if (!frule->rule.rt_tbl_hdl) {
- IPAERR("invalid RT tbl\n");
+ IPAERR_RL("invalid RT tbl\n");
goto error;
}
rt_tbl = ipa3_id_find(frule->rule.rt_tbl_hdl);
if (rt_tbl == NULL) {
- IPAERR("RT tbl not found\n");
+ IPAERR_RL("RT tbl not found\n");
goto error;
}
- if (rt_tbl->cookie != IPA_COOKIE) {
- IPAERR("RT table cookie is invalid\n");
+ if (rt_tbl->cookie != IPA_RT_TBL_COOKIE) {
+ IPAERR_RL("RT table cookie is invalid\n");
goto error;
}
} else {
if (frule->rule.rt_tbl_idx > ((ip == IPA_IP_v4) ?
IPA_MEM_PART(v4_modem_rt_index_hi) :
IPA_MEM_PART(v6_modem_rt_index_hi))) {
- IPAERR("invalid RT tbl\n");
+ IPAERR_RL("invalid RT tbl\n");
goto error;
}
}
@@ -1036,7 +1057,7 @@
int ipa_ep_idx;
if (rule == NULL || rule_hdl == NULL || ep >= IPA_CLIENT_MAX) {
- IPAERR("bad parms rule=%p rule_hdl=%p ep=%d\n", rule,
+ IPAERR_RL("bad parms rule=%p rule_hdl=%p ep=%d\n", rule,
rule_hdl, ep);
return -EINVAL;
@@ -1066,7 +1087,7 @@
if (rules == NULL || rules->num_rules == 0 ||
rules->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1081,7 +1102,7 @@
result = -1;
if (result) {
- IPAERR("failed to add flt rule %d\n", i);
+ IPAERR_RL("failed to add flt rule %d\n", i);
rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
} else {
rules->rules[i].status = 0;
@@ -1089,7 +1110,7 @@
}
if (rules->global) {
- IPAERR("no support for global filter rules\n");
+ IPAERR_RL("no support for global filter rules\n");
result = -EPERM;
goto bail;
}
@@ -1124,12 +1145,12 @@
if (rules == NULL || rules->num_rules == 0 ||
rules->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
if (rules->ep >= IPA_CLIENT_MAX) {
- IPAERR("bad parms ep=%d\n", rules->ep);
+ IPAERR_RL("bad parms ep=%d\n", rules->ep);
return -EINVAL;
}
@@ -1144,20 +1165,20 @@
entry = ipa3_id_find(rules->add_after_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
result = -EINVAL;
goto bail;
}
if (entry->tbl != tbl) {
- IPAERR("given entry does not match the table\n");
+ IPAERR_RL("given entry does not match the table\n");
result = -EINVAL;
goto bail;
}
if (tbl->sticky_rear)
if (&entry->link == tbl->head_flt_rule_list.prev) {
- IPAERR("cannot add rule at end of a sticky table");
+ IPAERR_RL("cannot add rule at end of a sticky table");
result = -EINVAL;
goto bail;
}
@@ -1179,7 +1200,7 @@
&entry);
if (result) {
- IPAERR("failed to add flt rule %d\n", i);
+ IPAERR_RL("failed to add flt rule %d\n", i);
rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
} else {
rules->rules[i].status = 0;
@@ -1213,14 +1234,14 @@
int result;
if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa3_ctx->lock);
for (i = 0; i < hdls->num_hdls; i++) {
if (__ipa_del_flt_rule(hdls->hdl[i].hdl)) {
- IPAERR("failed to del flt rule %i\n", i);
+ IPAERR_RL("failed to del flt rule %i\n", i);
hdls->hdl[i].status = IPA_FLT_STATUS_OF_DEL_FAILED;
} else {
hdls->hdl[i].status = 0;
@@ -1253,14 +1274,14 @@
int result;
if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa3_ctx->lock);
for (i = 0; i < hdls->num_rules; i++) {
if (__ipa_mdfy_flt_rule(&hdls->rules[i], hdls->ip)) {
- IPAERR("failed to mdfy flt rule %i\n", i);
+ IPAERR_RL("failed to mdfy flt rule %i\n", i);
hdls->rules[i].status = IPA_FLT_STATUS_OF_MDFY_FAILED;
} else {
hdls->rules[i].status = 0;
@@ -1294,7 +1315,7 @@
int result;
if (ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1330,7 +1351,7 @@
int id;
if (ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1353,7 +1374,7 @@
entry->rt_tbl->ref_cnt--;
/* if rule id was allocated from idr, remove it */
if (!(entry->rule_id & ipahal_get_rule_id_hi_bit()))
- idr_remove(&entry->tbl->rule_ids,
+ idr_remove(entry->tbl->rule_ids,
entry->rule_id);
entry->cookie = 0;
id = entry->id;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
index 6e51472..cc29f8f 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
@@ -13,7 +13,7 @@
#include "ipa_i.h"
#include "ipahal/ipahal.h"
-static const u32 ipa_hdr_bin_sz[IPA_HDR_BIN_MAX] = { 8, 16, 24, 36, 60};
+static const u32 ipa_hdr_bin_sz[IPA_HDR_BIN_MAX] = { 8, 16, 24, 36, 64};
static const u32 ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN_MAX] = { 32, 64};
#define HDR_TYPE_IS_VALID(type) \
@@ -66,19 +66,37 @@
{
struct ipa3_hdr_proc_ctx_entry *entry;
int ret;
+ int ep;
list_for_each_entry(entry,
&ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list,
link) {
IPADBG_LOW("processing type %d ofst=%d\n",
entry->type, entry->offset_entry->offset);
+
+ if (entry->l2tp_params.is_dst_pipe_valid) {
+ ep = ipa3_get_ep_mapping(entry->l2tp_params.dst_pipe);
+ if (ep >= 0) {
+ entry->l2tp_params.hdr_remove_param.
+ hdr_ofst_pkt_size_valid = ipa3_ctx->
+ ep[ep].cfg.hdr.hdr_ofst_pkt_size_valid;
+ entry->l2tp_params.hdr_remove_param.
+ hdr_ofst_pkt_size = ipa3_ctx->ep[ep].
+ cfg.hdr.hdr_ofst_pkt_size;
+ entry->l2tp_params.hdr_remove_param.
+ hdr_endianness = ipa3_ctx->ep[ep].
+ cfg.hdr_ext.hdr_little_endian ? 0 : 1;
+ }
+ }
+
ret = ipahal_cp_proc_ctx_to_hw_buff(entry->type, mem->base,
entry->offset_entry->offset,
entry->hdr->hdr_len,
entry->hdr->is_hdr_proc_ctx,
entry->hdr->phys_base,
hdr_base_addr,
- entry->hdr->offset_entry);
+ entry->hdr->offset_entry,
+ entry->l2tp_params);
if (ret)
return ret;
}
@@ -326,17 +344,17 @@
proc_ctx->type, proc_ctx->hdr_hdl);
if (!HDR_PROC_TYPE_IS_VALID(proc_ctx->type)) {
- IPAERR("invalid processing type %d\n", proc_ctx->type);
+ IPAERR_RL("invalid processing type %d\n", proc_ctx->type);
return -EINVAL;
}
hdr_entry = ipa3_id_find(proc_ctx->hdr_hdl);
if (!hdr_entry) {
- IPAERR("hdr_hdl is invalid\n");
+ IPAERR_RL("hdr_hdl is invalid\n");
return -EINVAL;
}
- if (hdr_entry->cookie != IPA_COOKIE) {
- IPAERR("Invalid header cookie %u\n", hdr_entry->cookie);
+ if (hdr_entry->cookie != IPA_HDR_COOKIE) {
+ IPAERR_RL("Invalid header cookie %u\n", hdr_entry->cookie);
WARN_ON(1);
return -EINVAL;
}
@@ -353,9 +371,10 @@
entry->type = proc_ctx->type;
entry->hdr = hdr_entry;
+ entry->l2tp_params = proc_ctx->l2tp_params;
if (add_ref_hdr)
hdr_entry->ref_cnt++;
- entry->cookie = IPA_COOKIE;
+ entry->cookie = IPA_PROC_HDR_COOKIE;
needed_len = ipahal_get_proc_ctx_needed_len(proc_ctx->type);
@@ -365,7 +384,7 @@
ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN1]) {
bin = IPA_HDR_PROC_CTX_BIN1;
} else {
- IPAERR("unexpected needed len %d\n", needed_len);
+ IPAERR_RL("unexpected needed len %d\n", needed_len);
WARN_ON(1);
goto bad_len;
}
@@ -375,7 +394,7 @@
IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
if (list_empty(&htbl->head_free_offset_list[bin])) {
if (htbl->end + ipa_hdr_proc_ctx_bin_sz[bin] > mem_size) {
- IPAERR("hdr proc ctx table overflow\n");
+ IPAERR_RL("hdr proc ctx table overflow\n");
goto bad_len;
}
@@ -413,6 +432,7 @@
if (id < 0) {
IPAERR("failed to alloc id\n");
WARN_ON(1);
+ goto ipa_insert_failed;
}
entry->id = id;
proc_ctx->proc_ctx_hdl = id;
@@ -420,6 +440,13 @@
return 0;
+ipa_insert_failed:
+ list_move(&offset->link,
+ &htbl->head_free_offset_list[offset->bin]);
+ entry->offset_entry = NULL;
+ list_del(&entry->link);
+ htbl->proc_ctx_cnt--;
+
bad_len:
if (add_ref_hdr)
hdr_entry->ref_cnt--;
@@ -432,19 +459,19 @@
static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
{
struct ipa3_hdr_entry *entry;
- struct ipa_hdr_offset_entry *offset;
+ struct ipa_hdr_offset_entry *offset = NULL;
u32 bin;
struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl;
int id;
int mem_size;
if (hdr->hdr_len == 0 || hdr->hdr_len > IPA_HDR_MAX_SIZE) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
goto error;
}
if (!HDR_TYPE_IS_VALID(hdr->type)) {
- IPAERR("invalid hdr type %d\n", hdr->type);
+ IPAERR_RL("invalid hdr type %d\n", hdr->type);
goto error;
}
@@ -463,7 +490,7 @@
entry->type = hdr->type;
entry->is_eth2_ofst_valid = hdr->is_eth2_ofst_valid;
entry->eth2_ofst = hdr->eth2_ofst;
- entry->cookie = IPA_COOKIE;
+ entry->cookie = IPA_HDR_COOKIE;
if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0])
bin = IPA_HDR_BIN0;
@@ -476,7 +503,7 @@
else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN4])
bin = IPA_HDR_BIN4;
else {
- IPAERR("unexpected hdr len %d\n", hdr->hdr_len);
+ IPAERR_RL("unexpected hdr len %d\n", hdr->hdr_len);
goto bad_hdr_len;
}
@@ -542,6 +569,7 @@
if (id < 0) {
IPAERR("failed to alloc id\n");
WARN_ON(1);
+ goto ipa_insert_failed;
}
entry->id = id;
hdr->hdr_hdl = id;
@@ -566,10 +594,19 @@
entry->ref_cnt--;
hdr->hdr_hdl = 0;
ipa3_id_remove(id);
+ipa_insert_failed:
+ if (entry->is_hdr_proc_ctx) {
+ dma_unmap_single(ipa3_ctx->pdev, entry->phys_base,
+ entry->hdr_len, DMA_TO_DEVICE);
+ } else {
+ if (offset)
+ list_move(&offset->link,
+ &htbl->head_free_offset_list[offset->bin]);
+ entry->offset_entry = NULL;
+ }
htbl->hdr_cnt--;
list_del(&entry->link);
- dma_unmap_single(ipa3_ctx->pdev, entry->phys_base,
- entry->hdr_len, DMA_TO_DEVICE);
+
fail_dma_mapping:
entry->is_hdr_proc_ctx = false;
@@ -587,8 +624,8 @@
struct ipa3_hdr_proc_ctx_tbl *htbl = &ipa3_ctx->hdr_proc_ctx_tbl;
entry = ipa3_id_find(proc_ctx_hdl);
- if (!entry || (entry->cookie != IPA_COOKIE)) {
- IPAERR("bad parm\n");
+ if (!entry || (entry->cookie != IPA_PROC_HDR_COOKIE)) {
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -596,7 +633,7 @@
htbl->proc_ctx_cnt, entry->offset_entry->offset);
if (by_user && entry->user_deleted) {
- IPAERR("proc_ctx already deleted by user\n");
+ IPAERR_RL("proc_ctx already deleted by user\n");
return -EINVAL;
}
@@ -634,12 +671,12 @@
entry = ipa3_id_find(hdr_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
return -EINVAL;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("bad parm\n");
+ if (entry->cookie != IPA_HDR_COOKIE) {
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -652,7 +689,7 @@
entry->offset_entry->offset);
if (by_user && entry->user_deleted) {
- IPAERR("proc_ctx already deleted by user\n");
+ IPAERR_RL("proc_ctx already deleted by user\n");
return -EINVAL;
}
@@ -701,7 +738,7 @@
int result = -EFAULT;
if (hdrs == NULL || hdrs->num_hdrs == 0) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -710,7 +747,7 @@
hdrs->num_hdrs);
for (i = 0; i < hdrs->num_hdrs; i++) {
if (__ipa_add_hdr(&hdrs->hdr[i])) {
- IPAERR("failed to add hdr %d\n", i);
+ IPAERR_RL("failed to add hdr %d\n", i);
hdrs->hdr[i].status = -1;
} else {
hdrs->hdr[i].status = 0;
@@ -746,14 +783,14 @@
int result = -EFAULT;
if (hdls == NULL || hdls->num_hdls == 0) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa3_ctx->lock);
for (i = 0; i < hdls->num_hdls; i++) {
if (__ipa3_del_hdr(hdls->hdl[i].hdl, by_user)) {
- IPAERR("failed to del hdr %i\n", i);
+ IPAERR_RL("failed to del hdr %i\n", i);
hdls->hdl[i].status = -1;
} else {
hdls->hdl[i].status = 0;
@@ -801,7 +838,7 @@
int result = -EFAULT;
if (proc_ctxs == NULL || proc_ctxs->num_proc_ctxs == 0) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -810,7 +847,7 @@
proc_ctxs->num_proc_ctxs);
for (i = 0; i < proc_ctxs->num_proc_ctxs; i++) {
if (__ipa_add_hdr_proc_ctx(&proc_ctxs->proc_ctx[i], true)) {
- IPAERR("failed to add hdr pric ctx %d\n", i);
+ IPAERR_RL("failed to add hdr pric ctx %d\n", i);
proc_ctxs->proc_ctx[i].status = -1;
} else {
proc_ctxs->proc_ctx[i].status = 0;
@@ -848,14 +885,14 @@
int result;
if (hdls == NULL || hdls->num_hdls == 0) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa3_ctx->lock);
for (i = 0; i < hdls->num_hdls; i++) {
if (__ipa3_del_hdr_proc_ctx(hdls->hdl[i].hdl, true, by_user)) {
- IPAERR("failed to del hdr %i\n", i);
+ IPAERR_RL("failed to del hdr %i\n", i);
hdls->hdl[i].status = -1;
} else {
hdls->hdl[i].status = 0;
@@ -1062,7 +1099,7 @@
struct ipa3_hdr_entry *entry;
if (strnlen(name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
- IPAERR("Header name too long: %s\n", name);
+ IPAERR_RL("Header name too long: %s\n", name);
return NULL;
}
@@ -1092,7 +1129,7 @@
int result = -1;
if (lookup == NULL) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa3_ctx->lock);
@@ -1179,13 +1216,13 @@
entry = ipa3_id_find(hdr_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
result = -EINVAL;
goto bail;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("invalid header entry\n");
+ if (entry->cookie != IPA_HDR_COOKIE) {
+ IPAERR_RL("invalid header entry\n");
result = -EINVAL;
goto bail;
}
@@ -1214,7 +1251,7 @@
int result = -EFAULT;
if (copy == NULL) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa3_ctx->lock);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c
new file mode 100644
index 0000000..d8785ed
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c
@@ -0,0 +1,1973 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include "ipa_i.h"
+#include "ipahal/ipahal.h"
+#include "ipahal/ipahal_hw_stats.h"
+
+#define IPA_CLIENT_BIT_32(client) \
+ ((ipa3_get_ep_mapping(client) >= 0 && \
+ ipa3_get_ep_mapping(client) < IPA_STATS_MAX_PIPE_BIT) ? \
+ (1 << ipa3_get_ep_mapping(client)) : 0)
+
+int ipa_hw_stats_init(void)
+{
+ if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0)
+ return 0;
+
+ /* initialize stats here */
+ ipa3_ctx->hw_stats.enabled = true;
+ return 0;
+}
+
+int ipa_init_quota_stats(u32 pipe_bitmask)
+{
+ struct ipahal_stats_init_pyld *pyld;
+ struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
+ struct ipahal_imm_cmd_pyld *cmd_pyld;
+ struct ipahal_imm_cmd_register_write quota_base = {0};
+ struct ipahal_imm_cmd_pyld *quota_base_pyld;
+ struct ipahal_imm_cmd_register_write quota_mask = {0};
+ struct ipahal_imm_cmd_pyld *quota_mask_pyld;
+ struct ipa3_desc desc[3] = { {0} };
+ dma_addr_t dma_address;
+ int ret;
+
+ if (!ipa3_ctx->hw_stats.enabled)
+ return 0;
+
+ /* reset driver's cache */
+ memset(&ipa3_ctx->hw_stats.quota, 0, sizeof(ipa3_ctx->hw_stats.quota));
+ ipa3_ctx->hw_stats.quota.init.enabled_bitmask = pipe_bitmask;
+ IPADBG_LOW("pipe_bitmask=0x%x\n", pipe_bitmask);
+
+ pyld = ipahal_stats_generate_init_pyld(IPAHAL_HW_STATS_QUOTA,
+ &ipa3_ctx->hw_stats.quota.init, false);
+ if (!pyld) {
+ IPAERR("failed to generate pyld\n");
+ return -EPERM;
+ }
+
+ if (pyld->len > IPA_MEM_PART(stats_quota_size)) {
+ IPAERR("SRAM partition too small: %d needed %d\n",
+ IPA_MEM_PART(stats_quota_size), pyld->len);
+ ret = -EPERM;
+ goto destroy_init_pyld;
+ }
+
+ dma_address = dma_map_single(ipa3_ctx->pdev,
+ pyld->data,
+ pyld->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ipa3_ctx->pdev, dma_address)) {
+ IPAERR("failed to DMA map\n");
+ ret = -EPERM;
+ goto destroy_init_pyld;
+ }
+
+ /* setting the registers and init the stats pyld are done atomically */
+ quota_mask.skip_pipeline_clear = false;
+ quota_mask.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+ quota_mask.offset = ipahal_get_reg_n_ofst(IPA_STAT_QUOTA_MASK_n,
+ ipa3_ctx->ee);
+ quota_mask.value = pipe_bitmask;
+ quota_mask.value_mask = ~0;
+	quota_mask_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+		&quota_mask, false);
+ if (!quota_mask_pyld) {
+ IPAERR("failed to construct register_write imm cmd\n");
+ ret = -ENOMEM;
+ goto unmap;
+ }
+ desc[0].opcode = quota_mask_pyld->opcode;
+ desc[0].pyld = quota_mask_pyld->data;
+ desc[0].len = quota_mask_pyld->len;
+ desc[0].type = IPA_IMM_CMD_DESC;
+
+ quota_base.skip_pipeline_clear = false;
+ quota_base.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+ quota_base.offset = ipahal_get_reg_n_ofst(IPA_STAT_QUOTA_BASE_n,
+ ipa3_ctx->ee);
+ quota_base.value = ipa3_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(stats_quota_ofst);
+ quota_base.value_mask = ~0;
+	quota_base_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+		&quota_base, false);
+ if (!quota_base_pyld) {
+ IPAERR("failed to construct register_write imm cmd\n");
+ ret = -ENOMEM;
+ goto destroy_quota_mask;
+ }
+ desc[1].opcode = quota_base_pyld->opcode;
+ desc[1].pyld = quota_base_pyld->data;
+ desc[1].len = quota_base_pyld->len;
+ desc[1].type = IPA_IMM_CMD_DESC;
+
+ cmd.is_read = false;
+ cmd.skip_pipeline_clear = false;
+ cmd.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+ cmd.size = pyld->len;
+ cmd.system_addr = dma_address;
+ cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(stats_quota_ofst);
+ cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+ if (!cmd_pyld) {
+ IPAERR("failed to construct dma_shared_mem imm cmd\n");
+ ret = -ENOMEM;
+ goto destroy_quota_base;
+ }
+ desc[2].opcode = cmd_pyld->opcode;
+ desc[2].pyld = cmd_pyld->data;
+ desc[2].len = cmd_pyld->len;
+ desc[2].type = IPA_IMM_CMD_DESC;
+
+ ret = ipa3_send_cmd(3, desc);
+ if (ret) {
+ IPAERR("failed to send immediate command (error %d)\n", ret);
+ goto destroy_imm;
+ }
+
+ ret = 0;
+
+destroy_imm:
+ ipahal_destroy_imm_cmd(cmd_pyld);
+destroy_quota_base:
+ ipahal_destroy_imm_cmd(quota_base_pyld);
+destroy_quota_mask:
+ ipahal_destroy_imm_cmd(quota_mask_pyld);
+unmap:
+ dma_unmap_single(ipa3_ctx->pdev, dma_address, pyld->len, DMA_TO_DEVICE);
+destroy_init_pyld:
+ ipahal_destroy_stats_init_pyld(pyld);
+ return ret;
+}
+
+int ipa_get_quota_stats(struct ipa_quota_stats_all *out)
+{
+ int i;
+ int ret;
+ struct ipahal_stats_get_offset_quota get_offset = { { 0 } };
+ struct ipahal_stats_offset offset = { 0 };
+ struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
+ struct ipahal_imm_cmd_pyld *cmd_pyld;
+ struct ipa_mem_buffer mem;
+ struct ipa3_desc desc = { 0 };
+ struct ipahal_stats_quota_all *stats;
+
+ if (!ipa3_ctx->hw_stats.enabled)
+ return 0;
+
+ get_offset.init = ipa3_ctx->hw_stats.quota.init;
+ ret = ipahal_stats_get_offset(IPAHAL_HW_STATS_QUOTA, &get_offset,
+ &offset);
+ if (ret) {
+ IPAERR("failed to get offset from hal %d\n", ret);
+ return ret;
+ }
+
+ IPADBG_LOW("offset = %d size = %d\n", offset.offset, offset.size);
+
+ mem.size = offset.size;
+ mem.base = dma_alloc_coherent(ipa3_ctx->pdev,
+ mem.size,
+ &mem.phys_base,
+ GFP_KERNEL);
+ if (!mem.base) {
+ IPAERR("fail to alloc DMA memory");
+ return ret;
+ }
+
+ cmd.is_read = true;
+ cmd.clear_after_read = true;
+ cmd.skip_pipeline_clear = false;
+ cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+ cmd.size = mem.size;
+ cmd.system_addr = mem.phys_base;
+ cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(stats_quota_ofst) + offset.offset;
+ cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+ if (!cmd_pyld) {
+ IPAERR("failed to construct dma_shared_mem imm cmd\n");
+ ret = -ENOMEM;
+ goto free_dma_mem;
+ }
+ desc.opcode = cmd_pyld->opcode;
+ desc.pyld = cmd_pyld->data;
+ desc.len = cmd_pyld->len;
+ desc.type = IPA_IMM_CMD_DESC;
+
+ ret = ipa3_send_cmd(1, &desc);
+ if (ret) {
+ IPAERR("failed to send immediate command (error %d)\n", ret);
+ goto destroy_imm;
+ }
+
+ stats = kzalloc(sizeof(*stats), GFP_KERNEL);
+ if (!stats) {
+ IPADBG("failed to alloc memory\n");
+ ret = -ENOMEM;
+ goto destroy_imm;
+ }
+
+ ret = ipahal_parse_stats(IPAHAL_HW_STATS_QUOTA,
+ &ipa3_ctx->hw_stats.quota.init, mem.base, stats);
+ if (ret) {
+ IPAERR("failed to parse stats (error %d)\n", ret);
+ goto free_stats;
+ }
+
+ /*
+ * update driver cache.
+ * the stats were read from hardware with clear_after_read meaning
+ * hardware stats are 0 now
+ */
+ for (i = 0; i < IPA_CLIENT_MAX; i++) {
+ int ep_idx = ipa3_get_ep_mapping(i);
+
+ if (ep_idx == -1 || ep_idx >= IPA3_MAX_NUM_PIPES)
+ continue;
+
+ if (ipa3_ctx->ep[ep_idx].client != i)
+ continue;
+
+ ipa3_ctx->hw_stats.quota.stats.client[i].num_ipv4_bytes +=
+ stats->stats[ep_idx].num_ipv4_bytes;
+ ipa3_ctx->hw_stats.quota.stats.client[i].num_ipv4_pkts +=
+ stats->stats[ep_idx].num_ipv4_pkts;
+ ipa3_ctx->hw_stats.quota.stats.client[i].num_ipv6_bytes +=
+ stats->stats[ep_idx].num_ipv6_bytes;
+ ipa3_ctx->hw_stats.quota.stats.client[i].num_ipv6_pkts +=
+ stats->stats[ep_idx].num_ipv6_pkts;
+ }
+
+ /* copy results to out parameter */
+ if (out)
+ *out = ipa3_ctx->hw_stats.quota.stats;
+ ret = 0;
+free_stats:
+ kfree(stats);
+destroy_imm:
+ ipahal_destroy_imm_cmd(cmd_pyld);
+free_dma_mem:
+ dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
+ return ret;
+
+}
+
+int ipa_reset_quota_stats(enum ipa_client_type client)
+{
+ int ret;
+ struct ipa_quota_stats *stats;
+
+ if (!ipa3_ctx->hw_stats.enabled)
+ return 0;
+
+ if (client >= IPA_CLIENT_MAX) {
+ IPAERR("invalid client %d\n", client);
+ return -EINVAL;
+ }
+
+ /* reading stats will reset them in hardware */
+ ret = ipa_get_quota_stats(NULL);
+ if (ret) {
+ IPAERR("ipa_get_quota_stats failed %d\n", ret);
+ return ret;
+ }
+
+ /* reset driver's cache */
+ stats = &ipa3_ctx->hw_stats.quota.stats.client[client];
+ memset(stats, 0, sizeof(*stats));
+ return 0;
+}
+
+int ipa_reset_all_quota_stats(void)
+{
+ int ret;
+ struct ipa_quota_stats_all *stats;
+
+ if (!ipa3_ctx->hw_stats.enabled)
+ return 0;
+
+ /* reading stats will reset them in hardware */
+ ret = ipa_get_quota_stats(NULL);
+ if (ret) {
+ IPAERR("ipa_get_quota_stats failed %d\n", ret);
+ return ret;
+ }
+
+ /* reset driver's cache */
+ stats = &ipa3_ctx->hw_stats.quota.stats;
+ memset(stats, 0, sizeof(*stats));
+ return 0;
+}
+
+int ipa_init_teth_stats(struct ipa_teth_stats_endpoints *in)
+{
+ struct ipahal_stats_init_pyld *pyld;
+ struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
+ struct ipahal_imm_cmd_pyld *cmd_pyld;
+ struct ipahal_imm_cmd_register_write teth_base = {0};
+ struct ipahal_imm_cmd_pyld *teth_base_pyld;
+ struct ipahal_imm_cmd_register_write teth_mask = { 0 };
+ struct ipahal_imm_cmd_pyld *teth_mask_pyld;
+ struct ipa3_desc desc[3] = { {0} };
+ dma_addr_t dma_address;
+ int ret;
+ int i;
+
+ if (!ipa3_ctx->hw_stats.enabled)
+ return 0;
+
+ if (!in || !in->prod_mask) {
+ IPAERR("invalid params\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < IPA_STATS_MAX_PIPE_BIT; i++) {
+ if ((in->prod_mask & (1 << i)) && !in->dst_ep_mask[i]) {
+ IPAERR("prod %d doesn't have cons\n", i);
+ return -EINVAL;
+ }
+ }
+ IPADBG_LOW("prod_mask=0x%x\n", in->prod_mask);
+
+ /* reset driver's cache */
+ memset(&ipa3_ctx->hw_stats.teth.init, 0,
+ sizeof(ipa3_ctx->hw_stats.teth.init));
+ for (i = 0; i < IPA_CLIENT_MAX; i++)
+ memset(&ipa3_ctx->hw_stats.teth.prod_stats[i], 0,
+ sizeof(ipa3_ctx->hw_stats.teth.prod_stats[i]));
+ ipa3_ctx->hw_stats.teth.init.prod_bitmask = in->prod_mask;
+ memcpy(ipa3_ctx->hw_stats.teth.init.cons_bitmask, in->dst_ep_mask,
+ sizeof(ipa3_ctx->hw_stats.teth.init.cons_bitmask));
+
+
+ pyld = ipahal_stats_generate_init_pyld(IPAHAL_HW_STATS_TETHERING,
+ &ipa3_ctx->hw_stats.teth.init, false);
+ if (!pyld) {
+ IPAERR("failed to generate pyld\n");
+ return -EPERM;
+ }
+
+ if (pyld->len > IPA_MEM_PART(stats_tethering_size)) {
+ IPAERR("SRAM partition too small: %d needed %d\n",
+ IPA_MEM_PART(stats_tethering_size), pyld->len);
+ ret = -EPERM;
+ goto destroy_init_pyld;
+ }
+
+ dma_address = dma_map_single(ipa3_ctx->pdev,
+ pyld->data,
+ pyld->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ipa3_ctx->pdev, dma_address)) {
+ IPAERR("failed to DMA map\n");
+ ret = -EPERM;
+ goto destroy_init_pyld;
+ }
+
+ /* setting the registers and init the stats pyld are done atomically */
+ teth_mask.skip_pipeline_clear = false;
+ teth_mask.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+ teth_mask.offset = ipahal_get_reg_n_ofst(IPA_STAT_TETHERING_MASK_n,
+ ipa3_ctx->ee);
+ teth_mask.value = in->prod_mask;
+ teth_mask.value_mask = ~0;
+ teth_mask_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+ &teth_mask, false);
+ if (!teth_mask_pyld) {
+ IPAERR("failed to construct register_write imm cmd\n");
+ ret = -ENOMEM;
+ goto unmap;
+ }
+ desc[0].opcode = teth_mask_pyld->opcode;
+ desc[0].pyld = teth_mask_pyld->data;
+ desc[0].len = teth_mask_pyld->len;
+ desc[0].type = IPA_IMM_CMD_DESC;
+
+ teth_base.skip_pipeline_clear = false;
+ teth_base.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+ teth_base.offset = ipahal_get_reg_n_ofst(IPA_STAT_TETHERING_BASE_n,
+ ipa3_ctx->ee);
+ teth_base.value = ipa3_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(stats_tethering_ofst);
+ teth_base.value_mask = ~0;
+ teth_base_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+ &teth_base, false);
+ if (!teth_base_pyld) {
+ IPAERR("failed to construct register_write imm cmd\n");
+ ret = -ENOMEM;
+ goto destroy_teth_mask;
+ }
+ desc[1].opcode = teth_base_pyld->opcode;
+ desc[1].pyld = teth_base_pyld->data;
+ desc[1].len = teth_base_pyld->len;
+ desc[1].type = IPA_IMM_CMD_DESC;
+
+ cmd.is_read = false;
+ cmd.skip_pipeline_clear = false;
+ cmd.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+ cmd.size = pyld->len;
+ cmd.system_addr = dma_address;
+ cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(stats_tethering_ofst);
+ cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+ if (!cmd_pyld) {
+ IPAERR("failed to construct dma_shared_mem imm cmd\n");
+ ret = -ENOMEM;
+ goto destroy_teth_base;
+ }
+ desc[2].opcode = cmd_pyld->opcode;
+ desc[2].pyld = cmd_pyld->data;
+ desc[2].len = cmd_pyld->len;
+ desc[2].type = IPA_IMM_CMD_DESC;
+
+ ret = ipa3_send_cmd(3, desc);
+ if (ret) {
+ IPAERR("failed to send immediate command (error %d)\n", ret);
+ goto destroy_imm;
+ }
+
+ ret = 0;
+
+destroy_imm:
+ ipahal_destroy_imm_cmd(cmd_pyld);
+destroy_teth_base:
+ ipahal_destroy_imm_cmd(teth_base_pyld);
+destroy_teth_mask:
+ ipahal_destroy_imm_cmd(teth_mask_pyld);
+unmap:
+ dma_unmap_single(ipa3_ctx->pdev, dma_address, pyld->len, DMA_TO_DEVICE);
+destroy_init_pyld:
+ ipahal_destroy_stats_init_pyld(pyld);
+ return ret;
+}
+
+int ipa_get_teth_stats(enum ipa_client_type prod,
+ struct ipa_quota_stats_all *out)
+{
+ int i, j;
+ int ret;
+ struct ipahal_stats_get_offset_tethering get_offset = { { 0 } };
+ struct ipahal_stats_offset offset = {0};
+ struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
+ struct ipahal_imm_cmd_pyld *cmd_pyld;
+ struct ipa_mem_buffer mem;
+ struct ipa3_desc desc = { 0 };
+ struct ipahal_stats_tethering_all *stats;
+
+ if (!ipa3_ctx->hw_stats.enabled)
+ return 0;
+
+ if (!IPA_CLIENT_IS_PROD(prod) || ipa3_get_ep_mapping(prod) == -1) {
+ IPAERR("invalid prod %d\n", prod);
+ return -EINVAL;
+ }
+
+ get_offset.init = ipa3_ctx->hw_stats.teth.init;
+ ret = ipahal_stats_get_offset(IPAHAL_HW_STATS_TETHERING, &get_offset,
+ &offset);
+ if (ret) {
+ IPAERR("failed to get offset from hal %d\n", ret);
+ return ret;
+ }
+
+ IPADBG_LOW("offset = %d size = %d\n", offset.offset, offset.size);
+
+ mem.size = offset.size;
+ mem.base = dma_alloc_coherent(ipa3_ctx->pdev,
+ mem.size,
+ &mem.phys_base,
+ GFP_KERNEL);
+ if (!mem.base) {
+ IPAERR("fail to alloc DMA memory\n");
+ return ret;
+ }
+
+ cmd.is_read = true;
+ cmd.clear_after_read = true;
+ cmd.skip_pipeline_clear = false;
+ cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+ cmd.size = mem.size;
+ cmd.system_addr = mem.phys_base;
+ cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(stats_tethering_ofst) + offset.offset;
+ cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+ if (!cmd_pyld) {
+ IPAERR("failed to construct dma_shared_mem imm cmd\n");
+ ret = -ENOMEM;
+ goto free_dma_mem;
+ }
+ desc.opcode = cmd_pyld->opcode;
+ desc.pyld = cmd_pyld->data;
+ desc.len = cmd_pyld->len;
+ desc.type = IPA_IMM_CMD_DESC;
+
+ ret = ipa3_send_cmd(1, &desc);
+ if (ret) {
+ IPAERR("failed to send immediate command (error %d)\n", ret);
+ goto destroy_imm;
+ }
+
+ stats = kzalloc(sizeof(*stats), GFP_KERNEL);
+ if (!stats) {
+ IPADBG("failed to alloc memory\n");
+ ret = -ENOMEM;
+ goto destroy_imm;
+ }
+
+ ret = ipahal_parse_stats(IPAHAL_HW_STATS_TETHERING,
+ &ipa3_ctx->hw_stats.teth.init, mem.base, stats);
+ if (ret) {
+ IPAERR("failed to parse stats (error %d)\n", ret);
+ goto free_stats;
+ }
+
+ /*
+ * update driver cache.
+ * the stats were read from hardware with clear_after_read meaning
+ * hardware stats are 0 now
+ */
+ for (i = 0; i < IPA_CLIENT_MAX; i++) {
+ for (j = 0; j < IPA_CLIENT_MAX; j++) {
+ struct ipa_hw_stats_teth *sw_stats =
+ &ipa3_ctx->hw_stats.teth;
+ int prod_idx = ipa3_get_ep_mapping(i);
+ int cons_idx = ipa3_get_ep_mapping(j);
+
+ if (prod_idx == -1 || prod_idx >= IPA3_MAX_NUM_PIPES)
+ continue;
+
+ if (cons_idx == -1 || cons_idx >= IPA3_MAX_NUM_PIPES)
+ continue;
+
+ if (ipa3_ctx->ep[prod_idx].client != i ||
+ ipa3_ctx->ep[cons_idx].client != j)
+ continue;
+
+ sw_stats->prod_stats[i].client[j].num_ipv4_bytes +=
+ stats->stats[prod_idx][cons_idx].num_ipv4_bytes;
+ sw_stats->prod_stats[i].client[j].num_ipv4_pkts +=
+ stats->stats[prod_idx][cons_idx].num_ipv4_pkts;
+ sw_stats->prod_stats[i].client[j].num_ipv6_bytes +=
+ stats->stats[prod_idx][cons_idx].num_ipv6_bytes;
+ sw_stats->prod_stats[i].client[j].num_ipv6_pkts +=
+ stats->stats[prod_idx][cons_idx].num_ipv6_pkts;
+ }
+ }
+
+ if (!out) {
+ ret = 0;
+ goto free_stats;
+ }
+
+ /* copy results to out parameter */
+ *out = ipa3_ctx->hw_stats.teth.prod_stats[prod];
+
+ ret = 0;
+free_stats:
+ kfree(stats);
+destroy_imm:
+ ipahal_destroy_imm_cmd(cmd_pyld);
+free_dma_mem:
+ dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
+ return ret;
+
+}
+
+int ipa_reset_teth_stats(enum ipa_client_type prod, enum ipa_client_type cons)
+{
+ int ret;
+ struct ipa_quota_stats *stats;
+
+ if (!ipa3_ctx->hw_stats.enabled)
+ return 0;
+
+ if (!IPA_CLIENT_IS_PROD(prod) || IPA_CLIENT_IS_CONS(cons) == -1) {
+ IPAERR("invalid prod %d or cons %d\n", prod, cons);
+ return -EINVAL;
+ }
+
+ /* reading stats will reset them in hardware */
+ ret = ipa_get_teth_stats(prod, NULL);
+ if (ret) {
+ IPAERR("ipa_get_teth_stats failed %d\n", ret);
+ return ret;
+ }
+
+ /* reset driver's cache */
+ stats = &ipa3_ctx->hw_stats.teth.prod_stats[prod].client[cons];
+ memset(stats, 0, sizeof(*stats));
+ return 0;
+}
+
+int ipa_reset_all_cons_teth_stats(enum ipa_client_type prod)
+{
+ int ret;
+ int i;
+ struct ipa_quota_stats *stats;
+
+ if (!ipa3_ctx->hw_stats.enabled)
+ return 0;
+
+ if (!IPA_CLIENT_IS_PROD(prod)) {
+ IPAERR("invalid prod %d\n", prod);
+ return -EINVAL;
+ }
+
+ /* reading stats will reset them in hardware */
+ ret = ipa_get_teth_stats(prod, NULL);
+ if (ret) {
+ IPAERR("ipa_get_teth_stats failed %d\n", ret);
+ return ret;
+ }
+
+ /* reset driver's cache */
+ for (i = 0; i < IPA_CLIENT_MAX; i++) {
+ stats = &ipa3_ctx->hw_stats.teth.prod_stats[prod].client[i];
+ memset(stats, 0, sizeof(*stats));
+ }
+
+ return 0;
+}
+
+int ipa_reset_all_teth_stats(void)
+{
+ int i;
+ int ret;
+ struct ipa_quota_stats_all *stats;
+
+ if (!ipa3_ctx->hw_stats.enabled)
+ return 0;
+
+ /* reading stats will reset them in hardware */
+ for (i = 0; i < IPA_CLIENT_MAX; i++) {
+ if (IPA_CLIENT_IS_PROD(i) && ipa3_get_ep_mapping(i) != -1) {
+ ret = ipa_get_teth_stats(i, NULL);
+ if (ret) {
+ IPAERR("ipa_get_teth_stats failed %d\n", ret);
+ return ret;
+ }
+ /* a single iteration will reset all hardware stats */
+ break;
+ }
+ }
+
+ /* reset driver's cache */
+ for (i = 0; i < IPA_CLIENT_MAX; i++) {
+ stats = &ipa3_ctx->hw_stats.teth.prod_stats[i];
+ memset(stats, 0, sizeof(*stats));
+ }
+
+ return 0;
+}
+
+int ipa_flt_rt_stats_add_rule_id(enum ipa_ip_type ip, bool filtering,
+ u16 rule_id)
+{
+ int rule_idx, rule_bit;
+ u32 *bmsk_ptr;
+
+ if (!ipa3_ctx->hw_stats.enabled)
+ return 0;
+
+ if (ip < 0 || ip >= IPA_IP_MAX) {
+ IPAERR("wrong ip type %d\n", ip);
+ return -EINVAL;
+ }
+
+ rule_idx = rule_id / 32;
+ rule_bit = rule_id % 32;
+
+ if (rule_idx >= IPAHAL_MAX_RULE_ID_32) {
+ IPAERR("invalid rule_id %d\n", rule_id);
+ return -EINVAL;
+ }
+
+ if (ip == IPA_IP_v4 && filtering)
+ bmsk_ptr =
+ ipa3_ctx->hw_stats.flt_rt.flt_v4_init.rule_id_bitmask;
+ else if (ip == IPA_IP_v4)
+ bmsk_ptr =
+ ipa3_ctx->hw_stats.flt_rt.rt_v4_init.rule_id_bitmask;
+ else if (ip == IPA_IP_v6 && filtering)
+ bmsk_ptr =
+ ipa3_ctx->hw_stats.flt_rt.flt_v6_init.rule_id_bitmask;
+ else
+ bmsk_ptr =
+ ipa3_ctx->hw_stats.flt_rt.rt_v6_init.rule_id_bitmask;
+
+ bmsk_ptr[rule_idx] |= (1 << rule_bit);
+
+ return 0;
+}
+
+int ipa_flt_rt_stats_start(enum ipa_ip_type ip, bool filtering)
+{
+ struct ipahal_stats_init_pyld *pyld;
+ int smem_ofst, smem_size, stats_base, start_id_ofst, end_id_ofst;
+ int start_id, end_id;
+ struct ipahal_stats_init_flt_rt *init;
+ struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
+ struct ipahal_imm_cmd_pyld *cmd_pyld;
+ struct ipahal_imm_cmd_register_write flt_rt_base = {0};
+ struct ipahal_imm_cmd_pyld *flt_rt_base_pyld;
+ struct ipahal_imm_cmd_register_write flt_rt_start_id = {0};
+ struct ipahal_imm_cmd_pyld *flt_rt_start_id_pyld;
+ struct ipahal_imm_cmd_register_write flt_rt_end_id = { 0 };
+ struct ipahal_imm_cmd_pyld *flt_rt_end_id_pyld;
+ struct ipa3_desc desc[4] = { {0} };
+ dma_addr_t dma_address;
+ int ret;
+
+ if (!ipa3_ctx->hw_stats.enabled)
+ return 0;
+
+ if (ip == IPA_IP_v4 && filtering) {
+ init = &ipa3_ctx->hw_stats.flt_rt.flt_v4_init;
+ smem_ofst = IPA_MEM_PART(stats_flt_v4_ofst);
+ smem_size = IPA_MEM_PART(stats_flt_v4_size);
+ stats_base = ipahal_get_reg_ofst(IPA_STAT_FILTER_IPV4_BASE);
+ start_id_ofst =
+ ipahal_get_reg_ofst(IPA_STAT_FILTER_IPV4_START_ID);
+ end_id_ofst = ipahal_get_reg_ofst(IPA_STAT_FILTER_IPV4_END_ID);
+ } else if (ip == IPA_IP_v4) {
+ init = &ipa3_ctx->hw_stats.flt_rt.rt_v4_init;
+ smem_ofst = IPA_MEM_PART(stats_rt_v4_ofst);
+ smem_size = IPA_MEM_PART(stats_rt_v4_size);
+ stats_base = ipahal_get_reg_ofst(IPA_STAT_ROUTER_IPV4_BASE);
+ start_id_ofst =
+ ipahal_get_reg_ofst(IPA_STAT_ROUTER_IPV4_START_ID);
+ end_id_ofst = ipahal_get_reg_ofst(IPA_STAT_ROUTER_IPV4_END_ID);
+ } else if (ip == IPA_IP_v6 && filtering) {
+ init = &ipa3_ctx->hw_stats.flt_rt.flt_v6_init;
+ smem_ofst = IPA_MEM_PART(stats_flt_v6_ofst);
+ smem_size = IPA_MEM_PART(stats_flt_v6_size);
+ stats_base = ipahal_get_reg_ofst(IPA_STAT_FILTER_IPV6_BASE);
+ start_id_ofst =
+ ipahal_get_reg_ofst(IPA_STAT_FILTER_IPV6_START_ID);
+ end_id_ofst = ipahal_get_reg_ofst(IPA_STAT_FILTER_IPV6_END_ID);
+ } else {
+ init = &ipa3_ctx->hw_stats.flt_rt.rt_v6_init;
+ smem_ofst = IPA_MEM_PART(stats_rt_v6_ofst);
+ smem_size = IPA_MEM_PART(stats_rt_v6_size);
+ stats_base = ipahal_get_reg_ofst(IPA_STAT_ROUTER_IPV6_BASE);
+ start_id_ofst =
+ ipahal_get_reg_ofst(IPA_STAT_ROUTER_IPV6_START_ID);
+ end_id_ofst = ipahal_get_reg_ofst(IPA_STAT_ROUTER_IPV6_END_ID);
+ }
+
+ for (start_id = 0; start_id < IPAHAL_MAX_RULE_ID_32; start_id++) {
+ if (init->rule_id_bitmask[start_id])
+ break;
+ }
+
+ if (start_id == IPAHAL_MAX_RULE_ID_32) {
+ IPAERR("empty rule ids\n");
+ return -EINVAL;
+ }
+
+ /* every rule_id_bitmask contains 32 rules */
+ start_id *= 32;
+
+ for (end_id = IPAHAL_MAX_RULE_ID_32 - 1; end_id >= 0; end_id--) {
+ if (init->rule_id_bitmask[end_id])
+ break;
+ }
+ end_id = (end_id + 1) * 32 - 1;
+
+ pyld = ipahal_stats_generate_init_pyld(IPAHAL_HW_STATS_FNR, init,
+ false);
+ if (!pyld) {
+ IPAERR("failed to generate pyld\n");
+ return -EPERM;
+ }
+
+ if (pyld->len > smem_size) {
+ IPAERR("SRAM partition too small: %d needed %d\n",
+ smem_size, pyld->len);
+ ret = -EPERM;
+ goto destroy_init_pyld;
+ }
+
+ dma_address = dma_map_single(ipa3_ctx->pdev,
+ pyld->data,
+ pyld->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ipa3_ctx->pdev, dma_address)) {
+ IPAERR("failed to DMA map\n");
+ ret = -EPERM;
+ goto destroy_init_pyld;
+ }
+
+ /* setting the registers and init the stats pyld are done atomically */
+ flt_rt_start_id.skip_pipeline_clear = false;
+ flt_rt_start_id.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+ flt_rt_start_id.offset = start_id_ofst;
+ flt_rt_start_id.value = start_id;
+ flt_rt_start_id.value_mask = 0x3FF;
+ flt_rt_start_id_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_REGISTER_WRITE, &flt_rt_start_id, false);
+ if (!flt_rt_start_id_pyld) {
+ IPAERR("failed to construct register_write imm cmd\n");
+ ret = -ENOMEM;
+ goto unmap;
+ }
+ desc[0].opcode = flt_rt_start_id_pyld->opcode;
+ desc[0].pyld = flt_rt_start_id_pyld->data;
+ desc[0].len = flt_rt_start_id_pyld->len;
+ desc[0].type = IPA_IMM_CMD_DESC;
+
+ flt_rt_end_id.skip_pipeline_clear = false;
+ flt_rt_end_id.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+ flt_rt_end_id.offset = end_id_ofst;
+ flt_rt_end_id.value = end_id;
+ flt_rt_end_id.value_mask = 0x3FF;
+ flt_rt_end_id_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_REGISTER_WRITE, &flt_rt_end_id, false);
+ if (!flt_rt_end_id_pyld) {
+ IPAERR("failed to construct register_write imm cmd\n");
+ ret = -ENOMEM;
+ goto destroy_flt_rt_start_id;
+ }
+ desc[1].opcode = flt_rt_end_id_pyld->opcode;
+ desc[1].pyld = flt_rt_end_id_pyld->data;
+ desc[1].len = flt_rt_end_id_pyld->len;
+ desc[1].type = IPA_IMM_CMD_DESC;
+
+ flt_rt_base.skip_pipeline_clear = false;
+ flt_rt_base.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+ flt_rt_base.offset = stats_base;
+ flt_rt_base.value = ipa3_ctx->smem_restricted_bytes +
+ smem_ofst;
+ flt_rt_base.value_mask = ~0;
+ flt_rt_base_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+ &flt_rt_base, false);
+ if (!flt_rt_base_pyld) {
+ IPAERR("failed to construct register_write imm cmd\n");
+ ret = -ENOMEM;
+ goto destroy_flt_rt_end_id;
+ }
+ desc[2].opcode = flt_rt_base_pyld->opcode;
+ desc[2].pyld = flt_rt_base_pyld->data;
+ desc[2].len = flt_rt_base_pyld->len;
+ desc[2].type = IPA_IMM_CMD_DESC;
+
+ cmd.is_read = false;
+ cmd.skip_pipeline_clear = false;
+ cmd.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+ cmd.size = pyld->len;
+ cmd.system_addr = dma_address;
+ cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
+ smem_ofst;
+ cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+ if (!cmd_pyld) {
+ IPAERR("failed to construct dma_shared_mem imm cmd\n");
+ ret = -ENOMEM;
+ goto destroy_flt_rt_base;
+ }
+ desc[3].opcode = cmd_pyld->opcode;
+ desc[3].pyld = cmd_pyld->data;
+ desc[3].len = cmd_pyld->len;
+ desc[3].type = IPA_IMM_CMD_DESC;
+
+ ret = ipa3_send_cmd(4, desc);
+ if (ret) {
+ IPAERR("failed to send immediate command (error %d)\n", ret);
+ goto destroy_imm;
+ }
+
+ ret = 0;
+
+destroy_imm:
+ ipahal_destroy_imm_cmd(cmd_pyld);
+destroy_flt_rt_base:
+ ipahal_destroy_imm_cmd(flt_rt_base_pyld);
+destroy_flt_rt_end_id:
+ ipahal_destroy_imm_cmd(flt_rt_end_id_pyld);
+destroy_flt_rt_start_id:
+ ipahal_destroy_imm_cmd(flt_rt_start_id_pyld);
+unmap:
+ dma_unmap_single(ipa3_ctx->pdev, dma_address, pyld->len, DMA_TO_DEVICE);
+destroy_init_pyld:
+ ipahal_destroy_stats_init_pyld(pyld);
+ return ret;
+}
+
+/*
+ * ipa_flt_rt_stats_clear_rule_ids() - forget all tracked flt/rt rule ids
+ * @ip: IP family of the table (IPA_IP_v4/IPA_IP_v6)
+ * @filtering: true for a filter table, false for a routing table
+ *
+ * Clears the driver-side rule-id bitmask only; HW counters are untouched.
+ * Returns 0 on success or -EINVAL for a bad @ip.
+ */
+int ipa_flt_rt_stats_clear_rule_ids(enum ipa_ip_type ip, bool filtering)
+{
+	struct ipahal_stats_init_flt_rt *init;
+	int i;
+
+	if (!ipa3_ctx->hw_stats.enabled)
+		return 0;
+
+	if (ip < 0 || ip >= IPA_IP_MAX) {
+		IPAERR("wrong ip type %d\n", ip);
+		return -EINVAL;
+	}
+
+	/* pick the cached init blob for the requested table family */
+	if (ip == IPA_IP_v4)
+		init = filtering ? &ipa3_ctx->hw_stats.flt_rt.flt_v4_init :
+			&ipa3_ctx->hw_stats.flt_rt.rt_v4_init;
+	else
+		init = filtering ? &ipa3_ctx->hw_stats.flt_rt.flt_v6_init :
+			&ipa3_ctx->hw_stats.flt_rt.rt_v6_init;
+
+	for (i = 0; i < IPAHAL_MAX_RULE_ID_32; i++)
+		init->rule_id_bitmask[i] = 0;
+
+	return 0;
+}
+
+/*
+ * __ipa_get_flt_rt_stats() - read (and optionally clear) stats of one rule
+ * @ip: IP family of the table (validated by the callers)
+ * @filtering: true for a filter table, false for a routing table
+ * @rule_id: HW rule id whose counters are fetched
+ * @out: where to store the parsed counters; NULL means "clear only"
+ *
+ * DMAs the rule's counters out of the stats SRAM region (clearing them in
+ * HW when @out is NULL, via clear_after_read) and parses the raw blob.
+ * Returns 0 on success or a negative errno.
+ */
+static int __ipa_get_flt_rt_stats(enum ipa_ip_type ip, bool filtering,
+	u16 rule_id, struct ipa_flt_rt_stats *out)
+{
+	int ret;
+	int smem_ofst;
+	bool clear = false;
+	struct ipahal_stats_get_offset_flt_rt *get_offset;
+	struct ipahal_stats_offset offset = { 0 };
+	struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	struct ipa_mem_buffer mem;
+	struct ipa3_desc desc = { 0 };
+	struct ipahal_stats_flt_rt stats;
+
+	if (rule_id >= IPAHAL_MAX_RULE_ID_32 * 32) {
+		IPAERR("invalid rule_id %d\n", rule_id);
+		return -EINVAL;
+	}
+
+	/* a NULL out means the caller only wants the HW counters cleared */
+	if (out == NULL)
+		clear = true;
+
+	get_offset = kzalloc(sizeof(*get_offset), GFP_KERNEL);
+	if (!get_offset) {
+		IPADBG("no mem\n");
+		return -ENOMEM;
+	}
+
+	if (ip == IPA_IP_v4 && filtering) {
+		get_offset->init = ipa3_ctx->hw_stats.flt_rt.flt_v4_init;
+		smem_ofst = IPA_MEM_PART(stats_flt_v4_ofst);
+	} else if (ip == IPA_IP_v4) {
+		get_offset->init = ipa3_ctx->hw_stats.flt_rt.rt_v4_init;
+		smem_ofst = IPA_MEM_PART(stats_rt_v4_ofst);
+	} else if (ip == IPA_IP_v6 && filtering) {
+		get_offset->init = ipa3_ctx->hw_stats.flt_rt.flt_v6_init;
+		smem_ofst = IPA_MEM_PART(stats_flt_v6_ofst);
+	} else {
+		get_offset->init = ipa3_ctx->hw_stats.flt_rt.rt_v6_init;
+		smem_ofst = IPA_MEM_PART(stats_rt_v6_ofst);
+	}
+
+	get_offset->rule_id = rule_id;
+
+	ret = ipahal_stats_get_offset(IPAHAL_HW_STATS_FNR, get_offset,
+		&offset);
+	if (ret) {
+		IPAERR("failed to get offset from hal %d\n", ret);
+		goto free_offset;
+	}
+
+	IPADBG_LOW("offset = %d size = %d\n", offset.offset, offset.size);
+
+	mem.size = offset.size;
+	mem.base = dma_alloc_coherent(ipa3_ctx->pdev,
+		mem.size,
+		&mem.phys_base,
+		GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("fail to alloc DMA memory\n");
+		/* ret still holds 0 here; report the allocation failure */
+		ret = -ENOMEM;
+		goto free_offset;
+	}
+
+	cmd.is_read = true;
+	cmd.clear_after_read = clear;
+	cmd.skip_pipeline_clear = false;
+	cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+	cmd.size = mem.size;
+	cmd.system_addr = mem.phys_base;
+	cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
+		smem_ofst + offset.offset;
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("failed to construct dma_shared_mem imm cmd\n");
+		ret = -ENOMEM;
+		goto free_dma_mem;
+	}
+	desc.opcode = cmd_pyld->opcode;
+	desc.pyld = cmd_pyld->data;
+	desc.len = cmd_pyld->len;
+	desc.type = IPA_IMM_CMD_DESC;
+
+	ret = ipa3_send_cmd(1, &desc);
+	if (ret) {
+		IPAERR("failed to send immediate command (error %d)\n", ret);
+		goto destroy_imm;
+	}
+
+	ret = ipahal_parse_stats(IPAHAL_HW_STATS_FNR,
+		&get_offset->init, mem.base, &stats);
+	if (ret) {
+		IPAERR("failed to parse stats (error %d)\n", ret);
+		goto destroy_imm;
+	}
+
+	if (out) {
+		out->num_pkts = stats.num_packets;
+		out->num_pkts_hash = stats.num_packets_hash;
+	}
+
+	ret = 0;
+
+destroy_imm:
+	ipahal_destroy_imm_cmd(cmd_pyld);
+free_dma_mem:
+	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
+free_offset:
+	kfree(get_offset);
+	return ret;
+
+}
+
+
+/*
+ * ipa_get_flt_rt_stats() - public wrapper: fetch one rule's flt/rt stats
+ *
+ * Validates @ip and delegates to __ipa_get_flt_rt_stats().  Returns 0
+ * silently (without touching @out) when HW stats are disabled.
+ */
+int ipa_get_flt_rt_stats(enum ipa_ip_type ip, bool filtering, u16 rule_id,
+	struct ipa_flt_rt_stats *out)
+{
+	if (!ipa3_ctx->hw_stats.enabled)
+		return 0;
+
+	if (ip < 0 || ip >= IPA_IP_MAX) {
+		IPAERR("wrong ip type %d\n", ip);
+		return -EINVAL;
+	}
+
+	return __ipa_get_flt_rt_stats(ip, filtering, rule_id, out);
+}
+
+/*
+ * ipa_reset_flt_rt_stats() - clear one rule's HW counters
+ *
+ * Implemented as a read with a NULL out pointer, which makes the helper
+ * issue the DMA with clear_after_read set.
+ */
+int ipa_reset_flt_rt_stats(enum ipa_ip_type ip, bool filtering, u16 rule_id)
+{
+	if (!ipa3_ctx->hw_stats.enabled)
+		return 0;
+
+	if (ip < 0 || ip >= IPA_IP_MAX) {
+		IPAERR("wrong ip type %d\n", ip);
+		return -EINVAL;
+	}
+
+	return __ipa_get_flt_rt_stats(ip, filtering, rule_id, NULL);
+}
+
+/*
+ * ipa_reset_all_flt_rt_stats() - clear HW counters of every tracked rule
+ * @ip: IP family of the table
+ * @filtering: true for a filter table, false for a routing table
+ *
+ * Walks the driver's rule-id bitmask and clears each set rule by reading
+ * it with a NULL out pointer.  Per-rule errors are ignored, matching the
+ * best-effort nature of a bulk reset.
+ */
+int ipa_reset_all_flt_rt_stats(enum ipa_ip_type ip, bool filtering)
+{
+	struct ipahal_stats_init_flt_rt *init;
+	int idx;
+	int bit;
+
+	if (!ipa3_ctx->hw_stats.enabled)
+		return 0;
+
+	if (ip < 0 || ip >= IPA_IP_MAX) {
+		IPAERR("wrong ip type %d\n", ip);
+		return -EINVAL;
+	}
+
+	if (ip == IPA_IP_v4)
+		init = filtering ? &ipa3_ctx->hw_stats.flt_rt.flt_v4_init :
+			&ipa3_ctx->hw_stats.flt_rt.rt_v4_init;
+	else
+		init = filtering ? &ipa3_ctx->hw_stats.flt_rt.flt_v6_init :
+			&ipa3_ctx->hw_stats.flt_rt.rt_v6_init;
+
+	for (idx = 0; idx < IPAHAL_MAX_RULE_ID_32; idx++) {
+		for (bit = 0; bit < 32; bit++) {
+			if (init->rule_id_bitmask[idx] & (1 << bit))
+				__ipa_get_flt_rt_stats(ip, filtering,
+					idx * 32 + bit, NULL);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * ipa_init_drop_stats() - initialize the HW drop-statistics region
+ * @pipe_bitmask: bitmask of pipes for which drop stats are collected
+ *
+ * Resets the driver's drop-stats cache, then — in one atomic immediate
+ * command sequence — writes the drop-count mask and base registers and
+ * DMAs the generated init payload into the stats SRAM partition.
+ * Returns 0 on success or a negative errno.
+ */
+int ipa_init_drop_stats(u32 pipe_bitmask)
+{
+	struct ipahal_stats_init_pyld *pyld;
+	struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	struct ipahal_imm_cmd_register_write drop_base = {0};
+	struct ipahal_imm_cmd_pyld *drop_base_pyld;
+	struct ipahal_imm_cmd_register_write drop_mask = {0};
+	struct ipahal_imm_cmd_pyld *drop_mask_pyld;
+	struct ipa3_desc desc[3] = { {0} };
+	dma_addr_t dma_address;
+	int ret;
+
+	if (!ipa3_ctx->hw_stats.enabled)
+		return 0;
+
+	/* reset driver's cache */
+	memset(&ipa3_ctx->hw_stats.drop, 0, sizeof(ipa3_ctx->hw_stats.drop));
+	ipa3_ctx->hw_stats.drop.init.enabled_bitmask = pipe_bitmask;
+	IPADBG_LOW("pipe_bitmask=0x%x\n", pipe_bitmask);
+
+	pyld = ipahal_stats_generate_init_pyld(IPAHAL_HW_STATS_DROP,
+		&ipa3_ctx->hw_stats.drop.init, false);
+	if (!pyld) {
+		IPAERR("failed to generate pyld\n");
+		return -EPERM;
+	}
+
+	/* the payload must fit into the SRAM partition reserved for it */
+	if (pyld->len > IPA_MEM_PART(stats_drop_size)) {
+		IPAERR("SRAM partition too small: %d needed %d\n",
+			IPA_MEM_PART(stats_drop_size), pyld->len);
+		ret = -EPERM;
+		goto destroy_init_pyld;
+	}
+
+	dma_address = dma_map_single(ipa3_ctx->pdev,
+		pyld->data,
+		pyld->len,
+		DMA_TO_DEVICE);
+	if (dma_mapping_error(ipa3_ctx->pdev, dma_address)) {
+		IPAERR("failed to DMA map\n");
+		ret = -EPERM;
+		goto destroy_init_pyld;
+	}
+
+	/* setting the registers and init the stats pyld are done atomically */
+	drop_mask.skip_pipeline_clear = false;
+	drop_mask.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+	drop_mask.offset = ipahal_get_reg_n_ofst(IPA_STAT_DROP_CNT_MASK_n,
+		ipa3_ctx->ee);
+	drop_mask.value = pipe_bitmask;
+	drop_mask.value_mask = ~0;
+	drop_mask_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+		&drop_mask, false);
+	if (!drop_mask_pyld) {
+		IPAERR("failed to construct register_write imm cmd\n");
+		ret = -ENOMEM;
+		goto unmap;
+	}
+	desc[0].opcode = drop_mask_pyld->opcode;
+	desc[0].pyld = drop_mask_pyld->data;
+	desc[0].len = drop_mask_pyld->len;
+	desc[0].type = IPA_IMM_CMD_DESC;
+
+	drop_base.skip_pipeline_clear = false;
+	drop_base.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+	drop_base.offset = ipahal_get_reg_n_ofst(IPA_STAT_DROP_CNT_BASE_n,
+		ipa3_ctx->ee);
+	drop_base.value = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(stats_drop_ofst);
+	drop_base.value_mask = ~0;
+	drop_base_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+		&drop_base, false);
+	if (!drop_base_pyld) {
+		IPAERR("failed to construct register_write imm cmd\n");
+		ret = -ENOMEM;
+		goto destroy_drop_mask;
+	}
+	desc[1].opcode = drop_base_pyld->opcode;
+	desc[1].pyld = drop_base_pyld->data;
+	desc[1].len = drop_base_pyld->len;
+	desc[1].type = IPA_IMM_CMD_DESC;
+
+	cmd.is_read = false;
+	cmd.skip_pipeline_clear = false;
+	cmd.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+	cmd.size = pyld->len;
+	cmd.system_addr = dma_address;
+	cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(stats_drop_ofst);
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("failed to construct dma_shared_mem imm cmd\n");
+		ret = -ENOMEM;
+		goto destroy_drop_base;
+	}
+	desc[2].opcode = cmd_pyld->opcode;
+	desc[2].pyld = cmd_pyld->data;
+	desc[2].len = cmd_pyld->len;
+	desc[2].type = IPA_IMM_CMD_DESC;
+
+	ret = ipa3_send_cmd(3, desc);
+	if (ret) {
+		IPAERR("failed to send immediate command (error %d)\n", ret);
+		goto destroy_imm;
+	}
+
+	ret = 0;
+
+	/* payloads and the mapping are released on success and failure alike */
+destroy_imm:
+	ipahal_destroy_imm_cmd(cmd_pyld);
+destroy_drop_base:
+	ipahal_destroy_imm_cmd(drop_base_pyld);
+destroy_drop_mask:
+	ipahal_destroy_imm_cmd(drop_mask_pyld);
+unmap:
+	dma_unmap_single(ipa3_ctx->pdev, dma_address, pyld->len, DMA_TO_DEVICE);
+destroy_init_pyld:
+	ipahal_destroy_stats_init_pyld(pyld);
+	return ret;
+}
+
+/*
+ * ipa_get_drop_stats() - read all drop counters and fold into the cache
+ * @out: optional destination for the accumulated per-client stats;
+ *       NULL means "read-and-clear only" (used by the reset paths)
+ *
+ * Reads the HW drop counters with clear_after_read, accumulates them into
+ * the driver cache (so totals survive HW clears) and optionally copies
+ * the cache to @out.  Returns 0 on success or a negative errno.
+ */
+int ipa_get_drop_stats(struct ipa_drop_stats_all *out)
+{
+	int i;
+	int ret;
+	struct ipahal_stats_get_offset_drop get_offset = { { 0 } };
+	struct ipahal_stats_offset offset = { 0 };
+	struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	struct ipa_mem_buffer mem;
+	struct ipa3_desc desc = { 0 };
+	struct ipahal_stats_drop_all *stats;
+
+	if (!ipa3_ctx->hw_stats.enabled)
+		return 0;
+
+	get_offset.init = ipa3_ctx->hw_stats.drop.init;
+	ret = ipahal_stats_get_offset(IPAHAL_HW_STATS_DROP, &get_offset,
+		&offset);
+	if (ret) {
+		IPAERR("failed to get offset from hal %d\n", ret);
+		return ret;
+	}
+
+	IPADBG_LOW("offset = %d size = %d\n", offset.offset, offset.size);
+
+	mem.size = offset.size;
+	mem.base = dma_alloc_coherent(ipa3_ctx->pdev,
+		mem.size,
+		&mem.phys_base,
+		GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("fail to alloc DMA memory\n");
+		/* ret is 0 here; report the allocation failure explicitly */
+		return -ENOMEM;
+	}
+
+	cmd.is_read = true;
+	cmd.clear_after_read = true;
+	cmd.skip_pipeline_clear = false;
+	cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+	cmd.size = mem.size;
+	cmd.system_addr = mem.phys_base;
+	cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(stats_drop_ofst) + offset.offset;
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("failed to construct dma_shared_mem imm cmd\n");
+		ret = -ENOMEM;
+		goto free_dma_mem;
+	}
+	desc.opcode = cmd_pyld->opcode;
+	desc.pyld = cmd_pyld->data;
+	desc.len = cmd_pyld->len;
+	desc.type = IPA_IMM_CMD_DESC;
+
+	ret = ipa3_send_cmd(1, &desc);
+	if (ret) {
+		IPAERR("failed to send immediate command (error %d)\n", ret);
+		goto destroy_imm;
+	}
+
+	stats = kzalloc(sizeof(*stats), GFP_KERNEL);
+	if (!stats) {
+		IPADBG("failed to alloc memory\n");
+		ret = -ENOMEM;
+		goto destroy_imm;
+	}
+
+	ret = ipahal_parse_stats(IPAHAL_HW_STATS_DROP,
+		&ipa3_ctx->hw_stats.drop.init, mem.base, stats);
+	if (ret) {
+		IPAERR("failed to parse stats (error %d)\n", ret);
+		goto free_stats;
+	}
+
+	/*
+	 * update driver cache.
+	 * the stats were read from hardware with clear_after_read meaning
+	 * hardware stats are 0 now
+	 */
+	for (i = 0; i < IPA_CLIENT_MAX; i++) {
+		int ep_idx = ipa3_get_ep_mapping(i);
+
+		if (ep_idx == -1 || ep_idx >= IPA3_MAX_NUM_PIPES)
+			continue;
+
+		if (ipa3_ctx->ep[ep_idx].client != i)
+			continue;
+
+		ipa3_ctx->hw_stats.drop.stats.client[i].drop_byte_cnt +=
+			stats->stats[ep_idx].drop_byte_cnt;
+		ipa3_ctx->hw_stats.drop.stats.client[i].drop_packet_cnt +=
+			stats->stats[ep_idx].drop_packet_cnt;
+	}
+
+
+	if (!out) {
+		ret = 0;
+		goto free_stats;
+	}
+
+	/* copy results to out parameter */
+	*out = ipa3_ctx->hw_stats.drop.stats;
+
+	ret = 0;
+free_stats:
+	kfree(stats);
+destroy_imm:
+	ipahal_destroy_imm_cmd(cmd_pyld);
+free_dma_mem:
+	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
+	return ret;
+
+}
+
+/*
+ * ipa_reset_drop_stats() - reset drop counters of a single client
+ * @client: client whose cached counters are zeroed
+ *
+ * Reads (and thereby clears) all HW drop counters, then zeroes the
+ * driver's cached entry for @client.  Returns 0 on success or a
+ * negative errno.
+ */
+int ipa_reset_drop_stats(enum ipa_client_type client)
+{
+	int ret;
+	struct ipa_drop_stats *stats;
+
+	/*
+	 * reject out-of-range clients; a negative value (reachable from the
+	 * debugfs s8 input) would index the stats array out of bounds below
+	 */
+	if (client < 0 || client >= IPA_CLIENT_MAX) {
+		IPAERR("invalid client %d\n", client);
+		return -EINVAL;
+	}
+
+	/* reading stats will reset them in hardware */
+	ret = ipa_get_drop_stats(NULL);
+	if (ret) {
+		IPAERR("ipa_get_drop_stats failed %d\n", ret);
+		return ret;
+	}
+
+	/* reset driver's cache */
+	stats = &ipa3_ctx->hw_stats.drop.stats.client[client];
+	memset(stats, 0, sizeof(*stats));
+	return 0;
+}
+
+/*
+ * ipa_reset_all_drop_stats() - reset drop counters of every client
+ *
+ * Clears HW counters via a discard read, then wipes the whole driver
+ * cache.  Returns 0 on success or a negative errno.
+ */
+int ipa_reset_all_drop_stats(void)
+{
+	int rc;
+
+	if (!ipa3_ctx->hw_stats.enabled)
+		return 0;
+
+	/* reading with a NULL out pointer clears the counters in hardware */
+	rc = ipa_get_drop_stats(NULL);
+	if (rc) {
+		IPAERR("ipa_get_drop_stats failed %d\n", rc);
+		return rc;
+	}
+
+	/* wipe the cached totals as well */
+	memset(&ipa3_ctx->hw_stats.drop.stats, 0,
+		sizeof(ipa3_ctx->hw_stats.drop.stats));
+	return 0;
+}
+
+
+#ifndef CONFIG_DEBUG_FS
+/* no debugfs in this config: expose a stub so callers stay unconditional */
+int ipa_debugfs_init_stats(struct dentry *parent) { return 0; }
+#else
+#define IPA_MAX_MSG_LEN 4096
+/* scratch buffer shared by all hw_stats debugfs handlers below;
+ * serialized by ipa3_ctx->lock
+ */
+static char dbg_buff[IPA_MAX_MSG_LEN];
+
+/*
+ * debugfs write handler: "<client>" resets that client's quota stats,
+ * "-1" resets all of them.  Returns the consumed byte count or -EFAULT.
+ */
+static ssize_t ipa_debugfs_reset_quota_stats(struct file *file,
+	const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	unsigned long missing;
+	s8 client = 0;
+	int ret;
+
+	mutex_lock(&ipa3_ctx->lock);
+	/*
+	 * bound check without "count + 1", which wraps to 0 for
+	 * count == SIZE_MAX and would bypass the check
+	 */
+	if (count >= sizeof(dbg_buff)) {
+		ret = -EFAULT;
+		goto bail;
+	}
+
+	missing = copy_from_user(dbg_buff, ubuf, count);
+	if (missing) {
+		ret = -EFAULT;
+		goto bail;
+	}
+
+	dbg_buff[count] = '\0';
+	if (kstrtos8(dbg_buff, 0, &client)) {
+		ret = -EFAULT;
+		goto bail;
+	}
+
+	if (client == -1)
+		ipa_reset_all_quota_stats();
+	else
+		ipa_reset_quota_stats(client);
+
+	ret = count;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return ret;
+}
+
+/*
+ * debugfs read handler: formats per-client quota stats into dbg_buff and
+ * copies them to user space.
+ * NOTE(review): dbg_buff is consumed by simple_read_from_buffer() after
+ * the lock is dropped, so a concurrent writer could race with the
+ * copy-out -- confirm this is acceptable for a debug-only interface.
+ */
+static ssize_t ipa_debugfs_print_quota_stats(struct file *file,
+	char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int nbytes = 0;
+	struct ipa_quota_stats_all *out;
+	int i;
+	int res;
+
+	out = kzalloc(sizeof(*out), GFP_KERNEL);
+	if (!out)
+		return -ENOMEM;
+
+	mutex_lock(&ipa3_ctx->lock);
+	res = ipa_get_quota_stats(out);
+	if (res) {
+		mutex_unlock(&ipa3_ctx->lock);
+		kfree(out);
+		return res;
+	}
+	/* print only real (non-test) clients enabled for quota stats */
+	for (i = 0; i < IPA_CLIENT_MAX; i++) {
+		int ep_idx = ipa3_get_ep_mapping(i);
+
+		if (ep_idx == -1)
+			continue;
+
+		if (IPA_CLIENT_IS_TEST(i))
+			continue;
+
+		if (!(ipa3_ctx->hw_stats.quota.init.enabled_bitmask &
+			(1 << ep_idx)))
+			continue;
+
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"%s:\n",
+			ipa_clients_strings[i]);
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"num_ipv4_bytes=%llu\n",
+			out->client[i].num_ipv4_bytes);
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"num_ipv6_bytes=%llu\n",
+			out->client[i].num_ipv6_bytes);
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"num_ipv4_pkts=%u\n",
+			out->client[i].num_ipv4_pkts);
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"num_ipv6_pkts=%u\n",
+			out->client[i].num_ipv6_pkts);
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"\n");
+
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+	kfree(out);
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+/*
+ * debugfs write handler: "<client>" resets tethering stats toward that
+ * consumer, "-1" resets all.  Returns consumed byte count or -EFAULT.
+ */
+static ssize_t ipa_debugfs_reset_tethering_stats(struct file *file,
+	const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	unsigned long missing;
+	s8 client = 0;
+	int ret;
+
+	mutex_lock(&ipa3_ctx->lock);
+	/* avoid the size_t overflow of "count + 1" for huge counts */
+	if (count >= sizeof(dbg_buff)) {
+		ret = -EFAULT;
+		goto bail;
+	}
+
+	missing = copy_from_user(dbg_buff, ubuf, count);
+	if (missing) {
+		ret = -EFAULT;
+		goto bail;
+	}
+
+	dbg_buff[count] = '\0';
+	if (kstrtos8(dbg_buff, 0, &client)) {
+		ret = -EFAULT;
+		goto bail;
+	}
+
+	if (client == -1)
+		ipa_reset_all_teth_stats();
+	else
+		ipa_reset_all_cons_teth_stats(client);
+
+	ret = count;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return ret;
+}
+
+/*
+ * debugfs read handler: for every enabled producer pipe, fetches the
+ * tethering stats and prints one "<prod>-><cons>" section per consumer
+ * tracked in the producer's cons_bitmask.
+ */
+static ssize_t ipa_debugfs_print_tethering_stats(struct file *file,
+	char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int nbytes = 0;
+	struct ipa_quota_stats_all *out;
+	int i, j;
+	int res;
+
+	out = kzalloc(sizeof(*out), GFP_KERNEL);
+	if (!out)
+		return -ENOMEM;
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < IPA_CLIENT_MAX; i++) {
+		int ep_idx = ipa3_get_ep_mapping(i);
+
+		if (ep_idx == -1)
+			continue;
+
+		if (!IPA_CLIENT_IS_PROD(i))
+			continue;
+
+		if (IPA_CLIENT_IS_TEST(i))
+			continue;
+
+		if (!(ipa3_ctx->hw_stats.teth.init.prod_bitmask &
+			(1 << ep_idx)))
+			continue;
+
+		res = ipa_get_teth_stats(i, out);
+		if (res) {
+			mutex_unlock(&ipa3_ctx->lock);
+			kfree(out);
+			return res;
+		}
+
+		/* one section per consumer connected to this producer */
+		for (j = 0; j < IPA_CLIENT_MAX; j++) {
+			int cons_idx = ipa3_get_ep_mapping(j);
+
+			if (cons_idx == -1)
+				continue;
+
+			if (IPA_CLIENT_IS_TEST(j))
+				continue;
+
+			if (!(ipa3_ctx->hw_stats.teth.init.cons_bitmask[ep_idx]
+				& (1 << cons_idx)))
+				continue;
+
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"%s->%s:\n",
+				ipa_clients_strings[i],
+				ipa_clients_strings[j]);
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"num_ipv4_bytes=%llu\n",
+				out->client[j].num_ipv4_bytes);
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"num_ipv6_bytes=%llu\n",
+				out->client[j].num_ipv6_bytes);
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"num_ipv4_pkts=%u\n",
+				out->client[j].num_ipv4_pkts);
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"num_ipv6_pkts=%u\n",
+				out->client[j].num_ipv6_pkts);
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"\n");
+		}
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+	kfree(out);
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+/*
+ * Shared debugfs write handler for the flt/rt stats control files.
+ * Accepts "start", "clear", "reset" or a numeric rule id to track.
+ * Returns the consumed byte count or -EFAULT.
+ */
+static ssize_t ipa_debugfs_control_flt_rt_stats(enum ipa_ip_type ip,
+	bool filtering, struct file *file,
+	const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	unsigned long missing;
+	u16 rule_id = 0;
+	int ret;
+
+	mutex_lock(&ipa3_ctx->lock);
+	/* avoid the size_t overflow of "count + 1" for huge counts */
+	if (count >= sizeof(dbg_buff)) {
+		ret = -EFAULT;
+		goto bail;
+	}
+
+	missing = copy_from_user(dbg_buff, ubuf, count);
+	if (missing) {
+		ret = -EFAULT;
+		goto bail;
+	}
+
+	dbg_buff[count] = '\0';
+	if (strcmp(dbg_buff, "start\n") == 0) {
+		ipa_flt_rt_stats_start(ip, filtering);
+	} else if (strcmp(dbg_buff, "clear\n") == 0) {
+		ipa_flt_rt_stats_clear_rule_ids(ip, filtering);
+	} else if (strcmp(dbg_buff, "reset\n") == 0) {
+		ipa_reset_all_flt_rt_stats(ip, filtering);
+	} else {
+		/* anything else must parse as a rule id to add */
+		if (kstrtou16(dbg_buff, 0, &rule_id)) {
+			ret = -EFAULT;
+			goto bail;
+		}
+		ipa_flt_rt_stats_add_rule_id(ip, filtering, rule_id);
+	}
+
+	ret = count;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return ret;
+}
+
+/*
+ * Shared debugfs read handler for the flt/rt stats files: prints the
+ * counters of every rule id currently set in the table's bitmask.
+ */
+static ssize_t ipa_debugfs_print_flt_rt_stats(enum ipa_ip_type ip,
+	bool filtering, struct file *file,
+	char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int nbytes = 0;
+	struct ipahal_stats_init_flt_rt *init;
+	struct ipa_flt_rt_stats out;
+	int i;
+	int res;
+
+	if (ip == IPA_IP_v4 && filtering)
+		init = &ipa3_ctx->hw_stats.flt_rt.flt_v4_init;
+	else if (ip == IPA_IP_v4)
+		init = &ipa3_ctx->hw_stats.flt_rt.rt_v4_init;
+	else if (ip == IPA_IP_v6 && filtering)
+		init = &ipa3_ctx->hw_stats.flt_rt.flt_v6_init;
+	else
+		init = &ipa3_ctx->hw_stats.flt_rt.rt_v6_init;
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < IPAHAL_MAX_RULE_ID_32 * 32; i++) {
+		int idx = i / 32;
+		int bit = i % 32;
+
+		if (init->rule_id_bitmask[idx] & (1 << bit)) {
+			res = ipa_get_flt_rt_stats(ip, filtering, i, &out);
+			if (res) {
+				mutex_unlock(&ipa3_ctx->lock);
+				return res;
+			}
+
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"rule_id: %d\n", i);
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"num_pkts: %d\n",
+				out.num_pkts);
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"num_pkts_hash: %d\n",
+				out.num_pkts_hash);
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"\n");
+		}
+	}
+
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+/*
+ * debugfs write handler: "<client>" resets that client's drop stats,
+ * "-1" resets all.  Returns the consumed byte count or -EFAULT.
+ */
+static ssize_t ipa_debugfs_reset_drop_stats(struct file *file,
+	const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	unsigned long missing;
+	s8 client = 0;
+	int ret;
+
+	mutex_lock(&ipa3_ctx->lock);
+	/* avoid the size_t overflow of "count + 1" for huge counts */
+	if (count >= sizeof(dbg_buff)) {
+		ret = -EFAULT;
+		goto bail;
+	}
+
+	missing = copy_from_user(dbg_buff, ubuf, count);
+	if (missing) {
+		ret = -EFAULT;
+		goto bail;
+	}
+
+	dbg_buff[count] = '\0';
+	if (kstrtos8(dbg_buff, 0, &client)) {
+		ret = -EFAULT;
+		goto bail;
+	}
+
+	if (client == -1)
+		ipa_reset_all_drop_stats();
+	else
+		ipa_reset_drop_stats(client);
+
+	ret = count;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	/* propagate ret: returning count here would mask error paths */
+	return ret;
+}
+
+/*
+ * debugfs read handler: prints the accumulated drop counters of every
+ * enabled (non-test) consumer client.
+ */
+static ssize_t ipa_debugfs_print_drop_stats(struct file *file,
+	char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int nbytes = 0;
+	struct ipa_drop_stats_all *out;
+	int i;
+	int res;
+
+	out = kzalloc(sizeof(*out), GFP_KERNEL);
+	if (!out)
+		return -ENOMEM;
+
+	mutex_lock(&ipa3_ctx->lock);
+	res = ipa_get_drop_stats(out);
+	if (res) {
+		mutex_unlock(&ipa3_ctx->lock);
+		kfree(out);
+		return res;
+	}
+
+	for (i = 0; i < IPA_CLIENT_MAX; i++) {
+		int ep_idx = ipa3_get_ep_mapping(i);
+
+		if (ep_idx == -1)
+			continue;
+
+		if (!IPA_CLIENT_IS_CONS(i))
+			continue;
+
+		if (IPA_CLIENT_IS_TEST(i))
+			continue;
+
+		if (!(ipa3_ctx->hw_stats.drop.init.enabled_bitmask &
+			(1 << ep_idx)))
+			continue;
+
+
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"%s:\n",
+			ipa_clients_strings[i]);
+
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"drop_byte_cnt=%u\n",
+			out->client[i].drop_byte_cnt);
+
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"drop_packet_cnt=%u\n",
+			out->client[i].drop_packet_cnt);
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"\n");
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+	kfree(out);
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+/*
+ * Thin debugfs adapters binding each table family (v4/v6 x
+ * filtering/routing) to the shared control and print helpers.
+ */
+static ssize_t ipa_debugfs_control_flt_v4_stats(struct file *file,
+	const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	return ipa_debugfs_control_flt_rt_stats(IPA_IP_v4, true, file, ubuf,
+		count, ppos);
+}
+
+static ssize_t ipa_debugfs_control_flt_v6_stats(struct file *file,
+	const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	return ipa_debugfs_control_flt_rt_stats(IPA_IP_v6, true, file, ubuf,
+		count, ppos);
+}
+
+static ssize_t ipa_debugfs_control_rt_v4_stats(struct file *file,
+	const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	return ipa_debugfs_control_flt_rt_stats(IPA_IP_v4, false, file, ubuf,
+		count, ppos);
+}
+
+static ssize_t ipa_debugfs_control_rt_v6_stats(struct file *file,
+	const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	return ipa_debugfs_control_flt_rt_stats(IPA_IP_v6, false, file, ubuf,
+		count, ppos);
+}
+
+static ssize_t ipa_debugfs_print_flt_v4_stats(struct file *file,
+	char __user *ubuf, size_t count, loff_t *ppos)
+{
+	return ipa_debugfs_print_flt_rt_stats(IPA_IP_v4, true, file, ubuf,
+		count, ppos);
+}
+
+static ssize_t ipa_debugfs_print_flt_v6_stats(struct file *file,
+	char __user *ubuf, size_t count, loff_t *ppos)
+{
+	return ipa_debugfs_print_flt_rt_stats(IPA_IP_v6, true, file, ubuf,
+		count, ppos);
+}
+
+static ssize_t ipa_debugfs_print_rt_v4_stats(struct file *file,
+	char __user *ubuf, size_t count, loff_t *ppos)
+{
+	return ipa_debugfs_print_flt_rt_stats(IPA_IP_v4, false, file, ubuf,
+		count, ppos);
+}
+
+static ssize_t ipa_debugfs_print_rt_v6_stats(struct file *file,
+	char __user *ubuf, size_t count, loff_t *ppos)
+{
+	return ipa_debugfs_print_flt_rt_stats(IPA_IP_v6, false, file, ubuf,
+		count, ppos);
+}
+
+/* file_operations tables for the hw_stats debugfs files created below */
+static const struct file_operations ipa3_quota_ops = {
+	.read = ipa_debugfs_print_quota_stats,
+	.write = ipa_debugfs_reset_quota_stats,
+};
+
+static const struct file_operations ipa3_tethering_ops = {
+	.read = ipa_debugfs_print_tethering_stats,
+	.write = ipa_debugfs_reset_tethering_stats,
+};
+
+static const struct file_operations ipa3_flt_v4_ops = {
+	.read = ipa_debugfs_print_flt_v4_stats,
+	.write = ipa_debugfs_control_flt_v4_stats,
+};
+
+static const struct file_operations ipa3_flt_v6_ops = {
+	.read = ipa_debugfs_print_flt_v6_stats,
+	.write = ipa_debugfs_control_flt_v6_stats,
+};
+
+static const struct file_operations ipa3_rt_v4_ops = {
+	.read = ipa_debugfs_print_rt_v4_stats,
+	.write = ipa_debugfs_control_rt_v4_stats,
+};
+
+static const struct file_operations ipa3_rt_v6_ops = {
+	.read = ipa_debugfs_print_rt_v6_stats,
+	.write = ipa_debugfs_control_rt_v6_stats,
+};
+
+static const struct file_operations ipa3_drop_ops = {
+	.read = ipa_debugfs_print_drop_stats,
+	.write = ipa_debugfs_reset_drop_stats,
+};
+
+
+/*
+ * ipa_debugfs_init_stats() - create the hw_stats debugfs hierarchy
+ * @parent: parent dentry under which "hw_stats" is created
+ *
+ * Creates the hw_stats directory and one file per stats category.  On
+ * any failure the whole directory is removed.  Returns 0 on success or
+ * -EFAULT on failure.
+ */
+int ipa_debugfs_init_stats(struct dentry *parent)
+{
+	const mode_t read_write_mode = 0664;
+	/* table keeps creation order and names identical to the files */
+	static const struct {
+		const char *name;
+		const struct file_operations *fops;
+	} stats_files[] = {
+		{ "quota", &ipa3_quota_ops },
+		{ "drop", &ipa3_drop_ops },
+		{ "tethering", &ipa3_tethering_ops },
+		{ "flt_v4", &ipa3_flt_v4_ops },
+		{ "flt_v6", &ipa3_flt_v6_ops },
+		{ "rt_v4", &ipa3_rt_v4_ops },
+		{ "rt_v6", &ipa3_rt_v6_ops },
+	};
+	struct dentry *file;
+	struct dentry *dent;
+	unsigned int i;
+
+	if (!ipa3_ctx->hw_stats.enabled)
+		return 0;
+
+	dent = debugfs_create_dir("hw_stats", parent);
+	if (IS_ERR_OR_NULL(dent)) {
+		IPAERR("fail to create folder in debug_fs\n");
+		return -EFAULT;
+	}
+
+	for (i = 0; i < sizeof(stats_files) / sizeof(stats_files[0]); i++) {
+		file = debugfs_create_file(stats_files[i].name,
+			read_write_mode, dent, NULL, stats_files[i].fops);
+		if (IS_ERR_OR_NULL(file)) {
+			IPAERR("fail to create file %s\n",
+				stats_files[i].name);
+			goto fail;
+		}
+	}
+
+	return 0;
+fail:
+	debugfs_remove_recursive(dent);
+	return -EFAULT;
+}
+#endif
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index c6d5c6e..43fd4d1 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -33,12 +33,19 @@
#include "ipahal/ipahal_reg.h"
#include "ipahal/ipahal.h"
#include "ipahal/ipahal_fltrt.h"
+#include "ipahal/ipahal_hw_stats.h"
#include "../ipa_common_i.h"
#include "ipa_uc_offload_i.h"
#define DRV_NAME "ipa"
#define NAT_DEV_NAME "ipaNatTable"
#define IPA_COOKIE 0x57831603
+#define IPA_RT_RULE_COOKIE 0x57831604
+#define IPA_RT_TBL_COOKIE 0x57831605
+#define IPA_FLT_COOKIE 0x57831606
+#define IPA_HDR_COOKIE 0x57831607
+#define IPA_PROC_HDR_COOKIE 0x57831608
+
#define MTU_BYTE 1500
#define IPA_EP_NOT_ALLOCATED (-1)
@@ -94,6 +101,18 @@
} \
} while (0)
+#define IPAERR_RL(fmt, args...) \
+ do { \
+ pr_err_ratelimited(DRV_NAME " %s:%d " fmt, __func__,\
+ __LINE__, ## args);\
+ if (ipa3_ctx) { \
+ IPA_IPC_LOGGING(ipa3_ctx->logbuf, \
+ DRV_NAME " %s:%d " fmt, ## args); \
+ IPA_IPC_LOGGING(ipa3_ctx->logbuf_low, \
+ DRV_NAME " %s:%d " fmt, ## args); \
+ } \
+ } while (0)
+
#define WLAN_AMPDU_TX_EP 15
#define WLAN_PROD_TX_EP 19
#define WLAN1_CONS_RX_EP 14
@@ -169,6 +188,7 @@
};
struct ipa3_active_clients_log_ctx {
+ spinlock_t lock;
char *log_buffer[IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES];
int log_head;
int log_tail;
@@ -207,8 +227,8 @@
*/
struct ipa3_flt_entry {
struct list_head link;
- struct ipa_flt_rule rule;
u32 cookie;
+ struct ipa_flt_rule rule;
struct ipa3_flt_tbl *tbl;
struct ipa3_rt_tbl *rt_tbl;
u32 hw_len;
@@ -232,23 +252,23 @@
* @curr_mem: current routing tables block in sys memory
* @prev_mem: previous routing table block in sys memory
* @id: routing table id
- * @rule_ids: idr structure that holds the rule_id for each rule
+ * @rule_ids: common idr structure that holds the rule_id for each rule
*/
struct ipa3_rt_tbl {
struct list_head link;
+ u32 cookie;
struct list_head head_rt_rule_list;
char name[IPA_RESOURCE_NAME_MAX];
u32 idx;
u32 rule_cnt;
u32 ref_cnt;
struct ipa3_rt_tbl_set *set;
- u32 cookie;
bool in_sys[IPA_RULE_TYPE_MAX];
u32 sz[IPA_RULE_TYPE_MAX];
struct ipa_mem_buffer curr_mem[IPA_RULE_TYPE_MAX];
struct ipa_mem_buffer prev_mem[IPA_RULE_TYPE_MAX];
int id;
- struct idr rule_ids;
+ struct idr *rule_ids;
};
/**
@@ -274,6 +294,7 @@
*/
struct ipa3_hdr_entry {
struct list_head link;
+ u32 cookie;
u8 hdr[IPA_HDR_MAX_SIZE];
u32 hdr_len;
char name[IPA_RESOURCE_NAME_MAX];
@@ -283,7 +304,6 @@
dma_addr_t phys_base;
struct ipa3_hdr_proc_ctx_entry *proc_ctx;
struct ipa_hdr_offset_entry *offset_entry;
- u32 cookie;
u32 ref_cnt;
int id;
u8 is_eth2_ofst_valid;
@@ -322,7 +342,8 @@
/**
* struct ipa3_hdr_proc_ctx_entry - IPA processing context header table entry
* @link: entry's link in global header table entries list
- * @type:
+ * @type: header processing context type
+ * @l2tp_params: L2TP parameters
* @offset_entry: entry's offset
* @hdr: the header
* @cookie: cookie used for validity check
@@ -332,10 +353,11 @@
*/
struct ipa3_hdr_proc_ctx_entry {
struct list_head link;
+ u32 cookie;
enum ipa_hdr_proc_type type;
+ struct ipa_l2tp_hdr_proc_ctx_params l2tp_params;
struct ipa3_hdr_proc_ctx_offset_entry *offset_entry;
struct ipa3_hdr_entry *hdr;
- u32 cookie;
u32 ref_cnt;
int id;
bool user_deleted;
@@ -368,7 +390,7 @@
* @end: the last header index
* @curr_mem: current filter tables block in sys memory
* @prev_mem: previous filter table block in sys memory
- * @rule_ids: idr structure that holds the rule_id for each rule
+ * @rule_ids: common idr structure that holds the rule_id for each rule
*/
struct ipa3_flt_tbl {
struct list_head head_flt_rule_list;
@@ -378,7 +400,7 @@
struct ipa_mem_buffer curr_mem[IPA_RULE_TYPE_MAX];
struct ipa_mem_buffer prev_mem[IPA_RULE_TYPE_MAX];
bool sticky_rear;
- struct idr rule_ids;
+ struct idr *rule_ids;
};
/**
@@ -397,8 +419,8 @@
*/
struct ipa3_rt_entry {
struct list_head link;
- struct ipa_rt_rule rule;
u32 cookie;
+ struct ipa_rt_rule rule;
struct ipa3_rt_tbl *tbl;
struct ipa3_hdr_entry *hdr;
struct ipa3_hdr_proc_ctx_entry *proc_ctx;
@@ -412,10 +434,12 @@
* struct ipa3_rt_tbl_set - collection of routing tables
* @head_rt_tbl_list: collection of routing tables
* @tbl_cnt: number of routing tables
+ * @rule_ids: idr structure that holds the rule_id for each rule
*/
struct ipa3_rt_tbl_set {
struct list_head head_rt_tbl_list;
u32 tbl_cnt;
+ struct idr rule_ids;
};
/**
@@ -842,9 +866,7 @@
struct ipa3_active_clients {
struct mutex mutex;
- spinlock_t spinlock;
- bool mutex_locked;
- int cnt;
+ atomic_t cnt;
};
struct ipa3_wakelock_ref_cnt {
@@ -1016,6 +1038,56 @@
struct ipahal_imm_cmd_pyld *cmd_pyld;
};
+struct ipa_quota_stats {
+ u64 num_ipv4_bytes;
+ u64 num_ipv6_bytes;
+ u32 num_ipv4_pkts;
+ u32 num_ipv6_pkts;
+};
+
+struct ipa_quota_stats_all {
+ struct ipa_quota_stats client[IPA_CLIENT_MAX];
+};
+
+struct ipa_drop_stats {
+ u32 drop_packet_cnt;
+ u32 drop_byte_cnt;
+};
+
+struct ipa_drop_stats_all {
+ struct ipa_drop_stats client[IPA_CLIENT_MAX];
+};
+
+struct ipa_hw_stats_quota {
+ struct ipahal_stats_init_quota init;
+ struct ipa_quota_stats_all stats;
+};
+
+struct ipa_hw_stats_teth {
+ struct ipahal_stats_init_tethering init;
+ struct ipa_quota_stats_all prod_stats[IPA_CLIENT_MAX];
+};
+
+struct ipa_hw_stats_flt_rt {
+ struct ipahal_stats_init_flt_rt flt_v4_init;
+ struct ipahal_stats_init_flt_rt flt_v6_init;
+ struct ipahal_stats_init_flt_rt rt_v4_init;
+ struct ipahal_stats_init_flt_rt rt_v6_init;
+};
+
+struct ipa_hw_stats_drop {
+ struct ipahal_stats_init_drop init;
+ struct ipa_drop_stats_all stats;
+};
+
+struct ipa_hw_stats {
+ bool enabled;
+ struct ipa_hw_stats_quota quota;
+ struct ipa_hw_stats_teth teth;
+ struct ipa_hw_stats_flt_rt flt_rt;
+ struct ipa_hw_stats_drop drop;
+};
+
/**
* struct ipa3_context - IPA context
* @class: pointer to the struct class
@@ -1029,6 +1101,7 @@
* @ep_flt_num: End-points supporting filtering number
* @resume_on_connect: resume ep on ipa connect
* @flt_tbl: list of all IPA filter tables
+ * @flt_rule_ids: idr structure that holds the rule_id for each rule
* @mode: IPA operating mode
* @mmio: iomem
* @ipa_wrapper_base: IPA wrapper base address
@@ -1114,6 +1187,7 @@
u32 ep_flt_num;
bool resume_on_connect[IPA_CLIENT_MAX];
struct ipa3_flt_tbl flt_tbl[IPA3_MAX_NUM_PIPES][IPA_IP_MAX];
+ struct idr flt_rule_ids[IPA_IP_MAX];
void __iomem *mmio;
u32 ipa_wrapper_base;
u32 ipa_wrapper_size;
@@ -1228,6 +1302,7 @@
u32 ipa_tz_unlock_reg_num;
struct ipa_tz_unlock_reg_info *ipa_tz_unlock_reg;
struct ipa_dma_task_info dma_task_info;
+ struct ipa_hw_stats hw_stats;
};
struct ipa3_plat_drv_res {
@@ -1340,6 +1415,48 @@
* +-------------------------+
* | CANARY |
* +-------------------------+
+ * | QUOTA STATS |
+ * +-------------------------+
+ * | CANARY |
+ * +-------------------------+
+ * | CANARY |
+ * +-------------------------+
+ * | TETH STATS |
+ * +-------------------------+
+ * | CANARY |
+ * +-------------------------+
+ * | CANARY |
+ * +-------------------------+
+ * | V4 FLT STATS |
+ * +-------------------------+
+ * | CANARY |
+ * +-------------------------+
+ * | CANARY |
+ * +-------------------------+
+ * | V6 FLT STATS |
+ * +-------------------------+
+ * | CANARY |
+ * +-------------------------+
+ * | CANARY |
+ * +-------------------------+
+ * | V4 RT STATS |
+ * +-------------------------+
+ * | CANARY |
+ * +-------------------------+
+ * | CANARY |
+ * +-------------------------+
+ * | V6 RT STATS |
+ * +-------------------------+
+ * | CANARY |
+ * +-------------------------+
+ * | CANARY |
+ * +-------------------------+
+ * | DROP STATS |
+ * +-------------------------+
+ * | CANARY |
+ * +-------------------------+
+ * | CANARY |
+ * +-------------------------+
* | MODEM MEM |
* +-------------------------+
* | CANARY |
@@ -1422,6 +1539,20 @@
u32 uc_event_ring_size;
u32 pdn_config_ofst;
u32 pdn_config_size;
+ u32 stats_quota_ofst;
+ u32 stats_quota_size;
+ u32 stats_tethering_ofst;
+ u32 stats_tethering_size;
+ u32 stats_flt_v4_ofst;
+ u32 stats_flt_v4_size;
+ u32 stats_flt_v6_ofst;
+ u32 stats_flt_v6_size;
+ u32 stats_rt_v4_ofst;
+ u32 stats_rt_v4_size;
+ u32 stats_rt_v6_ofst;
+ u32 stats_rt_v6_size;
+ u32 stats_drop_ofst;
+ u32 stats_drop_size;
};
struct ipa3_controller {
@@ -1493,6 +1624,9 @@
int ipa3_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ipa_ep_cfg);
+int ipa3_cfg_ep_conn_track(u32 clnt_hdl,
+ const struct ipa_ep_cfg_conn_track *ep_conn_track);
+
int ipa3_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ipa_ep_cfg);
int ipa3_cfg_ep_hdr_ext(u32 clnt_hdl,
@@ -1595,7 +1729,7 @@
int ipa3_nat_del_cmd(struct ipa_ioc_v4_nat_del *del);
-int ipa4_nat_mdfy_pdn(struct ipa_ioc_nat_pdn_entry *mdfy_pdn);
+int ipa3_nat_mdfy_pdn(struct ipa_ioc_nat_pdn_entry *mdfy_pdn);
/*
* Messaging
@@ -1670,6 +1804,8 @@
ipa_notify_cb notify, void *priv, u8 hdr_len,
struct ipa_ntn_conn_out_params *outp);
int ipa3_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl);
+int ipa3_ntn_uc_reg_rdyCB(void (*ipauc_ready_cb)(void *), void *priv);
+void ipa3_ntn_uc_dereg_rdyCB(void);
/*
* To retrieve doorbell physical address of
@@ -1840,6 +1976,8 @@
int ipa3_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info
*id);
void ipa3_dec_client_disable_clks(struct ipa_active_client_logging_info *id);
+void ipa3_dec_client_disable_clks_no_block(
+ struct ipa_active_client_logging_info *id);
void ipa3_active_clients_log_dec(struct ipa_active_client_logging_info *id,
bool int_ctx);
void ipa3_active_clients_log_inc(struct ipa_active_client_logging_info *id,
@@ -1862,10 +2000,6 @@
void *private_data,
void *interrupt_data);
-
-int ipa_bridge_init(void);
-void ipa_bridge_cleanup(void);
-
ssize_t ipa3_read(struct file *filp, char __user *buf, size_t count,
loff_t *f_pos);
int ipa3_pull_msg(struct ipa_msg_meta *meta, char *buff, size_t count);
@@ -1916,10 +2050,7 @@
bool ipa3_should_pipe_be_suspended(enum ipa_client_type client);
int ipa3_tag_aggr_force_close(int pipe_num);
-void ipa3_active_clients_lock(void);
-int ipa3_active_clients_trylock(unsigned long *flags);
void ipa3_active_clients_unlock(void);
-void ipa3_active_clients_trylock_unlock(unsigned long *flags);
int ipa3_wdi_init(void);
int ipa3_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id);
int ipa3_tag_process(struct ipa3_desc *desc, int num_descs,
@@ -1968,6 +2099,65 @@
(enum ipa_client_type client);
void ipa3_uc_rg10_write_reg(enum ipahal_reg_name reg, u32 n, u32 val);
+/* Hardware stats */
+
+#define IPA_STATS_MAX_PIPE_BIT 32
+
+struct ipa_teth_stats_endpoints {
+ u32 prod_mask;
+ u32 dst_ep_mask[IPA_STATS_MAX_PIPE_BIT];
+};
+
+struct ipa_flt_rt_stats {
+ u32 num_pkts;
+ u32 num_pkts_hash;
+};
+
+int ipa_hw_stats_init(void);
+
+int ipa_debugfs_init_stats(struct dentry *parent);
+
+int ipa_init_quota_stats(u32 pipe_bitmask);
+
+int ipa_get_quota_stats(struct ipa_quota_stats_all *out);
+
+int ipa_reset_quota_stats(enum ipa_client_type client);
+
+int ipa_reset_all_quota_stats(void);
+
+int ipa_init_drop_stats(u32 pipe_bitmask);
+
+int ipa_get_drop_stats(struct ipa_drop_stats_all *out);
+
+int ipa_reset_drop_stats(enum ipa_client_type client);
+
+int ipa_reset_all_drop_stats(void);
+
+int ipa_init_teth_stats(struct ipa_teth_stats_endpoints *in);
+
+int ipa_get_teth_stats(enum ipa_client_type prod,
+ struct ipa_quota_stats_all *out);
+
+int ipa_reset_teth_stats(enum ipa_client_type prod, enum ipa_client_type cons);
+
+int ipa_reset_all_cons_teth_stats(enum ipa_client_type prod);
+
+int ipa_reset_all_teth_stats(void);
+
+int ipa_flt_rt_stats_add_rule_id(enum ipa_ip_type ip, bool filtering,
+ u16 rule_id);
+
+int ipa_flt_rt_stats_start(enum ipa_ip_type ip, bool filtering);
+
+int ipa_flt_rt_stats_clear_rule_ids(enum ipa_ip_type ip, bool filtering);
+
+int ipa_get_flt_rt_stats(enum ipa_ip_type ip, bool filtering, u16 rule_id,
+ struct ipa_flt_rt_stats *out);
+
+int ipa_reset_flt_rt_stats(enum ipa_ip_type ip, bool filtering, u16 rule_id);
+
+int ipa_reset_all_flt_rt_stats(enum ipa_ip_type ip, bool filtering);
+
u32 ipa3_get_num_pipes(void);
struct ipa_smmu_cb_ctx *ipa3_get_smmu_ctx(void);
struct ipa_smmu_cb_ctx *ipa3_get_wlan_smmu_ctx(void);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c b/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c
index e7f8acd..6d82da2 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c
@@ -322,27 +322,21 @@
static irqreturn_t ipa3_isr(int irq, void *ctxt)
{
- unsigned long flags;
+ struct ipa_active_client_logging_info log_info;
+ IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info);
IPADBG_LOW("Enter\n");
/* defer interrupt handling in case IPA is not clocked on */
- if (ipa3_active_clients_trylock(&flags) == 0) {
+ if (ipa3_inc_client_enable_clks_no_block(&log_info)) {
IPADBG("defer interrupt processing\n");
queue_work(ipa3_ctx->power_mgmt_wq, &ipa3_interrupt_defer_work);
return IRQ_HANDLED;
}
- if (ipa3_ctx->ipa3_active_clients.cnt == 0) {
- IPADBG("defer interrupt processing\n");
- queue_work(ipa3_ctx->power_mgmt_wq, &ipa3_interrupt_defer_work);
- goto bail;
- }
-
ipa3_process_interrupts(true);
IPADBG_LOW("Exit\n");
-bail:
- ipa3_active_clients_trylock_unlock(&flags);
+ ipa3_dec_client_disable_clks(&log_info);
return IRQ_HANDLED;
}
/**
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
index da965e7..2bd7b79 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
@@ -227,7 +227,7 @@
if (strnlen(lookup->name, IPA_RESOURCE_NAME_MAX) ==
IPA_RESOURCE_NAME_MAX) {
- IPAERR("Interface name too long. (%s)\n", lookup->name);
+ IPAERR_RL("Interface name too long. (%s)\n", lookup->name);
return result;
}
@@ -268,13 +268,21 @@
}
if (strnlen(tx->name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
- IPAERR("Interface name too long. (%s)\n", tx->name);
+ IPAERR_RL("Interface name too long. (%s)\n", tx->name);
return result;
}
mutex_lock(&ipa3_ctx->lock);
list_for_each_entry(entry, &ipa3_ctx->intf_list, link) {
if (!strcmp(entry->name, tx->name)) {
+ /* add the entry check */
+ if (entry->num_tx_props != tx->num_tx_props) {
+ IPAERR("invalid entry number(%u %u)\n",
+ entry->num_tx_props,
+ tx->num_tx_props);
+ mutex_unlock(&ipa3_ctx->lock);
+ return result;
+ }
memcpy(tx->tx, entry->tx, entry->num_tx_props *
sizeof(struct ipa_ioc_tx_intf_prop));
result = 0;
@@ -307,13 +315,21 @@
}
if (strnlen(rx->name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
- IPAERR("Interface name too long. (%s)\n", rx->name);
+ IPAERR_RL("Interface name too long. (%s)\n", rx->name);
return result;
}
mutex_lock(&ipa3_ctx->lock);
list_for_each_entry(entry, &ipa3_ctx->intf_list, link) {
if (!strcmp(entry->name, rx->name)) {
+ /* add the entry check */
+ if (entry->num_rx_props != rx->num_rx_props) {
+ IPAERR("invalid entry number(%u %u)\n",
+ entry->num_rx_props,
+ rx->num_rx_props);
+ mutex_unlock(&ipa3_ctx->lock);
+ return result;
+ }
memcpy(rx->rx, entry->rx, entry->num_rx_props *
sizeof(struct ipa_ioc_rx_intf_prop));
result = 0;
@@ -348,6 +364,14 @@
mutex_lock(&ipa3_ctx->lock);
list_for_each_entry(entry, &ipa3_ctx->intf_list, link) {
if (!strcmp(entry->name, ext->name)) {
+ /* add the entry check */
+ if (entry->num_ext_props != ext->num_ext_props) {
+ IPAERR("invalid entry number(%u %u)\n",
+ entry->num_ext_props,
+ ext->num_ext_props);
+ mutex_unlock(&ipa3_ctx->lock);
+ return result;
+ }
memcpy(ext->ext, entry->ext, entry->num_ext_props *
sizeof(struct ipa_ioc_ext_intf_prop));
result = 0;
@@ -386,13 +410,13 @@
if (meta == NULL || (buff == NULL && callback != NULL) ||
(buff != NULL && callback == NULL)) {
- IPAERR("invalid param meta=%p buff=%p, callback=%p\n",
+ IPAERR_RL("invalid param meta=%p buff=%p, callback=%p\n",
meta, buff, callback);
return -EINVAL;
}
if (meta->msg_type >= IPA_EVENT_MAX_NUM) {
- IPAERR("unsupported message type %d\n", meta->msg_type);
+ IPAERR_RL("unsupported message type %d\n", meta->msg_type);
return -EINVAL;
}
@@ -522,17 +546,15 @@
char __user *start;
struct ipa3_push_msg *msg = NULL;
int ret;
- DEFINE_WAIT(wait);
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
int locked;
start = buf;
+ add_wait_queue(&ipa3_ctx->msg_waitq, &wait);
while (1) {
mutex_lock(&ipa3_ctx->msg_lock);
locked = 1;
- prepare_to_wait(&ipa3_ctx->msg_waitq,
- &wait,
- TASK_INTERRUPTIBLE);
if (!list_empty(&ipa3_ctx->msg_list)) {
msg = list_first_entry(&ipa3_ctx->msg_list,
@@ -585,10 +607,10 @@
locked = 0;
mutex_unlock(&ipa3_ctx->msg_lock);
- schedule();
+ wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
- finish_wait(&ipa3_ctx->msg_waitq, &wait);
+ remove_wait_queue(&ipa3_ctx->msg_waitq, &wait);
if (start != buf && ret != -EFAULT)
ret = buf - start;
@@ -618,7 +640,7 @@
int result = -EINVAL;
if (meta == NULL || buff == NULL || !count) {
- IPAERR("invalid param name=%p buff=%p count=%zu\n",
+ IPAERR_RL("invalid param name=%p buff=%p count=%zu\n",
meta, buff, count);
return result;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
index 0dd86fa..b19ef8b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
@@ -191,7 +191,7 @@
static int ipa_mhi_start_gsi_channel(enum ipa_client_type client,
int ipa_ep_idx, struct start_gsi_channel *params)
{
- int res;
+ int res = 0;
struct gsi_evt_ring_props ev_props;
struct ipa_mhi_msi_info *msi;
struct gsi_chan_props ch_props;
@@ -241,7 +241,6 @@
if (res) {
IPA_MHI_ERR("gsi_alloc_evt_ring failed %d\n", res);
goto fail_alloc_evt;
- return res;
}
IPA_MHI_DBG("client %d, caching event ring hdl %lu\n",
client,
@@ -259,7 +258,6 @@
IPA_MHI_ERR("event ring wp is not updated. base=wp=0x%llx\n",
params->ev_ctx_host->wp);
goto fail_alloc_ch;
- return res;
}
IPA_MHI_DBG("Ring event db: evt_ring_hdl=%lu host_wp=0x%llx\n",
@@ -270,7 +268,6 @@
IPA_MHI_ERR("fail to ring evt ring db %d. hdl=%lu wp=0x%llx\n",
res, ep->gsi_evt_ring_hdl, params->ev_ctx_host->wp);
goto fail_alloc_ch;
- return res;
}
memset(&ch_props, 0, sizeof(ch_props));
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
index 6acc4d8..958fc6c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
@@ -253,8 +253,8 @@
mutex_lock(&nat_ctx->lock);
if (strcmp(mem->dev_name, NAT_DEV_NAME)) {
- IPAERR("Nat device name mismatch\n");
- IPAERR("Expect: %s Recv: %s\n", NAT_DEV_NAME, mem->dev_name);
+ IPAERR_RL("Nat device name mismatch\n");
+ IPAERR_RL("Expect: %s Recv: %s\n", NAT_DEV_NAME, mem->dev_name);
result = -EPERM;
goto bail;
}
@@ -273,7 +273,7 @@
if (mem->size <= 0 ||
nat_ctx->is_dev_init == true) {
- IPAERR("Invalid Parameters or device is already init\n");
+ IPAERR_RL("Invalid Parameters or device is already init\n");
result = -EPERM;
goto bail;
}
@@ -371,7 +371,7 @@
/* check for integer overflow */
if (init->ipv4_rules_offset >
UINT_MAX - (TBL_ENTRY_SIZE * (init->table_entries + 1))) {
- IPAERR("Detected overflow\n");
+ IPAERR_RL("Detected overflow\n");
return -EPERM;
}
/* Check Table Entry offset is not
@@ -380,8 +380,8 @@
tmp = init->ipv4_rules_offset +
(TBL_ENTRY_SIZE * (init->table_entries + 1));
if (tmp > ipa3_ctx->nat_mem.size) {
- IPAERR("Table rules offset not valid\n");
- IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+ IPAERR_RL("Table rules offset not valid\n");
+ IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n",
init->ipv4_rules_offset, (init->table_entries + 1),
tmp, ipa3_ctx->nat_mem.size);
return -EPERM;
@@ -389,8 +389,8 @@
/* check for integer overflow */
if (init->expn_rules_offset >
- UINT_MAX - (TBL_ENTRY_SIZE * init->expn_table_entries)) {
- IPAERR("Detected overflow\n");
+ (UINT_MAX - (TBL_ENTRY_SIZE * init->expn_table_entries))) {
+ IPAERR_RL("Detected overflow\n");
return -EPERM;
}
/* Check Expn Table Entry offset is not
@@ -399,8 +399,8 @@
tmp = init->expn_rules_offset +
(TBL_ENTRY_SIZE * init->expn_table_entries);
if (tmp > ipa3_ctx->nat_mem.size) {
- IPAERR("Expn Table rules offset not valid\n");
- IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+ IPAERR_RL("Expn Table rules offset not valid\n");
+ IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n",
init->expn_rules_offset, init->expn_table_entries,
tmp, ipa3_ctx->nat_mem.size);
return -EPERM;
@@ -409,7 +409,7 @@
/* check for integer overflow */
if (init->index_offset >
UINT_MAX - (INDX_TBL_ENTRY_SIZE * (init->table_entries + 1))) {
- IPAERR("Detected overflow\n");
+ IPAERR_RL("Detected overflow\n");
return -EPERM;
}
/* Check Indx Table Entry offset is not
@@ -418,8 +418,8 @@
tmp = init->index_offset +
(INDX_TBL_ENTRY_SIZE * (init->table_entries + 1));
if (tmp > ipa3_ctx->nat_mem.size) {
- IPAERR("Indx Table rules offset not valid\n");
- IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+ IPAERR_RL("Indx Table rules offset not valid\n");
+ IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n",
init->index_offset, (init->table_entries + 1),
tmp, ipa3_ctx->nat_mem.size);
return -EPERM;
@@ -428,7 +428,7 @@
/* check for integer overflow */
if (init->index_expn_offset >
UINT_MAX - (INDX_TBL_ENTRY_SIZE * init->expn_table_entries)) {
- IPAERR("Detected overflow\n");
+ IPAERR_RL("Detected overflow\n");
return -EPERM;
}
/* Check Expn Table entry offset is not
@@ -437,8 +437,8 @@
tmp = init->index_expn_offset +
(INDX_TBL_ENTRY_SIZE * init->expn_table_entries);
if (tmp > ipa3_ctx->nat_mem.size) {
- IPAERR("Indx Expn Table rules offset not valid\n");
- IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+ IPAERR_RL("Indx Expn Table rules offset not valid\n");
+ IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n",
init->index_expn_offset, init->expn_table_entries,
tmp, ipa3_ctx->nat_mem.size);
return -EPERM;
@@ -476,16 +476,16 @@
(init->expn_rules_offset > offset) ||
(init->index_offset > offset) ||
(init->index_expn_offset > offset)) {
- IPAERR("Failed due to integer overflow\n");
- IPAERR("nat.mem.dma_handle: 0x%pa\n",
+ IPAERR_RL("Failed due to integer overflow\n");
+ IPAERR_RL("nat.mem.dma_handle: 0x%pa\n",
&ipa3_ctx->nat_mem.dma_handle);
- IPAERR("ipv4_rules_offset: 0x%x\n",
+ IPAERR_RL("ipv4_rules_offset: 0x%x\n",
init->ipv4_rules_offset);
- IPAERR("expn_rules_offset: 0x%x\n",
+ IPAERR_RL("expn_rules_offset: 0x%x\n",
init->expn_rules_offset);
- IPAERR("index_offset: 0x%x\n",
+ IPAERR_RL("index_offset: 0x%x\n",
init->index_offset);
- IPAERR("index_expn_offset: 0x%x\n",
+ IPAERR_RL("index_expn_offset: 0x%x\n",
init->index_expn_offset);
result = -EPERM;
goto destroy_imm_cmd;
@@ -544,7 +544,7 @@
cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
IPA_IMM_CMD_IP_V4_NAT_INIT, &cmd, false);
if (!cmd_pyld[num_cmd]) {
- IPAERR("Fail to construct ip_v4_nat_init imm cmd\n");
+ IPAERR_RL("Fail to construct ip_v4_nat_init imm cmd\n");
result = -EPERM;
goto destroy_imm_cmd;
}
@@ -643,14 +643,14 @@
}
/**
-* ipa4_nat_mdfy_pdn() - Modify a PDN entry in PDN config table in IPA SRAM
+* ipa3_nat_mdfy_pdn() - Modify a PDN entry in PDN config table in IPA SRAM
* @mdfy_pdn: [in] PDN info to be written to SRAM
*
* Called by NAT client driver to modify an entry in the PDN config table
*
* Returns: 0 on success, negative on failure
*/
-int ipa4_nat_mdfy_pdn(struct ipa_ioc_nat_pdn_entry *mdfy_pdn)
+int ipa3_nat_mdfy_pdn(struct ipa_ioc_nat_pdn_entry *mdfy_pdn)
{
struct ipahal_imm_cmd_dma_shared_mem mem_cmd = { 0 };
struct ipa3_desc desc;
@@ -747,7 +747,7 @@
IPADBG("\n");
if (dma->entries <= 0) {
- IPAERR("Invalid number of commands %d\n",
+ IPAERR_RL("Invalid number of commands %d\n",
dma->entries);
ret = -EPERM;
goto bail;
@@ -755,7 +755,7 @@
for (cnt = 0; cnt < dma->entries; cnt++) {
if (dma->dma[cnt].table_index >= 1) {
- IPAERR("Invalid table index %d\n",
+ IPAERR_RL("Invalid table index %d\n",
dma->dma[cnt].table_index);
ret = -EPERM;
goto bail;
@@ -766,7 +766,7 @@
if (dma->dma[cnt].offset >=
(ipa3_ctx->nat_mem.size_base_tables + 1) *
NAT_TABLE_ENTRY_SIZE_BYTE) {
- IPAERR("Invalid offset %d\n",
+ IPAERR_RL("Invalid offset %d\n",
dma->dma[cnt].offset);
ret = -EPERM;
goto bail;
@@ -778,7 +778,7 @@
if (dma->dma[cnt].offset >=
ipa3_ctx->nat_mem.size_expansion_tables *
NAT_TABLE_ENTRY_SIZE_BYTE) {
- IPAERR("Invalid offset %d\n",
+ IPAERR_RL("Invalid offset %d\n",
dma->dma[cnt].offset);
ret = -EPERM;
goto bail;
@@ -790,7 +790,7 @@
if (dma->dma[cnt].offset >=
(ipa3_ctx->nat_mem.size_base_tables + 1) *
NAT_INTEX_TABLE_ENTRY_SIZE_BYTE) {
- IPAERR("Invalid offset %d\n",
+ IPAERR_RL("Invalid offset %d\n",
dma->dma[cnt].offset);
ret = -EPERM;
goto bail;
@@ -802,7 +802,7 @@
if (dma->dma[cnt].offset >=
ipa3_ctx->nat_mem.size_expansion_tables *
NAT_INTEX_TABLE_ENTRY_SIZE_BYTE) {
- IPAERR("Invalid offset %d\n",
+ IPAERR_RL("Invalid offset %d\n",
dma->dma[cnt].offset);
ret = -EPERM;
goto bail;
@@ -811,7 +811,7 @@
break;
default:
- IPAERR("Invalid base_addr %d\n",
+ IPAERR_RL("Invalid base_addr %d\n",
dma->dma[cnt].base_addr);
ret = -EPERM;
goto bail;
@@ -853,7 +853,7 @@
cmd.data = dma->dma[cnt].data;
cmd_pyld = ipahal_construct_imm_cmd(cmd_name, &cmd, false);
if (!cmd_pyld) {
- IPAERR("Fail to construct nat_dma imm cmd\n");
+ IPAERR_RL("Fail to construct nat_dma imm cmd\n");
continue;
}
desc[1].type = IPA_IMM_CMD_DESC;
@@ -1016,7 +1016,7 @@
cmd_pyld = ipahal_construct_imm_cmd(
IPA_IMM_CMD_IP_V4_NAT_INIT, &cmd, false);
if (!cmd_pyld) {
- IPAERR("Fail to construct ip_v4_nat_init imm cmd\n");
+ IPAERR_RL("Fail to construct ip_v4_nat_init imm cmd\n");
result = -EPERM;
goto destroy_regwrt_imm_cmd;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index 73738bf..61bccc6 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -808,6 +808,11 @@
return -EINVAL;
}
+ if (req->source_pipe_index == -1) {
+ IPAWANERR("Source pipe index invalid\n");
+ return -EINVAL;
+ }
+
mutex_lock(&ipa3_qmi_lock);
if (ipa3_qmi_ctx != NULL) {
/* cache the qmi_filter_request */
@@ -1178,10 +1183,13 @@
}
/* clean the QMI msg cache */
+ mutex_lock(&ipa3_qmi_lock);
if (ipa3_qmi_ctx != NULL) {
vfree(ipa3_qmi_ctx);
ipa3_qmi_ctx = NULL;
}
+ mutex_unlock(&ipa3_qmi_lock);
+
ipa3_svc_handle = 0;
ipa3_qmi_modem_init_fin = false;
ipa3_qmi_indication_fin = false;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
index cf28986..ef0158e 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
@@ -694,7 +694,7 @@
struct ipa3_rt_tbl_set *set;
if (strnlen(name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
- IPAERR("Name too long: %s\n", name);
+ IPAERR_RL("Name too long: %s\n", name);
return NULL;
}
@@ -720,16 +720,19 @@
struct ipa3_rt_tbl *entry;
if (in->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
+ mutex_lock(&ipa3_ctx->lock);
/* check if this table exists */
entry = __ipa3_find_rt_tbl(in->ip, in->name);
- if (!entry)
+ if (!entry) {
+ mutex_unlock(&ipa3_ctx->lock);
return -EFAULT;
-
+ }
in->idx = entry->idx;
+ mutex_unlock(&ipa3_ctx->lock);
return 0;
}
@@ -743,7 +746,7 @@
int max_tbl_indx;
if (name == NULL) {
- IPAERR("no tbl name\n");
+ IPAERR_RL("no tbl name\n");
goto error;
}
@@ -756,7 +759,7 @@
max(IPA_MEM_PART(v6_modem_rt_index_hi),
IPA_MEM_PART(v6_apps_rt_index_hi));
} else {
- IPAERR("bad ip family type\n");
+ IPAERR_RL("bad ip family type\n");
goto error;
}
@@ -790,7 +793,7 @@
INIT_LIST_HEAD(&entry->link);
strlcpy(entry->name, name, IPA_RESOURCE_NAME_MAX);
entry->set = set;
- entry->cookie = IPA_COOKIE;
+ entry->cookie = IPA_RT_TBL_COOKIE;
entry->in_sys[IPA_RULE_HASHABLE] = (ip == IPA_IP_v4) ?
!ipa3_ctx->ip4_rt_tbl_hash_lcl :
!ipa3_ctx->ip6_rt_tbl_hash_lcl;
@@ -798,7 +801,7 @@
!ipa3_ctx->ip4_rt_tbl_nhash_lcl :
!ipa3_ctx->ip6_rt_tbl_nhash_lcl;
set->tbl_cnt++;
- idr_init(&entry->rule_ids);
+ entry->rule_ids = &set->rule_ids;
list_add(&entry->link, &set->head_rt_tbl_list);
IPADBG("add rt tbl idx=%d tbl_cnt=%d ip=%d\n", entry->idx,
@@ -808,12 +811,16 @@
if (id < 0) {
IPAERR("failed to add to tree\n");
WARN_ON(1);
+ goto ipa_insert_failed;
}
entry->id = id;
}
return entry;
-
+ipa_insert_failed:
+ set->tbl_cnt--;
+ list_del(&entry->link);
+ idr_destroy(entry->rule_ids);
fail_rt_idx_alloc:
entry->cookie = 0;
kmem_cache_free(ipa3_ctx->rt_tbl_cache, entry);
@@ -827,13 +834,13 @@
u32 id;
struct ipa3_rt_tbl_set *rset;
- if (entry == NULL || (entry->cookie != IPA_COOKIE)) {
- IPAERR("bad parms\n");
+ if (entry == NULL || (entry->cookie != IPA_RT_TBL_COOKIE)) {
+ IPAERR_RL("bad parms\n");
return -EINVAL;
}
id = entry->id;
if (ipa3_id_find(id) == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
return -EPERM;
}
@@ -841,12 +848,14 @@
ip = IPA_IP_v4;
else if (entry->set == &ipa3_ctx->rt_tbl_set[IPA_IP_v6])
ip = IPA_IP_v6;
- else
+ else {
WARN_ON(1);
+ return -EPERM;
+ }
rset = &ipa3_ctx->reap_rt_tbl_set[ip];
- idr_destroy(&entry->rule_ids);
+ entry->rule_ids = NULL;
if (entry->in_sys[IPA_RULE_HASHABLE] ||
entry->in_sys[IPA_RULE_NON_HASHABLE]) {
list_move(&entry->link, &rset->head_rt_tbl_list);
@@ -879,14 +888,14 @@
if (rule->hdr_hdl) {
*hdr = ipa3_id_find(rule->hdr_hdl);
- if ((*hdr == NULL) || ((*hdr)->cookie != IPA_COOKIE)) {
+ if ((*hdr == NULL) || ((*hdr)->cookie != IPA_HDR_COOKIE)) {
IPAERR("rt rule does not point to valid hdr\n");
return -EPERM;
}
} else if (rule->hdr_proc_ctx_hdl) {
*proc_ctx = ipa3_id_find(rule->hdr_proc_ctx_hdl);
if ((*proc_ctx == NULL) ||
- ((*proc_ctx)->cookie != IPA_COOKIE)) {
+ ((*proc_ctx)->cookie != IPA_PROC_HDR_COOKIE)) {
IPAERR("rt rule does not point to valid proc ctx\n");
return -EPERM;
@@ -909,12 +918,12 @@
goto error;
}
INIT_LIST_HEAD(&(*entry)->link);
- (*(entry))->cookie = IPA_COOKIE;
+ (*(entry))->cookie = IPA_RT_RULE_COOKIE;
(*(entry))->rule = *rule;
(*(entry))->tbl = tbl;
(*(entry))->hdr = hdr;
(*(entry))->proc_ctx = proc_ctx;
- id = ipa3_alloc_rule_id(&tbl->rule_ids);
+ id = ipa3_alloc_rule_id(tbl->rule_ids);
if (id < 0) {
IPAERR("failed to allocate rule id\n");
WARN_ON(1);
@@ -958,7 +967,7 @@
entry->hdr->ref_cnt--;
else if (entry->proc_ctx)
entry->proc_ctx->ref_cnt--;
- idr_remove(&tbl->rule_ids, entry->rule_id);
+ idr_remove(tbl->rule_ids, entry->rule_id);
list_del(&entry->link);
kmem_cache_free(ipa3_ctx->rt_rule_cache, entry);
return -EPERM;
@@ -977,8 +986,8 @@
tbl = __ipa_add_rt_tbl(ip, name);
- if (tbl == NULL || (tbl->cookie != IPA_COOKIE)) {
- IPAERR("failed adding rt tbl name = %s\n",
+ if (tbl == NULL || (tbl->cookie != IPA_RT_TBL_COOKIE)) {
+ IPAERR_RL("failed adding rt tbl name = %s\n",
name ? name : "");
goto error;
}
@@ -988,8 +997,8 @@
*/
if (!strcmp(tbl->name, IPA_DFLT_RT_TBL_NAME) &&
(tbl->rule_cnt > 0) && (at_rear != 0)) {
- IPAERR("cannot add rule at end of tbl rule_cnt=%d at_rear=%d\n",
- tbl->rule_cnt, at_rear);
+ IPAERR_RL("cannot add rule at end of tbl rule_cnt=%d at_rear=%d"
+ , tbl->rule_cnt, at_rear);
goto error;
}
@@ -1059,7 +1068,7 @@
int ret;
if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1069,7 +1078,7 @@
&rules->rules[i].rule,
rules->rules[i].at_rear,
&rules->rules[i].rt_rule_hdl)) {
- IPAERR("failed to add rt rule %d\n", i);
+ IPAERR_RL("failed to add rt rule %d\n", i);
rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
} else {
rules->rules[i].status = 0;
@@ -1105,36 +1114,36 @@
struct ipa3_rt_entry *entry = NULL;
if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa3_ctx->lock);
tbl = __ipa3_find_rt_tbl(rules->ip, rules->rt_tbl_name);
- if (tbl == NULL || (tbl->cookie != IPA_COOKIE)) {
- IPAERR("failed finding rt tbl name = %s\n",
+ if (tbl == NULL || (tbl->cookie != IPA_RT_TBL_COOKIE)) {
+ IPAERR_RL("failed finding rt tbl name = %s\n",
rules->rt_tbl_name ? rules->rt_tbl_name : "");
ret = -EINVAL;
goto bail;
}
if (tbl->rule_cnt <= 0) {
- IPAERR("tbl->rule_cnt <= 0");
+ IPAERR_RL("tbl->rule_cnt <= 0");
ret = -EINVAL;
goto bail;
}
entry = ipa3_id_find(rules->add_after_hdl);
if (!entry) {
- IPAERR("failed finding rule %d in rt tbls\n",
+ IPAERR_RL("failed finding rule %d in rt tbls\n",
rules->add_after_hdl);
ret = -EINVAL;
goto bail;
}
if (entry->tbl != tbl) {
- IPAERR("given rt rule does not match the table\n");
+ IPAERR_RL("given rt rule does not match the table\n");
ret = -EINVAL;
goto bail;
}
@@ -1145,7 +1154,7 @@
*/
if (!strcmp(tbl->name, IPA_DFLT_RT_TBL_NAME) &&
(&entry->link == tbl->head_rt_rule_list.prev)) {
- IPAERR("cannot add rule at end of tbl rule_cnt=%d\n",
+ IPAERR_RL("cannot add rule at end of tbl rule_cnt=%d\n",
tbl->rule_cnt);
ret = -EINVAL;
goto bail;
@@ -1162,7 +1171,7 @@
&rules->rules[i].rule,
&rules->rules[i].rt_rule_hdl,
&entry)) {
- IPAERR("failed to add rt rule %d\n", i);
+ IPAERR_RL("failed to add rt rule %d\n", i);
rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
} else {
rules->rules[i].status = 0;
@@ -1171,7 +1180,7 @@
if (rules->commit)
if (ipa3_ctx->ctrl->ipa3_commit_rt(rules->ip)) {
- IPAERR("failed to commit\n");
+ IPAERR_RL("failed to commit\n");
ret = -EPERM;
goto bail;
}
@@ -1192,12 +1201,12 @@
entry = ipa3_id_find(rule_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
return -EINVAL;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("bad params\n");
+ if (entry->cookie != IPA_RT_RULE_COOKIE) {
+ IPAERR_RL("bad params\n");
return -EINVAL;
}
@@ -1210,10 +1219,10 @@
IPADBG("del rt rule tbl_idx=%d rule_cnt=%d rule_id=%d\n ref_cnt=%u",
entry->tbl->idx, entry->tbl->rule_cnt,
entry->rule_id, entry->tbl->ref_cnt);
- idr_remove(&entry->tbl->rule_ids, entry->rule_id);
+ idr_remove(entry->tbl->rule_ids, entry->rule_id);
if (entry->tbl->rule_cnt == 0 && entry->tbl->ref_cnt == 0) {
if (__ipa_del_rt_tbl(entry->tbl))
- IPAERR("fail to del RT tbl\n");
+ IPAERR_RL("fail to del RT tbl\n");
}
entry->cookie = 0;
id = entry->id;
@@ -1240,14 +1249,14 @@
int ret;
if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa3_ctx->lock);
for (i = 0; i < hdls->num_hdls; i++) {
if (__ipa3_del_rt_rule(hdls->hdl[i].hdl)) {
- IPAERR("failed to del rt rule %i\n", i);
+ IPAERR_RL("failed to del rt rule %i\n", i);
hdls->hdl[i].status = IPA_RT_STATUS_OF_DEL_FAILED;
} else {
hdls->hdl[i].status = 0;
@@ -1280,7 +1289,7 @@
int ret;
if (ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1324,7 +1333,7 @@
int id;
if (ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1340,7 +1349,7 @@
* filtering rules point to routing tables
*/
if (ipa3_reset_flt(ip))
- IPAERR("fail to reset flt ip=%d\n", ip);
+ IPAERR_RL("fail to reset flt ip=%d\n", ip);
set = &ipa3_ctx->rt_tbl_set[ip];
rset = &ipa3_ctx->reap_rt_tbl_set[ip];
@@ -1369,7 +1378,7 @@
else if (rule->proc_ctx)
__ipa3_release_hdr_proc_ctx(rule->proc_ctx->id);
rule->cookie = 0;
- idr_remove(&tbl->rule_ids, rule->rule_id);
+ idr_remove(tbl->rule_ids, rule->rule_id);
id = rule->id;
kmem_cache_free(ipa3_ctx->rt_rule_cache, rule);
@@ -1386,7 +1395,7 @@
/* do not remove the "default" routing tbl which has index 0 */
if (tbl->idx != apps_start_idx) {
- idr_destroy(&tbl->rule_ids);
+ tbl->rule_ids = NULL;
if (tbl->in_sys[IPA_RULE_HASHABLE] ||
tbl->in_sys[IPA_RULE_NON_HASHABLE]) {
list_move(&tbl->link, &rset->head_rt_tbl_list);
@@ -1429,21 +1438,27 @@
int result = -EFAULT;
if (lookup == NULL || lookup->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa3_ctx->lock);
entry = __ipa3_find_rt_tbl(lookup->ip, lookup->name);
- if (entry && entry->cookie == IPA_COOKIE) {
+ if (entry && entry->cookie == IPA_RT_TBL_COOKIE) {
+ if (entry->ref_cnt == U32_MAX) {
+ IPAERR("fail: ref count crossed limit\n");
+ goto ret;
+ }
entry->ref_cnt++;
lookup->hdl = entry->id;
/* commit for get */
if (ipa3_ctx->ctrl->ipa3_commit_rt(lookup->ip))
- IPAERR("fail to commit RT tbl\n");
+ IPAERR_RL("fail to commit RT tbl\n");
result = 0;
}
+
+ret:
mutex_unlock(&ipa3_ctx->lock);
return result;
@@ -1461,18 +1476,18 @@
{
struct ipa3_rt_tbl *entry;
enum ipa_ip_type ip = IPA_IP_MAX;
- int result;
+ int result = 0;
mutex_lock(&ipa3_ctx->lock);
entry = ipa3_id_find(rt_tbl_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
result = -EINVAL;
goto ret;
}
- if ((entry->cookie != IPA_COOKIE) || entry->ref_cnt == 0) {
- IPAERR("bad parms\n");
+ if ((entry->cookie != IPA_RT_TBL_COOKIE) || entry->ref_cnt == 0) {
+ IPAERR_RL("bad parms\n");
result = -EINVAL;
goto ret;
}
@@ -1481,18 +1496,21 @@
ip = IPA_IP_v4;
else if (entry->set == &ipa3_ctx->rt_tbl_set[IPA_IP_v6])
ip = IPA_IP_v6;
- else
+ else {
WARN_ON(1);
+ result = -EINVAL;
+ goto ret;
+ }
entry->ref_cnt--;
if (entry->ref_cnt == 0 && entry->rule_cnt == 0) {
IPADBG("zero ref_cnt, delete rt tbl (idx=%u)\n",
entry->idx);
if (__ipa_del_rt_tbl(entry))
- IPAERR("fail to del RT tbl\n");
+ IPAERR_RL("fail to del RT tbl\n");
/* commit for put */
if (ipa3_ctx->ctrl->ipa3_commit_rt(ip))
- IPAERR("fail to commit RT tbl\n");
+ IPAERR_RL("fail to commit RT tbl\n");
}
result = 0;
@@ -1512,26 +1530,27 @@
if (rtrule->rule.hdr_hdl) {
hdr = ipa3_id_find(rtrule->rule.hdr_hdl);
- if ((hdr == NULL) || (hdr->cookie != IPA_COOKIE)) {
- IPAERR("rt rule does not point to valid hdr\n");
+ if ((hdr == NULL) || (hdr->cookie != IPA_HDR_COOKIE)) {
+ IPAERR_RL("rt rule does not point to valid hdr\n");
goto error;
}
} else if (rtrule->rule.hdr_proc_ctx_hdl) {
proc_ctx = ipa3_id_find(rtrule->rule.hdr_proc_ctx_hdl);
- if ((proc_ctx == NULL) || (proc_ctx->cookie != IPA_COOKIE)) {
- IPAERR("rt rule does not point to valid proc ctx\n");
+ if ((proc_ctx == NULL) ||
+ (proc_ctx->cookie != IPA_PROC_HDR_COOKIE)) {
+ IPAERR_RL("rt rule does not point to valid proc ctx\n");
goto error;
}
}
entry = ipa3_id_find(rtrule->rt_rule_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
goto error;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("bad params\n");
+ if (entry->cookie != IPA_RT_RULE_COOKIE) {
+ IPAERR_RL("bad params\n");
goto error;
}
@@ -1572,14 +1591,14 @@
int result;
if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa3_ctx->lock);
for (i = 0; i < hdls->num_rules; i++) {
if (__ipa_mdfy_rt_rule(&hdls->rules[i])) {
- IPAERR("failed to mdfy rt rule %i\n", i);
+ IPAERR_RL("failed to mdfy rt rule %i\n", i);
hdls->rules[i].status = IPA_RT_STATUS_OF_MDFY_FAILED;
} else {
hdls->rules[i].status = 0;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
index ce47623..b6427d0 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
@@ -104,41 +104,83 @@
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
TX_STATS(num_pkts_processed);
- TX_STATS(tail_ptr_val);
- TX_STATS(num_db_fired);
- TX_STATS(tx_comp_ring_stats.ringFull);
- TX_STATS(tx_comp_ring_stats.ringEmpty);
- TX_STATS(tx_comp_ring_stats.ringUsageHigh);
- TX_STATS(tx_comp_ring_stats.ringUsageLow);
- TX_STATS(tx_comp_ring_stats.RingUtilCount);
- TX_STATS(bam_stats.bamFifoFull);
- TX_STATS(bam_stats.bamFifoEmpty);
- TX_STATS(bam_stats.bamFifoUsageHigh);
- TX_STATS(bam_stats.bamFifoUsageLow);
- TX_STATS(bam_stats.bamUtilCount);
+ TX_STATS(ring_stats.ringFull);
+ TX_STATS(ring_stats.ringEmpty);
+ TX_STATS(ring_stats.ringUsageHigh);
+ TX_STATS(ring_stats.ringUsageLow);
+ TX_STATS(ring_stats.RingUtilCount);
+ TX_STATS(gsi_stats.bamFifoFull);
+ TX_STATS(gsi_stats.bamFifoEmpty);
+ TX_STATS(gsi_stats.bamFifoUsageHigh);
+ TX_STATS(gsi_stats.bamFifoUsageLow);
+ TX_STATS(gsi_stats.bamUtilCount);
TX_STATS(num_db);
TX_STATS(num_qmb_int_handled);
+ TX_STATS(ipa_pipe_number);
- RX_STATS(max_outstanding_pkts);
RX_STATS(num_pkts_processed);
- RX_STATS(rx_ring_rp_value);
- RX_STATS(rx_ind_ring_stats.ringFull);
- RX_STATS(rx_ind_ring_stats.ringEmpty);
- RX_STATS(rx_ind_ring_stats.ringUsageHigh);
- RX_STATS(rx_ind_ring_stats.ringUsageLow);
- RX_STATS(rx_ind_ring_stats.RingUtilCount);
- RX_STATS(bam_stats.bamFifoFull);
- RX_STATS(bam_stats.bamFifoEmpty);
- RX_STATS(bam_stats.bamFifoUsageHigh);
- RX_STATS(bam_stats.bamFifoUsageLow);
- RX_STATS(bam_stats.bamUtilCount);
+ RX_STATS(ring_stats.ringFull);
+ RX_STATS(ring_stats.ringEmpty);
+ RX_STATS(ring_stats.ringUsageHigh);
+ RX_STATS(ring_stats.ringUsageLow);
+ RX_STATS(ring_stats.RingUtilCount);
+ RX_STATS(gsi_stats.bamFifoFull);
+ RX_STATS(gsi_stats.bamFifoEmpty);
+ RX_STATS(gsi_stats.bamFifoUsageHigh);
+ RX_STATS(gsi_stats.bamFifoUsageLow);
+ RX_STATS(gsi_stats.bamUtilCount);
RX_STATS(num_db);
+ RX_STATS(num_qmb_int_handled);
+ RX_STATS(ipa_pipe_number);
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return 0;
}
+
+int ipa3_ntn_uc_reg_rdyCB(void (*ipa_ready_cb)(void *), void *user_data)
+{
+ int ret;
+
+ if (!ipa3_ctx) {
+ IPAERR("IPA ctx is null\n");
+ return -ENXIO;
+ }
+
+ ret = ipa3_uc_state_check();
+ if (ret) {
+ ipa3_ctx->uc_ntn_ctx.uc_ready_cb = ipa_ready_cb;
+ ipa3_ctx->uc_ntn_ctx.priv = user_data;
+ return 0;
+ }
+
+ return -EEXIST;
+}
+
+void ipa3_ntn_uc_dereg_rdyCB(void)
+{
+ ipa3_ctx->uc_ntn_ctx.uc_ready_cb = NULL;
+ ipa3_ctx->uc_ntn_ctx.priv = NULL;
+}
+
+static void ipa3_uc_ntn_loaded_handler(void)
+{
+ if (!ipa3_ctx) {
+ IPAERR("IPA ctx is null\n");
+ return;
+ }
+
+ if (ipa3_ctx->uc_ntn_ctx.uc_ready_cb) {
+ ipa3_ctx->uc_ntn_ctx.uc_ready_cb(
+ ipa3_ctx->uc_ntn_ctx.priv);
+
+ ipa3_ctx->uc_ntn_ctx.uc_ready_cb =
+ NULL;
+ ipa3_ctx->uc_ntn_ctx.priv = NULL;
+ }
+}
+
int ipa3_ntn_init(void)
{
struct ipa3_uc_hdlrs uc_ntn_cbs = { 0 };
@@ -146,6 +188,8 @@
uc_ntn_cbs.ipa_uc_event_hdlr = ipa3_uc_ntn_event_handler;
uc_ntn_cbs.ipa_uc_event_log_info_hdlr =
ipa3_uc_ntn_event_log_info_handler;
+ uc_ntn_cbs.ipa_uc_loaded_hdlr =
+ ipa3_uc_ntn_loaded_handler;
ipa3_uc_register_handlers(IPA_HW_FEATURE_NTN, &uc_ntn_cbs);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h
index 79f0973..2e5a832 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h
@@ -383,22 +383,21 @@
* struct NTN3RxInfoData_t - NTN Structure holding the Rx pipe
* information
*
- *@max_outstanding_pkts: Number of outstanding packets in Rx
- * Ring
*@num_pkts_processed: Number of packets processed - cumulative
- *@rx_ring_rp_value: Read pointer last advertized to the WLAN FW
*
- *@rx_ind_ring_stats:
- *@bam_stats:
+ *@ring_stats:
+ *@gsi_stats:
*@num_db: Number of times the doorbell was rung
+ *@num_qmb_int_handled: Number of QMB interrupts handled
+ *@ipa_pipe_number: The IPA Rx/Tx pipe number.
*/
struct NTN3RxInfoData_t {
- u32 max_outstanding_pkts;
u32 num_pkts_processed;
- u32 rx_ring_rp_value;
- struct IpaHwRingStats_t rx_ind_ring_stats;
- struct IpaHwBamStats_t bam_stats;
- u32 num_db;
+ struct IpaHwRingStats_t ring_stats;
+ struct IpaHwBamStats_t gsi_stats;
+ u32 num_db;
+ u32 num_qmb_int_handled;
+ u32 ipa_pipe_number;
} __packed;
@@ -417,12 +416,11 @@
*/
struct NTN3TxInfoData_t {
u32 num_pkts_processed;
- u32 tail_ptr_val;
- u32 num_db_fired;
- struct IpaHwRingStats_t tx_comp_ring_stats;
- struct IpaHwBamStats_t bam_stats;
- u32 num_db;
- u32 num_qmb_int_handled;
+ struct IpaHwRingStats_t ring_stats;
+ struct IpaHwBamStats_t gsi_stats;
+ u32 num_db;
+ u32 num_qmb_int_handled;
+ u32 ipa_pipe_number;
} __packed;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
index 60dc04f..c97d2b3 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
@@ -1668,7 +1668,7 @@
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm, %d\n", clnt_hdl);
+ IPAERR_RL("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
@@ -1681,7 +1681,7 @@
ep = &ipa3_ctx->ep[clnt_hdl];
if (!(ep->uc_offload_state & IPA_WDI_CONNECTED)) {
- IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+ IPAERR_RL("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 0abe5fe..8fe15bc 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -21,11 +21,21 @@
#include "ipa_i.h"
#include "ipahal/ipahal.h"
#include "ipahal/ipahal_fltrt.h"
+#include "ipahal/ipahal_hw_stats.h"
#include "../ipa_rm_i.h"
#define IPA_V3_0_CLK_RATE_SVS (75 * 1000 * 1000UL)
#define IPA_V3_0_CLK_RATE_NOMINAL (150 * 1000 * 1000UL)
#define IPA_V3_0_CLK_RATE_TURBO (200 * 1000 * 1000UL)
+
+#define IPA_V3_5_CLK_RATE_SVS (200 * 1000 * 1000UL)
+#define IPA_V3_5_CLK_RATE_NOMINAL (400 * 1000 * 1000UL)
+#define IPA_V3_5_CLK_RATE_TURBO (42640 * 10 * 1000UL)
+
+#define IPA_V4_0_CLK_RATE_SVS (125 * 1000 * 1000UL)
+#define IPA_V4_0_CLK_RATE_NOMINAL (220 * 1000 * 1000UL)
+#define IPA_V4_0_CLK_RATE_TURBO (250 * 1000 * 1000UL)
+
#define IPA_V3_0_MAX_HOLB_TMR_VAL (4294967296 - 1)
#define IPA_V3_0_BW_THRESHOLD_TURBO_MBPS (1000)
@@ -43,6 +53,7 @@
#define IPA_BCR_REG_VAL_v3_0 (0x00000001)
#define IPA_BCR_REG_VAL_v3_5 (0x0000003B)
#define IPA_BCR_REG_VAL_v4_0 (0x00000039)
+#define IPA_CLKON_CFG_v4_0 (0x30000000)
#define IPA_AGGR_GRAN_MIN (1)
#define IPA_AGGR_GRAN_MAX (32)
#define IPA_EOT_COAL_GRAN_MIN (1)
@@ -630,7 +641,12 @@
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
{ 29, 14, 8, 8, IPA_EE_AP } },
-
+ /* Dummy consumer (pipe 31) is used in L2TP rt rule */
+ [IPA_3_0][IPA_CLIENT_DUMMY_CONS] = {
+ true, IPA_v3_0_GROUP_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR,
+ { 31, 31, 8, 8, IPA_EE_AP } },
/* IPA_3_5 */
[IPA_3_5][IPA_CLIENT_WLAN1_PROD] = {
@@ -777,6 +793,12 @@
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_PCIE,
{ 19, 13, 8, 8, IPA_EE_AP } },
+ /* Dummy consumer (pipe 31) is used in L2TP rt rule */
+ [IPA_3_5][IPA_CLIENT_DUMMY_CONS] = {
+ true, IPA_v3_5_GROUP_UL_DL, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_PCIE,
+ { 31, 31, 8, 8, IPA_EE_AP } },
/* IPA_3_5_MHI */
[IPA_3_5_MHI][IPA_CLIENT_USB_PROD] = {
@@ -927,6 +949,12 @@
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_PCIE,
{ 19, 13, 8, 8, IPA_EE_AP } },
+ /* Dummy consumer (pipe 31) is used in L2TP rt rule */
+ [IPA_3_5_MHI][IPA_CLIENT_DUMMY_CONS] = {
+ true, IPA_v3_5_MHI_GROUP_DMA, false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_PCIE,
+ { 31, 31, 8, 8, IPA_EE_AP } },
/* IPA_3_5_1 */
[IPA_3_5_1][IPA_CLIENT_WLAN1_PROD] = {
@@ -1072,7 +1100,13 @@
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
{ 11, 2, 4, 6, IPA_EE_AP } },
-
+ /* Dummy consumer (pipe 31) is used in L2TP rt rule */
+ [IPA_3_5_1][IPA_CLIENT_DUMMY_CONS] = {
+ true, IPA_v3_5_GROUP_UL_DL,
+ false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR,
+ { 31, 31, 8, 8, IPA_EE_AP } },
/* IPA_4_0 */
[IPA_4_0][IPA_CLIENT_WLAN1_PROD] = {
@@ -1108,7 +1142,7 @@
[IPA_4_0][IPA_CLIENT_ODU_PROD] = {
true, IPA_v4_0_GROUP_UL_DL,
true,
- IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+ IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
QMB_MASTER_SELECT_DDR,
{ 1, 0, 8, 16, IPA_EE_AP } },
[IPA_4_0][IPA_CLIENT_ETHERNET_PROD] = {
@@ -1272,6 +1306,13 @@
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_PCIE,
{ 21, 15, 9, 9, IPA_EE_AP } },
+ /* Dummy consumer (pipe 31) is used in L2TP rt rule */
+ [IPA_4_0][IPA_CLIENT_DUMMY_CONS] = {
+ true, IPA_v4_0_GROUP_UL_DL,
+ false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR,
+ { 31, 31, 8, 8, IPA_EE_AP } },
/* IPA_4_0_MHI */
[IPA_4_0_MHI][IPA_CLIENT_USB_PROD] = {
@@ -1451,8 +1492,13 @@
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_PCIE,
{ 21, 15, 9, 9, IPA_EE_AP } },
-
-
+ /* Dummy consumer (pipe 31) is used in L2TP rt rule */
+ [IPA_4_0_MHI][IPA_CLIENT_DUMMY_CONS] = {
+ true, IPA_v4_0_GROUP_UL_DL,
+ false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR,
+ { 31, 31, 8, 8, IPA_EE_AP } },
};
static struct msm_bus_vectors ipa_init_vectors_v3_0[] = {
@@ -1502,43 +1548,6 @@
.name = "ipa",
};
-void ipa3_active_clients_lock(void)
-{
- unsigned long flags;
-
- mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
- spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients.spinlock, flags);
- ipa3_ctx->ipa3_active_clients.mutex_locked = true;
- spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients.spinlock, flags);
-}
-
-int ipa3_active_clients_trylock(unsigned long *flags)
-{
- spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients.spinlock, *flags);
- if (ipa3_ctx->ipa3_active_clients.mutex_locked) {
- spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients.spinlock,
- *flags);
- return 0;
- }
-
- return 1;
-}
-
-void ipa3_active_clients_trylock_unlock(unsigned long *flags)
-{
- spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients.spinlock, *flags);
-}
-
-void ipa3_active_clients_unlock(void)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients.spinlock, flags);
- ipa3_ctx->ipa3_active_clients.mutex_locked = false;
- spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients.spinlock, flags);
- mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
-}
-
/**
* ipa3_get_clients_from_rm_resource() - get IPA clients which are related to an
* IPA_RM resource
@@ -1782,16 +1791,8 @@
enum ipa_client_type client;
struct ipa_ep_cfg_ctrl suspend;
int ipa_ep_idx;
- unsigned long flags;
struct ipa_active_client_logging_info log_info;
- if (ipa3_active_clients_trylock(&flags) == 0)
- return -EPERM;
- if (ipa3_ctx->ipa3_active_clients.cnt == 1) {
- res = -EPERM;
- goto bail;
- }
-
memset(&clients, 0, sizeof(clients));
res = ipa3_get_clients_from_rm_resource(resource, &clients);
if (res) {
@@ -1830,14 +1831,11 @@
if (res == 0) {
IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info,
ipa_rm_resource_str(resource));
- ipa3_active_clients_log_dec(&log_info, true);
- ipa3_ctx->ipa3_active_clients.cnt--;
- IPADBG("active clients = %d\n",
- ipa3_ctx->ipa3_active_clients.cnt);
+ /* before gating IPA clocks do TAG process */
+ ipa3_ctx->tag_process_before_gating = true;
+ ipa3_dec_client_disable_clks_no_block(&log_info);
}
bail:
- ipa3_active_clients_trylock_unlock(&flags);
-
return res;
}
@@ -1991,7 +1989,7 @@
*/
int ipa3_cfg_filter(u32 disable)
{
- IPAERR("Filter disable is not supported!\n");
+ IPAERR_RL("Filter disable is not supported!\n");
return -EPERM;
}
@@ -2055,6 +2053,16 @@
ipahal_write_reg(IPA_BCR, val);
+ if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+ struct ipahal_reg_tx_cfg cfg;
+
+ ipahal_write_reg(IPA_CLKON_CFG, IPA_CLKON_CFG_v4_0);
+ ipahal_read_reg_fields(IPA_TX_CFG, &cfg);
+ /* disable PA_MASK_EN to allow holb drop */
+ cfg.pa_mask_en = 0;
+ ipahal_write_reg_fields(IPA_TX_CFG, &cfg);
+ }
+
ipa3_cfg_qsb();
return 0;
@@ -2116,7 +2124,7 @@
int ipa_ep_idx;
if (client >= IPA_CLIENT_MAX || client < 0) {
- IPAERR("Bad client number! client =%d\n", client);
+ IPAERR_RL("Bad client number! client =%d\n", client);
return IPA_EP_NOT_ALLOCATED;
}
@@ -2125,7 +2133,8 @@
ipa_ep_idx = ipa3_ep_mapping[ipa3_get_hw_type_index()][client].
ipa_gsi_ep_info.ipa_ep_num;
- if (ipa_ep_idx < 0 || ipa_ep_idx >= IPA3_MAX_NUM_PIPES)
+ if (ipa_ep_idx < 0 || (ipa_ep_idx >= IPA3_MAX_NUM_PIPES
+ && client != IPA_CLIENT_DUMMY_CONS))
return IPA_EP_NOT_ALLOCATED;
return ipa_ep_idx;
@@ -2440,8 +2449,8 @@
* @clnt_hdl: [in] opaque client handle assigned by IPA to client
* @ipa_ep_cfg: [in] IPA end-point configuration params
*
- * This includes nat, header, mode, aggregation and route settings and is a one
- * shot API to configure the IPA end-point fully
+ * This includes nat, IPv6CT, header, mode, aggregation and route settings and
+ * is a one shot API to configure the IPA end-point fully
*
* Returns: 0 on success, negative on failure
*
@@ -2478,6 +2487,13 @@
if (result)
return result;
+ if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+ result = ipa3_cfg_ep_conn_track(clnt_hdl,
+ &ipa_ep_cfg->conn_track);
+ if (result)
+ return result;
+ }
+
result = ipa3_cfg_ep_mode(clnt_hdl, &ipa_ep_cfg->mode);
if (result)
return result;
@@ -2503,7 +2519,7 @@
return 0;
}
-const char *ipa3_get_nat_en_str(enum ipa_nat_en_type nat_en)
+static const char *ipa3_get_nat_en_str(enum ipa_nat_en_type nat_en)
{
switch (nat_en) {
case (IPA_BYPASS_NAT):
@@ -2517,10 +2533,22 @@
return "undefined";
}
+static const char *ipa3_get_ipv6ct_en_str(enum ipa_ipv6ct_en_type ipv6ct_en)
+{
+ switch (ipv6ct_en) {
+ case (IPA_BYPASS_IPV6CT):
+ return "ipv6ct disabled";
+ case (IPA_ENABLE_IPV6CT):
+ return "ipv6ct enabled";
+ }
+
+ return "undefined";
+}
+
/**
* ipa3_cfg_ep_nat() - IPA end-point NAT configuration
* @clnt_hdl: [in] opaque client handle assigned by IPA to client
- * @ipa_ep_cfg: [in] IPA end-point configuration params
+ * @ep_nat: [in] IPA NAT end-point configuration params
*
* Returns: 0 on success, negative on failure
*
@@ -2558,6 +2586,49 @@
return 0;
}
+/**
+ * ipa3_cfg_ep_conn_track() - IPA end-point IPv6CT configuration
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ * @ep_conn_track: [in] IPA IPv6CT end-point configuration params
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_cfg_ep_conn_track(u32 clnt_hdl,
+ const struct ipa_ep_cfg_conn_track *ep_conn_track)
+{
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_conn_track == NULL) {
+ IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+ clnt_hdl,
+ ipa3_ctx->ep[clnt_hdl].valid);
+ return -EINVAL;
+ }
+
+ if (IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client)) {
+ IPAERR("IPv6CT does not apply to IPA out EP %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ IPADBG("pipe=%d, conn_track_en=%d(%s)\n",
+ clnt_hdl,
+ ep_conn_track->conn_track_en,
+ ipa3_get_ipv6ct_en_str(ep_conn_track->conn_track_en));
+
+ /* copy over EP cfg */
+ ipa3_ctx->ep[clnt_hdl].cfg.conn_track = *ep_conn_track;
+
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ ipahal_write_reg_n_fields(IPA_ENDP_INIT_CONN_TRACK_n, clnt_hdl,
+ ep_conn_track);
+
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ return 0;
+}
+
/**
* ipa3_cfg_ep_status() - IPA end-point status configuration
@@ -2892,7 +2963,8 @@
if (ep_mode->mode == IPA_DMA)
type = IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY;
else
- type = IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP;
+ type =
+ IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP;
IPADBG(" set sequencers to sequance 0x%x, ep = %d\n", type,
clnt_hdl);
@@ -3210,19 +3282,19 @@
int result = -EINVAL;
if (param_in->client >= IPA_CLIENT_MAX) {
- IPAERR("bad parm client:%d\n", param_in->client);
+ IPAERR_RL("bad parm client:%d\n", param_in->client);
goto fail;
}
ipa_ep_idx = ipa3_get_ep_mapping(param_in->client);
if (ipa_ep_idx == -1) {
- IPAERR("Invalid client.\n");
+ IPAERR_RL("Invalid client.\n");
goto fail;
}
ep = &ipa3_ctx->ep[ipa_ep_idx];
if (!ep->valid) {
- IPAERR("EP not allocated.\n");
+ IPAERR_RL("EP not allocated.\n");
goto fail;
}
@@ -3236,7 +3308,7 @@
ipa3_ctx->ep[ipa_ep_idx].cfg.meta = meta;
result = ipa3_write_qmapid_wdi_pipe(ipa_ep_idx, meta.qmap_id);
if (result)
- IPAERR("qmap_id %d write failed on ep=%d\n",
+ IPAERR_RL("qmap_id %d write failed on ep=%d\n",
meta.qmap_id, ipa_ep_idx);
result = 0;
}
@@ -3657,13 +3729,24 @@
int ipa3_controller_static_bind(struct ipa3_controller *ctrl,
enum ipa_hw_type hw_type)
{
+ if (hw_type >= IPA_HW_v4_0) {
+ ctrl->ipa_clk_rate_turbo = IPA_V4_0_CLK_RATE_TURBO;
+ ctrl->ipa_clk_rate_nominal = IPA_V4_0_CLK_RATE_NOMINAL;
+ ctrl->ipa_clk_rate_svs = IPA_V4_0_CLK_RATE_SVS;
+ } else if (hw_type >= IPA_HW_v3_5) {
+ ctrl->ipa_clk_rate_turbo = IPA_V3_5_CLK_RATE_TURBO;
+ ctrl->ipa_clk_rate_nominal = IPA_V3_5_CLK_RATE_NOMINAL;
+ ctrl->ipa_clk_rate_svs = IPA_V3_5_CLK_RATE_SVS;
+ } else {
+ ctrl->ipa_clk_rate_turbo = IPA_V3_0_CLK_RATE_TURBO;
+ ctrl->ipa_clk_rate_nominal = IPA_V3_0_CLK_RATE_NOMINAL;
+ ctrl->ipa_clk_rate_svs = IPA_V3_0_CLK_RATE_SVS;
+ }
+
ctrl->ipa_init_rt4 = _ipa_init_rt4_v3;
ctrl->ipa_init_rt6 = _ipa_init_rt6_v3;
ctrl->ipa_init_flt4 = _ipa_init_flt4_v3;
ctrl->ipa_init_flt6 = _ipa_init_flt6_v3;
- ctrl->ipa_clk_rate_turbo = IPA_V3_0_CLK_RATE_TURBO;
- ctrl->ipa_clk_rate_nominal = IPA_V3_0_CLK_RATE_NOMINAL;
- ctrl->ipa_clk_rate_svs = IPA_V3_0_CLK_RATE_SVS;
ctrl->ipa3_read_ep_reg = _ipa_read_ep_reg_v3_0;
ctrl->ipa3_commit_flt = __ipa_commit_flt_v3;
ctrl->ipa3_commit_rt = __ipa_commit_rt_v3;
@@ -3979,7 +4062,7 @@
IPAHAL_FULL_PIPELINE_CLEAR;
reg_write_agg_close.offset =
ipahal_get_reg_ofst(IPA_AGGR_FORCE_CLOSE);
- ipahal_get_aggr_force_close_valmask(1<<i, &valmask);
+ ipahal_get_aggr_force_close_valmask(i, &valmask);
reg_write_agg_close.value = valmask.val;
reg_write_agg_close.value_mask = valmask.mask;
cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
@@ -4238,6 +4321,7 @@
api_ctrl->ipa_disable_endpoint = NULL;
api_ctrl->ipa_cfg_ep = ipa3_cfg_ep;
api_ctrl->ipa_cfg_ep_nat = ipa3_cfg_ep_nat;
+ api_ctrl->ipa_cfg_ep_conn_track = ipa3_cfg_ep_conn_track;
api_ctrl->ipa_cfg_ep_hdr = ipa3_cfg_ep_hdr;
api_ctrl->ipa_cfg_ep_hdr_ext = ipa3_cfg_ep_hdr_ext;
api_ctrl->ipa_cfg_ep_mode = ipa3_cfg_ep_mode;
@@ -4372,6 +4456,7 @@
api_ctrl->ipa_create_wdi_mapping = ipa3_create_wdi_mapping;
api_ctrl->ipa_get_gsi_ep_info = ipa3_get_gsi_ep_info;
api_ctrl->ipa_stop_gsi_channel = ipa3_stop_gsi_channel;
+ api_ctrl->ipa_start_gsi_channel = ipa3_start_gsi_channel;
api_ctrl->ipa_register_ipa_ready_cb = ipa3_register_ipa_ready_cb;
api_ctrl->ipa_inc_client_enable_clks = ipa3_inc_client_enable_clks;
api_ctrl->ipa_dec_client_disable_clks = ipa3_dec_client_disable_clks;
@@ -4391,6 +4476,8 @@
api_ctrl->ipa_tear_down_uc_offload_pipes =
ipa3_tear_down_uc_offload_pipes;
api_ctrl->ipa_get_pdev = ipa3_get_pdev;
+ api_ctrl->ipa_ntn_uc_reg_rdyCB = ipa3_ntn_uc_reg_rdyCB;
+ api_ctrl->ipa_ntn_uc_dereg_rdyCB = ipa3_ntn_uc_dereg_rdyCB;
return 0;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile b/drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile
index b945eb06..67e491b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_IPA3) += ipa_hal.o
-ipa_hal-y := ipahal.o ipahal_reg.o ipahal_fltrt.o
+ipa_hal-y := ipahal.o ipahal_reg.o ipahal_fltrt.o ipahal_hw_stats.o
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
index 6f46ebf..56fed2a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
@@ -15,6 +15,8 @@
#include "ipahal_i.h"
#include "ipahal_reg_i.h"
#include "ipahal_fltrt_i.h"
+#include "ipahal_hw_stats_i.h"
+
struct ipahal_context *ipahal_ctx;
@@ -45,11 +47,9 @@
__stringify(IPAHAL_PKT_STATUS_EXCEPTION_FRAG_RULE_MISS),
__stringify(IPAHAL_PKT_STATUS_EXCEPTION_SW_FILT),
__stringify(IPAHAL_PKT_STATUS_EXCEPTION_NAT),
+ __stringify(IPAHAL_PKT_STATUS_EXCEPTION_IPV6CT),
};
-#define IPAHAL_MEM_ALLOC(__size, __is_atomic_ctx) \
- (kzalloc((__size), ((__is_atomic_ctx)?GFP_ATOMIC:GFP_KERNEL)))
-
static u16 ipahal_imm_cmd_get_opcode(enum ipahal_imm_cmd_name cmd);
@@ -838,10 +838,13 @@
{
enum ipahal_pkt_status_opcode opcode = 0;
enum ipahal_pkt_status_exception exception_type = 0;
+ bool is_ipv6;
struct ipa_pkt_status_hw *hw_status =
(struct ipa_pkt_status_hw *)unparsed_status;
+ is_ipv6 = (hw_status->status_mask & 0x80) ? false : true;
+
status->pkt_len = hw_status->pkt_len;
status->endp_src_idx = hw_status->endp_src_idx;
status->endp_dest_idx = hw_status->endp_dest_idx;
@@ -933,7 +936,10 @@
exception_type = IPAHAL_PKT_STATUS_EXCEPTION_SW_FILT;
break;
case 64:
- exception_type = IPAHAL_PKT_STATUS_EXCEPTION_NAT;
+ if (is_ipv6)
+ exception_type = IPAHAL_PKT_STATUS_EXCEPTION_IPV6CT;
+ else
+ exception_type = IPAHAL_PKT_STATUS_EXCEPTION_NAT;
break;
default:
IPAHAL_ERR("unsupported Status Exception type 0x%x\n",
@@ -1153,12 +1159,14 @@
* @phys_base: memory location in DDR
* @hdr_base_addr: base address in table
* @offset_entry: offset from hdr_base_addr in table
+ * @l2tp_params: l2tp parameters
*/
static int ipahal_cp_proc_ctx_to_hw_buff_v3(enum ipa_hdr_proc_type type,
void *const base, u32 offset,
u32 hdr_len, bool is_hdr_proc_ctx,
dma_addr_t phys_base, u32 hdr_base_addr,
- struct ipa_hdr_offset_entry *offset_entry){
+ struct ipa_hdr_offset_entry *offset_entry,
+ struct ipa_l2tp_hdr_proc_ctx_params l2tp_params){
if (type == IPA_HDR_PROC_NONE) {
struct ipa_hw_hdr_proc_ctx_add_hdr_seq *ctx;
@@ -1174,6 +1182,69 @@
ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
ctx->end.length = 0;
ctx->end.value = 0;
+ } else if (type == IPA_HDR_PROC_L2TP_HEADER_ADD) {
+ struct ipa_hw_hdr_proc_ctx_add_l2tp_hdr_cmd_seq *ctx;
+
+ ctx = (struct ipa_hw_hdr_proc_ctx_add_l2tp_hdr_cmd_seq *)
+ (base + offset);
+ ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
+ ctx->hdr_add.tlv.length = 1;
+ ctx->hdr_add.tlv.value = hdr_len;
+ ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? phys_base :
+ hdr_base_addr + offset_entry->offset;
+ IPAHAL_DBG("header address 0x%x\n",
+ ctx->hdr_add.hdr_addr);
+ ctx->l2tp_params.tlv.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD;
+ ctx->l2tp_params.tlv.length = 1;
+ ctx->l2tp_params.tlv.value =
+ IPA_HDR_UCP_L2TP_HEADER_ADD;
+ ctx->l2tp_params.l2tp_params.eth_hdr_retained =
+ l2tp_params.hdr_add_param.eth_hdr_retained;
+ ctx->l2tp_params.l2tp_params.input_ip_version =
+ l2tp_params.hdr_add_param.input_ip_version;
+ ctx->l2tp_params.l2tp_params.output_ip_version =
+ l2tp_params.hdr_add_param.output_ip_version;
+
+ IPAHAL_DBG("command id %d\n", ctx->l2tp_params.tlv.value);
+ ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
+ ctx->end.length = 0;
+ ctx->end.value = 0;
+ } else if (type == IPA_HDR_PROC_L2TP_HEADER_REMOVE) {
+ struct ipa_hw_hdr_proc_ctx_remove_l2tp_hdr_cmd_seq *ctx;
+
+ ctx = (struct ipa_hw_hdr_proc_ctx_remove_l2tp_hdr_cmd_seq *)
+ (base + offset);
+ ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
+ ctx->hdr_add.tlv.length = 1;
+ ctx->hdr_add.tlv.value = hdr_len;
+ ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? phys_base :
+ hdr_base_addr + offset_entry->offset;
+ IPAHAL_DBG("header address 0x%x length %d\n",
+ ctx->hdr_add.hdr_addr, ctx->hdr_add.tlv.value);
+ ctx->l2tp_params.tlv.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD;
+ ctx->l2tp_params.tlv.length = 1;
+ ctx->l2tp_params.tlv.value =
+ IPA_HDR_UCP_L2TP_HEADER_REMOVE;
+ ctx->l2tp_params.l2tp_params.hdr_len_remove =
+ l2tp_params.hdr_remove_param.hdr_len_remove;
+ ctx->l2tp_params.l2tp_params.eth_hdr_retained =
+ l2tp_params.hdr_remove_param.eth_hdr_retained;
+ ctx->l2tp_params.l2tp_params.hdr_ofst_pkt_size_valid =
+ l2tp_params.hdr_remove_param.hdr_ofst_pkt_size_valid;
+ ctx->l2tp_params.l2tp_params.hdr_ofst_pkt_size =
+ l2tp_params.hdr_remove_param.hdr_ofst_pkt_size;
+ ctx->l2tp_params.l2tp_params.hdr_endianness =
+ l2tp_params.hdr_remove_param.hdr_endianness;
+ IPAHAL_DBG("hdr ofst valid: %d, hdr ofst pkt size: %d\n",
+ ctx->l2tp_params.l2tp_params.hdr_ofst_pkt_size_valid,
+ ctx->l2tp_params.l2tp_params.hdr_ofst_pkt_size);
+ IPAHAL_DBG("endianness: %d\n",
+ ctx->l2tp_params.l2tp_params.hdr_endianness);
+
+ IPAHAL_DBG("command id %d\n", ctx->l2tp_params.tlv.value);
+ ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
+ ctx->end.length = 0;
+ ctx->end.value = 0;
} else {
struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq *ctx;
@@ -1242,7 +1313,8 @@
void *const base, u32 offset, u32 hdr_len,
bool is_hdr_proc_ctx, dma_addr_t phys_base,
u32 hdr_base_addr,
- struct ipa_hdr_offset_entry *offset_entry);
+ struct ipa_hdr_offset_entry *offset_entry,
+ struct ipa_l2tp_hdr_proc_ctx_params l2tp_params);
int (*ipahal_get_proc_ctx_needed_len)(enum ipa_hdr_proc_type type);
};
@@ -1307,11 +1379,13 @@
* @phys_base: memory location in DDR
* @hdr_base_addr: base address in table
* @offset_entry: offset from hdr_base_addr in table
+ * @l2tp_params: l2tp parameters
*/
int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type,
void *const base, u32 offset, u32 hdr_len,
bool is_hdr_proc_ctx, dma_addr_t phys_base,
- u32 hdr_base_addr, struct ipa_hdr_offset_entry *offset_entry)
+ u32 hdr_base_addr, struct ipa_hdr_offset_entry *offset_entry,
+ struct ipa_l2tp_hdr_proc_ctx_params l2tp_params)
{
IPAHAL_DBG(
"type %d, base %p, offset %d, hdr_len %d, is_hdr_proc_ctx %d, hdr_base_addr %d, offset_entry %p\n"
@@ -1332,7 +1406,7 @@
return hdr_funcs.ipahal_cp_proc_ctx_to_hw_buff(type, base, offset,
hdr_len, is_hdr_proc_ctx, phys_base,
- hdr_base_addr, offset_entry);
+ hdr_base_addr, offset_entry, l2tp_params);
}
/*
@@ -1440,6 +1514,12 @@
goto bail_free_ctx;
}
+ if (ipahal_hw_stats_init(ipa_hw_type)) {
+ IPAHAL_ERR("failed to init ipahal hw stats\n");
+ result = -EFAULT;
+ goto bail_free_ctx;
+ }
+
ipahal_debugfs_init();
return 0;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
index f8bdc2c..56b884b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
@@ -434,7 +434,12 @@
IPAHAL_PKT_STATUS_EXCEPTION_PACKET_THRESHOLD,
IPAHAL_PKT_STATUS_EXCEPTION_FRAG_RULE_MISS,
IPAHAL_PKT_STATUS_EXCEPTION_SW_FILT,
+ /*
+ * NAT and IPv6CT have the same value at HW.
+ * NAT for IPv4 and IPv6CT for IPv6 exceptions
+ */
IPAHAL_PKT_STATUS_EXCEPTION_NAT,
+ IPAHAL_PKT_STATUS_EXCEPTION_IPV6CT,
IPAHAL_PKT_STATUS_EXCEPTION_MAX,
};
@@ -625,12 +630,14 @@
* @phys_base: memory location in DDR
* @hdr_base_addr: base address in table
* @offset_entry: offset from hdr_base_addr in table
+ * @l2tp_params: l2tp parameters
*/
int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type,
void *base, u32 offset, u32 hdr_len,
bool is_hdr_proc_ctx, dma_addr_t phys_base,
u32 hdr_base_addr,
- struct ipa_hdr_offset_entry *offset_entry);
+ struct ipa_hdr_offset_entry *offset_entry,
+ struct ipa_l2tp_hdr_proc_ctx_params l2tp_params);
/*
* ipahal_get_proc_ctx_needed_len() - calculates the needed length for addition
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
index 4f20e0f..2253b4b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
@@ -797,6 +797,38 @@
ihl_ofst_meq32++;
}
+ if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+ ihl_ofst_meq32) || IPA_IS_RAN_OUT_OF_EQ(
+ ipa3_0_ihl_ofst_meq32, ihl_ofst_meq32 + 1)) {
+ IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32 + 1]);
+ /* populate first ihl meq eq */
+ extra = ipa_write_8(8, extra);
+ rest = ipa_write_8(attrib->dst_mac_addr_mask[3], rest);
+ rest = ipa_write_8(attrib->dst_mac_addr_mask[2], rest);
+ rest = ipa_write_8(attrib->dst_mac_addr_mask[1], rest);
+ rest = ipa_write_8(attrib->dst_mac_addr_mask[0], rest);
+ rest = ipa_write_8(attrib->dst_mac_addr[3], rest);
+ rest = ipa_write_8(attrib->dst_mac_addr[2], rest);
+ rest = ipa_write_8(attrib->dst_mac_addr[1], rest);
+ rest = ipa_write_8(attrib->dst_mac_addr[0], rest);
+ /* populate second ihl meq eq */
+ extra = ipa_write_8(12, extra);
+ rest = ipa_write_16(0, rest);
+ rest = ipa_write_8(attrib->dst_mac_addr_mask[5], rest);
+ rest = ipa_write_8(attrib->dst_mac_addr_mask[4], rest);
+ rest = ipa_write_16(0, rest);
+ rest = ipa_write_8(attrib->dst_mac_addr[5], rest);
+ rest = ipa_write_8(attrib->dst_mac_addr[4], rest);
+ ihl_ofst_meq32 += 2;
+ }
+
if (attrib->attrib_mask & IPA_FLT_META_DATA) {
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_METADATA_COMPARE);
rest = ipa_write_32(attrib->meta_data_mask, rest);
@@ -1103,6 +1135,38 @@
ihl_ofst_meq32++;
}
+ if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+ ihl_ofst_meq32) || IPA_IS_RAN_OUT_OF_EQ(
+ ipa3_0_ihl_ofst_meq32, ihl_ofst_meq32 + 1)) {
+ IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32 + 1]);
+ /* populate first ihl meq eq */
+ extra = ipa_write_8(8, extra);
+ rest = ipa_write_8(attrib->dst_mac_addr_mask[3], rest);
+ rest = ipa_write_8(attrib->dst_mac_addr_mask[2], rest);
+ rest = ipa_write_8(attrib->dst_mac_addr_mask[1], rest);
+ rest = ipa_write_8(attrib->dst_mac_addr_mask[0], rest);
+ rest = ipa_write_8(attrib->dst_mac_addr[3], rest);
+ rest = ipa_write_8(attrib->dst_mac_addr[2], rest);
+ rest = ipa_write_8(attrib->dst_mac_addr[1], rest);
+ rest = ipa_write_8(attrib->dst_mac_addr[0], rest);
+ /* populate second ihl meq eq */
+ extra = ipa_write_8(12, extra);
+ rest = ipa_write_16(0, rest);
+ rest = ipa_write_8(attrib->dst_mac_addr_mask[5], rest);
+ rest = ipa_write_8(attrib->dst_mac_addr_mask[4], rest);
+ rest = ipa_write_16(0, rest);
+ rest = ipa_write_8(attrib->dst_mac_addr[5], rest);
+ rest = ipa_write_8(attrib->dst_mac_addr[4], rest);
+ ihl_ofst_meq32 += 2;
+ }
+
if (attrib->attrib_mask & IPA_FLT_META_DATA) {
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_METADATA_COMPARE);
rest = ipa_write_32(attrib->meta_data_mask, rest);
@@ -1613,6 +1677,40 @@
ofst_meq128++;
}
+ if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+ ihl_ofst_meq32) || IPA_IS_RAN_OUT_OF_EQ(
+ ipa3_0_ihl_ofst_meq32, ihl_ofst_meq32 + 1)) {
+ IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32 + 1]);
+ /* populate the first ihl meq 32 eq */
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 8;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+ (attrib->dst_mac_addr_mask[3] & 0xFF) |
+ ((attrib->dst_mac_addr_mask[2] << 8) & 0xFF00) |
+ ((attrib->dst_mac_addr_mask[1] << 16) & 0xFF0000) |
+ ((attrib->dst_mac_addr_mask[0] << 24) & 0xFF000000);
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+ (attrib->dst_mac_addr[3] & 0xFF) |
+ ((attrib->dst_mac_addr[2] << 8) & 0xFF00) |
+ ((attrib->dst_mac_addr[1] << 16) & 0xFF0000) |
+ ((attrib->dst_mac_addr[0] << 24) & 0xFF000000);
+ /* populate the second ihl meq 32 eq */
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32 + 1].offset = 12;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32 + 1].mask =
+ ((attrib->dst_mac_addr_mask[5] << 16) & 0xFF0000) |
+ ((attrib->dst_mac_addr_mask[4] << 24) & 0xFF000000);
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32 + 1].value =
+ ((attrib->dst_mac_addr[5] << 16) & 0xFF0000) |
+ ((attrib->dst_mac_addr[4] << 24) & 0xFF000000);
+ ihl_ofst_meq32 += 2;
+ }
+
if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
IPAHAL_ERR("ran out of meq32 eq\n");
@@ -1976,6 +2074,40 @@
ofst_meq128++;
}
+ if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+ ihl_ofst_meq32) || IPA_IS_RAN_OUT_OF_EQ(
+ ipa3_0_ihl_ofst_meq32, ihl_ofst_meq32 + 1)) {
+ IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32 + 1]);
+ /* populate the first ihl meq 32 eq */
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 8;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+ (attrib->dst_mac_addr_mask[3] & 0xFF) |
+ ((attrib->dst_mac_addr_mask[2] << 8) & 0xFF00) |
+ ((attrib->dst_mac_addr_mask[1] << 16) & 0xFF0000) |
+ ((attrib->dst_mac_addr_mask[0] << 24) & 0xFF000000);
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+ (attrib->dst_mac_addr[3] & 0xFF) |
+ ((attrib->dst_mac_addr[2] << 8) & 0xFF00) |
+ ((attrib->dst_mac_addr[1] << 16) & 0xFF0000) |
+ ((attrib->dst_mac_addr[0] << 24) & 0xFF000000);
+ /* populate the second ihl meq 32 eq */
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32 + 1].offset = 12;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32 + 1].mask =
+ ((attrib->dst_mac_addr_mask[5] << 16) & 0xFF0000) |
+ ((attrib->dst_mac_addr_mask[4] << 24) & 0xFF000000);
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32 + 1].value =
+ ((attrib->dst_mac_addr[5] << 16) & 0xFF0000) |
+ ((attrib->dst_mac_addr[4] << 24) & 0xFF000000);
+ ihl_ofst_meq32 += 2;
+ }
+
if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
IPAHAL_ERR("ran out of meq32 eq\n");
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.c
new file mode 100644
index 0000000..c711ff4
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.c
@@ -0,0 +1,557 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "ipahal_hw_stats.h"
+#include "ipahal_hw_stats_i.h"
+#include "ipahal_i.h"
+
+struct ipahal_hw_stats_obj {
+ struct ipahal_stats_init_pyld *(*generate_init_pyld)(void *params,
+ bool is_atomic_ctx);
+ int (*get_offset)(void *params, struct ipahal_stats_offset *out);
+ int (*parse_stats)(void *init_params, void *raw_stats,
+ void *parsed_stats);
+};
+
+static int _count_ones(u32 number)
+{
+ int count = 0;
+
+ while (number) {
+ count++;
+ number = number & (number - 1);
+ }
+
+ return count;
+}
+
+static struct ipahal_stats_init_pyld *ipahal_generate_init_pyld_quota(
+ void *params, bool is_atomic_ctx)
+{
+ struct ipahal_stats_init_pyld *pyld;
+ struct ipahal_stats_init_quota *in =
+ (struct ipahal_stats_init_quota *)params;
+ int entries = _count_ones(in->enabled_bitmask);
+
+ IPAHAL_DBG_LOW("entries = %d\n", entries);
+ pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) +
+ entries * sizeof(struct ipahal_stats_quota_hw), is_atomic_ctx);
+ if (!pyld) {
+ IPAHAL_ERR("no mem\n");
+ return NULL;
+ }
+
+ pyld->len = entries * sizeof(struct ipahal_stats_quota_hw);
+ return pyld;
+}
+
+static int ipahal_get_offset_quota(void *params,
+ struct ipahal_stats_offset *out)
+{
+ struct ipahal_stats_get_offset_quota *in =
+ (struct ipahal_stats_get_offset_quota *)params;
+ int entries = _count_ones(in->init.enabled_bitmask);
+
+ IPAHAL_DBG_LOW("\n");
+ out->offset = 0;
+ out->size = entries * sizeof(struct ipahal_stats_quota_hw);
+
+ return 0;
+}
+
+static int ipahal_parse_stats_quota(void *init_params, void *raw_stats,
+ void *parsed_stats)
+{
+ struct ipahal_stats_init_quota *init =
+ (struct ipahal_stats_init_quota *)init_params;
+ struct ipahal_stats_quota_hw *raw_hw =
+ (struct ipahal_stats_quota_hw *)raw_stats;
+ struct ipahal_stats_quota_all *out =
+ (struct ipahal_stats_quota_all *)parsed_stats;
+ int stat_idx = 0;
+ int i;
+
+ memset(out, 0, sizeof(*out));
+ IPAHAL_DBG_LOW("\n");
+ for (i = 0; i < IPAHAL_MAX_PIPES; i++) {
+ if (init->enabled_bitmask & (1 << i)) {
+ IPAHAL_DBG_LOW("pipe %d stat_idx %d\n", i, stat_idx);
+ out->stats[i].num_ipv4_bytes =
+ raw_hw[stat_idx].num_ipv4_bytes;
+ out->stats[i].num_ipv4_pkts =
+ raw_hw[stat_idx].num_ipv4_pkts;
+ out->stats[i].num_ipv6_pkts =
+ raw_hw[stat_idx].num_ipv6_pkts;
+ out->stats[i].num_ipv6_bytes =
+ raw_hw[stat_idx].num_ipv6_bytes;
+ stat_idx++;
+ }
+ }
+
+ return 0;
+}
+
+static struct ipahal_stats_init_pyld *ipahal_generate_init_pyld_tethering(
+ void *params, bool is_atomic_ctx)
+{
+ struct ipahal_stats_init_pyld *pyld;
+ struct ipahal_stats_init_tethering *in =
+ (struct ipahal_stats_init_tethering *)params;
+ int hdr_entries = _count_ones(in->prod_bitmask);
+ int entries = 0;
+ int i;
+ void *pyld_ptr;
+ u32 incremental_offset;
+
+ IPAHAL_DBG_LOW("prod entries = %d\n", hdr_entries);
+ for (i = 0; i < sizeof(in->prod_bitmask) * 8; i++) {
+ if (in->prod_bitmask & (1 << i)) {
+ if (in->cons_bitmask[i] == 0) {
+ IPAHAL_ERR("no cons bitmask for prod %d\n", i);
+ return NULL;
+ }
+ entries += _count_ones(in->cons_bitmask[i]);
+ }
+ }
+ IPAHAL_DBG_LOW("sum all entries = %d\n", entries);
+
+ pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) +
+ hdr_entries * sizeof(struct ipahal_stats_tethering_hdr_hw) +
+ entries * sizeof(struct ipahal_stats_tethering_hw),
+ is_atomic_ctx);
+ if (!pyld) {
+ IPAHAL_ERR("no mem\n");
+ return NULL;
+ }
+
+ pyld->len = hdr_entries * sizeof(struct ipahal_stats_tethering_hdr_hw) +
+ entries * sizeof(struct ipahal_stats_tethering_hw);
+
+ pyld_ptr = pyld->data;
+ incremental_offset =
+ (hdr_entries * sizeof(struct ipahal_stats_tethering_hdr_hw))
+ / 8;
+ for (i = 0; i < sizeof(in->prod_bitmask) * 8; i++) {
+ if (in->prod_bitmask & (1 << i)) {
+ struct ipahal_stats_tethering_hdr_hw *hdr = pyld_ptr;
+
+ hdr->dst_mask = in->cons_bitmask[i];
+ hdr->offset = incremental_offset;
+ IPAHAL_DBG_LOW("hdr->dst_mask=0x%x\n", hdr->dst_mask);
+ IPAHAL_DBG_LOW("hdr->offset=0x%x\n", hdr->offset);
+ /* add the stats entry */
+ incremental_offset += _count_ones(in->cons_bitmask[i]) *
+ sizeof(struct ipahal_stats_tethering_hw) / 8;
+ pyld_ptr += sizeof(*hdr);
+ }
+ }
+
+ return pyld;
+}
+
+static int ipahal_get_offset_tethering(void *params,
+ struct ipahal_stats_offset *out)
+{
+ struct ipahal_stats_get_offset_tethering *in =
+ (struct ipahal_stats_get_offset_tethering *)params;
+ int entries = 0;
+ int i;
+
+ for (i = 0; i < sizeof(in->init.prod_bitmask) * 8; i++) {
+ if (in->init.prod_bitmask & (1 << i)) {
+ if (in->init.cons_bitmask[i] == 0) {
+ IPAHAL_ERR("no cons bitmask for prod %d\n", i);
+ return -EPERM;
+ }
+ entries += _count_ones(in->init.cons_bitmask[i]);
+ }
+ }
+ IPAHAL_DBG_LOW("sum all entries = %d\n", entries);
+
+ /* skip the header */
+ out->offset = _count_ones(in->init.prod_bitmask) *
+ sizeof(struct ipahal_stats_tethering_hdr_hw);
+ out->size = entries * sizeof(struct ipahal_stats_tethering_hw);
+
+ return 0;
+}
+
+static int ipahal_parse_stats_tethering(void *init_params, void *raw_stats,
+ void *parsed_stats)
+{
+ struct ipahal_stats_init_tethering *init =
+ (struct ipahal_stats_init_tethering *)init_params;
+ struct ipahal_stats_tethering_hw *raw_hw =
+ (struct ipahal_stats_tethering_hw *)raw_stats;
+ struct ipahal_stats_tethering_all *out =
+ (struct ipahal_stats_tethering_all *)parsed_stats;
+ int i, j;
+ int stat_idx = 0;
+
+ memset(out, 0, sizeof(*out));
+ IPAHAL_DBG_LOW("\n");
+ for (i = 0; i < IPAHAL_MAX_PIPES; i++) {
+ for (j = 0; j < IPAHAL_MAX_PIPES; j++) {
+ if ((init->prod_bitmask & (1 << i)) &&
+ init->cons_bitmask[i] & (1 << j)) {
+ IPAHAL_DBG_LOW("prod %d cons %d\n", i, j);
+ IPAHAL_DBG_LOW("stat_idx %d\n", stat_idx);
+ out->stats[i][j].num_ipv4_bytes =
+ raw_hw[stat_idx].num_ipv4_bytes;
+ IPAHAL_DBG_LOW("num_ipv4_bytes %lld\n",
+ out->stats[i][j].num_ipv4_bytes);
+ out->stats[i][j].num_ipv4_pkts =
+ raw_hw[stat_idx].num_ipv4_pkts;
+ IPAHAL_DBG_LOW("num_ipv4_pkts %lld\n",
+ out->stats[i][j].num_ipv4_pkts);
+ out->stats[i][j].num_ipv6_pkts =
+ raw_hw[stat_idx].num_ipv6_pkts;
+ IPAHAL_DBG_LOW("num_ipv6_pkts %lld\n",
+ out->stats[i][j].num_ipv6_pkts);
+ out->stats[i][j].num_ipv6_bytes =
+ raw_hw[stat_idx].num_ipv6_bytes;
+ IPAHAL_DBG_LOW("num_ipv6_bytes %lld\n",
+ out->stats[i][j].num_ipv6_bytes);
+ stat_idx++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static struct ipahal_stats_init_pyld *ipahal_generate_init_pyld_flt_rt(
+ void *params, bool is_atomic_ctx)
+{
+ struct ipahal_stats_init_pyld *pyld;
+ struct ipahal_stats_init_flt_rt *in =
+ (struct ipahal_stats_init_flt_rt *)params;
+ int hdr_entries;
+ int num_rules = 0;
+ int i, start_entry;
+ void *pyld_ptr;
+ u32 incremental_offset;
+
+ for (i = 0; i < IPAHAL_MAX_RULE_ID_32; i++)
+ num_rules += _count_ones(in->rule_id_bitmask[i]);
+
+ if (num_rules == 0) {
+ IPAHAL_ERR("no rule ids provided\n");
+ return NULL;
+ }
+ IPAHAL_DBG_LOW("num_rules = %d\n", num_rules);
+
+ hdr_entries = IPAHAL_MAX_RULE_ID_32;
+ for (i = 0; i < IPAHAL_MAX_RULE_ID_32; i++) {
+ if (in->rule_id_bitmask[i] != 0)
+ break;
+ hdr_entries--;
+ }
+ start_entry = i;
+
+ for (i = IPAHAL_MAX_RULE_ID_32 - 1; i >= start_entry; i--) {
+ if (in->rule_id_bitmask[i] != 0)
+ break;
+ hdr_entries--;
+ }
+ IPAHAL_DBG_LOW("hdr_entries = %d\n", hdr_entries);
+
+ pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) +
+ hdr_entries * sizeof(struct ipahal_stats_flt_rt_hdr_hw) +
+ num_rules * sizeof(struct ipahal_stats_flt_rt_hw),
+ is_atomic_ctx);
+ if (!pyld) {
+ IPAHAL_ERR("no mem\n");
+ return NULL;
+ }
+
+ pyld->len = hdr_entries * sizeof(struct ipahal_stats_flt_rt_hdr_hw) +
+ num_rules * sizeof(struct ipahal_stats_flt_rt_hw);
+
+ pyld_ptr = pyld->data;
+ incremental_offset =
+ (hdr_entries * sizeof(struct ipahal_stats_flt_rt_hdr_hw))
+ / 8;
+ for (i = start_entry; i < hdr_entries; i++) {
+ struct ipahal_stats_flt_rt_hdr_hw *hdr = pyld_ptr;
+
+ hdr->en_mask = in->rule_id_bitmask[i];
+ hdr->cnt_offset = incremental_offset;
+ /* add the stats entry */
+ incremental_offset += _count_ones(in->rule_id_bitmask[i]) *
+ sizeof(struct ipahal_stats_flt_rt_hw) / 8;
+ pyld_ptr += sizeof(*hdr);
+ }
+
+ return pyld;
+}
+
+static int ipahal_get_offset_flt_rt(void *params,
+ struct ipahal_stats_offset *out)
+{
+ struct ipahal_stats_get_offset_flt_rt *in =
+ (struct ipahal_stats_get_offset_flt_rt *)params;
+ int i;
+ int hdr_entries;
+ int skip_rules = 0;
+ int start_entry;
+ int rule_bit = in->rule_id % 32;
+ int rule_idx = in->rule_id / 32;
+
+ if (rule_idx >= IPAHAL_MAX_RULE_ID_32) {
+ IPAHAL_ERR("invalid rule_id %d\n", in->rule_id);
+ return -EPERM;
+ }
+
+ hdr_entries = IPAHAL_MAX_RULE_ID_32;
+ for (i = 0; i < IPAHAL_MAX_RULE_ID_32; i++) {
+ if (in->init.rule_id_bitmask[i] != 0)
+ break;
+ hdr_entries--;
+ }
+
+ if (hdr_entries == 0) {
+ IPAHAL_ERR("no rule ids provided\n");
+ return -EPERM;
+ }
+ start_entry = i;
+
+ for (i = IPAHAL_MAX_RULE_ID_32 - 1; i >= 0; i--) {
+ if (in->init.rule_id_bitmask[i] != 0)
+ break;
+ hdr_entries--;
+ }
+ IPAHAL_DBG_LOW("hdr_entries = %d\n", hdr_entries);
+
+ /* skip the header */
+ out->offset = hdr_entries * sizeof(struct ipahal_stats_flt_rt_hdr_hw);
+
+ /* skip the previous rules */
+ for (i = start_entry; i < rule_idx; i++)
+ skip_rules += _count_ones(in->init.rule_id_bitmask[i]);
+
+ for (i = 0; i < rule_bit; i++)
+ if (in->init.rule_id_bitmask[rule_idx] & (1 << i))
+ skip_rules++;
+
+ out->offset += skip_rules * sizeof(struct ipahal_stats_flt_rt_hw);
+ out->size = sizeof(struct ipahal_stats_flt_rt_hw);
+
+ return 0;
+}
+
+static int ipahal_parse_stats_flt_rt(void *init_params, void *raw_stats,
+ void *parsed_stats)
+{
+ struct ipahal_stats_flt_rt_hw *raw_hw =
+ (struct ipahal_stats_flt_rt_hw *)raw_stats;
+ struct ipahal_stats_flt_rt *out =
+ (struct ipahal_stats_flt_rt *)parsed_stats;
+
+ memset(out, 0, sizeof(*out));
+ IPAHAL_DBG_LOW("\n");
+ out->num_packets = raw_hw->num_packets;
+ out->num_packets_hash = raw_hw->num_packets_hash;
+
+ return 0;
+}
+
+static struct ipahal_stats_init_pyld *ipahal_generate_init_pyld_drop(
+ void *params, bool is_atomic_ctx)
+{
+ struct ipahal_stats_init_pyld *pyld;
+ struct ipahal_stats_init_drop *in =
+ (struct ipahal_stats_init_drop *)params;
+ int entries = _count_ones(in->enabled_bitmask);
+
+ IPAHAL_DBG_LOW("entries = %d\n", entries);
+ pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) +
+ entries * sizeof(struct ipahal_stats_drop_hw), is_atomic_ctx);
+ if (!pyld) {
+ IPAHAL_ERR("no mem\n");
+ return NULL;
+ }
+
+ pyld->len = entries * sizeof(struct ipahal_stats_drop_hw);
+
+ return pyld;
+}
+
+static int ipahal_get_offset_drop(void *params,
+ struct ipahal_stats_offset *out)
+{
+ struct ipahal_stats_get_offset_drop *in =
+ (struct ipahal_stats_get_offset_drop *)params;
+ int entries = _count_ones(in->init.enabled_bitmask);
+
+ IPAHAL_DBG_LOW("\n");
+ out->offset = 0;
+ out->size = entries * sizeof(struct ipahal_stats_drop_hw);
+
+ return 0;
+}
+
+static int ipahal_parse_stats_drop(void *init_params, void *raw_stats,
+ void *parsed_stats)
+{
+ struct ipahal_stats_init_drop *init =
+ (struct ipahal_stats_init_drop *)init_params;
+ struct ipahal_stats_drop_hw *raw_hw =
+ (struct ipahal_stats_drop_hw *)raw_stats;
+ struct ipahal_stats_drop_all *out =
+ (struct ipahal_stats_drop_all *)parsed_stats;
+ int stat_idx = 0;
+ int i;
+
+ memset(out, 0, sizeof(*out));
+ IPAHAL_DBG_LOW("\n");
+ for (i = 0; i < IPAHAL_MAX_PIPES; i++) {
+ if (init->enabled_bitmask & (1 << i)) {
+ out->stats[i].drop_byte_cnt =
+ raw_hw[stat_idx].drop_byte_cnt;
+ out->stats[i].drop_packet_cnt =
+ raw_hw[stat_idx].drop_packet_cnt;
+ stat_idx++;
+ }
+ }
+
+ return 0;
+}
+
+static struct ipahal_hw_stats_obj
+ ipahal_hw_stats_objs[IPA_HW_MAX][IPAHAL_HW_STATS_MAX] = {
+ /* IPAv4 */
+ [IPA_HW_v4_0][IPAHAL_HW_STATS_QUOTA] = {
+ ipahal_generate_init_pyld_quota,
+ ipahal_get_offset_quota,
+ ipahal_parse_stats_quota
+ },
+ [IPA_HW_v4_0][IPAHAL_HW_STATS_TETHERING] = {
+ ipahal_generate_init_pyld_tethering,
+ ipahal_get_offset_tethering,
+ ipahal_parse_stats_tethering
+ },
+ [IPA_HW_v4_0][IPAHAL_HW_STATS_FNR] = {
+ ipahal_generate_init_pyld_flt_rt,
+ ipahal_get_offset_flt_rt,
+ ipahal_parse_stats_flt_rt
+ },
+ [IPA_HW_v4_0][IPAHAL_HW_STATS_DROP] = {
+ ipahal_generate_init_pyld_drop,
+ ipahal_get_offset_drop,
+ ipahal_parse_stats_drop
+ },
+};
+
+int ipahal_hw_stats_init(enum ipa_hw_type ipa_hw_type)
+{
+ int i;
+ int j;
+ struct ipahal_hw_stats_obj zero_obj;
+
+ IPAHAL_DBG_LOW("Entry - HW_TYPE=%d\n", ipa_hw_type);
+
+ if ((ipa_hw_type < 0) || (ipa_hw_type >= IPA_HW_MAX)) {
+ IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type);
+ return -EINVAL;
+ }
+
+ memset(&zero_obj, 0, sizeof(zero_obj));
+ for (i = IPA_HW_v4_0 ; i < ipa_hw_type ; i++) {
+ for (j = 0; j < IPAHAL_HW_STATS_MAX; j++) {
+ if (!memcmp(&ipahal_hw_stats_objs[i + 1][j], &zero_obj,
+ sizeof(struct ipahal_hw_stats_obj))) {
+ memcpy(&ipahal_hw_stats_objs[i + 1][j],
+ &ipahal_hw_stats_objs[i][j],
+ sizeof(struct ipahal_hw_stats_obj));
+ } else {
+ /*
+ * explicitly overridden stat.
+ * Check validity
+ */
+ if (!ipahal_hw_stats_objs[i + 1][j].
+ get_offset) {
+ IPAHAL_ERR(
+ "stat=%d get_offset null ver=%d\n",
+ j, i+1);
+ WARN_ON(1);
+ }
+ if (!ipahal_hw_stats_objs[i + 1][j].
+ parse_stats) {
+ IPAHAL_ERR(
+ "stat=%d parse_stats null ver=%d\n",
+ j, i + 1);
+ WARN_ON(1);
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+int ipahal_stats_get_offset(enum ipahal_hw_stats_type type, void *params,
+ struct ipahal_stats_offset *out)
+{
+ if (type < 0 || type >= IPAHAL_HW_STATS_MAX) {
+ IPAHAL_ERR("Invalid type stat=%d\n", type);
+ WARN_ON(1);
+ return -EFAULT;
+ }
+
+ if (!params || !out) {
+ IPAHAL_ERR("Null arg\n");
+ WARN_ON(1);
+ return -EFAULT;
+ }
+
+ return ipahal_hw_stats_objs[ipahal_ctx->hw_type][type].get_offset(
+ params, out);
+}
+
+struct ipahal_stats_init_pyld *ipahal_stats_generate_init_pyld(
+ enum ipahal_hw_stats_type type, void *params, bool is_atomic_ctx)
+{
+ if (type < 0 || type >= IPAHAL_HW_STATS_MAX) {
+ IPAHAL_ERR("Invalid type stat=%d\n", type);
+ WARN_ON(1);
+ return NULL;
+ }
+
+ if (!params) {
+ IPAHAL_ERR("Null arg\n");
+ WARN_ON(1);
+ return NULL;
+ }
+
+ return ipahal_hw_stats_objs[ipahal_ctx->hw_type][type].
+ generate_init_pyld(params, is_atomic_ctx);
+}
+
+int ipahal_parse_stats(enum ipahal_hw_stats_type type, void *init_params,
+ void *raw_stats, void *parsed_stats)
+{
+ if (type < 0 || type >= IPAHAL_HW_STATS_MAX) {
+ IPAHAL_ERR("Invalid type stat=%d\n", type);
+ WARN_ON(1);
+ return -EFAULT;
+ }
+
+ if (!raw_stats || !parsed_stats) {
+ IPAHAL_ERR("Null arg\n");
+ WARN_ON(1);
+ return -EFAULT;
+ }
+
+ return ipahal_hw_stats_objs[ipahal_ctx->hw_type][type].parse_stats(
+ init_params, raw_stats, parsed_stats);
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.h
new file mode 100644
index 0000000..cbb1dc3
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.h
@@ -0,0 +1,248 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPAHAL_HW_STATS_H_
+#define _IPAHAL_HW_STATS_H_
+
+#include <linux/ipa.h>
+
+#define IPAHAL_MAX_PIPES 32
+#define IPAHAL_MAX_RULE_ID_32 (1024 / 32) /* 10 bits of rule id */
+
+enum ipahal_hw_stats_type {
+ IPAHAL_HW_STATS_QUOTA,
+ IPAHAL_HW_STATS_TETHERING,
+ IPAHAL_HW_STATS_FNR,
+ IPAHAL_HW_STATS_DROP,
+ IPAHAL_HW_STATS_MAX
+};
+
+/*
+ * struct ipahal_stats_init_pyld - Statistics initialization payload
+ * @len: length of payload
+ * @data: actual payload data
+ */
+struct ipahal_stats_init_pyld {
+ u16 len;
+ u16 reserved;
+ u8 data[0];
+};
+
+/*
+ * struct ipahal_stats_offset - Statistics offset parameters
+ * @offset: offset of the statistic from beginning of stats table
+ * @size: size of the statistics
+ */
+struct ipahal_stats_offset {
+ u32 offset;
+ u16 size;
+};
+
+/*
+ * struct ipahal_stats_init_quota - Initializations parameters for quota
+ * @enabled_bitmask: bit mask of pipes to be monitored
+ */
+struct ipahal_stats_init_quota {
+ u32 enabled_bitmask;
+};
+
+/*
+ * struct ipahal_stats_get_offset_quota - Get offset parameters for quota
+ * @init: initialization parameters used in initialization of stats
+ */
+struct ipahal_stats_get_offset_quota {
+ struct ipahal_stats_init_quota init;
+};
+
+/*
+ * struct ipahal_stats_quota - Quota statistics
+ * @num_ipv4_bytes: IPv4 bytes
+ * @num_ipv6_bytes: IPv6 bytes
+ * @num_ipv4_pkts: IPv4 packets
+ * @num_ipv6_pkts: IPv6 packets
+ */
+struct ipahal_stats_quota {
+ u64 num_ipv4_bytes;
+ u64 num_ipv6_bytes;
+ u64 num_ipv4_pkts;
+ u64 num_ipv6_pkts;
+};
+
+/*
+ * struct ipahal_stats_quota_all - Quota statistics for all pipes
+ * @stats: array of statistics per pipe
+ */
+struct ipahal_stats_quota_all {
+ struct ipahal_stats_quota stats[IPAHAL_MAX_PIPES];
+};
+
+/*
+ * struct ipahal_stats_init_tethering - Initializations parameters for tethering
+ * @prod_bitmask: bit mask of producer pipes to be monitored
+ * @cons_bitmask: bit mask of consumer pipes to be monitored per producer
+ */
+struct ipahal_stats_init_tethering {
+ u32 prod_bitmask;
+ u32 cons_bitmask[IPAHAL_MAX_PIPES];
+};
+
+/*
+ * struct ipahal_stats_get_offset_tethering - Get offset parameters for
+ * tethering
+ * @init: initialization parameters used in initialization of stats
+ */
+struct ipahal_stats_get_offset_tethering {
+ struct ipahal_stats_init_tethering init;
+};
+
+/*
+ * struct ipahal_stats_tethering - Tethering statistics
+ * @num_ipv4_bytes: IPv4 bytes
+ * @num_ipv6_bytes: IPv6 bytes
+ * @num_ipv4_pkts: IPv4 packets
+ * @num_ipv6_pkts: IPv6 packets
+ */
+struct ipahal_stats_tethering {
+ u64 num_ipv4_bytes;
+ u64 num_ipv6_bytes;
+ u64 num_ipv4_pkts;
+ u64 num_ipv6_pkts;
+};
+
+/*
+ * struct ipahal_stats_tethering_all - Tethering statistics for all pipes
+ * @stats: matrix of statistics per pair of pipes
+ */
+struct ipahal_stats_tethering_all {
+ struct ipahal_stats_tethering
+ stats[IPAHAL_MAX_PIPES][IPAHAL_MAX_PIPES];
+};
+
+/*
+ * struct ipahal_stats_init_flt_rt - Initializations parameters for flt_rt
+ * @rule_id_bitmask: array describes which rule ids to monitor.
+ * rule_id bit is determined by:
+ * index to the array => rule_id / 32
+ * bit to enable => rule_id % 32
+ */
+struct ipahal_stats_init_flt_rt {
+ u32 rule_id_bitmask[IPAHAL_MAX_RULE_ID_32];
+};
+
+/*
+ * struct ipahal_stats_get_offset_flt_rt - Get offset parameters for flt_rt
+ * @init: initialization parameters used in initialization of stats
+ * @rule_id: rule_id to get the offset for
+ */
+struct ipahal_stats_get_offset_flt_rt {
+ struct ipahal_stats_init_flt_rt init;
+ u32 rule_id;
+};
+
+/*
+ * struct ipahal_stats_flt_rt - flt_rt statistics
+ * @num_packets: Total number of packets hit this rule
+ * @num_packets_hash: Total number of packets hit this rule in hash table
+ */
+struct ipahal_stats_flt_rt {
+ u32 num_packets;
+ u32 num_packets_hash;
+};
+
+/*
+ * struct ipahal_stats_init_drop - Initializations parameters for Drop
+ * @enabled_bitmask: bit mask of pipes to be monitored
+ */
+struct ipahal_stats_init_drop {
+ u32 enabled_bitmask;
+};
+
+/*
+ * struct ipahal_stats_get_offset_drop - Get offset parameters for Drop
+ * @init: initialization parameters used in initialization of stats
+ */
+struct ipahal_stats_get_offset_drop {
+ struct ipahal_stats_init_drop init;
+};
+
+/*
+ * struct ipahal_stats_drop - Packet Drop statistics
+ * @drop_packet_cnt: number of packets dropped
+ * @drop_byte_cnt: number of bytes dropped
+ */
+struct ipahal_stats_drop {
+ u32 drop_packet_cnt;
+ u32 drop_byte_cnt;
+};
+
+/*
+ * struct ipahal_stats_drop_all - Drop statistics for all pipes
+ * @stats: array of statistics per pipe
+ */
+struct ipahal_stats_drop_all {
+ struct ipahal_stats_drop stats[IPAHAL_MAX_PIPES];
+};
+
+/*
+ * ipahal_stats_generate_init_pyld - Generate the init payload for stats
+ * @type: type of stats
+ * @params: init_pyld parameters based of stats type
+ * @is_atomic_ctx: is the calling context atomic?
+ *
+ * This function will generate the initialization payload for a particular
+ * statistic in hardware. IPA driver is expected to use this payload to
+ * initialize the SRAM.
+ *
+ * Return: pointer to ipahal_stats_init_pyld on success or NULL on failure.
+ */
+struct ipahal_stats_init_pyld *ipahal_stats_generate_init_pyld(
+ enum ipahal_hw_stats_type type, void *params, bool is_atomic_ctx);
+
+/*
+ * ipahal_destroy_stats_init_pyld() - Destroy/Release bulk that was built
+ * by the ipahal_stats_generate_init_pyld function.
+ */
+static inline void ipahal_destroy_stats_init_pyld(
+ struct ipahal_stats_init_pyld *pyld)
+{
+ kfree(pyld);
+}
+
+/*
+ * ipahal_stats_get_offset - Get the offset / size of payload for stats
+ * @type: type of stats
+ * @params: get_offset parameters based of stats type
+ * @out: out parameter for the offset and size.
+ *
+ * This function will return the offset of the counter from beginning of
+ * the table. IPA driver is expected to read this portion in SRAM and pass
+ * it to ipahal_parse_stats() to interpret the stats.
+ *
+ * Return: 0 on success and negative on failure
+ */
+int ipahal_stats_get_offset(enum ipahal_hw_stats_type type, void *params,
+ struct ipahal_stats_offset *out);
+
+/*
+ * ipahal_parse_stats - parse statistics
+ * @type: type of stats
+ * @init_params: init_pyld parameters used on init
+ * @raw_stats: stats read from IPA SRAM
+ * @parsed_stats: pointer to parsed stats based on type
+ *
+ * Return: 0 on success and negative on failure
+ */
+int ipahal_parse_stats(enum ipahal_hw_stats_type type, void *init_params,
+ void *raw_stats, void *parsed_stats);
+
+
+#endif /* _IPAHAL_HW_STATS_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats_i.h
new file mode 100644
index 0000000..3bb761d
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats_i.h
@@ -0,0 +1,55 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPAHAL_HW_STATS_I_H_
+#define _IPAHAL_HW_STATS_I_H_
+
+#include "ipahal_hw_stats.h"
+
+int ipahal_hw_stats_init(enum ipa_hw_type ipa_hw_type);
+
+struct ipahal_stats_quota_hw {
+ u64 num_ipv4_bytes;
+ u64 num_ipv4_pkts:32;
+ u64 num_ipv6_pkts:32;
+ u64 num_ipv6_bytes;
+};
+
+struct ipahal_stats_tethering_hdr_hw {
+ u64 dst_mask:32;
+ u64 offset:32;
+};
+
+struct ipahal_stats_tethering_hw {
+ u64 num_ipv4_bytes;
+ u64 num_ipv4_pkts:32;
+ u64 num_ipv6_pkts:32;
+ u64 num_ipv6_bytes;
+};
+
+struct ipahal_stats_flt_rt_hdr_hw {
+ u64 en_mask:32;
+ u64 reserved:16;
+ u64 cnt_offset:16;
+};
+
+struct ipahal_stats_flt_rt_hw {
+ u64 num_packets_hash:32;
+ u64 num_packets:32;
+};
+
+struct ipahal_stats_drop_hw {
+ u64 drop_byte_cnt:40;
+ u64 drop_packet_cnt:24;
+};
+
+#endif /* _IPAHAL_HW_STATS_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
index c023082..5eb1aef 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
@@ -46,6 +46,9 @@
IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \
} while (0)
+#define IPAHAL_MEM_ALLOC(__size, __is_atomic_ctx) \
+ (kzalloc((__size), ((__is_atomic_ctx) ? GFP_ATOMIC : GFP_KERNEL)))
+
/*
* struct ipahal_context - HAL global context data
* @hw_type: IPA H/W type/version.
@@ -555,6 +558,8 @@
#define IPA_HDR_UCP_802_3_TO_ETHII 7
#define IPA_HDR_UCP_ETHII_TO_802_3 8
#define IPA_HDR_UCP_ETHII_TO_ETHII 9
+#define IPA_HDR_UCP_L2TP_HEADER_ADD 10
+#define IPA_HDR_UCP_L2TP_HEADER_REMOVE 11
/* Processing context TLV type */
#define IPA_PROC_CTX_TLV_TYPE_END 0
@@ -596,6 +601,28 @@
};
/**
+ * struct ipa_hw_hdr_proc_ctx_l2tp_add_hdr -
+ * HW structure of IPA processing context - add l2tp header tlv
+ * @tlv: IPA processing context TLV
+ * @l2tp_params: l2tp parameters
+ */
+struct ipa_hw_hdr_proc_ctx_l2tp_add_hdr {
+ struct ipa_hw_hdr_proc_ctx_tlv tlv;
+ struct ipa_l2tp_header_add_procparams l2tp_params;
+};
+
+/**
+ * struct ipa_hw_hdr_proc_ctx_l2tp_remove_hdr -
+ * HW structure of IPA processing context - remove l2tp header tlv
+ * @tlv: IPA processing context TLV
+ * @l2tp_params: l2tp parameters
+ */
+struct ipa_hw_hdr_proc_ctx_l2tp_remove_hdr {
+ struct ipa_hw_hdr_proc_ctx_tlv tlv;
+ struct ipa_l2tp_header_remove_procparams l2tp_params;
+};
+
+/**
* struct ipa_hw_hdr_proc_ctx_add_hdr_seq -
* IPA processing context header - add header sequence
* @hdr_add: add header command
@@ -619,6 +646,32 @@
struct ipa_hw_hdr_proc_ctx_tlv end;
};
+/**
+ * struct ipa_hw_hdr_proc_ctx_add_l2tp_hdr_cmd_seq -
+ * IPA processing context header - process command sequence
+ * @hdr_add: add header command
+ * @l2tp_params: l2tp params for header addition
+ * @end: tlv end command (cmd.type must be 0)
+ */
+struct ipa_hw_hdr_proc_ctx_add_l2tp_hdr_cmd_seq {
+ struct ipa_hw_hdr_proc_ctx_hdr_add hdr_add;
+ struct ipa_hw_hdr_proc_ctx_l2tp_add_hdr l2tp_params;
+ struct ipa_hw_hdr_proc_ctx_tlv end;
+};
+
+/**
+ * struct ipa_hw_hdr_proc_ctx_remove_l2tp_hdr_cmd_seq -
+ * IPA processing context header - process command sequence
+ * @hdr_add: add header command
+ * @l2tp_params: l2tp params for header removal
+ * @end: tlv end command (cmd.type must be 0)
+ */
+struct ipa_hw_hdr_proc_ctx_remove_l2tp_hdr_cmd_seq {
+ struct ipa_hw_hdr_proc_ctx_hdr_add hdr_add;
+ struct ipa_hw_hdr_proc_ctx_l2tp_remove_hdr l2tp_params;
+ struct ipa_hw_hdr_proc_ctx_tlv end;
+};
+
/* IPA HW DPS/HPS image memory sizes */
#define IPA_HW_DPS_IMG_MEM_SIZE_V3_0 128
#define IPA_HW_HPS_IMG_MEM_SIZE_V3_0 320
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
index 3019e4d..dc71414 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -43,6 +43,7 @@
__stringify(IPA_ENDP_INIT_ROUTE_n),
__stringify(IPA_ENDP_INIT_MODE_n),
__stringify(IPA_ENDP_INIT_NAT_n),
+ __stringify(IPA_ENDP_INIT_CONN_TRACK_n),
__stringify(IPA_ENDP_INIT_CTRL_n),
__stringify(IPA_ENDP_INIT_CTRL_SCND_n),
__stringify(IPA_ENDP_INIT_HOL_BLOCK_EN_n),
@@ -85,6 +86,25 @@
__stringify(IPA_IDLE_INDICATION_CFG),
__stringify(IPA_DPS_SEQUENCER_FIRST),
__stringify(IPA_HPS_SEQUENCER_FIRST),
+ __stringify(IPA_CLKON_CFG),
+ __stringify(IPA_STAT_QUOTA_BASE_n),
+ __stringify(IPA_STAT_QUOTA_MASK_n),
+ __stringify(IPA_STAT_TETHERING_BASE_n),
+ __stringify(IPA_STAT_TETHERING_MASK_n),
+ __stringify(IPA_STAT_FILTER_IPV4_BASE),
+ __stringify(IPA_STAT_FILTER_IPV6_BASE),
+ __stringify(IPA_STAT_ROUTER_IPV4_BASE),
+ __stringify(IPA_STAT_ROUTER_IPV6_BASE),
+ __stringify(IPA_STAT_FILTER_IPV4_START_ID),
+ __stringify(IPA_STAT_FILTER_IPV6_START_ID),
+ __stringify(IPA_STAT_ROUTER_IPV4_START_ID),
+ __stringify(IPA_STAT_ROUTER_IPV6_START_ID),
+ __stringify(IPA_STAT_FILTER_IPV4_END_ID),
+ __stringify(IPA_STAT_FILTER_IPV6_END_ID),
+ __stringify(IPA_STAT_ROUTER_IPV4_END_ID),
+ __stringify(IPA_STAT_ROUTER_IPV6_END_ID),
+ __stringify(IPA_STAT_DROP_CNT_BASE_n),
+ __stringify(IPA_STAT_DROP_CNT_MASK_n),
};
static void ipareg_construct_dummy(enum ipahal_reg_name reg,
@@ -723,6 +743,17 @@
IPA_ENDP_INIT_NAT_n_NAT_EN_BMSK);
}
/*
 * ipareg_construct_endp_init_conn_track_n() - build the value for the
 * IPA_ENDP_INIT_CONN_TRACK_n register from a per-endpoint connection
 * tracking configuration (registered in the v4.0 register table).
 * @reg: register name (unused here; required by the constructor signature)
 * @fields: pointer to a struct ipa_ep_cfg_conn_track
 * @val: output register value
 */
static void ipareg_construct_endp_init_conn_track_n(enum ipahal_reg_name reg,
	const void *fields, u32 *val)
{
	struct ipa_ep_cfg_conn_track *ep_ipv6ct =
		(struct ipa_ep_cfg_conn_track *)fields;

	/* single CONN_TRACK_EN bit at shift 0, mask 0x1 */
	IPA_SETFIELD_IN_REG(*val, ep_ipv6ct->conn_track_en,
		IPA_ENDP_INIT_CONN_TRACK_n_CONN_TRACK_EN_SHFT,
		IPA_ENDP_INIT_CONN_TRACK_n_CONN_TRACK_EN_BMSK);
}
+
static void ipareg_construct_endp_init_mode_n(enum ipahal_reg_name reg,
const void *fields, u32 *val)
{
@@ -1490,6 +1521,67 @@
[IPA_HW_v4_0][IPA_ENDP_STATUS_n] = {
ipareg_construct_endp_status_n_v4_0, ipareg_parse_dummy,
0x00000840, 0x70},
+ [IPA_HW_v4_0][IPA_CLKON_CFG] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00000044, 0},
+ [IPA_HW_v4_0][IPA_ENDP_INIT_CONN_TRACK_n] = {
+ ipareg_construct_endp_init_conn_track_n,
+ ipareg_parse_dummy,
+ 0x00000850, 0x70},
+ [IPA_HW_v4_0][IPA_STAT_QUOTA_BASE_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00000700, 0x4 },
+ [IPA_HW_v4_0][IPA_STAT_QUOTA_MASK_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00000708, 0x4 },
+ [IPA_HW_v4_0][IPA_STAT_TETHERING_BASE_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00000710, 0x4 },
+ [IPA_HW_v4_0][IPA_STAT_TETHERING_MASK_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00000718, 0x4 },
+ [IPA_HW_v4_0][IPA_STAT_FILTER_IPV4_BASE] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00000720, 0x0 },
+ [IPA_HW_v4_0][IPA_STAT_FILTER_IPV6_BASE] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00000724, 0x0 },
+ [IPA_HW_v4_0][IPA_STAT_ROUTER_IPV4_BASE] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00000728, 0x0 },
+ [IPA_HW_v4_0][IPA_STAT_ROUTER_IPV6_BASE] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x0000072C, 0x0 },
+ [IPA_HW_v4_0][IPA_STAT_FILTER_IPV4_START_ID] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00000730, 0x0 },
+ [IPA_HW_v4_0][IPA_STAT_FILTER_IPV6_START_ID] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00000734, 0x0 },
+ [IPA_HW_v4_0][IPA_STAT_ROUTER_IPV4_START_ID] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00000738, 0x0 },
+ [IPA_HW_v4_0][IPA_STAT_ROUTER_IPV6_START_ID] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x0000073C, 0x0 },
+ [IPA_HW_v4_0][IPA_STAT_FILTER_IPV4_END_ID] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00000740, 0x0 },
+ [IPA_HW_v4_0][IPA_STAT_FILTER_IPV6_END_ID] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00000744, 0x0 },
+ [IPA_HW_v4_0][IPA_STAT_ROUTER_IPV4_END_ID] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00000748, 0x0 },
+ [IPA_HW_v4_0][IPA_STAT_ROUTER_IPV6_END_ID] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x0000074C, 0x0 },
+ [IPA_HW_v4_0][IPA_STAT_DROP_CNT_BASE_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00000750, 0x4 },
+ [IPA_HW_v4_0][IPA_STAT_DROP_CNT_MASK_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00000758, 0x4 },
};
/*
@@ -1833,6 +1925,11 @@
IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V4_0;
}
+ if (ep_idx > (sizeof(valmask->val) * 8 - 1)) {
+ IPAHAL_ERR("too big ep_idx %d\n", ep_idx);
+ ipa_assert();
+ return;
+ }
IPA_SETFIELD_IN_REG(valmask->val, 1 << ep_idx, shft, bmsk);
valmask->mask = bmsk << shft;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
index c9293b8..a2864cd 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
@@ -46,6 +46,7 @@
IPA_ENDP_INIT_ROUTE_n,
IPA_ENDP_INIT_MODE_n,
IPA_ENDP_INIT_NAT_n,
+ IPA_ENDP_INIT_CONN_TRACK_n,
IPA_ENDP_INIT_CTRL_n,
IPA_ENDP_INIT_CTRL_SCND_n,
IPA_ENDP_INIT_HOL_BLOCK_EN_n,
@@ -88,6 +89,25 @@
IPA_IDLE_INDICATION_CFG,
IPA_DPS_SEQUENCER_FIRST,
IPA_HPS_SEQUENCER_FIRST,
+ IPA_CLKON_CFG,
+ IPA_STAT_QUOTA_BASE_n,
+ IPA_STAT_QUOTA_MASK_n,
+ IPA_STAT_TETHERING_BASE_n,
+ IPA_STAT_TETHERING_MASK_n,
+ IPA_STAT_FILTER_IPV4_BASE,
+ IPA_STAT_FILTER_IPV6_BASE,
+ IPA_STAT_ROUTER_IPV4_BASE,
+ IPA_STAT_ROUTER_IPV6_BASE,
+ IPA_STAT_FILTER_IPV4_START_ID,
+ IPA_STAT_FILTER_IPV6_START_ID,
+ IPA_STAT_ROUTER_IPV4_START_ID,
+ IPA_STAT_ROUTER_IPV6_START_ID,
+ IPA_STAT_FILTER_IPV4_END_ID,
+ IPA_STAT_FILTER_IPV6_END_ID,
+ IPA_STAT_ROUTER_IPV4_END_ID,
+ IPA_STAT_ROUTER_IPV6_END_ID,
+ IPA_STAT_DROP_CNT_BASE_n,
+ IPA_STAT_DROP_CNT_MASK_n,
IPA_REG_MAX,
};
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
index 17bad03..664d254 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
@@ -118,6 +118,10 @@
#define IPA_ENDP_INIT_NAT_n_NAT_EN_BMSK 0x3
#define IPA_ENDP_INIT_NAT_n_NAT_EN_SHFT 0x0
+/* IPA_ENDP_INIT_CONN_TRACK_n register */
+#define IPA_ENDP_INIT_CONN_TRACK_n_CONN_TRACK_EN_BMSK 0x1
+#define IPA_ENDP_INIT_CONN_TRACK_n_CONN_TRACK_EN_SHFT 0x0
+
/* IPA_ENDP_INIT_CTRL_n register */
#define IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_BMSK 0x1
#define IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_SHFT 0x0
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index fcaabe3..b119a69 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -702,6 +702,11 @@
/* send ipa_fltr_installed_notif_req_msg_v01 to Q6*/
req->source_pipe_index =
ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_PROD);
+ if (req->source_pipe_index == IPA_EP_NOT_ALLOCATED) {
+ IPAWANERR("ep mapping failed\n");
+ retval = -EFAULT;
+ }
+
req->install_status = QMI_RESULT_SUCCESS_V01;
req->rule_id_valid = 1;
req->rule_id_len = rmnet_ipa3_ctx->num_q6_rules;
@@ -1141,7 +1146,8 @@
memset(&meta, 0, sizeof(meta));
meta.pkt_init_dst_ep_valid = true;
meta.pkt_init_dst_ep_remote = true;
- meta.pkt_init_dst_ep = IPA_CLIENT_Q6_LAN_CONS;
+ meta.pkt_init_dst_ep =
+ ipa3_get_ep_mapping(IPA_CLIENT_Q6_WAN_CONS);
ret = ipa3_tx_dp(IPA_CLIENT_APPS_WAN_PROD, skb, &meta);
} else {
ret = ipa3_tx_dp(IPA_CLIENT_APPS_WAN_PROD, skb, NULL);
@@ -1926,7 +1932,9 @@
if (ret < 0)
IPAWANERR("Error deleting resource %d, ret=%d\n",
IPA_RM_RESOURCE_Q6_PROD, ret);
- destroy_workqueue(rmnet_ipa3_ctx->rm_q6_wq);
+
+ if (rmnet_ipa3_ctx->rm_q6_wq)
+ destroy_workqueue(rmnet_ipa3_ctx->rm_q6_wq);
}
static void ipa3_wake_tx_queue(struct work_struct *work)
@@ -2266,7 +2274,10 @@
IPAWANERR("Error deleting resource %d, ret=%d\n",
IPA_RM_RESOURCE_WWAN_0_PROD, ret);
create_rsrc_err:
- ipa3_q6_deinitialize_rm();
+
+ if (!atomic_read(&rmnet_ipa3_ctx->is_ssr))
+ ipa3_q6_deinitialize_rm();
+
q6_init_err:
free_netdev(dev);
rmnet_ipa3_ctx->wwan_priv = NULL;
@@ -2725,6 +2736,9 @@
if (!data->set_quota)
ipa3_qmi_stop_data_qouta();
+ /* prevent string buffer overflows */
+ data->interface_name[IFNAMSIZ-1] = '\0';
+
index = find_vchannel_name_index(data->interface_name);
IPAWANERR("iface name %s, quota %lu\n",
data->interface_name,
@@ -2962,6 +2976,10 @@
kfree(req);
kfree(resp);
return rc;
+ } else if (data == NULL) {
+ kfree(req);
+ kfree(resp);
+ return 0;
}
if (resp->dl_dst_pipe_stats_list_valid) {
@@ -3145,8 +3163,11 @@
int rmnet_ipa3_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data)
{
enum ipa_upstream_type upstream_type;
+ struct wan_ioctl_query_tether_stats tether_stats;
int rc = 0;
+ memset(&tether_stats, 0, sizeof(struct wan_ioctl_query_tether_stats));
+
/* get IPA backhaul type */
upstream_type = find_upstream_type(data->upstreamIface);
@@ -3164,7 +3185,7 @@
} else {
IPAWANERR(" reset modem-backhaul stats\n");
rc = rmnet_ipa3_query_tethering_stats_modem(
- NULL, true);
+ &tether_stats, true);
if (rc) {
IPAWANERR("reset MODEM stats failed\n");
return rc;
diff --git a/drivers/platform/msm/ipa/test/Makefile b/drivers/platform/msm/ipa/test/Makefile
index c20fd2b..af46bf2 100644
--- a/drivers/platform/msm/ipa/test/Makefile
+++ b/drivers/platform/msm/ipa/test/Makefile
@@ -1,2 +1,2 @@
obj-$(CONFIG_IPA_UT) += ipa_ut_mod.o
-ipa_ut_mod-y := ipa_ut_framework.o ipa_test_example.o ipa_test_mhi.o ipa_test_dma.o
+ipa_ut_mod-y := ipa_ut_framework.o ipa_test_example.o ipa_test_mhi.o ipa_test_dma.o ipa_test_hw_stats.o
diff --git a/drivers/platform/msm/ipa/test/ipa_test_hw_stats.c b/drivers/platform/msm/ipa/test/ipa_test_hw_stats.c
new file mode 100644
index 0000000..d37920e
--- /dev/null
+++ b/drivers/platform/msm/ipa/test/ipa_test_hw_stats.c
@@ -0,0 +1,330 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "ipa_ut_framework.h"
+#include <linux/netdevice.h>
+
/*
 * Runtime context shared by all tests in the hw_stats suite.
 * Allocated once by suite setup and intentionally kept across runs
 * (setup only allocates when @ctx is still NULL).
 */
struct ipa_test_hw_stats_ctx {
	u32 odu_prod_hdl;	/* handle of the IPA_CLIENT_ODU_PROD sys pipe */
	u32 odu_cons_hdl;	/* handle of the IPA_CLIENT_ODU_EMB_CONS sys pipe */
	u32 rt4_usb;		/* IPv4 RT table handle routing to USB_CONS */
	u32 rt6_usb;		/* IPv6 RT table handle routing to USB_CONS */
	u32 rt4_odu_cons;	/* IPv4 RT table handle routing to ODU_EMB_CONS */
	u32 rt6_odu_cons;	/* IPv6 RT table handle routing to ODU_EMB_CONS */
	atomic_t odu_pending;	/* skbs sent to ODU_PROD awaiting WRITE_DONE */
};

static struct ipa_test_hw_stats_ctx *ctx;
+
+static int ipa_test_hw_stats_suite_setup(void **ppriv)
+{
+ IPA_UT_DBG("Start Setup\n");
+
+ if (!ctx)
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+
+ return 0;
+}
+
/*
 * Suite teardown.
 *
 * Note: @ctx is intentionally not freed here so it can be reused if the
 * suite runs again (setup only allocates when @ctx is NULL).
 */
static int ipa_test_hw_stats_suite_teardown(void *priv)
{
	IPA_UT_DBG("Start Teardown\n");

	return 0;
}
+
+static void odu_prod_notify(void *priv, enum ipa_dp_evt_type evt,
+ unsigned long data)
+{
+ struct sk_buff *skb = (struct sk_buff *)data;
+
+ switch (evt) {
+ case IPA_RECEIVE:
+ dev_kfree_skb_any(skb);
+ break;
+ case IPA_WRITE_DONE:
+ atomic_dec(&ctx->odu_pending);
+ dev_kfree_skb_any(skb);
+ break;
+ default:
+ IPA_UT_ERR("unexpected evt %d\n", evt);
+ }
+}
+static void odu_cons_notify(void *priv, enum ipa_dp_evt_type evt,
+ unsigned long data)
+{
+ struct sk_buff *skb = (struct sk_buff *)data;
+ int ret;
+
+ switch (evt) {
+ case IPA_RECEIVE:
+ if (atomic_read(&ctx->odu_pending) >= 64)
+ msleep(20);
+ atomic_inc(&ctx->odu_pending);
+ skb_put(skb, 100);
+ ret = ipa_tx_dp(IPA_CLIENT_ODU_PROD, skb, NULL);
+ while (ret) {
+ msleep(100);
+ ret = ipa_tx_dp(IPA_CLIENT_ODU_PROD, skb, NULL);
+ }
+ break;
+ case IPA_WRITE_DONE:
+ dev_kfree_skb_any(skb);
+ break;
+ default:
+ IPA_UT_ERR("unexpected evt %d\n", evt);
+ }
+}
+
+static int ipa_test_hw_stats_configure(void *priv)
+{
+ struct ipa_sys_connect_params odu_prod_params;
+ struct ipa_sys_connect_params odu_emb_cons_params;
+ int res;
+
+ /* first connect all additional pipe */
+ memset(&odu_prod_params, 0, sizeof(odu_prod_params));
+ memset(&odu_emb_cons_params, 0, sizeof(odu_emb_cons_params));
+
+ odu_prod_params.client = IPA_CLIENT_ODU_PROD;
+ odu_prod_params.desc_fifo_sz = 0x1000;
+ odu_prod_params.priv = NULL;
+ odu_prod_params.notify = odu_prod_notify;
+ res = ipa_setup_sys_pipe(&odu_prod_params,
+ &ctx->odu_prod_hdl);
+ if (res) {
+ IPA_UT_ERR("fail to setup sys pipe ODU_PROD %d\n", res);
+ return res;
+ }
+
+ odu_emb_cons_params.client = IPA_CLIENT_ODU_EMB_CONS;
+ odu_emb_cons_params.desc_fifo_sz = 0x1000;
+ odu_emb_cons_params.priv = NULL;
+ odu_emb_cons_params.notify = odu_cons_notify;
+ res = ipa_setup_sys_pipe(&odu_emb_cons_params,
+ &ctx->odu_cons_hdl);
+ if (res) {
+ IPA_UT_ERR("fail to setup sys pipe ODU_EMB_CONS %d\n", res);
+ ipa_teardown_sys_pipe(ctx->odu_prod_hdl);
+ return res;
+ }
+
+ IPA_UT_INFO("Configured. Please connect USB RNDIS now\n");
+
+ return 0;
+}
+
+static int ipa_test_hw_stats_add_FnR(void *priv)
+{
+ struct ipa_ioc_add_rt_rule *rt_rule;
+ struct ipa_ioc_add_flt_rule *flt_rule;
+ struct ipa_ioc_get_rt_tbl rt_lookup;
+ int ret;
+
+ rt_rule = kzalloc(sizeof(*rt_rule) + 1 * sizeof(struct ipa_rt_rule_add),
+ GFP_KERNEL);
+ if (!rt_rule) {
+ IPA_UT_DBG("no mem\n");
+ return -ENOMEM;
+ }
+
+ flt_rule = kzalloc(sizeof(*flt_rule) +
+ 1 * sizeof(struct ipa_flt_rule_add), GFP_KERNEL);
+ if (!flt_rule) {
+ IPA_UT_DBG("no mem\n");
+ ret = -ENOMEM;
+ goto free_rt;
+ }
+
+ rt_rule->commit = 1;
+ rt_rule->ip = IPA_IP_v4;
+ rt_lookup.ip = rt_rule->ip;
+ strlcpy(rt_rule->rt_tbl_name, "V4_RT_TO_USB_CONS",
+ IPA_RESOURCE_NAME_MAX);
+ strlcpy(rt_lookup.name, rt_rule->rt_tbl_name, IPA_RESOURCE_NAME_MAX);
+ rt_rule->num_rules = 1;
+ rt_rule->rules[0].rule.dst = IPA_CLIENT_USB_CONS;
+ rt_rule->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT;
+ rt_rule->rules[0].rule.attrib.dst_port = 5002;
+ rt_rule->rules[0].rule.hashable = true;
+ if (ipa_add_rt_rule(rt_rule) || rt_rule->rules[0].status) {
+ IPA_UT_ERR("failed to install V4 rules\n");
+ ret = -EFAULT;
+ goto free_flt;
+ }
+ if (ipa_get_rt_tbl(&rt_lookup)) {
+ IPA_UT_ERR("failed to query V4 rules\n");
+ ret = -EFAULT;
+ goto free_flt;
+ }
+ ctx->rt4_usb = rt_lookup.hdl;
+
+ memset(rt_rule, 0, sizeof(*rt_rule));
+ rt_rule->commit = 1;
+ rt_rule->ip = IPA_IP_v6;
+ rt_lookup.ip = rt_rule->ip;
+ strlcpy(rt_rule->rt_tbl_name, "V6_RT_TO_USB_CONS",
+ IPA_RESOURCE_NAME_MAX);
+ strlcpy(rt_lookup.name, rt_rule->rt_tbl_name, IPA_RESOURCE_NAME_MAX);
+ rt_rule->num_rules = 1;
+ rt_rule->rules[0].rule.dst = IPA_CLIENT_USB_CONS;
+ rt_rule->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT;
+ rt_rule->rules[0].rule.attrib.dst_port = 5002;
+ rt_rule->rules[0].rule.hashable = true;
+ if (ipa_add_rt_rule(rt_rule) || rt_rule->rules[0].status) {
+ IPA_UT_ERR("failed to install V4 rules\n");
+ ret = -EFAULT;
+ goto free_flt;
+ }
+ if (ipa_get_rt_tbl(&rt_lookup)) {
+ IPA_UT_ERR("failed to query V4 rules\n");
+ ret = -EFAULT;
+ goto free_flt;
+ }
+ ctx->rt6_usb = rt_lookup.hdl;
+
+ memset(rt_rule, 0, sizeof(*rt_rule));
+ rt_rule->commit = 1;
+ rt_rule->ip = IPA_IP_v4;
+ rt_lookup.ip = rt_rule->ip;
+ strlcpy(rt_rule->rt_tbl_name, "V4_RT_TO_ODU_CONS",
+ IPA_RESOURCE_NAME_MAX);
+ strlcpy(rt_lookup.name, rt_rule->rt_tbl_name, IPA_RESOURCE_NAME_MAX);
+ rt_rule->num_rules = 1;
+ rt_rule->rules[0].rule.dst = IPA_CLIENT_ODU_EMB_CONS;
+ rt_rule->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT;
+ rt_rule->rules[0].rule.attrib.dst_port = 5002;
+ rt_rule->rules[0].rule.hashable = true;
+ if (ipa_add_rt_rule(rt_rule) || rt_rule->rules[0].status) {
+ IPA_UT_ERR("failed to install V4 rules\n");
+ ret = -EFAULT;
+ goto free_flt;
+ }
+ if (ipa_get_rt_tbl(&rt_lookup)) {
+ IPA_UT_ERR("failed to query V4 rules\n");
+ return -EFAULT;
+ }
+ ctx->rt4_odu_cons = rt_lookup.hdl;
+
+ memset(rt_rule, 0, sizeof(*rt_rule));
+ rt_rule->commit = 1;
+ rt_rule->ip = IPA_IP_v6;
+ rt_lookup.ip = rt_rule->ip;
+ strlcpy(rt_rule->rt_tbl_name, "V6_RT_TO_ODU_CONS",
+ IPA_RESOURCE_NAME_MAX);
+ strlcpy(rt_lookup.name, rt_rule->rt_tbl_name, IPA_RESOURCE_NAME_MAX);
+ rt_rule->num_rules = 1;
+ rt_rule->rules[0].rule.dst = IPA_CLIENT_ODU_EMB_CONS;
+ rt_rule->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT;
+ rt_rule->rules[0].rule.attrib.dst_port = 5002;
+ rt_rule->rules[0].rule.hashable = true;
+ if (ipa_add_rt_rule(rt_rule) || rt_rule->rules[0].status) {
+ IPA_UT_ERR("failed to install V4 rules\n");
+ ret = -EFAULT;
+ goto free_flt;
+ }
+ if (ipa_get_rt_tbl(&rt_lookup)) {
+ IPA_UT_ERR("failed to query V4 rules\n");
+ ret = -EFAULT;
+ goto free_flt;
+ }
+ ctx->rt6_odu_cons = rt_lookup.hdl;
+
+ flt_rule->commit = 1;
+ flt_rule->ip = IPA_IP_v4;
+ flt_rule->ep = IPA_CLIENT_USB_PROD;
+ flt_rule->num_rules = 1;
+ flt_rule->rules[0].at_rear = 1;
+ flt_rule->rules[0].rule.action = IPA_PASS_TO_ROUTING;
+ flt_rule->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT;
+ flt_rule->rules[0].rule.attrib.dst_port = 5002;
+ flt_rule->rules[0].rule.rt_tbl_hdl = ctx->rt4_odu_cons;
+ flt_rule->rules[0].rule.hashable = 1;
+ if (ipa_add_flt_rule(flt_rule) || flt_rule->rules[0].status) {
+ IPA_UT_ERR("failed to install V4 rules\n");
+ ret = -EFAULT;
+ goto free_flt;
+ }
+
+ memset(flt_rule, 0, sizeof(*flt_rule));
+ flt_rule->commit = 1;
+ flt_rule->ip = IPA_IP_v6;
+ flt_rule->ep = IPA_CLIENT_USB_PROD;
+ flt_rule->num_rules = 1;
+ flt_rule->rules[0].at_rear = 1;
+ flt_rule->rules[0].rule.action = IPA_PASS_TO_ROUTING;
+ flt_rule->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT;
+ flt_rule->rules[0].rule.attrib.dst_port = 5002;
+ flt_rule->rules[0].rule.rt_tbl_hdl = ctx->rt6_odu_cons;
+ flt_rule->rules[0].rule.hashable = 1;
+ if (ipa_add_flt_rule(flt_rule) || flt_rule->rules[0].status) {
+ IPA_UT_ERR("failed to install V6 rules\n");
+ ret = -EFAULT;
+ goto free_flt;
+ }
+
+ memset(flt_rule, 0, sizeof(*flt_rule));
+ flt_rule->commit = 1;
+ flt_rule->ip = IPA_IP_v4;
+ flt_rule->ep = IPA_CLIENT_ODU_PROD;
+ flt_rule->num_rules = 1;
+ flt_rule->rules[0].at_rear = 1;
+ flt_rule->rules[0].rule.action = IPA_PASS_TO_ROUTING;
+ flt_rule->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT;
+ flt_rule->rules[0].rule.attrib.dst_port = 5002;
+ flt_rule->rules[0].rule.rt_tbl_hdl = ctx->rt4_usb;
+ flt_rule->rules[0].rule.hashable = 1;
+ if (ipa_add_flt_rule(flt_rule) || flt_rule->rules[0].status) {
+ IPA_UT_ERR("failed to install V4 rules\n");
+ ret = -EFAULT;
+ goto free_flt;
+ }
+
+ memset(flt_rule, 0, sizeof(*flt_rule));
+ flt_rule->commit = 1;
+ flt_rule->ip = IPA_IP_v6;
+ flt_rule->ep = IPA_CLIENT_ODU_PROD;
+ flt_rule->num_rules = 1;
+ flt_rule->rules[0].at_rear = 1;
+ flt_rule->rules[0].rule.action = IPA_PASS_TO_ROUTING;
+ flt_rule->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT;
+ flt_rule->rules[0].rule.attrib.dst_port = 5002;
+ flt_rule->rules[0].rule.rt_tbl_hdl = ctx->rt6_usb;
+ flt_rule->rules[0].rule.hashable = 1;
+ if (ipa_add_flt_rule(flt_rule) || flt_rule->rules[0].status) {
+ IPA_UT_ERR("failed to install V6 rules\n");
+ ret = -EFAULT;
+ goto free_flt;
+ }
+
+ IPA_UT_INFO(
+ "Rules added. Please start data transfer on ports 5001/5002\n");
+ ret = 0;
+free_flt:
+ kfree(flt_rule);
+free_rt:
+ kfree(rt_rule);
+ return ret;
+
+}
+
/* Suite definition block */
IPA_UT_DEFINE_SUITE_START(hw_stats, "HW stats test",
	ipa_test_hw_stats_suite_setup, ipa_test_hw_stats_suite_teardown)
{
	/* step 1: bring up the ODU pipes (operator must attach USB RNDIS) */
	IPA_UT_ADD_TEST(configure, "Configure the setup",
		ipa_test_hw_stats_configure, false, IPA_HW_v4_0, IPA_HW_MAX),

	/* step 2: install the FLT/RT rules the stats HW will count against */
	IPA_UT_ADD_TEST(add_rules, "Add FLT and RT rules",
		ipa_test_hw_stats_add_FnR, false, IPA_HW_v4_0, IPA_HW_MAX),

} IPA_UT_DEFINE_SUITE_END(hw_stats);
diff --git a/drivers/platform/msm/ipa/test/ipa_test_mhi.c b/drivers/platform/msm/ipa/test/ipa_test_mhi.c
index 5a41d64..3a89c7d 100644
--- a/drivers/platform/msm/ipa/test/ipa_test_mhi.c
+++ b/drivers/platform/msm/ipa/test/ipa_test_mhi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -326,6 +326,7 @@
struct ipa_mem_buffer out_buffer;
u32 prod_hdl;
u32 cons_hdl;
+ u32 test_prod_hdl;
};
static struct ipa_test_mhi_context *test_mhi_ctx;
@@ -774,6 +775,7 @@
static int ipa_test_mhi_suite_setup(void **ppriv)
{
int rc = 0;
+ struct ipa_sys_connect_params sys_in;
IPA_UT_DBG("Start Setup\n");
@@ -815,9 +817,22 @@
goto fail_free_mmio_spc;
}
+ /* connect PROD pipe for remote wakeup */
+ memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+ sys_in.client = IPA_CLIENT_TEST_PROD;
+ sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+ sys_in.ipa_ep_cfg.mode.mode = IPA_DMA;
+ sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_MHI_CONS;
+ if (ipa_setup_sys_pipe(&sys_in, &test_mhi_ctx->test_prod_hdl)) {
+ IPA_UT_ERR("setup sys pipe failed.\n");
+ goto fail_destroy_data_structures;
+ }
+
*ppriv = test_mhi_ctx;
return 0;
+fail_destroy_data_structures:
+ ipa_mhi_test_destroy_data_structures();
fail_free_mmio_spc:
ipa_test_mhi_free_mmio_space();
fail_iounmap:
@@ -838,6 +853,7 @@
if (!test_mhi_ctx)
return 0;
+ ipa_teardown_sys_pipe(test_mhi_ctx->test_prod_hdl);
ipa_mhi_test_destroy_data_structures();
ipa_test_mhi_free_mmio_space();
iounmap(test_mhi_ctx->gsi_mmio);
@@ -1811,7 +1827,7 @@
memset(test_mhi_ctx->out_buffer.base + i, i & 0xFF, 1);
}
- rc = ipa_tx_dp(IPA_CLIENT_MHI_CONS, skb, NULL);
+ rc = ipa_tx_dp(IPA_CLIENT_TEST_PROD, skb, NULL);
if (rc) {
IPA_UT_LOG("ipa_tx_dp failed %d\n", rc);
IPA_UT_TEST_FAIL_REPORT("ipa tx dp fail");
@@ -1982,7 +1998,7 @@
memset(test_mhi_ctx->out_buffer.base + i, i & 0xFF, 1);
}
- rc = ipa_tx_dp(IPA_CLIENT_MHI_CONS, skb, NULL);
+ rc = ipa_tx_dp(IPA_CLIENT_TEST_PROD, skb, NULL);
if (rc) {
IPA_UT_LOG("ipa_tx_dp failed %d\n", rc);
IPA_UT_TEST_FAIL_REPORT("ipa tx dp fail");
diff --git a/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h b/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h
index 4a9d3b0..823edcf 100644
--- a/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h
+++ b/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h
@@ -23,6 +23,7 @@
IPA_UT_DECLARE_SUITE(mhi);
IPA_UT_DECLARE_SUITE(dma);
IPA_UT_DECLARE_SUITE(example);
+IPA_UT_DECLARE_SUITE(hw_stats);
/**
@@ -34,6 +35,7 @@
IPA_UT_REGISTER_SUITE(mhi),
IPA_UT_REGISTER_SUITE(dma),
IPA_UT_REGISTER_SUITE(example),
+ IPA_UT_REGISTER_SUITE(hw_stats),
} IPA_UT_DEFINE_ALL_SUITES_END;
#endif /* _IPA_UT_SUITE_LIST_H_ */
diff --git a/drivers/platform/msm/qcom-geni-se.c b/drivers/platform/msm/qcom-geni-se.c
index 7c77280..c1e77aa 100644
--- a/drivers/platform/msm/qcom-geni-se.c
+++ b/drivers/platform/msm/qcom-geni-se.c
@@ -691,7 +691,7 @@
struct se_geni_rsc *rsc)
{
unsigned long flags;
- struct se_geni_rsc *tmp;
+ struct se_geni_rsc *tmp = NULL;
struct list_head *ins_list_head;
bool bus_bw_update = false;
int ret = 0;
@@ -709,7 +709,7 @@
list_add(&rsc->ib_list, ins_list_head);
/* Currently inserted node has greater average BW value */
if (ins_list_head == &geni_se_dev->ib_list_head)
- geni_se_dev->cur_ib = tmp->ib;
+ geni_se_dev->cur_ib = rsc->ib;
bus_bw_update = geni_se_check_bus_bw(geni_se_dev);
spin_unlock_irqrestore(&geni_se_dev->ab_ib_lock, flags);
diff --git a/drivers/platform/msm/seemp_core/seemp_logk.c b/drivers/platform/msm/seemp_core/seemp_logk.c
index ce073ed..a23f069 100644
--- a/drivers/platform/msm/seemp_core/seemp_logk.c
+++ b/drivers/platform/msm/seemp_core/seemp_logk.c
@@ -13,6 +13,11 @@
#define pr_fmt(fmt) "seemp: %s: " fmt, __func__
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/seemp_instrumentation.h>
+#include <soc/qcom/scm.h>
+
#include "seemp_logk.h"
#include "seemp_ringbuf.h"
@@ -24,6 +29,9 @@
#define FOUR_MB 4
#define YEAR_BASE 1900
+#define EL2_SCM_ID 0x02001902
+#define KP_EL2_REPORT_REVISION 0x01000101
+
static struct seemp_logk_dev *slogk_dev;
static unsigned int ring_sz = FOUR_MB;
@@ -49,11 +57,15 @@
static struct seemp_source_mask *pmask;
static unsigned int num_sources;
+static void *el2_shared_mem;
+static struct task_struct *rtic_thread;
+
static long seemp_logk_reserve_rdblks(
struct seemp_logk_dev *sdev, unsigned long arg);
static long seemp_logk_set_mask(unsigned long arg);
static long seemp_logk_set_mapping(unsigned long arg);
static long seemp_logk_check_filter(unsigned long arg);
+static int seemp_logk_rtic_thread(void *data);
void* (*seemp_logk_kernel_begin)(char **buf);
@@ -289,7 +301,7 @@
unsigned int bit_num = index%8;
unsigned char byte;
- if (DIV_ROUND_UP(index, 8) > MASK_BUFFER_SIZE)
+ if (byte_num >= MASK_BUFFER_SIZE)
return false;
byte = pVec[byte_num];
@@ -569,6 +581,15 @@
}
}
+ if (!rtic_thread && el2_shared_mem) {
+ rtic_thread = kthread_run(seemp_logk_rtic_thread,
+ NULL, "seemp_logk_rtic_thread");
+ if (IS_ERR(rtic_thread)) {
+ pr_err("rtic_thread creation failed");
+ rtic_thread = NULL;
+ }
+ }
+
return 0;
}
@@ -580,10 +601,64 @@
.mmap = seemp_logk_mmap,
};
/*
 * Kernel thread that polls the EL2 shared-memory report page once per
 * second and forwards new incident reports to seemp_logk_rtic().
 *
 * Layout of the page: one struct el2_report_header_t followed by as many
 * struct el2_report_data_t entries as fit in the remainder of PAGE_SIZE.
 * The entries form a ring; scanning resumes after the last consumed slot
 * (last_pos) and stops at the first invalid or stale entry.
 *
 * Returns -EINVAL if the report header's version predates
 * KP_EL2_REPORT_REVISION, 0 when the thread is asked to stop.
 */
static int seemp_logk_rtic_thread(void *data)
{
	struct el2_report_header_t *header;
	__u64 last_sequence_number = 0;
	/* index of the last entry consumed; -1 means start of the ring */
	int last_pos = -1;
	int i;
	int num_entries = (PAGE_SIZE - sizeof(struct el2_report_header_t))
		/ sizeof(struct el2_report_data_t);
	header = (struct el2_report_header_t *) el2_shared_mem;

	if (header->report_version < KP_EL2_REPORT_REVISION)
		return -EINVAL;

	while (!kthread_should_stop()) {
		/* walk at most one full ring, starting after last_pos */
		for (i = 1; i < num_entries + 1; i++) {
			struct el2_report_data_t *report;
			int cur_pos = last_pos + i;

			if (cur_pos >= num_entries)
				cur_pos -= num_entries;

			report = el2_shared_mem +
				sizeof(struct el2_report_header_t) +
				cur_pos * sizeof(struct el2_report_data_t);

			/* determine legitimacy of report */
			if (report->report_valid &&
				(last_sequence_number == 0
				|| report->sequence_number >
				last_sequence_number)) {
				/* NOTE(review): report->actor is cast to a
				 * task_struct pointer and dereferenced;
				 * assumes EL2 writes a valid kernel pointer
				 * here - confirm against the EL2 contract.
				 */
				seemp_logk_rtic(report->report_type,
					((struct task_struct *) report->actor)
					->pid,
					/* leave this empty until
					 * asset id is provided
					 */
					"",
					report->asset_category,
					report->response);
				last_sequence_number = report->sequence_number;
			} else {
				/* first stale/invalid slot: resume here next
				 * pass
				 */
				last_pos = cur_pos - 1;
				break;
			}
		}

		/* periodically check el2 report every second */
		ssleep(1);
	}

	return 0;
}
+
__init int seemp_logk_init(void)
{
int ret;
int devno = 0;
+ struct scm_desc desc = {0};
num_sources = 0;
kmalloc_flag = 0;
@@ -650,6 +725,21 @@
init_waitqueue_head(&slogk_dev->readers_wq);
init_waitqueue_head(&slogk_dev->writers_wq);
rwlock_init(&filter_lock);
+
+ el2_shared_mem = (void *) __get_free_page(GFP_KERNEL);
+ if (el2_shared_mem) {
+ desc.arginfo = SCM_ARGS(2, SCM_RW, SCM_VAL);
+ desc.args[0] = (uint64_t) virt_to_phys(el2_shared_mem);
+ desc.args[1] = PAGE_SIZE;
+ ret = scm_call2(EL2_SCM_ID, &desc);
+ if (ret || desc.ret[0] || desc.ret[1]) {
+ pr_err("SCM call failed with ret val = %d %d %d",
+ ret, (int)desc.ret[0], (int)desc.ret[1]);
+ free_page((unsigned long) el2_shared_mem);
+ el2_shared_mem = NULL;
+ }
+ }
+
return 0;
class_destroy_fail:
class_destroy(cl);
@@ -666,6 +756,11 @@
{
dev_t devno = MKDEV(slogk_dev->major, slogk_dev->minor);
+ if (rtic_thread) {
+ kthread_stop(rtic_thread);
+ rtic_thread = NULL;
+ }
+
seemp_logk_detach();
cdev_del(&slogk_dev->cdev);
diff --git a/drivers/platform/msm/seemp_core/seemp_logk.h b/drivers/platform/msm/seemp_core/seemp_logk.h
index 1a41d4c..eecf4f7 100644
--- a/drivers/platform/msm/seemp_core/seemp_logk.h
+++ b/drivers/platform/msm/seemp_core/seemp_logk.h
@@ -158,4 +158,29 @@
__u32 hash;
bool isOn;
};
+
+/* report region header */
+struct el2_report_header_t {
+ __u64 report_version; /* Version of the EL2 report */
+ __u64 mp_catalog_version;
+ /* Version of MP catalogue used for kernel protection */
+ __u64 num_incidents; /* Number of Incidents Observed by EL2 */
+ __u8 protection_enabled; /* Kernel Assets protected by EL2 */
+ __u8 pad1;
+ __u8 pad2;
+ __u8 pad3;
+ __u32 pad4;
+};
+
+/* individual report */
+struct el2_report_data_t {
+ __u64 sequence_number; /* Sequence number of the report */
+ __u64 actor; /* Actor that caused the Incident. */
+ __u8 report_valid;
+ /* Flag to indicate whether report instance is valid */
+ __u8 report_type; /* Report Type */
+ __u8 asset_category; /* Asset Category */
+ __u8 response; /* Response From EL2 */
+};
+
#endif
diff --git a/drivers/platform/msm/usb_bam.c b/drivers/platform/msm/usb_bam.c
index 62e0978..92321ad 100644
--- a/drivers/platform/msm/usb_bam.c
+++ b/drivers/platform/msm/usb_bam.c
@@ -3095,84 +3095,6 @@
return 0;
}
-static ssize_t
-usb_bam_show_inactivity_timer(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- char *buff = buf;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(bam_enable_strings); i++) {
- buff += snprintf(buff, PAGE_SIZE, "%s: %dms\n",
- bam_enable_strings[i],
- msm_usb_bam[i].inactivity_timer_ms);
- }
-
- return buff - buf;
-}
-
-static ssize_t usb_bam_store_inactivity_timer(struct device *dev,
- struct device_attribute *attr,
- const char *buff, size_t count)
-{
- char buf[USB_BAM_MAX_STR_LEN];
- char *trimmed_buf, *bam_str, *bam_name, *timer;
- int timer_d;
- int bam, ret;
-
- if (strnstr(buff, "help", USB_BAM_MAX_STR_LEN)) {
- pr_info("Usage: <bam_name> <ms>,<bam_name> <ms>,...\n");
- pr_info("\tbam_name: [%s, %s, %s]\n",
- bam_enable_strings[DWC3_CTRL],
- bam_enable_strings[CI_CTRL],
- bam_enable_strings[HSIC_CTRL]);
- pr_info("\tms: time in ms. Use 0 to disable timer\n");
- return count;
- }
-
- strlcpy(buf, buff, sizeof(buf));
- trimmed_buf = strim(buf);
-
- while (trimmed_buf) {
- bam_str = strsep(&trimmed_buf, ",");
- if (bam_str) {
- bam_name = strsep(&bam_str, " ");
- bam = get_bam_type_from_core_name(bam_name);
- if (bam < 0 || bam >= MAX_BAMS) {
- log_event_err("%s: Invalid bam, type=%d ,name=%s\n",
- __func__, bam, bam_name);
- return -EINVAL;
- }
-
- timer = strsep(&bam_str, " ");
-
- if (!timer)
- continue;
-
- ret = kstrtoint(timer, 0, &timer_d);
- if (ret) {
- log_event_err("%s: err:%d with value:(%d)\n",
- __func__, ret, timer_d);
- return ret;
- }
-
- /* Apply new timer setting if bam has running pipes */
- if (msm_usb_bam[bam].inactivity_timer_ms != timer_d) {
- msm_usb_bam[bam].inactivity_timer_ms = timer_d;
- if (msm_usb_bam[bam].pipes_enabled_per_bam > 0
- && !info[bam].in_lpm)
- usb_bam_set_inactivity_timer(bam);
- }
- }
- }
-
- return count;
-}
-
-static DEVICE_ATTR(inactivity_timer, 0600,
- usb_bam_show_inactivity_timer,
- usb_bam_store_inactivity_timer);
-
static int usb_bam_panic_notifier(struct notifier_block *this,
unsigned long event, void *ptr)
{
@@ -3221,12 +3143,6 @@
dev_dbg(&pdev->dev, "usb_bam_probe\n");
- ret = device_create_file(&pdev->dev, &dev_attr_inactivity_timer);
- if (ret) {
- dev_err(&pdev->dev, "failed to create fs node\n");
- return ret;
- }
-
io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!io_res) {
dev_err(&pdev->dev, "missing BAM memory resource\n");
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index a7614fc..2f1615e 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -813,6 +813,7 @@
case 8:
case 7:
case 6:
+ case 1:
ideapad_input_report(priv, vpc_bit);
break;
case 5:
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index 8c43c4e..08e1505 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -46,7 +46,7 @@
static char *type_text[] = {
"Unknown", "Battery", "UPS", "Mains", "USB", "USB_DCP",
"USB_CDP", "USB_ACA", "USB_HVDCP", "USB_HVDCP_3", "USB_PD",
- "Wireless", "BMS", "Parallel", "Main", "Wipower",
+ "Wireless", "USB_FLOAT", "BMS", "Parallel", "Main", "Wipower",
"TYPEC", "TYPEC_UFP", "TYPEC_DFP"
};
static char *status_text[] = {
diff --git a/drivers/power/supply/qcom/Makefile b/drivers/power/supply/qcom/Makefile
index bc19b24..6cc83ab 100644
--- a/drivers/power/supply/qcom/Makefile
+++ b/drivers/power/supply/qcom/Makefile
@@ -2,6 +2,6 @@
obj-$(CONFIG_SMB135X_CHARGER) += smb135x-charger.o pmic-voter.o
obj-$(CONFIG_SMB1355_SLAVE_CHARGER) += smb1355-charger.o pmic-voter.o
obj-$(CONFIG_SMB1351_USB_CHARGER) += smb1351-charger.o pmic-voter.o battery.o
-obj-$(CONFIG_QPNP_SMB2) += qpnp-smb2.o smb-lib.o pmic-voter.o storm-watch.o battery.o
+obj-$(CONFIG_QPNP_SMB2) += step-chg-jeita.o battery.o qpnp-smb2.o smb-lib.o pmic-voter.o storm-watch.o
obj-$(CONFIG_SMB138X_CHARGER) += smb138x-charger.o smb-lib.o pmic-voter.o storm-watch.o battery.o
obj-$(CONFIG_QPNP_QNOVO) += qpnp-qnovo.o battery.o
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
index 8641a45..486e8c3 100644
--- a/drivers/power/supply/qcom/battery.c
+++ b/drivers/power/supply/qcom/battery.c
@@ -41,6 +41,7 @@
#define ICL_CHANGE_VOTER "ICL_CHANGE_VOTER"
#define PL_INDIRECT_VOTER "PL_INDIRECT_VOTER"
#define USBIN_I_VOTER "USBIN_I_VOTER"
+#define FCC_CHANGE_VOTER "FCC_CHANGE_VOTER"
struct pl_data {
int pl_mode;
@@ -288,69 +289,11 @@
__ATTR_NULL,
};
-/***********
- * TAPER *
- ************/
-#define MINIMUM_PARALLEL_FCC_UA 500000
-#define PL_TAPER_WORK_DELAY_MS 100
-#define TAPER_RESIDUAL_PCT 75
-static void pl_taper_work(struct work_struct *work)
-{
- struct pl_data *chip = container_of(work, struct pl_data,
- pl_taper_work.work);
- union power_supply_propval pval = {0, };
- int rc;
-
- /* exit immediately if parallel is disabled */
- if (get_effective_result(chip->pl_disable_votable)) {
- pl_dbg(chip, PR_PARALLEL, "terminating parallel not in progress\n");
- goto done;
- }
-
- pl_dbg(chip, PR_PARALLEL, "entering parallel taper work slave_fcc = %d\n",
- chip->slave_fcc_ua);
- if (chip->slave_fcc_ua < MINIMUM_PARALLEL_FCC_UA) {
- pl_dbg(chip, PR_PARALLEL, "terminating parallel's share lower than 500mA\n");
- vote(chip->pl_disable_votable, TAPER_END_VOTER, true, 0);
- goto done;
- }
-
- rc = power_supply_get_property(chip->batt_psy,
- POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
- if (rc < 0) {
- pr_err("Couldn't get batt charge type rc=%d\n", rc);
- goto done;
- }
-
- chip->charge_type = pval.intval;
- if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER) {
- pl_dbg(chip, PR_PARALLEL, "master is taper charging; reducing slave FCC\n");
-
- vote(chip->pl_awake_votable, TAPER_END_VOTER, true, 0);
- /* Reduce the taper percent by 25 percent */
- chip->taper_pct = chip->taper_pct * TAPER_RESIDUAL_PCT / 100;
- rerun_election(chip->fcc_votable);
- pl_dbg(chip, PR_PARALLEL, "taper entry scheduling work after %d ms\n",
- PL_TAPER_WORK_DELAY_MS);
- schedule_delayed_work(&chip->pl_taper_work,
- msecs_to_jiffies(PL_TAPER_WORK_DELAY_MS));
- return;
- }
-
- /*
- * Master back to Fast Charge, get out of this round of taper reduction
- */
- pl_dbg(chip, PR_PARALLEL, "master is fast charging; waiting for next taper\n");
-
-done:
- vote(chip->pl_awake_votable, TAPER_END_VOTER, false, 0);
-}
-
/*********
* FCC *
**********/
#define EFFICIENCY_PCT 80
-static void split_fcc(struct pl_data *chip, int total_ua,
+static void get_fcc_split(struct pl_data *chip, int total_ua,
int *master_ua, int *slave_ua)
{
int rc, effective_total_ua, slave_limited_ua, hw_cc_delta_ua = 0,
@@ -389,7 +332,7 @@
effective_total_ua = max(0, total_ua + hw_cc_delta_ua);
slave_limited_ua = min(effective_total_ua, bcl_ua);
*slave_ua = (slave_limited_ua * chip->slave_pct) / 100;
- *slave_ua = (*slave_ua * chip->taper_pct) / 100;
+
/*
* In USBIN_USBIN configuration with internal rsense parallel
* charger's current goes through main charger's BATFET, keep
@@ -399,14 +342,75 @@
*master_ua = max(0, total_ua);
else
*master_ua = max(0, total_ua - *slave_ua);
+
+ /* further reduce slave's share in accordance with taper reductions */
+ *slave_ua = (*slave_ua * chip->taper_pct) / 100;
+}
+
+#define MINIMUM_PARALLEL_FCC_UA 500000
+#define PL_TAPER_WORK_DELAY_MS 100
+#define TAPER_RESIDUAL_PCT 90
+static void pl_taper_work(struct work_struct *work)
+{
+ struct pl_data *chip = container_of(work, struct pl_data,
+ pl_taper_work.work);
+ union power_supply_propval pval = {0, };
+ int total_fcc_ua, master_fcc_ua, slave_fcc_ua;
+ int rc;
+
+ /* exit immediately if parallel is disabled */
+ if (get_effective_result(chip->pl_disable_votable)) {
+ pl_dbg(chip, PR_PARALLEL, "terminating parallel not in progress\n");
+ goto done;
+ }
+
+ total_fcc_ua = get_effective_result_locked(chip->fcc_votable);
+ get_fcc_split(chip, total_fcc_ua, &master_fcc_ua, &slave_fcc_ua);
+ if (slave_fcc_ua < MINIMUM_PARALLEL_FCC_UA) {
+ pl_dbg(chip, PR_PARALLEL, "terminating parallel's share lower than 500mA\n");
+ vote(chip->pl_disable_votable, TAPER_END_VOTER, true, 0);
+ goto done;
+ }
+
+ pl_dbg(chip, PR_PARALLEL, "entering parallel taper work slave_fcc = %d\n",
+ slave_fcc_ua);
+
+ rc = power_supply_get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't get batt charge type rc=%d\n", rc);
+ goto done;
+ }
+
+ chip->charge_type = pval.intval;
+ if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER) {
+ pl_dbg(chip, PR_PARALLEL, "master is taper charging; reducing slave FCC\n");
+
+ vote(chip->pl_awake_votable, TAPER_END_VOTER, true, 0);
+ /* Reduce the taper percent by 10 percent */
+ chip->taper_pct = chip->taper_pct * TAPER_RESIDUAL_PCT / 100;
+ rerun_election(chip->fcc_votable);
+ pl_dbg(chip, PR_PARALLEL, "taper entry scheduling work after %d ms\n",
+ PL_TAPER_WORK_DELAY_MS);
+ schedule_delayed_work(&chip->pl_taper_work,
+ msecs_to_jiffies(PL_TAPER_WORK_DELAY_MS));
+ return;
+ }
+
+ /*
+ * Master back to Fast Charge, get out of this round of taper reduction
+ */
+ pl_dbg(chip, PR_PARALLEL, "master is fast charging; waiting for next taper\n");
+
+done:
+ vote(chip->pl_awake_votable, TAPER_END_VOTER, false, 0);
}
static int pl_fcc_vote_callback(struct votable *votable, void *data,
int total_fcc_ua, const char *client)
{
struct pl_data *chip = data;
- union power_supply_propval pval = {0, };
- int rc, master_fcc_ua = total_fcc_ua, slave_fcc_ua = 0;
+ int master_fcc_ua = total_fcc_ua, slave_fcc_ua = 0;
if (total_fcc_ua < 0)
return 0;
@@ -414,41 +418,23 @@
if (!chip->main_psy)
return 0;
- if (chip->pl_mode == POWER_SUPPLY_PL_NONE
- || get_effective_result_locked(chip->pl_disable_votable)) {
- pval.intval = total_fcc_ua;
- rc = power_supply_set_property(chip->main_psy,
- POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
- &pval);
- if (rc < 0)
- pr_err("Couldn't set main fcc, rc=%d\n", rc);
- return rc;
- }
-
if (chip->pl_mode != POWER_SUPPLY_PL_NONE) {
- split_fcc(chip, total_fcc_ua, &master_fcc_ua, &slave_fcc_ua);
+ get_fcc_split(chip, total_fcc_ua, &master_fcc_ua,
+ &slave_fcc_ua);
- pval.intval = slave_fcc_ua;
- rc = power_supply_set_property(chip->pl_psy,
- POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
- &pval);
- if (rc < 0) {
- pr_err("Couldn't set parallel fcc, rc=%d\n", rc);
- return rc;
- }
-
- chip->slave_fcc_ua = slave_fcc_ua;
-
- pval.intval = master_fcc_ua;
- rc = power_supply_set_property(chip->main_psy,
- POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
- &pval);
- if (rc < 0) {
- pr_err("Could not set main fcc, rc=%d\n", rc);
- return rc;
+ if (slave_fcc_ua > MINIMUM_PARALLEL_FCC_UA) {
+ chip->slave_fcc_ua = slave_fcc_ua;
+ vote(chip->pl_disable_votable, FCC_CHANGE_VOTER,
+ false, 0);
+ } else {
+ chip->slave_fcc_ua = 0;
+ vote(chip->pl_disable_votable, FCC_CHANGE_VOTER,
+ true, 0);
}
}
+ rerun_election(chip->pl_disable_votable);
+
pl_dbg(chip, PR_PARALLEL, "master_fcc=%d slave_fcc=%d distribution=(%d/%d)\n",
master_fcc_ua, slave_fcc_ua,
(master_fcc_ua * 100) / total_fcc_ua,
@@ -577,18 +563,34 @@
vote(chip->hvdcp_hw_inov_dis_votable, PL_VOTER, false, 0);
}
+static bool is_main_available(struct pl_data *chip)
+{
+ if (chip->main_psy)
+ return true;
+
+ chip->main_psy = power_supply_get_by_name("main");
+
+ return !!chip->main_psy;
+}
+
static int pl_disable_vote_callback(struct votable *votable,
void *data, int pl_disable, const char *client)
{
struct pl_data *chip = data;
union power_supply_propval pval = {0, };
+ int master_fcc_ua, total_fcc_ua, slave_fcc_ua;
int rc;
- chip->taper_pct = 100;
chip->total_settled_ua = 0;
chip->pl_settled_ua = 0;
- if (!pl_disable) { /* enable */
+ if (!is_main_available(chip))
+ return -ENODEV;
+
+ total_fcc_ua = get_effective_result_locked(chip->fcc_votable);
+
+ if (chip->pl_mode != POWER_SUPPLY_PL_NONE && !pl_disable) {
+ /* enable parallel charging */
rc = power_supply_get_property(chip->pl_psy,
POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
if (rc == -ENODEV) {
@@ -602,7 +604,30 @@
}
rerun_election(chip->fv_votable);
- rerun_election(chip->fcc_votable);
+
+ get_fcc_split(chip, total_fcc_ua, &master_fcc_ua,
+ &slave_fcc_ua);
+
+ chip->slave_fcc_ua = slave_fcc_ua;
+
+ pval.intval = master_fcc_ua;
+ rc = power_supply_set_property(chip->main_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ &pval);
+ if (rc < 0) {
+ pr_err("Could not set main fcc, rc=%d\n", rc);
+ return rc;
+ }
+
+ pval.intval = slave_fcc_ua;
+ rc = power_supply_set_property(chip->pl_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ &pval);
+ if (rc < 0) {
+ pr_err("Couldn't set parallel fcc, rc=%d\n", rc);
+ return rc;
+ }
+
/*
* Enable will be called with a valid pl_psy always. The
* PARALLEL_PSY_VOTER keeps it disabled unless a pl_psy
@@ -647,7 +672,17 @@
pr_err("Couldn't change slave suspend state rc=%d\n",
rc);
}
- rerun_election(chip->fcc_votable);
+
+ /* main psy gets all share */
+ pval.intval = total_fcc_ua;
+ rc = power_supply_set_property(chip->main_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ &pval);
+ if (rc < 0) {
+ pr_err("Could not set main fcc, rc=%d\n", rc);
+ return rc;
+ }
+
rerun_election(chip->fv_votable);
}
@@ -681,16 +716,6 @@
return 0;
}
-static bool is_main_available(struct pl_data *chip)
-{
- if (chip->main_psy)
- return true;
-
- chip->main_psy = power_supply_get_by_name("main");
-
- return !!chip->main_psy;
-}
-
static bool is_batt_available(struct pl_data *chip)
{
if (!chip->batt_psy)
@@ -835,6 +860,7 @@
else
vote(chip->pl_enable_votable_indirect, USBIN_I_VOTER, true, 0);
+ rerun_election(chip->fcc_votable);
if (get_effective_result(chip->pl_disable_votable))
return;
@@ -856,8 +882,6 @@
if (abs(new_total_settled_ua - chip->total_settled_ua)
> MIN_ICL_CHANGE_DELTA_UA)
split_settled(chip);
- } else {
- rerun_election(chip->fcc_votable);
}
}
diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h
index 7e6a4e8..dd5f78f 100644
--- a/drivers/power/supply/qcom/fg-core.h
+++ b/drivers/power/supply/qcom/fg-core.h
@@ -51,9 +51,12 @@
#define PROFILE_LOAD "fg_profile_load"
#define DELTA_SOC "fg_delta_soc"
-/* Delta BSOC votable reasons */
+/* Delta BSOC irq votable reasons */
#define DELTA_BSOC_IRQ_VOTER "fg_delta_bsoc_irq"
+/* Battery missing irq votable reasons */
+#define BATT_MISS_IRQ_VOTER "fg_batt_miss_irq"
+
#define DEBUG_PRINT_BUFFER_SIZE 64
/* 3 byte address + 1 space character */
#define ADDR_LEN 4
@@ -64,6 +67,7 @@
#define MAX_LINE_LENGTH (ADDR_LEN + (ITEMS_PER_LINE * \
CHARS_PER_ITEM) + 1) \
+#define NUM_PARTITIONS 3
#define FG_SRAM_ADDRESS_MAX 255
#define FG_SRAM_LEN 504
#define PROFILE_LEN 224
@@ -189,6 +193,18 @@
int val);
};
+struct fg_dma_address {
+ /* Starting word address of the partition */
+ u16 partition_start;
+ /* Last word address of the partition */
+ u16 partition_end;
+ /*
+ * Byte offset in the FG_DMA peripheral that maps to the partition_start
+ * in SRAM
+ */
+ u16 spmi_addr_base;
+};
+
enum fg_alg_flag_id {
ALG_FLAG_SOC_LT_OTG_MIN = 0,
ALG_FLAG_SOC_LT_RECHARGE,
@@ -262,6 +278,7 @@
int slope_limit_temp;
int esr_pulse_thresh_ma;
int esr_meas_curr_ma;
+ int bmd_en_delay_ms;
int jeita_thresholds[NUM_JEITA_LEVELS];
int ki_coeff_soc[KI_COEFF_SOC_LEVELS];
int ki_coeff_med_dischg[KI_COEFF_SOC_LEVELS];
@@ -357,11 +374,12 @@
struct power_supply *parallel_psy;
struct iio_channel *batt_id_chan;
struct iio_channel *die_temp_chan;
- struct fg_memif *sram;
struct fg_irq_info *irqs;
struct votable *awake_votable;
struct votable *delta_bsoc_irq_en_votable;
+ struct votable *batt_miss_irq_en_votable;
struct fg_sram_param *sp;
+ struct fg_dma_address *addr_map;
struct fg_alg_flag *alg_flags;
int *debug_mask;
char batt_profile[PROFILE_LEN];
@@ -405,8 +423,10 @@
bool esr_flt_cold_temp_en;
bool slope_limit_en;
bool use_ima_single_mode;
+ bool use_dma;
struct completion soc_update;
struct completion soc_ready;
+ struct completion mem_grant;
struct delayed_work profile_load_work;
struct work_struct status_change_work;
struct work_struct cycle_count_work;
@@ -455,10 +475,15 @@
u8 offset, u8 *val, int len);
extern int fg_interleaved_mem_write(struct fg_chip *chip, u16 address,
u8 offset, u8 *val, int len, bool atomic_access);
+extern int fg_direct_mem_read(struct fg_chip *chip, u16 address,
+ u8 offset, u8 *val, int len);
+extern int fg_direct_mem_write(struct fg_chip *chip, u16 address,
+ u8 offset, u8 *val, int len, bool atomic_access);
extern int fg_read(struct fg_chip *chip, int addr, u8 *val, int len);
extern int fg_write(struct fg_chip *chip, int addr, u8 *val, int len);
extern int fg_masked_write(struct fg_chip *chip, int addr, u8 mask, u8 val);
extern int fg_ima_init(struct fg_chip *chip);
+extern int fg_dma_init(struct fg_chip *chip);
extern int fg_clear_ima_errors_if_any(struct fg_chip *chip, bool check_hw_sts);
extern int fg_clear_dma_errors_if_any(struct fg_chip *chip);
extern int fg_debugfs_create(struct fg_chip *chip);
@@ -467,6 +492,7 @@
extern int64_t twos_compliment_extend(int64_t val, int s_bit_pos);
extern s64 fg_float_decode(u16 val);
extern bool is_input_present(struct fg_chip *chip);
+extern bool is_qnovo_en(struct fg_chip *chip);
extern void fg_circ_buf_add(struct fg_circ_buf *buf, int val);
extern void fg_circ_buf_clr(struct fg_circ_buf *buf);
extern int fg_circ_buf_avg(struct fg_circ_buf *buf, int *avg);
diff --git a/drivers/power/supply/qcom/fg-memif.c b/drivers/power/supply/qcom/fg-memif.c
index 8a949bf..0abc9df 100644
--- a/drivers/power/supply/qcom/fg-memif.c
+++ b/drivers/power/supply/qcom/fg-memif.c
@@ -746,6 +746,257 @@
return rc;
}
+#define MEM_GRANT_WAIT_MS 200
+static int fg_direct_mem_request(struct fg_chip *chip, bool request)
+{
+ int rc, ret;
+ u8 val, mask;
+ bool tried_again = false;
+
+ if (request)
+ reinit_completion(&chip->mem_grant);
+
+ mask = MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT;
+ val = request ? MEM_ACCESS_REQ_BIT : 0;
+ rc = fg_masked_write(chip, MEM_IF_MEM_INTF_CFG(chip), mask, val);
+ if (rc < 0) {
+ pr_err("failed to configure mem_if_mem_intf_cfg rc=%d\n", rc);
+ return rc;
+ }
+
+ mask = MEM_ARB_LO_LATENCY_EN_BIT | MEM_ARB_REQ_BIT;
+ val = request ? mask : 0;
+ rc = fg_masked_write(chip, MEM_IF_MEM_ARB_CFG(chip), mask, val);
+ if (rc < 0) {
+ pr_err("failed to configure mem_if_mem_arb_cfg rc:%d\n", rc);
+ return rc;
+ }
+
+ if (request)
+ pr_debug("requesting access\n");
+ else
+ pr_debug("releasing access\n");
+
+ if (!request)
+ return 0;
+
+wait:
+ ret = wait_for_completion_interruptible_timeout(
+ &chip->mem_grant, msecs_to_jiffies(MEM_GRANT_WAIT_MS));
+ /* If we were interrupted wait again one more time. */
+ if (ret <= 0) {
+ if ((ret == -ERESTARTSYS || ret == 0) && !tried_again) {
+ pr_debug("trying again, ret=%d\n", ret);
+ tried_again = true;
+ goto wait;
+ } else {
+ pr_err("wait for mem_grant timed out ret=%d\n",
+ ret);
+ }
+ }
+
+ if (ret <= 0) {
+ val = 0;
+ mask = MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT;
+ rc = fg_masked_write(chip, MEM_IF_MEM_INTF_CFG(chip), mask,
+ val);
+ if (rc < 0) {
+ pr_err("failed to configure mem_if_mem_intf_cfg rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ mask = MEM_ARB_LO_LATENCY_EN_BIT | MEM_ARB_REQ_BIT;
+ rc = fg_masked_write(chip, MEM_IF_MEM_ARB_CFG(chip), mask,
+ val);
+ if (rc < 0) {
+ pr_err("failed to configure mem_if_mem_arb_cfg rc:%d\n",
+ rc);
+ return rc;
+ }
+
+ return -ETIMEDOUT;
+ }
+
+ return rc;
+}
+
+static int fg_get_dma_address(struct fg_chip *chip, u16 sram_addr, u8 offset,
+ u16 *addr)
+{
+ int i;
+ u16 start_sram_addr, end_sram_addr;
+
+ for (i = 0; i < NUM_PARTITIONS; i++) {
+ start_sram_addr = chip->addr_map[i].partition_start;
+ end_sram_addr = chip->addr_map[i].partition_end;
+ if (sram_addr >= start_sram_addr &&
+ sram_addr <= end_sram_addr) {
+ *addr = chip->addr_map[i].spmi_addr_base + offset +
+ (sram_addr - start_sram_addr) *
+ BYTES_PER_SRAM_WORD;
+ return 0;
+ }
+ }
+
+ pr_err("Couldn't find address for %d from address map\n", sram_addr);
+ return -ENXIO;
+}
+
+static int fg_get_partition_count(struct fg_chip *chip, u16 sram_addr, int len,
+ int *count)
+{
+ int i, num = 0;
+ u16 end_addr, last_addr = 0;
+
+ end_addr = sram_addr + len / BYTES_PER_SRAM_WORD;
+ if (!(len % BYTES_PER_SRAM_WORD))
+ end_addr -= 1;
+
+ if (sram_addr == end_addr) {
+ *count = 1;
+ return 0;
+ }
+
+ for (i = 0; i < NUM_PARTITIONS; i++) {
+ pr_debug("address: %d last_addr: %d\n", sram_addr, last_addr);
+ if (sram_addr >= chip->addr_map[i].partition_start
+ && sram_addr <= chip->addr_map[i].partition_end
+ && last_addr < end_addr) {
+ num++;
+ last_addr = chip->addr_map[i].partition_end;
+ sram_addr = chip->addr_map[i+1].partition_start;
+ }
+ }
+
+ if (num > 0) {
+ *count = num;
+ return 0;
+ }
+
+ pr_err("Couldn't find number of partitions for address %d\n",
+ sram_addr);
+ return -ENXIO;
+}
+
+static int fg_get_partition_avail_bytes(struct fg_chip *chip, u16 sram_addr,
+ int len, int *rem_len)
+{
+ int i, part_len = 0, temp;
+ u16 end_addr;
+
+ for (i = 0; i < NUM_PARTITIONS; i++) {
+ if (sram_addr >= chip->addr_map[i].partition_start
+ && sram_addr <= chip->addr_map[i].partition_end) {
+ part_len = (chip->addr_map[i].partition_end -
+ chip->addr_map[i].partition_start + 1);
+ part_len *= BYTES_PER_SRAM_WORD;
+ end_addr = chip->addr_map[i].partition_end;
+ break;
+ }
+ }
+
+ if (part_len <= 0) {
+ pr_err("Bad address? total_len=%d\n", part_len);
+ return -ENXIO;
+ }
+
+ temp = (end_addr - sram_addr + 1) * BYTES_PER_SRAM_WORD;
+ if (temp > part_len || !temp) {
+ pr_err("Bad length=%d\n", temp);
+ return -ENXIO;
+ }
+
+ *rem_len = temp;
+ pr_debug("address %d len %d rem_len %d\n", sram_addr, len, *rem_len);
+ return 0;
+}
+
+static int __fg_direct_mem_rw(struct fg_chip *chip, u16 sram_addr, u8 offset,
+ u8 *val, int len, bool access)
+{
+ int rc, ret, num_partitions, num_bytes = 0;
+ u16 addr;
+ u8 *ptr = val;
+ char *temp_str;
+
+ if (offset > 3) {
+ pr_err("offset too large %d\n", offset);
+ return -EINVAL;
+ }
+
+ rc = fg_get_partition_count(chip, sram_addr, len, &num_partitions);
+ if (rc < 0)
+ return rc;
+
+ pr_debug("number of partitions: %d\n", num_partitions);
+
+ rc = fg_direct_mem_request(chip, true);
+ if (rc < 0) {
+ pr_err("Error in requesting direct_mem access rc=%d\n", rc);
+ return rc;
+ }
+
+ while (num_partitions-- && len) {
+ rc = fg_get_dma_address(chip, sram_addr, offset, &addr);
+ if (rc < 0) {
+ pr_err("Incorrect address %d/offset %d\n", sram_addr,
+ offset);
+ break;
+ }
+
+ rc = fg_get_partition_avail_bytes(chip, sram_addr + offset, len,
+ &num_bytes);
+ if (rc < 0)
+ break;
+
+ if (num_bytes > len)
+ num_bytes = len;
+
+ pr_debug("reading from address: [%d %d] dma_address = %x\n",
+ sram_addr, offset, addr);
+
+ if (access == FG_READ) {
+ rc = fg_read(chip, addr, ptr, num_bytes);
+ temp_str = "read";
+ } else {
+ rc = fg_write(chip, addr, ptr, num_bytes);
+ temp_str = "write";
+ }
+
+ if (rc < 0) {
+ pr_err("Error in %sing address %d rc=%d\n", temp_str,
+ sram_addr, rc);
+ break;
+ }
+
+ ptr += num_bytes;
+ len -= num_bytes;
+ sram_addr += (num_bytes / BYTES_PER_SRAM_WORD);
+ offset = 0;
+ }
+
+ ret = fg_direct_mem_request(chip, false);
+ if (ret < 0) {
+ pr_err("Error in releasing direct_mem access rc=%d\n", ret);
+ return ret;
+ }
+
+ return rc;
+}
+
+int fg_direct_mem_read(struct fg_chip *chip, u16 sram_addr, u8 offset,
+ u8 *val, int len)
+{
+ return __fg_direct_mem_rw(chip, sram_addr, offset, val, len, FG_READ);
+}
+
+int fg_direct_mem_write(struct fg_chip *chip, u16 sram_addr, u8 offset,
+ u8 *val, int len, bool atomic_access)
+{
+ return __fg_direct_mem_rw(chip, sram_addr, offset, val, len, FG_WRITE);
+}
+
int fg_ima_init(struct fg_chip *chip)
{
int rc;
@@ -778,3 +1029,59 @@
return 0;
}
+
+/*
+ * This SRAM partition to DMA address partition mapping remains identical for
+ * PMICs that use GEN3 FG.
+ */
+static struct fg_dma_address fg_gen3_addr_map[NUM_PARTITIONS] = {
+ /* system partition */
+ {
+ .partition_start = 0,
+ .partition_end = 23,
+ .spmi_addr_base = FG_DMA0_BASE + SRAM_ADDR_OFFSET,
+ },
+ /* battery profile partition */
+ {
+ .partition_start = 24,
+ .partition_end = 79,
+ .spmi_addr_base = FG_DMA1_BASE + SRAM_ADDR_OFFSET,
+ },
+ /* scratch pad partition */
+ {
+ .partition_start = 80,
+ .partition_end = 125,
+ .spmi_addr_base = FG_DMA2_BASE + SRAM_ADDR_OFFSET,
+ },
+};
+int fg_dma_init(struct fg_chip *chip)
+{
+ int rc;
+
+ chip->addr_map = fg_gen3_addr_map;
+
+ /* Clear DMA errors if any before clearing IMA errors */
+ rc = fg_clear_dma_errors_if_any(chip);
+ if (rc < 0) {
+ pr_err("Error in checking DMA errors rc:%d\n", rc);
+ return rc;
+ }
+
+ /* Configure the DMA peripheral addressing to partition */
+ rc = fg_masked_write(chip, MEM_IF_DMA_CTL(chip), ADDR_KIND_BIT,
+ ADDR_KIND_BIT);
+ if (rc < 0) {
+ pr_err("failed to configure DMA_CTL rc:%d\n", rc);
+ return rc;
+ }
+
+ /* Release the DMA initially so that request can happen */
+ rc = fg_direct_mem_request(chip, false);
+ if (rc < 0) {
+ pr_err("Error in releasing direct_mem access rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ return 0;
+}
diff --git a/drivers/power/supply/qcom/fg-reg.h b/drivers/power/supply/qcom/fg-reg.h
index cd0b2fb..8ca4376 100644
--- a/drivers/power/supply/qcom/fg-reg.h
+++ b/drivers/power/supply/qcom/fg-reg.h
@@ -31,6 +31,7 @@
#define BATT_SOC_LOW_PWR_STS(chip) (chip->batt_soc_base + 0x56)
/* BATT_SOC_INT_RT_STS */
+#define SOC_READY_BIT BIT(1)
#define MSOC_EMPTY_BIT BIT(5)
/* BATT_SOC_EN_CTL */
@@ -266,6 +267,7 @@
/* FG_MEM_IF register and bit definitions */
#define MEM_IF_INT_RT_STS(chip) ((chip->mem_if_base) + 0x10)
+#define MEM_IF_MEM_ARB_CFG(chip) ((chip->mem_if_base) + 0x40)
#define MEM_IF_MEM_INTF_CFG(chip) ((chip->mem_if_base) + 0x50)
#define MEM_IF_IMA_CTL(chip) ((chip->mem_if_base) + 0x51)
#define MEM_IF_IMA_CFG(chip) ((chip->mem_if_base) + 0x52)
@@ -286,6 +288,11 @@
/* MEM_IF_INT_RT_STS */
#define MEM_XCP_BIT BIT(1)
+#define MEM_GNT_BIT BIT(2)
+
+/* MEM_IF_MEM_ARB_CFG */
+#define MEM_ARB_LO_LATENCY_EN_BIT BIT(1)
+#define MEM_ARB_REQ_BIT BIT(0)
/* MEM_IF_MEM_INTF_CFG */
#define MEM_ACCESS_REQ_BIT BIT(7)
@@ -325,5 +332,13 @@
#define DMA_READ_ERROR_BIT BIT(2)
/* MEM_IF_DMA_CTL */
+#define ADDR_KIND_BIT BIT(1)
#define DMA_CLEAR_LOG_BIT BIT(0)
+
+/* FG_DMAx */
+#define FG_DMA0_BASE 0x4800
+#define FG_DMA1_BASE 0x4900
+#define FG_DMA2_BASE 0x4A00
+#define FG_DMA3_BASE 0x4B00
+#define SRAM_ADDR_OFFSET 0x20
#endif
diff --git a/drivers/power/supply/qcom/fg-util.c b/drivers/power/supply/qcom/fg-util.c
index 839a771..d9ca47c 100644
--- a/drivers/power/supply/qcom/fg-util.c
+++ b/drivers/power/supply/qcom/fg-util.c
@@ -106,14 +106,17 @@
static bool is_usb_present(struct fg_chip *chip)
{
union power_supply_propval pval = {0, };
+ int rc;
if (!chip->usb_psy)
chip->usb_psy = power_supply_get_by_name("usb");
- if (chip->usb_psy)
- power_supply_get_property(chip->usb_psy,
- POWER_SUPPLY_PROP_PRESENT, &pval);
- else
+ if (!chip->usb_psy)
+ return false;
+
+ rc = power_supply_get_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_PRESENT, &pval);
+ if (rc < 0)
return false;
return pval.intval != 0;
@@ -122,14 +125,17 @@
static bool is_dc_present(struct fg_chip *chip)
{
union power_supply_propval pval = {0, };
+ int rc;
if (!chip->dc_psy)
chip->dc_psy = power_supply_get_by_name("dc");
- if (chip->dc_psy)
- power_supply_get_property(chip->dc_psy,
- POWER_SUPPLY_PROP_PRESENT, &pval);
- else
+ if (!chip->dc_psy)
+ return false;
+
+ rc = power_supply_get_property(chip->dc_psy,
+ POWER_SUPPLY_PROP_PRESENT, &pval);
+ if (rc < 0)
return false;
return pval.intval != 0;
@@ -140,6 +146,25 @@
return is_usb_present(chip) || is_dc_present(chip);
}
+bool is_qnovo_en(struct fg_chip *chip)
+{
+ union power_supply_propval pval = {0, };
+ int rc;
+
+ if (!chip->batt_psy)
+ chip->batt_psy = power_supply_get_by_name("battery");
+
+ if (!chip->batt_psy)
+ return false;
+
+ rc = power_supply_get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE, &pval);
+ if (rc < 0)
+ return false;
+
+ return pval.intval != 0;
+}
+
#define EXPONENT_SHIFT 11
#define EXPONENT_OFFSET -9
#define MANTISSA_SIGN_BIT 10
@@ -230,8 +255,6 @@
reinit_completion(&chip->soc_update);
enable_irq(chip->irqs[SOC_UPDATE_IRQ].irq);
atomic_access = true;
- } else {
- flags = FG_IMA_DEFAULT;
}
wait:
/*
@@ -257,11 +280,17 @@
}
}
- rc = fg_interleaved_mem_write(chip, address, offset, val, len,
- atomic_access);
+ if (chip->use_dma)
+ rc = fg_direct_mem_write(chip, address, offset, val, len,
+ false);
+ else
+ rc = fg_interleaved_mem_write(chip, address, offset, val, len,
+ atomic_access);
+
if (rc < 0)
pr_err("Error in writing SRAM address 0x%x[%d], rc=%d\n",
address, offset, rc);
+
out:
if (atomic_access)
disable_irq_nosync(chip->irqs[SOC_UPDATE_IRQ].irq);
@@ -288,9 +317,14 @@
if (!(flags & FG_IMA_NO_WLOCK))
vote(chip->awake_votable, SRAM_READ, true, 0);
+
mutex_lock(&chip->sram_rw_lock);
- rc = fg_interleaved_mem_read(chip, address, offset, val, len);
+ if (chip->use_dma)
+ rc = fg_direct_mem_read(chip, address, offset, val, len);
+ else
+ rc = fg_interleaved_mem_read(chip, address, offset, val, len);
+
if (rc < 0)
pr_err("Error in reading SRAM address 0x%x[%d], rc=%d\n",
address, offset, rc);
diff --git a/drivers/power/supply/qcom/pmic-voter.c b/drivers/power/supply/qcom/pmic-voter.c
index 10a1c54..e0a5150 100644
--- a/drivers/power/supply/qcom/pmic-voter.c
+++ b/drivers/power/supply/qcom/pmic-voter.c
@@ -438,12 +438,14 @@
int rerun_election(struct votable *votable)
{
int rc = 0;
+ int effective_result;
lock_votable(votable);
+ effective_result = get_effective_result_locked(votable);
if (votable->callback)
rc = votable->callback(votable,
- votable->data,
- votable->effective_result,
+ votable->data,
+ effective_result,
get_client_str(votable, votable->effective_client_id));
unlock_votable(votable);
return rc;
@@ -519,11 +521,10 @@
lock_votable(votable);
- seq_printf(m, "Votable %s:\n", votable->name);
- seq_puts(m, "clients:\n");
for (i = 0; i < votable->num_clients; i++) {
if (votable->client_strs[i]) {
- seq_printf(m, "%-15s:\t\ten=%d\t\tv=%d\n",
+ seq_printf(m, "%s: %s:\t\t\ten=%d v=%d\n",
+ votable->name,
votable->client_strs[i],
votable->votes[i].enabled,
votable->votes[i].value);
@@ -542,11 +543,11 @@
break;
}
- seq_printf(m, "type: %s\n", type_str);
- seq_puts(m, "Effective:\n");
effective_client_str = get_effective_client_locked(votable);
- seq_printf(m, "%-15s:\t\tv=%d\n",
+ seq_printf(m, "%s: effective=%s type=%s v=%d\n",
+ votable->name,
effective_client_str ? effective_client_str : "none",
+ type_str,
get_effective_result_locked(votable));
unlock_votable(votable);
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index 75e79bb..7c94744 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -75,6 +75,8 @@
#define ESR_TIMER_CHG_MAX_OFFSET 0
#define ESR_TIMER_CHG_INIT_WORD 18
#define ESR_TIMER_CHG_INIT_OFFSET 2
+#define ESR_EXTRACTION_ENABLE_WORD 19
+#define ESR_EXTRACTION_ENABLE_OFFSET 0
#define PROFILE_LOAD_WORD 24
#define PROFILE_LOAD_OFFSET 0
#define ESR_RSLOW_DISCHG_WORD 34
@@ -533,7 +535,7 @@
rc = fg_sram_read(chip, chip->sp[id].addr_word, chip->sp[id].addr_byte,
buf, chip->sp[id].len, FG_IMA_DEFAULT);
if (rc < 0) {
- pr_err("Error reading address 0x%04x[%d] rc=%d\n",
+ pr_err("Error reading address %d[%d] rc=%d\n",
chip->sp[id].addr_word, chip->sp[id].addr_byte, rc);
return rc;
}
@@ -892,8 +894,8 @@
goto out;
}
- /* Wait for 200ms before enabling BMD again */
- msleep(200);
+ /* Wait for BATT_ID to settle down before enabling BMD again */
+ msleep(chip->dt.bmd_en_delay_ms);
fg_dbg(chip, FG_STATUS, "batt_id: %d\n", batt_id);
chip->batt_id_ohms = batt_id;
@@ -904,6 +906,7 @@
return ret;
}
+ vote(chip->batt_miss_irq_en_votable, BATT_MISS_IRQ_VOTER, true, 0);
return rc;
}
@@ -1103,6 +1106,25 @@
fg_dbg(chip, FG_STATUS, "Notified charger on float voltage and FCC\n");
}
+static int fg_batt_miss_irq_en_cb(struct votable *votable, void *data,
+ int enable, const char *client)
+{
+ struct fg_chip *chip = data;
+
+ if (!chip->irqs[BATT_MISSING_IRQ].irq)
+ return 0;
+
+ if (enable) {
+ enable_irq(chip->irqs[BATT_MISSING_IRQ].irq);
+ enable_irq_wake(chip->irqs[BATT_MISSING_IRQ].irq);
+ } else {
+ disable_irq_wake(chip->irqs[BATT_MISSING_IRQ].irq);
+ disable_irq(chip->irqs[BATT_MISSING_IRQ].irq);
+ }
+
+ return 0;
+}
+
static int fg_delta_bsoc_irq_en_cb(struct votable *votable, void *data,
int enable, const char *client)
{
@@ -1402,6 +1424,7 @@
static void fg_cap_learning_update(struct fg_chip *chip)
{
int rc, batt_soc, batt_soc_msb;
+ bool input_present = is_input_present(chip);
mutex_lock(&chip->cl.lock);
@@ -1442,11 +1465,29 @@
chip->cl.init_cc_uah = 0;
}
+ if (chip->charge_status == POWER_SUPPLY_STATUS_DISCHARGING) {
+ if (!input_present) {
+ fg_dbg(chip, FG_CAP_LEARN, "Capacity learning aborted @ battery SOC %d\n",
+ batt_soc_msb);
+ chip->cl.active = false;
+ chip->cl.init_cc_uah = 0;
+ }
+ }
+
if (chip->charge_status == POWER_SUPPLY_STATUS_NOT_CHARGING) {
- fg_dbg(chip, FG_CAP_LEARN, "Capacity learning aborted @ battery SOC %d\n",
- batt_soc_msb);
- chip->cl.active = false;
- chip->cl.init_cc_uah = 0;
+ if (is_qnovo_en(chip) && input_present) {
+ /*
+ * Don't abort the capacity learning when qnovo
+ * is enabled and input is present where the
+ * charging status can go to "not charging"
+ * intermittently.
+ */
+ } else {
+ fg_dbg(chip, FG_CAP_LEARN, "Capacity learning aborted @ battery SOC %d\n",
+ batt_soc_msb);
+ chip->cl.active = false;
+ chip->cl.init_cc_uah = 0;
+ }
}
}
@@ -1572,7 +1613,7 @@
static int fg_charge_full_update(struct fg_chip *chip)
{
union power_supply_propval prop = {0, };
- int rc, msoc, bsoc, recharge_soc;
+ int rc, msoc, bsoc, recharge_soc, msoc_raw;
u8 full_soc[2] = {0xFF, 0xFF};
if (!chip->dt.hold_soc_while_full)
@@ -1608,6 +1649,7 @@
pr_err("Error in getting msoc, rc=%d\n", rc);
goto out;
}
+ msoc_raw = DIV_ROUND_CLOSEST(msoc * FULL_SOC_RAW, FULL_CAPACITY);
fg_dbg(chip, FG_STATUS, "msoc: %d bsoc: %x health: %d status: %d full: %d\n",
msoc, bsoc, chip->health, chip->charge_status,
@@ -1631,7 +1673,7 @@
fg_dbg(chip, FG_STATUS, "Terminated charging @ SOC%d\n",
msoc);
}
- } else if ((bsoc >> 8) <= recharge_soc && chip->charge_full) {
+ } else if (msoc_raw < recharge_soc && chip->charge_full) {
chip->delta_soc = FULL_CAPACITY - msoc;
/*
@@ -1661,8 +1703,8 @@
rc);
goto out;
}
- fg_dbg(chip, FG_STATUS, "bsoc: %d recharge_soc: %d delta_soc: %d\n",
- bsoc >> 8, recharge_soc, chip->delta_soc);
+ fg_dbg(chip, FG_STATUS, "msoc_raw = %d bsoc: %d recharge_soc: %d delta_soc: %d\n",
+ msoc_raw, bsoc >> 8, recharge_soc, chip->delta_soc);
} else {
goto out;
}
@@ -1981,7 +2023,7 @@
{
union power_supply_propval prop = {0, };
int rc;
- bool parallel_en = false, qnovo_en = false;
+ bool parallel_en = false, qnovo_en;
if (is_parallel_charger_available(chip)) {
rc = power_supply_get_property(chip->parallel_psy,
@@ -1994,10 +2036,7 @@
parallel_en = prop.intval;
}
- rc = power_supply_get_property(chip->batt_psy,
- POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE, &prop);
- if (!rc)
- qnovo_en = prop.intval;
+ qnovo_en = is_qnovo_en(chip);
fg_dbg(chip, FG_POWER_SUPPLY, "chg_sts: %d par_en: %d qnov_en: %d esr_fcc_ctrl_en: %d\n",
chip->charge_status, parallel_en, qnovo_en,
@@ -2498,6 +2537,23 @@
int rc;
vote(chip->awake_votable, PROFILE_LOAD, true, 0);
+
+ rc = fg_get_batt_id(chip);
+ if (rc < 0) {
+ pr_err("Error in getting battery id, rc:%d\n", rc);
+ goto out;
+ }
+
+ rc = fg_get_batt_profile(chip);
+ if (rc < 0) {
+ pr_warn("profile for batt_id=%dKOhms not found..using OTP, rc:%d\n",
+ chip->batt_id_ohms / 1000, rc);
+ goto out;
+ }
+
+ if (!chip->profile_available)
+ goto out;
+
if (!is_profile_load_required(chip))
goto done;
@@ -2562,9 +2618,9 @@
batt_psy_initialized(chip);
fg_notify_charger(chip);
chip->profile_loaded = true;
- chip->soc_reporting_ready = true;
fg_dbg(chip, FG_STATUS, "profile loaded successfully");
out:
+ chip->soc_reporting_ready = true;
vote(chip->awake_votable, PROFILE_LOAD, false, 0);
}
@@ -2979,6 +3035,89 @@
return 0;
}
+static int fg_force_esr_meas(struct fg_chip *chip)
+{
+ int rc;
+ int esr_uohms;
+
+ /* force esr extraction enable */
+ rc = fg_sram_masked_write(chip, ESR_EXTRACTION_ENABLE_WORD,
+ ESR_EXTRACTION_ENABLE_OFFSET, BIT(0), BIT(0),
+ FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("failed to enable esr extn rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = fg_masked_write(chip, BATT_INFO_QNOVO_CFG(chip),
+ LD_REG_CTRL_BIT, 0);
+ if (rc < 0) {
+ pr_err("Error in configuring qnovo_cfg rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = fg_masked_write(chip, BATT_INFO_TM_MISC1(chip),
+ ESR_REQ_CTL_BIT | ESR_REQ_CTL_EN_BIT,
+ ESR_REQ_CTL_BIT | ESR_REQ_CTL_EN_BIT);
+ if (rc < 0) {
+ pr_err("Error in configuring force ESR rc=%d\n", rc);
+ return rc;
+ }
+
+ /* wait 1.5 seconds for hw to measure ESR */
+ msleep(1500);
+ rc = fg_masked_write(chip, BATT_INFO_TM_MISC1(chip),
+ ESR_REQ_CTL_BIT | ESR_REQ_CTL_EN_BIT,
+ 0);
+ if (rc < 0) {
+ pr_err("Error in restoring force ESR rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = fg_masked_write(chip, BATT_INFO_QNOVO_CFG(chip),
+ LD_REG_CTRL_BIT, LD_REG_CTRL_BIT);
+ if (rc < 0) {
+ pr_err("Error in restoring qnovo_cfg rc=%d\n", rc);
+ return rc;
+ }
+
+ /* force esr extraction disable */
+ rc = fg_sram_masked_write(chip, ESR_EXTRACTION_ENABLE_WORD,
+ ESR_EXTRACTION_ENABLE_OFFSET, BIT(0), 0,
+ FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("failed to disable esr extn rc=%d\n", rc);
+ return rc;
+ }
+
+ fg_get_battery_resistance(chip, &esr_uohms);
+ fg_dbg(chip, FG_STATUS, "ESR uohms = %d\n", esr_uohms);
+
+ return rc;
+}
+
+static int fg_prepare_for_qnovo(struct fg_chip *chip, int qnovo_enable)
+{
+ int rc;
+
+ /* force esr extraction disable when qnovo enables */
+ rc = fg_sram_masked_write(chip, ESR_EXTRACTION_ENABLE_WORD,
+ ESR_EXTRACTION_ENABLE_OFFSET,
+ BIT(0), qnovo_enable ? 0 : BIT(0),
+ FG_IMA_DEFAULT);
+ if (rc < 0)
+ pr_err("Error in configuring esr extraction rc=%d\n", rc);
+
+ rc = fg_masked_write(chip, BATT_INFO_QNOVO_CFG(chip),
+ LD_REG_CTRL_BIT,
+ qnovo_enable ? LD_REG_CTRL_BIT : 0);
+ if (rc < 0) {
+ pr_err("Error in configuring qnovo_cfg rc=%d\n", rc);
+ return rc;
+ }
+ fg_dbg(chip, FG_STATUS, "Prepared for Qnovo\n");
+ return 0;
+}
/* PSY CALLBACKS STAY HERE */
static int fg_psy_get_property(struct power_supply *psy,
@@ -3087,6 +3226,12 @@
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
rc = fg_set_constant_chg_voltage(chip, pval->intval);
break;
+ case POWER_SUPPLY_PROP_RESISTANCE:
+ rc = fg_force_esr_meas(chip);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE:
+ rc = fg_prepare_for_qnovo(chip, pval->intval);
+ break;
default:
break;
}
@@ -3450,6 +3595,9 @@
static int fg_memif_init(struct fg_chip *chip)
{
+ if (chip->use_dma)
+ return fg_dma_init(chip);
+
return fg_ima_init(chip);
}
@@ -3489,6 +3637,26 @@
/* INTERRUPT HANDLERS STAY HERE */
+static irqreturn_t fg_dma_grant_irq_handler(int irq, void *data)
+{
+ struct fg_chip *chip = data;
+ u8 status;
+ int rc;
+
+ rc = fg_read(chip, MEM_IF_INT_RT_STS(chip), &status, 1);
+ if (rc < 0) {
+ pr_err("failed to read addr=0x%04x, rc=%d\n",
+ MEM_IF_INT_RT_STS(chip), rc);
+ return IRQ_HANDLED;
+ }
+
+ fg_dbg(chip, FG_IRQ, "irq %d triggered, status:%d\n", irq, status);
+ if (status & MEM_GNT_BIT)
+ complete_all(&chip->mem_grant);
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t fg_mem_xcp_irq_handler(int irq, void *data)
{
struct fg_chip *chip = data;
@@ -3550,20 +3718,6 @@
return IRQ_HANDLED;
}
- rc = fg_get_batt_id(chip);
- if (rc < 0) {
- chip->soc_reporting_ready = true;
- pr_err("Error in getting battery id, rc:%d\n", rc);
- return IRQ_HANDLED;
- }
-
- rc = fg_get_batt_profile(chip);
- if (rc < 0) {
- chip->soc_reporting_ready = true;
- pr_err("Error in getting battery profile, rc:%d\n", rc);
- return IRQ_HANDLED;
- }
-
clear_battery_profile(chip);
schedule_delayed_work(&chip->profile_load_work, 0);
@@ -3783,7 +3937,8 @@
/* MEM_IF irqs */
[DMA_GRANT_IRQ] = {
.name = "dma-grant",
- .handler = fg_dummy_irq_handler,
+ .handler = fg_dma_grant_irq_handler,
+ .wakeable = true,
},
[MEM_XCP_IRQ] = {
.name = "mem-xcp",
@@ -3972,6 +4127,7 @@
#define DEFAULT_ESR_CLAMP_MOHMS 20
#define DEFAULT_ESR_PULSE_THRESH_MA 110
#define DEFAULT_ESR_MEAS_CURR_MA 120
+#define DEFAULT_BMD_EN_DELAY_MS 200
static int fg_parse_dt(struct fg_chip *chip)
{
struct device_node *child, *revid_node, *node = chip->dev->of_node;
@@ -4007,6 +4163,7 @@
switch (chip->pmic_rev_id->pmic_subtype) {
case PMI8998_SUBTYPE:
+ chip->use_dma = true;
if (chip->pmic_rev_id->rev4 < PMI8998_V2P0_REV4) {
chip->sp = pmi8998_v1_sram_params;
chip->alg_flags = pmi8998_v1_alg_flags;
@@ -4317,6 +4474,13 @@
chip->dt.esr_meas_curr_ma = temp;
}
+ chip->dt.bmd_en_delay_ms = DEFAULT_BMD_EN_DELAY_MS;
+ rc = of_property_read_u32(node, "qcom,fg-bmd-en-delay-ms", &temp);
+ if (!rc) {
+ if (temp > DEFAULT_BMD_EN_DELAY_MS)
+ chip->dt.bmd_en_delay_ms = temp;
+ }
+
return 0;
}
@@ -4330,6 +4494,9 @@
if (chip->delta_bsoc_irq_en_votable)
destroy_votable(chip->delta_bsoc_irq_en_votable);
+ if (chip->batt_miss_irq_en_votable)
+ destroy_votable(chip->batt_miss_irq_en_votable);
+
if (chip->batt_id_chan)
iio_channel_release(chip->batt_id_chan);
@@ -4387,6 +4554,7 @@
chip);
if (IS_ERR(chip->awake_votable)) {
rc = PTR_ERR(chip->awake_votable);
+ chip->awake_votable = NULL;
goto exit;
}
@@ -4395,6 +4563,16 @@
fg_delta_bsoc_irq_en_cb, chip);
if (IS_ERR(chip->delta_bsoc_irq_en_votable)) {
rc = PTR_ERR(chip->delta_bsoc_irq_en_votable);
+ chip->delta_bsoc_irq_en_votable = NULL;
+ goto exit;
+ }
+
+ chip->batt_miss_irq_en_votable = create_votable("FG_BATT_MISS_IRQ",
+ VOTE_SET_ANY,
+ fg_batt_miss_irq_en_cb, chip);
+ if (IS_ERR(chip->batt_miss_irq_en_votable)) {
+ rc = PTR_ERR(chip->batt_miss_irq_en_votable);
+ chip->batt_miss_irq_en_votable = NULL;
goto exit;
}
@@ -4413,25 +4591,13 @@
mutex_init(&chip->charge_full_lock);
init_completion(&chip->soc_update);
init_completion(&chip->soc_ready);
+ init_completion(&chip->mem_grant);
INIT_DELAYED_WORK(&chip->profile_load_work, profile_load_work);
INIT_WORK(&chip->status_change_work, status_change_work);
INIT_WORK(&chip->cycle_count_work, cycle_count_work);
INIT_DELAYED_WORK(&chip->batt_avg_work, batt_avg_work);
INIT_DELAYED_WORK(&chip->sram_dump_work, sram_dump_work);
- rc = fg_get_batt_id(chip);
- if (rc < 0) {
- pr_err("Error in getting battery id, rc:%d\n", rc);
- goto exit;
- }
-
- rc = fg_get_batt_profile(chip);
- if (rc < 0) {
- chip->soc_reporting_ready = true;
- pr_warn("profile for batt_id=%dKOhms not found..using OTP, rc:%d\n",
- chip->batt_id_ohms / 1000, rc);
- }
-
rc = fg_memif_init(chip);
if (rc < 0) {
dev_err(chip->dev, "Error in initializing FG_MEMIF, rc:%d\n",
@@ -4439,6 +4605,25 @@
goto exit;
}
+ platform_set_drvdata(pdev, chip);
+
+ rc = fg_register_interrupts(chip);
+ if (rc < 0) {
+ dev_err(chip->dev, "Error in registering interrupts, rc:%d\n",
+ rc);
+ goto exit;
+ }
+
+ /* Keep SOC_UPDATE irq disabled until we require it */
+ if (fg_irqs[SOC_UPDATE_IRQ].irq)
+ disable_irq_nosync(fg_irqs[SOC_UPDATE_IRQ].irq);
+
+ /* Keep BSOC_DELTA_IRQ disabled until we require it */
+ vote(chip->delta_bsoc_irq_en_votable, DELTA_BSOC_IRQ_VOTER, false, 0);
+
+ /* Keep BATT_MISSING_IRQ disabled until we require it */
+ vote(chip->batt_miss_irq_en_votable, BATT_MISS_IRQ_VOTER, false, 0);
+
rc = fg_hw_init(chip);
if (rc < 0) {
dev_err(chip->dev, "Error in initializing FG hardware, rc:%d\n",
@@ -4446,8 +4631,6 @@
goto exit;
}
- platform_set_drvdata(pdev, chip);
-
/* Register the power supply */
fg_psy_cfg.drv_data = chip;
fg_psy_cfg.of_node = NULL;
@@ -4468,20 +4651,6 @@
goto exit;
}
- rc = fg_register_interrupts(chip);
- if (rc < 0) {
- dev_err(chip->dev, "Error in registering interrupts, rc:%d\n",
- rc);
- goto exit;
- }
-
- /* Keep SOC_UPDATE irq disabled until we require it */
- if (fg_irqs[SOC_UPDATE_IRQ].irq)
- disable_irq_nosync(fg_irqs[SOC_UPDATE_IRQ].irq);
-
- /* Keep BSOC_DELTA_IRQ irq disabled until we require it */
- rerun_election(chip->delta_bsoc_irq_en_votable);
-
rc = fg_debugfs_create(chip);
if (rc < 0) {
dev_err(chip->dev, "Error in creating debugfs entries, rc:%d\n",
@@ -4505,8 +4674,7 @@
}
device_init_wakeup(chip->dev, true);
- if (chip->profile_available)
- schedule_delayed_work(&chip->profile_load_work, 0);
+ schedule_delayed_work(&chip->profile_load_work, 0);
pr_debug("FG GEN3 driver probed successfully\n");
return 0;
diff --git a/drivers/power/supply/qcom/qpnp-qnovo.c b/drivers/power/supply/qcom/qpnp-qnovo.c
index eb97eb0..cf90f90 100644
--- a/drivers/power/supply/qcom/qpnp-qnovo.c
+++ b/drivers/power/supply/qcom/qpnp-qnovo.c
@@ -20,6 +20,7 @@
#include <linux/of_irq.h>
#include <linux/qpnp/qpnp-revid.h>
#include <linux/pmic-voter.h>
+#include <linux/delay.h>
#define QNOVO_REVISION1 0x00
#define QNOVO_REVISION2 0x01
@@ -114,6 +115,17 @@
#define OK_TO_QNOVO_VOTER "ok_to_qnovo_voter"
#define QNOVO_VOTER "qnovo_voter"
+#define FG_AVAILABLE_VOTER "FG_AVAILABLE_VOTER"
+#define QNOVO_OVERALL_VOTER "QNOVO_OVERALL_VOTER"
+#define QNI_PT_VOTER "QNI_PT_VOTER"
+#define ESR_VOTER "ESR_VOTER"
+
+#define HW_OK_TO_QNOVO_VOTER "HW_OK_TO_QNOVO_VOTER"
+#define CHG_READY_VOTER "CHG_READY_VOTER"
+#define USB_READY_VOTER "USB_READY_VOTER"
+#define DC_READY_VOTER "DC_READY_VOTER"
+
+#define PT_RESTART_VOTER "PT_RESTART_VOTER"
struct qnovo_dt_props {
bool external_rsense;
@@ -127,6 +139,10 @@
struct qnovo_dt_props dt;
struct device *dev;
struct votable *disable_votable;
+ struct votable *pt_dis_votable;
+ struct votable *not_ok_to_qnovo_votable;
+ struct votable *chg_ready_votable;
+ struct votable *awake_votable;
struct class qnovo_class;
struct pmic_revid_data *pmic_rev_id;
u32 wa_flags;
@@ -138,10 +154,18 @@
s64 v_gain_mega;
struct notifier_block nb;
struct power_supply *batt_psy;
+ struct power_supply *bms_psy;
+ struct power_supply *usb_psy;
+ struct power_supply *dc_psy;
struct work_struct status_change_work;
int fv_uV_request;
int fcc_uA_request;
- bool ok_to_qnovo;
+ int usb_present;
+ int dc_present;
+ struct delayed_work usb_debounce_work;
+ struct delayed_work dc_debounce_work;
+
+ struct delayed_work ptrain_restart_work;
};
static int debug_mask;
@@ -229,6 +253,39 @@
return true;
}
+static bool is_fg_available(struct qnovo *chip)
+{
+ if (!chip->bms_psy)
+ chip->bms_psy = power_supply_get_by_name("bms");
+
+ if (!chip->bms_psy)
+ return false;
+
+ return true;
+}
+
+static bool is_usb_available(struct qnovo *chip)
+{
+ if (!chip->usb_psy)
+ chip->usb_psy = power_supply_get_by_name("usb");
+
+ if (!chip->usb_psy)
+ return false;
+
+ return true;
+}
+
+static bool is_dc_available(struct qnovo *chip)
+{
+ if (!chip->dc_psy)
+ chip->dc_psy = power_supply_get_by_name("dc");
+
+ if (!chip->dc_psy)
+ return false;
+
+ return true;
+}
+
static int qnovo_batt_psy_update(struct qnovo *chip, bool disable)
{
union power_supply_propval pval = {0};
@@ -281,10 +338,86 @@
return -EINVAL;
}
+ /*
+ * FG must be available before enabling; the FG_AVAILABLE_VOTER
+ * vote won't enable it otherwise.
+ */
+
+ if (is_fg_available(chip))
+ power_supply_set_property(chip->bms_psy,
+ POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE,
+ &pval);
+
+ vote(chip->pt_dis_votable, QNOVO_OVERALL_VOTER, disable, 0);
rc = qnovo_batt_psy_update(chip, disable);
return rc;
}
+static int pt_dis_votable_cb(struct votable *votable, void *data, int disable,
+ const char *client)
+{
+ struct qnovo *chip = data;
+ int rc;
+
+ if (disable) {
+ cancel_delayed_work_sync(&chip->ptrain_restart_work);
+ vote(chip->awake_votable, PT_RESTART_VOTER, false, 0);
+ }
+
+ rc = qnovo_masked_write(chip, QNOVO_PTRAIN_EN, QNOVO_PTRAIN_EN_BIT,
+ (bool)disable ? 0 : QNOVO_PTRAIN_EN_BIT);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't %s pulse train rc=%d\n",
+ (bool)disable ? "disable" : "enable", rc);
+ return rc;
+ }
+
+ if (!disable) {
+ vote(chip->awake_votable, PT_RESTART_VOTER, true, 0);
+ schedule_delayed_work(&chip->ptrain_restart_work,
+ msecs_to_jiffies(20));
+ }
+
+ return 0;
+}
+
+static int not_ok_to_qnovo_cb(struct votable *votable, void *data,
+ int not_ok_to_qnovo,
+ const char *client)
+{
+ struct qnovo *chip = data;
+
+ vote(chip->disable_votable, OK_TO_QNOVO_VOTER, not_ok_to_qnovo, 0);
+ if (not_ok_to_qnovo)
+ vote(chip->disable_votable, USER_VOTER, true, 0);
+
+ kobject_uevent(&chip->dev->kobj, KOBJ_CHANGE);
+ return 0;
+}
+
+static int chg_ready_cb(struct votable *votable, void *data, int ready,
+ const char *client)
+{
+ struct qnovo *chip = data;
+
+ vote(chip->not_ok_to_qnovo_votable, CHG_READY_VOTER, !ready, 0);
+
+ return 0;
+}
+
+static int awake_cb(struct votable *votable, void *data, int awake,
+ const char *client)
+{
+ struct qnovo *chip = data;
+
+ if (awake)
+ pm_stay_awake(chip->dev);
+ else
+ pm_relax(chip->dev);
+
+ return 0;
+}
+
static int qnovo_parse_dt(struct qnovo *chip)
{
struct device_node *node = chip->dev->of_node;
@@ -626,8 +759,9 @@
char *buf)
{
struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+ int val = get_effective_result(chip->not_ok_to_qnovo_votable);
- return snprintf(buf, PAGE_SIZE, "%d\n", chip->ok_to_qnovo);
+ return snprintf(buf, PAGE_SIZE, "%d\n", !val);
}
static ssize_t qnovo_enable_show(struct class *c, struct class_attribute *attr,
@@ -656,21 +790,10 @@
static ssize_t pt_enable_show(struct class *c, struct class_attribute *attr,
char *ubuf)
{
- int i = attr - qnovo_attributes;
struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
- u8 buf[2] = {0, 0};
- u16 regval;
- int rc;
+ int val = get_effective_result(chip->pt_dis_votable);
- rc = qnovo_read(chip, params[i].start_addr, buf, params[i].num_regs);
- if (rc < 0) {
- pr_err("Couldn't read %s rc = %d\n", params[i].name, rc);
- return -EINVAL;
- }
- regval = buf[1] << 8 | buf[0];
-
- return snprintf(ubuf, PAGE_SIZE, "%d\n",
- (int)(regval & QNOVO_PTRAIN_EN_BIT));
+ return snprintf(ubuf, PAGE_SIZE, "%d\n", !val);
}
static ssize_t pt_enable_store(struct class *c, struct class_attribute *attr,
@@ -678,21 +801,12 @@
{
struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
unsigned long val;
- int rc = 0;
-
- if (get_effective_result(chip->disable_votable))
- return -EINVAL;
if (kstrtoul(ubuf, 0, &val))
return -EINVAL;
- rc = qnovo_masked_write(chip, QNOVO_PTRAIN_EN, QNOVO_PTRAIN_EN_BIT,
- (bool)val ? QNOVO_PTRAIN_EN_BIT : 0);
- if (rc < 0) {
- dev_err(chip->dev, "Couldn't %s pulse train rc=%d\n",
- (bool)val ? "enable" : "disable", rc);
- return rc;
- }
+ /* val == 0 means userspace wants pt disabled, so vote true (disable) */
+ vote(chip->pt_dis_votable, QNI_PT_VOTER, val ? false : true, 0);
return count;
}
@@ -1116,41 +1230,146 @@
{
u8 val = 0;
int rc;
- bool ok_to_qnovo;
- bool changed = false;
+ bool hw_ok_to_qnovo;
rc = qnovo_read(chip, QNOVO_ERROR_STS2, &val, 1);
if (rc < 0) {
pr_err("Couldn't read error sts rc = %d\n", rc);
- ok_to_qnovo = false;
+ hw_ok_to_qnovo = false;
} else {
/*
* For CV mode keep qnovo enabled, userspace is expected to
* disable it after few runs
*/
- ok_to_qnovo = (val == ERR_CV_MODE || val == 0) ? true : false;
+ hw_ok_to_qnovo = (val == ERR_CV_MODE || val == 0) ?
+ true : false;
}
- if (chip->ok_to_qnovo ^ ok_to_qnovo) {
-
- vote(chip->disable_votable, OK_TO_QNOVO_VOTER, !ok_to_qnovo, 0);
- if (!ok_to_qnovo)
- vote(chip->disable_votable, USER_VOTER, true, 0);
-
- chip->ok_to_qnovo = ok_to_qnovo;
- changed = true;
- }
-
- return changed;
+ vote(chip->not_ok_to_qnovo_votable, HW_OK_TO_QNOVO_VOTER,
+ !hw_ok_to_qnovo, 0);
+ return 0;
}
+static void usb_debounce_work(struct work_struct *work)
+{
+ struct qnovo *chip = container_of(work,
+ struct qnovo, usb_debounce_work.work);
+
+ vote(chip->chg_ready_votable, USB_READY_VOTER, true, 0);
+ vote(chip->awake_votable, USB_READY_VOTER, false, 0);
+}
+
+static void dc_debounce_work(struct work_struct *work)
+{
+ struct qnovo *chip = container_of(work,
+ struct qnovo, dc_debounce_work.work);
+
+ vote(chip->chg_ready_votable, DC_READY_VOTER, true, 0);
+ vote(chip->awake_votable, DC_READY_VOTER, false, 0);
+}
+
+#define DEBOUNCE_MS 15000 /* 15 seconds */
static void status_change_work(struct work_struct *work)
{
struct qnovo *chip = container_of(work,
struct qnovo, status_change_work);
+ union power_supply_propval pval;
+ bool usb_present = false, dc_present = false;
+ int rc;
- if (qnovo_update_status(chip))
- kobject_uevent(&chip->dev->kobj, KOBJ_CHANGE);
+ if (is_fg_available(chip))
+ vote(chip->disable_votable, FG_AVAILABLE_VOTER, false, 0);
+
+ if (is_usb_available(chip)) {
+ rc = power_supply_get_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_PRESENT, &pval);
+ usb_present = (rc < 0) ? 0 : pval.intval;
+ }
+
+ if (chip->usb_present && !usb_present) {
+ /* removal */
+ chip->usb_present = 0;
+ cancel_delayed_work_sync(&chip->usb_debounce_work);
+ vote(chip->awake_votable, USB_READY_VOTER, false, 0);
+ vote(chip->chg_ready_votable, USB_READY_VOTER, false, 0);
+ } else if (!chip->usb_present && usb_present) {
+ /* insertion */
+ chip->usb_present = 1;
+ vote(chip->awake_votable, USB_READY_VOTER, true, 0);
+ schedule_delayed_work(&chip->usb_debounce_work,
+ msecs_to_jiffies(DEBOUNCE_MS));
+ }
+
+ if (is_dc_available(chip)) {
+ rc = power_supply_get_property(chip->dc_psy,
+ POWER_SUPPLY_PROP_PRESENT,
+ &pval);
+ dc_present = (rc < 0) ? 0 : pval.intval;
+ }
+
+ if (usb_present)
+ dc_present = 0;
+
+ if (chip->dc_present && !dc_present) {
+ /* removal */
+ chip->dc_present = 0;
+ cancel_delayed_work_sync(&chip->dc_debounce_work);
+ vote(chip->awake_votable, DC_READY_VOTER, false, 0);
+ vote(chip->chg_ready_votable, DC_READY_VOTER, false, 0);
+ } else if (!chip->dc_present && dc_present) {
+ /* insertion */
+ chip->dc_present = 1;
+ vote(chip->awake_votable, DC_READY_VOTER, true, 0);
+ schedule_delayed_work(&chip->dc_debounce_work,
+ msecs_to_jiffies(DEBOUNCE_MS));
+ }
+
+ qnovo_update_status(chip);
+}
+
+static void ptrain_restart_work(struct work_struct *work)
+{
+ struct qnovo *chip = container_of(work,
+ struct qnovo, ptrain_restart_work.work);
+ u8 pt_t1, pt_t2;
+ int rc;
+
+ rc = qnovo_read(chip, QNOVO_PTTIME_STS, &pt_t1, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read QNOVO_PTTIME_STS rc = %d\n",
+ rc);
+ goto clean_up;
+ }
+
+ /* pttime increments every 2 seconds */
+ msleep(2100);
+
+ rc = qnovo_read(chip, QNOVO_PTTIME_STS, &pt_t2, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read QNOVO_PTTIME_STS rc = %d\n",
+ rc);
+ goto clean_up;
+ }
+
+ if (pt_t1 != pt_t2)
+ goto clean_up;
+
+ /* Toggle pt enable to restart pulse train */
+ rc = qnovo_masked_write(chip, QNOVO_PTRAIN_EN, QNOVO_PTRAIN_EN_BIT, 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't disable pulse train rc=%d\n", rc);
+ goto clean_up;
+ }
+ msleep(1000);
+ rc = qnovo_masked_write(chip, QNOVO_PTRAIN_EN, QNOVO_PTRAIN_EN_BIT,
+ QNOVO_PTRAIN_EN_BIT);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't enable pulse train rc=%d\n", rc);
+ goto clean_up;
+ }
+
+clean_up:
+ vote(chip->awake_votable, PT_RESTART_VOTER, false, 0);
}
static int qnovo_notifier_call(struct notifier_block *nb,
@@ -1162,7 +1381,10 @@
if (ev != PSY_EVENT_PROP_CHANGED)
return NOTIFY_OK;
- if (strcmp(psy->desc->name, "battery") == 0)
+ if (strcmp(psy->desc->name, "battery") == 0
+ || strcmp(psy->desc->name, "bms") == 0
+ || strcmp(psy->desc->name, "usb") == 0
+ || strcmp(psy->desc->name, "dc") == 0)
schedule_work(&chip->status_change_work);
return NOTIFY_OK;
@@ -1171,7 +1393,23 @@
static irqreturn_t handle_ptrain_done(int irq, void *data)
{
struct qnovo *chip = data;
+ union power_supply_propval pval = {0};
+ /*
+ * hw resets pt_en bit once ptrain_done triggers.
+ * vote on behalf of QNI to disable it such that
+ * once QNI enables it, the votable state changes
+ * and the callback that sets it is indeed invoked
+ */
+ vote(chip->pt_dis_votable, QNI_PT_VOTER, true, 0);
+
+ vote(chip->pt_dis_votable, ESR_VOTER, true, 0);
+ if (is_fg_available(chip))
+ power_supply_set_property(chip->bms_psy,
+ POWER_SUPPLY_PROP_RESISTANCE,
+ &pval);
+
+ vote(chip->pt_dis_votable, ESR_VOTER, false, 0);
qnovo_update_status(chip);
kobject_uevent(&chip->dev->kobj, KOBJ_CHANGE);
return IRQ_HANDLED;
@@ -1186,6 +1424,11 @@
u8 val;
vote(chip->disable_votable, USER_VOTER, true, 0);
+ vote(chip->disable_votable, FG_AVAILABLE_VOTER, true, 0);
+
+ vote(chip->pt_dis_votable, QNI_PT_VOTER, true, 0);
+ vote(chip->pt_dis_votable, QNOVO_OVERALL_VOTER, true, 0);
+ vote(chip->pt_dis_votable, ESR_VOTER, false, 0);
val = 0;
rc = qnovo_write(chip, QNOVO_STRM_CTRL, &val, 1);
@@ -1349,12 +1592,45 @@
goto cleanup;
}
+ chip->pt_dis_votable = create_votable("QNOVO_PT_DIS", VOTE_SET_ANY,
+ pt_dis_votable_cb, chip);
+ if (IS_ERR(chip->pt_dis_votable)) {
+ rc = PTR_ERR(chip->pt_dis_votable);
+ goto destroy_disable_votable;
+ }
+
+ chip->not_ok_to_qnovo_votable = create_votable("QNOVO_NOT_OK",
+ VOTE_SET_ANY,
+ not_ok_to_qnovo_cb, chip);
+ if (IS_ERR(chip->not_ok_to_qnovo_votable)) {
+ rc = PTR_ERR(chip->not_ok_to_qnovo_votable);
+ goto destroy_pt_dis_votable;
+ }
+
+ chip->chg_ready_votable = create_votable("QNOVO_CHG_READY",
+ VOTE_SET_ANY,
+ chg_ready_cb, chip);
+ if (IS_ERR(chip->chg_ready_votable)) {
+ rc = PTR_ERR(chip->chg_ready_votable);
+ goto destroy_not_ok_to_qnovo_votable;
+ }
+
+ chip->awake_votable = create_votable("QNOVO_AWAKE", VOTE_SET_ANY,
+ awake_cb, chip);
+ if (IS_ERR(chip->awake_votable)) {
+ rc = PTR_ERR(chip->awake_votable);
+ goto destroy_chg_ready_votable;
+ }
+
INIT_WORK(&chip->status_change_work, status_change_work);
+ INIT_DELAYED_WORK(&chip->dc_debounce_work, dc_debounce_work);
+ INIT_DELAYED_WORK(&chip->usb_debounce_work, usb_debounce_work);
+ INIT_DELAYED_WORK(&chip->ptrain_restart_work, ptrain_restart_work);
rc = qnovo_hw_init(chip);
if (rc < 0) {
pr_err("Couldn't initialize hardware rc=%d\n", rc);
- goto destroy_votable;
+ goto destroy_awake_votable;
}
rc = qnovo_register_notifier(chip);
@@ -1390,7 +1666,15 @@
unreg_notifier:
power_supply_unreg_notifier(&chip->nb);
-destroy_votable:
+destroy_awake_votable:
+ destroy_votable(chip->awake_votable);
+destroy_chg_ready_votable:
+ destroy_votable(chip->chg_ready_votable);
+destroy_not_ok_to_qnovo_votable:
+ destroy_votable(chip->not_ok_to_qnovo_votable);
+destroy_pt_dis_votable:
+ destroy_votable(chip->pt_dis_votable);
+destroy_disable_votable:
destroy_votable(chip->disable_votable);
cleanup:
platform_set_drvdata(pdev, NULL);
@@ -1403,6 +1687,9 @@
class_unregister(&chip->qnovo_class);
power_supply_unreg_notifier(&chip->nb);
+ destroy_votable(chip->chg_ready_votable);
+ destroy_votable(chip->not_ok_to_qnovo_votable);
+ destroy_votable(chip->pt_dis_votable);
destroy_votable(chip->disable_votable);
platform_set_drvdata(pdev, NULL);
return 0;
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index becce31..b3de8d0 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -19,6 +19,7 @@
#include <linux/power_supply.h>
#include <linux/of.h>
#include <linux/of_irq.h>
+#include <linux/log2.h>
#include <linux/qpnp/qpnp-revid.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
@@ -122,87 +123,6 @@
.max_u = 1575000,
.step_u = 25000,
},
- .step_soc_threshold[0] = {
- .name = "step charge soc threshold 1",
- .reg = STEP_CHG_SOC_OR_BATT_V_TH1_REG,
- .min_u = 0,
- .max_u = 100,
- .step_u = 1,
- },
- .step_soc_threshold[1] = {
- .name = "step charge soc threshold 2",
- .reg = STEP_CHG_SOC_OR_BATT_V_TH2_REG,
- .min_u = 0,
- .max_u = 100,
- .step_u = 1,
- },
- .step_soc_threshold[2] = {
- .name = "step charge soc threshold 3",
- .reg = STEP_CHG_SOC_OR_BATT_V_TH3_REG,
- .min_u = 0,
- .max_u = 100,
- .step_u = 1,
- },
- .step_soc_threshold[3] = {
- .name = "step charge soc threshold 4",
- .reg = STEP_CHG_SOC_OR_BATT_V_TH4_REG,
- .min_u = 0,
- .max_u = 100,
- .step_u = 1,
- },
- .step_soc = {
- .name = "step charge soc",
- .reg = STEP_CHG_SOC_VBATT_V_REG,
- .min_u = 0,
- .max_u = 100,
- .step_u = 1,
- .set_proc = smblib_mapping_soc_from_field_value,
- },
- .step_cc_delta[0] = {
- .name = "step charge current delta 1",
- .reg = STEP_CHG_CURRENT_DELTA1_REG,
- .min_u = 100000,
- .max_u = 3200000,
- .step_u = 100000,
- .get_proc = smblib_mapping_cc_delta_to_field_value,
- .set_proc = smblib_mapping_cc_delta_from_field_value,
- },
- .step_cc_delta[1] = {
- .name = "step charge current delta 2",
- .reg = STEP_CHG_CURRENT_DELTA2_REG,
- .min_u = 100000,
- .max_u = 3200000,
- .step_u = 100000,
- .get_proc = smblib_mapping_cc_delta_to_field_value,
- .set_proc = smblib_mapping_cc_delta_from_field_value,
- },
- .step_cc_delta[2] = {
- .name = "step charge current delta 3",
- .reg = STEP_CHG_CURRENT_DELTA3_REG,
- .min_u = 100000,
- .max_u = 3200000,
- .step_u = 100000,
- .get_proc = smblib_mapping_cc_delta_to_field_value,
- .set_proc = smblib_mapping_cc_delta_from_field_value,
- },
- .step_cc_delta[3] = {
- .name = "step charge current delta 4",
- .reg = STEP_CHG_CURRENT_DELTA4_REG,
- .min_u = 100000,
- .max_u = 3200000,
- .step_u = 100000,
- .get_proc = smblib_mapping_cc_delta_to_field_value,
- .set_proc = smblib_mapping_cc_delta_from_field_value,
- },
- .step_cc_delta[4] = {
- .name = "step charge current delta 5",
- .reg = STEP_CHG_CURRENT_DELTA5_REG,
- .min_u = 100000,
- .max_u = 3200000,
- .step_u = 100000,
- .get_proc = smblib_mapping_cc_delta_to_field_value,
- .set_proc = smblib_mapping_cc_delta_from_field_value,
- },
.freq_buck = {
.name = "buck switching frequency",
.reg = CFG_BUCKBOOST_FREQ_SELECT_BUCK_REG,
@@ -236,7 +156,6 @@
},
};
-#define STEP_CHARGING_MAX_STEPS 5
struct smb_dt_props {
int usb_icl_ua;
int dc_icl_ua;
@@ -244,14 +163,13 @@
int wipower_max_uw;
int min_freq_khz;
int max_freq_khz;
- u32 step_soc_threshold[STEP_CHARGING_MAX_STEPS - 1];
- s32 step_cc_delta[STEP_CHARGING_MAX_STEPS];
struct device_node *revid_dev_node;
int float_option;
int chg_inhibit_thr_mv;
bool no_battery;
bool hvdcp_disable;
bool auto_recharge_soc;
+ int wd_bark_time;
};
struct smb2 {
@@ -266,9 +184,18 @@
debug_mask, __debug_mask, int, 0600
);
+static int __weak_chg_icl_ua = 500000;
+module_param_named(
+ weak_chg_icl_ua, __weak_chg_icl_ua, int, 0600);
+
#define MICRO_1P5A 1500000
#define MICRO_P1A 100000
#define OTG_DEFAULT_DEGLITCH_TIME_MS 50
+#define MIN_WD_BARK_TIME 16
+#define DEFAULT_WD_BARK_TIME 64
+#define BITE_WDOG_TIMEOUT_8S 0x3
+#define BARK_WDOG_TIMEOUT_MASK GENMASK(3, 2)
+#define BARK_WDOG_TIMEOUT_SHIFT 2
static int smb2_parse_dt(struct smb2 *chip)
{
struct smb_charger *chg = &chip->chg;
@@ -280,27 +207,16 @@
return -EINVAL;
}
- chg->step_chg_enabled = true;
+ chg->step_chg_enabled = of_property_read_bool(node,
+ "qcom,step-charging-enable");
- if (of_property_count_u32_elems(node, "qcom,step-soc-thresholds")
- != STEP_CHARGING_MAX_STEPS - 1)
- chg->step_chg_enabled = false;
+ chg->sw_jeita_enabled = of_property_read_bool(node,
+ "qcom,sw-jeita-enable");
- rc = of_property_read_u32_array(node, "qcom,step-soc-thresholds",
- chip->dt.step_soc_threshold,
- STEP_CHARGING_MAX_STEPS - 1);
- if (rc < 0)
- chg->step_chg_enabled = false;
-
- if (of_property_count_u32_elems(node, "qcom,step-current-deltas")
- != STEP_CHARGING_MAX_STEPS)
- chg->step_chg_enabled = false;
-
- rc = of_property_read_u32_array(node, "qcom,step-current-deltas",
- chip->dt.step_cc_delta,
- STEP_CHARGING_MAX_STEPS);
- if (rc < 0)
- chg->step_chg_enabled = false;
+ rc = of_property_read_u32(node, "qcom,wd-bark-time-secs",
+ &chip->dt.wd_bark_time);
+ if (rc < 0 || chip->dt.wd_bark_time < MIN_WD_BARK_TIME)
+ chip->dt.wd_bark_time = DEFAULT_WD_BARK_TIME;
chip->dt.no_battery = of_property_read_bool(node,
"qcom,batteryless-platform");
@@ -461,6 +377,8 @@
val->intval = 0;
else
val->intval = 1;
+ if (chg->real_charger_type == POWER_SUPPLY_TYPE_UNKNOWN)
+ val->intval = 0;
break;
case POWER_SUPPLY_PROP_VOLTAGE_MIN:
val->intval = chg->voltage_min_uv;
@@ -982,7 +900,6 @@
POWER_SUPPLY_PROP_TEMP,
POWER_SUPPLY_PROP_TECHNOLOGY,
POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED,
- POWER_SUPPLY_PROP_STEP_CHARGING_STEP,
POWER_SUPPLY_PROP_CHARGE_DONE,
POWER_SUPPLY_PROP_PARALLEL_DISABLE,
POWER_SUPPLY_PROP_SET_SHIP_MODE,
@@ -1045,9 +962,6 @@
case POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED:
val->intval = chg->step_chg_enabled;
break;
- case POWER_SUPPLY_PROP_STEP_CHARGING_STEP:
- rc = smblib_get_prop_step_chg_step(chg, val);
- break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
rc = smblib_get_prop_batt_voltage_now(chg, val);
break;
@@ -1161,6 +1075,9 @@
vote(chg->fcc_votable, BATT_PROFILE_VOTER, false, 0);
}
break;
+ case POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED:
+ chg->step_chg_enabled = !!val->intval;
+ break;
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
chg->batt_profile_fcc_ua = val->intval;
vote(chg->fcc_votable, BATT_PROFILE_VOTER, true, val->intval);
@@ -1201,6 +1118,7 @@
case POWER_SUPPLY_PROP_DP_DM:
case POWER_SUPPLY_PROP_RERUN_AICL:
case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
+ case POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED:
return 1;
default:
break;
@@ -1328,73 +1246,6 @@
/***************************
* HARDWARE INITIALIZATION *
***************************/
-static int smb2_config_step_charging(struct smb2 *chip)
-{
- struct smb_charger *chg = &chip->chg;
- int rc = 0;
- int i;
-
- if (!chg->step_chg_enabled)
- return rc;
-
- for (i = 0; i < STEP_CHARGING_MAX_STEPS - 1; i++) {
- rc = smblib_set_charge_param(chg,
- &chg->param.step_soc_threshold[i],
- chip->dt.step_soc_threshold[i]);
- if (rc < 0) {
- pr_err("Couldn't configure soc thresholds rc = %d\n",
- rc);
- goto err_out;
- }
- }
-
- for (i = 0; i < STEP_CHARGING_MAX_STEPS; i++) {
- rc = smblib_set_charge_param(chg, &chg->param.step_cc_delta[i],
- chip->dt.step_cc_delta[i]);
- if (rc < 0) {
- pr_err("Couldn't configure cc delta rc = %d\n",
- rc);
- goto err_out;
- }
- }
-
- rc = smblib_write(chg, STEP_CHG_UPDATE_REQUEST_TIMEOUT_CFG_REG,
- STEP_CHG_UPDATE_REQUEST_TIMEOUT_40S);
- if (rc < 0) {
- dev_err(chg->dev,
- "Couldn't configure soc request timeout reg rc=%d\n",
- rc);
- goto err_out;
- }
-
- rc = smblib_write(chg, STEP_CHG_UPDATE_FAIL_TIMEOUT_CFG_REG,
- STEP_CHG_UPDATE_FAIL_TIMEOUT_120S);
- if (rc < 0) {
- dev_err(chg->dev,
- "Couldn't configure soc fail timeout reg rc=%d\n",
- rc);
- goto err_out;
- }
-
- /*
- * enable step charging, source soc, standard mode, go to final
- * state in case of failure.
- */
- rc = smblib_write(chg, CHGR_STEP_CHG_MODE_CFG_REG,
- STEP_CHARGING_ENABLE_BIT |
- STEP_CHARGING_SOURCE_SELECT_BIT |
- STEP_CHARGING_SOC_FAIL_OPTION_BIT);
- if (rc < 0) {
- dev_err(chg->dev, "Couldn't configure charger rc=%d\n", rc);
- goto err_out;
- }
-
- return 0;
-err_out:
- chg->step_chg_enabled = false;
- return rc;
-}
-
static int smb2_config_wipower_input_power(struct smb2 *chip, int uw)
{
int rc;
@@ -1466,15 +1317,6 @@
return rc;
}
- /* configure power role for dual-role */
- rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
- TYPEC_POWER_ROLE_CMD_MASK, 0);
- if (rc < 0) {
- dev_err(chg->dev,
- "Couldn't configure power role for DRP rc=%d\n", rc);
- return rc;
- }
-
/*
* disable Type-C factory mode and stay in Attached.SRC state when VCONN
* over-current happens
@@ -1579,7 +1421,7 @@
{
struct smb_charger *chg = &chip->chg;
int rc;
- u8 stat;
+ u8 stat, val;
if (chip->dt.no_battery)
chg->fake_capacity = 50;
@@ -1727,11 +1569,27 @@
return rc;
}
- /* configure step charging */
- rc = smb2_config_step_charging(chip);
- if (rc < 0) {
- dev_err(chg->dev, "Couldn't configure step charging rc=%d\n",
- rc);
+ val = (ilog2(chip->dt.wd_bark_time / 16) << BARK_WDOG_TIMEOUT_SHIFT) &
+ BARK_WDOG_TIMEOUT_MASK;
+ val |= BITE_WDOG_TIMEOUT_8S;
+ rc = smblib_masked_write(chg, SNARL_BARK_BITE_WD_CFG_REG,
+ BITE_WDOG_DISABLE_CHARGING_CFG_BIT |
+ BARK_WDOG_TIMEOUT_MASK | BITE_WDOG_TIMEOUT_MASK,
+ val);
+ if (rc) {
+ pr_err("Couldn't configure WD config rc=%d\n", rc);
+ return rc;
+ }
+
+ /* enable WD BARK and enable it on plugin */
+ rc = smblib_masked_write(chg, WD_CFG_REG,
+ WATCHDOG_TRIGGER_AFP_EN_BIT |
+ WDOG_TIMER_EN_ON_PLUGIN_BIT |
+ BARK_WDOG_INT_EN_BIT,
+ WDOG_TIMER_EN_ON_PLUGIN_BIT |
+ BARK_WDOG_INT_EN_BIT);
+ if (rc) {
+ pr_err("Couldn't configure WD config rc=%d\n", rc);
return rc;
}
@@ -1790,6 +1648,13 @@
return rc;
}
+ rc = smblib_read(chg, USBIN_OPTIONS_2_CFG_REG, &chg->float_cfg);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't read float charger options rc=%d\n",
+ rc);
+ return rc;
+ }
+
switch (chip->dt.chg_inhibit_thr_mv) {
case 50:
rc = smblib_masked_write(chg, CHARGE_INHIBIT_THRESHOLD_CFG_REG,
@@ -1852,6 +1717,22 @@
static int smb2_post_init(struct smb2 *chip)
{
struct smb_charger *chg = &chip->chg;
+ int rc;
+
+ /* In case the usb path is suspended, we would have missed disabling
+ * the icl change interrupt because the interrupt could have been
+ * not requested
+ */
+ rerun_election(chg->usb_icl_votable);
+
+ /* configure power role for dual-role */
+ rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+ TYPEC_POWER_ROLE_CMD_MASK, 0);
+ if (rc < 0) {
+ dev_err(chg->dev,
+ "Couldn't configure power role for DRP rc=%d\n", rc);
+ return rc;
+ }
rerun_election(chg->usb_irq_enable_votable);
@@ -1935,8 +1816,8 @@
smblib_handle_usb_source_change(0, &irq_data);
smblib_handle_chg_state_change(0, &irq_data);
smblib_handle_icl_change(0, &irq_data);
- smblib_handle_step_chg_state_change(0, &irq_data);
- smblib_handle_step_chg_soc_update_request(0, &irq_data);
+ smblib_handle_batt_temp_changed(0, &irq_data);
+ smblib_handle_wdog_bark(0, &irq_data);
return 0;
}
@@ -1958,18 +1839,15 @@
},
[STEP_CHG_STATE_CHANGE_IRQ] = {
.name = "step-chg-state-change",
- .handler = smblib_handle_step_chg_state_change,
- .wake = true,
+ .handler = NULL,
},
[STEP_CHG_SOC_UPDATE_FAIL_IRQ] = {
.name = "step-chg-soc-update-fail",
- .handler = smblib_handle_step_chg_soc_update_fail,
- .wake = true,
+ .handler = NULL,
},
[STEP_CHG_SOC_UPDATE_REQ_IRQ] = {
.name = "step-chg-soc-update-request",
- .handler = smblib_handle_step_chg_soc_update_request,
- .wake = true,
+ .handler = NULL,
},
/* OTG IRQs */
[OTG_FAIL_IRQ] = {
@@ -1992,6 +1870,7 @@
[BATT_TEMP_IRQ] = {
.name = "bat-temp",
.handler = smblib_handle_batt_temp_changed,
+ .wake = true,
},
[BATT_OCP_IRQ] = {
.name = "bat-ocp",
@@ -2087,7 +1966,8 @@
},
[WDOG_BARK_IRQ] = {
.name = "wdog-bark",
- .handler = NULL,
+ .handler = smblib_handle_wdog_bark,
+ .wake = true,
},
[AICL_FAIL_IRQ] = {
.name = "aicl-fail",
@@ -2113,7 +1993,7 @@
[SWITCH_POWER_OK_IRQ] = {
.name = "switcher-power-ok",
.handler = smblib_handle_switcher_power_ok,
- .storm_data = {true, 1000, 3},
+ .storm_data = {true, 1000, 8},
},
};
@@ -2193,6 +2073,8 @@
return rc;
}
}
+ if (chg->irq_info[USBIN_ICL_CHANGE_IRQ].irq)
+ chg->usb_icl_change_irq_enabled = true;
return rc;
}
@@ -2307,6 +2189,7 @@
chg->dev = &pdev->dev;
chg->param = v1_params;
chg->debug_mask = &__debug_mask;
+ chg->weak_chg_icl_ua = &__weak_chg_icl_ua;
chg->mode = PARALLEL_MASTER;
chg->irq_info = smb2_irqs;
chg->name = "PMI";
@@ -2324,18 +2207,18 @@
return rc;
}
- rc = smblib_init(chg);
- if (rc < 0) {
- pr_err("Smblib_init failed rc=%d\n", rc);
- goto cleanup;
- }
-
rc = smb2_parse_dt(chip);
if (rc < 0) {
pr_err("Couldn't parse device tree rc=%d\n", rc);
goto cleanup;
}
+ rc = smblib_init(chg);
+ if (rc < 0) {
+ pr_err("Smblib_init failed rc=%d\n", rc);
+ goto cleanup;
+ }
+
/* set driver data before resources request it */
platform_set_drvdata(pdev, chip);
@@ -2418,7 +2301,11 @@
goto cleanup;
}
- smb2_post_init(chip);
+ rc = smb2_post_init(chip);
+ if (rc < 0) {
+ pr_err("Failed in post init rc=%d\n", rc);
+ goto cleanup;
+ }
smb2_create_debugfs(chip);
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index 6ead522..5ae653e 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -22,6 +22,7 @@
#include "smb-lib.h"
#include "smb-reg.h"
#include "battery.h"
+#include "step-chg-jeita.h"
#include "storm-watch.h"
#define smblib_err(chg, fmt, ...) \
@@ -101,35 +102,6 @@
return rc;
}
-static int smblib_get_step_cc_delta(struct smb_charger *chg, int *cc_delta_ua)
-{
- int rc, step_state;
- u8 stat;
-
- if (!chg->step_chg_enabled) {
- *cc_delta_ua = 0;
- return 0;
- }
-
- rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
- if (rc < 0) {
- smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
- rc);
- return rc;
- }
-
- step_state = (stat & STEP_CHARGING_STATUS_MASK) >>
- STEP_CHARGING_STATUS_SHIFT;
- rc = smblib_get_charge_param(chg, &chg->param.step_cc_delta[step_state],
- cc_delta_ua);
- if (rc < 0) {
- smblib_err(chg, "Couldn't get step cc delta rc=%d\n", rc);
- return rc;
- }
-
- return 0;
-}
-
static int smblib_get_jeita_cc_delta(struct smb_charger *chg, int *cc_delta_ua)
{
int rc, cc_minus_ua;
@@ -148,7 +120,7 @@
}
rc = smblib_get_charge_param(chg, &chg->param.jeita_cc_comp,
- &cc_minus_ua);
+ &cc_minus_ua);
if (rc < 0) {
smblib_err(chg, "Couldn't get jeita cc minus rc=%d\n", rc);
return rc;
@@ -260,7 +232,7 @@
[FLOAT] = {
.name = "FLOAT",
.bit = FLOAT_CHARGER_BIT,
- .pst = POWER_SUPPLY_TYPE_USB_DCP
+ .pst = POWER_SUPPLY_TYPE_USB_FLOAT
},
[HVDCP2] = {
.name = "HVDCP2",
@@ -401,31 +373,17 @@
return rc;
}
-static int step_charge_soc_update(struct smb_charger *chg, int capacity)
-{
- int rc = 0;
-
- rc = smblib_set_charge_param(chg, &chg->param.step_soc, capacity);
- if (rc < 0) {
- smblib_err(chg, "Error in updating soc, rc=%d\n", rc);
- return rc;
- }
-
- rc = smblib_write(chg, STEP_CHG_SOC_VBATT_V_UPDATE_REG,
- STEP_CHG_SOC_VBATT_V_UPDATE_BIT);
- if (rc < 0) {
- smblib_err(chg,
- "Couldn't set STEP_CHG_SOC_VBATT_V_UPDATE_REG rc=%d\n",
- rc);
- return rc;
- }
-
- return rc;
-}
-
int smblib_set_usb_suspend(struct smb_charger *chg, bool suspend)
{
int rc = 0;
+ int irq = chg->irq_info[USBIN_ICL_CHANGE_IRQ].irq;
+
+ if (suspend && irq) {
+ if (chg->usb_icl_change_irq_enabled) {
+ disable_irq_nosync(irq);
+ chg->usb_icl_change_irq_enabled = false;
+ }
+ }
rc = smblib_masked_write(chg, USBIN_CMD_IL_REG, USBIN_SUSPEND_BIT,
suspend ? USBIN_SUSPEND_BIT : 0);
@@ -433,6 +391,13 @@
smblib_err(chg, "Couldn't write %s to USBIN_SUSPEND_BIT rc=%d\n",
suspend ? "suspend" : "resume", rc);
+ if (!suspend && irq) {
+ if (!chg->usb_icl_change_irq_enabled) {
+ enable_irq(irq);
+ chg->usb_icl_change_irq_enabled = true;
+ }
+ }
+
return rc;
}
@@ -522,6 +487,45 @@
/********************
* HELPER FUNCTIONS *
********************/
+static int smblib_request_dpdm(struct smb_charger *chg, bool enable)
+{
+ int rc = 0;
+
+ /* fetch the DPDM regulator */
+ if (!chg->dpdm_reg && of_get_property(chg->dev->of_node,
+ "dpdm-supply", NULL)) {
+ chg->dpdm_reg = devm_regulator_get(chg->dev, "dpdm");
+ if (IS_ERR(chg->dpdm_reg)) {
+ rc = PTR_ERR(chg->dpdm_reg);
+ smblib_err(chg, "Couldn't get dpdm regulator rc=%d\n",
+ rc);
+ chg->dpdm_reg = NULL;
+ return rc;
+ }
+ }
+
+ if (enable) {
+ if (chg->dpdm_reg && !regulator_is_enabled(chg->dpdm_reg)) {
+ smblib_dbg(chg, PR_MISC, "enabling DPDM regulator\n");
+ rc = regulator_enable(chg->dpdm_reg);
+ if (rc < 0)
+ smblib_err(chg,
+ "Couldn't enable dpdm regulator rc=%d\n",
+ rc);
+ }
+ } else {
+ if (chg->dpdm_reg && regulator_is_enabled(chg->dpdm_reg)) {
+ smblib_dbg(chg, PR_MISC, "disabling DPDM regulator\n");
+ rc = regulator_disable(chg->dpdm_reg);
+ if (rc < 0)
+ smblib_err(chg,
+ "Couldn't disable dpdm regulator rc=%d\n",
+ rc);
+ }
+ }
+
+ return rc;
+}
static void smblib_rerun_apsd(struct smb_charger *chg)
{
@@ -548,10 +552,17 @@
const struct apsd_result *apsd_result = smblib_get_apsd_result(chg);
/* if PD is active, APSD is disabled so won't have a valid result */
- if (chg->pd_active)
+ if (chg->pd_active) {
chg->real_charger_type = POWER_SUPPLY_TYPE_USB_PD;
- else
+ } else {
+ /*
+ * Update real charger type only if its not FLOAT
+ * detected as as SDP
+ */
+ if (!(apsd_result->pst == POWER_SUPPLY_TYPE_USB_FLOAT &&
+ chg->real_charger_type == POWER_SUPPLY_TYPE_USB))
chg->real_charger_type = apsd_result->pst;
+ }
smblib_dbg(chg, PR_MISC, "APSD=%s PD=%d\n",
apsd_result->name, chg->pd_active);
@@ -629,8 +640,25 @@
static void smblib_uusb_removal(struct smb_charger *chg)
{
int rc;
+ struct smb_irq_data *data;
+ struct storm_watch *wdata;
cancel_delayed_work_sync(&chg->pl_enable_work);
+
+ rc = smblib_request_dpdm(chg, false);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't to disable DPDM rc=%d\n", rc);
+
+ if (chg->wa_flags & BOOST_BACK_WA) {
+ data = chg->irq_info[SWITCH_POWER_OK_IRQ].irq_data;
+ if (data) {
+ wdata = &data->storm_data;
+ update_storm_count(wdata, WEAK_CHG_STORM_COUNT);
+ vote(chg->usb_icl_votable, BOOST_BACK_VOTER, false, 0);
+ vote(chg->usb_icl_votable, WEAK_CHARGER_VOTER,
+ false, 0);
+ }
+ }
vote(chg->pl_disable_votable, PL_DELAY_VOTER, true, 0);
vote(chg->awake_votable, PL_DELAY_VOTER, false, 0);
@@ -711,24 +739,9 @@
if (!val.intval)
return 0;
- /* fetch the DPDM regulator */
- if (!chg->dpdm_reg && of_get_property(chg->dev->of_node,
- "dpdm-supply", NULL)) {
- chg->dpdm_reg = devm_regulator_get(chg->dev, "dpdm");
- if (IS_ERR(chg->dpdm_reg)) {
- smblib_err(chg, "Couldn't get dpdm regulator rc=%ld\n",
- PTR_ERR(chg->dpdm_reg));
- chg->dpdm_reg = NULL;
- }
- }
-
- if (chg->dpdm_reg && !regulator_is_enabled(chg->dpdm_reg)) {
- smblib_dbg(chg, PR_MISC, "enabling DPDM regulator\n");
- rc = regulator_enable(chg->dpdm_reg);
- if (rc < 0)
- smblib_err(chg, "Couldn't enable dpdm regulator rc=%d\n",
- rc);
- }
+ rc = smblib_request_dpdm(chg, true);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't to enable DPDM rc=%d\n", rc);
chg->uusb_apsd_rerun_done = true;
smblib_rerun_apsd(chg);
@@ -736,7 +749,7 @@
return 0;
}
-static int smblib_get_pulse_cnt(struct smb_charger *chg, int *count)
+static int smblib_get_hw_pulse_cnt(struct smb_charger *chg, int *count)
{
int rc;
u8 val[2];
@@ -770,6 +783,24 @@
return 0;
}
+static int smblib_get_pulse_cnt(struct smb_charger *chg, int *count)
+{
+ int rc;
+
+ /* Use software based pulse count if HW INOV is disabled */
+ if (get_effective_result(chg->hvdcp_hw_inov_dis_votable) > 0) {
+ *count = chg->pulse_cnt;
+ return 0;
+ }
+
+ /* Use h/w pulse count if autonomous mode is enabled */
+ rc = smblib_get_hw_pulse_cnt(chg, count);
+ if (rc < 0)
+ smblib_err(chg, "failed to read h/w pulse count rc=%d\n", rc);
+
+ return rc;
+}
+
#define USBIN_25MA 25000
#define USBIN_100MA 100000
#define USBIN_150MA 150000
@@ -780,6 +811,7 @@
{
int rc;
u8 icl_options;
+ const struct apsd_result *apsd_result = smblib_get_apsd_result(chg);
/* power source is SDP */
switch (icl_ua) {
@@ -804,6 +836,21 @@
return -EINVAL;
}
+ if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB &&
+ apsd_result->pst == POWER_SUPPLY_TYPE_USB_FLOAT) {
+ /*
+ * change the float charger configuration to SDP, if this
+ * is the case of SDP being detected as FLOAT
+ */
+ rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+ FORCE_FLOAT_SDP_CFG_BIT, FORCE_FLOAT_SDP_CFG_BIT);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't set float ICL options rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
rc = smblib_masked_write(chg, USBIN_ICL_OPTIONS_REG,
CFG_USB3P0_SEL_BIT | USB51_MODE_BIT, icl_options);
if (rc < 0) {
@@ -845,7 +892,6 @@
if (icl_ua < USBIN_25MA)
return smblib_set_usb_suspend(chg, true);
- disable_irq_nosync(chg->irq_info[USBIN_ICL_CHANGE_IRQ].irq);
if (icl_ua == INT_MAX)
goto override_suspend_config;
@@ -903,7 +949,6 @@
}
enable_icl_changed_interrupt:
- enable_irq(chg->irq_info[USBIN_ICL_CHANGE_IRQ].irq);
return rc;
}
@@ -1126,7 +1171,7 @@
* the pulse count register get zeroed when autonomous mode is
* disabled. Track that in variables before disabling
*/
- rc = smblib_get_pulse_cnt(chg, &chg->pulse_cnt);
+ rc = smblib_get_hw_pulse_cnt(chg, &chg->pulse_cnt);
if (rc < 0) {
pr_err("failed to read QC_PULSE_COUNT_STATUS_REG rc=%d\n",
rc);
@@ -1279,11 +1324,84 @@
#define MAX_RETRY 15
#define MIN_DELAY_US 2000
#define MAX_DELAY_US 9000
+static int otg_current[] = {250000, 500000, 1000000, 1500000};
+static int smblib_enable_otg_wa(struct smb_charger *chg)
+{
+ u8 stat;
+ int rc, i, retry_count = 0, min_delay = MIN_DELAY_US;
+
+ for (i = 0; i < ARRAY_SIZE(otg_current); i++) {
+ smblib_dbg(chg, PR_OTG, "enabling OTG with %duA\n",
+ otg_current[i]);
+ rc = smblib_set_charge_param(chg, &chg->param.otg_cl,
+ otg_current[i]);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't set otg limit rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = smblib_write(chg, CMD_OTG_REG, OTG_EN_BIT);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't enable OTG rc=%d\n", rc);
+ return rc;
+ }
+
+ retry_count = 0;
+ min_delay = MIN_DELAY_US;
+ do {
+ usleep_range(min_delay, min_delay + 100);
+ rc = smblib_read(chg, OTG_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read OTG status rc=%d\n",
+ rc);
+ goto out;
+ }
+
+ if (stat & BOOST_SOFTSTART_DONE_BIT) {
+ rc = smblib_set_charge_param(chg,
+ &chg->param.otg_cl, chg->otg_cl_ua);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't set otg limit rc=%d\n",
+ rc);
+ goto out;
+ }
+ break;
+ }
+ /* increase the delay for following iterations */
+ if (retry_count > 5)
+ min_delay = MAX_DELAY_US;
+
+ } while (retry_count++ < MAX_RETRY);
+
+ if (retry_count >= MAX_RETRY) {
+ smblib_dbg(chg, PR_OTG, "OTG enable failed with %duA\n",
+ otg_current[i]);
+ rc = smblib_write(chg, CMD_OTG_REG, 0);
+ if (rc < 0) {
+ smblib_err(chg, "disable OTG rc=%d\n", rc);
+ goto out;
+ }
+ } else {
+ smblib_dbg(chg, PR_OTG, "OTG enabled\n");
+ return 0;
+ }
+ }
+
+ if (i == ARRAY_SIZE(otg_current)) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ return 0;
+out:
+ smblib_write(chg, CMD_OTG_REG, 0);
+ return rc;
+}
+
static int _smblib_vbus_regulator_enable(struct regulator_dev *rdev)
{
struct smb_charger *chg = rdev_get_drvdata(rdev);
- int rc, retry_count = 0, min_delay = MIN_DELAY_US;
- u8 stat;
+ int rc;
smblib_dbg(chg, PR_OTG, "halt 1 in 8 mode\n");
rc = smblib_masked_write(chg, OTG_ENG_OTG_CFG_REG,
@@ -1296,48 +1414,17 @@
}
smblib_dbg(chg, PR_OTG, "enabling OTG\n");
- rc = smblib_write(chg, CMD_OTG_REG, OTG_EN_BIT);
- if (rc < 0) {
- smblib_err(chg, "Couldn't enable OTG regulator rc=%d\n", rc);
- return rc;
- }
if (chg->wa_flags & OTG_WA) {
- /* check for softstart */
- do {
- usleep_range(min_delay, min_delay + 100);
- rc = smblib_read(chg, OTG_STATUS_REG, &stat);
- if (rc < 0) {
- smblib_err(chg,
- "Couldn't read OTG status rc=%d\n",
- rc);
- goto out;
- }
-
- if (stat & BOOST_SOFTSTART_DONE_BIT) {
- rc = smblib_set_charge_param(chg,
- &chg->param.otg_cl, chg->otg_cl_ua);
- if (rc < 0)
- smblib_err(chg,
- "Couldn't set otg limit\n");
- break;
- }
-
- /* increase the delay for following iterations */
- if (retry_count > 5)
- min_delay = MAX_DELAY_US;
- } while (retry_count++ < MAX_RETRY);
-
- if (retry_count >= MAX_RETRY) {
- smblib_dbg(chg, PR_OTG, "Boost Softstart not done\n");
- goto out;
- }
+ rc = smblib_enable_otg_wa(chg);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't enable OTG rc=%d\n", rc);
+ } else {
+ rc = smblib_write(chg, CMD_OTG_REG, OTG_EN_BIT);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't enable OTG rc=%d\n", rc);
}
- return 0;
-out:
- /* disable OTG if softstart failed */
- smblib_write(chg, CMD_OTG_REG, 0);
return rc;
}
@@ -1483,8 +1570,8 @@
union power_supply_propval *val)
{
union power_supply_propval pval = {0, };
- bool usb_online, dc_online;
- u8 stat;
+ bool usb_online, dc_online, qnovo_en;
+ u8 stat, pt_en_cmd;
int rc;
rc = smblib_get_prop_usb_online(chg, &pval);
@@ -1552,11 +1639,22 @@
smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_2 rc=%d\n",
rc);
return rc;
- }
+ }
stat &= ENABLE_TRICKLE_BIT | ENABLE_PRE_CHARGING_BIT |
ENABLE_FAST_CHARGING_BIT | ENABLE_FULLON_MODE_BIT;
- if (!stat)
+
+ rc = smblib_read(chg, QNOVO_PT_ENABLE_CMD_REG, &pt_en_cmd);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read QNOVO_PT_ENABLE_CMD_REG rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ qnovo_en = (bool)(pt_en_cmd & QNOVO_PT_ENABLE_CMD_BIT);
+
+ /* ignore stat7 when qnovo is enabled */
+ if (!qnovo_en && !stat)
val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
return 0;
@@ -1716,30 +1814,6 @@
return rc;
}
-int smblib_get_prop_step_chg_step(struct smb_charger *chg,
- union power_supply_propval *val)
-{
- int rc;
- u8 stat;
-
- if (!chg->step_chg_enabled) {
- val->intval = -1;
- return 0;
- }
-
- rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
- if (rc < 0) {
- smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
- rc);
- return rc;
- }
-
- val->intval = (stat & STEP_CHARGING_STATUS_MASK) >>
- STEP_CHARGING_STATUS_SHIFT;
-
- return rc;
-}
-
int smblib_get_prop_batt_charge_done(struct smb_charger *chg,
union power_supply_propval *val)
{
@@ -2307,27 +2381,21 @@
int smblib_get_prop_input_voltage_settled(struct smb_charger *chg,
union power_supply_propval *val)
{
- const struct apsd_result *apsd_result = smblib_get_apsd_result(chg);
int rc, pulses;
- u8 stat;
- val->intval = MICRO_5V;
- if (apsd_result == NULL) {
- smblib_err(chg, "APSD result is NULL\n");
- return 0;
- }
-
- switch (apsd_result->pst) {
+ switch (chg->real_charger_type) {
case POWER_SUPPLY_TYPE_USB_HVDCP_3:
- rc = smblib_read(chg, QC_PULSE_COUNT_STATUS_REG, &stat);
+ rc = smblib_get_pulse_cnt(chg, &pulses);
if (rc < 0) {
smblib_err(chg,
"Couldn't read QC_PULSE_COUNT rc=%d\n", rc);
return 0;
}
- pulses = (stat & QC_PULSE_COUNT_MASK);
val->intval = MICRO_5V + HVDCP3_STEP_UV * pulses;
break;
+ case POWER_SUPPLY_TYPE_USB_PD:
+ val->intval = chg->voltage_min_uv;
+ break;
default:
val->intval = MICRO_5V;
break;
@@ -2390,6 +2458,31 @@
return 0;
}
+#define SDP_CURRENT_UA 500000
+#define CDP_CURRENT_UA 1500000
+#define DCP_CURRENT_UA 1500000
+#define HVDCP_CURRENT_UA 3000000
+#define TYPEC_DEFAULT_CURRENT_UA 900000
+#define TYPEC_MEDIUM_CURRENT_UA 1500000
+#define TYPEC_HIGH_CURRENT_UA 3000000
+static int get_rp_based_dcp_current(struct smb_charger *chg, int typec_mode)
+{
+ int rp_ua;
+
+ switch (typec_mode) {
+ case POWER_SUPPLY_TYPEC_SOURCE_HIGH:
+ rp_ua = TYPEC_HIGH_CURRENT_UA;
+ break;
+ case POWER_SUPPLY_TYPEC_SOURCE_MEDIUM:
+ case POWER_SUPPLY_TYPEC_SOURCE_DEFAULT:
+ /* fall through */
+ default:
+ rp_ua = DCP_CURRENT_UA;
+ }
+
+ return rp_ua;
+}
+
/*******************
* USB PSY SETTERS *
* *****************/
@@ -2407,14 +2500,54 @@
return rc;
}
+static int smblib_handle_usb_current(struct smb_charger *chg,
+ int usb_current)
+{
+ int rc = 0, rp_ua, typec_mode;
+
+ if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB_FLOAT) {
+ if (usb_current == -ETIMEDOUT) {
+ /*
+ * Valid FLOAT charger, report the current based
+ * of Rp
+ */
+ typec_mode = smblib_get_prop_typec_mode(chg);
+ rp_ua = get_rp_based_dcp_current(chg, typec_mode);
+ rc = vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER,
+ true, rp_ua);
+ if (rc < 0)
+ return rc;
+ } else {
+ /*
+ * FLOAT charger detected as SDP by USB driver,
+ * charge with the requested current and update the
+ * real_charger_type
+ */
+ chg->real_charger_type = POWER_SUPPLY_TYPE_USB;
+ rc = vote(chg->usb_icl_votable, USB_PSY_VOTER,
+ true, usb_current);
+ if (rc < 0)
+ return rc;
+ rc = vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER,
+ false, 0);
+ if (rc < 0)
+ return rc;
+ }
+ } else {
+ rc = vote(chg->usb_icl_votable, USB_PSY_VOTER,
+ true, usb_current);
+ }
+
+ return rc;
+}
+
int smblib_set_prop_usb_current_max(struct smb_charger *chg,
const union power_supply_propval *val)
{
int rc = 0;
if (!chg->pd_active) {
- rc = vote(chg->usb_icl_votable, USB_PSY_VOTER,
- true, val->intval);
+ rc = smblib_handle_usb_current(chg, val->intval);
} else if (chg->system_suspend_supported) {
if (val->intval <= USBIN_25MA)
rc = vote(chg->usb_icl_votable,
@@ -2510,6 +2643,7 @@
}
chg->voltage_min_uv = min_uv;
+ power_supply_changed(chg->usb_main_psy);
return rc;
}
@@ -2797,46 +2931,72 @@
return rc;
}
+static int smblib_recover_from_soft_jeita(struct smb_charger *chg)
+{
+ u8 stat_1, stat_2;
+ int rc;
+
+ rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat_1);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = smblib_read(chg, BATTERY_CHARGER_STATUS_2_REG, &stat_2);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_2 rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ if ((chg->jeita_status && !(stat_2 & BAT_TEMP_STATUS_SOFT_LIMIT_MASK) &&
+ ((stat_1 & BATTERY_CHARGER_STATUS_MASK) == TERMINATE_CHARGE))) {
+ /*
+ * We are moving from JEITA soft -> Normal and charging
+ * is terminated
+ */
+ rc = smblib_write(chg, CHARGING_ENABLE_CMD_REG, 0);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't disable charging rc=%d\n",
+ rc);
+ return rc;
+ }
+ rc = smblib_write(chg, CHARGING_ENABLE_CMD_REG,
+ CHARGING_ENABLE_CMD_BIT);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't enable charging rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ chg->jeita_status = stat_2 & BAT_TEMP_STATUS_SOFT_LIMIT_MASK;
+
+ return 0;
+}
+
/************************
* USB MAIN PSY GETTERS *
************************/
int smblib_get_prop_fcc_delta(struct smb_charger *chg,
- union power_supply_propval *val)
+ union power_supply_propval *val)
{
- int rc, jeita_cc_delta_ua, step_cc_delta_ua, hw_cc_delta_ua = 0;
-
- rc = smblib_get_step_cc_delta(chg, &step_cc_delta_ua);
- if (rc < 0) {
- smblib_err(chg, "Couldn't get step cc delta rc=%d\n", rc);
- step_cc_delta_ua = 0;
- } else {
- hw_cc_delta_ua = step_cc_delta_ua;
- }
+ int rc, jeita_cc_delta_ua = 0;
rc = smblib_get_jeita_cc_delta(chg, &jeita_cc_delta_ua);
if (rc < 0) {
smblib_err(chg, "Couldn't get jeita cc delta rc=%d\n", rc);
jeita_cc_delta_ua = 0;
- } else if (jeita_cc_delta_ua < 0) {
- /* HW will take the min between JEITA and step charge */
- hw_cc_delta_ua = min(hw_cc_delta_ua, jeita_cc_delta_ua);
}
- val->intval = hw_cc_delta_ua;
+ val->intval = jeita_cc_delta_ua;
return 0;
}
/************************
* USB MAIN PSY SETTERS *
************************/
-
-#define SDP_CURRENT_MA 500000
-#define CDP_CURRENT_MA 1500000
-#define DCP_CURRENT_MA 1500000
-#define HVDCP_CURRENT_MA 3000000
-#define TYPEC_DEFAULT_CURRENT_MA 900000
-#define TYPEC_MEDIUM_CURRENT_MA 1500000
-#define TYPEC_HIGH_CURRENT_MA 3000000
int smblib_get_charge_current(struct smb_charger *chg,
int *total_current_ua)
{
@@ -2870,19 +3030,19 @@
/* QC 2.0/3.0 adapter */
if (apsd_result->bit & (QC_3P0_BIT | QC_2P0_BIT)) {
- *total_current_ua = HVDCP_CURRENT_MA;
+ *total_current_ua = HVDCP_CURRENT_UA;
return 0;
}
if (non_compliant) {
switch (apsd_result->bit) {
case CDP_CHARGER_BIT:
- current_ua = CDP_CURRENT_MA;
+ current_ua = CDP_CURRENT_UA;
break;
case DCP_CHARGER_BIT:
case OCP_CHARGER_BIT:
case FLOAT_CHARGER_BIT:
- current_ua = DCP_CURRENT_MA;
+ current_ua = DCP_CURRENT_UA;
break;
default:
current_ua = 0;
@@ -2897,7 +3057,7 @@
case POWER_SUPPLY_TYPEC_SOURCE_DEFAULT:
switch (apsd_result->bit) {
case CDP_CHARGER_BIT:
- current_ua = CDP_CURRENT_MA;
+ current_ua = CDP_CURRENT_UA;
break;
case DCP_CHARGER_BIT:
case OCP_CHARGER_BIT:
@@ -2910,10 +3070,10 @@
}
break;
case POWER_SUPPLY_TYPEC_SOURCE_MEDIUM:
- current_ua = TYPEC_MEDIUM_CURRENT_MA;
+ current_ua = TYPEC_MEDIUM_CURRENT_UA;
break;
case POWER_SUPPLY_TYPEC_SOURCE_HIGH:
- current_ua = TYPEC_HIGH_CURRENT_MA;
+ current_ua = TYPEC_HIGH_CURRENT_UA;
break;
case POWER_SUPPLY_TYPEC_NON_COMPLIANT:
case POWER_SUPPLY_TYPEC_NONE:
@@ -3003,61 +3163,18 @@
return IRQ_HANDLED;
}
-irqreturn_t smblib_handle_step_chg_state_change(int irq, void *data)
-{
- struct smb_irq_data *irq_data = data;
- struct smb_charger *chg = irq_data->parent_data;
-
- smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
-
- if (chg->step_chg_enabled)
- rerun_election(chg->fcc_votable);
-
- return IRQ_HANDLED;
-}
-
-irqreturn_t smblib_handle_step_chg_soc_update_fail(int irq, void *data)
-{
- struct smb_irq_data *irq_data = data;
- struct smb_charger *chg = irq_data->parent_data;
-
- smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
-
- if (chg->step_chg_enabled)
- rerun_election(chg->fcc_votable);
-
- return IRQ_HANDLED;
-}
-
-#define STEP_SOC_REQ_MS 3000
-irqreturn_t smblib_handle_step_chg_soc_update_request(int irq, void *data)
-{
- struct smb_irq_data *irq_data = data;
- struct smb_charger *chg = irq_data->parent_data;
- int rc;
- union power_supply_propval pval = {0, };
-
- smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
-
- if (!chg->bms_psy) {
- schedule_delayed_work(&chg->step_soc_req_work,
- msecs_to_jiffies(STEP_SOC_REQ_MS));
- return IRQ_HANDLED;
- }
-
- rc = smblib_get_prop_batt_capacity(chg, &pval);
- if (rc < 0)
- smblib_err(chg, "Couldn't get batt capacity rc=%d\n", rc);
- else
- step_charge_soc_update(chg, pval.intval);
-
- return IRQ_HANDLED;
-}
-
irqreturn_t smblib_handle_batt_temp_changed(int irq, void *data)
{
struct smb_irq_data *irq_data = data;
struct smb_charger *chg = irq_data->parent_data;
+ int rc;
+
+ rc = smblib_recover_from_soft_jeita(chg);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't recover chg from soft jeita rc=%d\n",
+ rc);
+ return IRQ_HANDLED;
+ }
rerun_election(chg->fcc_votable);
power_supply_changed(chg->batt_psy);
@@ -3117,6 +3234,8 @@
int rc;
u8 stat;
bool vbus_rising;
+ struct smb_irq_data *data;
+ struct storm_watch *wdata;
rc = smblib_read(chg, USBIN_BASE + INT_RT_STS_OFFSET, &stat);
if (rc < 0) {
@@ -3126,10 +3245,23 @@
vbus_rising = (bool)(stat & USBIN_PLUGIN_RT_STS_BIT);
- if (vbus_rising)
+ if (vbus_rising) {
smblib_cc2_sink_removal_exit(chg);
- else
+ } else {
smblib_cc2_sink_removal_enter(chg);
+ if (chg->wa_flags & BOOST_BACK_WA) {
+ data = chg->irq_info[SWITCH_POWER_OK_IRQ].irq_data;
+ if (data) {
+ wdata = &data->storm_data;
+ update_storm_count(wdata,
+ WEAK_CHG_STORM_COUNT);
+ vote(chg->usb_icl_votable, BOOST_BACK_VOTER,
+ false, 0);
+ vote(chg->usb_icl_votable, WEAK_CHARGER_VOTER,
+ false, 0);
+ }
+ }
+ }
power_supply_changed(chg->usb_psy);
smblib_dbg(chg, PR_INTERRUPT, "IRQ: usbin-plugin %s\n",
@@ -3142,6 +3274,8 @@
int rc;
u8 stat;
bool vbus_rising;
+ struct smb_irq_data *data;
+ struct storm_watch *wdata;
rc = smblib_read(chg, USBIN_BASE + INT_RT_STS_OFFSET, &stat);
if (rc < 0) {
@@ -3153,41 +3287,32 @@
smblib_set_opt_freq_buck(chg, vbus_rising ? chg->chg_freq.freq_5V :
chg->chg_freq.freq_removal);
- /* fetch the DPDM regulator */
- if (!chg->dpdm_reg && of_get_property(chg->dev->of_node,
- "dpdm-supply", NULL)) {
- chg->dpdm_reg = devm_regulator_get(chg->dev, "dpdm");
- if (IS_ERR(chg->dpdm_reg)) {
- smblib_err(chg, "Couldn't get dpdm regulator rc=%ld\n",
- PTR_ERR(chg->dpdm_reg));
- chg->dpdm_reg = NULL;
- }
- }
-
if (vbus_rising) {
- if (chg->dpdm_reg && !regulator_is_enabled(chg->dpdm_reg)) {
- smblib_dbg(chg, PR_MISC, "enabling DPDM regulator\n");
- rc = regulator_enable(chg->dpdm_reg);
- if (rc < 0)
- smblib_err(chg, "Couldn't enable dpdm regulator rc=%d\n",
- rc);
- }
+ rc = smblib_request_dpdm(chg, true);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't to enable DPDM rc=%d\n", rc);
/* Schedule work to enable parallel charger */
vote(chg->awake_votable, PL_DELAY_VOTER, true, 0);
schedule_delayed_work(&chg->pl_enable_work,
msecs_to_jiffies(PL_DELAY_MS));
} else {
- if (chg->wa_flags & BOOST_BACK_WA)
- vote(chg->usb_icl_votable, BOOST_BACK_VOTER, false, 0);
-
- if (chg->dpdm_reg && regulator_is_enabled(chg->dpdm_reg)) {
- smblib_dbg(chg, PR_MISC, "disabling DPDM regulator\n");
- rc = regulator_disable(chg->dpdm_reg);
- if (rc < 0)
- smblib_err(chg, "Couldn't disable dpdm regulator rc=%d\n",
- rc);
+ if (chg->wa_flags & BOOST_BACK_WA) {
+ data = chg->irq_info[SWITCH_POWER_OK_IRQ].irq_data;
+ if (data) {
+ wdata = &data->storm_data;
+ update_storm_count(wdata,
+ WEAK_CHG_STORM_COUNT);
+ vote(chg->usb_icl_votable, BOOST_BACK_VOTER,
+ false, 0);
+ vote(chg->usb_icl_votable, WEAK_CHARGER_VOTER,
+ false, 0);
+ }
}
+
+ rc = smblib_request_dpdm(chg, false);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't disable DPDM rc=%d\n", rc);
}
if (chg->micro_usb_mode)
@@ -3302,13 +3427,12 @@
}
if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB_HVDCP_3) {
- rc = smblib_read(chg, QC_PULSE_COUNT_STATUS_REG, &stat);
+ rc = smblib_get_pulse_cnt(chg, &pulses);
if (rc < 0) {
smblib_err(chg,
"Couldn't read QC_PULSE_COUNT rc=%d\n", rc);
return;
}
- pulses = (stat & QC_PULSE_COUNT_MASK);
if (pulses < QC3_PULSES_FOR_6V)
smblib_set_opt_freq_buck(chg,
@@ -3411,6 +3535,9 @@
static void smblib_force_legacy_icl(struct smb_charger *chg, int pst)
{
+ int typec_mode;
+ int rp_ua;
+
/* while PD is active it should have complete ICL control */
if (chg->pd_active)
return;
@@ -3431,7 +3558,16 @@
vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 1500000);
break;
case POWER_SUPPLY_TYPE_USB_DCP:
- vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 1500000);
+ typec_mode = smblib_get_prop_typec_mode(chg);
+ rp_ua = get_rp_based_dcp_current(chg, typec_mode);
+ vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, rp_ua);
+ break;
+ case POWER_SUPPLY_TYPE_USB_FLOAT:
+ /*
+ * limit ICL to 100mA, the USB driver will enumerate to check
+ * if this is a SDP and appropriately set the current
+ */
+ vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 100000);
break;
case POWER_SUPPLY_TYPE_USB_HVDCP:
case POWER_SUPPLY_TYPE_USB_HVDCP_3:
@@ -3563,9 +3699,26 @@
static void smblib_handle_typec_removal(struct smb_charger *chg)
{
int rc;
+ struct smb_irq_data *data;
+ struct storm_watch *wdata;
chg->cc2_detach_wa_active = false;
+ rc = smblib_request_dpdm(chg, false);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't disable DPDM rc=%d\n", rc);
+
+ if (chg->wa_flags & BOOST_BACK_WA) {
+ data = chg->irq_info[SWITCH_POWER_OK_IRQ].irq_data;
+ if (data) {
+ wdata = &data->storm_data;
+ update_storm_count(wdata, WEAK_CHG_STORM_COUNT);
+ vote(chg->usb_icl_votable, BOOST_BACK_VOTER, false, 0);
+ vote(chg->usb_icl_votable, WEAK_CHARGER_VOTER,
+ false, 0);
+ }
+ }
+
/* reset APSD voters */
vote(chg->apsd_disable_votable, PD_HARD_RESET_VOTER, false, 0);
vote(chg->apsd_disable_votable, PD_VOTER, false, 0);
@@ -3580,6 +3733,7 @@
vote(chg->usb_icl_votable, DCP_VOTER, false, 0);
vote(chg->usb_icl_votable, PL_USBIN_USBIN_VOTER, false, 0);
vote(chg->usb_icl_votable, SW_QC3_VOTER, false, 0);
+ vote(chg->usb_icl_votable, OTG_VOTER, false, 0);
/* reset hvdcp voters */
vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER, true, 0);
@@ -3596,6 +3750,7 @@
/* reset parallel voters */
vote(chg->pl_disable_votable, PL_DELAY_VOTER, true, 0);
+ vote(chg->pl_disable_votable, FCC_CHANGE_VOTER, false, 0);
vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0);
vote(chg->awake_votable, PL_DELAY_VOTER, false, 0);
@@ -3610,6 +3765,13 @@
chg->pd_hard_reset = 0;
chg->typec_legacy_valid = false;
+ /* write back the default FLOAT charger configuration */
+ rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+ (u8)FLOAT_OPTIONS_MASK, chg->float_cfg);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't write float charger options rc=%d\n",
+ rc);
+
/* reset back to 120mS tCC debounce */
rc = smblib_masked_write(chg, MISC_CFG_REG, TCC_DEBOUNCE_20MS_BIT, 0);
if (rc < 0)
@@ -3689,18 +3851,68 @@
smblib_err(chg, "Couldn't disable APSD_START_ON_CC rc=%d\n",
rc);
- if (chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT)
+ if (chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT) {
typec_sink_insertion(chg);
- else
+ } else {
+ rc = smblib_request_dpdm(chg, true);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't to enable DPDM rc=%d\n", rc);
typec_sink_removal(chg);
+ }
+}
+
+static void smblib_handle_rp_change(struct smb_charger *chg, int typec_mode)
+{
+ int rp_ua;
+ const struct apsd_result *apsd = smblib_get_apsd_result(chg);
+
+ if ((apsd->pst != POWER_SUPPLY_TYPE_USB_DCP)
+ && (apsd->pst != POWER_SUPPLY_TYPE_USB_FLOAT))
+ return;
+
+ /*
+ * if APSD indicates FLOAT and the USB stack had detected SDP,
+ * do not respond to Rp changes as we do not confirm that its
+ * a legacy cable
+ */
+ if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB)
+ return;
+ /*
+ * We want the ICL vote @ 100mA for a FLOAT charger
+ * until the detection by the USB stack is complete.
+ * Ignore the Rp changes unless there is a
+ * pre-existing valid vote.
+ */
+ if (apsd->pst == POWER_SUPPLY_TYPE_USB_FLOAT &&
+ get_client_vote(chg->usb_icl_votable,
+ LEGACY_UNKNOWN_VOTER) <= 100000)
+ return;
+
+ /*
+ * handle Rp change for DCP/FLOAT/OCP.
+ * Update the current only if the Rp is different from
+ * the last Rp value.
+ */
+ smblib_dbg(chg, PR_MISC, "CC change old_mode=%d new_mode=%d\n",
+ chg->typec_mode, typec_mode);
+
+ rp_ua = get_rp_based_dcp_current(chg, typec_mode);
+ vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, rp_ua);
}
static void smblib_handle_typec_cc_state_change(struct smb_charger *chg)
{
+ int typec_mode;
+
if (chg->pr_swap_in_progress)
return;
- chg->typec_mode = smblib_get_prop_typec_mode(chg);
+ typec_mode = smblib_get_prop_typec_mode(chg);
+ if (chg->typec_present && (typec_mode != chg->typec_mode))
+ smblib_handle_rp_change(chg, typec_mode);
+
+ chg->typec_mode = typec_mode;
+
if (!chg->typec_present && chg->typec_mode != POWER_SUPPLY_TYPEC_NONE) {
chg->typec_present = true;
smblib_dbg(chg, PR_MISC, "TypeC %s insertion\n",
@@ -3713,6 +3925,12 @@
smblib_handle_typec_removal(chg);
}
+ /* suspend usb if sink */
+ if (chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT)
+ vote(chg->usb_icl_votable, OTG_VOTER, true, 0);
+ else
+ vote(chg->usb_icl_votable, OTG_VOTER, false, 0);
+
smblib_dbg(chg, PR_INTERRUPT, "IRQ: cc-state-change; Type-C %s detected\n",
smblib_typec_mode_name[chg->typec_mode]);
}
@@ -3760,6 +3978,12 @@
return IRQ_HANDLED;
}
+ if (chg->pr_swap_in_progress) {
+ smblib_dbg(chg, PR_INTERRUPT,
+ "Ignoring since pr_swap_in_progress\n");
+ return IRQ_HANDLED;
+ }
+
mutex_lock(&chg->lock);
smblib_usb_typec_change(chg);
mutex_unlock(&chg->lock);
@@ -3786,10 +4010,23 @@
return IRQ_HANDLED;
}
+static void smblib_bb_removal_work(struct work_struct *work)
+{
+ struct smb_charger *chg = container_of(work, struct smb_charger,
+ bb_removal_work.work);
+
+ vote(chg->usb_icl_votable, BOOST_BACK_VOTER, false, 0);
+ vote(chg->awake_votable, BOOST_BACK_VOTER, false, 0);
+}
+
+#define BOOST_BACK_UNVOTE_DELAY_MS 750
+#define BOOST_BACK_STORM_COUNT 3
+#define WEAK_CHG_STORM_COUNT 8
irqreturn_t smblib_handle_switcher_power_ok(int irq, void *data)
{
struct smb_irq_data *irq_data = data;
struct smb_charger *chg = irq_data->parent_data;
+ struct storm_watch *wdata = &irq_data->storm_data;
int rc, usb_icl;
u8 stat;
@@ -3811,8 +4048,32 @@
return IRQ_HANDLED;
if (is_storming(&irq_data->storm_data)) {
- smblib_err(chg, "Reverse boost detected: voting 0mA to suspend input\n");
- vote(chg->usb_icl_votable, BOOST_BACK_VOTER, true, 0);
+ /* This could be a weak charger reduce ICL */
+ if (!is_client_vote_enabled(chg->usb_icl_votable,
+ WEAK_CHARGER_VOTER)) {
+ smblib_err(chg,
+ "Weak charger detected: voting %dmA ICL\n",
+ *chg->weak_chg_icl_ua / 1000);
+ vote(chg->usb_icl_votable, WEAK_CHARGER_VOTER,
+ true, *chg->weak_chg_icl_ua);
+ /*
+ * reset storm data and set the storm threshold
+ * to 3 for reverse boost detection.
+ */
+ update_storm_count(wdata, BOOST_BACK_STORM_COUNT);
+ } else {
+ smblib_err(chg,
+ "Reverse boost detected: voting 0mA to suspend input\n");
+ vote(chg->usb_icl_votable, BOOST_BACK_VOTER, true, 0);
+ vote(chg->awake_votable, BOOST_BACK_VOTER, true, 0);
+ /*
+ * Remove the boost-back vote after a delay, to avoid
+ * permanently suspending the input if the boost-back
+ * condition is unintentionally hit.
+ */
+ schedule_delayed_work(&chg->bb_removal_work,
+ msecs_to_jiffies(BOOST_BACK_UNVOTE_DELAY_MS));
+ }
}
return IRQ_HANDLED;
@@ -3824,10 +4085,15 @@
struct smb_charger *chg = irq_data->parent_data;
int rc;
+ smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+
rc = smblib_write(chg, BARK_BITE_WDOG_PET_REG, BARK_BITE_WDOG_PET_BIT);
if (rc < 0)
smblib_err(chg, "Couldn't pet the dog rc=%d\n", rc);
+ if (chg->step_chg_enabled || chg->sw_jeita_enabled)
+ power_supply_changed(chg->batt_psy);
+
return IRQ_HANDLED;
}
@@ -3910,22 +4176,6 @@
power_supply_changed(chg->batt_psy);
}
-static void step_soc_req_work(struct work_struct *work)
-{
- struct smb_charger *chg = container_of(work, struct smb_charger,
- step_soc_req_work.work);
- union power_supply_propval pval = {0, };
- int rc;
-
- rc = smblib_get_prop_batt_capacity(chg, &pval);
- if (rc < 0) {
- smblib_err(chg, "Couldn't get batt capacity rc=%d\n", rc);
- return;
- }
-
- step_charge_soc_update(chg, pval.intval);
-}
-
static void clear_hdc_work(struct work_struct *work)
{
struct smb_charger *chg = container_of(work, struct smb_charger,
@@ -4458,7 +4708,6 @@
INIT_WORK(&chg->bms_update_work, bms_update_work);
INIT_WORK(&chg->rdstd_cc2_detach_work, rdstd_cc2_detach_work);
INIT_DELAYED_WORK(&chg->hvdcp_detect_work, smblib_hvdcp_detect_work);
- INIT_DELAYED_WORK(&chg->step_soc_req_work, step_soc_req_work);
INIT_DELAYED_WORK(&chg->clear_hdc_work, clear_hdc_work);
INIT_WORK(&chg->otg_oc_work, smblib_otg_oc_work);
INIT_WORK(&chg->vconn_oc_work, smblib_vconn_oc_work);
@@ -4467,6 +4716,7 @@
INIT_DELAYED_WORK(&chg->pl_enable_work, smblib_pl_enable_work);
INIT_WORK(&chg->legacy_detection_work, smblib_legacy_detection_work);
INIT_DELAYED_WORK(&chg->uusb_otg_work, smblib_uusb_otg_work);
+ INIT_DELAYED_WORK(&chg->bb_removal_work, smblib_bb_removal_work);
chg->fake_capacity = -EINVAL;
chg->fake_input_current_limited = -EINVAL;
@@ -4479,6 +4729,14 @@
return rc;
}
+ rc = qcom_step_chg_init(chg->step_chg_enabled,
+ chg->sw_jeita_enabled);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't init qcom_step_chg_init rc=%d\n",
+ rc);
+ return rc;
+ }
+
rc = smblib_create_votables(chg);
if (rc < 0) {
smblib_err(chg, "Couldn't create votables rc=%d\n",
@@ -4513,7 +4771,6 @@
cancel_work_sync(&chg->bms_update_work);
cancel_work_sync(&chg->rdstd_cc2_detach_work);
cancel_delayed_work_sync(&chg->hvdcp_detect_work);
- cancel_delayed_work_sync(&chg->step_soc_req_work);
cancel_delayed_work_sync(&chg->clear_hdc_work);
cancel_work_sync(&chg->otg_oc_work);
cancel_work_sync(&chg->vconn_oc_work);
@@ -4522,8 +4779,10 @@
cancel_delayed_work_sync(&chg->pl_enable_work);
cancel_work_sync(&chg->legacy_detection_work);
cancel_delayed_work_sync(&chg->uusb_otg_work);
+ cancel_delayed_work_sync(&chg->bb_removal_work);
power_supply_unreg_notifier(&chg->nb);
smblib_destroy_votables(chg);
+ qcom_step_chg_deinit();
qcom_batt_deinit();
break;
case PARALLEL_SLAVE:
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index f39f2c9..5b59597 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -64,9 +64,14 @@
#define BATT_PROFILE_VOTER "BATT_PROFILE_VOTER"
#define OTG_DELAY_VOTER "OTG_DELAY_VOTER"
#define USBIN_I_VOTER "USBIN_I_VOTER"
+#define WEAK_CHARGER_VOTER "WEAK_CHARGER_VOTER"
+#define OTG_VOTER "OTG_VOTER"
+#define FCC_CHANGE_VOTER "FCC_CHANGE_VOTER"
#define VCONN_MAX_ATTEMPTS 3
#define OTG_MAX_ATTEMPTS 3
+#define BOOST_BACK_STORM_COUNT 3
+#define WEAK_CHG_STORM_COUNT 8
enum smb_mode {
PARALLEL_MASTER = 0,
@@ -187,9 +192,6 @@
struct smb_chg_param dc_icl_div2_mid_hv;
struct smb_chg_param dc_icl_div2_hv;
struct smb_chg_param jeita_cc_comp;
- struct smb_chg_param step_soc_threshold[4];
- struct smb_chg_param step_soc;
- struct smb_chg_param step_cc_delta[5];
struct smb_chg_param freq_buck;
struct smb_chg_param freq_boost;
};
@@ -230,6 +232,7 @@
struct smb_chg_freq chg_freq;
int smb_version;
int otg_delay_ms;
+ int *weak_chg_icl_ua;
/* locks */
struct mutex lock;
@@ -283,7 +286,6 @@
struct work_struct rdstd_cc2_detach_work;
struct delayed_work hvdcp_detect_work;
struct delayed_work ps_change_timeout_work;
- struct delayed_work step_soc_req_work;
struct delayed_work clear_hdc_work;
struct work_struct otg_oc_work;
struct work_struct vconn_oc_work;
@@ -292,6 +294,7 @@
struct delayed_work pl_enable_work;
struct work_struct legacy_detection_work;
struct delayed_work uusb_otg_work;
+ struct delayed_work bb_removal_work;
/* cached status */
int voltage_min_uv;
@@ -305,6 +308,7 @@
int dcp_icl_ua;
int fake_capacity;
bool step_chg_enabled;
+ bool sw_jeita_enabled;
bool is_hdc;
bool chg_done;
bool micro_usb_mode;
@@ -323,6 +327,9 @@
int fake_input_current_limited;
bool pr_swap_in_progress;
int typec_mode;
+ int usb_icl_change_irq_enabled;
+ u32 jeita_status;
+ u8 float_cfg;
/* workaround flag */
u32 wa_flags;
@@ -377,9 +384,6 @@
irqreturn_t smblib_handle_debug(int irq, void *data);
irqreturn_t smblib_handle_otg_overcurrent(int irq, void *data);
irqreturn_t smblib_handle_chg_state_change(int irq, void *data);
-irqreturn_t smblib_handle_step_chg_state_change(int irq, void *data);
-irqreturn_t smblib_handle_step_chg_soc_update_fail(int irq, void *data);
-irqreturn_t smblib_handle_step_chg_soc_update_request(int irq, void *data);
irqreturn_t smblib_handle_batt_temp_changed(int irq, void *data);
irqreturn_t smblib_handle_batt_psy_changed(int irq, void *data);
irqreturn_t smblib_handle_usb_psy_changed(int irq, void *data);
@@ -419,9 +423,6 @@
union power_supply_propval *val);
int smblib_get_prop_batt_temp(struct smb_charger *chg,
union power_supply_propval *val);
-int smblib_get_prop_step_chg_step(struct smb_charger *chg,
- union power_supply_propval *val);
-
int smblib_set_prop_input_suspend(struct smb_charger *chg,
const union power_supply_propval *val);
int smblib_set_prop_batt_capacity(struct smb_charger *chg,
@@ -501,7 +502,7 @@
void smblib_suspend_on_debug_battery(struct smb_charger *chg);
int smblib_rerun_apsd_if_required(struct smb_charger *chg);
int smblib_get_prop_fcc_delta(struct smb_charger *chg,
- union power_supply_propval *val);
+ union power_supply_propval *val);
int smblib_icl_override(struct smb_charger *chg, bool override);
int smblib_dp_dm(struct smb_charger *chg, int val);
int smblib_rerun_aicl(struct smb_charger *chg);
diff --git a/drivers/power/supply/qcom/smb1355-charger.c b/drivers/power/supply/qcom/smb1355-charger.c
index d5fff74..4e1bb17 100644
--- a/drivers/power/supply/qcom/smb1355-charger.c
+++ b/drivers/power/supply/qcom/smb1355-charger.c
@@ -16,6 +16,7 @@
#include <linux/regmap.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/iio/consumer.h>
#include <linux/platform_device.h>
#include <linux/qpnp/qpnp-revid.h>
#include <linux/of.h>
@@ -61,6 +62,19 @@
#define CHGR_BATTOV_CFG_REG (CHGR_BASE + 0x70)
#define BATTOV_SETTING_MASK GENMASK(7, 0)
+#define POWER_MODE_HICCUP_CFG (BATIF_BASE + 0x72)
+#define MAX_HICCUP_DUETO_BATDIS_MASK GENMASK(5, 2)
+#define HICCUP_TIMEOUT_CFG_MASK GENMASK(1, 0)
+
+#define TEMP_COMP_STATUS_REG (MISC_BASE + 0x07)
+#define SKIN_TEMP_RST_HOT_BIT BIT(6)
+#define SKIN_TEMP_UB_HOT_BIT BIT(5)
+#define SKIN_TEMP_LB_HOT_BIT BIT(4)
+#define DIE_TEMP_TSD_HOT_BIT BIT(3)
+#define DIE_TEMP_RST_HOT_BIT BIT(2)
+#define DIE_TEMP_UB_HOT_BIT BIT(1)
+#define DIE_TEMP_LB_HOT_BIT BIT(0)
+
#define BARK_BITE_WDOG_PET_REG (MISC_BASE + 0x43)
#define BARK_BITE_WDOG_PET_BIT BIT(0)
@@ -115,12 +129,18 @@
int irq;
};
+struct smb_iio {
+ struct iio_channel *temp_chan;
+ struct iio_channel *temp_max_chan;
+};
+
struct smb1355 {
struct device *dev;
char *name;
struct regmap *regmap;
struct smb_params param;
+ struct smb_iio iio;
struct mutex write_lock;
@@ -257,9 +277,13 @@
POWER_SUPPLY_PROP_CHARGING_ENABLED,
POWER_SUPPLY_PROP_PIN_ENABLED,
POWER_SUPPLY_PROP_INPUT_SUSPEND,
+ POWER_SUPPLY_PROP_CHARGER_TEMP,
+ POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
POWER_SUPPLY_PROP_VOLTAGE_MAX,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_PARALLEL_MODE,
+ POWER_SUPPLY_PROP_CONNECTOR_HEALTH,
};
static int smb1355_get_prop_batt_charge_type(struct smb1355 *chip,
@@ -305,6 +329,65 @@
return 0;
}
+static int smb1355_get_prop_connector_health(struct smb1355 *chip)
+{
+ u8 temp;
+ int rc;
+
+ rc = smb1355_read(chip, TEMP_COMP_STATUS_REG, &temp);
+ if (rc < 0) {
+ pr_err("Couldn't read comp stat reg rc = %d\n", rc);
+ return POWER_SUPPLY_HEALTH_UNKNOWN;
+ }
+
+ if (temp & SKIN_TEMP_RST_HOT_BIT)
+ return POWER_SUPPLY_HEALTH_OVERHEAT;
+
+ if (temp & SKIN_TEMP_UB_HOT_BIT)
+ return POWER_SUPPLY_HEALTH_HOT;
+
+ if (temp & SKIN_TEMP_LB_HOT_BIT)
+ return POWER_SUPPLY_HEALTH_WARM;
+
+ return POWER_SUPPLY_HEALTH_COOL;
+}
+
+
+static int smb1355_get_prop_charger_temp(struct smb1355 *chip,
+ union power_supply_propval *val)
+{
+ int rc;
+
+ if (!chip->iio.temp_chan ||
+ PTR_ERR(chip->iio.temp_chan) == -EPROBE_DEFER)
+ chip->iio.temp_chan = devm_iio_channel_get(chip->dev,
+ "charger_temp");
+
+ if (IS_ERR(chip->iio.temp_chan))
+ return PTR_ERR(chip->iio.temp_chan);
+
+ rc = iio_read_channel_processed(chip->iio.temp_chan, &val->intval);
+ val->intval /= 100;
+ return rc;
+}
+
+static int smb1355_get_prop_charger_temp_max(struct smb1355 *chip,
+ union power_supply_propval *val)
+{
+ int rc;
+
+ if (!chip->iio.temp_max_chan ||
+ PTR_ERR(chip->iio.temp_max_chan) == -EPROBE_DEFER)
+ chip->iio.temp_max_chan = devm_iio_channel_get(chip->dev,
+ "charger_temp_max");
+ if (IS_ERR(chip->iio.temp_max_chan))
+ return PTR_ERR(chip->iio.temp_max_chan);
+
+ rc = iio_read_channel_processed(chip->iio.temp_max_chan, &val->intval);
+ val->intval /= 100;
+ return rc;
+}
+
static int smb1355_parallel_get_prop(struct power_supply *psy,
enum power_supply_property prop,
union power_supply_propval *val)
@@ -327,6 +410,12 @@
if (rc >= 0)
val->intval = !(stat & DISABLE_CHARGING_BIT);
break;
+ case POWER_SUPPLY_PROP_CHARGER_TEMP:
+ rc = smb1355_get_prop_charger_temp(chip, val);
+ break;
+ case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX:
+ rc = smb1355_get_prop_charger_temp_max(chip, val);
+ break;
case POWER_SUPPLY_PROP_INPUT_SUSPEND:
rc = smb1355_get_parallel_charging(chip, &val->intval);
break;
@@ -344,6 +433,9 @@
case POWER_SUPPLY_PROP_PARALLEL_MODE:
val->intval = POWER_SUPPLY_PL_USBMID_USBMID;
break;
+ case POWER_SUPPLY_PROP_CONNECTOR_HEALTH:
+ val->intval = smb1355_get_prop_connector_health(chip);
+ break;
default:
pr_err_ratelimited("parallel psy get prop %d not supported\n",
prop);
@@ -492,6 +584,16 @@
return rc;
}
+ /* HICCUP setting, unlimited retry with 250ms interval */
+ rc = smb1355_masked_write(chip, POWER_MODE_HICCUP_CFG,
+ HICCUP_TIMEOUT_CFG_MASK | MAX_HICCUP_DUETO_BATDIS_MASK,
+ 0);
+ if (rc < 0) {
+ pr_err("Couldn't configure power mode hiccup rc=%d\n",
+ rc);
+ return rc;
+ }
+
/* enable parallel current sensing */
rc = smb1355_masked_write(chip, CFG_REG,
VCHG_EN_CFG_BIT, VCHG_EN_CFG_BIT);
diff --git a/drivers/power/supply/qcom/smb138x-charger.c b/drivers/power/supply/qcom/smb138x-charger.c
index ca0a2c6..dd949e7 100644
--- a/drivers/power/supply/qcom/smb138x-charger.c
+++ b/drivers/power/supply/qcom/smb138x-charger.c
@@ -845,6 +845,13 @@
}
}
+ /* configure to a fixed 700khz freq to avoid tdie errors */
+ rc = smblib_set_charge_param(chg, &chg->param.freq_buck, 700);
+ if (rc < 0) {
+ pr_err("Couldn't configure 700Khz switch freq rc=%d\n", rc);
+ return rc;
+ }
+
/* enable watchdog bark and bite interrupts, and disable the watchdog */
rc = smblib_masked_write(chg, WD_CFG_REG, WDOG_TIMER_EN_BIT
| WDOG_TIMER_EN_ON_PLUGIN_BIT | BITE_WDOG_INT_EN_BIT
diff --git a/drivers/power/supply/qcom/step-chg-jeita.c b/drivers/power/supply/qcom/step-chg-jeita.c
new file mode 100644
index 0000000..053aac3
--- /dev/null
+++ b/drivers/power/supply/qcom/step-chg-jeita.c
@@ -0,0 +1,494 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "QCOM-STEPCHG: %s: " fmt, __func__
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/power_supply.h>
+#include <linux/slab.h>
+#include <linux/pmic-voter.h>
+#include "step-chg-jeita.h"
+
+#define MAX_STEP_CHG_ENTRIES 8
+#define STEP_CHG_VOTER "STEP_CHG_VOTER"
+#define JEITA_VOTER "JEITA_VOTER"
+
+#define is_between(left, right, value) \
+ (((left) >= (right) && (left) >= (value) \
+ && (value) >= (right)) \
+ || ((left) <= (right) && (left) <= (value) \
+ && (value) <= (right)))
+
+struct range_data {
+ u32 low_threshold;
+ u32 high_threshold;
+ u32 value;
+};
+
+struct step_chg_cfg {
+ u32 psy_prop;
+ char *prop_name;
+ int hysteresis;
+ struct range_data fcc_cfg[MAX_STEP_CHG_ENTRIES];
+};
+
+struct jeita_fcc_cfg {
+ u32 psy_prop;
+ char *prop_name;
+ int hysteresis;
+ struct range_data fcc_cfg[MAX_STEP_CHG_ENTRIES];
+};
+
+struct jeita_fv_cfg {
+ u32 psy_prop;
+ char *prop_name;
+ int hysteresis;
+ struct range_data fv_cfg[MAX_STEP_CHG_ENTRIES];
+};
+
+struct step_chg_info {
+ ktime_t step_last_update_time;
+ ktime_t jeita_last_update_time;
+ bool step_chg_enable;
+ bool sw_jeita_enable;
+ int jeita_fcc_index;
+ int jeita_fv_index;
+ int step_index;
+
+ struct votable *fcc_votable;
+ struct votable *fv_votable;
+ struct wakeup_source *step_chg_ws;
+ struct power_supply *batt_psy;
+ struct delayed_work status_change_work;
+ struct notifier_block nb;
+};
+
+static struct step_chg_info *the_chip;
+
+#define STEP_CHG_HYSTERISIS_DELAY_US 5000000 /* 5 secs */
+
+/*
+ * Step Charging Configuration
+ * Update the table based on the battery profile
+ * Supports VBATT and SOC based source
+ * range data must be in increasing ranges and shouldn't overlap
+ */
+static struct step_chg_cfg step_chg_config = {
+ .psy_prop = POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ .prop_name = "VBATT",
+ .hysteresis = 100000, /* 100mV */
+ .fcc_cfg = {
+ /* VBAT_LOW VBAT_HIGH FCC */
+ {3600000, 4000000, 3000000},
+ {4001000, 4200000, 2800000},
+ {4201000, 4400000, 2000000},
+ },
+ /*
+ * SOC STEP-CHG configuration example.
+ *
+ * .psy_prop = POWER_SUPPLY_PROP_CAPACITY,
+ * .prop_name = "SOC",
+ * .fcc_cfg = {
+ * //SOC_LOW SOC_HIGH FCC
+ * {20, 70, 3000000},
+ * {70, 90, 2750000},
+ * {90, 100, 2500000},
+ * },
+ */
+};
+
+/*
+ * Jeita Charging Configuration
+ * Update the table based on the battery profile
+ * Please ensure that the TEMP ranges are programmed in the hw so that
+ * an interrupt is issued and a consequent psy changed will cause us to
+ * react immediately.
+ * range data must be in increasing ranges and shouldn't overlap.
+ * Gaps are okay
+ */
+static struct jeita_fcc_cfg jeita_fcc_config = {
+ .psy_prop = POWER_SUPPLY_PROP_TEMP,
+ .prop_name = "BATT_TEMP",
+ .hysteresis = 10, /* 1degC hysteresis */
+ .fcc_cfg = {
+ /* TEMP_LOW TEMP_HIGH FCC */
+ {0, 100, 600000},
+ {101, 200, 2000000},
+ {201, 450, 3000000},
+ {451, 550, 600000},
+ },
+};
+
+static struct jeita_fv_cfg jeita_fv_config = {
+ .psy_prop = POWER_SUPPLY_PROP_TEMP,
+ .prop_name = "BATT_TEMP",
+ .hysteresis = 10, /* 1degC hysteresis */
+ .fv_cfg = {
+ /* TEMP_LOW TEMP_HIGH FV */
+ {0, 100, 4200000},
+ {101, 450, 4400000},
+ {451, 550, 4200000},
+ },
+};
+
+static bool is_batt_available(struct step_chg_info *chip)
+{
+ if (!chip->batt_psy)
+ chip->batt_psy = power_supply_get_by_name("battery");
+
+ if (!chip->batt_psy)
+ return false;
+
+ return true;
+}
+
+static int get_val(struct range_data *range, int hysteresis, int current_index,
+ int threshold,
+ int *new_index, int *val)
+{
+ int i;
+
+ *new_index = -EINVAL;
+ /* first find the matching index without hysteresis */
+ for (i = 0; i < MAX_STEP_CHG_ENTRIES; i++)
+ if (is_between(range[i].low_threshold,
+ range[i].high_threshold, threshold)) {
+ *new_index = i;
+ *val = range[i].value;
+ }
+
+ /* if nothing was found, return -ENODATA */
+ if (*new_index == -EINVAL)
+ return -ENODATA;
+ /*
+ * If we don't have a current_index return this
+ * newfound value. There is no hysterisis from out of range
+ * to in range transition
+ */
+ if (current_index == -EINVAL)
+ return 0;
+
+ /*
+ * Check for hysteresis if it in the neighbourhood
+ * of our current index.
+ */
+ if (*new_index == current_index + 1) {
+ if (threshold < range[*new_index].low_threshold + hysteresis) {
+ /*
+ * Stay in the current index, threshold is not higher
+ * by hysteresis amount
+ */
+ *new_index = current_index;
+ *val = range[current_index].value;
+ }
+ } else if (*new_index == current_index - 1) {
+ if (threshold > range[*new_index].high_threshold - hysteresis) {
+ /*
+ * stay in the current index, threshold is not lower
+ * by hysteresis amount
+ */
+ *new_index = current_index;
+ *val = range[current_index].value;
+ }
+ }
+ return 0;
+}
+
+static int handle_step_chg_config(struct step_chg_info *chip)
+{
+ union power_supply_propval pval = {0, };
+ int rc = 0, fcc_ua = 0;
+ u64 elapsed_us;
+
+ elapsed_us = ktime_us_delta(ktime_get(), chip->step_last_update_time);
+ if (elapsed_us < STEP_CHG_HYSTERISIS_DELAY_US)
+ goto reschedule;
+
+ rc = power_supply_get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED, &pval);
+ if (rc < 0)
+ chip->step_chg_enable = 0;
+ else
+ chip->step_chg_enable = pval.intval;
+
+ if (!chip->step_chg_enable) {
+ if (chip->fcc_votable)
+ vote(chip->fcc_votable, STEP_CHG_VOTER, false, 0);
+ goto update_time;
+ }
+
+ rc = power_supply_get_property(chip->batt_psy,
+ step_chg_config.psy_prop, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't read %s property rc=%d\n",
+ step_chg_config.prop_name, rc);
+ return rc;
+ }
+
+ rc = get_val(step_chg_config.fcc_cfg, step_chg_config.hysteresis,
+ chip->step_index,
+ pval.intval,
+ &chip->step_index,
+ &fcc_ua);
+ if (rc < 0) {
+ /* remove the vote if no step-based fcc is found */
+ if (chip->fcc_votable)
+ vote(chip->fcc_votable, STEP_CHG_VOTER, false, 0);
+ goto update_time;
+ }
+
+ if (!chip->fcc_votable)
+ chip->fcc_votable = find_votable("FCC");
+ if (!chip->fcc_votable)
+ return -EINVAL;
+
+ vote(chip->fcc_votable, STEP_CHG_VOTER, true, fcc_ua);
+
+ pr_debug("%s = %d Step-FCC = %duA\n",
+ step_chg_config.prop_name, pval.intval, fcc_ua);
+
+update_time:
+ chip->step_last_update_time = ktime_get();
+ return 0;
+
+reschedule:
+ /* reschedule 1000uS after the remaining time */
+ return (STEP_CHG_HYSTERISIS_DELAY_US - elapsed_us + 1000);
+}
+
+static int handle_jeita(struct step_chg_info *chip)
+{
+ union power_supply_propval pval = {0, };
+ int rc = 0, fcc_ua = 0, fv_uv = 0;
+ u64 elapsed_us;
+
+ if (!chip->sw_jeita_enable) {
+ if (chip->fcc_votable)
+ vote(chip->fcc_votable, JEITA_VOTER, false, 0);
+ if (chip->fv_votable)
+ vote(chip->fv_votable, JEITA_VOTER, false, 0);
+ return 0;
+ }
+
+ elapsed_us = ktime_us_delta(ktime_get(), chip->jeita_last_update_time);
+ if (elapsed_us < STEP_CHG_HYSTERISIS_DELAY_US)
+ goto reschedule;
+
+ rc = power_supply_get_property(chip->batt_psy,
+ jeita_fcc_config.psy_prop, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't read %s property rc=%d\n",
+ jeita_fcc_config.prop_name, rc);
+ return rc;
+ }
+
+ rc = get_val(jeita_fcc_config.fcc_cfg, jeita_fcc_config.hysteresis,
+ chip->jeita_fcc_index,
+ pval.intval,
+ &chip->jeita_fcc_index,
+ &fcc_ua);
+ if (rc < 0) {
+ /* remove the vote if no step-based fcc is found */
+ if (chip->fcc_votable)
+ vote(chip->fcc_votable, JEITA_VOTER, false, 0);
+ goto update_time;
+ }
+
+ if (!chip->fcc_votable)
+ chip->fcc_votable = find_votable("FCC");
+ if (!chip->fcc_votable)
+ /* changing FCC is a must */
+ return -EINVAL;
+
+ vote(chip->fcc_votable, JEITA_VOTER, true, fcc_ua);
+
+ rc = get_val(jeita_fv_config.fv_cfg, jeita_fv_config.hysteresis,
+ chip->jeita_fv_index,
+ pval.intval,
+ &chip->jeita_fv_index,
+ &fv_uv);
+ if (rc < 0) {
+ /* remove the vote if no step-based fcc is found */
+ if (chip->fv_votable)
+ vote(chip->fv_votable, JEITA_VOTER, false, 0);
+ goto update_time;
+ }
+
+ chip->fv_votable = find_votable("FV");
+ if (!chip->fv_votable)
+ goto update_time;
+
+ vote(chip->fv_votable, JEITA_VOTER, true, fv_uv);
+
+ pr_debug("%s = %d FCC = %duA FV = %duV\n",
+ jeita_fcc_config.prop_name, pval.intval, fcc_ua, fv_uv);
+
+update_time:
+ chip->jeita_last_update_time = ktime_get();
+ return 0;
+
+reschedule:
+ /* reschedule 1000uS after the remaining time */
+ return (STEP_CHG_HYSTERISIS_DELAY_US - elapsed_us + 1000);
+}
+
+static void status_change_work(struct work_struct *work)
+{
+ struct step_chg_info *chip = container_of(work,
+ struct step_chg_info, status_change_work.work);
+ int rc = 0;
+ int reschedule_us;
+ int reschedule_jeita_work_us = 0;
+ int reschedule_step_work_us = 0;
+
+ if (!is_batt_available(chip))
+ return;
+
+ /* skip elapsed_us debounce for handling battery temperature */
+ rc = handle_jeita(chip);
+ if (rc > 0)
+ reschedule_jeita_work_us = rc;
+ else if (rc < 0)
+ pr_err("Couldn't handle sw jeita rc = %d\n", rc);
+
+ rc = handle_step_chg_config(chip);
+ if (rc > 0)
+ reschedule_step_work_us = rc;
+ if (rc < 0)
+ pr_err("Couldn't handle step rc = %d\n", rc);
+
+ reschedule_us = min(reschedule_jeita_work_us, reschedule_step_work_us);
+ if (reschedule_us == 0)
+ __pm_relax(chip->step_chg_ws);
+ else
+ schedule_delayed_work(&chip->status_change_work,
+ usecs_to_jiffies(reschedule_us));
+}
+
+static int step_chg_notifier_call(struct notifier_block *nb,
+ unsigned long ev, void *v)
+{
+ struct power_supply *psy = v;
+ struct step_chg_info *chip = container_of(nb, struct step_chg_info, nb);
+
+ if (ev != PSY_EVENT_PROP_CHANGED)
+ return NOTIFY_OK;
+
+ if ((strcmp(psy->desc->name, "battery") == 0)) {
+ __pm_stay_awake(chip->step_chg_ws);
+ schedule_delayed_work(&chip->status_change_work, 0);
+ }
+
+ return NOTIFY_OK;
+}
+
+static int step_chg_register_notifier(struct step_chg_info *chip)
+{
+ int rc;
+
+ chip->nb.notifier_call = step_chg_notifier_call;
+ rc = power_supply_reg_notifier(&chip->nb);
+ if (rc < 0) {
+ pr_err("Couldn't register psy notifier rc = %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+int qcom_step_chg_init(bool step_chg_enable, bool sw_jeita_enable)
+{
+ int rc;
+ struct step_chg_info *chip;
+
+ if (the_chip) {
+ pr_err("Already initialized\n");
+ return -EINVAL;
+ }
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->step_chg_ws = wakeup_source_register("qcom-step-chg");
+ if (!chip->step_chg_ws) {
+ rc = -EINVAL;
+ goto cleanup;
+ }
+
+ chip->step_chg_enable = step_chg_enable;
+ chip->sw_jeita_enable = sw_jeita_enable;
+
+ chip->step_index = -EINVAL;
+ chip->jeita_fcc_index = -EINVAL;
+ chip->jeita_fv_index = -EINVAL;
+
+ if (step_chg_enable && (!step_chg_config.psy_prop ||
+ !step_chg_config.prop_name)) {
+ /* fail if step-chg configuration is invalid */
+ pr_err("Step-chg configuration not defined - fail\n");
+ rc = -ENODATA;
+ goto release_wakeup_source;
+ }
+
+ if (sw_jeita_enable && (!jeita_fcc_config.psy_prop ||
+ !jeita_fcc_config.prop_name)) {
+ /* fail if step-chg configuration is invalid */
+ pr_err("Jeita TEMP configuration not defined - fail\n");
+ rc = -ENODATA;
+ goto release_wakeup_source;
+ }
+
+ if (sw_jeita_enable && (!jeita_fv_config.psy_prop ||
+ !jeita_fv_config.prop_name)) {
+ /* fail if step-chg configuration is invalid */
+ pr_err("Jeita TEMP configuration not defined - fail\n");
+ rc = -ENODATA;
+ goto release_wakeup_source;
+ }
+
+ INIT_DELAYED_WORK(&chip->status_change_work, status_change_work);
+
+ rc = step_chg_register_notifier(chip);
+ if (rc < 0) {
+ pr_err("Couldn't register psy notifier rc = %d\n", rc);
+ goto release_wakeup_source;
+ }
+
+ the_chip = chip;
+
+ if (step_chg_enable)
+ pr_info("Step charging enabled. Using %s source\n",
+ step_chg_config.prop_name);
+
+ return 0;
+
+release_wakeup_source:
+ wakeup_source_unregister(chip->step_chg_ws);
+cleanup:
+ kfree(chip);
+ return rc;
+}
+
+void qcom_step_chg_deinit(void)
+{
+ struct step_chg_info *chip = the_chip;
+
+ if (!chip)
+ return;
+
+ cancel_delayed_work_sync(&chip->status_change_work);
+ power_supply_unreg_notifier(&chip->nb);
+ wakeup_source_unregister(chip->step_chg_ws);
+ the_chip = NULL;
+ kfree(chip);
+}
diff --git a/drivers/power/supply/qcom/step-chg-jeita.h b/drivers/power/supply/qcom/step-chg-jeita.h
new file mode 100644
index 0000000..5bb2b99
--- /dev/null
+++ b/drivers/power/supply/qcom/step-chg-jeita.h
@@ -0,0 +1,17 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __STEP_CHG_H__
+#define __STEP_CHG_H__
+int qcom_step_chg_init(bool step_chg_enable, bool sw_jeita_enable);
+void qcom_step_chg_deinit(void);
+#endif /* __STEP_CHG_H__ */
diff --git a/drivers/power/supply/qcom/storm-watch.c b/drivers/power/supply/qcom/storm-watch.c
index 5275079..21ac669 100644
--- a/drivers/power/supply/qcom/storm-watch.c
+++ b/drivers/power/supply/qcom/storm-watch.c
@@ -64,3 +64,13 @@
data->storm_count = 0;
mutex_unlock(&data->storm_lock);
}
+
+void update_storm_count(struct storm_watch *data, int max_count)
+{
+ if (!data)
+ return;
+
+ mutex_lock(&data->storm_lock);
+ data->max_storm_count = max_count;
+ mutex_unlock(&data->storm_lock);
+}
diff --git a/drivers/power/supply/qcom/storm-watch.h b/drivers/power/supply/qcom/storm-watch.h
index ff05c4a..5275d73 100644
--- a/drivers/power/supply/qcom/storm-watch.h
+++ b/drivers/power/supply/qcom/storm-watch.h
@@ -37,4 +37,5 @@
bool is_storming(struct storm_watch *data);
void reset_storm_count(struct storm_watch *data);
+void update_storm_count(struct storm_watch *data, int max_count);
#endif
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 7ad650e..d75f157 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2408,6 +2408,14 @@
count = rdev->deferred_disables;
rdev->deferred_disables = 0;
+ /*
+ * Workqueue functions queue the new work instance while the previous
+ * work instance is being processed. Cancel the queued work instance
+ * as the work instance under processing does the job of the queued
+ * work instance.
+ */
+ cancel_delayed_work(&rdev->disable_work);
+
for (i = 0; i < count; i++) {
ret = _regulator_disable(rdev);
if (ret != 0)
@@ -2451,10 +2459,10 @@
mutex_lock(&rdev->mutex);
rdev->deferred_disables++;
+ mod_delayed_work(system_power_efficient_wq, &rdev->disable_work,
+ msecs_to_jiffies(ms));
mutex_unlock(&rdev->mutex);
- queue_delayed_work(system_power_efficient_wq, &rdev->disable_work,
- msecs_to_jiffies(ms));
return 0;
}
EXPORT_SYMBOL_GPL(regulator_disable_deferred);
diff --git a/drivers/regulator/cpr3-regulator.c b/drivers/regulator/cpr3-regulator.c
index b1e6a3b..9510016 100644
--- a/drivers/regulator/cpr3-regulator.c
+++ b/drivers/regulator/cpr3-regulator.c
@@ -1316,6 +1316,27 @@
static int cprh_regulator_aging_adjust(struct cpr3_controller *ctrl);
/**
+ * cpr3_regulator_cprh_initialized() - checks if CPRh has already been
+ * initialized by the boot loader
+ * @ctrl: Pointer to the CPR3 controller
+ *
+ * Return: true if CPRh controller is already initialized else false
+ */
+static bool cpr3_regulator_cprh_initialized(struct cpr3_controller *ctrl)
+{
+ u32 reg;
+
+ if (ctrl->ctrl_type != CPR_CTRL_TYPE_CPRH)
+ return false;
+
+ ctrl->cpr_hw_version = readl_relaxed(ctrl->cpr_ctrl_base
+ + CPR3_REG_CPR_VERSION);
+ reg = readl_relaxed(ctrl->cpr_ctrl_base + CPRH_REG_CTL(ctrl));
+
+ return reg & CPRH_CTL_OSM_ENABLED;
+}
+
+/**
* cpr3_regulator_init_cprh() - performs hardware initialization at the
* controller and thread level required for CPRh operation.
* @ctrl: Pointer to the CPR3 controller
@@ -6459,6 +6480,11 @@
}
ctrl->cpr_ctrl_base = devm_ioremap(dev, res->start, resource_size(res));
+ if (cpr3_regulator_cprh_initialized(ctrl)) {
+ cpr3_err(ctrl, "CPRh controller already initialized by boot loader\n");
+ return -EPERM;
+ }
+
if (ctrl->aging_possible_mask) {
/*
* Aging possible register address is required if an aging
diff --git a/drivers/regulator/cprh-kbss-regulator.c b/drivers/regulator/cprh-kbss-regulator.c
index deb0ce5..c393940 100644
--- a/drivers/regulator/cprh-kbss-regulator.c
+++ b/drivers/regulator/cprh-kbss-regulator.c
@@ -38,12 +38,10 @@
#define SDM660_KBSS_FUSE_CORNERS 5
#define SDM845_KBSS_POWER_CLUSTER_FUSE_CORNERS 4
-#define SDM845_KBSS_PERFORMANCE_CLUSTER_FUSE_CORNERS 3
-/*
- * This must be set to the larger of SDM845_KBSS_POWER_CLUSTER_FUSE_CORNERS and
- * SDM845_KBSS_PERFORMANCE_CLUSTER_FUSE_CORNERS values.
- */
-#define SDM845_KBSS_MAX_FUSE_CORNERS 4
+#define SDM845_V1_KBSS_PERF_CLUSTER_FUSE_CORNERS 3
+#define SDM845_V2_KBSS_PERF_CLUSTER_FUSE_CORNERS 5
+/* This must be set to the largest of SDM845 FUSE_CORNERS values. */
+#define SDM845_KBSS_MAX_FUSE_CORNERS 5
/**
* struct cprh_kbss_fuses - KBSS specific fuse data
@@ -153,18 +151,38 @@
#define CPRH_KBSS_PERFORMANCE_CLUSTER_THREAD_ID 0
static const char * const
-cprh_sdm845_kbss_fuse_corner_name[2][SDM845_KBSS_MAX_FUSE_CORNERS] = {
+cprh_sdm845_v1_kbss_fuse_corner_name[2][SDM845_KBSS_MAX_FUSE_CORNERS] = {
[CPRH_KBSS_POWER_CLUSTER_ID] = {
"LowSVS",
"SVS_L1",
"NOM_L1",
"TURBO",
+ "",
},
[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
"SVS",
"NOM",
"TURBO_L2",
"",
+ "",
+ },
+};
+
+static const char * const
+cprh_sdm845_v2_kbss_fuse_corner_name[2][SDM845_KBSS_MAX_FUSE_CORNERS] = {
+ [CPRH_KBSS_POWER_CLUSTER_ID] = {
+ "LowSVS",
+ "SVS_L1",
+ "NOM",
+ "TURBO",
+ "",
+ },
+ [CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+ "LowSVS",
+ "SVS",
+ "NOM_L1",
+ "TURBO_L2",
+ "BINNING",
},
};
@@ -334,7 +352,7 @@
* different fuse rows.
*/
static const struct cpr3_fuse_param
-sdm845_kbss_ro_sel_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][3] = {
+sdm845_v1_kbss_ro_sel_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][3] = {
[CPRH_KBSS_POWER_CLUSTER_ID] = {
[CPRH_KBSS_POWER_CLUSTER_THREAD_ID] = {
{{66, 52, 55}, {} },
@@ -359,7 +377,34 @@
};
static const struct cpr3_fuse_param
-sdm845_kbss_init_voltage_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][3] = {
+sdm845_v2_kbss_ro_sel_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][3] = {
+ [CPRH_KBSS_POWER_CLUSTER_ID] = {
+ [CPRH_KBSS_POWER_CLUSTER_THREAD_ID] = {
+ {{66, 52, 55}, {} },
+ {{66, 48, 51}, {} },
+ {{66, 44, 47}, {} },
+ {{66, 40, 43}, {} },
+ },
+ [CPRH_KBSS_L3_THREAD_ID] = {
+ {{66, 52, 55}, {} },
+ {{66, 48, 51}, {} },
+ {{66, 44, 47}, {} },
+ {{66, 40, 43}, {} },
+ },
+ },
+ [CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+ [CPRH_KBSS_PERFORMANCE_CLUSTER_THREAD_ID] = {
+ {{73, 5, 8}, {} },
+ {{70, 12, 15}, {} },
+ {{70, 8, 11}, {} },
+ {{70, 4, 7}, {} },
+ {{70, 0, 3}, {} },
+ },
+ },
+};
+
+static const struct cpr3_fuse_param
+sdm845_v1_kbss_init_voltage_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][3] = {
[CPRH_KBSS_POWER_CLUSTER_ID] = {
[CPRH_KBSS_POWER_CLUSTER_THREAD_ID] = {
{{67, 10, 15}, {} },
@@ -384,7 +429,34 @@
};
static const struct cpr3_fuse_param
-sdm845_kbss_target_quot_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][3] = {
+sdm845_v2_kbss_init_voltage_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][3] = {
+ [CPRH_KBSS_POWER_CLUSTER_ID] = {
+ [CPRH_KBSS_POWER_CLUSTER_THREAD_ID] = {
+ {{67, 10, 15}, {} },
+ {{67, 4, 9}, {} },
+ {{66, 62, 63}, {67, 0, 3}, {} },
+ {{66, 56, 61}, {} },
+ },
+ [CPRH_KBSS_L3_THREAD_ID] = {
+ {{68, 50, 55}, {} },
+ {{68, 44, 49}, {} },
+ {{68, 38, 43}, {} },
+ {{68, 32, 37}, {} },
+ },
+ },
+ [CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+ [CPRH_KBSS_PERFORMANCE_CLUSTER_THREAD_ID] = {
+ {{72, 10, 15}, {} },
+ {{70, 34, 39}, {} },
+ {{70, 28, 33}, {} },
+ {{70, 22, 27}, {} },
+ {{70, 16, 21}, {} },
+ },
+ },
+};
+
+static const struct cpr3_fuse_param
+sdm845_v1_kbss_target_quot_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][3] = {
[CPRH_KBSS_POWER_CLUSTER_ID] = {
[CPRH_KBSS_POWER_CLUSTER_THREAD_ID] = {
{{67, 52, 63}, {} },
@@ -409,7 +481,34 @@
};
static const struct cpr3_fuse_param
-sdm845_kbss_quot_offset_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][2] = {
+sdm845_v2_kbss_target_quot_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][3] = {
+ [CPRH_KBSS_POWER_CLUSTER_ID] = {
+ [CPRH_KBSS_POWER_CLUSTER_THREAD_ID] = {
+ {{67, 52, 63}, {} },
+ {{67, 40, 51}, {} },
+ {{67, 28, 39}, {} },
+ {{67, 16, 27}, {} },
+ },
+ [CPRH_KBSS_L3_THREAD_ID] = {
+ {{69, 28, 39}, {} },
+ {{69, 16, 27}, {} },
+ {{69, 4, 15}, {} },
+ {{68, 56, 63}, {69, 0, 3}, {} },
+ },
+ },
+ [CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+ [CPRH_KBSS_PERFORMANCE_CLUSTER_THREAD_ID] = {
+ {{72, 16, 27}, {} },
+ {{71, 12, 23}, {} },
+ {{71, 0, 11}, {} },
+ {{70, 52, 63}, {} },
+ {{70, 40, 51}, {} },
+ },
+ },
+};
+
+static const struct cpr3_fuse_param
+sdm845_v1_kbss_quot_offset_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][2] = {
[CPRH_KBSS_POWER_CLUSTER_ID] = {
[CPRH_KBSS_POWER_CLUSTER_THREAD_ID] = {
{{} },
@@ -433,6 +532,33 @@
},
};
+static const struct cpr3_fuse_param
+sdm845_v2_kbss_quot_offset_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][2] = {
+ [CPRH_KBSS_POWER_CLUSTER_ID] = {
+ [CPRH_KBSS_POWER_CLUSTER_THREAD_ID] = {
+ {{} },
+ {{68, 16, 23}, {} },
+ {{68, 8, 15}, {} },
+ {{68, 0, 7}, {} },
+ },
+ [CPRH_KBSS_L3_THREAD_ID] = {
+ {{} },
+ {{69, 56, 63}, {} },
+ {{69, 48, 55}, {} },
+ {{69, 40, 47}, {} },
+ },
+ },
+ [CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+ [CPRH_KBSS_PERFORMANCE_CLUSTER_THREAD_ID] = {
+ {{} },
+ {{72, 28, 35}, {} },
+ {{71, 40, 47}, {} },
+ {{71, 32, 39}, {} },
+ {{71, 24, 31}, {} },
+ },
+ },
+};
+
static const struct cpr3_fuse_param msm8998_cpr_fusing_rev_param[] = {
{39, 51, 53},
{},
@@ -443,11 +569,16 @@
{},
};
-static const struct cpr3_fuse_param sdm845_cpr_fusing_rev_param[] = {
+static const struct cpr3_fuse_param sdm845_v1_cpr_fusing_rev_param[] = {
{73, 3, 5},
{},
};
+static const struct cpr3_fuse_param sdm845_v2_cpr_fusing_rev_param[] = {
+ {75, 34, 36},
+ {},
+};
+
static const struct cpr3_fuse_param kbss_speed_bin_param[] = {
{38, 29, 31},
{},
@@ -490,7 +621,7 @@
};
static const struct cpr3_fuse_param
-sdm845_kbss_aging_init_quot_diff_param[2][2] = {
+sdm845_v1_kbss_aging_init_quot_diff_param[2][2] = {
[CPRH_KBSS_POWER_CLUSTER_ID] = {
{68, 21, 28},
{},
@@ -501,6 +632,18 @@
},
};
+static const struct cpr3_fuse_param
+sdm845_v2_kbss_aging_init_quot_diff_param[2][2] = {
+ [CPRH_KBSS_POWER_CLUSTER_ID] = {
+ {68, 24, 31},
+ {},
+ },
+ [CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+ {71, 48, 55},
+ {},
+ },
+};
+
/*
* Open loop voltage fuse reference voltages in microvolts for MSM8998 v1
*/
@@ -556,7 +699,7 @@
* Open loop voltage fuse reference voltages in microvolts for SDM845
*/
static const int
-sdm845_kbss_fuse_ref_volt[2][2][SDM845_KBSS_MAX_FUSE_CORNERS] = {
+sdm845_v1_kbss_fuse_ref_volt[2][2][SDM845_KBSS_MAX_FUSE_CORNERS] = {
[CPRH_KBSS_POWER_CLUSTER_ID] = {
[CPRH_KBSS_POWER_CLUSTER_THREAD_ID] = {
688000,
@@ -580,6 +723,33 @@
},
};
+static const int
+sdm845_v2_kbss_fuse_ref_volt[2][2][SDM845_KBSS_MAX_FUSE_CORNERS] = {
+ [CPRH_KBSS_POWER_CLUSTER_ID] = {
+ [CPRH_KBSS_POWER_CLUSTER_THREAD_ID] = {
+ 688000,
+ 812000,
+ 828000,
+ 952000,
+ },
+ [CPRH_KBSS_L3_THREAD_ID] = {
+ 688000,
+ 812000,
+ 828000,
+ 952000,
+ },
+ },
+ [CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+ [CPRH_KBSS_PERFORMANCE_CLUSTER_THREAD_ID] = {
+ 688000,
+ 812000,
+ 884000,
+ 1000000,
+ 1000000,
+ },
+ },
+};
+
#define CPRH_KBSS_FUSE_STEP_VOLT 10000
#define CPRH_SDM845_KBSS_FUSE_STEP_VOLT 8000
#define CPRH_KBSS_VOLTAGE_FUSE_SIZE 6
@@ -880,9 +1050,11 @@
struct cprh_kbss_fuses *fuse)
{
void __iomem *base = vreg->thread->ctrl->fuse_base;
+ bool is_v1 = (vreg->thread->ctrl->soc_revision == SDM845_V1_SOC_ID);
int i, cid, tid, rc;
- rc = cpr3_read_fuse_param(base, sdm845_cpr_fusing_rev_param,
+ rc = cpr3_read_fuse_param(base, is_v1 ? sdm845_v1_cpr_fusing_rev_param
+ : sdm845_v2_cpr_fusing_rev_param,
&fuse->cpr_fusing_rev);
if (rc) {
cpr3_err(vreg, "Unable to read CPR fusing revision fuse, rc=%d\n",
@@ -895,8 +1067,9 @@
cid = vreg->thread->ctrl->ctrl_id;
for (i = 0; i < vreg->fuse_corner_count; i++) {
- rc = cpr3_read_fuse_param(base,
- sdm845_kbss_init_voltage_param[cid][tid][i],
+ rc = cpr3_read_fuse_param(base, is_v1 ?
+ sdm845_v1_kbss_init_voltage_param[cid][tid][i] :
+ sdm845_v2_kbss_init_voltage_param[cid][tid][i],
&fuse->init_voltage[i]);
if (rc) {
cpr3_err(vreg, "Unable to read fuse-corner %d initial voltage fuse, rc=%d\n",
@@ -904,8 +1077,9 @@
return rc;
}
- rc = cpr3_read_fuse_param(base,
- sdm845_kbss_target_quot_param[cid][tid][i],
+ rc = cpr3_read_fuse_param(base, is_v1 ?
+ sdm845_v1_kbss_target_quot_param[cid][tid][i] :
+ sdm845_v2_kbss_target_quot_param[cid][tid][i],
&fuse->target_quot[i]);
if (rc) {
cpr3_err(vreg, "Unable to read fuse-corner %d target quotient fuse, rc=%d\n",
@@ -913,8 +1087,9 @@
return rc;
}
- rc = cpr3_read_fuse_param(base,
- sdm845_kbss_ro_sel_param[cid][tid][i],
+ rc = cpr3_read_fuse_param(base, is_v1 ?
+ sdm845_v1_kbss_ro_sel_param[cid][tid][i] :
+ sdm845_v2_kbss_ro_sel_param[cid][tid][i],
&fuse->ro_sel[i]);
if (rc) {
cpr3_err(vreg, "Unable to read fuse-corner %d RO select fuse, rc=%d\n",
@@ -922,8 +1097,9 @@
return rc;
}
- rc = cpr3_read_fuse_param(base,
- sdm845_kbss_quot_offset_param[cid][tid][i],
+ rc = cpr3_read_fuse_param(base, is_v1 ?
+ sdm845_v1_kbss_quot_offset_param[cid][tid][i] :
+ sdm845_v2_kbss_quot_offset_param[cid][tid][i],
&fuse->quot_offset[i]);
if (rc) {
cpr3_err(vreg, "Unable to read fuse-corner %d quotient offset fuse, rc=%d\n",
@@ -932,8 +1108,9 @@
}
}
- rc = cpr3_read_fuse_param(base,
- sdm845_kbss_aging_init_quot_diff_param[cid],
+ rc = cpr3_read_fuse_param(base, is_v1 ?
+ sdm845_v1_kbss_aging_init_quot_diff_param[cid] :
+ sdm845_v2_kbss_aging_init_quot_diff_param[cid],
&fuse->aging_init_quot_diff);
if (rc) {
cpr3_err(vreg, "Unable to read aging initial quotient difference fuse, rc=%d\n",
@@ -994,11 +1171,16 @@
fuse_corners = MSM8998_KBSS_FUSE_CORNERS;
break;
case SDM845_V1_SOC_ID:
+ fuse_corners = vreg->thread->ctrl->ctrl_id
+ == CPRH_KBSS_POWER_CLUSTER_ID
+ ? SDM845_KBSS_POWER_CLUSTER_FUSE_CORNERS
+ : SDM845_V1_KBSS_PERF_CLUSTER_FUSE_CORNERS;
+ break;
case SDM845_V2_SOC_ID:
fuse_corners = vreg->thread->ctrl->ctrl_id
== CPRH_KBSS_POWER_CLUSTER_ID
? SDM845_KBSS_POWER_CLUSTER_FUSE_CORNERS
- : SDM845_KBSS_PERFORMANCE_CLUSTER_FUSE_CORNERS;
+ : SDM845_V2_KBSS_PERF_CLUSTER_FUSE_CORNERS;
break;
default:
cpr3_err(vreg, "unsupported soc id = %d\n", soc_revision);
@@ -1156,10 +1338,14 @@
corner_name = cprh_msm8998_kbss_fuse_corner_name;
break;
case SDM845_V1_SOC_ID:
+ tid = cprh_kbss_get_thread_id(vreg->thread);
+ ref_volt = sdm845_v1_kbss_fuse_ref_volt[id][tid];
+ corner_name = cprh_sdm845_v1_kbss_fuse_corner_name[id];
+ break;
case SDM845_V2_SOC_ID:
tid = cprh_kbss_get_thread_id(vreg->thread);
- ref_volt = sdm845_kbss_fuse_ref_volt[id][tid];
- corner_name = cprh_sdm845_kbss_fuse_corner_name[id];
+ ref_volt = sdm845_v2_kbss_fuse_ref_volt[id][tid];
+ corner_name = cprh_sdm845_v2_kbss_fuse_corner_name[id];
break;
default:
cpr3_err(vreg, "unsupported soc id = %d\n", soc_revision);
@@ -1744,8 +1930,13 @@
CPRH_MSM8998_KBSS_FUSE_CORNER_TURBO_L1;
break;
case SDM845_V1_SOC_ID:
+ corner_name = cprh_sdm845_v1_kbss_fuse_corner_name[
+ vreg->thread->ctrl->ctrl_id];
+ lowest_fuse_corner = 0;
+ highest_fuse_corner = vreg->fuse_corner_count - 1;
+ break;
case SDM845_V2_SOC_ID:
- corner_name = cprh_sdm845_kbss_fuse_corner_name[
+ corner_name = cprh_sdm845_v2_kbss_fuse_corner_name[
vreg->thread->ctrl->ctrl_id];
lowest_fuse_corner = 0;
highest_fuse_corner = vreg->fuse_corner_count - 1;
diff --git a/drivers/regulator/rpmh-regulator.c b/drivers/regulator/rpmh-regulator.c
index 4f5f86c..562b05a 100644
--- a/drivers/regulator/rpmh-regulator.c
+++ b/drivers/regulator/rpmh-regulator.c
@@ -433,6 +433,7 @@
bool sleep_set_differs = aggr_vreg->sleep_request_sent;
bool wait_for_ack = aggr_vreg->always_wait_for_ack
|| aggr_vreg->next_wait_for_ack;
+ bool resend_active = false;
int i, j, max_reg_index, rc;
enum rpmh_state state;
u32 sent_mask;
@@ -471,6 +472,12 @@
if ((req_active.reg[i] != req_sleep.reg[i])
&& (req_sleep.valid & BIT(i))) {
sleep_set_differs = true;
+ /*
+ * Resend full active set request so that
+ * all parameters are specified in the wake-only
+ * state request.
+ */
+ resend_active = !aggr_vreg->use_awake_state;
break;
}
}
@@ -523,7 +530,7 @@
if ((req_active.valid & BIT(i))
&& (!(aggr_vreg->aggr_req_active.valid & BIT(i))
|| aggr_vreg->aggr_req_active.reg[i]
- != req_active.reg[i])) {
+ != req_active.reg[i] || resend_active)) {
cmd[j].addr = aggr_vreg->addr + i * 4;
cmd[j].data = req_active.reg[i];
j++;
@@ -1187,6 +1194,9 @@
aggr_vreg->vreg_count = 0;
for_each_available_child_of_node(aggr_vreg->dev->of_node, node) {
+ /* Skip child nodes handled by other drivers. */
+ if (of_find_property(node, "compatible", NULL))
+ continue;
aggr_vreg->vreg_count++;
}
@@ -1202,6 +1212,10 @@
i = 0;
for_each_available_child_of_node(aggr_vreg->dev->of_node, node) {
+ /* Skip child nodes handled by other drivers. */
+ if (of_find_property(node, "compatible", NULL))
+ continue;
+
aggr_vreg->vreg[i].of_node = node;
aggr_vreg->vreg[i].aggr_vreg = aggr_vreg;
@@ -1623,6 +1637,7 @@
mutex_unlock(&aggr_vreg->lock);
}
+ of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
platform_set_drvdata(pdev, aggr_vreg);
aggr_vreg_debug(aggr_vreg, "successfully probed; addr=0x%05X, type=%s\n",
diff --git a/drivers/regulator/tps65086-regulator.c b/drivers/regulator/tps65086-regulator.c
index caf174f..6dbf3cf 100644
--- a/drivers/regulator/tps65086-regulator.c
+++ b/drivers/regulator/tps65086-regulator.c
@@ -156,19 +156,19 @@
VDOA23_VID_MASK, TPS65086_LDOA3CTRL, BIT(0),
tps65086_ldoa23_ranges, 0, 0),
TPS65086_SWITCH("SWA1", "swa1", SWA1, TPS65086_SWVTT_EN, BIT(5)),
- TPS65086_SWITCH("SWB1", "swa2", SWB1, TPS65086_SWVTT_EN, BIT(6)),
- TPS65086_SWITCH("SWB2", "swa3", SWB2, TPS65086_SWVTT_EN, BIT(7)),
+ TPS65086_SWITCH("SWB1", "swb1", SWB1, TPS65086_SWVTT_EN, BIT(6)),
+ TPS65086_SWITCH("SWB2", "swb2", SWB2, TPS65086_SWVTT_EN, BIT(7)),
TPS65086_SWITCH("VTT", "vtt", VTT, TPS65086_SWVTT_EN, BIT(4)),
};
-static int tps65086_of_parse_cb(struct device_node *dev,
+static int tps65086_of_parse_cb(struct device_node *node,
const struct regulator_desc *desc,
struct regulator_config *config)
{
int ret;
/* Check for 25mV step mode */
- if (of_property_read_bool(config->of_node, "ti,regulator-step-size-25mv")) {
+ if (of_property_read_bool(node, "ti,regulator-step-size-25mv")) {
switch (desc->id) {
case BUCK1:
case BUCK2:
@@ -192,7 +192,7 @@
}
/* Check for decay mode */
- if (desc->id <= BUCK6 && of_property_read_bool(config->of_node, "ti,regulator-decay")) {
+ if (desc->id <= BUCK6 && of_property_read_bool(node, "ti,regulator-decay")) {
ret = regmap_write_bits(config->regmap,
regulators[desc->id].decay_reg,
regulators[desc->id].decay_mask,
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index b7d54bf..7b696d1 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -3590,12 +3590,14 @@
} else {
buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
lpfc_els_free_data(phba, buf_ptr1);
+ elsiocb->context2 = NULL;
}
}
if (elsiocb->context3) {
buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
lpfc_els_free_bpl(phba, buf_ptr);
+ elsiocb->context3 = NULL;
}
lpfc_sli_release_iocbq(phba, elsiocb);
return 0;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 49b4c79..2d4f4b5 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -5951,18 +5951,25 @@
free_vfi_bmask:
kfree(phba->sli4_hba.vfi_bmask);
+ phba->sli4_hba.vfi_bmask = NULL;
free_xri_ids:
kfree(phba->sli4_hba.xri_ids);
+ phba->sli4_hba.xri_ids = NULL;
free_xri_bmask:
kfree(phba->sli4_hba.xri_bmask);
+ phba->sli4_hba.xri_bmask = NULL;
free_vpi_ids:
kfree(phba->vpi_ids);
+ phba->vpi_ids = NULL;
free_vpi_bmask:
kfree(phba->vpi_bmask);
+ phba->vpi_bmask = NULL;
free_rpi_ids:
kfree(phba->sli4_hba.rpi_ids);
+ phba->sli4_hba.rpi_ids = NULL;
free_rpi_bmask:
kfree(phba->sli4_hba.rpi_bmask);
+ phba->sli4_hba.rpi_bmask = NULL;
err_exit:
return rc;
}
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 8e63a7b..91ec068 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1555,7 +1555,8 @@
struct atio {
uint8_t entry_type; /* Entry type. */
uint8_t entry_count; /* Entry count. */
- uint8_t data[58];
+ __le16 attr_n_length;
+ uint8_t data[56];
uint32_t signature;
#define ATIO_PROCESSED 0xDEADDEAD /* Signature */
};
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 068c4e4..bddaabb 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -2487,6 +2487,10 @@
if (pkt->entry_status & RF_BUSY)
res = DID_BUS_BUSY << 16;
+ if (pkt->entry_type == NOTIFY_ACK_TYPE &&
+ pkt->handle == QLA_TGT_SKIP_HANDLE)
+ return;
+
sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
if (sp) {
sp->done(ha, sp, res);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index feab7ea..91f5f55 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -3067,7 +3067,7 @@
pkt->entry_type = NOTIFY_ACK_TYPE;
pkt->entry_count = 1;
- pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+ pkt->handle = QLA_TGT_SKIP_HANDLE;
nack = (struct nack_to_isp *)pkt;
nack->ox_id = ntfy->ox_id;
@@ -6463,12 +6463,29 @@
if (!vha->flags.online)
return;
- while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) {
+ while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
+ fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) {
pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
cnt = pkt->u.raw.entry_count;
- qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt,
- ha_locked);
+ if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) {
+ /*
+ * This packet is corrupted. The header + payload
+ * can not be trusted. There is no point in passing
+ * it further up.
+ */
+ ql_log(ql_log_warn, vha, 0xffff,
+ "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
+ pkt->u.isp24.fcp_hdr.s_id,
+ be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
+ le32_to_cpu(pkt->u.isp24.exchange_addr), pkt);
+
+ adjust_corrupted_atio(pkt);
+ qlt_send_term_exchange(vha, NULL, pkt, ha_locked, 0);
+ } else {
+ qlt_24xx_atio_pkt_all_vps(vha,
+ (struct atio_from_isp *)pkt, ha_locked);
+ }
for (i = 0; i < cnt; i++) {
ha->tgt.atio_ring_index++;
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index f26c5f6..0824a81 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -427,13 +427,33 @@
struct {
uint8_t entry_type; /* Entry type. */
uint8_t entry_count; /* Entry count. */
- uint8_t data[58];
+ __le16 attr_n_length;
+#define FCP_CMD_LENGTH_MASK 0x0fff
+#define FCP_CMD_LENGTH_MIN 0x38
+ uint8_t data[56];
uint32_t signature;
#define ATIO_PROCESSED 0xDEADDEAD /* Signature */
} raw;
} u;
} __packed;
+static inline int fcpcmd_is_corrupted(struct atio *atio)
+{
+	if (atio->entry_type == ATIO_TYPE7 &&
+	    ((le16_to_cpu(atio->attr_n_length) & FCP_CMD_LENGTH_MASK) <
+	     FCP_CMD_LENGTH_MIN)) /* mask the CPU-order value, not the __le16 */
+		return 1;
+	else
+		return 0;
+}
+
+/* adjust corrupted atio so we won't trip over the same entry again. */
+static inline void adjust_corrupted_atio(struct atio_from_isp *atio)
+{
+ atio->u.raw.attr_n_length = cpu_to_le16(FCP_CMD_LENGTH_MIN);
+ atio->u.isp24.fcp_cmnd.add_cdb_len = 0;
+}
+
#define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */
/*
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index a06069b..1d57b34 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -384,11 +384,12 @@
= container_of(kref, struct scsi_target, reap_ref);
/*
- * if we get here and the target is still in the CREATED state that
+ * if we get here and the target is still in a CREATED state that
* means it was allocated but never made visible (because a scan
* turned up no LUNs), so don't call device_del() on it.
*/
- if (starget->state != STARGET_CREATED) {
+ if ((starget->state != STARGET_CREATED) &&
+ (starget->state != STARGET_CREATED_REMOVE)) {
transport_remove_device(&starget->dev);
device_del(&starget->dev);
}
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index e90a8e1..f14d95e 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1371,11 +1371,15 @@
spin_lock_irqsave(shost->host_lock, flags);
list_for_each_entry(starget, &shost->__targets, siblings) {
if (starget->state == STARGET_DEL ||
- starget->state == STARGET_REMOVE)
+ starget->state == STARGET_REMOVE ||
+ starget->state == STARGET_CREATED_REMOVE)
continue;
if (starget->dev.parent == dev || &starget->dev == dev) {
kref_get(&starget->reap_ref);
- starget->state = STARGET_REMOVE;
+ if (starget->state == STARGET_CREATED)
+ starget->state = STARGET_CREATED_REMOVE;
+ else
+ starget->state = STARGET_REMOVE;
spin_unlock_irqrestore(shost->host_lock, flags);
__scsi_remove_target(starget);
scsi_target_reap(starget);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 967bb0d..b9290e7 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2484,7 +2484,8 @@
if (sdp->broken_fua) {
sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n");
sdkp->DPOFUA = 0;
- } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) {
+ } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw &&
+ !sdkp->device->use_16_for_rw) {
sd_first_printk(KERN_NOTICE, sdkp,
"Uses READ/WRITE(6), disabling FUA\n");
sdkp->DPOFUA = 0;
diff --git a/drivers/scsi/ufs/ufs-debugfs.c b/drivers/scsi/ufs/ufs-debugfs.c
index bc2d2d4..557ca19 100644
--- a/drivers/scsi/ufs/ufs-debugfs.c
+++ b/drivers/scsi/ufs/ufs-debugfs.c
@@ -850,8 +850,8 @@
static int ufsdbg_dump_device_desc_show(struct seq_file *file, void *data)
{
int err = 0;
- int buff_len = QUERY_DESC_DEVICE_MAX_SIZE;
- u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE];
+ int buff_len = QUERY_DESC_DEVICE_DEF_SIZE;
+ u8 desc_buf[QUERY_DESC_DEVICE_DEF_SIZE];
struct ufs_hba *hba = (struct ufs_hba *)file->private;
struct desc_field_offset device_desc_field_name[] = {
@@ -1469,6 +1469,11 @@
hba->debugfs_files.err_occurred = true;
}
+void ufsdbg_clr_err_state(struct ufs_hba *hba)
+{
+ hba->debugfs_files.err_occurred = false;
+}
+
DEFINE_SIMPLE_ATTRIBUTE(ufsdbg_err_state,
ufsdbg_read_err_state,
ufsdbg_clear_err_state,
diff --git a/drivers/scsi/ufs/ufs-debugfs.h b/drivers/scsi/ufs/ufs-debugfs.h
index 13848e8..8ae5eb4 100644
--- a/drivers/scsi/ufs/ufs-debugfs.h
+++ b/drivers/scsi/ufs/ufs-debugfs.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -38,6 +38,7 @@
void ufsdbg_pr_buf_to_std(struct ufs_hba *hba, int offset, int num_regs,
char *str, void *priv);
void ufsdbg_set_err_state(struct ufs_hba *hba);
+void ufsdbg_clr_err_state(struct ufs_hba *hba);
#else
static inline void ufsdbg_add_debugfs(struct ufs_hba *hba)
{
@@ -52,6 +53,9 @@
void ufsdbg_set_err_state(struct ufs_hba *hba)
{
}
+void ufsdbg_clr_err_state(struct ufs_hba *hba)
+{
+}
#endif
#ifdef CONFIG_UFS_FAULT_INJECTION
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 3245fe1..0ae51b9 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -65,6 +65,10 @@
#define UFS_MAX_LUNS (SCSI_W_LUN_BASE + UFS_UPIU_MAX_UNIT_NUM_ID)
#define UFS_UPIU_WLUN_ID (1 << 7)
#define UFS_UPIU_MAX_GENERAL_LUN 8
+#define UFS_MAX_WLUS 4
+#define UFS_MAX_LUS (UFS_UPIU_MAX_GENERAL_LUN + UFS_MAX_WLUS)
+
+#define QUERY_DESC_IDN_CONFIGURATION QUERY_DESC_IDN_CONFIGURAION
/* Well known logical unit id in LUN field of UPIU */
enum {
@@ -144,19 +148,13 @@
QUERY_DESC_DESC_TYPE_OFFSET = 0x01,
};
-enum ufs_desc_max_size {
- QUERY_DESC_DEVICE_MAX_SIZE = 0x40,
- QUERY_DESC_CONFIGURAION_MAX_SIZE = 0x90,
- QUERY_DESC_UNIT_MAX_SIZE = 0x23,
- QUERY_DESC_INTERCONNECT_MAX_SIZE = 0x06,
- /*
- * Max. 126 UNICODE characters (2 bytes per character) plus 2 bytes
- * of descriptor header.
- */
- QUERY_DESC_STRING_MAX_SIZE = 0xFE,
- QUERY_DESC_GEOMETRY_MAZ_SIZE = 0x44,
- QUERY_DESC_POWER_MAX_SIZE = 0x62,
- QUERY_DESC_RFU_MAX_SIZE = 0x00,
+enum ufs_desc_def_size {
+ QUERY_DESC_DEVICE_DEF_SIZE = 0x40,
+ QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90,
+ QUERY_DESC_UNIT_DEF_SIZE = 0x23,
+ QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06,
+ QUERY_DESC_GEOMETRY_DEF_SIZE = 0x44,
+ QUERY_DESC_POWER_DEF_SIZE = 0x62,
};
/* Unit descriptor parameters offsets in bytes*/
diff --git a/drivers/scsi/ufs/ufs_quirks.c b/drivers/scsi/ufs/ufs_quirks.c
index 3210d60..da2bfd5 100644
--- a/drivers/scsi/ufs/ufs_quirks.c
+++ b/drivers/scsi/ufs/ufs_quirks.c
@@ -51,7 +51,7 @@
void ufs_advertise_fixup_device(struct ufs_hba *hba)
{
int err;
- u8 str_desc_buf[QUERY_DESC_STRING_MAX_SIZE + 1];
+ u8 str_desc_buf[QUERY_DESC_MAX_SIZE + 1];
char *model;
struct ufs_card_fix *f;
@@ -59,13 +59,13 @@
if (!model)
goto out;
- memset(str_desc_buf, 0, QUERY_DESC_STRING_MAX_SIZE);
+ memset(str_desc_buf, 0, QUERY_DESC_MAX_SIZE);
err = ufshcd_read_string_desc(hba, hba->dev_info.i_product_name,
- str_desc_buf, QUERY_DESC_STRING_MAX_SIZE, ASCII_STD);
+ str_desc_buf, QUERY_DESC_MAX_SIZE, ASCII_STD);
if (err)
goto out;
- str_desc_buf[QUERY_DESC_STRING_MAX_SIZE] = '\0';
+ str_desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
strlcpy(model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
MAX_MODEL_LEN));
diff --git a/drivers/scsi/ufs/ufs_test.c b/drivers/scsi/ufs/ufs_test.c
index d41871a..2e3997d 100644
--- a/drivers/scsi/ufs/ufs_test.c
+++ b/drivers/scsi/ufs/ufs_test.c
@@ -603,8 +603,8 @@
struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
struct scsi_device *sdev;
struct ufs_hba *hba;
- int buff_len = QUERY_DESC_UNIT_MAX_SIZE;
- u8 desc_buf[QUERY_DESC_UNIT_MAX_SIZE];
+ int buff_len = QUERY_DESC_UNIT_DEF_SIZE;
+ u8 desc_buf[QUERY_DESC_UNIT_DEF_SIZE];
bool flag;
u32 att;
int ret = 0;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 59222ea..c132dbc 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -251,19 +251,6 @@
16, 4, buf, len, false);
}
-static u32 ufs_query_desc_max_size[] = {
- QUERY_DESC_DEVICE_MAX_SIZE,
- QUERY_DESC_CONFIGURAION_MAX_SIZE,
- QUERY_DESC_UNIT_MAX_SIZE,
- QUERY_DESC_RFU_MAX_SIZE,
- QUERY_DESC_INTERCONNECT_MAX_SIZE,
- QUERY_DESC_STRING_MAX_SIZE,
- QUERY_DESC_RFU_MAX_SIZE,
- QUERY_DESC_GEOMETRY_MAZ_SIZE,
- QUERY_DESC_POWER_MAX_SIZE,
- QUERY_DESC_RFU_MAX_SIZE,
-};
-
enum {
UFSHCD_MAX_CHANNEL = 0,
UFSHCD_MAX_ID = 1,
@@ -344,6 +331,26 @@
return ufs_pm_lvl_states[lvl].link_state;
}
+static inline void ufshcd_set_card_online(struct ufs_hba *hba)
+{
+ atomic_set(&hba->card_state, UFS_CARD_STATE_ONLINE);
+}
+
+static inline void ufshcd_set_card_offline(struct ufs_hba *hba)
+{
+ atomic_set(&hba->card_state, UFS_CARD_STATE_OFFLINE);
+}
+
+static inline bool ufshcd_is_card_online(struct ufs_hba *hba)
+{
+ return (atomic_read(&hba->card_state) == UFS_CARD_STATE_ONLINE);
+}
+
+static inline bool ufshcd_is_card_offline(struct ufs_hba *hba)
+{
+ return (atomic_read(&hba->card_state) == UFS_CARD_STATE_OFFLINE);
+}
+
static inline enum ufs_pm_level
ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
enum uic_link_state link_state)
@@ -397,6 +404,7 @@
unsigned long *freq, u32 flags);
static int ufshcd_devfreq_get_dev_status(struct device *dev,
struct devfreq_dev_status *stat);
+static void __ufshcd_shutdown_clkscaling(struct ufs_hba *hba);
#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
static struct devfreq_simple_ondemand_data ufshcd_ondemand_data = {
@@ -1500,7 +1508,7 @@
hba->clk_gating.state = REQ_CLKS_ON;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
- queue_work(hba->clk_gating.ungating_workq,
+ queue_work(hba->clk_gating.clk_gating_workq,
&hba->clk_gating.ungate_work);
/*
* fall through to check if we should wait for this
@@ -1768,7 +1776,8 @@
struct ufs_hba *hba = container_of(timer, struct ufs_hba,
clk_gating.gate_hrtimer);
- schedule_work(&hba->clk_gating.gate_work);
+ queue_work(hba->clk_gating.clk_gating_workq,
+ &hba->clk_gating.gate_work);
return HRTIMER_NORESTART;
}
@@ -1776,7 +1785,7 @@
static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
struct ufs_clk_gating *gating = &hba->clk_gating;
- char wq_name[sizeof("ufs_clk_ungating_00")];
+ char wq_name[sizeof("ufs_clk_gating_00")];
hba->clk_gating.state = CLKS_ON;
@@ -1805,9 +1814,10 @@
hrtimer_init(&gating->gate_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
gating->gate_hrtimer.function = ufshcd_clkgate_hrtimer_handler;
- snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_ungating_%d",
+ snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
hba->host->host_no);
- hba->clk_gating.ungating_workq = create_singlethread_workqueue(wq_name);
+ hba->clk_gating.clk_gating_workq =
+ create_singlethread_workqueue(wq_name);
gating->is_enabled = true;
@@ -1871,7 +1881,7 @@
device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
ufshcd_cancel_gate_work(hba);
cancel_work_sync(&hba->clk_gating.ungate_work);
- destroy_workqueue(hba->clk_gating.ungating_workq);
+ destroy_workqueue(hba->clk_gating.clk_gating_workq);
}
static void ufshcd_set_auto_hibern8_timer(struct ufs_hba *hba, u32 delay)
@@ -2994,6 +3004,12 @@
has_read_lock = true;
}
+ /*
+ * err might be non-zero here but logic later in this function
+ * assumes that err is set to 0.
+ */
+ err = 0;
+
spin_lock_irqsave(hba->host->host_lock, flags);
/* if error handling is in progress, return host busy */
@@ -3002,6 +3018,12 @@
goto out_unlock;
}
+ if (hba->extcon && ufshcd_is_card_offline(hba)) {
+ set_host_byte(cmd, DID_BAD_TARGET);
+ cmd->scsi_done(cmd);
+ goto out_unlock;
+ }
+
switch (hba->ufshcd_state) {
case UFSHCD_STATE_OPERATIONAL:
break;
@@ -3628,7 +3650,7 @@
goto out;
}
- if (*buf_len <= QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
+ if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
__func__, *buf_len);
err = -EINVAL;
@@ -3708,6 +3730,92 @@
EXPORT_SYMBOL(ufshcd_query_descriptor);
/**
+ * ufshcd_read_desc_length - read the specified descriptor length from header
+ * @hba: Pointer to adapter instance
+ * @desc_id: descriptor idn value
+ * @desc_index: descriptor index
+ * @desc_length: pointer to variable to read the length of descriptor
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+static int ufshcd_read_desc_length(struct ufs_hba *hba,
+ enum desc_idn desc_id,
+ int desc_index,
+ int *desc_length)
+{
+ int ret;
+ u8 header[QUERY_DESC_HDR_SIZE];
+ int header_len = QUERY_DESC_HDR_SIZE;
+
+ if (desc_id >= QUERY_DESC_IDN_MAX)
+ return -EINVAL;
+
+ ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
+ desc_id, desc_index, 0, header,
+ &header_len);
+
+ if (ret) {
+ dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
+ __func__, desc_id);
+ return ret;
+ } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
+ dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
+ __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
+ desc_id);
+ ret = -EINVAL;
+ }
+
+ *desc_length = header[QUERY_DESC_LENGTH_OFFSET];
+ return ret;
+
+}
+
+/**
+ * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
+ * @hba: Pointer to adapter instance
+ * @desc_id: descriptor idn value
+ * @desc_len: mapped desc length (out)
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+int ufshcd_map_desc_id_to_length(struct ufs_hba *hba,
+ enum desc_idn desc_id, int *desc_len)
+{
+ switch (desc_id) {
+ case QUERY_DESC_IDN_DEVICE:
+ *desc_len = hba->desc_size.dev_desc;
+ break;
+ case QUERY_DESC_IDN_POWER:
+ *desc_len = hba->desc_size.pwr_desc;
+ break;
+ case QUERY_DESC_IDN_GEOMETRY:
+ *desc_len = hba->desc_size.geom_desc;
+ break;
+ case QUERY_DESC_IDN_CONFIGURATION:
+ *desc_len = hba->desc_size.conf_desc;
+ break;
+ case QUERY_DESC_IDN_UNIT:
+ *desc_len = hba->desc_size.unit_desc;
+ break;
+ case QUERY_DESC_IDN_INTERCONNECT:
+ *desc_len = hba->desc_size.interc_desc;
+ break;
+ case QUERY_DESC_IDN_STRING:
+ *desc_len = QUERY_DESC_MAX_SIZE;
+ break;
+ case QUERY_DESC_IDN_RFU_0:
+ case QUERY_DESC_IDN_RFU_1:
+ *desc_len = 0;
+ break;
+ default:
+ *desc_len = 0;
+ return -EINVAL;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
+
+/**
* ufshcd_read_desc_param - read the specified descriptor parameter
* @hba: Pointer to adapter instance
* @desc_id: descriptor idn value
@@ -3721,37 +3829,45 @@
static int ufshcd_read_desc_param(struct ufs_hba *hba,
enum desc_idn desc_id,
int desc_index,
- u32 param_offset,
+ u8 param_offset,
u8 *param_read_buf,
- u32 param_size)
+ u8 param_size)
{
int ret;
u8 *desc_buf;
- u32 buff_len;
+ int buff_len;
bool is_kmalloc = true;
- /* safety checks */
- if (desc_id >= QUERY_DESC_IDN_MAX)
+ /* Safety check */
+ if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
return -EINVAL;
- buff_len = ufs_query_desc_max_size[desc_id];
- if ((param_offset + param_size) > buff_len)
- return -EINVAL;
+ /* Get the max length of descriptor from structure filled up at probe
+ * time.
+ */
+ ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
- if (!param_offset && (param_size == buff_len)) {
- /* memory space already available to hold full descriptor */
- desc_buf = param_read_buf;
- is_kmalloc = false;
- } else {
- /* allocate memory to hold full descriptor */
+ /* Sanity checks */
+ if (ret || !buff_len) {
+ dev_err(hba->dev, "%s: Failed to get full descriptor length",
+ __func__);
+ return ret;
+ }
+
+ /* Check whether we need temp memory */
+ if (param_offset != 0 || param_size < buff_len) {
desc_buf = kmalloc(buff_len, GFP_KERNEL);
if (!desc_buf)
return -ENOMEM;
+ } else {
+ desc_buf = param_read_buf;
+ is_kmalloc = false;
}
+ /* Request for full descriptor */
ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
- desc_id, desc_index, 0, desc_buf,
- &buff_len);
+ desc_id, desc_index, 0,
+ desc_buf, &buff_len);
if (ret) {
dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
@@ -3768,25 +3884,9 @@
goto out;
}
- /*
- * While reading variable size descriptors (like string descriptor),
- * some UFS devices may report the "LENGTH" (field in "Transaction
- * Specific fields" of Query Response UPIU) same as what was requested
- * in Query Request UPIU instead of reporting the actual size of the
- * variable size descriptor.
- * Although it's safe to ignore the "LENGTH" field for variable size
- * descriptors as we can always derive the length of the descriptor from
- * the descriptor header fields. Hence this change impose the length
- * match check only for fixed size descriptors (for which we always
- * request the correct size as part of Query Request UPIU).
- */
- if ((desc_id != QUERY_DESC_IDN_STRING) &&
- (buff_len != desc_buf[QUERY_DESC_LENGTH_OFFSET])) {
- dev_err(hba->dev, "%s: desc_buf length mismatch: buff_len %d, buff_len(desc_header) %d",
- __func__, buff_len, desc_buf[QUERY_DESC_LENGTH_OFFSET]);
- ret = -EINVAL;
- goto out;
- }
+	/* Check whether we will not copy more data than available */
+ if (is_kmalloc && param_size > buff_len)
+ param_size = buff_len;
if (is_kmalloc)
memcpy(param_read_buf, &desc_buf[param_offset], param_size);
@@ -5085,6 +5185,14 @@
out:
if (ret)
dev_err(hba->dev, "link startup failed %d\n", ret);
+ /*
+ * For some external cards, link startup succeeds only after few link
+ * startup attempts and err_state may get set in this case.
+	 * But as the link startup has finally succeeded, we are clearing the
+ * error state.
+ */
+ else if (hba->extcon)
+ ufsdbg_clr_err_state(hba);
return ret;
}
@@ -5529,8 +5637,15 @@
__func__, (intr_status & UIC_HIBERNATE_ENTER) ?
"Enter" : "Exit",
intr_status, ufshcd_get_upmcrs(hba));
- __ufshcd_print_host_regs(hba, true);
- ufshcd_print_host_state(hba);
+ /*
+ * It is possible to see auto-h8 errors during card
+ * removal, so set this flag and let the error handler
+ * decide if this error is seen while card was present
+ * or due to card removal.
+ * If error is seen during card removal, we don't want
+			 * to print out the debug messages.
+ */
+ hba->auto_h8_err = true;
schedule_work(&hba->eh_work);
retval = IRQ_HANDLED;
}
@@ -6130,6 +6245,32 @@
hba = container_of(work, struct ufs_hba, eh_work);
spin_lock_irqsave(hba->host->host_lock, flags);
+ if (hba->extcon) {
+ if (ufshcd_is_card_online(hba)) {
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ /*
+ * TODO: need better way to ensure that this delay is
+ * more than extcon's debounce-ms
+ */
+ msleep(300);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ }
+
+ /*
+ * ignore error if card was online and offline/removed now or
+ * card was already offline.
+ */
+ if (ufshcd_is_card_offline(hba)) {
+ hba->saved_err = 0;
+ hba->saved_uic_err = 0;
+ hba->saved_ce_err = 0;
+ hba->auto_h8_err = false;
+ hba->force_host_reset = false;
+ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+ goto out;
+ }
+ }
+
ufsdbg_set_err_state(hba);
if (hba->ufshcd_state == UFSHCD_STATE_RESET)
@@ -6174,7 +6315,8 @@
* Dump controller state before resetting. Transfer requests state
* will be dump as part of the request completion.
*/
- if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
+ if ((hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) ||
+ hba->auto_h8_err) {
dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x",
__func__, hba->saved_err, hba->saved_uic_err);
if (!hba->silence_err_logs) {
@@ -6187,6 +6329,7 @@
ufshcd_print_cmd_log(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
}
+ hba->auto_h8_err = false;
}
if ((hba->saved_err & INT_FATAL_ERRORS)
@@ -6425,7 +6568,10 @@
queue_eh_work = true;
}
- if (queue_eh_work) {
+ if (hba->extcon && ufshcd_is_card_offline(hba)) {
+ /* ignore UIC errors if card is offline */
+ retval |= IRQ_HANDLED;
+ } else if (queue_eh_work) {
/*
* update the transfer error masks to sticky bits, let's do this
* irrespective of current ufshcd_state.
@@ -7170,10 +7316,19 @@
static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
{
int ret;
- int buff_len = QUERY_DESC_POWER_MAX_SIZE;
- u8 desc_buf[QUERY_DESC_POWER_MAX_SIZE];
+ int buff_len = hba->desc_size.pwr_desc;
+ u8 *desc_buf = NULL;
u32 icc_level;
+ if (buff_len) {
+ desc_buf = kmalloc(buff_len, GFP_KERNEL);
+ if (!desc_buf) {
+ dev_err(hba->dev,
+ "%s: Failed to allocate desc_buf\n", __func__);
+ return;
+ }
+ }
+
ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
if (ret) {
dev_err(hba->dev,
@@ -7554,9 +7709,18 @@
static int ufs_read_device_desc_data(struct ufs_hba *hba)
{
int err;
- u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE];
+ u8 *desc_buf = NULL;
- err = ufshcd_read_device_desc(hba, desc_buf, sizeof(desc_buf));
+ if (hba->desc_size.dev_desc) {
+ desc_buf = kmalloc(hba->desc_size.dev_desc, GFP_KERNEL);
+ if (!desc_buf) {
+ err = -ENOMEM;
+ dev_err(hba->dev,
+ "%s: Failed to allocate desc_buf\n", __func__);
+ return err;
+ }
+ }
+ err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
if (err)
return err;
@@ -7574,6 +7738,51 @@
return 0;
}
+static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
+{
+ int err;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
+ &hba->desc_size.dev_desc);
+ if (err)
+ hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
+ &hba->desc_size.pwr_desc);
+ if (err)
+ hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
+ &hba->desc_size.interc_desc);
+ if (err)
+ hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
+ &hba->desc_size.conf_desc);
+ if (err)
+ hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
+ &hba->desc_size.unit_desc);
+ if (err)
+ hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
+ &hba->desc_size.geom_desc);
+ if (err)
+ hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
+}
+
+static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
+{
+ hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
+ hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
+ hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
+ hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
+ hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
+ hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
+}
+
/**
* ufshcd_probe_hba - probe hba to detect device and initialize
* @hba: per-adapter instance
@@ -7614,6 +7823,8 @@
if (ret)
goto out;
+ /* Init check for device descriptor sizes */
+ ufshcd_init_desc_sizes(hba);
ufs_advertise_fixup_device(hba);
ufshcd_tune_unipro_params(hba);
@@ -7703,6 +7914,18 @@
ufshcd_set_auto_hibern8_timer(hba,
hba->hibern8_on_idle.delay_ms);
out:
+ if (ret) {
+ ufshcd_set_ufs_dev_poweroff(hba);
+ ufshcd_set_link_off(hba);
+ if (hba->extcon) {
+ if (!ufshcd_is_card_online(hba))
+ ufsdbg_clr_err_state(hba);
+ ufshcd_set_card_offline(hba);
+ }
+ } else if (hba->extcon) {
+ ufshcd_set_card_online(hba);
+ }
+
/*
* If we failed to initialize the device or the device is not
* present, turn off the power/clocks etc.
@@ -7716,22 +7939,60 @@
return ret;
}
+static void ufshcd_remove_device(struct ufs_hba *hba)
+{
+ struct scsi_device *sdev;
+ struct scsi_device *sdev_cache[UFS_MAX_LUS];
+ int sdev_count = 0, i;
+ unsigned long flags;
+
+ ufshcd_hold_all(hba);
+ /* Reset the host controller */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->silence_err_logs = true;
+ ufshcd_hba_stop(hba, false);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ ufshcd_set_ufs_dev_poweroff(hba);
+ ufshcd_set_link_off(hba);
+ __ufshcd_shutdown_clkscaling(hba);
+
+ /* Complete requests that have door-bell cleared by h/w */
+ ufshcd_complete_requests(hba);
+
+ /* remove all scsi devices */
+ list_for_each_entry(sdev, &hba->host->__devices, siblings) {
+ if (sdev_count < UFS_MAX_LUS) {
+ sdev_cache[sdev_count] = sdev;
+ sdev_count++;
+ }
+ }
+
+ for (i = 0; i < sdev_count; i++)
+ scsi_remove_device(sdev_cache[i]);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->silence_err_logs = false;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ ufshcd_release_all(hba);
+}
+
static void ufshcd_card_detect_handler(struct work_struct *work)
{
struct ufs_hba *hba;
hba = container_of(work, struct ufs_hba, card_detect_work);
- if (hba->card_detect_event &&
- (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)) {
- dev_dbg(hba->dev, "%s: card detect notification received\n",
- __func__);
+
+ if (ufshcd_is_card_online(hba) && !hba->sdev_ufs_device) {
pm_runtime_get_sync(hba->dev);
ufshcd_detect_device(hba);
+ /* ufshcd_probe_hba() calls pm_runtime_put_sync() on exit */
+ } else if (ufshcd_is_card_offline(hba) && hba->sdev_ufs_device) {
+ pm_runtime_get_sync(hba->dev);
+ ufshcd_remove_device(hba);
pm_runtime_put_sync(hba->dev);
- } else {
- dev_dbg(hba->dev, "%s: card removed notification received\n",
- __func__);
- /* TODO: remove the scsi device instances */
+ ufsdbg_clr_err_state(hba);
}
}
@@ -7740,9 +8001,23 @@
{
struct ufs_hba *hba = container_of(nb, struct ufs_hba, card_detect_nb);
- hba->card_detect_event = event;
- schedule_work(&hba->card_detect_work);
+ if (event)
+ ufshcd_set_card_online(hba);
+ else
+ ufshcd_set_card_offline(hba);
+ if (ufshcd_is_card_offline(hba) && !hba->sdev_ufs_device)
+ goto out;
+
+ /*
+ * card insertion/removal are very infrequent events and having this
+ * message helps if there is some issue with card detection/removal.
+ */
+ dev_info(hba->dev, "%s: card %s notification rcvd\n",
+ __func__, ufshcd_is_card_online(hba) ? "inserted" : "removed");
+
+ schedule_work(&hba->card_detect_work);
+out:
return NOTIFY_DONE;
}
@@ -9021,7 +9296,9 @@
if (ret)
goto disable_vreg;
- if (ufshcd_is_link_off(hba))
+ if (hba->extcon &&
+ (ufshcd_is_card_offline(hba) ||
+ (ufshcd_is_card_online(hba) && !hba->sdev_ufs_device)))
goto skip_dev_ops;
if (ufshcd_is_link_hibern8(hba)) {
@@ -9380,7 +9657,7 @@
ufshcd_add_spm_lvl_sysfs_nodes(hba);
}
-static void ufshcd_shutdown_clkscaling(struct ufs_hba *hba)
+static void __ufshcd_shutdown_clkscaling(struct ufs_hba *hba)
{
bool suspend = false;
unsigned long flags;
@@ -9397,7 +9674,6 @@
* doesn't race with shutdown
*/
if (ufshcd_is_clkscaling_supported(hba)) {
- device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
cancel_work_sync(&hba->clk_scaling.suspend_work);
cancel_work_sync(&hba->clk_scaling.resume_work);
if (suspend)
@@ -9405,8 +9681,16 @@
}
/* Unregister so that devfreq_monitor can't race with shutdown */
- if (hba->devfreq)
+ if (hba->devfreq) {
devfreq_remove_device(hba->devfreq);
+ hba->devfreq = NULL;
+ }
+}
+
+static void ufshcd_shutdown_clkscaling(struct ufs_hba *hba)
+{
+ __ufshcd_shutdown_clkscaling(hba);
+ device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
}
/**
@@ -9741,6 +10025,9 @@
{
int ret = 0;
+ if (hba->extcon && ufshcd_is_card_offline(hba))
+ return 0;
+
/* let's not get into low power until clock scaling is completed */
hba->ufs_stats.clk_hold.ctx = CLK_SCALE_WORK;
ufshcd_hold_all(hba);
@@ -10075,6 +10362,9 @@
ufshcd_init_lanes_per_dir(hba);
+ /* Set descriptor lengths to specification defaults */
+ ufshcd_def_desc_sizes(hba);
+
err = ufshcd_hba_init(hba);
if (err)
goto out_error;
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index a485885..fc855db 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -249,6 +249,15 @@
struct ufs_query query;
};
+struct ufs_desc_size {
+ int dev_desc;
+ int pwr_desc;
+ int geom_desc;
+ int interc_desc;
+ int unit_desc;
+ int conf_desc;
+};
+
/**
* struct ufs_clk_info - UFS clock related info
* @list: list headed by hba->clk_list_head
@@ -437,7 +446,7 @@
struct device_attribute enable_attr;
bool is_enabled;
int active_reqs;
- struct workqueue_struct *ungating_workq;
+ struct workqueue_struct *clk_gating_workq;
};
/* Hibern8 state */
@@ -679,6 +688,13 @@
u32 seq_num;
};
+/* UFS card state - hotplug state */
+enum ufshcd_card_state {
+ UFS_CARD_STATE_UNKNOWN = 0,
+ UFS_CARD_STATE_ONLINE = 1,
+ UFS_CARD_STATE_OFFLINE = 2,
+};
+
/**
* struct ufs_hba - per adapter private structure
* @mmio_base: UFSHCI base register address
@@ -728,7 +744,7 @@
* @extcon: pointer to external connector device
* @card_detect_nb: card detector notifier registered with @extcon
* @card_detect_work: work to exectute the card detect function
- * @card_detect_event: card detect event, 0 = removed, 1 = inserted
+ * @card_state: card state event, enum ufshcd_card_state defines possible states
* @vreg_info: UFS device voltage regulator information
* @clk_list_head: UFS host controller clocks list node head
* @pwr_info: holds current power mode
@@ -738,6 +754,7 @@
* @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
* device is known or not.
* @scsi_block_reqs_cnt: reference counting for scsi block requests
+ * @desc_size: descriptor sizes reported by device
*/
struct ufs_hba {
void __iomem *mmio_base;
@@ -865,6 +882,7 @@
u32 saved_ce_err;
bool silence_err_logs;
bool force_host_reset;
+ bool auto_h8_err;
/* Device management request data */
struct ufs_dev_cmd dev_cmd;
@@ -904,7 +922,7 @@
struct extcon_dev *extcon;
struct notifier_block card_detect_nb;
struct work_struct card_detect_work;
- unsigned long card_detect_event;
+ atomic_t card_state;
struct ufs_pa_layer_attr pwr_info;
struct ufs_pwr_mode_info max_pwr_info;
@@ -967,6 +985,7 @@
int latency_hist_enabled;
struct io_latency_state io_lat_s;
+ struct ufs_desc_size desc_size;
};
static inline void ufshcd_mark_shutdown_ongoing(struct ufs_hba *hba)
@@ -1208,6 +1227,10 @@
struct ufs_pa_layer_attr *pwr_mode);
void ufshcd_abort_outstanding_transfer_requests(struct ufs_hba *hba,
int result);
+
+int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
+ int *desc_length);
+
u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
void ufshcd_scsi_block_requests(struct ufs_hba *hba);
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index ec91bd0..c680d76 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -534,7 +534,9 @@
{
struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
+ unsigned long flags;
int req_size;
+ int ret;
BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
@@ -562,8 +564,15 @@
req_size = sizeof(cmd->req.cmd);
}
- if (virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)) != 0)
+ ret = virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd));
+ if (ret == -EIO) {
+ cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
+ spin_lock_irqsave(&req_vq->vq_lock, flags);
+ virtscsi_complete_cmd(vscsi, cmd);
+ spin_unlock_irqrestore(&req_vq->vq_lock, flags);
+ } else if (ret != 0) {
return SCSI_MLQUEUE_HOST_BUSY;
+ }
return 0;
}
diff --git a/drivers/sensors/sensors_ssc.c b/drivers/sensors/sensors_ssc.c
index d738767..dfdbd8e 100644
--- a/drivers/sensors/sensors_ssc.c
+++ b/drivers/sensors/sensors_ssc.c
@@ -32,6 +32,7 @@
#define IMAGE_LOAD_CMD 1
#define IMAGE_UNLOAD_CMD 0
+#define SSR_RESET_CMD 1
#define CLASS_NAME "ssc"
#define DRV_NAME "sensors"
#define DRV_VERSION "2.00"
@@ -53,6 +54,10 @@
struct kobj_attribute *attr,
const char *buf, size_t count);
+static ssize_t slpi_ssr_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count);
+
struct slpi_loader_private {
void *pil_h;
struct kobject *boot_slpi_obj;
@@ -62,8 +67,12 @@
static struct kobj_attribute slpi_boot_attribute =
__ATTR(boot, 0220, NULL, slpi_boot_store);
+static struct kobj_attribute slpi_ssr_attribute =
+ __ATTR(ssr, 0220, NULL, slpi_ssr_store);
+
static struct attribute *attrs[] = {
&slpi_boot_attribute.attr,
+ &slpi_ssr_attribute.attr,
NULL,
};
@@ -138,6 +147,44 @@
}
}
+static ssize_t slpi_ssr_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ int ssr_cmd = 0;
+ struct subsys_device *sns_dev = NULL;
+ struct platform_device *pdev = slpi_private;
+ struct slpi_loader_private *priv = NULL;
+
+ pr_debug("%s: going to call slpi_ssr\n", __func__);
+
+ if (kstrtoint(buf, 10, &ssr_cmd) < 0)
+ return -EINVAL;
+
+ if (ssr_cmd != SSR_RESET_CMD)
+ return -EINVAL;
+
+ priv = platform_get_drvdata(pdev);
+ if (!priv)
+ return -EINVAL;
+
+ sns_dev = (struct subsys_device *)priv->pil_h;
+ if (!sns_dev)
+ return -EINVAL;
+
+ dev_err(&pdev->dev, "Something went wrong with SLPI, restarting\n");
+
+ /* subsystem_restart_dev has worker queue to handle */
+ if (subsystem_restart_dev(sns_dev) != 0) {
+ dev_err(&pdev->dev, "subsystem_restart_dev failed\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(&pdev->dev, "SLPI restarted\n");
+ return count;
+}
+
static ssize_t slpi_boot_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf,
diff --git a/drivers/slimbus/slim-msm-ngd.c b/drivers/slimbus/slim-msm-ngd.c
index a72cb17..ca56462 100644
--- a/drivers/slimbus/slim-msm-ngd.c
+++ b/drivers/slimbus/slim-msm-ngd.c
@@ -9,11 +9,13 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
+#include <asm/dma-iommu.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/io.h>
+#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
@@ -23,6 +25,7 @@
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/of_slimbus.h>
#include <linux/timer.h>
#include <linux/msm-sps.h>
@@ -1665,6 +1668,43 @@
static DEVICE_ATTR(debug_mask, 0644, show_mask, set_mask);
+static const struct of_device_id ngd_slim_dt_match[] = {
+ {
+ .compatible = "qcom,slim-ngd",
+ },
+ {
+ .compatible = "qcom,iommu-slim-ctrl-cb",
+ },
+ {}
+};
+
+static int ngd_slim_iommu_probe(struct device *dev)
+{
+ struct platform_device *pdev;
+ struct msm_slim_ctrl *ctrl_dev;
+
+ if (unlikely(!dev->parent)) {
+ dev_err(dev, "%s no parent for this device\n", __func__);
+ return -EINVAL;
+ }
+
+ pdev = to_platform_device(dev->parent);
+ if (!pdev) {
+ dev_err(dev, "%s Parent platform device not found\n", __func__);
+ return -EINVAL;
+ }
+
+ ctrl_dev = platform_get_drvdata(pdev);
+ if (!ctrl_dev) {
+ dev_err(dev, "%s NULL controller device\n", __func__);
+ return -EINVAL;
+
+ }
+ ctrl_dev->iommu_desc.cb_dev = dev;
+ SLIM_INFO(ctrl_dev, "NGD IOMMU initialization complete\n");
+ return 0;
+}
+
static int ngd_slim_probe(struct platform_device *pdev)
{
struct msm_slim_ctrl *dev;
@@ -1676,6 +1716,10 @@
bool slim_mdm = false;
const char *ext_modem_id = NULL;
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "qcom,iommu-slim-ctrl-cb"))
+ return ngd_slim_iommu_probe(&pdev->dev);
+
slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"slimbus_physical");
if (!slim_mem) {
@@ -1774,6 +1818,17 @@
"qcom,slim-mdm", &ext_modem_id);
if (!ret)
slim_mdm = true;
+
+ dev->iommu_desc.s1_bypass = of_property_read_bool(
+ pdev->dev.of_node,
+ "qcom,iommu-s1-bypass");
+ ret = of_platform_populate(pdev->dev.of_node, ngd_slim_dt_match,
+ NULL, &pdev->dev);
+ if (ret) {
+ dev_err(dev->dev, "%s: Failed to of_platform_populate %d\n",
+ __func__, ret);
+ goto err_ctrl_failed;
+ }
} else {
dev->ctrl.nr = pdev->id;
}
@@ -1920,6 +1975,10 @@
struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
ngd_slim_enable(dev, false);
+ if (!IS_ERR_OR_NULL(dev->iommu_desc.iommu_map)) {
+ arm_iommu_detach_device(dev->iommu_desc.cb_dev);
+ arm_iommu_release_mapping(dev->iommu_desc.iommu_map);
+ }
if (dev->sysfs_created)
sysfs_remove_file(&dev->dev->kobj,
&dev_attr_debug_mask.attr);
@@ -2015,8 +2074,13 @@
{
int ret = -EBUSY;
struct platform_device *pdev = to_platform_device(dev);
- struct msm_slim_ctrl *cdev = platform_get_drvdata(pdev);
+ struct msm_slim_ctrl *cdev;
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "qcom,iommu-slim-ctrl-cb"))
+ return 0;
+
+ cdev = platform_get_drvdata(pdev);
if (!pm_runtime_enabled(dev) ||
(!pm_runtime_suspended(dev) &&
cdev->state == MSM_CTRL_IDLE)) {
@@ -2052,9 +2116,14 @@
static int ngd_slim_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
- struct msm_slim_ctrl *cdev = platform_get_drvdata(pdev);
+ struct msm_slim_ctrl *cdev;
int ret = 0;
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "qcom,iommu-slim-ctrl-cb"))
+ return 0;
+
+ cdev = platform_get_drvdata(pdev);
/*
* If deferred response was requested for power-off and it failed,
* mark runtime-pm status as active to be consistent
@@ -2091,13 +2160,6 @@
)
};
-static const struct of_device_id ngd_slim_dt_match[] = {
- {
- .compatible = "qcom,slim-ngd",
- },
- {}
-};
-
static struct platform_driver ngd_slim_driver = {
.probe = ngd_slim_probe,
.remove = ngd_slim_remove,
diff --git a/drivers/slimbus/slim-msm.c b/drivers/slimbus/slim-msm.c
index ef10e64..01779f9 100644
--- a/drivers/slimbus/slim-msm.c
+++ b/drivers/slimbus/slim-msm.c
@@ -9,17 +9,21 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
-#include <linux/pm_runtime.h>
-#include <linux/dma-mapping.h>
+#include <asm/dma-iommu.h>
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/gcd.h>
+#include <linux/msm-sps.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/slimbus/slimbus.h>
-#include <linux/msm-sps.h>
-#include <linux/gcd.h>
#include "slim-msm.h"
/* Pipe Number Offset Mask */
#define P_OFF_MASK 0x3FC
+#define MSM_SLIM_VA_START (0x40000000)
+#define MSM_SLIM_VA_SIZE (0xC0000000)
int msm_slim_rx_enqueue(struct msm_slim_ctrl *dev, u32 *buf, u8 len)
{
@@ -164,17 +168,64 @@
ep->sps = NULL;
}
+static int msm_slim_iommu_attach(struct msm_slim_ctrl *ctrl_dev)
+{
+ struct dma_iommu_mapping *iommu_map;
+ dma_addr_t va_start = MSM_SLIM_VA_START;
+ size_t va_size = MSM_SLIM_VA_SIZE;
+ int bypass = 1;
+ struct device *dev;
+
+ if (unlikely(!ctrl_dev))
+ return -EINVAL;
+
+ if (!ctrl_dev->iommu_desc.cb_dev)
+ return 0;
+
+ if (!IS_ERR_OR_NULL(ctrl_dev->iommu_desc.iommu_map))
+ return 0;
+
+ dev = ctrl_dev->iommu_desc.cb_dev;
+ iommu_map = arm_iommu_create_mapping(&platform_bus_type,
+ va_start, va_size);
+ if (IS_ERR(iommu_map)) {
+ dev_err(dev, "%s iommu_create_mapping failure\n", __func__);
+ return PTR_ERR(iommu_map);
+ }
+
+ if (ctrl_dev->iommu_desc.s1_bypass) {
+ if (iommu_domain_set_attr(iommu_map->domain,
+ DOMAIN_ATTR_S1_BYPASS, &bypass)) {
+ dev_err(dev, "%s Can't bypass s1 translation\n",
+ __func__);
+ arm_iommu_release_mapping(iommu_map);
+ return -EIO;
+ }
+ }
+
+ if (arm_iommu_attach_device(dev, iommu_map)) {
+ dev_err(dev, "%s can't arm_iommu_attach_device\n", __func__);
+ arm_iommu_release_mapping(iommu_map);
+ return -EIO;
+ }
+ ctrl_dev->iommu_desc.iommu_map = iommu_map;
+ SLIM_INFO(ctrl_dev, "NGD IOMMU Attach complete\n");
+ return 0;
+}
+
int msm_slim_sps_mem_alloc(
struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem, u32 len)
{
dma_addr_t phys;
+ struct device *dma_dev = dev->iommu_desc.cb_dev ?
+ dev->iommu_desc.cb_dev : dev->dev;
mem->size = len;
mem->min_size = 0;
- mem->base = dma_alloc_coherent(dev->dev, mem->size, &phys, GFP_KERNEL);
+ mem->base = dma_alloc_coherent(dma_dev, mem->size, &phys, GFP_KERNEL);
if (!mem->base) {
- dev_err(dev->dev, "dma_alloc_coherent(%d) failed\n", len);
+ dev_err(dma_dev, "dma_alloc_coherent(%d) failed\n", len);
return -ENOMEM;
}
@@ -387,6 +438,10 @@
if (pn >= dev->port_nums)
return -ENODEV;
+ ret = msm_slim_iommu_attach(dev);
+ if (ret)
+ return ret;
+
endpoint = &dev->pipes[pn];
ret = msm_slim_init_endpoint(dev, endpoint);
dev_dbg(dev->dev, "sps register bam error code:%x\n", ret);
@@ -435,9 +490,37 @@
return SLIM_P_INPROGRESS;
}
+static int msm_slim_iommu_map(struct msm_slim_ctrl *dev, phys_addr_t iobuf,
+ u32 len)
+{
+ int ret;
+
+ if (!dev->iommu_desc.cb_dev)
+ return 0;
+
+ ret = iommu_map(dev->iommu_desc.iommu_map->domain,
+ rounddown(iobuf, PAGE_SIZE),
+ rounddown(iobuf, PAGE_SIZE),
+ roundup((len + (iobuf - rounddown(iobuf, PAGE_SIZE))),
+ PAGE_SIZE), IOMMU_READ | IOMMU_WRITE);
+ return ret;
+}
+
+static void msm_slim_iommu_unmap(struct msm_slim_ctrl *dev, phys_addr_t iobuf,
+ u32 len)
+{
+ if (!dev->iommu_desc.cb_dev)
+ return;
+
+ iommu_unmap(dev->iommu_desc.iommu_map->domain,
+ rounddown(iobuf, PAGE_SIZE),
+ roundup((len + (iobuf - rounddown(iobuf, PAGE_SIZE))),
+ PAGE_SIZE));
+}
+
static void msm_slim_port_cb(struct sps_event_notify *ev)
{
-
+ struct msm_slim_ctrl *dev = ev->user;
struct completion *comp = ev->data.transfer.user;
struct sps_iovec *iovec = &ev->data.transfer.iovec;
@@ -450,6 +533,8 @@
pr_err("%s: ERR event %d\n",
__func__, ev->event_id);
}
+ if (dev)
+ msm_slim_iommu_unmap(dev, iovec->addr, iovec->size);
if (comp)
complete(comp);
}
@@ -467,14 +552,19 @@
if (!dev->pipes[pn].connected)
return -ENOTCONN;
+ ret = msm_slim_iommu_map(dev, iobuf, len);
+ if (ret)
+ return ret;
+
sreg.options = (SPS_EVENT_DESC_DONE|SPS_EVENT_ERROR);
sreg.mode = SPS_TRIGGER_WAIT;
sreg.xfer_done = NULL;
sreg.callback = msm_slim_port_cb;
- sreg.user = NULL;
+ sreg.user = dev;
ret = sps_register_event(dev->pipes[pn].sps, &sreg);
if (ret) {
dev_dbg(dev->dev, "sps register event error:%x\n", ret);
+ msm_slim_iommu_unmap(dev, iobuf, len);
return ret;
}
ret = sps_transfer_one(dev->pipes[pn].sps, iobuf, len, comp,
@@ -490,6 +580,8 @@
PGD_THIS_EE(PGD_PORT_INT_EN_EEn, dev->ver));
/* Make sure that port registers are updated before returning */
mb();
+ } else {
+ msm_slim_iommu_unmap(dev, iobuf, len);
}
return ret;
@@ -1102,6 +1194,12 @@
}
init_msgq:
+ ret = msm_slim_iommu_attach(dev);
+ if (ret) {
+ sps_deregister_bam_device(bam_handle);
+ return ret;
+ }
+
ret = msm_slim_init_rx_msgq(dev, pipe_reg);
if (ret)
dev_err(dev->dev, "msm_slim_init_rx_msgq failed 0x%x\n", ret);
@@ -1204,6 +1302,13 @@
if (dev->pipes[i].connected)
msm_slim_disconn_pipe_port(dev, i);
}
+
+ if (!IS_ERR_OR_NULL(dev->iommu_desc.iommu_map)) {
+ arm_iommu_detach_device(dev->iommu_desc.cb_dev);
+ arm_iommu_release_mapping(dev->iommu_desc.iommu_map);
+ dev->iommu_desc.iommu_map = NULL;
+ }
+
if (dereg) {
for (i = 0; i < dev->port_nums; i++) {
if (dev->pipes[i].connected)
diff --git a/drivers/slimbus/slim-msm.h b/drivers/slimbus/slim-msm.h
index ee0f625..5859c5f 100644
--- a/drivers/slimbus/slim-msm.h
+++ b/drivers/slimbus/slim-msm.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -263,10 +263,17 @@
bool in_progress;
};
+struct msm_slim_iommu {
+ struct device *cb_dev;
+ struct dma_iommu_mapping *iommu_map;
+ bool s1_bypass;
+};
+
struct msm_slim_ctrl {
struct slim_controller ctrl;
struct slim_framer framer;
struct device *dev;
+ struct msm_slim_iommu iommu_desc;
void __iomem *base;
struct resource *slew_mem;
struct resource *bam_mem;
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 8317c09..38eff96 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -506,6 +506,7 @@
config ICNSS
tristate "Platform driver for Q6 integrated connectivity"
+ select CNSS_UTILS
---help---
This module adds support for Q6 integrated WLAN connectivity
subsystem. This module is responsible for communicating WLAN on/off
@@ -545,6 +546,38 @@
used by audio driver to configure QDSP6v2's
ASM, ADM and AFE.
+config MSM_QDSP6_SSR
+ bool "Audio QDSP6 SSR support"
+ depends on MSM_QDSP6_APRV2_GLINK || MSM_QDSP6_APRV3_GLINK
+ help
+ Enable Subsystem Restart. Reset audio
+ clients when the ADSP subsystem is
+ restarted. Subsystem Restart for audio
+ is only used for processes on the ADSP
+ and signals audio drivers through APR.
+
+
+config MSM_QDSP6_PDR
+ bool "Audio QDSP6 PDR support"
+ depends on MSM_QDSP6_APRV2_GLINK || MSM_QDSP6_APRV3_GLINK
+ help
+ Enable Protection Domain Restart. Reset
+ audio clients when a process on the ADSP
+ is restarted. PDR for audio is only used
+ for processes on the ADSP and signals
+ audio drivers through APR.
+
+config MSM_QDSP6_NOTIFIER
+	bool "Audio QDSP6 Notifier support"
+ depends on MSM_QDSP6_SSR || MSM_QDSP6_PDR
+ help
+ Enable notifier which decides whether
+ to use SSR or PDR and notifies all
+ audio clients of the event. Both SSR
+ and PDR are recovery methods when
+ there is a crash on ADSP. Audio drivers
+ are contacted by ADSP through APR.
+
config MSM_ADSP_LOADER
tristate "ADSP loader support"
select SND_SOC_MSM_APRV2_INTF
@@ -599,6 +632,14 @@
between MSM and WCD DSP over glink transport protocol. This driver
provides read and write interface via char device.
+config QCOM_SMCINVOKE
+ bool "Secure QSEE Support"
+ help
+ Enable SMCInvoke driver which supports capability based secure
+ communication between QTI Secure Execution Environment (QSEE)
+ and high level operating system. It exposes APIs for both
+ userspace and kernel clients.
+
config MSM_EVENT_TIMER
bool "Event timer"
help
@@ -701,3 +742,20 @@
This forces a watchdog bite when the device restarts due to a
kernel panic. On certain MSM SoCs, this provides us
additional debugging information.
+
+config QMP_DEBUGFS_CLIENT
+ bool "Debugfs Client to communicate with AOP using QMP protocol"
+ depends on DEBUG_FS
+ default n
+ help
+	  This option enables a driver which allows clients to send messages
+	  to the Always On Processor (AOP) using the QMP transport.
+
+source "drivers/soc/qcom/memshare/Kconfig"
+
+config QSEE_IPC_IRQ_BRIDGE
+ tristate "QSEE IPC Interrupt Bridge"
+ help
+ This module enables bridging an Inter-Processor Communication(IPC)
+ interrupt from a remote subsystem directed towards Qualcomm
+ Technologies, Inc. Secure Execution Environment(QSEE).
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index dc303e2..2d7d62a 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -56,6 +56,7 @@
obj-$(CONFIG_MSM_SYSMON_GLINK_COMM) += sysmon-glink.o sysmon-qmi.o
obj-$(CONFIG_ICNSS) += icnss.o wlan_firmware_service_v01.o icnss_utils.o
+obj-$(CONFIG_MEM_SHARE_QMI_SERVICE) += memshare/
obj-$(CONFIG_MSM_PIL_SSR_GENERIC) += subsys-pil-tz.o
obj-$(CONFIG_MSM_PIL_MSS_QDSP6V5) += pil-q6v5.o pil-msa.o pil-q6v5-mss.o
obj-$(CONFIG_MSM_PIL) += peripheral-loader.o
@@ -76,3 +77,6 @@
obj-$(CONFIG_APSS_CORE_EA) += msm-core.o debug_core.o
obj-$(CONFIG_QCOM_DCC_V2) += dcc_v2.o
obj-$(CONFIG_QTI_RPM_STATS_LOG) += rpm_stats.o
+obj-$(CONFIG_QCOM_SMCINVOKE) += smcinvoke.o
+obj-$(CONFIG_QMP_DEBUGFS_CLIENT) += qmp-debugfs-client.o
+obj-$(CONFIG_QSEE_IPC_IRQ_BRIDGE) += qsee_ipc_irq_bridge.o
diff --git a/drivers/soc/qcom/early_random.c b/drivers/soc/qcom/early_random.c
index 5156bc1..641f70e 100644
--- a/drivers/soc/qcom/early_random.c
+++ b/drivers/soc/qcom/early_random.c
@@ -38,6 +38,7 @@
int ret;
u32 resp;
struct scm_desc desc;
+ u64 bytes_received;
data.out_buf = (uint8_t *) virt_to_phys(random_buffer);
desc.args[0] = (unsigned long) data.out_buf;
@@ -46,19 +47,28 @@
dmac_flush_range(random_buffer, random_buffer + RANDOM_BUFFER_SIZE);
- if (!is_scm_armv8())
+ if (!is_scm_armv8()) {
ret = scm_call_noalloc(TZ_SVC_CRYPTO, PRNG_CMD_ID, &data,
sizeof(data), &resp, sizeof(resp),
common_scm_buf,
SCM_BUFFER_SIZE(common_scm_buf));
- else
+ bytes_received = resp;
+ } else {
ret = scm_call2(SCM_SIP_FNID(TZ_SVC_CRYPTO, PRNG_CMD_ID),
&desc);
-
+ bytes_received = desc.ret[0];
+ }
if (!ret) {
+ if (bytes_received != SZ_512)
+ pr_warn("Did not receive the expected number of bytes from PRNG: %llu\n",
+ bytes_received);
+
dmac_inv_range(random_buffer, random_buffer +
RANDOM_BUFFER_SIZE);
- add_hwgenerator_randomness(random_buffer, SZ_512, SZ_512 << 3);
+ bytes_received = (bytes_received <= RANDOM_BUFFER_SIZE) ?
+ bytes_received : RANDOM_BUFFER_SIZE;
+ add_hwgenerator_randomness(random_buffer, bytes_received,
+ bytes_received << 3);
}
}
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
index fd4c604..1d605e3 100644
--- a/drivers/soc/qcom/glink.c
+++ b/drivers/soc/qcom/glink.c
@@ -30,7 +30,6 @@
#include "glink_private.h"
#include "glink_xprt_if.h"
-#define GLINK_CTX_CANARY 0x58544324 /* "$CTX" */
/* Number of internal IPC Logging log pages */
#define NUM_LOG_PAGES 10
#define GLINK_PM_QOS_HOLDOFF_MS 10
@@ -40,7 +39,6 @@
#define GLINK_KTHREAD_PRIO 1
-static rwlock_t magic_lock;
/**
* struct glink_qos_priority_bin - Packet Scheduler's priority bucket
* @max_rate_kBps: Maximum rate supported by the priority bucket.
@@ -232,6 +230,8 @@
* @req_rate_kBps: Current QoS request by the channel.
* @tx_intent_cnt: Intent count to transmit soon in future.
* @tx_cnt: Packets to be picked by tx scheduler.
+ * @rt_vote_on: Number of times RT vote on is called.
+ * @rt_vote_off: Number of times RT vote off is called.
*/
struct channel_ctx {
struct rwref_lock ch_state_lhb2;
@@ -312,7 +312,9 @@
unsigned long req_rate_kBps;
uint32_t tx_intent_cnt;
uint32_t tx_cnt;
- uint32_t magic_number;
+
+ uint32_t rt_vote_on;
+ uint32_t rt_vote_off;
};
static struct glink_core_if core_impl;
@@ -443,33 +445,15 @@
static int glink_get_ch_ctx(struct channel_ctx *ctx)
{
- unsigned long flags;
-
if (!ctx)
return -EINVAL;
- read_lock_irqsave(&magic_lock, flags);
- if (ctx->magic_number != GLINK_CTX_CANARY) {
- read_unlock_irqrestore(&magic_lock, flags);
- return -EINVAL;
- }
rwref_get(&ctx->ch_state_lhb2);
- read_unlock_irqrestore(&magic_lock, flags);
return 0;
}
-static int glink_put_ch_ctx(struct channel_ctx *ctx, bool update_magic)
+static void glink_put_ch_ctx(struct channel_ctx *ctx)
{
- unsigned long flags;
-
- if (!update_magic) {
- rwref_put(&ctx->ch_state_lhb2);
- return 0;
- }
- write_lock_irqsave(&magic_lock, flags);
- ctx->magic_number = 0;
rwref_put(&ctx->ch_state_lhb2);
- write_unlock_irqrestore(&magic_lock, flags);
- return 0;
}
/**
@@ -1930,13 +1914,14 @@
kfree(flcid);
}
+ ctx->transport_ptr = xprt_ctx;
+ rwref_get(&ctx->ch_state_lhb2);
list_add_tail(&ctx->port_list_node, &xprt_ctx->channels);
GLINK_INFO_PERF_CH_XPRT(ctx, xprt_ctx,
"%s: local:GLINK_CHANNEL_CLOSED\n",
__func__);
}
- rwref_get(&ctx->ch_state_lhb2);
spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
rwref_write_put(&xprt_ctx->xprt_state_lhb0);
mutex_lock(&xprt_ctx->xprt_dbgfs_lock_lhb4);
@@ -2418,6 +2403,25 @@
}
/**
+ * dummy_rx_rt_vote() - Dummy RX Realtime thread vote
+ * @if_ptr: The transport to transmit on.
+ *
+ */
+static int dummy_rx_rt_vote(struct glink_transport_if *if_ptr)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * dummy_rx_rt_unvote() - Dummy RX Realtime thread unvote
+ * @if_ptr: The transport to transmit on.
+ */
+static int dummy_rx_rt_unvote(struct glink_transport_if *if_ptr)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
* notif_if_up_all_xprts() - Check and notify existing transport state if up
* @notif_info: Data structure containing transport information to be notified.
*
@@ -2599,7 +2603,6 @@
ctx->notify_tx_abort = cfg->notify_tx_abort;
ctx->notify_rx_tracer_pkt = cfg->notify_rx_tracer_pkt;
ctx->notify_remote_rx_intent = cfg->notify_remote_rx_intent;
- ctx->magic_number = GLINK_CTX_CANARY;
if (!ctx->notify_rx_intent_req)
ctx->notify_rx_intent_req = glink_dummy_notify_rx_intent_req;
@@ -2616,7 +2619,6 @@
ctx->local_xprt_req = best_id;
ctx->no_migrate = cfg->transport &&
!(cfg->options & GLINK_OPT_INITIAL_XPORT);
- ctx->transport_ptr = transport_ptr;
ctx->local_open_state = GLINK_CHANNEL_OPENING;
GLINK_INFO_PERF_CH(ctx,
"%s: local:GLINK_CHANNEL_CLOSED->GLINK_CHANNEL_OPENING\n",
@@ -2742,13 +2744,13 @@
GLINK_INFO_CH(ctx, "%s: Closing channel, ctx: %p\n", __func__, ctx);
if (ctx->local_open_state == GLINK_CHANNEL_CLOSED) {
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return 0;
}
if (ctx->local_open_state == GLINK_CHANNEL_CLOSING) {
/* close already pending */
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return -EBUSY;
}
@@ -2813,7 +2815,7 @@
rwref_put(&ctx->ch_state_lhb2);
rwref_read_put(&xprt_ctx->xprt_state_lhb0);
- glink_put_ch_ctx(ctx, true);
+ glink_put_ch_ctx(ctx);
return ret;
}
EXPORT_SYMBOL(glink_close);
@@ -2862,7 +2864,7 @@
struct channel_ctx *ctx = (struct channel_ctx *)handle;
uint32_t riid;
int ret = 0;
- struct glink_core_tx_pkt *tx_info;
+ struct glink_core_tx_pkt *tx_info = NULL;
size_t intent_size;
bool is_atomic =
tx_flags & (GLINK_TX_SINGLE_THREADED | GLINK_TX_ATOMIC);
@@ -2877,6 +2879,13 @@
return ret;
rwref_read_get_atomic(&ctx->ch_state_lhb2, is_atomic);
+ tx_info = kzalloc(sizeof(struct glink_core_tx_pkt),
+ is_atomic ? GFP_ATOMIC : GFP_KERNEL);
+ if (!tx_info) {
+ GLINK_ERR_CH(ctx, "%s: No memory for allocation\n", __func__);
+ ret = -ENOMEM;
+ goto glink_tx_common_err;
+ }
if (!(vbuf_provider || pbuf_provider)) {
ret = -EINVAL;
goto glink_tx_common_err;
@@ -2996,14 +3005,7 @@
GLINK_INFO_PERF_CH(ctx, "%s: R[%u]:%zu data[%p], size[%zu]. TID %u\n",
__func__, riid, intent_size,
data ? data : iovec, size, current->pid);
- tx_info = kzalloc(sizeof(struct glink_core_tx_pkt),
- is_atomic ? GFP_ATOMIC : GFP_KERNEL);
- if (!tx_info) {
- GLINK_ERR_CH(ctx, "%s: No memory for allocation\n", __func__);
- ch_push_remote_rx_intent(ctx, intent_size, riid, cookie);
- ret = -ENOMEM;
- goto glink_tx_common_err;
- }
+
rwref_lock_init(&tx_info->pkt_ref, glink_tx_pkt_release);
INIT_LIST_HEAD(&tx_info->list_done);
INIT_LIST_HEAD(&tx_info->list_node);
@@ -3028,10 +3030,15 @@
else
xprt_schedule_tx(ctx->transport_ptr, ctx, tx_info);
+ rwref_read_put(&ctx->ch_state_lhb2);
+ glink_put_ch_ctx(ctx);
+ return ret;
+
glink_tx_common_err:
rwref_read_put(&ctx->ch_state_lhb2);
glink_tx_common_err_2:
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
+ kfree(tx_info);
return ret;
}
@@ -3080,7 +3087,7 @@
/* Can only queue rx intents if channel is fully opened */
GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
__func__);
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return -EBUSY;
}
@@ -3089,14 +3096,14 @@
GLINK_ERR_CH(ctx,
"%s: Intent pointer allocation failed size[%zu]\n",
__func__, size);
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return -ENOMEM;
}
GLINK_DBG_CH(ctx, "%s: L[%u]:%zu\n", __func__, intent_ptr->id,
intent_ptr->intent_size);
if (ctx->transport_ptr->capabilities & GCAP_INTENTLESS) {
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return ret;
}
@@ -3106,7 +3113,7 @@
if (ret)
/* unable to transmit, dequeue intent */
ch_remove_local_rx_intent(ctx, intent_ptr->id);
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return ret;
}
EXPORT_SYMBOL(glink_queue_rx_intent);
@@ -3138,12 +3145,12 @@
if (size <= intent->intent_size) {
spin_unlock_irqrestore(
&ctx->local_rx_intent_lst_lock_lhc1, flags);
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return true;
}
}
spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return false;
}
EXPORT_SYMBOL(glink_rx_intent_exists);
@@ -3172,7 +3179,7 @@
if (IS_ERR_OR_NULL(liid_ptr)) {
/* invalid pointer */
GLINK_ERR_CH(ctx, "%s: Invalid pointer %p\n", __func__, ptr);
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return -EINVAL;
}
@@ -3198,7 +3205,7 @@
/* send rx done */
ctx->transport_ptr->ops->tx_cmd_local_rx_done(ctx->transport_ptr->ops,
ctx->lcid, id, reuse);
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return ret;
}
EXPORT_SYMBOL(glink_rx_done);
@@ -3252,7 +3259,7 @@
if (!ch_is_fully_opened(ctx)) {
GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
__func__);
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return -EBUSY;
}
@@ -3262,7 +3269,7 @@
ctx->lcid, ctx->lsigs);
GLINK_INFO_CH(ctx, "%s: Sent SIGNAL SET command\n", __func__);
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return ret;
}
EXPORT_SYMBOL(glink_sigs_set);
@@ -3288,12 +3295,12 @@
if (!ch_is_fully_opened(ctx)) {
GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
__func__);
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return -EBUSY;
}
*sigs = ctx->lsigs;
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return 0;
}
EXPORT_SYMBOL(glink_sigs_local_get);
@@ -3320,12 +3327,12 @@
if (!ch_is_fully_opened(ctx)) {
GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
__func__);
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return -EBUSY;
}
*sigs = ctx->rsigs;
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return 0;
}
EXPORT_SYMBOL(glink_sigs_remote_get);
@@ -3429,7 +3436,7 @@
if (!ch_is_fully_opened(ctx)) {
GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
__func__);
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return -EBUSY;
}
@@ -3439,7 +3446,7 @@
if (ret < 0)
GLINK_ERR_CH(ctx, "%s: QoS %lu:%zu cannot be met\n",
__func__, latency_us, pkt_size);
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return ret;
}
EXPORT_SYMBOL(glink_qos_latency);
@@ -3463,12 +3470,12 @@
if (!ch_is_fully_opened(ctx)) {
GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
__func__);
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return -EBUSY;
}
ret = glink_qos_reset_priority(ctx);
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return ret;
}
EXPORT_SYMBOL(glink_qos_cancel);
@@ -3495,7 +3502,7 @@
if (!ch_is_fully_opened(ctx)) {
GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
__func__);
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return -EBUSY;
}
@@ -3504,7 +3511,7 @@
ret = glink_qos_add_ch_tx_intent(ctx);
spin_unlock(&ctx->tx_lists_lock_lhc3);
spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb3, flags);
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return ret;
}
EXPORT_SYMBOL(glink_qos_start);
@@ -3532,11 +3539,11 @@
if (!ch_is_fully_opened(ctx)) {
GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
__func__);
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return (unsigned long)-EBUSY;
}
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return ctx->transport_ptr->ops->get_power_vote_ramp_time(
ctx->transport_ptr->ops,
glink_prio_to_power_state(ctx->transport_ptr,
@@ -3544,6 +3551,61 @@
}
EXPORT_SYMBOL(glink_qos_get_ramp_time);
+
+/**
+ * glink_start_rx_rt() - Vote for RT thread priority on RX.
+ * @handle: Channel handle for which transaction are occurring.
+ *
+ * Return: 0 on success, standard Linux error codes on failure
+ */
+int glink_start_rx_rt(void *handle)
+{
+ struct channel_ctx *ctx = (struct channel_ctx *)handle;
+ int ret;
+
+ ret = glink_get_ch_ctx(ctx);
+ if (ret)
+ return ret;
+ if (!ch_is_fully_opened(ctx)) {
+ GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+ __func__);
+ glink_put_ch_ctx(ctx);
+ return -EBUSY;
+ }
+ ret = ctx->transport_ptr->ops->rx_rt_vote(ctx->transport_ptr->ops);
+ ctx->rt_vote_on++;
+ GLINK_INFO_CH(ctx, "%s: Voting RX Realtime Thread %d", __func__, ret);
+ glink_put_ch_ctx(ctx);
+ return ret;
+}
+
+/**
+ * glink_end_rx_rt() - Remove vote for RT thread priority on RX.
+ * @handle: Channel handle for which transaction are occurring.
+ *
+ * Return: 0 on success, standard Linux error codes on failure
+ */
+int glink_end_rx_rt(void *handle)
+{
+ struct channel_ctx *ctx = (struct channel_ctx *)handle;
+ int ret;
+
+ ret = glink_get_ch_ctx(ctx);
+ if (ret)
+ return ret;
+ if (!ch_is_fully_opened(ctx)) {
+ GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+ __func__);
+ glink_put_ch_ctx(ctx);
+ return -EBUSY;
+ }
+ ret = ctx->transport_ptr->ops->rx_rt_unvote(ctx->transport_ptr->ops);
+ ctx->rt_vote_off++;
+ GLINK_INFO_CH(ctx, "%s: Unvoting RX Realtime Thread %d", __func__, ret);
+ glink_put_ch_ctx(ctx);
+ return ret;
+}
+
/**
* glink_rpm_rx_poll() - Poll and receive any available events
* @handle: Channel handle in which this operation is performed.
@@ -3626,10 +3688,10 @@
if (ret)
return ret;
if (!ctx->transport_ptr) {
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return -EOPNOTSUPP;
}
- glink_put_ch_ctx(ctx, false);
+ glink_put_ch_ctx(ctx);
return ctx->transport_ptr->ops->wait_link_down(ctx->transport_ptr->ops);
}
EXPORT_SYMBOL(glink_wait_link_down);
@@ -3951,6 +4013,10 @@
if_ptr->power_vote = dummy_power_vote;
if (!if_ptr->power_unvote)
if_ptr->power_unvote = dummy_power_unvote;
+ if (!if_ptr->rx_rt_vote)
+ if_ptr->rx_rt_vote = dummy_rx_rt_vote;
+ if (!if_ptr->rx_rt_unvote)
+ if_ptr->rx_rt_unvote = dummy_rx_rt_unvote;
xprt_ptr->capabilities = 0;
xprt_ptr->ops = if_ptr;
spin_lock_init(&xprt_ptr->xprt_ctx_lock_lhb1);
@@ -4095,7 +4161,7 @@
xprt_ptr = kzalloc(sizeof(*xprt_ptr), GFP_KERNEL);
if (!xprt_ptr)
return ERR_PTR(-ENOMEM);
- if_ptr = kmalloc(sizeof(*if_ptr), GFP_KERNEL);
+ if_ptr = kzalloc(sizeof(*if_ptr), GFP_KERNEL);
if (!if_ptr) {
kfree(xprt_ptr);
return ERR_PTR(-ENOMEM);
@@ -6170,7 +6236,6 @@
static int glink_init(void)
{
log_ctx = ipc_log_context_create(NUM_LOG_PAGES, "glink", 0);
- rwlock_init(&magic_lock);
if (!log_ctx)
GLINK_ERR("%s: unable to create log context\n", __func__);
glink_debugfs_init();
diff --git a/drivers/soc/qcom/glink_smem_native_xprt.c b/drivers/soc/qcom/glink_smem_native_xprt.c
index 0ce1bda..384347d 100644
--- a/drivers/soc/qcom/glink_smem_native_xprt.c
+++ b/drivers/soc/qcom/glink_smem_native_xprt.c
@@ -182,6 +182,8 @@
* @deferred_cmds: List of deferred commands that need to be
* processed in process context.
* @deferred_cmds_cnt: Number of deferred commands in queue.
+ * @rt_vote_lock: Serialize access to RT rx votes
+ * @rt_votes: Vote count for RT rx thread priority
* @num_pw_states: Size of @ramp_time_us.
* @ramp_time_us: Array of ramp times in microseconds where array
* index position represents a power state.
@@ -221,6 +223,8 @@
spinlock_t rx_lock;
struct list_head deferred_cmds;
uint32_t deferred_cmds_cnt;
+ spinlock_t rt_vote_lock;
+ uint32_t rt_votes;
uint32_t num_pw_states;
unsigned long *ramp_time_us;
struct mailbox_config_info *mailbox;
@@ -2125,6 +2129,52 @@
}
/**
+ * rx_rt_vote() - Increment an RX thread RT vote
+ * @if_ptr: The transport interface on which RT voting is requested.
+ *
+ * Return: 0 on Success, standard error otherwise.
+ */
+static int rx_rt_vote(struct glink_transport_if *if_ptr)
+{
+ struct edge_info *einfo;
+ struct sched_param param = { .sched_priority = 1 };
+ int ret = 0;
+ unsigned long flags;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+ spin_lock_irqsave(&einfo->rt_vote_lock, flags);
+ if (!einfo->rt_votes)
+ ret = sched_setscheduler_nocheck(einfo->task, SCHED_FIFO,
+ &param);
+ einfo->rt_votes++;
+ spin_unlock_irqrestore(&einfo->rt_vote_lock, flags);
+ return ret;
+}
+
+/**
+ * rx_rt_unvote() - Remove an RX thread RT vote
+ * @if_ptr: The transport interface on which RT voting is requested.
+ *
+ * Return: 0 on Success, standard error otherwise.
+ */
+static int rx_rt_unvote(struct glink_transport_if *if_ptr)
+{
+ struct edge_info *einfo;
+ struct sched_param param = { .sched_priority = 0 };
+ int ret = 0;
+ unsigned long flags;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+ spin_lock_irqsave(&einfo->rt_vote_lock, flags);
+ einfo->rt_votes--;
+ if (!einfo->rt_votes)
+ ret = sched_setscheduler_nocheck(einfo->task, SCHED_NORMAL,
+ &param);
+ spin_unlock_irqrestore(&einfo->rt_vote_lock, flags);
+ return ret;
+}
+
+/**
* negotiate_features_v1() - determine what features of a version can be used
* @if_ptr: The transport for which features are negotiated for.
* @version: The version negotiated.
@@ -2169,6 +2219,8 @@
einfo->xprt_if.get_power_vote_ramp_time = get_power_vote_ramp_time;
einfo->xprt_if.power_vote = power_vote;
einfo->xprt_if.power_unvote = power_unvote;
+ einfo->xprt_if.rx_rt_vote = rx_rt_vote;
+ einfo->xprt_if.rx_rt_unvote = rx_rt_unvote;
}
/**
@@ -2341,6 +2393,8 @@
init_srcu_struct(&einfo->use_ref);
spin_lock_init(&einfo->rx_lock);
INIT_LIST_HEAD(&einfo->deferred_cmds);
+ spin_lock_init(&einfo->rt_vote_lock);
+ einfo->rt_votes = 0;
mutex_lock(&probe_lock);
if (edge_infos[einfo->remote_proc_id]) {
@@ -2388,7 +2442,7 @@
einfo->tx_fifo = smem_alloc(SMEM_GLINK_NATIVE_XPRT_FIFO_0,
einfo->tx_fifo_size,
einfo->remote_proc_id,
- SMEM_ITEM_CACHED_FLAG);
+ 0);
if (!einfo->tx_fifo) {
pr_err("%s: smem alloc of tx fifo failed\n", __func__);
rc = -ENOMEM;
diff --git a/drivers/soc/qcom/glink_spi_xprt.c b/drivers/soc/qcom/glink_spi_xprt.c
index e02c07a..c44aa93 100644
--- a/drivers/soc/qcom/glink_spi_xprt.c
+++ b/drivers/soc/qcom/glink_spi_xprt.c
@@ -111,7 +111,7 @@
* @xprt_cfg: The transport configuration for the glink core
* assocaited with this edge.
* @subsys_name: Name of the remote subsystem in the edge.
- * @spi_dev: Pointer to the connectingSPI Device.
+ * @spi_ops: Function pointers for ops provided by spi.
* @fifo_size: Size of the FIFO at the remote end.
* @tx_fifo_start: Base Address of the TX FIFO.
* @tx_fifo_end: End Address of the TX FIFO.
@@ -147,7 +147,7 @@
struct glink_transport_if xprt_if;
struct glink_core_transport_cfg xprt_cfg;
char subsys_name[GLINK_NAME_SIZE];
- struct spi_device *spi_dev;
+ struct wcd_spi_ops spi_ops;
uint32_t fifo_size;
uint32_t tx_fifo_start;
@@ -286,11 +286,14 @@
{
struct wcd_spi_msg spi_msg;
+ if (unlikely(!einfo->spi_ops.read_dev))
+ return -EINVAL;
+
memset(&spi_msg, 0, sizeof(spi_msg));
spi_msg.data = dst;
spi_msg.remote_addr = (uint32_t)(size_t)src;
spi_msg.len = (size_t)size;
- return wcd_spi_data_read(einfo->spi_dev, &spi_msg);
+ return einfo->spi_ops.read_dev(einfo->spi_ops.spi_dev, &spi_msg);
}
/**
@@ -310,11 +313,14 @@
{
struct wcd_spi_msg spi_msg;
+ if (unlikely(!einfo->spi_ops.write_dev))
+ return -EINVAL;
+
memset(&spi_msg, 0, sizeof(spi_msg));
spi_msg.data = src;
spi_msg.remote_addr = (uint32_t)(size_t)dst;
spi_msg.len = (size_t)size;
- return wcd_spi_data_write(einfo->spi_dev, &spi_msg);
+ return einfo->spi_ops.write_dev(einfo->spi_ops.spi_dev, &spi_msg);
}
/**
@@ -1796,27 +1802,20 @@
{
struct edge_info *einfo = dev_get_drvdata(dev);
struct glink_cmpnt *cmpnt = &einfo->cmpnt;
- struct device *sdev;
- struct spi_device *spi_dev;
+ int rc = -EINVAL;
switch (event) {
case WDSP_EVENT_PRE_BOOTUP:
if (cmpnt && cmpnt->master_dev &&
cmpnt->master_ops &&
- cmpnt->master_ops->get_dev_for_cmpnt)
- sdev = cmpnt->master_ops->get_dev_for_cmpnt(
- cmpnt->master_dev, WDSP_CMPNT_TRANSPORT);
- else
- sdev = NULL;
+ cmpnt->master_ops->get_devops_for_cmpnt)
+ rc = cmpnt->master_ops->get_devops_for_cmpnt(
+ cmpnt->master_dev, WDSP_CMPNT_TRANSPORT,
+ &einfo->spi_ops);
- if (!sdev) {
+ if (rc)
dev_err(dev, "%s: Failed to get transport device\n",
__func__);
- break;
- }
-
- spi_dev = to_spi_device(sdev);
- einfo->spi_dev = spi_dev;
break;
case WDSP_EVENT_POST_BOOTUP:
einfo->in_ssr = false;
diff --git a/drivers/soc/qcom/glink_xprt_if.h b/drivers/soc/qcom/glink_xprt_if.h
index f4d5a3b..47c1580 100644
--- a/drivers/soc/qcom/glink_xprt_if.h
+++ b/drivers/soc/qcom/glink_xprt_if.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -141,6 +141,8 @@
struct glink_transport_if *if_ptr, uint32_t state);
int (*power_vote)(struct glink_transport_if *if_ptr, uint32_t state);
int (*power_unvote)(struct glink_transport_if *if_ptr);
+ int (*rx_rt_vote)(struct glink_transport_if *if_ptr);
+ int (*rx_rt_unvote)(struct glink_transport_if *if_ptr);
/*
* Keep data pointers at the end of the structure after all function
* pointer to allow for in-place initialization.
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index b5bb719..a2ab266 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -13,6 +13,7 @@
#define pr_fmt(fmt) "icnss: " fmt
#include <asm/dma-iommu.h>
+#include <linux/of_address.h>
#include <linux/clk.h>
#include <linux/iommu.h>
#include <linux/export.h>
@@ -83,34 +84,58 @@
} while (0)
#define icnss_pr_err(_fmt, ...) do { \
- pr_err(_fmt, ##__VA_ARGS__); \
- icnss_ipc_log_string("ERR: " pr_fmt(_fmt), \
- ##__VA_ARGS__); \
+ printk("%s" pr_fmt(_fmt), KERN_ERR, ##__VA_ARGS__); \
+ icnss_ipc_log_string("%s" pr_fmt(_fmt), "", \
+ ##__VA_ARGS__); \
} while (0)
#define icnss_pr_warn(_fmt, ...) do { \
- pr_warn(_fmt, ##__VA_ARGS__); \
- icnss_ipc_log_string("WRN: " pr_fmt(_fmt), \
- ##__VA_ARGS__); \
+ printk("%s" pr_fmt(_fmt), KERN_WARNING, ##__VA_ARGS__); \
+ icnss_ipc_log_string("%s" pr_fmt(_fmt), "", \
+ ##__VA_ARGS__); \
} while (0)
#define icnss_pr_info(_fmt, ...) do { \
- pr_info(_fmt, ##__VA_ARGS__); \
- icnss_ipc_log_string("INF: " pr_fmt(_fmt), \
- ##__VA_ARGS__); \
+ printk("%s" pr_fmt(_fmt), KERN_INFO, ##__VA_ARGS__); \
+ icnss_ipc_log_string("%s" pr_fmt(_fmt), "", \
+ ##__VA_ARGS__); \
} while (0)
+#if defined(CONFIG_DYNAMIC_DEBUG)
#define icnss_pr_dbg(_fmt, ...) do { \
- pr_debug(_fmt, ##__VA_ARGS__); \
- icnss_ipc_log_string("DBG: " pr_fmt(_fmt), \
- ##__VA_ARGS__); \
+ pr_debug(_fmt, ##__VA_ARGS__); \
+ icnss_ipc_log_string(pr_fmt(_fmt), ##__VA_ARGS__); \
} while (0)
#define icnss_pr_vdbg(_fmt, ...) do { \
- pr_debug(_fmt, ##__VA_ARGS__); \
- icnss_ipc_log_long_string("DBG: " pr_fmt(_fmt), \
- ##__VA_ARGS__); \
+ pr_debug(_fmt, ##__VA_ARGS__); \
+ icnss_ipc_log_long_string(pr_fmt(_fmt), ##__VA_ARGS__); \
} while (0)
+#elif defined(DEBUG)
+#define icnss_pr_dbg(_fmt, ...) do { \
+ printk("%s" pr_fmt(_fmt), KERN_DEBUG, ##__VA_ARGS__); \
+ icnss_ipc_log_string("%s" pr_fmt(_fmt), "", \
+ ##__VA_ARGS__); \
+ } while (0)
+
+#define icnss_pr_vdbg(_fmt, ...) do { \
+ printk("%s" pr_fmt(_fmt), KERN_DEBUG, ##__VA_ARGS__); \
+ icnss_ipc_log_long_string("%s" pr_fmt(_fmt), "", \
+ ##__VA_ARGS__); \
+ } while (0)
+#else
+#define icnss_pr_dbg(_fmt, ...) do { \
+ no_printk("%s" pr_fmt(_fmt), KERN_DEBUG, ##__VA_ARGS__); \
+ icnss_ipc_log_string("%s" pr_fmt(_fmt), "", \
+ ##__VA_ARGS__); \
+ } while (0)
+
+#define icnss_pr_vdbg(_fmt, ...) do { \
+ no_printk("%s" pr_fmt(_fmt), KERN_DEBUG, ##__VA_ARGS__); \
+ icnss_ipc_log_long_string("%s" pr_fmt(_fmt), "", \
+ ##__VA_ARGS__); \
+ } while (0)
+#endif
#ifdef CONFIG_ICNSS_DEBUG
#define ICNSS_ASSERT(_condition) do { \
@@ -168,10 +193,79 @@
ICNSS_DRIVER_EVENT_MAX,
};
+enum icnss_msa_perm {
+ ICNSS_MSA_PERM_HLOS_ALL = 0,
+ ICNSS_MSA_PERM_WLAN_HW_RW = 1,
+ ICNSS_MSA_PERM_DUMP_COLLECT = 2,
+ ICNSS_MSA_PERM_MAX,
+};
+
+#define ICNSS_MAX_VMIDS 4
+
+struct icnss_mem_region_info {
+ uint64_t reg_addr;
+ uint32_t size;
+ uint8_t secure_flag;
+ enum icnss_msa_perm perm;
+};
+
+struct icnss_msa_perm_list_t {
+ int vmids[ICNSS_MAX_VMIDS];
+ int perms[ICNSS_MAX_VMIDS];
+ int nelems;
+};
+
+struct icnss_msa_perm_list_t msa_perm_secure_list[ICNSS_MSA_PERM_MAX] = {
+ [ICNSS_MSA_PERM_HLOS_ALL] = {
+ .vmids = {VMID_HLOS},
+ .perms = {PERM_READ | PERM_WRITE | PERM_EXEC},
+ .nelems = 1,
+ },
+
+ [ICNSS_MSA_PERM_WLAN_HW_RW] = {
+ .vmids = {VMID_MSS_MSA, VMID_WLAN},
+ .perms = {PERM_READ | PERM_WRITE,
+ PERM_READ | PERM_WRITE},
+ .nelems = 2,
+ },
+
+ [ICNSS_MSA_PERM_DUMP_COLLECT] = {
+ .vmids = {VMID_MSS_MSA, VMID_WLAN, VMID_HLOS},
+ .perms = {PERM_READ | PERM_WRITE,
+ PERM_READ | PERM_WRITE,
+ PERM_READ},
+ .nelems = 3,
+ },
+};
+
+struct icnss_msa_perm_list_t msa_perm_list[ICNSS_MSA_PERM_MAX] = {
+ [ICNSS_MSA_PERM_HLOS_ALL] = {
+ .vmids = {VMID_HLOS},
+ .perms = {PERM_READ | PERM_WRITE | PERM_EXEC},
+ .nelems = 1,
+ },
+
+ [ICNSS_MSA_PERM_WLAN_HW_RW] = {
+ .vmids = {VMID_MSS_MSA, VMID_WLAN, VMID_WLAN_CE},
+ .perms = {PERM_READ | PERM_WRITE,
+ PERM_READ | PERM_WRITE,
+ PERM_READ | PERM_WRITE},
+ .nelems = 3,
+ },
+
+ [ICNSS_MSA_PERM_DUMP_COLLECT] = {
+ .vmids = {VMID_MSS_MSA, VMID_WLAN, VMID_WLAN_CE, VMID_HLOS},
+ .perms = {PERM_READ | PERM_WRITE,
+ PERM_READ | PERM_WRITE,
+ PERM_READ | PERM_WRITE,
+ PERM_READ},
+ .nelems = 4,
+ },
+};
+
struct icnss_event_pd_service_down_data {
bool crashed;
bool fw_rejuvenate;
- bool wdog_bite;
};
struct icnss_driver_event {
@@ -196,8 +290,8 @@
ICNSS_PD_RESTART,
ICNSS_MSA0_ASSIGNED,
ICNSS_WLFW_EXISTS,
- ICNSS_WDOG_BITE,
ICNSS_SHUTDOWN_DONE,
+ ICNSS_HOST_TRIGGERED_PDR,
};
struct ce_irq_list {
@@ -250,6 +344,13 @@
uint32_t disable;
} ce_irqs[ICNSS_MAX_IRQ_REGISTRATIONS];
+ struct {
+ uint32_t pdr_fw_crash;
+ uint32_t pdr_host_error;
+ uint32_t root_pd_crash;
+ uint32_t root_pd_shutdown;
+ } recovery;
+
uint32_t pm_suspend;
uint32_t pm_suspend_err;
uint32_t pm_resume;
@@ -287,10 +388,10 @@
uint32_t vbatt_req;
uint32_t vbatt_resp;
uint32_t vbatt_req_err;
+ u32 rejuvenate_ind;
uint32_t rejuvenate_ack_req;
uint32_t rejuvenate_ack_resp;
uint32_t rejuvenate_ack_err;
- uint32_t trigger_recovery;
};
#define MAX_NO_OF_MAC_ADDR 4
@@ -299,6 +400,20 @@
uint32_t no_of_mac_addr_set;
};
+enum icnss_pdr_cause_index {
+ ICNSS_FW_CRASH,
+ ICNSS_ROOT_PD_CRASH,
+ ICNSS_ROOT_PD_SHUTDOWN,
+ ICNSS_HOST_ERROR,
+};
+
+static const char * const icnss_pdr_cause[] = {
+ [ICNSS_FW_CRASH] = "FW crash",
+ [ICNSS_ROOT_PD_CRASH] = "Root PD crashed",
+ [ICNSS_ROOT_PD_SHUTDOWN] = "Root PD shutdown",
+ [ICNSS_HOST_ERROR] = "Host error",
+};
+
struct service_notifier_context {
void *handle;
uint32_t instance_id;
@@ -364,6 +479,11 @@
bool is_wlan_mac_set;
struct icnss_wlan_mac_addr wlan_mac_addr;
bool bypass_s1_smmu;
+ u8 cause_for_rejuvenation;
+ u8 requesting_sub_system;
+ u16 line_number;
+ char function_name[QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1];
+ struct mutex dev_lock;
} *penv;
#ifdef CONFIG_ICNSS_DEBUG
@@ -375,6 +495,84 @@
static void icnss_ignore_qmi_timeout(bool ignore) { }
#endif
+static int icnss_assign_msa_perm(struct icnss_mem_region_info
+ *mem_region, enum icnss_msa_perm new_perm)
+{
+ int ret = 0;
+ phys_addr_t addr;
+ u32 size;
+ u32 i = 0;
+ u32 source_vmids[ICNSS_MAX_VMIDS];
+ u32 source_nelems;
+ u32 dest_vmids[ICNSS_MAX_VMIDS];
+ u32 dest_perms[ICNSS_MAX_VMIDS];
+ u32 dest_nelems;
+ enum icnss_msa_perm cur_perm = mem_region->perm;
+ struct icnss_msa_perm_list_t *new_perm_list, *old_perm_list;
+
+ addr = mem_region->reg_addr;
+ size = mem_region->size;
+
+ if (mem_region->secure_flag) {
+ new_perm_list = &msa_perm_secure_list[new_perm];
+ old_perm_list = &msa_perm_secure_list[cur_perm];
+ } else {
+ new_perm_list = &msa_perm_list[new_perm];
+ old_perm_list = &msa_perm_list[cur_perm];
+ }
+
+ source_nelems = old_perm_list->nelems;
+ dest_nelems = new_perm_list->nelems;
+
+ for (i = 0; i < source_nelems; ++i)
+ source_vmids[i] = old_perm_list->vmids[i];
+
+ for (i = 0; i < dest_nelems; ++i) {
+ dest_vmids[i] = new_perm_list->vmids[i];
+ dest_perms[i] = new_perm_list->perms[i];
+ }
+
+ ret = hyp_assign_phys(addr, size, source_vmids, source_nelems,
+ dest_vmids, dest_perms, dest_nelems);
+ if (ret) {
+ icnss_pr_err("Hyperviser map failed for PA=%pa size=%u err=%d\n",
+ &addr, size, ret);
+ goto out;
+ }
+
+ icnss_pr_dbg("Hypervisor map for source_nelems=%d, source[0]=%x, source[1]=%x, source[2]=%x,"
+ "source[3]=%x, dest_nelems=%d, dest[0]=%x, dest[1]=%x, dest[2]=%x, dest[3]=%x\n",
+ source_nelems, source_vmids[0], source_vmids[1],
+ source_vmids[2], source_vmids[3], dest_nelems,
+ dest_vmids[0], dest_vmids[1], dest_vmids[2],
+ dest_vmids[3]);
+out:
+ return ret;
+}
+
+static int icnss_assign_msa_perm_all(struct icnss_priv *priv,
+ enum icnss_msa_perm new_perm)
+{
+ int ret;
+ int i;
+ enum icnss_msa_perm old_perm;
+
+ for (i = 0; i < priv->nr_mem_region; i++) {
+ old_perm = priv->mem_region[i].perm;
+ ret = icnss_assign_msa_perm(&priv->mem_region[i], new_perm);
+ if (ret)
+ goto err_unmap;
+ priv->mem_region[i].perm = new_perm;
+ }
+ return 0;
+
+err_unmap:
+ for (i--; i >= 0; i--) {
+ icnss_assign_msa_perm(&priv->mem_region[i], old_perm);
+ }
+ return ret;
+}
+
static void icnss_pm_stay_awake(struct icnss_priv *priv)
{
if (atomic_inc_return(&priv->pm_count) != 1)
@@ -980,119 +1178,6 @@
}
EXPORT_SYMBOL(icnss_power_off);
-static int icnss_map_msa_permissions(struct icnss_mem_region_info *mem_region)
-{
- int ret = 0;
- phys_addr_t addr;
- u32 size;
- u32 source_vmlist[1] = {VMID_HLOS};
- int dest_vmids[3] = {VMID_MSS_MSA, VMID_WLAN, 0};
- int dest_perms[3] = {PERM_READ|PERM_WRITE,
- PERM_READ|PERM_WRITE,
- PERM_READ|PERM_WRITE};
- int source_nelems = sizeof(source_vmlist)/sizeof(u32);
- int dest_nelems = 0;
-
- addr = mem_region->reg_addr;
- size = mem_region->size;
-
- if (!mem_region->secure_flag) {
- dest_vmids[2] = VMID_WLAN_CE;
- dest_nelems = 3;
- } else {
- dest_vmids[2] = 0;
- dest_nelems = 2;
- }
- ret = hyp_assign_phys(addr, size, source_vmlist, source_nelems,
- dest_vmids, dest_perms, dest_nelems);
- if (ret) {
- icnss_pr_err("Hyperviser map failed for PA=%pa size=%u err=%d\n",
- &addr, size, ret);
- goto out;
- }
-
- icnss_pr_dbg("Hypervisor map for source=%x, dest_nelems=%d, dest[0]=%x, dest[1]=%x, dest[2]=%x\n",
- source_vmlist[0], dest_nelems, dest_vmids[0],
- dest_vmids[1], dest_vmids[2]);
-out:
- return ret;
-
-}
-
-static int icnss_unmap_msa_permissions(struct icnss_mem_region_info *mem_region)
-{
- int ret = 0;
- phys_addr_t addr;
- u32 size;
- u32 dest_vmids[1] = {VMID_HLOS};
- int source_vmlist[3] = {VMID_MSS_MSA, VMID_WLAN, 0};
- int dest_perms[1] = {PERM_READ|PERM_WRITE|PERM_EXEC};
- int source_nelems = 0;
- int dest_nelems = sizeof(dest_vmids)/sizeof(u32);
-
- addr = mem_region->reg_addr;
- size = mem_region->size;
-
- if (!mem_region->secure_flag) {
- source_vmlist[2] = VMID_WLAN_CE;
- source_nelems = 3;
- } else {
- source_vmlist[2] = 0;
- source_nelems = 2;
- }
-
- ret = hyp_assign_phys(addr, size, source_vmlist, source_nelems,
- dest_vmids, dest_perms, dest_nelems);
- if (ret) {
- icnss_pr_err("Hyperviser unmap failed for PA=%pa size=%u err=%d\n",
- &addr, size, ret);
- goto out;
- }
- icnss_pr_dbg("Hypervisor unmap for source_nelems=%d, source[0]=%x, source[1]=%x, source[2]=%x, dest=%x\n",
- source_nelems, source_vmlist[0], source_vmlist[1],
- source_vmlist[2], dest_vmids[0]);
-out:
- return ret;
-}
-
-static int icnss_setup_msa_permissions(struct icnss_priv *priv)
-{
- int ret;
- int i;
-
- if (test_bit(ICNSS_MSA0_ASSIGNED, &priv->state))
- return 0;
-
- for (i = 0; i < priv->nr_mem_region; i++) {
-
- ret = icnss_map_msa_permissions(&priv->mem_region[i]);
- if (ret)
- goto err_unmap;
- }
-
- set_bit(ICNSS_MSA0_ASSIGNED, &priv->state);
-
- return 0;
-
-err_unmap:
- for (i--; i >= 0; i--)
- icnss_unmap_msa_permissions(&priv->mem_region[i]);
- return ret;
-}
-
-static void icnss_remove_msa_permissions(struct icnss_priv *priv)
-{
- int i;
-
- if (!test_bit(ICNSS_MSA0_ASSIGNED, &priv->state))
- return;
-
- for (i = 0; i < priv->nr_mem_region; i++)
- icnss_unmap_msa_permissions(&priv->mem_region[i]);
-
- clear_bit(ICNSS_MSA0_ASSIGNED, &priv->state);
-}
-
static int wlfw_msa_mem_info_send_sync_msg(void)
{
int ret;
@@ -1634,6 +1719,60 @@
return ret;
}
+static int icnss_decode_rejuvenate_ind(void *msg, unsigned int msg_len)
+{
+ struct msg_desc ind_desc;
+ struct wlfw_rejuvenate_ind_msg_v01 ind_msg;
+ int ret = 0;
+
+ if (!penv || !penv->wlfw_clnt) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ memset(&ind_msg, 0, sizeof(ind_msg));
+
+ ind_desc.msg_id = QMI_WLFW_REJUVENATE_IND_V01;
+ ind_desc.max_msg_len = WLFW_REJUVENATE_IND_MSG_V01_MAX_MSG_LEN;
+ ind_desc.ei_array = wlfw_rejuvenate_ind_msg_v01_ei;
+
+ ret = qmi_kernel_decode(&ind_desc, &ind_msg, msg, msg_len);
+ if (ret < 0) {
+ icnss_pr_err("Failed to decode rejuvenate ind message: ret %d, msg_len %u\n",
+ ret, msg_len);
+ goto out;
+ }
+
+ if (ind_msg.cause_for_rejuvenation_valid)
+ penv->cause_for_rejuvenation = ind_msg.cause_for_rejuvenation;
+ else
+ penv->cause_for_rejuvenation = 0;
+ if (ind_msg.requesting_sub_system_valid)
+ penv->requesting_sub_system = ind_msg.requesting_sub_system;
+ else
+ penv->requesting_sub_system = 0;
+ if (ind_msg.line_number_valid)
+ penv->line_number = ind_msg.line_number;
+ else
+ penv->line_number = 0;
+ if (ind_msg.function_name_valid)
+ memcpy(penv->function_name, ind_msg.function_name,
+ QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1);
+ else
+ memset(penv->function_name, 0,
+ QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1);
+
+ icnss_pr_info("Cause for rejuvenation: 0x%x, requesting sub-system: 0x%x, line number: %u, function name: %s\n",
+ penv->cause_for_rejuvenation,
+ penv->requesting_sub_system,
+ penv->line_number,
+ penv->function_name);
+
+ penv->stats.rejuvenate_ind++;
+out:
+ return ret;
+}
+
static int wlfw_rejuvenate_ack_send_sync_msg(struct icnss_priv *priv)
{
int ret;
@@ -1827,6 +1966,7 @@
msg_id, penv->state);
icnss_ignore_qmi_timeout(true);
+ icnss_decode_rejuvenate_ind(msg, msg_len);
event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
if (event_data == NULL)
return;
@@ -1898,9 +2038,12 @@
if (ret < 0)
goto err_power_on;
- ret = icnss_setup_msa_permissions(penv);
- if (ret < 0)
- goto err_power_on;
+ if (!test_bit(ICNSS_MSA0_ASSIGNED, &penv->state)) {
+ ret = icnss_assign_msa_perm_all(penv, ICNSS_MSA_PERM_WLAN_HW_RW);
+ if (ret < 0)
+ goto err_power_on;
+ set_bit(ICNSS_MSA0_ASSIGNED, &penv->state);
+ }
ret = wlfw_msa_ready_send_sync_msg();
if (ret < 0)
@@ -1918,7 +2061,7 @@
return ret;
err_setup_msa:
- icnss_remove_msa_permissions(penv);
+ icnss_assign_msa_perm_all(penv, ICNSS_MSA_PERM_HLOS_ALL);
err_power_on:
icnss_hw_power_off(penv);
fail:
@@ -2004,10 +2147,7 @@
icnss_pm_relax(priv);
- if (test_bit(ICNSS_WDOG_BITE, &priv->state)) {
- icnss_call_driver_shutdown(priv);
- clear_bit(ICNSS_WDOG_BITE, &priv->state);
- }
+ icnss_call_driver_shutdown(priv);
clear_bit(ICNSS_PD_RESTART, &priv->state);
@@ -2157,8 +2297,7 @@
static int icnss_fw_crashed(struct icnss_priv *priv,
struct icnss_event_pd_service_down_data *event_data)
{
- icnss_pr_dbg("FW crashed, state: 0x%lx, wdog_bite: %d\n",
- priv->state, event_data->wdog_bite);
+ icnss_pr_dbg("FW crashed, state: 0x%lx\n", priv->state);
set_bit(ICNSS_PD_RESTART, &priv->state);
clear_bit(ICNSS_FW_READY, &priv->state);
@@ -2168,17 +2307,9 @@
if (test_bit(ICNSS_DRIVER_PROBED, &priv->state))
icnss_call_driver_uevent(priv, ICNSS_UEVENT_FW_CRASHED, NULL);
- if (event_data->wdog_bite) {
- set_bit(ICNSS_WDOG_BITE, &priv->state);
- goto out;
- }
-
- icnss_call_driver_shutdown(priv);
-
if (event_data->fw_rejuvenate)
wlfw_rejuvenate_ack_send_sync_msg(priv);
-out:
return 0;
}
@@ -2333,14 +2464,22 @@
struct icnss_priv *priv = container_of(nb, struct icnss_priv,
modem_ssr_nb);
struct icnss_uevent_fw_down_data fw_down_data;
+ int ret = 0;
icnss_pr_vdbg("Modem-Notify: event %lu\n", code);
- if (code == SUBSYS_AFTER_SHUTDOWN &&
- notif->crashed == CRASH_STATUS_ERR_FATAL) {
- icnss_remove_msa_permissions(priv);
- icnss_pr_info("Collecting msa0 segment dump\n");
- icnss_msa0_ramdump(priv);
+ if (code == SUBSYS_AFTER_SHUTDOWN) {
+ ret = icnss_assign_msa_perm_all(priv,
+ ICNSS_MSA_PERM_DUMP_COLLECT);
+ if (!ret) {
+ icnss_pr_info("Collecting msa0 segment dump\n");
+ icnss_msa0_ramdump(priv);
+ icnss_assign_msa_perm_all(priv,
+ ICNSS_MSA_PERM_WLAN_HW_RW);
+ } else {
+ icnss_pr_err("Not able to Collect msa0 segment dump, "
+ "Apps permissions not assigned %d\n", ret);
+ }
return NOTIFY_OK;
}
@@ -2353,6 +2492,11 @@
icnss_pr_info("Modem went down, state: 0x%lx, crashed: %d\n",
priv->state, notif->crashed);
+ if (notif->crashed)
+ priv->stats.recovery.root_pd_crash++;
+ else
+ priv->stats.recovery.root_pd_shutdown++;
+
icnss_ignore_qmi_timeout(true);
event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
@@ -2362,9 +2506,6 @@
event_data->crashed = notif->crashed;
- if (notif->crashed == CRASH_STATUS_WDOG_BITE)
- event_data->wdog_bite = true;
-
fw_down_data.crashed = !!notif->crashed;
icnss_call_driver_uevent(priv, ICNSS_UEVENT_FW_DOWN, &fw_down_data);
@@ -2432,6 +2573,7 @@
enum pd_subsys_state *state = data;
struct icnss_event_pd_service_down_data *event_data;
struct icnss_uevent_fw_down_data fw_down_data;
+ enum icnss_pdr_cause_index cause = ICNSS_ROOT_PD_CRASH;
icnss_pr_dbg("PD service notification: 0x%lx state: 0x%lx\n",
notification, priv->state);
@@ -2444,28 +2586,41 @@
if (event_data == NULL)
return notifier_from_errno(-ENOMEM);
+ event_data->crashed = true;
+
if (state == NULL) {
- event_data->crashed = true;
+ priv->stats.recovery.root_pd_crash++;
goto event_post;
}
- icnss_pr_info("PD service down, pd_state: %d, state: 0x%lx\n",
- *state, priv->state);
-
switch (*state) {
case ROOT_PD_WDOG_BITE:
- event_data->crashed = true;
- event_data->wdog_bite = true;
+ priv->stats.recovery.root_pd_crash++;
break;
case ROOT_PD_SHUTDOWN:
+ cause = ICNSS_ROOT_PD_SHUTDOWN;
+ priv->stats.recovery.root_pd_shutdown++;
+ event_data->crashed = false;
+ break;
+ case USER_PD_STATE_CHANGE:
+ if (test_bit(ICNSS_HOST_TRIGGERED_PDR, &priv->state)) {
+ cause = ICNSS_HOST_ERROR;
+ priv->stats.recovery.pdr_host_error++;
+ } else {
+ cause = ICNSS_FW_CRASH;
+ priv->stats.recovery.pdr_fw_crash++;
+ }
break;
default:
- event_data->crashed = true;
+ priv->stats.recovery.root_pd_crash++;
break;
}
+ icnss_pr_info("PD service down, pd_state: %d, state: 0x%lx: cause: %s\n",
+ *state, priv->state, icnss_pdr_cause[cause]);
event_post:
icnss_ignore_qmi_timeout(true);
+ clear_bit(ICNSS_HOST_TRIGGERED_PDR, &priv->state);
fw_down_data.crashed = event_data->crashed;
icnss_call_driver_uevent(priv, ICNSS_UEVENT_FW_DOWN, &fw_down_data);
@@ -3208,7 +3363,6 @@
WARN_ON(1);
icnss_pr_warn("Initiate PD restart at WLAN FW, state: 0x%lx\n",
priv->state);
- priv->stats.trigger_recovery++;
/*
* Initiate PDR, required only for the first instance
@@ -3216,6 +3370,9 @@
ret = service_notif_pd_restart(priv->service_notifier[0].name,
priv->service_notifier[0].instance_id);
+ if (!ret)
+ set_bit(ICNSS_HOST_TRIGGERED_PDR, &priv->state);
+
out:
return ret;
}
@@ -3227,6 +3384,7 @@
struct dma_iommu_mapping *mapping;
int atomic_ctx = 1;
int s1_bypass = 1;
+ int fast = 1;
int ret = 0;
icnss_pr_dbg("Initializing SMMU\n");
@@ -3240,7 +3398,17 @@
goto map_fail;
}
- if (!priv->bypass_s1_smmu) {
+ if (priv->bypass_s1_smmu) {
+ ret = iommu_domain_set_attr(mapping->domain,
+ DOMAIN_ATTR_S1_BYPASS,
+ &s1_bypass);
+ if (ret < 0) {
+ icnss_pr_err("Set s1_bypass attribute failed, err = %d\n",
+ ret);
+ goto set_attr_fail;
+ }
+ icnss_pr_dbg("SMMU S1 BYPASS\n");
+ } else {
ret = iommu_domain_set_attr(mapping->domain,
DOMAIN_ATTR_ATOMIC,
&atomic_ctx);
@@ -3249,14 +3417,17 @@
ret);
goto set_attr_fail;
}
- }
+ icnss_pr_dbg("SMMU ATTR ATOMIC\n");
- ret = iommu_domain_set_attr(mapping->domain,
- DOMAIN_ATTR_S1_BYPASS,
- &s1_bypass);
- if (ret < 0) {
- icnss_pr_err("Set s1_bypass attribute failed, err = %d\n", ret);
- goto set_attr_fail;
+ ret = iommu_domain_set_attr(mapping->domain,
+ DOMAIN_ATTR_FAST,
+ &fast);
+ if (ret < 0) {
+ icnss_pr_err("Set fast map attribute failed, err = %d\n",
+ ret);
+ goto set_attr_fail;
+ }
+ icnss_pr_dbg("SMMU FAST map set\n");
}
ret = arm_iommu_attach_device(&priv->pdev->dev, mapping);
@@ -3570,9 +3741,6 @@
if (ret)
return ret;
- if (ret == 0)
- memset(&priv->stats, 0, sizeof(priv->stats));
-
return count;
}
@@ -3660,12 +3828,12 @@
case ICNSS_WLFW_EXISTS:
seq_puts(s, "WLAN FW EXISTS");
continue;
- case ICNSS_WDOG_BITE:
- seq_puts(s, "MODEM WDOG BITE");
- continue;
case ICNSS_SHUTDOWN_DONE:
seq_puts(s, "SHUTDOWN DONE");
continue;
+ case ICNSS_HOST_TRIGGERED_PDR:
+ seq_puts(s, "HOST TRIGGERED PDR");
+ continue;
}
seq_printf(s, "UNKNOWN-%d", i);
@@ -3696,6 +3864,26 @@
return 0;
}
+static int icnss_stats_show_rejuvenate_info(struct seq_file *s,
+ struct icnss_priv *priv)
+{
+ if (priv->stats.rejuvenate_ind) {
+ seq_puts(s, "\n<---------------- Rejuvenate Info ----------------->\n");
+ seq_printf(s, "Number of Rejuvenations: %u\n",
+ priv->stats.rejuvenate_ind);
+ seq_printf(s, "Cause for Rejuvenation: 0x%x\n",
+ priv->cause_for_rejuvenation);
+ seq_printf(s, "Requesting Sub-System: 0x%x\n",
+ priv->requesting_sub_system);
+ seq_printf(s, "Line Number: %u\n",
+ priv->line_number);
+ seq_printf(s, "Function Name: %s\n",
+ priv->function_name);
+ }
+
+ return 0;
+}
+
static int icnss_stats_show_events(struct seq_file *s, struct icnss_priv *priv)
{
int i;
@@ -3761,10 +3949,14 @@
ICNSS_STATS_DUMP(s, priv, vbatt_req);
ICNSS_STATS_DUMP(s, priv, vbatt_resp);
ICNSS_STATS_DUMP(s, priv, vbatt_req_err);
+ ICNSS_STATS_DUMP(s, priv, rejuvenate_ind);
ICNSS_STATS_DUMP(s, priv, rejuvenate_ack_req);
ICNSS_STATS_DUMP(s, priv, rejuvenate_ack_resp);
ICNSS_STATS_DUMP(s, priv, rejuvenate_ack_err);
- ICNSS_STATS_DUMP(s, priv, trigger_recovery);
+ ICNSS_STATS_DUMP(s, priv, recovery.pdr_fw_crash);
+ ICNSS_STATS_DUMP(s, priv, recovery.pdr_host_error);
+ ICNSS_STATS_DUMP(s, priv, recovery.root_pd_crash);
+ ICNSS_STATS_DUMP(s, priv, recovery.root_pd_shutdown);
seq_puts(s, "\n<------------------ PM stats ------------------->\n");
ICNSS_STATS_DUMP(s, priv, pm_suspend);
@@ -3782,6 +3974,8 @@
icnss_stats_show_capability(s, priv);
+ icnss_stats_show_rejuvenate_info(s, priv);
+
icnss_stats_show_events(s, priv);
icnss_stats_show_state(s, priv);
@@ -3893,12 +4087,14 @@
{
struct icnss_priv *priv = s->private;
+ mutex_lock(&priv->dev_lock);
if (!priv->diag_reg_read_buf) {
seq_puts(s, "Usage: echo <mem_type> <offset> <data_len> > <debugfs>/icnss/reg_read\n");
if (!test_bit(ICNSS_FW_READY, &priv->state))
seq_puts(s, "Firmware is not ready yet!, wait for FW READY\n");
+ mutex_unlock(&priv->dev_lock);
return 0;
}
@@ -3912,6 +4108,7 @@
priv->diag_reg_read_len = 0;
kfree(priv->diag_reg_read_buf);
priv->diag_reg_read_buf = NULL;
+ mutex_unlock(&priv->dev_lock);
return 0;
}
@@ -3972,18 +4169,22 @@
data_len > QMI_WLFW_MAX_DATA_SIZE_V01)
return -EINVAL;
+ mutex_lock(&priv->dev_lock);
kfree(priv->diag_reg_read_buf);
priv->diag_reg_read_buf = NULL;
reg_buf = kzalloc(data_len, GFP_KERNEL);
- if (!reg_buf)
+ if (!reg_buf) {
+ mutex_unlock(&priv->dev_lock);
return -ENOMEM;
+ }
ret = wlfw_athdiag_read_send_sync_msg(priv, reg_offset,
mem_type, data_len,
reg_buf);
if (ret) {
kfree(reg_buf);
+ mutex_unlock(&priv->dev_lock);
return ret;
}
@@ -3991,6 +4192,7 @@
priv->diag_reg_read_mem_type = mem_type;
priv->diag_reg_read_len = data_len;
priv->diag_reg_read_buf = reg_buf;
+ mutex_unlock(&priv->dev_lock);
return count;
}
@@ -4114,6 +4316,9 @@
int i;
struct device *dev = &pdev->dev;
struct icnss_priv *priv;
+ const __be32 *addrp;
+ u64 prop_size = 0;
+ struct device_node *np;
if (penv) {
icnss_pr_err("Driver is already initialized\n");
@@ -4185,24 +4390,53 @@
}
}
- ret = of_property_read_u32(dev->of_node, "qcom,wlan-msa-memory",
- &priv->msa_mem_size);
+ np = of_parse_phandle(dev->of_node,
+ "qcom,wlan-msa-fixed-region", 0);
+ if (np) {
+ addrp = of_get_address(np, 0, &prop_size, NULL);
+ if (!addrp) {
+ icnss_pr_err("Failed to get assigned-addresses or property\n");
+ ret = -EINVAL;
+ goto out;
+ }
- if (ret || priv->msa_mem_size == 0) {
- icnss_pr_err("Fail to get MSA Memory Size: %u, ret: %d\n",
- priv->msa_mem_size, ret);
- goto out;
+ priv->msa_pa = of_translate_address(np, addrp);
+ if (priv->msa_pa == OF_BAD_ADDR) {
+ icnss_pr_err("Failed to translate MSA PA from device-tree\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ priv->msa_va = memremap(priv->msa_pa,
+ (unsigned long)prop_size, MEMREMAP_WT);
+ if (!priv->msa_va) {
+ icnss_pr_err("MSA PA memremap failed: phy addr: %pa\n",
+ &priv->msa_pa);
+ ret = -EINVAL;
+ goto out;
+ }
+ priv->msa_mem_size = prop_size;
+ } else {
+ ret = of_property_read_u32(dev->of_node, "qcom,wlan-msa-memory",
+ &priv->msa_mem_size);
+ if (ret || priv->msa_mem_size == 0) {
+ icnss_pr_err("Fail to get MSA Memory Size: %u ret: %d\n",
+ priv->msa_mem_size, ret);
+ goto out;
+ }
+
+ priv->msa_va = dmam_alloc_coherent(&pdev->dev,
+ priv->msa_mem_size, &priv->msa_pa, GFP_KERNEL);
+
+ if (!priv->msa_va) {
+ icnss_pr_err("DMA alloc failed for MSA\n");
+ ret = -ENOMEM;
+ goto out;
+ }
}
- priv->msa_va = dmam_alloc_coherent(&pdev->dev, priv->msa_mem_size,
- &priv->msa_pa, GFP_KERNEL);
- if (!priv->msa_va) {
- icnss_pr_err("DMA alloc failed for MSA\n");
- ret = -ENOMEM;
- goto out;
- }
- icnss_pr_dbg("MSA pa: %pa, MSA va: 0x%p\n", &priv->msa_pa,
- priv->msa_va);
+ icnss_pr_dbg("MSA pa: %pa, MSA va: 0x%p MSA Memory Size: 0x%x\n",
+ &priv->msa_pa, (void *)priv->msa_va, priv->msa_mem_size);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"smmu_iova_base");
@@ -4238,6 +4472,7 @@
spin_lock_init(&priv->event_lock);
spin_lock_init(&priv->on_off_lock);
+ mutex_init(&priv->dev_lock);
priv->event_wq = alloc_workqueue("icnss_driver_event", WQ_UNBOUND, 1);
if (!priv->event_wq) {
@@ -4307,7 +4542,8 @@
icnss_hw_power_off(penv);
- icnss_remove_msa_permissions(penv);
+ icnss_assign_msa_perm_all(penv, ICNSS_MSA_PERM_HLOS_ALL);
+ clear_bit(ICNSS_MSA0_ASSIGNED, &penv->state);
dev_set_drvdata(&pdev->dev, NULL);
diff --git a/drivers/soc/qcom/ipc_router_glink_xprt.c b/drivers/soc/qcom/ipc_router_glink_xprt.c
index 458e39d..cef3c77 100644
--- a/drivers/soc/qcom/ipc_router_glink_xprt.c
+++ b/drivers/soc/qcom/ipc_router_glink_xprt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -70,6 +70,7 @@
* @xprt_version: IPC Router header version supported by this XPRT.
* @xprt_option: XPRT specific options to be handled by IPC Router.
* @disable_pil_loading: Disable PIL Loading of the subsystem.
+ * @dynamic_wakeup_source: Dynamic wakeup source for this subsystem.
*/
struct ipc_router_glink_xprt {
struct list_head list;
@@ -91,6 +92,7 @@
uint32_t cur_lo_intents_cnt;
uint32_t cur_md_intents_cnt;
uint32_t cur_hi_intents_cnt;
+ bool dynamic_wakeup_source;
};
struct ipc_router_glink_xprt_work {
@@ -127,6 +129,7 @@
* @link_id: Network Cluster ID to which this XPRT belongs to.
* @xprt_version: IPC Router header version supported by this XPRT.
* @disable_pil_loading:Disable PIL Loading of the subsystem.
+ * @dynamic_wakeup_source: Dynamic wakeup source for this subsystem.
*/
struct ipc_router_glink_xprt_config {
char ch_name[GLINK_NAME_SIZE];
@@ -138,6 +141,7 @@
unsigned int xprt_version;
unsigned int xprt_option;
bool disable_pil_loading;
+ bool dynamic_wakeup_source;
};
#define MODULE_NAME "ipc_router_glink_xprt"
@@ -294,6 +298,14 @@
complete_all(&glink_xprtp->sft_close_complete);
}
+static bool ipc_router_glink_xprt_get_ws_info(struct msm_ipc_router_xprt *xprt)
+{
+ struct ipc_router_glink_xprt *glink_xprtp =
+ container_of(xprt, struct ipc_router_glink_xprt, xprt);
+
+ return glink_xprtp->dynamic_wakeup_source;
+}
+
static struct rr_packet *glink_xprt_copy_data(struct read_work *rx_work)
{
void *buf, *pbuf, *dest_buf;
@@ -706,6 +718,8 @@
glink_xprtp->xprt_option = glink_xprt_config->xprt_option;
glink_xprtp->disable_pil_loading =
glink_xprt_config->disable_pil_loading;
+ glink_xprtp->dynamic_wakeup_source =
+ glink_xprt_config->dynamic_wakeup_source;
if (!glink_xprtp->disable_pil_loading)
strlcpy(glink_xprtp->pil_edge, glink_xprt_config->pil_edge,
@@ -728,6 +742,7 @@
glink_xprtp->xprt.write = ipc_router_glink_xprt_write;
glink_xprtp->xprt.close = ipc_router_glink_xprt_close;
glink_xprtp->xprt.sft_close_done = glink_xprt_sft_close_done;
+ glink_xprtp->xprt.get_ws_info = ipc_router_glink_xprt_get_ws_info;
glink_xprtp->xprt.priv = NULL;
init_rwsem(&glink_xprtp->ss_reset_rwlock);
@@ -822,6 +837,10 @@
scnprintf(glink_xprt_config->ipc_rtr_xprt_name, IPC_RTR_XPRT_NAME_LEN,
"%s_%s", edge, ch_name);
+ key = "qcom,dynamic-wakeup-source";
+ glink_xprt_config->dynamic_wakeup_source =
+ of_property_read_bool(node, key);
+
return 0;
error:
diff --git a/drivers/soc/qcom/llcc-sdm845.c b/drivers/soc/qcom/llcc-sdm845.c
index be2b7da..739c053 100644
--- a/drivers/soc/qcom/llcc-sdm845.c
+++ b/drivers/soc/qcom/llcc-sdm845.c
@@ -57,23 +57,23 @@
}
static struct llcc_slice_config sdm845_data[] = {
- SCT_ENTRY("cpuss", 1, 1, 3072, 1, 0, 0xFFF, 0x0, 0, 0, 0, 1, 1),
- SCT_ENTRY("vidsc0", 2, 2, 512, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0),
- SCT_ENTRY("vidsc1", 3, 3, 512, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0),
- SCT_ENTRY("rotator", 4, 4, 563, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0),
- SCT_ENTRY("voice", 5, 5, 3072, 1, 0, 0xFFF, 0x0, 0, 0, 0, 1, 0),
- SCT_ENTRY("audio", 6, 6, 3072, 1, 0, 0xFFF, 0x0, 0, 0, 0, 1, 0),
- SCT_ENTRY("modemhp_grow", 7, 7, 1024, 2, 0, 0xFFF, 0x0, 0, 0, 0, 1, 0),
- SCT_ENTRY("modem", 8, 8, 3072, 1, 0, 0xFFF, 0x0, 0, 0, 0, 1, 0),
- SCT_ENTRY("compute", 10, 10, 3072, 1, 0, 0xFFF, 0x0, 0, 0, 0, 1, 0),
- SCT_ENTRY("gpuhtw", 11, 11, 515, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0),
- SCT_ENTRY("gpu", 12, 12, 2560, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0),
- SCT_ENTRY("mmuhwt", 13, 13, 3072, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1),
- SCT_ENTRY("compute_dma", 15, 15, 3072, 1, 0, 0xFFF, 0x0, 0, 0, 0, 1, 0),
- SCT_ENTRY("display", 16, 16, 3072, 1, 0, 0xFFF, 0x0, 0, 0, 0, 1, 0),
- SCT_ENTRY("modemhp_fix", 20, 20, 1024, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0),
- SCT_ENTRY("modem_paging", 21, 21, 1024, 0, 1, 0xF, 0x0, 0, 0, 0, 1, 0),
- SCT_ENTRY("audiohw", 22, 22, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0),
+ SCT_ENTRY("cpuss", 1, 1, 3072, 1, 0, 0xFF0, 0xF, 0, 0, 1, 1, 1),
+ SCT_ENTRY("vidsc0", 2, 2, 512, 2, 1, 0x0, 0x0F0, 0, 0, 1, 1, 0),
+ SCT_ENTRY("vidsc1", 3, 3, 512, 2, 1, 0x0, 0x0F0, 0, 0, 1, 1, 0),
+ SCT_ENTRY("rotator", 4, 4, 563, 2, 1, 0x0, 0x00F, 2, 0, 1, 1, 0),
+ SCT_ENTRY("voice", 5, 5, 3072, 1, 0, 0xFF0, 0xF, 0, 0, 1, 1, 0),
+ SCT_ENTRY("audio", 6, 6, 3072, 1, 0, 0xFF0, 0xF, 0, 0, 1, 1, 0),
+ SCT_ENTRY("modemhp_grow", 7, 7, 1024, 2, 0, 0x0F0, 0xF0F, 0, 0, 1, 1, 0),
+ SCT_ENTRY("modem", 8, 8, 3072, 1, 0, 0xFF0, 0xF, 0, 0, 1, 1, 0),
+ SCT_ENTRY("compute", 10, 10, 3072, 1, 0, 0xFF0, 0xF, 0, 0, 1, 1, 0),
+ SCT_ENTRY("gpuhtw", 11, 11, 512, 1, 1, 0x0, 0xC, 0, 0, 1, 1, 0),
+ SCT_ENTRY("gpu", 12, 12, 2560, 1, 0, 0xFF0, 0x3, 0, 0, 1, 1, 0),
+ SCT_ENTRY("mmuhwt", 13, 13, 3072, 1, 0, 0xFF0, 0xF, 0, 0, 1, 0, 1),
+ SCT_ENTRY("compute_dma", 15, 15, 3072, 1, 0, 0xFF0, 0xF, 0, 0, 1, 1, 0),
+ SCT_ENTRY("display", 16, 16, 3072, 1, 0, 0xFF0, 0xF, 0, 0, 1, 1, 0),
+ SCT_ENTRY("modemhp_fix", 20, 20, 1024, 2, 1, 0x0, 0xF00, 0, 0, 1, 1, 0),
+ SCT_ENTRY("modem_paging", 21, 21, 1024, 0, 1, 0x0, 0xF, 0, 0, 1, 1, 0),
+ SCT_ENTRY("audiohw", 22, 22, 1024, 1, 1, 0xFF0, 0xF, 0, 0, 1, 1, 0),
};
static int sdm845_qcom_llcc_probe(struct platform_device *pdev)
diff --git a/drivers/soc/qcom/memshare/Kconfig b/drivers/soc/qcom/memshare/Kconfig
new file mode 100644
index 0000000..7eb1415
--- /dev/null
+++ b/drivers/soc/qcom/memshare/Kconfig
@@ -0,0 +1,9 @@
+config MEM_SHARE_QMI_SERVICE
+ depends on MSM_QMI_INTERFACE
+ bool "Shared Heap for external processors"
+ help
+ Memory Share Kernel Qualcomm Messaging Interface Service
+ receives requests from Modem Processor Sub System
+ for heap alloc/free from Application Processor
+ Sub System and sends a response back to the client with
+ proper handle/address.
diff --git a/drivers/soc/qcom/memshare/Makefile b/drivers/soc/qcom/memshare/Makefile
new file mode 100644
index 0000000..cf49fbc
--- /dev/null
+++ b/drivers/soc/qcom/memshare/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_MEM_SHARE_QMI_SERVICE) := heap_mem_ext_v01.o msm_memshare.o
\ No newline at end of file
diff --git a/drivers/soc/qcom/memshare/heap_mem_ext_v01.c b/drivers/soc/qcom/memshare/heap_mem_ext_v01.c
new file mode 100644
index 0000000..afe9a87
--- /dev/null
+++ b/drivers/soc/qcom/memshare/heap_mem_ext_v01.c
@@ -0,0 +1,472 @@
+/* Copyright (c) 2013-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/qmi_encdec.h>
+#include <soc/qcom/msm_qmi_interface.h>
+#include "heap_mem_ext_v01.h"
+
+struct elem_info mem_alloc_req_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct mem_alloc_req_msg_v01,
+ num_bytes),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct mem_alloc_req_msg_v01,
+ block_alignment_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct mem_alloc_req_msg_v01,
+ block_alignment),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info mem_alloc_resp_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_SIGNED_2_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(uint16_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct mem_alloc_resp_msg_v01,
+ resp),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct mem_alloc_resp_msg_v01,
+ handle_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct mem_alloc_resp_msg_v01,
+ handle),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct mem_alloc_resp_msg_v01,
+ num_bytes_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct mem_alloc_resp_msg_v01,
+ num_bytes),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info mem_free_req_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct mem_free_req_msg_v01,
+ handle),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info mem_free_resp_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_SIGNED_2_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(uint16_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct mem_free_resp_msg_v01,
+ resp),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info dhms_mem_alloc_addr_info_type_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct
+ dhms_mem_alloc_addr_info_type_v01,
+ phy_addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct
+ dhms_mem_alloc_addr_info_type_v01,
+ num_bytes),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info mem_alloc_generic_req_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct mem_alloc_generic_req_msg_v01,
+ num_bytes),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct mem_alloc_generic_req_msg_v01,
+ client_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct mem_alloc_generic_req_msg_v01,
+ proc_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x04,
+ .offset = offsetof(struct mem_alloc_generic_req_msg_v01,
+ sequence_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct mem_alloc_generic_req_msg_v01,
+ alloc_contiguous_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct mem_alloc_generic_req_msg_v01,
+ alloc_contiguous),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct mem_alloc_generic_req_msg_v01,
+ block_alignment_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct mem_alloc_generic_req_msg_v01,
+ block_alignment),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info mem_alloc_generic_resp_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ mem_alloc_generic_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ mem_alloc_generic_resp_msg_v01,
+ sequence_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ mem_alloc_generic_resp_msg_v01,
+ sequence_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ mem_alloc_generic_resp_msg_v01,
+ dhms_mem_alloc_addr_info_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ mem_alloc_generic_resp_msg_v01,
+ dhms_mem_alloc_addr_info_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = MAX_ARR_CNT_V01,
+ .elem_size = sizeof(struct
+ dhms_mem_alloc_addr_info_type_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ mem_alloc_generic_resp_msg_v01,
+ dhms_mem_alloc_addr_info),
+ .ei_array = dhms_mem_alloc_addr_info_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info mem_free_generic_req_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct mem_free_generic_req_msg_v01,
+ dhms_mem_alloc_addr_info_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = MAX_ARR_CNT_V01,
+ .elem_size = sizeof(struct
+ dhms_mem_alloc_addr_info_type_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct mem_free_generic_req_msg_v01,
+ dhms_mem_alloc_addr_info),
+ .ei_array = dhms_mem_alloc_addr_info_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct mem_free_generic_req_msg_v01,
+ client_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct mem_free_generic_req_msg_v01,
+ client_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct mem_free_generic_req_msg_v01,
+ proc_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct mem_free_generic_req_msg_v01,
+ proc_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info mem_free_generic_resp_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ mem_free_generic_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info mem_query_size_req_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct mem_query_size_req_msg_v01,
+ client_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct mem_query_size_req_msg_v01,
+ proc_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct mem_query_size_req_msg_v01,
+ proc_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info mem_query_size_resp_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ mem_query_size_rsp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct mem_query_size_rsp_msg_v01,
+ size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct mem_query_size_rsp_msg_v01,
+ size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
diff --git a/drivers/soc/qcom/memshare/heap_mem_ext_v01.h b/drivers/soc/qcom/memshare/heap_mem_ext_v01.h
new file mode 100644
index 0000000..cfe3e49
--- /dev/null
+++ b/drivers/soc/qcom/memshare/heap_mem_ext_v01.h
@@ -0,0 +1,356 @@
+/* Copyright (c) 2013-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef HEAP_MEM_EXT_SERVICE_01_H
+#define HEAP_MEM_EXT_SERVICE_01_H
+
+#include <soc/qcom/msm_qmi_interface.h>
+
+#define MEM_ALLOC_REQ_MAX_MSG_LEN_V01 255
+#define MEM_FREE_REQ_MAX_MSG_LEN_V01 255
+#define MAX_ARR_CNT_V01 64
+
+/* One allocated memory block on the wire: HLOS physical address + length */
+struct dhms_mem_alloc_addr_info_type_v01 {
+	uint64_t phy_addr;
+	uint32_t num_bytes;
+};
+
+/* Peripheral subsystem that originates a memshare request */
+enum dhms_mem_proc_id_v01 {
+	/* To force a 32 bit signed enum. Do not change or use */
+	DHMS_MEM_PROC_ID_MIN_ENUM_VAL_V01 = -2147483647,
+	/* Request from MPSS processor */
+	DHMS_MEM_PROC_MPSS_V01 = 0,
+	/* Request from ADSP processor */
+	DHMS_MEM_PROC_ADSP_V01 = 1,
+	/* Request from WCNSS processor */
+	DHMS_MEM_PROC_WCNSS_V01 = 2,
+	/* To force a 32 bit signed enum. Do not change or use */
+	DHMS_MEM_PROC_ID_MAX_ENUM_VAL_V01 = 2147483647
+};
+
+/*
+ * Remote client identity. DHMS_MEM_CLIENT_INVALID doubles as the
+ * "unused slot / lookup failed" sentinel in the driver's client table.
+ */
+enum dhms_mem_client_id_v01 {
+	/* To force a 32 bit signed enum. Do not change or use */
+	DHMS_MEM_CLIENT_ID_MIN_ENUM_VAL_V01 = -2147483647,
+	/* Request from GPS Client */
+	DHMS_MEM_CLIENT_GPS_V01 = 0,
+	/* Invalid Client */
+	DHMS_MEM_CLIENT_INVALID = 1000,
+	/* To force a 32 bit signed enum. Do not change or use */
+	DHMS_MEM_CLIENT_ID_MAX_ENUM_VAL_V01 = 2147483647
+};
+
+/* Requested alignment of an allocated block: value N means 2^(N+1) bytes */
+enum dhms_mem_block_align_enum_v01 {
+	/* To force a 32 bit signed enum. Do not change or use
+	 */
+	DHMS_MEM_BLOCK_ALIGN_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
+	/* Align allocated memory by 2 bytes */
+	DHMS_MEM_BLOCK_ALIGN_2_V01 = 0,
+	/* Align allocated memory by 4 bytes */
+	DHMS_MEM_BLOCK_ALIGN_4_V01 = 1,
+	/* Align allocated memory by 8 bytes */
+	DHMS_MEM_BLOCK_ALIGN_8_V01 = 2,
+	/* Align allocated memory by 16 bytes */
+	DHMS_MEM_BLOCK_ALIGN_16_V01 = 3,
+	/* Align allocated memory by 32 bytes */
+	DHMS_MEM_BLOCK_ALIGN_32_V01 = 4,
+	/* Align allocated memory by 64 bytes */
+	DHMS_MEM_BLOCK_ALIGN_64_V01 = 5,
+	/* Align allocated memory by 128 bytes */
+	DHMS_MEM_BLOCK_ALIGN_128_V01 = 6,
+	/* Align allocated memory by 256 bytes */
+	DHMS_MEM_BLOCK_ALIGN_256_V01 = 7,
+	/* Align allocated memory by 512 bytes */
+	DHMS_MEM_BLOCK_ALIGN_512_V01 = 8,
+	/* Align allocated memory by 1024 bytes */
+	DHMS_MEM_BLOCK_ALIGN_1K_V01 = 9,
+	/* Align allocated memory by 2048 bytes */
+	DHMS_MEM_BLOCK_ALIGN_2K_V01 = 10,
+	/* Align allocated memory by 4096 bytes */
+	DHMS_MEM_BLOCK_ALIGN_4K_V01 = 11,
+	/* To force a 32 bit signed enum. Do not change or use */
+	DHMS_MEM_BLOCK_ALIGN_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+};
+
+/* Request Message; This command is used for getting
+ * the multiple physically contiguous
+ * memory blocks from the server memory subsystem
+ */
+/* Request Message; This command is used for getting
+ * the multiple physically contiguous
+ * memory blocks from the server memory subsystem
+ */
+struct mem_alloc_req_msg_v01 {
+
+	/* Mandatory */
+	/* Requested size in bytes */
+	uint32_t num_bytes;
+
+	/* Optional */
+	/* Must be set to true if block_alignment
+	 * is being passed
+	 */
+	uint8_t block_alignment_valid;
+	/* The block alignment for the memory block to be allocated
+	 */
+	enum dhms_mem_block_align_enum_v01 block_alignment;
+}; /* Message */
+
+/* Response Message; This command is used for getting
+ * the multiple physically contiguous memory blocks
+ * from the server memory subsystem
+ */
+/* Response Message; This command is used for getting
+ * the multiple physically contiguous memory blocks
+ * from the server memory subsystem
+ */
+struct mem_alloc_resp_msg_v01 {
+
+	/* Mandatory */
+	/* Result Code */
+	/* The result of the requested memory operation
+	 */
+	enum qmi_result_type_v01 resp;
+	/* Optional */
+	/* Memory Block Handle
+	 */
+	/* Must be set to true if handle is being passed
+	 */
+	uint8_t handle_valid;
+	/* The physical address of the memory allocated on the HLOS
+	 */
+	uint64_t handle;
+	/* Optional */
+	/* Memory block size */
+	/* Must be set to true if num_bytes is being passed
+	 */
+	uint8_t num_bytes_valid;
+	/* The number of bytes actually allocated for the request.
+	 * This value can be smaller than the size requested in
+	 * QMI_DHMS_MEM_ALLOC_REQ_MSG.
+	 */
+	uint32_t num_bytes;
+}; /* Message */
+
+/* Request Message; This command is used for releasing
+ * the multiple physically contiguous
+ * memory blocks to the server memory subsystem
+ */
+/* Request Message; This command is used for releasing
+ * the multiple physically contiguous
+ * memory blocks to the server memory subsystem
+ */
+struct mem_free_req_msg_v01 {
+
+	/* Mandatory */
+	/* Physical address of memory to be freed.
+	 * NOTE(review): this is uint32_t while the alloc response handle
+	 * is uint64_t — addresses above 4 GB would be truncated; confirm
+	 * against the wire protocol spec.
+	 */
+	uint32_t handle;
+}; /* Message */
+
+/* Response Message; This command is used for releasing
+ * the multiple physically contiguous
+ * memory blocks to the server memory subsystem
+ */
+/* Response Message; This command is used for releasing
+ * the multiple physically contiguous
+ * memory blocks to the server memory subsystem
+ */
+struct mem_free_resp_msg_v01 {
+
+	/* Mandatory */
+	/* Result of the requested memory operation, todo,
+	 * need to check the async operation for free
+	 */
+	enum qmi_result_type_v01 resp;
+}; /* Message */
+
+/* Request Message; This command is used for getting
+ * the multiple physically contiguous
+ * memory blocks from the server memory subsystem
+ */
+/* Request Message; This command is used for getting
+ * the multiple physically contiguous
+ * memory blocks from the server memory subsystem
+ */
+struct mem_alloc_generic_req_msg_v01 {
+
+	/* Mandatory */
+	/* Requested size in bytes */
+	uint32_t num_bytes;
+
+	/* Mandatory */
+	/* Client id */
+	enum dhms_mem_client_id_v01 client_id;
+
+	/* Mandatory */
+	/* Peripheral Id */
+	enum dhms_mem_proc_id_v01 proc_id;
+
+	/* Mandatory */
+	/* Sequence id echoed back in the response for request matching */
+	uint32_t sequence_id;
+
+	/* Optional */
+	/* alloc_contiguous */
+	/* Must be set to true if alloc_contiguous is being passed */
+	uint8_t alloc_contiguous_valid;
+
+	/* Alloc_contiguous is used to identify that clients are requesting
+	 * for contiguous or non contiguous memory, default is contiguous
+	 * 0 = non contiguous else contiguous
+	 */
+	uint8_t alloc_contiguous;
+
+	/* Optional */
+	/* Must be set to true if block_alignment
+	 * is being passed
+	 */
+	uint8_t block_alignment_valid;
+
+	/* The block alignment for the memory block to be allocated
+	 */
+	enum dhms_mem_block_align_enum_v01 block_alignment;
+
+}; /* Message */
+
+/* Response Message; This command is used for getting
+ * the multiple physically contiguous memory blocks
+ * from the server memory subsystem
+ */
+/* Response Message; This command is used for getting
+ * the multiple physically contiguous memory blocks
+ * from the server memory subsystem
+ */
+struct mem_alloc_generic_resp_msg_v01 {
+
+	/* Mandatory */
+	/* Result Code */
+	/* The result of the requested memory operation
+	 */
+	struct qmi_response_type_v01 resp;
+
+	/* Optional */
+	/* Sequence ID */
+	/* Must be set to true if sequence_id is being passed */
+	uint8_t sequence_id_valid;
+
+
+	/* Optional (guarded by sequence_id_valid above) */
+	/* Sequence id echoed from the request */
+	uint32_t sequence_id;
+
+	/* Optional */
+	/* Memory Block Handle
+	 */
+	/* Must be set to true if handle is being passed
+	 */
+	uint8_t dhms_mem_alloc_addr_info_valid;
+
+	/* Optional */
+	/* Number of valid entries in dhms_mem_alloc_addr_info */
+	uint32_t dhms_mem_alloc_addr_info_len;
+
+	/* Optional */
+	/* The physical address of the memory allocated on the HLOS
+	 */
+	struct dhms_mem_alloc_addr_info_type_v01
+		dhms_mem_alloc_addr_info[MAX_ARR_CNT_V01];
+
+}; /* Message */
+
+/* Request Message; This command is used for releasing
+ * the multiple physically contiguous
+ * memory blocks to the server memory subsystem
+ */
+/* Request Message; This command is used for releasing
+ * the multiple physically contiguous
+ * memory blocks to the server memory subsystem
+ */
+struct mem_free_generic_req_msg_v01 {
+
+	/* Mandatory */
+	/* Must be set to # of elements in array */
+	uint32_t dhms_mem_alloc_addr_info_len;
+
+	/* Mandatory */
+	/* Physical address and size of the memory allocated
+	 * on the HLOS to be freed.
+	 */
+	struct dhms_mem_alloc_addr_info_type_v01
+			dhms_mem_alloc_addr_info[MAX_ARR_CNT_V01];
+
+	/* Optional */
+	/* Client ID */
+	/* Must be set to true if client_id is being passed */
+	uint8_t client_id_valid;
+
+	/* Optional */
+	/* Client Id */
+	enum dhms_mem_client_id_v01 client_id;
+
+	/* Optional */
+	/* Proc ID */
+	/* Must be set to true if proc_id is being passed */
+	uint8_t proc_id_valid;
+
+	/* Optional */
+	/* Peripheral */
+	enum dhms_mem_proc_id_v01 proc_id;
+
+}; /* Message */
+
+/* Response Message; This command is used for releasing
+ * the multiple physically contiguous
+ * memory blocks to the server memory subsystem
+ */
+/* Response Message; This command is used for releasing
+ * the multiple physically contiguous
+ * memory blocks to the server memory subsystem
+ */
+struct mem_free_generic_resp_msg_v01 {
+
+	/*
+	 * Mandatory
+	 * Result of the requested memory operation, todo,
+	 * need to check the async operation for free
+	 */
+	struct qmi_response_type_v01 resp;
+
+}; /* Message */
+
+/* Request Message; queries the size currently provisioned for a client */
+struct mem_query_size_req_msg_v01 {
+
+	/* Mandatory */
+	enum dhms_mem_client_id_v01 client_id;
+
+	/*
+	 * Optional
+	 * Proc ID
+	 * proc_id_valid must be set to true if proc_id is being passed
+	 */
+	uint8_t proc_id_valid;
+
+	enum dhms_mem_proc_id_v01 proc_id;
+}; /* Message */
+
+/* Response Message; reports the size provisioned for the queried client */
+struct mem_query_size_rsp_msg_v01 {
+
+	/*
+	 * Mandatory
+	 * Result Code
+	 */
+	struct qmi_response_type_v01 resp;
+
+	/*
+	 * Optional
+	 * size_valid must be set to true if size is being passed
+	 */
+	uint8_t size_valid;
+
+	uint32_t size;
+}; /* Message */
+
+
+extern struct elem_info mem_alloc_req_msg_data_v01_ei[];
+extern struct elem_info mem_alloc_resp_msg_data_v01_ei[];
+extern struct elem_info mem_free_req_msg_data_v01_ei[];
+extern struct elem_info mem_free_resp_msg_data_v01_ei[];
+extern struct elem_info mem_alloc_generic_req_msg_data_v01_ei[];
+extern struct elem_info mem_alloc_generic_resp_msg_data_v01_ei[];
+extern struct elem_info mem_free_generic_req_msg_data_v01_ei[];
+extern struct elem_info mem_free_generic_resp_msg_data_v01_ei[];
+extern struct elem_info mem_query_size_req_msg_data_v01_ei[];
+extern struct elem_info mem_query_size_resp_msg_data_v01_ei[];
+
+/*Service Message Definition*/
+#define MEM_ALLOC_REQ_MSG_V01 0x0020
+#define MEM_ALLOC_RESP_MSG_V01 0x0020
+#define MEM_FREE_REQ_MSG_V01 0x0021
+#define MEM_FREE_RESP_MSG_V01 0x0021
+#define MEM_ALLOC_GENERIC_REQ_MSG_V01 0x0022
+#define MEM_ALLOC_GENERIC_RESP_MSG_V01 0x0022
+#define MEM_FREE_GENERIC_REQ_MSG_V01 0x0023
+#define MEM_FREE_GENERIC_RESP_MSG_V01 0x0023
+#define MEM_QUERY_SIZE_REQ_MSG_V01 0x0024
+#define MEM_QUERY_SIZE_RESP_MSG_V01 0x0024
+
+#endif
diff --git a/drivers/soc/qcom/memshare/msm_memshare.c b/drivers/soc/qcom/memshare/msm_memshare.c
new file mode 100644
index 0000000..e58fa2e
--- /dev/null
+++ b/drivers/soc/qcom/memshare/msm_memshare.c
@@ -0,0 +1,1083 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/notifier.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/msm_qmi_interface.h>
+#include <soc/qcom/scm.h>
+#include "msm_memshare.h"
+#include "heap_mem_ext_v01.h"
+
+#include <soc/qcom/secure_buffer.h>
+#include <soc/qcom/ramdump.h>
+
+/* Macros */
+#define MEMSHARE_DEV_NAME "memshare"
+#define MEMSHARE_CHILD_DEV_NAME "memshare_child"
+/*
+ * DMA attributes applied to every memshare allocation/free
+ * (DMA_ATTR_NO_KERNEL_MAPPING is OR-ed in by initialize_client()).
+ * Plain declarator: the original "unsigned long(attrs)" parenthesized
+ * form was a confusing leftover and is semantically identical.
+ */
+static unsigned long attrs;
+
+static struct qmi_handle *mem_share_svc_handle;
+static void mem_share_svc_recv_msg(struct work_struct *work);
+static DECLARE_DELAYED_WORK(work_recv_msg, mem_share_svc_recv_msg);
+static struct workqueue_struct *mem_share_svc_workqueue;
+static uint64_t bootup_request;
+static bool ramdump_event;
+static void *memshare_ramdump_dev[MAX_CLIENTS];
+static struct device *memshare_dev[MAX_CLIENTS];
+
+/* Memshare Driver Structure */
+struct memshare_driver {
+	struct device *dev;
+	struct mutex mem_share;
+	struct mutex mem_free;
+	struct work_struct memshare_init_work;
+};
+
+struct memshare_child {
+	struct device *dev;
+};
+
+static struct memshare_driver *memsh_drv;
+static struct memshare_child *memsh_child;
+/* Per-client bookkeeping table, indexed by client slot */
+static struct mem_blocks memblock[MAX_CLIENTS];
+static uint32_t num_clients;
+/*
+ * QMI message descriptors: tie each message ID to its TLV encoding table
+ * and maximum encoded length.
+ */
+static struct msg_desc mem_share_svc_alloc_req_desc = {
+	.max_msg_len = MEM_ALLOC_REQ_MAX_MSG_LEN_V01,
+	.msg_id = MEM_ALLOC_REQ_MSG_V01,
+	.ei_array = mem_alloc_req_msg_data_v01_ei,
+};
+
+static struct msg_desc mem_share_svc_alloc_resp_desc = {
+	.max_msg_len = MEM_ALLOC_REQ_MAX_MSG_LEN_V01,
+	.msg_id = MEM_ALLOC_RESP_MSG_V01,
+	.ei_array = mem_alloc_resp_msg_data_v01_ei,
+};
+
+static struct msg_desc mem_share_svc_free_req_desc = {
+	.max_msg_len = MEM_FREE_REQ_MAX_MSG_LEN_V01,
+	.msg_id = MEM_FREE_REQ_MSG_V01,
+	.ei_array = mem_free_req_msg_data_v01_ei,
+};
+
+static struct msg_desc mem_share_svc_free_resp_desc = {
+	.max_msg_len = MEM_FREE_REQ_MAX_MSG_LEN_V01,
+	.msg_id = MEM_FREE_RESP_MSG_V01,
+	.ei_array = mem_free_resp_msg_data_v01_ei,
+};
+
+static struct msg_desc mem_share_svc_alloc_generic_req_desc = {
+	.max_msg_len = MEM_ALLOC_REQ_MAX_MSG_LEN_V01,
+	.msg_id = MEM_ALLOC_GENERIC_REQ_MSG_V01,
+	.ei_array = mem_alloc_generic_req_msg_data_v01_ei,
+};
+
+static struct msg_desc mem_share_svc_alloc_generic_resp_desc = {
+	.max_msg_len = MEM_ALLOC_REQ_MAX_MSG_LEN_V01,
+	.msg_id = MEM_ALLOC_GENERIC_RESP_MSG_V01,
+	.ei_array = mem_alloc_generic_resp_msg_data_v01_ei,
+};
+
+static struct msg_desc mem_share_svc_free_generic_req_desc = {
+	.max_msg_len = MEM_FREE_REQ_MAX_MSG_LEN_V01,
+	.msg_id = MEM_FREE_GENERIC_REQ_MSG_V01,
+	.ei_array = mem_free_generic_req_msg_data_v01_ei,
+};
+
+static struct msg_desc mem_share_svc_free_generic_resp_desc = {
+	.max_msg_len = MEM_FREE_REQ_MAX_MSG_LEN_V01,
+	.msg_id = MEM_FREE_GENERIC_RESP_MSG_V01,
+	.ei_array = mem_free_generic_resp_msg_data_v01_ei,
+};
+
+/*
+ * NOTE(review): the query descriptors reuse MEM_FREE_REQ_MAX_MSG_LEN_V01
+ * rather than a dedicated query length macro — presumably both are 255;
+ * confirm against the service IDL.
+ */
+static struct msg_desc mem_share_svc_size_query_req_desc = {
+	.max_msg_len = MEM_FREE_REQ_MAX_MSG_LEN_V01,
+	.msg_id = MEM_QUERY_SIZE_REQ_MSG_V01,
+	.ei_array = mem_query_size_req_msg_data_v01_ei,
+};
+
+static struct msg_desc mem_share_svc_size_query_resp_desc = {
+	.max_msg_len = MEM_FREE_REQ_MAX_MSG_LEN_V01,
+	.msg_id = MEM_QUERY_SIZE_RESP_MSG_V01,
+	.ei_array = mem_query_size_resp_msg_data_v01_ei,
+};
+
+/*
+ * This API creates ramdump dev handlers
+ * for each of the memshare clients.
+ * These dev handlers will be used for
+ * extracting the ramdump for loaned memory
+ * segments.
+ */
+
+/*
+ * Create a ramdump device ("memshare_GPS"/"memshare_FTM"/"memshare_DIAG")
+ * for the given client slot, used later to extract loaned-memory dumps.
+ * Returns 0 on success, -EINVAL for unknown slots, -ENODEV/-ENOMEM on
+ * device failures.
+ */
+static int mem_share_configure_ramdump(int client)
+{
+	char client_name[18];
+	const char *clnt = NULL;
+
+	switch (client) {
+	case 0:
+		clnt = "GPS";
+		break;
+	case 1:
+		clnt = "FTM";
+		break;
+	case 2:
+		clnt = "DIAG";
+		break;
+	default:
+		pr_err("memshare: no memshare clients registered\n");
+		return -EINVAL;
+	}
+
+	snprintf(client_name, sizeof(client_name),
+		"memshare_%s", clnt);
+	if (memshare_dev[client]) {
+		memshare_ramdump_dev[client] =
+			create_ramdump_device(client_name,
+				memshare_dev[client]);
+	} else {
+		pr_err("memshare:%s: invalid memshare device\n", __func__);
+		return -ENODEV;
+	}
+	if (IS_ERR_OR_NULL(memshare_ramdump_dev[client])) {
+		pr_err("memshare: %s: Unable to create memshare ramdump device\n",
+			__func__);
+		memshare_ramdump_dev[client] = NULL;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/*
+ * Look up the slot for (client_id, proc) in memblock[].
+ * If not found and @request is 0 (CHECK path), register the client in the
+ * first free slot and create its ramdump device.
+ * Returns the slot index, or DHMS_MEM_CLIENT_INVALID (1000, which is
+ * also >= MAX_CLIENTS) when no slot matches/is available.
+ */
+static int check_client(int client_id, int proc, int request)
+{
+	int i = 0, rc;
+	int found = DHMS_MEM_CLIENT_INVALID;
+
+	for (i = 0; i < MAX_CLIENTS; i++) {
+		if (memblock[i].client_id == client_id &&
+				memblock[i].peripheral == proc) {
+			found = i;
+			break;
+		}
+	}
+	if ((found == DHMS_MEM_CLIENT_INVALID) && !request) {
+		pr_debug("memshare: No registered client, adding a new client\n");
+		/* Add a new client */
+		for (i = 0; i < MAX_CLIENTS; i++) {
+			if (memblock[i].client_id == DHMS_MEM_CLIENT_INVALID) {
+				memblock[i].client_id = client_id;
+				memblock[i].allotted = 0;
+				memblock[i].guarantee = 0;
+				memblock[i].peripheral = proc;
+				found = i;
+
+				/* One-time ramdump device creation per slot */
+				if (!memblock[i].file_created) {
+					rc = mem_share_configure_ramdump(i);
+					if (rc)
+						pr_err("memshare: %s, Cannot create ramdump for client: %d\n",
+							__func__, client_id);
+					else
+						memblock[i].file_created = 1;
+				}
+
+				break;
+			}
+		}
+	}
+
+	return found;
+}
+
+/*
+ * Reset a client slot's bookkeeping after its memory is released.
+ * NOTE(review): size, free_memory, client_id and peripheral are deliberately
+ * left untouched — presumably so the slot stays registered and query-size
+ * keeps working; confirm this is intended.
+ */
+static void free_client(int id)
+{
+	memblock[id].phy_addr = 0;
+	memblock[id].virtual_addr = 0;
+	memblock[id].allotted = 0;
+	memblock[id].guarantee = 0;
+	memblock[id].sequence_id = -1;
+	memblock[id].memory_type = MEMORY_CMA;
+
+}
+
+/*
+ * Populate a generic alloc response from the client slot's state.
+ * *flag == 0 reports success; non-zero reports QMI_ERR_NO_MEMORY_V01.
+ */
+static void fill_alloc_response(struct mem_alloc_generic_resp_msg_v01 *resp,
+						int id, int *flag)
+{
+	resp->sequence_id_valid = 1;
+	resp->sequence_id = memblock[id].sequence_id;
+	resp->dhms_mem_alloc_addr_info_valid = 1;
+	resp->dhms_mem_alloc_addr_info_len = 1;
+	resp->dhms_mem_alloc_addr_info[0].phy_addr = memblock[id].phy_addr;
+	resp->dhms_mem_alloc_addr_info[0].num_bytes = memblock[id].size;
+	if (!*flag) {
+		resp->resp.result = QMI_RESULT_SUCCESS_V01;
+		resp->resp.error = QMI_ERR_NONE_V01;
+	} else {
+		resp->resp.result = QMI_RESULT_FAILURE_V01;
+		resp->resp.error = QMI_ERR_NO_MEMORY_V01;
+	}
+
+}
+
+/*
+ * Reset every client slot to its unregistered state and select
+ * no-kernel-mapping DMA allocations for all subsequent alloc/free calls.
+ */
+static void initialize_client(void)
+{
+	int i;
+
+	for (i = 0; i < MAX_CLIENTS; i++) {
+		memblock[i].allotted = 0;
+		memblock[i].size = 0;
+		memblock[i].guarantee = 0;
+		memblock[i].phy_addr = 0;
+		memblock[i].virtual_addr = 0;
+		memblock[i].client_id = DHMS_MEM_CLIENT_INVALID;
+		memblock[i].peripheral = -1;
+		memblock[i].sequence_id = -1;
+		memblock[i].memory_type = MEMORY_CMA;
+		memblock[i].free_memory = 0;
+		memblock[i].hyp_mapping = 0;
+		memblock[i].file_created = 0;
+	}
+	attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
+}
+
+/*
+ * mem_share_do_ramdump() function initializes the
+ * ramdump segments with the physical address and
+ * size of the memshared clients. Extraction of ramdump
+ * is skipped if memshare client is not allotted
+ * This calls the ramdump api in extracting the
+ * ramdump in elf format.
+ */
+
+/*
+ * mem_share_do_ramdump() function initializes the
+ * ramdump segments with the physical address and
+ * size of the memshared clients. Extraction of ramdump
+ * is skipped if memshare client is not allotted
+ * This calls the ramdump api in extracting the
+ * ramdump in elf format.
+ */
+static int mem_share_do_ramdump(void)
+{
+	int i = 0, ret;
+	char *client_name = NULL;
+
+	for (i = 0; i < num_clients; i++) {
+
+		struct ramdump_segment *ramdump_segments_tmp = NULL;
+
+		switch (i) {
+		case 0:
+			client_name = "GPS";
+			break;
+		case 1:
+			client_name = "FTM";
+			break;
+		case 2:
+			client_name = "DIAG";
+			break;
+		default:
+			pr_err("memshare: no memshare clients registered\n");
+			return -EINVAL;
+		}
+
+		/* Nothing loaned to this client: nothing to dump */
+		if (!memblock[i].allotted) {
+			pr_err("memshare:%s memblock is not allotted\n",
+			client_name);
+			continue;
+		}
+
+		ramdump_segments_tmp = kcalloc(1,
+			sizeof(struct ramdump_segment),
+			GFP_KERNEL);
+		if (!ramdump_segments_tmp)
+			return -ENOMEM;
+
+		ramdump_segments_tmp[0].size = memblock[i].size;
+		ramdump_segments_tmp[0].address = memblock[i].phy_addr;
+
+		pr_debug("memshare: %s:%s client:id: %d:size = %d\n",
+		__func__, client_name, i, memblock[i].size);
+
+		ret = do_elf_ramdump(memshare_ramdump_dev[i],
+					ramdump_segments_tmp, 1);
+		kfree(ramdump_segments_tmp);
+		if (ret < 0) {
+			pr_err("memshare: Unable to dump: %d\n", ret);
+			return ret;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Subsystem-restart notifier for the modem.
+ * - BEFORE_SHUTDOWN: count the restart (bootup_request).
+ * - RAMDUMP_NOTIFICATION: remember that a dump was requested.
+ * - BEFORE_POWERUP: extract loaned-memory ramdumps if enabled.
+ * - AFTER_POWERUP: release non-guaranteed MPSS allocations whose
+ *   free_memory refcount has drained, unmapping them from the hypervisor
+ *   first. Runs under memsh_drv->mem_share.
+ */
+static int modem_notifier_cb(struct notifier_block *this, unsigned long code,
+					void *_cmd)
+{
+	int i;
+	int ret;
+	u32 source_vmlist[2] = {VMID_HLOS, VMID_MSS_MSA};
+	int dest_vmids[1] = {VMID_HLOS};
+	int dest_perms[1] = {PERM_READ|PERM_WRITE|PERM_EXEC};
+	struct notif_data *notifdata = NULL;
+
+	mutex_lock(&memsh_drv->mem_share);
+
+	switch (code) {
+
+	case SUBSYS_BEFORE_SHUTDOWN:
+		bootup_request++;
+		break;
+
+	case SUBSYS_RAMDUMP_NOTIFICATION:
+		ramdump_event = 1;
+		break;
+
+	case SUBSYS_BEFORE_POWERUP:
+		if (_cmd) {
+			notifdata = (struct notif_data *) _cmd;
+		} else {
+			ramdump_event = 0;
+			break;
+		}
+
+		if (notifdata->enable_ramdump && ramdump_event) {
+			pr_debug("memshare: %s, Ramdump collection is enabled\n",
+					__func__);
+			ret = mem_share_do_ramdump();
+			if (ret)
+				pr_err("memshare: Ramdump collection failed\n");
+			ramdump_event = 0;
+		}
+		break;
+
+	case SUBSYS_AFTER_POWERUP:
+		pr_debug("memshare: Modem has booted up\n");
+		for (i = 0; i < MAX_CLIENTS; i++) {
+			/* bootup_request >= 2 means this is a restart, not
+			 * first boot: drop one reference per restart.
+			 */
+			if (memblock[i].free_memory > 0 &&
+					bootup_request >= 2) {
+				memblock[i].free_memory -= 1;
+				pr_debug("memshare: free_memory count: %d for client id: %d\n",
+					memblock[i].free_memory,
+					memblock[i].client_id);
+			}
+
+			if (memblock[i].free_memory == 0) {
+				if (memblock[i].peripheral ==
+					DHMS_MEM_PROC_MPSS_V01 &&
+					!memblock[i].guarantee &&
+					memblock[i].allotted) {
+					pr_debug("memshare: hypervisor unmapping for client id: %d\n",
+						memblock[i].client_id);
+					ret = hyp_assign_phys(
+							memblock[i].phy_addr,
+							memblock[i].size,
+							source_vmlist,
+							2, dest_vmids,
+							dest_perms, 1);
+					if (ret &&
+						memblock[i].hyp_mapping == 1) {
+						/*
+						 * This is an error case as hyp
+						 * mapping was successful
+						 * earlier but during unmap
+						 * it lead to failure.
+						 */
+						pr_err("memshare: %s, failed to unmap the region\n",
+							__func__);
+						memblock[i].hyp_mapping = 1;
+					} else {
+						memblock[i].hyp_mapping = 0;
+					}
+					dma_free_attrs(memsh_drv->dev,
+						memblock[i].size,
+						memblock[i].virtual_addr,
+						memblock[i].phy_addr,
+						attrs);
+					free_client(i);
+				}
+			}
+		}
+		bootup_request++;
+		break;
+
+	default:
+		break;
+	}
+
+	mutex_unlock(&memsh_drv->mem_share);
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block nb = {
+ .notifier_call = modem_notifier_cb,
+};
+
+/*
+ * Share a client's allocation with the modem via the hypervisor:
+ * reassign the region from HLOS-only to HLOS + MSS-MSA (RW for both).
+ * Sets hyp_mapping on success; failures only log (caller still responds).
+ */
+static void shared_hyp_mapping(int client_id)
+{
+	int ret;
+	u32 source_vmlist[1] = {VMID_HLOS};
+	int dest_vmids[2] = {VMID_HLOS, VMID_MSS_MSA};
+	int dest_perms[2] = {PERM_READ|PERM_WRITE,
+				PERM_READ|PERM_WRITE};
+
+	if (client_id == DHMS_MEM_CLIENT_INVALID) {
+		pr_err("memshare: %s, Invalid Client\n", __func__);
+		return;
+	}
+
+	ret = hyp_assign_phys(memblock[client_id].phy_addr,
+			memblock[client_id].size,
+			source_vmlist, 1, dest_vmids,
+			dest_perms, 2);
+
+	if (ret != 0) {
+		pr_err("memshare: hyp_assign_phys failed size=%u err=%d\n",
+				memblock[client_id].size, ret);
+		return;
+	}
+	memblock[client_id].hyp_mapping = 1;
+}
+
+/*
+ * Legacy GPS-only allocation handler (MEM_ALLOC_REQ_MSG_V01).
+ * Allocates only when memblock[GPS].size is still zero; otherwise the
+ * existing handle is returned unchanged.
+ * NOTE(review): memblock[GPS].size is not set here — presumably it is
+ * provisioned elsewhere (e.g. DT probe); confirm, since rc stays 0 and
+ * success is reported when size is already non-zero.
+ */
+static int handle_alloc_req(void *req_h, void *req, void *conn_h)
+{
+	struct mem_alloc_req_msg_v01 *alloc_req;
+	struct mem_alloc_resp_msg_v01 alloc_resp;
+	int rc = 0;
+
+	mutex_lock(&memsh_drv->mem_share);
+	alloc_req = (struct mem_alloc_req_msg_v01 *)req;
+	pr_debug("memshare: %s: Received Alloc Request: alloc_req->num_bytes = %d\n",
+			__func__, alloc_req->num_bytes);
+	if (!memblock[GPS].size) {
+		memset(&alloc_resp, 0, sizeof(alloc_resp));
+		alloc_resp.resp = QMI_RESULT_FAILURE_V01;
+		rc = memshare_alloc(memsh_drv->dev, alloc_req->num_bytes,
+					&memblock[GPS]);
+	}
+	alloc_resp.num_bytes_valid = 1;
+	alloc_resp.num_bytes = alloc_req->num_bytes;
+	alloc_resp.handle_valid = 1;
+	alloc_resp.handle = memblock[GPS].phy_addr;
+	if (rc) {
+		alloc_resp.resp = QMI_RESULT_FAILURE_V01;
+		memblock[GPS].size = 0;
+	} else {
+		alloc_resp.resp = QMI_RESULT_SUCCESS_V01;
+	}
+
+	mutex_unlock(&memsh_drv->mem_share);
+
+	pr_debug("memshare: %s, alloc_resp.num_bytes :%d, alloc_resp.resp :%lx\n",
+			__func__, alloc_resp.num_bytes,
+			(unsigned long int)alloc_resp.resp);
+	rc = qmi_send_resp_from_cb(mem_share_svc_handle, conn_h, req_h,
+			&mem_share_svc_alloc_resp_desc, &alloc_resp,
+			sizeof(alloc_resp));
+	if (rc < 0)
+		pr_err("memshare: %s, Error sending the alloc request: %d\n",
+					__func__, rc);
+
+	return rc;
+}
+
+/*
+ * Handle MEM_ALLOC_GENERIC_REQ: look up (or register) the client slot,
+ * allocate CMA memory on its first request, hyp-map modem allocations,
+ * and send the filled response.
+ *
+ * Fix: the response length passed to qmi_send_resp_from_cb() was
+ * sizeof(alloc_resp) — the size of a POINTER, not of the response
+ * structure — truncating the outgoing buffer. Use sizeof(*alloc_resp).
+ */
+static int handle_alloc_generic_req(void *req_h, void *req, void *conn_h)
+{
+	struct mem_alloc_generic_req_msg_v01 *alloc_req;
+	struct mem_alloc_generic_resp_msg_v01 *alloc_resp;
+	int rc, resp = 0;
+	int client_id;
+	uint32_t size = 0;
+
+	mutex_lock(&memsh_drv->mem_share);
+	alloc_req = (struct mem_alloc_generic_req_msg_v01 *)req;
+	pr_debug("memshare: alloc request client id: %d proc _id: %d\n",
+			alloc_req->client_id, alloc_req->proc_id);
+	alloc_resp = kzalloc(sizeof(*alloc_resp),
+					GFP_KERNEL);
+	if (!alloc_resp) {
+		mutex_unlock(&memsh_drv->mem_share);
+		return -ENOMEM;
+	}
+	alloc_resp->resp.result = QMI_RESULT_FAILURE_V01;
+	alloc_resp->resp.error = QMI_ERR_NO_MEMORY_V01;
+	client_id = check_client(alloc_req->client_id, alloc_req->proc_id,
+								CHECK);
+
+	if (client_id >= MAX_CLIENTS) {
+		pr_err("memshare: %s client not found, requested client: %d, proc_id: %d\n",
+			__func__, alloc_req->client_id,
+			alloc_req->proc_id);
+		kfree(alloc_resp);
+		alloc_resp = NULL;
+		mutex_unlock(&memsh_drv->mem_share);
+		return -EINVAL;
+	}
+
+	if (!memblock[client_id].allotted) {
+		/* Client 1 gets guard bytes appended to its allocation */
+		if (alloc_req->client_id == 1 && alloc_req->num_bytes > 0)
+			size = alloc_req->num_bytes + MEMSHARE_GUARD_BYTES;
+		else
+			size = alloc_req->num_bytes;
+		rc = memshare_alloc(memsh_drv->dev, size,
+					&memblock[client_id]);
+		if (rc) {
+			pr_err("memshare: %s,Unable to allocate memory for requested client\n",
+							__func__);
+			resp = 1;
+		}
+		if (!resp) {
+			memblock[client_id].free_memory += 1;
+			memblock[client_id].allotted = 1;
+			memblock[client_id].size = alloc_req->num_bytes;
+			memblock[client_id].peripheral = alloc_req->proc_id;
+		}
+	}
+	pr_debug("memshare: In %s, free memory count for client id: %d = %d",
+		__func__, memblock[client_id].client_id,
+		memblock[client_id].free_memory);
+
+	memblock[client_id].sequence_id = alloc_req->sequence_id;
+
+	fill_alloc_response(alloc_resp, client_id, &resp);
+	/*
+	 * Perform the Hypervisor mapping in order to avoid XPU violation
+	 * to the allocated region for Modem Clients
+	 */
+	if (!memblock[client_id].hyp_mapping &&
+		memblock[client_id].allotted)
+		shared_hyp_mapping(client_id);
+	mutex_unlock(&memsh_drv->mem_share);
+	pr_debug("memshare: alloc_resp.num_bytes :%d, alloc_resp.resp.result :%lx\n",
+			  alloc_resp->dhms_mem_alloc_addr_info[0].num_bytes,
+			  (unsigned long int)alloc_resp->resp.result);
+	rc = qmi_send_resp_from_cb(mem_share_svc_handle, conn_h, req_h,
+			&mem_share_svc_alloc_generic_resp_desc, alloc_resp,
+			sizeof(*alloc_resp));
+
+	if (rc < 0)
+		pr_err("memshare: %s, Error sending the alloc request: %d\n",
+							__func__, rc);
+
+	kfree(alloc_resp);
+	alloc_resp = NULL;
+	return rc;
+}
+
+/*
+ * Legacy GPS-only free handler (MEM_FREE_REQ_MSG_V01).
+ * NOTE(review): free_req->handle (a caller-supplied physical address) is
+ * passed straight to dma_free_coherent without validating it against the
+ * recorded memblock[GPS].phy_addr — a hostile/buggy remote could free an
+ * arbitrary address; verify whether a check is required here.
+ * Also always reports SUCCESS, even on the guaranteed-client path.
+ */
+static int handle_free_req(void *req_h, void *req, void *conn_h)
+{
+	struct mem_free_req_msg_v01 *free_req;
+	struct mem_free_resp_msg_v01 free_resp;
+	int rc;
+
+	mutex_lock(&memsh_drv->mem_free);
+	if (!memblock[GPS].guarantee) {
+		free_req = (struct mem_free_req_msg_v01 *)req;
+		pr_debug("memshare: %s: Received Free Request\n", __func__);
+		memset(&free_resp, 0, sizeof(free_resp));
+		dma_free_coherent(memsh_drv->dev, memblock[GPS].size,
+			memblock[GPS].virtual_addr,
+				free_req->handle);
+	}
+	free_resp.resp = QMI_RESULT_SUCCESS_V01;
+	mutex_unlock(&memsh_drv->mem_free);
+	rc = qmi_send_resp_from_cb(mem_share_svc_handle, conn_h, req_h,
+			&mem_share_svc_free_resp_desc, &free_resp,
+			sizeof(free_resp));
+	if (rc < 0)
+		pr_err("memshare: %s, Error sending the free request: %d\n",
+					__func__, rc);
+
+	return rc;
+}
+
+/*
+ * Handle MEM_FREE_GENERIC_REQ: release a non-guaranteed client's
+ * allocation (dma_free_attrs + slot reset) and report the outcome.
+ * Lookup uses FREE mode, so an unknown client is NOT auto-registered.
+ */
+static int handle_free_generic_req(void *req_h, void *req, void *conn_h)
+{
+	struct mem_free_generic_req_msg_v01 *free_req;
+	struct mem_free_generic_resp_msg_v01 free_resp;
+	int rc;
+	int flag = 0;
+	uint32_t client_id;
+
+	mutex_lock(&memsh_drv->mem_free);
+	free_req = (struct mem_free_generic_req_msg_v01 *)req;
+	pr_debug("memshare: %s: Received Free Request\n", __func__);
+	memset(&free_resp, 0, sizeof(free_resp));
+	free_resp.resp.error = QMI_ERR_INTERNAL_V01;
+	free_resp.resp.result = QMI_RESULT_FAILURE_V01;
+	pr_debug("memshare: Client id: %d proc id: %d\n", free_req->client_id,
+				free_req->proc_id);
+	client_id = check_client(free_req->client_id, free_req->proc_id, FREE);
+	if (client_id == DHMS_MEM_CLIENT_INVALID) {
+		pr_err("memshare: %s, Invalid client request to free memory\n",
+					__func__);
+		flag = 1;
+	} else if (!memblock[client_id].guarantee &&
+					memblock[client_id].allotted) {
+		pr_debug("memshare: %s: size: %d",
+				__func__, memblock[client_id].size);
+		dma_free_attrs(memsh_drv->dev, memblock[client_id].size,
+			memblock[client_id].virtual_addr,
+			memblock[client_id].phy_addr,
+			attrs);
+		free_client(client_id);
+	} else {
+		/* Guaranteed clients keep their memory across frees */
+		pr_err("memshare: %s, Request came for a guaranteed client cannot free up the memory\n",
+						__func__);
+	}
+
+	if (flag) {
+		free_resp.resp.result = QMI_RESULT_FAILURE_V01;
+		free_resp.resp.error = QMI_ERR_INVALID_ID_V01;
+	} else {
+		free_resp.resp.result = QMI_RESULT_SUCCESS_V01;
+		free_resp.resp.error = QMI_ERR_NONE_V01;
+	}
+
+	mutex_unlock(&memsh_drv->mem_free);
+	rc = qmi_send_resp_from_cb(mem_share_svc_handle, conn_h, req_h,
+		&mem_share_svc_free_generic_resp_desc, &free_resp,
+		sizeof(free_resp));
+
+	if (rc < 0)
+		pr_err("memshare: %s, Error sending the free request: %d\n",
+					__func__, rc);
+
+	return rc;
+}
+
+/*
+ * Handle MEM_QUERY_SIZE_REQ: report the size provisioned for the
+ * (client, proc) pair; size 0 is reported (with size_valid set) when
+ * nothing is provisioned.
+ *
+ * Fix: the response length passed to qmi_send_resp_from_cb() was
+ * sizeof(query_resp) — the size of a POINTER, not of the response
+ * structure — truncating the outgoing buffer. Use sizeof(*query_resp).
+ */
+static int handle_query_size_req(void *req_h, void *req, void *conn_h)
+{
+	int rc, client_id;
+	struct mem_query_size_req_msg_v01 *query_req;
+	struct mem_query_size_rsp_msg_v01 *query_resp;
+
+	mutex_lock(&memsh_drv->mem_share);
+	query_req = (struct mem_query_size_req_msg_v01 *)req;
+	query_resp = kzalloc(sizeof(*query_resp),
+					GFP_KERNEL);
+	if (!query_resp) {
+		mutex_unlock(&memsh_drv->mem_share);
+		return -ENOMEM;
+	}
+	pr_debug("memshare: query request client id: %d proc _id: %d\n",
+		query_req->client_id, query_req->proc_id);
+	client_id = check_client(query_req->client_id, query_req->proc_id,
+								CHECK);
+
+	if (client_id >= MAX_CLIENTS) {
+		pr_err("memshare: %s client not found, requested client: %d, proc_id: %d\n",
+				__func__, query_req->client_id,
+				query_req->proc_id);
+		kfree(query_resp);
+		query_resp = NULL;
+		mutex_unlock(&memsh_drv->mem_share);
+		return -EINVAL;
+	}
+
+	if (memblock[client_id].size) {
+		query_resp->size_valid = 1;
+		query_resp->size = memblock[client_id].size;
+	} else {
+		query_resp->size_valid = 1;
+		query_resp->size = 0;
+	}
+	query_resp->resp.result = QMI_RESULT_SUCCESS_V01;
+	query_resp->resp.error = QMI_ERR_NONE_V01;
+	mutex_unlock(&memsh_drv->mem_share);
+
+	pr_debug("memshare: query_resp.size :%d, query_resp.resp.result :%lx\n",
+			  query_resp->size,
+			  (unsigned long int)query_resp->resp.result);
+	rc = qmi_send_resp_from_cb(mem_share_svc_handle, conn_h, req_h,
+			&mem_share_svc_size_query_resp_desc, query_resp,
+			sizeof(*query_resp));
+
+	if (rc < 0)
+		pr_err("memshare: %s, Error sending the query request: %d\n",
+							__func__, rc);
+
+	kfree(query_resp);
+	query_resp = NULL;
+	return rc;
+}
+
+/* QMI connect callback: accept any connection on our own handle. */
+static int mem_share_svc_connect_cb(struct qmi_handle *handle,
+			       void *conn_h)
+{
+	if (mem_share_svc_handle != handle || !conn_h)
+		return -EINVAL;
+
+	return 0;
+}
+
+/* QMI disconnect callback: no per-connection state to tear down. */
+static int mem_share_svc_disconnect_cb(struct qmi_handle *handle,
+				  void *conn_h)
+{
+	if (mem_share_svc_handle != handle || !conn_h)
+		return -EINVAL;
+
+	return 0;
+}
+
+/*
+ * Map an incoming message ID to its request descriptor.
+ * Returns the decoded request struct size (positive) or -ENOTSUPP.
+ */
+static int mem_share_svc_req_desc_cb(unsigned int msg_id,
+				struct msg_desc **req_desc)
+{
+	int rc;
+
+	pr_debug("memshare: %s\n", __func__);
+	switch (msg_id) {
+	case MEM_ALLOC_REQ_MSG_V01:
+		*req_desc = &mem_share_svc_alloc_req_desc;
+		rc = sizeof(struct mem_alloc_req_msg_v01);
+		break;
+
+	case MEM_FREE_REQ_MSG_V01:
+		*req_desc = &mem_share_svc_free_req_desc;
+		rc = sizeof(struct mem_free_req_msg_v01);
+		break;
+
+	case MEM_ALLOC_GENERIC_REQ_MSG_V01:
+		*req_desc = &mem_share_svc_alloc_generic_req_desc;
+		rc = sizeof(struct mem_alloc_generic_req_msg_v01);
+		break;
+
+	case MEM_FREE_GENERIC_REQ_MSG_V01:
+		*req_desc = &mem_share_svc_free_generic_req_desc;
+		rc = sizeof(struct mem_free_generic_req_msg_v01);
+		break;
+
+	case MEM_QUERY_SIZE_REQ_MSG_V01:
+		*req_desc = &mem_share_svc_size_query_req_desc;
+		rc = sizeof(struct mem_query_size_req_msg_v01);
+		break;
+
+	default:
+		rc = -ENOTSUPP;
+		break;
+	}
+	return rc;
+}
+
+/* Dispatch a decoded QMI request to the matching handler. */
+static int mem_share_svc_req_cb(struct qmi_handle *handle, void *conn_h,
+			void *req_h, unsigned int msg_id, void *req)
+{
+	int rc;
+
+	pr_debug("memshare: %s\n", __func__);
+	if (mem_share_svc_handle != handle || !conn_h)
+		return -EINVAL;
+
+	switch (msg_id) {
+	case MEM_ALLOC_REQ_MSG_V01:
+		rc = handle_alloc_req(req_h, req, conn_h);
+		break;
+
+	case MEM_FREE_REQ_MSG_V01:
+		rc = handle_free_req(req_h, req, conn_h);
+		break;
+
+	case MEM_ALLOC_GENERIC_REQ_MSG_V01:
+		rc = handle_alloc_generic_req(req_h, req, conn_h);
+		break;
+
+	case MEM_FREE_GENERIC_REQ_MSG_V01:
+		rc = handle_free_generic_req(req_h, req, conn_h);
+		break;
+
+	case MEM_QUERY_SIZE_REQ_MSG_V01:
+		rc = handle_query_size_req(req_h, req, conn_h);
+		break;
+
+	default:
+		rc = -ENOTSUPP;
+		break;
+	}
+	return rc;
+}
+
+static void mem_share_svc_recv_msg(struct work_struct *work)
+{
+	int rc;
+
+	pr_debug("memshare: %s\n", __func__);
+
+	/* Drain all queued QMI messages; qmi_recv_msg() returns non-zero
+	 * once the queue is empty or on error. */
+	for (;;) {
+		rc = qmi_recv_msg(mem_share_svc_handle);
+		pr_debug("memshare: %s: Notified about a Receive Event",
+			__func__);
+		if (rc)
+			break;
+	}
+
+	/* -ENOMSG simply means the queue ran dry; anything else is real. */
+	if (rc != -ENOMSG)
+		pr_err("memshare: %s: Error = %d while receiving message\n",
+			__func__, rc);
+}
+
+static void qmi_mem_share_svc_ntfy(struct qmi_handle *handle,
+		enum qmi_event_type event, void *priv)
+{
+	pr_debug("memshare: %s\n", __func__);
+
+	/* Only receive events are of interest; defer the actual message
+	 * reception to the service workqueue. */
+	if (event != QMI_RECV_MSG)
+		return;
+
+	queue_delayed_work(mem_share_svc_workqueue, &work_recv_msg, 0);
+}
+
+/* QMI service registration parameters and connection/request callbacks
+ * for the memshare (DHMS) service. */
+static struct qmi_svc_ops_options mem_share_svc_ops_options = {
+	.version = 1,
+	.service_id = MEM_SHARE_SERVICE_SVC_ID,
+	.service_vers = MEM_SHARE_SERVICE_VERS,
+	.service_ins = MEM_SHARE_SERVICE_INS_ID,
+	.connect_cb = mem_share_svc_connect_cb,
+	.disconnect_cb = mem_share_svc_disconnect_cb,
+	.req_desc_cb = mem_share_svc_req_desc_cb,
+	.req_cb = mem_share_svc_req_cb,
+};
+
+/* Allocate a DMA-coherent block of block_size bytes for pblk.
+ * The physical address is reported back via pblk->phy_addr.
+ * Returns 0 on success, -ENOMEM on bad argument or allocation failure. */
+int memshare_alloc(struct device *dev,
+					unsigned int block_size,
+					struct mem_blocks *pblk)
+{
+	pr_debug("memshare: %s", __func__);
+
+	if (pblk == NULL) {
+		pr_err("memshare: %s: Failed memory block allocation\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	pblk->virtual_addr = dma_alloc_attrs(dev, block_size, &pblk->phy_addr,
+					     GFP_KERNEL, attrs);
+
+	return pblk->virtual_addr ? 0 : -ENOMEM;
+}
+
+/* Asynchronous bring-up of the memshare QMI service: dedicated
+ * workqueue first, then the QMI handle, then service registration.
+ * On failure, tear down what was created AND reset the globals to
+ * NULL so that memshare_remove() and qmi_mem_share_svc_ntfy() can
+ * never operate on freed objects (use-after-free otherwise). */
+static void memshare_init_worker(struct work_struct *work)
+{
+	int rc;
+
+	mem_share_svc_workqueue =
+		create_singlethread_workqueue("mem_share_svc");
+	if (!mem_share_svc_workqueue)
+		return;
+
+	mem_share_svc_handle = qmi_handle_create(qmi_mem_share_svc_ntfy, NULL);
+	if (!mem_share_svc_handle) {
+		pr_err("memshare: %s: Creating mem_share_svc qmi handle failed\n",
+			__func__);
+		destroy_workqueue(mem_share_svc_workqueue);
+		mem_share_svc_workqueue = NULL;
+		return;
+	}
+	rc = qmi_svc_register(mem_share_svc_handle, &mem_share_svc_ops_options);
+	if (rc < 0) {
+		pr_err("memshare: %s: Registering mem share svc failed %d\n",
+			__func__, rc);
+		qmi_handle_destroy(mem_share_svc_handle);
+		mem_share_svc_handle = NULL;
+		destroy_workqueue(mem_share_svc_workqueue);
+		mem_share_svc_workqueue = NULL;
+		return;
+	}
+	pr_debug("memshare: memshare_init successful\n");
+}
+
+/* Probe one per-client memshare child DT node: read its size/id/label,
+ * record it in the global memblock[] table and, for "guaranteed"
+ * clients, carve the memory out immediately at boot. */
+static int memshare_child_probe(struct platform_device *pdev)
+{
+	int rc;
+	uint32_t size, client_id;
+	const char *name;
+	struct memshare_child *drv;
+
+	/*
+	 * Clients are tracked in the global memblock[] table, which is
+	 * sized MAX_CLIENTS (see msm_memshare.h).  Reject further child
+	 * nodes once the table is full instead of indexing out of bounds.
+	 */
+	if (num_clients >= MAX_CLIENTS) {
+		pr_err("memshare: %s, client limit (%d) reached\n",
+			__func__, MAX_CLIENTS);
+		return -EINVAL;
+	}
+
+	drv = devm_kzalloc(&pdev->dev, sizeof(struct memshare_child),
+						GFP_KERNEL);
+
+	if (!drv)
+		return -ENOMEM;
+
+	drv->dev = &pdev->dev;
+	memsh_child = drv;
+	platform_set_drvdata(pdev, memsh_child);
+
+	rc = of_property_read_u32(pdev->dev.of_node, "qcom,peripheral-size",
+						&size);
+	if (rc) {
+		pr_err("memshare: %s, Error reading size of clients, rc: %d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	rc = of_property_read_u32(pdev->dev.of_node, "qcom,client-id",
+						&client_id);
+	if (rc) {
+		pr_err("memshare: %s, Error reading client id, rc: %d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	/* Guaranteed clients get their memory carved out at boot time. */
+	memblock[num_clients].guarantee = of_property_read_bool(
+							pdev->dev.of_node,
+							"qcom,allocate-boot-time");
+
+	rc = of_property_read_string(pdev->dev.of_node, "label",
+						&name);
+	if (rc) {
+		pr_err("memshare: %s, Error reading peripheral info for client, rc: %d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	/* Map the DT label onto the QMI peripheral id; an unknown label
+	 * leaves .peripheral at its zero-initialized value, matching the
+	 * original behaviour. */
+	if (strcmp(name, "modem") == 0)
+		memblock[num_clients].peripheral = DHMS_MEM_PROC_MPSS_V01;
+	else if (strcmp(name, "adsp") == 0)
+		memblock[num_clients].peripheral = DHMS_MEM_PROC_ADSP_V01;
+	else if (strcmp(name, "wcnss") == 0)
+		memblock[num_clients].peripheral = DHMS_MEM_PROC_WCNSS_V01;
+
+	memblock[num_clients].size = size;
+	memblock[num_clients].client_id = client_id;
+
+	/*
+	 * Memshare allocation for guaranteed clients
+	 */
+	if (memblock[num_clients].guarantee && size > 0) {
+		/* Client 1 additionally reserves a guard region. */
+		if (client_id == 1)
+			size += MEMSHARE_GUARD_BYTES;
+		rc = memshare_alloc(memsh_child->dev,
+					size,
+					&memblock[num_clients]);
+		if (rc) {
+			pr_err("memshare: %s, Unable to allocate memory for guaranteed clients, rc: %d\n",
+				__func__, rc);
+			return rc;
+		}
+		memblock[num_clients].allotted = 1;
+		shared_hyp_mapping(num_clients);
+	}
+
+	/*
+	 * call for creating ramdump dev handlers for
+	 * memshare clients
+	 */
+
+	memshare_dev[num_clients] = &pdev->dev;
+
+	if (!memblock[num_clients].file_created) {
+		rc = mem_share_configure_ramdump(num_clients);
+		if (rc)
+			pr_err("memshare: %s, cannot collect dumps for client id: %d\n",
+				__func__,
+				memblock[num_clients].client_id);
+		else
+			memblock[num_clients].file_created = 1;
+	}
+
+	num_clients++;
+
+	return 0;
+}
+
+/* Parent driver probe: initializes driver state, kicks off the QMI
+ * service bring-up asynchronously and populates the per-client child
+ * devices from the DT. */
+static int memshare_probe(struct platform_device *pdev)
+{
+	int rc;
+	struct memshare_driver *drv;
+
+	drv = devm_kzalloc(&pdev->dev, sizeof(struct memshare_driver),
+			GFP_KERNEL);
+
+	if (!drv)
+		return -ENOMEM;
+
+	/* Memory allocation has been done successfully */
+	mutex_init(&drv->mem_free);
+	mutex_init(&drv->mem_share);
+
+	/* QMI service creation/registration runs on the system workqueue;
+	 * see memshare_init_worker().  It touches only the mem_share_svc_*
+	 * globals, not drv, so scheduling before drv is fully set up is
+	 * tolerated here. */
+	INIT_WORK(&drv->memshare_init_work, memshare_init_worker);
+	schedule_work(&drv->memshare_init_work);
+
+	drv->dev = &pdev->dev;
+	memsh_drv = drv;
+	platform_set_drvdata(pdev, memsh_drv);
+	initialize_client();
+	num_clients = 0;
+
+	/* Triggers memshare_child_probe() for each child DT node. */
+	rc = of_platform_populate(pdev->dev.of_node, NULL, NULL,
+				&pdev->dev);
+
+	if (rc) {
+		pr_err("memshare: %s, error populating the devices\n",
+			__func__);
+		return rc;
+	}
+
+	/* Track modem restarts via SSR notifications.
+	 * NOTE(review): the registration return value is ignored here. */
+	subsys_notif_register_notifier("modem", &nb);
+	pr_debug("memshare: %s, Memshare inited\n", __func__);
+
+	return 0;
+}
+
+/* Parent driver removal: tear down the QMI service and its workqueue.
+ * Both are created asynchronously in memshare_init_worker(), which may
+ * have failed or not run yet, so each pointer is checked before use
+ * (the original dereferenced them unconditionally) and reset to NULL
+ * afterwards to make repeated teardown safe. */
+static int memshare_remove(struct platform_device *pdev)
+{
+	if (!memsh_drv)
+		return 0;
+
+	if (mem_share_svc_handle)
+		qmi_svc_unregister(mem_share_svc_handle);
+	if (mem_share_svc_workqueue) {
+		flush_workqueue(mem_share_svc_workqueue);
+		destroy_workqueue(mem_share_svc_workqueue);
+		mem_share_svc_workqueue = NULL;
+	}
+	if (mem_share_svc_handle) {
+		qmi_handle_destroy(mem_share_svc_handle);
+		mem_share_svc_handle = NULL;
+	}
+
+	return 0;
+}
+
+/* Child driver removal: nothing is torn down per child here. */
+static int memshare_child_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+/* OF match for the parent memshare node. */
+static const struct of_device_id memshare_match_table[] = {
+	{
+		.compatible = "qcom,memshare",
+	},
+	{}
+};
+
+/* OF match for the per-client child nodes. */
+static const struct of_device_id memshare_match_table1[] = {
+	{
+		.compatible = "qcom,memshare-peripheral",
+	},
+	{}
+};
+
+
+/* Parent driver: brings up the QMI service and populates children. */
+static struct platform_driver memshare_pdriver = {
+	.probe          = memshare_probe,
+	.remove         = memshare_remove,
+	.driver = {
+		.name   = MEMSHARE_DEV_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = memshare_match_table,
+	},
+};
+
+/* Child driver: one instance per memshare client DT node. */
+static struct platform_driver memshare_pchild = {
+	.probe          = memshare_child_probe,
+	.remove         = memshare_child_remove,
+	.driver = {
+		.name   = MEMSHARE_CHILD_DEV_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = memshare_match_table1,
+	},
+};
+
+/* NOTE(review): two module_platform_driver() invocations in one file
+ * generate two module init/exit entry points; this links when built-in
+ * but would clash as a loadable module — confirm build configuration. */
+module_platform_driver(memshare_pdriver);
+module_platform_driver(memshare_pchild);
+
+MODULE_DESCRIPTION("Mem Share QMI Service Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/memshare/msm_memshare.h b/drivers/soc/qcom/memshare/msm_memshare.h
new file mode 100644
index 0000000..ca11137
--- /dev/null
+++ b/drivers/soc/qcom/memshare/msm_memshare.h
@@ -0,0 +1,65 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_MEM_SHARE_H
+#define _LINUX_MEM_SHARE_H
+
+/* QMI service identity for the memshare (DHMS) service. */
+#define MEM_SHARE_SERVICE_SVC_ID 0x00000034
+#define MEM_SHARE_SERVICE_INS_ID 1
+#define MEM_SHARE_SERVICE_VERS 1
+
+/* Allocation backing type for a block. */
+#define MEMORY_CMA 1
+#define MEMORY_NON_CMA 0
+/* Capacity of the global client table (memblock[]). */
+#define MAX_CLIENTS 10
+/* NOTE(review): GPS/CHECK/FREE semantics are not defined here —
+ * confirm their meaning at the call sites in msm_memshare.c. */
+#define GPS 0
+#define CHECK 0
+#define FREE 1
+/* Guard region appended to a client's boot-time allocation. */
+#define MEMSHARE_GUARD_BYTES (4*1024)
+
+/* Book-keeping for one memshare client's memory block. */
+struct mem_blocks {
+	/* Client Id information */
+	uint32_t client_id;
+	/* Peripheral associated with client */
+	uint32_t peripheral;
+	/* Sequence Id */
+	uint32_t sequence_id;
+	/* CMA or Non-CMA region */
+	uint32_t memory_type;
+	/* Guaranteed Memory */
+	uint32_t guarantee;
+	/* Memory allotted or not */
+	uint32_t allotted;
+	/* Size required for client */
+	uint32_t size;
+	/*
+	 * start address of the memory block reserved by server memory
+	 * subsystem to client
+	 */
+	phys_addr_t phy_addr;
+	/* Virtual address for the physical address allocated */
+	void *virtual_addr;
+	/* Release memory only when XPU is released*/
+	uint8_t free_memory;
+	/* Need Hypervisor mapping*/
+	uint8_t hyp_mapping;
+	/* Status flag which checks if ramdump file is created*/
+	int file_created;
+
+};
+
+/* Allocate a DMA-coherent block for pblk; returns 0 or -ENOMEM. */
+int memshare_alloc(struct device *dev,
+					unsigned int block_size,
+					struct mem_blocks *pblk);
+/* Free a block previously obtained via memshare_alloc(). */
+void memshare_free(unsigned int block_size,
+					struct mem_blocks *pblk);
+#endif /* _LINUX_MEM_SHARE_H */
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c
index e38c53e..aa6c5d7 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c
@@ -40,8 +40,6 @@
};
static struct handle_type handle_list;
-static LIST_HEAD(input_list);
-static LIST_HEAD(apply_list);
static LIST_HEAD(commit_list);
static LIST_HEAD(late_init_clist);
static LIST_HEAD(query_list);
@@ -780,84 +778,21 @@
return;
}
-static void del_inp_list(struct list_head *list)
-{
- struct rule_update_path_info *rule_node;
- struct rule_update_path_info *rule_node_tmp;
-
- list_for_each_entry_safe(rule_node, rule_node_tmp, list, link) {
- list_del(&rule_node->link);
- rule_node->added = false;
- }
-}
-
-static void del_op_list(struct list_head *list)
-{
- struct rule_apply_rcm_info *rule;
- struct rule_apply_rcm_info *rule_tmp;
-
- list_for_each_entry_safe(rule, rule_tmp, list, link)
- list_del(&rule->link);
-}
-
-static int msm_bus_apply_rules(struct list_head *list, bool after_clk_commit)
-{
- struct rule_apply_rcm_info *rule;
- struct device *dev = NULL;
- struct msm_bus_node_device_type *dev_info = NULL;
- int ret = 0;
-
- list_for_each_entry(rule, list, link) {
- if (!rule)
- continue;
-
- if (rule && (rule->after_clk_commit != after_clk_commit))
- continue;
-
- dev = bus_find_device(&msm_bus_type, NULL,
- (void *) &rule->id,
- msm_bus_device_match_adhoc);
-
- if (!dev) {
- MSM_BUS_ERR("Can't find dev node for %d", rule->id);
- continue;
- }
- dev_info = to_msm_bus_node(dev);
-
- ret = msm_bus_enable_limiter(dev_info, rule->throttle,
- rule->lim_bw);
- if (ret)
- MSM_BUS_ERR("Failed to set limiter for %d", rule->id);
- }
-
- return ret;
-}
-
static void commit_data(void)
{
- bool rules_registered = msm_rule_are_rules_registered();
-
- if (rules_registered) {
- msm_rules_update_path(&input_list, &apply_list);
- msm_bus_apply_rules(&apply_list, false);
- }
-
msm_bus_commit_data(&commit_list);
-
- if (rules_registered) {
- msm_bus_apply_rules(&apply_list, true);
- del_inp_list(&input_list);
- del_op_list(&apply_list);
- }
- INIT_LIST_HEAD(&input_list);
- INIT_LIST_HEAD(&apply_list);
INIT_LIST_HEAD(&commit_list);
}
-int commit_late_init_data(void)
+int commit_late_init_data(bool lock)
{
int rc;
- rt_mutex_lock(&msm_bus_adhoc_lock);
+
+ if (lock) {
+ rt_mutex_lock(&msm_bus_adhoc_lock);
+ return 0;
+ }
+
rc = bus_for_each_dev(&msm_bus_type, NULL, NULL,
bcm_remove_handoff_req);
@@ -904,8 +839,6 @@
struct msm_bus_node_device_type *dev_info = NULL;
int curr_idx;
int ret = 0;
- struct rule_update_path_info *rule_node;
- bool rules_registered = msm_rule_are_rules_registered();
if (IS_ERR_OR_NULL(src_dev)) {
MSM_BUS_ERR("%s: No source device", __func__);
@@ -953,19 +886,6 @@
add_node_to_clist(dev_info);
- if (rules_registered) {
- rule_node = &dev_info->node_info->rule;
- rule_node->id = dev_info->node_info->id;
- rule_node->ib = dev_info->node_bw[ACTIVE_CTX].max_ib;
- rule_node->ab = dev_info->node_bw[ACTIVE_CTX].sum_ab;
- rule_node->clk =
- dev_info->node_bw[ACTIVE_CTX].cur_clk_hz;
- if (!rule_node->added) {
- list_add_tail(&rule_node->link, &input_list);
- rule_node->added = true;
- }
- }
-
next_dev = lnode->next_dev;
curr_idx = lnode->next;
}
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_dbg.c b/drivers/soc/qcom/msm_bus/msm_bus_dbg.c
index 015edb3..df29233 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_dbg.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_dbg.c
@@ -38,6 +38,7 @@
static struct dentry *clients;
static struct dentry *dir;
static DEFINE_MUTEX(msm_bus_dbg_fablist_lock);
+static DEFINE_RT_MUTEX(msm_bus_dbg_cllist_lock);
struct msm_bus_dbg_state {
uint32_t cl;
uint8_t enable;
@@ -289,7 +290,9 @@
struct msm_bus_cldata *cldata = NULL;
const struct msm_bus_client_handle *handle = file->private_data;
int found = 0;
+ ssize_t ret;
+ rt_mutex_lock(&msm_bus_dbg_cllist_lock);
list_for_each_entry(cldata, &cl_list, list) {
if ((cldata->clid == cl) ||
(cldata->handle && (cldata->handle == handle))) {
@@ -298,12 +301,17 @@
}
}
- if (!found)
+ if (!found) {
+ rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
return 0;
+ }
bsize = cldata->size;
- return simple_read_from_buffer(buf, count, ppos,
+ ret = simple_read_from_buffer(buf, count, ppos,
cldata->buffer, bsize);
+ rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+
+ return ret;
}
static int client_data_open(struct inode *inode, struct file *file)
@@ -339,7 +347,9 @@
return -ENOMEM;
}
cldata->handle = pdata;
+ rt_mutex_lock(&msm_bus_dbg_cllist_lock);
list_add_tail(&cldata->list, &cl_list);
+ rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
return 0;
}
@@ -352,6 +362,7 @@
bool found = false;
char *buf = NULL;
+ rt_mutex_lock(&msm_bus_dbg_cllist_lock);
list_for_each_entry(cldata, &cl_list, list) {
if (cldata->handle == pdata) {
found = true;
@@ -359,12 +370,15 @@
}
}
- if (!found)
+ if (!found) {
+ rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
return -ENOENT;
+ }
if (cldata->file == NULL) {
if (pdata->name == NULL) {
MSM_BUS_DBG("Client doesn't have a name\n");
+ rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
return -EINVAL;
}
cldata->file = debugfs_create_file(pdata->name, S_IRUGO,
@@ -393,6 +407,7 @@
i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%llu ", ib);
i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n");
cldata->size = i;
+ rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
trace_bus_update_request((int)ts.tv_sec, (int)ts.tv_nsec,
pdata->name, pdata->mas, pdata->slv, ab, ib);
@@ -404,6 +419,7 @@
{
struct msm_bus_cldata *cldata = NULL;
+ rt_mutex_lock(&msm_bus_dbg_cllist_lock);
list_for_each_entry(cldata, &cl_list, list) {
if (cldata->handle == pdata) {
debugfs_remove(cldata->file);
@@ -412,6 +428,7 @@
break;
}
}
+ rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
}
static int msm_bus_dbg_record_client(const struct msm_bus_scale_pdata *pdata,
@@ -429,7 +446,9 @@
cldata->clid = clid;
cldata->file = file;
cldata->size = 0;
+ rt_mutex_lock(&msm_bus_dbg_cllist_lock);
list_add_tail(&cldata->list, &cl_list);
+ rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
return 0;
}
@@ -437,13 +456,16 @@
{
struct msm_bus_cldata *cldata = NULL;
+ rt_mutex_lock(&msm_bus_dbg_cllist_lock);
list_for_each_entry(cldata, &cl_list, list) {
if (cldata->clid == clid) {
+ debugfs_remove(cldata->file);
list_del(&cldata->list);
kfree(cldata);
break;
}
}
+ rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
}
static int msm_bus_dbg_fill_cl_buffer(const struct msm_bus_scale_pdata *pdata,
@@ -455,6 +477,7 @@
struct timespec ts;
int found = 0;
+ rt_mutex_lock(&msm_bus_dbg_cllist_lock);
list_for_each_entry(cldata, &cl_list, list) {
if (cldata->clid == clid) {
found = 1;
@@ -462,11 +485,14 @@
}
}
- if (!found)
+ if (!found) {
+ rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
return -ENOENT;
+ }
if (cldata->file == NULL) {
if (pdata->name == NULL) {
+ rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
MSM_BUS_DBG("Client doesn't have a name\n");
return -EINVAL;
}
@@ -514,21 +540,11 @@
cldata->index = index;
cldata->size = i;
+ rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+
return i;
}
-static int msm_bus_dbg_update_request(struct msm_bus_cldata *cldata, int index)
-{
- int ret = 0;
-
- if ((index < 0) || (index > cldata->pdata->num_usecases)) {
- MSM_BUS_DBG("Invalid index!\n");
- return -EINVAL;
- }
- ret = msm_bus_scale_client_update_request(cldata->clid, index);
- return ret;
-}
-
static ssize_t msm_bus_dbg_update_request_write(struct file *file,
const char __user *ubuf, size_t cnt, loff_t *ppos)
{
@@ -538,19 +554,26 @@
char *chid;
char *buf = kmalloc((sizeof(char) * (cnt + 1)), GFP_KERNEL);
int found = 0;
+ uint32_t clid;
+ ssize_t res = cnt;
if (!buf || IS_ERR(buf)) {
MSM_BUS_ERR("Memory allocation for buffer failed\n");
return -ENOMEM;
}
- if (cnt == 0)
- return 0;
- if (copy_from_user(buf, ubuf, cnt))
- return -EFAULT;
+ if (cnt == 0) {
+ res = 0;
+ goto out;
+ }
+ if (copy_from_user(buf, ubuf, cnt)) {
+ res = -EFAULT;
+ goto out;
+ }
buf[cnt] = '\0';
chid = buf;
MSM_BUS_DBG("buffer: %s\n size: %zu\n", buf, sizeof(ubuf));
+ rt_mutex_lock(&msm_bus_dbg_cllist_lock);
list_for_each_entry(cldata, &cl_list, list) {
if (strnstr(chid, cldata->pdata->name, cnt)) {
found = 1;
@@ -559,23 +582,37 @@
if (chid) {
ret = kstrtoul(chid, 10, &index);
if (ret) {
- MSM_BUS_DBG("Index conversion failed\n"
- );
- return -EFAULT;
+ MSM_BUS_DBG("Index conversion\n"
+ " failed\n");
+ rt_mutex_unlock(
+ &msm_bus_dbg_cllist_lock);
+ res = -EFAULT;
+ goto out;
}
} else {
- MSM_BUS_DBG("Error parsing input.\n"
- "Index not found\n");
+ MSM_BUS_DBG("Error parsing input. Index not\n"
+ " found\n");
found = 0;
}
+ if ((index < 0) ||
+ (index > cldata->pdata->num_usecases)) {
+ MSM_BUS_DBG("Invalid index!\n");
+ rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+ res = -EINVAL;
+ goto out;
+ }
+ clid = cldata->clid;
break;
}
}
+ rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
if (found)
- msm_bus_dbg_update_request(cldata, index);
+ msm_bus_scale_client_update_request(clid, index);
+
+out:
kfree(buf);
- return cnt;
+ return res;
}
/**
@@ -598,8 +635,10 @@
break;
}
}
- if (!found)
+ if (!found) {
+ mutex_unlock(&msm_bus_dbg_fablist_lock);
return -ENOENT;
+ }
bsize = fablist->size;
ret = simple_read_from_buffer(buf, count, ppos,
fablist->buffer, bsize);
@@ -689,8 +728,10 @@
break;
}
}
- if (!found)
+ if (!found) {
+ mutex_unlock(&msm_bus_dbg_fablist_lock);
return -ENOENT;
+ }
if (fablist->file == NULL) {
MSM_BUS_DBG("Fabric dbg entry does not exist\n");
@@ -741,6 +782,8 @@
"\nDumping curent client votes to trace log\n");
if (*ppos)
goto exit_dump_clients_read;
+
+ rt_mutex_lock(&msm_bus_dbg_cllist_lock);
list_for_each_entry(cldata, &cl_list, list) {
if (IS_ERR_OR_NULL(cldata->pdata))
continue;
@@ -756,6 +799,7 @@
cldata->pdata->active_only);
}
}
+ rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
exit_dump_clients_read:
return simple_read_from_buffer(buf, count, ppos, msg, cnt);
}
@@ -880,6 +924,7 @@
goto err;
}
+ rt_mutex_lock(&msm_bus_dbg_cllist_lock);
list_for_each_entry(cldata, &cl_list, list) {
if (cldata->pdata) {
if (cldata->pdata->name == NULL) {
@@ -899,6 +944,7 @@
&client_data_fops);
}
}
+ rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
if (debugfs_create_file("dump_clients", S_IRUGO | S_IWUSR,
clients, NULL, &msm_bus_dbg_dump_clients_fops) == NULL)
@@ -911,6 +957,7 @@
if (fablist->file == NULL) {
MSM_BUS_DBG("Cannot create files for commit data\n");
kfree(rules_buf);
+ mutex_unlock(&msm_bus_dbg_fablist_lock);
goto err;
}
}
@@ -930,10 +977,14 @@
struct msm_bus_cldata *cldata = NULL, *cldata_temp;
debugfs_remove_recursive(dir);
+
+ rt_mutex_lock(&msm_bus_dbg_cllist_lock);
list_for_each_entry_safe(cldata, cldata_temp, &cl_list, list) {
list_del(&cldata->list);
kfree(cldata);
}
+ rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+
mutex_lock(&msm_bus_dbg_fablist_lock);
list_for_each_entry_safe(fablist, fablist_temp, &fabdata_list, list) {
list_del(&fablist->list);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_dbg_voter.c b/drivers/soc/qcom/msm_bus/msm_bus_dbg_voter.c
index 3f8b52c..6c69bec 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_dbg_voter.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_dbg_voter.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -133,7 +133,7 @@
return 0;
}
- if (sscanf(buf, "%s %llu", name, &vote_khz) != 2) {
+ if (sscanf(buf, "%9s %llu", name, &vote_khz) != 2) {
pr_err("%s:return error", __func__);
return -EINVAL;
}
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
index b331e74..007b353 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
@@ -386,6 +386,10 @@
tcs_cmd_gen(cur_bcm, &cmdlist_wake[k],
cur_bcm->node_vec[ACTIVE_CTX].vec_a,
cur_bcm->node_vec[ACTIVE_CTX].vec_b, commit);
+
+ if (cur_rsc->rscdev->req_state == RPMH_AWAKE_STATE)
+ commit = false;
+
tcs_cmd_gen(cur_bcm, &cmdlist_sleep[k],
cur_bcm->node_vec[DUAL_CTX].vec_a,
cur_bcm->node_vec[DUAL_CTX].vec_b, commit);
@@ -555,6 +559,8 @@
int cnt_sleep = 0;
int i = 0;
+ if (!clist)
+ return ret;
list_for_each_entry_safe(node, node_tmp, clist, link) {
if (unlikely(node->node_info->defer_qos))
@@ -590,8 +596,16 @@
}
n_active = kcalloc(cnt_vcd+1, sizeof(int), GFP_KERNEL);
+ if (!n_active)
+ return -ENOMEM;
+
n_wake = kcalloc(cnt_vcd+1, sizeof(int), GFP_KERNEL);
+ if (!n_wake)
+ return -ENOMEM;
+
n_sleep = kcalloc(cnt_vcd+1, sizeof(int), GFP_KERNEL);
+ if (!n_sleep)
+ return -ENOMEM;
if (cnt_active)
cmdlist_active = kcalloc(cnt_active, sizeof(struct tcs_cmd),
@@ -606,18 +620,41 @@
cmdlist_wake, cmdlist_sleep, cur_bcm_clist);
ret = rpmh_invalidate(cur_mbox);
- if (cur_rsc->rscdev->req_state == RPMH_AWAKE_STATE)
+ if (ret)
+ MSM_BUS_ERR("%s: Error invalidating mbox: %d\n",
+ __func__, ret);
+
+ if (cur_rsc->rscdev->req_state == RPMH_AWAKE_STATE) {
ret = rpmh_write(cur_mbox, cur_rsc->rscdev->req_state,
cmdlist_active, cnt_active);
- else
+ /*
+ * Ignore -EBUSY from rpmh_write if it's an AWAKE_STATE
+ * request since AWAKE requests are invalid when
+ * the display RSC is in solver mode and the bus driver
+ * does not know the current state of the display RSC.
+ */
+ if (ret && ret != -EBUSY)
+ MSM_BUS_ERR("%s: error sending active/awake sets: %d\n",
+ __func__, ret);
+ } else {
ret = rpmh_write_passthru(cur_mbox, cur_rsc->rscdev->req_state,
cmdlist_active, n_active);
+ if (ret)
+ MSM_BUS_ERR("%s: error sending active/awake sets: %d\n",
+ __func__, ret);
+ }
ret = rpmh_write_passthru(cur_mbox, RPMH_WAKE_ONLY_STATE,
cmdlist_wake, n_wake);
+ if (ret)
+ MSM_BUS_ERR("%s: error sending wake sets: %d\n",
+ __func__, ret);
ret = rpmh_write_passthru(cur_mbox, RPMH_SLEEP_STATE,
cmdlist_sleep, n_sleep);
+ if (ret)
+ MSM_BUS_ERR("%s: error sending sleep sets: %d\n",
+ __func__, ret);
list_for_each_entry_safe(node, node_tmp, clist, link) {
bcm_clist_clean(node);
@@ -740,18 +777,16 @@
static int msm_bus_disable_node_qos_clk(struct msm_bus_node_device_type *node)
{
- struct msm_bus_node_device_type *bus_node = NULL;
int i;
int ret = 0;
- if (!node || (!to_msm_bus_node(node->node_info->bus_device))) {
+ if (!node) {
ret = -ENXIO;
goto exit_disable_node_qos_clk;
}
- bus_node = to_msm_bus_node(node->node_info->bus_device);
- for (i = 0; i < bus_node->num_node_qos_clks; i++)
- ret = disable_nodeclk(&bus_node->node_qos_clks[i]);
+ for (i = 0; i < node->num_node_qos_clks; i++)
+ ret = disable_nodeclk(&node->node_qos_clks[i]);
exit_disable_node_qos_clk:
return ret;
@@ -760,7 +795,7 @@
static int msm_bus_enable_node_qos_clk(struct msm_bus_node_device_type *node)
{
int i;
- int ret;
+ int ret = 0;
long rounded_rate;
for (i = 0; i < node->num_node_qos_clks; i++) {
@@ -1339,7 +1374,7 @@
node_info->bcm_dev_ids = devm_kzalloc(bus_dev,
sizeof(int) * pdata_node_info->num_bcm_devs,
GFP_KERNEL);
- if (!node_info->bcm_devs) {
+ if (!node_info->bcm_dev_ids) {
MSM_BUS_ERR("%s:Bus connections alloc failed\n", __func__);
devm_kfree(bus_dev, node_info->bcm_devs);
ret = -ENOMEM;
@@ -1363,7 +1398,7 @@
node_info->rsc_dev_ids = devm_kzalloc(bus_dev,
sizeof(int) * pdata_node_info->num_rsc_devs,
GFP_KERNEL);
- if (!node_info->rsc_devs) {
+ if (!node_info->rsc_dev_ids) {
MSM_BUS_ERR("%s:Bus connections alloc failed\n", __func__);
devm_kfree(bus_dev, node_info->rsc_devs);
ret = -ENOMEM;
@@ -1807,9 +1842,10 @@
int __init msm_bus_device_late_init(void)
{
+ commit_late_init_data(true);
MSM_BUS_ERR("msm_bus_late_init: Remove handoff bw requests\n");
init_time = false;
- return commit_late_init_data();
+ return commit_late_init_data(false);
}
subsys_initcall(msm_bus_device_init_driver);
late_initcall_sync(msm_bus_device_late_init);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_of_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_of_rpmh.c
index 42a6f58..77cbbf1 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_of_rpmh.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_of_rpmh.c
@@ -47,7 +47,7 @@
}
arr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
- if ((size > 0) && ZERO_OR_NULL_PTR(arr)) {
+ if (ZERO_OR_NULL_PTR(arr)) {
dev_err(&pdev->dev, "Error: Failed to alloc mem for %s\n",
prop);
return NULL;
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h b/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
index 17657e5..ad04fef 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
+++ b/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
@@ -224,7 +224,7 @@
int throttle_en, uint64_t lim_bw);
int msm_bus_commit_data(struct list_head *clist);
int bcm_remove_handoff_req(struct device *dev, void *data);
-int commit_late_init_data(void);
+int commit_late_init_data(bool lock);
int msm_bus_query_gen(struct list_head *qlist,
struct msm_bus_tcs_usecase *tcs_usecase);
void *msm_bus_realloc_devmem(struct device *dev, void *p, size_t old_size,
diff --git a/drivers/soc/qcom/msm_performance.c b/drivers/soc/qcom/msm_performance.c
index 25e6a9d..979c628 100644
--- a/drivers/soc/qcom/msm_performance.c
+++ b/drivers/soc/qcom/msm_performance.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/input.h>
#include <linux/kthread.h>
+#include <soc/qcom/msm-core.h>
static struct mutex managed_cpus_lock;
diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c
index cfa4ca9..91c9441 100644
--- a/drivers/soc/qcom/peripheral-loader.c
+++ b/drivers/soc/qcom/peripheral-loader.c
@@ -934,13 +934,13 @@
priv->region_start),
VMID_HLOS);
}
+ if (desc->clear_fw_region && priv->region_start)
+ pil_clear_segment(desc);
dma_free_attrs(desc->dev, priv->region_size,
priv->region, priv->region_start,
desc->attrs);
priv->region = NULL;
}
- if (desc->clear_fw_region && priv->region_start)
- pil_clear_segment(desc);
pil_release_mmap(desc);
}
return ret;
diff --git a/drivers/soc/qcom/pil-msa.c b/drivers/soc/qcom/pil-msa.c
index 4a586ac..20b9769 100644
--- a/drivers/soc/qcom/pil-msa.c
+++ b/drivers/soc/qcom/pil-msa.c
@@ -677,7 +677,15 @@
/* Load the MBA image into memory */
count = fw->size;
- memcpy(mba_dp_virt, data, count);
+ if (count <= SZ_1M) {
+ /* Ensures memcpy is done for max 1MB fw size */
+ memcpy(mba_dp_virt, data, count);
+ } else {
+ dev_err(pil->dev, "%s fw image loading into memory is failed due to fw size overflow\n",
+ __func__);
+ ret = -EINVAL;
+ goto err_mba_data;
+ }
/* Ensure memcpy of the MBA memory is done before loading the DP */
wmb();
diff --git a/drivers/soc/qcom/qbt1000.c b/drivers/soc/qcom/qbt1000.c
index 67a5e05..e4ada03 100644
--- a/drivers/soc/qcom/qbt1000.c
+++ b/drivers/soc/qcom/qbt1000.c
@@ -342,6 +342,13 @@
goto end;
}
+ if (strcmp(app.name, FP_APP_NAME)) {
+ dev_err(drvdata->dev, "%s: Invalid app name\n",
+ __func__);
+ rc = -EINVAL;
+ goto end;
+ }
+
if (drvdata->app_handle) {
dev_err(drvdata->dev, "%s: LOAD app already loaded, unloading first\n",
__func__);
@@ -388,9 +395,7 @@
pr_debug("app %s load after\n", app.name);
- if (!strcmp(app.name, FP_APP_NAME))
- drvdata->fp_app_handle = drvdata->app_handle;
-
+ drvdata->fp_app_handle = drvdata->app_handle;
break;
}
case QBT1000_UNLOAD_APP:
diff --git a/drivers/soc/qcom/qmp-debugfs-client.c b/drivers/soc/qcom/qmp-debugfs-client.c
new file mode 100644
index 0000000..578e7f0
--- /dev/null
+++ b/drivers/soc/qcom/qmp-debugfs-client.c
@@ -0,0 +1,105 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mailbox_client.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/platform_device.h>
+#include <linux/mailbox/qmp.h>
+#include <linux/uaccess.h>
+
+#define MAX_MSG_SIZE 96 /* Imposed by the remote*/
+
+static struct mbox_chan *chan;
+static struct mbox_client *cl;
+
+/* debugfs write handler: forwards the written buffer to the AOP over
+ * the QMP mailbox as a single 4-byte-aligned packet.
+ *
+ * Unlike the original, failures are reported to userspace instead of
+ * being swallowed by returning len (which claimed success): oversized
+ * writes get -EINVAL, a failed user copy gets -EFAULT, and a mailbox
+ * send error is propagated. */
+static ssize_t aop_msg_write(struct file *file, const char __user *userstr,
+				size_t len, loff_t *pos)
+{
+	char buf[MAX_MSG_SIZE + 1] = {0};
+	struct qmp_pkt pkt;
+	int rc;
+
+	if (!len)
+		return 0;
+	/* Reject messages the remote cannot accept. */
+	if (len > MAX_MSG_SIZE)
+		return -EINVAL;
+
+	rc = copy_from_user(buf, userstr, len);
+	if (rc) {
+		pr_err("%s copy from user failed, rc=%d\n", __func__, rc);
+		return -EFAULT;
+	}
+
+	/*
+	 * Controller expects a 4 byte aligned buffer
+	 */
+	pkt.size = (len + 0x3) & ~0x3;
+	pkt.data = buf;
+
+	rc = mbox_send_message(chan, &pkt);
+	if (rc < 0) {
+		pr_err("Failed to send qmp request\n");
+		return rc;
+	}
+
+	return len;
+}
+
+/* Write-only debugfs interface: each write becomes one QMP packet. */
+static const struct file_operations aop_msg_fops = {
+	.write = aop_msg_write,
+};
+
+/* Probe: set up a blocking mailbox client on channel 0 and expose the
+ * "aop_send_message" debugfs node for sending QMP packets. */
+static int qmp_msg_probe(struct platform_device *pdev)
+{
+	struct dentry *file;
+
+	cl = devm_kzalloc(&pdev->dev, sizeof(*cl), GFP_KERNEL);
+	if (cl == NULL)
+		return -ENOMEM;
+
+	/* Synchronous sends with a 100 ms timeout. */
+	cl->dev = &pdev->dev;
+	cl->tx_block = true;
+	cl->tx_tout = 100;
+	cl->knows_txdone = false;
+
+	chan = mbox_request_channel(cl, 0);
+	if (IS_ERR(chan)) {
+		dev_err(&pdev->dev, "Failed to mbox channel\n");
+		return PTR_ERR(chan);
+	}
+
+	file = debugfs_create_file("aop_send_message", 0220, NULL, NULL,
+							&aop_msg_fops);
+	if (file)
+		return 0;
+
+	/* debugfs node creation failed: release the channel again. */
+	mbox_free_channel(chan);
+	chan = NULL;
+	return -ENOMEM;
+}
+
+/* OF match for the debugfs QMP client node. */
+static const struct of_device_id aop_qmp_match_tbl[] = {
+	{.compatible = "qcom,debugfs-qmp-client"},
+	{},
+};
+
+/* Registered with builtin_platform_driver: no module unload path,
+ * hence no .remove callback. */
+static struct platform_driver aop_qmp_msg_driver = {
+	.probe = qmp_msg_probe,
+	.driver = {
+		.name = "debugfs-qmp-client",
+		.owner = THIS_MODULE,
+		.of_match_table = aop_qmp_match_tbl,
+	},
+};
+
+builtin_platform_driver(aop_qmp_msg_driver);
diff --git a/drivers/soc/qcom/qsee_ipc_irq_bridge.c b/drivers/soc/qcom/qsee_ipc_irq_bridge.c
new file mode 100644
index 0000000..ac3dcc3
--- /dev/null
+++ b/drivers/soc/qcom/qsee_ipc_irq_bridge.c
@@ -0,0 +1,624 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/cdev.h>
+#include <linux/interrupt.h>
+#include <linux/ipc_logging.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/subsystem_restart.h>
+
+#define MODULE_NAME "qsee_ipc_irq_bridge"
+#define DEVICE_NAME MODULE_NAME
+#define NUM_LOG_PAGES 4
+
+#define QIIB_DBG(x...) do { \
+ if (qiib_info->log_ctx) \
+ ipc_log_string(qiib_info->log_ctx, x); \
+ else \
+ pr_debug(x); \
+ } while (0)
+
+#define QIIB_ERR(x...) do { \
+ pr_err(x); \
+ if (qiib_info->log_ctx) \
+ ipc_log_string(qiib_info->log_ctx, x); \
+ } while (0)
+
+static void qiib_cleanup(void);
+
+/**
+ * qiib_dev - QSEE IPC IRQ bridge device
+ * @dev_list: qiib device list.
+ * @i: Index to this character device.
+ * @dev_name: Device node name used by the clients.
+ * @cdev: structure to the internal character device.
+ * @devicep: Pointer to the qiib class device structure.
+ * @poll_wait_queue: poll thread wait queue.
+ * @irq_line: IRQ number used for this device.
+ * @rx_irq_reset_reg: Reference to the register to reset the rx irq
+ *			line, if applicable.
+ * @irq_mask: Mask written to @rx_irq_reset_reg to clear the irq.
+ * @irq_pending_count: The number of IRQs pending.
+ * @irq_pending_count_lock: Lock to protect @irq_pending_count.
+ * @ssr_name: Name of the subsystem recognized by the SSR framework.
+ * @nb: SSR Notifier callback.
+ * @notifier_handle: SSR Notifier handle.
+ * @in_reset: Flag to check the SSR state.
+ */
+struct qiib_dev {
+	struct list_head dev_list;
+	uint32_t i;
+
+	const char *dev_name;
+	struct cdev cdev;
+	struct device *devicep;
+
+	wait_queue_head_t poll_wait_queue;
+
+	uint32_t irq_line;
+	void __iomem *rx_irq_reset_reg;
+	uint32_t irq_mask;
+	uint32_t irq_pending_count;
+	spinlock_t irq_pending_count_lock;
+
+	const char *ssr_name;
+	struct notifier_block nb;
+	void *notifier_handle;
+	bool in_reset;
+};
+
+/**
+ * qiib_driver_data - QSEE IPC IRQ bridge driver data
+ * @list: list of all node devices.
+ * @list_lock: lock to synchronize the @list access.
+ * @nports: Number of device nodes.
+ * @classp: Pointer to the device class.
+ * @dev_num: qiib device number.
+ * @log_ctx: pointer to the ipc logging context.
+ */
+struct qiib_driver_data {
+	struct list_head list;
+	struct mutex list_lock;
+
+	int nports;
+	struct class *classp;
+	dev_t dev_num;
+
+	void *log_ctx;
+};
+
+static struct qiib_driver_data *qiib_info;
+
+/**
+ * qiib_driver_data_init() - Initialize the QIIB driver data.
+ *
+ * This function used to initialize the driver specific data
+ * during the module init.
+ *
+ * Return: 0 for success, Standard Linux errors
+ */
+static int qiib_driver_data_init(void)
+{
+	struct qiib_driver_data *data;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (data == NULL)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&data->list);
+	mutex_init(&data->list_lock);
+	qiib_info = data;
+
+	/* Logging is best-effort; the driver works without a log context. */
+	qiib_info->log_ctx = ipc_log_context_create(NUM_LOG_PAGES,
+						"qsee_ipc_irq_bridge", 0);
+	if (!qiib_info->log_ctx)
+		QIIB_ERR("%s: unable to create logging context\n", __func__);
+
+	return 0;
+}
+
+/**
+ * qiib_driver_data_deinit() - De-Initialize the QIIB driver data.
+ *
+ * This function used to de-initialize the driver specific data
+ * during the module exit.
+ */
+static void qiib_driver_data_deinit(void)
+{
+	qiib_cleanup();
+	/*
+	 * Fix: the original tested "!qiib_info->log_ctx", i.e. it destroyed
+	 * the log context only when it was NULL -- leaking a real context
+	 * and passing NULL to ipc_log_context_destroy() otherwise. Destroy
+	 * it only when it actually exists.
+	 */
+	if (qiib_info->log_ctx)
+		ipc_log_context_destroy(qiib_info->log_ctx);
+	kfree(qiib_info);
+	qiib_info = NULL;
+}
+
+/**
+ * qiib_restart_notifier_cb() - SSR restart notifier callback function
+ * @this: Notifier block used by the SSR framework
+ * @code: The SSR code for which stage of restart is occurring
+ * @data: Structure containing private data - not used here.
+ *
+ * This function is a callback for the SSR framework. From here we initiate
+ * our handling of SSR.
+ *
+ * Return: Status of SSR handling
+ */
+static int qiib_restart_notifier_cb(struct notifier_block *this,
+		unsigned long code,
+		void *data)
+{
+	struct qiib_dev *devp = container_of(this, struct qiib_dev, nb);
+
+	switch (code) {
+	case SUBSYS_BEFORE_SHUTDOWN:
+		QIIB_DBG("%s: %s: subsystem restart for %s\n", __func__,
+				"SUBSYS_BEFORE_SHUTDOWN",
+				devp->ssr_name);
+		/* Mark the subsystem down and kick any poll() sleepers. */
+		devp->in_reset = true;
+		wake_up_interruptible(&devp->poll_wait_queue);
+		break;
+	case SUBSYS_AFTER_POWERUP:
+		QIIB_DBG("%s: %s: subsystem restart for %s\n", __func__,
+				"SUBSYS_AFTER_POWERUP",
+				devp->ssr_name);
+		devp->in_reset = false;
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+/**
+ * qiib_poll() - poll() syscall for the qiib device
+ * @file: Pointer to the file structure.
+ * @wait: pointer to Poll table.
+ *
+ * This function is used to poll on the qiib device when
+ * userspace client do a poll() system call. All input arguments are
+ * validated by the virtual file system before calling this function.
+ *
+ * Return: POLLIN for interrupt intercepted case and POLLRDHUP for SSR.
+ */
+static unsigned int qiib_poll(struct file *file, poll_table *wait)
+{
+	struct qiib_dev *devp = file->private_data;
+	unsigned int mask = 0;
+	unsigned long flags;
+
+	if (!devp) {
+		QIIB_ERR("%s on NULL device\n", __func__);
+		return POLLERR;
+	}
+
+	/* Report hangup immediately if the subsystem is already down. */
+	if (devp->in_reset)
+		return POLLRDHUP;
+
+	poll_wait(file, &devp->poll_wait_queue, wait);
+	/*
+	 * Consume all pending IRQ events under the lock: report POLLIN
+	 * once and zero the counter so the next poll() blocks again.
+	 */
+	spin_lock_irqsave(&devp->irq_pending_count_lock, flags);
+	if (devp->irq_pending_count) {
+		mask |= POLLIN;
+		QIIB_DBG("%s set POLLIN on [%s] count[%d]\n",
+				__func__, devp->dev_name,
+				devp->irq_pending_count);
+		devp->irq_pending_count = 0;
+	}
+	spin_unlock_irqrestore(&devp->irq_pending_count_lock, flags);
+
+	/*
+	 * Re-check in_reset: SSR may have raced with poll_wait() above.
+	 * NOTE(review): the count printed below was just cleared, so this
+	 * debug line normally logs 0.
+	 */
+	if (devp->in_reset) {
+		mask |= POLLRDHUP;
+		QIIB_DBG("%s set POLLRDHUP on [%s] count[%d]\n",
+				__func__, devp->dev_name,
+				devp->irq_pending_count);
+	}
+	return mask;
+}
+
+/**
+ * qiib_open() - open() syscall for the qiib device
+ * @inode: Pointer to the inode structure.
+ * @file: Pointer to the file structure.
+ *
+ * This function is used to open the qiib device when
+ * userspace client do a open() system call. All input arguments are
+ * validated by the virtual file system before calling this function.
+ *
+ * Return: 0 for success, Standard Linux errors
+ */
+static int qiib_open(struct inode *inode, struct file *file)
+{
+	struct qiib_dev *devp = container_of(inode->i_cdev,
+					struct qiib_dev, cdev);
+
+	if (!devp) {
+		QIIB_ERR("%s on NULL device\n", __func__);
+		return -EINVAL;
+	}
+
+	QIIB_DBG("%s on [%s]\n", __func__, devp->dev_name);
+	/* Stash the device for read/poll/release on this fd. */
+	file->private_data = devp;
+	return 0;
+}
+
+/**
+ * qiib_release() - release operation on qiibdevice
+ * @inode: Pointer to the inode structure.
+ * @file: Pointer to the file structure.
+ *
+ * This function is used to release the qiib device when
+ * userspace client do a close() system call. All input arguments are
+ * validated by the virtual file system before calling this function.
+ */
+static int qiib_release(struct inode *inode, struct file *file)
+{
+	struct qiib_dev *devp = file->private_data;
+
+	if (devp == NULL) {
+		QIIB_ERR("%s on NULL device\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Nothing to tear down per-fd; just trace the close. */
+	QIIB_DBG("%s on [%s]\n", __func__, devp->dev_name);
+	return 0;
+}
+
+/* Clients open the node and poll() for POLLIN (IRQ) / POLLRDHUP (SSR). */
+static const struct file_operations qiib_fops = {
+	.owner = THIS_MODULE,
+	.open = qiib_open,
+	.release = qiib_release,
+	.poll = qiib_poll,
+};
+
+/**
+ * qiib_add_device() - Initialize qiib device and add cdev
+ * @devp: pointer to the qiib device.
+ * @i: index of the qiib device.
+ *
+ * Return: 0 for success, Standard Linux errors
+ */
+/**
+ * qiib_add_device() - Initialize qiib device and add cdev
+ * @devp: pointer to the qiib device.
+ * @i: index of the qiib device.
+ *
+ * Return: 0 for success, Standard Linux errors
+ */
+static int qiib_add_device(struct qiib_dev *devp, int i)
+{
+	int ret;
+
+	devp->i = i;
+	init_waitqueue_head(&devp->poll_wait_queue);
+	spin_lock_init(&devp->irq_pending_count_lock);
+
+	cdev_init(&devp->cdev, &qiib_fops);
+	devp->cdev.owner = THIS_MODULE;
+
+	/*
+	 * cdev_add() returns 0 or -errno; test that directly instead of
+	 * the IS_ERR_VALUE((unsigned long)ret) cast, which is intended
+	 * for pointer-sized error encodings.
+	 */
+	ret = cdev_add(&devp->cdev, qiib_info->dev_num + i, 1);
+	if (ret < 0) {
+		QIIB_ERR("%s: cdev_add() failed for dev [%s] ret:%i\n",
+			__func__, devp->dev_name, ret);
+		return ret;
+	}
+
+	devp->devicep = device_create(qiib_info->classp,
+				NULL,
+				(qiib_info->dev_num + i),
+				NULL,
+				devp->dev_name);
+
+	if (IS_ERR_OR_NULL(devp->devicep)) {
+		QIIB_ERR("%s: device_create() failed for dev [%s]\n",
+			__func__, devp->dev_name);
+		/* Undo the cdev registration before reporting failure. */
+		cdev_del(&devp->cdev);
+		return -ENOMEM;
+	}
+
+	/* Publish the device so qiib_cleanup() can find and tear it down. */
+	mutex_lock(&qiib_info->list_lock);
+	list_add(&devp->dev_list, &qiib_info->list);
+	mutex_unlock(&qiib_info->list_lock);
+
+	return 0;
+}
+
+static irqreturn_t qiib_irq_handler(int irq, void *priv)
+{
+	struct qiib_dev *devp = priv;
+	unsigned long flags;
+
+	/* Count the event and wake anyone blocked in qiib_poll(). */
+	spin_lock_irqsave(&devp->irq_pending_count_lock, flags);
+	devp->irq_pending_count++;
+	spin_unlock_irqrestore(&devp->irq_pending_count_lock, flags);
+	wake_up_interruptible(&devp->poll_wait_queue);
+
+	/* Level-triggered lines need an explicit clear at the source. */
+	if (devp->rx_irq_reset_reg)
+		writel_relaxed(devp->irq_mask, devp->rx_irq_reset_reg);
+
+	/* NOTE(review): count read outside the lock here -- debug only. */
+	QIIB_DBG("%s name[%s] pend_count[%d]\n", __func__,
+		devp->dev_name, devp->irq_pending_count);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * qiib_parse_node() - parse node from device tree binding
+ * @node: pointer to device tree node
+ * @devp: pointer to the qiib device
+ *
+ * Return: 0 on success, -ENODEV on failure.
+ */
+/**
+ * qiib_parse_node() - parse node from device tree binding
+ * @node: pointer to device tree node
+ * @devp: pointer to the qiib device
+ *
+ * Reads the device name, IRQ, SSR label and (for level-triggered lines)
+ * the irq-clear register from DT, then registers the SSR notifier and
+ * requests the IRQ. On failure all acquired resources are released, so
+ * the caller may simply kfree(@devp).
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int qiib_parse_node(struct device_node *node, struct qiib_dev *devp)
+{
+	char *key;
+	const char *subsys_name;
+	const char *dev_name;
+	uint32_t irqtype;
+	uint32_t irq_clear[2];
+	struct irq_data *irqtype_data;
+	int ret = -ENODEV;
+
+	key = "qcom,dev-name";
+	dev_name = of_get_property(node, key, NULL);
+	if (!dev_name) {
+		QIIB_ERR("%s: missing key: %s\n", __func__, key);
+		goto missing_key;
+	}
+	QIIB_DBG("%s: %s = %s\n", __func__, key, dev_name);
+
+	key = "interrupts";
+	devp->irq_line = irq_of_parse_and_map(node, 0);
+	if (!devp->irq_line) {
+		QIIB_ERR("%s: missing key: %s\n", __func__, key);
+		goto missing_key;
+	}
+	QIIB_DBG("%s: %s = %d\n", __func__, key, devp->irq_line);
+
+	irqtype_data = irq_get_irq_data(devp->irq_line);
+	if (!irqtype_data) {
+		QIIB_ERR("%s: get irqdata fail:%d\n", __func__, devp->irq_line);
+		goto missing_key;
+	}
+	irqtype = irqd_get_trigger_type(irqtype_data);
+	QIIB_DBG("%s: irqtype = %d\n", __func__, irqtype);
+
+	key = "label";
+	subsys_name = of_get_property(node, key, NULL);
+	if (!subsys_name) {
+		QIIB_ERR("%s: missing key: %s\n", __func__, key);
+		goto missing_key;
+	}
+	QIIB_DBG("%s: %s = %s\n", __func__, key, subsys_name);
+
+	/*
+	 * Level-triggered lines are cleared by a register write; pick up
+	 * the mask and the register window from DT.
+	 */
+	if (irqtype & IRQF_TRIGGER_HIGH) {
+		key = "qcom,rx-irq-clr-mask";
+		ret = of_property_read_u32(node, key, &devp->irq_mask);
+		if (ret) {
+			QIIB_ERR("%s: missing key: %s\n", __func__, key);
+			ret = -ENODEV;
+			goto missing_key;
+		}
+		QIIB_DBG("%s: %s = %d\n", __func__, key, devp->irq_mask);
+
+		key = "qcom,rx-irq-clr";
+		ret = of_property_read_u32_array(node, key, irq_clear,
+						ARRAY_SIZE(irq_clear));
+		if (ret) {
+			QIIB_ERR("%s: missing key: %s\n", __func__, key);
+			ret = -ENODEV;
+			goto missing_key;
+		}
+
+		devp->rx_irq_reset_reg = ioremap_nocache(irq_clear[0],
+							irq_clear[1]);
+		if (!devp->rx_irq_reset_reg) {
+			QIIB_ERR("%s: unable to map rx reset reg\n", __func__);
+			ret = -ENOMEM;
+			goto missing_key;
+		}
+	}
+
+	devp->dev_name = dev_name;
+	devp->ssr_name = subsys_name;
+	devp->nb.notifier_call = qiib_restart_notifier_cb;
+
+	devp->notifier_handle = subsys_notif_register_notifier(devp->ssr_name,
+							&devp->nb);
+	if (IS_ERR_OR_NULL(devp->notifier_handle)) {
+		QIIB_ERR("%s: Could not register SSR notifier cb\n", __func__);
+		ret = -EINVAL;
+		goto ssr_reg_fail;
+	}
+
+	ret = request_irq(devp->irq_line, qiib_irq_handler,
+			irqtype | IRQF_NO_SUSPEND,
+			devp->dev_name, devp);
+	if (ret < 0) {
+		QIIB_ERR("%s: request_irq() failed on %d\n", __func__,
+				devp->irq_line);
+		goto req_irq_fail;
+	}
+
+	ret = enable_irq_wake(devp->irq_line);
+	if (ret < 0) {
+		QIIB_ERR("%s: enable_irq_wake() failed on %d\n",
+				__func__, devp->irq_line);
+		/*
+		 * Fix: the original returned this error while leaving the
+		 * IRQ requested and the SSR notifier registered; the caller
+		 * then kfree()s devp, leaving a live handler pointing at
+		 * freed memory (use-after-free on the next interrupt).
+		 * Unwind fully instead.
+		 */
+		goto irq_wake_fail;
+	}
+
+	return 0;
+
+irq_wake_fail:
+	free_irq(devp->irq_line, devp);
+req_irq_fail:
+	subsys_notif_unregister_notifier(devp->notifier_handle, &devp->nb);
+	devp->notifier_handle = NULL;
+ssr_reg_fail:
+	if (devp->rx_irq_reset_reg) {
+		iounmap(devp->rx_irq_reset_reg);
+		devp->rx_irq_reset_reg = NULL;
+	}
+missing_key:
+	return ret;
+}
+
+/**
+ * qiib_cleanup - cleanup all the resources
+ *
+ * This function remove all the memory and unregister
+ * the char device region.
+ */
+/**
+ * qiib_cleanup - cleanup all the resources
+ *
+ * Removes every registered device (IRQ, io-mapping, cdev, class device,
+ * SSR notifier), then destroys the class and releases the chrdev region.
+ */
+static void qiib_cleanup(void)
+{
+	struct qiib_dev *devp;
+	struct qiib_dev *index;
+
+	mutex_lock(&qiib_info->list_lock);
+	list_for_each_entry_safe(devp, index, &qiib_info->list, dev_list) {
+		list_del(&devp->dev_list);
+		/*
+		 * Fix: the original never called free_irq(), so the handler
+		 * stayed registered with a dangling devp pointer after the
+		 * kfree() below (use-after-free on the next interrupt).
+		 * Likewise the irq-clear register mapping was leaked.
+		 */
+		free_irq(devp->irq_line, devp);
+		if (devp->rx_irq_reset_reg)
+			iounmap(devp->rx_irq_reset_reg);
+		cdev_del(&devp->cdev);
+		device_destroy(qiib_info->classp,
+			MKDEV(MAJOR(qiib_info->dev_num), devp->i));
+		if (devp->notifier_handle)
+			subsys_notif_unregister_notifier(devp->notifier_handle,
+							&devp->nb);
+		kfree(devp);
+	}
+	mutex_unlock(&qiib_info->list_lock);
+
+	if (!IS_ERR_OR_NULL(qiib_info->classp))
+		class_destroy(qiib_info->classp);
+
+	unregister_chrdev_region(MAJOR(qiib_info->dev_num), qiib_info->nports);
+}
+
+/**
+ * qiib_alloc_chrdev_region() - allocate the char device region
+ *
+ * This function allocate memory for qiib character-device region and
+ * create the class.
+ */
+/**
+ * qiib_alloc_chrdev_region() - allocate the char device region
+ *
+ * Allocates the qiib character-device region and creates the class.
+ *
+ * Return: 0 for success, standard Linux errors.
+ */
+static int qiib_alloc_chrdev_region(void)
+{
+	int ret;
+
+	/* alloc_chrdev_region() returns 0 or -errno; test it directly. */
+	ret = alloc_chrdev_region(&qiib_info->dev_num,
+			0,
+			qiib_info->nports,
+			DEVICE_NAME);
+	if (ret < 0) {
+		QIIB_ERR("%s: alloc_chrdev_region() failed ret:%i\n",
+			__func__, ret);
+		return ret;
+	}
+
+	qiib_info->classp = class_create(THIS_MODULE, DEVICE_NAME);
+	if (IS_ERR(qiib_info->classp)) {
+		/* Propagate the real error instead of hard-coding -ENOMEM. */
+		ret = PTR_ERR(qiib_info->classp);
+		QIIB_ERR("%s: class_create() failed ret:%i\n", __func__, ret);
+		unregister_chrdev_region(MAJOR(qiib_info->dev_num),
+					qiib_info->nports);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * qsee_ipc_irq_bridge_probe() - create one qiib device per DT child node.
+ *
+ * Allocates the chrdev region sized by the number of available children,
+ * then parses and registers each child. Any failure tears down everything
+ * created so far.
+ */
+static int qsee_ipc_irq_bridge_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct device_node *node;
+	struct qiib_dev *devp;
+	int i = 0;
+
+	qiib_info->nports = of_get_available_child_count(pdev->dev.of_node);
+	if (!qiib_info->nports) {
+		QIIB_ERR("%s:Fail nports = %d\n", __func__, qiib_info->nports);
+		return -EINVAL;
+	}
+
+	ret = qiib_alloc_chrdev_region();
+	if (ret) {
+		QIIB_ERR("%s: chrdev_region allocation failed ret:%i\n",
+			__func__, ret);
+		return ret;
+	}
+
+	for_each_available_child_of_node(pdev->dev.of_node, node) {
+		/* kzalloc() returns NULL on failure, never an ERR_PTR. */
+		devp = kzalloc(sizeof(*devp), GFP_KERNEL);
+		if (!devp) {
+			QIIB_ERR("%s:Allocation failed id:%d\n", __func__, i);
+			ret = -ENOMEM;
+			goto error;
+		}
+
+		ret = qiib_parse_node(node, devp);
+		if (ret) {
+			QIIB_ERR("%s:qiib_parse_node failed %d\n", __func__, i);
+			kfree(devp);
+			goto error;
+		}
+
+		ret = qiib_add_device(devp, i);
+		if (ret < 0) {
+			QIIB_ERR("%s: add [%s] device failed ret=%d\n",
+				__func__, devp->dev_name, ret);
+			/*
+			 * Fix: undo qiib_parse_node() before freeing devp;
+			 * otherwise the IRQ handler and SSR notifier keep
+			 * referencing freed memory.
+			 */
+			free_irq(devp->irq_line, devp);
+			if (devp->rx_irq_reset_reg)
+				iounmap(devp->rx_irq_reset_reg);
+			subsys_notif_unregister_notifier(devp->notifier_handle,
+							&devp->nb);
+			kfree(devp);
+			goto error;
+		}
+		i++;
+	}
+
+	QIIB_DBG("%s: Driver Initialized.\n", __func__);
+	return 0;
+
+error:
+	/*
+	 * Fix: breaking out of for_each_available_child_of_node() leaves
+	 * the current node's refcount elevated; drop it before cleanup.
+	 */
+	of_node_put(node);
+	qiib_cleanup();
+	return ret;
+}
+
+static int qsee_ipc_irq_bridge_remove(struct platform_device *pdev)
+{
+	/* Tear down every child device created in probe(). */
+	qiib_cleanup();
+	return 0;
+}
+
+static const struct of_device_id qsee_ipc_irq_bridge_match_table[] = {
+ { .compatible = "qcom,qsee-ipc-irq-bridge" },
+ {},
+};
+
+static struct platform_driver qsee_ipc_irq_bridge_driver = {
+ .probe = qsee_ipc_irq_bridge_probe,
+ .remove = qsee_ipc_irq_bridge_remove,
+ .driver = {
+ .name = MODULE_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = qsee_ipc_irq_bridge_match_table,
+ },
+};
+
+/*
+ * qsee_ipc_irq_bridge_init() - module entry point.
+ *
+ * Sets up the driver-global state, then registers the platform driver.
+ */
+static int __init qsee_ipc_irq_bridge_init(void)
+{
+	int ret;
+
+	ret = qiib_driver_data_init();
+	if (ret) {
+		QIIB_ERR("%s: driver data init failed %d\n",
+			__func__, ret);
+		return ret;
+	}
+
+	ret = platform_driver_register(&qsee_ipc_irq_bridge_driver);
+	if (ret) {
+		QIIB_ERR("%s: platform driver register failed %d\n",
+			__func__, ret);
+		/*
+		 * Fix: the original leaked qiib_info (and its ipc log
+		 * context) when registration failed.
+		 */
+		qiib_driver_data_deinit();
+		return ret;
+	}
+
+	return 0;
+}
+module_init(qsee_ipc_irq_bridge_init);
+
+static void __exit qsee_ipc_irq_bridge_exit(void)
+{
+	/* Unregister first so no new probes race with the data teardown. */
+	platform_driver_unregister(&qsee_ipc_irq_bridge_driver);
+	qiib_driver_data_deinit();
+}
+module_exit(qsee_ipc_irq_bridge_exit);
+MODULE_DESCRIPTION("QSEE IPC interrupt bridge");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/ramdump.c b/drivers/soc/qcom/ramdump.c
index d5b051e..dd77062 100644
--- a/drivers/soc/qcom/ramdump.c
+++ b/drivers/soc/qcom/ramdump.c
@@ -16,7 +16,6 @@
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/module.h>
-#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
@@ -24,10 +23,20 @@
#include <linux/uaccess.h>
#include <linux/elf.h>
#include <linux/wait.h>
+#include <linux/cdev.h>
#include <soc/qcom/ramdump.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
+
+#define RAMDUMP_NUM_DEVICES 256
+#define RAMDUMP_NAME "ramdump"
+
+static struct class *ramdump_class;
+static dev_t ramdump_dev;
+static DEFINE_MUTEX(rd_minor_mutex);
+static DEFINE_IDA(rd_minor_id);
+static bool ramdump_devnode_inited;
#define RAMDUMP_WAIT_MSECS 120000
struct ramdump_device {
@@ -38,7 +47,8 @@
int ramdump_status;
struct completion ramdump_complete;
- struct miscdevice device;
+ struct cdev cdev;
+ struct device *dev;
wait_queue_head_t dump_wait_q;
int nsegments;
@@ -51,17 +61,19 @@
static int ramdump_open(struct inode *inode, struct file *filep)
{
- struct ramdump_device *rd_dev = container_of(filep->private_data,
- struct ramdump_device, device);
+ struct ramdump_device *rd_dev = container_of(inode->i_cdev,
+ struct ramdump_device, cdev);
rd_dev->consumer_present = 1;
rd_dev->ramdump_status = 0;
+ filep->private_data = rd_dev;
return 0;
}
static int ramdump_release(struct inode *inode, struct file *filep)
{
- struct ramdump_device *rd_dev = container_of(filep->private_data,
- struct ramdump_device, device);
+
+ struct ramdump_device *rd_dev = container_of(inode->i_cdev,
+ struct ramdump_device, cdev);
rd_dev->consumer_present = 0;
rd_dev->data_ready = 0;
complete(&rd_dev->ramdump_complete);
@@ -105,8 +117,7 @@
static ssize_t ramdump_read(struct file *filep, char __user *buf, size_t count,
loff_t *pos)
{
- struct ramdump_device *rd_dev = container_of(filep->private_data,
- struct ramdump_device, device);
+ struct ramdump_device *rd_dev = filep->private_data;
void *device_mem = NULL, *origdevice_mem = NULL, *vaddr = NULL;
unsigned long data_left = 0, bytes_before, bytes_after;
unsigned long addr = 0;
@@ -154,7 +165,7 @@
rd_dev->attrs = 0;
rd_dev->attrs |= DMA_ATTR_SKIP_ZEROING;
- device_mem = vaddr ?: dma_remap(rd_dev->device.parent, NULL, addr,
+ device_mem = vaddr ?: dma_remap(rd_dev->dev->parent, NULL, addr,
copy_size, rd_dev->attrs);
origdevice_mem = device_mem;
@@ -206,7 +217,7 @@
kfree(finalbuf);
if (!vaddr && origdevice_mem)
- dma_unremap(rd_dev->device.parent, origdevice_mem, copy_size);
+ dma_unremap(rd_dev->dev->parent, origdevice_mem, copy_size);
*pos += copy_size;
@@ -217,7 +228,7 @@
ramdump_done:
if (!vaddr && origdevice_mem)
- dma_unremap(rd_dev->device.parent, origdevice_mem, copy_size);
+ dma_unremap(rd_dev->dev->parent, origdevice_mem, copy_size);
kfree(finalbuf);
rd_dev->data_ready = 0;
@@ -229,8 +240,7 @@
static unsigned int ramdump_poll(struct file *filep,
struct poll_table_struct *wait)
{
- struct ramdump_device *rd_dev = container_of(filep->private_data,
- struct ramdump_device, device);
+ struct ramdump_device *rd_dev = filep->private_data;
unsigned int mask = 0;
if (rd_dev->data_ready)
@@ -247,9 +257,26 @@
.poll = ramdump_poll
};
-void *create_ramdump_device(const char *dev_name, struct device *parent)
+static int ramdump_devnode_init(void)
{
int ret;
+
+ ramdump_class = class_create(THIS_MODULE, RAMDUMP_NAME);
+ ret = alloc_chrdev_region(&ramdump_dev, 0, RAMDUMP_NUM_DEVICES,
+ RAMDUMP_NAME);
+ if (ret < 0) {
+ pr_warn("%s: unable to allocate major\n", __func__);
+ return ret;
+ }
+
+ ramdump_devnode_inited = true;
+
+ return 0;
+}
+
+void *create_ramdump_device(const char *dev_name, struct device *parent)
+{
+ int ret, minor;
struct ramdump_device *rd_dev;
if (!dev_name) {
@@ -257,6 +284,14 @@
return NULL;
}
+ mutex_lock(&rd_minor_mutex);
+ if (!ramdump_devnode_inited) {
+ ret = ramdump_devnode_init();
+ if (ret)
+ return ERR_PTR(ret);
+ }
+ mutex_unlock(&rd_minor_mutex);
+
rd_dev = kzalloc(sizeof(struct ramdump_device), GFP_KERNEL);
if (!rd_dev) {
@@ -265,15 +300,20 @@
return NULL;
}
+ /* get a minor number */
+ minor = ida_simple_get(&rd_minor_id, 0, RAMDUMP_NUM_DEVICES,
+ GFP_KERNEL);
+ if (minor < 0) {
+ pr_err("%s: No more minor numbers left! rc:%d\n", __func__,
+ minor);
+ ret = -ENODEV;
+ goto fail_out_of_minors;
+ }
+
snprintf(rd_dev->name, ARRAY_SIZE(rd_dev->name), "ramdump_%s",
dev_name);
init_completion(&rd_dev->ramdump_complete);
-
- rd_dev->device.minor = MISC_DYNAMIC_MINOR;
- rd_dev->device.name = rd_dev->name;
- rd_dev->device.fops = &ramdump_file_ops;
- rd_dev->device.parent = parent;
if (parent) {
rd_dev->complete_ramdump = of_property_read_bool(
parent->of_node, "qcom,complete-ramdump");
@@ -284,27 +324,48 @@
init_waitqueue_head(&rd_dev->dump_wait_q);
- ret = misc_register(&rd_dev->device);
-
- if (ret) {
- pr_err("%s: misc_register failed for %s (%d)", __func__,
+ rd_dev->dev = device_create(ramdump_class, parent,
+ MKDEV(MAJOR(ramdump_dev), minor),
+ rd_dev, rd_dev->name);
+ if (IS_ERR(rd_dev->dev)) {
+ ret = PTR_ERR(rd_dev->dev);
+ pr_err("%s: device_create failed for %s (%d)", __func__,
dev_name, ret);
- kfree(rd_dev);
- return NULL;
+ goto fail_return_minor;
+ }
+
+ cdev_init(&rd_dev->cdev, &ramdump_file_ops);
+
+ ret = cdev_add(&rd_dev->cdev, MKDEV(MAJOR(ramdump_dev), minor), 1);
+ if (ret < 0) {
+ pr_err("%s: cdev_add failed for %s (%d)", __func__,
+ dev_name, ret);
+ goto fail_cdev_add;
}
return (void *)rd_dev;
+
+fail_cdev_add:
+ device_unregister(rd_dev->dev);
+fail_return_minor:
+ ida_simple_remove(&rd_minor_id, minor);
+fail_out_of_minors:
+ kfree(rd_dev);
+ return ERR_PTR(ret);
}
EXPORT_SYMBOL(create_ramdump_device);
void destroy_ramdump_device(void *dev)
{
struct ramdump_device *rd_dev = dev;
+ int minor = MINOR(rd_dev->cdev.dev);
if (IS_ERR_OR_NULL(rd_dev))
return;
- misc_deregister(&rd_dev->device);
+ cdev_del(&rd_dev->cdev);
+ device_unregister(rd_dev->dev);
+ ida_simple_remove(&rd_minor_id, minor);
kfree(rd_dev);
}
EXPORT_SYMBOL(destroy_ramdump_device);
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
index 306510f..1b41269 100644
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -32,6 +32,7 @@
#define RPMH_MAX_MBOXES 2
#define RPMH_MAX_FAST_RES 32
#define RPMH_MAX_REQ_IN_BATCH 10
+#define RPMH_TIMEOUT msecs_to_jiffies(10000)
#define DEFINE_RPMH_MSG_ONSTACK(rc, s, q, c, name) \
struct rpmh_msg name = { \
@@ -76,6 +77,8 @@
DECLARE_BITMAP(fast_req, RPMH_MAX_FAST_RES);
bool dirty;
bool in_solver_mode;
+ /* Cache sleep and wake requests sent as passthru */
+ struct rpmh_msg *passthru_cache[2 * RPMH_MAX_REQ_IN_BATCH];
};
struct rpmh_client {
@@ -110,17 +113,24 @@
return msg;
}
+/* Lock-free variant: caller must already hold rpm->lock. */
+static void __free_msg_to_pool(struct rpmh_msg *rpm_msg)
+{
+	struct rpmh_mbox *rpm = rpm_msg->rc->rpmh;
+
+	/* If we allocated the pool, set it as available */
+	/*
+	 * NOTE(review): bit == RPMH_MAX_FAST_RES appears to mark an
+	 * on-stack (non-pool) message -- confirm against
+	 * DEFINE_RPMH_MSG_ONSTACK.
+	 */
+	if (rpm_msg->bit >= 0 && rpm_msg->bit != RPMH_MAX_FAST_RES) {
+		bitmap_clear(rpm->fast_req, rpm_msg->bit, 1);
+	}
+}
+
static void free_msg_to_pool(struct rpmh_msg *rpm_msg)
{
struct rpmh_mbox *rpm = rpm_msg->rc->rpmh;
unsigned long flags;
- /* If we allocated the pool, set it as available */
- if (rpm_msg->bit >= 0 && rpm_msg->bit != RPMH_MAX_FAST_RES) {
- spin_lock_irqsave(&rpm->lock, flags);
- bitmap_clear(rpm->fast_req, rpm_msg->bit, 1);
- spin_unlock_irqrestore(&rpm->lock, flags);
- }
+ spin_lock_irqsave(&rpm->lock, flags);
+ __free_msg_to_pool(rpm_msg);
+ spin_unlock_irqrestore(&rpm->lock, flags);
}
static void rpmh_rx_cb(struct mbox_client *cl, void *msg)
@@ -166,6 +176,46 @@
complete(compl);
}
+/**
+ * wait_for_tx_done: Wait forever until the response is received.
+ *
+ * @rc: The RPMH client
+ * @compl: The completion object
+ * @addr: An addr that we sent in that request
+ * @data: The data for the address in that request
+ *
+ */
+static inline void wait_for_tx_done(struct rpmh_client *rc,
+		struct completion *compl, u32 addr, u32 data)
+{
+	int ret;
+	int count = 4;	/* loud timeout reports before throttling kicks in */
+	int skip = 0;
+
+	do {
+		ret = wait_for_completion_timeout(compl, RPMH_TIMEOUT);
+		if (ret) {
+			/* Done. Log recovery only if we had timed out. */
+			if (count != 4)
+				dev_notice(rc->dev,
+					"RPMH response received addr=0x%x data=0x%x\n",
+					addr, data);
+			return;
+		}
+		if (!count) {
+			/*
+			 * After the first 4 timeouts, dump the mbox state
+			 * only on every 100th further timeout.
+			 */
+			if (skip++ % 100)
+				continue;
+			dev_err(rc->dev,
+				"RPMH waiting for interrupt from AOSS\n");
+			mbox_chan_debug(rc->chan);
+		} else {
+			dev_err(rc->dev,
+				"RPMH response timeout (%d) addr=0x%x,data=0x%x\n",
+				count, addr, data);
+			count--;
+		}
+	} while (true);
+}
+
static struct rpmh_req *__find_req(struct rpmh_client *rc, u32 addr)
{
struct rpmh_req *p, *req = NULL;
@@ -227,6 +277,21 @@
return req;
}
+/*
+ * check_ctrlr_state() - reject requests the controller cannot take now.
+ *
+ * Returns -EBUSY for RPMH_AWAKE_STATE requests while the controller is
+ * in solver mode, 0 otherwise.
+ */
+static int check_ctrlr_state(struct rpmh_client *rc, enum rpmh_state state)
+{
+	struct rpmh_mbox *rpm = rc->rpmh;
+	unsigned long flags;
+	int ret = 0;
+
+	/* Do not allow RPMH_AWAKE_STATE requests when in solver mode */
+	spin_lock_irqsave(&rpm->lock, flags);
+	if (rpm->in_solver_mode && state == RPMH_AWAKE_STATE)
+		ret = -EBUSY;
+	spin_unlock_irqrestore(&rpm->lock, flags);
+
+	return ret;
+}
+
/**
* __rpmh_write: Cache and send the RPMH request
*
@@ -282,6 +347,7 @@
u32 addr, u32 data)
{
struct rpmh_msg *rpm_msg;
+ int ret;
if (IS_ERR_OR_NULL(rc))
return -EINVAL;
@@ -289,6 +355,10 @@
if (rpmh_standalone)
return 0;
+ ret = check_ctrlr_state(rc, state);
+ if (ret)
+ return ret;
+
rpm_msg = get_msg_from_pool(rc);
if (!rpm_msg)
return -ENOMEM;
@@ -333,6 +403,10 @@
if (rpmh_standalone)
return 0;
+ ret = check_ctrlr_state(rc, state);
+ if (ret)
+ return ret;
+
rpm_msg.cmd[0].addr = addr;
rpm_msg.cmd[0].data = data;
rpm_msg.msg.num_payload = 1;
@@ -341,7 +415,7 @@
if (ret < 0)
return ret;
- wait_for_completion(&compl);
+ wait_for_tx_done(rc, &compl, addr, data);
return rpm_msg.err;
}
@@ -385,10 +459,15 @@
struct tcs_cmd *cmd, int n)
{
struct rpmh_msg *rpm_msg;
+ int ret;
if (rpmh_standalone)
return 0;
+ ret = check_ctrlr_state(rc, state);
+ if (ret)
+ return ret;
+
rpm_msg = __get_rpmh_msg_async(rc, state, cmd, n);
if (IS_ERR(rpm_msg))
return PTR_ERR(rpm_msg);
@@ -429,6 +508,10 @@
if (rpmh_standalone)
return 0;
+ ret = check_ctrlr_state(rc, state);
+ if (ret)
+ return ret;
+
memcpy(rpm_msg.cmd, cmd, n * sizeof(*cmd));
rpm_msg.msg.num_payload = n;
@@ -436,12 +519,76 @@
if (ret)
return ret;
- wait_for_completion(&compl);
+ wait_for_tx_done(rc, &compl, cmd[0].addr, cmd[0].data);
return rpm_msg.err;
}
EXPORT_SYMBOL(rpmh_write);
+/*
+ * cache_passthru() - append a batch of sleep/wake messages to the
+ * NULL-terminated passthru cache, to be sent later by flush_passthru().
+ *
+ * Returns -ENOMEM when the batch does not fit.
+ */
+static int cache_passthru(struct rpmh_client *rc, struct rpmh_msg **rpm_msg,
+		int count)
+{
+	struct rpmh_mbox *rpm = rc->rpmh;
+	unsigned long flags;
+	int ret = 0;
+	int index = 0;
+	int i;
+
+	spin_lock_irqsave(&rpm->lock, flags);
+	/* Find the first free slot; the cache is NULL-terminated. */
+	while (rpm->passthru_cache[index])
+		index++;
+	/*
+	 * '>=' (not '>') keeps at least one trailing NULL in the array so
+	 * the termination scans here and in flush/invalidate stay bounded.
+	 */
+	if (index + count >= 2 * RPMH_MAX_REQ_IN_BATCH) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	for (i = 0; i < count; i++)
+		rpm->passthru_cache[index + i] = rpm_msg[i];
+fail:
+	spin_unlock_irqrestore(&rpm->lock, flags);
+
+	return ret;
+}
+
+/*
+ * flush_passthru() - push every cached sleep/wake message down to the
+ * controller. Entries are kept in the cache; invalidate_passthru()
+ * releases them.
+ */
+static int flush_passthru(struct rpmh_client *rc)
+{
+	struct rpmh_mbox *rpm = rc->rpmh;
+	struct rpmh_msg *rpm_msg;
+	unsigned long flags;
+	int ret = 0;
+	int i;
+
+	/* Send Sleep/Wake requests to the controller, expect no response */
+	/*
+	 * NOTE(review): mbox_send_controller_data() is called with rpm->lock
+	 * held and interrupts off -- assumed to be non-blocking; confirm.
+	 */
+	spin_lock_irqsave(&rpm->lock, flags);
+	for (i = 0; rpm->passthru_cache[i]; i++) {
+		rpm_msg = rpm->passthru_cache[i];
+		ret = mbox_send_controller_data(rc->chan, &rpm_msg->msg);
+		if (ret)
+			goto fail;
+	}
+fail:
+	spin_unlock_irqrestore(&rpm->lock, flags);
+
+	return ret;
+}
+
+/*
+ * invalidate_passthru() - return every cached passthru message to the
+ * pool and clear the cache (restoring the NULL terminator at index 0).
+ */
+static void invalidate_passthru(struct rpmh_client *rc)
+{
+	struct rpmh_mbox *rpm = rc->rpmh;
+	unsigned long flags;
+	int index = 0;
+	int i;
+
+	spin_lock_irqsave(&rpm->lock, flags);
+	/* The cache is NULL-terminated; count the live entries first. */
+	while (rpm->passthru_cache[index])
+		index++;
+	for (i = 0; i < index; i++) {
+		/* Uses the lock-free helper: rpm->lock is already held. */
+		__free_msg_to_pool(rpm->passthru_cache[i]);
+		rpm->passthru_cache[i] = NULL;
+	}
+	spin_unlock_irqrestore(&rpm->lock, flags);
+}
+
/**
* rpmh_write_passthru: Write multiple batches of RPMH commands without caching
*
@@ -461,14 +608,13 @@
int rpmh_write_passthru(struct rpmh_client *rc, enum rpmh_state state,
struct tcs_cmd *cmd, int *n)
{
- struct rpmh_msg *rpm_msg[RPMH_MAX_REQ_IN_BATCH];
+ struct rpmh_msg *rpm_msg[RPMH_MAX_REQ_IN_BATCH] = { NULL };
DECLARE_COMPLETION_ONSTACK(compl);
atomic_t wait_count = ATOMIC_INIT(0); /* overwritten */
int count = 0;
int ret, i, j, k;
bool complete_set;
- unsigned long flags;
- struct rpmh_mbox *rpm;
+ u32 addr, data;
if (IS_ERR_OR_NULL(rc) || !cmd || !n)
return -EINVAL;
@@ -476,16 +622,11 @@
if (rpmh_standalone)
return 0;
- /* Do not allow setting wake votes when in solver mode */
- rpm = rc->rpmh;
- spin_lock_irqsave(&rpm->lock, flags);
- if (rpm->in_solver_mode && state == RPMH_WAKE_ONLY_STATE) {
- spin_unlock_irqrestore(&rpm->lock, flags);
- return -EIO;
- }
- spin_unlock_irqrestore(&rpm->lock, flags);
+ ret = check_ctrlr_state(rc, state);
+ if (ret)
+ return ret;
- while (n[count++])
+ while (n[count++] > 0)
;
count--;
if (!count || count > RPMH_MAX_REQ_IN_BATCH)
@@ -511,6 +652,8 @@
}
}
+ addr = cmd[0].addr;
+ data = cmd[0].data;
/* Create async request batches */
for (i = 0; i < count; i++) {
rpm_msg[i] = __get_rpmh_msg_async(rc, state, cmd, n[i]);
@@ -540,16 +683,13 @@
/* For those unsent requests, spoof tx_done */
for (j = i; j < count; j++)
rpmh_tx_done(&rc->client, &rpm_msg[j]->msg, ret);
- wait_for_completion(&compl);
+ wait_for_tx_done(rc, &compl, addr, data);
} else {
- /* Send Sleep requests to the controller, expect no response */
- for (i = 0; i < count; i++) {
- rpm_msg[i]->completion = NULL;
- ret = mbox_send_controller_data(rc->chan,
- &rpm_msg[i]->msg);
- free_msg_to_pool(rpm_msg[i]);
- }
- return 0;
+ /*
+ * Cache sleep/wake data in store.
+ * But flush passthru first before flushing all other data.
+ */
+ return cache_passthru(rc, rpm_msg, count);
}
return 0;
@@ -609,7 +749,7 @@
{
DEFINE_RPMH_MSG_ONSTACK(rc, 0, NULL, NULL, rpm_msg);
- if (IS_ERR_OR_NULL(rc) || n > MAX_RPMH_PAYLOAD)
+ if (IS_ERR_OR_NULL(rc) || n <= 0 || n > MAX_RPMH_PAYLOAD)
return -EINVAL;
if (rpmh_standalone)
@@ -645,6 +785,8 @@
if (rpmh_standalone)
return 0;
+ invalidate_passthru(rc);
+
rpm = rc->rpmh;
rpm_msg.msg.invalidate = true;
rpm_msg.msg.is_complete = false;
@@ -693,7 +835,7 @@
return ret;
/* Wait until the response is received from RPMH */
- wait_for_completion(&compl);
+ wait_for_tx_done(rc, &compl, addr, 0);
/* Read the data back from the tcs_mbox_msg structrure */
*resp = rpm_msg.cmd[0].data;
@@ -758,6 +900,11 @@
}
spin_unlock_irqrestore(&rpm->lock, flags);
+ /* First flush the cached passthru's */
+ ret = flush_passthru(rc);
+ if (ret)
+ return ret;
+
/*
* Nobody else should be calling this function other than sleep,
* hence we can run without locks.
@@ -841,8 +988,10 @@
rpmh->msg_pool = kzalloc(sizeof(struct rpmh_msg) *
RPMH_MAX_FAST_RES, GFP_KERNEL);
- if (!rpmh->msg_pool)
+ if (!rpmh->msg_pool) {
+ of_node_put(spec.np);
return ERR_PTR(-ENOMEM);
+ }
rpmh->mbox_dn = spec.np;
INIT_LIST_HEAD(&rpmh->resources);
diff --git a/drivers/soc/qcom/scm.c b/drivers/soc/qcom/scm.c
index fcb3731..ac5cc54 100644
--- a/drivers/soc/qcom/scm.c
+++ b/drivers/soc/qcom/scm.c
@@ -397,18 +397,22 @@
__asmeq("%1", R1_STR)
__asmeq("%2", R2_STR)
__asmeq("%3", R3_STR)
- __asmeq("%4", R0_STR)
- __asmeq("%5", R1_STR)
- __asmeq("%6", R2_STR)
- __asmeq("%7", R3_STR)
- __asmeq("%8", R4_STR)
- __asmeq("%9", R5_STR)
- __asmeq("%10", R6_STR)
+ __asmeq("%4", R4_STR)
+ __asmeq("%5", R5_STR)
+ __asmeq("%6", R6_STR)
+ __asmeq("%7", R0_STR)
+ __asmeq("%8", R1_STR)
+ __asmeq("%9", R2_STR)
+ __asmeq("%10", R3_STR)
+ __asmeq("%11", R4_STR)
+ __asmeq("%12", R5_STR)
+ __asmeq("%13", R6_STR)
#ifdef REQUIRES_SEC
".arch_extension sec\n"
#endif
"smc #0\n"
- : "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3)
+ : "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3),
+ "=r" (r4), "=r" (r5), "=r" (r6)
: "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4),
"r" (r5), "r" (r6)
: "x7", "x8", "x9", "x10", "x11", "x12", "x13",
@@ -442,18 +446,22 @@
__asmeq("%1", R1_STR)
__asmeq("%2", R2_STR)
__asmeq("%3", R3_STR)
- __asmeq("%4", R0_STR)
- __asmeq("%5", R1_STR)
- __asmeq("%6", R2_STR)
- __asmeq("%7", R3_STR)
- __asmeq("%8", R4_STR)
- __asmeq("%9", R5_STR)
- __asmeq("%10", R6_STR)
+ __asmeq("%4", R4_STR)
+ __asmeq("%5", R5_STR)
+ __asmeq("%6", R6_STR)
+ __asmeq("%7", R0_STR)
+ __asmeq("%8", R1_STR)
+ __asmeq("%9", R2_STR)
+ __asmeq("%10", R3_STR)
+ __asmeq("%11", R4_STR)
+ __asmeq("%12", R5_STR)
+ __asmeq("%13", R6_STR)
#ifdef REQUIRES_SEC
".arch_extension sec\n"
#endif
"smc #0\n"
- : "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3)
+ : "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3),
+ "=r" (r4), "=r" (r5), "=r" (r6)
: "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4),
"r" (r5), "r" (r6)
: "x7", "x8", "x9", "x10", "x11", "x12", "x13",
@@ -490,18 +498,22 @@
__asmeq("%1", R1_STR)
__asmeq("%2", R2_STR)
__asmeq("%3", R3_STR)
- __asmeq("%4", R0_STR)
- __asmeq("%5", R1_STR)
- __asmeq("%6", R2_STR)
- __asmeq("%7", R3_STR)
- __asmeq("%8", R4_STR)
- __asmeq("%9", R5_STR)
- __asmeq("%10", R6_STR)
+ __asmeq("%4", R4_STR)
+ __asmeq("%5", R5_STR)
+ __asmeq("%6", R6_STR)
+ __asmeq("%7", R0_STR)
+ __asmeq("%8", R1_STR)
+ __asmeq("%9", R2_STR)
+ __asmeq("%10", R3_STR)
+ __asmeq("%11", R4_STR)
+ __asmeq("%12", R5_STR)
+ __asmeq("%13", R6_STR)
#ifdef REQUIRES_SEC
".arch_extension sec\n"
#endif
"smc #0\n"
- : "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3)
+ : "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3),
+ "=r" (r4), "=r" (r5), "=r" (r6)
: "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4),
"r" (r5), "r" (r6));
@@ -668,10 +680,6 @@
desc->ret[0] = desc->ret[1] = desc->ret[2] = 0;
- pr_debug("scm_call: func id %#llx, args: %#x, %#llx, %#llx, %#llx, %#llx\n",
- x0, desc->arginfo, desc->args[0], desc->args[1],
- desc->args[2], desc->x5);
-
trace_scm_call_start(x0, desc);
if (scm_version == SCM_ARMV8_64)
@@ -701,10 +709,8 @@
} while (ret == SCM_V2_EBUSY && (retry_count++ < SCM_EBUSY_MAX_RETRY));
if (ret < 0)
- pr_err("scm_call failed: func id %#llx, arginfo: %#x, args: %#llx, %#llx, %#llx, %#llx, ret: %d, syscall returns: %#llx, %#llx, %#llx\n",
- x0, desc->arginfo, desc->args[0], desc->args[1],
- desc->args[2], desc->x5, ret, desc->ret[0],
- desc->ret[1], desc->ret[2]);
+ pr_err("scm_call failed: func id %#llx, ret: %d, syscall returns: %#llx, %#llx, %#llx\n",
+ x0, ret, desc->ret[0], desc->ret[1], desc->ret[2]);
if (arglen > N_REGISTER_ARGS)
kfree(desc->extra_arg_buf);
@@ -737,10 +743,6 @@
x0 = fn_id | BIT(SMC_ATOMIC_SYSCALL) | scm_version_mask;
- pr_debug("scm_call: func id %#llx, args: %#x, %#llx, %#llx, %#llx, %#llx\n",
- x0, desc->arginfo, desc->args[0], desc->args[1],
- desc->args[2], desc->x5);
-
if (scm_version == SCM_ARMV8_64)
ret = __scm_call_armv8_64(x0, desc->arginfo, desc->args[0],
desc->args[1], desc->args[2],
@@ -752,9 +754,8 @@
desc->x5, &desc->ret[0],
&desc->ret[1], &desc->ret[2]);
if (ret < 0)
- pr_err("scm_call failed: func id %#llx, arginfo: %#x, args: %#llx, %#llx, %#llx, %#llx, ret: %d, syscall returns: %#llx, %#llx, %#llx\n",
- x0, desc->arginfo, desc->args[0], desc->args[1],
- desc->args[2], desc->x5, ret, desc->ret[0],
+ pr_err("scm_call failed: func id %#llx, ret: %d, syscall returns: %#llx, %#llx, %#llx\n",
+ x0, ret, desc->ret[0],
desc->ret[1], desc->ret[2]);
if (arglen > N_REGISTER_ARGS)
diff --git a/drivers/soc/qcom/secure_buffer.c b/drivers/soc/qcom/secure_buffer.c
index f1e7347..6553ac0 100644
--- a/drivers/soc/qcom/secure_buffer.c
+++ b/drivers/soc/qcom/secure_buffer.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2011 Google, Inc
- * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -25,29 +25,12 @@
DEFINE_MUTEX(secure_buffer_mutex);
-struct cp2_mem_chunks {
- u32 chunk_list;
- u32 chunk_list_size;
- u32 chunk_size;
-} __attribute__ ((__packed__));
-
-struct cp2_lock_req {
- struct cp2_mem_chunks chunks;
- u32 mem_usage;
- u32 lock;
-} __attribute__ ((__packed__));
-
-
struct mem_prot_info {
phys_addr_t addr;
u64 size;
};
#define MEM_PROT_ASSIGN_ID 0x16
-#define MEM_PROTECT_LOCK_ID2 0x0A
-#define MEM_PROTECT_LOCK_ID2_FLAT 0x11
-#define V2_CHUNK_SIZE SZ_1M
-#define FEATURE_ID_CP 12
struct dest_vm_and_perm_info {
u32 vm;
@@ -59,137 +42,6 @@
static void *qcom_secure_mem;
#define QCOM_SECURE_MEM_SIZE (512*1024)
-static int secure_buffer_change_chunk(u32 chunks,
- u32 nchunks,
- u32 chunk_size,
- int lock)
-{
- struct cp2_lock_req request;
- u32 resp;
- int ret;
- struct scm_desc desc = {0};
-
- desc.args[0] = request.chunks.chunk_list = chunks;
- desc.args[1] = request.chunks.chunk_list_size = nchunks;
- desc.args[2] = request.chunks.chunk_size = chunk_size;
- /* Usage is now always 0 */
- desc.args[3] = request.mem_usage = 0;
- desc.args[4] = request.lock = lock;
- desc.args[5] = 0;
- desc.arginfo = SCM_ARGS(6, SCM_RW, SCM_VAL, SCM_VAL, SCM_VAL, SCM_VAL,
- SCM_VAL);
-
- kmap_flush_unused();
- kmap_atomic_flush_unused();
-
- if (!is_scm_armv8()) {
- ret = scm_call(SCM_SVC_MP, MEM_PROTECT_LOCK_ID2,
- &request, sizeof(request), &resp, sizeof(resp));
- } else {
- ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
- MEM_PROTECT_LOCK_ID2_FLAT), &desc);
- resp = desc.ret[0];
- }
-
- return ret;
-}
-
-
-
-static int secure_buffer_change_table(struct sg_table *table, int lock)
-{
- int i, j;
- int ret = -EINVAL;
- u32 *chunk_list;
- struct scatterlist *sg;
-
- for_each_sg(table->sgl, sg, table->nents, i) {
- int nchunks;
- int size = sg->length;
- int chunk_list_len;
- phys_addr_t chunk_list_phys;
-
- /*
- * This should theoretically be a phys_addr_t but the protocol
- * indicates this should be a u32.
- */
- u32 base;
- u64 tmp = sg_dma_address(sg);
-
- WARN((tmp >> 32) & 0xffffffff,
- "%s: there are ones in the upper 32 bits of the sg at %p! They will be truncated! Address: 0x%llx\n",
- __func__, sg, tmp);
- if (unlikely(!size || (size % V2_CHUNK_SIZE))) {
- WARN(1,
- "%s: chunk %d has invalid size: 0x%x. Must be a multiple of 0x%x\n",
- __func__, i, size, V2_CHUNK_SIZE);
- return -EINVAL;
- }
-
- base = (u32)tmp;
-
- nchunks = size / V2_CHUNK_SIZE;
- chunk_list_len = sizeof(u32)*nchunks;
-
- chunk_list = kzalloc(chunk_list_len, GFP_KERNEL);
-
- if (!chunk_list)
- return -ENOMEM;
-
- chunk_list_phys = virt_to_phys(chunk_list);
- for (j = 0; j < nchunks; j++)
- chunk_list[j] = base + j * V2_CHUNK_SIZE;
-
- /*
- * Flush the chunk list before sending the memory to the
- * secure environment to ensure the data is actually present
- * in RAM
- */
- dmac_flush_range(chunk_list, chunk_list + chunk_list_len);
-
- ret = secure_buffer_change_chunk(virt_to_phys(chunk_list),
- nchunks, V2_CHUNK_SIZE, lock);
-
- if (!ret) {
- /*
- * Set or clear the private page flag to communicate the
- * status of the chunk to other entities
- */
- if (lock)
- SetPagePrivate(sg_page(sg));
- else
- ClearPagePrivate(sg_page(sg));
- }
-
- kfree(chunk_list);
- }
-
- return ret;
-}
-
-int msm_secure_table(struct sg_table *table)
-{
- int ret;
-
- mutex_lock(&secure_buffer_mutex);
- ret = secure_buffer_change_table(table, 1);
- mutex_unlock(&secure_buffer_mutex);
-
- return ret;
-
-}
-
-int msm_unsecure_table(struct sg_table *table)
-{
- int ret;
-
- mutex_lock(&secure_buffer_mutex);
- ret = secure_buffer_change_table(table, 0);
- mutex_unlock(&secure_buffer_mutex);
- return ret;
-
-}
-
static struct dest_vm_and_perm_info *
populate_dest_info(int *dest_vmids, int nelements, int *dest_perms,
size_t *size_in_bytes)
@@ -380,6 +232,7 @@
sg_free_table(&table);
return ret;
}
+EXPORT_SYMBOL(hyp_assign_phys);
const char *msm_secure_vmid_to_string(int secure_vmid)
{
@@ -412,6 +265,12 @@
return "VMID_WLAN_CE";
case VMID_CP_CAMERA_PREVIEW:
return "VMID_CP_CAMERA_PREVIEW";
+ case VMID_CP_SPSS_SP:
+ return "VMID_CP_SPSS_SP";
+ case VMID_CP_SPSS_SP_SHARED:
+ return "VMID_CP_SPSS_SP_SHARED";
+ case VMID_CP_SPSS_HLOS_SHARED:
+ return "VMID_CP_SPSS_HLOS_SHARED";
case VMID_INVAL:
return "VMID_INVAL";
default:
@@ -419,20 +278,6 @@
}
}
-#define MAKE_CP_VERSION(major, minor, patch) \
- (((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
-
-bool msm_secure_v2_is_supported(void)
-{
- int version = scm_get_feat_version(FEATURE_ID_CP);
-
- /*
- * if the version is < 1.1.0 then dynamic buffer allocation is
- * not supported
- */
- return version >= MAKE_CP_VERSION(1, 1, 0);
-}
-
static int __init alloc_secure_shared_memory(void)
{
int ret = 0;
diff --git a/drivers/soc/qcom/service-notifier.c b/drivers/soc/qcom/service-notifier.c
index 221ae0c..f4c67f1 100644
--- a/drivers/soc/qcom/service-notifier.c
+++ b/drivers/soc/qcom/service-notifier.c
@@ -84,6 +84,7 @@
struct ind_req_resp {
char service_path[SERVREG_NOTIF_NAME_LENGTH];
int transaction_id;
+ int curr_state;
};
/*
@@ -200,8 +201,30 @@
struct qmi_servreg_notif_set_ack_req_msg_v01 req;
struct msg_desc req_desc, resp_desc;
struct qmi_servreg_notif_set_ack_resp_msg_v01 resp = { { 0, 0 } };
+ struct service_notif_info *service_notif;
+ enum pd_subsys_state state = USER_PD_STATE_CHANGE;
int rc;
+ service_notif = _find_service_info(data->ind_msg.service_path);
+ if (!service_notif)
+ return;
+ if ((int)data->ind_msg.curr_state < QMI_STATE_MIN_VAL ||
+ (int)data->ind_msg.curr_state > QMI_STATE_MAX_VAL)
+ pr_err("Unexpected indication notification state %d\n",
+ data->ind_msg.curr_state);
+ else {
+ mutex_lock(¬if_add_lock);
+ mutex_lock(&service_list_lock);
+ rc = service_notif_queue_notification(service_notif,
+ data->ind_msg.curr_state, &state);
+ if (rc & NOTIFY_STOP_MASK)
+ pr_err("Notifier callback aborted for %s with error %d\n",
+ data->ind_msg.service_path, rc);
+ service_notif->curr_state = data->ind_msg.curr_state;
+ mutex_unlock(&service_list_lock);
+ mutex_unlock(¬if_add_lock);
+ }
+
req.transaction_id = data->ind_msg.transaction_id;
snprintf(req.service_name, ARRAY_SIZE(req.service_name), "%s",
data->ind_msg.service_path);
@@ -236,11 +259,9 @@
unsigned int msg_len, void *ind_cb_priv)
{
struct qmi_client_info *data = (struct qmi_client_info *)ind_cb_priv;
- struct service_notif_info *service_notif;
struct msg_desc ind_desc;
struct qmi_servreg_notif_state_updated_ind_msg_v01 ind_msg = {
QMI_STATE_MIN_VAL, "", 0xFFFF };
- enum pd_subsys_state state = USER_PD_STATE_CHANGE;
int rc;
ind_desc.msg_id = SERVREG_NOTIF_STATE_UPDATED_IND_MSG;
@@ -256,27 +277,8 @@
ind_msg.service_name, ind_msg.curr_state,
ind_msg.transaction_id);
- service_notif = _find_service_info(ind_msg.service_name);
- if (!service_notif)
- return;
-
- if ((int)ind_msg.curr_state < QMI_STATE_MIN_VAL ||
- (int)ind_msg.curr_state > QMI_STATE_MAX_VAL)
- pr_err("Unexpected indication notification state %d\n",
- ind_msg.curr_state);
- else {
- mutex_lock(¬if_add_lock);
- mutex_lock(&service_list_lock);
- rc = service_notif_queue_notification(service_notif,
- ind_msg.curr_state, &state);
- if (rc & NOTIFY_STOP_MASK)
- pr_err("Notifier callback aborted for %s with error %d\n",
- ind_msg.service_name, rc);
- service_notif->curr_state = ind_msg.curr_state;
- mutex_unlock(&service_list_lock);
- mutex_unlock(¬if_add_lock);
- }
data->ind_msg.transaction_id = ind_msg.transaction_id;
+ data->ind_msg.curr_state = ind_msg.curr_state;
snprintf(data->ind_msg.service_path,
ARRAY_SIZE(data->ind_msg.service_path), "%s",
ind_msg.service_name);
@@ -373,6 +375,12 @@
mutex_unlock(&qmi_client_release_lock);
pr_info("Connection established between QMI handle and %d service\n",
data->instance_id);
+ /* Register for indication messages about service */
+ rc = qmi_register_ind_cb(data->clnt_handle,
+ root_service_service_ind_cb, (void *)data);
+ if (rc < 0)
+ pr_err("Indication callback register failed(instance-id: %d) rc:%d\n",
+ data->instance_id, rc);
mutex_lock(¬if_add_lock);
mutex_lock(&service_list_lock);
list_for_each_entry(service_notif, &service_list, list) {
@@ -395,12 +403,6 @@
}
mutex_unlock(&service_list_lock);
mutex_unlock(¬if_add_lock);
- /* Register for indication messages about service */
- rc = qmi_register_ind_cb(data->clnt_handle,
- root_service_service_ind_cb, (void *)data);
- if (rc < 0)
- pr_err("Indication callback register failed(instance-id: %d) rc:%d\n",
- data->instance_id, rc);
}
static void root_service_service_exit(struct qmi_client_info *data,
diff --git a/drivers/soc/qcom/smcinvoke.c b/drivers/soc/qcom/smcinvoke.c
new file mode 100644
index 0000000..3f31fb1
--- /dev/null
+++ b/drivers/soc/qcom/smcinvoke.c
@@ -0,0 +1,575 @@
+/*
+ * SMC Invoke driver
+ *
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/anon_inodes.h>
+#include <linux/smcinvoke.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+
+#include <soc/qcom/scm.h>
+#include <asm/cacheflush.h>
+#include <soc/qcom/qseecomi.h>
+
+#include "smcinvoke_object.h"
+#include "../../misc/qseecom_kernel.h"
+
+#define SMCINVOKE_DEV "smcinvoke"
+#define SMCINVOKE_TZ_PARAM_ID 0x224
+#define SMCINVOKE_TZ_CMD 0x32000600
+#define SMCINVOKE_TZ_ROOT_OBJ 1
+#define SMCINVOKE_TZ_MIN_BUF_SIZE 4096
+#define SMCINVOKE_ARGS_ALIGN_SIZE (sizeof(uint64_t))
+#define SMCINVOKE_TZ_OBJ_NULL 0
+
+#define FOR_ARGS(ndxvar, counts, section) \
+ for (ndxvar = object_counts_index_##section(counts); \
+ ndxvar < (object_counts_index_##section(counts) \
+ + object_counts_num_##section(counts)); \
+ ++ndxvar)
+
+static long smcinvoke_ioctl(struct file *, unsigned int, unsigned long);
+static int smcinvoke_open(struct inode *, struct file *);
+static int smcinvoke_release(struct inode *, struct file *);
+
+static const struct file_operations smcinvoke_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = smcinvoke_ioctl,
+ .compat_ioctl = smcinvoke_ioctl,
+ .open = smcinvoke_open,
+ .release = smcinvoke_release,
+};
+
+struct smcinvoke_buf_hdr {
+ uint32_t offset;
+ uint32_t size;
+};
+
+union smcinvoke_tz_args {
+ struct smcinvoke_buf_hdr b;
+ uint32_t tzhandle;
+};
+struct smcinvoke_msg_hdr {
+ uint32_t tzhandle;
+ uint32_t op;
+ uint32_t counts;
+};
+
+struct smcinvoke_tzobj_context {
+ uint32_t tzhandle;
+};
+
+static dev_t smcinvoke_device_no;
+struct cdev smcinvoke_cdev;
+struct class *driver_class;
+struct device *class_dev;
+
+/*
+ * size_add saturates at SIZE_MAX. If integer overflow is detected,
+ * this function returns SIZE_MAX; otherwise the normal a+b is returned.
+ */
+static inline size_t size_add(size_t a, size_t b)
+{
+ return (b > (SIZE_MAX - a)) ? SIZE_MAX : a + b;
+}
+
+/*
+ * pad_size is used along with size_align to define a buffer overflow
+ * protected version of ALIGN
+ */
+static inline size_t pad_size(size_t a, size_t b)
+{
+ return (~a + 1) % b;
+}
+
+/*
+ * size_align saturates at SIZE_MAX. If integer overflow is detected, this
+ * function returns SIZE_MAX; otherwise the next aligned size is returned.
+ */
+static inline size_t size_align(size_t a, size_t b)
+{
+ return size_add(a, pad_size(a, b));
+}
+
+/*
+ * This function retrieves file pointer corresponding to FD provided. It stores
+ * retrieved file pointer until IOCTL call is concluded. Once call is completed,
+ * all stored file pointers are released. File pointers are stored to prevent
+ * other threads from releasing that FD while IOCTL is in progress.
+ */
+static int get_tzhandle_from_fd(int64_t fd, struct file **filp,
+ uint32_t *tzhandle)
+{
+ int ret = -EBADF;
+ struct file *tmp_filp = NULL;
+ struct smcinvoke_tzobj_context *tzobj = NULL;
+
+ if (fd == SMCINVOKE_USERSPACE_OBJ_NULL) {
+ *tzhandle = SMCINVOKE_TZ_OBJ_NULL;
+ ret = 0;
+ goto out;
+ } else if (fd < SMCINVOKE_USERSPACE_OBJ_NULL) {
+ goto out;
+ }
+
+ tmp_filp = fget(fd);
+ if (!tmp_filp)
+ goto out;
+
+ /* Verify if filp is smcinvoke device's file pointer */
+ if (!tmp_filp->f_op || !tmp_filp->private_data ||
+ (tmp_filp->f_op != &smcinvoke_fops)) {
+ fput(tmp_filp);
+ goto out;
+ }
+
+ tzobj = tmp_filp->private_data;
+ *tzhandle = tzobj->tzhandle;
+ *filp = tmp_filp;
+ ret = 0;
+out:
+ return ret;
+}
+
+static int get_fd_from_tzhandle(uint32_t tzhandle, int64_t *fd)
+{
+ int unused_fd = -1, ret = -1;
+ struct file *f = NULL;
+ struct smcinvoke_tzobj_context *cxt = NULL;
+
+ if (tzhandle == SMCINVOKE_TZ_OBJ_NULL) {
+ *fd = SMCINVOKE_USERSPACE_OBJ_NULL;
+ ret = 0;
+ goto out;
+ }
+
+ cxt = kzalloc(sizeof(*cxt), GFP_KERNEL);
+ if (!cxt) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ unused_fd = get_unused_fd_flags(O_RDWR);
+ if (unused_fd < 0)
+ goto out;
+
+ f = anon_inode_getfile(SMCINVOKE_DEV, &smcinvoke_fops, cxt, O_RDWR);
+ if (IS_ERR(f))
+ goto out;
+
+ *fd = unused_fd;
+ fd_install(*fd, f);
+ ((struct smcinvoke_tzobj_context *)
+ (f->private_data))->tzhandle = tzhandle;
+ return 0;
+out:
+ if (unused_fd >= 0)
+ put_unused_fd(unused_fd);
+ kfree(cxt);
+
+ return ret;
+}
+
+static int prepare_send_scm_msg(const uint8_t *in_buf, size_t in_buf_len,
+ const uint8_t *out_buf, size_t out_buf_len,
+ int32_t *smcinvoke_result)
+{
+ int ret = 0;
+ struct scm_desc desc = {0};
+ size_t inbuf_flush_size = (1UL << get_order(in_buf_len)) * PAGE_SIZE;
+ size_t outbuf_flush_size = (1UL << get_order(out_buf_len)) * PAGE_SIZE;
+
+ desc.arginfo = SMCINVOKE_TZ_PARAM_ID;
+ desc.args[0] = (uint64_t)virt_to_phys(in_buf);
+ desc.args[1] = inbuf_flush_size;
+ desc.args[2] = (uint64_t)virt_to_phys(out_buf);
+ desc.args[3] = outbuf_flush_size;
+
+ dmac_flush_range(in_buf, in_buf + inbuf_flush_size);
+ dmac_flush_range(out_buf, out_buf + outbuf_flush_size);
+
+ ret = scm_call2(SMCINVOKE_TZ_CMD, &desc);
+
+ /* process listener request */
+ if (!ret && (desc.ret[0] == QSEOS_RESULT_INCOMPLETE ||
+ desc.ret[0] == QSEOS_RESULT_BLOCKED_ON_LISTENER))
+ ret = qseecom_process_listener_from_smcinvoke(&desc);
+
+ *smcinvoke_result = (int32_t)desc.ret[1];
+ if (ret || desc.ret[1] || desc.ret[2] || desc.ret[0])
+ pr_err("SCM call failed with ret val = %d %d %d %d\n",
+ ret, (int)desc.ret[0],
+ (int)desc.ret[1], (int)desc.ret[2]);
+
+ dmac_inv_range(in_buf, in_buf + inbuf_flush_size);
+ dmac_inv_range(out_buf, out_buf + outbuf_flush_size);
+ return ret;
+}
+
+static int marshal_out(void *buf, uint32_t buf_size,
+ struct smcinvoke_cmd_req *req,
+ union smcinvoke_arg *args_buf)
+{
+ int ret = -EINVAL, i = 0;
+ union smcinvoke_tz_args *tz_args = NULL;
+ size_t offset = sizeof(struct smcinvoke_msg_hdr) +
+ object_counts_total(req->counts) *
+ sizeof(union smcinvoke_tz_args);
+
+ if (offset > buf_size)
+ goto out;
+
+ tz_args = (union smcinvoke_tz_args *)
+ (buf + sizeof(struct smcinvoke_msg_hdr));
+
+ tz_args += object_counts_num_BI(req->counts);
+
+ FOR_ARGS(i, req->counts, BO) {
+ args_buf[i].b.size = tz_args->b.size;
+ if ((buf_size - tz_args->b.offset < tz_args->b.size) ||
+ tz_args->b.offset > buf_size) {
+ pr_err("%s: buffer overflow detected\n", __func__);
+ goto out;
+ }
+ if (copy_to_user((void __user *)(uintptr_t)(args_buf[i].b.addr),
+ (uint8_t *)(buf) + tz_args->b.offset,
+ tz_args->b.size)) {
+ pr_err("Error %d copying ctxt to user\n", ret);
+ goto out;
+ }
+ tz_args++;
+ }
+ tz_args += object_counts_num_OI(req->counts);
+
+ FOR_ARGS(i, req->counts, OO) {
+ /*
+ * create a new FD and assign to output object's
+ * context
+ */
+ ret = get_fd_from_tzhandle(tz_args->tzhandle,
+ &(args_buf[i].o.fd));
+ if (ret)
+ goto out;
+ tz_args++;
+ }
+ ret = 0;
+out:
+ return ret;
+}
+
+/*
+ * SMC expects arguments in following format
+ * ---------------------------------------------------------------------------
+ * | cxt | op | counts | ptr|size |ptr|size...|ORef|ORef|...| rest of payload |
+ * ---------------------------------------------------------------------------
+ * cxt: target, op: operation, counts: total arguments
+ * offset: offset is from beginning of buffer i.e. cxt
+ * size: size is 8 bytes aligned value
+ */
+static size_t compute_in_msg_size(const struct smcinvoke_cmd_req *req,
+ const union smcinvoke_arg *args_buf)
+{
+ uint32_t i = 0;
+
+ size_t total_size = sizeof(struct smcinvoke_msg_hdr) +
+ object_counts_total(req->counts) *
+ sizeof(union smcinvoke_tz_args);
+
+	/* Computed total_size should be 8-byte aligned from the start of buf */
+ total_size = ALIGN(total_size, SMCINVOKE_ARGS_ALIGN_SIZE);
+
+ /* each buffer has to be 8 bytes aligned */
+ while (i < object_counts_num_buffers(req->counts))
+ total_size = size_add(total_size,
+ size_align(args_buf[i++].b.size, SMCINVOKE_ARGS_ALIGN_SIZE));
+
+ /* Since we're using get_free_pages, no need for explicit PAGE align */
+ return total_size;
+}
+
+static int marshal_in(const struct smcinvoke_cmd_req *req,
+ const union smcinvoke_arg *args_buf, uint32_t tzhandle,
+ uint8_t *buf, size_t buf_size, struct file **arr_filp)
+{
+ int ret = -EINVAL, i = 0;
+ union smcinvoke_tz_args *tz_args = NULL;
+ struct smcinvoke_msg_hdr msg_hdr = {tzhandle, req->op, req->counts};
+ uint32_t offset = sizeof(struct smcinvoke_msg_hdr) +
+ sizeof(union smcinvoke_tz_args) *
+ object_counts_total(req->counts);
+
+ if (buf_size < offset)
+ goto out;
+
+ *(struct smcinvoke_msg_hdr *)buf = msg_hdr;
+ tz_args = (union smcinvoke_tz_args *)
+ (buf + sizeof(struct smcinvoke_msg_hdr));
+
+ FOR_ARGS(i, req->counts, BI) {
+ offset = size_align(offset, SMCINVOKE_ARGS_ALIGN_SIZE);
+ if ((offset > buf_size) ||
+ (args_buf[i].b.size > (buf_size - offset)))
+ goto out;
+
+ tz_args->b.offset = offset;
+ tz_args->b.size = args_buf[i].b.size;
+ tz_args++;
+
+ if (copy_from_user(buf+offset,
+ (void __user *)(uintptr_t)(args_buf[i].b.addr),
+ args_buf[i].b.size))
+ goto out;
+
+ offset += args_buf[i].b.size;
+ }
+ FOR_ARGS(i, req->counts, BO) {
+ offset = size_align(offset, SMCINVOKE_ARGS_ALIGN_SIZE);
+ if ((offset > buf_size) ||
+ (args_buf[i].b.size > (buf_size - offset)))
+ goto out;
+
+ tz_args->b.offset = offset;
+ tz_args->b.size = args_buf[i].b.size;
+ tz_args++;
+
+ offset += args_buf[i].b.size;
+ }
+ FOR_ARGS(i, req->counts, OI) {
+ if (get_tzhandle_from_fd(args_buf[i].o.fd,
+ &arr_filp[i], &(tz_args->tzhandle)))
+ goto out;
+ tz_args++;
+ }
+ ret = 0;
+out:
+ return ret;
+}
+
+long smcinvoke_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int ret = -1, i = 0, nr_args = 0;
+ struct smcinvoke_cmd_req req = {0};
+ void *in_msg = NULL;
+ size_t inmsg_size = 0;
+ void *out_msg = NULL;
+ union smcinvoke_arg *args_buf = NULL;
+ struct file *filp_to_release[object_counts_max_OO] = {NULL};
+ struct smcinvoke_tzobj_context *tzobj = filp->private_data;
+
+ switch (cmd) {
+ case SMCINVOKE_IOCTL_INVOKE_REQ:
+ if (_IOC_SIZE(cmd) != sizeof(req)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ ret = copy_from_user(&req, (void __user *)arg, sizeof(req));
+ if (ret) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ nr_args = object_counts_num_buffers(req.counts) +
+ object_counts_num_objects(req.counts);
+
+ if (req.argsize != sizeof(union smcinvoke_arg)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (nr_args) {
+
+ args_buf = kzalloc(nr_args * req.argsize, GFP_KERNEL);
+ if (!args_buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = copy_from_user(args_buf,
+ (void __user *)(uintptr_t)(req.args),
+ nr_args * req.argsize);
+
+ if (ret) {
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+
+ inmsg_size = compute_in_msg_size(&req, args_buf);
+ in_msg = (void *)__get_free_pages(GFP_KERNEL,
+ get_order(inmsg_size));
+ if (!in_msg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ out_msg = (void *)__get_free_page(GFP_KERNEL);
+ if (!out_msg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = marshal_in(&req, args_buf, tzobj->tzhandle, in_msg,
+ inmsg_size, filp_to_release);
+ if (ret)
+ goto out;
+
+ ret = prepare_send_scm_msg(in_msg, inmsg_size, out_msg,
+ SMCINVOKE_TZ_MIN_BUF_SIZE, &req.result);
+ if (ret)
+ goto out;
+
+ /*
+ * if invoke op results in an err, no need to marshal_out and
+ * copy args buf to user space
+ */
+ if (!req.result) {
+ ret = marshal_out(in_msg, inmsg_size, &req, args_buf);
+
+ ret |= copy_to_user(
+ (void __user *)(uintptr_t)(req.args),
+ args_buf, nr_args * req.argsize);
+ }
+ ret |= copy_to_user((void __user *)arg, &req, sizeof(req));
+ if (ret)
+ goto out;
+
+ break;
+ default:
+ ret = -ENOIOCTLCMD;
+ break;
+ }
+out:
+ free_page((long)out_msg);
+ free_pages((long)in_msg, get_order(inmsg_size));
+ kfree(args_buf);
+ for (i = 0; i < object_counts_max_OO; i++) {
+ if (filp_to_release[i])
+ fput(filp_to_release[i]);
+ }
+
+ return ret;
+}
+
+static int smcinvoke_open(struct inode *nodp, struct file *filp)
+{
+ struct smcinvoke_tzobj_context *tzcxt = NULL;
+
+ tzcxt = kzalloc(sizeof(*tzcxt), GFP_KERNEL);
+ if (!tzcxt)
+ return -ENOMEM;
+
+ tzcxt->tzhandle = SMCINVOKE_TZ_ROOT_OBJ;
+ filp->private_data = tzcxt;
+
+ return 0;
+}
+
+static int smcinvoke_release(struct inode *nodp, struct file *filp)
+{
+ int ret = 0, smcinvoke_result = 0;
+ uint8_t *in_buf = NULL;
+ uint8_t *out_buf = NULL;
+ struct smcinvoke_msg_hdr hdr = {0};
+ struct smcinvoke_tzobj_context *tzobj = filp->private_data;
+ uint32_t tzhandle = tzobj->tzhandle;
+
+	/* Root object is special in the sense that it is indestructible */
+ if (!tzhandle || tzhandle == SMCINVOKE_TZ_ROOT_OBJ)
+ goto out;
+
+ in_buf = (uint8_t *)__get_free_page(GFP_KERNEL);
+ out_buf = (uint8_t *)__get_free_page(GFP_KERNEL);
+ if (!in_buf || !out_buf)
+ goto out;
+
+ hdr.tzhandle = tzhandle;
+ hdr.op = object_op_RELEASE;
+ hdr.counts = 0;
+ *(struct smcinvoke_msg_hdr *)in_buf = hdr;
+
+ ret = prepare_send_scm_msg(in_buf, SMCINVOKE_TZ_MIN_BUF_SIZE,
+ out_buf, SMCINVOKE_TZ_MIN_BUF_SIZE, &smcinvoke_result);
+out:
+ kfree(filp->private_data);
+ free_page((long)in_buf);
+ free_page((long)out_buf);
+
+ return ret;
+}
+
+static int __init smcinvoke_init(void)
+{
+ unsigned int baseminor = 0;
+ unsigned int count = 1;
+ int rc = 0;
+
+ rc = alloc_chrdev_region(&smcinvoke_device_no, baseminor, count,
+ SMCINVOKE_DEV);
+ if (rc < 0) {
+ pr_err("chrdev_region failed %d for %s\n", rc, SMCINVOKE_DEV);
+ return rc;
+ }
+ driver_class = class_create(THIS_MODULE, SMCINVOKE_DEV);
+ if (IS_ERR(driver_class)) {
+ rc = -ENOMEM;
+ pr_err("class_create failed %d\n", rc);
+ goto exit_unreg_chrdev_region;
+ }
+ class_dev = device_create(driver_class, NULL, smcinvoke_device_no,
+ NULL, SMCINVOKE_DEV);
+ if (!class_dev) {
+ pr_err("class_device_create failed %d\n", rc);
+ rc = -ENOMEM;
+ goto exit_destroy_class;
+ }
+
+ cdev_init(&smcinvoke_cdev, &smcinvoke_fops);
+ smcinvoke_cdev.owner = THIS_MODULE;
+
+ rc = cdev_add(&smcinvoke_cdev, MKDEV(MAJOR(smcinvoke_device_no), 0),
+ count);
+ if (rc < 0) {
+ pr_err("cdev_add failed %d for %s\n", rc, SMCINVOKE_DEV);
+ goto exit_destroy_device;
+ }
+ return 0;
+
+exit_destroy_device:
+ device_destroy(driver_class, smcinvoke_device_no);
+exit_destroy_class:
+ class_destroy(driver_class);
+exit_unreg_chrdev_region:
+ unregister_chrdev_region(smcinvoke_device_no, count);
+
+ return rc;
+}
+
+static void __exit smcinvoke_exit(void)
+{
+ int count = 1;
+
+ cdev_del(&smcinvoke_cdev);
+ device_destroy(driver_class, smcinvoke_device_no);
+ class_destroy(driver_class);
+ unregister_chrdev_region(smcinvoke_device_no, count);
+}
+device_initcall(smcinvoke_init);
+module_exit(smcinvoke_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SMC Invoke driver");
diff --git a/drivers/soc/qcom/smcinvoke_object.h b/drivers/soc/qcom/smcinvoke_object.h
new file mode 100644
index 0000000..670b425
--- /dev/null
+++ b/drivers/soc/qcom/smcinvoke_object.h
@@ -0,0 +1,51 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __SMCINVOKE_OBJECT_H
+#define __SMCINVOKE_OBJECT_H
+
+#include <linux/types.h>
+
+#define object_op_METHOD_MASK ((uint32_t)0x0000FFFFu)
+#define object_op_RELEASE (object_op_METHOD_MASK - 0)
+#define object_op_RETAIN (object_op_METHOD_MASK - 1)
+
+#define object_counts_max_BI 0xF
+#define object_counts_max_BO 0xF
+#define object_counts_max_OI 0xF
+#define object_counts_max_OO 0xF
+
+/* unpack counts */
+
+#define object_counts_num_BI(k) ((size_t) (((k) >> 0) & object_counts_max_BI))
+#define object_counts_num_BO(k) ((size_t) (((k) >> 4) & object_counts_max_BO))
+#define object_counts_num_OI(k) ((size_t) (((k) >> 8) & object_counts_max_OI))
+#define object_counts_num_OO(k) ((size_t) (((k) >> 12) & object_counts_max_OO))
+#define object_counts_num_buffers(k) \
+ (object_counts_num_BI(k) + object_counts_num_BO(k))
+
+#define object_counts_num_objects(k) \
+ (object_counts_num_OI(k) + object_counts_num_OO(k))
+
+/* Indices into args[] */
+
+#define object_counts_index_BI(k) 0
+#define object_counts_index_BO(k) \
+ (object_counts_index_BI(k) + object_counts_num_BI(k))
+#define object_counts_index_OI(k) \
+ (object_counts_index_BO(k) + object_counts_num_BO(k))
+#define object_counts_index_OO(k) \
+ (object_counts_index_OI(k) + object_counts_num_OI(k))
+#define object_counts_total(k) \
+ (object_counts_index_OO(k) + object_counts_num_OO(k))
+
+
+#endif /* __SMCINVOKE_OBJECT_H */
diff --git a/drivers/soc/qcom/smp2p_sleepstate.c b/drivers/soc/qcom/smp2p_sleepstate.c
index 44192ff..9c764aa 100644
--- a/drivers/soc/qcom/smp2p_sleepstate.c
+++ b/drivers/soc/qcom/smp2p_sleepstate.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,6 +14,8 @@
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/suspend.h>
+#include <linux/delay.h>
+#include <linux/ipc_router.h>
#include "smp2p_private.h"
#define SET_DELAY (2 * HZ)
@@ -35,10 +37,14 @@
switch (event) {
case PM_SUSPEND_PREPARE:
gpio_set_value(slst_gpio_base_id + PROC_AWAKE_ID, 0);
+ msleep(25); /* To be tuned based on SMP2P latencies */
+ msm_ipc_router_set_ws_allowed(true);
break;
case PM_POST_SUSPEND:
gpio_set_value(slst_gpio_base_id + PROC_AWAKE_ID, 1);
+ msleep(25); /* To be tuned based on SMP2P latencies */
+ msm_ipc_router_set_ws_allowed(false);
break;
}
return NOTIFY_DONE;
@@ -46,6 +52,7 @@
static struct notifier_block sleepstate_pm_nb = {
.notifier_call = sleepstate_pm_notifier,
+ .priority = INT_MAX,
};
static int smp2p_sleepstate_probe(struct platform_device *pdev)
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index c252040..c69429c 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -47,6 +47,7 @@
#define SMEM_IMAGE_VERSION_OEM_OFFSET 96
#define SMEM_IMAGE_VERSION_PARTITION_APPS 10
+static DECLARE_RWSEM(current_image_rwsem);
enum {
HW_PLATFORM_UNKNOWN = 0,
HW_PLATFORM_SURF = 1,
@@ -591,10 +592,11 @@
}
EXPORT_SYMBOL_GPL(socinfo_get_id);
-static char *socinfo_get_id_string(void)
+char *socinfo_get_id_string(void)
{
return (socinfo) ? cpu_of_id[socinfo->v0_1.id].soc_id_string : NULL;
}
+EXPORT_SYMBOL(socinfo_get_id_string);
uint32_t socinfo_get_version(void)
{
@@ -1047,7 +1049,9 @@
pr_err("Failed to get image version base address");
return snprintf(buf, SMEM_IMAGE_VERSION_NAME_SIZE, "Unknown");
}
+ down_read(&current_image_rwsem);
string_address += current_image * SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE;
+ up_read(&current_image_rwsem);
return snprintf(buf, SMEM_IMAGE_VERSION_NAME_SIZE, "%-.75s\n",
string_address);
}
@@ -1060,14 +1064,19 @@
{
char *store_address;
- if (current_image != SMEM_IMAGE_VERSION_PARTITION_APPS)
+ down_read(&current_image_rwsem);
+ if (current_image != SMEM_IMAGE_VERSION_PARTITION_APPS) {
+ up_read(&current_image_rwsem);
return count;
+ }
store_address = socinfo_get_image_version_base_address();
if (IS_ERR_OR_NULL(store_address)) {
pr_err("Failed to get image version base address");
+ up_read(&current_image_rwsem);
return count;
}
store_address += current_image * SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE;
+ up_read(&current_image_rwsem);
snprintf(store_address, SMEM_IMAGE_VERSION_NAME_SIZE, "%-.75s", buf);
return count;
}
@@ -1085,7 +1094,9 @@
return snprintf(buf, SMEM_IMAGE_VERSION_VARIANT_SIZE,
"Unknown");
}
+ down_read(&current_image_rwsem);
string_address += current_image * SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE;
+ up_read(&current_image_rwsem);
string_address += SMEM_IMAGE_VERSION_VARIANT_OFFSET;
return snprintf(buf, SMEM_IMAGE_VERSION_VARIANT_SIZE, "%-.20s\n",
string_address);
@@ -1099,14 +1110,19 @@
{
char *store_address;
- if (current_image != SMEM_IMAGE_VERSION_PARTITION_APPS)
+ down_read(&current_image_rwsem);
+ if (current_image != SMEM_IMAGE_VERSION_PARTITION_APPS) {
+ up_read(&current_image_rwsem);
return count;
+ }
store_address = socinfo_get_image_version_base_address();
if (IS_ERR_OR_NULL(store_address)) {
pr_err("Failed to get image version base address");
+ up_read(&current_image_rwsem);
return count;
}
store_address += current_image * SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE;
+ up_read(&current_image_rwsem);
store_address += SMEM_IMAGE_VERSION_VARIANT_OFFSET;
snprintf(store_address, SMEM_IMAGE_VERSION_VARIANT_SIZE, "%-.20s", buf);
return count;
@@ -1124,7 +1140,9 @@
pr_err("Failed to get image version base address");
return snprintf(buf, SMEM_IMAGE_VERSION_OEM_SIZE, "Unknown");
}
+ down_read(&current_image_rwsem);
string_address += current_image * SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE;
+ up_read(&current_image_rwsem);
string_address += SMEM_IMAGE_VERSION_OEM_OFFSET;
return snprintf(buf, SMEM_IMAGE_VERSION_OEM_SIZE, "%-.32s\n",
string_address);
@@ -1138,14 +1156,19 @@
{
char *store_address;
- if (current_image != SMEM_IMAGE_VERSION_PARTITION_APPS)
+ down_read(&current_image_rwsem);
+ if (current_image != SMEM_IMAGE_VERSION_PARTITION_APPS) {
+ up_read(&current_image_rwsem);
return count;
+ }
store_address = socinfo_get_image_version_base_address();
if (IS_ERR_OR_NULL(store_address)) {
pr_err("Failed to get image version base address");
+ up_read(&current_image_rwsem);
return count;
}
store_address += current_image * SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE;
+ up_read(&current_image_rwsem);
store_address += SMEM_IMAGE_VERSION_OEM_OFFSET;
snprintf(store_address, SMEM_IMAGE_VERSION_OEM_SIZE, "%-.32s", buf);
return count;
@@ -1156,8 +1179,14 @@
struct device_attribute *attr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n",
+ int ret;
+
+ down_read(&current_image_rwsem);
+ ret = snprintf(buf, PAGE_SIZE, "%d\n",
current_image);
+ up_read(&current_image_rwsem);
+ return ret;
+
}
static ssize_t
@@ -1169,10 +1198,12 @@
ret = kstrtoint(buf, 10, &digit);
if (ret)
return ret;
+ down_write(&current_image_rwsem);
if (digit >= 0 && digit < SMEM_IMAGE_VERSION_BLOCKS_COUNT)
current_image = digit;
else
current_image = 0;
+ up_write(&current_image_rwsem);
return count;
}
diff --git a/drivers/soc/qcom/spcom.c b/drivers/soc/qcom/spcom.c
index 1c7c4a1..806b756 100644
--- a/drivers/soc/qcom/spcom.c
+++ b/drivers/soc/qcom/spcom.c
@@ -93,8 +93,6 @@
/* SPCOM driver name */
#define DEVICE_NAME "spcom"
-#define SPCOM_MAX_CHANNELS 0x20
-
/* maximum ION buffers should be >= SPCOM_MAX_CHANNELS */
#define SPCOM_MAX_ION_BUF_PER_CH (SPCOM_MAX_CHANNELS + 4)
@@ -195,6 +193,7 @@
* glink state: CONNECTED / LOCAL_DISCONNECTED, REMOTE_DISCONNECTED
*/
unsigned int glink_state;
+ bool is_closing;
/* Events notification */
struct completion connect;
@@ -244,7 +243,7 @@
int channel_count;
/* private */
- struct mutex lock;
+ struct mutex cmd_lock;
/* Link state */
struct completion link_state_changed;
@@ -483,7 +482,17 @@
switch (event) {
case GLINK_CONNECTED:
pr_debug("GLINK_CONNECTED, ch name [%s].\n", ch->name);
+ mutex_lock(&ch->lock);
+
+ if (ch->is_closing) {
+ pr_err("Unexpected CONNECTED while closing [%s].\n",
+ ch->name);
+ mutex_unlock(&ch->lock);
+ return;
+ }
+
ch->glink_state = event;
+
/*
* if spcom_notify_state() is called within glink_open()
* then ch->glink_handle is not updated yet.
@@ -493,17 +502,28 @@
ch->glink_handle = handle;
}
- /* prepare default rx buffer after connected */
+ /* signal before unlock mutex & before calling glink */
+ complete_all(&ch->connect);
+
+ /*
+ * Prepare default rx buffer.
+ * glink_queue_rx_intent() can be called only AFTER connected.
+ * We do it here, ASAP, to allow rx data.
+ */
+
+ pr_debug("call glink_queue_rx_intent() ch [%s].\n", ch->name);
ret = glink_queue_rx_intent(ch->glink_handle,
ch, ch->rx_buf_size);
if (ret) {
pr_err("glink_queue_rx_intent() err [%d]\n", ret);
} else {
- pr_debug("rx buf is ready, size [%zu].\n",
+ pr_debug("rx buf is ready, size [%zu]\n",
ch->rx_buf_size);
ch->rx_buf_ready = true;
}
- complete_all(&ch->connect);
+
+ pr_debug("GLINK_CONNECTED, ch name [%s] done.\n", ch->name);
+ mutex_unlock(&ch->lock);
break;
case GLINK_LOCAL_DISCONNECTED:
/*
@@ -556,9 +576,7 @@
static bool spcom_notify_rx_intent_req(void *handle, const void *priv,
size_t req_size)
{
- struct spcom_channel *ch = (struct spcom_channel *) priv;
-
- pr_err("Unexpected intent request for ch [%s].\n", ch->name);
+ pr_err("Unexpected intent request\n");
return false;
}
@@ -670,6 +688,13 @@
ch->glink_state = GLINK_LOCAL_DISCONNECTED;
ch->actual_rx_size = 0;
ch->rx_buf_size = SPCOM_RX_BUF_SIZE;
+ ch->is_closing = false;
+ ch->glink_handle = NULL;
+ ch->ref_count = 0;
+ ch->rx_abort = false;
+ ch->tx_abort = false;
+ ch->txn_id = INITIAL_TXN_ID; /* use non-zero nonce for debug */
+ ch->pid = 0;
return 0;
}
@@ -739,6 +764,8 @@
/* init completion before calling glink_open() */
reinit_completion(&ch->connect);
+ ch->is_closing = false;
+
handle = glink_open(&cfg);
if (IS_ERR_OR_NULL(handle)) {
pr_err("glink_open failed.\n");
@@ -753,6 +780,8 @@
ch->pid = current_pid();
ch->txn_id = INITIAL_TXN_ID;
+ mutex_unlock(&ch->lock);
+
pr_debug("Wait for connection on channel [%s] timeout_msec [%d].\n",
name, timeout_msec);
@@ -769,8 +798,6 @@
pr_debug("Channel [%s] opened, no timeout.\n", name);
}
- mutex_unlock(&ch->lock);
-
return 0;
exit_err:
mutex_unlock(&ch->lock);
@@ -797,6 +824,8 @@
return 0;
}
+ ch->is_closing = true;
+
ret = glink_close(ch->glink_handle);
if (ret)
pr_err("glink_close() fail, ret [%d].\n", ret);
@@ -812,6 +841,7 @@
ch->pid = 0;
pr_debug("Channel closed [%s].\n", ch->name);
+
mutex_unlock(&ch->lock);
return 0;
@@ -1090,12 +1120,12 @@
if (!spcom_is_ready()) {
pr_err("spcom is not ready.\n");
- return NULL;
+ return NULL;
}
if (!info) {
pr_err("Invalid parameter.\n");
- return NULL;
+ return NULL;
}
name = info->ch_name;
@@ -1924,18 +1954,6 @@
}
/**
- * spcom_handle_fake_ssr_command() - Handle fake ssr command from user space.
- */
-static int spcom_handle_fake_ssr_command(struct spcom_channel *ch, int arg)
-{
- pr_debug("Start Fake glink SSR subsystem [%s].\n", spcom_edge);
- glink_ssr(spcom_edge);
- pr_debug("Fake glink SSR subsystem [%s] done.\n", spcom_edge);
-
- return 0;
-}
-
-/**
* spcom_handle_write() - Handle user space write commands.
*
* @buf: command buffer.
@@ -1964,6 +1982,8 @@
swap_id = htonl(cmd->cmd_id);
memcpy(cmd_name, &swap_id, sizeof(int));
+ mutex_lock(&spcom_dev->cmd_lock);
+
pr_debug("cmd_id [0x%x] cmd_name [%s].\n", cmd_id, cmd_name);
switch (cmd_id) {
@@ -1979,17 +1999,16 @@
case SPCOM_CMD_UNLOCK_ION_BUF:
ret = spcom_handle_unlock_ion_buf_command(ch, buf, buf_size);
break;
- case SPCOM_CMD_FSSR:
- ret = spcom_handle_fake_ssr_command(ch, cmd->arg);
- break;
case SPCOM_CMD_CREATE_CHANNEL:
ret = spcom_handle_create_channel_command(buf, buf_size);
break;
default:
pr_err("Invalid Command Id [0x%x].\n", (int) cmd->cmd_id);
- return -EINVAL;
+ ret = -EINVAL;
}
+ mutex_unlock(&spcom_dev->cmd_lock);
+
return ret;
}
@@ -2690,7 +2709,7 @@
return -ENOMEM;
spcom_dev = dev;
- mutex_init(&dev->lock);
+ mutex_init(&spcom_dev->cmd_lock);
init_completion(&dev->link_state_changed);
spcom_dev->link_state = GLINK_LINK_STATE_DOWN;
@@ -2763,7 +2782,7 @@
{
int ret;
- pr_info("spcom driver Ver 1.0 23-Nov-2015.\n");
+ pr_info("spcom driver version 1.1 17-July-2017.\n");
ret = platform_driver_register(&spcom_driver);
if (ret)
diff --git a/drivers/soc/qcom/subsys-pil-tz.c b/drivers/soc/qcom/subsys-pil-tz.c
index f8f6829..01eb260 100644
--- a/drivers/soc/qcom/subsys-pil-tz.c
+++ b/drivers/soc/qcom/subsys-pil-tz.c
@@ -795,6 +795,33 @@
return rc;
}
+static int pil_deinit_image_trusted(struct pil_desc *pil)
+{
+ struct pil_tz_data *d = desc_to_data(pil);
+ u32 proc, scm_ret = 0;
+ int rc;
+ struct scm_desc desc = {0};
+
+ if (d->subsys_desc.no_auth)
+ return 0;
+
+ desc.args[0] = proc = d->pas_id;
+ desc.arginfo = SCM_ARGS(1);
+
+ if (!is_scm_armv8()) {
+ rc = scm_call(SCM_SVC_PIL, PAS_SHUTDOWN_CMD, &proc,
+ sizeof(proc), &scm_ret, sizeof(scm_ret));
+ } else {
+ rc = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL, PAS_SHUTDOWN_CMD),
+ &desc);
+ scm_ret = desc.ret[0];
+ }
+
+ if (rc)
+ return rc;
+ return scm_ret;
+}
+
static struct pil_reset_ops pil_ops_trusted = {
.init_image = pil_init_image_trusted,
.mem_setup = pil_mem_setup_trusted,
@@ -802,6 +829,7 @@
.shutdown = pil_shutdown_trusted,
.proxy_vote = pil_make_proxy_vote,
.proxy_unvote = pil_remove_proxy_vote,
+ .deinit_image = pil_deinit_image_trusted,
};
static void log_failure_reason(const struct pil_tz_data *d)
diff --git a/drivers/soc/qcom/wcd-dsp-glink.c b/drivers/soc/qcom/wcd-dsp-glink.c
index c8bb13d..870b9f7 100644
--- a/drivers/soc/qcom/wcd-dsp-glink.c
+++ b/drivers/soc/qcom/wcd-dsp-glink.c
@@ -58,6 +58,7 @@
struct wdsp_glink_tx_buf {
struct work_struct tx_work;
+ struct work_struct free_tx_work;
/* Glink channel information */
struct wdsp_glink_ch *ch;
@@ -125,6 +126,46 @@
static int wdsp_glink_open_ch(struct wdsp_glink_ch *ch);
/*
+ * wdsp_glink_free_tx_buf_work - Work function to free tx pkt
+ * work: Work structure
+ */
+static void wdsp_glink_free_tx_buf_work(struct work_struct *work)
+{
+ struct wdsp_glink_tx_buf *tx_buf;
+
+ tx_buf = container_of(work, struct wdsp_glink_tx_buf,
+ free_tx_work);
+ vfree(tx_buf);
+}
+
+/*
+ * wdsp_glink_free_tx_buf - Function to free tx buffer
+ * priv: Pointer to the channel
+ * pkt_priv: Pointer to the tx buffer
+ */
+static void wdsp_glink_free_tx_buf(const void *priv, const void *pkt_priv)
+{
+ struct wdsp_glink_tx_buf *tx_buf = (struct wdsp_glink_tx_buf *)pkt_priv;
+ struct wdsp_glink_priv *wpriv;
+ struct wdsp_glink_ch *ch;
+
+ if (!priv) {
+ pr_err("%s: Invalid priv\n", __func__);
+ return;
+ }
+ if (!tx_buf) {
+ pr_err("%s: Invalid tx_buf\n", __func__);
+ return;
+ }
+
+ ch = (struct wdsp_glink_ch *)priv;
+ wpriv = ch->wpriv;
+ /* Work queue to free tx pkt */
+ INIT_WORK(&tx_buf->free_tx_work, wdsp_glink_free_tx_buf_work);
+ queue_work(wpriv->work_queue, &tx_buf->free_tx_work);
+}
+
+/*
* wdsp_glink_notify_rx - Glink notify rx callback for responses
* handle: Opaque Channel handle returned by GLink
* priv: Private pointer to the channel
@@ -183,14 +224,8 @@
static void wdsp_glink_notify_tx_done(void *handle, const void *priv,
const void *pkt_priv, const void *ptr)
{
- if (!pkt_priv) {
- pr_err("%s: Invalid parameter\n", __func__);
- return;
- }
- /* Free tx pkt */
- vfree(pkt_priv);
+ wdsp_glink_free_tx_buf(priv, pkt_priv);
}
-
/*
* wdsp_glink_notify_tx_abort - Glink notify tx abort callback to
* free tx buffer
@@ -201,12 +236,7 @@
static void wdsp_glink_notify_tx_abort(void *handle, const void *priv,
const void *pkt_priv)
{
- if (!pkt_priv) {
- pr_err("%s: Invalid parameter\n", __func__);
- return;
- }
- /* Free tx pkt */
- vfree(pkt_priv);
+ wdsp_glink_free_tx_buf(priv, pkt_priv);
}
/*
@@ -555,7 +585,7 @@
goto done;
}
ch = kcalloc(no_of_channels, sizeof(struct wdsp_glink_ch *),
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!ch) {
ret = -ENOMEM;
goto done;
diff --git a/drivers/soundwire/Kconfig b/drivers/soundwire/Kconfig
index a4c2f0c..e1ca532 100644
--- a/drivers/soundwire/Kconfig
+++ b/drivers/soundwire/Kconfig
@@ -9,7 +9,7 @@
if SOUNDWIRE
config SOUNDWIRE_WCD_CTRL
- depends on WCD9335_CODEC || WCD934X_CODEC
+ depends on WCD9XXX_CODEC_CORE
tristate "QTI WCD CODEC Soundwire controller"
default n
help
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index d36c11b..02fb967 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -646,7 +646,7 @@
buf = t->rx_buf;
t->rx_dma = dma_map_single(&spi->dev, buf,
t->len, DMA_FROM_DEVICE);
- if (!t->rx_dma) {
+ if (dma_mapping_error(&spi->dev, t->rx_dma)) {
ret = -EFAULT;
goto err_rx_map;
}
@@ -660,7 +660,7 @@
buf = (void *)t->tx_buf;
t->tx_dma = dma_map_single(&spi->dev, buf,
t->len, DMA_TO_DEVICE);
- if (!t->tx_dma) {
+ if (dma_mapping_error(&spi->dev, t->tx_dma)) {
ret = -EFAULT;
goto err_tx_map;
}
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index ad3eb187..37766d29 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -23,7 +23,6 @@
#define SPI_NUM_CHIPSELECT (4)
#define SPI_XFER_TIMEOUT_MS (250)
-#define SPI_OVERSAMPLING (2)
/* SPI SE specific registers */
#define SE_SPI_CPHA (0x224)
#define SE_SPI_LOOPBACK (0x22C)
@@ -100,6 +99,7 @@
struct spi_transfer *cur_xfer;
struct completion xfer_done;
struct device *wrapper_dev;
+ int oversampling;
};
static struct spi_master *get_spi_master(struct device *dev)
@@ -123,7 +123,8 @@
clk_sel &= ~CLK_SEL_MSK;
m_clk_cfg &= ~CLK_DIV_MSK;
- ret = geni_se_clk_freq_match(&mas->spi_rsc, speed_hz, &idx,
+ ret = geni_se_clk_freq_match(&mas->spi_rsc,
+ (speed_hz * mas->oversampling), &idx,
&sclk_freq, true);
if (ret) {
dev_err(mas->dev, "%s: Failed(%d) to find src clk for 0x%x\n",
@@ -131,17 +132,23 @@
return ret;
}
- div = ((sclk_freq / SPI_OVERSAMPLING) / speed_hz);
- if (!div)
+ div = ((sclk_freq / mas->oversampling) / speed_hz);
+ if (!div) {
+ dev_err(mas->dev, "%s:Err:sclk:%lu oversampling:%d speed:%u\n",
+ __func__, sclk_freq, mas->oversampling, speed_hz);
return -EINVAL;
+ }
dev_dbg(mas->dev, "%s: req %u sclk %lu, idx %d, div %d\n", __func__,
speed_hz, sclk_freq, idx, div);
clk_sel |= (idx & CLK_SEL_MSK);
m_clk_cfg |= ((div << CLK_DIV_SHFT) | SER_CLK_EN);
ret = clk_set_rate(rsc->se_clk, sclk_freq);
- if (ret)
+ if (ret) {
+ dev_err(mas->dev, "%s: clk_set_rate failed %d\n",
+ __func__, ret);
return ret;
+ }
geni_write_reg(clk_sel, mas->base, SE_GENI_CLK_SEL);
geni_write_reg(m_clk_cfg, mas->base, GENI_SER_M_CLK_CFG);
@@ -238,6 +245,10 @@
if (unlikely(!mas->setup)) {
int proto = get_se_proto(mas->base);
+ unsigned int major;
+ unsigned int minor;
+ unsigned int step;
+ int hw_ver;
if (unlikely(proto != SPI)) {
dev_err(mas->dev, "Invalid proto %d\n", proto);
@@ -248,12 +259,24 @@
mas->tx_fifo_depth = get_tx_fifo_depth(mas->base);
mas->rx_fifo_depth = get_rx_fifo_depth(mas->base);
mas->tx_fifo_width = get_tx_fifo_width(mas->base);
+ mas->oversampling = 1;
/* Transmit an entire FIFO worth of data per IRQ */
mas->tx_wm = 1;
dev_dbg(mas->dev, "tx_fifo %d rx_fifo %d tx_width %d\n",
mas->tx_fifo_depth, mas->rx_fifo_depth,
mas->tx_fifo_width);
mas->setup = true;
+ hw_ver = geni_se_qupv3_hw_version(mas->wrapper_dev, &major,
+ &minor, &step);
+ if (hw_ver)
+ dev_err(mas->dev, "%s:Err getting HW version %d\n",
+ __func__, hw_ver);
+ else {
+ dev_dbg(mas->dev, "%s:Major:%d Minor:%d step:%d\n",
+ __func__, major, minor, step);
+ if ((major == 1) && (minor == 0))
+ mas->oversampling = 2;
+ }
}
exit_prepare_transfer_hardware:
return ret;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 838783c..6db8063 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -621,8 +621,10 @@
if (!spi)
return;
- if (spi->dev.of_node)
+ if (spi->dev.of_node) {
of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
+ of_node_put(spi->dev.of_node);
+ }
if (ACPI_COMPANION(&spi->dev))
acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
device_unregister(&spi->dev);
@@ -797,12 +799,12 @@
if (master->dma_tx)
tx_dev = master->dma_tx->device->dev;
else
- tx_dev = &master->dev;
+ tx_dev = master->dev.parent;
if (master->dma_rx)
rx_dev = master->dma_rx->device->dev;
else
- rx_dev = &master->dev;
+ rx_dev = master->dev.parent;
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
if (!master->can_dma(master, msg->spi, xfer))
@@ -844,12 +846,12 @@
if (master->dma_tx)
tx_dev = master->dma_tx->device->dev;
else
- tx_dev = &master->dev;
+ tx_dev = master->dev.parent;
if (master->dma_rx)
rx_dev = master->dma_rx->device->dev;
else
- rx_dev = &master->dev;
+ rx_dev = master->dev.parent;
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
if (!master->can_dma(master, msg->spi, xfer))
@@ -1004,7 +1006,7 @@
ret = 0;
ms = 8LL * 1000LL * xfer->len;
do_div(ms, xfer->speed_hz);
- ms += ms + 100; /* some tolerance */
+ ms += ms + 200; /* some tolerance */
if (ms > UINT_MAX)
ms = UINT_MAX;
@@ -1589,11 +1591,13 @@
if (rc) {
dev_err(&master->dev, "spi_device register error %s\n",
nc->full_name);
- goto err_out;
+ goto err_of_node_put;
}
return spi;
+err_of_node_put:
+ of_node_put(nc);
err_out:
spi_dev_put(spi);
return ERR_PTR(rc);
diff --git a/drivers/spmi/spmi-pmic-arb-debug.c b/drivers/spmi/spmi-pmic-arb-debug.c
index c5a31a9..2c90bef 100644
--- a/drivers/spmi/spmi-pmic-arb-debug.c
+++ b/drivers/spmi/spmi-pmic-arb-debug.c
@@ -11,6 +11,7 @@
* GNU General Public License for more details.
*/
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
@@ -69,6 +70,7 @@
struct spmi_pmic_arb_debug {
void __iomem *addr;
raw_spinlock_t lock;
+ struct clk *clock;
};
static inline void pmic_arb_debug_write(struct spmi_pmic_arb_debug *pa,
@@ -181,6 +183,12 @@
else
return -EINVAL;
+ rc = clk_prepare_enable(pa->clock);
+ if (rc) {
+ pr_err("%s: failed to enable core clock, rc=%d\n",
+ __func__, rc);
+ return rc;
+ }
raw_spin_lock_irqsave(&pa->lock, flags);
rc = pmic_arb_debug_issue_command(ctrl, opc, sid, addr, len);
@@ -192,6 +200,7 @@
buf[i] = pmic_arb_debug_read(pa, PMIC_ARB_DEBUG_RDATA(i));
done:
raw_spin_unlock_irqrestore(&pa->lock, flags);
+ clk_disable_unprepare(pa->clock);
return rc;
}
@@ -221,6 +230,12 @@
else
return -EINVAL;
+ rc = clk_prepare_enable(pa->clock);
+ if (rc) {
+ pr_err("%s: failed to enable core clock, rc=%d\n",
+ __func__, rc);
+ return rc;
+ }
raw_spin_lock_irqsave(&pa->lock, flags);
/* Write data to FIFO */
@@ -230,6 +245,7 @@
rc = pmic_arb_debug_issue_command(ctrl, opc, sid, addr, len);
raw_spin_unlock_irqrestore(&pa->lock, flags);
+ clk_disable_unprepare(pa->clock);
return rc;
}
@@ -293,6 +309,17 @@
goto err_put_ctrl;
}
+ if (of_find_property(pdev->dev.of_node, "clock-names", NULL)) {
+ pa->clock = devm_clk_get(&pdev->dev, "core_clk");
+ if (IS_ERR(pa->clock)) {
+ rc = PTR_ERR(pa->clock);
+ if (rc != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "unable to request core clock, rc=%d\n",
+ rc);
+ goto err_put_ctrl;
+ }
+ }
+
platform_set_drvdata(pdev, ctrl);
raw_spin_lock_init(&pa->lock);
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index 9cc85ee..bfd4b7a 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -576,10 +576,16 @@
int last = pa->max_apid >> 5;
u32 status, enable;
int i, id, apid;
+ /* status based dispatch */
+ bool acc_valid = false;
+ u32 irq_status = 0;
for (i = first; i <= last; ++i) {
status = readl_relaxed(pa->acc_status +
pa->ver_ops->owner_acc_status(pa->ee, i));
+ if (status)
+ acc_valid = true;
+
while (status) {
id = ffs(status) - 1;
status &= ~BIT(id);
@@ -595,6 +601,28 @@
periph_interrupt(pa, apid, show);
}
}
+
+ /* ACC_STATUS is empty but IRQ fired check IRQ_STATUS */
+ if (!acc_valid) {
+ for (i = pa->min_apid; i <= pa->max_apid; i++) {
+ /* skip if APPS is not irq owner */
+ if (pa->apid_data[i].irq_owner != pa->ee)
+ continue;
+
+ irq_status = readl_relaxed(pa->intr +
+ pa->ver_ops->irq_status(i));
+ if (irq_status) {
+ enable = readl_relaxed(pa->intr +
+ pa->ver_ops->acc_enable(i));
+ if (enable & SPMI_PIC_ACC_ENABLE_BIT) {
+ dev_dbg(&pa->spmic->dev,
+ "Dispatching IRQ for apid=%d status=%x\n",
+ i, irq_status);
+ periph_interrupt(pa, i, show);
+ }
+ }
+ }
+ }
}
static void pmic_arb_chained_irq(struct irq_desc *desc)
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
index a8107cc..df8900a 100644
--- a/drivers/spmi/spmi.c
+++ b/drivers/spmi/spmi.c
@@ -365,11 +365,23 @@
return 0;
}
+static int spmi_drv_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ int ret;
+
+ ret = of_device_uevent_modalias(dev, env);
+ if (ret != -ENODEV)
+ return ret;
+
+ return 0;
+}
+
static struct bus_type spmi_bus_type = {
.name = "spmi",
.match = spmi_device_match,
.probe = spmi_drv_probe,
.remove = spmi_drv_remove,
+ .uevent = spmi_drv_uevent,
};
/**
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger.c b/drivers/staging/android/fiq_debugger/fiq_debugger.c
index 675b974..ce9dc7e 100644
--- a/drivers/staging/android/fiq_debugger/fiq_debugger.c
+++ b/drivers/staging/android/fiq_debugger/fiq_debugger.c
@@ -30,6 +30,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
+#include <linux/sysrq.h>
#include <linux/timer.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
@@ -400,7 +401,7 @@
cmd += 6;
while (*cmd == ' ')
cmd++;
- if (cmd != '\0')
+ if ((cmd != '\0') && sysrq_on())
kernel_restart(cmd);
else
kernel_restart(NULL);
@@ -430,29 +431,39 @@
static void fiq_debugger_help(struct fiq_debugger_state *state)
{
fiq_debugger_printf(&state->output,
- "FIQ Debugger commands:\n"
- " pc PC status\n"
- " regs Register dump\n"
- " allregs Extended Register dump\n"
- " bt Stack trace\n"
- " reboot [<c>] Reboot with command <c>\n"
- " reset [<c>] Hard reset with command <c>\n"
- " irqs Interupt status\n"
- " kmsg Kernel log\n"
- " version Kernel version\n");
+ "FIQ Debugger commands:\n");
+ if (sysrq_on()) {
+ fiq_debugger_printf(&state->output,
+ " pc PC status\n"
+ " regs Register dump\n"
+ " allregs Extended Register dump\n"
+ " bt Stack trace\n");
+ fiq_debugger_printf(&state->output,
+ " reboot [<c>] Reboot with command <c>\n"
+ " reset [<c>] Hard reset with command <c>\n"
+ " irqs Interrupt status\n"
+ " kmsg Kernel log\n"
+ " version Kernel version\n");
+ fiq_debugger_printf(&state->output,
+ " cpu Current CPU\n"
+ " cpu <number> Switch to CPU<number>\n"
+ " sysrq sysrq options\n"
+ " sysrq <param> Execute sysrq with <param>\n");
+ } else {
+ fiq_debugger_printf(&state->output,
+ " reboot Reboot\n"
+ " reset Hard reset\n"
+ " irqs Interrupt status\n");
+ }
fiq_debugger_printf(&state->output,
- " sleep Allow sleep while in FIQ\n"
- " nosleep Disable sleep while in FIQ\n"
- " console Switch terminal to console\n"
- " cpu Current CPU\n"
- " cpu <number> Switch to CPU<number>\n");
- fiq_debugger_printf(&state->output,
- " ps Process list\n"
- " sysrq sysrq options\n"
- " sysrq <param> Execute sysrq with <param>\n");
+ " sleep Allow sleep while in FIQ\n"
+ " nosleep Disable sleep while in FIQ\n"
+ " console Switch terminal to console\n"
+ " ps Process list\n");
#ifdef CONFIG_KGDB
- fiq_debugger_printf(&state->output,
- " kgdb Enter kernel debugger\n");
+ if (fiq_kgdb_enable) {
+ fiq_debugger_printf(&state->output,
+ " kgdb Enter kernel debugger\n");
+ }
#endif
}
@@ -484,18 +495,23 @@
if (!strcmp(cmd, "help") || !strcmp(cmd, "?")) {
fiq_debugger_help(state);
} else if (!strcmp(cmd, "pc")) {
- fiq_debugger_dump_pc(&state->output, regs);
+ if (sysrq_on())
+ fiq_debugger_dump_pc(&state->output, regs);
} else if (!strcmp(cmd, "regs")) {
- fiq_debugger_dump_regs(&state->output, regs);
+ if (sysrq_on())
+ fiq_debugger_dump_regs(&state->output, regs);
} else if (!strcmp(cmd, "allregs")) {
- fiq_debugger_dump_allregs(&state->output, regs);
+ if (sysrq_on())
+ fiq_debugger_dump_allregs(&state->output, regs);
} else if (!strcmp(cmd, "bt")) {
- fiq_debugger_dump_stacktrace(&state->output, regs, 100, svc_sp);
+ if (sysrq_on())
+ fiq_debugger_dump_stacktrace(&state->output, regs,
+ 100, svc_sp);
} else if (!strncmp(cmd, "reset", 5)) {
cmd += 5;
while (*cmd == ' ')
cmd++;
- if (*cmd) {
+ if (*cmd && sysrq_on()) {
char tmp_cmd[32];
strlcpy(tmp_cmd, cmd, sizeof(tmp_cmd));
machine_restart(tmp_cmd);
@@ -505,9 +521,12 @@
} else if (!strcmp(cmd, "irqs")) {
fiq_debugger_dump_irqs(state);
} else if (!strcmp(cmd, "kmsg")) {
- fiq_debugger_dump_kernel_log(state);
+ if (sysrq_on())
+ fiq_debugger_dump_kernel_log(state);
} else if (!strcmp(cmd, "version")) {
- fiq_debugger_printf(&state->output, "%s\n", linux_banner);
+ if (sysrq_on())
+ fiq_debugger_printf(&state->output, "%s\n",
+ linux_banner);
} else if (!strcmp(cmd, "sleep")) {
state->no_sleep = false;
fiq_debugger_printf(&state->output, "enabling sleep\n");
@@ -519,14 +538,17 @@
fiq_debugger_uart_flush(state);
state->console_enable = true;
} else if (!strcmp(cmd, "cpu")) {
- fiq_debugger_printf(&state->output, "cpu %d\n", state->current_cpu);
- } else if (!strncmp(cmd, "cpu ", 4)) {
+ if (sysrq_on())
+ fiq_debugger_printf(&state->output, "cpu %d\n",
+ state->current_cpu);
+ } else if (!strncmp(cmd, "cpu ", 4) && sysrq_on()) {
unsigned long cpu = 0;
if (kstrtoul(cmd + 4, 10, &cpu) == 0)
fiq_debugger_switch_cpu(state, cpu);
else
fiq_debugger_printf(&state->output, "invalid cpu\n");
- fiq_debugger_printf(&state->output, "cpu %d\n", state->current_cpu);
+ fiq_debugger_printf(&state->output, "cpu %d\n",
+ state->current_cpu);
} else {
if (state->debug_busy) {
fiq_debugger_printf(&state->output,
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 9ea4a9f..168383ec 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -3,7 +3,7 @@
* drivers/staging/android/ion/ion.c
*
* Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -16,6 +16,7 @@
*
*/
+#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/freezer.h>
@@ -76,7 +77,7 @@
* @dev: backpointer to ion device
* @handles: an rb tree of all the handles in this client
* @idr: an idr space for allocating handle ids
- * @lock: lock protecting the tree of handles
+ * @lock: lock protecting the tree of handles and idr
* @name: used for debugging
* @display_name: used for debugging (unique version of @name)
* @display_serial: used for debugging (to make display_name unique)
@@ -91,7 +92,6 @@
struct ion_device *dev;
struct rb_root handles;
struct idr idr;
- /* Protects idr */
struct mutex lock;
char *name;
char *display_name;
@@ -265,7 +265,7 @@
mutex_lock(&dev->buffer_lock);
ion_buffer_add(dev, buffer);
mutex_unlock(&dev->buffer_lock);
- atomic_add(len, &heap->total_allocated);
+ atomic_long_add(len, &heap->total_allocated);
return buffer;
err:
@@ -283,7 +283,7 @@
buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
buffer->heap->ops->unmap_dma(buffer->heap, buffer);
- atomic_sub(buffer->size, &buffer->heap->total_allocated);
+ atomic_long_sub(buffer->size, &buffer->heap->total_allocated);
buffer->heap->ops->free(buffer);
vfree(buffer->pages);
kfree(buffer);
@@ -321,7 +321,7 @@
{
mutex_lock(&buffer->lock);
if (buffer->handle_count == 0)
- atomic_add(buffer->size, &buffer->heap->total_handles);
+ atomic_long_add(buffer->size, &buffer->heap->total_handles);
buffer->handle_count++;
mutex_unlock(&buffer->lock);
@@ -347,7 +347,7 @@
task = current->group_leader;
get_task_comm(buffer->task_comm, task);
buffer->pid = task_pid_nr(task);
- atomic_sub(buffer->size, &buffer->heap->total_handles);
+ atomic_long_sub(buffer->size, &buffer->heap->total_handles);
}
mutex_unlock(&buffer->lock);
}
@@ -403,6 +403,15 @@
kref_get(&handle->ref);
}
+/* Must hold the client lock */
+static struct ion_handle* ion_handle_get_check_overflow(struct ion_handle *handle)
+{
+ if (atomic_read(&handle->ref.refcount) + 1 == 0)
+ return ERR_PTR(-EOVERFLOW);
+ ion_handle_get(handle);
+ return handle;
+}
+
static int ion_handle_put_nolock(struct ion_handle *handle)
{
int ret;
@@ -449,9 +458,9 @@
handle = idr_find(&client->idr, id);
if (handle)
- ion_handle_get(handle);
+ return ion_handle_get_check_overflow(handle);
- return handle ? handle : ERR_PTR(-EINVAL);
+ return ERR_PTR(-EINVAL);
}
struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
@@ -671,8 +680,8 @@
mutex_unlock(&client->lock);
return -ENODEV;
}
- mutex_unlock(&client->lock);
ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
+ mutex_unlock(&client->lock);
return ret;
}
EXPORT_SYMBOL(ion_phys);
@@ -777,33 +786,7 @@
}
EXPORT_SYMBOL(ion_unmap_kernel);
-static struct mutex debugfs_mutex;
static struct rb_root *ion_root_client;
-static int is_client_alive(struct ion_client *client)
-{
- struct rb_node *node;
- struct ion_client *tmp;
- struct ion_device *dev;
-
- node = ion_root_client->rb_node;
- dev = container_of(ion_root_client, struct ion_device, clients);
-
- down_read(&dev->lock);
- while (node) {
- tmp = rb_entry(node, struct ion_client, node);
- if (client < tmp) {
- node = node->rb_left;
- } else if (client > tmp) {
- node = node->rb_right;
- } else {
- up_read(&dev->lock);
- return 1;
- }
- }
-
- up_read(&dev->lock);
- return 0;
-}
static int ion_debug_client_show(struct seq_file *s, void *unused)
{
@@ -814,14 +797,6 @@
"heap_name", "size_in_bytes", "handle refcount",
"buffer");
- mutex_lock(&debugfs_mutex);
- if (!is_client_alive(client)) {
- seq_printf(s, "ion_client 0x%p dead, can't dump its buffers\n",
- client);
- mutex_unlock(&debugfs_mutex);
- return 0;
- }
-
mutex_lock(&client->lock);
for (n = rb_first(&client->handles); n; n = rb_next(n)) {
struct ion_handle *handle = rb_entry(n, struct ion_handle,
@@ -836,7 +811,6 @@
seq_puts(s, "\n");
}
mutex_unlock(&client->lock);
- mutex_unlock(&debugfs_mutex);
return 0;
}
@@ -967,27 +941,27 @@
struct rb_node *n;
pr_debug("%s: %d\n", __func__, __LINE__);
- mutex_lock(&debugfs_mutex);
+ down_write(&dev->lock);
+ rb_erase(&client->node, &dev->clients);
+ up_write(&dev->lock);
+
+ /* After this completes, there are no more references to client */
+ debugfs_remove_recursive(client->debug_root);
+
+ mutex_lock(&client->lock);
while ((n = rb_first(&client->handles))) {
struct ion_handle *handle = rb_entry(n, struct ion_handle,
node);
ion_handle_destroy(&handle->ref);
}
+ mutex_unlock(&client->lock);
idr_destroy(&client->idr);
-
- down_write(&dev->lock);
if (client->task)
put_task_struct(client->task);
- rb_erase(&client->node, &dev->clients);
- debugfs_remove_recursive(client->debug_root);
-
- up_write(&dev->lock);
-
kfree(client->display_name);
kfree(client->name);
kfree(client);
- mutex_unlock(&debugfs_mutex);
}
EXPORT_SYMBOL(ion_client_destroy);
@@ -1430,7 +1404,7 @@
/* if a handle exists for this buffer just take a reference to it */
handle = ion_handle_lookup(client, buffer);
if (!IS_ERR(handle)) {
- ion_handle_get(handle);
+ handle = ion_handle_get_check_overflow(handle);
mutex_unlock(&client->lock);
goto end;
}
@@ -1794,7 +1768,7 @@
seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
seq_puts(s, "----------------------------------------------------\n");
- mutex_lock(&debugfs_mutex);
+ down_read(&dev->lock);
for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
struct ion_client *client = rb_entry(n, struct ion_client,
node);
@@ -1813,7 +1787,7 @@
client->pid, size);
}
}
- mutex_unlock(&debugfs_mutex);
+ up_read(&dev->lock);
seq_puts(s, "----------------------------------------------------\n");
seq_puts(s, "orphaned allocations (info is from last known client):\n");
@@ -1874,10 +1848,10 @@
"Total orphaned size");
pr_info("---------------------------------\n");
plist_for_each_entry(heap, &dev->heaps, node) {
- pr_info("%16.s 0x%16.x 0x%16.x\n",
- heap->name, atomic_read(&heap->total_allocated),
- atomic_read(&heap->total_allocated) -
- atomic_read(&heap->total_handles));
+ pr_info("%16.s 0x%16.lx 0x%16.lx\n",
+ heap->name, atomic_long_read(&heap->total_allocated),
+ atomic_long_read(&heap->total_allocated) -
+ atomic_long_read(&heap->total_handles));
if (heap->debug_show)
heap->debug_show(heap, NULL, 0);
}
@@ -2048,7 +2022,6 @@
plist_head_init(&idev->heaps);
idev->clients = RB_ROOT;
ion_root_client = &idev->clients;
- mutex_init(&debugfs_mutex);
return idev;
}
EXPORT_SYMBOL(ion_device_create);
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index 323bb0c..b264ec2 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -4,7 +4,7 @@
* Copyright (C) Linaro 2012
* Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -241,29 +241,46 @@
static void ion_secure_cma_free(struct ion_buffer *buffer)
{
- int ret = 0;
- u32 source_vm;
+ int i, ret = 0;
+ int *source_vm_list;
+ int source_nelems;
int dest_vmid;
int dest_perms;
+ struct sg_table *sgt;
+ struct scatterlist *sg;
struct ion_cma_buffer_info *info = buffer->priv_virt;
- source_vm = get_secure_vmid(buffer->flags);
- if (source_vm < 0) {
- pr_err("%s: Failed to get secure vmid\n", __func__);
+ source_nelems = count_set_bits(buffer->flags & ION_FLAGS_CP_MASK);
+ if (!source_nelems)
return;
+ source_vm_list = kcalloc(source_nelems, sizeof(*source_vm_list),
+ GFP_KERNEL);
+ if (!source_vm_list)
+ return;
+ ret = populate_vm_list(buffer->flags, source_vm_list, source_nelems);
+ if (ret) {
+ pr_err("%s: Failed to get secure vmids\n", __func__);
+ goto out_free_source;
}
+
dest_vmid = VMID_HLOS;
dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
- ret = hyp_assign_table(info->table, &source_vm, 1,
+ sgt = info->table;
+ ret = hyp_assign_table(sgt, source_vm_list, source_nelems,
&dest_vmid, &dest_perms, 1);
if (ret) {
pr_err("%s: Not freeing memory since assign failed\n",
__func__);
- return;
+ goto out_free_source;
}
+ for_each_sg(sgt->sgl, sg, sgt->nents, i)
+ ClearPagePrivate(sg_page(sg));
+
ion_cma_free(buffer);
+out_free_source:
+ kfree(source_vm_list);
}
static int ion_secure_cma_allocate(
@@ -271,42 +288,76 @@
struct ion_buffer *buffer, unsigned long len,
unsigned long align, unsigned long flags)
{
- int ret = 0;
+ int i, ret = 0;
+ int count;
int source_vm;
- int dest_vm;
- int dest_perms;
+ int *dest_vm_list = NULL;
+ int *dest_perms = NULL;
+ int dest_nelems;
struct ion_cma_buffer_info *info;
+ struct sg_table *sgt;
+ struct scatterlist *sg;
source_vm = VMID_HLOS;
- dest_vm = get_secure_vmid(flags);
- if (dest_vm < 0) {
- pr_err("%s: Failed to get secure vmid\n", __func__);
- return -EINVAL;
+ dest_nelems = count_set_bits(flags & ION_FLAGS_CP_MASK);
+ if (!dest_nelems) {
+ ret = -EINVAL;
+ goto out;
+ }
+ dest_vm_list = kcalloc(dest_nelems, sizeof(*dest_vm_list), GFP_KERNEL);
+ if (!dest_vm_list) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ dest_perms = kcalloc(dest_nelems, sizeof(*dest_perms), GFP_KERNEL);
+ if (!dest_perms) {
+ ret = -ENOMEM;
+ goto out_free_dest_vm;
+ }
+ ret = populate_vm_list(flags, dest_vm_list, dest_nelems);
+ if (ret) {
+ pr_err("%s: Failed to get secure vmid(s)\n", __func__);
+ goto out_free_dest;
}
- if (dest_vm == VMID_CP_SEC_DISPLAY)
- dest_perms = PERM_READ;
- else
- dest_perms = PERM_READ | PERM_WRITE;
+ for (count = 0; count < dest_nelems; count++) {
+ if (dest_vm_list[count] == VMID_CP_SEC_DISPLAY)
+ dest_perms[count] = PERM_READ;
+ else
+ dest_perms[count] = PERM_READ | PERM_WRITE;
+ }
ret = ion_cma_allocate(heap, buffer, len, align, flags);
if (ret) {
dev_err(heap->priv, "Unable to allocate cma buffer");
- return ret;
+ goto out_free_dest;
}
info = buffer->priv_virt;
- ret = hyp_assign_table(info->table, &source_vm, 1,
- &dest_vm, &dest_perms, 1);
+ sgt = info->table;
+ ret = hyp_assign_table(sgt, &source_vm, 1, dest_vm_list, dest_perms,
+ dest_nelems);
if (ret) {
pr_err("%s: Assign call failed\n", __func__);
goto err;
}
+
+ /* Set the private bit to indicate that we've secured this */
+ for_each_sg(sgt->sgl, sg, sgt->nents, i)
+ SetPagePrivate(sg_page(sg));
+
+ kfree(dest_vm_list);
+ kfree(dest_perms);
return ret;
err:
ion_secure_cma_free(buffer);
+out_free_dest:
+ kfree(dest_perms);
+out_free_dest_vm:
+ kfree(dest_vm_list);
+out:
return ret;
}
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
index 49d947e..bb119cc 100644
--- a/drivers/staging/android/ion/ion_priv.h
+++ b/drivers/staging/android/ion/ion_priv.h
@@ -2,7 +2,7 @@
* drivers/staging/android/ion/ion_priv.h
*
* Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -216,8 +216,8 @@
struct task_struct *task;
int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
- atomic_t total_allocated;
- atomic_t total_handles;
+ atomic_long_t total_allocated;
+ atomic_long_t total_handles;
};
/**
@@ -293,7 +293,10 @@
int ion_heap_is_system_secure_heap_type(enum ion_heap_type type);
int get_secure_vmid(unsigned long flags);
+int get_vmid(unsigned long flags);
bool is_secure_vmid_valid(int vmid);
+unsigned int count_set_bits(unsigned long val);
+int populate_vm_list(unsigned long flags, unsigned int *vm_list, int nelems);
/**
* Functions to help assign/unassign sg_table for System Secure Heap
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index ad6028f..c9028bb 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -2,7 +2,7 @@
* drivers/staging/android/ion/ion_system_heap.c
*
* Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -123,9 +123,11 @@
if (order)
gfp_mask = high_order_gfp_flags;
+
page = alloc_pages(gfp_mask, order);
- ion_pages_sync_for_device(dev, page, PAGE_SIZE << order,
- DMA_BIDIRECTIONAL);
+ if (page)
+ ion_pages_sync_for_device(dev, page, PAGE_SIZE << order,
+ DMA_BIDIRECTIONAL);
}
if (!page)
return 0;
diff --git a/drivers/staging/android/ion/msm/msm_ion.c b/drivers/staging/android/ion/msm/msm_ion.c
index ae9bf5f..c7b58ce 100644
--- a/drivers/staging/android/ion/msm/msm_ion.c
+++ b/drivers/staging/android/ion/msm/msm_ion.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -31,6 +31,7 @@
#include <linux/highmem.h>
#include <linux/cma.h>
#include <linux/module.h>
+#include <linux/bitops.h>
#include <linux/show_mem_notifier.h>
#include <asm/cacheflush.h>
#include "../ion_priv.h"
@@ -618,7 +619,33 @@
vmid == VMID_CP_CAMERA ||
vmid == VMID_CP_SEC_DISPLAY ||
vmid == VMID_CP_APP ||
- vmid == VMID_CP_CAMERA_PREVIEW);
+ vmid == VMID_CP_CAMERA_PREVIEW ||
+ vmid == VMID_CP_SPSS_SP ||
+ vmid == VMID_CP_SPSS_SP_SHARED ||
+ vmid == VMID_CP_SPSS_HLOS_SHARED);
+}
+
+unsigned int count_set_bits(unsigned long val)
+{
+ return ((unsigned int)bitmap_weight(&val, BITS_PER_LONG));
+}
+
+int populate_vm_list(unsigned long flags, unsigned int *vm_list,
+ int nelems)
+{
+ unsigned int itr = 0;
+ int vmid;
+
+ flags = flags & ION_FLAGS_CP_MASK;
+ for_each_set_bit(itr, &flags, BITS_PER_LONG) {
+ vmid = get_vmid(0x1UL << itr);
+ if (vmid < 0 || !nelems)
+ return -EINVAL;
+
+ vm_list[nelems - 1] = vmid;
+ nelems--;
+ }
+ return 0;
}
int get_secure_vmid(unsigned long flags)
@@ -639,8 +666,27 @@
return VMID_CP_APP;
if (flags & ION_FLAG_CP_CAMERA_PREVIEW)
return VMID_CP_CAMERA_PREVIEW;
+ if (flags & ION_FLAG_CP_SPSS_SP)
+ return VMID_CP_SPSS_SP;
+ if (flags & ION_FLAG_CP_SPSS_SP_SHARED)
+ return VMID_CP_SPSS_SP_SHARED;
+ if (flags & ION_FLAG_CP_SPSS_HLOS_SHARED)
+ return VMID_CP_SPSS_HLOS_SHARED;
return -EINVAL;
}
+
+int get_vmid(unsigned long flags)
+{
+ int vmid;
+
+ vmid = get_secure_vmid(flags);
+ if (vmid < 0) {
+ if (flags & ION_FLAG_CP_HLOS)
+ vmid = VMID_HLOS;
+ }
+ return vmid;
+}
+
/* fix up the cases where the ioctl direction bits are incorrect */
static unsigned int msm_ion_ioctl_dir(unsigned int cmd)
{
diff --git a/drivers/staging/android/ion/msm/msm_ion.h b/drivers/staging/android/ion/msm/msm_ion.h
index 55b02b6..741d017 100644
--- a/drivers/staging/android/ion/msm/msm_ion.h
+++ b/drivers/staging/android/ion/msm/msm_ion.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -22,6 +22,8 @@
IPT_TYPE_MDP_WRITEBACK = 2,
};
+#define ION_FLAGS_CP_MASK 0x7FFF0000
+
/*
* This flag allows clients when mapping into the IOMMU to specify to
* defer un-mapping from the IOMMU until the buffer memory is freed.
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index 9846c51..616375a 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -42,6 +42,22 @@
#include <linux/rcupdate.h>
#include <linux/profile.h>
#include <linux/notifier.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/swap.h>
+#include <linux/fs.h>
+#include <linux/cpuset.h>
+#include <linux/vmpressure.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/almk.h>
+#include <linux/show_mem_notifier.h>
+
+#ifdef CONFIG_HIGHMEM
+#define _ZONE ZONE_HIGHMEM
+#else
+#define _ZONE ZONE_NORMAL
+#endif
#define CREATE_TRACE_POINTS
#include "trace/lowmemorykiller.h"
@@ -63,6 +79,7 @@
};
static int lowmem_minfree_size = 4;
+static int lmk_fast_run = 1;
static unsigned long lowmem_deathpending_timeout;
@@ -81,6 +98,314 @@
global_node_page_state(NR_INACTIVE_FILE);
}
+static atomic_t shift_adj = ATOMIC_INIT(0);
+static short adj_max_shift = 353;
+module_param_named(adj_max_shift, adj_max_shift, short, 0644);
+
+/* User knob to enable/disable adaptive lmk feature */
+static int enable_adaptive_lmk;
+module_param_named(enable_adaptive_lmk, enable_adaptive_lmk, int, 0644);
+
+/*
+ * This parameter controls the behaviour of LMK when vmpressure is in
+ * the range of 90-94. Adaptive lmk triggers based on number of file
+ * pages wrt vmpressure_file_min, when vmpressure is in the range of
+ * 90-94. Usually this is a pseudo minfree value, higher than the
+ * highest configured value in minfree array.
+ */
+static int vmpressure_file_min;
+module_param_named(vmpressure_file_min, vmpressure_file_min, int, 0644);
+
+enum {
+ VMPRESSURE_NO_ADJUST = 0,
+ VMPRESSURE_ADJUST_ENCROACH,
+ VMPRESSURE_ADJUST_NORMAL,
+};
+
+static int adjust_minadj(short *min_score_adj)
+{
+ int ret = VMPRESSURE_NO_ADJUST;
+
+ if (!enable_adaptive_lmk)
+ return 0;
+
+ if (atomic_read(&shift_adj) &&
+ (*min_score_adj > adj_max_shift)) {
+ if (*min_score_adj == OOM_SCORE_ADJ_MAX + 1)
+ ret = VMPRESSURE_ADJUST_ENCROACH;
+ else
+ ret = VMPRESSURE_ADJUST_NORMAL;
+ *min_score_adj = adj_max_shift;
+ }
+ atomic_set(&shift_adj, 0);
+
+ return ret;
+}
+
+static int lmk_vmpressure_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ int other_free, other_file;
+ unsigned long pressure = action;
+ int array_size = ARRAY_SIZE(lowmem_adj);
+
+ if (!enable_adaptive_lmk)
+ return 0;
+
+ if (pressure >= 95) {
+ other_file = global_node_page_state(NR_FILE_PAGES) -
+ global_node_page_state(NR_SHMEM) -
+ total_swapcache_pages();
+ other_free = global_page_state(NR_FREE_PAGES);
+
+ atomic_set(&shift_adj, 1);
+ trace_almk_vmpressure(pressure, other_free, other_file);
+ } else if (pressure >= 90) {
+ if (lowmem_adj_size < array_size)
+ array_size = lowmem_adj_size;
+ if (lowmem_minfree_size < array_size)
+ array_size = lowmem_minfree_size;
+
+ other_file = global_node_page_state(NR_FILE_PAGES) -
+ global_node_page_state(NR_SHMEM) -
+ total_swapcache_pages();
+
+ other_free = global_page_state(NR_FREE_PAGES);
+
+ if ((other_free < lowmem_minfree[array_size - 1]) &&
+ (other_file < vmpressure_file_min)) {
+ atomic_set(&shift_adj, 1);
+ trace_almk_vmpressure(pressure, other_free, other_file);
+ }
+ } else if (atomic_read(&shift_adj)) {
+ other_file = global_node_page_state(NR_FILE_PAGES) -
+ global_node_page_state(NR_SHMEM) -
+ total_swapcache_pages();
+
+ other_free = global_page_state(NR_FREE_PAGES);
+ /*
+ * shift_adj would have been set by a previous invocation
+ * of notifier, which is not followed by a lowmem_shrink yet.
+ * Since vmpressure has improved, reset shift_adj to avoid
+ * false adaptive LMK trigger.
+ */
+ trace_almk_vmpressure(pressure, other_free, other_file);
+ atomic_set(&shift_adj, 0);
+ }
+
+ return 0;
+}
+
+static struct notifier_block lmk_vmpr_nb = {
+ .notifier_call = lmk_vmpressure_notifier,
+};
+
+static int test_task_flag(struct task_struct *p, int flag)
+{
+ struct task_struct *t;
+
+ for_each_thread(p, t) {
+ task_lock(t);
+ if (test_tsk_thread_flag(t, flag)) {
+ task_unlock(t);
+ return 1;
+ }
+ task_unlock(t);
+ }
+
+ return 0;
+}
+
+static int test_task_lmk_waiting(struct task_struct *p)
+{
+ struct task_struct *t;
+
+ for_each_thread(p, t) {
+ task_lock(t);
+ if (task_lmk_waiting(t)) {
+ task_unlock(t);
+ return 1;
+ }
+ task_unlock(t);
+ }
+
+ return 0;
+}
+
+static DEFINE_MUTEX(scan_mutex);
+
+static int can_use_cma_pages(gfp_t gfp_mask)
+{
+ int can_use = 0;
+ int mtype = gfpflags_to_migratetype(gfp_mask);
+ int i = 0;
+ int *mtype_fallbacks = get_migratetype_fallbacks(mtype);
+
+ if (is_migrate_cma(mtype)) {
+ can_use = 1;
+ } else {
+ for (i = 0;; i++) {
+ int fallbacktype = mtype_fallbacks[i];
+
+ if (is_migrate_cma(fallbacktype)) {
+ can_use = 1;
+ break;
+ }
+
+ if (fallbacktype == MIGRATE_TYPES)
+ break;
+ }
+ }
+ return can_use;
+}
+
+void tune_lmk_zone_param(struct zonelist *zonelist, int classzone_idx,
+ int *other_free, int *other_file,
+ int use_cma_pages)
+{
+ struct zone *zone;
+ struct zoneref *zoneref;
+ int zone_idx;
+
+ for_each_zone_zonelist(zone, zoneref, zonelist, MAX_NR_ZONES) {
+ zone_idx = zonelist_zone_idx(zoneref);
+ if (zone_idx == ZONE_MOVABLE) {
+ if (!use_cma_pages && other_free)
+ *other_free -=
+ zone_page_state(zone, NR_FREE_CMA_PAGES);
+ continue;
+ }
+
+ if (zone_idx > classzone_idx) {
+ if (other_free != NULL)
+ *other_free -= zone_page_state(zone,
+ NR_FREE_PAGES);
+ if (other_file != NULL)
+ *other_file -= zone_page_state(zone,
+ NR_ZONE_INACTIVE_FILE) +
+ zone_page_state(zone,
+ NR_ZONE_ACTIVE_FILE);
+ } else if (zone_idx < classzone_idx) {
+ if (zone_watermark_ok(zone, 0, 0, classzone_idx, 0) &&
+ other_free) {
+ if (!use_cma_pages) {
+ *other_free -= min(
+ zone->lowmem_reserve[classzone_idx] +
+ zone_page_state(
+ zone, NR_FREE_CMA_PAGES),
+ zone_page_state(
+ zone, NR_FREE_PAGES));
+ } else {
+ *other_free -=
+ zone->lowmem_reserve[classzone_idx];
+ }
+ } else {
+ if (other_free)
+ *other_free -=
+ zone_page_state(zone, NR_FREE_PAGES);
+ }
+ }
+ }
+}
+
+#ifdef CONFIG_HIGHMEM
+static void adjust_gfp_mask(gfp_t *gfp_mask)
+{
+ struct zone *preferred_zone;
+ struct zoneref *zref;
+ struct zonelist *zonelist;
+ enum zone_type high_zoneidx;
+
+ if (current_is_kswapd()) {
+ zonelist = node_zonelist(0, *gfp_mask);
+ high_zoneidx = gfp_zone(*gfp_mask);
+ zref = first_zones_zonelist(zonelist, high_zoneidx, NULL);
+ preferred_zone = zref->zone;
+
+ if (high_zoneidx == ZONE_NORMAL) {
+ if (zone_watermark_ok_safe(
+ preferred_zone, 0,
+ high_wmark_pages(preferred_zone), 0))
+ *gfp_mask |= __GFP_HIGHMEM;
+ } else if (high_zoneidx == ZONE_HIGHMEM) {
+ *gfp_mask |= __GFP_HIGHMEM;
+ }
+ }
+}
+#else
+static void adjust_gfp_mask(gfp_t *unused)
+{
+}
+#endif
+
+void tune_lmk_param(int *other_free, int *other_file, struct shrink_control *sc)
+{
+ gfp_t gfp_mask;
+ struct zone *preferred_zone;
+ struct zoneref *zref;
+ struct zonelist *zonelist;
+ enum zone_type high_zoneidx, classzone_idx;
+ unsigned long balance_gap;
+ int use_cma_pages;
+
+ gfp_mask = sc->gfp_mask;
+ adjust_gfp_mask(&gfp_mask);
+
+ zonelist = node_zonelist(0, gfp_mask);
+ high_zoneidx = gfp_zone(gfp_mask);
+ zref = first_zones_zonelist(zonelist, high_zoneidx, NULL);
+ preferred_zone = zref->zone;
+ classzone_idx = zone_idx(preferred_zone);
+ use_cma_pages = can_use_cma_pages(gfp_mask);
+
+ balance_gap = min(low_wmark_pages(preferred_zone),
+ (preferred_zone->present_pages +
+ 100-1) /
+ 100);
+
+ if (likely(current_is_kswapd() && zone_watermark_ok(preferred_zone, 0,
+ high_wmark_pages(preferred_zone) + SWAP_CLUSTER_MAX +
+ balance_gap, 0, 0))) {
+ if (lmk_fast_run)
+ tune_lmk_zone_param(zonelist, classzone_idx, other_free,
+ other_file, use_cma_pages);
+ else
+ tune_lmk_zone_param(zonelist, classzone_idx, other_free,
+ NULL, use_cma_pages);
+
+ if (zone_watermark_ok(preferred_zone, 0, 0, _ZONE, 0)) {
+ if (!use_cma_pages) {
+ *other_free -= min(
+ preferred_zone->lowmem_reserve[_ZONE]
+ + zone_page_state(
+ preferred_zone, NR_FREE_CMA_PAGES),
+ zone_page_state(
+ preferred_zone, NR_FREE_PAGES));
+ } else {
+ *other_free -=
+ preferred_zone->lowmem_reserve[_ZONE];
+ }
+ } else {
+ *other_free -= zone_page_state(preferred_zone,
+ NR_FREE_PAGES);
+ }
+
+ lowmem_print(4, "lowmem_shrink of kswapd tunning for highmem "
+ "ofree %d, %d\n", *other_free, *other_file);
+ } else {
+ tune_lmk_zone_param(zonelist, classzone_idx, other_free,
+ other_file, use_cma_pages);
+
+ if (!use_cma_pages) {
+ *other_free -=
+ zone_page_state(preferred_zone, NR_FREE_CMA_PAGES);
+ }
+
+ lowmem_print(4, "lowmem_shrink tunning for others ofree %d, "
+ "%d\n", *other_free, *other_file);
+ }
+}
+
static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
{
struct task_struct *tsk;
@@ -88,16 +413,31 @@
unsigned long rem = 0;
int tasksize;
int i;
+ int ret = 0;
short min_score_adj = OOM_SCORE_ADJ_MAX + 1;
int minfree = 0;
int selected_tasksize = 0;
short selected_oom_score_adj;
int array_size = ARRAY_SIZE(lowmem_adj);
- int other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
- int other_file = global_node_page_state(NR_FILE_PAGES) -
- global_node_page_state(NR_SHMEM) -
- global_node_page_state(NR_UNEVICTABLE) -
- total_swapcache_pages();
+ int other_free;
+ int other_file;
+
+ if (mutex_lock_interruptible(&scan_mutex) < 0)
+ return 0;
+
+ other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
+
+ if (global_node_page_state(NR_SHMEM) + total_swapcache_pages() +
+ global_node_page_state(NR_UNEVICTABLE) <
+ global_node_page_state(NR_FILE_PAGES))
+ other_file = global_node_page_state(NR_FILE_PAGES) -
+ global_node_page_state(NR_SHMEM) -
+ global_node_page_state(NR_UNEVICTABLE) -
+ total_swapcache_pages();
+ else
+ other_file = 0;
+
+ tune_lmk_param(&other_free, &other_file, sc);
if (lowmem_adj_size < array_size)
array_size = lowmem_adj_size;
@@ -111,13 +451,17 @@
}
}
+ ret = adjust_minadj(&min_score_adj);
+
lowmem_print(3, "lowmem_scan %lu, %x, ofree %d %d, ma %hd\n",
sc->nr_to_scan, sc->gfp_mask, other_free,
other_file, min_score_adj);
if (min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
+ trace_almk_shrink(0, ret, other_free, other_file, 0);
lowmem_print(5, "lowmem_scan %lu, %x, return 0\n",
sc->nr_to_scan, sc->gfp_mask);
+ mutex_unlock(&scan_mutex);
return 0;
}
@@ -131,16 +475,24 @@
if (tsk->flags & PF_KTHREAD)
continue;
+ /* if task no longer has any memory ignore it */
+ if (test_task_flag(tsk, TIF_MM_RELEASED))
+ continue;
+
+ if (time_before_eq(jiffies, lowmem_deathpending_timeout)) {
+ if (test_task_lmk_waiting(tsk)) {
+ rcu_read_unlock();
+ /* give the system time to free up the memory */
+ msleep_interruptible(20);
+ mutex_unlock(&scan_mutex);
+ return 0;
+ }
+ }
+
p = find_lock_task_mm(tsk);
if (!p)
continue;
- if (task_lmk_waiting(p) &&
- time_before_eq(jiffies, lowmem_deathpending_timeout)) {
- task_unlock(p);
- rcu_read_unlock();
- return 0;
- }
oom_score_adj = p->signal->oom_score_adj;
if (oom_score_adj < min_score_adj) {
task_unlock(p);
@@ -160,7 +512,7 @@
selected = p;
selected_tasksize = tasksize;
selected_oom_score_adj = oom_score_adj;
- lowmem_print(2, "select '%s' (%d), adj %hd, size %d, to kill\n",
+ lowmem_print(3, "select '%s' (%d), adj %hd, size %d, to kill\n",
p->comm, p->pid, oom_score_adj, tasksize);
}
if (selected) {
@@ -174,24 +526,53 @@
task_set_lmk_waiting(selected);
task_unlock(selected);
trace_lowmemory_kill(selected, cache_size, cache_limit, free);
- lowmem_print(1, "Killing '%s' (%d), adj %hd,\n"
- " to free %ldkB on behalf of '%s' (%d) because\n"
- " cache %ldkB is below limit %ldkB for oom_score_adj %hd\n"
- " Free memory is %ldkB above reserved\n",
- selected->comm, selected->pid,
- selected_oom_score_adj,
- selected_tasksize * (long)(PAGE_SIZE / 1024),
- current->comm, current->pid,
- cache_size, cache_limit,
- min_score_adj,
- free);
+ lowmem_print(1, "Killing '%s' (%d) (tgid %d), adj %hd,\n"
+ "to free %ldkB on behalf of '%s' (%d) because\n"
+ "cache %ldkB is below limit %ldkB for oom score %hd\n"
+ "Free memory is %ldkB above reserved.\n"
+ "Free CMA is %ldkB\n"
+ "Total reserve is %ldkB\n"
+ "Total free pages is %ldkB\n"
+ "Total file cache is %ldkB\n"
+ "GFP mask is 0x%x\n",
+ selected->comm, selected->pid, selected->tgid,
+ selected_oom_score_adj,
+ selected_tasksize * (long)(PAGE_SIZE / 1024),
+ current->comm, current->pid,
+ cache_size, cache_limit,
+ min_score_adj,
+ free,
+ global_page_state(NR_FREE_CMA_PAGES) *
+ (long)(PAGE_SIZE / 1024),
+ totalreserve_pages * (long)(PAGE_SIZE / 1024),
+ global_page_state(NR_FREE_PAGES) *
+ (long)(PAGE_SIZE / 1024),
+ global_node_page_state(NR_FILE_PAGES) *
+ (long)(PAGE_SIZE / 1024),
+ sc->gfp_mask);
+
+ if (lowmem_debug_level >= 2 && selected_oom_score_adj == 0) {
+ show_mem(SHOW_MEM_FILTER_NODES);
+ show_mem_call_notifiers();
+ dump_tasks(NULL, NULL);
+ }
+
lowmem_deathpending_timeout = jiffies + HZ;
rem += selected_tasksize;
+ rcu_read_unlock();
+ /* give the system time to free up the memory */
+ msleep_interruptible(20);
+ trace_almk_shrink(selected_tasksize, ret,
+ other_free, other_file,
+ selected_oom_score_adj);
+ } else {
+ trace_almk_shrink(1, ret, other_free, other_file, 0);
+ rcu_read_unlock();
}
lowmem_print(4, "lowmem_scan %lu, %x, return %lu\n",
sc->nr_to_scan, sc->gfp_mask, rem);
- rcu_read_unlock();
+ mutex_unlock(&scan_mutex);
return rem;
}
@@ -204,6 +585,7 @@
static int __init lowmem_init(void)
{
register_shrinker(&lowmem_shrinker);
+ vmpressure_notifier_register(&lmk_vmpr_nb);
return 0;
}
device_initcall(lowmem_init);
@@ -299,6 +681,7 @@
module_param_array_named(adj, lowmem_adj, short, &lowmem_adj_size, 0644);
#endif
module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size,
- 0644);
-module_param_named(debug_level, lowmem_debug_level, uint, 0644);
+ S_IRUGO | S_IWUSR);
+module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR);
+module_param_named(lmk_fast_run, lmk_fast_run, int, S_IRUGO | S_IWUSR);
diff --git a/drivers/staging/android/uapi/msm_ion.h b/drivers/staging/android/uapi/msm_ion.h
index db4fc63..84598db 100644
--- a/drivers/staging/android/uapi/msm_ion.h
+++ b/drivers/staging/android/uapi/msm_ion.h
@@ -84,10 +84,12 @@
#define ION_FLAG_CP_NON_PIXEL ION_BIT(20)
#define ION_FLAG_CP_CAMERA ION_BIT(21)
#define ION_FLAG_CP_HLOS ION_BIT(22)
-#define ION_FLAG_CP_HLOS_FREE ION_BIT(23)
+#define ION_FLAG_CP_SPSS_SP ION_BIT(23)
+#define ION_FLAG_CP_SPSS_SP_SHARED ION_BIT(24)
#define ION_FLAG_CP_SEC_DISPLAY ION_BIT(25)
#define ION_FLAG_CP_APP ION_BIT(26)
#define ION_FLAG_CP_CAMERA_PREVIEW ION_BIT(27)
+#define ION_FLAG_CP_SPSS_HLOS_SHARED ION_BIT(30)
/**
@@ -96,13 +98,6 @@
*/
#define ION_FLAG_SECURE ION_BIT(ION_HEAP_ID_RESERVED)
-/**
- * Flag for clients to force contiguous memort allocation
- *
- * Use of this flag is carefully monitored!
- */
-#define ION_FLAG_FORCE_CONTIGUOUS ION_BIT(30)
-
/*
* Used in conjunction with heap which pool memory to force an allocation
* to come from the page allocator directly instead of from the pool allocation
@@ -113,7 +108,6 @@
* Deprecated! Please use the corresponding ION_FLAG_*
*/
#define ION_SECURE ION_FLAG_SECURE
-#define ION_FORCE_CONTIGUOUS ION_FLAG_FORCE_CONTIGUOUS
/**
* Macro should be used with ion_heap_ids defined above.
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 64b3966..a34fd5a 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -2908,6 +2908,7 @@
dev = comedi_alloc_board_minor(NULL);
if (IS_ERR(dev)) {
comedi_cleanup_board_minors();
+ class_destroy(comedi_class);
cdev_del(&comedi_cdev);
unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0),
COMEDI_NUM_MINORS);
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 1c967c3..a574885 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -3078,8 +3078,7 @@
/* following line: 2-1 per STC */
ni_stc_writel(dev, 1, NISTC_AO_UI_LOADA_REG);
ni_stc_writew(dev, NISTC_AO_CMD1_UI_LOAD, NISTC_AO_CMD1_REG);
- /* following line: N-1 per STC */
- ni_stc_writel(dev, trigvar - 1, NISTC_AO_UI_LOADA_REG);
+ ni_stc_writel(dev, trigvar, NISTC_AO_UI_LOADA_REG);
} else { /* TRIG_EXT */
/* FIXME: assert scan_begin_arg != 0, ret failure otherwise */
devpriv->ao_cmd2 |= NISTC_AO_CMD2_BC_GATE_ENA;
diff --git a/drivers/staging/iio/light/tsl2x7x_core.c b/drivers/staging/iio/light/tsl2x7x_core.c
index ea15bc1..197201a 100644
--- a/drivers/staging/iio/light/tsl2x7x_core.c
+++ b/drivers/staging/iio/light/tsl2x7x_core.c
@@ -854,7 +854,7 @@
tmp = data[i] - statP->mean;
sample_sum += tmp * tmp;
}
- statP->stddev = int_sqrt((long)sample_sum) / length;
+ statP->stddev = int_sqrt((long)sample_sum / length);
}
/**
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index b27de88..995f2da 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -1650,8 +1650,13 @@
ibmsg = tx->tx_msg;
ibmsg->ibm_u.immediate.ibim_hdr = *hdr;
- copy_from_iter(&ibmsg->ibm_u.immediate.ibim_payload, IBLND_MSG_SIZE,
- &from);
+ rc = copy_from_iter(&ibmsg->ibm_u.immediate.ibim_payload, payload_nob,
+ &from);
+ if (rc != payload_nob) {
+ kiblnd_pool_free_node(&tx->tx_pool->tpo_pool, &tx->tx_list);
+ return -EFAULT;
+ }
+
nob = offsetof(struct kib_immediate_msg, ibim_payload[payload_nob]);
kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob);
@@ -1751,8 +1756,14 @@
break;
}
- copy_to_iter(&rxmsg->ibm_u.immediate.ibim_payload,
- IBLND_MSG_SIZE, to);
+ rc = copy_to_iter(&rxmsg->ibm_u.immediate.ibim_payload, rlen,
+ to);
+ if (rc != rlen) {
+ rc = -EFAULT;
+ break;
+ }
+
+ rc = 0;
lnet_finalize(ni, lntmsg, 0);
break;
diff --git a/drivers/staging/media/cec/cec-adap.c b/drivers/staging/media/cec/cec-adap.c
index 611e07b..057c9b5 100644
--- a/drivers/staging/media/cec/cec-adap.c
+++ b/drivers/staging/media/cec/cec-adap.c
@@ -1017,7 +1017,7 @@
las->log_addr[i] = CEC_LOG_ADDR_INVALID;
if (last_la == CEC_LOG_ADDR_INVALID ||
last_la == CEC_LOG_ADDR_UNREGISTERED ||
- !(last_la & type2mask[type]))
+ !((1 << last_la) & type2mask[type]))
last_la = la_list[0];
err = cec_config_log_addr(adap, i, last_la);
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index 553e8d5..6513ace 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -890,7 +890,7 @@
return _FAIL;
- if (len > MAX_IE_SZ)
+ if (len < 0 || len > MAX_IE_SZ)
return _FAIL;
pbss_network->IELength = len;
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 68e1e6b..b432153 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -43,6 +43,7 @@
{USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
{USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
{USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
+ {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
{USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
{} /* Terminating entry */
};
diff --git a/drivers/staging/rts5208/rtsx_scsi.c b/drivers/staging/rts5208/rtsx_scsi.c
index becb4bb..01438fa 100644
--- a/drivers/staging/rts5208/rtsx_scsi.c
+++ b/drivers/staging/rts5208/rtsx_scsi.c
@@ -536,7 +536,7 @@
if (sendbytes > 8) {
memcpy(buf, inquiry_buf, 8);
- memcpy(buf + 8, inquiry_string, sendbytes - 8);
+ strncpy(buf + 8, inquiry_string, sendbytes - 8);
if (pro_formatter_flag) {
/* Additional Length */
buf[4] = 0x33;
diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c
index 7d90e25..86ace14 100644
--- a/drivers/staging/sm750fb/sm750.c
+++ b/drivers/staging/sm750fb/sm750.c
@@ -1049,6 +1049,26 @@
return err;
}
+static int lynxfb_kick_out_firmware_fb(struct pci_dev *pdev)
+{
+ struct apertures_struct *ap;
+ bool primary = false;
+
+ ap = alloc_apertures(1);
+ if (!ap)
+ return -ENOMEM;
+
+ ap->ranges[0].base = pci_resource_start(pdev, 0);
+ ap->ranges[0].size = pci_resource_len(pdev, 0);
+#ifdef CONFIG_X86
+ primary = pdev->resource[PCI_ROM_RESOURCE].flags &
+ IORESOURCE_ROM_SHADOW;
+#endif
+ remove_conflicting_framebuffers(ap, "sm750_fb1", primary);
+ kfree(ap);
+ return 0;
+}
+
static int lynxfb_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -1057,6 +1077,10 @@
int fbidx;
int err;
+ err = lynxfb_kick_out_firmware_fb(pdev);
+ if (err)
+ return err;
+
/* enable device */
err = pcim_enable_device(pdev);
if (err)
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 0594828..b195537 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -522,6 +522,9 @@
goto free_all;
}
+ if (vnt_key_init_table(priv))
+ goto free_all;
+
priv->int_interval = 1; /* bInterval is set to 1 */
vnt_int_start_interrupt(priv);
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 01ea228..155fe0e 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1287,6 +1287,18 @@
*/
if (dump_payload)
goto after_immediate_data;
+ /*
+ * Check for underflow case where both EDTL and immediate data payload
+ * exceeds what is presented by CDB's TRANSFER LENGTH, and what has
+ * already been set in target_cmd_size_check() as se_cmd->data_length.
+ *
+ * For this special case, fail the command and dump the immediate data
+ * payload.
+ */
+ if (cmd->first_burst_len > cmd->se_cmd.data_length) {
+ cmd->sense_reason = TCM_INVALID_CDB_FIELD;
+ goto after_immediate_data;
+ }
immed_ret = iscsit_handle_immediate_data(cmd, hdr,
cmd->first_burst_len);
@@ -4431,8 +4443,11 @@
* always sleep waiting for RX/TX thread shutdown to complete
* within iscsit_close_connection().
*/
- if (!conn->conn_transport->rdma_shutdown)
+ if (!conn->conn_transport->rdma_shutdown) {
sleep = cmpxchg(&conn->tx_thread_active, true, false);
+ if (!sleep)
+ return;
+ }
atomic_set(&conn->conn_logout_remove, 0);
complete(&conn->conn_logout_comp);
@@ -4448,8 +4463,11 @@
{
int sleep = 1;
- if (!conn->conn_transport->rdma_shutdown)
+ if (!conn->conn_transport->rdma_shutdown) {
sleep = cmpxchg(&conn->tx_thread_active, true, false);
+ if (!sleep)
+ return;
+ }
atomic_set(&conn->conn_logout_remove, 0);
complete(&conn->conn_logout_comp);
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 7e70fe8..9cbbc9c 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -802,6 +802,7 @@
DEF_TPG_ATTRIB(t10_pi);
DEF_TPG_ATTRIB(fabric_prot_type);
DEF_TPG_ATTRIB(tpg_enabled_sendtargets);
+DEF_TPG_ATTRIB(login_keys_workaround);
static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
&iscsi_tpg_attrib_attr_authentication,
@@ -817,6 +818,7 @@
&iscsi_tpg_attrib_attr_t10_pi,
&iscsi_tpg_attrib_attr_fabric_prot_type,
&iscsi_tpg_attrib_attr_tpg_enabled_sendtargets,
+ &iscsi_tpg_attrib_attr_login_keys_workaround,
NULL,
};
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 89d34bd..6693d7c 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -819,7 +819,8 @@
SENDER_TARGET,
login->rsp_buf,
&login->rsp_length,
- conn->param_list);
+ conn->param_list,
+ conn->tpg->tpg_attrib.login_keys_workaround);
if (ret < 0)
return -1;
@@ -889,7 +890,8 @@
SENDER_TARGET,
login->rsp_buf,
&login->rsp_length,
- conn->param_list);
+ conn->param_list,
+ conn->tpg->tpg_attrib.login_keys_workaround);
if (ret < 0) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
ISCSI_LOGIN_STATUS_INIT_ERR);
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 4a073339a..0151776 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -765,7 +765,8 @@
return 0;
}
-static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param)
+static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param,
+ bool keys_workaround)
{
if (IS_TYPE_BOOL_AND(param)) {
if (!strcmp(param->value, NO))
@@ -773,19 +774,31 @@
} else if (IS_TYPE_BOOL_OR(param)) {
if (!strcmp(param->value, YES))
SET_PSTATE_REPLY_OPTIONAL(param);
- /*
- * Required for gPXE iSCSI boot client
- */
- if (!strcmp(param->name, IMMEDIATEDATA))
- SET_PSTATE_REPLY_OPTIONAL(param);
+
+ if (keys_workaround) {
+ /*
+ * Required for gPXE iSCSI boot client
+ */
+ if (!strcmp(param->name, IMMEDIATEDATA))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ }
} else if (IS_TYPE_NUMBER(param)) {
if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
SET_PSTATE_REPLY_OPTIONAL(param);
- /*
- * Required for gPXE iSCSI boot client
- */
- if (!strcmp(param->name, MAXCONNECTIONS))
- SET_PSTATE_REPLY_OPTIONAL(param);
+
+ if (keys_workaround) {
+ /*
+ * Required for Mellanox Flexboot PXE boot ROM
+ */
+ if (!strcmp(param->name, FIRSTBURSTLENGTH))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+
+ /*
+ * Required for gPXE iSCSI boot client
+ */
+ if (!strcmp(param->name, MAXCONNECTIONS))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ }
} else if (IS_PHASE_DECLARATIVE(param))
SET_PSTATE_REPLY_OPTIONAL(param);
}
@@ -1422,7 +1435,8 @@
u8 sender,
char *textbuf,
u32 *length,
- struct iscsi_param_list *param_list)
+ struct iscsi_param_list *param_list,
+ bool keys_workaround)
{
char *output_buf = NULL;
struct iscsi_extra_response *er;
@@ -1458,7 +1472,8 @@
*length += 1;
output_buf = textbuf + *length;
SET_PSTATE_PROPOSER(param);
- iscsi_check_proposer_for_optional_reply(param);
+ iscsi_check_proposer_for_optional_reply(param,
+ keys_workaround);
pr_debug("Sending key: %s=%s\n",
param->name, param->value);
}
diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h
index a0751e3..17a58c2 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.h
+++ b/drivers/target/iscsi/iscsi_target_parameters.h
@@ -40,7 +40,7 @@
extern int iscsi_update_param_value(struct iscsi_param *, char *);
extern int iscsi_decode_text_input(u8, u8, char *, u32, struct iscsi_conn *);
extern int iscsi_encode_text_output(u8, u8, char *, u32 *,
- struct iscsi_param_list *);
+ struct iscsi_param_list *, bool);
extern int iscsi_check_negotiated_keys(struct iscsi_param_list *);
extern void iscsi_set_connection_parameters(struct iscsi_conn_ops *,
struct iscsi_param_list *);
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 205a509..63e1dcc 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -227,6 +227,7 @@
a->t10_pi = TA_DEFAULT_T10_PI;
a->fabric_prot_type = TA_DEFAULT_FABRIC_PROT_TYPE;
a->tpg_enabled_sendtargets = TA_DEFAULT_TPG_ENABLED_SENDTARGETS;
+ a->login_keys_workaround = TA_DEFAULT_LOGIN_KEYS_WORKAROUND;
}
int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg)
@@ -899,3 +900,21 @@
return 0;
}
+
+int iscsit_ta_login_keys_workaround(
+ struct iscsi_portal_group *tpg,
+ u32 flag)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if ((flag != 0) && (flag != 1)) {
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
+ }
+
+ a->login_keys_workaround = flag;
+ pr_debug("iSCSI_TPG[%hu] - TPG enabled bit for login keys workaround: %s ",
+ tpg->tpgt, (a->login_keys_workaround) ? "ON" : "OFF");
+
+ return 0;
+}
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
index 2da2119..901a712 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.h
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -39,5 +39,6 @@
extern int iscsit_ta_t10_pi(struct iscsi_portal_group *, u32);
extern int iscsit_ta_fabric_prot_type(struct iscsi_portal_group *, u32);
extern int iscsit_ta_tpg_enabled_sendtargets(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_login_keys_workaround(struct iscsi_portal_group *, u32);
#endif /* ISCSI_TARGET_TPG_H */
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index e2c970a..be52838 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -131,7 +131,7 @@
void release_se_kmem_caches(void);
u32 scsi_get_new_index(scsi_index_t);
void transport_subsystem_check_init(void);
-void transport_cmd_finish_abort(struct se_cmd *, int);
+int transport_cmd_finish_abort(struct se_cmd *, int);
unsigned char *transport_dump_cmd_direction(struct se_cmd *);
void transport_dump_dev_state(struct se_device *, char *, int *);
void transport_dump_dev_info(struct se_device *, struct se_lun *,
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 4f229e7..27dd1e1 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -75,7 +75,7 @@
kfree(tmr);
}
-static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
+static int core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
{
unsigned long flags;
bool remove = true, send_tas;
@@ -91,7 +91,7 @@
transport_send_task_abort(cmd);
}
- transport_cmd_finish_abort(cmd, remove);
+ return transport_cmd_finish_abort(cmd, remove);
}
static int target_check_cdb_and_preempt(struct list_head *list,
@@ -185,8 +185,8 @@
cancel_work_sync(&se_cmd->work);
transport_wait_for_tasks(se_cmd);
- transport_cmd_finish_abort(se_cmd, true);
- target_put_sess_cmd(se_cmd);
+ if (!transport_cmd_finish_abort(se_cmd, true))
+ target_put_sess_cmd(se_cmd);
printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
" ref_tag: %llu\n", ref_tag);
@@ -286,8 +286,8 @@
cancel_work_sync(&cmd->work);
transport_wait_for_tasks(cmd);
- transport_cmd_finish_abort(cmd, 1);
- target_put_sess_cmd(cmd);
+ if (!transport_cmd_finish_abort(cmd, 1))
+ target_put_sess_cmd(cmd);
}
}
@@ -385,8 +385,8 @@
cancel_work_sync(&cmd->work);
transport_wait_for_tasks(cmd);
- core_tmr_handle_tas_abort(cmd, tas);
- target_put_sess_cmd(cmd);
+ if (!core_tmr_handle_tas_abort(cmd, tas))
+ target_put_sess_cmd(cmd);
}
}
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 077344c..e8a1f5c 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -673,9 +673,10 @@
percpu_ref_put(&lun->lun_ref);
}
-void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
+int transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
+ int ret = 0;
if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
transport_lun_remove_cmd(cmd);
@@ -687,9 +688,11 @@
cmd->se_tfo->aborted_task(cmd);
if (transport_cmd_check_stop_to_fabric(cmd))
- return;
+ return 1;
if (remove && ack_kref)
- transport_put_cmd(cmd);
+ ret = transport_put_cmd(cmd);
+
+ return ret;
}
static void target_complete_failure_work(struct work_struct *work)
@@ -750,6 +753,15 @@
if (cmd->transport_state & CMD_T_ABORTED ||
cmd->transport_state & CMD_T_STOP) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ /*
+ * If COMPARE_AND_WRITE was stopped by __transport_wait_for_tasks(),
+ * release se_device->caw_sem obtained by sbc_compare_and_write()
+ * since target_complete_ok_work() or target_complete_failure_work()
+ * won't be called to invoke the normal CAW completion callbacks.
+ */
+ if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
+ up(&dev->caw_sem);
+ }
complete_all(&cmd->t_transport_stop_comp);
return;
} else if (!success) {
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index cd5bde3..09f7f20 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -205,8 +205,10 @@
mutex_lock(&cooling_list_lock);
list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) {
+ unsigned long level = get_level(cpufreq_dev, freq);
+
mutex_unlock(&cooling_list_lock);
- return get_level(cpufreq_dev, freq);
+ return level;
}
}
mutex_unlock(&cooling_list_lock);
diff --git a/drivers/thermal/max77620_thermal.c b/drivers/thermal/max77620_thermal.c
index 83905ff..7e98927 100644
--- a/drivers/thermal/max77620_thermal.c
+++ b/drivers/thermal/max77620_thermal.c
@@ -104,8 +104,6 @@
return -EINVAL;
}
- pdev->dev.of_node = pdev->dev.parent->of_node;
-
mtherm->dev = &pdev->dev;
mtherm->rmap = dev_get_regmap(pdev->dev.parent, NULL);
if (!mtherm->rmap) {
@@ -113,6 +111,14 @@
return -ENODEV;
}
+ /*
+ * Drop any current reference to a device-tree node and get a
+ * reference to the parent's node which will be balanced on reprobe or
+ * on platform-device release.
+ */
+ of_node_put(pdev->dev.of_node);
+ pdev->dev.of_node = of_node_get(pdev->dev.parent->of_node);
+
mtherm->tz_device = devm_thermal_zone_of_sensor_register(&pdev->dev, 0,
mtherm, &max77620_thermal_ops);
if (IS_ERR(mtherm->tz_device)) {
diff --git a/drivers/thermal/msm-tsens.c b/drivers/thermal/msm-tsens.c
index 1ab5b0c..fe0a7c7 100644
--- a/drivers/thermal/msm-tsens.c
+++ b/drivers/thermal/msm-tsens.c
@@ -79,6 +79,9 @@
{ .compatible = "qcom,sdm845-tsens",
.data = &data_tsens24xx,
},
+ { .compatible = "qcom,tsens24xx",
+ .data = &data_tsens24xx,
+ },
{}
};
MODULE_DEVICE_TABLE(of, tsens_table);
@@ -157,14 +160,18 @@
for (i = 0; i < TSENS_MAX_SENSORS; i++) {
tmdev->sensor[i].tmdev = tmdev;
tmdev->sensor[i].hw_id = i;
- tmdev->sensor[i].tzd =
- devm_thermal_zone_of_sensor_register(
- &tmdev->pdev->dev, i,
- &tmdev->sensor[i], &tsens_tm_thermal_zone_ops);
- if (IS_ERR(tmdev->sensor[i].tzd)) {
- pr_debug("Error registering sensor:%d\n", i);
- sensor_missing++;
- continue;
+ if (tmdev->ops->sensor_en(tmdev, i)) {
+ tmdev->sensor[i].tzd =
+ devm_thermal_zone_of_sensor_register(
+ &tmdev->pdev->dev, i,
+ &tmdev->sensor[i], &tsens_tm_thermal_zone_ops);
+ if (IS_ERR(tmdev->sensor[i].tzd)) {
+ pr_debug("Error registering sensor:%d\n", i);
+ sensor_missing++;
+ continue;
+ }
+ } else {
+ pr_debug("Sensor not enabled:%d\n", i);
}
}
diff --git a/drivers/thermal/qcom/Kconfig b/drivers/thermal/qcom/Kconfig
index f6e1b86..be33725 100644
--- a/drivers/thermal/qcom/Kconfig
+++ b/drivers/thermal/qcom/Kconfig
@@ -40,3 +40,23 @@
The virtual sensor information includes the underlying thermal
sensors to query for temperature and the aggregation logic to
determine the virtual sensor temperature.
+
+config QTI_REG_COOLING_DEVICE
+ bool "QTI Regulator cooling device"
+ depends on THERMAL_OF && MSM_QMP
+ help
+ This enables the Regulator cooling device. This cooling device
+	  will be used by the QTI chipset to place a floor voltage restriction at
+ low temperatures. The regulator cooling device will message the AOP
+	  using a mailbox to establish the floor voltage.
+
+config QTI_QMI_COOLING_DEVICE
+ bool "QTI QMI cooling devices"
+ depends on MSM_QMI_INTERFACE && THERMAL_OF
+ help
+ This enables the QTI remote subsystem cooling devices. These cooling
+	  devices will be used by the QTI chipset to place various remote
+ subsystem mitigations like remote processor passive mitigation,
+ remote subsystem voltage restriction at low temperatures etc.
+ The QMI cooling device will interface with remote subsystem
+ using QTI QMI interface.
diff --git a/drivers/thermal/qcom/Makefile b/drivers/thermal/qcom/Makefile
index 8859380..000c6e7 100644
--- a/drivers/thermal/qcom/Makefile
+++ b/drivers/thermal/qcom/Makefile
@@ -1,5 +1,7 @@
obj-$(CONFIG_QCOM_TSENS) += qcom_tsens.o
qcom_tsens-y += tsens.o tsens-common.o tsens-8916.o tsens-8974.o tsens-8960.o tsens-8996.o
obj-$(CONFIG_MSM_BCL_PERIPHERAL_CTL) += bcl_peripheral.o
-obj-$(CONFIG_QTI_THERMAL_LIMITS_DCVS) += msm_lmh_dcvs.o
+obj-$(CONFIG_QTI_THERMAL_LIMITS_DCVS) += msm_lmh_dcvs.o lmh_dbg.o
obj-$(CONFIG_QTI_VIRTUAL_SENSOR) += qti_virtual_sensor.o
+obj-$(CONFIG_QTI_REG_COOLING_DEVICE) += regulator_cooling.o
+obj-$(CONFIG_QTI_QMI_COOLING_DEVICE) += thermal_mitigation_device_service_v01.o qmi_cooling.o
diff --git a/drivers/thermal/qcom/lmh_dbg.c b/drivers/thermal/qcom/lmh_dbg.c
new file mode 100644
index 0000000..d027bd9
--- /dev/null
+++ b/drivers/thermal/qcom/lmh_dbg.c
@@ -0,0 +1,568 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__
+
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <asm/cacheflush.h>
+#include <soc/qcom/scm.h>
+#include <linux/dma-mapping.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+
+#include "lmh_dbg.h"
+
+#define LMH_MON_NAME "lmh_monitor"
+#define LMH_DBGFS_READ "data"
+#define LMH_DBGFS_CONFIG_READ "config"
+#define LMH_DBGFS_READ_TYPES "data_types"
+#define LMH_DBGFS_CONFIG_TYPES "config_types"
+#define LMH_SCM_PAYLOAD_SIZE 10
+#define LMH_READ_LINE_LENGTH 10
+#define LMH_DEBUG_READ_TYPE 0x0
+#define LMH_DEBUG_CONFIG_TYPE 0x1
+#define LMH_DEBUG_SET 0x08
+#define LMH_DEBUG_READ_BUF_SIZE 0x09
+#define LMH_DEBUG_READ 0x0A
+#define LMH_DEBUG_GET_TYPE 0x0B
+
+struct lmh_driver_data {
+ struct device *dev;
+ uint32_t *read_type;
+ uint32_t *config_type;
+ uint32_t read_type_count;
+ uint32_t config_type_count;
+ struct dentry *debugfs_parent;
+ struct dentry *debug_read;
+ struct dentry *debug_config;
+ struct dentry *debug_read_type;
+ struct dentry *debug_config_type;
+};
+
+enum lmh_read_type {
+ LMH_READ_TYPE = 0,
+ LMH_CONFIG_TYPE,
+};
+
+static struct lmh_driver_data *lmh_data;
+
+static int lmh_debug_read(uint32_t **buf)
+{
+ int ret = 0, size = 0, tz_ret = 0;
+ static uint32_t curr_size;
+ struct scm_desc desc_arg;
+ static uint32_t *payload;
+
+ desc_arg.arginfo = SCM_ARGS(0);
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH,
+ LMH_DEBUG_READ_BUF_SIZE), &desc_arg);
+ size = desc_arg.ret[0];
+ if (ret) {
+ pr_err("Error in SCM v%d get debug buffer size call. err:%d\n",
+ (is_scm_armv8()) ? 8 : 7, ret);
+ goto get_dbg_exit;
+ }
+ if (!size) {
+ pr_err("No Debug data to read.\n");
+ ret = -ENODEV;
+ goto get_dbg_exit;
+ }
+ size = SCM_BUFFER_SIZE(uint32_t) * size * LMH_READ_LINE_LENGTH;
+ if (curr_size != size) {
+ if (payload)
+ devm_kfree(lmh_data->dev, payload);
+ payload = devm_kzalloc(lmh_data->dev, PAGE_ALIGN(size),
+ GFP_KERNEL);
+ if (!payload) {
+ ret = -ENOMEM;
+ goto get_dbg_exit;
+ }
+ curr_size = size;
+ }
+
+ /* &payload may be a physical address > 4 GB */
+ desc_arg.args[0] = SCM_BUFFER_PHYS(payload);
+ desc_arg.args[1] = curr_size;
+ desc_arg.arginfo = SCM_ARGS(2, SCM_RW, SCM_VAL);
+ dmac_flush_range(payload, (void *)payload + curr_size);
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH, LMH_DEBUG_READ),
+ &desc_arg);
+ dmac_inv_range(payload, (void *)payload + curr_size);
+ tz_ret = desc_arg.ret[0];
+ /* Have memory barrier before we access the TZ data */
+ mb();
+ if (ret) {
+ pr_err("Error in get debug read. err:%d\n", ret);
+ goto get_dbg_exit;
+ }
+ if (tz_ret) {
+ pr_err("TZ API returned error. err:%d\n", tz_ret);
+ ret = tz_ret;
+ goto get_dbg_exit;
+ }
+
+get_dbg_exit:
+ if (ret && payload) {
+ devm_kfree(lmh_data->dev, payload);
+ payload = NULL;
+ curr_size = 0;
+ }
+ *buf = payload;
+
+ return (ret < 0) ? ret : curr_size;
+}
+
+static int lmh_debug_config_write(uint32_t cmd_id, uint32_t *buf, int size)
+{
+ int ret = 0, size_bytes = 0;
+ struct scm_desc desc_arg;
+ uint32_t *payload = NULL;
+
+ size_bytes = (size - 3) * sizeof(uint32_t);
+ payload = devm_kzalloc(lmh_data->dev, PAGE_ALIGN(size_bytes),
+ GFP_KERNEL);
+ if (!payload) {
+ ret = -ENOMEM;
+ goto set_cfg_exit;
+ }
+ memcpy(payload, &buf[3], size_bytes);
+
+ /* &payload may be a physical address > 4 GB */
+ desc_arg.args[0] = SCM_BUFFER_PHYS(payload);
+ desc_arg.args[1] = size_bytes;
+ desc_arg.args[2] = buf[0];
+ desc_arg.args[3] = buf[1];
+ desc_arg.args[4] = buf[2];
+ desc_arg.arginfo = SCM_ARGS(5, SCM_RO, SCM_VAL, SCM_VAL, SCM_VAL,
+ SCM_VAL);
+ dmac_flush_range(payload, (void *)payload + size_bytes);
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH, cmd_id), &desc_arg);
+ /* Have memory barrier before we access the TZ data */
+ mb();
+ if (ret) {
+ pr_err("Error in config debug read. err:%d\n", ret);
+ goto set_cfg_exit;
+ }
+
+set_cfg_exit:
+ return ret;
+}
+
+static int lmh_parse_and_extract(const char __user *user_buf, size_t count,
+ enum lmh_read_type type)
+{
+ char *local_buf = NULL, *token = NULL, *curr_ptr = NULL, *token1 = NULL;
+ char *next_line = NULL;
+ int ret = 0, data_ct = 0, i = 0, size = 0;
+ uint32_t *config_buf = NULL;
+
+ /* Allocate two extra space to add ';' character and NULL terminate */
+ local_buf = kzalloc(count + 2, GFP_KERNEL);
+ if (!local_buf) {
+ ret = -ENOMEM;
+ goto dfs_cfg_write_exit;
+ }
+ if (copy_from_user(local_buf, user_buf, count)) {
+ pr_err("user buf error\n");
+ ret = -EFAULT;
+ goto dfs_cfg_write_exit;
+ }
+ size = count + (strnchr(local_buf, count, '\n') ? 1 : 2);
+ local_buf[size - 2] = ';';
+ local_buf[size - 1] = '\0';
+ curr_ptr = next_line = local_buf;
+ while ((token1 = strnchr(next_line, local_buf + size - next_line, ';'))
+ != NULL) {
+ data_ct = 0;
+ *token1 = '\0';
+ curr_ptr = next_line;
+ next_line = token1 + 1;
+ for (token = (char *)curr_ptr; token &&
+ ((token = strnchr(token, next_line - token, ' '))
+ != NULL); token++)
+ data_ct++;
+ if (data_ct < 2) {
+ pr_err("Invalid format string:[%s]\n", curr_ptr);
+ ret = -EINVAL;
+ goto dfs_cfg_write_exit;
+ }
+ config_buf = kzalloc((++data_ct) * sizeof(uint32_t),
+ GFP_KERNEL);
+ if (!config_buf) {
+ ret = -ENOMEM;
+ goto dfs_cfg_write_exit;
+ }
+ pr_debug("Input:%s data_ct:%d\n", curr_ptr, data_ct);
+ for (i = 0, token = (char *)curr_ptr; token && (i < data_ct);
+ i++) {
+ token = strnchr(token, next_line - token, ' ');
+ if (token)
+ *token = '\0';
+ ret = kstrtouint(curr_ptr, 0, &config_buf[i]);
+ if (ret < 0) {
+ pr_err("Data[%s] scan error. err:%d\n",
+ curr_ptr, ret);
+ kfree(config_buf);
+ goto dfs_cfg_write_exit;
+ }
+ if (token)
+ curr_ptr = ++token;
+ }
+ switch (type) {
+ case LMH_READ_TYPE:
+ case LMH_CONFIG_TYPE:
+ ret = lmh_debug_config_write(LMH_DEBUG_SET,
+ config_buf, data_ct);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ kfree(config_buf);
+ if (ret) {
+ pr_err("Config error. type:%d err:%d\n", type, ret);
+ goto dfs_cfg_write_exit;
+ }
+ }
+
+dfs_cfg_write_exit:
+ kfree(local_buf);
+ return ret;
+}
+
+static ssize_t lmh_dbgfs_config_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ lmh_parse_and_extract(user_buf, count, LMH_CONFIG_TYPE);
+ return count;
+}
+
+static int lmh_dbgfs_data_read(struct seq_file *seq_fp, void *data)
+{
+ static uint32_t *read_buf;
+ static int read_buf_size;
+ int idx = 0, ret = 0;
+
+ if (!read_buf_size) {
+ ret = lmh_debug_read(&read_buf);
+ if (ret <= 0)
+ goto dfs_read_exit;
+ if (!read_buf || ret < sizeof(uint32_t)) {
+ ret = -EINVAL;
+ goto dfs_read_exit;
+ }
+ read_buf_size = ret;
+ ret = 0;
+ }
+
+ do {
+ seq_printf(seq_fp, "0x%x ", read_buf[idx]);
+ if (seq_has_overflowed(seq_fp)) {
+ pr_err("Seq overflow. idx:%d\n", idx);
+ goto dfs_read_exit;
+ }
+ idx++;
+ if ((idx % LMH_READ_LINE_LENGTH) == 0) {
+ seq_puts(seq_fp, "\n");
+ if (seq_has_overflowed(seq_fp)) {
+ pr_err("Seq overflow. idx:%d\n", idx);
+ goto dfs_read_exit;
+ }
+ }
+ } while (idx < (read_buf_size / sizeof(uint32_t)));
+ read_buf_size = 0;
+ read_buf = NULL;
+
+dfs_read_exit:
+ return ret;
+}
+
+static int lmh_get_recurssive_data(struct scm_desc *desc_arg, uint32_t cmd_idx,
+ uint32_t *payload, uint32_t *size, uint32_t *dest_buf)
+{
+ int idx = 0, ret = 0;
+ uint32_t next = 0;
+
+ do {
+ desc_arg->args[cmd_idx] = next;
+ dmac_flush_range(payload, (void *)payload +
+ sizeof(*payload) * LMH_SCM_PAYLOAD_SIZE);
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH, LMH_DEBUG_GET_TYPE),
+ desc_arg);
+ dmac_inv_range(payload, (void *)payload +
+ sizeof(*payload) * LMH_SCM_PAYLOAD_SIZE);
+ *size = desc_arg->ret[0];
+ /* Have barrier before reading from TZ data */
+ mb();
+ if (ret) {
+ pr_err("Error in SCM get type. cmd:%x err:%d\n",
+ LMH_DEBUG_GET_TYPE, ret);
+ return ret;
+ }
+ if (!*size) {
+ pr_err("No LMH device supported.\n");
+ return -ENODEV;
+ }
+ if (!dest_buf) {
+ dest_buf = devm_kcalloc(lmh_data->dev, *size,
+ sizeof(*dest_buf), GFP_KERNEL);
+ if (!dest_buf)
+ return -ENOMEM;
+ }
+
+ for (idx = next;
+ idx < min((next + LMH_SCM_PAYLOAD_SIZE), *size);
+ idx++)
+ dest_buf[idx] = payload[idx - next];
+ next += LMH_SCM_PAYLOAD_SIZE;
+ } while (next < *size);
+
+ return ret;
+}
+
+static ssize_t lmh_dbgfs_data_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ lmh_parse_and_extract(user_buf, count, LMH_READ_TYPE);
+ return count;
+}
+
+static int lmh_dbgfs_data_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, lmh_dbgfs_data_read, inode->i_private);
+}
+
+static int lmh_debug_get_types(bool is_read, uint32_t **buf)
+{
+ int ret = 0;
+ uint32_t size = 0;
+ struct scm_desc desc_arg;
+ uint32_t *payload = NULL, *dest_buf = NULL;
+
+ if (is_read && lmh_data->read_type) {
+ *buf = lmh_data->read_type;
+ return lmh_data->read_type_count;
+ } else if (!is_read && lmh_data->config_type) {
+ *buf = lmh_data->config_type;
+ return lmh_data->config_type_count;
+ }
+ payload = devm_kzalloc(lmh_data->dev,
+ PAGE_ALIGN(LMH_SCM_PAYLOAD_SIZE *
+ sizeof(*payload)), GFP_KERNEL);
+ if (!payload)
+ return -ENOMEM;
+ /* &payload may be a physical address > 4 GB */
+ desc_arg.args[0] = SCM_BUFFER_PHYS(payload);
+ desc_arg.args[1] =
+ SCM_BUFFER_SIZE(uint32_t) * LMH_SCM_PAYLOAD_SIZE;
+ desc_arg.args[2] = (is_read) ?
+ LMH_DEBUG_READ_TYPE : LMH_DEBUG_CONFIG_TYPE;
+ desc_arg.arginfo = SCM_ARGS(4, SCM_RW, SCM_VAL, SCM_VAL, SCM_VAL);
+ ret = lmh_get_recurssive_data(&desc_arg, 3, payload, &size, dest_buf);
+ if (ret)
+ goto get_type_exit;
+ pr_debug("Total %s types:%d\n", (is_read) ? "read" : "config", size);
+ if (is_read) {
+ lmh_data->read_type = *buf = dest_buf;
+ lmh_data->read_type_count = size;
+ } else {
+ lmh_data->config_type = *buf = dest_buf;
+ lmh_data->config_type_count = size;
+ }
+
+get_type_exit:
+ if (ret) {
+ if (lmh_data->read_type_count) {
+ devm_kfree(lmh_data->dev, lmh_data->read_type);
+ lmh_data->read_type_count = 0;
+ }
+ if (lmh_data->config_type_count) {
+ devm_kfree(lmh_data->dev, lmh_data->config_type);
+ lmh_data->config_type_count = 0;
+ }
+ }
+ if (payload)
+ devm_kfree(lmh_data->dev, payload);
+
+ return (ret) ? ret : size;
+}
+
+static int lmh_get_types(struct seq_file *seq_fp, enum lmh_read_type type)
+{
+ int ret = 0, idx = 0, size = 0;
+ uint32_t *type_list = NULL;
+
+ switch (type) {
+ case LMH_READ_TYPE:
+ ret = lmh_debug_get_types(true, &type_list);
+ break;
+ case LMH_CONFIG_TYPE:
+ ret = lmh_debug_get_types(false, &type_list);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (ret <= 0 || !type_list) {
+ pr_err("No device information. err:%d\n", ret);
+ return -ENODEV;
+ }
+ size = ret;
+ for (idx = 0; idx < size; idx++)
+ seq_printf(seq_fp, "0x%x ", type_list[idx]);
+ seq_puts(seq_fp, "\n");
+
+ return 0;
+}
+
+static int lmh_dbgfs_read_type(struct seq_file *seq_fp, void *data)
+{
+ return lmh_get_types(seq_fp, LMH_READ_TYPE);
+}
+
+static int lmh_dbgfs_read_type_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, lmh_dbgfs_read_type, inode->i_private);
+}
+
+static int lmh_dbgfs_config_type(struct seq_file *seq_fp, void *data)
+{
+ return lmh_get_types(seq_fp, LMH_CONFIG_TYPE);
+}
+
+static int lmh_dbgfs_config_type_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, lmh_dbgfs_config_type, inode->i_private);
+}
+
+static const struct file_operations lmh_dbgfs_config_fops = {
+ .write = lmh_dbgfs_config_write,
+};
+static const struct file_operations lmh_dbgfs_read_fops = {
+ .open = lmh_dbgfs_data_open,
+ .read = seq_read,
+ .write = lmh_dbgfs_data_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+static const struct file_operations lmh_dbgfs_read_type_fops = {
+ .open = lmh_dbgfs_read_type_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+static const struct file_operations lmh_dbgfs_config_type_fops = {
+ .open = lmh_dbgfs_config_type_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int lmh_check_tz_debug_cmds(void)
+{
+ if (!scm_is_call_available(SCM_SVC_LMH, LMH_DEBUG_SET)
+ || !scm_is_call_available(SCM_SVC_LMH, LMH_DEBUG_READ_BUF_SIZE)
+ || !scm_is_call_available(SCM_SVC_LMH, LMH_DEBUG_READ)
+ || !scm_is_call_available(SCM_SVC_LMH, LMH_DEBUG_GET_TYPE)) {
+ pr_debug("LMH debug scm not available\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int lmh_debug_init(void)
+{
+ int ret = 0;
+
+ if (lmh_check_tz_debug_cmds()) {
+ pr_debug("Debug commands not available.\n");
+ return -ENODEV;
+ }
+
+ lmh_data->debugfs_parent = debugfs_create_dir(LMH_MON_NAME, NULL);
+ if (IS_ERR(lmh_data->debugfs_parent)) {
+ ret = PTR_ERR(lmh_data->debugfs_parent);
+ pr_debug("Error creating debugfs dir:%s. err:%d\n",
+ LMH_MON_NAME, ret);
+ return ret;
+ }
+ lmh_data->debug_read = debugfs_create_file(LMH_DBGFS_READ, 0600,
+ lmh_data->debugfs_parent, NULL,
+ &lmh_dbgfs_read_fops);
+ if (IS_ERR(lmh_data->debug_read)) {
+ pr_err("Error creating" LMH_DBGFS_READ "entry.\n");
+ ret = PTR_ERR(lmh_data->debug_read);
+ goto dbg_reg_exit;
+ }
+ lmh_data->debug_config = debugfs_create_file(LMH_DBGFS_CONFIG_READ,
+ 0200, lmh_data->debugfs_parent, NULL,
+ &lmh_dbgfs_config_fops);
+ if (IS_ERR(lmh_data->debug_config)) {
+ pr_err("Error creating" LMH_DBGFS_CONFIG_READ "entry\n");
+ ret = PTR_ERR(lmh_data->debug_config);
+ goto dbg_reg_exit;
+ }
+ lmh_data->debug_read_type = debugfs_create_file(LMH_DBGFS_READ_TYPES,
+ 0400, lmh_data->debugfs_parent, NULL,
+ &lmh_dbgfs_read_type_fops);
+ if (IS_ERR(lmh_data->debug_read_type)) {
+ pr_err("Error creating" LMH_DBGFS_READ_TYPES "entry\n");
+ ret = PTR_ERR(lmh_data->debug_read_type);
+ goto dbg_reg_exit;
+ }
+ lmh_data->debug_read_type = debugfs_create_file(
+ LMH_DBGFS_CONFIG_TYPES,
+ 0400, lmh_data->debugfs_parent, NULL,
+ &lmh_dbgfs_config_type_fops);
+ if (IS_ERR(lmh_data->debug_config_type)) {
+ pr_err("Error creating" LMH_DBGFS_CONFIG_TYPES "entry\n");
+ ret = PTR_ERR(lmh_data->debug_config_type);
+ goto dbg_reg_exit;
+ }
+
+dbg_reg_exit:
+ if (ret)
+		/* Clean up all the debugfs nodes */
+ debugfs_remove_recursive(lmh_data->debugfs_parent);
+
+ return ret;
+}
+
+int lmh_debug_register(struct platform_device *pdev)
+{
+ int ret = 0;
+
+ if (lmh_data) {
+ pr_debug("Reinitializing lmh hardware driver\n");
+ return -EEXIST;
+ }
+ lmh_data = devm_kzalloc(&pdev->dev, sizeof(*lmh_data), GFP_KERNEL);
+ if (!lmh_data)
+ return -ENOMEM;
+ lmh_data->dev = &pdev->dev;
+
+ ret = lmh_debug_init();
+ if (ret) {
+ pr_debug("LMH debug init failed. err:%d\n", ret);
+ goto probe_exit;
+ }
+
+ return ret;
+
+probe_exit:
+ lmh_data = NULL;
+ return ret;
+}
+EXPORT_SYMBOL(lmh_debug_register);
diff --git a/drivers/thermal/qcom/lmh_dbg.h b/drivers/thermal/qcom/lmh_dbg.h
new file mode 100644
index 0000000..6ceb832
--- /dev/null
+++ b/drivers/thermal/qcom/lmh_dbg.h
@@ -0,0 +1,20 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QTI_LMH_H__
+#define __QTI_LMH_H__
+
+#include <linux/platform_device.h>
+
+int lmh_debug_register(struct platform_device *pdev);
+
+#endif /* __QTI_LMH_H__ */
diff --git a/drivers/thermal/qcom/msm_lmh_dcvs.c b/drivers/thermal/qcom/msm_lmh_dcvs.c
index 65dc2df..d590d24 100644
--- a/drivers/thermal/qcom/msm_lmh_dcvs.c
+++ b/drivers/thermal/qcom/msm_lmh_dcvs.c
@@ -26,6 +26,7 @@
#include <linux/pm_opp.h>
#include <linux/cpu_cooling.h>
#include <linux/atomic.h>
+#include <linux/regulator/consumer.h>
#include <asm/smp_plat.h>
#include <asm/cacheflush.h>
@@ -33,6 +34,7 @@
#include <soc/qcom/scm.h>
#include "../thermal_core.h"
+#include "lmh_dbg.h"
#define CREATE_TRACE_POINTS
#include <trace/events/lmh.h>
@@ -101,10 +103,12 @@
unsigned long max_freq;
unsigned long min_freq;
unsigned long hw_freq_limit;
+ struct device_attribute lmh_freq_attr;
struct list_head list;
atomic_t is_irq_enabled;
struct mutex access_lock;
struct __limits_cdev_data *cdev_data;
+ struct regulator *isens_reg;
};
LIST_HEAD(lmh_dcvs_hw_list);
@@ -429,6 +433,60 @@
return 0;
}
+static void limits_isens_vref_ldo_init(struct platform_device *pdev,
+ struct limits_dcvs_hw *hw)
+{
+ int ret = 0;
+ uint32_t settings[3];
+
+ hw->isens_reg = devm_regulator_get(&pdev->dev, "isens_vref");
+ if (IS_ERR_OR_NULL(hw->isens_reg)) {
+ if (PTR_ERR(hw->isens_reg) == -ENODEV)
+ return;
+
+ pr_err("Regulator:isens_vref init error:%ld\n",
+ PTR_ERR(hw->isens_reg));
+ return;
+ }
+ ret = of_property_read_u32_array(pdev->dev.of_node,
+ "isens-vref-settings",
+ settings, 3);
+ if (ret) {
+ pr_err("Regulator:isens_vref settings read error:%d\n",
+ ret);
+ devm_regulator_put(hw->isens_reg);
+ return;
+ }
+ ret = regulator_set_voltage(hw->isens_reg, settings[0], settings[1]);
+ if (ret) {
+ pr_err("Regulator:isens_vref set voltage error:%d\n", ret);
+ devm_regulator_put(hw->isens_reg);
+ return;
+ }
+ ret = regulator_set_load(hw->isens_reg, settings[2]);
+ if (ret) {
+ pr_err("Regulator:isens_vref set load error:%d\n", ret);
+ devm_regulator_put(hw->isens_reg);
+ return;
+ }
+ if (regulator_enable(hw->isens_reg)) {
+ pr_err("Failed to enable regulator:isens_vref\n");
+ devm_regulator_put(hw->isens_reg);
+ return;
+ }
+}
+
+static ssize_t
+lmh_freq_limit_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ struct limits_dcvs_hw *hw = container_of(devattr,
+ struct limits_dcvs_hw,
+ lmh_freq_attr);
+
+ return snprintf(buf, PAGE_SIZE, "%lu\n", hw->hw_freq_limit);
+}
+
static int limits_dcvs_probe(struct platform_device *pdev)
{
int ret;
@@ -584,12 +642,18 @@
ret = 0;
goto probe_exit;
}
+ limits_isens_vref_ldo_init(pdev, hw);
+ hw->lmh_freq_attr.attr.name = "lmh_freq_limit";
+ hw->lmh_freq_attr.show = lmh_freq_limit_show;
+ hw->lmh_freq_attr.attr.mode = 0444;
+ device_create_file(&pdev->dev, &hw->lmh_freq_attr);
probe_exit:
mutex_lock(&lmh_dcvs_list_access);
INIT_LIST_HEAD(&hw->list);
list_add(&hw->list, &lmh_dcvs_hw_list);
mutex_unlock(&lmh_dcvs_list_access);
+ lmh_debug_register(pdev);
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "lmh-dcvs/cdev:online",
limits_cpu_online, NULL);
diff --git a/drivers/thermal/qcom/qmi_cooling.c b/drivers/thermal/qcom/qmi_cooling.c
new file mode 100644
index 0000000..af82030
--- /dev/null
+++ b/drivers/thermal/qcom/qmi_cooling.c
@@ -0,0 +1,681 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/thermal.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <soc/qcom/msm_qmi_interface.h>
+
+#include "thermal_mitigation_device_service_v01.h"
+
+#define QMI_CDEV_DRIVER "qmi-cooling-device"
+#define QMI_TMD_RESP_TOUT_MSEC 50
+#define QMI_CLIENT_NAME_LENGTH 40
+
+enum qmi_device_type {
+ QMI_CDEV_MAX_LIMIT_TYPE,
+ QMI_CDEV_MIN_LIMIT_TYPE,
+ QMI_CDEV_TYPE_NR,
+};
+
+struct qmi_cooling_device {
+ struct device_node *np;
+ char cdev_name[THERMAL_NAME_LENGTH];
+ char qmi_name[QMI_CLIENT_NAME_LENGTH];
+ bool connection_active;
+ enum qmi_device_type type;
+ struct list_head qmi_node;
+ struct thermal_cooling_device *cdev;
+ unsigned int mtgn_state;
+ unsigned int max_level;
+ struct qmi_tmd_instance *tmd;
+};
+
+struct qmi_tmd_instance {
+ struct device *dev;
+ struct qmi_handle *handle;
+ struct mutex mutex;
+ struct work_struct work_svc_arrive;
+ struct work_struct work_svc_exit;
+ struct work_struct work_rcv_msg;
+ struct notifier_block nb;
+ uint32_t inst_id;
+ struct list_head tmd_cdev_list;
+};
+
+struct qmi_dev_info {
+ char *dev_name;
+ enum qmi_device_type type;
+};
+
+static struct workqueue_struct *qmi_tmd_wq;
+static struct qmi_tmd_instance *tmd_instances;
+static int tmd_inst_cnt;
+
+static struct qmi_dev_info device_clients[] = {
+ {
+ .dev_name = "pa",
+ .type = QMI_CDEV_MAX_LIMIT_TYPE,
+ },
+ {
+ .dev_name = "cx_vdd_limit",
+ .type = QMI_CDEV_MAX_LIMIT_TYPE,
+ },
+ {
+ .dev_name = "modem",
+ .type = QMI_CDEV_MAX_LIMIT_TYPE,
+ },
+ {
+ .dev_name = "modem_current",
+ .type = QMI_CDEV_MAX_LIMIT_TYPE,
+ },
+ {
+ .dev_name = "modem_bw",
+ .type = QMI_CDEV_MAX_LIMIT_TYPE,
+ },
+ {
+ .dev_name = "cpuv_restriction_cold",
+ .type = QMI_CDEV_MIN_LIMIT_TYPE,
+ },
+ {
+ .dev_name = "cpr_cold",
+ .type = QMI_CDEV_MIN_LIMIT_TYPE,
+ }
+};
+
+static int qmi_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct qmi_cooling_device *qmi_cdev = cdev->devdata;
+
+ if (!qmi_cdev)
+ return -EINVAL;
+
+ *state = qmi_cdev->max_level;
+
+ return 0;
+}
+
+static int qmi_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct qmi_cooling_device *qmi_cdev = cdev->devdata;
+
+ if (!qmi_cdev)
+ return -EINVAL;
+
+ if (qmi_cdev->type == QMI_CDEV_MIN_LIMIT_TYPE) {
+ *state = 0;
+ return 0;
+ }
+ *state = qmi_cdev->mtgn_state;
+
+ return 0;
+}
+
+static int qmi_tmd_send_state_request(struct qmi_cooling_device *qmi_cdev,
+ uint8_t state)
+{
+ int ret = 0;
+ struct tmd_set_mitigation_level_req_msg_v01 req;
+ struct tmd_set_mitigation_level_resp_msg_v01 tmd_resp;
+ struct msg_desc req_desc, resp_desc;
+ struct qmi_tmd_instance *tmd = qmi_cdev->tmd;
+
+ memset(&req, 0, sizeof(req));
+ memset(&tmd_resp, 0, sizeof(tmd_resp));
+
+ strlcpy(req.mitigation_dev_id.mitigation_dev_id, qmi_cdev->qmi_name,
+ QMI_TMD_MITIGATION_DEV_ID_LENGTH_MAX_V01);
+ req.mitigation_level = state;
+
+ req_desc.max_msg_len = TMD_SET_MITIGATION_LEVEL_REQ_MSG_V01_MAX_MSG_LEN;
+ req_desc.msg_id = QMI_TMD_SET_MITIGATION_LEVEL_REQ_V01;
+ req_desc.ei_array = tmd_set_mitigation_level_req_msg_v01_ei;
+
+ resp_desc.max_msg_len =
+ TMD_SET_MITIGATION_LEVEL_RESP_MSG_V01_MAX_MSG_LEN;
+ resp_desc.msg_id = QMI_TMD_SET_MITIGATION_LEVEL_RESP_V01;
+ resp_desc.ei_array = tmd_set_mitigation_level_resp_msg_v01_ei;
+
+ mutex_lock(&tmd->mutex);
+ ret = qmi_send_req_wait(tmd->handle,
+ &req_desc, &req, sizeof(req),
+ &resp_desc, &tmd_resp, sizeof(tmd_resp),
+ QMI_TMD_RESP_TOUT_MSEC);
+ if (ret < 0) {
+ pr_err("qmi set state:%d failed for %s ret:%d\n",
+ state, qmi_cdev->cdev_name, ret);
+ goto qmi_send_exit;
+ }
+
+ if (tmd_resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ret = tmd_resp.resp.result;
+ pr_err("qmi set state:%d NOT success for %s ret:%d\n",
+ state, qmi_cdev->cdev_name, ret);
+ goto qmi_send_exit;
+ }
+ pr_debug("Requested qmi state:%d for %s\n", state, qmi_cdev->cdev_name);
+
+qmi_send_exit:
+ mutex_unlock(&tmd->mutex);
+ return ret;
+}
+
+static int qmi_set_cur_or_min_state(struct qmi_cooling_device *qmi_cdev,
+ unsigned long state)
+{
+ int ret = 0;
+ struct qmi_tmd_instance *tmd = qmi_cdev->tmd;
+
+ if (!tmd)
+ return -EINVAL;
+
+ if (qmi_cdev->mtgn_state == state)
+ return ret;
+
+ /* save it and return if server exit */
+ if (!qmi_cdev->connection_active) {
+ qmi_cdev->mtgn_state = state;
+ pr_debug("Pending request:%ld for %s\n", state,
+ qmi_cdev->cdev_name);
+ return ret;
+ }
+
+ /* It is best effort to save state even if QMI fail */
+ ret = qmi_tmd_send_state_request(qmi_cdev, (uint8_t)state);
+
+ qmi_cdev->mtgn_state = state;
+
+ return ret;
+}
+
+static int qmi_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+{
+ struct qmi_cooling_device *qmi_cdev = cdev->devdata;
+
+ if (!qmi_cdev)
+ return -EINVAL;
+
+ if (qmi_cdev->type == QMI_CDEV_MIN_LIMIT_TYPE)
+ return 0;
+
+ if (state > qmi_cdev->max_level)
+ state = qmi_cdev->max_level;
+
+ return qmi_set_cur_or_min_state(qmi_cdev, state);
+}
+
+static int qmi_set_min_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+{
+ struct qmi_cooling_device *qmi_cdev = cdev->devdata;
+
+ if (!qmi_cdev)
+ return -EINVAL;
+
+ if (qmi_cdev->type == QMI_CDEV_MAX_LIMIT_TYPE)
+ return 0;
+
+ if (state > qmi_cdev->max_level)
+ state = qmi_cdev->max_level;
+
+ /* Convert state into QMI client expects for min state */
+ state = qmi_cdev->max_level - state;
+
+ return qmi_set_cur_or_min_state(qmi_cdev, state);
+}
+
+static int qmi_get_min_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct qmi_cooling_device *qmi_cdev = cdev->devdata;
+
+ if (!qmi_cdev)
+ return -EINVAL;
+
+ if (qmi_cdev->type == QMI_CDEV_MAX_LIMIT_TYPE) {
+ *state = 0;
+ return 0;
+ }
+ *state = qmi_cdev->max_level - qmi_cdev->mtgn_state;
+
+ return 0;
+}
+
+static struct thermal_cooling_device_ops qmi_device_ops = {
+ .get_max_state = qmi_get_max_state,
+ .get_cur_state = qmi_get_cur_state,
+ .set_cur_state = qmi_set_cur_state,
+ .set_min_state = qmi_set_min_state,
+ .get_min_state = qmi_get_min_state,
+};
+
+static int qmi_register_cooling_device(struct qmi_cooling_device *qmi_cdev)
+{
+ qmi_cdev->cdev = thermal_of_cooling_device_register(
+ qmi_cdev->np,
+ qmi_cdev->cdev_name,
+ qmi_cdev,
+ &qmi_device_ops);
+ if (IS_ERR(qmi_cdev->cdev)) {
+ pr_err("Cooling register failed for %s, ret:%ld\n",
+ qmi_cdev->cdev_name, PTR_ERR(qmi_cdev->cdev));
+ return PTR_ERR(qmi_cdev->cdev);
+ }
+ pr_debug("Cooling register success for %s\n", qmi_cdev->cdev_name);
+
+ return 0;
+}
+
+static int verify_devices_and_register(struct qmi_tmd_instance *tmd)
+{
+ struct tmd_get_mitigation_device_list_req_msg_v01 req;
+ struct tmd_get_mitigation_device_list_resp_msg_v01 *tmd_resp;
+ struct msg_desc req_desc, resp_desc;
+ int ret = 0, i;
+
+ memset(&req, 0, sizeof(req));
+ /* size of tmd_resp is very high, use heap memory rather than stack */
+ tmd_resp = kzalloc(sizeof(*tmd_resp), GFP_KERNEL);
+ if (!tmd_resp)
+ return -ENOMEM;
+
+ req_desc.max_msg_len =
+ TMD_GET_MITIGATION_DEVICE_LIST_REQ_MSG_V01_MAX_MSG_LEN;
+ req_desc.msg_id = QMI_TMD_GET_MITIGATION_DEVICE_LIST_REQ_V01;
+ req_desc.ei_array = tmd_get_mitigation_device_list_req_msg_v01_ei;
+
+ resp_desc.max_msg_len =
+ TMD_GET_MITIGATION_DEVICE_LIST_RESP_MSG_V01_MAX_MSG_LEN;
+ resp_desc.msg_id = QMI_TMD_GET_MITIGATION_DEVICE_LIST_RESP_V01;
+ resp_desc.ei_array = tmd_get_mitigation_device_list_resp_msg_v01_ei;
+
+ mutex_lock(&tmd->mutex);
+ ret = qmi_send_req_wait(tmd->handle,
+ &req_desc, &req, sizeof(req),
+ &resp_desc, tmd_resp, sizeof(*tmd_resp),
+ 0);
+ if (ret < 0) {
+ pr_err("qmi get device list failed for inst_id:0x%x ret:%d\n",
+ tmd->inst_id, ret);
+ goto reg_exit;
+ }
+
+ if (tmd_resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ ret = tmd_resp->resp.result;
+ pr_err("Get device list NOT success for inst_id:0x%x ret:%d\n",
+ tmd->inst_id, ret);
+ goto reg_exit;
+ }
+ mutex_unlock(&tmd->mutex);
+
+ for (i = 0; i < tmd_resp->mitigation_device_list_len; i++) {
+ struct qmi_cooling_device *qmi_cdev = NULL;
+
+ list_for_each_entry(qmi_cdev, &tmd->tmd_cdev_list,
+ qmi_node) {
+ struct tmd_mitigation_dev_list_type_v01 *device =
+ &tmd_resp->mitigation_device_list[i];
+
+ if ((strncasecmp(qmi_cdev->qmi_name,
+ device->mitigation_dev_id.mitigation_dev_id,
+ QMI_TMD_MITIGATION_DEV_ID_LENGTH_MAX_V01)))
+ continue;
+
+ qmi_cdev->connection_active = true;
+ qmi_cdev->max_level = device->max_mitigation_level;
+ /*
+ * It is better to set current state
+ * initially or during restart
+ */
+ qmi_tmd_send_state_request(qmi_cdev,
+ qmi_cdev->mtgn_state);
+ if (!qmi_cdev->cdev)
+ ret = qmi_register_cooling_device(qmi_cdev);
+ break;
+ }
+ }
+
+ kfree(tmd_resp);
+ return ret;
+
+reg_exit:
+ mutex_unlock(&tmd->mutex);
+ kfree(tmd_resp);
+
+ return ret;
+}
+
+static void qmi_tmd_rcv_msg(struct work_struct *work)
+{
+ int rc;
+ struct qmi_tmd_instance *tmd = container_of(work,
+ struct qmi_tmd_instance,
+ work_rcv_msg);
+
+ do {
+ pr_debug("Notified about a Receive Event\n");
+ } while ((rc = qmi_recv_msg(tmd->handle)) == 0);
+
+ if (rc != -ENOMSG)
+ pr_err("Error receiving message for SVC:0x%x, ret:%d\n",
+ tmd->inst_id, rc);
+}
+
+static void qmi_tmd_clnt_notify(struct qmi_handle *handle,
+ enum qmi_event_type event, void *priv_data)
+{
+ struct qmi_tmd_instance *tmd =
+ (struct qmi_tmd_instance *)priv_data;
+
+ if (!tmd) {
+ pr_debug("tmd is NULL\n");
+ return;
+ }
+
+ switch (event) {
+ case QMI_RECV_MSG:
+ queue_work(qmi_tmd_wq, &tmd->work_rcv_msg);
+ break;
+ default:
+ break;
+ }
+}
+
+static void qmi_tmd_svc_arrive(struct work_struct *work)
+{
+ int ret = 0;
+ struct qmi_tmd_instance *tmd = container_of(work,
+ struct qmi_tmd_instance,
+ work_svc_arrive);
+
+ mutex_lock(&tmd->mutex);
+ tmd->handle = qmi_handle_create(qmi_tmd_clnt_notify, tmd);
+ if (!tmd->handle) {
+ pr_err("QMI TMD client handle alloc failed for 0x%x\n",
+ tmd->inst_id);
+ goto arrive_exit;
+ }
+
+ ret = qmi_connect_to_service(tmd->handle, TMD_SERVICE_ID_V01,
+ TMD_SERVICE_VERS_V01,
+ tmd->inst_id);
+ if (ret < 0) {
+ pr_err("Could not connect handle to service for 0x%x, ret:%d\n",
+ tmd->inst_id, ret);
+ qmi_handle_destroy(tmd->handle);
+ tmd->handle = NULL;
+ goto arrive_exit;
+ }
+ mutex_unlock(&tmd->mutex);
+
+ verify_devices_and_register(tmd);
+
+ return;
+
+arrive_exit:
+ mutex_unlock(&tmd->mutex);
+}
+
+static void qmi_tmd_svc_exit(struct work_struct *work)
+{
+ struct qmi_tmd_instance *tmd = container_of(work,
+ struct qmi_tmd_instance,
+ work_svc_exit);
+ struct qmi_cooling_device *qmi_cdev;
+
+ mutex_lock(&tmd->mutex);
+ qmi_handle_destroy(tmd->handle);
+ tmd->handle = NULL;
+
+ list_for_each_entry(qmi_cdev, &tmd->tmd_cdev_list, qmi_node)
+ qmi_cdev->connection_active = false;
+
+ mutex_unlock(&tmd->mutex);
+}
+
+static int qmi_tmd_svc_event_notify(struct notifier_block *this,
+ unsigned long event,
+ void *data)
+{
+ struct qmi_tmd_instance *tmd = container_of(this,
+ struct qmi_tmd_instance,
+ nb);
+
+ if (!tmd) {
+ pr_debug("tmd is NULL\n");
+ return -EINVAL;
+ }
+
+ switch (event) {
+ case QMI_SERVER_ARRIVE:
+ schedule_work(&tmd->work_svc_arrive);
+ break;
+ case QMI_SERVER_EXIT:
+ schedule_work(&tmd->work_svc_exit);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static void qmi_tmd_cleanup(void)
+{
+ int idx = 0;
+ struct qmi_tmd_instance *tmd = tmd_instances;
+ struct qmi_cooling_device *qmi_cdev, *c_next;
+
+ for (; idx < tmd_inst_cnt; idx++) {
+ mutex_lock(&tmd[idx].mutex);
+ list_for_each_entry_safe(qmi_cdev, c_next,
+ &tmd[idx].tmd_cdev_list, qmi_node) {
+ if (qmi_cdev->cdev)
+ thermal_cooling_device_unregister(
+ qmi_cdev->cdev);
+
+ list_del(&qmi_cdev->qmi_node);
+ }
+ if (tmd[idx].handle)
+ qmi_handle_destroy(tmd[idx].handle);
+
+ if (tmd[idx].nb.notifier_call)
+ qmi_svc_event_notifier_unregister(TMD_SERVICE_ID_V01,
+ TMD_SERVICE_VERS_V01,
+ tmd[idx].inst_id,
+ &tmd[idx].nb);
+ mutex_unlock(&tmd[idx].mutex);
+ }
+
+ if (qmi_tmd_wq) {
+ destroy_workqueue(qmi_tmd_wq);
+ qmi_tmd_wq = NULL;
+ }
+}
+
+static int of_get_qmi_tmd_platform_data(struct device *dev)
+{
+ int ret = 0, idx = 0, i = 0, subsys_cnt = 0;
+ struct device_node *np = dev->of_node;
+ struct device_node *subsys_np, *cdev_np;
+ struct qmi_tmd_instance *tmd;
+ struct qmi_cooling_device *qmi_cdev;
+
+ subsys_cnt = of_get_available_child_count(np);
+ if (!subsys_cnt) {
+ dev_err(dev, "No child node to process\n");
+ return -EFAULT;
+ }
+
+ tmd = devm_kcalloc(dev, subsys_cnt, sizeof(*tmd), GFP_KERNEL);
+ if (!tmd)
+ return -ENOMEM;
+
+ for_each_available_child_of_node(np, subsys_np) {
+ if (idx >= subsys_cnt)
+ break;
+
+ ret = of_property_read_u32(subsys_np, "qcom,instance-id",
+ &tmd[idx].inst_id);
+ if (ret) {
+ dev_err(dev, "error reading qcom,insance-id. ret:%d\n",
+ ret);
+ return ret;
+ }
+
+ tmd[idx].dev = dev;
+ mutex_init(&tmd[idx].mutex);
+ INIT_LIST_HEAD(&tmd[idx].tmd_cdev_list);
+
+ for_each_available_child_of_node(subsys_np, cdev_np) {
+ const char *qmi_name;
+
+ qmi_cdev = devm_kzalloc(dev, sizeof(*qmi_cdev),
+ GFP_KERNEL);
+ if (!qmi_cdev) {
+ ret = -ENOMEM;
+ return ret;
+ }
+
+ strlcpy(qmi_cdev->cdev_name, cdev_np->name,
+ THERMAL_NAME_LENGTH);
+
+ if (!of_property_read_string(cdev_np,
+ "qcom,qmi-dev-name",
+ &qmi_name)) {
+ strlcpy(qmi_cdev->qmi_name, qmi_name,
+ QMI_CLIENT_NAME_LENGTH);
+ } else {
+ dev_err(dev, "Fail to parse dev name for %s\n",
+ cdev_np->name);
+ break;
+ }
+ /* Check for supported qmi dev*/
+ for (i = 0; i < ARRAY_SIZE(device_clients); i++) {
+ if (strcmp(device_clients[i].dev_name,
+ qmi_cdev->qmi_name) == 0)
+ break;
+ }
+
+ if (i >= ARRAY_SIZE(device_clients)) {
+ dev_err(dev, "Not supported dev name for %s\n",
+ cdev_np->name);
+ break;
+ }
+ qmi_cdev->type = device_clients[i].type;
+ qmi_cdev->tmd = &tmd[idx];
+ qmi_cdev->np = cdev_np;
+ qmi_cdev->mtgn_state = 0;
+ list_add(&qmi_cdev->qmi_node, &tmd[idx].tmd_cdev_list);
+ }
+ idx++;
+ }
+ tmd_instances = tmd;
+ tmd_inst_cnt = subsys_cnt;
+
+ return 0;
+}
+
+static int qmi_device_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int ret = 0, idx = 0;
+
+ ret = of_get_qmi_tmd_platform_data(dev);
+ if (ret)
+ goto probe_err;
+
+ if (!tmd_instances || !tmd_inst_cnt) {
+ dev_err(dev, "Empty tmd instances\n");
+ return -EINVAL;
+ }
+
+ qmi_tmd_wq = create_singlethread_workqueue("qmi_tmd_wq");
+ if (!qmi_tmd_wq) {
+ dev_err(dev, "Failed to create single thread workqueue\n");
+ ret = -EFAULT;
+ goto probe_err;
+ }
+
+ for (; idx < tmd_inst_cnt; idx++) {
+ struct qmi_tmd_instance *tmd = &tmd_instances[idx];
+
+ if (list_empty(&tmd->tmd_cdev_list))
+ continue;
+
+ tmd->nb.notifier_call = qmi_tmd_svc_event_notify;
+ INIT_WORK(&tmd->work_svc_arrive, qmi_tmd_svc_arrive);
+ INIT_WORK(&tmd->work_svc_exit, qmi_tmd_svc_exit);
+ INIT_WORK(&tmd->work_rcv_msg, qmi_tmd_rcv_msg);
+
+ ret = qmi_svc_event_notifier_register(TMD_SERVICE_ID_V01,
+ TMD_SERVICE_VERS_V01,
+ tmd->inst_id,
+ &tmd->nb);
+ if (ret < 0) {
+ dev_err(dev, "QMI register failed for 0x%x, ret:%d\n",
+ tmd->inst_id, ret);
+ goto probe_err;
+ }
+ }
+
+ return 0;
+
+probe_err:
+ qmi_tmd_cleanup();
+ return ret;
+}
+
+static int qmi_device_remove(struct platform_device *pdev)
+{
+ qmi_tmd_cleanup();
+
+ return 0;
+}
+
+static const struct of_device_id qmi_device_match[] = {
+ {.compatible = "qcom,qmi_cooling_devices"},
+ {}
+};
+
+static struct platform_driver qmi_device_driver = {
+ .probe = qmi_device_probe,
+ .remove = qmi_device_remove,
+ .driver = {
+ .name = "QMI_CDEV_DRIVER",
+ .owner = THIS_MODULE,
+ .of_match_table = qmi_device_match,
+ },
+};
+
+static int __init qmi_device_init(void)
+{
+ return platform_driver_register(&qmi_device_driver);
+}
+module_init(qmi_device_init);
+
+static void __exit qmi_device_exit(void)
+{
+ platform_driver_unregister(&qmi_device_driver);
+}
+module_exit(qmi_device_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QTI QMI cooling device driver");
diff --git a/drivers/thermal/qcom/qti_virtual_sensor.c b/drivers/thermal/qcom/qti_virtual_sensor.c
index 3064c74..923680a 100644
--- a/drivers/thermal/qcom/qti_virtual_sensor.c
+++ b/drivers/thermal/qcom/qti_virtual_sensor.c
@@ -29,7 +29,7 @@
.logic = VIRT_MAXIMUM,
},
{
- .virt_zone_name = "silver-virt-max-usr",
+ .virt_zone_name = "silv-virt-max-step",
.num_sensors = 4,
.sensor_names = {"cpu0-silver-usr",
"cpu1-silver-usr",
@@ -38,7 +38,7 @@
.logic = VIRT_MAXIMUM,
},
{
- .virt_zone_name = "gold-virt-max-usr",
+ .virt_zone_name = "gold-virt-max-step",
.num_sensors = 4,
.sensor_names = {"cpu0-gold-usr",
"cpu1-gold-usr",
diff --git a/drivers/thermal/qcom/regulator_cooling.c b/drivers/thermal/qcom/regulator_cooling.c
new file mode 100644
index 0000000..3cbf198
--- /dev/null
+++ b/drivers/thermal/qcom/regulator_cooling.c
@@ -0,0 +1,224 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/thermal.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/mailbox_client.h>
+
+#define REG_CDEV_DRIVER "reg-cooling-device"
+#define REG_MSG_FORMAT "{class:volt_flr, event:zero_temp, res:%s, value:%s}"
+#define REG_CDEV_MAX_STATE 1
+#define MBOX_TOUT_MS 1000
+#define REG_MSG_MAX_LEN 100
+
+struct reg_cooling_device {
+ struct thermal_cooling_device *cdev;
+ unsigned int min_state;
+ const char *resource_name;
+ struct mbox_chan *qmp_chan;
+ struct mbox_client *client;
+};
+
+struct aop_msg {
+ uint32_t len;
+ void *msg;
+};
+
+enum regulator_rail_type {
+ REG_COOLING_CX,
+ REG_COOLING_MX,
+ REG_COOLING_EBI,
+ REG_COOLING_NR,
+};
+
+static char *regulator_rail[REG_COOLING_NR] = {
+ "cx",
+ "mx",
+ "ebi",
+};
+
+static int aop_send_msg(struct reg_cooling_device *reg_dev, int min_state)
+{
+ char msg_buf[REG_MSG_MAX_LEN] = {0};
+ int ret = 0;
+ struct aop_msg msg;
+
+ if (!reg_dev->qmp_chan) {
+ pr_err("mbox not initialized for resource:%s\n",
+ reg_dev->resource_name);
+ return -EINVAL;
+ }
+
+ ret = snprintf(msg_buf, REG_MSG_MAX_LEN, REG_MSG_FORMAT,
+ reg_dev->resource_name,
+ (min_state == REG_CDEV_MAX_STATE) ? "off" : "on");
+ if (ret >= REG_MSG_MAX_LEN) {
+ pr_err("Message too long for resource:%s\n",
+ reg_dev->resource_name);
+ return -E2BIG;
+ }
+ msg.len = REG_MSG_MAX_LEN;
+ msg.msg = msg_buf;
+ ret = mbox_send_message(reg_dev->qmp_chan, &msg);
+
+ return (ret < 0) ? ret : 0;
+}
+
+static int reg_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ *state = REG_CDEV_MAX_STATE;
+ return 0;
+}
+
+static int reg_get_min_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct reg_cooling_device *reg_dev = cdev->devdata;
+
+ *state = reg_dev->min_state;
+ return 0;
+}
+
+static int reg_send_min_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+{
+ struct reg_cooling_device *reg_dev = cdev->devdata;
+ int ret = 0;
+
+ if (state > REG_CDEV_MAX_STATE)
+ state = REG_CDEV_MAX_STATE;
+
+ if (reg_dev->min_state == state)
+ return ret;
+
+ ret = aop_send_msg(reg_dev, state);
+ if (ret) {
+ pr_err("regulator:%s switching to floor %lu error. err:%d\n",
+ reg_dev->resource_name, state, ret);
+ } else {
+ pr_debug("regulator:%s switched to %lu from %d\n",
+ reg_dev->resource_name, state, reg_dev->min_state);
+ reg_dev->min_state = state;
+ }
+
+ return ret;
+}
+
+static int reg_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ *state = 0;
+ return 0;
+}
+
+static int reg_send_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+{
+ return 0;
+}
+
+static struct thermal_cooling_device_ops reg_dev_ops = {
+ .get_max_state = reg_get_max_state,
+ .get_cur_state = reg_get_cur_state,
+ .set_cur_state = reg_send_cur_state,
+ .set_min_state = reg_send_min_state,
+ .get_min_state = reg_get_min_state,
+};
+
+static int reg_init_mbox(struct platform_device *pdev,
+ struct reg_cooling_device *reg_dev)
+{
+ reg_dev->client = devm_kzalloc(&pdev->dev, sizeof(*reg_dev->client),
+ GFP_KERNEL);
+ if (!reg_dev->client)
+ return -ENOMEM;
+
+ reg_dev->client->dev = &pdev->dev;
+ reg_dev->client->tx_block = true;
+ reg_dev->client->tx_tout = MBOX_TOUT_MS;
+ reg_dev->client->knows_txdone = false;
+
+ reg_dev->qmp_chan = mbox_request_channel(reg_dev->client, 0);
+ if (IS_ERR(reg_dev->qmp_chan)) {
+ dev_err(&pdev->dev, "Mbox request failed. err:%ld\n",
+ PTR_ERR(reg_dev->qmp_chan));
+ return PTR_ERR(reg_dev->qmp_chan);
+ }
+
+ return 0;
+}
+
+static int reg_dev_probe(struct platform_device *pdev)
+{
+ int ret = 0, idx = 0;
+ struct reg_cooling_device *reg_dev = NULL;
+
+ reg_dev = devm_kzalloc(&pdev->dev, sizeof(*reg_dev), GFP_KERNEL);
+ if (!reg_dev)
+ return -ENOMEM;
+
+ ret = reg_init_mbox(pdev, reg_dev);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_string(pdev->dev.of_node,
+ "qcom,reg-resource-name",
+ ®_dev->resource_name);
+ if (ret) {
+ dev_err(&pdev->dev, "Error reading resource name. err:%d\n",
+ ret);
+ goto mbox_free;
+ }
+
+ for (idx = 0; idx < REG_COOLING_NR; idx++) {
+ if (!strcmp(reg_dev->resource_name, regulator_rail[idx]))
+ break;
+ }
+ if (idx == REG_COOLING_NR) {
+ dev_err(&pdev->dev, "Invalid regulator resource name:%s\n",
+ reg_dev->resource_name);
+ ret = -EINVAL;
+ goto mbox_free;
+ }
+ reg_dev->min_state = REG_CDEV_MAX_STATE;
+ reg_dev->cdev = thermal_of_cooling_device_register(
+ pdev->dev.of_node,
+ (char *)reg_dev->resource_name,
+ reg_dev, ®_dev_ops);
+ if (IS_ERR(reg_dev->cdev))
+ goto mbox_free;
+
+ return ret;
+
+mbox_free:
+ mbox_free_channel(reg_dev->qmp_chan);
+
+ return ret;
+}
+
+static const struct of_device_id reg_dev_of_match[] = {
+ {.compatible = "qcom,rpmh-reg-cdev", },
+ {}
+};
+
+static struct platform_driver reg_dev_driver = {
+ .driver = {
+ .name = REG_CDEV_DRIVER,
+ .of_match_table = reg_dev_of_match,
+ },
+ .probe = reg_dev_probe,
+};
+builtin_platform_driver(reg_dev_driver);
diff --git a/drivers/thermal/qcom/thermal_mitigation_device_service_v01.c b/drivers/thermal/qcom/thermal_mitigation_device_service_v01.c
new file mode 100644
index 0000000..af020eb
--- /dev/null
+++ b/drivers/thermal/qcom/thermal_mitigation_device_service_v01.c
@@ -0,0 +1,359 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/qmi_encdec.h>
+
+#include <soc/qcom/msm_qmi_interface.h>
+
+#include "thermal_mitigation_device_service_v01.h"
+
+static struct elem_info tmd_mitigation_dev_id_type_v01_ei[] = {
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_TMD_MITIGATION_DEV_ID_LENGTH_MAX_V01 + 1,
+ .elem_size = sizeof(char),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(
+ struct tmd_mitigation_dev_id_type_v01,
+ mitigation_dev_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info tmd_mitigation_dev_list_type_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct tmd_mitigation_dev_id_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(
+ struct tmd_mitigation_dev_list_type_v01,
+ mitigation_dev_id),
+ .ei_array = tmd_mitigation_dev_id_type_v01_ei,
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(
+ struct tmd_mitigation_dev_list_type_v01,
+ max_mitigation_level),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info tmd_get_mitigation_device_list_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info tmd_get_mitigation_device_list_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct tmd_get_mitigation_device_list_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct tmd_get_mitigation_device_list_resp_msg_v01,
+ mitigation_device_list_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct tmd_get_mitigation_device_list_resp_msg_v01,
+ mitigation_device_list_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_TMD_MITIGATION_DEV_LIST_MAX_V01,
+ .elem_size = sizeof(
+ struct tmd_mitigation_dev_list_type_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct tmd_get_mitigation_device_list_resp_msg_v01,
+ mitigation_device_list),
+ .ei_array = tmd_mitigation_dev_list_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info tmd_set_mitigation_level_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct tmd_mitigation_dev_id_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(
+ struct tmd_set_mitigation_level_req_msg_v01,
+ mitigation_dev_id),
+ .ei_array = tmd_mitigation_dev_id_type_v01_ei,
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct tmd_set_mitigation_level_req_msg_v01,
+ mitigation_level),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info tmd_set_mitigation_level_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct tmd_set_mitigation_level_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info tmd_get_mitigation_level_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct tmd_mitigation_dev_id_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(
+ struct tmd_get_mitigation_level_req_msg_v01,
+ mitigation_device),
+ .ei_array = tmd_mitigation_dev_id_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info tmd_get_mitigation_level_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct tmd_get_mitigation_level_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct tmd_get_mitigation_level_resp_msg_v01,
+ current_mitigation_level_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct tmd_get_mitigation_level_resp_msg_v01,
+ current_mitigation_level),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct tmd_get_mitigation_level_resp_msg_v01,
+ requested_mitigation_level_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct tmd_get_mitigation_level_resp_msg_v01,
+ requested_mitigation_level),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info tmd_register_notification_mitigation_level_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct tmd_mitigation_dev_id_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(
+ struct tmd_register_notification_mitigation_level_req_msg_v01,
+ mitigation_device),
+ .ei_array = tmd_mitigation_dev_id_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info tmd_register_notification_mitigation_level_resp_msg_v01_ei[]
+ = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct tmd_register_notification_mitigation_level_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info tmd_deregister_notification_mitigation_level_req_msg_v01_ei[]
+ = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct tmd_mitigation_dev_id_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ tmd_deregister_notification_mitigation_level_req_msg_v01,
+ mitigation_device),
+ .ei_array = tmd_mitigation_dev_id_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info tmd_deregister_notification_mitigation_level_resp_msg_v01_ei[]
+ = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ tmd_deregister_notification_mitigation_level_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info tmd_mitigation_level_report_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct tmd_mitigation_dev_id_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(
+ struct tmd_mitigation_level_report_ind_msg_v01,
+ mitigation_device),
+ .ei_array = tmd_mitigation_dev_id_type_v01_ei,
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct tmd_mitigation_level_report_ind_msg_v01,
+ current_mitigation_level),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
diff --git a/drivers/thermal/qcom/thermal_mitigation_device_service_v01.h b/drivers/thermal/qcom/thermal_mitigation_device_service_v01.h
new file mode 100644
index 0000000..c2d1201
--- /dev/null
+++ b/drivers/thermal/qcom/thermal_mitigation_device_service_v01.h
@@ -0,0 +1,128 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef THERMAL_MITIGATION_DEVICE_SERVICE_V01_H
+#define THERMAL_MITIGATION_DEVICE_SERVICE_V01_H
+
+#define TMD_SERVICE_ID_V01 0x18
+#define TMD_SERVICE_VERS_V01 0x01
+
+#define QMI_TMD_GET_MITIGATION_DEVICE_LIST_RESP_V01 0x0020
+#define QMI_TMD_GET_MITIGATION_LEVEL_REQ_V01 0x0022
+#define QMI_TMD_GET_SUPPORTED_MSGS_REQ_V01 0x001E
+#define QMI_TMD_SET_MITIGATION_LEVEL_REQ_V01 0x0021
+#define QMI_TMD_REGISTER_NOTIFICATION_MITIGATION_LEVEL_RESP_V01 0x0023
+#define QMI_TMD_GET_SUPPORTED_MSGS_RESP_V01 0x001E
+#define QMI_TMD_SET_MITIGATION_LEVEL_RESP_V01 0x0021
+#define QMI_TMD_DEREGISTER_NOTIFICATION_MITIGATION_LEVEL_RESP_V01 0x0024
+#define QMI_TMD_MITIGATION_LEVEL_REPORT_IND_V01 0x0025
+#define QMI_TMD_GET_MITIGATION_LEVEL_RESP_V01 0x0022
+#define QMI_TMD_GET_SUPPORTED_FIELDS_REQ_V01 0x001F
+#define QMI_TMD_GET_MITIGATION_DEVICE_LIST_REQ_V01 0x0020
+#define QMI_TMD_REGISTER_NOTIFICATION_MITIGATION_LEVEL_REQ_V01 0x0023
+#define QMI_TMD_DEREGISTER_NOTIFICATION_MITIGATION_LEVEL_REQ_V01 0x0024
+#define QMI_TMD_GET_SUPPORTED_FIELDS_RESP_V01 0x001F
+
+#define QMI_TMD_MITIGATION_DEV_ID_LENGTH_MAX_V01 32
+#define QMI_TMD_MITIGATION_DEV_LIST_MAX_V01 32
+
+struct tmd_mitigation_dev_id_type_v01 {
+ char mitigation_dev_id[QMI_TMD_MITIGATION_DEV_ID_LENGTH_MAX_V01 + 1];
+};
+
+struct tmd_mitigation_dev_list_type_v01 {
+ struct tmd_mitigation_dev_id_type_v01 mitigation_dev_id;
+ uint8_t max_mitigation_level;
+};
+
+struct tmd_get_mitigation_device_list_req_msg_v01 {
+ char placeholder;
+};
+#define TMD_GET_MITIGATION_DEVICE_LIST_REQ_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info tmd_get_mitigation_device_list_req_msg_v01_ei[];
+
+struct tmd_get_mitigation_device_list_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ uint8_t mitigation_device_list_valid;
+ uint32_t mitigation_device_list_len;
+ struct tmd_mitigation_dev_list_type_v01
+ mitigation_device_list[QMI_TMD_MITIGATION_DEV_LIST_MAX_V01];
+};
+#define TMD_GET_MITIGATION_DEVICE_LIST_RESP_MSG_V01_MAX_MSG_LEN 1099
+extern struct elem_info tmd_get_mitigation_device_list_resp_msg_v01_ei[];
+
+struct tmd_set_mitigation_level_req_msg_v01 {
+ struct tmd_mitigation_dev_id_type_v01 mitigation_dev_id;
+ uint8_t mitigation_level;
+};
+#define TMD_SET_MITIGATION_LEVEL_REQ_MSG_V01_MAX_MSG_LEN 40
+extern struct elem_info tmd_set_mitigation_level_req_msg_v01_ei[];
+
+struct tmd_set_mitigation_level_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+#define TMD_SET_MITIGATION_LEVEL_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info tmd_set_mitigation_level_resp_msg_v01_ei[];
+
+struct tmd_get_mitigation_level_req_msg_v01 {
+ struct tmd_mitigation_dev_id_type_v01 mitigation_device;
+};
+#define TMD_GET_MITIGATION_LEVEL_REQ_MSG_V01_MAX_MSG_LEN 36
+extern struct elem_info tmd_get_mitigation_level_req_msg_v01_ei[];
+
+struct tmd_get_mitigation_level_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ uint8_t current_mitigation_level_valid;
+ uint8_t current_mitigation_level;
+ uint8_t requested_mitigation_level_valid;
+ uint8_t requested_mitigation_level;
+};
+#define TMD_GET_MITIGATION_LEVEL_RESP_MSG_V01_MAX_MSG_LEN 15
+extern struct elem_info tmd_get_mitigation_level_resp_msg_v01_ei[];
+
+struct tmd_register_notification_mitigation_level_req_msg_v01 {
+ struct tmd_mitigation_dev_id_type_v01 mitigation_device;
+};
+#define TMD_REGISTER_NOTIFICATION_MITIGATION_LEVEL_REQ_MSG_V01_MAX_MSG_LEN 36
+extern struct elem_info
+ tmd_register_notification_mitigation_level_req_msg_v01_ei[];
+
+struct tmd_register_notification_mitigation_level_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+#define TMD_REGISTER_NOTIFICATION_MITIGATION_LEVEL_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info
+ tmd_register_notification_mitigation_level_resp_msg_v01_ei[];
+
+struct tmd_deregister_notification_mitigation_level_req_msg_v01 {
+ struct tmd_mitigation_dev_id_type_v01 mitigation_device;
+};
+#define TMD_DEREGISTER_NOTIFICATION_MITIGATION_LEVEL_REQ_MSG_V01_MAX_MSG_LEN 36
+extern struct elem_info
+ tmd_deregister_notification_mitigation_level_req_msg_v01_ei[];
+
+struct tmd_deregister_notification_mitigation_level_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+#define TMD_DEREGISTER_NOTIFICATION_MITIGATION_LEVEL_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info
+ tmd_deregister_notification_mitigation_level_resp_msg_v01_ei[];
+
+struct tmd_mitigation_level_report_ind_msg_v01 {
+ struct tmd_mitigation_dev_id_type_v01 mitigation_device;
+ uint8_t current_mitigation_level;
+};
+#define TMD_MITIGATION_LEVEL_REPORT_IND_MSG_V01_MAX_MSG_LEN 40
+extern struct elem_info tmd_mitigation_level_report_ind_msg_v01_ei[];
+
+#endif
diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
index 6b05b7b..f6f30a0 100644
--- a/drivers/thermal/step_wise.c
+++ b/drivers/thermal/step_wise.c
@@ -102,7 +102,8 @@
if (!throttle)
next_target = THERMAL_NO_TARGET;
} else {
- next_target = cur_state - 1;
+ if (!throttle)
+ next_target = cur_state - 1;
if (next_target > instance->upper)
next_target = instance->upper;
}
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 4c1ccee..2bfe1b5 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -418,8 +418,9 @@
thermal_zone_device_set_polling(thermal_passive_wq,
tz, tz->passive_delay);
else if (tz->polling_delay)
- thermal_zone_device_set_polling(system_freezable_wq,
- tz, tz->polling_delay);
+ thermal_zone_device_set_polling(
+ system_freezable_power_efficient_wq,
+ tz, tz->polling_delay);
else
thermal_zone_device_set_polling(NULL, tz, 0);
@@ -476,6 +477,7 @@
* So, start monitoring again.
*/
monitor_thermal_zone(tz);
+ trace_thermal_handle_trip(tz, trip);
}
/**
@@ -566,6 +568,7 @@
ret = tz->ops->set_trips(tz, low, high);
if (ret)
dev_err(&tz->device, "Failed to set trips: %d\n", ret);
+ trace_thermal_set_trip(tz);
exit:
mutex_unlock(&tz->lock);
@@ -620,6 +623,7 @@
if (!tz->ops->get_temp)
return;
+ trace_thermal_device_update(tz, event);
update_temperature(tz);
thermal_zone_set_trips(tz);
@@ -896,6 +900,60 @@
}
static ssize_t
+polling_delay_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", tz->polling_delay);
+}
+
+static ssize_t
+polling_delay_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+ int delay;
+
+ if (kstrtoint(buf, 10, &delay))
+ return -EINVAL;
+
+ mutex_lock(&tz->lock);
+ tz->polling_delay = delay;
+ mutex_unlock(&tz->lock);
+ thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
+
+ return count;
+}
+
+static ssize_t
+passive_delay_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", tz->passive_delay);
+}
+
+static ssize_t
+passive_delay_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+ int delay;
+
+ if (kstrtoint(buf, 10, &delay))
+ return -EINVAL;
+
+ mutex_lock(&tz->lock);
+ tz->passive_delay = delay;
+ mutex_unlock(&tz->lock);
+ thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
+
+ return count;
+}
+
+static ssize_t
policy_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -1166,6 +1224,10 @@
static DEVICE_ATTR(passive, S_IRUGO | S_IWUSR, passive_show, passive_store);
static DEVICE_ATTR(policy, S_IRUGO | S_IWUSR, policy_show, policy_store);
static DEVICE_ATTR(available_policies, S_IRUGO, available_policies_show, NULL);
+static DEVICE_ATTR(passive_delay, 0644, passive_delay_show,
+ passive_delay_store);
+static DEVICE_ATTR(polling_delay, 0644, polling_delay_show,
+ polling_delay_store);
/* sys I/F for cooling device */
#define to_cooling_device(_dev) \
@@ -1323,6 +1385,54 @@
return sprintf(buf, "%d\n", instance->trip);
}
+static ssize_t
+thermal_cooling_device_upper_limit_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+ struct thermal_instance *instance;
+ int ret, upper_limit;
+
+ instance =
+ container_of(attr, struct thermal_instance, upper_attr);
+
+ ret = kstrtoint(buf, 0, &upper_limit);
+ if (ret)
+ return ret;
+ if (upper_limit < instance->lower)
+ return -EINVAL;
+
+ instance->upper = upper_limit;
+ thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
+
+ return count;
+}
+
+static ssize_t
+thermal_cooling_device_lower_limit_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+ struct thermal_instance *instance;
+ int ret, lower_limit;
+
+ instance =
+ container_of(attr, struct thermal_instance, lower_attr);
+
+ ret = kstrtoint(buf, 0, &lower_limit);
+ if (ret)
+ return ret;
+ if (lower_limit > instance->upper)
+ return -EINVAL;
+
+ instance->lower = lower_limit;
+ thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
+
+ return count;
+}
+
static struct attribute *cooling_device_attrs[] = {
&dev_attr_cdev_type.attr,
&dev_attr_max_state.attr,
@@ -1483,8 +1593,9 @@
"cdev%d_upper_limit", dev->id);
sysfs_attr_init(&dev->upper_attr.attr);
dev->upper_attr.attr.name = dev->upper_attr_name;
- dev->upper_attr.attr.mode = 0444;
+ dev->upper_attr.attr.mode = 0644;
dev->upper_attr.show = thermal_cooling_device_upper_limit_show;
+ dev->upper_attr.store = thermal_cooling_device_upper_limit_store;
result = device_create_file(&tz->device, &dev->upper_attr);
if (result)
goto remove_trip_file;
@@ -1493,8 +1604,9 @@
"cdev%d_lower_limit", dev->id);
sysfs_attr_init(&dev->lower_attr.attr);
dev->lower_attr.attr.name = dev->lower_attr_name;
- dev->lower_attr.attr.mode = 0444;
+ dev->lower_attr.attr.mode = 0644;
dev->lower_attr.show = thermal_cooling_device_lower_limit_show;
+ dev->lower_attr.store = thermal_cooling_device_lower_limit_store;
result = device_create_file(&tz->device, &dev->lower_attr);
if (result)
goto remove_upper_file;
@@ -1825,6 +1937,7 @@
current_target = instance->target;
}
}
+ trace_cdev_update_start(cdev);
cdev->ops->set_cur_state(cdev, current_target);
if (cdev->ops->set_min_state)
cdev->ops->set_min_state(cdev, min_target);
@@ -2083,6 +2196,13 @@
if (result)
goto unregister;
}
+ result = device_create_file(&tz->device, &dev_attr_passive_delay);
+ if (result)
+ goto unregister;
+
+ result = device_create_file(&tz->device, &dev_attr_polling_delay);
+ if (result)
+ goto unregister;
if (IS_ENABLED(CONFIG_THERMAL_EMULATION)) {
result = device_create_file(&tz->device, &dev_attr_emul_temp);
@@ -2134,7 +2254,7 @@
/* Bind cooling devices for this zone */
bind_tz(tz);
- INIT_DELAYED_WORK(&(tz->poll_queue), thermal_zone_device_check);
+ INIT_DEFERRABLE_WORK(&(tz->poll_queue), thermal_zone_device_check);
thermal_zone_device_reset(tz);
/* Update the new thermal zone and mark it as already updated. */
@@ -2206,6 +2326,8 @@
device_remove_file(&tz->device, &dev_attr_mode);
device_remove_file(&tz->device, &dev_attr_policy);
device_remove_file(&tz->device, &dev_attr_available_policies);
+ device_remove_file(&tz->device, &dev_attr_passive_delay);
+ device_remove_file(&tz->device, &dev_attr_polling_delay);
remove_trip_attrs(tz);
thermal_set_governor(tz, NULL);
diff --git a/drivers/thermal/tsens.h b/drivers/thermal/tsens.h
index 770b982..a695d57 100644
--- a/drivers/thermal/tsens.h
+++ b/drivers/thermal/tsens.h
@@ -91,6 +91,7 @@
int (*set_trips)(struct tsens_sensor *, int, int);
int (*interrupts_reg)(struct tsens_device *);
int (*dbg)(struct tsens_device *, u32, u32, int *);
+ int (*sensor_en)(struct tsens_device *, u32);
};
struct tsens_irqs {
diff --git a/drivers/thermal/tsens2xxx.c b/drivers/thermal/tsens2xxx.c
index 55be2f9..de9f27f 100644
--- a/drivers/thermal/tsens2xxx.c
+++ b/drivers/thermal/tsens2xxx.c
@@ -61,6 +61,7 @@
#define TSENS_TM_WATCHDOG_LOG(n) ((n) + 0x13c)
#define TSENS_EN BIT(0)
+#define TSENS_CTRL_SENSOR_EN_MASK(n) ((n >> 3) & 0xffff)
static void msm_tsens_convert_temp(int last_temp, int *temp)
{
@@ -499,6 +500,21 @@
return IRQ_HANDLED;
}
+static int tsens2xxx_hw_sensor_en(struct tsens_device *tmdev,
+ u32 sensor_id)
+{
+ void __iomem *srot_addr;
+ unsigned int srot_val, sensor_en;
+
+ srot_addr = TSENS_CTRL_ADDR(tmdev->tsens_srot_addr + 0x4);
+ srot_val = readl_relaxed(srot_addr);
+ srot_val = TSENS_CTRL_SENSOR_EN_MASK(srot_val);
+
+ sensor_en = ((1 << sensor_id) & srot_val);
+
+ return sensor_en;
+}
+
static int tsens2xxx_hw_init(struct tsens_device *tmdev)
{
void __iomem *srot_addr;
@@ -602,6 +618,7 @@
.set_trips = tsens2xxx_set_trip_temp,
.interrupts_reg = tsens2xxx_register_interrupts,
.dbg = tsens2xxx_dbg,
+ .sensor_en = tsens2xxx_hw_sensor_en,
};
const struct tsens_data data_tsens2xxx = {
diff --git a/drivers/tty/serial/efm32-uart.c b/drivers/tty/serial/efm32-uart.c
index 195acc8..5d47691 100644
--- a/drivers/tty/serial/efm32-uart.c
+++ b/drivers/tty/serial/efm32-uart.c
@@ -27,6 +27,7 @@
#define UARTn_FRAME 0x04
#define UARTn_FRAME_DATABITS__MASK 0x000f
#define UARTn_FRAME_DATABITS(n) ((n) - 3)
+#define UARTn_FRAME_PARITY__MASK 0x0300
#define UARTn_FRAME_PARITY_NONE 0x0000
#define UARTn_FRAME_PARITY_EVEN 0x0200
#define UARTn_FRAME_PARITY_ODD 0x0300
@@ -572,12 +573,16 @@
16 * (4 + (clkdiv >> 6)));
frame = efm32_uart_read32(efm_port, UARTn_FRAME);
- if (frame & UARTn_FRAME_PARITY_ODD)
+ switch (frame & UARTn_FRAME_PARITY__MASK) {
+ case UARTn_FRAME_PARITY_ODD:
*parity = 'o';
- else if (frame & UARTn_FRAME_PARITY_EVEN)
+ break;
+ case UARTn_FRAME_PARITY_EVEN:
*parity = 'e';
- else
+ break;
+ default:
*parity = 'n';
+ }
*bits = (frame & UARTn_FRAME_DATABITS__MASK) -
UARTn_FRAME_DATABITS(4) + 4;
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index 94ba2c3e..11b4958 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -26,6 +26,7 @@
#include <linux/qcom-geni-se.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
+#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
@@ -107,9 +108,12 @@
#define UART_CORE2X_VOTE (10000)
#define WAKEBYTE_TIMEOUT_MSEC (2000)
-#define IPC_LOG_PWR_PAGES (2)
-#define IPC_LOG_MISC_PAGES (2)
-#define IPC_LOG_TX_RX_PAGES (3)
+#define WAIT_XFER_MAX_ITER (50)
+#define WAIT_XFER_MAX_TIMEOUT_US (10000)
+#define WAIT_XFER_MIN_TIMEOUT_US (9000)
+#define IPC_LOG_PWR_PAGES (6)
+#define IPC_LOG_MISC_PAGES (6)
+#define IPC_LOG_TX_RX_PAGES (8)
#define DATA_BYTES_PER_LINE (32)
#define IPC_LOG_MSG(ctx, x...) do { \
@@ -117,6 +121,7 @@
ipc_log_string(ctx, x); \
} while (0)
+#define DMA_RX_BUF_SIZE (512)
struct msm_geni_serial_port {
struct uart_port uport;
char name[20];
@@ -136,6 +141,10 @@
unsigned int rx_last);
struct device *wrapper_dev;
struct se_geni_rsc serial_rsc;
+ dma_addr_t tx_dma;
+ unsigned int xmit_size;
+ void *rx_buf;
+ dma_addr_t rx_dma;
int loopback;
int wakeup_irq;
unsigned char wakeup_byte;
@@ -146,6 +155,7 @@
void *ipc_log_misc;
unsigned int cur_baud;
int ioctl_count;
+ int edge_count;
};
static const struct uart_ops msm_geni_serial_pops;
@@ -221,27 +231,66 @@
(unsigned int)addr, size, buf);
}
-static bool check_tx_active(struct uart_port *uport)
+static bool check_transfers_inflight(struct uart_port *uport)
{
- /*
- * Poll if the GENI STATUS bit for TX is cleared. If the bit is
- * clear (poll condition met), return false, meaning tx isn't active
- * else return true. So return not of the poll return.
- */
- return !msm_geni_serial_poll_bit(uport, SE_GENI_STATUS,
- M_GENI_CMD_ACTIVE, false);
+ bool xfer_on = false;
+ bool tx_active = false;
+ bool tx_empty = false;
+ bool m_cmd_active = false;
+ bool rx_active = false;
+ u32 rx_fifo_status = 0;
+ u32 geni_status = geni_read_reg_nolog(uport->membase,
+ SE_GENI_STATUS);
+ /* Possible stop tx is called multiple times. */
+ m_cmd_active = geni_status & M_GENI_CMD_ACTIVE;
+ tx_empty = msm_geni_serial_tx_empty(uport);
+ tx_active = m_cmd_active || !tx_empty;
+ rx_fifo_status = geni_read_reg_nolog(uport->membase,
+ SE_GENI_RX_FIFO_STATUS);
+ if (rx_fifo_status)
+ rx_active = true;
+
+ if (rx_active || tx_active)
+ xfer_on = true;
+
+ return xfer_on;
+}
+
+static void wait_for_transfers_inflight(struct uart_port *uport)
+{
+ int iter = 0;
+ struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
+
+ while (iter < WAIT_XFER_MAX_ITER) {
+ if (check_transfers_inflight(uport)) {
+ usleep_range(WAIT_XFER_MIN_TIMEOUT_US,
+ WAIT_XFER_MAX_TIMEOUT_US);
+ iter++;
+ } else {
+ break;
+ }
+ }
+ if (check_transfers_inflight(uport)) {
+ u32 geni_status = geni_read_reg_nolog(uport->membase,
+ SE_GENI_STATUS);
+ u32 geni_ios = geni_read_reg_nolog(uport->membase, SE_GENI_IOS);
+ u32 rx_fifo_status = geni_read_reg_nolog(uport->membase,
+ SE_GENI_RX_FIFO_STATUS);
+
+ IPC_LOG_MSG(port->ipc_log_misc,
+ "%s IOS 0x%x geni status 0x%x rx fifo 0x%x\n",
+ __func__, geni_ios, geni_status, rx_fifo_status);
+ }
}
static int vote_clock_on(struct uart_port *uport)
{
- int ret = 0;
struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
- int usage_count = atomic_read(&uport->dev->power.usage_count);
+ int ret = 0;
if (!pm_runtime_enabled(uport->dev)) {
dev_err(uport->dev, "RPM not available.Can't enable clocks\n");
- ret = -EPERM;
- return ret;
+ return -EPERM;
}
ret = msm_geni_serial_power_on(uport);
if (ret) {
@@ -249,39 +298,31 @@
return ret;
}
port->ioctl_count++;
- __pm_relax(&port->geni_wake);
- IPC_LOG_MSG(port->ipc_log_pwr, "%s rpm %d ioctl %d\n",
- __func__, usage_count, port->ioctl_count);
+ IPC_LOG_MSG(port->ipc_log_pwr, "%s%s ioctl %d\n", __func__,
+ current->comm, port->ioctl_count);
return 0;
}
static int vote_clock_off(struct uart_port *uport)
{
struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
- int ret = 0;
- int usage_count = atomic_read(&uport->dev->power.usage_count);
if (!pm_runtime_enabled(uport->dev)) {
dev_err(uport->dev, "RPM not available.Can't enable clocks\n");
- ret = -EPERM;
- return ret;
+ return -EPERM;
}
- /* Check on going Tx. Don't block on this for now. */
- if (check_tx_active(uport))
- dev_warn(uport->dev, "%s: Vote off called during active Tx",
- __func__);
if (!port->ioctl_count) {
dev_warn(uport->dev, "%s:Imbalanced vote off ioctl %d\n",
- __func__, usage_count);
+ __func__, port->ioctl_count);
IPC_LOG_MSG(port->ipc_log_pwr,
- "%s:Imbalanced vote_off from userspace rpm%d",
- __func__, usage_count);
- return 0;
+ "%s:Imbalanced vote_off from userspace. %d",
+ __func__, port->ioctl_count);
+ return -EPERM;
}
port->ioctl_count--;
msm_geni_serial_power_off(uport);
- IPC_LOG_MSG(port->ipc_log_pwr, "%s rpm %d ioctl %d\n",
- __func__, usage_count, port->ioctl_count);
+ IPC_LOG_MSG(port->ipc_log_pwr, "%s%s ioctl %d\n", __func__,
+ current->comm, port->ioctl_count);
return 0;
};
@@ -311,14 +352,11 @@
static void msm_geni_serial_break_ctl(struct uart_port *uport, int ctl)
{
- if (!uart_console(uport) && pm_runtime_status_suspended(uport->dev)) {
- dev_err(uport->dev, "%s Device suspended,vote clocks on.\n",
- __func__);
+ if (!uart_console(uport) && pm_runtime_status_suspended(uport->dev))
return;
- }
if (ctl) {
- check_tx_active(uport);
+ wait_for_transfers_inflight(uport);
geni_setup_m_cmd(uport->membase, UART_START_BREAK, 0);
} else {
geni_setup_m_cmd(uport->membase, UART_STOP_BREAK, 0);
@@ -357,11 +395,8 @@
{
u32 uart_manual_rfr = 0;
- if (pm_runtime_status_suspended(uport->dev)) {
- dev_info(uport->dev, "%sDevice suspended,vote clocks on\n",
- __func__);
+ if (pm_runtime_status_suspended(uport->dev))
return;
- }
if (!(mctrl & TIOCM_RTS))
uart_manual_rfr |= (UART_MANUAL_RFR_EN | UART_RFR_NOT_READY);
geni_write_reg_nolog(uart_manual_rfr, uport->membase,
@@ -396,9 +431,12 @@
static int msm_geni_serial_power_on(struct uart_port *uport)
{
int ret = 0;
+ struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
ret = pm_runtime_get_sync(uport->dev);
if (ret < 0) {
+ IPC_LOG_MSG(port->ipc_log_pwr, "%s Err\n", __func__);
+ WARN_ON_ONCE(1);
pm_runtime_put_noidle(uport->dev);
pm_runtime_set_suspended(uport->dev);
return ret;
@@ -469,6 +507,9 @@
int done = 0;
unsigned int irq_clear = M_CMD_DONE_EN;
+ if (!uart_console(uport))
+ return;
+
done = msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
M_CMD_DONE_EN, true);
if (!done) {
@@ -682,27 +723,98 @@
#endif /* (CONFIG_SERIAL_CORE_CONSOLE) || defined(CONFIG_CONSOLE_POLL)) */
+static int msm_geni_serial_prep_dma_tx(struct uart_port *uport)
+{
+ struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
+ struct circ_buf *xmit = &uport->state->xmit;
+ unsigned int xmit_size;
+ int ret = 0;
+
+ xmit_size = uart_circ_chars_pending(xmit);
+ if (xmit_size < WAKEUP_CHARS)
+ uart_write_wakeup(uport);
+
+ if (xmit_size > (UART_XMIT_SIZE - xmit->tail))
+ xmit_size = UART_XMIT_SIZE - xmit->tail;
+
+ if (!xmit_size)
+ return ret;
+
+ dump_ipc(msm_port->ipc_log_tx, "DMA Tx",
+ (char *)&xmit->buf[xmit->tail], 0, xmit_size);
+ msm_geni_serial_setup_tx(uport, xmit_size);
+ ret = geni_se_tx_dma_prep(msm_port->wrapper_dev, uport->membase,
+ &xmit->buf[xmit->tail], xmit_size, &msm_port->tx_dma);
+ if (!ret) {
+ msm_port->xmit_size = xmit_size;
+ } else {
+ geni_write_reg_nolog(0, uport->membase,
+ SE_UART_TX_TRANS_LEN);
+ geni_cancel_m_cmd(uport->membase);
+ if (!msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ M_CMD_CANCEL_EN, true)) {
+ geni_abort_m_cmd(uport->membase);
+ msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ M_CMD_ABORT_EN, true);
+ geni_write_reg_nolog(M_CMD_ABORT_EN, uport->membase,
+ SE_GENI_M_IRQ_CLEAR);
+ }
+ geni_write_reg_nolog(M_CMD_CANCEL_EN, uport->membase,
+ SE_GENI_M_IRQ_CLEAR);
+ IPC_LOG_MSG(msm_port->ipc_log_tx, "%s: DMA map failure %d\n",
+ __func__, ret);
+ msm_port->tx_dma = (dma_addr_t)NULL;
+ msm_port->xmit_size = 0;
+ }
+ return ret;
+}
+
static void msm_geni_serial_start_tx(struct uart_port *uport)
{
unsigned int geni_m_irq_en;
struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
-
- if (!msm_geni_serial_tx_empty(uport))
- return;
+ unsigned int geni_status;
+ unsigned int geni_ios;
if (!uart_console(uport) && pm_runtime_status_suspended(uport->dev)) {
dev_err(uport->dev, "%s.Device is suspended.\n", __func__);
+ IPC_LOG_MSG(msm_port->ipc_log_misc,
+ "%s.Device is suspended.\n", __func__);
return;
}
- geni_m_irq_en = geni_read_reg_nolog(uport->membase, SE_GENI_M_IRQ_EN);
- geni_m_irq_en |= M_TX_FIFO_WATERMARK_EN;
+ if (msm_port->xfer_mode == FIFO_MODE) {
+ geni_status = geni_read_reg_nolog(uport->membase,
+ SE_GENI_STATUS);
+ if (geni_status & M_GENI_CMD_ACTIVE)
+ goto check_flow_ctrl;
- geni_write_reg_nolog(msm_port->tx_wm, uport->membase,
+ if (!msm_geni_serial_tx_empty(uport))
+ goto check_flow_ctrl;
+
+ geni_m_irq_en = geni_read_reg_nolog(uport->membase,
+ SE_GENI_M_IRQ_EN);
+ geni_m_irq_en |= (M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN);
+
+ geni_write_reg_nolog(msm_port->tx_wm, uport->membase,
SE_GENI_TX_WATERMARK_REG);
- geni_write_reg_nolog(geni_m_irq_en, uport->membase, SE_GENI_M_IRQ_EN);
- /* Geni command setup/irq enables should complete before returning.*/
- mb();
+ geni_write_reg_nolog(geni_m_irq_en, uport->membase,
+ SE_GENI_M_IRQ_EN);
+ /* Geni command setup should complete before returning.*/
+ mb();
+ } else if (msm_port->xfer_mode == SE_DMA) {
+ if (msm_port->tx_dma)
+ goto check_flow_ctrl;
+
+ msm_geni_serial_prep_dma_tx(uport);
+ }
+ IPC_LOG_MSG(msm_port->ipc_log_misc, "%s\n", __func__);
+ return;
+check_flow_ctrl:
+ geni_ios = geni_read_reg_nolog(uport->membase, SE_GENI_IOS);
+ if (!(geni_ios & IO2_DATA_IN))
+ IPC_LOG_MSG(msm_port->ipc_log_misc, "%s: ios: 0x%08x\n",
+ __func__, geni_ios);
}
static void msm_geni_serial_stop_tx(struct uart_port *uport)
@@ -711,12 +823,30 @@
unsigned int geni_status;
struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
- if (!uart_console(uport) && pm_runtime_status_suspended(uport->dev))
+ if (!uart_console(uport) && pm_runtime_status_suspended(uport->dev)) {
+ dev_err(uport->dev, "%s.Device is suspended.\n", __func__);
+ IPC_LOG_MSG(port->ipc_log_misc,
+ "%s.Device is suspended.\n", __func__);
return;
+ }
geni_m_irq_en = geni_read_reg_nolog(uport->membase, SE_GENI_M_IRQ_EN);
- geni_m_irq_en &= ~(M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN);
- geni_write_reg_nolog(0, uport->membase, SE_GENI_TX_WATERMARK_REG);
+ geni_m_irq_en &= ~M_CMD_DONE_EN;
+ if (port->xfer_mode == FIFO_MODE) {
+ geni_m_irq_en &= ~M_TX_FIFO_WATERMARK_EN;
+ geni_write_reg_nolog(0, uport->membase,
+ SE_GENI_TX_WATERMARK_REG);
+ } else if (port->xfer_mode == SE_DMA) {
+ if (port->tx_dma) {
+ geni_write_reg_nolog(1, uport->membase,
+ SE_DMA_TX_FSM_RST);
+ geni_se_tx_dma_unprep(port->wrapper_dev, port->tx_dma,
+ port->xmit_size);
+ port->tx_dma = (dma_addr_t)NULL;
+ }
+ }
+ port->xmit_size = 0;
+
geni_write_reg_nolog(geni_m_irq_en, uport->membase, SE_GENI_M_IRQ_EN);
geni_status = geni_read_reg_nolog(uport->membase,
@@ -746,26 +876,57 @@
unsigned int rxstale = DEFAULT_BITS_PER_CHAR * STALE_TIMEOUT;
unsigned int geni_status;
struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
+ int ret;
- if (!uart_console(uport) && pm_runtime_status_suspended(uport->dev))
+ if (!uart_console(uport) && pm_runtime_status_suspended(uport->dev)) {
+ dev_err(uport->dev, "%s.Device is suspended.\n", __func__);
+ IPC_LOG_MSG(port->ipc_log_misc,
+ "%s.Device is suspended.\n", __func__);
return;
+ }
geni_status = geni_read_reg_nolog(uport->membase, SE_GENI_STATUS);
if (geni_status & S_GENI_CMD_ACTIVE)
msm_geni_serial_abort_rx(uport);
- geni_s_irq_en = geni_read_reg_nolog(uport->membase,
- SE_GENI_S_IRQ_EN);
- geni_m_irq_en = geni_read_reg_nolog(uport->membase,
- SE_GENI_M_IRQ_EN);
- geni_s_irq_en |= S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN;
- geni_m_irq_en |= M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN;
+
se_get_packing_config(8, 4, false, &cfg0, &cfg1);
geni_write_reg_nolog(cfg0, uport->membase, SE_GENI_RX_PACKING_CFG0);
geni_write_reg_nolog(cfg1, uport->membase, SE_GENI_RX_PACKING_CFG1);
geni_write_reg_nolog(rxstale, uport->membase, SE_UART_RX_STALE_CNT);
geni_setup_s_cmd(uport->membase, UART_START_READ, 0);
- geni_write_reg_nolog(geni_s_irq_en, uport->membase, SE_GENI_S_IRQ_EN);
- geni_write_reg_nolog(geni_m_irq_en, uport->membase, SE_GENI_M_IRQ_EN);
+
+ if (port->xfer_mode == FIFO_MODE) {
+ geni_s_irq_en = geni_read_reg_nolog(uport->membase,
+ SE_GENI_S_IRQ_EN);
+ geni_m_irq_en = geni_read_reg_nolog(uport->membase,
+ SE_GENI_M_IRQ_EN);
+
+ geni_s_irq_en |= S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN;
+ geni_m_irq_en |= M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN;
+
+ geni_write_reg_nolog(geni_s_irq_en, uport->membase,
+ SE_GENI_S_IRQ_EN);
+ geni_write_reg_nolog(geni_m_irq_en, uport->membase,
+ SE_GENI_M_IRQ_EN);
+ } else if (port->xfer_mode == SE_DMA) {
+ port->rx_buf = kzalloc(DMA_RX_BUF_SIZE, GFP_KERNEL);
+ if (!port->rx_buf) {
+ dev_err(uport->dev, "%s: kzalloc failed\n",
+ __func__);
+ msm_geni_serial_abort_rx(uport);
+ return;
+ }
+
+ ret = geni_se_rx_dma_prep(port->wrapper_dev, uport->membase,
+ port->rx_buf, DMA_RX_BUF_SIZE, &port->rx_dma);
+ if (ret) {
+ dev_err(uport->dev, "%s: RX Prep dma failed %d\n",
+ __func__, ret);
+ kfree(port->rx_buf);
+ msm_geni_serial_abort_rx(uport);
+ return;
+ }
+ }
/*
* Ensure the writes to the secondary sequencer and interrupt enables
* go through.
@@ -779,25 +940,42 @@
unsigned int geni_s_irq_en;
unsigned int geni_m_irq_en;
unsigned int geni_status;
+ struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
- if (!uart_console(uport) && pm_runtime_status_suspended(uport->dev))
+ if (!uart_console(uport) && pm_runtime_status_suspended(uport->dev)) {
+ dev_err(uport->dev, "%s.Device is suspended.\n", __func__);
+ IPC_LOG_MSG(port->ipc_log_misc,
+ "%s.Device is suspended.\n", __func__);
return;
+ }
- geni_s_irq_en = geni_read_reg_nolog(uport->membase,
- SE_GENI_S_IRQ_EN);
- geni_m_irq_en = geni_read_reg_nolog(uport->membase,
- SE_GENI_M_IRQ_EN);
- geni_s_irq_en &= ~(S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN);
- geni_m_irq_en &= ~(M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN);
+ if (port->xfer_mode == FIFO_MODE) {
+ geni_s_irq_en = geni_read_reg_nolog(uport->membase,
+ SE_GENI_S_IRQ_EN);
+ geni_m_irq_en = geni_read_reg_nolog(uport->membase,
+ SE_GENI_M_IRQ_EN);
+ geni_s_irq_en &= ~(S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN);
+ geni_m_irq_en &= ~(M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN);
- geni_write_reg_nolog(geni_s_irq_en, uport->membase, SE_GENI_S_IRQ_EN);
- geni_write_reg_nolog(geni_m_irq_en, uport->membase, SE_GENI_M_IRQ_EN);
+ geni_write_reg_nolog(geni_s_irq_en, uport->membase,
+ SE_GENI_S_IRQ_EN);
+ geni_write_reg_nolog(geni_m_irq_en, uport->membase,
+ SE_GENI_M_IRQ_EN);
+ } else if (port->xfer_mode == SE_DMA && port->rx_dma) {
+ geni_write_reg_nolog(1, uport->membase, SE_DMA_RX_FSM_RST);
+ geni_se_rx_dma_unprep(port->wrapper_dev, port->rx_dma,
+ DMA_RX_BUF_SIZE);
+ kfree(port->rx_buf);
+ port->rx_buf = NULL;
+ port->rx_dma = (dma_addr_t)NULL;
+ }
geni_status = geni_read_reg_nolog(uport->membase, SE_GENI_STATUS);
/* Possible stop rx is called multiple times. */
if (!(geni_status & S_GENI_CMD_ACTIVE))
return;
msm_geni_serial_abort_rx(uport);
+ IPC_LOG_MSG(port->ipc_log_misc, "%s\n", __func__);
}
static int handle_rx_hs(struct uart_port *uport,
@@ -868,7 +1046,10 @@
unsigned int xmit_size;
unsigned int fifo_width_bytes =
(uart_console(uport) ? 1 : (msm_port->tx_fifo_width >> 3));
+ unsigned int geni_m_irq_en;
+ xmit->tail = (xmit->tail + msm_port->xmit_size) & (UART_XMIT_SIZE - 1);
+ msm_port->xmit_size = 0;
tx_fifo_status = geni_read_reg_nolog(uport->membase,
SE_GENI_TX_FIFO_STATUS);
if (uart_circ_empty(xmit) && !tx_fifo_status) {
@@ -876,6 +1057,16 @@
goto exit_handle_tx;
}
+ if (!uart_console(uport)) {
+ geni_m_irq_en = geni_read_reg_nolog(uport->membase,
+ SE_GENI_M_IRQ_EN);
+ geni_m_irq_en &= ~(M_TX_FIFO_WATERMARK_EN);
+ geni_write_reg_nolog(0, uport->membase,
+ SE_GENI_TX_WATERMARK_REG);
+ geni_write_reg_nolog(geni_m_irq_en, uport->membase,
+ SE_GENI_M_IRQ_EN);
+ }
+
avail_fifo_bytes = (msm_port->tx_fifo_depth - msm_port->tx_wm) *
fifo_width_bytes;
xmit_size = uart_circ_chars_pending(xmit);
@@ -895,15 +1086,16 @@
while (i < xmit_size) {
unsigned int tx_bytes;
unsigned int buf = 0;
+ int temp_tail;
int c;
tx_bytes = ((bytes_remaining < fifo_width_bytes) ?
bytes_remaining : fifo_width_bytes);
+ temp_tail = (xmit->tail + i) & (UART_XMIT_SIZE - 1);
for (c = 0; c < tx_bytes ; c++)
- buf |= (xmit->buf[xmit->tail + c] << (c * 8));
+ buf |= (xmit->buf[temp_tail + c] << (c * 8));
geni_write_reg_nolog(buf, uport->membase, SE_GENI_TX_FIFOn);
- xmit->tail = (xmit->tail + tx_bytes) & (UART_XMIT_SIZE - 1);
i += tx_bytes;
uport->icount.tx += tx_bytes;
bytes_remaining -= tx_bytes;
@@ -911,45 +1103,137 @@
wmb();
}
msm_geni_serial_poll_cancel_tx(uport);
+ if (uart_console(uport))
+ xmit->tail = (xmit->tail + xmit_size) & (UART_XMIT_SIZE - 1);
+ else
+ msm_port->xmit_size = xmit_size;
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(uport);
exit_handle_tx:
return ret;
}
+static int msm_geni_serial_handle_dma_rx(struct uart_port *uport)
+{
+ struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
+ unsigned int rx_bytes = 0;
+ struct tty_port *tport;
+ int ret;
+ unsigned int geni_status;
+
+ geni_status = geni_read_reg_nolog(uport->membase, SE_GENI_STATUS);
+ /* Possible stop rx is called */
+ if (!(geni_status & S_GENI_CMD_ACTIVE))
+ return 0;
+
+ geni_se_rx_dma_unprep(msm_port->wrapper_dev, msm_port->rx_dma,
+ DMA_RX_BUF_SIZE);
+ rx_bytes = geni_read_reg_nolog(uport->membase, SE_DMA_RX_LEN_IN);
+ if (unlikely(!msm_port->rx_buf || !rx_bytes)) {
+ IPC_LOG_MSG(msm_port->ipc_log_rx, "%s: Rx_buf %pK Size %d\n",
+ __func__, msm_port->rx_buf, rx_bytes);
+ return 0;
+ }
+
+ tport = &uport->state->port;
+ ret = tty_insert_flip_string(tport, (unsigned char *)(msm_port->rx_buf),
+ rx_bytes);
+ if (ret != rx_bytes) {
+ dev_err(uport->dev, "%s: ret %d rx_bytes %d\n", __func__,
+ ret, rx_bytes);
+ WARN_ON(1);
+ }
+ uport->icount.rx += ret;
+ tty_flip_buffer_push(tport);
+ dump_ipc(msm_port->ipc_log_rx, "DMA Rx", (char *)msm_port->rx_buf, 0,
+ rx_bytes);
+ ret = geni_se_rx_dma_prep(msm_port->wrapper_dev, uport->membase,
+ msm_port->rx_buf, DMA_RX_BUF_SIZE, &msm_port->rx_dma);
+ if (ret)
+ IPC_LOG_MSG(msm_port->ipc_log_rx, "%s: %d\n", __func__, ret);
+ return ret;
+}
+
+static int msm_geni_serial_handle_dma_tx(struct uart_port *uport)
+{
+ struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
+ struct circ_buf *xmit = &uport->state->xmit;
+
+ xmit->tail = (xmit->tail + msm_port->xmit_size) & (UART_XMIT_SIZE - 1);
+ geni_se_tx_dma_unprep(msm_port->wrapper_dev, msm_port->tx_dma,
+ msm_port->xmit_size);
+ uport->icount.tx += msm_port->xmit_size;
+ msm_port->tx_dma = (dma_addr_t)NULL;
+ msm_port->xmit_size = 0;
+
+ if (!uart_circ_empty(xmit))
+ msm_geni_serial_prep_dma_tx(uport);
+ else
+ uart_write_wakeup(uport);
+ return 0;
+}
+
static irqreturn_t msm_geni_serial_isr(int isr, void *dev)
{
unsigned int m_irq_status;
unsigned int s_irq_status;
+ unsigned int dma;
+ unsigned int dma_tx_status;
+ unsigned int dma_rx_status;
struct uart_port *uport = dev;
unsigned long flags;
+ unsigned int m_irq_en;
+ struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
spin_lock_irqsave(&uport->lock, flags);
if (uart_console(uport) && uport->suspended)
goto exit_geni_serial_isr;
- if (!uart_console(uport) && pm_runtime_status_suspended(uport->dev))
+ if (!uart_console(uport) && pm_runtime_status_suspended(uport->dev)) {
+ dev_err(uport->dev, "%s.Device is suspended.\n", __func__);
+ IPC_LOG_MSG(msm_port->ipc_log_misc,
+ "%s.Device is suspended.\n", __func__);
goto exit_geni_serial_isr;
+ }
m_irq_status = geni_read_reg_nolog(uport->membase,
SE_GENI_M_IRQ_STATUS);
s_irq_status = geni_read_reg_nolog(uport->membase,
SE_GENI_S_IRQ_STATUS);
- geni_write_reg_nolog(m_irq_status, uport->membase,
- SE_GENI_M_IRQ_CLEAR);
- geni_write_reg_nolog(s_irq_status, uport->membase,
- SE_GENI_S_IRQ_CLEAR);
+ m_irq_en = geni_read_reg_nolog(uport->membase, SE_GENI_M_IRQ_EN);
+ dma = geni_read_reg_nolog(uport->membase, SE_GENI_DMA_MODE_EN);
+ dma_tx_status = geni_read_reg_nolog(uport->membase, SE_DMA_TX_IRQ_STAT);
+ dma_rx_status = geni_read_reg_nolog(uport->membase, SE_DMA_RX_IRQ_STAT);
+
+ geni_write_reg_nolog(m_irq_status, uport->membase, SE_GENI_M_IRQ_CLEAR);
+ geni_write_reg_nolog(s_irq_status, uport->membase, SE_GENI_S_IRQ_CLEAR);
if ((m_irq_status & M_ILLEGAL_CMD_EN)) {
WARN_ON(1);
goto exit_geni_serial_isr;
}
- if ((s_irq_status & S_RX_FIFO_WATERMARK_EN) ||
- (s_irq_status & S_RX_FIFO_LAST_EN)) {
- msm_geni_serial_handle_rx(uport);
- }
+ if (!dma) {
+ if ((s_irq_status & S_RX_FIFO_WATERMARK_EN) ||
+ (s_irq_status & S_RX_FIFO_LAST_EN))
+ msm_geni_serial_handle_rx(uport);
- if ((m_irq_status & M_TX_FIFO_WATERMARK_EN))
- msm_geni_serial_handle_tx(uport);
+ if ((m_irq_status & m_irq_en) &
+ (M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN))
+ msm_geni_serial_handle_tx(uport);
+ } else {
+ if (dma_tx_status) {
+ geni_write_reg_nolog(dma_tx_status, uport->membase,
+ SE_DMA_TX_IRQ_CLR);
+ if (dma_tx_status & TX_DMA_DONE)
+ msm_geni_serial_handle_dma_tx(uport);
+ }
+
+ if (dma_rx_status) {
+ geni_write_reg_nolog(dma_rx_status, uport->membase,
+ SE_DMA_RX_IRQ_CLR);
+ if (dma_rx_status & RX_DMA_DONE)
+ msm_geni_serial_handle_dma_rx(uport);
+ }
+ }
exit_geni_serial_isr:
spin_unlock_irqrestore(&uport->lock, flags);
@@ -964,16 +1248,19 @@
unsigned long flags;
spin_lock_irqsave(&uport->lock, flags);
- if (port->wakeup_byte) {
+ IPC_LOG_MSG(port->ipc_log_rx, "%s: Edge-Count %d\n", __func__,
+ port->edge_count);
+ if (port->wakeup_byte && (port->edge_count == 2)) {
tty = uport->state->port.tty;
tty_insert_flip_char(tty->port, port->wakeup_byte, TTY_NORMAL);
IPC_LOG_MSG(port->ipc_log_rx, "%s: Inject 0x%x\n",
__func__, port->wakeup_byte);
+ port->edge_count = 0;
tty_flip_buffer_push(tty->port);
+ __pm_wakeup_event(&port->geni_wake, WAKEBYTE_TIMEOUT_MSEC);
+ } else if (port->edge_count < 2) {
+ port->edge_count++;
}
- __pm_wakeup_event(&port->geni_wake, WAKEBYTE_TIMEOUT_MSEC);
- IPC_LOG_MSG(port->ipc_log_misc, "%s:Holding Wake Lock for %d ms\n",
- __func__, WAKEBYTE_TIMEOUT_MSEC);
spin_unlock_irqrestore(&uport->lock, flags);
return IRQ_HANDLED;
}
@@ -1030,25 +1317,29 @@
unsigned long flags;
/* Stop the console before stopping the current tx */
- if (uart_console(uport))
+ if (uart_console(uport)) {
console_stop(uport->cons);
+ } else {
+ msm_geni_serial_power_on(uport);
+ wait_for_transfers_inflight(uport);
+ }
+ disable_irq(uport->irq);
+ free_irq(uport->irq, msm_port);
spin_lock_irqsave(&uport->lock, flags);
msm_geni_serial_stop_tx(uport);
msm_geni_serial_stop_rx(uport);
spin_unlock_irqrestore(&uport->lock, flags);
- disable_irq(uport->irq);
- free_irq(uport->irq, msm_port);
if (uart_console(uport)) {
se_geni_resources_off(&msm_port->serial_rsc);
} else {
+ msm_geni_serial_power_off(uport);
if (msm_port->wakeup_irq > 0) {
+ irq_set_irq_wake(msm_port->wakeup_irq, 0);
disable_irq(msm_port->wakeup_irq);
free_irq(msm_port->wakeup_irq, msm_port);
}
- __pm_relax(&msm_port->geni_wake);
- msm_geni_serial_power_off(uport);
}
IPC_LOG_MSG(msm_port->ipc_log_misc, "%s\n", __func__);
}
@@ -1063,12 +1354,20 @@
set_rfr_wm(msm_port);
if (!uart_console(uport)) {
/* For now only assume FIFO mode. */
- msm_port->xfer_mode = FIFO_MODE;
+ msm_port->xfer_mode = SE_DMA;
se_get_packing_config(8, 4, false, &cfg0, &cfg1);
geni_write_reg_nolog(cfg0, uport->membase,
SE_GENI_TX_PACKING_CFG0);
geni_write_reg_nolog(cfg1, uport->membase,
SE_GENI_TX_PACKING_CFG1);
+ msm_port->handle_rx = handle_rx_hs;
+ msm_port->rx_fifo = devm_kzalloc(uport->dev,
+ msm_port->rx_fifo_depth * sizeof(u32), /* one word per FIFO slot */
+ GFP_KERNEL);
+ if (!msm_port->rx_fifo) {
+ ret = -ENOMEM;
+ goto exit_portsetup;
+ }
} else {
/*
* Make an unconditional cancel on the main sequencer to reset
@@ -1153,8 +1452,11 @@
if (likely(!uart_console(uport))) {
ret = msm_geni_serial_power_on(&msm_port->uport);
- if (ret)
- goto exit_startup;
+ if (ret) {
+ dev_err(uport->dev, "%s:Failed to power on %d\n",
+ __func__, ret);
+ return ret;
+ }
}
if (unlikely(get_se_proto(uport->membase) != UART)) {
@@ -1166,12 +1468,12 @@
goto exit_startup;
}
+ get_tx_fifo_size(msm_port);
if (!msm_port->port_setup) {
if (msm_geni_serial_port_setup(uport))
goto exit_startup;
}
- get_tx_fifo_size(msm_port);
msm_geni_serial_start_rx(uport);
/*
* Ensure that all the port configuration writes complete
@@ -1187,8 +1489,7 @@
}
if (msm_port->wakeup_irq > 0) {
- ret = request_threaded_irq(msm_port->wakeup_irq, NULL,
- msm_geni_wakeup_isr,
+ ret = request_irq(msm_port->wakeup_irq, msm_geni_wakeup_isr,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"hs_uart_wakeup", uport);
if (unlikely(ret)) {
@@ -1197,9 +1498,17 @@
goto exit_startup;
}
disable_irq(msm_port->wakeup_irq);
+ ret = irq_set_irq_wake(msm_port->wakeup_irq, 1);
+ if (unlikely(ret)) {
+ dev_err(uport->dev, "%s:Failed to set IRQ wake:%d\n",
+ __func__, ret);
+ goto exit_startup;
+ }
}
IPC_LOG_MSG(msm_port->ipc_log_misc, "%s\n", __func__);
exit_startup:
+ if (likely(!uart_console(uport)))
+ msm_geni_serial_power_off(&msm_port->uport);
return ret;
}
@@ -1286,6 +1595,11 @@
struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
unsigned long clk_rate;
+ if (!uart_console(uport) && pm_runtime_status_suspended(uport->dev)) {
+ IPC_LOG_MSG(port->ipc_log_pwr,
+ "%s Device suspended,vote clocks on.\n", __func__);
+ return;
+ }
/* baud rate */
baud = uart_get_baud_rate(uport, termios, old, 300, 4000000);
port->cur_baud = baud;
@@ -1360,6 +1674,9 @@
tx_trans_cfg |= UART_CTS_MASK;
/* status bits to ignore */
+ if (likely(baud))
+ uart_update_timeout(uport, termios->c_cflag, baud);
+
geni_serial_write_term_regs(uport, port->loopback, tx_trans_cfg,
tx_parity_cfg, rx_trans_cfg, rx_parity_cfg, bits_per_char,
stop_bit_len, ser_clk_cfg);
@@ -1379,15 +1696,81 @@
{
unsigned int tx_fifo_status;
unsigned int is_tx_empty = 1;
+ struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
- tx_fifo_status = geni_read_reg_nolog(uport->membase,
- SE_GENI_TX_FIFO_STATUS);
+ if (!uart_console(uport) && pm_runtime_status_suspended(uport->dev)) {
+ IPC_LOG_MSG(port->ipc_log_pwr,
+ "%s Device suspended,vote clocks on.\n", __func__);
+ return 1;
+ }
+
+ if (port->xfer_mode == SE_DMA)
+ tx_fifo_status = port->tx_dma ? 1 : 0;
+ else
+ tx_fifo_status = geni_read_reg_nolog(uport->membase,
+ SE_GENI_TX_FIFO_STATUS);
if (tx_fifo_status)
is_tx_empty = 0;
return is_tx_empty;
}
+static ssize_t msm_geni_serial_xfer_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_geni_serial_port *port = platform_get_drvdata(pdev);
+ ssize_t ret = 0;
+
+ if (port->xfer_mode == FIFO_MODE)
+ ret = snprintf(buf, sizeof("FIFO\n"), "FIFO\n");
+ else if (port->xfer_mode == SE_DMA)
+ ret = snprintf(buf, sizeof("SE_DMA\n"), "SE_DMA\n");
+
+ return ret;
+}
+
+static ssize_t msm_geni_serial_xfer_mode_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t size)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_geni_serial_port *port = platform_get_drvdata(pdev);
+ struct uart_port *uport = &port->uport;
+ int xfer_mode = port->xfer_mode;
+ unsigned long flags;
+
+ if (uart_console(uport))
+ return -EOPNOTSUPP;
+
+ if (strnstr(buf, "FIFO", strlen("FIFO"))) {
+ xfer_mode = FIFO_MODE;
+ } else if (strnstr(buf, "SE_DMA", strlen("SE_DMA"))) {
+ xfer_mode = SE_DMA;
+ } else {
+ dev_err(dev, "%s: Invalid input %s\n", __func__, buf);
+ return -EINVAL;
+ }
+
+ if (xfer_mode == port->xfer_mode)
+ return size;
+
+ msm_geni_serial_power_on(uport);
+ spin_lock_irqsave(&uport->lock, flags);
+ msm_geni_serial_stop_tx(uport);
+ msm_geni_serial_stop_rx(uport);
+ port->xfer_mode = xfer_mode;
+ geni_se_select_mode(uport->membase, port->xfer_mode);
+ spin_unlock_irqrestore(&uport->lock, flags);
+ msm_geni_serial_start_rx(uport);
+ msm_geni_serial_power_off(uport);
+
+ return size;
+}
+
+static DEVICE_ATTR(xfer_mode, 0644, msm_geni_serial_xfer_mode_show,
+ msm_geni_serial_xfer_mode_store);
+
#if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
static int __init msm_geni_console_setup(struct console *co, char *options)
{
@@ -1638,6 +2021,7 @@
bool is_console = false;
struct platform_device *wrapper_pdev;
struct device_node *wrapper_ph_node;
+ u32 wake_char = 0;
id = of_match_device(msm_geni_device_tbl, &pdev->dev);
if (id) {
@@ -1700,9 +2084,14 @@
if (ret)
goto exit_geni_serial_probe;
- if (of_property_read_u8(pdev->dev.of_node, "qcom,wakeup-byte",
- &dev_port->wakeup_byte))
- dev_info(&pdev->dev, "No Wakeup byte specified\n");
+ if (of_property_read_u32(pdev->dev.of_node, "qcom,wakeup-byte",
+ &wake_char)) {
+ dev_dbg(&pdev->dev, "No Wakeup byte specified\n");
+ } else {
+ dev_port->wakeup_byte = (u8)wake_char;
+ dev_info(&pdev->dev, "Wakeup byte 0x%x\n",
+ dev_port->wakeup_byte);
+ }
dev_port->serial_rsc.se_clk = devm_clk_get(&pdev->dev, "se-clk");
if (IS_ERR(dev_port->serial_rsc.se_clk)) {
@@ -1790,10 +2179,6 @@
dev_port->rx_fifo = devm_kzalloc(uport->dev, sizeof(u32),
GFP_KERNEL);
} else {
- dev_port->handle_rx = handle_rx_hs;
- dev_port->rx_fifo = devm_kzalloc(uport->dev,
- sizeof(dev_port->rx_fifo_depth * sizeof(u32)),
- GFP_KERNEL);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_enable(&pdev->dev);
}
@@ -1801,6 +2186,7 @@
dev_info(&pdev->dev, "Serial port%d added.FifoSize %d is_console%d\n",
line, uport->fifosize, is_console);
device_create_file(uport->dev, &dev_attr_loopback);
+ device_create_file(uport->dev, &dev_attr_xfer_mode);
msm_geni_serial_debug_init(uport);
dev_port->port_setup = false;
return uart_add_one_port(drv, uport);
@@ -1828,14 +2214,28 @@
struct msm_geni_serial_port *port = platform_get_drvdata(pdev);
int ret = 0;
+ wait_for_transfers_inflight(&port->uport);
ret = se_geni_resources_off(&port->serial_rsc);
if (ret) {
dev_err(dev, "%s: Error ret %d\n", __func__, ret);
goto exit_runtime_suspend;
}
- if (port->wakeup_irq > 0)
+ disable_irq(port->uport.irq);
+ if (port->wakeup_irq > 0) {
+ struct se_geni_rsc *rsc = &port->serial_rsc;
+
+ port->edge_count = 0;
+ ret = pinctrl_select_state(rsc->geni_pinctrl,
+ rsc->geni_gpio_active);
+ if (ret) {
+ dev_err(dev, "%s: Error %d pinctrl_select_state\n",
+ __func__, ret);
+ goto exit_runtime_suspend;
+ }
enable_irq(port->wakeup_irq);
+ }
IPC_LOG_MSG(port->ipc_log_pwr, "%s:\n", __func__);
+ __pm_relax(&port->geni_wake);
exit_runtime_suspend:
return ret;
}
@@ -1846,13 +2246,21 @@
struct msm_geni_serial_port *port = platform_get_drvdata(pdev);
int ret = 0;
+ /*
+ * Do an unconditional relax followed by a stay awake in case the
+ * wake source is activated by the wakeup isr.
+ */
+ __pm_relax(&port->geni_wake);
+ __pm_stay_awake(&port->geni_wake);
if (port->wakeup_irq > 0)
disable_irq(port->wakeup_irq);
ret = se_geni_resources_on(&port->serial_rsc);
if (ret) {
dev_err(dev, "%s: Error ret %d\n", __func__, ret);
+ __pm_relax(&port->geni_wake);
goto exit_runtime_resume;
}
+ enable_irq(port->uport.irq);
IPC_LOG_MSG(port->ipc_log_pwr, "%s:\n", __func__);
exit_runtime_resume:
return ret;
@@ -1868,10 +2276,20 @@
uart_suspend_port((struct uart_driver *)uport->private_data,
uport);
} else {
+ struct uart_state *state = uport->state;
+ struct tty_port *tty_port = &state->port;
+
+ mutex_lock(&tty_port->mutex);
if (!pm_runtime_status_suspended(dev)) {
- dev_info(dev, "%s: Is still active\n", __func__);
+ dev_err(dev, "%s:Active userspace vote; ioctl_cnt %d\n",
+ __func__, port->ioctl_count);
+ IPC_LOG_MSG(port->ipc_log_pwr,
+ "%s:Active userspace vote; ioctl_cnt %d\n",
+ __func__, port->ioctl_count);
+ mutex_unlock(&tty_port->mutex);
return -EBUSY;
}
+ mutex_unlock(&tty_port->mutex);
}
return 0;
}
@@ -1882,7 +2300,8 @@
struct msm_geni_serial_port *port = platform_get_drvdata(pdev);
struct uart_port *uport = &port->uport;
- if (uart_console(uport)) {
+ if (uart_console(uport) &&
+ console_suspend_enabled && uport->suspended) {
se_geni_resources_on(&port->serial_rsc);
uart_resume_port((struct uart_driver *)uport->private_data,
uport);
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index ee84f89..7e97a1c 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -2366,6 +2366,10 @@
*/
udelay(DIV_ROUND_UP(10 * 1000000, baud));
}
+ if (port->flags & UPF_HARD_FLOW) {
+ /* Refresh (Auto) RTS */
+ sci_set_mctrl(port, port->mctrl);
+ }
#ifdef CONFIG_SERIAL_SH_SCI_DMA
/*
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 701c085..94c3718 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -55,10 +55,11 @@
static int __read_mostly sysrq_enabled = CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE;
static bool __read_mostly sysrq_always_enabled;
-static bool sysrq_on(void)
+bool sysrq_on(void)
{
return sysrq_enabled || sysrq_always_enabled;
}
+EXPORT_SYMBOL(sysrq_on);
/*
* A value of 1 means 'all', other nonzero values are an op mask:
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 8c3bf3d..ce2c3c6 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -2711,13 +2711,13 @@
* related to the kernel should not use this.
*/
data = vt_get_shift_state();
- ret = __put_user(data, p);
+ ret = put_user(data, p);
break;
case TIOCL_GETMOUSEREPORTING:
console_lock(); /* May be overkill */
data = mouse_reporting();
console_unlock();
- ret = __put_user(data, p);
+ ret = put_user(data, p);
break;
case TIOCL_SETVESABLANK:
console_lock();
@@ -2726,7 +2726,7 @@
break;
case TIOCL_GETKMSGREDIRECT:
data = vt_get_kmsg_redirect();
- ret = __put_user(data, p);
+ ret = put_user(data, p);
break;
case TIOCL_SETKMSGREDIRECT:
if (!capable(CAP_SYS_ADMIN)) {
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index a876d47..f16491c 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1770,6 +1770,9 @@
{ USB_DEVICE(0x1576, 0x03b1), /* Maretron USB100 */
.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
},
+ { USB_DEVICE(0xfff0, 0x0100), /* DATECS FP-2000 */
+ .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+ },
{ USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
.driver_info = CLEAR_HALT_CONDITIONS,
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index eef716b..3fd2b54 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -981,6 +981,15 @@
case USB_PTM_CAP_TYPE:
dev->bos->ptm_cap =
(struct usb_ptm_cap_descriptor *)buffer;
+ break;
+ case USB_CAP_TYPE_CONFIG_SUMMARY:
+ /* one such desc per configuration */
+ if (!dev->bos->num_config_summary_desc)
+ dev->bos->config_summary =
+ (struct usb_config_summary_descriptor *)buffer;
+
+ dev->bos->num_config_summary_desc++;
+ break;
default:
break;
}
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index 358ca8d..0f10ff2 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -19,6 +19,8 @@
#include <linux/usb.h>
#include <linux/usb/hcd.h>
+#include <linux/usb/audio.h>
+#include <linux/usb/audio-v3.h>
#include "usb.h"
static inline const char *plural(int n)
@@ -40,6 +42,34 @@
&& desc->bInterfaceProtocol == 1;
}
+static int usb_audio_max_rev_config(struct usb_host_bos *bos)
+{
+ int desc_cnt, func_cnt, numfunc;
+ int num_cfg_desc;
+ struct usb_config_summary_descriptor *conf_summary;
+
+ if (!bos || !bos->config_summary)
+ goto done;
+
+ conf_summary = bos->config_summary;
+ num_cfg_desc = bos->num_config_summary_desc;
+
+ for (desc_cnt = 0; desc_cnt < num_cfg_desc; desc_cnt++) {
+ numfunc = conf_summary->bNumFunctions;
+ for (func_cnt = 0; func_cnt < numfunc; func_cnt++) {
+ /* honor device preferred config */
+ if (conf_summary->cs_info[func_cnt].bClass ==
+ USB_CLASS_AUDIO &&
+ conf_summary->cs_info[func_cnt].bSubClass !=
+ FULL_ADC_3_0)
+ return conf_summary->bConfigurationValue;
+ }
+ }
+
+done:
+ return -EINVAL;
+}
+
int usb_choose_configuration(struct usb_device *udev)
{
int i;
@@ -130,7 +160,6 @@
best = c;
break;
}
-
/* If all the remaining configs are vendor-specific,
* choose the first one. */
else if (!best)
@@ -143,7 +172,10 @@
insufficient_power, plural(insufficient_power));
if (best) {
- i = best->desc.bConfigurationValue;
+ /* choose usb audio class preferred config if available */
+ i = usb_audio_max_rev_config(udev->bos);
+ if (i < 0)
+ i = best->desc.bConfigurationValue;
dev_dbg(&udev->dev,
"configuration #%d chosen from %d choice%s\n",
i, num_configs, plural(num_configs));
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 32f99da..e07fa76 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2578,6 +2578,7 @@
hcd->bandwidth_mutex = kmalloc(sizeof(*hcd->bandwidth_mutex),
GFP_KERNEL);
if (!hcd->bandwidth_mutex) {
+ kfree(hcd->address0_mutex);
kfree(hcd);
dev_dbg(dev, "hcd bandwidth mutex alloc failed\n");
return NULL;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index fcbaa61..50679bc 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1345,7 +1345,13 @@
if (ret < 0) {
message = "can't read hub descriptor";
goto fail;
- } else if (hub->descriptor->bNbrPorts > USB_MAXCHILDREN) {
+ }
+
+ maxchild = USB_MAXCHILDREN;
+ if (hub_is_superspeed(hdev))
+ maxchild = min_t(unsigned, maxchild, USB_SS_MAXPORTS);
+
+ if (hub->descriptor->bNbrPorts > maxchild) {
message = "hub has too many ports!";
ret = -ENODEV;
goto fail;
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 96b21b0..3116edf 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -223,6 +223,10 @@
/* Blackmagic Design UltraStudio SDI */
{ USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM },
+ /* Hauppauge HVR-950q */
+ { USB_DEVICE(0x2040, 0x7200), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+
/* INTEL VALUE SSD */
{ USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 7272f9a..92e5d13 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -279,6 +279,8 @@
usb_destroy_configuration(udev);
usb_release_bos_descriptor(udev);
+ if (udev->parent)
+ of_node_put(dev->of_node);
usb_put_hcd(hcd);
kfree(udev->product);
kfree(udev->manufacturer);
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 24fbebc..cfdd5c3 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -2532,7 +2532,7 @@
/* keep other bits untouched (so e.g. forced modes are not lost) */
usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
- GUSBCFG_HNPCAP);
+ GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK);
/* set the PLL on, remove the HNP/SRP and set the PHY */
val = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
@@ -3403,7 +3403,7 @@
/* keep other bits untouched (so e.g. forced modes are not lost) */
usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
- GUSBCFG_HNPCAP);
+ GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK);
/* set the PLL on, remove the HNP/SRP and set the PHY */
trdtim = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
diff --git a/drivers/usb/dwc3/dbm.c b/drivers/usb/dwc3/dbm.c
index 285cd5a..3860a1a 100644
--- a/drivers/usb/dwc3/dbm.c
+++ b/drivers/usb/dwc3/dbm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2015, 2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -129,7 +129,7 @@
enum dbm_reg reg, int ep,
const u32 mask, u32 val)
{
- u32 shift = find_first_bit((void *)&mask, 32);
+ u32 shift = __ffs(mask);
u32 offset = dbm->reg_table[reg].offset +
(dbm->reg_table[reg].ep_mult * ep);
u32 tmp = ioread32(dbm->base + offset);
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index 2f1fb7e..9eba51b 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -148,7 +148,8 @@
exynos->axius_clk = devm_clk_get(dev, "usbdrd30_axius_clk");
if (IS_ERR(exynos->axius_clk)) {
dev_err(dev, "no AXI UpScaler clk specified\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto axius_clk_err;
}
clk_prepare_enable(exynos->axius_clk);
} else {
@@ -206,6 +207,7 @@
regulator_disable(exynos->vdd33);
err2:
clk_disable_unprepare(exynos->axius_clk);
+axius_clk_err:
clk_disable_unprepare(exynos->susp_clk);
clk_disable_unprepare(exynos->clk);
return ret;
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index a496468..69d617f 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -55,6 +55,8 @@
#include "debug.h"
#include "xhci.h"
+#define SDP_CONNETION_CHECK_TIME 10000 /* in ms */
+
/* time out to wait for USB cable status notification (in ms)*/
#define SM_INIT_TIMEOUT 30000
@@ -262,6 +264,7 @@
int pm_qos_latency;
struct pm_qos_request pm_qos_req_dma;
struct delayed_work perf_vote_work;
+ struct delayed_work sdp_check;
};
#define USB_HSPHY_3P3_VOL_MIN 3050000 /* uV */
@@ -319,7 +322,7 @@
u32 offset,
const u32 mask)
{
- u32 shift = ffs(mask);
+ u32 shift = __ffs(mask);
u32 val = ioread32(base + offset);
val &= mask; /* clear other bits */
@@ -353,7 +356,7 @@
static inline void dwc3_msm_write_reg_field(void __iomem *base, u32 offset,
const u32 mask, u32 val)
{
- u32 shift = find_first_bit((void *)&mask, 32);
+ u32 shift = __ffs(mask);
u32 tmp = ioread32(base + offset);
tmp &= ~mask; /* clear written bits */
@@ -1768,9 +1771,10 @@
/*
* Below sequence is used when controller is working without
- * having ssphy and only USB high speed is supported.
+ * having ssphy and only USB high/full speed is supported.
*/
- if (dwc->maximum_speed == USB_SPEED_HIGH) {
+ if (dwc->maximum_speed == USB_SPEED_HIGH ||
+ dwc->maximum_speed == USB_SPEED_FULL) {
dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
dwc3_msm_read_reg(mdwc->base,
QSCRATCH_GENERAL_CFG)
@@ -2817,6 +2821,25 @@
return NOTIFY_DONE;
}
+
+static void check_for_sdp_connection(struct work_struct *w)
+{
+ struct dwc3_msm *mdwc =
+ container_of(w, struct dwc3_msm, sdp_check.work);
+ struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+
+ if (!mdwc->vbus_active)
+ return;
+
+ /* floating D+/D- lines detected */
+ if (dwc->gadget.state < USB_STATE_DEFAULT &&
+ dwc3_gadget_get_link_state(dwc) != DWC3_LINK_STATE_CMPLY) {
+ mdwc->vbus_active = 0;
+ dbg_event(0xFF, "Q RW SPD CHK", mdwc->vbus_active);
+ queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
+ }
+}
+
static int dwc3_msm_vbus_notifier(struct notifier_block *nb,
unsigned long event, void *ptr)
{
@@ -2911,6 +2934,7 @@
}
}
+ edev = NULL;
/* Use third phandle (optional) for EUD based detach/attach events */
if (of_count_phandle_with_args(node, "extcon", NULL) > 2) {
edev = extcon_get_edev_by_phandle(mdwc->dev, 2);
@@ -2920,7 +2944,7 @@
}
}
- if (!IS_ERR(edev)) {
+ if (!IS_ERR_OR_NULL(edev)) {
mdwc->extcon_eud = edev;
mdwc->eud_event_nb.notifier_call = dwc3_msm_eud_notifier;
ret = extcon_register_notifier(edev, EXTCON_USB,
@@ -3102,6 +3126,7 @@
INIT_WORK(&mdwc->vbus_draw_work, dwc3_msm_vbus_draw_work);
INIT_DELAYED_WORK(&mdwc->sm_work, dwc3_otg_sm_work);
INIT_DELAYED_WORK(&mdwc->perf_vote_work, msm_dwc3_perf_vote_work);
+ INIT_DELAYED_WORK(&mdwc->sdp_check, check_for_sdp_connection);
mdwc->dwc3_wq = alloc_ordered_workqueue("dwc3_wq", 0);
if (!mdwc->dwc3_wq) {
@@ -3763,20 +3788,9 @@
platform_device_del(dwc->xhci);
usb_unregister_notify(&mdwc->host_nb);
- /*
- * Perform USB hardware RESET (both core reset and DBM reset)
- * when moving from host to peripheral. This is required for
- * peripheral mode to work.
- */
- dwc3_msm_block_reset(mdwc, true);
-
dwc3_usb3_phy_suspend(dwc, false);
- dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
-
mdwc->in_host_mode = false;
- /* re-init core and OTG registers as block reset clears these */
- dwc3_post_host_reset_core_init(dwc);
pm_runtime_mark_last_busy(mdwc->dev);
pm_runtime_put_sync_autosuspend(mdwc->dev);
dbg_event(0xFF, "StopHost psync",
@@ -3868,34 +3882,46 @@
return 0;
}
-static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA)
+static int get_psy_type(struct dwc3_msm *mdwc)
{
union power_supply_propval pval = {0};
- int ret;
if (mdwc->charging_disabled)
- return 0;
-
- if (mdwc->max_power == mA)
- return 0;
+ return -EINVAL;
if (!mdwc->usb_psy) {
mdwc->usb_psy = power_supply_get_by_name("usb");
if (!mdwc->usb_psy) {
- dev_warn(mdwc->dev, "Could not get usb power_supply\n");
+ dev_err(mdwc->dev, "Could not get usb psy\n");
return -ENODEV;
}
}
- power_supply_get_property(mdwc->usb_psy,
- POWER_SUPPLY_PROP_REAL_TYPE, &pval);
- if (pval.intval != POWER_SUPPLY_TYPE_USB)
+ power_supply_get_property(mdwc->usb_psy, POWER_SUPPLY_PROP_REAL_TYPE,
+ &pval);
+
+ return pval.intval;
+}
+
+static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA)
+{
+ union power_supply_propval pval = {0};
+ int ret, psy_type;
+
+ if (mdwc->max_power == mA)
return 0;
- dev_info(mdwc->dev, "Avail curr from USB = %u\n", mA);
+ psy_type = get_psy_type(mdwc);
+ if (psy_type == POWER_SUPPLY_TYPE_USB) {
+ dev_info(mdwc->dev, "Avail curr from USB = %u\n", mA);
+ /* Set max current limit in uA */
+ pval.intval = 1000 * mA;
+ } else if (psy_type == POWER_SUPPLY_TYPE_USB_FLOAT) {
+ pval.intval = -ETIMEDOUT;
+ } else {
+ return 0;
+ }
- /* Set max current limit in uA */
- pval.intval = 1000 * mA;
ret = power_supply_set_property(mdwc->usb_psy,
POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
if (ret) {
@@ -3966,6 +3992,10 @@
work = 1;
} else if (test_bit(B_SESS_VLD, &mdwc->inputs)) {
dev_dbg(mdwc->dev, "b_sess_vld\n");
+ if (get_psy_type(mdwc) == POWER_SUPPLY_TYPE_USB_FLOAT)
+ queue_delayed_work(mdwc->dwc3_wq,
+ &mdwc->sdp_check,
+ msecs_to_jiffies(SDP_CONNETION_CHECK_TIME));
/*
* Increment pm usage count upon cable connect. Count
* is decremented in OTG_STATE_B_PERIPHERAL state on
@@ -3988,6 +4018,7 @@
!test_bit(ID, &mdwc->inputs)) {
dev_dbg(mdwc->dev, "!id || !bsv\n");
mdwc->otg_state = OTG_STATE_B_IDLE;
+ cancel_delayed_work_sync(&mdwc->sdp_check);
dwc3_otg_start_peripheral(mdwc, 0);
/*
* Decrement pm usage count upon cable disconnect
@@ -4020,6 +4051,7 @@
if (!test_bit(B_SESS_VLD, &mdwc->inputs)) {
dev_dbg(mdwc->dev, "BSUSP: !bsv\n");
mdwc->otg_state = OTG_STATE_B_IDLE;
+ cancel_delayed_work_sync(&mdwc->sdp_check);
dwc3_otg_start_peripheral(mdwc, 0);
} else if (!test_bit(B_SUSPEND, &mdwc->inputs)) {
dev_dbg(mdwc->dev, "BSUSP !susp\n");
diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c
index aaaf256..4cf5381 100644
--- a/drivers/usb/dwc3/dwc3-st.c
+++ b/drivers/usb/dwc3/dwc3-st.c
@@ -231,7 +231,7 @@
dwc3_data->syscfg_reg_off = res->start;
- dev_vdbg(&pdev->dev, "glue-logic addr 0x%p, syscfg-reg offset 0x%x\n",
+ dev_vdbg(&pdev->dev, "glue-logic addr 0x%pK, syscfg-reg offset 0x%x\n",
dwc3_data->glue_base, dwc3_data->syscfg_reg_off);
dwc3_data->rstc_pwrdn =
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 7d8566f..edd000b 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1211,9 +1211,9 @@
return -ESHUTDOWN;
}
- if (WARN(req->dep != dep, "request %p belongs to '%s'\n",
+ if (WARN(req->dep != dep, "request %pK belongs to '%s'\n",
&req->request, req->dep->name)) {
- dwc3_trace(trace_dwc3_gadget, "request %p belongs to '%s'",
+ dwc3_trace(trace_dwc3_gadget, "request %pK belongs to '%s'",
&req->request, req->dep->name);
return -EINVAL;
}
@@ -1405,7 +1405,7 @@
dwc3_stop_active_transfer(dwc, dep->number, true);
goto out1;
}
- dev_err(dwc->dev, "request %p was not queued to %s\n",
+ dev_err(dwc->dev, "request %pK was not queued to %s\n",
request, ep->name);
ret = -EINVAL;
goto out0;
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index e9e8f46..af3ce4f 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -19,6 +19,39 @@
#include "core.h"
+static int dwc3_host_get_irq(struct dwc3 *dwc)
+{
+ struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
+ int irq;
+
+ irq = platform_get_irq_byname(dwc3_pdev, "host");
+ if (irq > 0)
+ goto out;
+
+ if (irq == -EPROBE_DEFER)
+ goto out;
+
+ irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
+ if (irq > 0)
+ goto out;
+
+ if (irq == -EPROBE_DEFER)
+ goto out;
+
+ irq = platform_get_irq(dwc3_pdev, 0);
+ if (irq > 0)
+ goto out;
+
+ if (irq != -EPROBE_DEFER)
+ dev_err(dwc->dev, "missing host IRQ\n");
+
+ if (!irq)
+ irq = -EINVAL;
+
+out:
+ return irq;
+}
+
int dwc3_host_init(struct dwc3 *dwc)
{
struct property_entry props[3];
@@ -28,39 +61,18 @@
struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
int prop_idx = 0;
- irq = platform_get_irq_byname(dwc3_pdev, "host");
- if (irq == -EPROBE_DEFER)
+ irq = dwc3_host_get_irq(dwc);
+ if (irq < 0)
return irq;
- if (irq <= 0) {
- irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
- if (irq == -EPROBE_DEFER)
- return irq;
-
- if (irq <= 0) {
- irq = platform_get_irq(dwc3_pdev, 0);
- if (irq <= 0) {
- if (irq != -EPROBE_DEFER) {
- dev_err(dwc->dev,
- "missing host IRQ\n");
- }
- if (!irq)
- irq = -EINVAL;
- return irq;
- } else {
- res = platform_get_resource(dwc3_pdev,
- IORESOURCE_IRQ, 0);
- }
- } else {
- res = platform_get_resource_byname(dwc3_pdev,
- IORESOURCE_IRQ,
- "dwc_usb3");
- }
-
- } else {
+ res = platform_get_resource_byname(dwc3_pdev, IORESOURCE_IRQ, "host");
+ if (!res)
res = platform_get_resource_byname(dwc3_pdev, IORESOURCE_IRQ,
- "host");
- }
+ "dwc_usb3");
+ if (!res)
+ res = platform_get_resource(dwc3_pdev, IORESOURCE_IRQ, 0);
+ if (!res)
+ return -ENOMEM;
dwc->xhci_resources[1].start = irq;
dwc->xhci_resources[1].end = irq;
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index ec166f2..8a6ae0b 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -2291,7 +2291,7 @@
cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL);
if (!cdev->os_desc_req->buf) {
ret = -ENOMEM;
- kfree(cdev->os_desc_req);
+ usb_ep_free_request(ep0, cdev->os_desc_req);
goto end;
}
cdev->os_desc_req->context = cdev;
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 6b2c137..f910990 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -22,6 +22,7 @@
static struct class *android_class;
static struct device *android_device;
static int index;
+static int gadget_index;
struct device *create_function_device(char *name)
{
@@ -137,21 +138,28 @@
struct list_head list;
};
+#define MAX_USB_STRING_LEN 126
+#define MAX_USB_STRING_WITH_NULL_LEN (MAX_USB_STRING_LEN+1)
+
static int usb_string_copy(const char *s, char **s_copy)
{
int ret;
char *str;
char *copy = *s_copy;
ret = strlen(s);
- if (ret > 126)
+ if (ret > MAX_USB_STRING_LEN)
return -EOVERFLOW;
- str = kstrdup(s, GFP_KERNEL);
- if (!str)
- return -ENOMEM;
+ if (copy) {
+ str = copy;
+ } else {
+ str = kmalloc(MAX_USB_STRING_WITH_NULL_LEN, GFP_KERNEL);
+ if (!str)
+ return -ENOMEM;
+ }
+ strncpy(str, s, MAX_USB_STRING_WITH_NULL_LEN);
if (str[ret - 1] == '\n')
str[ret - 1] = '\0';
- kfree(copy);
*s_copy = str;
return 0;
}
@@ -1425,21 +1433,21 @@
spin_unlock_irqrestore(&cdev->lock, flags);
if (status[0]) {
- kobject_uevent_env(&android_device->kobj,
+ kobject_uevent_env(&gi->dev->kobj,
KOBJ_CHANGE, connected);
pr_info("%s: sent uevent %s\n", __func__, connected[0]);
uevent_sent = true;
}
if (status[1]) {
- kobject_uevent_env(&android_device->kobj,
+ kobject_uevent_env(&gi->dev->kobj,
KOBJ_CHANGE, configured);
pr_info("%s: sent uevent %s\n", __func__, configured[0]);
uevent_sent = true;
}
if (status[2]) {
- kobject_uevent_env(&android_device->kobj,
+ kobject_uevent_env(&gi->dev->kobj,
KOBJ_CHANGE, disconnected);
pr_info("%s: sent uevent %s\n", __func__, disconnected[0]);
uevent_sent = true;
@@ -1600,23 +1608,28 @@
{
struct device_attribute **attrs;
struct device_attribute *attr;
+ char str[10];
INIT_WORK(&gi->work, android_work);
- android_device = device_create(android_class, NULL,
- MKDEV(0, 0), NULL, "android0");
- if (IS_ERR(android_device))
- return PTR_ERR(android_device);
+ snprintf(str, sizeof(str), "android%d", gadget_index - 1);
+ pr_debug("Creating android device %s\n", str);
+ gi->dev = device_create(android_class, NULL,
+ MKDEV(0, 0), NULL, str);
+ if (IS_ERR(gi->dev))
+ return PTR_ERR(gi->dev);
- dev_set_drvdata(android_device, gi);
+ dev_set_drvdata(gi->dev, gi);
+ if (gadget_index == 1)
+ android_device = gi->dev;
attrs = android_usb_attributes;
while ((attr = *attrs++)) {
int err;
- err = device_create_file(android_device, attr);
+ err = device_create_file(gi->dev, attr);
if (err) {
- device_destroy(android_device->class,
- android_device->devt);
+ device_destroy(gi->dev->class,
+ gi->dev->devt);
return err;
}
}
@@ -1624,15 +1637,15 @@
return 0;
}
-static void android_device_destroy(void)
+static void android_device_destroy(struct device *dev)
{
struct device_attribute **attrs;
struct device_attribute *attr;
attrs = android_usb_attributes;
while ((attr = *attrs++))
- device_remove_file(android_device, attr);
- device_destroy(android_device->class, android_device->devt);
+ device_remove_file(dev, attr);
+ device_destroy(dev->class, dev->devt);
}
#else
static inline int android_device_create(struct gadget_info *gi)
@@ -1640,7 +1653,7 @@
return 0;
}
-static inline void android_device_destroy(void)
+static inline void android_device_destroy(struct device *dev)
{
}
#endif
@@ -1696,6 +1709,8 @@
if (!gi->composite.gadget_driver.function)
goto err;
+ gadget_index++;
+ pr_debug("Creating gadget index %d\n", gadget_index);
if (android_device_create(gi) < 0)
goto err;
@@ -1708,8 +1723,14 @@
static void gadgets_drop(struct config_group *group, struct config_item *item)
{
+ struct gadget_info *gi;
+
+ gi = container_of(to_config_group(item), struct gadget_info, group);
config_item_put(item);
- android_device_destroy();
+ if (gi->dev) {
+ android_device_destroy(gi->dev);
+ gi->dev = NULL;
+ }
}
static struct configfs_group_operations gadgets_ops = {
diff --git a/drivers/usb/gadget/function/f_accessory.c b/drivers/usb/gadget/function/f_accessory.c
index 46df732..a7cb586 100644
--- a/drivers/usb/gadget/function/f_accessory.c
+++ b/drivers/usb/gadget/function/f_accessory.c
@@ -346,6 +346,7 @@
struct acc_dev *dev = ep->driver_data;
char *string_dest = NULL;
int length = req->actual;
+ unsigned long flags;
if (req->status != 0) {
pr_err("acc_complete_set_string, err %d\n", req->status);
@@ -371,22 +372,26 @@
case ACCESSORY_STRING_SERIAL:
string_dest = dev->serial;
break;
- }
- if (string_dest) {
- unsigned long flags;
-
- if (length >= ACC_STRING_SIZE)
- length = ACC_STRING_SIZE - 1;
-
- spin_lock_irqsave(&dev->lock, flags);
- memcpy(string_dest, req->buf, length);
- /* ensure zero termination */
- string_dest[length] = 0;
- spin_unlock_irqrestore(&dev->lock, flags);
- } else {
+ default:
pr_err("unknown accessory string index %d\n",
- dev->string_index);
+ dev->string_index);
+ return;
}
+
+ if (!length) {
+ pr_debug("zero length for accessory string index %d\n",
+ dev->string_index);
+ return;
+ }
+
+ if (length >= ACC_STRING_SIZE)
+ length = ACC_STRING_SIZE - 1;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ memcpy(string_dest, req->buf, length);
+ /* ensure zero termination */
+ string_dest[length] = 0;
+ spin_unlock_irqrestore(&dev->lock, flags);
}
static void acc_complete_set_hid_report_desc(struct usb_ep *ep,
diff --git a/drivers/usb/gadget/function/f_audio_source.c b/drivers/usb/gadget/function/f_audio_source.c
index 51ab794..1590927 100644
--- a/drivers/usb/gadget/function/f_audio_source.c
+++ b/drivers/usb/gadget/function/f_audio_source.c
@@ -1029,8 +1029,14 @@
config_group_init_type_name(&fi_audio->func_inst.group, "",
&audio_source_func_type);
- snprintf(device_name, AUDIO_SOURCE_DEV_NAME_LENGTH,
+ if (!count) {
+ snprintf(device_name, AUDIO_SOURCE_DEV_NAME_LENGTH,
+ "f_audio_source");
+ count++;
+ } else {
+ snprintf(device_name, AUDIO_SOURCE_DEV_NAME_LENGTH,
"f_audio_source%d", count++);
+ }
dev = create_function_device(device_name);
diff --git a/drivers/usb/gadget/function/f_ccid.c b/drivers/usb/gadget/function/f_ccid.c
index 1801a6c..7817f1c 100644
--- a/drivers/usb/gadget/function/f_ccid.c
+++ b/drivers/usb/gadget/function/f_ccid.c
@@ -26,7 +26,7 @@
#include "f_ccid.h"
#define BULK_IN_BUFFER_SIZE sizeof(struct ccid_bulk_in_header)
-#define BULK_OUT_BUFFER_SIZE sizeof(struct ccid_bulk_out_header)
+#define BULK_OUT_BUFFER_SIZE 1024
#define CTRL_BUF_SIZE 4
#define FUNCTION_NAME "ccid"
#define MAX_INST_NAME_LEN 40
@@ -629,14 +629,14 @@
struct f_ccid *ccid_dev = fp->private_data;
struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
struct usb_request *req;
- int r = count, xfer;
+ int r = count, xfer, len;
int ret;
unsigned long flags;
pr_debug("ccid_bulk_read(%zu)\n", count);
if (count > BULK_OUT_BUFFER_SIZE) {
- pr_err("%s: max_buffer_size:%zu given_pkt_size:%zu\n",
+ pr_err("%s: max_buffer_size:%d given_pkt_size:%zu\n",
__func__, BULK_OUT_BUFFER_SIZE, count);
return -ENOMEM;
}
@@ -647,6 +647,7 @@
goto done;
}
+ len = ALIGN(count, ccid_dev->out->maxpacket);
requeue_req:
spin_lock_irqsave(&ccid_dev->lock, flags);
if (!atomic_read(&ccid_dev->online)) {
@@ -655,7 +656,7 @@
}
/* queue a request */
req = bulk_dev->rx_req;
- req->length = count;
+ req->length = len;
bulk_dev->rx_done = 0;
spin_unlock_irqrestore(&ccid_dev->lock, flags);
ret = usb_ep_queue(ccid_dev->out, req, GFP_KERNEL);
@@ -688,6 +689,9 @@
spin_unlock_irqrestore(&ccid_dev->lock, flags);
goto requeue_req;
}
+ if (req->actual > count)
+ pr_err("%s More data received(%d) than required(%zu)\n",
+ __func__, req->actual, count);
xfer = (req->actual < count) ? req->actual : count;
atomic_set(&bulk_dev->rx_req_busy, 1);
spin_unlock_irqrestore(&ccid_dev->lock, flags);
@@ -875,7 +879,8 @@
count = CTRL_BUF_SIZE;
ret = wait_event_interruptible(ctrl_dev->tx_wait_q,
- ctrl_dev->tx_ctrl_done);
+ ctrl_dev->tx_ctrl_done ||
+ !atomic_read(&ccid_dev->online));
if (ret < 0)
return ret;
ctrl_dev->tx_ctrl_done = 0;
diff --git a/drivers/usb/gadget/function/f_cdev.c b/drivers/usb/gadget/function/f_cdev.c
index 920c08a..5804840 100644
--- a/drivers/usb/gadget/function/f_cdev.c
+++ b/drivers/usb/gadget/function/f_cdev.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2013-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011, 2013-2017, The Linux Foundation. All rights reserved.
* Linux Foundation chooses to take subject only to the GPLv2 license terms,
* and distributes only under these terms.
*
@@ -826,8 +826,10 @@
opts = container_of(fi, struct f_cdev_opts, func_inst);
- device_destroy(fcdev_classp, MKDEV(major, opts->port->minor));
- cdev_del(&opts->port->fcdev_cdev);
+ if (opts->port) {
+ device_destroy(fcdev_classp, MKDEV(major, opts->port->minor));
+ cdev_del(&opts->port->fcdev_cdev);
+ }
usb_cser_chardev_deinit();
kfree(opts->func_name);
kfree(opts->port);
diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c
index ea17164..ca8ed69 100644
--- a/drivers/usb/gadget/function/f_mtp.c
+++ b/drivers/usb/gadget/function/f_mtp.c
@@ -137,6 +137,7 @@
} perf[MAX_ITERATION];
unsigned int dbg_read_index;
unsigned int dbg_write_index;
+ struct mutex read_mutex;
};
static struct usb_interface_descriptor mtp_interface_desc = {
@@ -626,11 +627,18 @@
dev->state = STATE_BUSY;
spin_unlock_irq(&dev->lock);
+ mutex_lock(&dev->read_mutex);
+ if (dev->state == STATE_OFFLINE) {
+ r = -EIO;
+ mutex_unlock(&dev->read_mutex);
+ goto done;
+ }
requeue_req:
/* queue a request */
req = dev->rx_req[0];
req->length = len;
dev->rx_done = 0;
+ mutex_unlock(&dev->read_mutex);
ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
if (ret < 0) {
r = -EIO;
@@ -656,6 +664,7 @@
usb_ep_dequeue(dev->ep_out, req);
goto done;
}
+ mutex_lock(&dev->read_mutex);
if (dev->state == STATE_BUSY) {
/* If we got a 0-len packet, throw it back and try again. */
if (req->actual == 0)
@@ -669,6 +678,7 @@
} else
r = -EIO;
+ mutex_unlock(&dev->read_mutex);
done:
spin_lock_irq(&dev->lock);
if (dev->state == STATE_CANCELED)
@@ -920,6 +930,12 @@
while (count > 0 || write_req) {
if (count > 0) {
+ mutex_lock(&dev->read_mutex);
+ if (dev->state == STATE_OFFLINE) {
+ r = -EIO;
+ mutex_unlock(&dev->read_mutex);
+ break;
+ }
/* queue a request */
read_req = dev->rx_req[cur_buf];
cur_buf = (cur_buf + 1) % RX_REQ_MAX;
@@ -928,6 +944,7 @@
read_req->length = mtp_rx_req_len;
dev->rx_done = 0;
+ mutex_unlock(&dev->read_mutex);
ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL);
if (ret < 0) {
r = -EIO;
@@ -940,15 +957,23 @@
if (write_req) {
DBG(cdev, "rx %p %d\n", write_req, write_req->actual);
start_time = ktime_get();
+ mutex_lock(&dev->read_mutex);
+ if (dev->state == STATE_OFFLINE) {
+ r = -EIO;
+ mutex_unlock(&dev->read_mutex);
+ break;
+ }
ret = vfs_write(filp, write_req->buf, write_req->actual,
&offset);
DBG(cdev, "vfs_write %d\n", ret);
if (ret != write_req->actual) {
r = -EIO;
+ mutex_unlock(&dev->read_mutex);
if (dev->state != STATE_OFFLINE)
dev->state = STATE_ERROR;
break;
}
+ mutex_unlock(&dev->read_mutex);
dev->perf[dev->dbg_write_index].vfs_wtime =
ktime_to_us(ktime_sub(ktime_get(), start_time));
dev->perf[dev->dbg_write_index].vfs_wbytes = ret;
@@ -976,6 +1001,13 @@
r = read_req->status;
break;
}
+
+ mutex_lock(&dev->read_mutex);
+ if (dev->state == STATE_OFFLINE) {
+ r = -EIO;
+ mutex_unlock(&dev->read_mutex);
+ break;
+ }
/* Check if we aligned the size due to MTU constraint */
if (count < read_req->length)
read_req->actual = (read_req->actual > count ?
@@ -996,6 +1028,7 @@
write_req = read_req;
read_req = NULL;
+ mutex_unlock(&dev->read_mutex);
}
}
@@ -1446,12 +1479,14 @@
int i;
mtp_string_defs[INTERFACE_STRING_INDEX].id = 0;
+ mutex_lock(&dev->read_mutex);
while ((req = mtp_req_get(dev, &dev->tx_idle)))
mtp_request_free(req, dev->ep_in);
for (i = 0; i < RX_REQ_MAX; i++)
mtp_request_free(dev->rx_req[i], dev->ep_out);
while ((req = mtp_req_get(dev, &dev->intr_idle)))
mtp_request_free(req, dev->ep_intr);
+ mutex_unlock(&dev->read_mutex);
dev->state = STATE_OFFLINE;
kfree(f->os_desc_table);
f->os_desc_n = 0;
@@ -1545,7 +1580,7 @@
}
seq_printf(s, "vfs_write(time in usec) min:%d\t max:%d\t avg:%d\n",
- min, max, sum / iteration);
+ min, max, (iteration ? (sum / iteration) : 0));
min = max = sum = iteration = 0;
seq_puts(s, "\n=======================\n");
seq_puts(s, "USB MTP IN related VFS read stats:\n");
@@ -1567,7 +1602,7 @@
}
seq_printf(s, "vfs_read(time in usec) min:%d\t max:%d\t avg:%d\n",
- min, max, sum / iteration);
+ min, max, (iteration ? (sum / iteration) : 0));
spin_unlock_irqrestore(&dev->lock, flags);
return 0;
}
@@ -1791,6 +1826,8 @@
usb_os_desc_prepare_interf_dir(&fi_mtp->func_inst.group, 1,
descs, names, THIS_MODULE);
+ mutex_init(&fi_mtp->dev->read_mutex);
+
return &fi_mtp->func_inst;
}
EXPORT_SYMBOL_GPL(alloc_inst_mtp_ptp);
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 1468d8f..f959c42 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1183,8 +1183,10 @@
/* closing ep0 === shutdown all */
- if (dev->gadget_registered)
+ if (dev->gadget_registered) {
usb_gadget_unregister_driver (&gadgetfs_driver);
+ dev->gadget_registered = false;
+ }
/* at this point "good" hardware has disconnected the
* device from USB; the host won't see it any more.
@@ -1677,9 +1679,10 @@
gadgetfs_suspend (struct usb_gadget *gadget)
{
struct dev_data *dev = get_gadget_data (gadget);
+ unsigned long flags;
INFO (dev, "suspended from state %d\n", dev->state);
- spin_lock (&dev->lock);
+ spin_lock_irqsave(&dev->lock, flags);
switch (dev->state) {
case STATE_DEV_SETUP: // VERY odd... host died??
case STATE_DEV_CONNECTED:
@@ -1690,7 +1693,7 @@
default:
break;
}
- spin_unlock (&dev->lock);
+ spin_unlock_irqrestore(&dev->lock, flags);
}
static struct usb_gadget_driver gadgetfs_driver = {
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index 4fa5de2..94c8a9f 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -442,23 +442,16 @@
/* Report reset and disconnect events to the driver */
if (dum->driver && (disconnect || reset)) {
stop_activity(dum);
- spin_unlock(&dum->lock);
if (reset)
usb_gadget_udc_reset(&dum->gadget, dum->driver);
else
dum->driver->disconnect(&dum->gadget);
- spin_lock(&dum->lock);
}
} else if (dum_hcd->active != dum_hcd->old_active) {
- if (dum_hcd->old_active && dum->driver->suspend) {
- spin_unlock(&dum->lock);
+ if (dum_hcd->old_active && dum->driver->suspend)
dum->driver->suspend(&dum->gadget);
- spin_lock(&dum->lock);
- } else if (!dum_hcd->old_active && dum->driver->resume) {
- spin_unlock(&dum->lock);
+ else if (!dum_hcd->old_active && dum->driver->resume)
dum->driver->resume(&dum->gadget);
- spin_lock(&dum->lock);
- }
}
dum_hcd->old_status = dum_hcd->port_status;
@@ -983,7 +976,9 @@
struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g);
struct dummy *dum = dum_hcd->dum;
+ spin_lock_irq(&dum->lock);
dum->driver = NULL;
+ spin_unlock_irq(&dum->lock);
return 0;
}
@@ -2009,7 +2004,7 @@
HUB_CHAR_COMMON_OCPM);
desc->bNbrPorts = 1;
desc->u.ss.bHubHdrDecLat = 0x04; /* Worst case: 0.4 micro sec*/
- desc->u.ss.DeviceRemovable = 0xffff;
+ desc->u.ss.DeviceRemovable = 0;
}
static inline void hub_descriptor(struct usb_hub_descriptor *desc)
@@ -2021,8 +2016,8 @@
HUB_CHAR_INDV_PORT_LPSM |
HUB_CHAR_COMMON_OCPM);
desc->bNbrPorts = 1;
- desc->u.hs.DeviceRemovable[0] = 0xff;
- desc->u.hs.DeviceRemovable[1] = 0xff;
+ desc->u.hs.DeviceRemovable[0] = 0;
+ desc->u.hs.DeviceRemovable[1] = 0xff; /* PortPwrCtrlMask */
}
static int dummy_hub_control(
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index 61c938c..33f3987 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -2469,11 +2469,8 @@
nuke(&dev->ep[i]);
/* report disconnect; the driver is already quiesced */
- if (driver) {
- spin_unlock(&dev->lock);
+ if (driver)
driver->disconnect(&dev->gadget);
- spin_lock(&dev->lock);
- }
usb_reinit(dev);
}
@@ -3347,8 +3344,6 @@
BIT(PCI_RETRY_ABORT_INTERRUPT))
static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
-__releases(dev->lock)
-__acquires(dev->lock)
{
struct net2280_ep *ep;
u32 tmp, num, mask, scratch;
@@ -3389,14 +3384,12 @@
if (disconnect || reset) {
stop_activity(dev, dev->driver);
ep0_start(dev);
- spin_unlock(&dev->lock);
if (reset)
usb_gadget_udc_reset
(&dev->gadget, dev->driver);
else
(dev->driver->disconnect)
(&dev->gadget);
- spin_lock(&dev->lock);
return;
}
}
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index fb8fc34..ba78e3f 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -1401,7 +1401,13 @@
struct renesas_usb3_request *usb3_req,
int status)
{
- usb3_pn_stop(usb3);
+ unsigned long flags;
+
+ spin_lock_irqsave(&usb3->lock, flags);
+ if (usb3_pn_change(usb3, usb3_ep->num))
+ usb3_pn_stop(usb3);
+ spin_unlock_irqrestore(&usb3->lock, flags);
+
usb3_disable_pipe_irq(usb3, usb3_ep->num);
usb3_request_done(usb3_ep, usb3_req, status);
@@ -1430,30 +1436,46 @@
{
struct renesas_usb3_ep *usb3_ep = usb3_get_ep(usb3, num);
struct renesas_usb3_request *usb3_req = usb3_get_request(usb3_ep);
+ bool done = false;
if (!usb3_req)
return;
+ spin_lock(&usb3->lock);
+ if (usb3_pn_change(usb3, num))
+ goto out;
+
if (usb3_ep->dir_in) {
/* Do not stop the IN pipe here to detect LSTTR interrupt */
if (!usb3_write_pipe(usb3_ep, usb3_req, USB3_PN_WRITE))
usb3_clear_bit(usb3, PN_INT_BFRDY, USB3_PN_INT_ENA);
} else {
if (!usb3_read_pipe(usb3_ep, usb3_req, USB3_PN_READ))
- usb3_request_done_pipen(usb3, usb3_ep, usb3_req, 0);
+ done = true;
}
+
+out:
+ /* need to unlock because usb3_request_done_pipen() locks it */
+ spin_unlock(&usb3->lock);
+
+ if (done)
+ usb3_request_done_pipen(usb3, usb3_ep, usb3_req, 0);
}
static void usb3_irq_epc_pipen(struct renesas_usb3 *usb3, int num)
{
u32 pn_int_sta;
- if (usb3_pn_change(usb3, num) < 0)
+ spin_lock(&usb3->lock);
+ if (usb3_pn_change(usb3, num) < 0) {
+ spin_unlock(&usb3->lock);
return;
+ }
pn_int_sta = usb3_read(usb3, USB3_PN_INT_STA);
pn_int_sta &= usb3_read(usb3, USB3_PN_INT_ENA);
usb3_write(usb3, pn_int_sta, USB3_PN_INT_STA);
+ spin_unlock(&usb3->lock);
if (pn_int_sta & PN_INT_LSTTR)
usb3_irq_epc_pipen_lsttr(usb3, num);
if (pn_int_sta & PN_INT_BFRDY)
@@ -1707,6 +1729,9 @@
/* hook up the driver */
usb3->driver = driver;
+ pm_runtime_enable(usb3_to_dev(usb3));
+ pm_runtime_get_sync(usb3_to_dev(usb3));
+
renesas_usb3_init_controller(usb3);
return 0;
@@ -1715,14 +1740,14 @@
static int renesas_usb3_stop(struct usb_gadget *gadget)
{
struct renesas_usb3 *usb3 = gadget_to_renesas_usb3(gadget);
- unsigned long flags;
- spin_lock_irqsave(&usb3->lock, flags);
usb3->softconnect = false;
usb3->gadget.speed = USB_SPEED_UNKNOWN;
usb3->driver = NULL;
renesas_usb3_stop_controller(usb3);
- spin_unlock_irqrestore(&usb3->lock, flags);
+
+ pm_runtime_put(usb3_to_dev(usb3));
+ pm_runtime_disable(usb3_to_dev(usb3));
return 0;
}
@@ -1761,9 +1786,6 @@
{
struct renesas_usb3 *usb3 = platform_get_drvdata(pdev);
- pm_runtime_put(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
-
usb_del_gadget_udc(&usb3->gadget);
__renesas_usb3_ep_free_request(usb3->ep0_req);
@@ -1948,9 +1970,6 @@
usb3->workaround_for_vbus = priv->workaround_for_vbus;
- pm_runtime_enable(&pdev->dev);
- pm_runtime_get_sync(&pdev->dev);
-
dev_info(&pdev->dev, "probed\n");
return 0;
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index a9a1e4c..c8989c6 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -77,6 +77,16 @@
#define USB_INTEL_USB3_PSSEN 0xD8
#define USB_INTEL_USB3PRM 0xDC
+/* ASMEDIA quirk use */
+#define ASMT_DATA_WRITE0_REG 0xF8
+#define ASMT_DATA_WRITE1_REG 0xFC
+#define ASMT_CONTROL_REG 0xE0
+#define ASMT_CONTROL_WRITE_BIT 0x02
+#define ASMT_WRITEREG_CMD 0x10423
+#define ASMT_FLOWCTL_ADDR 0xFA30
+#define ASMT_FLOWCTL_DATA 0xBA
+#define ASMT_PSEUDO_DATA 0
+
/*
* amd_chipset_gen values represent AMD different chipset generations
*/
@@ -412,6 +422,50 @@
}
EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_disable);
+static int usb_asmedia_wait_write(struct pci_dev *pdev)
+{
+ unsigned long retry_count;
+ unsigned char value;
+
+ for (retry_count = 1000; retry_count > 0; --retry_count) {
+
+ pci_read_config_byte(pdev, ASMT_CONTROL_REG, &value);
+
+ if (value == 0xff) {
+ dev_err(&pdev->dev, "%s: check_ready ERROR", __func__);
+ return -EIO;
+ }
+
+ if ((value & ASMT_CONTROL_WRITE_BIT) == 0)
+ return 0;
+
+ usleep_range(40, 60);
+ }
+
+ dev_warn(&pdev->dev, "%s: check_write_ready timeout", __func__);
+ return -ETIMEDOUT;
+}
+
+void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev)
+{
+ if (usb_asmedia_wait_write(pdev) != 0)
+ return;
+
+ /* send command and address to device */
+ pci_write_config_dword(pdev, ASMT_DATA_WRITE0_REG, ASMT_WRITEREG_CMD);
+ pci_write_config_dword(pdev, ASMT_DATA_WRITE1_REG, ASMT_FLOWCTL_ADDR);
+ pci_write_config_byte(pdev, ASMT_CONTROL_REG, ASMT_CONTROL_WRITE_BIT);
+
+ if (usb_asmedia_wait_write(pdev) != 0)
+ return;
+
+ /* send data to device */
+ pci_write_config_dword(pdev, ASMT_DATA_WRITE0_REG, ASMT_FLOWCTL_DATA);
+ pci_write_config_dword(pdev, ASMT_DATA_WRITE1_REG, ASMT_PSEUDO_DATA);
+ pci_write_config_byte(pdev, ASMT_CONTROL_REG, ASMT_CONTROL_WRITE_BIT);
+}
+EXPORT_SYMBOL_GPL(usb_asmedia_modifyflowcontrol);
+
void usb_amd_quirk_pll_enable(void)
{
usb_amd_quirk_pll(0);
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
index c622ddf..6463fdb 100644
--- a/drivers/usb/host/pci-quirks.h
+++ b/drivers/usb/host/pci-quirks.h
@@ -11,6 +11,7 @@
void usb_amd_dev_put(void);
void usb_amd_quirk_pll_disable(void);
void usb_amd_quirk_pll_enable(void);
+void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev);
void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev);
void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);
void sb800_prefetch(struct device *dev, int on);
@@ -18,6 +19,7 @@
struct pci_dev;
static inline void usb_amd_quirk_pll_disable(void) {}
static inline void usb_amd_quirk_pll_enable(void) {}
+static inline void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev) {}
static inline void usb_amd_dev_put(void) {}
static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {}
static inline void sb800_prefetch(struct device *dev, int on) {}
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index bfa7fa3..7bf78be 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -1269,7 +1269,7 @@
time = 30;
break;
default:
- time = 300;
+ time = 50;
break;
}
@@ -1785,6 +1785,7 @@
pipe = td->pipe;
pipe_stop(r8a66597, pipe);
+ /* Select a different address or endpoint */
new_td = td;
do {
list_move_tail(&new_td->queue,
@@ -1794,7 +1795,8 @@
new_td = td;
break;
}
- } while (td != new_td && td->address == new_td->address);
+ } while (td != new_td && td->address == new_td->address &&
+ td->pipe->info.epnum == new_td->pipe->info.epnum);
start_transfer(r8a66597, new_td);
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 32aa45e..e9a8c3f 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -798,6 +798,9 @@
clear_bit(wIndex, &bus_state->resuming_ports);
set_bit(wIndex, &bus_state->rexit_ports);
+
+ xhci_test_and_clear_bit(xhci, port_array, wIndex,
+ PORT_PLC);
xhci_set_link_state(xhci, port_array, wIndex,
XDEV_U0);
@@ -1535,6 +1538,9 @@
t2 |= PORT_WKOC_E | PORT_WKCONN_E;
t2 &= ~PORT_WKDISC_E;
}
+ if ((xhci->quirks & XHCI_U2_DISABLE_WAKE) &&
+ (hcd->speed < HCD_USB3))
+ t2 &= ~PORT_WAKE_BITS;
} else
t2 &= ~PORT_WAKE_BITS;
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index d680eb3..c99121a6 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -2230,11 +2230,12 @@
{
u32 temp, port_offset, port_count;
int i;
- u8 major_revision;
+ u8 major_revision, minor_revision;
struct xhci_hub *rhub;
temp = readl(addr);
major_revision = XHCI_EXT_PORT_MAJOR(temp);
+ minor_revision = XHCI_EXT_PORT_MINOR(temp);
if (major_revision == 0x03) {
rhub = &xhci->usb3_rhub;
@@ -2248,7 +2249,9 @@
return;
}
rhub->maj_rev = XHCI_EXT_PORT_MAJOR(temp);
- rhub->min_rev = XHCI_EXT_PORT_MINOR(temp);
+
+ if (rhub->min_rev < minor_revision)
+ rhub->min_rev = minor_revision;
/* Port offset and count in the third dword, see section 7.2 */
temp = readl(addr + 2);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index e7d6752..2383344 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -54,6 +54,13 @@
#define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8
#define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0
+#define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
+#define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba
+#define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb
+#define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc
+
+#define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142
+
static const char hcd_name[] = "xhci_hcd";
static struct hc_driver __read_mostly xhci_pci_hc_driver;
@@ -135,6 +142,13 @@
if (pdev->vendor == PCI_VENDOR_ID_AMD)
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+ if ((pdev->vendor == PCI_VENDOR_ID_AMD) &&
+ ((pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4) ||
+ (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_3) ||
+ (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_2) ||
+ (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_1)))
+ xhci->quirks |= XHCI_U2_DISABLE_WAKE;
+
if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
xhci->quirks |= XHCI_LPM_SUPPORT;
xhci->quirks |= XHCI_INTEL_HOST;
@@ -201,6 +215,13 @@
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
pdev->device == 0x1042)
xhci->quirks |= XHCI_BROKEN_STREAMS;
+ if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
+ pdev->device == 0x1142)
+ xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+
+ if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
+ pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI)
+ xhci->quirks |= XHCI_ASMEDIA_MODIFY_FLOWCONTROL;
if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241)
xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 5d434e0..e185bbe 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -860,13 +860,16 @@
(ep->ep_state & EP_GETTING_NO_STREAMS)) {
int stream_id;
- for (stream_id = 0; stream_id < ep->stream_info->num_streams;
+ for (stream_id = 1; stream_id < ep->stream_info->num_streams;
stream_id++) {
+ ring = ep->stream_info->stream_rings[stream_id];
+ if (!ring)
+ continue;
+
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Killing URBs for slot ID %u, ep index %u, stream %u",
- slot_id, ep_index, stream_id + 1);
- xhci_kill_ring_urbs(xhci,
- ep->stream_info->stream_rings[stream_id]);
+ slot_id, ep_index, stream_id);
+ xhci_kill_ring_urbs(xhci, ring);
}
} else {
ring = ep->ring;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index ec9ff3e..15bf308 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -201,6 +201,9 @@
if (ret)
return ret;
+ if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
+ usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller));
+
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Wait for controller to be ready for doorbell rings");
/*
@@ -1134,6 +1137,9 @@
if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
compliance_mode_recovery_timer_init(xhci);
+ if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
+ usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller));
+
/* Re-enable port polling. */
xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 86d578e..757d045 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1668,6 +1668,8 @@
/* For controller with a broken Port Disable implementation */
#define XHCI_BROKEN_PORT_PED (1 << 25)
#define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26)
+#define XHCI_U2_DISABLE_WAKE (1 << 27)
+#define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28)
unsigned int num_active_eps;
unsigned int limit_active_eps;
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 358feca..261ed2c 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1909,6 +1909,14 @@
static void musb_irq_work(struct work_struct *data)
{
struct musb *musb = container_of(data, struct musb, irq_work.work);
+ int error;
+
+ error = pm_runtime_get_sync(musb->controller);
+ if (error < 0) {
+ dev_err(musb->controller, "Could not enable: %i\n", error);
+
+ return;
+ }
musb_pm_runtime_check_session(musb);
@@ -1916,6 +1924,9 @@
musb->xceiv_old_state = musb->xceiv->otg->state;
sysfs_notify(&musb->controller->kobj, NULL, "mode");
}
+
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
}
static void musb_recover_from_babble(struct musb *musb)
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 9f125e1..39666fb 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -213,6 +213,12 @@
msecs_to_jiffies(wrp->poll_timeout));
break;
case OTG_STATE_A_WAIT_BCON:
+ /* keep VBUS on for host-only mode */
+ if (musb->port_mode == MUSB_PORT_MODE_HOST) {
+ mod_timer(&glue->timer, jiffies +
+ msecs_to_jiffies(wrp->poll_timeout));
+ break;
+ }
musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
skip_session = 1;
/* fall */
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index 5643613..dca4811 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -182,7 +182,7 @@
#define PS_HARD_RESET_TIME 25
#define PS_SOURCE_ON 400
#define PS_SOURCE_OFF 750
-#define SWAP_SOURCE_START_TIME 20
+#define FIRST_SOURCE_CAP_TIME 200
#define VDM_BUSY_TIME 50
#define VCONN_ON_TIME 100
@@ -377,6 +377,8 @@
struct list_head svid_handlers;
struct list_head instance;
+
+ u16 ss_lane_svid;
};
static LIST_HEAD(_usbpd); /* useful for debugging */
@@ -445,6 +447,47 @@
extcon_set_state_sync(pd->extcon, EXTCON_USB, 1);
}
+/**
+ * This API allows client driver to request for releasing SS lanes. It should
+ * not be called from atomic context.
+ *
+ * @pd - USBPD handler
+ * @hdlr - client's handler
+ *
+ * @returns int - Success - 0, else negative error code
+ */
+static int usbpd_release_ss_lane(struct usbpd *pd,
+ struct usbpd_svid_handler *hdlr)
+{
+ int ret = 0;
+
+ if (!hdlr || !pd)
+ return -EINVAL;
+
+ usbpd_dbg(&pd->dev, "hdlr:%pK svid:%d", hdlr, hdlr->svid);
+ /*
+ * If USB SS lanes are already used by one client, and other client is
+ * requesting for same or same client requesting again, return -EBUSY.
+ */
+ if (pd->ss_lane_svid) {
+ usbpd_dbg(&pd->dev, "-EBUSY: ss_lanes are already used by(%d)",
+ pd->ss_lane_svid);
+ ret = -EBUSY;
+ goto err_exit;
+ }
+
+ ret = extcon_blocking_sync(pd->extcon, EXTCON_USB_HOST, 0);
+ if (ret) {
+ usbpd_err(&pd->dev, "err(%d) for releasing ss lane", ret);
+ goto err_exit;
+ }
+
+ pd->ss_lane_svid = hdlr->svid;
+
+err_exit:
+ return ret;
+}
+
static int set_power_role(struct usbpd *pd, enum power_role pr)
{
union power_supply_propval val = {0};
@@ -763,10 +806,8 @@
case PE_SRC_STARTUP:
if (pd->current_dr == DR_NONE) {
pd->current_dr = DR_DFP;
- /*
- * Defer starting USB host mode until PE_SRC_READY or
- * when PE_SRC_SEND_CAPABILITIES fails
- */
+ start_usb_host(pd, true);
+ pd->ss_lane_svid = 0x0;
}
dual_role_instance_changed(pd->dual_role);
@@ -799,17 +840,27 @@
pd->pd_phy_opened = true;
}
- pd->current_state = PE_SRC_SEND_CAPABILITIES;
if (pd->in_pr_swap) {
- kick_sm(pd, SWAP_SOURCE_START_TIME);
pd->in_pr_swap = false;
val.intval = 0;
power_supply_set_property(pd->usb_psy,
POWER_SUPPLY_PROP_PR_SWAP, &val);
- break;
}
- /* fall-through */
+ /*
+ * A sink might remove its terminations (during some Type-C
+ * compliance tests or a sink attempting to do Try.SRC)
+ * at this point just after we enabled VBUS. Sending PD
+ * messages now would delay detecting the detach beyond the
+ * required timing. Instead, delay sending out the first
+ * source capabilities to allow for the other side to
+ * completely settle CC debounce and allow HW to detect detach
+ * sooner in the meantime. PD spec allows up to
+ * tFirstSourceCap (250ms).
+ */
+ pd->current_state = PE_SRC_SEND_CAPABILITIES;
+ kick_sm(pd, FIRST_SOURCE_CAP_TIME);
+ break;
case PE_SRC_SEND_CAPABILITIES:
kick_sm(pd, 0);
@@ -919,6 +970,7 @@
if (pd->psy_type == POWER_SUPPLY_TYPE_USB ||
pd->psy_type == POWER_SUPPLY_TYPE_USB_CDP ||
+ pd->psy_type == POWER_SUPPLY_TYPE_USB_FLOAT ||
usb_compliance_mode)
start_usb_peripheral(pd);
}
@@ -1076,6 +1128,7 @@
usbpd_dbg(&pd->dev, "registered handler for SVID 0x%04x\n", hdlr->svid);
list_add_tail(&hdlr->entry, &pd->svid_handlers);
+ hdlr->request_usb_ss_lane = usbpd_release_ss_lane;
/* already connected with this SVID discovered? */
if (pd->vdm_state >= DISCOVERED_SVIDS) {
@@ -1302,14 +1355,6 @@
if (svid == 0xFF01)
has_dp = true;
}
-
- /*
- * Finally start USB host now that we have determined
- * if DisplayPort mode is present or not and limit USB
- * to HS-only mode if so.
- */
- start_usb_host(pd, !has_dp);
-
break;
default:
@@ -1326,7 +1371,6 @@
switch (cmd) {
case USBPD_SVDM_DISCOVER_IDENTITY:
case USBPD_SVDM_DISCOVER_SVIDS:
- start_usb_host(pd, true);
break;
default:
break;
@@ -1434,6 +1478,7 @@
static void dr_swap(struct usbpd *pd)
{
reset_vdm_state(pd);
+ usbpd_dbg(&pd->dev, "dr_swap: current_dr(%d)\n", pd->current_dr);
if (pd->current_dr == DR_DFP) {
stop_usb_host(pd);
@@ -1441,9 +1486,9 @@
pd->current_dr = DR_UFP;
} else if (pd->current_dr == DR_UFP) {
stop_usb_peripheral(pd);
+ start_usb_host(pd, true);
pd->current_dr = DR_DFP;
- /* don't start USB host until after SVDM discovery */
usbpd_send_svdm(pd, USBPD_SID, USBPD_SVDM_DISCOVER_IDENTITY,
SVDM_CMD_TYPE_INITIATOR, 0, NULL, 0);
}
@@ -1616,7 +1661,6 @@
else if (pd->current_dr == DR_DFP)
stop_usb_host(pd);
- pd->current_pr = PR_NONE;
pd->current_dr = DR_NONE;
if (pd->current_state == PE_ERROR_RECOVERY)
@@ -1723,11 +1767,7 @@
ARRAY_SIZE(default_src_caps), SOP_MSG);
if (ret) {
pd->caps_count++;
-
- if (pd->caps_count == 10 && pd->current_dr == DR_DFP) {
- /* Likely not PD-capable, start host now */
- start_usb_host(pd, true);
- } else if (pd->caps_count >= PD_CAPS_COUNT) {
+ if (pd->caps_count >= PD_CAPS_COUNT) {
usbpd_dbg(&pd->dev, "Src CapsCounter exceeded, disabling PD\n");
usbpd_set_state(pd, PE_SRC_DISABLED);
@@ -2474,6 +2514,16 @@
if (pd->current_pr == PR_SINK)
return 0;
+ /*
+ * Unexpected if not in PR swap; need to force disconnect from
+ * source so we can turn off VBUS, Vconn, PD PHY etc.
+ */
+ if (pd->current_pr == PR_SRC) {
+ usbpd_info(&pd->dev, "Forcing disconnect from source mode\n");
+ pd->current_pr = PR_NONE;
+ break;
+ }
+
pd->current_pr = PR_SINK;
break;
diff --git a/drivers/usb/phy/phy-msm-qusb-v2.c b/drivers/usb/phy/phy-msm-qusb-v2.c
index 1210188e..37d904f 100644
--- a/drivers/usb/phy/phy-msm-qusb-v2.c
+++ b/drivers/usb/phy/phy-msm-qusb-v2.c
@@ -27,29 +27,23 @@
#include <linux/usb/phy.h>
#include <linux/reset.h>
-#define QUSB2PHY_PWR_CTRL1 0x210
+/* QUSB2PHY_PWR_CTRL1 register related bits */
#define PWR_CTRL1_POWR_DOWN BIT(0)
-#define QUSB2PHY_PLL_COMMON_STATUS_ONE 0x1A0
+/* QUSB2PHY_PLL_COMMON_STATUS_ONE register related bits */
#define CORE_READY_STATUS BIT(0)
/* Get TUNE value from efuse bit-mask */
#define TUNE_VAL_MASK(val, pos, mask) ((val >> pos) & mask)
-#define QUSB2PHY_INTR_CTRL 0x22C
+/* QUSB2PHY_INTR_CTRL register related bits */
#define DMSE_INTR_HIGH_SEL BIT(4)
#define DPSE_INTR_HIGH_SEL BIT(3)
#define CHG_DET_INTR_EN BIT(2)
#define DMSE_INTR_EN BIT(1)
#define DPSE_INTR_EN BIT(0)
-#define QUSB2PHY_INTR_STAT 0x230
-#define DMSE_INTERRUPT BIT(1)
-#define DPSE_INTERRUPT BIT(0)
-
-#define QUSB2PHY_PORT_TUNE1 0x23c
-
-#define QUSB2PHY_PLL_CORE_INPUT_OVERRIDE 0x0a8
+/* QUSB2PHY_PLL_CORE_INPUT_OVERRIDE register related bits */
#define CORE_PLL_RATE BIT(0)
#define CORE_PLL_RATE_MUX BIT(1)
#define CORE_PLL_EN BIT(2)
@@ -73,8 +67,19 @@
module_param(phy_tune1, uint, 0644);
MODULE_PARM_DESC(phy_tune1, "QUSB PHY v2 TUNE1");
+enum qusb_phy_reg {
+ PORT_TUNE1,
+ PLL_COMMON_STATUS_ONE,
+ PWR_CTRL1,
+ INTR_CTRL,
+ PLL_CORE_INPUT_OVERRIDE,
+ TEST1,
+ USB2_PHY_REG_MAX,
+};
+
struct qusb_phy {
struct usb_phy phy;
+ struct mutex lock;
void __iomem *base;
void __iomem *efuse_reg;
@@ -92,16 +97,18 @@
int host_init_seq_len;
int *qusb_phy_host_init_seq;
+ unsigned int *phy_reg;
+ int qusb_phy_reg_offset_cnt;
+
u32 tune_val;
- u32 phy_auto_resume_offset;
int efuse_bit_pos;
int efuse_num_of_bits;
- bool power_enabled;
+ int power_enabled_ref;
bool clocks_enabled;
bool cable_connected;
bool suspended;
- bool rm_pulldown;
+ bool dpdm_enable;
struct regulator_desc dpdm_rdesc;
struct regulator_dev *dpdm_rdev;
@@ -165,35 +172,47 @@
return ret;
}
-static int qusb_phy_enable_power(struct qusb_phy *qphy, bool on,
- bool toggle_vdd)
+static int qusb_phy_enable_power(struct qusb_phy *qphy, bool on)
{
int ret = 0;
- dev_dbg(qphy->phy.dev, "%s turn %s regulators. power_enabled:%d\n",
- __func__, on ? "on" : "off", qphy->power_enabled);
+ mutex_lock(&qphy->lock);
- if (toggle_vdd && qphy->power_enabled == on) {
- dev_dbg(qphy->phy.dev, "PHYs' regulators are already ON.\n");
- return 0;
+ dev_dbg(qphy->phy.dev,
+ "%s:req to turn %s regulators. power_enabled_ref:%d\n",
+ __func__, on ? "on" : "off", qphy->power_enabled_ref);
+
+ if (on && ++qphy->power_enabled_ref > 1) {
+ dev_dbg(qphy->phy.dev, "PHYs' regulators are already on\n");
+ goto done;
}
- if (!on)
- goto disable_vdda33;
-
- if (toggle_vdd) {
- ret = qusb_phy_config_vdd(qphy, true);
- if (ret) {
- dev_err(qphy->phy.dev, "Unable to config VDD:%d\n",
- ret);
- goto err_vdd;
+ if (!on) {
+ if (on == qphy->power_enabled_ref) {
+ dev_dbg(qphy->phy.dev,
+ "PHYs' regulators are already off\n");
+ goto done;
}
- ret = regulator_enable(qphy->vdd);
- if (ret) {
- dev_err(qphy->phy.dev, "Unable to enable VDD\n");
- goto unconfig_vdd;
- }
+ qphy->power_enabled_ref--;
+ if (!qphy->power_enabled_ref)
+ goto disable_vdda33;
+
+ dev_dbg(qphy->phy.dev, "Skip turning off PHYs' regulators\n");
+ goto done;
+ }
+
+ ret = qusb_phy_config_vdd(qphy, true);
+ if (ret) {
+ dev_err(qphy->phy.dev, "Unable to config VDD:%d\n",
+ ret);
+ goto err_vdd;
+ }
+
+ ret = regulator_enable(qphy->vdd);
+ if (ret) {
+ dev_err(qphy->phy.dev, "Unable to enable VDD\n");
+ goto unconfig_vdd;
}
ret = regulator_set_load(qphy->vdda18, QUSB2PHY_1P8_HPM_LOAD);
@@ -236,10 +255,9 @@
goto unset_vdd33;
}
- if (toggle_vdd)
- qphy->power_enabled = true;
-
pr_debug("%s(): QUSB PHY's regulators are turned ON.\n", __func__);
+
+ mutex_unlock(&qphy->lock);
return ret;
disable_vdda33:
@@ -275,22 +293,24 @@
dev_err(qphy->phy.dev, "Unable to set LPM of vdda18\n");
disable_vdd:
- if (toggle_vdd) {
- ret = regulator_disable(qphy->vdd);
- if (ret)
- dev_err(qphy->phy.dev, "Unable to disable vdd:%d\n",
- ret);
+ ret = regulator_disable(qphy->vdd);
+ if (ret)
+ dev_err(qphy->phy.dev, "Unable to disable vdd:%d\n",
+ ret);
unconfig_vdd:
- ret = qusb_phy_config_vdd(qphy, false);
- if (ret)
- dev_err(qphy->phy.dev, "Unable unconfig VDD:%d\n",
- ret);
- }
+ ret = qusb_phy_config_vdd(qphy, false);
+ if (ret)
+ dev_err(qphy->phy.dev, "Unable unconfig VDD:%d\n",
+ ret);
err_vdd:
- if (toggle_vdd)
- qphy->power_enabled = false;
dev_dbg(qphy->phy.dev, "QUSB PHY's regulators are turned OFF.\n");
+
+ /* in case of error in turning on regulators */
+ if (qphy->power_enabled_ref)
+ qphy->power_enabled_ref--;
+done:
+ mutex_unlock(&qphy->lock);
return ret;
}
@@ -316,7 +336,7 @@
qphy->tune_val = TUNE_VAL_MASK(qphy->tune_val,
qphy->efuse_bit_pos, bit_mask);
- reg = readb_relaxed(qphy->base + QUSB2PHY_PORT_TUNE1);
+ reg = readb_relaxed(qphy->base + qphy->phy_reg[PORT_TUNE1]);
if (qphy->tune_val) {
reg = reg & 0x0f;
reg |= (qphy->tune_val << 4);
@@ -372,7 +392,7 @@
/* Require to get phy pll lock successfully */
usleep_range(150, 160);
- reg = readb_relaxed(qphy->base + QUSB2PHY_PLL_COMMON_STATUS_ONE);
+ reg = readb_relaxed(qphy->base + qphy->phy_reg[PLL_COMMON_STATUS_ONE]);
dev_dbg(phy->dev, "QUSB2PHY_PLL_COMMON_STATUS_ONE:%x\n", reg);
if (!(reg & CORE_READY_STATUS)) {
dev_err(phy->dev, "QUSB PHY PLL LOCK fails:%x\n", reg);
@@ -388,7 +408,7 @@
dev_dbg(phy->dev, "%s\n", __func__);
- ret = qusb_phy_enable_power(qphy, true, true);
+ ret = qusb_phy_enable_power(qphy, true);
if (ret)
return ret;
@@ -421,9 +441,9 @@
}
/* Disable the PHY */
- writel_relaxed(readl_relaxed(qphy->base + QUSB2PHY_PWR_CTRL1) |
+ writel_relaxed(readl_relaxed(qphy->base + qphy->phy_reg[PWR_CTRL1]) |
PWR_CTRL1_POWR_DOWN,
- qphy->base + QUSB2PHY_PWR_CTRL1);
+ qphy->base + qphy->phy_reg[PWR_CTRL1]);
if (qphy->qusb_phy_init_seq)
qusb_phy_write_seq(qphy->base, qphy->qusb_phy_init_seq,
@@ -435,7 +455,7 @@
pr_debug("%s(): Programming TUNE1 parameter as:%x\n", __func__,
qphy->tune_val);
writel_relaxed(qphy->tune_val,
- qphy->base + QUSB2PHY_PORT_TUNE1);
+ qphy->base + qphy->phy_reg[PORT_TUNE1]);
}
/* If phy_tune1 modparam set, override tune1 value */
@@ -443,16 +463,16 @@
pr_debug("%s(): (modparam) TUNE1 val:0x%02x\n",
__func__, phy_tune1);
writel_relaxed(phy_tune1,
- qphy->base + QUSB2PHY_PORT_TUNE1);
+ qphy->base + qphy->phy_reg[PORT_TUNE1]);
}
/* ensure above writes are completed before re-enabling PHY */
wmb();
/* Enable the PHY */
- writel_relaxed(readl_relaxed(qphy->base + QUSB2PHY_PWR_CTRL1) &
+ writel_relaxed(readl_relaxed(qphy->base + qphy->phy_reg[PWR_CTRL1]) &
~PWR_CTRL1_POWR_DOWN,
- qphy->base + QUSB2PHY_PWR_CTRL1);
+ qphy->base + qphy->phy_reg[PWR_CTRL1]);
/* Ensure above write is completed before turning ON ref clk */
wmb();
@@ -460,7 +480,7 @@
/* Require to get phy pll lock successfully */
usleep_range(150, 160);
- reg = readb_relaxed(qphy->base + QUSB2PHY_PLL_COMMON_STATUS_ONE);
+ reg = readb_relaxed(qphy->base + qphy->phy_reg[PLL_COMMON_STATUS_ONE]);
dev_dbg(phy->dev, "QUSB2PHY_PLL_COMMON_STATUS_ONE:%x\n", reg);
if (!(reg & CORE_READY_STATUS)) {
dev_err(phy->dev, "QUSB PHY PLL LOCK fails:%x\n", reg);
@@ -478,9 +498,9 @@
qusb_phy_enable_clocks(qphy, true);
/* Disable the PHY */
- writel_relaxed(readl_relaxed(qphy->base + QUSB2PHY_PWR_CTRL1) |
+ writel_relaxed(readl_relaxed(qphy->base + qphy->phy_reg[PWR_CTRL1]) |
PWR_CTRL1_POWR_DOWN,
- qphy->base + QUSB2PHY_PWR_CTRL1);
+ qphy->base + qphy->phy_reg[PWR_CTRL1]);
/* Makes sure that above write goes through */
wmb();
@@ -525,7 +545,7 @@
(qphy->phy.flags & PHY_HOST_MODE)) {
/* Disable all interrupts */
writel_relaxed(0x00,
- qphy->base + QUSB2PHY_INTR_CTRL);
+ qphy->base + qphy->phy_reg[INTR_CTRL]);
linestate = qusb_phy_get_linestate(qphy);
/*
@@ -537,29 +557,27 @@
* e.g. if currently D+ high, D- low (HS 'J'/Suspend),
* configure the mask to trigger on D+ low OR D- high
*/
- intr_mask = DMSE_INTERRUPT | DPSE_INTERRUPT;
+ intr_mask = DPSE_INTR_EN | DMSE_INTR_EN;
if (!(linestate & LINESTATE_DP)) /* D+ low */
intr_mask |= DPSE_INTR_HIGH_SEL;
if (!(linestate & LINESTATE_DM)) /* D- low */
intr_mask |= DMSE_INTR_HIGH_SEL;
writel_relaxed(intr_mask,
- qphy->base + QUSB2PHY_INTR_CTRL);
+ qphy->base + qphy->phy_reg[INTR_CTRL]);
/* hold core PLL into reset */
writel_relaxed(CORE_PLL_EN_FROM_RESET |
CORE_RESET | CORE_RESET_MUX,
- qphy->base + QUSB2PHY_PLL_CORE_INPUT_OVERRIDE);
+ qphy->base +
+ qphy->phy_reg[PLL_CORE_INPUT_OVERRIDE]);
- if (qphy->phy_auto_resume_offset) {
- /* enable phy auto-resume */
- writel_relaxed(0x91,
- qphy->base + qphy->phy_auto_resume_offset);
- /* flush the previous write before next write */
- wmb();
- writel_relaxed(0x90,
- qphy->base + qphy->phy_auto_resume_offset);
- }
+ /* enable phy auto-resume */
+ writel_relaxed(0x91, qphy->base + qphy->phy_reg[TEST1]);
+ /* flush the previous write before next write */
+ wmb();
+ writel_relaxed(0x90, qphy->base + qphy->phy_reg[TEST1]);
+
dev_dbg(phy->dev, "%s: intr_mask = %x\n",
__func__, intr_mask);
@@ -569,10 +587,10 @@
} else { /* Cable disconnect case */
/* Disable all interrupts */
writel_relaxed(0x00,
- qphy->base + QUSB2PHY_INTR_CTRL);
+ qphy->base + qphy->phy_reg[INTR_CTRL]);
qusb_phy_reset(qphy);
qusb_phy_enable_clocks(qphy, false);
- qusb_phy_enable_power(qphy, false, true);
+ qusb_phy_enable_power(qphy, false);
}
qphy->suspended = true;
} else {
@@ -582,16 +600,15 @@
qusb_phy_enable_clocks(qphy, true);
/* Clear all interrupts on resume */
writel_relaxed(0x00,
- qphy->base + QUSB2PHY_INTR_CTRL);
+ qphy->base + qphy->phy_reg[INTR_CTRL]);
/* bring core PLL out of reset */
- writel_relaxed(CORE_PLL_EN_FROM_RESET,
- qphy->base + QUSB2PHY_PLL_CORE_INPUT_OVERRIDE);
+ writel_relaxed(CORE_PLL_EN_FROM_RESET, qphy->base +
+ qphy->phy_reg[PLL_CORE_INPUT_OVERRIDE]);
/* Makes sure that above write goes through */
wmb();
} else { /* Cable connect case */
- qusb_phy_enable_power(qphy, true, true);
qusb_phy_enable_clocks(qphy, true);
}
qphy->suspended = false;
@@ -632,15 +649,17 @@
int ret = 0;
struct qusb_phy *qphy = rdev_get_drvdata(rdev);
- dev_dbg(qphy->phy.dev, "%s\n", __func__);
+ dev_dbg(qphy->phy.dev, "%s dpdm_enable:%d\n",
+ __func__, qphy->dpdm_enable);
- if (qphy->rm_pulldown) {
- ret = qusb_phy_enable_power(qphy, true, false);
- if (ret >= 0) {
- qphy->rm_pulldown = true;
- dev_dbg(qphy->phy.dev, "dpdm_enable:rm_pulldown:%d\n",
- qphy->rm_pulldown);
+ if (!qphy->dpdm_enable) {
+ ret = qusb_phy_enable_power(qphy, true);
+ if (ret < 0) {
+ dev_dbg(qphy->phy.dev,
+ "dpdm regulator enable failed:%d\n", ret);
+ return ret;
}
+ qphy->dpdm_enable = true;
}
return ret;
@@ -651,15 +670,17 @@
int ret = 0;
struct qusb_phy *qphy = rdev_get_drvdata(rdev);
- dev_dbg(qphy->phy.dev, "%s\n", __func__);
+ dev_dbg(qphy->phy.dev, "%s dpdm_enable:%d\n",
+ __func__, qphy->dpdm_enable);
- if (!qphy->rm_pulldown) {
- ret = qusb_phy_enable_power(qphy, false, false);
- if (ret >= 0) {
- qphy->rm_pulldown = false;
- dev_dbg(qphy->phy.dev, "dpdm_disable:rm_pulldown:%d\n",
- qphy->rm_pulldown);
+ if (qphy->dpdm_enable) {
+ ret = qusb_phy_enable_power(qphy, false);
+ if (ret < 0) {
+ dev_dbg(qphy->phy.dev,
+ "dpdm regulator disable failed:%d\n", ret);
+ return ret;
}
+ qphy->dpdm_enable = false;
}
return ret;
@@ -669,9 +690,9 @@
{
struct qusb_phy *qphy = rdev_get_drvdata(rdev);
- dev_dbg(qphy->phy.dev, "%s qphy->rm_pulldown = %d\n", __func__,
- qphy->rm_pulldown);
- return qphy->rm_pulldown;
+ dev_dbg(qphy->phy.dev, "%s qphy->dpdm_enable = %d\n", __func__,
+ qphy->dpdm_enable);
+ return qphy->dpdm_enable;
}
static struct regulator_ops qusb_phy_dpdm_regulator_ops = {
@@ -870,6 +891,31 @@
}
size = 0;
+ of_get_property(dev->of_node, "qcom,qusb-phy-reg-offset", &size);
+ if (size) {
+ qphy->phy_reg = devm_kzalloc(dev, size, GFP_KERNEL);
+ if (qphy->phy_reg) {
+ qphy->qusb_phy_reg_offset_cnt =
+ size / sizeof(*qphy->phy_reg);
+ if (qphy->qusb_phy_reg_offset_cnt > USB2_PHY_REG_MAX) {
+ dev_err(dev, "invalid reg offset count\n");
+ return -EINVAL;
+ }
+
+ of_property_read_u32_array(dev->of_node,
+ "qcom,qusb-phy-reg-offset",
+ qphy->phy_reg,
+ qphy->qusb_phy_reg_offset_cnt);
+ } else {
+ dev_err(dev, "err mem alloc for qusb_phy_reg_offset\n");
+ return -ENOMEM;
+ }
+ } else {
+ dev_err(dev, "err provide qcom,qmp-phy-reg-offset\n");
+ return -EINVAL;
+ }
+
+ size = 0;
of_get_property(dev->of_node, "qcom,qusb-phy-init-seq", &size);
if (size) {
qphy->qusb_phy_init_seq = devm_kzalloc(dev,
@@ -917,12 +963,6 @@
return ret;
}
- ret = of_property_read_u32(dev->of_node, "qcom,phy-auto-resume-offset",
- &qphy->phy_auto_resume_offset);
- if (ret)
- dev_dbg(dev, "error reading qcom,phy-auto-resume-offset %d\n",
- ret);
-
qphy->vdd = devm_regulator_get(dev, "vdd");
if (IS_ERR(qphy->vdd)) {
dev_err(dev, "unable to get vdd supply\n");
@@ -941,6 +981,7 @@
return PTR_ERR(qphy->vdda18);
}
+ mutex_init(&qphy->lock);
platform_set_drvdata(pdev, qphy);
qphy->phy.label = "msm-qusb-phy-v2";
@@ -968,7 +1009,7 @@
usb_remove_phy(&qphy->phy);
qusb_phy_enable_clocks(qphy, false);
- qusb_phy_enable_power(qphy, false, true);
+ qusb_phy_enable_power(qphy, false);
return 0;
}
diff --git a/drivers/usb/phy/phy-msm-qusb.c b/drivers/usb/phy/phy-msm-qusb.c
index 76b034e..e355e35 100644
--- a/drivers/usb/phy/phy-msm-qusb.c
+++ b/drivers/usb/phy/phy-msm-qusb.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -130,7 +130,7 @@
bool cable_connected;
bool suspended;
bool ulpi_mode;
- bool rm_pulldown;
+ bool dpdm_enable;
bool is_se_clk;
struct regulator_desc dpdm_rdesc;
@@ -673,15 +673,17 @@
int ret = 0;
struct qusb_phy *qphy = rdev_get_drvdata(rdev);
- dev_dbg(qphy->phy.dev, "%s\n", __func__);
+ dev_dbg(qphy->phy.dev, "%s dpdm_enable:%d\n",
+ __func__, qphy->dpdm_enable);
- if (qphy->rm_pulldown) {
+ if (!qphy->dpdm_enable) {
ret = qusb_phy_enable_power(qphy, true, false);
- if (ret >= 0) {
- qphy->rm_pulldown = true;
- dev_dbg(qphy->phy.dev, "dpdm_enable:rm_pulldown:%d\n",
- qphy->rm_pulldown);
+ if (ret < 0) {
+ dev_dbg(qphy->phy.dev,
+ "dpdm regulator enable failed:%d\n", ret);
+ return ret;
}
+ qphy->dpdm_enable = true;
}
return ret;
@@ -692,15 +694,17 @@
int ret = 0;
struct qusb_phy *qphy = rdev_get_drvdata(rdev);
- dev_dbg(qphy->phy.dev, "%s\n", __func__);
+ dev_dbg(qphy->phy.dev, "%s dpdm_enable:%d\n",
+ __func__, qphy->dpdm_enable);
- if (!qphy->rm_pulldown) {
+ if (qphy->dpdm_enable) {
ret = qusb_phy_enable_power(qphy, false, false);
- if (ret >= 0) {
- qphy->rm_pulldown = false;
- dev_dbg(qphy->phy.dev, "dpdm_disable:rm_pulldown:%d\n",
- qphy->rm_pulldown);
+ if (ret < 0) {
+ dev_dbg(qphy->phy.dev,
+ "dpdm regulator disable failed:%d\n", ret);
+ return ret;
}
+ qphy->dpdm_enable = false;
}
return ret;
@@ -710,9 +714,9 @@
{
struct qusb_phy *qphy = rdev_get_drvdata(rdev);
- dev_dbg(qphy->phy.dev, "%s qphy->rm_pulldown = %d\n", __func__,
- qphy->rm_pulldown);
- return qphy->rm_pulldown;
+ dev_dbg(qphy->phy.dev, "%s qphy->dpdm_enable = %d\n", __func__,
+ qphy->dpdm_enable);
+ return qphy->dpdm_enable;
}
static struct regulator_ops qusb_phy_dpdm_regulator_ops = {
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index 012a37a..7994208 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -752,8 +752,10 @@
struct usbhs_priv *priv = dev_get_drvdata(dev);
struct platform_device *pdev = usbhs_priv_to_pdev(priv);
- if (!usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL))
+ if (!usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL)) {
usbhsc_power_ctrl(priv, 1);
+ usbhs_mod_autonomy_mode(priv);
+ }
usbhs_platform_call(priv, phy_reset, pdev);
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 5bc7a61..93fba90 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -37,6 +37,7 @@
struct usbhsg_uep {
struct usb_ep ep;
struct usbhs_pipe *pipe;
+ spinlock_t lock; /* protect the pipe */
char ep_name[EP_NAME_SIZE];
@@ -636,10 +637,16 @@
static int usbhsg_ep_disable(struct usb_ep *ep)
{
struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
- struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
+ struct usbhs_pipe *pipe;
+ unsigned long flags;
+ int ret = 0;
- if (!pipe)
- return -EINVAL;
+ spin_lock_irqsave(&uep->lock, flags);
+ pipe = usbhsg_uep_to_pipe(uep);
+ if (!pipe) {
+ ret = -EINVAL;
+ goto out;
+ }
usbhsg_pipe_disable(uep);
usbhs_pipe_free(pipe);
@@ -647,6 +654,9 @@
uep->pipe->mod_private = NULL;
uep->pipe = NULL;
+out:
+ spin_unlock_irqrestore(&uep->lock, flags);
+
return 0;
}
@@ -696,8 +706,11 @@
{
struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
struct usbhsg_request *ureq = usbhsg_req_to_ureq(req);
- struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
+ struct usbhs_pipe *pipe;
+ unsigned long flags;
+ spin_lock_irqsave(&uep->lock, flags);
+ pipe = usbhsg_uep_to_pipe(uep);
if (pipe)
usbhs_pkt_pop(pipe, usbhsg_ureq_to_pkt(ureq));
@@ -706,6 +719,7 @@
* even if the pipe is NULL.
*/
usbhsg_queue_pop(uep, ureq, -ECONNRESET);
+ spin_unlock_irqrestore(&uep->lock, flags);
return 0;
}
@@ -852,10 +866,10 @@
{
struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
struct usbhs_mod *mod = usbhs_mod_get_current(priv);
- struct usbhsg_uep *dcp = usbhsg_gpriv_to_dcp(gpriv);
+ struct usbhsg_uep *uep;
struct device *dev = usbhs_priv_to_dev(priv);
unsigned long flags;
- int ret = 0;
+ int ret = 0, i;
/******************** spin lock ********************/
usbhs_lock(priv, flags);
@@ -887,7 +901,9 @@
usbhs_sys_set_test_mode(priv, 0);
usbhs_sys_function_ctrl(priv, 0);
- usbhsg_ep_disable(&dcp->ep);
+ /* disable all eps */
+ usbhsg_for_each_uep_with_dcp(uep, gpriv, i)
+ usbhsg_ep_disable(&uep->ep);
dev_dbg(dev, "stop gadget\n");
@@ -1069,6 +1085,7 @@
ret = -ENOMEM;
goto usbhs_mod_gadget_probe_err_gpriv;
}
+ spin_lock_init(&uep->lock);
gpriv->transceiver = usb_get_phy(USB_PHY_TYPE_UNDEFINED);
dev_info(dev, "%stransceiver found\n",
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 8bb4875..84b444f 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -135,6 +135,7 @@
{ USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
{ USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
{ USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
+ { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 3bf61ac..ebe51f11 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1877,6 +1877,10 @@
.driver_info = (kernel_ulong_t)&four_g_w100_blacklist
},
{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
+ { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9801, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9803, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
{ USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index fd509ed6c..652b433 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -158,6 +158,7 @@
{DEVICE_SWI(0x1199, 0x9056)}, /* Sierra Wireless Modem */
{DEVICE_SWI(0x1199, 0x9060)}, /* Sierra Wireless Modem */
{DEVICE_SWI(0x1199, 0x9061)}, /* Sierra Wireless Modem */
+ {DEVICE_SWI(0x1199, 0x9063)}, /* Sierra Wireless EM7305 */
{DEVICE_SWI(0x1199, 0x9070)}, /* Sierra Wireless MC74xx */
{DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */
{DEVICE_SWI(0x1199, 0x9078)}, /* Sierra Wireless EM74xx */
diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c
index fba4005..6a7720e 100644
--- a/drivers/usb/storage/isd200.c
+++ b/drivers/usb/storage/isd200.c
@@ -1529,8 +1529,11 @@
/* Make sure driver was initialized */
- if (us->extra == NULL)
+ if (us->extra == NULL) {
usb_stor_dbg(us, "ERROR Driver not initialized\n");
+ srb->result = DID_ERROR << 16;
+ return;
+ }
scsi_set_resid(srb, 0);
/* scsi_bufflen might change in protocol translation to ata */
diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
index 44ab43f..af10f7b 100644
--- a/drivers/usb/usbip/stub_main.c
+++ b/drivers/usb/usbip/stub_main.c
@@ -262,7 +262,11 @@
kmem_cache_free(stub_priv_cache, priv);
kfree(urb->transfer_buffer);
+ urb->transfer_buffer = NULL;
+
kfree(urb->setup_packet);
+ urb->setup_packet = NULL;
+
usb_free_urb(urb);
}
}
diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c
index 6b1e8c3..be50cef 100644
--- a/drivers/usb/usbip/stub_tx.c
+++ b/drivers/usb/usbip/stub_tx.c
@@ -28,7 +28,11 @@
struct urb *urb = priv->urb;
kfree(urb->setup_packet);
+ urb->setup_packet = NULL;
+
kfree(urb->transfer_buffer);
+ urb->transfer_buffer = NULL;
+
list_del(&priv->list);
kmem_cache_free(stub_priv_cache, priv);
usb_free_urb(urb);
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 03eccf2..d6dc165 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -235,14 +235,19 @@
static inline void hub_descriptor(struct usb_hub_descriptor *desc)
{
+ int width;
+
memset(desc, 0, sizeof(*desc));
desc->bDescriptorType = USB_DT_HUB;
- desc->bDescLength = 9;
desc->wHubCharacteristics = cpu_to_le16(
HUB_CHAR_INDV_PORT_LPSM | HUB_CHAR_COMMON_OCPM);
+
desc->bNbrPorts = VHCI_HC_PORTS;
- desc->u.hs.DeviceRemovable[0] = 0xff;
- desc->u.hs.DeviceRemovable[1] = 0xff;
+ BUILD_BUG_ON(VHCI_HC_PORTS > USB_MAXCHILDREN);
+ width = desc->bNbrPorts / 8 + 1;
+ desc->bDescLength = USB_DT_HUB_NONVAR_SIZE + 2 * width;
+ memset(&desc->u.hs.DeviceRemovable[0], 0, width);
+ memset(&desc->u.hs.DeviceRemovable[width], 0xff, width);
}
static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index d1d70e0..881fc3a 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -419,6 +419,34 @@
kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock);
}
+struct vfio_group_put_work {
+ struct work_struct work;
+ struct vfio_group *group;
+};
+
+static void vfio_group_put_bg(struct work_struct *work)
+{
+ struct vfio_group_put_work *do_work;
+
+ do_work = container_of(work, struct vfio_group_put_work, work);
+
+ vfio_group_put(do_work->group);
+ kfree(do_work);
+}
+
+static void vfio_group_schedule_put(struct vfio_group *group)
+{
+ struct vfio_group_put_work *do_work;
+
+ do_work = kmalloc(sizeof(*do_work), GFP_KERNEL);
+ if (WARN_ON(!do_work))
+ return;
+
+ INIT_WORK(&do_work->work, vfio_group_put_bg);
+ do_work->group = group;
+ schedule_work(&do_work->work);
+}
+
/* Assume group_lock or group reference is held */
static void vfio_group_get(struct vfio_group *group)
{
@@ -743,7 +771,14 @@
break;
}
- vfio_group_put(group);
+ /*
+ * If we're the last reference to the group, the group will be
+ * released, which includes unregistering the iommu group notifier.
+ * We hold a read-lock on that notifier list, unregistering needs
+ * a write-lock... deadlock. Release our reference asynchronously
+ * to avoid that situation.
+ */
+ vfio_group_schedule_put(group);
return NOTIFY_OK;
}
@@ -1716,6 +1751,15 @@
}
EXPORT_SYMBOL_GPL(vfio_group_put_external_user);
+bool vfio_external_group_match_file(struct vfio_group *test_group,
+ struct file *filep)
+{
+ struct vfio_group *group = filep->private_data;
+
+ return (filep->f_op == &vfio_group_fops) && (group == test_group);
+}
+EXPORT_SYMBOL_GPL(vfio_external_group_match_file);
+
int vfio_external_user_iommu_id(struct vfio_group *group)
{
return iommu_group_id(group->iommu_group);
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index c882357..85d3e64 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -1246,6 +1246,8 @@
static long tce_iommu_take_ownership_ddw(struct tce_container *container,
struct iommu_table_group *table_group)
{
+ long i, ret = 0;
+
if (!table_group->ops->create_table || !table_group->ops->set_window ||
!table_group->ops->release_ownership) {
WARN_ON_ONCE(1);
@@ -1254,7 +1256,27 @@
table_group->ops->take_ownership(table_group);
+ /* Set all windows to the new group */
+ for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
+ struct iommu_table *tbl = container->tables[i];
+
+ if (!tbl)
+ continue;
+
+ ret = table_group->ops->set_window(table_group, i, tbl);
+ if (ret)
+ goto release_exit;
+ }
+
return 0;
+
+release_exit:
+ for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
+ table_group->ops->unset_window(table_group, i);
+
+ table_group->ops->release_ownership(table_group);
+
+ return ret;
}
static int tce_iommu_attach_group(void *iommu_data,
@@ -1270,6 +1292,10 @@
/* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
iommu_group_id(iommu_group), iommu_group); */
table_group = iommu_group_get_iommudata(iommu_group);
+ if (!table_group) {
+ ret = -ENODEV;
+ goto unlock_exit;
+ }
if (tce_groups_attached(container) && (!table_group->ops ||
!table_group->ops->take_ownership ||
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index a504e2e0..e3fad30 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -368,6 +368,7 @@
static int vhost_vsock_start(struct vhost_vsock *vsock)
{
+ struct vhost_virtqueue *vq;
size_t i;
int ret;
@@ -378,19 +379,20 @@
goto err;
for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
- struct vhost_virtqueue *vq = &vsock->vqs[i];
+ vq = &vsock->vqs[i];
mutex_lock(&vq->mutex);
if (!vhost_vq_access_ok(vq)) {
ret = -EFAULT;
- mutex_unlock(&vq->mutex);
goto err_vq;
}
if (!vq->private_data) {
vq->private_data = vsock;
- vhost_vq_init_access(vq);
+ ret = vhost_vq_init_access(vq);
+ if (ret)
+ goto err_vq;
}
mutex_unlock(&vq->mutex);
@@ -400,8 +402,11 @@
return 0;
err_vq:
+ vq->private_data = NULL;
+ mutex_unlock(&vq->mutex);
+
for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
- struct vhost_virtqueue *vq = &vsock->vqs[i];
+ vq = &vsock->vqs[i];
mutex_lock(&vq->mutex);
vq->private_data = NULL;
diff --git a/drivers/video/adf/adf_client.c b/drivers/video/adf/adf_client.c
index 8061d8e..75b2f0b 100644
--- a/drivers/video/adf/adf_client.c
+++ b/drivers/video/adf/adf_client.c
@@ -305,8 +305,10 @@
}
done:
- if (ret < 0)
+ if (ret < 0) {
adf_buffer_mapping_cleanup(mapping, buf);
+ memset(mapping, 0, sizeof(*mapping));
+ }
return ret;
}
diff --git a/drivers/watchdog/bcm_kona_wdt.c b/drivers/watchdog/bcm_kona_wdt.c
index e0c9842..11a72bc 100644
--- a/drivers/watchdog/bcm_kona_wdt.c
+++ b/drivers/watchdog/bcm_kona_wdt.c
@@ -304,6 +304,8 @@
if (!wdt)
return -ENOMEM;
+ spin_lock_init(&wdt->lock);
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
wdt->base = devm_ioremap_resource(dev, res);
if (IS_ERR(wdt->base))
@@ -316,7 +318,6 @@
return ret;
}
- spin_lock_init(&wdt->lock);
platform_set_drvdata(pdev, wdt);
watchdog_set_drvdata(&bcm_kona_wdt_wdd, wdt);
bcm_kona_wdt_wdd.parent = &pdev->dev;
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 8e7a3d6..679f79f 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -409,9 +409,9 @@
if (map == SWIOTLB_MAP_ERROR)
return DMA_ERROR_CODE;
+ dev_addr = xen_phys_to_bus(map);
xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
dev_addr, map & ~PAGE_MASK, size, dir, attrs);
- dev_addr = xen_phys_to_bus(map);
/*
* Ensure that the address returned is DMA'ble
@@ -567,13 +567,14 @@
sg_dma_len(sgl) = 0;
return 0;
}
+ dev_addr = xen_phys_to_bus(map);
xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
dev_addr,
map & ~PAGE_MASK,
sg->length,
dir,
attrs);
- sg->dma_address = xen_phys_to_bus(map);
+ sg->dma_address = dev_addr;
} else {
/* we are not interested in the dma_addr returned by
* xen_dma_map_page, only in the potential cache flushes executed
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index d6950e0..980f328 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -134,9 +134,7 @@
struct page *pages[VSCSI_MAX_GRANTS];
struct se_cmd se_cmd;
-};
-struct scsiback_tmr {
atomic_t tmr_complete;
wait_queue_head_t tmr_wait;
};
@@ -599,26 +597,20 @@
struct scsiback_tpg *tpg = pending_req->v2p->tpg;
struct scsiback_nexus *nexus = tpg->tpg_nexus;
struct se_cmd *se_cmd = &pending_req->se_cmd;
- struct scsiback_tmr *tmr;
u64 unpacked_lun = pending_req->v2p->lun;
int rc, err = FAILED;
- tmr = kzalloc(sizeof(struct scsiback_tmr), GFP_KERNEL);
- if (!tmr) {
- target_put_sess_cmd(se_cmd);
- goto err;
- }
-
- init_waitqueue_head(&tmr->tmr_wait);
+ init_waitqueue_head(&pending_req->tmr_wait);
rc = target_submit_tmr(&pending_req->se_cmd, nexus->tvn_se_sess,
&pending_req->sense_buffer[0],
- unpacked_lun, tmr, act, GFP_KERNEL,
+ unpacked_lun, NULL, act, GFP_KERNEL,
tag, TARGET_SCF_ACK_KREF);
if (rc)
goto err;
- wait_event(tmr->tmr_wait, atomic_read(&tmr->tmr_complete));
+ wait_event(pending_req->tmr_wait,
+ atomic_read(&pending_req->tmr_complete));
err = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ?
SUCCESS : FAILED;
@@ -626,9 +618,8 @@
scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
transport_generic_free_cmd(&pending_req->se_cmd, 1);
return;
+
err:
- if (tmr)
- kfree(tmr);
scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
}
@@ -1389,12 +1380,6 @@
static void scsiback_release_cmd(struct se_cmd *se_cmd)
{
struct se_session *se_sess = se_cmd->se_sess;
- struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
-
- if (se_tmr && se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
- struct scsiback_tmr *tmr = se_tmr->fabric_tmr_ptr;
- kfree(tmr);
- }
percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
}
@@ -1455,11 +1440,11 @@
static void scsiback_queue_tm_rsp(struct se_cmd *se_cmd)
{
- struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
- struct scsiback_tmr *tmr = se_tmr->fabric_tmr_ptr;
+ struct vscsibk_pend *pending_req = container_of(se_cmd,
+ struct vscsibk_pend, se_cmd);
- atomic_set(&tmr->tmr_complete, 1);
- wake_up(&tmr->tmr_wait);
+ atomic_set(&pending_req->tmr_complete, 1);
+ wake_up(&pending_req->tmr_wait);
}
static void scsiback_aborted_task(struct se_cmd *se_cmd)
diff --git a/fs/aio.c b/fs/aio.c
index 428484f..0fcb49a 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1085,7 +1085,8 @@
* Tell lockdep we inherited freeze protection from submission
* thread.
*/
- __sb_writers_acquired(file_inode(file)->i_sb, SB_FREEZE_WRITE);
+ if (S_ISREG(file_inode(file)->i_mode))
+ __sb_writers_acquired(file_inode(file)->i_sb, SB_FREEZE_WRITE);
file_end_write(file);
}
@@ -1492,7 +1493,8 @@
* by telling it the lock got released so that it doesn't
* complain about held lock when we return to userspace.
*/
- __sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE);
+ if (S_ISREG(file_inode(file)->i_mode))
+ __sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE);
}
kfree(iovec);
return ret;
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index fc09eb7..ffc69dd 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -345,7 +345,7 @@
int status;
token = (autofs_wqt_t) param->fail.token;
- status = param->fail.status ? param->fail.status : -ENOENT;
+ status = param->fail.status < 0 ? param->fail.status : -ENOENT;
return autofs4_wait_release(sbi, token, status);
}
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 2472af2..1fdf4e5 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -911,17 +911,60 @@
elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;
vaddr = elf_ppnt->p_vaddr;
+ /*
+ * If we are loading ET_EXEC or we have already performed
+ * the ET_DYN load_addr calculations, proceed normally.
+ */
if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
elf_flags |= MAP_FIXED;
} else if (loc->elf_ex.e_type == ET_DYN) {
- /* Try and get dynamic programs out of the way of the
- * default mmap base, as well as whatever program they
- * might try to exec. This is because the brk will
- * follow the loader, and is not movable. */
- load_bias = ELF_ET_DYN_BASE - vaddr;
- if (current->flags & PF_RANDOMIZE)
- load_bias += arch_mmap_rnd();
- load_bias = ELF_PAGESTART(load_bias);
+ /*
+ * This logic is run once for the first LOAD Program
+ * Header for ET_DYN binaries to calculate the
+ * randomization (load_bias) for all the LOAD
+ * Program Headers, and to calculate the entire
+ * size of the ELF mapping (total_size). (Note that
+ * load_addr_set is set to true later once the
+ * initial mapping is performed.)
+ *
+ * There are effectively two types of ET_DYN
+ * binaries: programs (i.e. PIE: ET_DYN with INTERP)
+ * and loaders (ET_DYN without INTERP, since they
+ * _are_ the ELF interpreter). The loaders must
+ * be loaded away from programs since the program
+ * may otherwise collide with the loader (especially
+ * for ET_EXEC which does not have a randomized
+ * position). For example to handle invocations of
+ * "./ld.so someprog" to test out a new version of
+ * the loader, the subsequent program that the
+ * loader loads must avoid the loader itself, so
+ * they cannot share the same load range. Sufficient
+ * room for the brk must be allocated with the
+ * loader as well, since brk must be available with
+ * the loader.
+ *
+ * Therefore, programs are loaded offset from
+ * ELF_ET_DYN_BASE and loaders are loaded into the
+ * independently randomized mmap region (0 load_bias
+ * without MAP_FIXED).
+ */
+ if (elf_interpreter) {
+ load_bias = ELF_ET_DYN_BASE;
+ if (current->flags & PF_RANDOMIZE)
+ load_bias += arch_mmap_rnd();
+ elf_flags |= MAP_FIXED;
+ } else
+ load_bias = 0;
+
+ /*
+ * Since load_bias is used for all subsequent loading
+ * calculations, we must lower it by the first vaddr
+ * so that the remaining calculations based on the
+ * ELF vaddrs will be correctly offset. The result
+ * is then page aligned.
+ */
+ load_bias = ELF_PAGESTART(load_bias - vaddr);
+
total_size = total_mapping_size(elf_phdata,
loc->elf_ex.e_phnum);
if (!total_size) {
@@ -2296,6 +2339,7 @@
goto end_coredump;
}
}
+ dump_truncate(cprm);
if (!elf_core_write_extra_data(cprm))
goto end_coredump;
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 247b8df..8d8370d 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -78,12 +78,6 @@
switch (type) {
case ACL_TYPE_ACCESS:
name = XATTR_NAME_POSIX_ACL_ACCESS;
- if (acl) {
- ret = posix_acl_update_mode(inode, &inode->i_mode, &acl);
- if (ret)
- return ret;
- }
- ret = 0;
break;
case ACL_TYPE_DEFAULT:
if (!S_ISDIR(inode->i_mode))
@@ -119,6 +113,13 @@
int btrfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
{
+ int ret;
+
+ if (type == ACL_TYPE_ACCESS && acl) {
+ ret = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+ if (ret)
+ return ret;
+ }
return __btrfs_set_acl(NULL, inode, acl, type);
}
diff --git a/fs/btrfs/hash.c b/fs/btrfs/hash.c
index a97fdc1..baacc18 100644
--- a/fs/btrfs/hash.c
+++ b/fs/btrfs/hash.c
@@ -38,6 +38,7 @@
{
SHASH_DESC_ON_STACK(shash, tfm);
u32 *ctx = (u32 *)shash_desc_ctx(shash);
+ u32 retval;
int err;
shash->tfm = tfm;
@@ -47,5 +48,7 @@
err = crypto_shash_update(shash, address, length);
BUG_ON(err);
- return *ctx;
+ retval = *ctx;
+ barrier_data(ctx);
+ return retval;
}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index bddbae7..a2a014b 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -4480,8 +4480,19 @@
if (found_type > min_type) {
del_item = 1;
} else {
- if (item_end < new_size)
+ if (item_end < new_size) {
+ /*
+ * With NO_HOLES mode, for the following mapping
+ *
+ * [0-4k][hole][8k-12k]
+ *
+ * if truncating isize down to 6k, it ends up
+ * isize being 8k.
+ */
+ if (btrfs_fs_incompat(root->fs_info, NO_HOLES))
+ last_size = new_size;
break;
+ }
if (found_key.offset >= new_size)
del_item = 1;
else
@@ -7235,7 +7246,6 @@
struct extent_map *em = NULL;
int ret;
- down_read(&BTRFS_I(inode)->dio_sem);
if (type != BTRFS_ORDERED_NOCOW) {
em = create_pinned_em(inode, start, len, orig_start,
block_start, block_len, orig_block_len,
@@ -7254,7 +7264,6 @@
em = ERR_PTR(ret);
}
out:
- up_read(&BTRFS_I(inode)->dio_sem);
return em;
}
@@ -8707,6 +8716,7 @@
dio_data.unsubmitted_oe_range_start = (u64)offset;
dio_data.unsubmitted_oe_range_end = (u64)offset;
current->journal_info = &dio_data;
+ down_read(&BTRFS_I(inode)->dio_sem);
} else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
&BTRFS_I(inode)->runtime_flags)) {
inode_dio_end(inode);
@@ -8719,6 +8729,7 @@
iter, btrfs_get_blocks_direct, NULL,
btrfs_submit_direct, flags);
if (iov_iter_rw(iter) == WRITE) {
+ up_read(&BTRFS_I(inode)->dio_sem);
current->journal_info = NULL;
if (ret < 0 && ret != -EIOCBQUEUED) {
if (dio_data.reserve)
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 1afa111..cec2569 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -292,6 +292,11 @@
if (ret < 0)
err = ret;
dput(last);
+ /* last_name no longer match cache index */
+ if (fi->readdir_cache_idx >= 0) {
+ fi->readdir_cache_idx = -1;
+ fi->dir_release_count = 0;
+ }
}
return err;
}
@@ -315,7 +320,7 @@
struct ceph_mds_client *mdsc = fsc->mdsc;
int i;
int err;
- u32 ftype;
+ unsigned frag = -1;
struct ceph_mds_reply_info_parsed *rinfo;
dout("readdir %p file %p pos %llx\n", inode, file, ctx->pos);
@@ -362,7 +367,6 @@
/* do we have the correct frag content buffered? */
if (need_send_readdir(fi, ctx->pos)) {
struct ceph_mds_request *req;
- unsigned frag;
int op = ceph_snap(inode) == CEPH_SNAPDIR ?
CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;
@@ -373,8 +377,11 @@
}
if (is_hash_order(ctx->pos)) {
- frag = ceph_choose_frag(ci, fpos_hash(ctx->pos),
- NULL, NULL);
+ /* fragtree isn't always accurate. choose frag
+ * based on previous reply when possible. */
+ if (frag == (unsigned)-1)
+ frag = ceph_choose_frag(ci, fpos_hash(ctx->pos),
+ NULL, NULL);
} else {
frag = fpos_frag(ctx->pos);
}
@@ -497,6 +504,7 @@
struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
struct ceph_vino vino;
ino_t ino;
+ u32 ftype;
BUG_ON(rde->offset < ctx->pos);
@@ -519,15 +527,17 @@
ctx->pos++;
}
+ ceph_mdsc_put_request(fi->last_readdir);
+ fi->last_readdir = NULL;
+
if (fi->next_offset > 2) {
- ceph_mdsc_put_request(fi->last_readdir);
- fi->last_readdir = NULL;
+ frag = fi->frag;
goto more;
}
/* more frags? */
if (!ceph_frag_is_rightmost(fi->frag)) {
- unsigned frag = ceph_frag_next(fi->frag);
+ frag = ceph_frag_next(fi->frag);
if (is_hash_order(ctx->pos)) {
loff_t new_pos = ceph_make_fpos(ceph_frag_value(frag),
fi->next_offset, true);
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index 87b87e0..efd72e1 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -849,8 +849,13 @@
struct cifs_fid *fid, __u16 search_flags,
struct cifs_search_info *srch_inf)
{
- return CIFSFindFirst(xid, tcon, path, cifs_sb,
- &fid->netfid, search_flags, srch_inf, true);
+ int rc;
+
+ rc = CIFSFindFirst(xid, tcon, path, cifs_sb,
+ &fid->netfid, search_flags, srch_inf, true);
+ if (rc)
+ cifs_dbg(FYI, "find first failed=%d\n", rc);
+ return rc;
}
static int
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 36334fe..b696824 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -964,7 +964,7 @@
rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL);
kfree(utf16_path);
if (rc) {
- cifs_dbg(VFS, "open dir failed\n");
+ cifs_dbg(FYI, "open dir failed rc=%d\n", rc);
return rc;
}
@@ -974,7 +974,7 @@
rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
fid->volatile_fid, 0, srch_inf);
if (rc) {
- cifs_dbg(VFS, "query directory failed\n");
+ cifs_dbg(FYI, "query directory failed rc=%d\n", rc);
SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
}
return rc;
diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c
index db6d692..314b4ed 100644
--- a/fs/configfs/symlink.c
+++ b/fs/configfs/symlink.c
@@ -83,14 +83,13 @@
ret = -ENOMEM;
sl = kmalloc(sizeof(struct configfs_symlink), GFP_KERNEL);
if (sl) {
- sl->sl_target = config_item_get(item);
spin_lock(&configfs_dirent_lock);
if (target_sd->s_type & CONFIGFS_USET_DROPPING) {
spin_unlock(&configfs_dirent_lock);
- config_item_put(item);
kfree(sl);
return -ENOENT;
}
+ sl->sl_target = config_item_get(item);
list_add(&sl->sl_list, &target_sd->s_links);
spin_unlock(&configfs_dirent_lock);
ret = configfs_create_link(sl, parent_item->ci_dentry,
diff --git a/fs/coredump.c b/fs/coredump.c
index 8bdda8e..00a900a 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -833,3 +833,21 @@
return mod ? dump_skip(cprm, align - mod) : 1;
}
EXPORT_SYMBOL(dump_align);
+
+/*
+ * Ensures that file size is big enough to contain the current file
+ * position. This prevents gdb from complaining about a truncated file
+ * if the last "write" to the file was dump_skip.
+ */
+void dump_truncate(struct coredump_params *cprm)
+{
+ struct file *file = cprm->file;
+ loff_t offset;
+
+ if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
+ offset = file->f_op->llseek(file, 0, SEEK_CUR);
+ if (i_size_read(file->f_mapping->host) < offset)
+ do_truncate(file->f_path.dentry, offset, 0, file);
+ }
+}
+EXPORT_SYMBOL(dump_truncate);
diff --git a/fs/dcache.c b/fs/dcache.c
index 362396a..7171f0d 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1133,11 +1133,12 @@
LIST_HEAD(dispose);
freed = list_lru_walk(&sb->s_dentry_lru,
- dentry_lru_isolate_shrink, &dispose, UINT_MAX);
+ dentry_lru_isolate_shrink, &dispose, 1024);
this_cpu_sub(nr_dentry_unused, freed);
shrink_dentry_list(&dispose);
- } while (freed > 0);
+ cond_resched();
+ } while (list_lru_count(&sb->s_dentry_lru) > 0);
}
EXPORT_SYMBOL(shrink_dcache_sb);
diff --git a/fs/exec.c b/fs/exec.c
index c8ca064..3e2de29 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -215,7 +215,24 @@
if (write) {
unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
- struct rlimit *rlim;
+ unsigned long ptr_size, limit;
+
+ /*
+ * Since the stack will hold pointers to the strings, we
+ * must account for them as well.
+ *
+ * The size calculation is the entire vma while each arg page is
+ * built, so each time we get here it's calculating how far it
+ * is currently (rather than each call being just the newly
+ * added size from the arg page). As a result, we need to
+ * always add the entire size of the pointers, so that on the
+ * last call to get_arg_page() we'll actually have the entire
+ * correct size.
+ */
+ ptr_size = (bprm->argc + bprm->envc) * sizeof(void *);
+ if (ptr_size > ULONG_MAX - size)
+ goto fail;
+ size += ptr_size;
acct_arg_size(bprm, size / PAGE_SIZE);
@@ -227,20 +244,24 @@
return page;
/*
- * Limit to 1/4-th the stack size for the argv+env strings.
+ * Limit to 1/4 of the max stack size or 3/4 of _STK_LIM
+ * (whichever is smaller) for the argv+env strings.
* This ensures that:
* - the remaining binfmt code will not run out of stack space,
* - the program will have a reasonable amount of stack left
* to work from.
*/
- rlim = current->signal->rlim;
- if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) {
- put_page(page);
- return NULL;
- }
+ limit = _STK_LIM / 4 * 3;
+ limit = min(limit, rlimit(RLIMIT_STACK) / 4);
+ if (size > limit)
+ goto fail;
}
return page;
+
+fail:
+ put_page(page);
+ return NULL;
}
static void put_arg_page(struct page *page)
diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c
index 79dafa7..069c0dc 100644
--- a/fs/ext2/acl.c
+++ b/fs/ext2/acl.c
@@ -175,11 +175,8 @@
return acl;
}
-/*
- * inode->i_mutex: down
- */
-int
-ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+static int
+__ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
{
int name_index;
void *value = NULL;
@@ -189,13 +186,6 @@
switch(type) {
case ACL_TYPE_ACCESS:
name_index = EXT2_XATTR_INDEX_POSIX_ACL_ACCESS;
- if (acl) {
- error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
- if (error)
- return error;
- inode->i_ctime = current_time(inode);
- mark_inode_dirty(inode);
- }
break;
case ACL_TYPE_DEFAULT:
@@ -222,6 +212,24 @@
}
/*
+ * inode->i_mutex: down
+ */
+int
+ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+{
+ int error;
+
+ if (type == ACL_TYPE_ACCESS && acl) {
+ error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+ if (error)
+ return error;
+ inode->i_ctime = current_time(inode);
+ mark_inode_dirty(inode);
+ }
+ return __ext2_set_acl(inode, acl, type);
+}
+
+/*
* Initialize the ACLs of a new inode. Called from ext2_new_inode.
*
* dir->i_mutex: down
@@ -238,12 +246,12 @@
return error;
if (default_acl) {
- error = ext2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
+ error = __ext2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
posix_acl_release(default_acl);
}
if (acl) {
if (!error)
- error = ext2_set_acl(inode, acl, ACL_TYPE_ACCESS);
+ error = __ext2_set_acl(inode, acl, ACL_TYPE_ACCESS);
posix_acl_release(acl);
}
return error;
diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
index 42145be..5dc655e 100644
--- a/fs/ext4/sysfs.c
+++ b/fs/ext4/sysfs.c
@@ -100,7 +100,7 @@
int ret;
ret = kstrtoull(skip_spaces(buf), 0, &val);
- if (!ret || val >= clusters)
+ if (ret || val >= clusters)
return -EINVAL;
atomic64_set(&sbi->s_resv_clusters, val);
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
index 6fe23af..55aa29c 100644
--- a/fs/f2fs/acl.c
+++ b/fs/f2fs/acl.c
@@ -211,7 +211,7 @@
switch (type) {
case ACL_TYPE_ACCESS:
name_index = F2FS_XATTR_INDEX_POSIX_ACL_ACCESS;
- if (acl) {
+ if (acl && !ipage) {
error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
if (error)
return error;
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index c12f695..88e111a 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -948,6 +948,7 @@
{
SHASH_DESC_ON_STACK(shash, sbi->s_chksum_driver);
u32 *ctx = (u32 *)shash_desc_ctx(shash);
+ u32 retval;
int err;
shash->tfm = sbi->s_chksum_driver;
@@ -957,7 +958,9 @@
err = crypto_shash_update(shash, address, length);
BUG_ON(err);
- return *ctx;
+ retval = *ctx;
+ barrier_data(ctx);
+ return retval;
}
static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 350a2c8..1493ceb 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -741,16 +741,10 @@
* Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
* is defined as O_NONBLOCK on some platforms and not on others.
*/
- BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
- O_RDONLY | O_WRONLY | O_RDWR |
- O_CREAT | O_EXCL | O_NOCTTY |
- O_TRUNC | O_APPEND | /* O_NONBLOCK | */
- __O_SYNC | O_DSYNC | FASYNC |
- O_DIRECT | O_LARGEFILE | O_DIRECTORY |
- O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
- __FMODE_EXEC | O_PATH | __O_TMPFILE |
- __FMODE_NONOTIFY
- ));
+ BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ !=
+ HWEIGHT32(
+ (VALID_OPEN_FLAGS & ~(O_NONBLOCK | O_NDELAY)) |
+ __FMODE_EXEC | __FMODE_NONOTIFY));
fasync_cache = kmem_cache_create("fasync_cache",
sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL);
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
index 43040721..40d6107 100644
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
@@ -542,6 +542,7 @@
hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) {
if (invalidate)
set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
+ clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL);
}
} else {
@@ -560,6 +561,10 @@
wait_on_atomic_t(&cookie->n_active, fscache_wait_atomic_t,
TASK_UNINTERRUPTIBLE);
+ /* Make sure any pending writes are cancelled. */
+ if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX)
+ fscache_invalidate_writes(cookie);
+
/* Reset the cookie state if it wasn't relinquished */
if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags)) {
atomic_inc(&cookie->n_active);
diff --git a/fs/fscache/netfs.c b/fs/fscache/netfs.c
index 9b28649..a8aa00b 100644
--- a/fs/fscache/netfs.c
+++ b/fs/fscache/netfs.c
@@ -48,6 +48,7 @@
cookie->flags = 1 << FSCACHE_COOKIE_ENABLED;
spin_lock_init(&cookie->lock);
+ spin_lock_init(&cookie->stores_lock);
INIT_HLIST_HEAD(&cookie->backing_objects);
/* check the netfs type is not already present */
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
index 9e792e3..7a182c8 100644
--- a/fs/fscache/object.c
+++ b/fs/fscache/object.c
@@ -30,6 +30,7 @@
static const struct fscache_state *fscache_object_available(struct fscache_object *, int);
static const struct fscache_state *fscache_parent_ready(struct fscache_object *, int);
static const struct fscache_state *fscache_update_object(struct fscache_object *, int);
+static const struct fscache_state *fscache_object_dead(struct fscache_object *, int);
#define __STATE_NAME(n) fscache_osm_##n
#define STATE(n) (&__STATE_NAME(n))
@@ -91,7 +92,7 @@
static WORK_STATE(KILL_OBJECT, "KILL", fscache_kill_object);
static WORK_STATE(KILL_DEPENDENTS, "KDEP", fscache_kill_dependents);
static WORK_STATE(DROP_OBJECT, "DROP", fscache_drop_object);
-static WORK_STATE(OBJECT_DEAD, "DEAD", (void*)2UL);
+static WORK_STATE(OBJECT_DEAD, "DEAD", fscache_object_dead);
static WAIT_STATE(WAIT_FOR_INIT, "?INI",
TRANSIT_TO(INIT_OBJECT, 1 << FSCACHE_OBJECT_EV_NEW_CHILD));
@@ -229,6 +230,10 @@
event = -1;
if (new_state == NO_TRANSIT) {
_debug("{OBJ%x} %s notrans", object->debug_id, state->name);
+ if (unlikely(state == STATE(OBJECT_DEAD))) {
+ _leave(" [dead]");
+ return;
+ }
fscache_enqueue_object(object);
event_mask = object->oob_event_mask;
goto unmask_events;
@@ -239,7 +244,7 @@
object->state = state = new_state;
if (state->work) {
- if (unlikely(state->work == ((void *)2UL))) {
+ if (unlikely(state == STATE(OBJECT_DEAD))) {
_leave(" [dead]");
return;
}
@@ -645,6 +650,12 @@
fscache_mark_object_dead(object);
object->oob_event_mask = 0;
+ if (test_bit(FSCACHE_OBJECT_RETIRED, &object->flags)) {
+ /* Reject any new read/write ops and abort any that are pending. */
+ clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
+ fscache_cancel_all_ops(object);
+ }
+
if (list_empty(&object->dependents) &&
object->n_ops == 0 &&
object->n_children == 0)
@@ -1077,3 +1088,20 @@
}
}
EXPORT_SYMBOL(fscache_object_mark_killed);
+
+/*
+ * The object is dead. We can get here if an object gets queued by an event
+ * that would lead to its death (such as EV_KILL) when the dispatcher is
+ * already running (and so can be requeued) but hasn't yet cleared the event
+ * mask.
+ */
+static const struct fscache_state *fscache_object_dead(struct fscache_object *object,
+ int event)
+{
+ if (!test_and_set_bit(FSCACHE_OBJECT_RUN_AFTER_DEAD,
+ &object->flags))
+ return NO_TRANSIT;
+
+ WARN(true, "FS-Cache object redispatched after death");
+ return NO_TRANSIT;
+}
diff --git a/fs/fuse/Makefile b/fs/fuse/Makefile
index 60da84a..5d75cc4 100644
--- a/fs/fuse/Makefile
+++ b/fs/fuse/Makefile
@@ -5,4 +5,4 @@
obj-$(CONFIG_FUSE_FS) += fuse.o
obj-$(CONFIG_CUSE) += cuse.o
-fuse-objs := dev.o dir.o file.o inode.o control.o xattr.o acl.o
+fuse-objs := dev.o dir.o file.o inode.o control.o xattr.o acl.o passthrough.o
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 83511cb..658fa9e 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -7,6 +7,7 @@
*/
#include "fuse_i.h"
+#include "fuse_passthrough.h"
#include <linux/init.h>
#include <linux/module.h>
@@ -550,9 +551,14 @@
args->out.numargs * sizeof(struct fuse_arg));
fuse_request_send(fc, req);
ret = req->out.h.error;
- if (!ret && args->out.argvar) {
- BUG_ON(args->out.numargs != 1);
- ret = req->out.args[0].size;
+ if (!ret) {
+ if (args->out.argvar) {
+ WARN_ON(args->out.numargs != 1);
+ ret = req->out.args[0].size;
+ }
+
+ if (req->passthrough_filp != NULL)
+ args->out.passthrough_filp = req->passthrough_filp;
}
fuse_put_request(fc, req);
@@ -1890,6 +1896,9 @@
}
fuse_copy_finish(cs);
+ fuse_setup_passthrough(fc, req);
+
+
spin_lock(&fpq->lock);
clear_bit(FR_LOCKED, &req->flags);
if (!fpq->connected)
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index fc8ba62..c7c3c96 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -491,6 +491,7 @@
args.out.args[0].value = &outentry;
args.out.args[1].size = sizeof(outopen);
args.out.args[1].value = &outopen;
+ args.out.passthrough_filp = NULL;
err = fuse_simple_request(fc, &args);
if (err)
goto out_free_ff;
@@ -502,6 +503,8 @@
ff->fh = outopen.fh;
ff->nodeid = outentry.nodeid;
ff->open_flags = outopen.open_flags;
+ if (args.out.passthrough_filp != NULL)
+ ff->passthrough_filp = args.out.passthrough_filp;
inode = fuse_iget(dir->i_sb, outentry.nodeid, outentry.generation,
&outentry.attr, entry_attr_timeout(&outentry), 0);
if (!inode) {
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 5ec5870..75c95659 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -7,6 +7,7 @@
*/
#include "fuse_i.h"
+#include "fuse_passthrough.h"
#include <linux/pagemap.h>
#include <linux/slab.h>
@@ -21,8 +22,10 @@
static const struct file_operations fuse_direct_io_file_operations;
static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
- int opcode, struct fuse_open_out *outargp)
+ int opcode, struct fuse_open_out *outargp,
+ struct file **passthrough_filpp)
{
+ int ret_val;
struct fuse_open_in inarg;
FUSE_ARGS(args);
@@ -38,8 +41,14 @@
args.out.numargs = 1;
args.out.args[0].size = sizeof(*outargp);
args.out.args[0].value = outargp;
+ args.out.passthrough_filp = NULL;
- return fuse_simple_request(fc, &args);
+ ret_val = fuse_simple_request(fc, &args);
+
+ if (args.out.passthrough_filp != NULL)
+ *passthrough_filpp = args.out.passthrough_filp;
+
+ return ret_val;
}
struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
@@ -50,6 +59,11 @@
if (unlikely(!ff))
return NULL;
+ ff->passthrough_filp = NULL;
+ ff->passthrough_enabled = 0;
+ if (fc->passthrough)
+ ff->passthrough_enabled = 1;
+
ff->fc = fc;
ff->reserved_req = fuse_request_alloc(0);
if (unlikely(!ff->reserved_req)) {
@@ -118,6 +132,7 @@
bool isdir)
{
struct fuse_file *ff;
+ struct file *passthrough_filp = NULL;
int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
ff = fuse_file_alloc(fc);
@@ -130,11 +145,12 @@
struct fuse_open_out outarg;
int err;
- err = fuse_send_open(fc, nodeid, file, opcode, &outarg);
+ err = fuse_send_open(fc, nodeid, file, opcode, &outarg,
+ &(passthrough_filp));
if (!err) {
ff->fh = outarg.fh;
ff->open_flags = outarg.open_flags;
-
+ ff->passthrough_filp = passthrough_filp;
} else if (err != -ENOSYS || isdir) {
fuse_file_free(ff);
return err;
@@ -253,6 +269,8 @@
if (unlikely(!ff))
return;
+ fuse_passthrough_release(ff);
+
req = ff->reserved_req;
fuse_prepare_release(ff, file->f_flags, opcode);
@@ -917,8 +935,10 @@
static ssize_t fuse_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
+ ssize_t ret_val;
struct inode *inode = iocb->ki_filp->f_mapping->host;
struct fuse_conn *fc = get_fuse_conn(inode);
+ struct fuse_file *ff = iocb->ki_filp->private_data;
/*
* In auto invalidate mode, always update attributes on read.
@@ -933,7 +953,12 @@
return err;
}
- return generic_file_read_iter(iocb, to);
+ if (ff && ff->passthrough_enabled && ff->passthrough_filp)
+ ret_val = fuse_passthrough_read_iter(iocb, to);
+ else
+ ret_val = generic_file_read_iter(iocb, to);
+
+ return ret_val;
}
static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff,
@@ -1165,6 +1190,7 @@
static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
+ struct fuse_file *ff = file->private_data;
struct address_space *mapping = file->f_mapping;
ssize_t written = 0;
ssize_t written_buffered = 0;
@@ -1198,6 +1224,11 @@
if (err)
goto out;
+ if (ff && ff->passthrough_enabled && ff->passthrough_filp) {
+ written = fuse_passthrough_write_iter(iocb, from);
+ goto out;
+ }
+
if (iocb->ki_flags & IOCB_DIRECT) {
loff_t pos = iocb->ki_pos;
written = generic_file_direct_write(iocb, from);
@@ -2069,6 +2100,9 @@
static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
+ struct fuse_file *ff = file->private_data;
+
+ ff->passthrough_enabled = 0;
if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
fuse_link_write_file(file);
@@ -2079,6 +2113,9 @@
static int fuse_direct_mmap(struct file *file, struct vm_area_struct *vma)
{
+ struct fuse_file *ff = file->private_data;
+
+ ff->passthrough_enabled = 0;
/* Can't provide the coherency needed for MAP_SHARED */
if (vma->vm_flags & VM_MAYSHARE)
return -ENODEV;
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 6b30a12..cc2c82c 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -153,6 +153,10 @@
/** Has flock been performed on this file? */
bool flock:1;
+
+ /* the read write file */
+ struct file *passthrough_filp;
+ bool passthrough_enabled;
};
/** One input argument of a request */
@@ -232,6 +236,7 @@
unsigned argvar:1;
unsigned numargs;
struct fuse_arg args[2];
+ struct file *passthrough_filp;
} out;
};
@@ -382,6 +387,9 @@
/** Request is stolen from fuse_file->reserved_req */
struct file *stolen_file;
+
+ /** fuse passthrough file */
+ struct file *passthrough_filp;
};
struct fuse_iqueue {
@@ -542,6 +550,9 @@
/** handle fs handles killing suid/sgid/cap on write/chown/trunc */
unsigned handle_killpriv:1;
+ /** passthrough IO. */
+ unsigned passthrough:1;
+
/*
* The following bitfields are only for optimization purposes
* and hence races in setting them will not cause malfunction
diff --git a/fs/fuse/fuse_passthrough.h b/fs/fuse/fuse_passthrough.h
new file mode 100644
index 0000000..12429ac
--- /dev/null
+++ b/fs/fuse/fuse_passthrough.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _FS_FUSE_PASSTHROUGH_H
+#define _FS_FUSE_PASSTHROUGH_H
+
+#include "fuse_i.h"
+
+#include <linux/fuse.h>
+#include <linux/file.h>
+
+void fuse_setup_passthrough(struct fuse_conn *fc, struct fuse_req *req);
+
+ssize_t fuse_passthrough_read_iter(struct kiocb *iocb, struct iov_iter *to);
+
+ssize_t fuse_passthrough_write_iter(struct kiocb *iocb, struct iov_iter *from);
+
+void fuse_passthrough_release(struct fuse_file *ff);
+
+#endif /* _FS_FUSE_PASSTHROUGH_H */
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 6fe6a88..f1512c8 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -915,6 +915,12 @@
fc->parallel_dirops = 1;
if (arg->flags & FUSE_HANDLE_KILLPRIV)
fc->handle_killpriv = 1;
+ if (arg->flags & FUSE_PASSTHROUGH) {
+ fc->passthrough = 1;
+ /* Prevent further stacking */
+ fc->sb->s_stack_depth =
+ FILESYSTEM_MAX_STACK_DEPTH;
+ }
if (arg->time_gran && arg->time_gran <= 1000000000)
fc->sb->s_time_gran = arg->time_gran;
if ((arg->flags & FUSE_POSIX_ACL)) {
diff --git a/fs/fuse/passthrough.c b/fs/fuse/passthrough.c
new file mode 100644
index 0000000..c92c40b
--- /dev/null
+++ b/fs/fuse/passthrough.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "fuse_passthrough.h"
+
+#include <linux/aio.h>
+#include <linux/fs_stack.h>
+
+void fuse_setup_passthrough(struct fuse_conn *fc, struct fuse_req *req)
+{
+ int daemon_fd, fs_stack_depth;
+ unsigned int open_out_index;
+ struct file *passthrough_filp;
+ struct inode *passthrough_inode;
+ struct super_block *passthrough_sb;
+ struct fuse_open_out *open_out;
+
+ req->passthrough_filp = NULL;
+
+ if (!(fc->passthrough))
+ return;
+
+ if ((req->in.h.opcode != FUSE_OPEN) &&
+ (req->in.h.opcode != FUSE_CREATE))
+ return;
+
+ open_out_index = req->in.numargs - 1;
+
+ WARN_ON(open_out_index != 0 && open_out_index != 1);
+ WARN_ON(req->out.args[open_out_index].size != sizeof(*open_out));
+
+ open_out = req->out.args[open_out_index].value;
+
+ daemon_fd = (int)open_out->passthrough_fd;
+ if (daemon_fd < 0)
+ return;
+
+ passthrough_filp = fget_raw(daemon_fd);
+ if (!passthrough_filp)
+ return;
+
+ passthrough_inode = file_inode(passthrough_filp);
+ passthrough_sb = passthrough_inode->i_sb;
+ fs_stack_depth = passthrough_sb->s_stack_depth + 1;
+
+ /* If we reached the stacking limit go through regular io */
+ if (fs_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
+ /* Release the passthrough file. */
+ fput(passthrough_filp);
+ pr_err("FUSE: maximum fs stacking depth exceeded, cannot use passthrough for this file\n");
+ return;
+ }
+ req->passthrough_filp = passthrough_filp;
+}
+
+
+static ssize_t fuse_passthrough_read_write_iter(struct kiocb *iocb,
+ struct iov_iter *iter, int do_write)
+{
+ ssize_t ret_val;
+ struct fuse_file *ff;
+ struct file *fuse_file, *passthrough_filp;
+ struct inode *fuse_inode, *passthrough_inode;
+ struct fuse_conn *fc;
+
+ ff = iocb->ki_filp->private_data;
+ fuse_file = iocb->ki_filp;
+ passthrough_filp = ff->passthrough_filp;
+ fc = ff->fc;
+
+ /* lock passthrough file to prevent it from being released */
+ get_file(passthrough_filp);
+ iocb->ki_filp = passthrough_filp;
+ fuse_inode = fuse_file->f_path.dentry->d_inode;
+ passthrough_inode = file_inode(passthrough_filp);
+
+ if (do_write) {
+ if (!passthrough_filp->f_op->write_iter)
+ return -EIO;
+
+ ret_val = passthrough_filp->f_op->write_iter(iocb, iter);
+
+ if (ret_val >= 0 || ret_val == -EIOCBQUEUED) {
+ spin_lock(&fc->lock);
+ fsstack_copy_inode_size(fuse_inode, passthrough_inode);
+ spin_unlock(&fc->lock);
+ fsstack_copy_attr_times(fuse_inode, passthrough_inode);
+ }
+ } else {
+ if (!passthrough_filp->f_op->read_iter)
+ return -EIO;
+
+ ret_val = passthrough_filp->f_op->read_iter(iocb, iter);
+ if (ret_val >= 0 || ret_val == -EIOCBQUEUED)
+ fsstack_copy_attr_atime(fuse_inode, passthrough_inode);
+ }
+
+ iocb->ki_filp = fuse_file;
+
+ /* unlock passthrough file */
+ fput(passthrough_filp);
+
+ return ret_val;
+}
+
+ssize_t fuse_passthrough_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+ return fuse_passthrough_read_write_iter(iocb, to, 0);
+}
+
+ssize_t fuse_passthrough_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+ return fuse_passthrough_read_write_iter(iocb, from, 1);
+}
+
+void fuse_passthrough_release(struct fuse_file *ff)
+{
+ if (!(ff->passthrough_filp))
+ return;
+
+ /* Release the passthrough file. */
+ fput(ff->passthrough_filp);
+ ff->passthrough_filp = NULL;
+}
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 133f322..7bff6f4 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -80,9 +80,9 @@
static struct rhashtable gl_hash_table;
-void gfs2_glock_free(struct gfs2_glock *gl)
+static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
if (gl->gl_ops->go_flags & GLOF_ASPACE) {
kmem_cache_free(gfs2_glock_aspace_cachep, gl);
@@ -90,6 +90,13 @@
kfree(gl->gl_lksb.sb_lvbptr);
kmem_cache_free(gfs2_glock_cachep, gl);
}
+}
+
+void gfs2_glock_free(struct gfs2_glock *gl)
+{
+ struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+
+ call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
if (atomic_dec_and_test(&sdp->sd_glock_disposal))
wake_up(&sdp->sd_glock_wait);
}
@@ -1425,26 +1432,32 @@
* @sdp: the filesystem
* @bucket: the bucket
*
+ * Note that the function can be called multiple times on the same
+ * object. So the user must ensure that the function can cope with
+ * that.
*/
static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
{
struct gfs2_glock *gl;
- struct rhash_head *pos;
- const struct bucket_table *tbl;
- int i;
+ struct rhashtable_iter iter;
- rcu_read_lock();
- tbl = rht_dereference_rcu(gl_hash_table.tbl, &gl_hash_table);
- for (i = 0; i < tbl->size; i++) {
- rht_for_each_entry_rcu(gl, pos, tbl, i, gl_node) {
+ rhashtable_walk_enter(&gl_hash_table, &iter);
+
+ do {
+ gl = ERR_PTR(rhashtable_walk_start(&iter));
+ if (gl)
+ continue;
+
+ while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl))
if ((gl->gl_name.ln_sbd == sdp) &&
lockref_get_not_dead(&gl->gl_lockref))
examiner(gl);
- }
- }
- rcu_read_unlock();
- cond_resched();
+
+ rhashtable_walk_stop(&iter);
+ } while (cond_resched(), gl == ERR_PTR(-EAGAIN));
+
+ rhashtable_walk_exit(&iter);
}
/**
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 51519c2..a04bf95 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -370,6 +370,7 @@
loff_t end;
} gl_vm;
};
+ struct rcu_head gl_rcu;
struct rhash_head gl_node;
};
diff --git a/fs/hfsplus/posix_acl.c b/fs/hfsplus/posix_acl.c
index 9b92058..6bb5d7c 100644
--- a/fs/hfsplus/posix_acl.c
+++ b/fs/hfsplus/posix_acl.c
@@ -51,8 +51,8 @@
return acl;
}
-int hfsplus_set_posix_acl(struct inode *inode, struct posix_acl *acl,
- int type)
+static int __hfsplus_set_posix_acl(struct inode *inode, struct posix_acl *acl,
+ int type)
{
int err;
char *xattr_name;
@@ -64,12 +64,6 @@
switch (type) {
case ACL_TYPE_ACCESS:
xattr_name = XATTR_NAME_POSIX_ACL_ACCESS;
- if (acl) {
- err = posix_acl_update_mode(inode, &inode->i_mode, &acl);
- if (err)
- return err;
- }
- err = 0;
break;
case ACL_TYPE_DEFAULT:
@@ -105,6 +99,18 @@
return err;
}
+int hfsplus_set_posix_acl(struct inode *inode, struct posix_acl *acl, int type)
+{
+ int err;
+
+ if (type == ACL_TYPE_ACCESS && acl) {
+ err = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+ if (err)
+ return err;
+ }
+ return __hfsplus_set_posix_acl(inode, acl, type);
+}
+
int hfsplus_init_posix_acl(struct inode *inode, struct inode *dir)
{
int err = 0;
@@ -122,15 +128,15 @@
return err;
if (default_acl) {
- err = hfsplus_set_posix_acl(inode, default_acl,
- ACL_TYPE_DEFAULT);
+ err = __hfsplus_set_posix_acl(inode, default_acl,
+ ACL_TYPE_DEFAULT);
posix_acl_release(default_acl);
}
if (acl) {
if (!err)
- err = hfsplus_set_posix_acl(inode, acl,
- ACL_TYPE_ACCESS);
+ err = __hfsplus_set_posix_acl(inode, acl,
+ ACL_TYPE_ACCESS);
posix_acl_release(acl);
}
return err;
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 4fb7b10..704fa0b 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -191,7 +191,7 @@
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/fs/mount.h b/fs/mount.h
index d8295f2..3603884 100644
--- a/fs/mount.h
+++ b/fs/mount.h
@@ -58,6 +58,7 @@
struct mnt_namespace *mnt_ns; /* containing namespace */
struct mountpoint *mnt_mp; /* where is it mounted */
struct hlist_node mnt_mp_list; /* list mounts with the same mountpoint */
+ struct list_head mnt_umounting; /* list entry for umount propagation */
#ifdef CONFIG_FSNOTIFY
struct hlist_head mnt_fsnotify_marks;
__u32 mnt_fsnotify_mask;
diff --git a/fs/namespace.c b/fs/namespace.c
index 5147334..7731f77 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -237,6 +237,7 @@
INIT_LIST_HEAD(&mnt->mnt_slave_list);
INIT_LIST_HEAD(&mnt->mnt_slave);
INIT_HLIST_NODE(&mnt->mnt_mp_list);
+ INIT_LIST_HEAD(&mnt->mnt_umounting);
#ifdef CONFIG_FSNOTIFY
INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks);
#endif
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 484bebc..0a21150 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -279,7 +279,7 @@
printk(KERN_WARNING "nfs_callback_create_svc: no kthread, %d users??\n",
cb_info->users);
- serv = svc_create(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, sv_ops);
+ serv = svc_create_pooled(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, sv_ops);
if (!serv) {
printk(KERN_ERR "nfs_callback_create_svc: create service failed\n");
return ERR_PTR(-ENOMEM);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 53e02b8..d04ec381 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1167,11 +1167,13 @@
/* Force a full look up iff the parent directory has changed */
if (!nfs_is_exclusive_create(dir, flags) &&
nfs_check_verifier(dir, dentry, flags & LOOKUP_RCU)) {
-
- if (nfs_lookup_verify_inode(inode, flags)) {
+ error = nfs_lookup_verify_inode(inode, flags);
+ if (error) {
if (flags & LOOKUP_RCU)
return -ECHILD;
- goto out_zap_parent;
+ if (error == -ESTALE)
+ goto out_zap_parent;
+ goto out_error;
}
goto out_valid;
}
@@ -1195,8 +1197,10 @@
trace_nfs_lookup_revalidate_enter(dir, dentry, flags);
error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, label);
trace_nfs_lookup_revalidate_exit(dir, dentry, flags, error);
- if (error)
+ if (error == -ESTALE || error == -ENOENT)
goto out_bad;
+ if (error)
+ goto out_error;
if (nfs_compare_fh(NFS_FH(inode), fhandle))
goto out_bad;
if ((error = nfs_refresh_inode(inode, fattr)) != 0)
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index bf4ec5e..76ae256 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1278,9 +1278,9 @@
return 0;
/* Has the inode gone and changed behind our back? */
if ((fattr->valid & NFS_ATTR_FATTR_FILEID) && nfsi->fileid != fattr->fileid)
- return -EIO;
+ return -ESTALE;
if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT))
- return -EIO;
+ return -ESTALE;
if (!nfs_file_has_buffered_writers(nfsi)) {
/* Verify a few of the more important attributes */
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index fc9b049..401ea6e 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2343,8 +2343,6 @@
if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0)
return 0;
- /* even though OPEN succeeded, access is denied. Close the file */
- nfs4_close_state(state, fmode);
return -EACCES;
}
@@ -8431,6 +8429,7 @@
size_t max_pages = max_response_pages(server);
dprintk("--> %s\n", __func__);
+ nfs4_sequence_free_slot(&lgp->res.seq_res);
nfs4_free_pages(lgp->args.layout.pages, max_pages);
pnfs_put_layout_hdr(NFS_I(inode)->layout);
put_nfs_open_context(lgp->args.ctx);
@@ -8505,7 +8504,6 @@
/* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
if (status == 0 && lgp->res.layoutp->len)
lseg = pnfs_layout_process(lgp);
- nfs4_sequence_free_slot(&lgp->res.seq_res);
rpc_put_task(task);
dprintk("<-- %s status=%d\n", __func__, status);
if (status)
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 0959c96..9267191 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1079,6 +1079,7 @@
case -NFS4ERR_BADXDR:
case -NFS4ERR_RESOURCE:
case -NFS4ERR_NOFILEHANDLE:
+ case -NFS4ERR_MOVED:
/* Non-seqid mutating errors */
return;
};
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 636abcb..5e8709a 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -2242,13 +2242,13 @@
spin_unlock(&o2hb_live_lock);
}
-static ssize_t o2hb_heartbeat_group_threshold_show(struct config_item *item,
+static ssize_t o2hb_heartbeat_group_dead_threshold_show(struct config_item *item,
char *page)
{
return sprintf(page, "%u\n", o2hb_dead_threshold);
}
-static ssize_t o2hb_heartbeat_group_threshold_store(struct config_item *item,
+static ssize_t o2hb_heartbeat_group_dead_threshold_store(struct config_item *item,
const char *page, size_t count)
{
unsigned long tmp;
@@ -2297,11 +2297,11 @@
}
-CONFIGFS_ATTR(o2hb_heartbeat_group_, threshold);
+CONFIGFS_ATTR(o2hb_heartbeat_group_, dead_threshold);
CONFIGFS_ATTR(o2hb_heartbeat_group_, mode);
static struct configfs_attribute *o2hb_heartbeat_group_attrs[] = {
- &o2hb_heartbeat_group_attr_threshold,
+ &o2hb_heartbeat_group_attr_dead_threshold,
&o2hb_heartbeat_group_attr_mode,
NULL,
};
diff --git a/fs/open.c b/fs/open.c
index 568749b..73b7d19 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -915,6 +915,12 @@
int lookup_flags = 0;
int acc_mode = ACC_MODE(flags);
+ /*
+ * Clear out all open flags we don't know about so that we don't report
+ * them in fcntl(F_GETFD) or similar interfaces.
+ */
+ flags &= VALID_OPEN_FLAGS;
+
if (flags & (O_CREAT | __O_TMPFILE))
op->mode = (mode & S_IALLUGO) | S_IFREG;
else
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 0e10085..e7c8ac4 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -1146,6 +1146,7 @@
unsigned int stacklen = 0;
unsigned int i;
bool remote = false;
+ struct cred *cred;
int err;
err = -ENOMEM;
@@ -1309,10 +1310,14 @@
else
sb->s_d_op = &ovl_dentry_operations;
- ufs->creator_cred = prepare_creds();
- if (!ufs->creator_cred)
+ err = -ENOMEM;
+ ufs->creator_cred = cred = prepare_creds();
+ if (!cred)
goto out_put_lower_mnt;
+ /* Never override disk quota limits or use reserved space */
+ cap_lower(cred->cap_effective, CAP_SYS_RESOURCE);
+
err = -ENOMEM;
oe = ovl_alloc_entry(numlower);
if (!oe)
diff --git a/fs/pnode.c b/fs/pnode.c
index e4e428d..ddb846f 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -24,6 +24,11 @@
return list_entry(p->mnt_slave_list.next, struct mount, mnt_slave);
}
+static inline struct mount *last_slave(struct mount *p)
+{
+ return list_entry(p->mnt_slave_list.prev, struct mount, mnt_slave);
+}
+
static inline struct mount *next_slave(struct mount *p)
{
return list_entry(p->mnt_slave.next, struct mount, mnt_slave);
@@ -164,6 +169,19 @@
}
}
+static struct mount *skip_propagation_subtree(struct mount *m,
+ struct mount *origin)
+{
+ /*
+ * Advance m such that propagation_next will not return
+ * the slaves of m.
+ */
+ if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
+ m = last_slave(m);
+
+ return m;
+}
+
static struct mount *next_group(struct mount *m, struct mount *origin)
{
while (1) {
@@ -415,68 +433,107 @@
}
}
-/*
- * Mark all mounts that the MNT_LOCKED logic will allow to be unmounted.
- */
-static void mark_umount_candidates(struct mount *mnt)
+static void umount_one(struct mount *mnt, struct list_head *to_umount)
{
- struct mount *parent = mnt->mnt_parent;
- struct mount *m;
-
- BUG_ON(parent == mnt);
-
- for (m = propagation_next(parent, parent); m;
- m = propagation_next(m, parent)) {
- struct mount *child = __lookup_mnt(&m->mnt,
- mnt->mnt_mountpoint);
- if (!child || (child->mnt.mnt_flags & MNT_UMOUNT))
- continue;
- if (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m)) {
- SET_MNT_MARK(child);
- }
- }
+ CLEAR_MNT_MARK(mnt);
+ mnt->mnt.mnt_flags |= MNT_UMOUNT;
+ list_del_init(&mnt->mnt_child);
+ list_del_init(&mnt->mnt_umounting);
+ list_move_tail(&mnt->mnt_list, to_umount);
}
/*
* NOTE: unmounting 'mnt' naturally propagates to all other mounts its
* parent propagates to.
*/
-static void __propagate_umount(struct mount *mnt)
+static bool __propagate_umount(struct mount *mnt,
+ struct list_head *to_umount,
+ struct list_head *to_restore)
{
- struct mount *parent = mnt->mnt_parent;
- struct mount *m;
+ bool progress = false;
+ struct mount *child;
- BUG_ON(parent == mnt);
+ /*
+ * The state of the parent won't change if this mount is
+ * already unmounted or marked as without children.
+ */
+ if (mnt->mnt.mnt_flags & (MNT_UMOUNT | MNT_MARKED))
+ goto out;
- for (m = propagation_next(parent, parent); m;
- m = propagation_next(m, parent)) {
- struct mount *topper;
- struct mount *child = __lookup_mnt(&m->mnt,
- mnt->mnt_mountpoint);
- /*
- * umount the child only if the child has no children
- * and the child is marked safe to unmount.
- */
- if (!child || !IS_MNT_MARKED(child))
+ /* Verify topper is the only grandchild that has not been
+ * speculatively unmounted.
+ */
+ list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
+ if (child->mnt_mountpoint == mnt->mnt.mnt_root)
continue;
- CLEAR_MNT_MARK(child);
+ if (!list_empty(&child->mnt_umounting) && IS_MNT_MARKED(child))
+ continue;
+ /* Found a mounted child */
+ goto children;
+ }
- /* If there is exactly one mount covering all of child
- * replace child with that mount.
- */
- topper = find_topper(child);
- if (topper)
- mnt_change_mountpoint(child->mnt_parent, child->mnt_mp,
- topper);
+ /* Mark mounts that can be unmounted if not locked */
+ SET_MNT_MARK(mnt);
+ progress = true;
- if (list_empty(&child->mnt_mounts)) {
- list_del_init(&child->mnt_child);
- child->mnt.mnt_flags |= MNT_UMOUNT;
- list_move_tail(&child->mnt_list, &mnt->mnt_list);
+ /* If a mount is without children and not locked umount it. */
+ if (!IS_MNT_LOCKED(mnt)) {
+ umount_one(mnt, to_umount);
+ } else {
+children:
+ list_move_tail(&mnt->mnt_umounting, to_restore);
+ }
+out:
+ return progress;
+}
+
+static void umount_list(struct list_head *to_umount,
+ struct list_head *to_restore)
+{
+ struct mount *mnt, *child, *tmp;
+ list_for_each_entry(mnt, to_umount, mnt_list) {
+ list_for_each_entry_safe(child, tmp, &mnt->mnt_mounts, mnt_child) {
+ /* topper? */
+ if (child->mnt_mountpoint == mnt->mnt.mnt_root)
+ list_move_tail(&child->mnt_umounting, to_restore);
+ else
+ umount_one(child, to_umount);
}
}
}
+static void restore_mounts(struct list_head *to_restore)
+{
+ /* Restore mounts to a clean working state */
+ while (!list_empty(to_restore)) {
+ struct mount *mnt, *parent;
+ struct mountpoint *mp;
+
+ mnt = list_first_entry(to_restore, struct mount, mnt_umounting);
+ CLEAR_MNT_MARK(mnt);
+ list_del_init(&mnt->mnt_umounting);
+
+ /* Should this mount be reparented? */
+ mp = mnt->mnt_mp;
+ parent = mnt->mnt_parent;
+ while (parent->mnt.mnt_flags & MNT_UMOUNT) {
+ mp = parent->mnt_mp;
+ parent = parent->mnt_parent;
+ }
+ if (parent != mnt->mnt_parent)
+ mnt_change_mountpoint(parent, mp, mnt);
+ }
+}
+
+static void cleanup_umount_visitations(struct list_head *visited)
+{
+ while (!list_empty(visited)) {
+ struct mount *mnt =
+ list_first_entry(visited, struct mount, mnt_umounting);
+ list_del_init(&mnt->mnt_umounting);
+ }
+}
+
/*
* collect all mounts that receive propagation from the mount in @list,
* and return these additional mounts in the same list.
@@ -487,12 +544,69 @@
int propagate_umount(struct list_head *list)
{
struct mount *mnt;
+ LIST_HEAD(to_restore);
+ LIST_HEAD(to_umount);
+ LIST_HEAD(visited);
- list_for_each_entry_reverse(mnt, list, mnt_list)
- mark_umount_candidates(mnt);
+ /* Find candidates for unmounting */
+ list_for_each_entry_reverse(mnt, list, mnt_list) {
+ struct mount *parent = mnt->mnt_parent;
+ struct mount *m;
- list_for_each_entry(mnt, list, mnt_list)
- __propagate_umount(mnt);
+ /*
+		 * If this mount has already been visited it is known that its
+		 * entire peer group and all of their slaves in the propagation
+		 * tree for the mountpoint have already been visited and there is
+ * no need to visit them again.
+ */
+ if (!list_empty(&mnt->mnt_umounting))
+ continue;
+
+ list_add_tail(&mnt->mnt_umounting, &visited);
+ for (m = propagation_next(parent, parent); m;
+ m = propagation_next(m, parent)) {
+ struct mount *child = __lookup_mnt(&m->mnt,
+ mnt->mnt_mountpoint);
+ if (!child)
+ continue;
+
+ if (!list_empty(&child->mnt_umounting)) {
+ /*
+			 * If the child has already been visited it is
+			 * known that its entire peer group and all of
+			 * their slaves in the propagation tree for the
+			 * mountpoint have already been visited and there
+			 * is no need to visit this subtree again.
+ */
+ m = skip_propagation_subtree(m, parent);
+ continue;
+ } else if (child->mnt.mnt_flags & MNT_UMOUNT) {
+ /*
+				 * We have come across a partially unmounted
+				 * mount in the list that has not been visited yet.
+ * Remember it has been visited and continue
+ * about our merry way.
+ */
+ list_add_tail(&child->mnt_umounting, &visited);
+ continue;
+ }
+
+ /* Check the child and parents while progress is made */
+ while (__propagate_umount(child,
+ &to_umount, &to_restore)) {
+ /* Is the parent a umount candidate? */
+ child = child->mnt_parent;
+ if (list_empty(&child->mnt_umounting))
+ break;
+ }
+ }
+ }
+
+ umount_list(&to_umount, &to_restore);
+ restore_mounts(&to_restore);
+ cleanup_umount_visitations(&visited);
+ list_splice_tail(&to_umount, list);
+
return 0;
}
diff --git a/fs/proc/base.c b/fs/proc/base.c
index c573113..18f7612 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1632,7 +1632,7 @@
.release = single_release,
};
-#endif /* CONFIG_SCHED_HMP */
+#endif /* CONFIG_SCHED_WALT */
#ifdef CONFIG_SCHED_AUTOGROUP
/*
@@ -3090,6 +3090,9 @@
REG("mounts", S_IRUGO, proc_mounts_operations),
REG("mountinfo", S_IRUGO, proc_mountinfo_operations),
REG("mountstats", S_IRUSR, proc_mountstats_operations),
+#ifdef CONFIG_PROCESS_RECLAIM
+ REG("reclaim", S_IWUSR, proc_reclaim_operations),
+#endif
#ifdef CONFIG_PROC_PAGE_MONITOR
REG("clear_refs", S_IWUSR, proc_clear_refs_operations),
REG("smaps", S_IRUGO, proc_pid_smaps_operations),
@@ -3386,6 +3389,8 @@
iter.tgid += 1, iter = next_tgid(ns, iter)) {
char name[PROC_NUMBUF];
int len;
+
+ cond_resched();
if (!has_pid_permissions(ns, iter.task, 2))
continue;
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 5378441..6dfb414 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -209,6 +209,7 @@
extern const struct inode_operations proc_link_inode_operations;
extern const struct inode_operations proc_pid_link_inode_operations;
+extern const struct file_operations proc_reclaim_operations;
extern void proc_init_inodecache(void);
extern struct inode *proc_get_inode(struct super_block *, struct proc_dir_entry *);
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index f998332..c585e7e 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -15,6 +15,8 @@
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
+#include <linux/mm_inline.h>
+#include <linux/ctype.h>
#include <asm/elf.h>
#include <asm/uaccess.h>
@@ -349,11 +351,7 @@
/* We don't show the stack guard page in /proc/maps */
start = vma->vm_start;
- if (stack_guard_page_start(vma, start))
- start += PAGE_SIZE;
end = vma->vm_end;
- if (stack_guard_page_end(vma, end))
- end -= PAGE_SIZE;
seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
@@ -1530,6 +1528,241 @@
};
#endif /* CONFIG_PROC_PAGE_MONITOR */
+#ifdef CONFIG_PROCESS_RECLAIM
+static int reclaim_pte_range(pmd_t *pmd, unsigned long addr,
+ unsigned long end, struct mm_walk *walk)
+{
+ struct reclaim_param *rp = walk->private;
+ struct vm_area_struct *vma = rp->vma;
+ pte_t *pte, ptent;
+ spinlock_t *ptl;
+ struct page *page;
+ LIST_HEAD(page_list);
+ int isolated;
+ int reclaimed;
+
+ split_huge_pmd(vma, addr, pmd);
+ if (pmd_trans_unstable(pmd) || !rp->nr_to_reclaim)
+ return 0;
+cont:
+ isolated = 0;
+ pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+ for (; addr != end; pte++, addr += PAGE_SIZE) {
+ ptent = *pte;
+ if (!pte_present(ptent))
+ continue;
+
+ page = vm_normal_page(vma, addr, ptent);
+ if (!page)
+ continue;
+
+ if (isolate_lru_page(page))
+ continue;
+
+ list_add(&page->lru, &page_list);
+ inc_node_page_state(page, NR_ISOLATED_ANON +
+ page_is_file_cache(page));
+ isolated++;
+ rp->nr_scanned++;
+ if ((isolated >= SWAP_CLUSTER_MAX) || !rp->nr_to_reclaim)
+ break;
+ }
+ pte_unmap_unlock(pte - 1, ptl);
+ reclaimed = reclaim_pages_from_list(&page_list, vma);
+ rp->nr_reclaimed += reclaimed;
+ rp->nr_to_reclaim -= reclaimed;
+ if (rp->nr_to_reclaim < 0)
+ rp->nr_to_reclaim = 0;
+
+ if (rp->nr_to_reclaim && (addr != end))
+ goto cont;
+
+ cond_resched();
+ return 0;
+}
+
+enum reclaim_type {
+ RECLAIM_FILE,
+ RECLAIM_ANON,
+ RECLAIM_ALL,
+ RECLAIM_RANGE,
+};
+
+struct reclaim_param reclaim_task_anon(struct task_struct *task,
+ int nr_to_reclaim)
+{
+ struct mm_struct *mm;
+ struct vm_area_struct *vma;
+ struct mm_walk reclaim_walk = {};
+ struct reclaim_param rp;
+
+ rp.nr_reclaimed = 0;
+ rp.nr_scanned = 0;
+ get_task_struct(task);
+ mm = get_task_mm(task);
+ if (!mm)
+ goto out;
+
+ reclaim_walk.mm = mm;
+ reclaim_walk.pmd_entry = reclaim_pte_range;
+
+ rp.nr_to_reclaim = nr_to_reclaim;
+ reclaim_walk.private = &rp;
+
+ down_read(&mm->mmap_sem);
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ if (is_vm_hugetlb_page(vma))
+ continue;
+
+ if (vma->vm_file)
+ continue;
+
+ if (vma->vm_flags & VM_LOCKED)
+ continue;
+
+ if (!rp.nr_to_reclaim)
+ break;
+
+ rp.vma = vma;
+ walk_page_range(vma->vm_start, vma->vm_end,
+ &reclaim_walk);
+ }
+
+ flush_tlb_mm(mm);
+ up_read(&mm->mmap_sem);
+ mmput(mm);
+out:
+ put_task_struct(task);
+ return rp;
+}
+
+static ssize_t reclaim_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct task_struct *task;
+ char buffer[200];
+ struct mm_struct *mm;
+ struct vm_area_struct *vma;
+ enum reclaim_type type;
+ char *type_buf;
+ struct mm_walk reclaim_walk = {};
+ unsigned long start = 0;
+ unsigned long end = 0;
+ struct reclaim_param rp;
+
+ memset(buffer, 0, sizeof(buffer));
+ if (count > sizeof(buffer) - 1)
+ count = sizeof(buffer) - 1;
+
+ if (copy_from_user(buffer, buf, count))
+ return -EFAULT;
+
+ type_buf = strstrip(buffer);
+ if (!strcmp(type_buf, "file"))
+ type = RECLAIM_FILE;
+ else if (!strcmp(type_buf, "anon"))
+ type = RECLAIM_ANON;
+ else if (!strcmp(type_buf, "all"))
+ type = RECLAIM_ALL;
+ else if (isdigit(*type_buf))
+ type = RECLAIM_RANGE;
+ else
+ goto out_err;
+
+ if (type == RECLAIM_RANGE) {
+ char *token;
+ unsigned long long len, len_in, tmp;
+ token = strsep(&type_buf, " ");
+ if (!token)
+ goto out_err;
+ tmp = memparse(token, &token);
+ if (tmp & ~PAGE_MASK || tmp > ULONG_MAX)
+ goto out_err;
+ start = tmp;
+
+ token = strsep(&type_buf, " ");
+ if (!token)
+ goto out_err;
+ len_in = memparse(token, &token);
+ len = (len_in + ~PAGE_MASK) & PAGE_MASK;
+ if (len > ULONG_MAX)
+ goto out_err;
+ /*
+ * Check to see whether len was rounded up from small -ve
+ * to zero.
+ */
+ if (len_in && !len)
+ goto out_err;
+
+ end = start + len;
+ if (end < start)
+ goto out_err;
+ }
+
+ task = get_proc_task(file->f_path.dentry->d_inode);
+ if (!task)
+ return -ESRCH;
+
+ mm = get_task_mm(task);
+ if (!mm)
+ goto out;
+
+ reclaim_walk.mm = mm;
+ reclaim_walk.pmd_entry = reclaim_pte_range;
+
+ rp.nr_to_reclaim = INT_MAX;
+ rp.nr_reclaimed = 0;
+ reclaim_walk.private = &rp;
+
+ down_read(&mm->mmap_sem);
+ if (type == RECLAIM_RANGE) {
+ vma = find_vma(mm, start);
+ while (vma) {
+ if (vma->vm_start > end)
+ break;
+ if (is_vm_hugetlb_page(vma))
+ continue;
+
+ rp.vma = vma;
+ walk_page_range(max(vma->vm_start, start),
+ min(vma->vm_end, end),
+ &reclaim_walk);
+ vma = vma->vm_next;
+ }
+ } else {
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ if (is_vm_hugetlb_page(vma))
+ continue;
+
+ if (type == RECLAIM_ANON && vma->vm_file)
+ continue;
+
+ if (type == RECLAIM_FILE && !vma->vm_file)
+ continue;
+
+ rp.vma = vma;
+ walk_page_range(vma->vm_start, vma->vm_end,
+ &reclaim_walk);
+ }
+ }
+
+ flush_tlb_mm(mm);
+ up_read(&mm->mmap_sem);
+ mmput(mm);
+out:
+ put_task_struct(task);
+ return count;
+
+out_err:
+ return -EINVAL;
+}
+
+const struct file_operations proc_reclaim_operations = {
+ .write = reclaim_write,
+ .llseek = noop_llseek,
+};
+#endif
+
#ifdef CONFIG_NUMA
struct numa_maps {
diff --git a/fs/read_write.c b/fs/read_write.c
index 190e0d36..e479e24 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -1232,7 +1232,7 @@
if (!(file->f_mode & FMODE_CAN_WRITE))
goto out;
- ret = compat_do_readv_writev(WRITE, file, vec, vlen, pos, 0);
+ ret = compat_do_readv_writev(WRITE, file, vec, vlen, pos, flags);
out:
if (ret > 0)
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
index 3d2256a..d92a1dc 100644
--- a/fs/reiserfs/xattr_acl.c
+++ b/fs/reiserfs/xattr_acl.c
@@ -37,7 +37,14 @@
error = journal_begin(&th, inode->i_sb, jcreate_blocks);
reiserfs_write_unlock(inode->i_sb);
if (error == 0) {
+ if (type == ACL_TYPE_ACCESS && acl) {
+ error = posix_acl_update_mode(inode, &inode->i_mode,
+ &acl);
+ if (error)
+ goto unlock;
+ }
error = __reiserfs_set_acl(&th, inode, type, acl);
+unlock:
reiserfs_write_lock(inode->i_sb);
error2 = journal_end(&th);
reiserfs_write_unlock(inode->i_sb);
@@ -241,11 +248,6 @@
switch (type) {
case ACL_TYPE_ACCESS:
name = XATTR_NAME_POSIX_ACL_ACCESS;
- if (acl) {
- error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
- if (error)
- return error;
- }
break;
case ACL_TYPE_DEFAULT:
name = XATTR_NAME_POSIX_ACL_DEFAULT;
diff --git a/fs/romfs/super.c b/fs/romfs/super.c
index d0f8a38..0186fe6 100644
--- a/fs/romfs/super.c
+++ b/fs/romfs/super.c
@@ -74,6 +74,7 @@
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>
+#include <linux/major.h>
#include "internal.h"
static struct kmem_cache *romfs_inode_cachep;
@@ -416,7 +417,22 @@
static int romfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
- u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
+ u64 id = 0;
+
+ /* When calling huge_encode_dev(),
+ * use sb->s_bdev->bd_dev when,
+ * - CONFIG_ROMFS_ON_BLOCK defined
+ * use sb->s_dev when,
+ * - CONFIG_ROMFS_ON_BLOCK undefined and
+ * - CONFIG_ROMFS_ON_MTD defined
+ * leave id as 0 when,
+ * - CONFIG_ROMFS_ON_BLOCK undefined and
+ * - CONFIG_ROMFS_ON_MTD undefined
+ */
+ if (sb->s_bdev)
+ id = huge_encode_dev(sb->s_bdev->bd_dev);
+ else if (sb->s_dev)
+ id = huge_encode_dev(sb->s_dev);
buf->f_type = ROMFS_MAGIC;
buf->f_namelen = ROMFS_MAXFN;
@@ -489,6 +505,11 @@
sb->s_flags |= MS_RDONLY | MS_NOATIME;
sb->s_op = &romfs_super_ops;
+#ifdef CONFIG_ROMFS_ON_MTD
+ /* Use same dev ID from the underlying mtdblock device */
+ if (sb->s_mtd)
+ sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, sb->s_mtd->index);
+#endif
/* read the image superblock and check it */
rsb = kmalloc(512, GFP_KERNEL);
if (!rsb)
diff --git a/fs/sdcardfs/inode.c b/fs/sdcardfs/inode.c
index d48da41..683b492 100644
--- a/fs/sdcardfs/inode.c
+++ b/fs/sdcardfs/inode.c
@@ -773,13 +773,9 @@
* afterwards in the other cases: we fsstack_copy_inode_size from
* the lower level.
*/
- if (current->mm)
- down_write(¤t->mm->mmap_sem);
if (ia->ia_valid & ATTR_SIZE) {
err = inode_newsize_ok(&tmp, ia->ia_size);
if (err) {
- if (current->mm)
- up_write(¤t->mm->mmap_sem);
goto out;
}
truncate_setsize(inode, ia->ia_size);
@@ -802,8 +798,6 @@
err = notify_change2(lower_mnt, lower_dentry, &lower_ia, /* note: lower_ia */
NULL);
inode_unlock(d_inode(lower_dentry));
- if (current->mm)
- up_write(¤t->mm->mmap_sem);
if (err)
goto out;
diff --git a/fs/sdcardfs/main.c b/fs/sdcardfs/main.c
index 3c5b51d..80825b2 100644
--- a/fs/sdcardfs/main.c
+++ b/fs/sdcardfs/main.c
@@ -364,41 +364,34 @@
return err;
}
-/* A feature which supports mount_nodev() with options */
-static struct dentry *mount_nodev_with_options(struct vfsmount *mnt,
- struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data,
- int (*fill_super)(struct vfsmount *, struct super_block *,
- const char *, void *, int))
+struct sdcardfs_mount_private {
+ struct vfsmount *mnt;
+ const char *dev_name;
+ void *raw_data;
+};
+static int __sdcardfs_fill_super(
+ struct super_block *sb,
+ void *_priv, int silent)
{
- int error;
- struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL);
+ struct sdcardfs_mount_private *priv = _priv;
- if (IS_ERR(s))
- return ERR_CAST(s);
-
- s->s_flags = flags;
-
- error = fill_super(mnt, s, dev_name, data, flags & MS_SILENT ? 1 : 0);
- if (error) {
- deactivate_locked_super(s);
- return ERR_PTR(error);
- }
- s->s_flags |= MS_ACTIVE;
- return dget(s->s_root);
+ return sdcardfs_read_super(priv->mnt,
+ sb, priv->dev_name, priv->raw_data, silent);
}
static struct dentry *sdcardfs_mount(struct vfsmount *mnt,
struct file_system_type *fs_type, int flags,
const char *dev_name, void *raw_data)
{
- /*
- * dev_name is a lower_path_name,
- * raw_data is a option string.
- */
- return mount_nodev_with_options(mnt, fs_type, flags, dev_name,
- raw_data, sdcardfs_read_super);
+ struct sdcardfs_mount_private priv = {
+ .mnt = mnt,
+ .dev_name = dev_name,
+ .raw_data = raw_data
+ };
+
+ return mount_nodev(fs_type, flags,
+ &priv, __sdcardfs_fill_super);
}
static struct dentry *sdcardfs_mount_wrn(struct file_system_type *fs_type,
@@ -423,7 +416,7 @@
list_del(&sbi->list);
mutex_unlock(&sdcardfs_super_list_lock);
}
- generic_shutdown_super(sb);
+ kill_anon_super(sb);
}
static struct file_system_type sdcardfs_fs_type = {
diff --git a/fs/sdcardfs/packagelist.c b/fs/sdcardfs/packagelist.c
index 00a0f65..6da0c21 100644
--- a/fs/sdcardfs/packagelist.c
+++ b/fs/sdcardfs/packagelist.c
@@ -174,19 +174,6 @@
return 1;
}
-/* This function is used when file opening. The open flags must be
- * checked before calling check_caller_access_to_name()
- */
-int open_flags_to_access_mode(int open_flags)
-{
- if ((open_flags & O_ACCMODE) == O_RDONLY)
- return 0; /* R_OK */
- if ((open_flags & O_ACCMODE) == O_WRONLY)
- return 1; /* W_OK */
- /* Probably O_RDRW, but treat as default to be safe */
- return 1; /* R_OK | W_OK */
-}
-
static struct hashtable_entry *alloc_hashtable_entry(const struct qstr *key,
appid_t value)
{
diff --git a/fs/sdcardfs/sdcardfs.h b/fs/sdcardfs/sdcardfs.h
index 3687b22..4e0ce49 100644
--- a/fs/sdcardfs/sdcardfs.h
+++ b/fs/sdcardfs/sdcardfs.h
@@ -499,7 +499,6 @@
extern appid_t get_ext_gid(const char *app_name);
extern appid_t is_excluded(const char *app_name, userid_t userid);
extern int check_caller_access_to_name(struct inode *parent_node, const struct qstr *name);
-extern int open_flags_to_access_mode(int open_flags);
extern int packagelist_init(void);
extern void packagelist_exit(void);
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index 91bc76dc..7d764e3 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -576,7 +576,7 @@
/* Make sure to also account for extended attributes */
len += host_ui->data_len;
- dent = kmalloc(len, GFP_NOFS);
+ dent = kzalloc(len, GFP_NOFS);
if (!dent)
return -ENOMEM;
@@ -952,7 +952,7 @@
if (twoparents)
len += plen;
- dent1 = kmalloc(len, GFP_NOFS);
+ dent1 = kzalloc(len, GFP_NOFS);
if (!dent1)
return -ENOMEM;
@@ -1102,7 +1102,7 @@
len = aligned_dlen1 + aligned_dlen2 + ALIGN(ilen, 8) + ALIGN(plen, 8);
if (move)
len += plen;
- dent = kmalloc(len, GFP_NOFS);
+ dent = kzalloc(len, GFP_NOFS);
if (!dent)
return -ENOMEM;
@@ -1466,7 +1466,7 @@
hlen = host_ui->data_len + UBIFS_INO_NODE_SZ;
len = aligned_xlen + UBIFS_INO_NODE_SZ + ALIGN(hlen, 8);
- xent = kmalloc(len, GFP_NOFS);
+ xent = kzalloc(len, GFP_NOFS);
if (!xent)
return -ENOMEM;
@@ -1573,7 +1573,7 @@
aligned_len1 = ALIGN(len1, 8);
aligned_len = aligned_len1 + ALIGN(len2, 8);
- ino = kmalloc(aligned_len, GFP_NOFS);
+ ino = kzalloc(aligned_len, GFP_NOFS);
if (!ino)
return -ENOMEM;
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 129b18a..0359435 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1243,8 +1243,8 @@
return err;
}
set_size:
- truncate_setsize(inode, newsize);
up_write(&iinfo->i_data_sem);
+ truncate_setsize(inode, newsize);
} else {
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
down_write(&iinfo->i_data_sem);
@@ -1261,9 +1261,9 @@
udf_get_block);
if (err)
return err;
+ truncate_setsize(inode, newsize);
down_write(&iinfo->i_data_sem);
udf_clear_extent_cache(inode);
- truncate_setsize(inode, newsize);
udf_truncate_extents(inode);
up_write(&iinfo->i_data_sem);
}
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 69c867c0..2cde073 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -63,6 +63,7 @@
struct uffd_msg msg;
wait_queue_t wq;
struct userfaultfd_ctx *ctx;
+ bool waken;
};
struct userfaultfd_wake_range {
@@ -86,6 +87,12 @@
if (len && (start > uwq->msg.arg.pagefault.address ||
start + len <= uwq->msg.arg.pagefault.address))
goto out;
+ WRITE_ONCE(uwq->waken, true);
+ /*
+ * The implicit smp_mb__before_spinlock in try_to_wake_up()
+ * renders uwq->waken visible to other CPUs before the task is
+ * waken.
+ */
ret = wake_up_state(wq->private, mode);
if (ret)
/*
@@ -264,6 +271,7 @@
struct userfaultfd_wait_queue uwq;
int ret;
bool must_wait, return_to_userland;
+ long blocking_state;
BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
@@ -333,10 +341,13 @@
uwq.wq.private = current;
uwq.msg = userfault_msg(fe->address, fe->flags, reason);
uwq.ctx = ctx;
+ uwq.waken = false;
return_to_userland =
(fe->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) ==
(FAULT_FLAG_USER|FAULT_FLAG_KILLABLE);
+ blocking_state = return_to_userland ? TASK_INTERRUPTIBLE :
+ TASK_KILLABLE;
spin_lock(&ctx->fault_pending_wqh.lock);
/*
@@ -349,8 +360,7 @@
* following the spin_unlock to happen before the list_add in
* __add_wait_queue.
*/
- set_current_state(return_to_userland ? TASK_INTERRUPTIBLE :
- TASK_KILLABLE);
+ set_current_state(blocking_state);
spin_unlock(&ctx->fault_pending_wqh.lock);
must_wait = userfaultfd_must_wait(ctx, fe->address, fe->flags, reason);
@@ -362,6 +372,29 @@
wake_up_poll(&ctx->fd_wqh, POLLIN);
schedule();
ret |= VM_FAULT_MAJOR;
+
+ /*
+ * False wakeups can originate even from rwsem before
+ * up_read() however userfaults will wait either for a
+ * targeted wakeup on the specific uwq waitqueue from
+ * wake_userfault() or for signals or for uffd
+ * release.
+ */
+ while (!READ_ONCE(uwq.waken)) {
+ /*
+ * This needs the full smp_store_mb()
+ * guarantee as the state write must be
+ * visible to other CPUs before reading
+ * uwq.waken from other CPUs.
+ */
+ set_current_state(blocking_state);
+ if (READ_ONCE(uwq.waken) ||
+ READ_ONCE(ctx->released) ||
+ (return_to_userland ? signal_pending(current) :
+ fatal_signal_pending(current)))
+ break;
+ schedule();
+ }
}
__set_current_state(TASK_RUNNING);
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index b468e04..7034e17 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -170,8 +170,8 @@
return acl;
}
-STATIC int
-__xfs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
+int
+__xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
{
struct xfs_inode *ip = XFS_I(inode);
unsigned char *ea_name;
@@ -268,5 +268,5 @@
}
set_acl:
- return __xfs_set_acl(inode, type, acl);
+ return __xfs_set_acl(inode, acl, type);
}
diff --git a/fs/xfs/xfs_acl.h b/fs/xfs/xfs_acl.h
index 286fa89..0432731 100644
--- a/fs/xfs/xfs_acl.h
+++ b/fs/xfs/xfs_acl.h
@@ -24,6 +24,7 @@
#ifdef CONFIG_XFS_POSIX_ACL
extern struct posix_acl *xfs_get_acl(struct inode *inode, int type);
extern int xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type);
+extern int __xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type);
#else
static inline struct posix_acl *xfs_get_acl(struct inode *inode, int type)
{
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index f5e0f60..a1247c3 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -190,12 +190,12 @@
#ifdef CONFIG_XFS_POSIX_ACL
if (default_acl) {
- error = xfs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
+ error = __xfs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
if (error)
goto out_cleanup_inode;
}
if (acl) {
- error = xfs_set_acl(inode, acl, ACL_TYPE_ACCESS);
+ error = __xfs_set_acl(inode, acl, ACL_TYPE_ACCESS);
if (error)
goto out_cleanup_inode;
}
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index e9fb2e8..0c4f9c67 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -776,6 +776,7 @@
struct drm_minor *control; /**< Control node */
struct drm_minor *primary; /**< Primary node */
struct drm_minor *render; /**< Render node */
+ bool registered;
/* currently active master for this device. Protected by master_mutex */
struct drm_master *master;
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 1c12875..b28c4a3 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -345,6 +345,8 @@
* core drm connector interfaces. Everything added from this callback
* should be unregistered in the early_unregister callback.
*
+ * This is called while holding drm_connector->mutex.
+ *
* Returns:
*
* 0 on success, or a negative error code on failure.
@@ -359,6 +361,8 @@
* late_register(). It is called from drm_connector_unregister(),
* early in the driver unload sequence to disable userspace access
* before data structures are torndown.
+ *
+ * This is called while holding drm_connector->mutex.
*/
void (*early_unregister)(struct drm_connector *connector);
@@ -511,7 +515,6 @@
* @interlace_allowed: can this connector handle interlaced modes?
* @doublescan_allowed: can this connector handle doublescan?
* @stereo_allowed: can this connector handle stereo modes?
- * @registered: is this connector exposed (registered) with userspace?
* @modes: modes available on this connector (from fill_modes() + user)
* @status: one of the drm_connector_status enums (connected, not, or unknown)
* @probed_modes: list of modes derived directly from the display
@@ -574,6 +577,13 @@
char *name;
/**
+ * @mutex: Lock for general connector state, but currently only protects
+ * @registered. Most of the connector state is still protected by the
+ * mutex in &drm_mode_config.
+ */
+ struct mutex mutex;
+
+ /**
* @index: Compacted connector index, which matches the position inside
* the mode_config.list for drivers not supporting hot-add/removing. Can
* be used as an array index. It is invariant over the lifetime of the
@@ -586,6 +596,10 @@
bool interlace_allowed;
bool doublescan_allowed;
bool stereo_allowed;
+ /**
+ * @registered: Is this connector exposed (registered) with userspace?
+ * Protected by @mutex.
+ */
bool registered;
struct list_head modes; /* list of modes on this connector */
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 32bd104..1bcf8f7 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -212,6 +212,11 @@
#define DRM_EDID_HDMI_DC_30 (1 << 4)
#define DRM_EDID_HDMI_DC_Y444 (1 << 3)
+/* YCBCR 420 deep color modes */
+#define DRM_EDID_YCBCR420_DC_48 (1 << 2)
+#define DRM_EDID_YCBCR420_DC_36 (1 << 1)
+#define DRM_EDID_YCBCR420_DC_30 (1 << 0)
+
/* ELD Header Block */
#define DRM_ELD_HEADER_BLOCK_SIZE 4
diff --git a/include/dt-bindings/clock/mdss-10nm-pll-clk.h b/include/dt-bindings/clock/mdss-10nm-pll-clk.h
index 75ddcfa..8108c98 100644
--- a/include/dt-bindings/clock/mdss-10nm-pll-clk.h
+++ b/include/dt-bindings/clock/mdss-10nm-pll-clk.h
@@ -17,23 +17,25 @@
/* DSI PLL clocks */
#define VCO_CLK_0 0
-#define BITCLK_SRC_0_CLK 1
-#define BYTECLK_SRC_0_CLK 2
-#define POST_BIT_DIV_0_CLK 3
-#define POST_VCO_DIV_0_CLK 4
-#define BYTECLK_MUX_0_CLK 5
-#define PCLK_SRC_MUX_0_CLK 6
-#define PCLK_SRC_0_CLK 7
-#define PCLK_MUX_0_CLK 8
-#define VCO_CLK_1 9
-#define BITCLK_SRC_1_CLK 10
-#define BYTECLK_SRC_1_CLK 11
-#define POST_BIT_DIV_1_CLK 12
-#define POST_VCO_DIV_1_CLK 13
-#define BYTECLK_MUX_1_CLK 14
-#define PCLK_SRC_MUX_1_CLK 15
-#define PCLK_SRC_1_CLK 16
-#define PCLK_MUX_1_CLK 17
+#define PLL_OUT_DIV_0_CLK 1
+#define BITCLK_SRC_0_CLK 2
+#define BYTECLK_SRC_0_CLK 3
+#define POST_BIT_DIV_0_CLK 4
+#define POST_VCO_DIV_0_CLK 5
+#define BYTECLK_MUX_0_CLK 6
+#define PCLK_SRC_MUX_0_CLK 7
+#define PCLK_SRC_0_CLK 8
+#define PCLK_MUX_0_CLK 9
+#define VCO_CLK_1 10
+#define PLL_OUT_DIV_1_CLK 11
+#define BITCLK_SRC_1_CLK 12
+#define BYTECLK_SRC_1_CLK 13
+#define POST_BIT_DIV_1_CLK 14
+#define POST_VCO_DIV_1_CLK 15
+#define BYTECLK_MUX_1_CLK 16
+#define PCLK_SRC_MUX_1_CLK 17
+#define PCLK_SRC_1_CLK 18
+#define PCLK_MUX_1_CLK 19
/* DP PLL clocks */
#define DP_VCO_CLK 0
diff --git a/include/dt-bindings/clock/qcom,aop-qmp.h b/include/dt-bindings/clock/qcom,aop-qmp.h
index b88dc36..7898c47 100644
--- a/include/dt-bindings/clock/qcom,aop-qmp.h
+++ b/include/dt-bindings/clock/qcom,aop-qmp.h
@@ -25,5 +25,5 @@
/* clocks id */
#define QDSS_CLK 0
-
+#define QDSS_AO_CLK 1
#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm845.h b/include/dt-bindings/clock/qcom,gcc-sdm845.h
index 678a885..339d470 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdm845.h
@@ -201,7 +201,17 @@
#define GCC_SDCC1_ICE_CORE_CLK 183
#define GCC_SDCC1_APPS_CLK_SRC 184
#define GCC_SDCC1_ICE_CORE_CLK_SRC 185
-
+#define GCC_APC_VS_CLK 186
+#define GCC_GPU_VS_CLK 187
+#define GCC_MSS_VS_CLK 188
+#define GCC_VDDA_VS_CLK 189
+#define GCC_VDDCX_VS_CLK 190
+#define GCC_VDDMX_VS_CLK 191
+#define GCC_VS_CTRL_AHB_CLK 192
+#define GCC_VS_CTRL_CLK 193
+#define GCC_VS_CTRL_CLK_SRC 194
+#define GCC_VSENSOR_CLK_SRC 195
+#define GPLL4 196
/* GCC reset clocks */
#define GCC_MMSS_BCR 0
diff --git a/include/dt-bindings/clock/qcom,gpucc-sdm845.h b/include/dt-bindings/clock/qcom,gpucc-sdm845.h
index 323beaf..74a742a 100644
--- a/include/dt-bindings/clock/qcom,gpucc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,gpucc-sdm845.h
@@ -44,6 +44,7 @@
#define GPU_CC_GMU_CLK_SRC 26
#define GPU_CC_CX_GFX3D_CLK 27
#define GPU_CC_CX_GFX3D_SLV_CLK 28
+#define GPU_CC_PLL0 29
/* GPUCC reset clock registers */
#define GPUCC_GPU_CC_ACD_BCR 0
@@ -55,7 +56,6 @@
#define GPUCC_GPU_CC_XO_BCR 6
/* GFX3D clock registers */
-#define GPU_CC_PLL0 0
#define GPU_CC_PLL0_OUT_EVEN 1
#define GPU_CC_GX_GFX3D_CLK_SRC 2
#define GPU_CC_GX_GFX3D_CLK 3
diff --git a/include/dt-bindings/msm/msm-bus-ids.h b/include/dt-bindings/msm/msm-bus-ids.h
index 8bd30d4..bc87beb 100644
--- a/include/dt-bindings/msm/msm-bus-ids.h
+++ b/include/dt-bindings/msm/msm-bus-ids.h
@@ -250,7 +250,8 @@
#define MSM_BUS_MASTER_CAMNOC_HF0_UNCOMP 146
#define MSM_BUS_MASTER_CAMNOC_HF1_UNCOMP 147
#define MSM_BUS_MASTER_CAMNOC_SF_UNCOMP 148
-#define MSM_BUS_MASTER_MASTER_LAST 149
+#define MSM_BUS_MASTER_GIC 149
+#define MSM_BUS_MASTER_MASTER_LAST 150
#define MSM_BUS_MASTER_LLCC_DISPLAY 20000
#define MSM_BUS_MASTER_MNOC_HF_MEM_NOC_DISPLAY 20001
@@ -330,7 +331,8 @@
#define MSM_BUS_A2NOC_SNOC_SLV 10065
#define MSM_BUS_SNOC_INT_2 10066
#define MSM_BUS_A0NOC_QDSS_INT 10067
-#define MSM_BUS_INT_LAST 10068
+#define MSM_BUS_SLAVE_ANOC_PCIE_A1NOC_SNOC 10068
+#define MSM_BUS_INT_LAST 10069
#define MSM_BUS_INT_TEST_ID 20000
#define MSM_BUS_INT_TEST_LAST 20050
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index fb910c6..0693c3e 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -508,6 +508,7 @@
#define QUEUE_FLAG_FUA 24 /* device supports FUA writes */
#define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueuable */
#define QUEUE_FLAG_DAX 26 /* device supports DAX */
+#define QUEUE_FLAG_FAST 27 /* fast block device (e.g. ram based) */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
@@ -598,6 +599,7 @@
#define blk_queue_secure_erase(q) \
(test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
#define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
+#define blk_queue_fast(q) test_bit(QUEUE_FLAG_FAST, &(q)->queue_flags)
#define blk_noretry_request(rq) \
((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index c201017..97498be 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -243,6 +243,8 @@
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
int bpf_map_precharge_memlock(u32 pages);
+void *bpf_map_area_alloc(size_t size);
+void bpf_map_area_free(void *base);
extern int sysctl_unprivileged_bpf_disabled;
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 8ee110a..a52b65a 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -494,6 +494,7 @@
#define CLK_DIVIDER_ROUND_CLOSEST BIT(4)
#define CLK_DIVIDER_READ_ONLY BIT(5)
#define CLK_DIVIDER_MAX_AT_ZERO BIT(6)
+#define CLK_DIVIDER_ROUND_KHZ BIT(7)
extern const struct clk_ops clk_divider_ops;
extern const struct clk_ops clk_divider_ro_ops;
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 29f9e77..d04f178 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -18,13 +18,15 @@
extern unsigned long totalcma_pages;
extern phys_addr_t cma_get_base(const struct cma *cma);
extern unsigned long cma_get_size(const struct cma *cma);
+extern const char *cma_get_name(const struct cma *cma);
extern int __init cma_declare_contiguous(phys_addr_t base,
phys_addr_t size, phys_addr_t limit,
phys_addr_t alignment, unsigned int order_per_bit,
- bool fixed, struct cma **res_cma);
+ bool fixed, const char *name, struct cma **res_cma);
extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
unsigned int order_per_bit,
+ const char *name,
struct cma **res_cma);
extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align);
extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
diff --git a/include/linux/coredump.h b/include/linux/coredump.h
index d016a12..28ffa94 100644
--- a/include/linux/coredump.h
+++ b/include/linux/coredump.h
@@ -14,6 +14,7 @@
extern int dump_skip(struct coredump_params *cprm, size_t nr);
extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr);
extern int dump_align(struct coredump_params *cprm, int align);
+extern void dump_truncate(struct coredump_params *cprm);
#ifdef CONFIG_COREDUMP
extern void do_coredump(const siginfo_t *siginfo);
#else
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 0538291..10842bb 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -266,9 +266,13 @@
#ifdef CONFIG_OF
extern struct coresight_platform_data *of_get_coresight_platform_data(
struct device *dev, struct device_node *node);
+extern struct coresight_cti_data *of_get_coresight_cti_data(
+ struct device *dev, struct device_node *node);
#else
static inline struct coresight_platform_data *of_get_coresight_platform_data(
struct device *dev, struct device_node *node) { return NULL; }
+static inlint struct coresight_cti_data *of_get_coresight_cti_data(
+ struct device *dev, struct device_node *node) { return NULL; }
#endif
#ifdef CONFIG_PID_NS
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 9f93d18..edf88bd 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -40,6 +40,7 @@
CPUHP_SLAB_PREPARE,
CPUHP_MD_RAID5_PREPARE,
CPUHP_RCUTREE_PREP,
+ CPUHP_CORE_CTL_ISOLATION_DEAD,
CPUHP_CPUIDLE_COUPLED_PREPARE,
CPUHP_POWERPC_PMAC_PREPARE,
CPUHP_POWERPC_MMU_CTX_PREPARE,
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index b8eb25b..b49f866 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -103,6 +103,13 @@
#define num_present_cpus() cpumask_weight(cpu_present_mask)
#define num_active_cpus() cpumask_weight(cpu_active_mask)
#define num_isolated_cpus() cpumask_weight(cpu_isolated_mask)
+#define num_online_uniso_cpus() \
+({ \
+ cpumask_t mask; \
+ \
+ cpumask_andnot(&mask, cpu_online_mask, cpu_isolated_mask); \
+ cpumask_weight(&mask); \
+})
#define cpu_online(cpu) cpumask_test_cpu((cpu), cpu_online_mask)
#define cpu_possible(cpu) cpumask_test_cpu((cpu), cpu_possible_mask)
#define cpu_present(cpu) cpumask_test_cpu((cpu), cpu_present_mask)
@@ -114,6 +121,7 @@
#define num_present_cpus() 1U
#define num_active_cpus() 1U
#define num_isolated_cpus() 0U
+#define num_online_uniso_cpus() 1U
#define cpu_online(cpu) ((cpu) == 0)
#define cpu_possible(cpu) ((cpu) == 0)
#define cpu_present(cpu) ((cpu) == 0)
@@ -243,6 +251,23 @@
(cpu) = cpumask_next_zero((cpu), (mask)), \
(cpu) < nr_cpu_ids;)
+extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
+
+/**
+ * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask: the cpumask pointer
+ * @start: the start location
+ *
+ * The implementation does not assume any bit in @mask is set (including @start).
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
+#define for_each_cpu_wrap(cpu, mask, start) \
+ for ((cpu) = cpumask_next_wrap((start)-1, (mask), (start), false); \
+ (cpu) < nr_cpumask_bits; \
+ (cpu) = cpumask_next_wrap((cpu), (mask), (start), true))
+
/**
* for_each_cpu_and - iterate over every cpu in both masks
* @cpu: the (optionally unsigned) integer iterator
diff --git a/include/linux/device.h b/include/linux/device.h
index d469121..f43db28 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -375,6 +375,7 @@
* @suspend: Used to put the device to sleep mode, usually to a low power
* state.
* @resume: Used to bring the device from the sleep mode.
+ * @shutdown: Called at shut-down time to quiesce the device.
* @ns_type: Callbacks so sysfs can detemine namespaces.
* @namespace: Namespace of the device belongs to this class.
* @pm: The default device power management operations of this class.
@@ -403,6 +404,7 @@
int (*suspend)(struct device *dev, pm_message_t state);
int (*resume)(struct device *dev);
+ int (*shutdown)(struct device *dev);
const struct kobj_ns_type_operations *ns_type;
const void *(*namespace)(struct device *dev);
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
index 1731c3a..95fe239 100644
--- a/include/linux/diagchar.h
+++ b/include/linux/diagchar.h
@@ -65,6 +65,7 @@
#define DIAG_IOCTL_PERIPHERAL_BUF_DRAIN 36
#define DIAG_IOCTL_REGISTER_CALLBACK 37
#define DIAG_IOCTL_HDLC_TOGGLE 38
+#define DIAG_IOCTL_QUERY_PD_LOGGING 39
/* PC Tools IDs */
#define APQ8060_TOOLS_ID 4062
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index b871c0c..a9a16f2 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -202,6 +202,7 @@
/* Internal data. Please do not set. */
struct device dev;
struct raw_notifier_head *nh;
+ struct blocking_notifier_head *bnh;
struct list_head entry;
int max_supported;
spinlock_t lock; /* could be called by irq handler */
@@ -289,6 +290,10 @@
struct notifier_block *nb);
extern int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb);
+extern int extcon_register_blocking_notifier(struct extcon_dev *edev,
+ unsigned int id, struct notifier_block *nb);
+extern int extcon_unregister_blocking_notifier(struct extcon_dev *edev,
+ unsigned int id, struct notifier_block *nb);
extern int devm_extcon_register_notifier(struct device *dev,
struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb);
@@ -306,7 +311,8 @@
/* Following API to get information of extcon device */
extern const char *extcon_get_edev_name(struct extcon_dev *edev);
-
+extern int extcon_blocking_sync(struct extcon_dev *edev, unsigned int id,
+ bool val);
#else /* CONFIG_EXTCON */
static inline int extcon_dev_register(struct extcon_dev *edev)
{
@@ -413,6 +419,20 @@
return 0;
}
+static inline int extcon_register_blocking_notifier(struct extcon_dev *edev,
+ unsigned int id,
+ struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int extcon_unregister_blocking_notifier(struct extcon_dev *edev,
+ unsigned int id,
+ struct notifier_block *nb)
+{
+ return 0;
+}
+
static inline int devm_extcon_register_notifier(struct device *dev,
struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb)
diff --git a/include/linux/fcntl.h b/include/linux/fcntl.h
index 76ce329..1b48d9c 100644
--- a/include/linux/fcntl.h
+++ b/include/linux/fcntl.h
@@ -3,6 +3,12 @@
#include <uapi/linux/fcntl.h>
+/* list of all valid flags for the open/openat flags argument: */
+#define VALID_OPEN_FLAGS \
+ (O_RDONLY | O_WRONLY | O_RDWR | O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC | \
+ O_APPEND | O_NDELAY | O_NONBLOCK | O_NDELAY | __O_SYNC | O_DSYNC | \
+ FASYNC | O_DIRECT | O_LARGEFILE | O_DIRECTORY | O_NOFOLLOW | \
+ O_NOATIME | O_CLOEXEC | O_PATH | __O_TMPFILE)
#ifndef force_o_largefile
#define force_o_largefile() (BITS_PER_LONG != 32)
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index 13ba552..4c467ef 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -360,6 +360,7 @@
#define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */
#define FSCACHE_OBJECT_RETIRED 6 /* T if object was retired on relinquishment */
#define FSCACHE_OBJECT_KILLED_BY_CACHE 7 /* T if object was killed by the cache */
+#define FSCACHE_OBJECT_RUN_AFTER_DEAD 8 /* T if object has been dispatched after death */
struct list_head cache_link; /* link in cache->object_list */
struct hlist_node cookie_link; /* link in cookie->backing_objects */
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index f8041f9de..46cd745 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -41,6 +41,7 @@
#define ___GFP_OTHER_NODE 0x800000u
#define ___GFP_WRITE 0x1000000u
#define ___GFP_KSWAPD_RECLAIM 0x2000000u
+#define ___GFP_CMA 0x4000000u
/* If the above are modified, __GFP_BITS_SHIFT may need updating */
/*
@@ -54,8 +55,9 @@
#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
-#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
-
+#define __GFP_CMA ((__force gfp_t)___GFP_CMA)
+#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE| \
+ __GFP_CMA)
/*
* Page mobility and placement hints
*
@@ -274,7 +276,12 @@
return MIGRATE_UNMOVABLE;
/* Group based on mobility */
+#ifndef CONFIG_CMA
return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
+#else
+ return ((gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT) |
+ ((gfp_flags & __GFP_CMA) != 0);
+#endif
}
#undef GFP_MOVABLE_MASK
#undef GFP_MOVABLE_SHIFT
diff --git a/include/linux/hdcp_qseecom.h b/include/linux/hdcp_qseecom.h
new file mode 100644
index 0000000..20f5cba
--- /dev/null
+++ b/include/linux/hdcp_qseecom.h
@@ -0,0 +1,224 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __HDCP_QSEECOM_H
+#define __HDCP_QSEECOM_H
+#include <linux/types.h>
+
+#define HDCP_MAX_MESSAGE_PARTS 4
+
+/**
+ * enum hdcp_lib_wakeup_cmd - commands for interacting with HDCP driver
+ * @HDCP_LIB_WKUP_CMD_INVALID: initialization value
+ * @HDCP_LIB_WKUP_CMD_START: start authentication
+ * @HDCP_LIB_WKUP_CMD_STOP: stop authentication
+ * @HDCP_LIB_WKUP_CMD_MSG_SEND_SUCCESS: sending message to sink succeeded
+ * @HDCP_LIB_WKUP_CMD_MSG_SEND_FAILED: sending message to sink failed
+ * @HDCP_LIB_WKUP_CMD_MSG_RECV_SUCCESS: receiving message from sink succeeded
+ * @HDCP_LIB_WKUP_CMD_MSG_RECV_FAILED: receiving message from sink failed
+ * @HDCP_LIB_WKUP_CMD_MSG_RECV_TIMEOUT: receiving message from sink timed out
+ * @HDCP_LIB_WKUP_CMD_QUERY_STREAM_TYPE: start content stream processing
+ * @HDCP_LIB_WKUP_CMD_LINK_FAILED: link failure notification
+ */
+enum hdcp_lib_wakeup_cmd {
+ HDCP_LIB_WKUP_CMD_INVALID,
+ HDCP_LIB_WKUP_CMD_START,
+ HDCP_LIB_WKUP_CMD_STOP,
+ HDCP_LIB_WKUP_CMD_MSG_SEND_SUCCESS,
+ HDCP_LIB_WKUP_CMD_MSG_SEND_FAILED,
+ HDCP_LIB_WKUP_CMD_MSG_RECV_SUCCESS,
+ HDCP_LIB_WKUP_CMD_MSG_RECV_FAILED,
+ HDCP_LIB_WKUP_CMD_MSG_RECV_TIMEOUT,
+ HDCP_LIB_WKUP_CMD_QUERY_STREAM_TYPE,
+ HDCP_LIB_WKUP_CMD_LINK_FAILED,
+};
+
+/**
+ * enum hdcp_wakeup_cmd - commands for interacting with display transport layer
+ * @HDCP_WKUP_CMD_INVALID: initialization value
+ * @HDCP_WKUP_CMD_SEND_MESSAGE: send message to sink
+ * @HDCP_WKUP_CMD_RECV_MESSAGE: receive message from sink
+ * @HDCP_WKUP_CMD_STATUS_SUCCESS: successfully communicated with TrustZone
+ * @HDCP_WKUP_CMD_STATUS_FAILED: failed to communicate with TrustZone
+ * @HDCP_WKUP_CMD_LINK_POLL: poll the HDCP link
+ * @HDCP_WKUP_CMD_AUTHENTICATE: start authentication
+ */
+enum hdcp_wakeup_cmd {
+ HDCP_WKUP_CMD_INVALID,
+ HDCP_WKUP_CMD_SEND_MESSAGE,
+ HDCP_WKUP_CMD_RECV_MESSAGE,
+ HDCP_WKUP_CMD_STATUS_SUCCESS,
+ HDCP_WKUP_CMD_STATUS_FAILED,
+ HDCP_WKUP_CMD_LINK_POLL,
+ HDCP_WKUP_CMD_AUTHENTICATE
+};
+
+/**
+ * struct hdcp_lib_wakeup_data - command and data send to HDCP driver
+ * @cmd: command type
+ * @context: void pointer to the HDCP driver instance
+ * @recvd_msg_buf: message received from the sink
+ * @recvd_msg_len: length of message received from the sink
+ * @timeout: time out value for timed transactions
+ */
+struct hdcp_lib_wakeup_data {
+ enum hdcp_lib_wakeup_cmd cmd;
+ void *context;
+ char *recvd_msg_buf;
+ uint32_t recvd_msg_len;
+ uint32_t timeout;
+};
+
+/**
+ * struct hdcp_msg_part - a single part of an HDCP 2.2 message
+ * @name: user readable message name
+ * @offset: message part offset
+ * @length message part length
+ */
+struct hdcp_msg_part {
+ char *name;
+ uint32_t offset;
+ uint32_t length;
+};
+
+/**
+ * struct hdcp_msg_data - a full HDCP 2.2 message containing one or more parts
+ * @num_messages: total number of parts in a full message
+ * @messages: array containing num_messages parts
+ * @rx_status: value of rx_status register
+ */
+struct hdcp_msg_data {
+ uint32_t num_messages;
+ struct hdcp_msg_part messages[HDCP_MAX_MESSAGE_PARTS];
+ uint8_t rx_status;
+};
+
+/**
+ * struct hdcp_wakeup_data - command and data sent to display transport layer
+ * @cmd: command type
+ * @context: void pointer to the display transport layer
+ * @send_msg_buf: buffer containing message to be sent to sink
+ * @send_msg_len: length of the message to be sent to sink
+ * @timeout: timeout value for timed transactions
+ * @abort_mask: mask used to determine whether HDCP link is valid
+ * @message_data: a pointer to the message description
+ */
+struct hdcp_wakeup_data {
+ enum hdcp_wakeup_cmd cmd;
+ void *context;
+ char *send_msg_buf;
+ uint32_t send_msg_len;
+ uint32_t timeout;
+ uint8_t abort_mask;
+ const struct hdcp_msg_data *message_data;
+};
+
+static inline char *hdcp_cmd_to_str(uint32_t cmd)
+{
+ switch (cmd) {
+ case HDCP_WKUP_CMD_SEND_MESSAGE:
+ return "HDCP_WKUP_CMD_SEND_MESSAGE";
+ case HDCP_WKUP_CMD_RECV_MESSAGE:
+ return "HDCP_WKUP_CMD_RECV_MESSAGE";
+ case HDCP_WKUP_CMD_STATUS_SUCCESS:
+ return "HDCP_WKUP_CMD_STATUS_SUCCESS";
+ case HDCP_WKUP_CMD_STATUS_FAILED:
+ return "HDCP_WKUP_CMD_STATUS_FAIL";
+ case HDCP_WKUP_CMD_LINK_POLL:
+ return "HDCP_WKUP_CMD_LINK_POLL";
+ case HDCP_WKUP_CMD_AUTHENTICATE:
+ return "HDCP_WKUP_CMD_AUTHENTICATE";
+ default:
+ return "???";
+ }
+}
+
+static inline char *hdcp_lib_cmd_to_str(uint32_t cmd)
+{
+ switch (cmd) {
+ case HDCP_LIB_WKUP_CMD_START:
+ return "HDCP_LIB_WKUP_CMD_START";
+ case HDCP_LIB_WKUP_CMD_STOP:
+ return "HDCP_LIB_WKUP_CMD_STOP";
+ case HDCP_LIB_WKUP_CMD_MSG_SEND_SUCCESS:
+ return "HDCP_LIB_WKUP_CMD_MSG_SEND_SUCCESS";
+ case HDCP_LIB_WKUP_CMD_MSG_SEND_FAILED:
+ return "HDCP_LIB_WKUP_CMD_MSG_SEND_FAILED";
+ case HDCP_LIB_WKUP_CMD_MSG_RECV_SUCCESS:
+ return "HDCP_LIB_WKUP_CMD_MSG_RECV_SUCCESS";
+ case HDCP_LIB_WKUP_CMD_MSG_RECV_FAILED:
+ return "HDCP_LIB_WKUP_CMD_MSG_RECV_FAILED";
+ case HDCP_LIB_WKUP_CMD_MSG_RECV_TIMEOUT:
+ return "HDCP_LIB_WKUP_CMD_MSG_RECV_TIMEOUT";
+ case HDCP_LIB_WKUP_CMD_QUERY_STREAM_TYPE:
+ return "HDCP_LIB_WKUP_CMD_QUERY_STREAM_TYPE";
+ case HDCP_LIB_WKUP_CMD_LINK_FAILED:
+ return "HDCP_LIB_WKUP_CMD_LINK_FAILED";
+ default:
+ return "???";
+ }
+}
+
+/**
+ * struct hdcp_txmtr_ops - interface to HDCP Driver
+ * @wakeup: wake the HDCP driver with a new command
+ * @feature_supported: checks for HDCP support on the target device
+ */
+struct hdcp_txmtr_ops {
+ int (*wakeup)(struct hdcp_lib_wakeup_data *data);
+ bool (*feature_supported)(void *phdcpcontext);
+};
+
+/**
+ * struct hdcp_client_ops - call back functions to display transport layer
+ * @wakeup: wake up display transport layer with a new command
+ * @notify_lvl_change notify of encryption level changes
+ */
+struct hdcp_client_ops {
+ int (*wakeup)(struct hdcp_wakeup_data *data);
+ void (*notify_lvl_change)(void *client_ctx, int min_lvl);
+};
+
+/**
+ * enum hdcp_device_type - display interface types
+ * @HDCP_TXMTR_HDMI: HDMI interface
+ * @HDCP_TXMTR_DP: DisplayPort interface
+ */
+enum hdcp_device_type {
+ HDCP_TXMTR_HDMI = 0x8001,
+ HDCP_TXMTR_DP = 0x8002
+};
+
+/**
+ * struct hdcp_register_data - data used in HDCP driver clients' registration
+ * @client_ops: call back functions from the client
+ * @txmtr_ops: HDCP driver interface
+ * @device_type: display interface type of the client
+ * @client_ctx: void pointer to client data object
+ * @hdcp_ctx: void pointer to HDCP driver reference for client use
+ */
+struct hdcp_register_data {
+ struct hdcp_client_ops *client_ops;
+ struct hdcp_txmtr_ops *txmtr_ops;
+ enum hdcp_device_type device_type;
+ void *client_ctx;
+ void **hdcp_ctx;
+};
+
+int hdcp_library_register(struct hdcp_register_data *data);
+void hdcp_library_deregister(void *phdcpcontext);
+bool hdcp1_check_if_supported_load_app(void);
+int hdcp1_set_keys(uint32_t *aksv_msb, uint32_t *aksv_lsb);
+int hdcp1_set_enc(bool enable);
+void hdcp1_cache_repeater_topology(void *hdcp1_cached_tp);
+void hdcp1_notify_topology(void);
+#endif /* __HDCP_QSEECOM_H */
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 4c70716..61aff32 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -187,9 +187,24 @@
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
unsigned long vaddr)
{
+#ifndef CONFIG_CMA
return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
+#else
+ return __alloc_zeroed_user_highpage(__GFP_MOVABLE|__GFP_CMA, vma,
+ vaddr);
+#endif
}
+#ifdef CONFIG_CMA
+static inline struct page *
+alloc_zeroed_user_highpage_movable_cma(struct vm_area_struct *vma,
+ unsigned long vaddr)
+{
+ return __alloc_zeroed_user_highpage(__GFP_MOVABLE|__GFP_CMA, vma,
+ vaddr);
+}
+#endif
+
static inline void clear_highpage(struct page *page)
{
void *kaddr = kmap_atomic(page);
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index a80516f..aa2b4e4 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -1576,6 +1576,9 @@
#define WLAN_AUTH_SHARED_KEY 1
#define WLAN_AUTH_FT 2
#define WLAN_AUTH_SAE 3
+#define WLAN_AUTH_FILS_SK 4
+#define WLAN_AUTH_FILS_SK_PFS 5
+#define WLAN_AUTH_FILS_PK 6
#define WLAN_AUTH_LEAP 128
#define WLAN_AUTH_CHALLENGE_LEN 128
@@ -1715,6 +1718,9 @@
WLAN_STATUS_REJECT_DSE_BAND = 96,
WLAN_STATUS_DENIED_WITH_SUGGESTED_BAND_AND_CHANNEL = 99,
WLAN_STATUS_DENIED_DUE_TO_SPECTRUM_MANAGEMENT = 103,
+ /* 802.11ai */
+ WLAN_STATUS_FILS_AUTHENTICATION_FAILURE = 108,
+ WLAN_STATUS_UNKNOWN_AUTHENTICATION_SERVER = 109,
};
@@ -2073,6 +2079,15 @@
#define IEEE80211_GCMP_MIC_LEN 16
#define IEEE80211_GCMP_PN_LEN 6
+#define FILS_NONCE_LEN 16
+#define FILS_MAX_KEK_LEN 64
+
+#define FILS_ERP_MAX_USERNAME_LEN 16
+#define FILS_ERP_MAX_REALM_LEN 253
+#define FILS_ERP_MAX_RRK_LEN 64
+
+#define PMK_MAX_LEN 48
+
/* Public action codes */
enum ieee80211_pub_actioncode {
WLAN_PUB_ACTION_EXT_CHANSW_ANN = 4,
@@ -2296,31 +2311,37 @@
};
-/* cipher suite selectors */
-#define WLAN_CIPHER_SUITE_USE_GROUP 0x000FAC00
-#define WLAN_CIPHER_SUITE_WEP40 0x000FAC01
-#define WLAN_CIPHER_SUITE_TKIP 0x000FAC02
-/* reserved: 0x000FAC03 */
-#define WLAN_CIPHER_SUITE_CCMP 0x000FAC04
-#define WLAN_CIPHER_SUITE_WEP104 0x000FAC05
-#define WLAN_CIPHER_SUITE_AES_CMAC 0x000FAC06
-#define WLAN_CIPHER_SUITE_GCMP 0x000FAC08
-#define WLAN_CIPHER_SUITE_GCMP_256 0x000FAC09
-#define WLAN_CIPHER_SUITE_CCMP_256 0x000FAC0A
-#define WLAN_CIPHER_SUITE_BIP_GMAC_128 0x000FAC0B
-#define WLAN_CIPHER_SUITE_BIP_GMAC_256 0x000FAC0C
-#define WLAN_CIPHER_SUITE_BIP_CMAC_256 0x000FAC0D
+#define SUITE(oui, id) (((oui) << 8) | (id))
-#define WLAN_CIPHER_SUITE_SMS4 0x00147201
+/* cipher suite selectors */
+#define WLAN_CIPHER_SUITE_USE_GROUP SUITE(0x000FAC, 0)
+#define WLAN_CIPHER_SUITE_WEP40 SUITE(0x000FAC, 1)
+#define WLAN_CIPHER_SUITE_TKIP SUITE(0x000FAC, 2)
+/* reserved: SUITE(0x000FAC, 3) */
+#define WLAN_CIPHER_SUITE_CCMP SUITE(0x000FAC, 4)
+#define WLAN_CIPHER_SUITE_WEP104 SUITE(0x000FAC, 5)
+#define WLAN_CIPHER_SUITE_AES_CMAC SUITE(0x000FAC, 6)
+#define WLAN_CIPHER_SUITE_GCMP SUITE(0x000FAC, 8)
+#define WLAN_CIPHER_SUITE_GCMP_256 SUITE(0x000FAC, 9)
+#define WLAN_CIPHER_SUITE_CCMP_256 SUITE(0x000FAC, 10)
+#define WLAN_CIPHER_SUITE_BIP_GMAC_128 SUITE(0x000FAC, 11)
+#define WLAN_CIPHER_SUITE_BIP_GMAC_256 SUITE(0x000FAC, 12)
+#define WLAN_CIPHER_SUITE_BIP_CMAC_256 SUITE(0x000FAC, 13)
+
+#define WLAN_CIPHER_SUITE_SMS4 SUITE(0x001472, 1)
/* AKM suite selectors */
-#define WLAN_AKM_SUITE_8021X 0x000FAC01
-#define WLAN_AKM_SUITE_PSK 0x000FAC02
-#define WLAN_AKM_SUITE_8021X_SHA256 0x000FAC05
-#define WLAN_AKM_SUITE_PSK_SHA256 0x000FAC06
-#define WLAN_AKM_SUITE_TDLS 0x000FAC07
-#define WLAN_AKM_SUITE_SAE 0x000FAC08
-#define WLAN_AKM_SUITE_FT_OVER_SAE 0x000FAC09
+#define WLAN_AKM_SUITE_8021X SUITE(0x000FAC, 1)
+#define WLAN_AKM_SUITE_PSK SUITE(0x000FAC, 2)
+#define WLAN_AKM_SUITE_8021X_SHA256 SUITE(0x000FAC, 5)
+#define WLAN_AKM_SUITE_PSK_SHA256 SUITE(0x000FAC, 6)
+#define WLAN_AKM_SUITE_TDLS SUITE(0x000FAC, 7)
+#define WLAN_AKM_SUITE_SAE SUITE(0x000FAC, 8)
+#define WLAN_AKM_SUITE_FT_OVER_SAE SUITE(0x000FAC, 9)
+#define WLAN_AKM_SUITE_FILS_SHA256 SUITE(0x000FAC, 14)
+#define WLAN_AKM_SUITE_FILS_SHA384 SUITE(0x000FAC, 15)
+#define WLAN_AKM_SUITE_FT_FILS_SHA256 SUITE(0x000FAC, 16)
+#define WLAN_AKM_SUITE_FT_FILS_SHA384 SUITE(0x000FAC, 17)
#define WLAN_MAX_KEY_LEN 32
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 7bdddb3..0b8aedf 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -141,6 +141,7 @@
DOMAIN_ATTR_EARLY_MAP,
DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT,
DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT,
+ DOMAIN_ATTR_CB_STALL_DISABLE,
DOMAIN_ATTR_MAX,
};
diff --git a/include/linux/ipa.h b/include/linux/ipa.h
index 0668534..6e664f6 100644
--- a/include/linux/ipa.h
+++ b/include/linux/ipa.h
@@ -40,6 +40,14 @@
};
/**
+* enum ipa_ipv6ct_en_type - IPv6CT setting type in IPA end-point
+*/
+enum ipa_ipv6ct_en_type {
+ IPA_BYPASS_IPV6CT,
+ IPA_ENABLE_IPV6CT,
+};
+
+/**
* enum ipa_mode_type - mode setting type in IPA end-point
* @BASIC: basic mode
* @ENABLE_FRAMING_HDLC: not currently supported
@@ -119,6 +127,19 @@
};
/**
+ * struct ipa_ep_cfg_conn_track - IPv6 Connection tracking configuration in
+ * IPA end-point
+ * @conn_track_en: Defines speculative conn_track action, means if specific
+ * pipe needs to have UL/DL IPv6 Connection Tracking or Bypass
+ * IPv6 Connection Tracking. 0: Bypass IPv6 Connection Tracking
+ * 1: IPv6 UL/DL Connection Tracking.
+ * Valid for Input Pipes only (IPA consumer)
+ */
+struct ipa_ep_cfg_conn_track {
+ enum ipa_ipv6ct_en_type conn_track_en;
+};
+
+/**
* struct ipa_ep_cfg_hdr - header configuration in IPA end-point
*
* @hdr_len:Header length in bytes to be added/removed. Assuming
@@ -387,6 +408,7 @@
/**
* struct ipa_ep_cfg - configuration of IPA end-point
* @nat: NAT parmeters
+ * @conn_track: IPv6CT parameters
* @hdr: Header parameters
* @hdr_ext: Extended header parameters
* @mode: Mode parameters
@@ -400,6 +422,7 @@
*/
struct ipa_ep_cfg {
struct ipa_ep_cfg_nat nat;
+ struct ipa_ep_cfg_conn_track conn_track;
struct ipa_ep_cfg_hdr hdr;
struct ipa_ep_cfg_hdr_ext hdr_ext;
struct ipa_ep_cfg_mode mode;
@@ -1172,6 +1195,9 @@
int ipa_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ipa_ep_cfg);
+int ipa_cfg_ep_conn_track(u32 clnt_hdl,
+ const struct ipa_ep_cfg_conn_track *ep_conn_track);
+
int ipa_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ipa_ep_cfg);
int ipa_cfg_ep_hdr_ext(u32 clnt_hdl,
@@ -1558,6 +1584,12 @@
return -EPERM;
}
+static inline int ipa_cfg_ep_conn_track(u32 clnt_hdl,
+ const struct ipa_ep_cfg_conn_track *ep_conn_track)
+{
+ return -EPERM;
+}
+
static inline int ipa_cfg_ep_hdr(u32 clnt_hdl,
const struct ipa_ep_cfg_hdr *ipa_ep_cfg)
{
diff --git a/include/linux/ipa_odu_bridge.h b/include/linux/ipa_odu_bridge.h
index 5d30a97..e7f75b7 100644
--- a/include/linux/ipa_odu_bridge.h
+++ b/include/linux/ipa_odu_bridge.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -39,7 +39,85 @@
u32 ipa_desc_size;
};
-#if defined CONFIG_IPA || defined CONFIG_IPA3
+/**
+ * struct ipa_bridge_init_params - parameters for IPA bridge initialization API
+ *
+ * @info: structure contains initialization information
+ * @wakeup_request: callback to client to indicate there is downlink data
+ * available. Client is expected to call ipa_bridge_resume() to start
+ * receiving data
+ */
+struct ipa_bridge_init_params {
+ struct odu_bridge_params info;
+ void (*wakeup_request)(void *);
+};
+
+#ifdef CONFIG_IPA3
+
+int ipa_bridge_init(struct ipa_bridge_init_params *params, u32 *hdl);
+
+int ipa_bridge_connect(u32 hdl);
+
+int ipa_bridge_set_perf_profile(u32 hdl, u32 bandwidth);
+
+int ipa_bridge_disconnect(u32 hdl);
+
+int ipa_bridge_suspend(u32 hdl);
+
+int ipa_bridge_resume(u32 hdl);
+
+int ipa_bridge_tx_dp(u32 hdl, struct sk_buff *skb,
+ struct ipa_tx_meta *metadata);
+
+int ipa_bridge_cleanup(u32 hdl);
+
+#else
+
+static inline int ipa_bridge_init(struct ipa_bridge_init_params *params, u32 *hdl)
+{
+ return -EPERM;
+}
+
+static inline int ipa_bridge_connect(u32 hdl)
+{
+ return -EPERM;
+}
+
+static inline int ipa_bridge_set_perf_profile(u32 hdl, u32 bandwidth)
+{
+ return -EPERM;
+}
+
+static inline int ipa_bridge_disconnect(u32 hdl)
+{
+ return -EPERM;
+}
+
+static inline int ipa_bridge_suspend(u32 hdl)
+{
+ return -EPERM;
+}
+
+static inline int ipa_bridge_resume(u32 hdl)
+{
+ return -EPERM;
+}
+
+static inline int ipa_bridge_tx_dp(u32 hdl, struct sk_buff *skb,
+struct ipa_tx_meta *metadata)
+{
+ return -EPERM;
+}
+
+static inline int ipa_bridge_cleanup(u32 hdl)
+{
+ return -EPERM;
+}
+
+#endif /* CONFIG_IPA3 */
+
+/* Below API is deprecated. Please use the API above */
+#if defined CONFIG_IPA || defined CONFIG_IPA3
int odu_bridge_init(struct odu_bridge_params *params);
diff --git a/include/linux/ipa_uc_offload.h b/include/linux/ipa_uc_offload.h
index 0277e87..85d0ce9 100644
--- a/include/linux/ipa_uc_offload.h
+++ b/include/linux/ipa_uc_offload.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -163,6 +163,20 @@
u32 max_supported_bw_mbps;
};
+/**
+ * struct ipa_uc_ready_params - uC ready CB parameters
+ * @is_uC_ready: uC loaded or not
+ * @priv : callback cookie
+ * @notify: callback
+ * @proto: uC offload protocol type
+ */
+struct ipa_uc_ready_params {
+ bool is_uC_ready;
+ void *priv;
+ ipa_uc_ready_cb notify;
+ enum ipa_uc_offload_proto proto;
+};
+
#if defined CONFIG_IPA || defined CONFIG_IPA3
/**
@@ -223,6 +237,19 @@
*/
int ipa_set_perf_profile(struct ipa_perf_profile *profile);
+
+/*
+ * To register uC ready callback if uC not ready
+ * and also check uC readiness
+ * if uC not ready only, register callback
+ */
+int ipa_uc_offload_reg_rdyCB(struct ipa_uc_ready_params *param);
+
+/*
+ * To de-register uC ready callback
+ */
+void ipa_uc_offload_dereg_rdyCB(enum ipa_uc_offload_proto proto);
+
#else /* (CONFIG_IPA || CONFIG_IPA3) */
static inline int ipa_uc_offload_reg_intf(
@@ -254,6 +281,15 @@
return -EPERM;
}
+static inline int ipa_uc_offload_reg_rdyCB(struct ipa_uc_ready_params *param)
+{
+ return -EPERM;
+}
+
+static inline void ipa_uc_offload_dereg_rdyCB(enum ipa_uc_offload_proto proto)
+{
+}
+
#endif /* CONFIG_IPA3 */
#endif /* _IPA_UC_OFFLOAD_H_ */
diff --git a/include/linux/ipc_router.h b/include/linux/ipc_router.h
index 8adf723..c18290f 100644
--- a/include/linux/ipc_router.h
+++ b/include/linux/ipc_router.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -144,6 +144,7 @@
uint32_t num_rx;
unsigned long num_tx_bytes;
unsigned long num_rx_bytes;
+ uint32_t last_served_svc_id;
void *priv;
};
@@ -269,6 +270,14 @@
*/
int unregister_ipcrtr_af_init_notifier(struct notifier_block *nb);
+/**
+ * msm_ipc_router_set_ws_allowed() - To Enable/disable the wakeup source allowed
+ * flag
+ * @flag: Flag to set/clear the wakeup source allowed
+ *
+ */
+void msm_ipc_router_set_ws_allowed(bool flag);
+
#else
struct msm_ipc_port *msm_ipc_router_create_port(
@@ -341,6 +350,8 @@
return -ENODEV;
}
+static inline void msm_ipc_router_set_ws_allowed(bool flag) { }
+
#endif
#endif
diff --git a/include/linux/ipc_router_xprt.h b/include/linux/ipc_router_xprt.h
index e33a10a..c4b7854 100644
--- a/include/linux/ipc_router_xprt.h
+++ b/include/linux/ipc_router_xprt.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -100,6 +100,7 @@
* @pkt_fragment_q: Queue of SKBs containing payload.
* @length: Length of data in the chain of SKBs
* @ref: Reference count for the packet.
+ * @ws_need: Flag to check wakeup source need
*/
struct rr_packet {
struct list_head list;
@@ -108,6 +109,7 @@
struct sk_buff_head *pkt_fragment_q;
uint32_t length;
struct kref ref;
+ bool ws_need;
};
/**
@@ -125,6 +127,7 @@
* @close: Method to close the XPRT.
* @sft_close_done: Method to indicate to the XPRT that handling of reset
* event is complete.
+ * @get_ws_info: Method to get the wakeup source information of the XPRT
*/
struct msm_ipc_router_xprt {
char *name;
@@ -143,6 +146,7 @@
struct msm_ipc_router_xprt *xprt);
int (*close)(struct msm_ipc_router_xprt *xprt);
void (*sft_close_done)(struct msm_ipc_router_xprt *xprt);
+ bool (*get_ws_info)(struct msm_ipc_router_xprt *xprt);
};
void msm_ipc_router_xprt_notify(struct msm_ipc_router_xprt *xprt,
diff --git a/include/linux/libfdt_env.h b/include/linux/libfdt_env.h
index 2a663c6..b01d294 100644
--- a/include/linux/libfdt_env.h
+++ b/include/linux/libfdt_env.h
@@ -1,6 +1,7 @@
#ifndef _LIBFDT_ENV_H
#define _LIBFDT_ENV_H
+#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/byteorder.h>
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index cb0ba9f..fa7fd03 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -44,6 +44,7 @@
/* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
struct list_lru_memcg *memcg_lrus;
#endif
+ long nr_items;
} ____cacheline_aligned_in_smp;
struct list_lru {
diff --git a/include/linux/log2.h b/include/linux/log2.h
index f38fae2..c373295f 100644
--- a/include/linux/log2.h
+++ b/include/linux/log2.h
@@ -194,6 +194,17 @@
* ... and so on.
*/
-#define order_base_2(n) ilog2(roundup_pow_of_two(n))
+static inline __attribute_const__
+int __order_base_2(unsigned long n)
+{
+ return n > 1 ? ilog2(n - 1) + 1 : 0;
+}
+#define order_base_2(n) \
+( \
+ __builtin_constant_p(n) ? ( \
+ ((n) == 0 || (n) == 1) ? 0 : \
+ ilog2((n) - 1) + 1) : \
+ __order_base_2(n) \
+)
#endif /* _LINUX_LOG2_H */
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 558adfa..8f5af30 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -1921,6 +1921,13 @@
}
#endif /* CONFIG_SECURITY_SELINUX_DISABLE */
+/* Currently required to handle SELinux runtime hook disable. */
+#ifdef CONFIG_SECURITY_WRITABLE_HOOKS
+#define __lsm_ro_after_init
+#else
+#define __lsm_ro_after_init __ro_after_init
+#endif /* CONFIG_SECURITY_WRITABLE_HOOKS */
+
extern int __init security_module_enable(const char *module);
extern void __init capability_add_hooks(void);
#ifdef CONFIG_SECURITY_YAMA
diff --git a/include/linux/mailbox_client.h b/include/linux/mailbox_client.h
index 78f01ea..86a2dc6 100644
--- a/include/linux/mailbox_client.h
+++ b/include/linux/mailbox_client.h
@@ -49,5 +49,6 @@
bool mbox_client_peek_data(struct mbox_chan *chan); /* atomic */
void mbox_free_channel(struct mbox_chan *chan); /* may sleep */
bool mbox_controller_is_idle(struct mbox_chan *chan); /* atomic */
+void mbox_chan_debug(struct mbox_chan *chan);
#endif /* __MAILBOX_CLIENT_H */
diff --git a/include/linux/mailbox_controller.h b/include/linux/mailbox_controller.h
index 30a4ed2..7827c68 100644
--- a/include/linux/mailbox_controller.h
+++ b/include/linux/mailbox_controller.h
@@ -49,6 +49,8 @@
* Used only if txdone_poll:=true && txdone_irq:=false
* @peek_data: Atomic check for any received data. Return true if controller
* has some data to push to the client. False otherwise.
+ * @debug: Allow chan to be debugged when the client detects a channel is
+ * locked up.
*/
struct mbox_chan_ops {
int (*send_data)(struct mbox_chan *chan, void *data);
@@ -90,6 +92,7 @@
struct mbox_chan *(*of_xlate)(struct mbox_controller *mbox,
const struct of_phandle_args *sp);
bool (*is_idle)(struct mbox_controller *mbox);
+ void (*debug)(struct mbox_chan *chan);
/* Internal to API */
struct hrtimer poll_hrt;
struct list_head node;
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index d6ebc01..37e5178 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -415,6 +415,11 @@
for (idx = 0, rgn = &memblock_type->regions[0]; \
idx < memblock_type->cnt; \
idx++, rgn = &memblock_type->regions[idx])
+#define for_each_memblock_rev(memblock_type, region) \
+ for (region = memblock.memblock_type.regions + \
+ memblock.memblock_type.cnt - 1; \
+ region >= memblock.memblock_type.regions; \
+ region--)
#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
diff --git a/include/linux/mfd/msm-cdc-pinctrl.h b/include/linux/mfd/msm-cdc-pinctrl.h
index 14b18fe..7eabefb 100644
--- a/include/linux/mfd/msm-cdc-pinctrl.h
+++ b/include/linux/mfd/msm-cdc-pinctrl.h
@@ -16,11 +16,13 @@
#include <linux/types.h>
#include <linux/of.h>
-#ifdef CONFIG_MSM_CDC_PINCTRL
+#if IS_ENABLED(CONFIG_MSM_CDC_PINCTRL)
extern int msm_cdc_pinctrl_select_sleep_state(struct device_node *np);
extern int msm_cdc_pinctrl_select_active_state(struct device_node *np);
extern bool msm_cdc_pinctrl_get_state(struct device_node *np);
extern int msm_cdc_get_gpio_state(struct device_node *np);
+int msm_cdc_pinctrl_drv_init(void);
+void msm_cdc_pinctrl_drv_exit(void);
#else
int msm_cdc_pinctrl_select_sleep_state(struct device_node *np)
@@ -35,7 +37,13 @@
{
return 0;
}
-#
+int msm_cdc_pinctrl_drv_init(void)
+{
+ return 0;
+}
+void msm_cdc_pinctrl_drv_exit(void)
+{
+}
#endif
#endif
diff --git a/include/linux/mfd/wcd9335/irq.h b/include/linux/mfd/wcd9335/irq.h
new file mode 100644
index 0000000..c666d31
--- /dev/null
+++ b/include/linux/mfd/wcd9335/irq.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __WCD9335_IRQ_H_
+#define __WCD9335_IRQ_H_
+
+enum {
+ /* INTR_REG 0 */
+ WCD9335_IRQ_FLL_LOCK_LOSS = 1,
+ WCD9335_IRQ_HPH_PA_OCPL_FAULT,
+ WCD9335_IRQ_HPH_PA_OCPR_FAULT,
+ WCD9335_IRQ_EAR_PA_OCP_FAULT,
+ WCD9335_IRQ_HPH_PA_CNPL_COMPLETE,
+ WCD9335_IRQ_HPH_PA_CNPR_COMPLETE,
+ WCD9335_IRQ_EAR_PA_CNP_COMPLETE,
+ /* INTR_REG 1 */
+ WCD9335_IRQ_MBHC_SW_DET,
+ WCD9335_IRQ_MBHC_ELECT_INS_REM_DET,
+ WCD9335_IRQ_MBHC_BUTTON_PRESS_DET,
+ WCD9335_IRQ_MBHC_BUTTON_RELEASE_DET,
+ WCD9335_IRQ_MBHC_ELECT_INS_REM_LEG_DET,
+ WCD9335_IRQ_RESERVED_0,
+ WCD9335_IRQ_RESERVED_1,
+ WCD9335_IRQ_RESERVED_2,
+ /* INTR_REG 2 */
+ WCD9335_IRQ_LINE_PA1_CNP_COMPLETE,
+ WCD9335_IRQ_LINE_PA2_CNP_COMPLETE,
+ WCD9335_IRQ_LINE_PA3_CNP_COMPLETE,
+ WCD9335_IRQ_LINE_PA4_CNP_COMPLETE,
+ WCD9335_IRQ_SOUNDWIRE,
+ WCD9335_IRQ_VDD_DIG_RAMP_COMPLETE,
+ WCD9335_IRQ_RCO_ERROR,
+ WCD9335_IRQ_SVA_ERROR,
+ /* INTR_REG 3 */
+ WCD9335_IRQ_MAD_AUDIO,
+ WCD9335_IRQ_MAD_BEACON,
+ WCD9335_IRQ_MAD_ULTRASOUND,
+ WCD9335_IRQ_VBAT_ATTACK,
+ WCD9335_IRQ_VBAT_RESTORE,
+ WCD9335_IRQ_SVA_OUTBOX1,
+ WCD9335_IRQ_SVA_OUTBOX2,
+ WCD9335_NUM_IRQS,
+};
+
+#endif
diff --git a/include/linux/mfd/wcd934x/irq.h b/include/linux/mfd/wcd934x/irq.h
new file mode 100644
index 0000000..1a18be3
--- /dev/null
+++ b/include/linux/mfd/wcd934x/irq.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __WCD934X_IRQ_H_
+#define __WCD934X_IRQ_H_
+
+enum {
+ /* INTR_REG 0 */
+ WCD934X_IRQ_MISC = 1,
+ WCD934X_IRQ_HPH_PA_OCPL_FAULT,
+ WCD934X_IRQ_HPH_PA_OCPR_FAULT,
+ WCD934X_IRQ_EAR_PA_OCP_FAULT,
+ WCD934X_IRQ_HPH_PA_CNPL_COMPLETE,
+ WCD934X_IRQ_HPH_PA_CNPR_COMPLETE,
+ WCD934X_IRQ_EAR_PA_CNP_COMPLETE,
+ /* INTR_REG 1 */
+ WCD934X_IRQ_MBHC_SW_DET,
+ WCD934X_IRQ_MBHC_ELECT_INS_REM_DET,
+ WCD934X_IRQ_MBHC_BUTTON_PRESS_DET,
+ WCD934X_IRQ_MBHC_BUTTON_RELEASE_DET,
+ WCD934X_IRQ_MBHC_ELECT_INS_REM_LEG_DET,
+ WCD934X_IRQ_RESERVED_0,
+ WCD934X_IRQ_RESERVED_1,
+ WCD934X_IRQ_RESERVED_2,
+ /* INTR_REG 2 */
+ WCD934X_IRQ_LINE_PA1_CNP_COMPLETE,
+ WCD934X_IRQ_LINE_PA2_CNP_COMPLETE,
+ WCD934X_IRQ_SLNQ_ANALOG_ERROR,
+ WCD934X_IRQ_RESERVED_3,
+ WCD934X_IRQ_SOUNDWIRE,
+ WCD934X_IRQ_VDD_DIG_RAMP_COMPLETE,
+ WCD934X_IRQ_RCO_ERROR,
+ WCD934X_IRQ_CPE_ERROR,
+ /* INTR_REG 3 */
+ WCD934X_IRQ_MAD_AUDIO,
+ WCD934X_IRQ_MAD_BEACON,
+ WCD934X_IRQ_MAD_ULTRASOUND,
+ WCD934X_IRQ_VBAT_ATTACK,
+ WCD934X_IRQ_VBAT_RESTORE,
+ WCD934X_IRQ_CPE1_INTR,
+ WCD934X_IRQ_RESERVED_4,
+ WCD934X_IRQ_SLNQ_DIGITAL,
+ WCD934X_NUM_IRQS,
+};
+
+#endif
diff --git a/include/linux/mfd/wcd9xxx/core.h b/include/linux/mfd/wcd9xxx/core.h
index c6c8d24..b994010 100644
--- a/include/linux/mfd/wcd9xxx/core.h
+++ b/include/linux/mfd/wcd9xxx/core.h
@@ -350,7 +350,7 @@
int (*post_reset)(struct wcd9xxx *wcd9xxx);
void *ssr_priv;
- bool dev_up;
+ unsigned long dev_up;
u32 num_of_supplies;
struct regulator_bulk_data *supplies;
@@ -434,4 +434,7 @@
{
return 0;
}
+
+int wcd9xxx_init(void);
+void wcd9xxx_exit(void);
#endif
diff --git a/include/linux/mfd/wcd9xxx/pdata.h b/include/linux/mfd/wcd9xxx/pdata.h
index f188e85..cfe4724 100644
--- a/include/linux/mfd/wcd9xxx/pdata.h
+++ b/include/linux/mfd/wcd9xxx/pdata.h
@@ -179,6 +179,8 @@
int irq_base;
int num_irqs;
int reset_gpio;
+ bool has_buck_vsel_gpio;
+ struct device_node *buck_vsel_ctl_np;
struct device_node *wcd_rst_np;
struct wcd9xxx_amic amic_settings;
struct slim_device slimbus_slave_device;
diff --git a/include/linux/mfd/wcd9xxx/wcd9330_registers.h b/include/linux/mfd/wcd9xxx/wcd9330_registers.h
deleted file mode 100644
index c37d25f..0000000
--- a/include/linux/mfd/wcd9xxx/wcd9330_registers.h
+++ /dev/null
@@ -1,1626 +0,0 @@
-/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef WCD9330_REGISTERS_H
-#define WCD9330_REGISTERS_H
-
-#include <linux/types.h>
-
-#define TOMTOM_A_CHIP_CTL (0x000)
-#define TOMTOM_A_CHIP_CTL__POR (0x38)
-#define TOMTOM_A_CHIP_STATUS (0x001)
-#define TOMTOM_A_CHIP_STATUS__POR (0x00)
-#define TOMTOM_A_CHIP_ID_BYTE_0 (0x004)
-#define TOMTOM_A_CHIP_ID_BYTE_0__POR (0x00)
-#define TOMTOM_A_CHIP_ID_BYTE_1 (0x005)
-#define TOMTOM_A_CHIP_ID_BYTE_1__POR (0x00)
-#define TOMTOM_A_CHIP_ID_BYTE_2 (0x006)
-#define TOMTOM_A_CHIP_ID_BYTE_2__POR (0x05)
-#define TOMTOM_A_CHIP_ID_BYTE_3 (0x007)
-#define TOMTOM_A_CHIP_ID_BYTE_3__POR (0x01)
-#define TOMTOM_A_CHIP_I2C_SLAVE_ID (0x008)
-#define TOMTOM_A_CHIP_I2C_SLAVE_ID__POR (0x01)
-#define TOMTOM_A_SLAVE_ID_1 (0x00C)
-#define TOMTOM_A_SLAVE_ID_1__POR (0x77)
-#define TOMTOM_A_SLAVE_ID_2 (0x00D)
-#define TOMTOM_A_SLAVE_ID_2__POR (0x66)
-#define TOMTOM_A_SLAVE_ID_3 (0x00E)
-#define TOMTOM_A_SLAVE_ID_3__POR (0x55)
-#define TOMTOM_A_PIN_CTL_OE0 (0x010)
-#define TOMTOM_A_PIN_CTL_OE0__POR (0x00)
-#define TOMTOM_A_PIN_CTL_OE1 (0x011)
-#define TOMTOM_A_PIN_CTL_OE1__POR (0x00)
-#define TOMTOM_A_PIN_CTL_OE2 (0x012)
-#define TOMTOM_A_PIN_CTL_OE2__POR (0x00)
-#define TOMTOM_A_PIN_CTL_DATA0 (0x013)
-#define TOMTOM_A_PIN_CTL_DATA0__POR (0x00)
-#define TOMTOM_A_PIN_CTL_DATA1 (0x014)
-#define TOMTOM_A_PIN_CTL_DATA1__POR (0x00)
-#define TOMTOM_A_PIN_CTL_DATA2 (0x015)
-#define TOMTOM_A_PIN_CTL_DATA2__POR (0x00)
-#define TOMTOM_A_HDRIVE_GENERIC (0x018)
-#define TOMTOM_A_HDRIVE_GENERIC__POR (0x00)
-#define TOMTOM_A_HDRIVE_OVERRIDE (0x019)
-#define TOMTOM_A_HDRIVE_OVERRIDE__POR (0x08)
-#define TOMTOM_A_ANA_CSR_WAIT_STATE (0x01C)
-#define TOMTOM_A_ANA_CSR_WAIT_STATE__POR (0x44)
-#define TOMTOM_A_PROCESS_MONITOR_CTL0 (0x020)
-#define TOMTOM_A_PROCESS_MONITOR_CTL0__POR (0x80)
-#define TOMTOM_A_PROCESS_MONITOR_CTL1 (0x021)
-#define TOMTOM_A_PROCESS_MONITOR_CTL1__POR (0x00)
-#define TOMTOM_A_PROCESS_MONITOR_CTL2 (0x022)
-#define TOMTOM_A_PROCESS_MONITOR_CTL2__POR (0x00)
-#define TOMTOM_A_PROCESS_MONITOR_CTL3 (0x023)
-#define TOMTOM_A_PROCESS_MONITOR_CTL3__POR (0x01)
-#define TOMTOM_A_QFUSE_CTL (0x028)
-#define TOMTOM_A_QFUSE_CTL__POR (0x00)
-#define TOMTOM_A_QFUSE_STATUS (0x029)
-#define TOMTOM_A_QFUSE_STATUS__POR (0x00)
-#define TOMTOM_A_QFUSE_DATA_OUT0 (0x02A)
-#define TOMTOM_A_QFUSE_DATA_OUT0__POR (0x00)
-#define TOMTOM_A_QFUSE_DATA_OUT1 (0x02B)
-#define TOMTOM_A_QFUSE_DATA_OUT1__POR (0x00)
-#define TOMTOM_A_QFUSE_DATA_OUT2 (0x02C)
-#define TOMTOM_A_QFUSE_DATA_OUT2__POR (0x00)
-#define TOMTOM_A_QFUSE_DATA_OUT3 (0x02D)
-#define TOMTOM_A_QFUSE_DATA_OUT3__POR (0x00)
-#define TOMTOM_A_QFUSE_DATA_OUT4 (0x02E)
-#define TOMTOM_A_QFUSE_DATA_OUT4__POR (0x00)
-#define TOMTOM_A_QFUSE_DATA_OUT5 (0x02F)
-#define TOMTOM_A_QFUSE_DATA_OUT5__POR (0x00)
-#define TOMTOM_A_QFUSE_DATA_OUT6 (0x030)
-#define TOMTOM_A_QFUSE_DATA_OUT6__POR (0x00)
-#define TOMTOM_A_QFUSE_DATA_OUT7 (0x031)
-#define TOMTOM_A_QFUSE_DATA_OUT7__POR (0x00)
-#define TOMTOM_A_CDC_CTL (0x034)
-#define TOMTOM_A_CDC_CTL__POR (0x00)
-#define TOMTOM_A_LEAKAGE_CTL (0x03C)
-#define TOMTOM_A_LEAKAGE_CTL__POR (0x04)
-#define TOMTOM_A_SVASS_MEM_PTR0 (0x044)
-#define TOMTOM_A_SVASS_MEM_PTR0__POR (0x00)
-#define TOMTOM_A_SVASS_MEM_PTR1 (0x045)
-#define TOMTOM_A_SVASS_MEM_PTR1__POR (0x00)
-#define TOMTOM_A_SVASS_MEM_PTR2 (0x046)
-#define TOMTOM_A_SVASS_MEM_PTR2__POR (0x00)
-#define TOMTOM_A_SVASS_MEM_CTL (0x048)
-#define TOMTOM_A_SVASS_MEM_CTL__POR (0x04)
-#define TOMTOM_A_SVASS_MEM_BANK (0x049)
-#define TOMTOM_A_SVASS_MEM_BANK__POR (0x00)
-#define TOMTOM_A_DMIC_B1_CTL (0x04A)
-#define TOMTOM_A_DMIC_B1_CTL__POR (0x00)
-#define TOMTOM_A_DMIC_B2_CTL (0x04B)
-#define TOMTOM_A_DMIC_B2_CTL__POR (0x00)
-#define TOMTOM_A_SVASS_CLKRST_CTL (0x04C)
-#define TOMTOM_A_SVASS_CLKRST_CTL__POR (0x00)
-#define TOMTOM_A_SVASS_CPAR_CFG (0x04D)
-#define TOMTOM_A_SVASS_CPAR_CFG__POR (0x00)
-#define TOMTOM_A_SVASS_BUF_RDY_INT_PERIOD (0x04E)
-#define TOMTOM_A_SVASS_BUF_RDY_INT_PERIOD__POR (0x14)
-#define TOMTOM_A_SVASS_CPAR_WDOG_CFG (0x04F)
-#define TOMTOM_A_SVASS_CPAR_WDOG_CFG__POR (0x00)
-#define TOMTOM_A_SVASS_CFG (0x050)
-#define TOMTOM_A_SVASS_CFG__POR (0x01)
-#define TOMTOM_A_SVASS_SPE_CFG (0x051)
-#define TOMTOM_A_SVASS_SPE_CFG__POR (0x04)
-#define TOMTOM_A_SVASS_STATUS (0x052)
-#define TOMTOM_A_SVASS_STATUS__POR (0x00)
-#define TOMTOM_A_SVASS_INT_MASK (0x053)
-#define TOMTOM_A_SVASS_INT_MASK__POR (0x3F)
-#define TOMTOM_A_SVASS_INT_STATUS (0x054)
-#define TOMTOM_A_SVASS_INT_STATUS__POR (0x00)
-#define TOMTOM_A_SVASS_INT_CLR (0x055)
-#define TOMTOM_A_SVASS_INT_CLR__POR (0x00)
-#define TOMTOM_A_SVASS_DEBUG (0x056)
-#define TOMTOM_A_SVASS_DEBUG__POR (0x00)
-#define TOMTOM_A_SVASS_SPE_BKUP_INT (0x057)
-#define TOMTOM_A_SVASS_SPE_BKUP_INT__POR (0x00)
-#define TOMTOM_A_SVASS_MEM_ACC (0x058)
-#define TOMTOM_A_SVASS_MEM_ACC__POR (0x00)
-#define TOMTOM_A_MEM_LEAKAGE_CTL (0x059)
-#define TOMTOM_A_MEM_LEAKAGE_CTL__POR (0x04)
-#define TOMTOM_A_SVASS_SPE_INBOX_TRG (0x05A)
-#define TOMTOM_A_SVASS_SPE_INBOX_TRG__POR (0x00)
-#define TOMTOM_A_SVASS_SPE_INBOX_0 (0x060)
-#define TOMTOM_A_SVASS_SPE_INBOX_0__POR (0x00)
-#define TOMTOM_A_SVASS_SPE_INBOX_1 (0x061)
-#define TOMTOM_A_SVASS_SPE_INBOX_1__POR (0x00)
-#define TOMTOM_A_SVASS_SPE_INBOX_2 (0x062)
-#define TOMTOM_A_SVASS_SPE_INBOX_2__POR (0x00)
-#define TOMTOM_A_SVASS_SPE_INBOX_3 (0x063)
-#define TOMTOM_A_SVASS_SPE_INBOX_3__POR (0x00)
-#define TOMTOM_A_SVASS_SPE_INBOX_4 (0x064)
-#define TOMTOM_A_SVASS_SPE_INBOX_4__POR (0x00)
-#define TOMTOM_A_SVASS_SPE_INBOX_5 (0x065)
-#define TOMTOM_A_SVASS_SPE_INBOX_5__POR (0x00)
-#define TOMTOM_A_SVASS_SPE_INBOX_6 (0x066)
-#define TOMTOM_A_SVASS_SPE_INBOX_6__POR (0x00)
-#define TOMTOM_A_SVASS_SPE_INBOX_7 (0x067)
-#define TOMTOM_A_SVASS_SPE_INBOX_7__POR (0x00)
-#define TOMTOM_A_SVASS_SPE_INBOX_8 (0x068)
-#define TOMTOM_A_SVASS_SPE_INBOX_8__POR (0x00)
-#define TOMTOM_A_SVASS_SPE_INBOX_9 (0x069)
-#define TOMTOM_A_SVASS_SPE_INBOX_9__POR (0x00)
-#define TOMTOM_A_SVASS_SPE_INBOX_10 (0x06A)
-#define TOMTOM_A_SVASS_SPE_INBOX_10__POR (0x00)
-#define TOMTOM_A_SVASS_SPE_INBOX_11 (0x06B)
-#define TOMTOM_A_SVASS_SPE_INBOX_11__POR (0x00)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_0 (0x070)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_0__POR (0x00)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_1 (0x071)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_1__POR (0x00)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_2 (0x072)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_2__POR (0x00)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_3 (0x073)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_3__POR (0x00)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_4 (0x074)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_4__POR (0x00)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_5 (0x075)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_5__POR (0x00)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_6 (0x076)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_6__POR (0x00)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_7 (0x077)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_7__POR (0x00)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_8 (0x078)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_8__POR (0x00)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_9 (0x079)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_9__POR (0x00)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_10 (0x07A)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_10__POR (0x00)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_11 (0x07B)
-#define TOMTOM_A_SVASS_SPE_OUTBOX_11__POR (0x00)
-#define TOMTOM_A_INTR_MODE (0x090)
-#define TOMTOM_A_INTR_MODE__POR (0x00)
-#define TOMTOM_A_INTR1_MASK0 (0x094)
-#define TOMTOM_A_INTR1_MASK0__POR (0xFF)
-#define TOMTOM_A_INTR1_MASK1 (0x095)
-#define TOMTOM_A_INTR1_MASK1__POR (0xFF)
-#define TOMTOM_A_INTR1_MASK2 (0x096)
-#define TOMTOM_A_INTR1_MASK2__POR (0xFF)
-#define TOMTOM_A_INTR1_MASK3 (0x097)
-#define TOMTOM_A_INTR1_MASK3__POR (0xFF)
-#define TOMTOM_A_INTR1_STATUS0 (0x098)
-#define TOMTOM_A_INTR1_STATUS0__POR (0x00)
-#define TOMTOM_A_INTR1_STATUS1 (0x099)
-#define TOMTOM_A_INTR1_STATUS1__POR (0x00)
-#define TOMTOM_A_INTR1_STATUS2 (0x09A)
-#define TOMTOM_A_INTR1_STATUS2__POR (0x00)
-#define TOMTOM_A_INTR1_STATUS3 (0x09B)
-#define TOMTOM_A_INTR1_STATUS3__POR (0x00)
-#define TOMTOM_A_INTR1_CLEAR0 (0x09C)
-#define TOMTOM_A_INTR1_CLEAR0__POR (0x00)
-#define TOMTOM_A_INTR1_CLEAR1 (0x09D)
-#define TOMTOM_A_INTR1_CLEAR1__POR (0x00)
-#define TOMTOM_A_INTR1_CLEAR2 (0x09E)
-#define TOMTOM_A_INTR1_CLEAR2__POR (0x00)
-#define TOMTOM_A_INTR1_CLEAR3 (0x09F)
-#define TOMTOM_A_INTR1_CLEAR3__POR (0x00)
-#define TOMTOM_A_INTR1_LEVEL0 (0x0A0)
-#define TOMTOM_A_INTR1_LEVEL0__POR (0x01)
-#define TOMTOM_A_INTR1_LEVEL1 (0x0A1)
-#define TOMTOM_A_INTR1_LEVEL1__POR (0x00)
-#define TOMTOM_A_INTR1_LEVEL2 (0x0A2)
-#define TOMTOM_A_INTR1_LEVEL2__POR (0x40)
-#define TOMTOM_A_INTR1_LEVEL3 (0x0A3)
-#define TOMTOM_A_INTR1_LEVEL3__POR (0x00)
-#define TOMTOM_A_INTR1_TEST0 (0x0A4)
-#define TOMTOM_A_INTR1_TEST0__POR (0x00)
-#define TOMTOM_A_INTR1_TEST1 (0x0A5)
-#define TOMTOM_A_INTR1_TEST1__POR (0x00)
-#define TOMTOM_A_INTR1_TEST2 (0x0A6)
-#define TOMTOM_A_INTR1_TEST2__POR (0x00)
-#define TOMTOM_A_INTR1_TEST3 (0x0A7)
-#define TOMTOM_A_INTR1_TEST3__POR (0x00)
-#define TOMTOM_A_INTR1_SET0 (0x0A8)
-#define TOMTOM_A_INTR1_SET0__POR (0x00)
-#define TOMTOM_A_INTR1_SET1 (0x0A9)
-#define TOMTOM_A_INTR1_SET1__POR (0x00)
-#define TOMTOM_A_INTR1_SET2 (0x0AA)
-#define TOMTOM_A_INTR1_SET2__POR (0x00)
-#define TOMTOM_A_INTR1_SET3 (0x0AB)
-#define TOMTOM_A_INTR1_SET3__POR (0x00)
-#define TOMTOM_A_INTR2_MASK0 (0x0B0)
-#define TOMTOM_A_INTR2_MASK0__POR (0xFF)
-#define TOMTOM_A_INTR2_STATUS0 (0x0B2)
-#define TOMTOM_A_INTR2_STATUS0__POR (0x00)
-#define TOMTOM_A_INTR2_CLEAR0 (0x0B4)
-#define TOMTOM_A_INTR2_CLEAR0__POR (0x00)
-#define TOMTOM_A_INTR2_LEVEL0 (0x0B6)
-#define TOMTOM_A_INTR2_LEVEL0__POR (0x00)
-#define TOMTOM_A_INTR2_TEST0 (0x0B8)
-#define TOMTOM_A_INTR2_TEST0__POR (0x00)
-#define TOMTOM_A_INTR2_SET0 (0x0BA)
-#define TOMTOM_A_INTR2_SET0__POR (0x00)
-#define TOMTOM_A_CDC_TX_I2S_SCK_MODE (0x0C0)
-#define TOMTOM_A_CDC_TX_I2S_SCK_MODE__POR (0x00)
-#define TOMTOM_A_CDC_TX_I2S_WS_MODE (0x0C1)
-#define TOMTOM_A_CDC_TX_I2S_WS_MODE__POR (0x00)
-#define TOMTOM_A_CDC_DMIC_DATA0_MODE (0x0C4)
-#define TOMTOM_A_CDC_DMIC_DATA0_MODE__POR (0x00)
-#define TOMTOM_A_CDC_DMIC_CLK0_MODE (0x0C5)
-#define TOMTOM_A_CDC_DMIC_CLK0_MODE__POR (0x00)
-#define TOMTOM_A_CDC_DMIC_DATA1_MODE (0x0C6)
-#define TOMTOM_A_CDC_DMIC_DATA1_MODE__POR (0x00)
-#define TOMTOM_A_CDC_DMIC_CLK1_MODE (0x0C7)
-#define TOMTOM_A_CDC_DMIC_CLK1_MODE__POR (0x00)
-#define TOMTOM_A_CDC_RX_I2S_SCK_MODE (0x0C8)
-#define TOMTOM_A_CDC_RX_I2S_SCK_MODE__POR (0x00)
-#define TOMTOM_A_CDC_RX_I2S_WS_MODE (0x0C9)
-#define TOMTOM_A_CDC_RX_I2S_WS_MODE__POR (0x00)
-#define TOMTOM_A_CDC_DMIC_DATA2_MODE (0x0CA)
-#define TOMTOM_A_CDC_DMIC_DATA2_MODE__POR (0x00)
-#define TOMTOM_A_CDC_DMIC_CLK2_MODE (0x0CB)
-#define TOMTOM_A_CDC_DMIC_CLK2_MODE__POR (0x00)
-#define TOMTOM_A_CDC_INTR1_MODE (0x0CC)
-#define TOMTOM_A_CDC_INTR1_MODE__POR (0x00)
-#define TOMTOM_A_CDC_SB_NRZ_SEL_MODE (0x0CD)
-#define TOMTOM_A_CDC_SB_NRZ_SEL_MODE__POR (0x00)
-#define TOMTOM_A_CDC_INTR2_MODE (0x0CE)
-#define TOMTOM_A_CDC_INTR2_MODE__POR (0x00)
-#define TOMTOM_A_CDC_RF_PA_ON_MODE (0x0CF)
-#define TOMTOM_A_CDC_RF_PA_ON_MODE__POR (0x00)
-#define TOMTOM_A_CDC_BOOST_MODE (0x0D0)
-#define TOMTOM_A_CDC_BOOST_MODE__POR (0x00)
-#define TOMTOM_A_CDC_JTCK_MODE (0x0D1)
-#define TOMTOM_A_CDC_JTCK_MODE__POR (0x00)
-#define TOMTOM_A_CDC_JTDI_MODE (0x0D2)
-#define TOMTOM_A_CDC_JTDI_MODE__POR (0x00)
-#define TOMTOM_A_CDC_JTMS_MODE (0x0D3)
-#define TOMTOM_A_CDC_JTMS_MODE__POR (0x00)
-#define TOMTOM_A_CDC_JTDO_MODE (0x0D4)
-#define TOMTOM_A_CDC_JTDO_MODE__POR (0x00)
-#define TOMTOM_A_CDC_JTRST_MODE (0x0D5)
-#define TOMTOM_A_CDC_JTRST_MODE__POR (0x00)
-#define TOMTOM_A_CDC_BIST_MODE_MODE (0x0D6)
-#define TOMTOM_A_CDC_BIST_MODE_MODE__POR (0x00)
-#define TOMTOM_A_CDC_MAD_MAIN_CTL_1 (0x0E0)
-#define TOMTOM_A_CDC_MAD_MAIN_CTL_1__POR (0x00)
-#define TOMTOM_A_CDC_MAD_MAIN_CTL_2 (0x0E1)
-#define TOMTOM_A_CDC_MAD_MAIN_CTL_2__POR (0x00)
-#define TOMTOM_A_CDC_MAD_AUDIO_CTL_1 (0x0E2)
-#define TOMTOM_A_CDC_MAD_AUDIO_CTL_1__POR (0x00)
-#define TOMTOM_A_CDC_MAD_AUDIO_CTL_2 (0x0E3)
-#define TOMTOM_A_CDC_MAD_AUDIO_CTL_2__POR (0x00)
-#define TOMTOM_A_CDC_MAD_AUDIO_CTL_3 (0x0E4)
-#define TOMTOM_A_CDC_MAD_AUDIO_CTL_3__POR (0x00)
-#define TOMTOM_A_CDC_MAD_AUDIO_CTL_4 (0x0E5)
-#define TOMTOM_A_CDC_MAD_AUDIO_CTL_4__POR (0x00)
-#define TOMTOM_A_CDC_MAD_AUDIO_CTL_5 (0x0E6)
-#define TOMTOM_A_CDC_MAD_AUDIO_CTL_5__POR (0x00)
-#define TOMTOM_A_CDC_MAD_AUDIO_CTL_6 (0x0E7)
-#define TOMTOM_A_CDC_MAD_AUDIO_CTL_6__POR (0x00)
-#define TOMTOM_A_CDC_MAD_AUDIO_CTL_7 (0x0E8)
-#define TOMTOM_A_CDC_MAD_AUDIO_CTL_7__POR (0x00)
-#define TOMTOM_A_CDC_MAD_AUDIO_CTL_8 (0x0E9)
-#define TOMTOM_A_CDC_MAD_AUDIO_CTL_8__POR (0x00)
-#define TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_PTR (0x0EA)
-#define TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_PTR__POR (0x00)
-#define TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_VAL (0x0EB)
-#define TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_VAL__POR (0x40)
-#define TOMTOM_A_CDC_MAD_ULTR_CTL_1 (0x0EC)
-#define TOMTOM_A_CDC_MAD_ULTR_CTL_1__POR (0x00)
-#define TOMTOM_A_CDC_MAD_ULTR_CTL_2 (0x0ED)
-#define TOMTOM_A_CDC_MAD_ULTR_CTL_2__POR (0x00)
-#define TOMTOM_A_CDC_MAD_ULTR_CTL_3 (0x0EE)
-#define TOMTOM_A_CDC_MAD_ULTR_CTL_3__POR (0x00)
-#define TOMTOM_A_CDC_MAD_ULTR_CTL_4 (0x0EF)
-#define TOMTOM_A_CDC_MAD_ULTR_CTL_4__POR (0x00)
-#define TOMTOM_A_CDC_MAD_ULTR_CTL_5 (0x0F0)
-#define TOMTOM_A_CDC_MAD_ULTR_CTL_5__POR (0x00)
-#define TOMTOM_A_CDC_MAD_ULTR_CTL_6 (0x0F1)
-#define TOMTOM_A_CDC_MAD_ULTR_CTL_6__POR (0x00)
-#define TOMTOM_A_CDC_MAD_ULTR_CTL_7 (0x0F2)
-#define TOMTOM_A_CDC_MAD_ULTR_CTL_7__POR (0x00)
-#define TOMTOM_A_CDC_MAD_BEACON_CTL_1 (0x0F3)
-#define TOMTOM_A_CDC_MAD_BEACON_CTL_1__POR (0x00)
-#define TOMTOM_A_CDC_MAD_BEACON_CTL_2 (0x0F4)
-#define TOMTOM_A_CDC_MAD_BEACON_CTL_2__POR (0x00)
-#define TOMTOM_A_CDC_MAD_BEACON_CTL_3 (0x0F5)
-#define TOMTOM_A_CDC_MAD_BEACON_CTL_3__POR (0x00)
-#define TOMTOM_A_CDC_MAD_BEACON_CTL_4 (0x0F6)
-#define TOMTOM_A_CDC_MAD_BEACON_CTL_4__POR (0x00)
-#define TOMTOM_A_CDC_MAD_BEACON_CTL_5 (0x0F7)
-#define TOMTOM_A_CDC_MAD_BEACON_CTL_5__POR (0x00)
-#define TOMTOM_A_CDC_MAD_BEACON_CTL_6 (0x0F8)
-#define TOMTOM_A_CDC_MAD_BEACON_CTL_6__POR (0x00)
-#define TOMTOM_A_CDC_MAD_BEACON_CTL_7 (0x0F9)
-#define TOMTOM_A_CDC_MAD_BEACON_CTL_7__POR (0x00)
-#define TOMTOM_A_CDC_MAD_BEACON_CTL_8 (0x0FA)
-#define TOMTOM_A_CDC_MAD_BEACON_CTL_8__POR (0x00)
-#define TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_PTR (0x0FB)
-#define TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_PTR__POR (0x00)
-#define TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_VAL (0x0FC)
-#define TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_VAL__POR (0x00)
-#define TOMTOM_A_CDC_MAD_INP_SEL (0x0FD)
-#define TOMTOM_A_CDC_MAD_INP_SEL__POR (0x00)
-#define TOMTOM_A_BIAS_REF_CTL (0x100)
-#define TOMTOM_A_BIAS_REF_CTL__POR (0x1C)
-#define TOMTOM_A_BIAS_CENTRAL_BG_CTL (0x101)
-#define TOMTOM_A_BIAS_CENTRAL_BG_CTL__POR (0x50)
-#define TOMTOM_A_BIAS_PRECHRG_CTL (0x102)
-#define TOMTOM_A_BIAS_PRECHRG_CTL__POR (0x07)
-#define TOMTOM_A_BIAS_CURR_CTL_1 (0x103)
-#define TOMTOM_A_BIAS_CURR_CTL_1__POR (0x52)
-#define TOMTOM_A_BIAS_CURR_CTL_2 (0x104)
-#define TOMTOM_A_BIAS_CURR_CTL_2__POR (0x00)
-#define TOMTOM_A_BIAS_OSC_BG_CTL (0x105)
-#define TOMTOM_A_BIAS_OSC_BG_CTL__POR (0x36)
-#define TOMTOM_A_CLK_BUFF_EN1 (0x108)
-#define TOMTOM_A_CLK_BUFF_EN1__POR (0x04)
-#define TOMTOM_A_CLK_BUFF_EN2 (0x109)
-#define TOMTOM_A_CLK_BUFF_EN2__POR (0x02)
-#define TOMTOM_A_LDO_L_MODE_1 (0x10A)
-#define TOMTOM_A_LDO_L_MODE_1__POR (0x08)
-#define TOMTOM_A_LDO_L_MODE_2 (0x10B)
-#define TOMTOM_A_LDO_L_MODE_2__POR (0x50)
-#define TOMTOM_A_LDO_L_CTRL_1 (0x10C)
-#define TOMTOM_A_LDO_L_CTRL_1__POR (0x70)
-#define TOMTOM_A_LDO_L_CTRL_2 (0x10D)
-#define TOMTOM_A_LDO_L_CTRL_2__POR (0x55)
-#define TOMTOM_A_LDO_L_CTRL_3 (0x10E)
-#define TOMTOM_A_LDO_L_CTRL_3__POR (0x56)
-#define TOMTOM_A_LDO_L_CTRL_4 (0x10F)
-#define TOMTOM_A_LDO_L_CTRL_4__POR (0x55)
-#define TOMTOM_A_LDO_H_MODE_1 (0x110)
-#define TOMTOM_A_LDO_H_MODE_1__POR (0x65)
-#define TOMTOM_A_LDO_H_MODE_2 (0x111)
-#define TOMTOM_A_LDO_H_MODE_2__POR (0xA8)
-#define TOMTOM_A_LDO_H_LOOP_CTL (0x112)
-#define TOMTOM_A_LDO_H_LOOP_CTL__POR (0x6B)
-#define TOMTOM_A_LDO_H_COMP_1 (0x113)
-#define TOMTOM_A_LDO_H_COMP_1__POR (0x84)
-#define TOMTOM_A_LDO_H_COMP_2 (0x114)
-#define TOMTOM_A_LDO_H_COMP_2__POR (0xE0)
-#define TOMTOM_A_LDO_H_BIAS_1 (0x115)
-#define TOMTOM_A_LDO_H_BIAS_1__POR (0x6D)
-#define TOMTOM_A_LDO_H_BIAS_2 (0x116)
-#define TOMTOM_A_LDO_H_BIAS_2__POR (0xA5)
-#define TOMTOM_A_LDO_H_BIAS_3 (0x117)
-#define TOMTOM_A_LDO_H_BIAS_3__POR (0x60)
-#define TOMTOM_A_VBAT_CLK (0x118)
-#define TOMTOM_A_VBAT_CLK__POR (0x03)
-#define TOMTOM_A_VBAT_LOOP (0x119)
-#define TOMTOM_A_VBAT_LOOP__POR (0x02)
-#define TOMTOM_A_VBAT_REF (0x11A)
-#define TOMTOM_A_VBAT_REF__POR (0x20)
-#define TOMTOM_A_VBAT_ADC_TEST (0x11B)
-#define TOMTOM_A_VBAT_ADC_TEST__POR (0x00)
-#define TOMTOM_A_VBAT_FE (0x11C)
-#define TOMTOM_A_VBAT_FE__POR (0x48)
-#define TOMTOM_A_VBAT_BIAS_1 (0x11D)
-#define TOMTOM_A_VBAT_BIAS_1__POR (0x03)
-#define TOMTOM_A_VBAT_BIAS_2 (0x11E)
-#define TOMTOM_A_VBAT_BIAS_2__POR (0x00)
-#define TOMTOM_A_VBAT_ADC_DATA_MSB (0x11F)
-#define TOMTOM_A_VBAT_ADC_DATA_MSB__POR (0x00)
-#define TOMTOM_A_VBAT_ADC_DATA_LSB (0x120)
-#define TOMTOM_A_VBAT_ADC_DATA_LSB__POR (0x00)
-#define TOMTOM_A_FLL_NREF (0x121)
-#define TOMTOM_A_FLL_NREF__POR (0x12)
-#define TOMTOM_A_FLL_KDCO_TUNE (0x122)
-#define TOMTOM_A_FLL_KDCO_TUNE__POR (0x05)
-#define TOMTOM_A_FLL_LOCK_THRESH (0x123)
-#define TOMTOM_A_FLL_LOCK_THRESH__POR (0xC2)
-#define TOMTOM_A_FLL_LOCK_DET_COUNT (0x124)
-#define TOMTOM_A_FLL_LOCK_DET_COUNT__POR (0x40)
-#define TOMTOM_A_FLL_DAC_THRESHOLD (0x125)
-#define TOMTOM_A_FLL_DAC_THRESHOLD__POR (0xC8)
-#define TOMTOM_A_FLL_TEST_DCO_FREERUN (0x126)
-#define TOMTOM_A_FLL_TEST_DCO_FREERUN__POR (0x00)
-#define TOMTOM_A_FLL_TEST_ENABLE (0x127)
-#define TOMTOM_A_FLL_TEST_ENABLE__POR (0x00)
-#define TOMTOM_A_MICB_CFILT_1_CTL (0x128)
-#define TOMTOM_A_MICB_CFILT_1_CTL__POR (0x40)
-#define TOMTOM_A_MICB_CFILT_1_VAL (0x129)
-#define TOMTOM_A_MICB_CFILT_1_VAL__POR (0x80)
-#define TOMTOM_A_MICB_CFILT_1_PRECHRG (0x12A)
-#define TOMTOM_A_MICB_CFILT_1_PRECHRG__POR (0x38)
-#define TOMTOM_A_MICB_1_CTL (0x12B)
-#define TOMTOM_A_MICB_1_CTL__POR (0x16)
-#define TOMTOM_A_MICB_1_INT_RBIAS (0x12C)
-#define TOMTOM_A_MICB_1_INT_RBIAS__POR (0x24)
-#define TOMTOM_A_MICB_1_MBHC (0x12D)
-#define TOMTOM_A_MICB_1_MBHC__POR (0x01)
-#define TOMTOM_A_MICB_CFILT_2_CTL (0x12E)
-#define TOMTOM_A_MICB_CFILT_2_CTL__POR (0x41)
-#define TOMTOM_A_MICB_CFILT_2_VAL (0x12F)
-#define TOMTOM_A_MICB_CFILT_2_VAL__POR (0x80)
-#define TOMTOM_A_MICB_CFILT_2_PRECHRG (0x130)
-#define TOMTOM_A_MICB_CFILT_2_PRECHRG__POR (0x38)
-#define TOMTOM_A_MICB_2_CTL (0x131)
-#define TOMTOM_A_MICB_2_CTL__POR (0x16)
-#define TOMTOM_A_MICB_2_INT_RBIAS (0x132)
-#define TOMTOM_A_MICB_2_INT_RBIAS__POR (0x24)
-#define TOMTOM_A_MICB_2_MBHC (0x133)
-#define TOMTOM_A_MICB_2_MBHC__POR (0x02)
-#define TOMTOM_A_MICB_CFILT_3_CTL (0x134)
-#define TOMTOM_A_MICB_CFILT_3_CTL__POR (0x40)
-#define TOMTOM_A_MICB_CFILT_3_VAL (0x135)
-#define TOMTOM_A_MICB_CFILT_3_VAL__POR (0x80)
-#define TOMTOM_A_MICB_CFILT_3_PRECHRG (0x136)
-#define TOMTOM_A_MICB_CFILT_3_PRECHRG__POR (0x38)
-#define TOMTOM_A_MICB_3_CTL (0x137)
-#define TOMTOM_A_MICB_3_CTL__POR (0x16)
-#define TOMTOM_A_MICB_3_INT_RBIAS (0x138)
-#define TOMTOM_A_MICB_3_INT_RBIAS__POR (0x24)
-#define TOMTOM_A_MICB_3_MBHC (0x139)
-#define TOMTOM_A_MICB_3_MBHC__POR (0x00)
-#define TOMTOM_A_MICB_4_CTL (0x13A)
-#define TOMTOM_A_MICB_4_CTL__POR (0x16)
-#define TOMTOM_A_MICB_4_INT_RBIAS (0x13B)
-#define TOMTOM_A_MICB_4_INT_RBIAS__POR (0x24)
-#define TOMTOM_A_MICB_4_MBHC (0x13C)
-#define TOMTOM_A_MICB_4_MBHC__POR (0x01)
-#define TOMTOM_A_SPKR_DRV2_EN (0x13D)
-#define TOMTOM_A_SPKR_DRV2_EN__POR (0x6F)
-#define TOMTOM_A_SPKR_DRV2_GAIN (0x13E)
-#define TOMTOM_A_SPKR_DRV2_GAIN__POR (0x00)
-#define TOMTOM_A_SPKR_DRV2_DAC_CTL (0x13F)
-#define TOMTOM_A_SPKR_DRV2_DAC_CTL__POR (0x04)
-#define TOMTOM_A_SPKR_DRV2_OCP_CTL (0x140)
-#define TOMTOM_A_SPKR_DRV2_OCP_CTL__POR (0x97)
-#define TOMTOM_A_SPKR_DRV2_CLIP_DET (0x141)
-#define TOMTOM_A_SPKR_DRV2_CLIP_DET__POR (0x01)
-#define TOMTOM_A_SPKR_DRV2_DBG_DAC (0x142)
-#define TOMTOM_A_SPKR_DRV2_DBG_DAC__POR (0x05)
-#define TOMTOM_A_SPKR_DRV2_DBG_PA (0x143)
-#define TOMTOM_A_SPKR_DRV2_DBG_PA__POR (0x18)
-#define TOMTOM_A_SPKR_DRV2_DBG_PWRSTG (0x144)
-#define TOMTOM_A_SPKR_DRV2_DBG_PWRSTG__POR (0x00)
-#define TOMTOM_A_SPKR_DRV2_BIAS_LDO (0x145)
-#define TOMTOM_A_SPKR_DRV2_BIAS_LDO__POR (0x45)
-#define TOMTOM_A_SPKR_DRV2_BIAS_INT (0x146)
-#define TOMTOM_A_SPKR_DRV2_BIAS_INT__POR (0xA5)
-#define TOMTOM_A_SPKR_DRV2_BIAS_PA (0x147)
-#define TOMTOM_A_SPKR_DRV2_BIAS_PA__POR (0x55)
-#define TOMTOM_A_SPKR_DRV2_STATUS_OCP (0x148)
-#define TOMTOM_A_SPKR_DRV2_STATUS_OCP__POR (0x00)
-#define TOMTOM_A_SPKR_DRV2_STATUS_PA (0x149)
-#define TOMTOM_A_SPKR_DRV2_STATUS_PA__POR (0x00)
-#define TOMTOM_A_MBHC_INSERT_DETECT (0x14A)
-#define TOMTOM_A_MBHC_INSERT_DETECT__POR (0x00)
-#define TOMTOM_A_MBHC_INSERT_DET_STATUS (0x14B)
-#define TOMTOM_A_MBHC_INSERT_DET_STATUS__POR (0x00)
-#define TOMTOM_A_TX_COM_BIAS (0x14C)
-#define TOMTOM_A_TX_COM_BIAS__POR (0xF0)
-#define TOMTOM_A_MBHC_INSERT_DETECT2 (0x14D)
-#define TOMTOM_A_MBHC_INSERT_DETECT2__POR (0xD0)
-#define TOMTOM_A_MBHC_SCALING_MUX_1 (0x14E)
-#define TOMTOM_A_MBHC_SCALING_MUX_1__POR (0x00)
-#define TOMTOM_A_MBHC_SCALING_MUX_2 (0x14F)
-#define TOMTOM_A_MBHC_SCALING_MUX_2__POR (0x80)
-#define TOMTOM_A_MAD_ANA_CTRL (0x150)
-#define TOMTOM_A_MAD_ANA_CTRL__POR (0xF1)
-#define TOMTOM_A_TX_SUP_SWITCH_CTRL_1 (0x151)
-#define TOMTOM_A_TX_SUP_SWITCH_CTRL_1__POR (0x00)
-#define TOMTOM_A_TX_SUP_SWITCH_CTRL_2 (0x152)
-#define TOMTOM_A_TX_SUP_SWITCH_CTRL_2__POR (0x80)
-#define TOMTOM_A_TX_1_GAIN (0x153)
-#define TOMTOM_A_TX_1_GAIN__POR (0x02)
-#define TOMTOM_A_TX_1_2_TEST_EN (0x154)
-#define TOMTOM_A_TX_1_2_TEST_EN__POR (0xCC)
-#define TOMTOM_A_TX_2_GAIN (0x155)
-#define TOMTOM_A_TX_2_GAIN__POR (0x02)
-#define TOMTOM_A_TX_1_2_ADC_IB (0x156)
-#define TOMTOM_A_TX_1_2_ADC_IB__POR (0x44)
-#define TOMTOM_A_TX_1_2_ATEST_REFCTRL (0x157)
-#define TOMTOM_A_TX_1_2_ATEST_REFCTRL__POR (0x00)
-#define TOMTOM_A_TX_1_2_TEST_CTL (0x158)
-#define TOMTOM_A_TX_1_2_TEST_CTL__POR (0x38)
-#define TOMTOM_A_TX_1_2_TEST_BLOCK_EN (0x159)
-#define TOMTOM_A_TX_1_2_TEST_BLOCK_EN__POR (0xFC)
-#define TOMTOM_A_TX_1_2_TXFE_CLKDIV (0x15A)
-#define TOMTOM_A_TX_1_2_TXFE_CLKDIV__POR (0x55)
-#define TOMTOM_A_TX_1_2_SAR_ERR_CH1 (0x15B)
-#define TOMTOM_A_TX_1_2_SAR_ERR_CH1__POR (0x00)
-#define TOMTOM_A_TX_1_2_SAR_ERR_CH2 (0x15C)
-#define TOMTOM_A_TX_1_2_SAR_ERR_CH2__POR (0x00)
-#define TOMTOM_A_TX_3_GAIN (0x15D)
-#define TOMTOM_A_TX_3_GAIN__POR (0x02)
-#define TOMTOM_A_TX_3_4_TEST_EN (0x15E)
-#define TOMTOM_A_TX_3_4_TEST_EN__POR (0xCC)
-#define TOMTOM_A_TX_4_GAIN (0x15F)
-#define TOMTOM_A_TX_4_GAIN__POR (0x02)
-#define TOMTOM_A_TX_3_4_ADC_IB (0x160)
-#define TOMTOM_A_TX_3_4_ADC_IB__POR (0x44)
-#define TOMTOM_A_TX_3_4_ATEST_REFCTRL (0x161)
-#define TOMTOM_A_TX_3_4_ATEST_REFCTRL__POR (0x00)
-#define TOMTOM_A_TX_3_4_TEST_CTL (0x162)
-#define TOMTOM_A_TX_3_4_TEST_CTL__POR (0x38)
-#define TOMTOM_A_TX_3_4_TEST_BLOCK_EN (0x163)
-#define TOMTOM_A_TX_3_4_TEST_BLOCK_EN__POR (0xFC)
-#define TOMTOM_A_TX_3_4_TXFE_CKDIV (0x164)
-#define TOMTOM_A_TX_3_4_TXFE_CKDIV__POR (0x55)
-#define TOMTOM_A_TX_3_4_SAR_ERR_CH3 (0x165)
-#define TOMTOM_A_TX_3_4_SAR_ERR_CH3__POR (0x00)
-#define TOMTOM_A_TX_3_4_SAR_ERR_CH4 (0x166)
-#define TOMTOM_A_TX_3_4_SAR_ERR_CH4__POR (0x00)
-#define TOMTOM_A_TX_5_GAIN (0x167)
-#define TOMTOM_A_TX_5_GAIN__POR (0x02)
-#define TOMTOM_A_TX_5_6_TEST_EN (0x168)
-#define TOMTOM_A_TX_5_6_TEST_EN__POR (0xCC)
-#define TOMTOM_A_TX_6_GAIN (0x169)
-#define TOMTOM_A_TX_6_GAIN__POR (0x02)
-#define TOMTOM_A_TX_5_6_ADC_IB (0x16A)
-#define TOMTOM_A_TX_5_6_ADC_IB__POR (0x44)
-#define TOMTOM_A_TX_5_6_ATEST_REFCTRL (0x16B)
-#define TOMTOM_A_TX_5_6_ATEST_REFCTRL__POR (0x00)
-#define TOMTOM_A_TX_5_6_TEST_CTL (0x16C)
-#define TOMTOM_A_TX_5_6_TEST_CTL__POR (0x38)
-#define TOMTOM_A_TX_5_6_TEST_BLOCK_EN (0x16D)
-#define TOMTOM_A_TX_5_6_TEST_BLOCK_EN__POR (0xFC)
-#define TOMTOM_A_TX_5_6_TXFE_CKDIV (0x16E)
-#define TOMTOM_A_TX_5_6_TXFE_CKDIV__POR (0x55)
-#define TOMTOM_A_TX_5_6_SAR_ERR_CH5 (0x16F)
-#define TOMTOM_A_TX_5_6_SAR_ERR_CH5__POR (0x00)
-#define TOMTOM_A_TX_5_6_SAR_ERR_CH6 (0x170)
-#define TOMTOM_A_TX_5_6_SAR_ERR_CH6__POR (0x00)
-#define TOMTOM_A_TX_7_MBHC_EN (0x171)
-#define TOMTOM_A_TX_7_MBHC_EN__POR (0x0C)
-#define TOMTOM_A_TX_7_MBHC_ATEST_REFCTRL (0x172)
-#define TOMTOM_A_TX_7_MBHC_ATEST_REFCTRL__POR (0x00)
-#define TOMTOM_A_TX_7_MBHC_ADC (0x173)
-#define TOMTOM_A_TX_7_MBHC_ADC__POR (0x44)
-#define TOMTOM_A_TX_7_MBHC_TEST_CTL (0x174)
-#define TOMTOM_A_TX_7_MBHC_TEST_CTL__POR (0x38)
-#define TOMTOM_A_TX_7_MBHC_SAR_ERR (0x175)
-#define TOMTOM_A_TX_7_MBHC_SAR_ERR__POR (0x00)
-#define TOMTOM_A_TX_7_TXFE_CLKDIV (0x176)
-#define TOMTOM_A_TX_7_TXFE_CLKDIV__POR (0x8B)
-#define TOMTOM_A_RCO_CTRL (0x177)
-#define TOMTOM_A_RCO_CTRL__POR (0x00)
-#define TOMTOM_A_RCO_CALIBRATION_CTRL1 (0x178)
-#define TOMTOM_A_RCO_CALIBRATION_CTRL1__POR (0x00)
-#define TOMTOM_A_RCO_CALIBRATION_CTRL2 (0x179)
-#define TOMTOM_A_RCO_CALIBRATION_CTRL2__POR (0x00)
-#define TOMTOM_A_RCO_CALIBRATION_CTRL3 (0x17A)
-#define TOMTOM_A_RCO_CALIBRATION_CTRL3__POR (0x00)
-#define TOMTOM_A_RCO_TEST_CTRL (0x17B)
-#define TOMTOM_A_RCO_TEST_CTRL__POR (0x00)
-#define TOMTOM_A_RCO_CALIBRATION_RESULT1 (0x17C)
-#define TOMTOM_A_RCO_CALIBRATION_RESULT1__POR (0x00)
-#define TOMTOM_A_RCO_CALIBRATION_RESULT2 (0x17D)
-#define TOMTOM_A_RCO_CALIBRATION_RESULT2__POR (0x00)
-#define TOMTOM_A_BUCK_MODE_1 (0x181)
-#define TOMTOM_A_BUCK_MODE_1__POR (0x21)
-#define TOMTOM_A_BUCK_MODE_2 (0x182)
-#define TOMTOM_A_BUCK_MODE_2__POR (0xFF)
-#define TOMTOM_A_BUCK_MODE_3 (0x183)
-#define TOMTOM_A_BUCK_MODE_3__POR (0xCE)
-#define TOMTOM_A_BUCK_MODE_4 (0x184)
-#define TOMTOM_A_BUCK_MODE_4__POR (0x3A)
-#define TOMTOM_A_BUCK_MODE_5 (0x185)
-#define TOMTOM_A_BUCK_MODE_5__POR (0x00)
-#define TOMTOM_A_BUCK_CTRL_VCL_1 (0x186)
-#define TOMTOM_A_BUCK_CTRL_VCL_1__POR (0x08)
-#define TOMTOM_A_BUCK_CTRL_VCL_2 (0x187)
-#define TOMTOM_A_BUCK_CTRL_VCL_2__POR (0xA3)
-#define TOMTOM_A_BUCK_CTRL_VCL_3 (0x188)
-#define TOMTOM_A_BUCK_CTRL_VCL_3__POR (0x82)
-#define TOMTOM_A_BUCK_CTRL_CCL_1 (0x189)
-#define TOMTOM_A_BUCK_CTRL_CCL_1__POR (0x5B)
-#define TOMTOM_A_BUCK_CTRL_CCL_2 (0x18A)
-#define TOMTOM_A_BUCK_CTRL_CCL_2__POR (0xDC)
-#define TOMTOM_A_BUCK_CTRL_CCL_3 (0x18B)
-#define TOMTOM_A_BUCK_CTRL_CCL_3__POR (0x6A)
-#define TOMTOM_A_BUCK_CTRL_CCL_4 (0x18C)
-#define TOMTOM_A_BUCK_CTRL_CCL_4__POR (0x51)
-#define TOMTOM_A_BUCK_CTRL_PWM_DRVR_1 (0x18D)
-#define TOMTOM_A_BUCK_CTRL_PWM_DRVR_1__POR (0x50)
-#define TOMTOM_A_BUCK_CTRL_PWM_DRVR_2 (0x18E)
-#define TOMTOM_A_BUCK_CTRL_PWM_DRVR_2__POR (0x64)
-#define TOMTOM_A_BUCK_CTRL_PWM_DRVR_3 (0x18F)
-#define TOMTOM_A_BUCK_CTRL_PWM_DRVR_3__POR (0x77)
-#define TOMTOM_A_BUCK_TMUX_A_D (0x190)
-#define TOMTOM_A_BUCK_TMUX_A_D__POR (0x00)
-#define TOMTOM_A_NCP_BUCKREF (0x191)
-#define TOMTOM_A_NCP_BUCKREF__POR (0x00)
-#define TOMTOM_A_NCP_EN (0x192)
-#define TOMTOM_A_NCP_EN__POR (0xFE)
-#define TOMTOM_A_NCP_CLK (0x193)
-#define TOMTOM_A_NCP_CLK__POR (0x94)
-#define TOMTOM_A_NCP_STATIC (0x194)
-#define TOMTOM_A_NCP_STATIC__POR (0x28)
-#define TOMTOM_A_NCP_VTH_LOW (0x195)
-#define TOMTOM_A_NCP_VTH_LOW__POR (0x88)
-#define TOMTOM_A_NCP_VTH_HIGH (0x196)
-#define TOMTOM_A_NCP_VTH_HIGH__POR (0xA0)
-#define TOMTOM_A_NCP_ATEST (0x197)
-#define TOMTOM_A_NCP_ATEST__POR (0x00)
-#define TOMTOM_A_NCP_DTEST (0x198)
-#define TOMTOM_A_NCP_DTEST__POR (0x10)
-#define TOMTOM_A_NCP_DLY1 (0x199)
-#define TOMTOM_A_NCP_DLY1__POR (0x06)
-#define TOMTOM_A_NCP_DLY2 (0x19A)
-#define TOMTOM_A_NCP_DLY2__POR (0x06)
-#define TOMTOM_A_RX_AUX_SW_CTL (0x19B)
-#define TOMTOM_A_RX_AUX_SW_CTL__POR (0x00)
-#define TOMTOM_A_RX_PA_AUX_IN_CONN (0x19C)
-#define TOMTOM_A_RX_PA_AUX_IN_CONN__POR (0x00)
-#define TOMTOM_A_RX_COM_TIMER_DIV (0x19E)
-#define TOMTOM_A_RX_COM_TIMER_DIV__POR (0xE8)
-#define TOMTOM_A_RX_COM_OCP_CTL (0x19F)
-#define TOMTOM_A_RX_COM_OCP_CTL__POR (0x1F)
-#define TOMTOM_A_RX_COM_OCP_COUNT (0x1A0)
-#define TOMTOM_A_RX_COM_OCP_COUNT__POR (0x77)
-#define TOMTOM_A_RX_COM_DAC_CTL (0x1A1)
-#define TOMTOM_A_RX_COM_DAC_CTL__POR (0x00)
-#define TOMTOM_A_RX_COM_BIAS (0x1A2)
-#define TOMTOM_A_RX_COM_BIAS__POR (0x20)
-#define TOMTOM_A_RX_HPH_AUTO_CHOP (0x1A4)
-#define TOMTOM_A_RX_HPH_AUTO_CHOP__POR (0x38)
-#define TOMTOM_A_RX_HPH_CHOP_CTL (0x1A5)
-#define TOMTOM_A_RX_HPH_CHOP_CTL__POR (0xA4)
-#define TOMTOM_A_RX_HPH_BIAS_PA (0x1A6)
-#define TOMTOM_A_RX_HPH_BIAS_PA__POR (0x7A)
-#define TOMTOM_A_RX_HPH_BIAS_LDO (0x1A7)
-#define TOMTOM_A_RX_HPH_BIAS_LDO__POR (0x87)
-#define TOMTOM_A_RX_HPH_BIAS_CNP (0x1A8)
-#define TOMTOM_A_RX_HPH_BIAS_CNP__POR (0x8A)
-#define TOMTOM_A_RX_HPH_BIAS_WG_OCP (0x1A9)
-#define TOMTOM_A_RX_HPH_BIAS_WG_OCP__POR (0x2A)
-#define TOMTOM_A_RX_HPH_OCP_CTL (0x1AA)
-#define TOMTOM_A_RX_HPH_OCP_CTL__POR (0x69)
-#define TOMTOM_A_RX_HPH_CNP_EN (0x1AB)
-#define TOMTOM_A_RX_HPH_CNP_EN__POR (0x80)
-#define TOMTOM_A_RX_HPH_CNP_WG_CTL (0x1AC)
-#define TOMTOM_A_RX_HPH_CNP_WG_CTL__POR (0xDA)
-#define TOMTOM_A_RX_HPH_CNP_WG_TIME (0x1AD)
-#define TOMTOM_A_RX_HPH_CNP_WG_TIME__POR (0x15)
-#define TOMTOM_A_RX_HPH_L_GAIN (0x1AE)
-#define TOMTOM_A_RX_HPH_L_GAIN__POR (0xC0)
-#define TOMTOM_A_RX_HPH_L_TEST (0x1AF)
-#define TOMTOM_A_RX_HPH_L_TEST__POR (0x02)
-#define TOMTOM_A_RX_HPH_L_PA_CTL (0x1B0)
-#define TOMTOM_A_RX_HPH_L_PA_CTL__POR (0x42)
-#define TOMTOM_A_RX_HPH_L_DAC_CTL (0x1B1)
-#define TOMTOM_A_RX_HPH_L_DAC_CTL__POR (0x00)
-#define TOMTOM_A_RX_HPH_L_ATEST (0x1B2)
-#define TOMTOM_A_RX_HPH_L_ATEST__POR (0x00)
-#define TOMTOM_A_RX_HPH_L_STATUS (0x1B3)
-#define TOMTOM_A_RX_HPH_L_STATUS__POR (0x00)
-#define TOMTOM_A_RX_HPH_R_GAIN (0x1B4)
-#define TOMTOM_A_RX_HPH_R_GAIN__POR (0x00)
-#define TOMTOM_A_RX_HPH_R_TEST (0x1B5)
-#define TOMTOM_A_RX_HPH_R_TEST__POR (0x02)
-#define TOMTOM_A_RX_HPH_R_PA_CTL (0x1B6)
-#define TOMTOM_A_RX_HPH_R_PA_CTL__POR (0x42)
-#define TOMTOM_A_RX_HPH_R_DAC_CTL (0x1B7)
-#define TOMTOM_A_RX_HPH_R_DAC_CTL__POR (0x00)
-#define TOMTOM_A_RX_HPH_R_ATEST (0x1B8)
-#define TOMTOM_A_RX_HPH_R_ATEST__POR (0x00)
-#define TOMTOM_A_RX_HPH_R_STATUS (0x1B9)
-#define TOMTOM_A_RX_HPH_R_STATUS__POR (0x00)
-#define TOMTOM_A_RX_EAR_BIAS_PA (0x1BA)
-#define TOMTOM_A_RX_EAR_BIAS_PA__POR (0x76)
-#define TOMTOM_A_RX_EAR_BIAS_CMBUFF (0x1BB)
-#define TOMTOM_A_RX_EAR_BIAS_CMBUFF__POR (0xA0)
-#define TOMTOM_A_RX_EAR_EN (0x1BC)
-#define TOMTOM_A_RX_EAR_EN__POR (0x00)
-#define TOMTOM_A_RX_EAR_GAIN (0x1BD)
-#define TOMTOM_A_RX_EAR_GAIN__POR (0x02)
-#define TOMTOM_A_RX_EAR_CMBUFF (0x1BE)
-#define TOMTOM_A_RX_EAR_CMBUFF__POR (0x05)
-#define TOMTOM_A_RX_EAR_ICTL (0x1BF)
-#define TOMTOM_A_RX_EAR_ICTL__POR (0x40)
-#define TOMTOM_A_RX_EAR_CCOMP (0x1C0)
-#define TOMTOM_A_RX_EAR_CCOMP__POR (0x08)
-#define TOMTOM_A_RX_EAR_VCM (0x1C1)
-#define TOMTOM_A_RX_EAR_VCM__POR (0x03)
-#define TOMTOM_A_RX_EAR_CNP (0x1C2)
-#define TOMTOM_A_RX_EAR_CNP__POR (0xC0)
-#define TOMTOM_A_RX_EAR_DAC_CTL_ATEST (0x1C3)
-#define TOMTOM_A_RX_EAR_DAC_CTL_ATEST__POR (0x00)
-#define TOMTOM_A_RX_EAR_STATUS (0x1C5)
-#define TOMTOM_A_RX_EAR_STATUS__POR (0x04)
-#define TOMTOM_A_RX_LINE_BIAS_PA (0x1C6)
-#define TOMTOM_A_RX_LINE_BIAS_PA__POR (0x78)
-#define TOMTOM_A_RX_BUCK_BIAS1 (0x1C7)
-#define TOMTOM_A_RX_BUCK_BIAS1__POR (0x42)
-#define TOMTOM_A_RX_BUCK_BIAS2 (0x1C8)
-#define TOMTOM_A_RX_BUCK_BIAS2__POR (0x84)
-#define TOMTOM_A_RX_LINE_COM (0x1C9)
-#define TOMTOM_A_RX_LINE_COM__POR (0x80)
-#define TOMTOM_A_RX_LINE_CNP_EN (0x1CA)
-#define TOMTOM_A_RX_LINE_CNP_EN__POR (0x00)
-#define TOMTOM_A_RX_LINE_CNP_WG_CTL (0x1CB)
-#define TOMTOM_A_RX_LINE_CNP_WG_CTL__POR (0x00)
-#define TOMTOM_A_RX_LINE_CNP_WG_TIME (0x1CC)
-#define TOMTOM_A_RX_LINE_CNP_WG_TIME__POR (0x04)
-#define TOMTOM_A_RX_LINE_1_GAIN (0x1CD)
-#define TOMTOM_A_RX_LINE_1_GAIN__POR (0x00)
-#define TOMTOM_A_RX_LINE_1_TEST (0x1CE)
-#define TOMTOM_A_RX_LINE_1_TEST__POR (0x02)
-#define TOMTOM_A_RX_LINE_1_DAC_CTL (0x1CF)
-#define TOMTOM_A_RX_LINE_1_DAC_CTL__POR (0x00)
-#define TOMTOM_A_RX_LINE_1_STATUS (0x1D0)
-#define TOMTOM_A_RX_LINE_1_STATUS__POR (0x00)
-#define TOMTOM_A_RX_LINE_2_GAIN (0x1D1)
-#define TOMTOM_A_RX_LINE_2_GAIN__POR (0x00)
-#define TOMTOM_A_RX_LINE_2_TEST (0x1D2)
-#define TOMTOM_A_RX_LINE_2_TEST__POR (0x02)
-#define TOMTOM_A_RX_LINE_2_DAC_CTL (0x1D3)
-#define TOMTOM_A_RX_LINE_2_DAC_CTL__POR (0x00)
-#define TOMTOM_A_RX_LINE_2_STATUS (0x1D4)
-#define TOMTOM_A_RX_LINE_2_STATUS__POR (0x00)
-#define TOMTOM_A_RX_LINE_3_GAIN (0x1D5)
-#define TOMTOM_A_RX_LINE_3_GAIN__POR (0x00)
-#define TOMTOM_A_RX_LINE_3_TEST (0x1D6)
-#define TOMTOM_A_RX_LINE_3_TEST__POR (0x02)
-#define TOMTOM_A_RX_LINE_3_DAC_CTL (0x1D7)
-#define TOMTOM_A_RX_LINE_3_DAC_CTL__POR (0x00)
-#define TOMTOM_A_RX_LINE_3_STATUS (0x1D8)
-#define TOMTOM_A_RX_LINE_3_STATUS__POR (0x00)
-#define TOMTOM_A_RX_LINE_4_GAIN (0x1D9)
-#define TOMTOM_A_RX_LINE_4_GAIN__POR (0x00)
-#define TOMTOM_A_RX_LINE_4_TEST (0x1DA)
-#define TOMTOM_A_RX_LINE_4_TEST__POR (0x02)
-#define TOMTOM_A_RX_LINE_4_DAC_CTL (0x1DB)
-#define TOMTOM_A_RX_LINE_4_DAC_CTL__POR (0x00)
-#define TOMTOM_A_RX_LINE_4_STATUS (0x1DC)
-#define TOMTOM_A_RX_LINE_4_STATUS__POR (0x00)
-#define TOMTOM_A_RX_LINE_CNP_DBG (0x1DD)
-#define TOMTOM_A_RX_LINE_CNP_DBG__POR (0x00)
-#define TOMTOM_A_SPKR_DRV1_EN (0x1DF)
-#define TOMTOM_A_SPKR_DRV1_EN__POR (0x6F)
-#define TOMTOM_A_SPKR_DRV1_GAIN (0x1E0)
-#define TOMTOM_A_SPKR_DRV1_GAIN__POR (0x00)
-#define TOMTOM_A_SPKR_DRV1_DAC_CTL (0x1E1)
-#define TOMTOM_A_SPKR_DRV1_DAC_CTL__POR (0x04)
-#define TOMTOM_A_SPKR_DRV1_OCP_CTL (0x1E2)
-#define TOMTOM_A_SPKR_DRV1_OCP_CTL__POR (0x97)
-#define TOMTOM_A_SPKR_DRV1_CLIP_DET (0x1E3)
-#define TOMTOM_A_SPKR_DRV1_CLIP_DET__POR (0x01)
-#define TOMTOM_A_SPKR_DRV1_IEC (0x1E4)
-#define TOMTOM_A_SPKR_DRV1_IEC__POR (0x00)
-#define TOMTOM_A_SPKR_DRV1_DBG_DAC (0x1E5)
-#define TOMTOM_A_SPKR_DRV1_DBG_DAC__POR (0x05)
-#define TOMTOM_A_SPKR_DRV1_DBG_PA (0x1E6)
-#define TOMTOM_A_SPKR_DRV1_DBG_PA__POR (0x18)
-#define TOMTOM_A_SPKR_DRV1_DBG_PWRSTG (0x1E7)
-#define TOMTOM_A_SPKR_DRV1_DBG_PWRSTG__POR (0x00)
-#define TOMTOM_A_SPKR_DRV1_BIAS_LDO (0x1E8)
-#define TOMTOM_A_SPKR_DRV1_BIAS_LDO__POR (0x45)
-#define TOMTOM_A_SPKR_DRV1_BIAS_INT (0x1E9)
-#define TOMTOM_A_SPKR_DRV1_BIAS_INT__POR (0xA5)
-#define TOMTOM_A_SPKR_DRV1_BIAS_PA (0x1EA)
-#define TOMTOM_A_SPKR_DRV1_BIAS_PA__POR (0x55)
-#define TOMTOM_A_SPKR_DRV1_STATUS_OCP (0x1EB)
-#define TOMTOM_A_SPKR_DRV1_STATUS_OCP__POR (0x00)
-#define TOMTOM_A_SPKR_DRV1_STATUS_PA (0x1EC)
-#define TOMTOM_A_SPKR_DRV1_STATUS_PA__POR (0x00)
-#define TOMTOM_A_SPKR1_PROT_EN (0x1ED)
-#define TOMTOM_A_SPKR1_PROT_EN__POR (0x00)
-#define TOMTOM_A_SPKR1_PROT_ADC_TEST_EN (0x1EE)
-#define TOMTOM_A_SPKR1_PROT_ADC_TEST_EN__POR (0x44)
-#define TOMTOM_A_SPKR1_PROT_ATEST (0x1EF)
-#define TOMTOM_A_SPKR1_PROT_ATEST__POR (0x00)
-#define TOMTOM_A_SPKR1_PROT_LDO_CTRL (0x1F0)
-#define TOMTOM_A_SPKR1_PROT_LDO_CTRL__POR (0x00)
-#define TOMTOM_A_SPKR1_PROT_ISENSE_CTRL (0x1F1)
-#define TOMTOM_A_SPKR1_PROT_ISENSE_CTRL__POR (0x00)
-#define TOMTOM_A_SPKR1_PROT_VSENSE_CTRL (0x1F2)
-#define TOMTOM_A_SPKR1_PROT_VSENSE_CTRL__POR (0x00)
-#define TOMTOM_A_SPKR2_PROT_EN (0x1F3)
-#define TOMTOM_A_SPKR2_PROT_EN__POR (0x00)
-#define TOMTOM_A_SPKR2_PROT_ADC_TEST_EN (0x1F4)
-#define TOMTOM_A_SPKR2_PROT_ADC_TEST_EN__POR (0x44)
-#define TOMTOM_A_SPKR2_PROT_ATEST (0x1F5)
-#define TOMTOM_A_SPKR2_PROT_ATEST__POR (0x00)
-#define TOMTOM_A_SPKR2_PROT_LDO_CTRL (0x1F6)
-#define TOMTOM_A_SPKR2_PROT_LDO_CTRL__POR (0x00)
-#define TOMTOM_A_SPKR2_PROT_ISENSE_CTRL (0x1F7)
-#define TOMTOM_A_SPKR2_PROT_ISENSE_CTRL__POR (0x00)
-#define TOMTOM_A_SPKR2_PROT_VSENSE_CTRL (0x1F8)
-#define TOMTOM_A_SPKR2_PROT_VSENSE_CTRL__POR (0x00)
-#define TOMTOM_A_MBHC_HPH (0x1FE)
-#define TOMTOM_A_MBHC_HPH__POR (0x44)
-#define TOMTOM_A_CDC_ANC1_B1_CTL (0x200)
-#define TOMTOM_A_CDC_ANC1_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_ANC2_B1_CTL (0x280)
-#define TOMTOM_A_CDC_ANC2_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_ANC1_SHIFT (0x201)
-#define TOMTOM_A_CDC_ANC1_SHIFT__POR (0x00)
-#define TOMTOM_A_CDC_ANC2_SHIFT (0x281)
-#define TOMTOM_A_CDC_ANC2_SHIFT__POR (0x00)
-#define TOMTOM_A_CDC_ANC1_IIR_B1_CTL (0x202)
-#define TOMTOM_A_CDC_ANC1_IIR_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_ANC2_IIR_B1_CTL (0x282)
-#define TOMTOM_A_CDC_ANC2_IIR_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_ANC1_IIR_B2_CTL (0x203)
-#define TOMTOM_A_CDC_ANC1_IIR_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_ANC2_IIR_B2_CTL (0x283)
-#define TOMTOM_A_CDC_ANC2_IIR_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_ANC1_IIR_B3_CTL (0x204)
-#define TOMTOM_A_CDC_ANC1_IIR_B3_CTL__POR (0x00)
-#define TOMTOM_A_CDC_ANC2_IIR_B3_CTL (0x284)
-#define TOMTOM_A_CDC_ANC2_IIR_B3_CTL__POR (0x00)
-#define TOMTOM_A_CDC_ANC1_LPF_B1_CTL (0x206)
-#define TOMTOM_A_CDC_ANC1_LPF_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_ANC2_LPF_B1_CTL (0x286)
-#define TOMTOM_A_CDC_ANC2_LPF_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_ANC1_LPF_B2_CTL (0x207)
-#define TOMTOM_A_CDC_ANC1_LPF_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_ANC2_LPF_B2_CTL (0x287)
-#define TOMTOM_A_CDC_ANC2_LPF_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_ANC1_SPARE (0x209)
-#define TOMTOM_A_CDC_ANC1_SPARE__POR (0x00)
-#define TOMTOM_A_CDC_ANC2_SPARE (0x289)
-#define TOMTOM_A_CDC_ANC2_SPARE__POR (0x00)
-#define TOMTOM_A_CDC_ANC1_SMLPF_CTL (0x20A)
-#define TOMTOM_A_CDC_ANC1_SMLPF_CTL__POR (0x00)
-#define TOMTOM_A_CDC_ANC2_SMLPF_CTL (0x28A)
-#define TOMTOM_A_CDC_ANC2_SMLPF_CTL__POR (0x00)
-#define TOMTOM_A_CDC_ANC1_DCFLT_CTL (0x20B)
-#define TOMTOM_A_CDC_ANC1_DCFLT_CTL__POR (0x00)
-#define TOMTOM_A_CDC_ANC2_DCFLT_CTL (0x28B)
-#define TOMTOM_A_CDC_ANC2_DCFLT_CTL__POR (0x00)
-#define TOMTOM_A_CDC_ANC1_GAIN_CTL (0x20C)
-#define TOMTOM_A_CDC_ANC1_GAIN_CTL__POR (0x00)
-#define TOMTOM_A_CDC_ANC2_GAIN_CTL (0x28C)
-#define TOMTOM_A_CDC_ANC2_GAIN_CTL__POR (0x00)
-#define TOMTOM_A_CDC_ANC1_B2_CTL (0x20D)
-#define TOMTOM_A_CDC_ANC1_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_ANC2_B2_CTL (0x28D)
-#define TOMTOM_A_CDC_ANC2_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_TX1_VOL_CTL_TIMER (0x220)
-#define TOMTOM_A_CDC_TX1_VOL_CTL_TIMER__POR (0x00)
-#define TOMTOM_A_CDC_TX2_VOL_CTL_TIMER (0x228)
-#define TOMTOM_A_CDC_TX2_VOL_CTL_TIMER__POR (0x00)
-#define TOMTOM_A_CDC_TX3_VOL_CTL_TIMER (0x230)
-#define TOMTOM_A_CDC_TX3_VOL_CTL_TIMER__POR (0x00)
-#define TOMTOM_A_CDC_TX4_VOL_CTL_TIMER (0x238)
-#define TOMTOM_A_CDC_TX4_VOL_CTL_TIMER__POR (0x00)
-#define TOMTOM_A_CDC_TX5_VOL_CTL_TIMER (0x240)
-#define TOMTOM_A_CDC_TX5_VOL_CTL_TIMER__POR (0x00)
-#define TOMTOM_A_CDC_TX6_VOL_CTL_TIMER (0x248)
-#define TOMTOM_A_CDC_TX6_VOL_CTL_TIMER__POR (0x00)
-#define TOMTOM_A_CDC_TX7_VOL_CTL_TIMER (0x250)
-#define TOMTOM_A_CDC_TX7_VOL_CTL_TIMER__POR (0x00)
-#define TOMTOM_A_CDC_TX8_VOL_CTL_TIMER (0x258)
-#define TOMTOM_A_CDC_TX8_VOL_CTL_TIMER__POR (0x00)
-#define TOMTOM_A_CDC_TX9_VOL_CTL_TIMER (0x260)
-#define TOMTOM_A_CDC_TX9_VOL_CTL_TIMER__POR (0x00)
-#define TOMTOM_A_CDC_TX10_VOL_CTL_TIMER (0x268)
-#define TOMTOM_A_CDC_TX10_VOL_CTL_TIMER__POR (0x00)
-#define TOMTOM_A_CDC_TX1_VOL_CTL_GAIN (0x221)
-#define TOMTOM_A_CDC_TX1_VOL_CTL_GAIN__POR (0x00)
-#define TOMTOM_A_CDC_TX2_VOL_CTL_GAIN (0x229)
-#define TOMTOM_A_CDC_TX2_VOL_CTL_GAIN__POR (0x00)
-#define TOMTOM_A_CDC_TX3_VOL_CTL_GAIN (0x231)
-#define TOMTOM_A_CDC_TX3_VOL_CTL_GAIN__POR (0x00)
-#define TOMTOM_A_CDC_TX4_VOL_CTL_GAIN (0x239)
-#define TOMTOM_A_CDC_TX4_VOL_CTL_GAIN__POR (0x00)
-#define TOMTOM_A_CDC_TX5_VOL_CTL_GAIN (0x241)
-#define TOMTOM_A_CDC_TX5_VOL_CTL_GAIN__POR (0x00)
-#define TOMTOM_A_CDC_TX6_VOL_CTL_GAIN (0x249)
-#define TOMTOM_A_CDC_TX6_VOL_CTL_GAIN__POR (0x00)
-#define TOMTOM_A_CDC_TX7_VOL_CTL_GAIN (0x251)
-#define TOMTOM_A_CDC_TX7_VOL_CTL_GAIN__POR (0x00)
-#define TOMTOM_A_CDC_TX8_VOL_CTL_GAIN (0x259)
-#define TOMTOM_A_CDC_TX8_VOL_CTL_GAIN__POR (0x00)
-#define TOMTOM_A_CDC_TX9_VOL_CTL_GAIN (0x261)
-#define TOMTOM_A_CDC_TX9_VOL_CTL_GAIN__POR (0x00)
-#define TOMTOM_A_CDC_TX10_VOL_CTL_GAIN (0x269)
-#define TOMTOM_A_CDC_TX10_VOL_CTL_GAIN__POR (0x00)
-#define TOMTOM_A_CDC_TX1_VOL_CTL_CFG (0x222)
-#define TOMTOM_A_CDC_TX1_VOL_CTL_CFG__POR (0x00)
-#define TOMTOM_A_CDC_TX2_VOL_CTL_CFG (0x22A)
-#define TOMTOM_A_CDC_TX2_VOL_CTL_CFG__POR (0x00)
-#define TOMTOM_A_CDC_TX3_VOL_CTL_CFG (0x232)
-#define TOMTOM_A_CDC_TX3_VOL_CTL_CFG__POR (0x00)
-#define TOMTOM_A_CDC_TX4_VOL_CTL_CFG (0x23A)
-#define TOMTOM_A_CDC_TX4_VOL_CTL_CFG__POR (0x00)
-#define TOMTOM_A_CDC_TX5_VOL_CTL_CFG (0x242)
-#define TOMTOM_A_CDC_TX5_VOL_CTL_CFG__POR (0x00)
-#define TOMTOM_A_CDC_TX6_VOL_CTL_CFG (0x24A)
-#define TOMTOM_A_CDC_TX6_VOL_CTL_CFG__POR (0x00)
-#define TOMTOM_A_CDC_TX7_VOL_CTL_CFG (0x252)
-#define TOMTOM_A_CDC_TX7_VOL_CTL_CFG__POR (0x00)
-#define TOMTOM_A_CDC_TX8_VOL_CTL_CFG (0x25A)
-#define TOMTOM_A_CDC_TX8_VOL_CTL_CFG__POR (0x00)
-#define TOMTOM_A_CDC_TX9_VOL_CTL_CFG (0x262)
-#define TOMTOM_A_CDC_TX9_VOL_CTL_CFG__POR (0x00)
-#define TOMTOM_A_CDC_TX10_VOL_CTL_CFG (0x26A)
-#define TOMTOM_A_CDC_TX10_VOL_CTL_CFG__POR (0x00)
-#define TOMTOM_A_CDC_TX1_MUX_CTL (0x223)
-#define TOMTOM_A_CDC_TX1_MUX_CTL__POR (0x48)
-#define TOMTOM_A_CDC_TX2_MUX_CTL (0x22B)
-#define TOMTOM_A_CDC_TX2_MUX_CTL__POR (0x48)
-#define TOMTOM_A_CDC_TX3_MUX_CTL (0x233)
-#define TOMTOM_A_CDC_TX3_MUX_CTL__POR (0x48)
-#define TOMTOM_A_CDC_TX4_MUX_CTL (0x23B)
-#define TOMTOM_A_CDC_TX4_MUX_CTL__POR (0x48)
-#define TOMTOM_A_CDC_TX5_MUX_CTL (0x243)
-#define TOMTOM_A_CDC_TX5_MUX_CTL__POR (0x48)
-#define TOMTOM_A_CDC_TX6_MUX_CTL (0x24B)
-#define TOMTOM_A_CDC_TX6_MUX_CTL__POR (0x48)
-#define TOMTOM_A_CDC_TX7_MUX_CTL (0x253)
-#define TOMTOM_A_CDC_TX7_MUX_CTL__POR (0x48)
-#define TOMTOM_A_CDC_TX8_MUX_CTL (0x25B)
-#define TOMTOM_A_CDC_TX8_MUX_CTL__POR (0x48)
-#define TOMTOM_A_CDC_TX9_MUX_CTL (0x263)
-#define TOMTOM_A_CDC_TX9_MUX_CTL__POR (0x48)
-#define TOMTOM_A_CDC_TX10_MUX_CTL (0x26B)
-#define TOMTOM_A_CDC_TX10_MUX_CTL__POR (0x48)
-#define TOMTOM_A_CDC_TX1_CLK_FS_CTL (0x224)
-#define TOMTOM_A_CDC_TX1_CLK_FS_CTL__POR (0x03)
-#define TOMTOM_A_CDC_TX2_CLK_FS_CTL (0x22C)
-#define TOMTOM_A_CDC_TX2_CLK_FS_CTL__POR (0x03)
-#define TOMTOM_A_CDC_TX3_CLK_FS_CTL (0x234)
-#define TOMTOM_A_CDC_TX3_CLK_FS_CTL__POR (0x03)
-#define TOMTOM_A_CDC_TX4_CLK_FS_CTL (0x23C)
-#define TOMTOM_A_CDC_TX4_CLK_FS_CTL__POR (0x03)
-#define TOMTOM_A_CDC_TX5_CLK_FS_CTL (0x244)
-#define TOMTOM_A_CDC_TX5_CLK_FS_CTL__POR (0x03)
-#define TOMTOM_A_CDC_TX6_CLK_FS_CTL (0x24C)
-#define TOMTOM_A_CDC_TX6_CLK_FS_CTL__POR (0x03)
-#define TOMTOM_A_CDC_TX7_CLK_FS_CTL (0x254)
-#define TOMTOM_A_CDC_TX7_CLK_FS_CTL__POR (0x03)
-#define TOMTOM_A_CDC_TX8_CLK_FS_CTL (0x25C)
-#define TOMTOM_A_CDC_TX8_CLK_FS_CTL__POR (0x03)
-#define TOMTOM_A_CDC_TX9_CLK_FS_CTL (0x264)
-#define TOMTOM_A_CDC_TX9_CLK_FS_CTL__POR (0x03)
-#define TOMTOM_A_CDC_TX10_CLK_FS_CTL (0x26C)
-#define TOMTOM_A_CDC_TX10_CLK_FS_CTL__POR (0x03)
-#define TOMTOM_A_CDC_TX1_DMIC_CTL (0x225)
-#define TOMTOM_A_CDC_TX1_DMIC_CTL__POR (0x00)
-#define TOMTOM_A_CDC_TX2_DMIC_CTL (0x22D)
-#define TOMTOM_A_CDC_TX2_DMIC_CTL__POR (0x00)
-#define TOMTOM_A_CDC_TX3_DMIC_CTL (0x235)
-#define TOMTOM_A_CDC_TX3_DMIC_CTL__POR (0x00)
-#define TOMTOM_A_CDC_TX4_DMIC_CTL (0x23D)
-#define TOMTOM_A_CDC_TX4_DMIC_CTL__POR (0x00)
-#define TOMTOM_A_CDC_TX5_DMIC_CTL (0x245)
-#define TOMTOM_A_CDC_TX5_DMIC_CTL__POR (0x00)
-#define TOMTOM_A_CDC_TX6_DMIC_CTL (0x24D)
-#define TOMTOM_A_CDC_TX6_DMIC_CTL__POR (0x00)
-#define TOMTOM_A_CDC_TX7_DMIC_CTL (0x255)
-#define TOMTOM_A_CDC_TX7_DMIC_CTL__POR (0x00)
-#define TOMTOM_A_CDC_TX8_DMIC_CTL (0x25D)
-#define TOMTOM_A_CDC_TX8_DMIC_CTL__POR (0x00)
-#define TOMTOM_A_CDC_TX9_DMIC_CTL (0x265)
-#define TOMTOM_A_CDC_TX9_DMIC_CTL__POR (0x00)
-#define TOMTOM_A_CDC_TX10_DMIC_CTL (0x26D)
-#define TOMTOM_A_CDC_TX10_DMIC_CTL__POR (0x00)
-#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL0 (0x270)
-#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL0__POR (0x00)
-#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL1 (0x271)
-#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL1__POR (0x00)
-#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL2 (0x272)
-#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL2__POR (0x00)
-#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL3 (0x273)
-#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL3__POR (0x00)
-#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL4 (0x274)
-#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL4__POR (0x00)
-#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL5 (0x275)
-#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL5__POR (0x00)
-#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL6 (0x276)
-#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL6__POR (0x00)
-#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL7 (0x277)
-#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL7__POR (0x00)
-#define TOMTOM_A_CDC_DEBUG_B1_CTL (0x278)
-#define TOMTOM_A_CDC_DEBUG_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_DEBUG_B2_CTL (0x279)
-#define TOMTOM_A_CDC_DEBUG_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_DEBUG_B3_CTL (0x27A)
-#define TOMTOM_A_CDC_DEBUG_B3_CTL__POR (0x00)
-#define TOMTOM_A_CDC_DEBUG_B4_CTL (0x27B)
-#define TOMTOM_A_CDC_DEBUG_B4_CTL__POR (0x00)
-#define TOMTOM_A_CDC_DEBUG_B5_CTL (0x27C)
-#define TOMTOM_A_CDC_DEBUG_B5_CTL__POR (0x00)
-#define TOMTOM_A_CDC_DEBUG_B6_CTL (0x27D)
-#define TOMTOM_A_CDC_DEBUG_B6_CTL__POR (0x00)
-#define TOMTOM_A_CDC_DEBUG_B7_CTL (0x27E)
-#define TOMTOM_A_CDC_DEBUG_B7_CTL__POR (0x00)
-#define TOMTOM_A_CDC_SRC1_PDA_CFG (0x2A0)
-#define TOMTOM_A_CDC_SRC1_PDA_CFG__POR (0x00)
-#define TOMTOM_A_CDC_SRC2_PDA_CFG (0x2A8)
-#define TOMTOM_A_CDC_SRC2_PDA_CFG__POR (0x00)
-#define TOMTOM_A_CDC_SRC1_FS_CTL (0x2A1)
-#define TOMTOM_A_CDC_SRC1_FS_CTL__POR (0x1B)
-#define TOMTOM_A_CDC_SRC2_FS_CTL (0x2A9)
-#define TOMTOM_A_CDC_SRC2_FS_CTL__POR (0x1B)
-#define TOMTOM_A_CDC_RX1_B1_CTL (0x2B0)
-#define TOMTOM_A_CDC_RX1_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_RX2_B1_CTL (0x2B8)
-#define TOMTOM_A_CDC_RX2_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_RX3_B1_CTL (0x2C0)
-#define TOMTOM_A_CDC_RX3_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_RX4_B1_CTL (0x2C8)
-#define TOMTOM_A_CDC_RX4_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_RX5_B1_CTL (0x2D0)
-#define TOMTOM_A_CDC_RX5_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_RX6_B1_CTL (0x2D8)
-#define TOMTOM_A_CDC_RX6_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_RX7_B1_CTL (0x2E0)
-#define TOMTOM_A_CDC_RX7_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_RX1_B2_CTL (0x2B1)
-#define TOMTOM_A_CDC_RX1_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_RX2_B2_CTL (0x2B9)
-#define TOMTOM_A_CDC_RX2_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_RX3_B2_CTL (0x2C1)
-#define TOMTOM_A_CDC_RX3_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_RX4_B2_CTL (0x2C9)
-#define TOMTOM_A_CDC_RX4_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_RX5_B2_CTL (0x2D1)
-#define TOMTOM_A_CDC_RX5_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_RX6_B2_CTL (0x2D9)
-#define TOMTOM_A_CDC_RX6_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_RX7_B2_CTL (0x2E1)
-#define TOMTOM_A_CDC_RX7_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_RX1_B3_CTL (0x2B2)
-#define TOMTOM_A_CDC_RX1_B3_CTL__POR (0x00)
-#define TOMTOM_A_CDC_RX2_B3_CTL (0x2BA)
-#define TOMTOM_A_CDC_RX2_B3_CTL__POR (0x00)
-#define TOMTOM_A_CDC_RX3_B3_CTL (0x2C2)
-#define TOMTOM_A_CDC_RX3_B3_CTL__POR (0x00)
-#define TOMTOM_A_CDC_RX4_B3_CTL (0x2CA)
-#define TOMTOM_A_CDC_RX4_B3_CTL__POR (0x00)
-#define TOMTOM_A_CDC_RX5_B3_CTL (0x2D2)
-#define TOMTOM_A_CDC_RX5_B3_CTL__POR (0x00)
-#define TOMTOM_A_CDC_RX6_B3_CTL (0x2DA)
-#define TOMTOM_A_CDC_RX6_B3_CTL__POR (0x00)
-#define TOMTOM_A_CDC_RX7_B3_CTL (0x2E2)
-#define TOMTOM_A_CDC_RX7_B3_CTL__POR (0x00)
-#define TOMTOM_A_CDC_RX1_B4_CTL (0x2B3)
-#define TOMTOM_A_CDC_RX1_B4_CTL__POR (0x0B)
-#define TOMTOM_A_CDC_RX2_B4_CTL (0x2BB)
-#define TOMTOM_A_CDC_RX2_B4_CTL__POR (0x0B)
-#define TOMTOM_A_CDC_RX3_B4_CTL (0x2C3)
-#define TOMTOM_A_CDC_RX3_B4_CTL__POR (0x0B)
-#define TOMTOM_A_CDC_RX4_B4_CTL (0x2CB)
-#define TOMTOM_A_CDC_RX4_B4_CTL__POR (0x0B)
-#define TOMTOM_A_CDC_RX5_B4_CTL (0x2D3)
-#define TOMTOM_A_CDC_RX5_B4_CTL__POR (0x0B)
-#define TOMTOM_A_CDC_RX6_B4_CTL (0x2DB)
-#define TOMTOM_A_CDC_RX6_B4_CTL__POR (0x0B)
-#define TOMTOM_A_CDC_RX7_B4_CTL (0x2E3)
-#define TOMTOM_A_CDC_RX7_B4_CTL__POR (0x0B)
-#define TOMTOM_A_CDC_RX1_B5_CTL (0x2B4)
-#define TOMTOM_A_CDC_RX1_B5_CTL__POR (0x78)
-#define TOMTOM_A_CDC_RX2_B5_CTL (0x2BC)
-#define TOMTOM_A_CDC_RX2_B5_CTL__POR (0x78)
-#define TOMTOM_A_CDC_RX3_B5_CTL (0x2C4)
-#define TOMTOM_A_CDC_RX3_B5_CTL__POR (0x78)
-#define TOMTOM_A_CDC_RX4_B5_CTL (0x2CC)
-#define TOMTOM_A_CDC_RX4_B5_CTL__POR (0x78)
-#define TOMTOM_A_CDC_RX5_B5_CTL (0x2D4)
-#define TOMTOM_A_CDC_RX5_B5_CTL__POR (0x78)
-#define TOMTOM_A_CDC_RX6_B5_CTL (0x2DC)
-#define TOMTOM_A_CDC_RX6_B5_CTL__POR (0x78)
-#define TOMTOM_A_CDC_RX7_B5_CTL (0x2E4)
-#define TOMTOM_A_CDC_RX7_B5_CTL__POR (0x78)
-#define TOMTOM_A_CDC_RX1_B6_CTL (0x2B5)
-#define TOMTOM_A_CDC_RX1_B6_CTL__POR (0x80)
-#define TOMTOM_A_CDC_RX2_B6_CTL (0x2BD)
-#define TOMTOM_A_CDC_RX2_B6_CTL__POR (0x80)
-#define TOMTOM_A_CDC_RX3_B6_CTL (0x2C5)
-#define TOMTOM_A_CDC_RX3_B6_CTL__POR (0x80)
-#define TOMTOM_A_CDC_RX4_B6_CTL (0x2CD)
-#define TOMTOM_A_CDC_RX4_B6_CTL__POR (0x80)
-#define TOMTOM_A_CDC_RX5_B6_CTL (0x2D5)
-#define TOMTOM_A_CDC_RX5_B6_CTL__POR (0x80)
-#define TOMTOM_A_CDC_RX6_B6_CTL (0x2DD)
-#define TOMTOM_A_CDC_RX6_B6_CTL__POR (0x80)
-#define TOMTOM_A_CDC_RX7_B6_CTL (0x2E5)
-#define TOMTOM_A_CDC_RX7_B6_CTL__POR (0x80)
-#define TOMTOM_A_CDC_RX1_VOL_CTL_B1_CTL (0x2B6)
-#define TOMTOM_A_CDC_RX1_VOL_CTL_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_RX2_VOL_CTL_B1_CTL (0x2BE)
-#define TOMTOM_A_CDC_RX2_VOL_CTL_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_RX3_VOL_CTL_B1_CTL (0x2C6)
-#define TOMTOM_A_CDC_RX3_VOL_CTL_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_RX4_VOL_CTL_B1_CTL (0x2CE)
-#define TOMTOM_A_CDC_RX4_VOL_CTL_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_RX5_VOL_CTL_B1_CTL (0x2D6)
-#define TOMTOM_A_CDC_RX5_VOL_CTL_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_RX6_VOL_CTL_B1_CTL (0x2DE)
-#define TOMTOM_A_CDC_RX6_VOL_CTL_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_RX7_VOL_CTL_B1_CTL (0x2E6)
-#define TOMTOM_A_CDC_RX7_VOL_CTL_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_RX1_VOL_CTL_B2_CTL (0x2B7)
-#define TOMTOM_A_CDC_RX1_VOL_CTL_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_RX2_VOL_CTL_B2_CTL (0x2BF)
-#define TOMTOM_A_CDC_RX2_VOL_CTL_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_RX3_VOL_CTL_B2_CTL (0x2C7)
-#define TOMTOM_A_CDC_RX3_VOL_CTL_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_RX4_VOL_CTL_B2_CTL (0x2CF)
-#define TOMTOM_A_CDC_RX4_VOL_CTL_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_RX5_VOL_CTL_B2_CTL (0x2D7)
-#define TOMTOM_A_CDC_RX5_VOL_CTL_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_RX6_VOL_CTL_B2_CTL (0x2DF)
-#define TOMTOM_A_CDC_RX6_VOL_CTL_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_RX7_VOL_CTL_B2_CTL (0x2E7)
-#define TOMTOM_A_CDC_RX7_VOL_CTL_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_VBAT_CFG (0x2E8)
-#define TOMTOM_A_CDC_VBAT_CFG__POR (0x1A)
-#define TOMTOM_A_CDC_VBAT_ADC_CAL1 (0x2E9)
-#define TOMTOM_A_CDC_VBAT_ADC_CAL1__POR (0x00)
-#define TOMTOM_A_CDC_VBAT_ADC_CAL2 (0x2EA)
-#define TOMTOM_A_CDC_VBAT_ADC_CAL2__POR (0x00)
-#define TOMTOM_A_CDC_VBAT_ADC_CAL3 (0x2EB)
-#define TOMTOM_A_CDC_VBAT_ADC_CAL3__POR (0x04)
-#define TOMTOM_A_CDC_VBAT_PK_EST1 (0x2EC)
-#define TOMTOM_A_CDC_VBAT_PK_EST1__POR (0xE0)
-#define TOMTOM_A_CDC_VBAT_PK_EST2 (0x2ED)
-#define TOMTOM_A_CDC_VBAT_PK_EST2__POR (0x01)
-#define TOMTOM_A_CDC_VBAT_PK_EST3 (0x2EE)
-#define TOMTOM_A_CDC_VBAT_PK_EST3__POR (0x40)
-#define TOMTOM_A_CDC_VBAT_RF_PROC1 (0x2EF)
-#define TOMTOM_A_CDC_VBAT_RF_PROC1__POR (0x2A)
-#define TOMTOM_A_CDC_VBAT_RF_PROC2 (0x2F0)
-#define TOMTOM_A_CDC_VBAT_RF_PROC2__POR (0x86)
-#define TOMTOM_A_CDC_VBAT_TAC1 (0x2F1)
-#define TOMTOM_A_CDC_VBAT_TAC1__POR (0x70)
-#define TOMTOM_A_CDC_VBAT_TAC2 (0x2F2)
-#define TOMTOM_A_CDC_VBAT_TAC2__POR (0x18)
-#define TOMTOM_A_CDC_VBAT_TAC3 (0x2F3)
-#define TOMTOM_A_CDC_VBAT_TAC3__POR (0x18)
-#define TOMTOM_A_CDC_VBAT_TAC4 (0x2F4)
-#define TOMTOM_A_CDC_VBAT_TAC4__POR (0x03)
-#define TOMTOM_A_CDC_VBAT_GAIN_UPD1 (0x2F5)
-#define TOMTOM_A_CDC_VBAT_GAIN_UPD1__POR (0x01)
-#define TOMTOM_A_CDC_VBAT_GAIN_UPD2 (0x2F6)
-#define TOMTOM_A_CDC_VBAT_GAIN_UPD2__POR (0x00)
-#define TOMTOM_A_CDC_VBAT_GAIN_UPD3 (0x2F7)
-#define TOMTOM_A_CDC_VBAT_GAIN_UPD3__POR (0x64)
-#define TOMTOM_A_CDC_VBAT_GAIN_UPD4 (0x2F8)
-#define TOMTOM_A_CDC_VBAT_GAIN_UPD4__POR (0x01)
-#define TOMTOM_A_CDC_VBAT_DEBUG1 (0x2F9)
-#define TOMTOM_A_CDC_VBAT_DEBUG1__POR (0x00)
-#define TOMTOM_A_CDC_VBAT_GAIN_UPD_MON (0x2FA)
-#define TOMTOM_A_CDC_VBAT_GAIN_UPD_MON__POR (0x00)
-#define TOMTOM_A_CDC_VBAT_GAIN_MON_VAL (0x2FB)
-#define TOMTOM_A_CDC_VBAT_GAIN_MON_VAL__POR (0x00)
-#define TOMTOM_A_CDC_CLK_ANC_RESET_CTL (0x300)
-#define TOMTOM_A_CDC_CLK_ANC_RESET_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CLK_RX_RESET_CTL (0x301)
-#define TOMTOM_A_CDC_CLK_RX_RESET_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CLK_TX_RESET_B1_CTL (0x302)
-#define TOMTOM_A_CDC_CLK_TX_RESET_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CLK_TX_RESET_B2_CTL (0x303)
-#define TOMTOM_A_CDC_CLK_TX_RESET_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CLK_RX_I2S_CTL (0x306)
-#define TOMTOM_A_CDC_CLK_RX_I2S_CTL__POR (0x03)
-#define TOMTOM_A_CDC_CLK_TX_I2S_CTL (0x307)
-#define TOMTOM_A_CDC_CLK_TX_I2S_CTL__POR (0x03)
-#define TOMTOM_A_CDC_CLK_OTHR_RESET_B1_CTL (0x308)
-#define TOMTOM_A_CDC_CLK_OTHR_RESET_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CLK_OTHR_RESET_B2_CTL (0x309)
-#define TOMTOM_A_CDC_CLK_OTHR_RESET_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL (0x30A)
-#define TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CLK_TX_CLK_EN_B2_CTL (0x30B)
-#define TOMTOM_A_CDC_CLK_TX_CLK_EN_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CLK_OTHR_CTL (0x30C)
-#define TOMTOM_A_CDC_CLK_OTHR_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CLK_ANC_CLK_EN_CTL (0x30E)
-#define TOMTOM_A_CDC_CLK_ANC_CLK_EN_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CLK_RX_B1_CTL (0x30F)
-#define TOMTOM_A_CDC_CLK_RX_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CLK_RX_B2_CTL (0x310)
-#define TOMTOM_A_CDC_CLK_RX_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CLK_MCLK_CTL (0x311)
-#define TOMTOM_A_CDC_CLK_MCLK_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CLK_PDM_CTL (0x312)
-#define TOMTOM_A_CDC_CLK_PDM_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CLK_SD_CTL (0x313)
-#define TOMTOM_A_CDC_CLK_SD_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CLSH_B1_CTL (0x320)
-#define TOMTOM_A_CDC_CLSH_B1_CTL__POR (0xE4)
-#define TOMTOM_A_CDC_CLSH_B2_CTL (0x321)
-#define TOMTOM_A_CDC_CLSH_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CLSH_B3_CTL (0x322)
-#define TOMTOM_A_CDC_CLSH_B3_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CLSH_BUCK_NCP_VARS (0x323)
-#define TOMTOM_A_CDC_CLSH_BUCK_NCP_VARS__POR (0x00)
-#define TOMTOM_A_CDC_CLSH_IDLE_HPH_THSD (0x324)
-#define TOMTOM_A_CDC_CLSH_IDLE_HPH_THSD__POR (0x12)
-#define TOMTOM_A_CDC_CLSH_IDLE_EAR_THSD (0x325)
-#define TOMTOM_A_CDC_CLSH_IDLE_EAR_THSD__POR (0x0C)
-#define TOMTOM_A_CDC_CLSH_FCLKONLY_HPH_THSD (0x326)
-#define TOMTOM_A_CDC_CLSH_FCLKONLY_HPH_THSD__POR (0x18)
-#define TOMTOM_A_CDC_CLSH_FCLKONLY_EAR_THSD (0x327)
-#define TOMTOM_A_CDC_CLSH_FCLKONLY_EAR_THSD__POR (0x23)
-#define TOMTOM_A_CDC_CLSH_K_ADDR (0x328)
-#define TOMTOM_A_CDC_CLSH_K_ADDR__POR (0x00)
-#define TOMTOM_A_CDC_CLSH_K_DATA (0x329)
-#define TOMTOM_A_CDC_CLSH_K_DATA__POR (0xA4)
-#define TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_L (0x32A)
-#define TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_L__POR (0xD7)
-#define TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_U (0x32B)
-#define TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_U__POR (0x05)
-#define TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_L (0x32C)
-#define TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_L__POR (0x60)
-#define TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_U (0x32D)
-#define TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_U__POR (0x09)
-#define TOMTOM_A_CDC_CLSH_V_PA_HD_EAR (0x32E)
-#define TOMTOM_A_CDC_CLSH_V_PA_HD_EAR__POR (0x00)
-#define TOMTOM_A_CDC_CLSH_V_PA_HD_HPH (0x32F)
-#define TOMTOM_A_CDC_CLSH_V_PA_HD_HPH__POR (0x00)
-#define TOMTOM_A_CDC_CLSH_V_PA_MIN_EAR (0x330)
-#define TOMTOM_A_CDC_CLSH_V_PA_MIN_EAR__POR (0x00)
-#define TOMTOM_A_CDC_CLSH_V_PA_MIN_HPH (0x331)
-#define TOMTOM_A_CDC_CLSH_V_PA_MIN_HPH__POR (0x00)
-#define TOMTOM_A_CDC_IIR1_GAIN_B1_CTL (0x340)
-#define TOMTOM_A_CDC_IIR1_GAIN_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_IIR2_GAIN_B1_CTL (0x350)
-#define TOMTOM_A_CDC_IIR2_GAIN_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_IIR1_GAIN_B2_CTL (0x341)
-#define TOMTOM_A_CDC_IIR1_GAIN_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_IIR2_GAIN_B2_CTL (0x351)
-#define TOMTOM_A_CDC_IIR2_GAIN_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_IIR1_GAIN_B3_CTL (0x342)
-#define TOMTOM_A_CDC_IIR1_GAIN_B3_CTL__POR (0x00)
-#define TOMTOM_A_CDC_IIR2_GAIN_B3_CTL (0x352)
-#define TOMTOM_A_CDC_IIR2_GAIN_B3_CTL__POR (0x00)
-#define TOMTOM_A_CDC_IIR1_GAIN_B4_CTL (0x343)
-#define TOMTOM_A_CDC_IIR1_GAIN_B4_CTL__POR (0x00)
-#define TOMTOM_A_CDC_IIR2_GAIN_B4_CTL (0x353)
-#define TOMTOM_A_CDC_IIR2_GAIN_B4_CTL__POR (0x00)
-#define TOMTOM_A_CDC_IIR1_GAIN_B5_CTL (0x344)
-#define TOMTOM_A_CDC_IIR1_GAIN_B5_CTL__POR (0x00)
-#define TOMTOM_A_CDC_IIR2_GAIN_B5_CTL (0x354)
-#define TOMTOM_A_CDC_IIR2_GAIN_B5_CTL__POR (0x00)
-#define TOMTOM_A_CDC_IIR1_GAIN_B6_CTL (0x345)
-#define TOMTOM_A_CDC_IIR1_GAIN_B6_CTL__POR (0x00)
-#define TOMTOM_A_CDC_IIR2_GAIN_B6_CTL (0x355)
-#define TOMTOM_A_CDC_IIR2_GAIN_B6_CTL__POR (0x00)
-#define TOMTOM_A_CDC_IIR1_GAIN_B7_CTL (0x346)
-#define TOMTOM_A_CDC_IIR1_GAIN_B7_CTL__POR (0x00)
-#define TOMTOM_A_CDC_IIR2_GAIN_B7_CTL (0x356)
-#define TOMTOM_A_CDC_IIR2_GAIN_B7_CTL__POR (0x00)
-#define TOMTOM_A_CDC_IIR1_GAIN_B8_CTL (0x347)
-#define TOMTOM_A_CDC_IIR1_GAIN_B8_CTL__POR (0x00)
-#define TOMTOM_A_CDC_IIR2_GAIN_B8_CTL (0x357)
-#define TOMTOM_A_CDC_IIR2_GAIN_B8_CTL__POR (0x00)
-#define TOMTOM_A_CDC_IIR1_CTL (0x348)
-#define TOMTOM_A_CDC_IIR1_CTL__POR (0x40)
-#define TOMTOM_A_CDC_IIR2_CTL (0x358)
-#define TOMTOM_A_CDC_IIR2_CTL__POR (0x40)
-#define TOMTOM_A_CDC_IIR1_GAIN_TIMER_CTL (0x349)
-#define TOMTOM_A_CDC_IIR1_GAIN_TIMER_CTL__POR (0x00)
-#define TOMTOM_A_CDC_IIR2_GAIN_TIMER_CTL (0x359)
-#define TOMTOM_A_CDC_IIR2_GAIN_TIMER_CTL__POR (0x00)
-#define TOMTOM_A_CDC_IIR1_COEF_B1_CTL (0x34A)
-#define TOMTOM_A_CDC_IIR1_COEF_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_IIR2_COEF_B1_CTL (0x35A)
-#define TOMTOM_A_CDC_IIR2_COEF_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_IIR1_COEF_B2_CTL (0x34B)
-#define TOMTOM_A_CDC_IIR1_COEF_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_IIR2_COEF_B2_CTL (0x35B)
-#define TOMTOM_A_CDC_IIR2_COEF_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_TOP_GAIN_UPDATE (0x360)
-#define TOMTOM_A_CDC_TOP_GAIN_UPDATE__POR (0x00)
-#define TOMTOM_A_CDC_PA_RAMP_B1_CTL (0x361)
-#define TOMTOM_A_CDC_PA_RAMP_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_PA_RAMP_B2_CTL (0x362)
-#define TOMTOM_A_CDC_PA_RAMP_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_PA_RAMP_B3_CTL (0x363)
-#define TOMTOM_A_CDC_PA_RAMP_B3_CTL__POR (0x00)
-#define TOMTOM_A_CDC_PA_RAMP_B4_CTL (0x364)
-#define TOMTOM_A_CDC_PA_RAMP_B4_CTL__POR (0x00)
-#define TOMTOM_A_CDC_SPKR_CLIPDET_B1_CTL (0x365)
-#define TOMTOM_A_CDC_SPKR_CLIPDET_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_SPKR2_CLIPDET_B1_CTL (0x366)
-#define TOMTOM_A_CDC_SPKR2_CLIPDET_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_COMP0_B1_CTL (0x368)
-#define TOMTOM_A_CDC_COMP0_B1_CTL__POR (0x30)
-#define TOMTOM_A_CDC_COMP1_B1_CTL (0x370)
-#define TOMTOM_A_CDC_COMP1_B1_CTL__POR (0x30)
-#define TOMTOM_A_CDC_COMP2_B1_CTL (0x378)
-#define TOMTOM_A_CDC_COMP2_B1_CTL__POR (0x30)
-#define TOMTOM_A_CDC_COMP0_B2_CTL (0x369)
-#define TOMTOM_A_CDC_COMP0_B2_CTL__POR (0xB5)
-#define TOMTOM_A_CDC_COMP1_B2_CTL (0x371)
-#define TOMTOM_A_CDC_COMP1_B2_CTL__POR (0xB5)
-#define TOMTOM_A_CDC_COMP2_B2_CTL (0x379)
-#define TOMTOM_A_CDC_COMP2_B2_CTL__POR (0xB5)
-#define TOMTOM_A_CDC_COMP0_B3_CTL (0x36A)
-#define TOMTOM_A_CDC_COMP0_B3_CTL__POR (0x28)
-#define TOMTOM_A_CDC_COMP1_B3_CTL (0x372)
-#define TOMTOM_A_CDC_COMP1_B3_CTL__POR (0x28)
-#define TOMTOM_A_CDC_COMP2_B3_CTL (0x37A)
-#define TOMTOM_A_CDC_COMP2_B3_CTL__POR (0x28)
-#define TOMTOM_A_CDC_COMP0_B4_CTL (0x36B)
-#define TOMTOM_A_CDC_COMP0_B4_CTL__POR (0x37)
-#define TOMTOM_A_CDC_COMP1_B4_CTL (0x373)
-#define TOMTOM_A_CDC_COMP1_B4_CTL__POR (0x37)
-#define TOMTOM_A_CDC_COMP2_B4_CTL (0x37B)
-#define TOMTOM_A_CDC_COMP2_B4_CTL__POR (0x37)
-#define TOMTOM_A_CDC_COMP0_B5_CTL (0x36C)
-#define TOMTOM_A_CDC_COMP0_B5_CTL__POR (0x7F)
-#define TOMTOM_A_CDC_COMP1_B5_CTL (0x374)
-#define TOMTOM_A_CDC_COMP1_B5_CTL__POR (0x7F)
-#define TOMTOM_A_CDC_COMP2_B5_CTL (0x37C)
-#define TOMTOM_A_CDC_COMP2_B5_CTL__POR (0x7F)
-#define TOMTOM_A_CDC_COMP0_B6_CTL (0x36D)
-#define TOMTOM_A_CDC_COMP0_B6_CTL__POR (0x00)
-#define TOMTOM_A_CDC_COMP1_B6_CTL (0x375)
-#define TOMTOM_A_CDC_COMP1_B6_CTL__POR (0x00)
-#define TOMTOM_A_CDC_COMP2_B6_CTL (0x37D)
-#define TOMTOM_A_CDC_COMP2_B6_CTL__POR (0x00)
-#define TOMTOM_A_CDC_COMP0_SHUT_DOWN_STATUS (0x36E)
-#define TOMTOM_A_CDC_COMP0_SHUT_DOWN_STATUS__POR (0x03)
-#define TOMTOM_A_CDC_COMP1_SHUT_DOWN_STATUS (0x376)
-#define TOMTOM_A_CDC_COMP1_SHUT_DOWN_STATUS__POR (0x03)
-#define TOMTOM_A_CDC_COMP2_SHUT_DOWN_STATUS (0x37E)
-#define TOMTOM_A_CDC_COMP2_SHUT_DOWN_STATUS__POR (0x03)
-#define TOMTOM_A_CDC_COMP0_FS_CFG (0x36F)
-#define TOMTOM_A_CDC_COMP0_FS_CFG__POR (0x03)
-#define TOMTOM_A_CDC_COMP1_FS_CFG (0x377)
-#define TOMTOM_A_CDC_COMP1_FS_CFG__POR (0x03)
-#define TOMTOM_A_CDC_COMP2_FS_CFG (0x37F)
-#define TOMTOM_A_CDC_COMP2_FS_CFG__POR (0x03)
-#define TOMTOM_A_CDC_CONN_RX1_B1_CTL (0x380)
-#define TOMTOM_A_CDC_CONN_RX1_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_RX1_B2_CTL (0x381)
-#define TOMTOM_A_CDC_CONN_RX1_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_RX1_B3_CTL (0x382)
-#define TOMTOM_A_CDC_CONN_RX1_B3_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_RX2_B1_CTL (0x383)
-#define TOMTOM_A_CDC_CONN_RX2_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_RX2_B2_CTL (0x384)
-#define TOMTOM_A_CDC_CONN_RX2_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_RX2_B3_CTL (0x385)
-#define TOMTOM_A_CDC_CONN_RX2_B3_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_RX3_B1_CTL (0x386)
-#define TOMTOM_A_CDC_CONN_RX3_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_RX3_B2_CTL (0x387)
-#define TOMTOM_A_CDC_CONN_RX3_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_RX4_B1_CTL (0x388)
-#define TOMTOM_A_CDC_CONN_RX4_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_RX4_B2_CTL (0x389)
-#define TOMTOM_A_CDC_CONN_RX4_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_RX5_B1_CTL (0x38A)
-#define TOMTOM_A_CDC_CONN_RX5_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_RX5_B2_CTL (0x38B)
-#define TOMTOM_A_CDC_CONN_RX5_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_RX6_B1_CTL (0x38C)
-#define TOMTOM_A_CDC_CONN_RX6_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_RX6_B2_CTL (0x38D)
-#define TOMTOM_A_CDC_CONN_RX6_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_RX7_B1_CTL (0x38E)
-#define TOMTOM_A_CDC_CONN_RX7_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_RX7_B2_CTL (0x38F)
-#define TOMTOM_A_CDC_CONN_RX7_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_RX7_B3_CTL (0x390)
-#define TOMTOM_A_CDC_CONN_RX7_B3_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_ANC_B1_CTL (0x391)
-#define TOMTOM_A_CDC_CONN_ANC_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_ANC_B2_CTL (0x392)
-#define TOMTOM_A_CDC_CONN_ANC_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_TX_B1_CTL (0x393)
-#define TOMTOM_A_CDC_CONN_TX_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_TX_B2_CTL (0x394)
-#define TOMTOM_A_CDC_CONN_TX_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_TX_B3_CTL (0x395)
-#define TOMTOM_A_CDC_CONN_TX_B3_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_TX_B4_CTL (0x396)
-#define TOMTOM_A_CDC_CONN_TX_B4_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_EQ1_B1_CTL (0x397)
-#define TOMTOM_A_CDC_CONN_EQ1_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_EQ1_B2_CTL (0x398)
-#define TOMTOM_A_CDC_CONN_EQ1_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_EQ1_B3_CTL (0x399)
-#define TOMTOM_A_CDC_CONN_EQ1_B3_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_EQ1_B4_CTL (0x39A)
-#define TOMTOM_A_CDC_CONN_EQ1_B4_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_EQ2_B1_CTL (0x39B)
-#define TOMTOM_A_CDC_CONN_EQ2_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_EQ2_B2_CTL (0x39C)
-#define TOMTOM_A_CDC_CONN_EQ2_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_EQ2_B3_CTL (0x39D)
-#define TOMTOM_A_CDC_CONN_EQ2_B3_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_EQ2_B4_CTL (0x39E)
-#define TOMTOM_A_CDC_CONN_EQ2_B4_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_SRC1_B1_CTL (0x39F)
-#define TOMTOM_A_CDC_CONN_SRC1_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_SRC1_B2_CTL (0x3A0)
-#define TOMTOM_A_CDC_CONN_SRC1_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_SRC2_B1_CTL (0x3A1)
-#define TOMTOM_A_CDC_CONN_SRC2_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_SRC2_B2_CTL (0x3A2)
-#define TOMTOM_A_CDC_CONN_SRC2_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_TX_SB_B1_CTL (0x3A3)
-#define TOMTOM_A_CDC_CONN_TX_SB_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_TX_SB_B2_CTL (0x3A4)
-#define TOMTOM_A_CDC_CONN_TX_SB_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_TX_SB_B3_CTL (0x3A5)
-#define TOMTOM_A_CDC_CONN_TX_SB_B3_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_TX_SB_B4_CTL (0x3A6)
-#define TOMTOM_A_CDC_CONN_TX_SB_B4_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_TX_SB_B5_CTL (0x3A7)
-#define TOMTOM_A_CDC_CONN_TX_SB_B5_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_TX_SB_B6_CTL (0x3A8)
-#define TOMTOM_A_CDC_CONN_TX_SB_B6_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_TX_SB_B7_CTL (0x3A9)
-#define TOMTOM_A_CDC_CONN_TX_SB_B7_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_TX_SB_B8_CTL (0x3AA)
-#define TOMTOM_A_CDC_CONN_TX_SB_B8_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_TX_SB_B9_CTL (0x3AB)
-#define TOMTOM_A_CDC_CONN_TX_SB_B9_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_TX_SB_B10_CTL (0x3AC)
-#define TOMTOM_A_CDC_CONN_TX_SB_B10_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_TX_SB_B11_CTL (0x3AD)
-#define TOMTOM_A_CDC_CONN_TX_SB_B11_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_RX_SB_B1_CTL (0x3AE)
-#define TOMTOM_A_CDC_CONN_RX_SB_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_RX_SB_B2_CTL (0x3AF)
-#define TOMTOM_A_CDC_CONN_RX_SB_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_CLSH_CTL (0x3B0)
-#define TOMTOM_A_CDC_CONN_CLSH_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CONN_MISC (0x3B1)
-#define TOMTOM_A_CDC_CONN_MISC__POR (0x01)
-#define TOMTOM_A_CDC_CONN_RX8_B1_CTL (0x3B3)
-#define TOMTOM_A_CDC_CONN_RX8_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_B1_CTL (0x3B4)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_B1_CTL__POR (0x81)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_CLIP_LEVEL_ADJUST (0x3B5)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_CLIP_LEVEL_ADJUST__POR (0x00)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_MIN_CLIP_THRESHOLD (0x3B6)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_MIN_CLIP_THRESHOLD__POR (0xFF)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_THRESHOLD_STATUS (0x3B7)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_THRESHOLD_STATUS__POR (0x00)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_SAMPLE_MARK (0x3B8)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_SAMPLE_MARK__POR (0x04)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_BOOST_GATING (0x3B9)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_BOOST_GATING__POR (0x04)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_B1_CTL (0x3BA)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_B1_CTL__POR (0x81)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_CLIP_LEVEL_ADJUST (0x3BB)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_CLIP_LEVEL_ADJUST__POR (0x00)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_MIN_CLIP_THRESHOLD (0x3BC)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_MIN_CLIP_THRESHOLD__POR (0xFF)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_THRESHOLD_STATUS (0x3BD)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_THRESHOLD_STATUS__POR (0x00)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_SAMPLE_MARK (0x3BE)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_SAMPLE_MARK__POR (0x04)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_BOOST_GATING (0x3BF)
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_BOOST_GATING__POR (0x04)
-#define TOMTOM_A_CDC_MBHC_EN_CTL (0x3C0)
-#define TOMTOM_A_CDC_MBHC_EN_CTL__POR (0x00)
-#define TOMTOM_A_CDC_MBHC_FIR_B1_CFG (0x3C1)
-#define TOMTOM_A_CDC_MBHC_FIR_B1_CFG__POR (0x00)
-#define TOMTOM_A_CDC_MBHC_FIR_B2_CFG (0x3C2)
-#define TOMTOM_A_CDC_MBHC_FIR_B2_CFG__POR (0x06)
-#define TOMTOM_A_CDC_MBHC_TIMER_B1_CTL (0x3C3)
-#define TOMTOM_A_CDC_MBHC_TIMER_B1_CTL__POR (0x03)
-#define TOMTOM_A_CDC_MBHC_TIMER_B2_CTL (0x3C4)
-#define TOMTOM_A_CDC_MBHC_TIMER_B2_CTL__POR (0x09)
-#define TOMTOM_A_CDC_MBHC_TIMER_B3_CTL (0x3C5)
-#define TOMTOM_A_CDC_MBHC_TIMER_B3_CTL__POR (0x1E)
-#define TOMTOM_A_CDC_MBHC_TIMER_B4_CTL (0x3C6)
-#define TOMTOM_A_CDC_MBHC_TIMER_B4_CTL__POR (0x45)
-#define TOMTOM_A_CDC_MBHC_TIMER_B5_CTL (0x3C7)
-#define TOMTOM_A_CDC_MBHC_TIMER_B5_CTL__POR (0x04)
-#define TOMTOM_A_CDC_MBHC_TIMER_B6_CTL (0x3C8)
-#define TOMTOM_A_CDC_MBHC_TIMER_B6_CTL__POR (0x78)
-#define TOMTOM_A_CDC_MBHC_B1_STATUS (0x3C9)
-#define TOMTOM_A_CDC_MBHC_B1_STATUS__POR (0x00)
-#define TOMTOM_A_CDC_MBHC_B2_STATUS (0x3CA)
-#define TOMTOM_A_CDC_MBHC_B2_STATUS__POR (0x00)
-#define TOMTOM_A_CDC_MBHC_B3_STATUS (0x3CB)
-#define TOMTOM_A_CDC_MBHC_B3_STATUS__POR (0x00)
-#define TOMTOM_A_CDC_MBHC_B4_STATUS (0x3CC)
-#define TOMTOM_A_CDC_MBHC_B4_STATUS__POR (0x00)
-#define TOMTOM_A_CDC_MBHC_B5_STATUS (0x3CD)
-#define TOMTOM_A_CDC_MBHC_B5_STATUS__POR (0x00)
-#define TOMTOM_A_CDC_MBHC_B1_CTL (0x3CE)
-#define TOMTOM_A_CDC_MBHC_B1_CTL__POR (0xC0)
-#define TOMTOM_A_CDC_MBHC_B2_CTL (0x3CF)
-#define TOMTOM_A_CDC_MBHC_B2_CTL__POR (0x5D)
-#define TOMTOM_A_CDC_MBHC_VOLT_B1_CTL (0x3D0)
-#define TOMTOM_A_CDC_MBHC_VOLT_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_MBHC_VOLT_B2_CTL (0x3D1)
-#define TOMTOM_A_CDC_MBHC_VOLT_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_MBHC_VOLT_B3_CTL (0x3D2)
-#define TOMTOM_A_CDC_MBHC_VOLT_B3_CTL__POR (0x00)
-#define TOMTOM_A_CDC_MBHC_VOLT_B4_CTL (0x3D3)
-#define TOMTOM_A_CDC_MBHC_VOLT_B4_CTL__POR (0x00)
-#define TOMTOM_A_CDC_MBHC_VOLT_B5_CTL (0x3D4)
-#define TOMTOM_A_CDC_MBHC_VOLT_B5_CTL__POR (0x00)
-#define TOMTOM_A_CDC_MBHC_VOLT_B6_CTL (0x3D5)
-#define TOMTOM_A_CDC_MBHC_VOLT_B6_CTL__POR (0x00)
-#define TOMTOM_A_CDC_MBHC_VOLT_B7_CTL (0x3D6)
-#define TOMTOM_A_CDC_MBHC_VOLT_B7_CTL__POR (0xFF)
-#define TOMTOM_A_CDC_MBHC_VOLT_B8_CTL (0x3D7)
-#define TOMTOM_A_CDC_MBHC_VOLT_B8_CTL__POR (0x07)
-#define TOMTOM_A_CDC_MBHC_VOLT_B9_CTL (0x3D8)
-#define TOMTOM_A_CDC_MBHC_VOLT_B9_CTL__POR (0xFF)
-#define TOMTOM_A_CDC_MBHC_VOLT_B10_CTL (0x3D9)
-#define TOMTOM_A_CDC_MBHC_VOLT_B10_CTL__POR (0x7F)
-#define TOMTOM_A_CDC_MBHC_VOLT_B11_CTL (0x3DA)
-#define TOMTOM_A_CDC_MBHC_VOLT_B11_CTL__POR (0x00)
-#define TOMTOM_A_CDC_MBHC_VOLT_B12_CTL (0x3DB)
-#define TOMTOM_A_CDC_MBHC_VOLT_B12_CTL__POR (0x80)
-#define TOMTOM_A_CDC_MBHC_CLK_CTL (0x3DC)
-#define TOMTOM_A_CDC_MBHC_CLK_CTL__POR (0x00)
-#define TOMTOM_A_CDC_MBHC_INT_CTL (0x3DD)
-#define TOMTOM_A_CDC_MBHC_INT_CTL__POR (0x00)
-#define TOMTOM_A_CDC_MBHC_DEBUG_CTL (0x3DE)
-#define TOMTOM_A_CDC_MBHC_DEBUG_CTL__POR (0x00)
-#define TOMTOM_A_CDC_MBHC_SPARE (0x3DF)
-#define TOMTOM_A_CDC_MBHC_SPARE__POR (0x00)
-#define TOMTOM_A_CDC_RX8_B1_CTL (0x3E0)
-#define TOMTOM_A_CDC_RX8_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_RX8_B2_CTL (0x3E1)
-#define TOMTOM_A_CDC_RX8_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_RX8_B3_CTL (0x3E2)
-#define TOMTOM_A_CDC_RX8_B3_CTL__POR (0x00)
-#define TOMTOM_A_CDC_RX8_B4_CTL (0x3E3)
-#define TOMTOM_A_CDC_RX8_B4_CTL__POR (0x0B)
-#define TOMTOM_A_CDC_RX8_B5_CTL (0x3E4)
-#define TOMTOM_A_CDC_RX8_B5_CTL__POR (0x78)
-#define TOMTOM_A_CDC_RX8_B6_CTL (0x3E5)
-#define TOMTOM_A_CDC_RX8_B6_CTL__POR (0x80)
-#define TOMTOM_A_CDC_RX8_VOL_CTL_B1_CTL (0x3E6)
-#define TOMTOM_A_CDC_RX8_VOL_CTL_B1_CTL__POR (0x00)
-#define TOMTOM_A_CDC_RX8_VOL_CTL_B2_CTL (0x3E7)
-#define TOMTOM_A_CDC_RX8_VOL_CTL_B2_CTL__POR (0x00)
-#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL0 (0x3E8)
-#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL0__POR (0x00)
-#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL1 (0x3E9)
-#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL1__POR (0x00)
-#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL2 (0x3EA)
-#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL2__POR (0x00)
-#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL3 (0x3EB)
-#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL3__POR (0x00)
-#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL4 (0x3EC)
-#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL4__POR (0x00)
-#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL5 (0x3ED)
-#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL5__POR (0x00)
-#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL6 (0x3EE)
-#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL6__POR (0x00)
-#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL7 (0x3EF)
-#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL7__POR (0x00)
-#define TOMTOM_A_CDC_BOOST_MODE_CTL (0x3F0)
-#define TOMTOM_A_CDC_BOOST_MODE_CTL__POR (0x00)
-#define TOMTOM_A_CDC_BOOST_THRESHOLD (0x3F1)
-#define TOMTOM_A_CDC_BOOST_THRESHOLD__POR (0x02)
-#define TOMTOM_A_CDC_BOOST_TAP_SEL (0x3F2)
-#define TOMTOM_A_CDC_BOOST_TAP_SEL__POR (0x00)
-#define TOMTOM_A_CDC_BOOST_HOLD_TIME (0x3F3)
-#define TOMTOM_A_CDC_BOOST_HOLD_TIME__POR (0x02)
-#define TOMTOM_A_CDC_BOOST_TRGR_EN (0x3F4)
-#define TOMTOM_A_CDC_BOOST_TRGR_EN__POR (0x00)
-
-/* SLIMBUS Slave Registers */
-#define TOMTOM_SLIM_PGD_PORT_INT_EN0 (0x30)
-#define TOMTOM_SLIM_PGD_PORT_INT_STATUS_RX_0 (0x34)
-#define TOMTOM_SLIM_PGD_PORT_INT_STATUS_RX_1 (0x35)
-#define TOMTOM_SLIM_PGD_PORT_INT_STATUS_TX_0 (0x36)
-#define TOMTOM_SLIM_PGD_PORT_INT_STATUS_TX_1 (0x37)
-#define TOMTOM_SLIM_PGD_PORT_INT_CLR_RX_0 (0x38)
-#define TOMTOM_SLIM_PGD_PORT_INT_CLR_RX_1 (0x39)
-#define TOMTOM_SLIM_PGD_PORT_INT_CLR_TX_0 (0x3A)
-#define TOMTOM_SLIM_PGD_PORT_INT_CLR_TX_1 (0x3B)
-#define TOMTOM_SLIM_PGD_PORT_INT_RX_SOURCE0 (0x60)
-#define TOMTOM_SLIM_PGD_PORT_INT_TX_SOURCE0 (0x70)
-
-/* Macros for Packing Register Writes into a U32 */
-#define TOMTOM_PACKED_REG_SIZE sizeof(u32)
-
-#define TOMTOM_CODEC_PACK_ENTRY(reg, mask, val) ((val & 0xff)|\
- ((mask & 0xff) << 8)|((reg & 0xffff) << 16))
-#define TOMTOM_CODEC_UNPACK_ENTRY(packed, reg, mask, val) \
- do { \
- ((reg) = ((packed >> 16) & (0xffff))); \
- ((mask) = ((packed >> 8) & (0xff))); \
- ((val) = ((packed) & (0xff))); \
- } while (0)
-
-#define TOMTOM_SB_PGD_PORT_TX_BASE 0x50
-#define TOMTOM_SB_PGD_PORT_RX_BASE 0x40
-#define WCD9330_MAX_REGISTER 0x3FF
-extern const u8 tomtom_reg_readable[WCD9330_MAX_REGISTER + 1];
-#endif
diff --git a/include/linux/mfd/wcd9xxx/wcd9xxx-irq.h b/include/linux/mfd/wcd9xxx/wcd9xxx-irq.h
index 1e428a1..99ce603 100644
--- a/include/linux/mfd/wcd9xxx/wcd9xxx-irq.h
+++ b/include/linux/mfd/wcd9xxx/wcd9xxx-irq.h
@@ -32,4 +32,6 @@
int wcd9xxx_irq_init(struct wcd9xxx_core_resource *wcd9xxx_res);
void wcd9xxx_irq_exit(struct wcd9xxx_core_resource *wcd9xxx_res);
+int wcd9xxx_irq_drv_init(void);
+void wcd9xxx_irq_drv_exit(void);
#endif
diff --git a/include/linux/mfd/wcd9xxx/wcd9xxx-utils.h b/include/linux/mfd/wcd9xxx/wcd9xxx-utils.h
index d0ac0ac..7a13dd1 100644
--- a/include/linux/mfd/wcd9xxx/wcd9xxx-utils.h
+++ b/include/linux/mfd/wcd9xxx/wcd9xxx-utils.h
@@ -33,108 +33,8 @@
typedef int (*codec_type_fn)(struct wcd9xxx *,
struct wcd9xxx_codec_type *);
-#ifdef CONFIG_WCD934X_CODEC
-extern int wcd934x_bringup(struct wcd9xxx *wcd9xxx);
-extern int wcd934x_bringdown(struct wcd9xxx *wcd9xxx);
-extern int wcd934x_get_codec_info(struct wcd9xxx *wcd9xxx,
- struct wcd9xxx_codec_type *wcd_type);
-#endif
+codec_bringdown_fn wcd9xxx_bringdown_fn(int type);
+codec_bringup_fn wcd9xxx_bringup_fn(int type);
+codec_type_fn wcd9xxx_get_codec_info_fn(int type);
-#ifdef CONFIG_WCD9335_CODEC
-extern int wcd9335_bringup(struct wcd9xxx *wcd9xxx);
-extern int wcd9335_bringdown(struct wcd9xxx *wcd9xxx);
-extern int wcd9335_get_codec_info(struct wcd9xxx *wcd9xxx,
- struct wcd9xxx_codec_type *wcd_type);
-#endif
-
-#ifdef CONFIG_WCD9330_CODEC
-extern int wcd9330_bringup(struct wcd9xxx *wcd9xxx);
-extern int wcd9330_bringdown(struct wcd9xxx *wcd9xxx);
-extern int wcd9330_get_codec_info(struct wcd9xxx *wcd9xxx,
- struct wcd9xxx_codec_type *wcd_type);
-#endif
-
-static inline codec_bringdown_fn wcd9xxx_bringdown_fn(int type)
-{
- codec_bringdown_fn cdc_bdown_fn;
-
- switch (type) {
-#ifdef CONFIG_WCD934X_CODEC
- case WCD934X:
- cdc_bdown_fn = wcd934x_bringdown;
- break;
-#endif
-#ifdef CONFIG_WCD9335_CODEC
- case WCD9335:
- cdc_bdown_fn = wcd9335_bringdown;
- break;
-#endif
-#ifdef CONFIG_WCD9330_CODEC
- case WCD9330:
- cdc_bdown_fn = wcd9330_bringdown;
- break;
-#endif
- default:
- cdc_bdown_fn = NULL;
- break;
- }
-
- return cdc_bdown_fn;
-}
-
-static inline codec_bringup_fn wcd9xxx_bringup_fn(int type)
-{
- codec_bringup_fn cdc_bup_fn;
-
- switch (type) {
-#ifdef CONFIG_WCD934X_CODEC
- case WCD934X:
- cdc_bup_fn = wcd934x_bringup;
- break;
-#endif
-#ifdef CONFIG_WCD9335_CODEC
- case WCD9335:
- cdc_bup_fn = wcd9335_bringup;
- break;
-#endif
-#ifdef CONFIG_WCD9330_CODEC
- case WCD9330:
- cdc_bup_fn = wcd9330_bringup;
- break;
-#endif
- default:
- cdc_bup_fn = NULL;
- break;
- }
-
- return cdc_bup_fn;
-}
-
-static inline codec_type_fn wcd9xxx_get_codec_info_fn(int type)
-{
- codec_type_fn cdc_type_fn;
-
- switch (type) {
-#ifdef CONFIG_WCD934X_CODEC
- case WCD934X:
- cdc_type_fn = wcd934x_get_codec_info;
- break;
-#endif
-#ifdef CONFIG_WCD9335_CODEC
- case WCD9335:
- cdc_type_fn = wcd9335_get_codec_info;
- break;
-#endif
-#ifdef CONFIG_WCD9330_CODEC
- case WCD9330:
- cdc_type_fn = wcd9330_get_codec_info;
- break;
-#endif
- default:
- cdc_type_fn = NULL;
- break;
- }
-
- return cdc_type_fn;
-}
#endif
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 257173e..f541da6 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -35,6 +35,8 @@
#define PHY_ID_KSZ886X 0x00221430
#define PHY_ID_KSZ8863 0x00221435
+#define PHY_ID_KSZ8795 0x00221550
+
/* struct phy_device dev_flags definitions */
#define MICREL_PHY_50MHZ_CLK 0x00000001
#define MICREL_PHY_FXEN 0x00000002
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index e1a903a..6a620e0 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -788,6 +788,7 @@
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
+void mlx5_drain_health_recovery(struct mlx5_core_dev *dev);
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
struct mlx5_buf *buf, int node);
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f7b0dab..9b21e2a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -76,6 +76,10 @@
#define page_to_virt(x) __va(PFN_PHYS(page_to_pfn(x)))
#endif
+#ifndef lm_alias
+#define lm_alias(x) __va(__pa_symbol(x))
+#endif
+
/*
* To prevent common memory management code establishing
* a zero page mapping on a read fault.
@@ -478,16 +482,16 @@
* On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
* is no special casing required.
*/
-static inline bool is_vmalloc_addr(const void *x)
-{
-#ifdef CONFIG_MMU
- unsigned long addr = (unsigned long)x;
- return addr >= VMALLOC_START && addr < VMALLOC_END;
+#ifdef CONFIG_MMU
+extern int is_vmalloc_addr(const void *x);
#else
- return false;
-#endif
+static inline int is_vmalloc_addr(const void *x)
+{
+ return 0;
}
+#endif
+
#ifdef CONFIG_MMU
extern int is_vmalloc_or_module_addr(const void *x);
#else
@@ -1357,39 +1361,11 @@
int get_cmdline(struct task_struct *task, char *buffer, int buflen);
-/* Is the vma a continuation of the stack vma above it? */
-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
-{
- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
-}
-
static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
return !vma->vm_ops;
}
-static inline int stack_guard_page_start(struct vm_area_struct *vma,
- unsigned long addr)
-{
- return (vma->vm_flags & VM_GROWSDOWN) &&
- (vma->vm_start == addr) &&
- !vma_growsdown(vma->vm_prev, addr);
-}
-
-/* Is the vma a continuation of the stack vma below it? */
-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
-{
- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
-}
-
-static inline int stack_guard_page_end(struct vm_area_struct *vma,
- unsigned long addr)
-{
- return (vma->vm_flags & VM_GROWSUP) &&
- (vma->vm_end == addr) &&
- !vma_growsup(vma->vm_next, addr);
-}
-
int vma_is_stack_for_current(struct vm_area_struct *vma);
extern unsigned long move_page_tables(struct vm_area_struct *vma,
@@ -2128,6 +2104,7 @@
pgoff_t offset,
unsigned long size);
+extern unsigned long stack_guard_gap;
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
@@ -2156,6 +2133,30 @@
return vma;
}
+static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
+{
+ unsigned long vm_start = vma->vm_start;
+
+ if (vma->vm_flags & VM_GROWSDOWN) {
+ vm_start -= stack_guard_gap;
+ if (vm_start > vma->vm_start)
+ vm_start = 0;
+ }
+ return vm_start;
+}
+
+static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
+{
+ unsigned long vm_end = vma->vm_end;
+
+ if (vma->vm_flags & VM_GROWSUP) {
+ vm_end += stack_guard_gap;
+ if (vm_end < vma->vm_end)
+ vm_end = -PAGE_SIZE;
+ }
+ return vm_end;
+}
+
static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
@@ -2410,7 +2411,6 @@
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
extern struct page_ext_operations debug_guardpage_ops;
-extern struct page_ext_operations page_poisoning_ops;
#ifdef CONFIG_DEBUG_PAGEALLOC
extern unsigned int _debug_guardpage_minorder;
@@ -2451,5 +2451,19 @@
static inline void setup_nr_node_ids(void) {}
#endif
+#ifdef CONFIG_PROCESS_RECLAIM
+struct reclaim_param {
+ struct vm_area_struct *vma;
+ /* Number of pages scanned */
+ int nr_scanned;
+ /* max pages to reclaim */
+ int nr_to_reclaim;
+ /* pages reclaimed */
+ int nr_reclaimed;
+};
+extern struct reclaim_param reclaim_task_anon(struct task_struct *task,
+ int nr_to_reclaim);
+#endif
+
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 0f2e651..b718105 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -233,6 +233,7 @@
bool lock_needed);
extern void mmc_cmdq_clk_scaling_stop_busy(struct mmc_host *host,
bool lock_needed, bool is_cmdq_dcmd);
+extern void mmc_recovery_fallback_lower_speed(struct mmc_host *host);
/**
* mmc_claim_host - exclusively claim a host
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 9200069..f214b0c 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -480,6 +480,7 @@
int clk_requests; /* internal reference counter */
unsigned int clk_delay; /* number of MCI clk hold cycles */
bool clk_gated; /* clock gated */
+ struct workqueue_struct *clk_gate_wq; /* clock gate work queue */
struct delayed_work clk_gate_work; /* delayed clock gate */
unsigned int clk_old; /* old clock value cache */
spinlock_t clk_lock; /* lock for clk fields */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 6744eb4..ed0099c9 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -39,8 +39,6 @@
MIGRATE_UNMOVABLE,
MIGRATE_MOVABLE,
MIGRATE_RECLAIMABLE,
- MIGRATE_PCPTYPES, /* the number of types on the pcp lists */
- MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
/*
* MIGRATE_CMA migration type is designed to mimic the way
@@ -57,6 +55,8 @@
*/
MIGRATE_CMA,
#endif
+ MIGRATE_PCPTYPES, /* the number of types on the pcp lists */
+ MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_MEMORY_ISOLATION
MIGRATE_ISOLATE, /* can't allocate from here */
#endif
@@ -65,13 +65,22 @@
/* In mm/page_alloc.c; keep in sync also with show_migration_types() there */
extern char * const migratetype_names[MIGRATE_TYPES];
+/*
+ * Returns a list which contains the migrate types on to which
+ * an allocation falls back when the free list for the migrate
+ * type mtype is depleted.
+ * The end of the list is delimited by the type MIGRATE_TYPES.
+ */
+extern int *get_migratetype_fallbacks(int mtype);
#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
# define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
+# define get_cma_migrate_type() MIGRATE_CMA
#else
# define is_migrate_cma(migratetype) false
# define is_migrate_cma_page(_page) false
+# define get_cma_migrate_type() MIGRATE_MOVABLE
#endif
#define for_each_migratetype_order(order, type) \
@@ -368,6 +377,10 @@
struct pglist_data *zone_pgdat;
struct per_cpu_pageset __percpu *pageset;
+#ifdef CONFIG_CMA
+ bool cma_alloc;
+#endif
+
#ifndef CONFIG_SPARSEMEM
/*
* Flags for a pageblock_nr_pages block. See pageblock-flags.h.
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index a47c29e..d92d9a6 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -865,11 +865,15 @@
* of useless work if you return NETDEV_TX_BUSY.
* Required; cannot be NULL.
*
- * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
- * netdev_features_t features);
- * Adjusts the requested feature flags according to device-specific
- * constraints, and returns the resulting flags. Must not modify
- * the device state.
+ * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
+ * struct net_device *dev
+ * netdev_features_t features);
+ * Called by core transmit path to determine if device is capable of
+ * performing offload operations on a given packet. This is to give
+ * the device an opportunity to implement any restrictions that cannot
+ * be otherwise expressed by feature flags. The check is called with
+ * the set of features that the stack has calculated and it returns
+ * those the driver believes to be appropriate.
*
* u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
* void *accel_priv, select_queue_fallback_t fallback);
@@ -1027,6 +1031,12 @@
* Called to release previously enslaved netdev.
*
* Feature/offload setting functions.
+ * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
+ * netdev_features_t features);
+ * Adjusts the requested feature flags according to device-specific
+ * constraints, and returns the resulting flags. Must not modify
+ * the device state.
+ *
* int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
* Called to update device configuration to new features. Passed
* feature set might be less than what was returned by ndo_fix_features()).
@@ -1099,15 +1109,6 @@
* Callback to use for xmit over the accelerated station. This
* is used in place of ndo_start_xmit on accelerated net
* devices.
- * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
- * struct net_device *dev
- * netdev_features_t features);
- * Called by core transmit path to determine if device is capable of
- * performing offload operations on a given packet. This is to give
- * the device an opportunity to implement any restrictions that cannot
- * be otherwise expressed by feature flags. The check is called with
- * the set of features that the stack has calculated and it returns
- * those the driver believes to be appropriate.
* int (*ndo_set_tx_maxrate)(struct net_device *dev,
* int queue_index, u32 maxrate);
* Called when a user wants to set a max-rate limitation of specific
@@ -2831,6 +2832,8 @@
unsigned int processed;
unsigned int time_squeeze;
unsigned int received_rps;
+ unsigned int gro_coalesced;
+
#ifdef CONFIG_RPS
struct softnet_data *rps_ipi_list;
#endif
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index dde3b13..3ca2526 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -7,6 +7,23 @@
#include <linux/sched.h>
#include <asm/irq.h>
+/*
+ * The run state of the lockup detectors is controlled by the content of the
+ * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
+ * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
+ *
+ * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled'
+ * are variables that are only used as an 'interface' between the parameters
+ * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The
+ * 'watchdog_thresh' variable is handled differently because its value is not
+ * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh'
+ * is equal zero.
+ */
+#define NMI_WATCHDOG_ENABLED_BIT 0
+#define SOFT_WATCHDOG_ENABLED_BIT 1
+#define NMI_WATCHDOG_ENABLED (1 << NMI_WATCHDOG_ENABLED_BIT)
+#define SOFT_WATCHDOG_ENABLED (1 << SOFT_WATCHDOG_ENABLED_BIT)
+
/**
* touch_nmi_watchdog - restart NMI watchdog timeout.
*
@@ -14,11 +31,8 @@
* may be used to reset the timeout - for code which intentionally
* disables interrupts for a long time. This call is stateless.
*/
-#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR_NMI)
-#include <asm/nmi.h>
-#endif
-
#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
+#include <asm/nmi.h>
extern void touch_nmi_watchdog(void);
#else
static inline void touch_nmi_watchdog(void)
@@ -106,9 +120,17 @@
extern int soft_watchdog_enabled;
extern int watchdog_user_enabled;
extern int watchdog_thresh;
+extern unsigned long watchdog_enabled;
extern unsigned long *watchdog_cpumask_bits;
+extern atomic_t watchdog_park_in_progress;
+#ifdef CONFIG_SMP
extern int sysctl_softlockup_all_cpu_backtrace;
extern int sysctl_hardlockup_all_cpu_backtrace;
+#else
+#define sysctl_softlockup_all_cpu_backtrace 0
+#define sysctl_hardlockup_all_cpu_backtrace 0
+#endif
+extern bool is_hardlockup(void);
struct ctl_table;
extern int proc_watchdog(struct ctl_table *, int ,
void __user *, size_t *, loff_t *);
diff --git a/include/linux/oom.h b/include/linux/oom.h
index b4e36e9..b986840 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -79,6 +79,9 @@
extern struct task_struct *find_lock_task_mm(struct task_struct *p);
+extern void dump_tasks(struct mem_cgroup *memcg,
+ const nodemask_t *nodemask);
+
/* sysctls */
extern int sysctl_oom_dump_tasks;
extern int sysctl_oom_kill_allocating_task;
diff --git a/include/linux/pci.h b/include/linux/pci.h
index a38772a..1b71179 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -178,6 +178,11 @@
PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
/* Get VPD from function 0 VPD */
PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
+ /*
+ * Resume before calling the driver's system suspend hooks, disabling
+ * the direct_complete optimization.
+ */
+ PCI_DEV_FLAGS_NEEDS_RESUME = (__force pci_dev_flags_t) (1 << 11),
};
enum pci_irq_reroute_variant {
diff --git a/include/linux/phy.h b/include/linux/phy.h
index bd22670..6c9b1e0 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -401,6 +401,9 @@
u32 advertising;
u32 lp_advertising;
+ /* Energy efficient ethernet modes which should be prohibited */
+ u32 eee_broken_modes;
+
int autoneg;
int link_timeout;
diff --git a/include/linux/plist.h b/include/linux/plist.h
index 9788360..0ea3e1b 100644
--- a/include/linux/plist.h
+++ b/include/linux/plist.h
@@ -266,6 +266,9 @@
#define plist_next(pos) \
list_next_entry(pos, node_list)
+#define plist_next_entry(pos, type, member) \
+ container_of(plist_next(pos), type, member)
+
/**
* plist_prev - get the prev entry in list
* @pos: the type * to cursor
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 4381570..8e7a431 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -271,6 +271,7 @@
POWER_SUPPLY_TYPE_USB_HVDCP_3, /* Efficient High Voltage DCP */
POWER_SUPPLY_TYPE_USB_PD, /* Power Delivery */
POWER_SUPPLY_TYPE_WIRELESS, /* Accessory Charger Adapters */
+ POWER_SUPPLY_TYPE_USB_FLOAT, /* Floating charger */
POWER_SUPPLY_TYPE_BMS, /* Battery Monitor System */
POWER_SUPPLY_TYPE_PARALLEL, /* Parallel Path */
POWER_SUPPLY_TYPE_MAIN, /* Main Path */
diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h
index 6c9ddcd..2938206 100644
--- a/include/linux/qcom-geni-se.h
+++ b/include/linux/qcom-geni-se.h
@@ -64,11 +64,14 @@
struct pinctrl *geni_pinctrl;
struct pinctrl_state *geni_gpio_active;
struct pinctrl_state *geni_gpio_sleep;
+ int clk_freq_out;
};
#define PINCTRL_DEFAULT "default"
#define PINCTRL_SLEEP "sleep"
+#define KHz(freq) (1000 * (freq))
+
/* Common SE registers */
#define GENI_INIT_CFG_REVISION (0x0)
#define GENI_S_INIT_CFG_REVISION (0x4)
diff --git a/include/linux/qcom_tspp.h b/include/linux/qcom_tspp.h
new file mode 100644
index 0000000..7a9e569
--- /dev/null
+++ b/include/linux/qcom_tspp.h
@@ -0,0 +1,122 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MSM_TSPP_H_
+#define _MSM_TSPP_H_
+
+struct tspp_data_descriptor {
+ void *virt_base; /* logical address of the actual data */
+ phys_addr_t phys_base; /* physical address of the actual data */
+ dma_addr_t dma_base; /* DMA address of the actual data */
+ u32 size; /* size of buffer in bytes */
+ int id; /* unique identifier */
+ void *user; /* user-defined data */
+};
+
+enum tspp_key_parity {
+ TSPP_KEY_PARITY_EVEN,
+ TSPP_KEY_PARITY_ODD
+};
+
+struct tspp_key {
+ enum tspp_key_parity parity;
+ int lsb;
+ int msb;
+};
+
+enum tspp_source {
+ TSPP_SOURCE_TSIF0,
+ TSPP_SOURCE_TSIF1,
+ TSPP_SOURCE_MEM,
+ TSPP_SOURCE_NONE = -1
+};
+
+enum tspp_mode {
+ TSPP_MODE_DISABLED,
+ TSPP_MODE_PES,
+ TSPP_MODE_RAW,
+ TSPP_MODE_RAW_NO_SUFFIX
+};
+
+enum tspp_tsif_mode {
+ TSPP_TSIF_MODE_LOOPBACK, /* loopback mode */
+ TSPP_TSIF_MODE_1, /* without sync */
+ TSPP_TSIF_MODE_2 /* with sync signal */
+};
+
+struct tspp_filter {
+ int pid;
+ int mask;
+ enum tspp_mode mode;
+ unsigned int priority; /* 0 - 15 */
+ int decrypt;
+ enum tspp_source source;
+};
+
+struct tspp_select_source {
+ enum tspp_source source;
+ enum tspp_tsif_mode mode;
+ int clk_inverse;
+ int data_inverse;
+ int sync_inverse;
+ int enable_inverse;
+};
+
+enum tsif_tts_source {
+ TSIF_TTS_TCR = 0, /* Time stamps from TCR counter */
+ TSIF_TTS_LPASS_TIMER /* Time stamps from AV/Qtimer Timer */
+};
+
+struct tspp_ion_dma_buf_info {
+ struct dma_buf *dbuf;
+ struct dma_buf_attachment *attach;
+ struct sg_table *table;
+ bool smmu_map;
+ dma_addr_t dma_map_base;
+};
+
+typedef void (tspp_notifier)(int channel_id, void *user);
+typedef void* (tspp_allocator)(int channel_id, u32 size,
+ phys_addr_t *phys_base, dma_addr_t *dma_base, void *user);
+typedef void (tspp_memfree)(int channel_id, u32 size,
+ void *virt_base, phys_addr_t phys_base, void *user);
+
+/* Kernel API functions */
+int tspp_open_stream(u32 dev, u32 channel_id,
+ struct tspp_select_source *source);
+int tspp_close_stream(u32 dev, u32 channel_id);
+int tspp_open_channel(u32 dev, u32 channel_id);
+int tspp_close_channel(u32 dev, u32 channel_id);
+int tspp_get_ref_clk_counter(u32 dev,
+ enum tspp_source source, u32 *tcr_counter);
+int tspp_add_filter(u32 dev, u32 channel_id, struct tspp_filter *filter);
+int tspp_remove_filter(u32 dev, u32 channel_id, struct tspp_filter *filter);
+int tspp_set_key(u32 dev, u32 channel_id, struct tspp_key *key);
+int tspp_register_notification(u32 dev, u32 channel_id, tspp_notifier *notify,
+ void *data, u32 timer_ms);
+int tspp_unregister_notification(u32 dev, u32 channel_id);
+const struct tspp_data_descriptor *tspp_get_buffer(u32 dev, u32 channel_id);
+int tspp_release_buffer(u32 dev, u32 channel_id, u32 descriptor_id);
+int tspp_allocate_buffers(u32 dev, u32 channel_id, u32 count,
+ u32 size, u32 int_freq, tspp_allocator *alloc,
+ tspp_memfree *memfree, void *user);
+
+int tspp_get_tts_source(u32 dev, int *tts_source);
+int tspp_get_lpass_time_counter(u32 dev, enum tspp_source source,
+ u64 *lpass_time_counter);
+
+int tspp_attach_ion_dma_buff(u32 dev,
+ struct tspp_ion_dma_buf_info *ion_dma_buf);
+
+int tspp_detach_ion_dma_buff(u32 dev,
+ struct tspp_ion_dma_buf_info *ion_dma_buf);
+#endif /* _MSM_TSPP_H_ */
diff --git a/include/linux/random.h b/include/linux/random.h
index 16ab429..1fa0dc8 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -42,8 +42,42 @@
extern const struct file_operations random_fops, urandom_fops;
#endif
-unsigned int get_random_int(void);
-unsigned long get_random_long(void);
+u32 get_random_u32(void);
+u64 get_random_u64(void);
+static inline unsigned int get_random_int(void)
+{
+ return get_random_u32();
+}
+static inline unsigned long get_random_long(void)
+{
+#if BITS_PER_LONG == 64
+ return get_random_u64();
+#else
+ return get_random_u32();
+#endif
+}
+
+/*
+ * On 64-bit architectures, protect against non-terminated C string overflows
+ * by zeroing out the first byte of the canary; this leaves 56 bits of entropy.
+ */
+#ifdef CONFIG_64BIT
+# ifdef __LITTLE_ENDIAN
+# define CANARY_MASK 0xffffffffffffff00UL
+# else /* big endian, 64 bits: */
+# define CANARY_MASK 0x00ffffffffffffffUL
+# endif
+#else /* 32 bits: */
+# define CANARY_MASK 0xffffffffUL
+#endif
+
+static inline unsigned long get_random_canary(void)
+{
+ unsigned long val = get_random_long();
+
+ return val & CANARY_MASK;
+}
+
unsigned long randomize_page(unsigned long start, unsigned long range);
u32 prandom_u32(void);
diff --git a/include/linux/restart_block.h b/include/linux/restart_block.h
new file mode 100644
index 0000000..0d905d8
--- /dev/null
+++ b/include/linux/restart_block.h
@@ -0,0 +1,51 @@
+/*
+ * Common syscall restarting data
+ */
+#ifndef __LINUX_RESTART_BLOCK_H
+#define __LINUX_RESTART_BLOCK_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+struct timespec;
+struct compat_timespec;
+struct pollfd;
+
+/*
+ * System call restart block.
+ */
+struct restart_block {
+ long (*fn)(struct restart_block *);
+ union {
+ /* For futex_wait and futex_wait_requeue_pi */
+ struct {
+ u32 __user *uaddr;
+ u32 val;
+ u32 flags;
+ u32 bitset;
+ u64 time;
+ u32 __user *uaddr2;
+ } futex;
+ /* For nanosleep */
+ struct {
+ clockid_t clockid;
+ struct timespec __user *rmtp;
+#ifdef CONFIG_COMPAT
+ struct compat_timespec __user *compat_rmtp;
+#endif
+ u64 expires;
+ } nanosleep;
+ /* For poll */
+ struct {
+ struct pollfd __user *ufds;
+ int nfds;
+ int has_timeout;
+ unsigned long tv_sec;
+ unsigned long tv_nsec;
+ } poll;
+ };
+};
+
+extern long do_no_restart_syscall(struct restart_block *parm);
+
+#endif /* __LINUX_RESTART_BLOCK_H */
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index b46bb56..71fd2b3 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -10,6 +10,11 @@
#include <linux/rwsem.h>
#include <linux/memcontrol.h>
+extern int isolate_lru_page(struct page *page);
+extern void putback_lru_page(struct page *page);
+extern unsigned long reclaim_pages_from_list(struct list_head *page_list,
+ struct vm_area_struct *vma);
+
/*
* The anon_vma heads a list of private "related" vmas, to scan if
* an anonymous page pointing to this anon_vma needs to be unmapped:
@@ -186,7 +191,8 @@
#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
-int try_to_unmap(struct page *, enum ttu_flags flags);
+int try_to_unmap(struct page *, enum ttu_flags flags,
+ struct vm_area_struct *vma);
/*
* Used by uprobes to replace a userspace page safely
@@ -263,6 +269,7 @@
*/
struct rmap_walk_control {
void *arg;
+ struct vm_area_struct *target_vma;
int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
unsigned long addr, void *arg);
int (*done)(struct page *page);
@@ -287,7 +294,7 @@
return 0;
}
-#define try_to_unmap(page, refs) SWAP_FAIL
+#define try_to_unmap(page, refs, vma) SWAP_FAIL
static inline int page_mkclean(struct page *page)
{
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9e7ab05..0737cb6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -177,11 +177,31 @@
extern u64 nr_running_integral(unsigned int cpu);
#endif
+#ifdef CONFIG_SMP
extern void sched_update_nr_prod(int cpu, long delta, bool inc);
extern void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg,
unsigned int *max_nr,
unsigned int *big_max_nr);
extern unsigned int sched_get_cpu_util(int cpu);
+extern u64 sched_get_cpu_last_busy_time(int cpu);
+#else
+static inline void sched_update_nr_prod(int cpu, long delta, bool inc)
+{
+}
+static inline void sched_get_nr_running_avg(int *avg, int *iowait_avg,
+ int *big_avg, unsigned int *max_nr,
+ unsigned int *big_max_nr)
+{
+}
+static inline unsigned int sched_get_cpu_util(int cpu)
+{
+ return 0;
+}
+static inline u64 sched_get_cpu_last_busy_time(int cpu)
+{
+ return 0;
+}
+#endif
extern void calc_global_load(unsigned long ticks);
@@ -1484,12 +1504,10 @@
*/
u64 mark_start;
u32 sum, demand;
+ u32 coloc_demand;
u32 sum_history[RAVG_HIST_SIZE_MAX];
u32 *curr_window_cpu, *prev_window_cpu;
u32 curr_window, prev_window;
-#ifdef CONFIG_SCHED_HMP
- u64 curr_burst, avg_burst, avg_sleep_time;
-#endif
u16 active_windows;
u32 pred_demand;
u8 busy_buckets[NUM_BUSY_BUCKETS];
@@ -2637,38 +2655,10 @@
#define MAX_NUM_CGROUP_COLOC_ID 20
-#ifdef CONFIG_SCHED_HMP
-extern int sched_set_window(u64 window_start, unsigned int window_size);
-extern unsigned long sched_get_busy(int cpu);
-extern void sched_get_cpus_busy(struct sched_load *busy,
- const struct cpumask *query_cpus);
-extern int sched_set_init_task_load(struct task_struct *p, int init_load_pct);
-extern u32 sched_get_init_task_load(struct task_struct *p);
-extern int sched_set_static_cpu_pwr_cost(int cpu, unsigned int cost);
-extern unsigned int sched_get_static_cpu_pwr_cost(int cpu);
-extern int sched_set_static_cluster_pwr_cost(int cpu, unsigned int cost);
-extern unsigned int sched_get_static_cluster_pwr_cost(int cpu);
-extern int sched_set_cluster_wake_idle(int cpu, unsigned int wake_idle);
-extern unsigned int sched_get_cluster_wake_idle(int cpu);
-extern int sched_update_freq_max_load(const cpumask_t *cpumask);
-extern void sched_update_cpu_freq_min_max(const cpumask_t *cpus,
- u32 fmin, u32 fmax);
-extern void sched_set_cpu_cstate(int cpu, int cstate,
- int wakeup_energy, int wakeup_latency);
-extern void sched_set_cluster_dstate(const cpumask_t *cluster_cpus, int dstate,
- int wakeup_energy, int wakeup_latency);
-extern int sched_set_group_id(struct task_struct *p, unsigned int group_id);
-extern unsigned int sched_get_group_id(struct task_struct *p);
-
-#else /* CONFIG_SCHED_HMP */
static inline int sched_set_window(u64 window_start, unsigned int window_size)
{
return -EINVAL;
}
-static inline unsigned long sched_get_busy(int cpu)
-{
- return 0;
-}
static inline void sched_get_cpus_busy(struct sched_load *busy,
const struct cpumask *query_cpus) {};
@@ -2682,12 +2672,6 @@
{
}
-static inline void sched_set_cluster_dstate(const cpumask_t *cluster_cpus,
- int dstate, int wakeup_energy, int wakeup_latency)
-{
-}
-#endif /* CONFIG_SCHED_HMP */
-
#ifdef CONFIG_SCHED_WALT
extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
extern void sched_set_io_is_busy(int val);
@@ -2715,10 +2699,8 @@
#endif /* CONFIG_SCHED_WALT */
#ifndef CONFIG_SCHED_WALT
-#ifndef CONFIG_SCHED_HMP
static inline void sched_update_cpu_freq_min_max(const cpumask_t *cpus,
u32 fmin, u32 fmax) { }
-#endif /* CONFIG_SCHED_HMP */
#endif /* CONFIG_SCHED_WALT */
#ifdef CONFIG_NO_HZ_COMMON
@@ -2831,7 +2813,7 @@
task_sched_runtime(struct task_struct *task);
/* sched_exec is called by processes performing an exec */
-#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_HMP)
+#ifdef CONFIG_SMP
extern void sched_exec(void);
#else
#define sched_exec() {}
@@ -2966,7 +2948,6 @@
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
-extern int wake_up_process_no_notif(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);
#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
@@ -3201,7 +3182,7 @@
}
/* mmput gets rid of the mappings and all user-space */
-extern void mmput(struct mm_struct *);
+extern int mmput(struct mm_struct *mm);
#ifdef CONFIG_MMU
/* same as above but performs the slow path from the async context. Can
* be called from the atomic context as well
@@ -3905,6 +3886,8 @@
#define SCHED_CPUFREQ_DL (1U << 1)
#define SCHED_CPUFREQ_IOWAIT (1U << 2)
#define SCHED_CPUFREQ_INTERCLUSTER_MIG (1U << 3)
+#define SCHED_CPUFREQ_WALT (1U << 4)
+#define SCHED_CPUFREQ_PL (1U << 5)
#define SCHED_CPUFREQ_RT_DL (SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL)
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 190bf3b..3597d55 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -22,52 +22,24 @@
extern unsigned int sysctl_sched_sync_hint_enable;
extern unsigned int sysctl_sched_initial_task_util;
extern unsigned int sysctl_sched_cstate_aware;
-#ifdef CONFIG_SCHED_WALT
-extern unsigned int sysctl_sched_use_walt_cpu_util;
-extern unsigned int sysctl_sched_use_walt_task_util;
-extern unsigned int sysctl_sched_init_task_load_pct;
-#endif
+extern unsigned int sysctl_sched_capacity_margin;
+extern unsigned int sysctl_sched_capacity_margin_down;
#ifdef CONFIG_SCHED_WALT
+extern unsigned int sysctl_sched_init_task_load_pct;
extern unsigned int sysctl_sched_cpu_high_irqload;
extern unsigned int sysctl_sched_use_walt_cpu_util;
extern unsigned int sysctl_sched_use_walt_task_util;
extern unsigned int sysctl_sched_boost;
-#endif
-
-#ifdef CONFIG_SCHED_HMP
-
-enum freq_reporting_policy {
- FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK,
- FREQ_REPORT_CPU_LOAD,
- FREQ_REPORT_TOP_TASK,
- FREQ_REPORT_INVALID_POLICY
-};
-
-extern int sysctl_sched_freq_inc_notify;
-extern int sysctl_sched_freq_dec_notify;
-extern unsigned int sysctl_sched_freq_reporting_policy;
-extern unsigned int sysctl_sched_window_stats_policy;
-extern unsigned int sysctl_sched_ravg_hist_size;
-extern unsigned int sysctl_sched_spill_nr_run;
-extern unsigned int sysctl_sched_spill_load_pct;
-extern unsigned int sysctl_sched_upmigrate_pct;
-extern unsigned int sysctl_sched_downmigrate_pct;
extern unsigned int sysctl_sched_group_upmigrate_pct;
extern unsigned int sysctl_sched_group_downmigrate_pct;
-extern unsigned int sysctl_early_detection_duration;
-extern unsigned int sysctl_sched_small_wakee_task_load_pct;
-extern unsigned int sysctl_sched_big_waker_task_load_pct;
-extern unsigned int sysctl_sched_select_prev_cpu_us;
-extern unsigned int sysctl_sched_restrict_cluster_spill;
-extern unsigned int sysctl_sched_pred_alert_freq;
-extern unsigned int sysctl_sched_freq_aggregate;
-extern unsigned int sysctl_sched_enable_thread_grouping;
-extern unsigned int sysctl_sched_freq_aggregate_threshold_pct;
-extern unsigned int sysctl_sched_prefer_sync_wakee_to_waker;
-extern unsigned int sysctl_sched_short_burst;
-extern unsigned int sysctl_sched_short_sleep;
-#endif /* CONFIG_SCHED_HMP */
+
+extern int
+walt_proc_update_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+
+#endif /* CONFIG_SCHED_WALT */
enum sched_tunable_scaling {
SCHED_TUNABLESCALING_NONE,
@@ -96,9 +68,6 @@
extern int sched_migrate_notify_proc_handler(struct ctl_table *table,
int write, void __user *buffer, size_t *lenp, loff_t *ppos);
-extern int sched_hmp_proc_update_handler(struct ctl_table *table,
- int write, void __user *buffer, size_t *lenp, loff_t *ppos);
-
extern int sched_boost_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
@@ -148,6 +117,10 @@
void __user *buffer, size_t *lenp,
loff_t *ppos);
+extern int sched_updown_migrate_handler(struct ctl_table *table,
+ int write, void __user *buffer,
+ size_t *lenp, loff_t *ppos);
+
extern int sysctl_numa_balancing(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos);
diff --git a/include/linux/sde_rsc.h b/include/linux/sde_rsc.h
index 1450caa..0320210 100644
--- a/include/linux/sde_rsc.h
+++ b/include/linux/sde_rsc.h
@@ -114,15 +114,16 @@
*
* @fps: panel te interval
* @vtotal: current vertical total (height + vbp + vfp)
- * @jitter: panel can set the jitter to wake up rsc/solver early
- * This value causes mdp core to exit certain mode
- * early. Default is 10% jitter
+ * @jitter_numer: panel jitter numerator value. This config causes rsc/solver
+ * to wake up early, before te. Default is 0.8% jitter.
+ * @jitter_denom: panel jitter denominator.
* @prefill_lines: max prefill lines based on panel
*/
struct sde_rsc_cmd_config {
u32 fps;
u32 vtotal;
- u32 jitter;
+ u32 jitter_numer;
+ u32 jitter_denom;
u32 prefill_lines;
};
diff --git a/include/linux/seemp_instrumentation.h b/include/linux/seemp_instrumentation.h
index 21bc436..1db7a44 100644
--- a/include/linux/seemp_instrumentation.h
+++ b/include/linux/seemp_instrumentation.h
@@ -15,6 +15,8 @@
#ifdef CONFIG_SEEMP_CORE
#include <linux/kernel.h>
+#include <linux/seemp_api.h>
+#include <linux/socket.h>
#define MAX_BUF_SIZE 188
@@ -66,11 +68,33 @@
seemp_logk_kernel_end(blck);
}
+
+static inline void seemp_logk_rtic(__u8 type, pid_t pid, __u8 asset_id[0x20],
+ __u8 asset_category, __u8 response)
+{
+ char *buf = NULL;
+ void *blck = NULL;
+
+ blck = seemp_setup_buf(&buf);
+ if (!blck)
+ return;
+
+ SEEMP_LOGK_RECORD(SEEMP_API_kernel__rtic,
+ "app_pid=%d,rtic_type=%u,asset_id=%s,asset_category=%u,response=%u",
+ pid, type, asset_id, asset_category, response);
+
+ seemp_logk_kernel_end(blck);
+}
#else
static inline void seemp_logk_sendto(int fd, void __user *buff,
size_t len, unsigned int flags, struct sockaddr __user *addr,
int addr_len)
{
}
+
+static inline void seemp_logk_rtic(__u8 type, __u64 actor, __u8 asset_id[0x20],
+ __u8 asset_category, __u8 response)
+{
+}
#endif
#endif
diff --git a/include/linux/string.h b/include/linux/string.h
index 26b6f6a..4e510df 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -169,4 +169,204 @@
return tail ? tail + 1 : path;
}
+#define __FORTIFY_INLINE extern __always_inline __attribute__((gnu_inline))
+#define __RENAME(x) __asm__(#x)
+
+void fortify_panic(const char *name) __noreturn __cold;
+void __read_overflow(void) __compiletime_error("detected read beyond size of object passed as 1st parameter");
+void __read_overflow2(void) __compiletime_error("detected read beyond size of object passed as 2nd parameter");
+void __write_overflow(void) __compiletime_error("detected write beyond size of object passed as 1st parameter");
+
+#if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE)
+__FORTIFY_INLINE char *strcpy(char *p, const char *q)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ size_t q_size = __builtin_object_size(q, 0);
+ if (p_size == (size_t)-1 && q_size == (size_t)-1)
+ return __builtin_strcpy(p, q);
+ if (strscpy(p, q, p_size < q_size ? p_size : q_size) < 0)
+ fortify_panic(__func__);
+ return p;
+}
+
+__FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ if (__builtin_constant_p(size) && p_size < size)
+ __write_overflow();
+ if (p_size < size)
+ fortify_panic(__func__);
+ return __builtin_strncpy(p, q, size);
+}
+
+__FORTIFY_INLINE char *strcat(char *p, const char *q)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ if (p_size == (size_t)-1)
+ return __builtin_strcat(p, q);
+ if (strlcat(p, q, p_size) >= p_size)
+ fortify_panic(__func__);
+ return p;
+}
+
+__FORTIFY_INLINE __kernel_size_t strlen(const char *p)
+{
+ __kernel_size_t ret;
+ size_t p_size = __builtin_object_size(p, 0);
+ if (p_size == (size_t)-1)
+ return __builtin_strlen(p);
+ ret = strnlen(p, p_size);
+ if (p_size <= ret)
+ fortify_panic(__func__);
+ return ret;
+}
+
+extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen);
+__FORTIFY_INLINE __kernel_size_t strnlen(const char *p, __kernel_size_t maxlen)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ __kernel_size_t ret = __real_strnlen(p, maxlen < p_size ? maxlen : p_size);
+ if (p_size <= ret && maxlen != ret)
+ fortify_panic(__func__);
+ return ret;
+}
+
+/* defined after fortified strlen to reuse it */
+extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy);
+__FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size)
+{
+ size_t ret;
+ size_t p_size = __builtin_object_size(p, 0);
+ size_t q_size = __builtin_object_size(q, 0);
+ if (p_size == (size_t)-1 && q_size == (size_t)-1)
+ return __real_strlcpy(p, q, size);
+ ret = strlen(q);
+ if (size) {
+ size_t len = (ret >= size) ? size - 1 : ret;
+ if (__builtin_constant_p(len) && len >= p_size)
+ __write_overflow();
+ if (len >= p_size)
+ fortify_panic(__func__);
+ __builtin_memcpy(p, q, len);
+ p[len] = '\0';
+ }
+ return ret;
+}
+
+/* defined after fortified strlen and strnlen to reuse them */
+__FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count)
+{
+ size_t p_len, copy_len;
+ size_t p_size = __builtin_object_size(p, 0);
+ size_t q_size = __builtin_object_size(q, 0);
+ if (p_size == (size_t)-1 && q_size == (size_t)-1)
+ return __builtin_strncat(p, q, count);
+ p_len = strlen(p);
+ copy_len = strnlen(q, count);
+ if (p_size < p_len + copy_len + 1)
+ fortify_panic(__func__);
+ __builtin_memcpy(p + p_len, q, copy_len);
+ p[p_len + copy_len] = '\0';
+ return p;
+}
+
+__FORTIFY_INLINE void *memset(void *p, int c, __kernel_size_t size)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ if (__builtin_constant_p(size) && p_size < size)
+ __write_overflow();
+ if (p_size < size)
+ fortify_panic(__func__);
+ return __builtin_memset(p, c, size);
+}
+
+__FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ size_t q_size = __builtin_object_size(q, 0);
+ if (__builtin_constant_p(size)) {
+ if (p_size < size)
+ __write_overflow();
+ if (q_size < size)
+ __read_overflow2();
+ }
+ if (p_size < size || q_size < size)
+ fortify_panic(__func__);
+ return __builtin_memcpy(p, q, size);
+}
+
+__FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ size_t q_size = __builtin_object_size(q, 0);
+ if (__builtin_constant_p(size)) {
+ if (p_size < size)
+ __write_overflow();
+ if (q_size < size)
+ __read_overflow2();
+ }
+ if (p_size < size || q_size < size)
+ fortify_panic(__func__);
+ return __builtin_memmove(p, q, size);
+}
+
+extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan);
+__FORTIFY_INLINE void *memscan(void *p, int c, __kernel_size_t size)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ if (__builtin_constant_p(size) && p_size < size)
+ __read_overflow();
+ if (p_size < size)
+ fortify_panic(__func__);
+ return __real_memscan(p, c, size);
+}
+
+__FORTIFY_INLINE int memcmp(const void *p, const void *q, __kernel_size_t size)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ size_t q_size = __builtin_object_size(q, 0);
+ if (__builtin_constant_p(size)) {
+ if (p_size < size)
+ __read_overflow();
+ if (q_size < size)
+ __read_overflow2();
+ }
+ if (p_size < size || q_size < size)
+ fortify_panic(__func__);
+ return __builtin_memcmp(p, q, size);
+}
+
+__FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ if (__builtin_constant_p(size) && p_size < size)
+ __read_overflow();
+ if (p_size < size)
+ fortify_panic(__func__);
+ return __builtin_memchr(p, c, size);
+}
+
+void *__real_memchr_inv(const void *s, int c, size_t n) __RENAME(memchr_inv);
+__FORTIFY_INLINE void *memchr_inv(const void *p, int c, size_t size)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ if (__builtin_constant_p(size) && p_size < size)
+ __read_overflow();
+ if (p_size < size)
+ fortify_panic(__func__);
+ return __real_memchr_inv(p, c, size);
+}
+
+extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup);
+__FORTIFY_INLINE void *kmemdup(const void *p, size_t size, gfp_t gfp)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ if (__builtin_constant_p(size) && p_size < size)
+ __read_overflow();
+ if (p_size < size)
+ fortify_panic(__func__);
+ return __real_kmemdup(p, size, gfp);
+}
+#endif
+
#endif /* _LINUX_STRING_H_ */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 55ff559..92d1fde 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -151,12 +151,14 @@
SWP_AREA_DISCARD = (1 << 8), /* single-time swap area discards */
SWP_PAGE_DISCARD = (1 << 9), /* freed swap page-cluster discards */
SWP_STABLE_WRITES = (1 << 10), /* no overwrite PG_writeback pages */
+ SWP_FAST = (1 << 11), /* blkdev access is fast and cheap */
/* add others here before... */
- SWP_SCANNING = (1 << 11), /* refcount in scan_swap_map */
+ SWP_SCANNING = (1 << 12), /* refcount in scan_swap_map */
};
#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX
+#define SWAPFILE_CLUSTER 256
#define SWAP_MAP_MAX 0x3e /* Max duplication count, in first swap_map */
#define SWAP_MAP_BAD 0x3f /* Note pageblock is bad, in first swap_map */
@@ -241,6 +243,8 @@
*/
struct work_struct discard_work; /* discard worker */
struct swap_cluster_list discard_clusters; /* discard clusters list */
+ unsigned int write_pending;
+ unsigned int max_writes;
};
/* linux/mm/workingset.c */
@@ -328,6 +332,8 @@
unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
+extern int sysctl_swap_ratio;
+extern int sysctl_swap_ratio_enable;
extern int remove_mapping(struct address_space *mapping, struct page *page);
extern unsigned long vm_total_pages;
@@ -389,10 +395,18 @@
/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
+extern bool is_swap_fast(swp_entry_t entry);
/* Swap 50% full? Release swapcache more aggressively.. */
-static inline bool vm_swap_full(void)
+static inline bool vm_swap_full(struct swap_info_struct *si)
{
+	/*
+	 * If the swap device is fast, return true
+	 * so as not to delay freeing swap.
+	 */
+ if (si->flags & SWP_FAST)
+ return true;
+
return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}
@@ -428,7 +442,7 @@
#define get_nr_swap_pages() 0L
#define total_swap_pages 0L
#define total_swapcache_pages() 0UL
-#define vm_swap_full() 0
+#define vm_swap_full(si) 0
#define si_swapinfo(val) \
do { (val)->freeswap = (val)->totalswap = 0; } while (0)
@@ -579,7 +593,7 @@
static inline bool mem_cgroup_swap_full(struct page *page)
{
- return vm_swap_full();
+ return vm_swap_full(page_swap_info(page));
}
#endif
diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h
index 388293a..ed2a9c9 100644
--- a/include/linux/swapfile.h
+++ b/include/linux/swapfile.h
@@ -7,7 +7,12 @@
*/
extern spinlock_t swap_lock;
extern struct plist_head swap_active_head;
+extern spinlock_t swap_avail_lock;
+extern struct plist_head swap_avail_head;
extern struct swap_info_struct *swap_info[];
extern int try_to_unuse(unsigned int, bool, unsigned long);
+extern int swap_ratio(struct swap_info_struct **si);
+extern void setup_swap_ratio(struct swap_info_struct *p, int prio);
+extern bool is_swap_ratio_group(int prio);
#endif /* _LINUX_SWAPFILE_H */
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index adf4e51..8f84c84 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -59,6 +59,9 @@
void __user *, size_t *, loff_t *);
extern int proc_do_large_bitmap(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
+extern int proc_douintvec_capacity(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
/*
* Register a set of sysctl names by calling register_sysctl_table
diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
index 387fa7d..d802692 100644
--- a/include/linux/sysrq.h
+++ b/include/linux/sysrq.h
@@ -42,6 +42,7 @@
* are available -- else NULL's).
*/
+bool sysrq_on(void);
void handle_sysrq(int key);
void __handle_sysrq(int key, bool check_mask);
int register_sysrq_key(int key, struct sysrq_key_op *op);
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 2873baf..5837387 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -9,51 +9,18 @@
#include <linux/types.h>
#include <linux/bug.h>
-
-struct timespec;
-struct compat_timespec;
+#include <linux/restart_block.h>
#ifdef CONFIG_THREAD_INFO_IN_TASK
+/*
+ * For CONFIG_THREAD_INFO_IN_TASK kernels we need <asm/current.h> for the
+ * definition of current, but for !CONFIG_THREAD_INFO_IN_TASK kernels,
+ * including <asm/current.h> can cause a circular dependency on some platforms.
+ */
+#include <asm/current.h>
#define current_thread_info() ((struct thread_info *)current)
#endif
-/*
- * System call restart block.
- */
-struct restart_block {
- long (*fn)(struct restart_block *);
- union {
- /* For futex_wait and futex_wait_requeue_pi */
- struct {
- u32 __user *uaddr;
- u32 val;
- u32 flags;
- u32 bitset;
- u64 time;
- u32 __user *uaddr2;
- } futex;
- /* For nanosleep */
- struct {
- clockid_t clockid;
- struct timespec __user *rmtp;
-#ifdef CONFIG_COMPAT
- struct compat_timespec __user *compat_rmtp;
-#endif
- u64 expires;
- } nanosleep;
- /* For poll */
- struct {
- struct pollfd __user *ufds;
- int nfds;
- int has_timeout;
- unsigned long tv_sec;
- unsigned long tv_nsec;
- } poll;
- };
-};
-
-extern long do_no_restart_syscall(struct restart_block *parm);
-
#include <linux/bitops.h>
#include <asm/thread_info.h>
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index e880054..2c225d4 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -29,7 +29,6 @@
*/
struct tk_read_base {
struct clocksource *clock;
- cycle_t (*read)(struct clocksource *cs);
cycle_t mask;
cycle_t cycle_last;
u32 mult;
@@ -58,7 +57,7 @@
* interval.
* @xtime_remainder: Shifted nano seconds left over when rounding
* @cycle_interval
- * @raw_interval: Raw nano seconds accumulated per NTP interval.
+ * @raw_interval: Shifted raw nano seconds accumulated per NTP interval.
* @ntp_error: Difference between accumulated time and NTP time in ntp
* shifted nano seconds.
* @ntp_error_shift: Shift conversion between clock shifted nano seconds and
@@ -100,7 +99,7 @@
cycle_t cycle_interval;
u64 xtime_interval;
s64 xtime_remainder;
- u32 raw_interval;
+ u64 raw_interval;
/* The ntp_tick_length() value currently being used.
* This cached copy ensures we consistently apply the tick
* length for an entire tick, as ntp_tick_length may change
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 56dde53..5f5107b 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -185,6 +185,9 @@
extern int del_timer(struct timer_list * timer);
extern int mod_timer(struct timer_list *timer, unsigned long expires);
extern int mod_timer_pending(struct timer_list *timer, unsigned long expires);
+#ifdef CONFIG_SMP
+extern bool check_pending_deferrable_timers(int cpu);
+#endif
/*
* The jiffies value which is added to now, when there is no timer
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 1f39661..0f9fff3 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -333,6 +333,8 @@
struct usb_ssp_cap_descriptor *ssp_cap;
struct usb_ss_container_id_descriptor *ss_id;
struct usb_ptm_cap_descriptor *ptm_cap;
+ struct usb_config_summary_descriptor *config_summary;
+ unsigned int num_config_summary_desc;
};
int __usb_get_extra_descriptor(char *buffer, unsigned size,
diff --git a/include/linux/usb/audio-v3.h b/include/linux/usb/audio-v3.h
index f2322f3..b6cc17b 100644
--- a/include/linux/usb/audio-v3.h
+++ b/include/linux/usb/audio-v3.h
@@ -50,7 +50,8 @@
#define CLUSTER_ID_MONO 0x0001
#define CLUSTER_ID_STEREO 0x0002
-#define FULL_ADC_PROFILE 0x01
+/* A.2 audio function subclass codes */
+#define FULL_ADC_3_0 0x01
/* BADD Profile IDs */
#define PROF_GENERIC_IO 0x20
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 5c0b3fa..b305b0e 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -582,9 +582,9 @@
((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8)
#define EndpointRequest \
- ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8)
+ ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_ENDPOINT)<<8)
#define EndpointOutRequest \
- ((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8)
+ ((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_ENDPOINT)<<8)
/* class requests from the USB 2.0 hub spec, table 11-15 */
/* GetBusState and SetHubDescriptor are optional, omitted */
diff --git a/include/linux/usb/usbpd.h b/include/linux/usb/usbpd.h
index 3566a7a..4dbd91f 100644
--- a/include/linux/usb/usbpd.h
+++ b/include/linux/usb/usbpd.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -46,6 +46,10 @@
void (*connect)(struct usbpd_svid_handler *hdlr);
void (*disconnect)(struct usbpd_svid_handler *hdlr);
+ /* DP driver -> PE driver for requesting USB SS lanes */
+ int (*request_usb_ss_lane)(struct usbpd *pd,
+ struct usbpd_svid_handler *hdlr);
+
/* Unstructured VDM */
void (*vdm_received)(struct usbpd_svid_handler *hdlr, u32 vdm_hdr,
const u32 *vdos, int num_vdos);
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index 0ecae0b..ed46675 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -88,6 +88,8 @@
*/
extern struct vfio_group *vfio_group_get_external_user(struct file *filep);
extern void vfio_group_put_external_user(struct vfio_group *group);
+extern bool vfio_external_group_match_file(struct vfio_group *group,
+ struct file *filep);
extern int vfio_external_user_iommu_id(struct vfio_group *group);
extern long vfio_external_check_extension(struct vfio_group *group,
unsigned long arg);
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 4d6ec58..9cc195f 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -21,7 +21,7 @@
#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL, HIGHMEM_ZONE(xx) xx##_MOVABLE
-enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
+enum vm_event_item { PGPGIN, PGPGOUT, PGPGOUTCLEAN, PSWPIN, PSWPOUT,
FOR_ALL_ZONES(PGALLOC),
FOR_ALL_ZONES(ALLOCSTALL),
FOR_ALL_ZONES(PGSCAN_SKIP),
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 3d9d786..f113e0e 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -19,6 +19,8 @@
#define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
#define VM_NO_GUARD 0x00000040 /* don't add guard page */
#define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */
+#define VM_LOWMEM 0x00000100 /* Tracking of direct mapped lowmem */
+
/* bits [20..32] reserved for arch specific ioremap internals */
/*
@@ -82,6 +84,7 @@
const void *caller);
extern void vfree(const void *addr);
+extern void vfree_atomic(const void *addr);
extern void *vmap(struct page **pages, unsigned int count,
unsigned long flags, pgprot_t prot);
@@ -159,6 +162,13 @@
extern struct list_head vmap_area_list;
extern __init void vm_area_add_early(struct vm_struct *vm);
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
+extern __init int vm_area_check_early(struct vm_struct *vm);
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+extern void mark_vmalloc_reserved_area(void *addr, unsigned long size);
+#else
+static inline void mark_vmalloc_reserved_area(void *addr, unsigned long size)
+{ };
+#endif
#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
@@ -184,7 +194,12 @@
#endif
#ifdef CONFIG_MMU
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+extern unsigned long total_vmalloc_size;
+#define VMALLOC_TOTAL total_vmalloc_size
+#else
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
+#endif
#else
#define VMALLOC_TOTAL 0UL
#endif
diff --git a/include/linux/vmpressure.h b/include/linux/vmpressure.h
index 3347cc3..93000f5 100644
--- a/include/linux/vmpressure.h
+++ b/include/linux/vmpressure.h
@@ -15,6 +15,7 @@
unsigned long tree_scanned;
unsigned long tree_reclaimed;
+ unsigned long stall;
/* The lock is used to keep the scanned/reclaimed above in sync. */
struct spinlock sr_lock;
@@ -28,11 +29,13 @@
struct mem_cgroup;
-#ifdef CONFIG_MEMCG
+extern int vmpressure_notifier_register(struct notifier_block *nb);
+extern int vmpressure_notifier_unregister(struct notifier_block *nb);
extern void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
unsigned long scanned, unsigned long reclaimed);
extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio);
+#ifdef CONFIG_MEMCG
extern void vmpressure_init(struct vmpressure *vmpr);
extern void vmpressure_cleanup(struct vmpressure *vmpr);
extern struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg);
@@ -43,9 +46,9 @@
extern void vmpressure_unregister_event(struct mem_cgroup *memcg,
struct eventfd_ctx *eventfd);
#else
-static inline void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
- unsigned long scanned, unsigned long reclaimed) {}
-static inline void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg,
- int prio) {}
+static inline struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
+{
+ return NULL;
+}
#endif /* CONFIG_MEMCG */
#endif /* __LINUX_VMPRESSURE_H */
diff --git a/include/media/msm_vidc.h b/include/media/msm_vidc.h
index bb5a21c..623b6f0 100644
--- a/include/media/msm_vidc.h
+++ b/include/media/msm_vidc.h
@@ -20,7 +20,7 @@
#include <linux/msm_ion.h>
#include <uapi/media/msm_vidc.h>
-#define HAL_BUFFER_MAX 0xb
+#define HAL_BUFFER_MAX 0xd
enum smem_type {
SMEM_ION,
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 6d27dae..82b4b53 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -24,6 +24,30 @@
#include <linux/net.h>
#include <net/regulatory.h>
+/* backport support for new cfg80211 ops "update_connect_params" */
+#define CFG80211_UPDATE_CONNECT_PARAMS 1
+
+/**
+ * backport support for NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA
+ * and NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED.
+ */
+#define CFG80211_RAND_TA_FOR_PUBLIC_ACTION_FRAME 1
+
+/* backport support for NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI */
+#define CFG80211_REPORT_BETTER_BSS_IN_SCHED_SCAN 1
+
+/* backport support for specifying reason for connect timeout */
+#define CFG80211_CONNECT_TIMEOUT_REASON_CODE 1
+
+/* Indicate backport support for the new connect done api */
+#define CFG80211_CONNECT_DONE 1
+
+/* Indicate backport support for FILS SK offload in cfg80211 */
+#define CFG80211_FILS_SK_OFFLOAD_SUPPORT 1
+
+/* Indicate support for including KEK length in rekey data */
+#define CFG80211_REKEY_DATA_KEK_LEN 1
+
/**
* DOC: Introduction
*
@@ -772,6 +796,30 @@
};
/**
+ * struct iface_combination_params - input parameters for interface combinations
+ *
+ * Used to pass interface combination parameters
+ *
+ * @num_different_channels: the number of different channels we want
+ * to use for verification
+ * @radar_detect: a bitmap where each bit corresponds to a channel
+ * width where radar detection is needed, as in the definition of
+ * &struct ieee80211_iface_combination.@radar_detect_widths
+ * @iftype_num: array with the number of interfaces of each interface
+ * type. The index is the interface type as specified in &enum
+ * nl80211_iftype.
+ * @new_beacon_int: set this to the beacon interval of a new interface
+ * that's not operating yet, if such is to be checked as part of
+ * the verification
+ */
+struct iface_combination_params {
+ int num_different_channels;
+ u8 radar_detect;
+ int iftype_num[NUM_NL80211_IFTYPES];
+ u32 new_beacon_int;
+};
+
+/**
* enum station_parameters_apply_mask - station parameter values to apply
* @STATION_PARAM_APPLY_UAPSD: apply new uAPSD parameters (uapsd_queues, max_sp)
* @STATION_PARAM_APPLY_CAPABILITY: apply new capability
@@ -1568,6 +1616,17 @@
};
/**
+ * struct cfg80211_bss_select_adjust - BSS selection with RSSI adjustment.
+ *
+ * @band: band of BSS which should match for RSSI level adjustment.
+ * @delta: value of RSSI level adjustment.
+ */
+struct cfg80211_bss_select_adjust {
+ enum nl80211_band band;
+ s8 delta;
+};
+
+/**
* struct cfg80211_sched_scan_request - scheduled scan request description
*
* @ssids: SSIDs to scan for (passed in the probe_reqs in active scans)
@@ -1602,6 +1661,16 @@
* cycle. The driver may ignore this parameter and start
* immediately (or at any other time), if this feature is not
* supported.
+ * @relative_rssi_set: Indicates whether @relative_rssi is set or not.
+ * @relative_rssi: Relative RSSI threshold in dB to restrict scan result
+ * reporting in connected state to cases where a matching BSS is determined
+ * to have better or slightly worse RSSI than the current connected BSS.
+ * The relative RSSI threshold values are ignored in disconnected state.
+ * @rssi_adjust: delta dB of RSSI preference to be given to the BSSs that belong
+ * to the specified band while deciding whether a better BSS is reported
+ * using @relative_rssi. If delta is a negative number, the BSSs that
+ * belong to the specified band will be penalized by delta dB in relative
+ *	comparisons.
*/
struct cfg80211_sched_scan_request {
struct cfg80211_ssid *ssids;
@@ -1621,6 +1690,10 @@
u8 mac_addr[ETH_ALEN] __aligned(2);
u8 mac_addr_mask[ETH_ALEN] __aligned(2);
+ bool relative_rssi_set;
+ s8 relative_rssi;
+ struct cfg80211_bss_select_adjust rssi_adjust;
+
/* internal */
struct wiphy *wiphy;
struct net_device *dev;
@@ -1761,9 +1834,11 @@
* @key_len: length of WEP key for shared key authentication
* @key_idx: index of WEP key for shared key authentication
* @key: WEP key for shared key authentication
- * @sae_data: Non-IE data to use with SAE or %NULL. This starts with
- * Authentication transaction sequence number field.
- * @sae_data_len: Length of sae_data buffer in octets
+ * @auth_data: Fields and elements in Authentication frames. This contains
+ * the authentication frame body (non-IE and IE data), excluding the
+ * Authentication algorithm number, i.e., starting at the Authentication
+ * transaction sequence number field.
+ * @auth_data_len: Length of auth_data buffer in octets
*/
struct cfg80211_auth_request {
struct cfg80211_bss *bss;
@@ -1772,8 +1847,8 @@
enum nl80211_auth_type auth_type;
const u8 *key;
u8 key_len, key_idx;
- const u8 *sae_data;
- size_t sae_data_len;
+ const u8 *auth_data;
+ size_t auth_data_len;
};
/**
@@ -1814,6 +1889,12 @@
* @ht_capa_mask: The bits of ht_capa which are to be used.
* @vht_capa: VHT capability override
* @vht_capa_mask: VHT capability mask indicating which fields to use
+ * @fils_kek: FILS KEK for protecting (Re)Association Request/Response frame or
+ * %NULL if FILS is not used.
+ * @fils_kek_len: Length of fils_kek in octets
+ * @fils_nonces: FILS nonces (part of AAD) for protecting (Re)Association
+ * Request/Response frame or %NULL if FILS is not used. This field starts
+ * with 16 octets of STA Nonce followed by 16 octets of AP Nonce.
*/
struct cfg80211_assoc_request {
struct cfg80211_bss *bss;
@@ -1825,6 +1906,9 @@
struct ieee80211_ht_cap ht_capa;
struct ieee80211_ht_cap ht_capa_mask;
struct ieee80211_vht_cap vht_capa, vht_capa_mask;
+ const u8 *fils_kek;
+ size_t fils_kek_len;
+ const u8 *fils_nonces;
};
/**
@@ -1918,17 +2002,6 @@
};
/**
- * struct cfg80211_bss_select_adjust - BSS selection with RSSI adjustment.
- *
- * @band: band of BSS which should match for RSSI level adjustment.
- * @delta: value of RSSI level adjustment.
- */
-struct cfg80211_bss_select_adjust {
- enum nl80211_band band;
- s8 delta;
-};
-
-/**
* struct cfg80211_bss_selection - connection parameters for BSS selection.
*
* @behaviour: requested BSS selection behaviour.
@@ -1988,6 +2061,19 @@
* the BSSID of the current association, i.e., to the value that is
* included in the Current AP address field of the Reassociation Request
* frame.
+ * @fils_erp_username: EAP re-authentication protocol (ERP) username part of the
+ * NAI or %NULL if not specified. This is used to construct FILS wrapped
+ * data IE.
+ * @fils_erp_username_len: Length of @fils_erp_username in octets.
+ * @fils_erp_realm: EAP re-authentication protocol (ERP) realm part of NAI or
+ * %NULL if not specified. This specifies the domain name of ER server and
+ * is used to construct FILS wrapped data IE.
+ * @fils_erp_realm_len: Length of @fils_erp_realm in octets.
+ * @fils_erp_next_seq_num: The next sequence number to use in the FILS ERP
+ * messages. This is also used to construct FILS wrapped data IE.
+ * @fils_erp_rrk: ERP re-authentication Root Key (rRK) used to derive additional
+ * keys in FILS or %NULL if not specified.
+ * @fils_erp_rrk_len: Length of @fils_erp_rrk in octets.
*/
struct cfg80211_connect_params {
struct ieee80211_channel *channel;
@@ -2013,6 +2099,30 @@
bool pbss;
struct cfg80211_bss_selection bss_select;
const u8 *prev_bssid;
+ const u8 *fils_erp_username;
+ size_t fils_erp_username_len;
+ const u8 *fils_erp_realm;
+ size_t fils_erp_realm_len;
+ u16 fils_erp_next_seq_num;
+ const u8 *fils_erp_rrk;
+ size_t fils_erp_rrk_len;
+};
+
+/**
+ * enum cfg80211_connect_params_changed - Connection parameters being updated
+ *
+ * This enum provides information of all connect parameters that
+ * have to be updated as part of update_connect_params() call.
+ *
+ * @UPDATE_ASSOC_IES: Indicates whether association request IEs are updated
+ * @UPDATE_FILS_ERP_INFO: Indicates that FILS connection parameters (realm,
+ * username, erp sequence number and rrk) are updated
+ * @UPDATE_AUTH_TYPE: Indicates that Authentication type is updated
+ */
+enum cfg80211_connect_params_changed {
+ UPDATE_ASSOC_IES = BIT(0),
+ UPDATE_FILS_ERP_INFO = BIT(1),
+ UPDATE_AUTH_TYPE = BIT(2),
};
/**
@@ -2039,12 +2149,27 @@
* This structure is passed to the set/del_pmksa() method for PMKSA
* caching.
*
- * @bssid: The AP's BSSID.
- * @pmkid: The PMK material itself.
+ * @bssid: The AP's BSSID (may be %NULL).
+ * @pmkid: The identifier to refer a PMKSA.
+ * @pmk: The PMK for the PMKSA identified by @pmkid. This is used for key
+ * derivation by a FILS STA. Otherwise, %NULL.
+ * @pmk_len: Length of the @pmk. The length of @pmk can differ depending on
+ * the hash algorithm used to generate this.
+ * @ssid: SSID to specify the ESS within which a PMKSA is valid when using FILS
+ * cache identifier (may be %NULL).
+ * @ssid_len: Length of the @ssid in octets.
+ * @cache_id: 2-octet cache identifier advertised by a FILS AP identifying the
+ * scope of PMKSA. This is valid only if @ssid_len is non-zero (may be
+ * %NULL).
*/
struct cfg80211_pmksa {
const u8 *bssid;
const u8 *pmkid;
+ const u8 *pmk;
+ size_t pmk_len;
+ const u8 *ssid;
+ size_t ssid_len;
+ const u8 *cache_id;
};
/**
@@ -2219,12 +2344,14 @@
/**
* struct cfg80211_gtk_rekey_data - rekey data
- * @kek: key encryption key (NL80211_KEK_LEN bytes)
+ * @kek: key encryption key
* @kck: key confirmation key (NL80211_KCK_LEN bytes)
* @replay_ctr: replay counter (NL80211_REPLAY_CTR_LEN bytes)
+ * @kek_len: Length of @kek in octets
*/
struct cfg80211_gtk_rekey_data {
const u8 *kek, *kck, *replay_ctr;
+ size_t kek_len;
};
/**
@@ -2536,10 +2663,17 @@
* cases, the result of roaming is indicated with a call to
* cfg80211_roamed() or cfg80211_roamed_bss().
* (invoked with the wireless_dev mutex held)
+ * @update_connect_params: Update the connect parameters while connected to a
+ * BSS. The updated parameters can be used by driver/firmware for
+ * subsequent BSS selection (roaming) decisions and to form the
+ * Authentication/(Re)Association Request frames. This call does not
+ * request an immediate disassociation or reassociation with the current
+ * BSS, i.e., this impacts only subsequent (re)associations. The bits in
+ * changed are defined in &enum cfg80211_connect_params_changed.
+ * (invoked with the wireless_dev mutex held)
* @disconnect: Disconnect from the BSS/ESS. Once done, call
* cfg80211_disconnected().
* (invoked with the wireless_dev mutex held)
- *
* @join_ibss: Join the specified IBSS (or create if necessary). Once done, call
* cfg80211_ibss_joined(), also call that function when changing BSSID due
* to a merge.
@@ -2706,6 +2840,8 @@
* @nan_change_conf: changes NAN configuration. The changed parameters must
* be specified in @changes (using &enum cfg80211_nan_conf_changes);
* All other parameters must be ignored.
+ *
+ * @set_multicast_to_unicast: configure multicast to unicast conversion for BSS
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -2820,6 +2956,10 @@
int (*connect)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_connect_params *sme);
+ int (*update_connect_params)(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_connect_params *sme,
+ u32 changed);
int (*disconnect)(struct wiphy *wiphy, struct net_device *dev,
u16 reason_code);
@@ -2982,6 +3122,10 @@
struct wireless_dev *wdev,
struct cfg80211_nan_conf *conf,
u32 changes);
+
+ int (*set_multicast_to_unicast)(struct wiphy *wiphy,
+ struct net_device *dev,
+ const bool enabled);
};
/*
@@ -3082,6 +3226,12 @@
* only in special cases.
* @radar_detect_widths: bitmap of channel widths supported for radar detection
* @radar_detect_regions: bitmap of regions supported for radar detection
+ * @beacon_int_min_gcd: This interface combination supports different
+ * beacon intervals.
+ * = 0 - all beacon intervals for different interface must be same.
+ * > 0 - any beacon interval for the interface part of this combination AND
+ * *GCD* of all beacon intervals from beaconing interfaces of this
+ * combination must be greater or equal to this value.
*
* With this structure the driver can describe which interface
* combinations it supports concurrently.
@@ -3147,6 +3297,7 @@
bool beacon_int_infra_match;
u8 radar_detect_widths;
u8 radar_detect_regions;
+ u32 beacon_int_min_gcd;
};
struct ieee80211_txrx_stypes {
@@ -4964,6 +5115,78 @@
#endif
/**
+ * struct cfg80211_connect_resp_params - Connection response params
+ * @status: Status code, %WLAN_STATUS_SUCCESS for successful connection, use
+ * %WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you
+ * the real status code for failures. If this call is used to report a
+ * failure due to a timeout (e.g., not receiving an Authentication frame
+ * from the AP) instead of an explicit rejection by the AP, -1 is used to
+ * indicate that this is a failure, but without a status code.
+ * @timeout_reason is used to report the reason for the timeout in that
+ * case.
+ * @bssid: The BSSID of the AP (may be %NULL)
+ * @bss: Entry of bss to which STA got connected to, can be obtained through
+ * cfg80211_get_bss() (may be %NULL). Only one parameter among @bssid and
+ * @bss needs to be specified.
+ * @req_ie: Association request IEs (may be %NULL)
+ * @req_ie_len: Association request IEs length
+ * @resp_ie: Association response IEs (may be %NULL)
+ * @resp_ie_len: Association response IEs length
+ * @fils_kek: KEK derived from a successful FILS connection (may be %NULL)
+ * @fils_kek_len: Length of @fils_kek in octets
+ * @update_erp_next_seq_num: Boolean value to specify whether the value in
+ * @fils_erp_next_seq_num is valid.
+ * @fils_erp_next_seq_num: The next sequence number to use in ERP message in
+ * FILS Authentication. This value should be specified irrespective of the
+ * status for a FILS connection.
+ * @pmk: A new PMK if derived from a successful FILS connection (may be %NULL).
+ * @pmk_len: Length of @pmk in octets
+ * @pmkid: A new PMKID if derived from a successful FILS connection or the PMKID
+ * used for this FILS connection (may be %NULL).
+ * @timeout_reason: Reason for connection timeout. This is used when the
+ * connection fails due to a timeout instead of an explicit rejection from
+ * the AP. %NL80211_TIMEOUT_UNSPECIFIED is used when the timeout reason is
+ * not known. This value is used only if @status < 0 to indicate that the
+ * failure is due to a timeout and not due to explicit rejection by the AP.
+ * This value is ignored in other cases (@status >= 0).
+ */
+struct cfg80211_connect_resp_params {
+ int status;
+ const u8 *bssid;
+ struct cfg80211_bss *bss;
+ const u8 *req_ie;
+ size_t req_ie_len;
+ const u8 *resp_ie;
+ size_t resp_ie_len;
+ const u8 *fils_kek;
+ size_t fils_kek_len;
+ bool update_erp_next_seq_num;
+ u16 fils_erp_next_seq_num;
+ const u8 *pmk;
+ size_t pmk_len;
+ const u8 *pmkid;
+ enum nl80211_timeout_reason timeout_reason;
+};
+
+/**
+ * cfg80211_connect_done - notify cfg80211 of connection result
+ *
+ * @dev: network device
+ * @params: connection response parameters
+ * @gfp: allocation flags
+ *
+ * It should be called by the underlying driver once execution of the connection
+ * request from connect() has been completed. This is similar to
+ * cfg80211_connect_bss(), but takes a structure pointer for connection response
+ * parameters. Only one of the functions among cfg80211_connect_bss(),
+ * cfg80211_connect_result(), cfg80211_connect_timeout(),
+ * and cfg80211_connect_done() should be called.
+ */
+void cfg80211_connect_done(struct net_device *dev,
+ struct cfg80211_connect_resp_params *params,
+ gfp_t gfp);
+
+/**
* cfg80211_connect_bss - notify cfg80211 of connection result
*
* @dev: network device
@@ -4974,20 +5197,50 @@
* @req_ie_len: association request IEs length
* @resp_ie: association response IEs (may be %NULL)
* @resp_ie_len: assoc response IEs length
- * @status: status code, 0 for successful connection, use
- * %WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you
- * the real status code for failures.
+ * @status: status code, %WLAN_STATUS_SUCCESS for successful connection, use
+ * %WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you
+ * the real status code for failures. If this call is used to report a
+ * failure due to a timeout (e.g., not receiving an Authentication frame
+ * from the AP) instead of an explicit rejection by the AP, -1 is used to
+ * indicate that this is a failure, but without a status code.
+ * @timeout_reason is used to report the reason for the timeout in that
+ * case.
* @gfp: allocation flags
+ * @timeout_reason: reason for connection timeout. This is used when the
+ * connection fails due to a timeout instead of an explicit rejection from
+ * the AP. %NL80211_TIMEOUT_UNSPECIFIED is used when the timeout reason is
+ * not known. This value is used only if @status < 0 to indicate that the
+ * failure is due to a timeout and not due to explicit rejection by the AP.
+ * This value is ignored in other cases (@status >= 0).
*
- * It should be called by the underlying driver whenever connect() has
- * succeeded. This is similar to cfg80211_connect_result(), but with the
- * option of identifying the exact bss entry for the connection. Only one of
- * these functions should be called.
+ * It should be called by the underlying driver once execution of the connection
+ * request from connect() has been completed. This is similar to
+ * cfg80211_connect_result(), but with the option of identifying the exact bss
+ * entry for the connection. Only one of the functions among
+ * cfg80211_connect_bss(), cfg80211_connect_result(),
+ * cfg80211_connect_timeout(), and cfg80211_connect_done() should be called.
*/
-void cfg80211_connect_bss(struct net_device *dev, const u8 *bssid,
- struct cfg80211_bss *bss, const u8 *req_ie,
- size_t req_ie_len, const u8 *resp_ie,
- size_t resp_ie_len, int status, gfp_t gfp);
+static inline void
+cfg80211_connect_bss(struct net_device *dev, const u8 *bssid,
+ struct cfg80211_bss *bss, const u8 *req_ie,
+ size_t req_ie_len, const u8 *resp_ie,
+ size_t resp_ie_len, int status, gfp_t gfp,
+ enum nl80211_timeout_reason timeout_reason)
+{
+ struct cfg80211_connect_resp_params params;
+
+ memset(&params, 0, sizeof(params));
+ params.status = status;
+ params.bssid = bssid;
+ params.bss = bss;
+ params.req_ie = req_ie;
+ params.req_ie_len = req_ie_len;
+ params.resp_ie = resp_ie;
+ params.resp_ie_len = resp_ie_len;
+ params.timeout_reason = timeout_reason;
+
+ cfg80211_connect_done(dev, &params, gfp);
+}
/**
* cfg80211_connect_result - notify cfg80211 of connection result
@@ -4998,13 +5251,16 @@
* @req_ie_len: association request IEs length
* @resp_ie: association response IEs (may be %NULL)
* @resp_ie_len: assoc response IEs length
- * @status: status code, 0 for successful connection, use
+ * @status: status code, %WLAN_STATUS_SUCCESS for successful connection, use
* %WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you
* the real status code for failures.
* @gfp: allocation flags
*
- * It should be called by the underlying driver whenever connect() has
- * succeeded.
+ * It should be called by the underlying driver once execution of the connection
+ * request from connect() has been completed. This is similar to
+ * cfg80211_connect_bss() which allows the exact bss entry to be specified. Only
+ * one of the functions among cfg80211_connect_bss(), cfg80211_connect_result(),
+ * cfg80211_connect_timeout(), and cfg80211_connect_done() should be called.
*/
static inline void
cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
@@ -5013,7 +5269,8 @@
u16 status, gfp_t gfp)
{
cfg80211_connect_bss(dev, bssid, NULL, req_ie, req_ie_len, resp_ie,
- resp_ie_len, status, gfp);
+ resp_ie_len, status, gfp,
+ NL80211_TIMEOUT_UNSPECIFIED);
}
/**
@@ -5024,19 +5281,23 @@
* @req_ie: association request IEs (maybe be %NULL)
* @req_ie_len: association request IEs length
* @gfp: allocation flags
+ * @timeout_reason: reason for connection timeout.
*
* It should be called by the underlying driver whenever connect() has failed
* in a sequence where no explicit authentication/association rejection was
* received from the AP. This could happen, e.g., due to not being able to send
* out the Authentication or Association Request frame or timing out while
- * waiting for the response.
+ * waiting for the response. Only one of the functions among
+ * cfg80211_connect_bss(), cfg80211_connect_result(),
+ * cfg80211_connect_timeout(), and cfg80211_connect_done() should be called.
*/
static inline void
cfg80211_connect_timeout(struct net_device *dev, const u8 *bssid,
- const u8 *req_ie, size_t req_ie_len, gfp_t gfp)
+ const u8 *req_ie, size_t req_ie_len, gfp_t gfp,
+ enum nl80211_timeout_reason timeout_reason)
{
cfg80211_connect_bss(dev, bssid, NULL, req_ie, req_ie_len, NULL, 0, -1,
- gfp);
+ gfp, timeout_reason);
}
/**
@@ -5644,36 +5905,20 @@
* cfg80211_check_combinations - check interface combinations
*
* @wiphy: the wiphy
- * @num_different_channels: the number of different channels we want
- * to use for verification
- * @radar_detect: a bitmap where each bit corresponds to a channel
- * width where radar detection is needed, as in the definition of
- * &struct ieee80211_iface_combination.@radar_detect_widths
- * @iftype_num: array with the numbers of interfaces of each interface
- * type. The index is the interface type as specified in &enum
- * nl80211_iftype.
+ * @params: the interface combinations parameter
*
* This function can be called by the driver to check whether a
* combination of interfaces and their types are allowed according to
* the interface combinations.
*/
int cfg80211_check_combinations(struct wiphy *wiphy,
- const int num_different_channels,
- const u8 radar_detect,
- const int iftype_num[NUM_NL80211_IFTYPES]);
+ struct iface_combination_params *params);
/**
* cfg80211_iter_combinations - iterate over matching combinations
*
* @wiphy: the wiphy
- * @num_different_channels: the number of different channels we want
- * to use for verification
- * @radar_detect: a bitmap where each bit corresponds to a channel
- * width where radar detection is needed, as in the definition of
- * &struct ieee80211_iface_combination.@radar_detect_widths
- * @iftype_num: array with the numbers of interfaces of each interface
- * type. The index is the interface type as specified in &enum
- * nl80211_iftype.
+ * @params: the interface combinations parameter
* @iter: function to call for each matching combination
* @data: pointer to pass to iter function
*
@@ -5682,9 +5927,7 @@
* purposes.
*/
int cfg80211_iter_combinations(struct wiphy *wiphy,
- const int num_different_channels,
- const u8 radar_detect,
- const int iftype_num[NUM_NL80211_IFTYPES],
+ struct iface_combination_params *params,
void (*iter)(const struct ieee80211_iface_combination *c,
void *data),
void *data);
diff --git a/include/net/cnss_utils.h b/include/net/cnss_utils.h
new file mode 100644
index 0000000..6ff0fd0
--- /dev/null
+++ b/include/net/cnss_utils.h
@@ -0,0 +1,40 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CNSS_UTILS_H_
+#define _CNSS_UTILS_H_
+
+enum cnss_utils_cc_src {
+ CNSS_UTILS_SOURCE_CORE,
+ CNSS_UTILS_SOURCE_11D,
+ CNSS_UTILS_SOURCE_USER
+};
+
+extern int cnss_utils_set_wlan_unsafe_channel(struct device *dev,
+ u16 *unsafe_ch_list,
+ u16 ch_count);
+extern int cnss_utils_get_wlan_unsafe_channel(struct device *dev,
+ u16 *unsafe_ch_list,
+ u16 *ch_count, u16 buf_len);
+extern int cnss_utils_wlan_set_dfs_nol(struct device *dev,
+ const void *info, u16 info_len);
+extern int cnss_utils_wlan_get_dfs_nol(struct device *dev,
+ void *info, u16 info_len);
+extern int cnss_utils_get_driver_load_cnt(struct device *dev);
+extern void cnss_utils_increment_driver_load_cnt(struct device *dev);
+extern int cnss_utils_set_wlan_mac_address(const u8 *in, uint32_t len);
+extern u8 *cnss_utils_get_wlan_mac_address(struct device *dev, uint32_t *num);
+extern void cnss_utils_set_cc_source(struct device *dev,
+ enum cnss_utils_cc_src cc_source);
+extern enum cnss_utils_cc_src cnss_utils_get_cc_source(struct device *dev);
+
+#endif
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index f5e625f..4341731 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -22,6 +22,7 @@
#include <net/flow.h>
#include <net/ip6_fib.h>
#include <net/sock.h>
+#include <net/lwtunnel.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/route.h>
@@ -233,4 +234,11 @@
return daddr;
}
+static inline bool rt6_duplicate_nexthop(struct rt6_info *a, struct rt6_info *b)
+{
+ return a->dst.dev == b->dst.dev &&
+ a->rt6i_idev == b->rt6i_idev &&
+ ipv6_addr_equal(&a->rt6i_gateway, &b->rt6i_gateway) &&
+ !lwtunnel_cmp_encap(a->dst.lwtstate, b->dst.lwtstate);
+}
#endif
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 91afb4a..615ce0a 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -776,6 +776,11 @@
{
u32 hash;
+ /* @flowlabel may include more than a flow label, eg, the traffic class.
+ * Here we want only the flow label value.
+ */
+ flowlabel &= IPV6_FLOWLABEL_MASK;
+
if (flowlabel ||
net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF ||
(!autolabel &&
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 2700f92..7b93ffd 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -140,6 +140,9 @@
* most likely due to retrans in 3WHS.
*/
+/* Number of full MSS to receive before Acking RFC2581 */
+#define TCP_DELACK_SEG 1
+
#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
* for local resources.
*/
@@ -274,6 +277,11 @@
extern int sysctl_tcp_default_init_rwnd;
extern atomic_long_t tcp_memory_allocated;
+
+/* sysctl variables for controlling various tcp parameters */
+extern int sysctl_tcp_delack_seg;
+extern int sysctl_tcp_use_userconfig;
+
extern struct percpu_counter tcp_sockets_allocated;
extern int tcp_memory_pressure;
@@ -364,6 +372,13 @@
struct pipe_inode_info *pipe, size_t len,
unsigned int flags);
+/* sysctl master controller */
+extern int tcp_use_userconfig_sysctl_handler(struct ctl_table *table,
+ int write, void __user *buffer, size_t *length,
+ loff_t *ppos);
+extern int tcp_proc_delayed_ack_control(struct ctl_table *table, int write,
+ void __user *buffer, size_t *length,
+ loff_t *ppos);
static inline void tcp_dec_quickack_mode(struct sock *sk,
const unsigned int pkts)
{
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 308adc4..9fce47e 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -221,9 +221,17 @@
bool no_share;
};
+struct vxlan_dev_node {
+ struct hlist_node hlist;
+ struct vxlan_dev *vxlan;
+};
+
/* Pseudo network device */
struct vxlan_dev {
- struct hlist_node hlist; /* vni hash table */
+ struct vxlan_dev_node hlist4; /* vni hash table for IPv4 socket */
+#if IS_ENABLED(CONFIG_IPV6)
+ struct vxlan_dev_node hlist6; /* vni hash table for IPv6 socket */
+#endif
struct list_head next; /* vxlan's per namespace list */
struct vxlan_sock __rcu *vn4_sock; /* listening socket for IPv4 */
#if IS_ENABLED(CONFIG_IPV6)
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 31947b9..835c30e 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -944,10 +944,6 @@
struct flow_cache_object flo;
struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
int num_pols, num_xfrms;
-#ifdef CONFIG_XFRM_SUB_POLICY
- struct flowi *origin;
- struct xfrm_selector *partner;
-#endif
u32 xfrm_genid;
u32 policy_genid;
u32 route_mtu_cached;
@@ -963,12 +959,6 @@
dst_release(xdst->route);
if (likely(xdst->u.dst.xfrm))
xfrm_state_put(xdst->u.dst.xfrm);
-#ifdef CONFIG_XFRM_SUB_POLICY
- kfree(xdst->origin);
- xdst->origin = NULL;
- kfree(xdst->partner);
- xdst->partner = NULL;
-#endif
}
#endif
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 9d249f6..69fa6b3 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -254,6 +254,7 @@
STARGET_CREATED = 1,
STARGET_RUNNING,
STARGET_REMOVE,
+ STARGET_CREATED_REMOVE,
STARGET_DEL,
};
diff --git a/include/soc/qcom/glink.h b/include/soc/qcom/glink.h
index 7b86481..4522b11 100644
--- a/include/soc/qcom/glink.h
+++ b/include/soc/qcom/glink.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -340,6 +340,22 @@
*/
unsigned long glink_qos_get_ramp_time(void *handle, size_t pkt_size);
+/**
+ * glink_start_rx_rt() - Vote for RT thread priority on RX.
+ * @handle: Channel handle for which transaction are occurring.
+ *
+ * Return: 0 on success, standard Linux error codes on failure
+ */
+int glink_start_rx_rt(void *handle);
+
+/**
+ * glink_end_rx_rt() - Vote for RT thread priority on RX.
+ * @handle: Channel handle for which transaction are occurring.
+ *
+ * Return: 0 on success, standard Linux error codes on failure
+ */
+int glink_end_rx_rt(void *handle);
+
#else /* CONFIG_MSM_GLINK */
static inline void *glink_open(const struct glink_open_config *cfg_ptr)
{
@@ -428,5 +444,16 @@
{
return 0;
}
+
+static inline int glink_start_rx_rt(void *handle)
+{
+ return -ENODEV;
+}
+
+static inline int glink_end_rx_rt(void *handle)
+{
+ return -ENODEV;
+}
+
#endif /* CONFIG_MSM_GLINK */
#endif /* _SOC_QCOM_GLINK_H_ */
diff --git a/include/soc/qcom/icnss.h b/include/soc/qcom/icnss.h
index 549cb84..3527c35 100644
--- a/include/soc/qcom/icnss.h
+++ b/include/soc/qcom/icnss.h
@@ -77,13 +77,6 @@
struct icnss_shadow_reg_cfg *shadow_reg_cfg;
};
-/* MSA Memory Regions Information */
-struct icnss_mem_region_info {
- uint64_t reg_addr;
- uint32_t size;
- uint8_t secure_flag;
-};
-
/* driver modes */
enum icnss_driver_mode {
ICNSS_MISSION,
diff --git a/include/soc/qcom/msm-core.h b/include/soc/qcom/msm-core.h
index cd44615..f1c06a6 100644
--- a/include/soc/qcom/msm-core.h
+++ b/include/soc/qcom/msm-core.h
@@ -16,9 +16,12 @@
#ifdef CONFIG_APSS_CORE_EA
void set_cpu_throttled(struct cpumask *mask, bool throttling);
struct blocking_notifier_head *get_power_update_notifier(void);
+void trigger_cpu_pwr_stats_calc(void);
+struct cpu_pwr_stats *get_cpu_pwr_stats(void);
#else
static inline void set_cpu_throttled(struct cpumask *mask, bool throttling) {}
struct blocking_notifier_head *get_power_update_notifier(void) {return NULL; }
+static inline void trigger_cpu_pwr_stats_calc(void) {}
+struct cpu_pwr_stats *get_cpu_pwr_stats(void) {return NULL; }
#endif
#endif
-
diff --git a/include/soc/qcom/pm.h b/include/soc/qcom/pm.h
index a82ada6..58d011e 100644
--- a/include/soc/qcom/pm.h
+++ b/include/soc/qcom/pm.h
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2009-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2009-2017, The Linux Foundation. All rights reserved.
* Author: San Mehat <san@android.com>
*
* This software is licensed under the terms of the GNU General Public
@@ -70,16 +70,6 @@
};
/**
- * lpm_cpu_pre_pc_cb(): API to get the L2 flag to pass to TZ
- *
- * @cpu: cpuid of the CPU going down.
- *
- * Returns the l2 flush flag enum that is passed down to TZ during power
- * collaps
- */
-enum msm_pm_l2_scm_flag lpm_cpu_pre_pc_cb(unsigned int cpu);
-
-/**
* msm_pm_sleep_mode_allow() - API to determine if sleep mode is allowed.
* @cpu: CPU on which to check for the sleep mode.
* @mode: Sleep Mode to check for.
diff --git a/include/soc/qcom/qseecomi.h b/include/soc/qcom/qseecomi.h
index 0efea04..a7d4190 100644
--- a/include/soc/qcom/qseecomi.h
+++ b/include/soc/qcom/qseecomi.h
@@ -336,7 +336,7 @@
__packed struct qseecom_continue_blocked_request_ireq {
uint32_t qsee_cmd_id;
- uint32_t app_id;
+ uint32_t app_or_session_id; /*legacy: app_id; smcinvoke: session_id*/
};
@@ -682,6 +682,9 @@
#define TZ_OS_CONTINUE_BLOCKED_REQUEST_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x04)
+#define TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x07)
+
#define TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
diff --git a/include/soc/qcom/secure_buffer.h b/include/soc/qcom/secure_buffer.h
index f0f81a9..12fa374 100644
--- a/include/soc/qcom/secure_buffer.h
+++ b/include/soc/qcom/secure_buffer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -37,7 +37,10 @@
VMID_CP_APP = 0x12,
VMID_WLAN = 0x18,
VMID_WLAN_CE = 0x19,
+ VMID_CP_SPSS_SP = 0x1A,
VMID_CP_CAMERA_PREVIEW = 0x1D,
+ VMID_CP_SPSS_SP_SHARED = 0x22,
+ VMID_CP_SPSS_HLOS_SHARED = 0x24,
VMID_LAST,
VMID_INVAL = -1
};
@@ -47,26 +50,15 @@
#define PERM_EXEC 0x1
#ifdef CONFIG_QCOM_SECURE_BUFFER
-int msm_secure_table(struct sg_table *table);
-int msm_unsecure_table(struct sg_table *table);
int hyp_assign_table(struct sg_table *table,
u32 *source_vm_list, int source_nelems,
int *dest_vmids, int *dest_perms,
int dest_nelems);
-int hyp_assign_phys(phys_addr_t addr, u64 size,
+extern int hyp_assign_phys(phys_addr_t addr, u64 size,
u32 *source_vmlist, int source_nelems,
int *dest_vmids, int *dest_perms, int dest_nelems);
-bool msm_secure_v2_is_supported(void);
const char *msm_secure_vmid_to_string(int secure_vmid);
#else
-static inline int msm_secure_table(struct sg_table *table)
-{
- return -EINVAL;
-}
-static inline int msm_unsecure_table(struct sg_table *table)
-{
- return -EINVAL;
-}
static inline int hyp_assign_table(struct sg_table *table,
u32 *source_vm_list, int source_nelems,
int *dest_vmids, int *dest_perms,
@@ -82,10 +74,6 @@
return -EINVAL;
}
-static inline bool msm_secure_v2_is_supported(void)
-{
- return false;
-}
static inline const char *msm_secure_vmid_to_string(int secure_vmid)
{
return "N/A";
diff --git a/include/soc/qcom/socinfo.h b/include/soc/qcom/socinfo.h
index dc404e4..f196d40 100644
--- a/include/soc/qcom/socinfo.h
+++ b/include/soc/qcom/socinfo.h
@@ -233,6 +233,7 @@
uint32_t socinfo_get_version(void);
uint32_t socinfo_get_raw_id(void);
char *socinfo_get_build_id(void);
+char *socinfo_get_id_string(void);
uint32_t socinfo_get_platform_type(void);
uint32_t socinfo_get_platform_subtype(void);
uint32_t socinfo_get_platform_version(void);
diff --git a/include/sound/apr_audio-v2.h b/include/sound/apr_audio-v2.h
index 14f6445..aa95178 100644
--- a/include/sound/apr_audio-v2.h
+++ b/include/sound/apr_audio-v2.h
@@ -8537,6 +8537,8 @@
#define VSS_ICOMMON_CMD_GET_PARAM_V2 0x0001133E
#define VSS_ICOMMON_RSP_GET_PARAM 0x00011008
+#define VSS_MAX_AVCS_NUM_SERVICES 25
+
/* ID of the Bass Boost module.
* This module supports the following parameter IDs:
* - #AUDPROC_PARAM_ID_BASS_BOOST_ENABLE
@@ -9197,6 +9199,74 @@
*/
} __packed;
+/* Q6Core Specific */
+#define AVCS_CMD_GET_FWK_VERSION (0x0001292C)
+#define AVCS_CMDRSP_GET_FWK_VERSION (0x0001292D)
+
+#define AVCS_SERVICE_ID_ALL (0xFFFFFFFF)
+#define APRV2_IDS_SERVICE_ID_ADSP_CVP_V (0xB)
+
+struct avcs_get_fwk_version {
+ /*
+ * Indicates the major version of the AVS build.
+ * This value is incremented on chipset family boundaries.
+ */
+ uint32_t build_major_version;
+
+ /*
+ * Minor version of the AVS build.
+ * This value represents the mainline to which the AVS build belongs.
+ */
+ uint32_t build_minor_version;
+
+ /* Indicates the AVS branch version to which the image belongs. */
+ uint32_t build_branch_version;
+
+ /* Indicates the AVS sub-branch or customer product line information. */
+ uint32_t build_subbranch_version;
+
+ /* Number of supported AVS services in the current build. */
+ uint32_t num_services;
+};
+
+struct avs_svc_api_info {
+ /*
+ * APRV2 service IDs for the individual static services.
+ *
+ * @values
+ * - APRV2_IDS_SERVICE_ID_ADSP_CORE_V
+ * - APRV2_IDS_SERVICE_ID_ADSP_AFE_V
+ * - APRV2_IDS_SERVICE_ID_ADSP_ASM_V
+ * - APRV2_IDS_SERVICE_ID_ADSP_ADM_V
+ * - APRV2_IDS_SERVICE_ID_ADSP_MVM_V
+ * - APRV2_IDS_SERVICE_ID_ADSP_CVS_V
+ * - APRV2_IDS_SERVICE_ID_ADSP_CVP_V
+ * - APRV2_IDS_SERVICE_ID_ADSP_LSM_V
+ */
+ uint32_t service_id;
+
+ /*
+ * Indicates the API version of the service.
+ *
+ * Each new API update that warrants a change on the HLOS side triggers
+ * an increment in the version.
+ */
+ uint32_t api_version;
+
+ /*
+ * Indicates the API increments on a sub-branch (not on the mainline).
+ *
+ * API branch version numbers can increment independently on different
+ * sub-branches.
+ */
+ uint32_t api_branch_version;
+};
+
+struct avcs_fwk_ver_info {
+ struct avcs_get_fwk_version avcs_fwk_version;
+ struct avs_svc_api_info *services;
+} __packed;
+
/* LSM Specific */
#define VW_FEAT_DIM (39)
diff --git a/include/sound/q6core.h b/include/sound/q6core.h
index 0b8309a..111af67 100644
--- a/include/sound/q6core.h
+++ b/include/sound/q6core.h
@@ -13,6 +13,7 @@
#ifndef __Q6CORE_H__
#define __Q6CORE_H__
#include <linux/qdsp6v2/apr.h>
+#include <sound/apr_audio-v2.h>
@@ -21,6 +22,11 @@
bool q6core_is_adsp_ready(void);
+int q6core_get_service_version(uint32_t service_id,
+ struct avcs_fwk_ver_info *ver_info,
+ size_t size);
+size_t q6core_get_avcs_service_size(uint32_t service_id);
+
#define ADSP_CMD_SET_DTS_EAGLE_DATA_ID 0x00012919
#define DTS_EAGLE_LICENSE_ID 0x00028346
struct adsp_dts_eagle {
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 128da7b..f0da77a 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -466,8 +466,6 @@
struct snd_soc_dapm_context *snd_soc_dapm_kcontrol_dapm(
struct snd_kcontrol *kcontrol);
-struct snd_soc_dapm_widget_list *dapm_kcontrol_get_wlist(
- const struct snd_kcontrol *kcontrol);
struct snd_soc_dapm_widget *snd_soc_dapm_kcontrol_widget(
struct snd_kcontrol *kcontrol);
diff --git a/include/sound/wcd-dsp-mgr.h b/include/sound/wcd-dsp-mgr.h
index 2beb9b3..8a4c6d9 100644
--- a/include/sound/wcd-dsp-mgr.h
+++ b/include/sound/wcd-dsp-mgr.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -15,6 +15,7 @@
#define __WCD_DSP_MGR_H__
#include <linux/types.h>
+#include <linux/device.h>
/*
* These enums correspond to the component types
@@ -63,6 +64,9 @@
/* Suspend/Resume related */
WDSP_EVENT_SUSPEND,
WDSP_EVENT_RESUME,
+
+ /* Misc */
+ WDSP_EVENT_GET_DEVOPS
};
enum wdsp_signal {
@@ -109,6 +113,8 @@
* their own ops to manager driver
* @get_dev_for_cmpnt: components can use this to get handle
* to struct device * of any other component
+ * @get_devops_for_cmpnt: components can use this to get ops
+ * from other related components.
* @signal_handler: callback to notify manager driver that signal
* has occurred. Cannot be called from interrupt
* context as this can sleep
@@ -126,6 +132,8 @@
struct wdsp_cmpnt_ops *ops);
struct device *(*get_dev_for_cmpnt)(struct device *wdsp_dev,
enum wdsp_cmpnt_type type);
+ int (*get_devops_for_cmpnt)(struct device *wdsp_dev,
+ enum wdsp_cmpnt_type type, void *data);
int (*signal_handler)(struct device *wdsp_dev,
enum wdsp_signal signal, void *arg);
int (*vote_for_dsp)(struct device *wdsp_dev, bool vote);
@@ -133,4 +141,6 @@
int (*resume)(struct device *wdsp_dev);
};
+int wcd_dsp_mgr_init(void);
+void wcd_dsp_mgr_exit(void);
#endif /* end of __WCD_DSP_MGR_H__ */
diff --git a/include/sound/wcd-spi.h b/include/sound/wcd-spi.h
index 1fff58d..b85c68e 100644
--- a/include/sound/wcd-spi.h
+++ b/include/sound/wcd-spi.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -35,23 +35,10 @@
u32 flags;
};
-#ifdef CONFIG_SND_SOC_WCD_SPI
-
-int wcd_spi_data_write(struct spi_device *spi, struct wcd_spi_msg *msg);
-int wcd_spi_data_read(struct spi_device *spi, struct wcd_spi_msg *msg);
-
-#else
-
-int wcd_spi_data_write(struct spi_device *spi, struct wcd_spi_msg *msg)
-{
- return -ENODEV;
-}
-
-int wcd_spi_data_read(struct spi_device *spi, struct wcd_spi_msg *msg)
-{
- return -ENODEV;
-}
-
-#endif /* End of CONFIG_SND_SOC_WCD_SPI */
+struct wcd_spi_ops {
+ struct spi_device *spi_dev;
+ int (*read_dev)(struct spi_device *spi, struct wcd_spi_msg *msg);
+ int (*write_dev)(struct spi_device *spi, struct wcd_spi_msg *msg);
+};
#endif /* End of __WCD_SPI_H__ */
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 4ac24f5..33b2e75 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -64,6 +64,14 @@
#define TA_DEFAULT_FABRIC_PROT_TYPE 0
/* TPG status needs to be enabled to return sendtargets discovery endpoint info */
#define TA_DEFAULT_TPG_ENABLED_SENDTARGETS 1
+/*
+ * Used to control the sending of keys with optional to respond state bit,
+ * as a workaround for non RFC compliant initiators, that do not propose,
+ * nor respond to specific keys required for login to complete.
+ *
+ * See iscsi_check_proposer_for_optional_reply() for more details.
+ */
+#define TA_DEFAULT_LOGIN_KEYS_WORKAROUND 1
#define ISCSI_IOV_DATA_BUFFER 5
@@ -766,6 +774,7 @@
u8 t10_pi;
u32 fabric_prot_type;
u32 tpg_enabled_sendtargets;
+ u32 login_keys_workaround;
struct iscsi_portal_group *tpg;
};
diff --git a/include/trace/events/almk.h b/include/trace/events/almk.h
new file mode 100644
index 0000000..85d712d
--- /dev/null
+++ b/include/trace/events/almk.h
@@ -0,0 +1,84 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM almk
+
+#if !defined(_TRACE_EVENT_ALMK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_EVENT_ALMK_H
+
+#include <linux/tracepoint.h>
+#include <linux/types.h>
+
+TRACE_EVENT(almk_vmpressure,
+
+ TP_PROTO(unsigned long pressure,
+ int other_free,
+ int other_file),
+
+ TP_ARGS(pressure, other_free, other_file),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, pressure)
+ __field(int, other_free)
+ __field(int, other_file)
+ ),
+
+ TP_fast_assign(
+ __entry->pressure = pressure;
+ __entry->other_free = other_free;
+ __entry->other_file = other_file;
+ ),
+
+ TP_printk("%lu, %d, %d",
+ __entry->pressure, __entry->other_free,
+ __entry->other_file)
+);
+
+TRACE_EVENT(almk_shrink,
+
+ TP_PROTO(int tsize,
+ int vmp,
+ int other_free,
+ int other_file,
+ short adj),
+
+ TP_ARGS(tsize, vmp, other_free, other_file, adj),
+
+ TP_STRUCT__entry(
+ __field(int, tsize)
+ __field(int, vmp)
+ __field(int, other_free)
+ __field(int, other_file)
+ __field(short, adj)
+ ),
+
+ TP_fast_assign(
+ __entry->tsize = tsize;
+ __entry->vmp = vmp;
+ __entry->other_free = other_free;
+ __entry->other_file = other_file;
+ __entry->adj = adj;
+ ),
+
+ TP_printk("%d, %d, %d, %d, %d",
+ __entry->tsize,
+ __entry->vmp,
+ __entry->other_free,
+ __entry->other_file,
+ __entry->adj)
+);
+
+#endif
+
+#include <trace/define_trace.h>
+
diff --git a/include/trace/events/process_reclaim.h b/include/trace/events/process_reclaim.h
new file mode 100644
index 0000000..6fcede7
--- /dev/null
+++ b/include/trace/events/process_reclaim.h
@@ -0,0 +1,85 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM process_reclaim
+
+#if !defined(_TRACE_EVENT_PROCESSRECLAIM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_EVENT_PROCESSRECLAIM_H
+
+#include <linux/tracepoint.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+
+TRACE_EVENT(process_reclaim,
+
+ TP_PROTO(int tasksize,
+ short oom_score_adj,
+ int nr_scanned, int nr_reclaimed,
+ int per_swap_size, int total_sz,
+ int nr_to_reclaim),
+
+ TP_ARGS(tasksize, oom_score_adj, nr_scanned,
+ nr_reclaimed, per_swap_size,
+ total_sz, nr_to_reclaim),
+
+ TP_STRUCT__entry(
+ __field(int, tasksize)
+ __field(short, oom_score_adj)
+ __field(int, nr_scanned)
+ __field(int, nr_reclaimed)
+ __field(int, per_swap_size)
+ __field(int, total_sz)
+ __field(int, nr_to_reclaim)
+ ),
+
+ TP_fast_assign(
+ __entry->tasksize = tasksize;
+ __entry->oom_score_adj = oom_score_adj;
+ __entry->nr_scanned = nr_scanned;
+ __entry->nr_reclaimed = nr_reclaimed;
+ __entry->per_swap_size = per_swap_size;
+ __entry->total_sz = total_sz;
+ __entry->nr_to_reclaim = nr_to_reclaim;
+ ),
+
+ TP_printk("%d, %hd, %d, %d, %d, %d, %d",
+ __entry->tasksize, __entry->oom_score_adj,
+ __entry->nr_scanned, __entry->nr_reclaimed,
+ __entry->per_swap_size, __entry->total_sz,
+ __entry->nr_to_reclaim)
+);
+
+TRACE_EVENT(process_reclaim_eff,
+
+ TP_PROTO(int efficiency, int reclaim_avg_efficiency),
+
+ TP_ARGS(efficiency, reclaim_avg_efficiency),
+
+ TP_STRUCT__entry(
+ __field(int, efficiency)
+ __field(int, reclaim_avg_efficiency)
+ ),
+
+ TP_fast_assign(
+ __entry->efficiency = efficiency;
+ __entry->reclaim_avg_efficiency = reclaim_avg_efficiency;
+ ),
+
+ TP_printk("%d, %d", __entry->efficiency,
+ __entry->reclaim_avg_efficiency)
+);
+
+#endif
+
+#include <trace/define_trace.h>
+
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index bf8f149..cf3f5e3 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -71,10 +71,8 @@
__field(unsigned long, cpu_load )
__field(unsigned int, rt_nr_running )
__field(unsigned int, cpus_allowed )
-#ifdef CONFIG_SCHED_HMP
__field(unsigned int, demand )
__field(unsigned int, pred_demand )
-#endif
),
TP_fast_assign(
@@ -87,24 +85,17 @@
__entry->cpu_load = task_rq(p)->cpu_load[0];
__entry->rt_nr_running = task_rq(p)->rt.rt_nr_running;
__entry->cpus_allowed = cpus_allowed;
-#ifdef CONFIG_SCHED_HMP
- __entry->demand = p->ravg.demand;
- __entry->pred_demand = p->ravg.pred_demand;
-#endif
+ __entry->demand = task_load(p);
+ __entry->pred_demand = task_pl(p);
),
- TP_printk("cpu=%d %s comm=%s pid=%d prio=%d nr_running=%u cpu_load=%lu rt_nr_running=%u affine=%x"
-#ifdef CONFIG_SCHED_HMP
- " demand=%u pred_demand=%u"
-#endif
- , __entry->cpu,
+ TP_printk("cpu=%d %s comm=%s pid=%d prio=%d nr_running=%u cpu_load=%lu rt_nr_running=%u affine=%x demand=%u pred_demand=%u",
+ __entry->cpu,
__entry->enqueue ? "enqueue" : "dequeue",
__entry->comm, __entry->pid,
__entry->prio, __entry->nr_running,
__entry->cpu_load, __entry->rt_nr_running, __entry->cpus_allowed
-#ifdef CONFIG_SCHED_HMP
, __entry->demand, __entry->pred_demand
-#endif
)
);
@@ -236,6 +227,7 @@
__field( int, samples )
__field(enum task_event, evt )
__field(unsigned int, demand )
+ __field(unsigned int, coloc_demand )
__field(unsigned int, pred_demand )
__array( u32, hist, RAVG_HIST_SIZE_MAX)
__field(unsigned int, nr_big_tasks )
@@ -249,19 +241,20 @@
__entry->samples = samples;
__entry->evt = evt;
__entry->demand = p->ravg.demand;
+ __entry->coloc_demand = p->ravg.coloc_demand;
__entry->pred_demand = p->ravg.pred_demand;
memcpy(__entry->hist, p->ravg.sum_history,
RAVG_HIST_SIZE_MAX * sizeof(u32));
- __entry->nr_big_tasks = rq->hmp_stats.nr_big_tasks;
+ __entry->nr_big_tasks = rq->walt_stats.nr_big_tasks;
__entry->cpu = rq->cpu;
),
- TP_printk("%d (%s): runtime %u samples %d event %s demand %u pred_demand %u"
+ TP_printk("%d (%s): runtime %u samples %d event %s demand %u coloc_demand %u pred_demand %u"
" (hist: %u %u %u %u %u) cpu %d nr_big %u",
__entry->pid, __entry->comm,
__entry->runtime, __entry->samples,
task_event_names[__entry->evt],
- __entry->demand, __entry->pred_demand,
+ __entry->demand, __entry->coloc_demand, __entry->pred_demand,
__entry->hist[0], __entry->hist[1],
__entry->hist[2], __entry->hist[3],
__entry->hist[4], __entry->cpu, __entry->nr_big_tasks)
@@ -317,6 +310,7 @@
__field( u64, irqtime )
__field(enum task_event, evt )
__field(unsigned int, demand )
+ __field(unsigned int, coloc_demand )
__field(unsigned int, sum )
__field( int, cpu )
__field(unsigned int, pred_demand )
@@ -350,6 +344,7 @@
__entry->mark_start = p->ravg.mark_start;
__entry->delta_m = (wallclock - p->ravg.mark_start);
__entry->demand = p->ravg.demand;
+ __entry->coloc_demand = p->ravg.coloc_demand;
__entry->sum = p->ravg.sum;
__entry->irqtime = irqtime;
__entry->pred_demand = p->ravg.pred_demand;
@@ -370,12 +365,12 @@
__entry->prev_top = rq->prev_top;
),
- TP_printk("wc %llu ws %llu delta %llu event %s cpu %d cur_freq %u cur_pid %d task %d (%s) ms %llu delta %llu demand %u sum %u irqtime %llu pred_demand %u rq_cs %llu rq_ps %llu cur_window %u (%s) prev_window %u (%s) nt_cs %llu nt_ps %llu active_wins %u grp_cs %lld grp_ps %lld, grp_nt_cs %llu, grp_nt_ps: %llu curr_top %u prev_top %u",
+ TP_printk("wc %llu ws %llu delta %llu event %s cpu %d cur_freq %u cur_pid %d task %d (%s) ms %llu delta %llu demand %u coloc_demand: %u sum %u irqtime %llu pred_demand %u rq_cs %llu rq_ps %llu cur_window %u (%s) prev_window %u (%s) nt_cs %llu nt_ps %llu active_wins %u grp_cs %lld grp_ps %lld, grp_nt_cs %llu, grp_nt_ps: %llu curr_top %u prev_top %u",
__entry->wallclock, __entry->win_start, __entry->delta,
task_event_names[__entry->evt], __entry->cpu,
__entry->cur_freq, __entry->cur_pid,
__entry->pid, __entry->comm, __entry->mark_start,
- __entry->delta_m, __entry->demand,
+ __entry->delta_m, __entry->demand, __entry->coloc_demand,
__entry->sum, __entry->irqtime, __entry->pred_demand,
__entry->rq_cs, __entry->rq_ps, __entry->curr_window,
__window_print(p, __get_dynamic_array(curr_sum), nr_cpu_ids),
@@ -545,9 +540,9 @@
#ifdef CONFIG_SCHED_WALT
DECLARE_EVENT_CLASS(sched_cpu_load,
- TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
+ TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost),
- TP_ARGS(rq, idle, irqload, power_cost, temp),
+ TP_ARGS(rq, idle, irqload, power_cost),
TP_STRUCT__entry(
__field(unsigned int, cpu )
@@ -562,197 +557,79 @@
__field(unsigned int, power_cost )
__field( int, cstate )
__field( int, dstate )
- __field( int, temp )
),
TP_fast_assign(
__entry->cpu = rq->cpu;
__entry->idle = idle;
__entry->nr_running = rq->nr_running;
- __entry->nr_big_tasks = rq->hmp_stats.nr_big_tasks;
+ __entry->nr_big_tasks = rq->walt_stats.nr_big_tasks;
__entry->load_scale_factor = cpu_load_scale_factor(rq->cpu);
__entry->capacity = cpu_capacity(rq->cpu);
- __entry->cumulative_runnable_avg = rq->hmp_stats.cumulative_runnable_avg;
+ __entry->cumulative_runnable_avg = rq->walt_stats.cumulative_runnable_avg;
__entry->irqload = irqload;
__entry->max_freq = cpu_max_freq(rq->cpu);
__entry->power_cost = power_cost;
__entry->cstate = rq->cstate;
__entry->dstate = rq->cluster->dstate;
- __entry->temp = temp;
),
- TP_printk("cpu %u idle %d nr_run %u nr_big %u lsf %u capacity %u cr_avg %llu irqload %llu fmax %u power_cost %u cstate %d dstate %d temp %d",
+ TP_printk("cpu %u idle %d nr_run %u nr_big %u lsf %u capacity %u cr_avg %llu irqload %llu fmax %u power_cost %u cstate %d dstate %d",
__entry->cpu, __entry->idle, __entry->nr_running, __entry->nr_big_tasks,
__entry->load_scale_factor, __entry->capacity,
__entry->cumulative_runnable_avg, __entry->irqload,
__entry->max_freq, __entry->power_cost, __entry->cstate,
- __entry->dstate, __entry->temp)
+ __entry->dstate)
);
DEFINE_EVENT(sched_cpu_load, sched_cpu_load_lb,
- TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
- TP_ARGS(rq, idle, irqload, power_cost, temp)
+ TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost),
+ TP_ARGS(rq, idle, irqload, power_cost)
+);
+
+TRACE_EVENT(sched_load_to_gov,
+
+ TP_PROTO(struct rq *rq, u64 aggr_grp_load, u32 tt_load, u64 freq_aggr_thresh, u64 load, int policy),
+ TP_ARGS(rq, aggr_grp_load, tt_load, freq_aggr_thresh, load, policy),
+
+ TP_STRUCT__entry(
+ __field( int, cpu )
+ __field( int, policy )
+ __field( int, ed_task_pid )
+ __field( u64, aggr_grp_load )
+ __field( u64, freq_aggr_thresh )
+ __field( u64, tt_load )
+ __field( u64, rq_ps )
+ __field( u64, grp_rq_ps )
+ __field( u64, nt_ps )
+ __field( u64, grp_nt_ps )
+ __field( u64, pl )
+ __field( u64, load )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu_of(rq);
+ __entry->policy = policy;
+ __entry->ed_task_pid = rq->ed_task ? rq->ed_task->pid : -1;
+ __entry->aggr_grp_load = aggr_grp_load;
+ __entry->freq_aggr_thresh = freq_aggr_thresh;
+ __entry->tt_load = tt_load;
+ __entry->rq_ps = rq->prev_runnable_sum;
+ __entry->grp_rq_ps = rq->grp_time.prev_runnable_sum;
+ __entry->nt_ps = rq->nt_prev_runnable_sum;
+ __entry->grp_nt_ps = rq->grp_time.nt_prev_runnable_sum;
+ __entry->pl = rq->walt_stats.pred_demands_sum;
+ __entry->load = load;
+ ),
+
+ TP_printk("cpu=%d policy=%d ed_task_pid=%d aggr_grp_load=%llu freq_aggr_thresh=%llu tt_load=%llu rq_ps=%llu grp_rq_ps=%llu nt_ps=%llu grp_nt_ps=%llu pl=%llu load=%llu",
+ __entry->cpu, __entry->policy, __entry->ed_task_pid,
+ __entry->aggr_grp_load, __entry->freq_aggr_thresh,
+ __entry->tt_load, __entry->rq_ps, __entry->grp_rq_ps,
+ __entry->nt_ps, __entry->grp_nt_ps, __entry->pl, __entry->load)
);
#endif
-#ifdef CONFIG_SCHED_HMP
-
-TRACE_EVENT(sched_task_load,
-
- TP_PROTO(struct task_struct *p, bool boost, int reason,
- bool sync, bool need_idle, u32 flags, int best_cpu),
-
- TP_ARGS(p, boost, reason, sync, need_idle, flags, best_cpu),
-
- TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
- __field( pid_t, pid )
- __field(unsigned int, demand )
- __field( bool, boost )
- __field( int, reason )
- __field( bool, sync )
- __field( bool, need_idle )
- __field( u32, flags )
- __field( int, best_cpu )
- __field( u64, latency )
- __field( int, grp_id )
- __field( u64, avg_burst )
- __field( u64, avg_sleep )
- ),
-
- TP_fast_assign(
- memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
- __entry->pid = p->pid;
- __entry->demand = p->ravg.demand;
- __entry->boost = boost;
- __entry->reason = reason;
- __entry->sync = sync;
- __entry->need_idle = need_idle;
- __entry->flags = flags;
- __entry->best_cpu = best_cpu;
- __entry->latency = p->state == TASK_WAKING ?
- sched_ktime_clock() -
- p->ravg.mark_start : 0;
- __entry->grp_id = p->grp ? p->grp->id : 0;
- __entry->avg_burst = p->ravg.avg_burst;
- __entry->avg_sleep = p->ravg.avg_sleep_time;
- ),
-
- TP_printk("%d (%s): demand=%u boost=%d reason=%d sync=%d need_idle=%d flags=%x grp=%d best_cpu=%d latency=%llu avg_burst=%llu avg_sleep=%llu",
- __entry->pid, __entry->comm, __entry->demand,
- __entry->boost, __entry->reason, __entry->sync,
- __entry->need_idle, __entry->flags, __entry->grp_id,
- __entry->best_cpu, __entry->latency, __entry->avg_burst,
- __entry->avg_sleep)
-);
-
-DEFINE_EVENT(sched_cpu_load, sched_cpu_load_wakeup,
- TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
- TP_ARGS(rq, idle, irqload, power_cost, temp)
-);
-
-DEFINE_EVENT(sched_cpu_load, sched_cpu_load_cgroup,
- TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
- TP_ARGS(rq, idle, irqload, power_cost, temp)
-);
-
-TRACE_EVENT(sched_reset_all_window_stats,
-
- TP_PROTO(u64 window_start, u64 window_size, u64 time_taken,
- int reason, unsigned int old_val, unsigned int new_val),
-
- TP_ARGS(window_start, window_size, time_taken,
- reason, old_val, new_val),
-
- TP_STRUCT__entry(
- __field( u64, window_start )
- __field( u64, window_size )
- __field( u64, time_taken )
- __field( int, reason )
- __field(unsigned int, old_val )
- __field(unsigned int, new_val )
- ),
-
- TP_fast_assign(
- __entry->window_start = window_start;
- __entry->window_size = window_size;
- __entry->time_taken = time_taken;
- __entry->reason = reason;
- __entry->old_val = old_val;
- __entry->new_val = new_val;
- ),
-
- TP_printk("time_taken %llu window_start %llu window_size %llu reason %s old_val %u new_val %u",
- __entry->time_taken, __entry->window_start,
- __entry->window_size,
- sched_window_reset_reasons[__entry->reason],
- __entry->old_val, __entry->new_val)
-);
-
-TRACE_EVENT(sched_get_busy,
-
- TP_PROTO(int cpu, u64 load, u64 nload, u64 pload, int early),
-
- TP_ARGS(cpu, load, nload, pload, early),
-
- TP_STRUCT__entry(
- __field( int, cpu )
- __field( u64, load )
- __field( u64, nload )
- __field( u64, pload )
- __field( int, early )
- ),
-
- TP_fast_assign(
- __entry->cpu = cpu;
- __entry->load = load;
- __entry->nload = nload;
- __entry->pload = pload;
- __entry->early = early;
- ),
-
- TP_printk("cpu %d load %lld new_task_load %lld predicted_load %lld early %d",
- __entry->cpu, __entry->load, __entry->nload,
- __entry->pload, __entry->early)
-);
-
-TRACE_EVENT(sched_freq_alert,
-
- TP_PROTO(int cpu, int pd_notif, int check_groups, struct rq *rq,
- u64 new_load),
-
- TP_ARGS(cpu, pd_notif, check_groups, rq, new_load),
-
- TP_STRUCT__entry(
- __field( int, cpu )
- __field( int, pd_notif )
- __field( int, check_groups )
- __field( u64, old_busy_time )
- __field( u64, ps )
- __field( u64, new_load )
- __field( u64, old_pred )
- __field( u64, new_pred )
- ),
-
- TP_fast_assign(
- __entry->cpu = cpu;
- __entry->pd_notif = pd_notif;
- __entry->check_groups = check_groups;
- __entry->old_busy_time = rq->old_busy_time;
- __entry->ps = rq->prev_runnable_sum;
- __entry->new_load = new_load;
- __entry->old_pred = rq->old_estimated_time;
- __entry->new_pred = rq->hmp_stats.pred_demands_sum;
- ),
-
- TP_printk("cpu %d pd_notif=%d check_groups %d old_busy_time=%llu prev_sum=%lld new_load=%llu old_pred=%llu new_pred=%llu",
- __entry->cpu, __entry->pd_notif, __entry->check_groups,
- __entry->old_busy_time, __entry->ps, __entry->new_load,
- __entry->old_pred, __entry->new_pred)
-);
-
-#endif /* CONFIG_SCHED_HMP */
-
#ifdef CONFIG_SMP
TRACE_EVENT(sched_cpu_util,
diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h
index 0c68ae22..33dfa76 100644
--- a/include/trace/events/skb.h
+++ b/include/trace/events/skb.h
@@ -50,6 +50,33 @@
TP_printk("skbaddr=%p", __entry->skbaddr)
);
+TRACE_EVENT(print_skb_gso,
+
+ TP_PROTO(struct sk_buff *skb, __be16 src, __be16 dest),
+
+ TP_ARGS(skb, src, dest),
+
+ TP_STRUCT__entry(
+ __field(void *, skbaddr)
+ __field(int, len)
+ __field(int, data_len)
+ __field(__be16, src)
+ __field(__be16, dest)
+ ),
+
+ TP_fast_assign(
+ __entry->skbaddr = skb;
+ __entry->len = skb->len;
+ __entry->data_len = skb->data_len;
+ __entry->src = src;
+ __entry->dest = dest;
+ ),
+
+ TP_printk("GSO: skbaddr=%pK, len=%d, data_len=%d, src=%u, dest=%u",
+ __entry->skbaddr, __entry->len, __entry->data_len,
+ be16_to_cpu(__entry->src), be16_to_cpu(__entry->dest))
+);
+
TRACE_EVENT(skb_copy_datagram_iovec,
TP_PROTO(const struct sk_buff *skb, int len),
diff --git a/include/trace/events/thermal.h b/include/trace/events/thermal.h
index 031ae49..c0475a2 100644
--- a/include/trace/events/thermal.h
+++ b/include/trace/events/thermal.h
@@ -45,6 +45,23 @@
__entry->temp)
);
+TRACE_EVENT(cdev_update_start,
+
+ TP_PROTO(struct thermal_cooling_device *cdev),
+
+ TP_ARGS(cdev),
+
+ TP_STRUCT__entry(
+ __string(type, cdev->type)
+ ),
+
+ TP_fast_assign(
+ __assign_str(type, cdev->type);
+ ),
+
+ TP_printk("type=%s update start", __get_str(type))
+);
+
TRACE_EVENT(cdev_update,
TP_PROTO(struct thermal_cooling_device *cdev, unsigned long target,
@@ -98,6 +115,75 @@
show_tzt_type(__entry->trip_type))
);
+TRACE_EVENT(thermal_handle_trip,
+
+ TP_PROTO(struct thermal_zone_device *tz, int trip),
+
+ TP_ARGS(tz, trip),
+
+ TP_STRUCT__entry(
+ __string(thermal_zone, tz->type)
+ __field(int, id)
+ __field(int, trip)
+ ),
+
+ TP_fast_assign(
+ __assign_str(thermal_zone, tz->type);
+ __entry->id = tz->id;
+ __entry->trip = trip;
+ ),
+
+ TP_printk("thermal_zone=%s id=%d handle trip=%d",
+ __get_str(thermal_zone), __entry->id, __entry->trip)
+);
+
+TRACE_EVENT(thermal_device_update,
+
+ TP_PROTO(struct thermal_zone_device *tz, int event),
+
+ TP_ARGS(tz, event),
+
+ TP_STRUCT__entry(
+ __string(thermal_zone, tz->type)
+ __field(int, id)
+ __field(int, event)
+ ),
+
+ TP_fast_assign(
+ __assign_str(thermal_zone, tz->type);
+ __entry->id = tz->id;
+ __entry->event = event;
+ ),
+
+ TP_printk("thermal_zone=%s id=%d received event:%d",
+ __get_str(thermal_zone), __entry->id, __entry->event)
+);
+
+TRACE_EVENT(thermal_set_trip,
+
+ TP_PROTO(struct thermal_zone_device *tz),
+
+ TP_ARGS(tz),
+
+ TP_STRUCT__entry(
+ __string(thermal_zone, tz->type)
+ __field(int, id)
+ __field(int, low)
+ __field(int, high)
+ ),
+
+ TP_fast_assign(
+ __assign_str(thermal_zone, tz->type);
+ __entry->id = tz->id;
+ __entry->low = tz->prev_low_trip;
+ __entry->high = tz->prev_high_trip;
+ ),
+
+ TP_printk("thermal_zone=%s id=%d low trip=%d high trip=%d",
+ __get_str(thermal_zone), __entry->id, __entry->low,
+ __entry->high)
+);
+
TRACE_EVENT(thermal_power_cpu_get_power,
TP_PROTO(const struct cpumask *cpus, unsigned long freq, u32 *load,
size_t load_len, u32 dynamic_power, u32 static_power),
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index eb7e0c6..71c2c9e 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -322,6 +322,7 @@
#define DRM_EVENT_AD_BACKLIGHT 0x80000001
#define DRM_EVENT_CRTC_POWER 0x80000002
#define DRM_EVENT_SYS_BACKLIGHT 0x80000003
+#define DRM_EVENT_SDE_POWER 0x80000004
#define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
#define DRM_IOCTL_MSM_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new)
diff --git a/include/uapi/drm/msm_drm_pp.h b/include/uapi/drm/msm_drm_pp.h
index d9155a9..c7f2308 100644
--- a/include/uapi/drm/msm_drm_pp.h
+++ b/include/uapi/drm/msm_drm_pp.h
@@ -28,17 +28,35 @@
/**
* struct drm_msm_pcc - pcc feature structure
- * flags: for customizing operations
- * r: red coefficients.
- * g: green coefficients.
- * b: blue coefficients.
+ * @flags: for customizing operations
+ * @r: red coefficients.
+ * @g: green coefficients.
+ * @b: blue coefficients.
+ * @r_rr: second order coefficients
+ * @r_gg: second order coefficients
+ * @r_bb: second order coefficients
+ * @g_rr: second order coefficients
+ * @g_gg: second order coefficients
+ * @g_bb: second order coefficients
+ * @b_rr: second order coefficients
+ * @b_gg: second order coefficients
+ * @b_bb: second order coefficients
*/
-
+#define DRM_MSM_PCC3
struct drm_msm_pcc {
__u64 flags;
struct drm_msm_pcc_coeff r;
struct drm_msm_pcc_coeff g;
struct drm_msm_pcc_coeff b;
+ __u32 r_rr;
+ __u32 r_gg;
+ __u32 r_bb;
+ __u32 g_rr;
+ __u32 g_gg;
+ __u32 g_bb;
+ __u32 b_rr;
+ __u32 b_gg;
+ __u32 b_bb;
};
/* struct drm_msm_pa_vlut - picture adjustment vLUT structure
@@ -135,6 +153,26 @@
__u32 c2[PGC_TBL_LEN];
};
+#define IGC_TBL_LEN 256
+#define IGC_DITHER_ENABLE (1 << 0)
+/**
+ * struct drm_msm_igc_lut - igc lut feature structure
+ * @flags: flags for the feature customization, values can be:
+ * - IGC_DITHER_ENABLE: Enable dither functionality
+ * @c0: color0 component lut
+ * @c1: color1 component lut
+ * @c2: color2 component lut
+ * @strength: dither strength, considered valid when IGC_DITHER_ENABLE
+ * is set in flags. Strength value based on source bit width.
+ */
+struct drm_msm_igc_lut {
+ __u64 flags;
+ __u32 c0[IGC_TBL_LEN];
+ __u32 c1[IGC_TBL_LEN];
+ __u32 c2[IGC_TBL_LEN];
+ __u32 strength;
+};
+
#define AD4_LUT_GRP0_SIZE 33
#define AD4_LUT_GRP1_SIZE 32
/*
@@ -281,4 +319,26 @@
__u32 cfg_param_053;
};
+#define DITHER_MATRIX_SZ 16
+
+/**
+ * struct drm_msm_dither - dither feature structure
+ * @flags: for customizing operations
+ * @temporal_en: temporal dither enable
+ * @c0_bitdepth: c0 component bit depth
+ * @c1_bitdepth: c1 component bit depth
+ * @c2_bitdepth: c2 component bit depth
+ * @c3_bitdepth: c3 component bit depth
+ * @matrix: dither strength matrix
+ */
+struct drm_msm_dither {
+ __u64 flags;
+ __u32 temporal_en;
+ __u32 c0_bitdepth;
+ __u32 c1_bitdepth;
+ __u32 c2_bitdepth;
+ __u32 c3_bitdepth;
+ __u32 matrix[DITHER_MATRIX_SZ];
+};
+
#endif /* _MSM_DRM_PP_H_ */
diff --git a/include/uapi/drm/sde_drm.h b/include/uapi/drm/sde_drm.h
index 44b42a6..439a925 100644
--- a/include/uapi/drm/sde_drm.h
+++ b/include/uapi/drm/sde_drm.h
@@ -67,6 +67,48 @@
#define SDE_DRM_BITMASK_COUNT 64
/**
+ * Framebuffer modes for "fb_translation_mode" PLANE property
+ *
+ * @SDE_DRM_FB_NON_SEC: IOMMU configuration for this framebuffer mode
+ * is non-secure domain and requires
+ * both stage I and stage II translations when
+ * this buffer is accessed by the display HW.
+ * This is the default mode of all framebuffers.
+ * @SDE_DRM_FB_SEC: IOMMU configuration for this framebuffer mode
+ * is secure domain and requires
+ * both stage I and stage II translations when
+ * this buffer is accessed by the display HW.
+ * @SDE_DRM_FB_NON_SEC_DIR_TRANS: IOMMU configuration for this framebuffer mode
+ * is non-secure domain and requires
+ * only stage II translation when
+ * this buffer is accessed by the display HW.
+ * @SDE_DRM_FB_SEC_DIR_TRANS: IOMMU configuration for this framebuffer mode
+ * is secure domain and requires
+ * only stage II translation when
+ * this buffer is accessed by the display HW.
+ */
+
+#define SDE_DRM_FB_NON_SEC 0
+#define SDE_DRM_FB_SEC 1
+#define SDE_DRM_FB_NON_SEC_DIR_TRANS 2
+#define SDE_DRM_FB_SEC_DIR_TRANS 3
+
+/**
+ * Secure levels for "security_level" CRTC property.
+ * CRTC property which specifies what plane types
+ * can be attached to this CRTC. Plane component
+ * derives the plane type based on the FB_MODE.
+ * @ SDE_DRM_SEC_NON_SEC: Both Secure and non-secure plane types can be
+ * attached to this CRTC. This is the default state of
+ * the CRTC.
+ * @ SDE_DRM_SEC_ONLY: Only secure planes can be added to this CRTC. If a
+ * CRTC is instructed to be in this mode it follows the
+ * platform dependent restrictions.
+ */
+#define SDE_DRM_SEC_NON_SEC 0
+#define SDE_DRM_SEC_ONLY 1
+
+/**
* struct sde_drm_pix_ext_v1 - version 1 of pixel ext structure
* @num_ext_pxls_lr: Number of total horizontal pixels
* @num_ext_pxls_tb: Number of total vertical lines
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index 7668b57..5539933 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -37,9 +37,56 @@
BINDER_TYPE_PTR = B_PACK_CHARS('p', 't', '*', B_TYPE_LARGE),
};
-enum {
+/**
+ * enum flat_binder_object_shifts: shift values for flat_binder_object_flags
+ * @FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT: shift for getting scheduler policy.
+ *
+ */
+enum flat_binder_object_shifts {
+ FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT = 9,
+};
+
+/**
+ * enum flat_binder_object_flags - flags for use in flat_binder_object.flags
+ */
+enum flat_binder_object_flags {
+ /**
+ * @FLAT_BINDER_FLAG_PRIORITY_MASK: bit-mask for min scheduler priority
+ *
+ * These bits can be used to set the minimum scheduler priority
+ * at which transactions into this node should run. Valid values
+ * in these bits depend on the scheduler policy encoded in
+ * @FLAT_BINDER_FLAG_SCHED_POLICY_MASK.
+ *
+ * For SCHED_NORMAL/SCHED_BATCH, the valid range is between [-20..19]
+ * For SCHED_FIFO/SCHED_RR, the value can run between [1..99]
+ */
FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
+ /**
+ * @FLAT_BINDER_FLAG_ACCEPTS_FDS: whether the node accepts fds.
+ */
FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
+ /**
+ * @FLAT_BINDER_FLAG_SCHED_POLICY_MASK: bit-mask for scheduling policy
+ *
+ * These two bits can be used to set the min scheduling policy at which
+ * transactions on this node should run. These match the UAPI
+ * scheduler policy values, eg:
+ * 00b: SCHED_NORMAL
+ * 01b: SCHED_FIFO
+ * 10b: SCHED_RR
+ * 11b: SCHED_BATCH
+ */
+ FLAT_BINDER_FLAG_SCHED_POLICY_MASK =
+ 3U << FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT,
+
+ /**
+ * @FLAT_BINDER_FLAG_INHERIT_RT: whether the node inherits RT policy
+ *
+ * Only when set, calls into this node will inherit a real-time
+ * scheduling policy from the caller (for synchronous transactions).
+ */
+ FLAT_BINDER_FLAG_INHERIT_RT = 0x800,
};
#ifdef BINDER_IPC_32BIT
@@ -186,6 +233,19 @@
#define BINDER_CURRENT_PROTOCOL_VERSION 8
#endif
+/*
+ * Use with BINDER_GET_NODE_DEBUG_INFO, driver reads ptr, writes to all fields.
+ * Set ptr to NULL for the first call to get the info for the first node, and
+ * then repeat the call passing the previously returned value to get the next
+ * nodes. ptr will be 0 when there are no more nodes.
+ */
+struct binder_node_debug_info {
+ binder_uintptr_t ptr;
+ binder_uintptr_t cookie;
+ __u32 has_strong_ref;
+ __u32 has_weak_ref;
+};
+
#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64)
#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32)
@@ -193,6 +253,7 @@
#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32)
#define BINDER_THREAD_EXIT _IOW('b', 8, __s32)
#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
+#define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info)
/*
* NOTE: Two special error codes you should check for when calling
diff --git a/include/uapi/linux/dvb/dmx.h b/include/uapi/linux/dvb/dmx.h
index 427e489..175534a 100644
--- a/include/uapi/linux/dvb/dmx.h
+++ b/include/uapi/linux/dvb/dmx.h
@@ -32,6 +32,11 @@
#define DMX_FILTER_SIZE 16
+/* Min recording chunk upon which event is generated */
+#define DMX_REC_BUFF_CHUNK_MIN_SIZE (100*188)
+
+#define DMX_MAX_DECODER_BUFFER_NUM (32)
+
enum dmx_output
{
DMX_OUT_DECODER, /* Streaming directly to decoder. */
@@ -108,6 +113,44 @@
#define DMX_KERNEL_CLIENT 0x8000
};
+enum dmx_video_codec {
+ DMX_VIDEO_CODEC_MPEG2,
+ DMX_VIDEO_CODEC_H264,
+ DMX_VIDEO_CODEC_VC1
+};
+
+/* Index entries types */
+#define DMX_IDX_RAI 0x00000001
+#define DMX_IDX_PUSI 0x00000002
+#define DMX_IDX_MPEG_SEQ_HEADER 0x00000004
+#define DMX_IDX_MPEG_GOP 0x00000008
+#define DMX_IDX_MPEG_FIRST_SEQ_FRAME_START 0x00000010
+#define DMX_IDX_MPEG_FIRST_SEQ_FRAME_END 0x00000020
+#define DMX_IDX_MPEG_I_FRAME_START 0x00000040
+#define DMX_IDX_MPEG_I_FRAME_END 0x00000080
+#define DMX_IDX_MPEG_P_FRAME_START 0x00000100
+#define DMX_IDX_MPEG_P_FRAME_END 0x00000200
+#define DMX_IDX_MPEG_B_FRAME_START 0x00000400
+#define DMX_IDX_MPEG_B_FRAME_END 0x00000800
+#define DMX_IDX_H264_SPS 0x00001000
+#define DMX_IDX_H264_PPS 0x00002000
+#define DMX_IDX_H264_FIRST_SPS_FRAME_START 0x00004000
+#define DMX_IDX_H264_FIRST_SPS_FRAME_END 0x00008000
+#define DMX_IDX_H264_IDR_START 0x00010000
+#define DMX_IDX_H264_IDR_END 0x00020000
+#define DMX_IDX_H264_NON_IDR_START 0x00040000
+#define DMX_IDX_H264_NON_IDR_END 0x00080000
+#define DMX_IDX_VC1_SEQ_HEADER 0x00100000
+#define DMX_IDX_VC1_ENTRY_POINT 0x00200000
+#define DMX_IDX_VC1_FIRST_SEQ_FRAME_START 0x00400000
+#define DMX_IDX_VC1_FIRST_SEQ_FRAME_END 0x00800000
+#define DMX_IDX_VC1_FRAME_START 0x01000000
+#define DMX_IDX_VC1_FRAME_END 0x02000000
+#define DMX_IDX_H264_ACCESS_UNIT_DEL 0x04000000
+#define DMX_IDX_H264_SEI 0x08000000
+#define DMX_IDX_H264_IDR_ISLICE_START 0x10000000
+#define DMX_IDX_H264_NON_IDR_PSLICE_START 0x20000000
+#define DMX_IDX_H264_NON_IDR_BSLICE_START 0x40000000
struct dmx_pes_filter_params
{
@@ -116,11 +159,457 @@
dmx_output_t output;
dmx_pes_type_t pes_type;
__u32 flags;
+
+ /*
+ * The following configures when the event
+ * DMX_EVENT_NEW_REC_CHUNK will be triggered.
+ * When new recorded data is received with size
+ * equal or larger than this value a new event
+ * will be triggered. This is relevant when
+ * output is DMX_OUT_TS_TAP or DMX_OUT_TSDEMUX_TAP,
+ * size must be at least DMX_REC_BUFF_CHUNK_MIN_SIZE
+ * and smaller than buffer size.
+ */
+ __u32 rec_chunk_size;
+
+ enum dmx_video_codec video_codec;
+};
+
+struct dmx_buffer_status {
+ /* size of buffer in bytes */
+ unsigned int size;
+
+ /* fullness of buffer in bytes */
+ unsigned int fullness;
+
+ /*
+ * How many bytes are free
+ * It's the same as: size-fullness-1
+ */
+ unsigned int free_bytes;
+
+ /* read pointer offset in bytes */
+ unsigned int read_offset;
+
+ /* write pointer offset in bytes */
+ unsigned int write_offset;
+
+ /* non-zero if data error occurred */
+ int error;
+};
+
+/* Events associated with each demux filter */
+enum dmx_event {
+ /* New PES packet is ready to be consumed */
+ DMX_EVENT_NEW_PES = 0x00000001,
+
+ /* New section is ready to be consumed */
+ DMX_EVENT_NEW_SECTION = 0x00000002,
+
+ /* New recording chunk is ready to be consumed */
+ DMX_EVENT_NEW_REC_CHUNK = 0x00000004,
+
+ /* New PCR value is ready */
+ DMX_EVENT_NEW_PCR = 0x00000008,
+
+ /* Overflow */
+ DMX_EVENT_BUFFER_OVERFLOW = 0x00000010,
+
+ /* Section was dropped due to CRC error */
+ DMX_EVENT_SECTION_CRC_ERROR = 0x00000020,
+
+ /* End-of-stream, no more data from this filter */
+ DMX_EVENT_EOS = 0x00000040,
+
+ /* New Elementary Stream data is ready */
+ DMX_EVENT_NEW_ES_DATA = 0x00000080,
+
+ /* Data markers */
+ DMX_EVENT_MARKER = 0x00000100,
+
+ /* New indexing entry is ready */
+ DMX_EVENT_NEW_INDEX_ENTRY = 0x00000200,
+
+ /*
+ * Section filter timer expired. This is notified
+ * when timeout is configured to section filter
+ * (dmx_sct_filter_params) and no sections were
+ * received for the given time.
+ */
+ DMX_EVENT_SECTION_TIMEOUT = 0x00000400,
+
+ /* Scrambling bits change between clear and scrambled */
+ DMX_EVENT_SCRAMBLING_STATUS_CHANGE = 0x00000800
+};
+
+enum dmx_oob_cmd {
+ /* End-of-stream, no more data from this filter */
+ DMX_OOB_CMD_EOS,
+
+ /* Data markers */
+ DMX_OOB_CMD_MARKER,
+};
+
+/* Flags passed in filter events */
+
+/* Continuity counter error was detected */
+#define DMX_FILTER_CC_ERROR 0x01
+
+/* Discontinuity indicator was set */
+#define DMX_FILTER_DISCONTINUITY_INDICATOR 0x02
+
+/* PES length in PES header is not correct */
+#define DMX_FILTER_PES_LENGTH_ERROR 0x04
+
+
+/* PES info associated with DMX_EVENT_NEW_PES event */
+struct dmx_pes_event_info {
+ /* Offset at which PES information starts */
+ __u32 base_offset;
+
+ /*
+ * Start offset at which PES data
+ * from the stream starts.
+ * Equal to base_offset if PES data
+ * starts from the beginning.
+ */
+ __u32 start_offset;
+
+ /* Total length holding the PES information */
+ __u32 total_length;
+
+ /* Actual length holding the PES data */
+ __u32 actual_length;
+
+ /* Local receiver timestamp in 27MHz */
+ __u64 stc;
+
+ /* Flags passed in filter events */
+ __u32 flags;
+
+ /*
+ * Number of TS packets with Transport Error Indicator (TEI)
+ * found while constructing the PES.
+ */
+ __u32 transport_error_indicator_counter;
+
+ /* Number of continuity errors found while constructing the PES */
+ __u32 continuity_error_counter;
+
+ /* Total number of TS packets holding the PES */
+ __u32 ts_packets_num;
+};
+
+/* Section info associated with DMX_EVENT_NEW_SECTION event */
+struct dmx_section_event_info {
+ /* Offset at which section information starts */
+ __u32 base_offset;
+
+ /*
+ * Start offset at which section data
+ * from the stream starts.
+ * Equal to base_offset if section data
+ * starts from the beginning.
+ */
+ __u32 start_offset;
+
+ /* Total length holding the section information */
+ __u32 total_length;
+
+ /* Actual length holding the section data */
+ __u32 actual_length;
+
+ /* Flags passed in filter events */
+ __u32 flags;
+};
+
+/* Recording info associated with DMX_EVENT_NEW_REC_CHUNK event */
+struct dmx_rec_chunk_event_info {
+ /* Offset at which recording chunk starts */
+ __u32 offset;
+
+ /* Size of recording chunk in bytes */
+ __u32 size;
+};
+
+/* PCR info associated with DMX_EVENT_NEW_PCR event */
+struct dmx_pcr_event_info {
+ /* Local timestamp in 27MHz
+ * when PCR packet was received
+ */
+ __u64 stc;
+
+ /* PCR value in 27MHz */
+ __u64 pcr;
+
+ /* Flags passed in filter events */
+ __u32 flags;
+};
+
+/*
+ * Elementary stream data information associated
+ * with DMX_EVENT_NEW_ES_DATA event
+ */
+struct dmx_es_data_event_info {
+ /* Buffer user-space handle */
+ int buf_handle;
+
+ /*
+ * Cookie to provide when releasing the buffer
+ * using the DMX_RELEASE_DECODER_BUFFER ioctl command
+ */
+ int cookie;
+
+ /* Offset of data from the beginning of the buffer */
+ __u32 offset;
+
+ /* Length of data in buffer (in bytes) */
+ __u32 data_len;
+
+ /* Indication whether PTS value is valid */
+ int pts_valid;
+
+ /* PTS value associated with the buffer */
+ __u64 pts;
+
+ /* Indication whether DTS value is valid */
+ int dts_valid;
+
+ /* DTS value associated with the buffer */
+ __u64 dts;
+
+ /* STC value associated with the buffer in 27MHz */
+ __u64 stc;
+
+ /*
+ * Number of TS packets with Transport Error Indicator (TEI) set
+ * in the TS packet header since last reported event
+ */
+ __u32 transport_error_indicator_counter;
+
+ /* Number of continuity errors since last reported event */
+ __u32 continuity_error_counter;
+
+ /* Total number of TS packets processed since last reported event */
+ __u32 ts_packets_num;
+
+ /*
+ * Number of dropped bytes due to insufficient buffer space,
+ * since last reported event
+ */
+ __u32 ts_dropped_bytes;
+};
+
+/* Marker details associated with DMX_EVENT_MARKER event */
+struct dmx_marker_event_info {
+ /* Marker id */
+ __u64 id;
+};
+
+/* Indexing information associated with DMX_EVENT_NEW_INDEX_ENTRY event */
+struct dmx_index_event_info {
+ /* Index entry type, one of DMX_IDX_* */
+ __u64 type;
+
+ /*
+ * The PID the index entry belongs to.
+ * In case of recording filter, multiple PIDs may exist in the same
+ * filter through DMX_ADD_PID ioctl and each can be indexed separately.
+ */
+ __u16 pid;
+
+ /*
+ * The TS packet number in the recorded data at which
+ * the indexing event is found.
+ */
+ __u64 match_tsp_num;
+
+ /*
+ * The TS packet number in the recorded data preceding
+ * match_tsp_num and has PUSI set.
+ */
+ __u64 last_pusi_tsp_num;
+
+ /* STC associated with match_tsp_num, in 27MHz */
+ __u64 stc;
+};
+
+/* Scrambling information associated with DMX_EVENT_SCRAMBLING_STATUS_CHANGE */
+struct dmx_scrambling_status_event_info {
+ /*
+	 * The PID whose scrambling bits status changed.
+ * In case of recording filter, multiple PIDs may exist in the same
+ * filter through DMX_ADD_PID ioctl, each may have
+ * different scrambling bits status.
+ */
+ __u16 pid;
+
+ /* old value of scrambling bits */
+ __u8 old_value;
+
+ /* new value of scrambling bits */
+ __u8 new_value;
+};
+
+/*
+ * Filter's event returned through DMX_GET_EVENT.
+ * poll with POLLPRI would block until events are available.
+ */
+struct dmx_filter_event {
+ enum dmx_event type;
+
+ union {
+ struct dmx_pes_event_info pes;
+ struct dmx_section_event_info section;
+ struct dmx_rec_chunk_event_info recording_chunk;
+ struct dmx_pcr_event_info pcr;
+ struct dmx_es_data_event_info es_data;
+ struct dmx_marker_event_info marker;
+ struct dmx_index_event_info index;
+ struct dmx_scrambling_status_event_info scrambling_status;
+ } params;
+};
+
+/* Filter's buffer requirement returned in dmx_caps */
+struct dmx_buffer_requirement {
+ /* Buffer size alignment, 0 means no special requirement */
+ __u32 size_alignment;
+
+ /* Maximum buffer size allowed */
+ __u32 max_size;
+
+ /* Maximum number of linear buffers handled by demux */
+ __u32 max_buffer_num;
+
+ /* Feature support bitmap as detailed below */
+ __u32 flags;
+
+/* Buffer must be allocated as physically contiguous memory */
+#define DMX_BUFFER_CONTIGUOUS_MEM 0x1
+
+/* If the filter's data is decrypted, the buffer should be a secured one */
+#define DMX_BUFFER_SECURED_IF_DECRYPTED 0x2
+
+/* Buffer can be allocated externally */
+#define DMX_BUFFER_EXTERNAL_SUPPORT 0x4
+
+/* Buffer can be allocated internally */
+#define DMX_BUFFER_INTERNAL_SUPPORT 0x8
+
+/* Filter output can be output to a linear buffer group */
+#define DMX_BUFFER_LINEAR_GROUP_SUPPORT 0x10
+
+/* Buffer may be allocated as cached buffer */
+#define DMX_BUFFER_CACHED 0x20
+};
+
+/* Out-of-band (OOB) command */
+struct dmx_oob_command {
+ enum dmx_oob_cmd type;
+
+ union {
+ struct dmx_marker_event_info marker;
+ } params;
};
typedef struct dmx_caps {
__u32 caps;
+
+/* Indicates whether demux support playback from memory in pull mode */
+#define DMX_CAP_PULL_MODE 0x01
+
+/* Indicates whether demux support indexing of recorded video stream */
+#define DMX_CAP_VIDEO_INDEXING 0x02
+
+/* Indicates whether demux support sending data directly to video decoder */
+#define DMX_CAP_VIDEO_DECODER_DATA 0x04
+
+/* Indicates whether demux support sending data directly to audio decoder */
+#define DMX_CAP_AUDIO_DECODER_DATA 0x08
+
+/* Indicates whether demux support sending data directly to subtitle decoder */
+#define DMX_CAP_SUBTITLE_DECODER_DATA 0x10
+
+/* Indicates whether TS insertion is supported */
+#define DMX_CAP_TS_INSERTION 0x20
+
+/* Indicates whether playback from secured input is supported */
+#define DMX_CAP_SECURED_INPUT_PLAYBACK 0x40
+
+/* Indicates whether automatic buffer flush upon overflow is allowed */
+#define DMX_CAP_AUTO_BUFFER_FLUSH 0x80
+
+ /* Number of decoders demux can output data to */
int num_decoders;
+
+ /* Number of demux devices */
+ int num_demux_devices;
+
+ /* Max number of PID filters */
+ int num_pid_filters;
+
+ /* Max number of section filters */
+ int num_section_filters;
+
+ /*
+ * Max number of section filters using same PID,
+ * 0 if not supported
+ */
+ int num_section_filters_per_pid;
+
+ /*
+ * Length of section filter, not including section
+ * length field (2 bytes).
+ */
+ int section_filter_length;
+
+ /* Max number of demod based input */
+ int num_demod_inputs;
+
+ /* Max number of memory based input */
+ int num_memory_inputs;
+
+ /* Overall bitrate from all inputs concurrently. Mbit/sec */
+ int max_bitrate;
+
+ /* Max bitrate from single demod input. Mbit/sec */
+ int demod_input_max_bitrate;
+
+ /* Max bitrate from single memory input. Mbit/sec */
+ int memory_input_max_bitrate;
+
+ /* Max number of supported cipher operations per PID */
+ int num_cipher_ops;
+
+ /* Max possible value of STC reported by demux, in 27MHz */
+ __u64 max_stc;
+
+ /*
+ * For indexing support (DMX_CAP_VIDEO_INDEXING capability) this is
+ * the max number of video pids that can be indexed for a single
+	 * recording filter. If 0, it means there is no limitation.
+ */
+ int recording_max_video_pids_indexed;
+
+ struct dmx_buffer_requirement section;
+
+ /* For PES not sent to decoder */
+ struct dmx_buffer_requirement pes;
+
+ /* For PES sent to decoder */
+ struct dmx_buffer_requirement decoder;
+
+ /* Recording buffer for recording of 188 bytes packets */
+ struct dmx_buffer_requirement recording_188_tsp;
+
+ /* Recording buffer for recording of 192 bytes packets */
+ struct dmx_buffer_requirement recording_192_tsp;
+
+ /* DVR input buffer for playback of 188 bytes packets */
+ struct dmx_buffer_requirement playback_188_tsp;
+
+ /* DVR input buffer for playback of 192 bytes packets */
+ struct dmx_buffer_requirement playback_192_tsp;
} dmx_caps_t;
typedef enum dmx_source {
@@ -134,12 +623,229 @@
DMX_SOURCE_DVR3
} dmx_source_t;
+enum dmx_tsp_format_t {
+ DMX_TSP_FORMAT_188 = 0,
+ DMX_TSP_FORMAT_192_TAIL,
+ DMX_TSP_FORMAT_192_HEAD,
+ DMX_TSP_FORMAT_204,
+};
+
+enum dmx_playback_mode_t {
+ /*
+ * In push mode, if one of output buffers
+ * is full, the buffer would overflow
+ * and demux continue processing incoming stream.
+ * This is the default mode. When playing from frontend,
+ * this is the only mode that is allowed.
+ */
+ DMX_PB_MODE_PUSH = 0,
+
+ /*
+ * In pull mode, if one of output buffers
+ * is full, demux stalls waiting for free space,
+ * this would cause DVR input buffer fullness
+ * to accumulate.
+ * This mode is possible only when playing
+ * from DVR.
+ */
+ DMX_PB_MODE_PULL,
+};
+
struct dmx_stc {
unsigned int num; /* input : which STC? 0..N */
unsigned int base; /* output: divisor for stc to get 90 kHz clock */
__u64 stc; /* output: stc in 'base'*90 kHz units */
};
+enum dmx_buffer_mode {
+ /*
+ * demux buffers are allocated internally
+ * by the demux driver. This is the default mode.
+ * DMX_SET_BUFFER_SIZE can be used to set the size of
+ * this buffer.
+ */
+ DMX_BUFFER_MODE_INTERNAL,
+
+ /*
+ * demux buffers are allocated externally and provided
+ * to demux through DMX_SET_BUFFER.
+ * When this mode is used DMX_SET_BUFFER_SIZE and
+ * mmap are prohibited.
+ */
+ DMX_BUFFER_MODE_EXTERNAL,
+};
+
+struct dmx_buffer {
+ unsigned int size;
+ int handle;
+
+ /*
+ * The following indication is relevant only when setting
+ * DVR input buffer. It indicates whether the input buffer
+ * being set is secured one or not. Secured (locked) buffers
+ * are required for playback from secured input. In such case
+ * write() syscall is not allowed.
+ */
+ int is_protected;
+};
+
+struct dmx_decoder_buffers {
+ /*
+ * Specify if linear buffer support is requested. If set, buffers_num
+ * must be greater than 1
+ */
+ int is_linear;
+
+ /*
+ * Specify number of external buffers allocated by user.
+ * If set to 0 means internal buffer allocation is requested
+ */
+ __u32 buffers_num;
+
+ /* Specify buffer size, either external or internal */
+ __u32 buffers_size;
+
+ /* Array of externally allocated buffer handles */
+ int handles[DMX_MAX_DECODER_BUFFER_NUM];
+};
+
+struct dmx_secure_mode {
+ /*
+ * Specifies whether the filter is secure or not.
+ * Filter should be set as secured if the filter's data *may* include
+ * encrypted data that would require decryption configured through
+ * DMX_SET_CIPHER ioctl. The setting may be done while
+ * filter is in idle state only.
+ */
+ int is_secured;
+};
+
+struct dmx_cipher_operation {
+ /* Indication whether the operation is encryption or decryption */
+ int encrypt;
+
+ /* The ID of the key used for decryption or encryption */
+ __u32 key_ladder_id;
+};
+
+#define DMX_MAX_CIPHER_OPERATIONS_COUNT 5
+struct dmx_cipher_operations {
+ /*
+ * The PID to perform the cipher operations on.
+ * In case of recording filter, multiple PIDs may exist in the same
+ * filter through DMX_ADD_PID ioctl, each may have different
+ * cipher operations.
+ */
+ __u16 pid;
+
+ /* Total number of operations */
+ __u8 operations_count;
+
+ /*
+ * Cipher operation to perform on the given PID.
+ * The operations are performed in the order they are given.
+ */
+ struct dmx_cipher_operation operations[DMX_MAX_CIPHER_OPERATIONS_COUNT];
+};
+
+struct dmx_events_mask {
+ /*
+ * Bitmask of events to be disabled (dmx_event).
+ * Disabled events will not be notified to the user.
+ * By default all events are enabled except for
+ * DMX_EVENT_NEW_ES_DATA.
+ * Overflow event can't be disabled.
+ */
+ __u32 disable_mask;
+
+ /*
+ * Bitmask of events that will not wake-up the user
+ * when user calls poll with POLLPRI flag.
+ * Events that are used as wake-up source should not be
+ * disabled in disable_mask or they would not be used
+ * as a wake-up source.
+ * By default all enabled events are set as wake-up events.
+ * Overflow event can't be disabled as a wake-up source.
+ */
+ __u32 no_wakeup_mask;
+
+ /*
+ * Number of ready wake-up events which will trigger
+ * a wake-up when user calls poll with POLLPRI flag.
+ * Default is set to 1.
+ */
+ __u32 wakeup_threshold;
+};
+
+struct dmx_indexing_params {
+ /*
+ * PID to index. In case of recording filter, multiple PIDs
+ * may exist in the same filter through DMX_ADD_PID ioctl.
+ * It is assumed that the PID was already added using DMX_ADD_PID
+ * or an error will be reported.
+ */
+ __u16 pid;
+
+ /* enable or disable indexing, default is disabled */
+ int enable;
+
+ /* combination of DMX_IDX_* bits */
+ __u64 types;
+};
+
+struct dmx_set_ts_insertion {
+ /*
+ * Unique identifier managed by the caller.
+ * This identifier can be used later to remove the
+ * insertion using DMX_ABORT_TS_INSERTION ioctl.
+ */
+ __u32 identifier;
+
+ /*
+ * Repetition time in msec, minimum allowed value is 25msec.
+ * 0 repetition time means one-shot insertion is done.
+ * Insertion done based on wall-clock.
+ */
+ __u32 repetition_time;
+
+ /*
+ * TS packets buffer to be inserted.
+ * The buffer is inserted as-is to the recording buffer
+ * without any modification.
+ * It is advised to set discontinuity flag in the very
+ * first TS packet in the buffer.
+ */
+ const __u8 *ts_packets;
+
+ /*
+ * Size in bytes of the TS packets buffer to be inserted.
+ * Should be in multiples of 188 or 192 bytes
+ * depending on recording filter output format.
+ */
+ size_t size;
+};
+
+struct dmx_abort_ts_insertion {
+ /*
+ * Identifier of the insertion buffer previously set
+ * using DMX_SET_TS_INSERTION.
+ */
+ __u32 identifier;
+};
+
+struct dmx_scrambling_bits {
+ /*
+ * The PID to return its scrambling bit value.
+ * In case of recording filter, multiple PIDs may exist in the same
+ * filter through DMX_ADD_PID ioctl, each may have different
+ * scrambling bits status.
+ */
+ __u16 pid;
+
+ /* Current value of scrambling bits: 0, 1, 2 or 3 */
+ __u8 value;
+};
+
#define DMX_START _IO('o', 41)
#define DMX_STOP _IO('o', 42)
#define DMX_SET_FILTER _IOW('o', 43, struct dmx_sct_filter_params)
@@ -151,5 +857,27 @@
#define DMX_GET_STC _IOWR('o', 50, struct dmx_stc)
#define DMX_ADD_PID _IOW('o', 51, __u16)
#define DMX_REMOVE_PID _IOW('o', 52, __u16)
+#define DMX_SET_TS_PACKET_FORMAT _IOW('o', 53, enum dmx_tsp_format_t)
+#define DMX_SET_TS_OUT_FORMAT _IOW('o', 54, enum dmx_tsp_format_t)
+#define DMX_SET_DECODER_BUFFER_SIZE _IO('o', 55)
+#define DMX_GET_BUFFER_STATUS _IOR('o', 56, struct dmx_buffer_status)
+#define DMX_RELEASE_DATA _IO('o', 57)
+#define DMX_FEED_DATA _IO('o', 58)
+#define DMX_SET_PLAYBACK_MODE _IOW('o', 59, enum dmx_playback_mode_t)
+#define DMX_GET_EVENT _IOR('o', 60, struct dmx_filter_event)
+#define DMX_SET_BUFFER_MODE _IOW('o', 61, enum dmx_buffer_mode)
+#define DMX_SET_BUFFER _IOW('o', 62, struct dmx_buffer)
+#define DMX_SET_DECODER_BUFFER _IOW('o', 63, struct dmx_decoder_buffers)
+#define DMX_REUSE_DECODER_BUFFER _IO('o', 64)
+#define DMX_SET_SECURE_MODE _IOW('o', 65, struct dmx_secure_mode)
+#define DMX_SET_EVENTS_MASK _IOW('o', 66, struct dmx_events_mask)
+#define DMX_GET_EVENTS_MASK _IOR('o', 67, struct dmx_events_mask)
+#define DMX_PUSH_OOB_COMMAND _IOW('o', 68, struct dmx_oob_command)
+#define DMX_SET_INDEXING_PARAMS _IOW('o', 69, struct dmx_indexing_params)
+#define DMX_SET_TS_INSERTION _IOW('o', 70, struct dmx_set_ts_insertion)
+#define DMX_ABORT_TS_INSERTION _IOW('o', 71, struct dmx_abort_ts_insertion)
+#define DMX_GET_SCRAMBLING_BITS _IOWR('o', 72, struct dmx_scrambling_bits)
+#define DMX_SET_CIPHER _IOW('o', 73, struct dmx_cipher_operations)
+#define DMX_FLUSH_BUFFER _IO('o', 74)
#endif /* _UAPI_DVBDMX_H_ */
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 8e54723..5c22e8c 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1368,6 +1368,8 @@
ETHTOOL_LINK_MODE_10000baseLR_Full_BIT = 44,
ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT = 45,
ETHTOOL_LINK_MODE_10000baseER_Full_BIT = 46,
+ ETHTOOL_LINK_MODE_2500baseT_Full_BIT = 47,
+ ETHTOOL_LINK_MODE_5000baseT_Full_BIT = 48,
/* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit
@@ -1377,7 +1379,7 @@
*/
__ETHTOOL_LINK_MODE_LAST
- = ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
+ = ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
};
#define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name) \
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 0932378..e645f17 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -266,6 +266,7 @@
#define FUSE_PARALLEL_DIROPS (1 << 18)
#define FUSE_HANDLE_KILLPRIV (1 << 19)
#define FUSE_POSIX_ACL (1 << 20)
+#define FUSE_PASSTHROUGH (1 << 21)
/**
* CUSE INIT request/reply flags
@@ -498,7 +499,7 @@
struct fuse_open_out {
uint64_t fh;
uint32_t open_flags;
- uint32_t padding;
+ int32_t passthrough_fd;
};
struct fuse_release_in {
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
index 48cfe31..5fdbdd8 100644
--- a/include/uapi/linux/msm_ipa.h
+++ b/include/uapi/linux/msm_ipa.h
@@ -69,8 +69,12 @@
#define IPA_IOCTL_ADD_FLT_RULE_AFTER 44
#define IPA_IOCTL_GET_HW_VERSION 45
#define IPA_IOCTL_ADD_RT_RULE_EXT 46
-#define IPA_IOCTL_NAT_MODIFY_PDN 47
-#define IPA_IOCTL_MAX 48
+#define IPA_IOCTL_ADD_VLAN_IFACE 47
+#define IPA_IOCTL_DEL_VLAN_IFACE 48
+#define IPA_IOCTL_ADD_L2TP_VLAN_MAPPING 49
+#define IPA_IOCTL_DEL_L2TP_VLAN_MAPPING 50
+#define IPA_IOCTL_NAT_MODIFY_PDN 51
+#define IPA_IOCTL_MAX 52
/**
* max size of the header to be inserted
@@ -127,6 +131,7 @@
#define IPA_FLT_MAC_SRC_ADDR_802_3 (1ul << 19)
#define IPA_FLT_MAC_DST_ADDR_802_3 (1ul << 20)
#define IPA_FLT_MAC_ETHER_TYPE (1ul << 21)
+#define IPA_FLT_MAC_DST_ADDR_L2TP (1ul << 22)
/**
* maximal number of NAT PDNs in the PDN config table
@@ -250,9 +255,12 @@
IPA_CLIENT_TEST4_PROD = 70,
IPA_CLIENT_TEST4_CONS = 71,
+
+	/* RESERVED PROD = 72, */
+ IPA_CLIENT_DUMMY_CONS = 73
};
-#define IPA_CLIENT_MAX (IPA_CLIENT_TEST4_CONS + 1)
+#define IPA_CLIENT_MAX (IPA_CLIENT_DUMMY_CONS + 1)
#define IPA_CLIENT_IS_APPS_CONS(client) \
((client) == IPA_CLIENT_APPS_LAN_CONS || \
@@ -451,7 +459,16 @@
IPA_SSR_EVENT_MAX
};
-#define IPA_EVENT_MAX_NUM ((int)IPA_SSR_EVENT_MAX)
+enum ipa_vlan_l2tp_event {
+ ADD_VLAN_IFACE = IPA_SSR_EVENT_MAX,
+ DEL_VLAN_IFACE,
+ ADD_L2TP_VLAN_MAPPING,
+ DEL_L2TP_VLAN_MAPPING,
+ IPA_VLAN_L2TP_EVENT_MAX,
+};
+
+#define IPA_EVENT_MAX_NUM (IPA_VLAN_L2TP_EVENT_MAX)
+#define IPA_EVENT_MAX ((int)IPA_EVENT_MAX_NUM)
/**
* enum ipa_rm_resource_name - IPA RM clients identification names
@@ -784,8 +801,10 @@
IPA_HDR_PROC_ETHII_TO_802_3,
IPA_HDR_PROC_802_3_TO_ETHII,
IPA_HDR_PROC_802_3_TO_802_3,
+ IPA_HDR_PROC_L2TP_HEADER_ADD,
+ IPA_HDR_PROC_L2TP_HEADER_REMOVE
};
-#define IPA_HDR_PROC_MAX (IPA_HDR_PROC_802_3_TO_802_3 + 1)
+#define IPA_HDR_PROC_MAX (IPA_HDR_PROC_L2TP_HEADER_REMOVE + 1)
/**
* struct ipa_rt_rule - attributes of a routing rule
@@ -856,10 +875,60 @@
};
/**
+ * struct ipa_l2tp_header_add_procparams -
+ * @eth_hdr_retained: Specifies if Ethernet header is retained or not
+ * @input_ip_version: Specifies if Input header is IPV4(0) or IPV6(1)
+ * @output_ip_version: Specifies if template header is IPV4(0) or IPV6(1)
+ */
+struct ipa_l2tp_header_add_procparams {
+ uint32_t eth_hdr_retained:1;
+ uint32_t input_ip_version:1;
+ uint32_t output_ip_version:1;
+ uint32_t reserved:29;
+};
+
+/**
+ * struct ipa_l2tp_header_remove_procparams -
+ * @hdr_len_remove: Specifies how much of the header needs to
+ be removed in bytes
+ * @eth_hdr_retained: Specifies if Ethernet header is retained or not
+ * @hdr_ofst_pkt_size_valid: Specifies if the Header offset is valid
+ * @hdr_ofst_pkt_size: If hdr_ofst_pkt_size_valid =1, this indicates where the
+ packet size field (2bytes) resides
+ * @hdr_endianness: 0:little endian, 1:big endian
+ */
+struct ipa_l2tp_header_remove_procparams {
+ uint32_t hdr_len_remove:8;
+ uint32_t eth_hdr_retained:1;
+ /* Following fields are valid if eth_hdr_retained =1 ( bridge mode) */
+ uint32_t hdr_ofst_pkt_size_valid:1;
+ uint32_t hdr_ofst_pkt_size:6;
+ uint32_t hdr_endianness:1;
+ uint32_t reserved:15;
+};
+
+/**
+ * struct ipa_l2tp_hdr_proc_ctx_params -
+ * @hdr_add_param: parameters for header add
+ * @hdr_remove_param: parameters for header remove
+ * @is_dst_pipe_valid: if dst pipe is valid
+ * @dst_pipe: destination pipe
+ */
+struct ipa_l2tp_hdr_proc_ctx_params {
+ struct ipa_l2tp_header_add_procparams hdr_add_param;
+ struct ipa_l2tp_header_remove_procparams hdr_remove_param;
+ uint8_t is_dst_pipe_valid;
+ enum ipa_client_type dst_pipe;
+};
+
+#define L2TP_USER_SPACE_SPECIFY_DST_PIPE
+
+/**
* struct ipa_hdr_proc_ctx_add - processing context descriptor includes
* in and out parameters
* @type: processing context type
* @hdr_hdl: in parameter, handle to header
+ * @l2tp_params: l2tp parameters
* @proc_ctx_hdl: out parameter, handle to proc_ctx, valid when status is 0
* @status: out parameter, status of header add operation,
* 0 for success,
@@ -870,8 +939,11 @@
uint32_t hdr_hdl;
uint32_t proc_ctx_hdl;
int status;
+ struct ipa_l2tp_hdr_proc_ctx_params l2tp_params;
};
+#define IPA_L2TP_HDR_PROC_SUPPORT
+
/**
* struct ipa_ioc_add_hdr - processing context addition parameters (support
* multiple processing context and commit)
@@ -1445,6 +1517,30 @@
};
/**
+ * struct ipa_ioc_vlan_iface_info - add vlan interface
+ * @name: interface name
+ * @vlan_id: VLAN ID
+ */
+struct ipa_ioc_vlan_iface_info {
+ char name[IPA_RESOURCE_NAME_MAX];
+ uint8_t vlan_id;
+};
+
+/**
+ * struct ipa_ioc_l2tp_vlan_mapping_info - l2tp->vlan mapping info
+ * @iptype: l2tp tunnel IP type
+ * @l2tp_iface_name: l2tp interface name
+ * @l2tp_session_id: l2tp session id
+ * @vlan_iface_name: vlan interface name
+ */
+struct ipa_ioc_l2tp_vlan_mapping_info {
+ enum ipa_ip_type iptype;
+ char l2tp_iface_name[IPA_RESOURCE_NAME_MAX];
+ uint8_t l2tp_session_id;
+ char vlan_iface_name[IPA_RESOURCE_NAME_MAX];
+};
+
+/**
* struct ipa_msg_meta - Format of the message meta-data.
* @msg_type: the type of the message
* @rsvd: reserved bits for future use.
@@ -1721,6 +1817,21 @@
IPA_IOCTL_GET_HW_VERSION, \
enum ipa_hw_type *)
+#define IPA_IOC_ADD_VLAN_IFACE _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ADD_VLAN_IFACE, \
+ struct ipa_ioc_vlan_iface_info *)
+
+#define IPA_IOC_DEL_VLAN_IFACE _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_DEL_VLAN_IFACE, \
+ struct ipa_ioc_vlan_iface_info *)
+
+#define IPA_IOC_ADD_L2TP_VLAN_MAPPING _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ADD_L2TP_VLAN_MAPPING, \
+ struct ipa_ioc_l2tp_vlan_mapping_info *)
+
+#define IPA_IOC_DEL_L2TP_VLAN_MAPPING _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_DEL_L2TP_VLAN_MAPPING, \
+ struct ipa_ioc_l2tp_vlan_mapping_info *)
/*
* unique magic number of the Tethering bridge ioctls
*/
diff --git a/include/uapi/linux/netfilter/nf_log.h b/include/uapi/linux/netfilter/nf_log.h
index 8be21e0..d0b5fa9 100644
--- a/include/uapi/linux/netfilter/nf_log.h
+++ b/include/uapi/linux/netfilter/nf_log.h
@@ -9,4 +9,6 @@
#define NF_LOG_MACDECODE 0x20 /* Decode MAC header */
#define NF_LOG_MASK 0x2f
+#define NF_LOG_PREFIXLEN 128
+
#endif /* _NETFILTER_NF_LOG_H */
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index d3cbe48..8c0fc7b 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -173,6 +173,42 @@
*/
/**
+ * DOC: FILS shared key authentication offload
+ *
+ * FILS shared key authentication offload can be advertized by drivers by
+ * setting @NL80211_EXT_FEATURE_FILS_SK_OFFLOAD flag. The drivers that support
+ * FILS shared key authentication offload should be able to construct the
+ * authentication and association frames for FILS shared key authentication and
+ * eventually do a key derivation as per IEEE 802.11ai. The below additional
+ * parameters should be given to driver in %NL80211_CMD_CONNECT.
+ * %NL80211_ATTR_FILS_ERP_USERNAME - used to construct keyname_nai
+ * %NL80211_ATTR_FILS_ERP_REALM - used to construct keyname_nai
+ * %NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM - used to construct erp message
+ * %NL80211_ATTR_FILS_ERP_RRK - used to generate the rIK and rMSK
+ * rIK should be used to generate an authentication tag on the ERP message and
+ * rMSK should be used to derive a PMKSA.
+ * rIK, rMSK should be generated and keyname_nai, sequence number should be used
+ * as specified in IETF RFC 6696.
+ *
+ * When FILS shared key authentication is completed, driver needs to provide the
+ * below additional parameters to userspace.
+ * %NL80211_ATTR_FILS_KEK - used for key renewal
+ * %NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM - used in further EAP-RP exchanges
+ * %NL80211_ATTR_PMKID - used to identify the PMKSA used/generated
+ * %Nl80211_ATTR_PMK - used to update PMKSA cache in userspace
+ * The PMKSA can be maintained in userspace persistently so that it can be used
+ * later after reboots or wifi turn off/on also.
+ *
+ * %NL80211_ATTR_FILS_CACHE_ID is the cache identifier advertized by a FILS
+ * capable AP supporting PMK caching. It specifies the scope within which the
+ * PMKSAs are cached in an ESS. %NL80211_CMD_SET_PMKSA and
+ * %NL80211_CMD_DEL_PMKSA are enhanced to allow support for PMKSA caching based
+ * on FILS cache identifier. Additionally %NL80211_ATTR_PMK is used with
+ * %NL80211_SET_PMKSA to specify the PMK corresponding to a PMKSA for driver to
+ * use in a FILS shared key connection with PMKSA caching.
+ */
+
+/**
* enum nl80211_commands - supported nl80211 commands
*
* @NL80211_CMD_UNSPEC: unspecified command to catch errors
@@ -370,10 +406,18 @@
* @NL80211_CMD_NEW_SURVEY_RESULTS: survey data notification (as a reply to
* NL80211_CMD_GET_SURVEY and on the "scan" multicast group)
*
- * @NL80211_CMD_SET_PMKSA: Add a PMKSA cache entry, using %NL80211_ATTR_MAC
- * (for the BSSID) and %NL80211_ATTR_PMKID.
+ * @NL80211_CMD_SET_PMKSA: Add a PMKSA cache entry using %NL80211_ATTR_MAC
+ * (for the BSSID), %NL80211_ATTR_PMKID, and optionally %NL80211_ATTR_PMK
+ * (PMK is used for PTKSA derivation in case of FILS shared key offload) or
+ * using %NL80211_ATTR_SSID, %NL80211_ATTR_FILS_CACHE_ID,
+ * %NL80211_ATTR_PMKID, and %NL80211_ATTR_PMK in case of FILS
+ * authentication where %NL80211_ATTR_FILS_CACHE_ID is the identifier
+ * advertized by a FILS capable AP identifying the scope of PMKSA in an
+ * ESS.
* @NL80211_CMD_DEL_PMKSA: Delete a PMKSA cache entry, using %NL80211_ATTR_MAC
- * (for the BSSID) and %NL80211_ATTR_PMKID.
+ * (for the BSSID) and %NL80211_ATTR_PMKID or using %NL80211_ATTR_SSID,
+ * %NL80211_ATTR_FILS_CACHE_ID, and %NL80211_ATTR_PMKID in case of FILS
+ * authentication.
* @NL80211_CMD_FLUSH_PMKSA: Flush all PMKSA cache entries.
*
* @NL80211_CMD_REG_CHANGE: indicates to userspace the regulatory domain
@@ -600,6 +644,20 @@
*
* @NL80211_CMD_SET_WDS_PEER: Set the MAC address of the peer on a WDS interface.
*
+ * @NL80211_CMD_SET_MULTICAST_TO_UNICAST: Configure if this AP should perform
+ * multicast to unicast conversion. When enabled, all multicast packets
+ * with ethertype ARP, IPv4 or IPv6 (possibly within an 802.1Q header)
+ * will be sent out to each station once with the destination (multicast)
+ * MAC address replaced by the station's MAC address. Note that this may
+ * break certain expectations of the receiver, e.g. the ability to drop
+ * unicast IP packets encapsulated in multicast L2 frames, or the ability
+ * to not send destination unreachable messages in such cases.
+ * This can only be toggled per BSS. Configure this on an interface of
+ * type %NL80211_IFTYPE_AP. It applies to all its VLAN interfaces
+ * (%NL80211_IFTYPE_AP_VLAN), except for those in 4addr (WDS) mode.
+ * If %NL80211_ATTR_MULTICAST_TO_UNICAST_ENABLED is not present with this
+ * command, the feature is disabled.
+ *
* @NL80211_CMD_JOIN_MESH: Join a mesh. The mesh ID must be given, and initial
* mesh config parameters may be given.
* @NL80211_CMD_LEAVE_MESH: Leave the mesh network -- no special arguments, the
@@ -874,6 +932,12 @@
* This will contain a %NL80211_ATTR_NAN_MATCH nested attribute and
* %NL80211_ATTR_COOKIE.
*
+ * @NL80211_CMD_UPDATE_CONNECT_PARAMS: Update one or more connect parameters
+ * for subsequent roaming cases if the driver or firmware uses internal
+ * BSS selection. This command can be issued only while connected and it
+ * does not result in a change for the current association. Currently,
+ * only the %NL80211_ATTR_IE data is used and updated with this command.
+ *
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
*/
@@ -1069,6 +1133,10 @@
NL80211_CMD_CHANGE_NAN_CONFIG,
NL80211_CMD_NAN_MATCH,
+ NL80211_CMD_SET_MULTICAST_TO_UNICAST,
+
+ NL80211_CMD_UPDATE_CONNECT_PARAMS,
+
/* add new commands above here */
/* used to define NL80211_CMD_MAX below */
@@ -1638,8 +1706,16 @@
* the connection request from a station. nl80211_connect_failed_reason
* enum has different reasons of connection failure.
*
- * @NL80211_ATTR_SAE_DATA: SAE elements in Authentication frames. This starts
- * with the Authentication transaction sequence number field.
+ * @NL80211_ATTR_AUTH_DATA: Fields and elements in Authentication frames.
+ * This contains the authentication frame body (non-IE and IE data),
+ * excluding the Authentication algorithm number, i.e., starting at the
+ * Authentication transaction sequence number field. It is used with
+ * authentication algorithms that need special fields to be added into
+ * the frames (SAE and FILS). Currently, only the SAE cases use the
+ * initial two fields (Authentication transaction sequence number and
+ * Status code). However, those fields are included in the attribute data
+ * for all authentication algorithms to keep the attribute definition
+ * consistent.
*
* @NL80211_ATTR_VHT_CAPABILITY: VHT Capability information element (from
* association request when used with NL80211_CMD_NEW_STATION)
@@ -1936,10 +2012,61 @@
* attribute.
* @NL80211_ATTR_NAN_MATCH: used to report a match. This is a nested attribute.
* See &enum nl80211_nan_match_attributes.
+ * @NL80211_ATTR_FILS_KEK: KEK for FILS (Re)Association Request/Response frame
+ * protection.
+ * @NL80211_ATTR_FILS_NONCES: Nonces (part of AAD) for FILS (Re)Association
+ * Request/Response frame protection. This attribute contains the 16 octet
+ * STA Nonce followed by 16 octets of AP Nonce.
+ *
+ * @NL80211_ATTR_MULTICAST_TO_UNICAST_ENABLED: Indicates whether or not multicast
+ * packets should be sent out as unicast to all stations (flag attribute).
*
* @NL80211_ATTR_BSSID: The BSSID of the AP. Note that %NL80211_ATTR_MAC is also
* used in various commands/events for specifying the BSSID.
*
+ * @NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI: Relative RSSI threshold by which
+ * other BSSs have to be better or slightly worse than the current
+ * connected BSS so that they get reported to user space.
+ * This will give an opportunity to userspace to consider connecting to
+ * other matching BSSs which have better or slightly worse RSSI than
+ * the current connected BSS by using an offloaded operation to avoid
+ * unnecessary wakeups.
+ *
+ * @NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST: When present the RSSI level for BSSs in
+ * the specified band is to be adjusted before doing
+ * %NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI based comparison to figure out
+ * better BSSs. The attribute value is a packed structure
+ * value as specified by &struct nl80211_bss_select_rssi_adjust.
+ *
+ * @NL80211_ATTR_TIMEOUT_REASON: The reason for which an operation timed out.
+ * u32 attribute with an &enum nl80211_timeout_reason value. This is used,
+ * e.g., with %NL80211_CMD_CONNECT event.
+ *
+ * @NL80211_ATTR_FILS_ERP_USERNAME: EAP Re-authentication Protocol (ERP)
+ * username part of NAI used to refer keys rRK and rIK. This is used with
+ * %NL80211_CMD_CONNECT.
+ *
+ * @NL80211_ATTR_FILS_ERP_REALM: EAP Re-authentication Protocol (ERP) realm part
+ * of NAI specifying the domain name of the ER server. This is used with
+ * %NL80211_CMD_CONNECT.
+ *
+ * @NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM: Unsigned 16-bit ERP next sequence number
+ * to use in ERP messages. This is used in generating the FILS wrapped data
+ * for FILS authentication and is used with %NL80211_CMD_CONNECT.
+ *
+ * @NL80211_ATTR_FILS_ERP_RRK: ERP re-authentication Root Key (rRK) for the
+ * NAI specified by %NL80211_ATTR_FILS_ERP_USERNAME and
+ * %NL80211_ATTR_FILS_ERP_REALM. This is used for generating rIK and rMSK
+ * from successful FILS authentication and is used with
+ * %NL80211_CMD_CONNECT.
+ *
+ * @NL80211_ATTR_FILS_CACHE_ID: A 2-octet identifier advertised by a FILS AP
+ * identifying the scope of PMKSAs. This is used with
+ * @NL80211_CMD_SET_PMKSA and @NL80211_CMD_DEL_PMKSA.
+ *
+ * @NL80211_ATTR_PMK: PMK for the PMKSA identified by %NL80211_ATTR_PMKID.
+ * This is used with @NL80211_CMD_SET_PMKSA.
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -2198,7 +2325,7 @@
NL80211_ATTR_CONN_FAILED_REASON,
- NL80211_ATTR_SAE_DATA,
+ NL80211_ATTR_AUTH_DATA,
NL80211_ATTR_VHT_CAPABILITY,
@@ -2339,8 +2466,26 @@
NL80211_ATTR_NAN_FUNC,
NL80211_ATTR_NAN_MATCH,
+ NL80211_ATTR_FILS_KEK,
+ NL80211_ATTR_FILS_NONCES,
+
+ NL80211_ATTR_MULTICAST_TO_UNICAST_ENABLED,
+
NL80211_ATTR_BSSID,
+ NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI,
+ NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST,
+
+ NL80211_ATTR_TIMEOUT_REASON,
+
+ NL80211_ATTR_FILS_ERP_USERNAME,
+ NL80211_ATTR_FILS_ERP_REALM,
+ NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM,
+ NL80211_ATTR_FILS_ERP_RRK,
+ NL80211_ATTR_FILS_CACHE_ID,
+
+ NL80211_ATTR_PMK,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -2352,6 +2497,7 @@
#define NL80211_ATTR_SCAN_GENERATION NL80211_ATTR_GENERATION
#define NL80211_ATTR_MESH_PARAMS NL80211_ATTR_MESH_CONFIG
#define NL80211_ATTR_IFACE_SOCKET_OWNER NL80211_ATTR_SOCKET_OWNER
+#define NL80211_ATTR_SAE_DATA NL80211_ATTR_AUTH_DATA
/*
* Allow user space programs to use #ifdef on new attributes by defining them
@@ -3032,6 +3178,13 @@
* how this API was implemented in the past. Also, due to the same problem,
* the only way to create a matchset with only an RSSI filter (with this
* attribute) is if there's only a single matchset with the RSSI attribute.
+ * @NL80211_SCHED_SCAN_MATCH_ATTR_RELATIVE_RSSI: Flag indicating whether
+ * %NL80211_SCHED_SCAN_MATCH_ATTR_RSSI to be used as absolute RSSI or
+ * relative to current bss's RSSI.
+ * @NL80211_SCHED_SCAN_MATCH_ATTR_RSSI_ADJUST: When present the RSSI level for
+ * BSS-es in the specified band is to be adjusted before doing
+ * RSSI-based BSS selection. The attribute value is a packed structure
+ * value as specified by &struct nl80211_bss_select_rssi_adjust.
* @NL80211_SCHED_SCAN_MATCH_ATTR_MAX: highest scheduled scan filter
* attribute number currently defined
* @__NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST: internal use
@@ -3041,6 +3194,8 @@
NL80211_SCHED_SCAN_MATCH_ATTR_SSID,
NL80211_SCHED_SCAN_MATCH_ATTR_RSSI,
+ NL80211_SCHED_SCAN_MATCH_ATTR_RELATIVE_RSSI,
+ NL80211_SCHED_SCAN_MATCH_ATTR_RSSI_ADJUST,
/* keep last */
__NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST,
@@ -3665,6 +3820,9 @@
* @NL80211_AUTHTYPE_FT: Fast BSS Transition (IEEE 802.11r)
* @NL80211_AUTHTYPE_NETWORK_EAP: Network EAP (some Cisco APs and mainly LEAP)
* @NL80211_AUTHTYPE_SAE: Simultaneous authentication of equals
+ * @NL80211_AUTHTYPE_FILS_SK: Fast Initial Link Setup shared key
+ * @NL80211_AUTHTYPE_FILS_SK_PFS: Fast Initial Link Setup shared key with PFS
+ * @NL80211_AUTHTYPE_FILS_PK: Fast Initial Link Setup public key
* @__NL80211_AUTHTYPE_NUM: internal
* @NL80211_AUTHTYPE_MAX: maximum valid auth algorithm
* @NL80211_AUTHTYPE_AUTOMATIC: determine automatically (if necessary by
@@ -3677,6 +3835,9 @@
NL80211_AUTHTYPE_FT,
NL80211_AUTHTYPE_NETWORK_EAP,
NL80211_AUTHTYPE_SAE,
+ NL80211_AUTHTYPE_FILS_SK,
+ NL80211_AUTHTYPE_FILS_SK_PFS,
+ NL80211_AUTHTYPE_FILS_PK,
/* keep last */
__NL80211_AUTHTYPE_NUM,
@@ -4285,6 +4446,9 @@
* of supported channel widths for radar detection.
* @NL80211_IFACE_COMB_RADAR_DETECT_REGIONS: u32 attribute containing the bitmap
* of supported regulatory regions for radar detection.
+ * @NL80211_IFACE_COMB_BI_MIN_GCD: u32 attribute specifying the minimum GCD of
+ * different beacon intervals supported by all the interface combinations
+ * in this group (if not present, all beacon intervals must be identical).
* @NUM_NL80211_IFACE_COMB: number of attributes
* @MAX_NL80211_IFACE_COMB: highest attribute number
*
@@ -4292,8 +4456,8 @@
* limits = [ #{STA} <= 1, #{AP} <= 1 ], matching BI, channels = 1, max = 2
* => allows an AP and a STA that must match BIs
*
- * numbers = [ #{AP, P2P-GO} <= 8 ], channels = 1, max = 8
- * => allows 8 of AP/GO
+ * numbers = [ #{AP, P2P-GO} <= 8 ], BI min gcd, channels = 1, max = 8,
+ * => allows 8 of AP/GO that can have BI gcd >= min gcd
*
* numbers = [ #{STA} <= 2 ], channels = 2, max = 2
* => allows two STAs on different channels
@@ -4319,6 +4483,7 @@
NL80211_IFACE_COMB_NUM_CHANNELS,
NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS,
NL80211_IFACE_COMB_RADAR_DETECT_REGIONS,
+ NL80211_IFACE_COMB_BI_MIN_GCD,
/* keep last */
NUM_NL80211_IFACE_COMB,
@@ -4639,6 +4804,20 @@
* configuration (AP/mesh) with HT rates.
* @NL80211_EXT_FEATURE_BEACON_RATE_VHT: Driver supports beacon rate
* configuration (AP/mesh) with VHT rates.
+ * @NL80211_EXT_FEATURE_FILS_STA: This driver supports Fast Initial Link Setup
+ * with user space SME (NL80211_CMD_AUTHENTICATE) in station mode.
+ * @NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA: This driver supports randomized TA
+ * in @NL80211_CMD_FRAME while not associated.
+ * @NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED: This driver supports
+ * randomized TA in @NL80211_CMD_FRAME while associated.
+ * @NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI: The driver supports sched_scan
+ * for reporting BSSs with better RSSI than the current connected BSS
+ * (%NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI).
+ * @NL80211_EXT_FEATURE_CQM_RSSI_LIST: With this driver the
+ * %NL80211_ATTR_CQM_RSSI_THOLD attribute accepts a list of zero or more
+ * RSSI threshold values to monitor rather than exactly one threshold.
+ * @NL80211_EXT_FEATURE_FILS_SK_OFFLOAD: Driver SME supports FILS shared key
+ * authentication with %NL80211_CMD_CONNECT.
*
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
@@ -4653,6 +4832,12 @@
NL80211_EXT_FEATURE_BEACON_RATE_LEGACY,
NL80211_EXT_FEATURE_BEACON_RATE_HT,
NL80211_EXT_FEATURE_BEACON_RATE_VHT,
+ NL80211_EXT_FEATURE_FILS_STA,
+ NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA,
+ NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED,
+ NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI,
+ NL80211_EXT_FEATURE_CQM_RSSI_LIST,
+ NL80211_EXT_FEATURE_FILS_SK_OFFLOAD,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
@@ -4692,6 +4877,21 @@
};
/**
+ * enum nl80211_timeout_reason - timeout reasons
+ *
+ * @NL80211_TIMEOUT_UNSPECIFIED: Timeout reason unspecified.
+ * @NL80211_TIMEOUT_SCAN: Scan (AP discovery) timed out.
+ * @NL80211_TIMEOUT_AUTH: Authentication timed out.
+ * @NL80211_TIMEOUT_ASSOC: Association timed out.
+ */
+enum nl80211_timeout_reason {
+ NL80211_TIMEOUT_UNSPECIFIED,
+ NL80211_TIMEOUT_SCAN,
+ NL80211_TIMEOUT_AUTH,
+ NL80211_TIMEOUT_ASSOC,
+};
+
+/**
* enum nl80211_scan_flags - scan request control flags
*
* Scan request control flags are used to control the handling
diff --git a/include/uapi/linux/seemp_api.h b/include/uapi/linux/seemp_api.h
index 4dfc257..a42ad4b 100644
--- a/include/uapi/linux/seemp_api.h
+++ b/include/uapi/linux/seemp_api.h
@@ -1,6 +1,8 @@
#ifndef _SEEMP_API_H_
#define _SEEMP_API_H_
+#define SEEMP_API_kernel__rtic 100000
+
#define SEEMP_API_kernel__oom_adjust_write 0
#define SEEMP_API_kernel__sendto 1
#define SEEMP_API_kernel__recvfrom 2
diff --git a/include/uapi/linux/seemp_param_id.h b/include/uapi/linux/seemp_param_id.h
index c72c579..d8b9f78 100644
--- a/include/uapi/linux/seemp_param_id.h
+++ b/include/uapi/linux/seemp_param_id.h
@@ -15,7 +15,11 @@
#define PARAM_ID_SENSOR 8
#define PARAM_ID_WINDOW_TYPE 9
#define PARAM_ID_WINDOW_FLAG 10
-#define NUM_PARAM_IDS 11
+#define PARAM_ID_RTIC_TYPE 11
+#define PARAM_ID_RTIC_ASSET_ID 12
+#define PARAM_ID_RTIC_ASSET_CATEGORY 13
+#define PARAM_ID_RTIC_RESPONSE 14
+#define NUM_PARAM_IDS 15
static inline int param_id_index(const char *param, const char *end)
{
@@ -44,6 +48,14 @@
id = 9;
else if ((len == 11) && !memcmp(param, "window_flag", 11))
id = 10;
+ else if ((len == 9) && !memcmp(param, "rtic_type", 9))
+ id = 11;
+ else if ((len == 8) && !memcmp(param, "asset_id", 8))
+ id = 12;
+ else if ((len == 14) && !memcmp(param, "asset_category", 14))
+ id = 13;
+ else if ((len == 8) && !memcmp(param, "response", 8))
+ id = 14;
return id;
}
@@ -86,6 +98,18 @@
case 10:
name = "window_flag";
break;
+ case 11:
+ name = "rtic_type";
+ break;
+ case 12:
+ name = "asset_id";
+ break;
+ case 13:
+ name = "asset_category";
+ break;
+ case 14:
+ name = "response";
+ break;
}
return name;
}
diff --git a/include/uapi/linux/spcom.h b/include/uapi/linux/spcom.h
index 9b6b9b7..39b1be0 100644
--- a/include/uapi/linux/spcom.h
+++ b/include/uapi/linux/spcom.h
@@ -30,6 +30,12 @@
* with special size SPCOM_GET_NEXT_REQUEST_SIZE.
*/
+/*
+ * Maximum number of channels between Secure Processor and HLOS,
+ * including predefined channels, like "sp_kernel".
+ */
+#define SPCOM_MAX_CHANNELS 0x20
+
/* Maximum size (including null) for channel names */
#define SPCOM_CHANNEL_NAME_SIZE 32
diff --git a/include/uapi/linux/usb/ch11.h b/include/uapi/linux/usb/ch11.h
index 361297e..576c704e 100644
--- a/include/uapi/linux/usb/ch11.h
+++ b/include/uapi/linux/usb/ch11.h
@@ -22,6 +22,9 @@
*/
#define USB_MAXCHILDREN 31
+/* See USB 3.1 spec Table 10-5 */
+#define USB_SS_MAXPORTS 15
+
/*
* Hub request types
*/
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index a8acc24..0e5ce0d 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -1051,6 +1051,30 @@
*/
#define USB_DT_USB_SSP_CAP_SIZE(ssac) (16 + ssac * 4)
+/*
+ * Configuration Summary descriptors: Defines a list of functions in the
+ * configuration. This descriptor may be used by Host software to decide
+ * which Configuration to use to obtain the desired functionality.
+ */
+#define USB_CAP_TYPE_CONFIG_SUMMARY 0x10
+
+struct function_class_info {
+ __u8 bClass;
+ __u8 bSubClass;
+ __u8 bProtocol;
+};
+
+struct usb_config_summary_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDevCapabilityType;
+ __u16 bcdVersion;
+ __u8 bConfigurationValue;
+ __u8 bMaxPower;
+ __u8 bNumFunctions;
+ struct function_class_info cs_info[];
+} __attribute__((packed));
+
/*-------------------------------------------------------------------------*/
/* USB_DT_WIRELESS_ENDPOINT_COMP: companion descriptor associated with
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index cf96ac1..731b2f0 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -779,6 +779,9 @@
#define V4L2_MPEG_VIDC_EXTRADATA_VPX_COLORSPACE \
V4L2_MPEG_VIDC_EXTRADATA_VPX_COLORSPACE
V4L2_MPEG_VIDC_EXTRADATA_VPX_COLORSPACE = 30,
+#define V4L2_MPEG_VIDC_EXTRADATA_UBWC_CR_STATS_INFO \
+ V4L2_MPEG_VIDC_EXTRADATA_UBWC_CR_STATS_INFO
+ V4L2_MPEG_VIDC_EXTRADATA_UBWC_CR_STATS_INFO = 31,
};
#define V4L2_CID_MPEG_VIDEO_MULTI_SLICE_DELIVERY_MODE \
@@ -1071,6 +1074,11 @@
V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_UNLIMITED,
};
+#define V4L2_CID_MPEG_VIDC_VIDEO_CONCEAL_COLOR_8BIT \
+ (V4L2_CID_MPEG_MSM_VIDC_BASE + 109)
+#define V4L2_CID_MPEG_VIDC_VIDEO_CONCEAL_COLOR_10BIT \
+ (V4L2_CID_MPEG_MSM_VIDC_BASE + 110)
+
/* Camera class control IDs */
#define V4L2_CID_CAMERA_CLASS_BASE (V4L2_CTRL_CLASS_CAMERA | 0x900)
diff --git a/include/uapi/media/Kbuild b/include/uapi/media/Kbuild
index 98844ac..4ded0a4 100644
--- a/include/uapi/media/Kbuild
+++ b/include/uapi/media/Kbuild
@@ -4,6 +4,7 @@
header-y += cam_isp.h
header-y += cam_isp_vfe.h
header-y += cam_isp_ife.h
+header-y += cam_jpeg.h
header-y += cam_req_mgr.h
header-y += cam_sensor.h
header-y += cam_sync.h
diff --git a/include/uapi/media/cam_jpeg.h b/include/uapi/media/cam_jpeg.h
new file mode 100644
index 0000000..f3082f3
--- /dev/null
+++ b/include/uapi/media/cam_jpeg.h
@@ -0,0 +1,117 @@
+#ifndef __UAPI_CAM_JPEG_H__
+#define __UAPI_CAM_JPEG_H__
+
+#include "cam_defs.h"
+
+/* enc, dma, cdm(enc/dma) are used in querycap */
+#define CAM_JPEG_DEV_TYPE_ENC 0
+#define CAM_JPEG_DEV_TYPE_DMA 1
+#define CAM_JPEG_DEV_TYPE_MAX 2
+
+#define CAM_JPEG_NUM_DEV_PER_RES_MAX 1
+
+/* definitions needed for jpeg acquire device */
+#define CAM_JPEG_RES_TYPE_ENC 0
+#define CAM_JPEG_RES_TYPE_DMA 1
+#define CAM_JPEG_RES_TYPE_MAX 2
+
+/* packet opcode types */
+#define CAM_JPEG_OPCODE_ENC_UPDATE 0
+#define CAM_JPEG_OPCODE_DMA_UPDATE 1
+
+/* ENC input port resource type */
+#define CAM_JPEG_ENC_INPUT_IMAGE 0x0
+
+/* ENC output port resource type */
+#define CAM_JPEG_ENC_OUTPUT_IMAGE 0x1
+
+#define CAM_JPEG_ENC_IO_IMAGES_MAX 0x2
+
+/* DMA input port resource type */
+#define CAM_JPEG_DMA_INPUT_IMAGE 0x0
+
+/* DMA output port resource type */
+#define CAM_JPEG_DMA_OUTPUT_IMAGE 0x1
+
+#define CAM_JPEG_DMA_IO_IMAGES_MAX 0x2
+
+#define CAM_JPEG_IMAGE_MAX 0x2
+
+/**
+ * struct cam_jpeg_dev_ver - Device information for particular hw type
+ *
+ * This is used to get device version info of JPEG ENC, JPEG DMA
+ * from hardware and use this info in CAM_QUERY_CAP IOCTL
+ *
+ * @size : Size of struct passed
+ * @dev_type: Hardware type for the cap info (jpeg enc, jpeg dma)
+ * @hw_ver: Major, minor and incr values of a device version
+ */
+struct cam_jpeg_dev_ver {
+ uint32_t size;
+ uint32_t dev_type;
+ struct cam_hw_version hw_ver;
+};
+
+/**
+ * struct cam_jpeg_query_cap_cmd - JPEG query device capability payload
+ *
+ * @dev_iommu_handle: Jpeg iommu handles for secure/non secure
+ * modes
+ * @cdm_iommu_handle: Iommu handles for secure/non secure modes
+ * @num_enc: Number of encoder
+ * @num_dma: Number of dma
+ * @dev_ver: Returned device capability array
+ */
+struct cam_jpeg_query_cap_cmd {
+ struct cam_iommu_handle dev_iommu_handle;
+ struct cam_iommu_handle cdm_iommu_handle;
+ uint32_t num_enc;
+ uint32_t num_dma;
+ struct cam_jpeg_dev_ver dev_ver[CAM_JPEG_DEV_TYPE_MAX];
+};
+
+/**
+ * struct cam_jpeg_res_info - JPEG output resource info
+ *
+ * @format: Format of the resource
+ * @width: Width in pixels
+ * @height: Height in lines
+ * @fps: Fps
+ */
+struct cam_jpeg_res_info {
+ uint32_t format;
+ uint32_t width;
+ uint32_t height;
+ uint32_t fps;
+};
+
+/**
+ * struct cam_jpeg_acquire_dev_info - An JPEG device info
+ *
+ * @dev_type: Device type (ENC/DMA)
+ * @reserved: Reserved Bytes
+ * @in_res: In resource info
+ * @out_res: Out resource info
+ */
+struct cam_jpeg_acquire_dev_info {
+ uint32_t dev_type;
+ uint32_t reserved;
+ struct cam_jpeg_res_info in_res;
+ struct cam_jpeg_res_info out_res;
+};
+
+/**
+ * struct cam_jpeg_config_inout_param_info - JPEG Config time
+ * input output params
+ *
+ * @clk_index: Input Param- clock selection index.(-1 default)
+ * @output_size: Output Param - jpeg encode/dma output size in
+ * bytes
+ */
+struct cam_jpeg_config_inout_param_info {
+ int32_t clk_index;
+ int32_t output_size;
+};
+
+#endif /* __UAPI_CAM_JPEG_H__ */
diff --git a/include/uapi/media/cam_req_mgr.h b/include/uapi/media/cam_req_mgr.h
index e6c1a45..23a8ccf 100644
--- a/include/uapi/media/cam_req_mgr.h
+++ b/include/uapi/media/cam_req_mgr.h
@@ -22,6 +22,7 @@
#define CAM_ACTUATOR_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 9)
#define CAM_CCI_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 10)
#define CAM_FLASH_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 11)
+#define CAM_EEPROM_DEVICE_TYPE (CAM_DEVICE_TYPE_BASE + 12)
/* cam_req_mgr hdl info */
#define CAM_REQ_MGR_HDL_IDX_POS 8
@@ -43,6 +44,10 @@
#define V4L_EVENT_CAM_REQ_MGR_ERROR 1
#define V4L_EVENT_CAM_REQ_MGR_MAX 2
+/* SOF Event status */
+#define CAM_REQ_MGR_SOF_EVENT_SUCCESS 0
+#define CAM_REQ_MGR_SOF_EVENT_ERROR 1
+
/**
* Request Manager : flush_type
* @CAM_REQ_MGR_FLUSH_TYPE_ALL: Req mgr will remove all the pending
@@ -353,14 +358,18 @@
/**
* struct cam_req_mgr_frame_msg
- * @request_id: request id of frame
- * @frame_count: running count of frames
- * @timestamp: timestamp of frame
+ * @request_id: request id of the frame
+ * @frame_id: frame id of the frame
+ * @timestamp: timestamp of the frame
+ * @link_hdl: link handle associated with this message
+ * @sof_status: sof status success or fail
*/
struct cam_req_mgr_frame_msg {
uint64_t request_id;
- uint64_t frame_count;
+ uint64_t frame_id;
uint64_t timestamp;
+ int32_t link_hdl;
+ uint32_t sof_status;
};
/**
diff --git a/include/uapi/media/cam_sensor.h b/include/uapi/media/cam_sensor.h
index 83f1a02..ac370ba 100644
--- a/include/uapi/media/cam_sensor.h
+++ b/include/uapi/media/cam_sensor.h
@@ -6,7 +6,7 @@
#include <media/cam_defs.h>
#define CAM_SENSOR_PROBE_CMD (CAM_COMMON_OPCODE_MAX + 1)
-#define CAM_SENSOR_MAX_LED_TRIGGERS 3
+#define CAM_FLASH_MAX_LED_TRIGGERS 3
/**
* struct cam_sensor_query_cap - capabilities info for sensor
*
@@ -63,6 +63,18 @@
} __attribute__((packed));
/**
+ * struct cam_eeprom_query_cap_t - capabilities info for eeprom
+ *
+ * @slot_info : Indicates about the slotId or cell Index
+ * @eeprom_kernel_probe : Indicates about the kernel or userspace probe
+ */
+struct cam_eeprom_query_cap_t {
+ uint32_t slot_info;
+ uint16_t eeprom_kernel_probe;
+ uint16_t reserved;
+} __attribute__((packed));
+
+/**
* struct cam_cmd_i2c_info - Contains slave I2C related info
*
* @slave_addr : Slave address
@@ -360,7 +372,7 @@
uint16_t reserved;
uint32_t led_on_delay_ms;
uint32_t led_off_delay_ms;
- uint32_t led_current_ma[CAM_SENSOR_MAX_LED_TRIGGERS];
+ uint32_t led_current_ma[CAM_FLASH_MAX_LED_TRIGGERS];
} __attribute__((packed));
/**
@@ -379,7 +391,7 @@
uint16_t count;
uint8_t opcode;
uint8_t cmd_type;
- uint32_t led_current_ma[CAM_SENSOR_MAX_LED_TRIGGERS];
+ uint32_t led_current_ma[CAM_FLASH_MAX_LED_TRIGGERS];
} __attribute__((packed));
/**
@@ -409,9 +421,9 @@
*/
struct cam_flash_query_cap_info {
uint32_t slot_info;
- uint32_t max_current_flash[CAM_SENSOR_MAX_LED_TRIGGERS];
- uint32_t max_duration_flash[CAM_SENSOR_MAX_LED_TRIGGERS];
- uint32_t max_current_torch[CAM_SENSOR_MAX_LED_TRIGGERS];
+ uint32_t max_current_flash[CAM_FLASH_MAX_LED_TRIGGERS];
+ uint32_t max_duration_flash[CAM_FLASH_MAX_LED_TRIGGERS];
+ uint32_t max_current_torch[CAM_FLASH_MAX_LED_TRIGGERS];
} __attribute__ ((packed));
#endif
diff --git a/include/uapi/media/msm_vidc.h b/include/uapi/media/msm_vidc.h
index 038dd48..4fe325d 100644
--- a/include/uapi/media/msm_vidc.h
+++ b/include/uapi/media/msm_vidc.h
@@ -170,6 +170,16 @@
unsigned int data[1];
};
+struct msm_vidc_ubwc_cr_stats_info {
+ unsigned int stats_tile_32;
+ unsigned int stats_tile_64;
+ unsigned int stats_tile_96;
+ unsigned int stats_tile_128;
+ unsigned int stats_tile_160;
+ unsigned int stats_tile_192;
+ unsigned int stats_tile_256;
+};
+
struct msm_vidc_yuv_stats_payload {
unsigned int frame_qp;
unsigned int texture;
@@ -250,6 +260,12 @@
#define MSM_VIDC_EXTRADATA_PQ_INFO \
MSM_VIDC_EXTRADATA_PQ_INFO
MSM_VIDC_EXTRADATA_PQ_INFO = 0x00000017,
+#define MSM_VIDC_EXTRADATA_COLOUR_REMAPPING_INFO_SEI \
+ MSM_VIDC_EXTRADATA_COLOUR_REMAPPING_INFO_SEI
+ MSM_VIDC_EXTRADATA_COLOUR_REMAPPING_INFO_SEI = 0x00000018,
+#define MSM_VIDC_EXTRADATA_UBWC_CR_STAT_INFO \
+ MSM_VIDC_EXTRADATA_UBWC_CR_STAT_INFO
+ MSM_VIDC_EXTRADATA_UBWC_CR_STAT_INFO = 0x00000019,
MSM_VIDC_EXTRADATA_INPUT_CROP = 0x0700000E,
#define MSM_VIDC_EXTRADATA_OUTPUT_CROP \
MSM_VIDC_EXTRADATA_OUTPUT_CROP
diff --git a/include/uapi/sound/devdep_params.h b/include/uapi/sound/devdep_params.h
index 5061ec0..9e3133b 100644
--- a/include/uapi/sound/devdep_params.h
+++ b/include/uapi/sound/devdep_params.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2015,2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -66,4 +66,14 @@
uint32_t device;
} __packed;
+#define HWDEP_FE_BASE 3000 /*unique base for FE hw dep nodes*/
+struct snd_pcm_mmap_fd {
+ int32_t dir;
+ int32_t fd;
+ int32_t size;
+ int32_t actual_size;
+};
+
+#define SNDRV_PCM_IOCTL_MMAP_DATA_FD _IOWR('U', 0xd2, struct snd_pcm_mmap_fd)
+
#endif
diff --git a/init/Kconfig b/init/Kconfig
index 954de19..af000c7 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1276,16 +1276,6 @@
endif # CGROUPS
-config SCHED_HMP
- bool "Scheduler support for heterogenous multi-processor systems"
- select SCHED_WALT
- depends on SMP && FAIR_GROUP_SCHED
- help
- This feature will let the scheduler optimize task placement on
- systems made of heterogeneous cpus i.e cpus that differ either
- in their instructions per-cycle capability or the maximum
- frequency they can attain.
-
config SCHED_WALT
bool "WALT"
depends on SMP && FAIR_GROUP_SCHED
@@ -1293,14 +1283,6 @@
Use Window-Assisted Load Tracking (WALT) as an alternative or
additional load tracking scheme in lieu of or along with PELT.
-config SCHED_HMP_CSTATE_AWARE
- bool "CPU C-state aware scheduler"
- depends on SCHED_HMP
- help
- This feature will let the HMP scheduler optimize task placement
- with CPUs C-state. If this is enabled, scheduler places tasks
- onto the shallowest C-state CPU among the most power efficient CPUs.
-
config SCHED_CORE_CTL
bool "QTI Core Control"
depends on SMP
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index a37a10b..02fb438 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -1249,8 +1249,10 @@
timeo = MAX_SCHEDULE_TIMEOUT;
ret = netlink_attachskb(sock, nc, &timeo, NULL);
- if (ret == 1)
+ if (ret == 1) {
+ sock = NULL;
goto retry;
+ }
if (ret) {
sock = NULL;
nc = NULL;
diff --git a/kernel/Makefile b/kernel/Makefile
index eb26e12c..314e7d6 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -84,6 +84,7 @@
obj-$(CONFIG_KGDB) += debug/
obj-$(CONFIG_DETECT_HUNG_TASK) += hung_task.o
obj-$(CONFIG_LOCKUP_DETECTOR) += watchdog.o
+obj-$(CONFIG_HARDLOCKUP_DETECTOR) += watchdog_hld.o
obj-$(CONFIG_SECCOMP) += seccomp.o
obj-$(CONFIG_RELAY) += relay.o
obj-$(CONFIG_SYSCTL) += utsname_sysctl.o
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index a2ac051..f3721e1 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -11,7 +11,6 @@
*/
#include <linux/bpf.h>
#include <linux/err.h>
-#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
@@ -74,14 +73,10 @@
if (array_size >= U32_MAX - PAGE_SIZE)
return ERR_PTR(-ENOMEM);
-
/* allocate all map elements and zero-initialize them */
- array = kzalloc(array_size, GFP_USER | __GFP_NOWARN);
- if (!array) {
- array = vzalloc(array_size);
- if (!array)
- return ERR_PTR(-ENOMEM);
- }
+ array = bpf_map_area_alloc(array_size);
+ if (!array)
+ return ERR_PTR(-ENOMEM);
/* copy mandatory map attributes */
array->map.map_type = attr->map_type;
@@ -97,7 +92,7 @@
if (array_size >= U32_MAX - PAGE_SIZE ||
elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) {
- kvfree(array);
+ bpf_map_area_free(array);
return ERR_PTR(-ENOMEM);
}
out:
@@ -262,7 +257,7 @@
if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
bpf_array_free_percpu(array);
- kvfree(array);
+ bpf_map_area_free(array);
}
static const struct bpf_map_ops array_ops = {
@@ -319,7 +314,8 @@
/* make sure it's empty */
for (i = 0; i < array->map.max_entries; i++)
BUG_ON(array->ptrs[i] != NULL);
- kvfree(array);
+
+ bpf_map_area_free(array);
}
static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index ad1bc67..ad2f0ed 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -13,7 +13,6 @@
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
-#include <linux/vmalloc.h>
#include "percpu_freelist.h"
struct bucket {
@@ -84,14 +83,15 @@
free_percpu(pptr);
}
free_elems:
- vfree(htab->elems);
+ bpf_map_area_free(htab->elems);
}
static int prealloc_elems_and_freelist(struct bpf_htab *htab)
{
int err = -ENOMEM, i;
- htab->elems = vzalloc(htab->elem_size * htab->map.max_entries);
+ htab->elems = bpf_map_area_alloc(htab->elem_size *
+ htab->map.max_entries);
if (!htab->elems)
return -ENOMEM;
@@ -227,14 +227,10 @@
goto free_htab;
err = -ENOMEM;
- htab->buckets = kmalloc_array(htab->n_buckets, sizeof(struct bucket),
- GFP_USER | __GFP_NOWARN);
-
- if (!htab->buckets) {
- htab->buckets = vmalloc(htab->n_buckets * sizeof(struct bucket));
- if (!htab->buckets)
- goto free_htab;
- }
+ htab->buckets = bpf_map_area_alloc(htab->n_buckets *
+ sizeof(struct bucket));
+ if (!htab->buckets)
+ goto free_htab;
for (i = 0; i < htab->n_buckets; i++) {
INIT_HLIST_HEAD(&htab->buckets[i].head);
@@ -258,7 +254,7 @@
free_extra_elems:
free_percpu(htab->extra_elems);
free_buckets:
- kvfree(htab->buckets);
+ bpf_map_area_free(htab->buckets);
free_htab:
kfree(htab);
return ERR_PTR(err);
@@ -715,7 +711,7 @@
pcpu_freelist_destroy(&htab->freelist);
}
free_percpu(htab->extra_elems);
- kvfree(htab->buckets);
+ bpf_map_area_free(htab->buckets);
kfree(htab);
}
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 732ae16..be85191 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -7,7 +7,6 @@
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
-#include <linux/vmalloc.h>
#include <linux/stacktrace.h>
#include <linux/perf_event.h>
#include "percpu_freelist.h"
@@ -32,7 +31,7 @@
u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size;
int err;
- smap->elems = vzalloc(elem_size * smap->map.max_entries);
+ smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries);
if (!smap->elems)
return -ENOMEM;
@@ -45,7 +44,7 @@
return 0;
free_elems:
- vfree(smap->elems);
+ bpf_map_area_free(smap->elems);
return err;
}
@@ -76,12 +75,9 @@
if (cost >= U32_MAX - PAGE_SIZE)
return ERR_PTR(-E2BIG);
- smap = kzalloc(cost, GFP_USER | __GFP_NOWARN);
- if (!smap) {
- smap = vzalloc(cost);
- if (!smap)
- return ERR_PTR(-ENOMEM);
- }
+ smap = bpf_map_area_alloc(cost);
+ if (!smap)
+ return ERR_PTR(-ENOMEM);
err = -E2BIG;
cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
@@ -112,7 +108,7 @@
put_buffers:
put_callchain_buffers();
free_smap:
- kvfree(smap);
+ bpf_map_area_free(smap);
return ERR_PTR(err);
}
@@ -262,9 +258,9 @@
/* wait for bpf programs to complete before freeing stack map */
synchronize_rcu();
- vfree(smap->elems);
+ bpf_map_area_free(smap->elems);
pcpu_freelist_destroy(&smap->freelist);
- kvfree(smap);
+ bpf_map_area_free(smap);
put_callchain_buffers();
}
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 5e668da..cd62aea 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -12,6 +12,8 @@
#include <linux/bpf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
@@ -48,6 +50,30 @@
list_add(&tl->list_node, &bpf_map_types);
}
+void *bpf_map_area_alloc(size_t size)
+{
+ /* We definitely need __GFP_NORETRY, so OOM killer doesn't
+ * trigger under memory pressure as we really just want to
+ * fail instead.
+ */
+ const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
+ void *area;
+
+ if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
+ area = kmalloc(size, GFP_USER | flags);
+ if (area != NULL)
+ return area;
+ }
+
+ return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | flags,
+ PAGE_KERNEL);
+}
+
+void bpf_map_area_free(void *area)
+{
+ kvfree(area);
+}
+
int bpf_map_precharge_memlock(u32 pages)
{
struct user_struct *user = get_current_user();
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index fe158bd..8ce679d 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -885,6 +885,11 @@
if (err)
return err;
+ if (is_pointer_value(env, insn->src_reg)) {
+ verbose("R%d leaks addr into mem\n", insn->src_reg);
+ return -EACCES;
+ }
+
/* check whether atomic_add can read the memory */
err = check_mem_access(env, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_READ, -1);
@@ -2164,7 +2169,6 @@
case BPF_PROG_TYPE_SOCKET_FILTER:
case BPF_PROG_TYPE_SCHED_CLS:
case BPF_PROG_TYPE_SCHED_ACT:
- case BPF_PROG_TYPE_CGROUP_SKB:
return true;
default:
return false;
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index c0644f4..7bb21fd 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -6616,7 +6616,7 @@
struct task_struct *task;
int count = 0;
- seq_printf(seq, "css_set %p\n", cset);
+ seq_printf(seq, "css_set %pK\n", cset);
list_for_each_entry(task, &cset->tasks, cg_list) {
if (count++ > MAX_TASKS_SHOWN_PER_CSS)
diff --git a/kernel/configs/android-base-arm64.cfg b/kernel/configs/android-base-arm64.cfg
new file mode 100644
index 0000000..43f23d6
--- /dev/null
+++ b/kernel/configs/android-base-arm64.cfg
@@ -0,0 +1,5 @@
+# KEEP ALPHABETICALLY SORTED
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+CONFIG_SWP_EMULATION=y
diff --git a/kernel/configs/android-base.config b/kernel/configs/android-base.config
index fb6017e..80df048 100644
--- a/kernel/configs/android-base.config
+++ b/kernel/configs/android-base.config
@@ -12,17 +12,14 @@
CONFIG_ANDROID_BINDER_DEVICES=binder,hwbinder,vndbinder
CONFIG_ANDROID_BINDER_IPC=y
CONFIG_ANDROID_LOW_MEMORY_KILLER=y
-CONFIG_ARMV8_DEPRECATED=y
CONFIG_ASHMEM=y
CONFIG_AUDIT=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CGROUPS=y
CONFIG_CGROUP_CPUACCT=y
-CONFIG_CGROUP_DEBUG=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_SCHED=y
CONFIG_CGROUP_BPF=y
-CONFIG_CP15_BARRIER_EMULATION=y
CONFIG_DEFAULT_SECURITY_SELINUX=y
CONFIG_EMBEDDED=y
CONFIG_FB=y
@@ -156,9 +153,7 @@
CONFIG_SECURITY_NETWORK=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY_SELINUX=y
-CONFIG_SETEND_EMULATION=y
CONFIG_STAGING=y
-CONFIG_SWP_EMULATION=y
CONFIG_SYNC=y
CONFIG_TUN=y
CONFIG_UID_SYS_STATS=y
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 78b72d5..7e3dfa6 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -409,12 +409,25 @@
cpu_notify(CPU_ONLINE, cpu);
return 0;
}
+static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st);
static int bringup_wait_for_ap(unsigned int cpu)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
+ /* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
wait_for_completion(&st->done);
+ BUG_ON(!cpu_online(cpu));
+
+ /* Unpark the stopper thread and the hotplug thread of the target cpu */
+ stop_machine_unpark(cpu);
+ kthread_unpark(st->thread);
+
+ /* Should we go further up ? */
+ if (st->target > CPUHP_AP_ONLINE_IDLE) {
+ __cpuhp_kick_ap_work(st);
+ wait_for_completion(&st->done);
+ }
return st->result;
}
@@ -437,9 +450,7 @@
cpu_notify(CPU_UP_CANCELED, cpu);
return ret;
}
- ret = bringup_wait_for_ap(cpu);
- BUG_ON(!cpu_online(cpu));
- return ret;
+ return bringup_wait_for_ap(cpu);
}
/*
@@ -886,6 +897,9 @@
if (!cpu_present(cpu))
return -EINVAL;
+ if (!tasks_frozen && !cpu_isolated(cpu) && num_online_uniso_cpus() == 1)
+ return -EBUSY;
+
cpu_hotplug_begin();
cpuhp_tasks_frozen = tasks_frozen;
@@ -976,31 +990,20 @@
}
/*
- * Called from the idle task. We need to set active here, so we can kick off
- * the stopper thread and unpark the smpboot threads. If the target state is
- * beyond CPUHP_AP_ONLINE_IDLE we kick cpuhp thread and let it bring up the
- * cpu further.
+ * Called from the idle task. Wake up the controlling task which brings the
+ * stopper and the hotplug thread of the upcoming CPU up and then delegates
+ * the rest of the online bringup to the hotplug thread.
*/
void cpuhp_online_idle(enum cpuhp_state state)
{
struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
- unsigned int cpu = smp_processor_id();
/* Happens for the boot cpu */
if (state != CPUHP_AP_ONLINE_IDLE)
return;
st->state = CPUHP_AP_ONLINE_IDLE;
-
- /* Unpark the stopper thread and the hotplug thread of this cpu */
- stop_machine_unpark(cpu);
- kthread_unpark(st->thread);
-
- /* Should we go further up ? */
- if (st->target > CPUHP_AP_ONLINE_IDLE)
- __cpuhp_kick_ap_work(st);
- else
- complete(&st->done);
+ complete(&st->done);
}
/* Requires cpu_add_remove_lock to be held */
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d877aba..d1bed63 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -358,6 +358,32 @@
EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};
+/* The shared events struct. */
+#define SHARED_EVENTS_MAX 7
+
+struct shared_events_str {
+ /*
+ * Mutex to serialize access to shared list. Needed for the
+ * read/modify/write sequences.
+ */
+ struct mutex list_mutex;
+
+ /*
+ * A 1 bit for an index indicates that the slot is being used for
+ * an event. A 0 means that the slot can be used.
+ */
+ DECLARE_BITMAP(used_mask, SHARED_EVENTS_MAX);
+
+ /*
+ * The kernel events that are shared for a cpu;
+ */
+ struct perf_event *events[SHARED_EVENTS_MAX];
+ struct perf_event_attr attr[SHARED_EVENTS_MAX];
+ atomic_t refcount[SHARED_EVENTS_MAX];
+};
+
+static struct shared_events_str __percpu *shared_events;
+
/*
* perf_sched_events : >0 events exist
* perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
@@ -2279,7 +2305,7 @@
struct perf_event_context *ctx = event->ctx;
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
struct perf_event_context *task_ctx = cpuctx->task_ctx;
- bool activate = true;
+ bool reprogram = true;
int ret = 0;
raw_spin_lock(&cpuctx->ctx.lock);
@@ -2287,27 +2313,26 @@
raw_spin_lock(&ctx->lock);
task_ctx = ctx;
- /* If we're on the wrong CPU, try again */
- if (task_cpu(ctx->task) != smp_processor_id()) {
+ reprogram = (ctx->task == current);
+
+ /*
+ * If the task is running, it must be running on this CPU,
+ * otherwise we cannot reprogram things.
+ *
+ * If its not running, we don't care, ctx->lock will
+ * serialize against it becoming runnable.
+ */
+ if (task_curr(ctx->task) && !reprogram) {
ret = -ESRCH;
goto unlock;
}
- /*
- * If we're on the right CPU, see if the task we target is
- * current, if not we don't have to activate the ctx, a future
- * context switch will do that for us.
- */
- if (ctx->task != current)
- activate = false;
- else
- WARN_ON_ONCE(cpuctx->task_ctx && cpuctx->task_ctx != ctx);
-
+ WARN_ON_ONCE(reprogram && cpuctx->task_ctx && cpuctx->task_ctx != ctx);
} else if (task_ctx) {
raw_spin_lock(&task_ctx->lock);
}
- if (activate) {
+ if (reprogram) {
ctx_sched_out(ctx, cpuctx, EVENT_TIME);
add_event_to_ctx(event, ctx);
ctx_resched(cpuctx, task_ctx);
@@ -2358,13 +2383,36 @@
/*
* Installing events is tricky because we cannot rely on ctx->is_active
* to be set in case this is the nr_events 0 -> 1 transition.
+ *
+ * Instead we use task_curr(), which tells us if the task is running.
+ * However, since we use task_curr() outside of rq::lock, we can race
+ * against the actual state. This means the result can be wrong.
+ *
+ * If we get a false positive, we retry, this is harmless.
+ *
+ * If we get a false negative, things are complicated. If we are after
+ * perf_event_context_sched_in() ctx::lock will serialize us, and the
+ * value must be correct. If we're before, it doesn't matter since
+ * perf_event_context_sched_in() will program the counter.
+ *
+ * However, this hinges on the remote context switch having observed
+ * our task->perf_event_ctxp[] store, such that it will in fact take
+ * ctx::lock in perf_event_context_sched_in().
+ *
+ * We do this by task_function_call(), if the IPI fails to hit the task
+ * we know any future context switch of task must see the
+ * perf_event_ctpx[] store.
*/
-again:
+
/*
- * Cannot use task_function_call() because we need to run on the task's
- * CPU regardless of whether its current or not.
+ * This smp_mb() orders the task->perf_event_ctxp[] store with the
+ * task_cpu() load, such that if the IPI then does not find the task
+ * running, a future context switch of that task must observe the
+ * store.
*/
- if (!cpu_function_call(task_cpu(task), __perf_install_in_context, event))
+ smp_mb();
+again:
+ if (!task_function_call(task, __perf_install_in_context, event))
return;
raw_spin_lock_irq(&ctx->lock);
@@ -2378,12 +2426,16 @@
raw_spin_unlock_irq(&ctx->lock);
return;
}
- raw_spin_unlock_irq(&ctx->lock);
/*
- * Since !ctx->is_active doesn't mean anything, we must IPI
- * unconditionally.
+ * If the task is not running, ctx->lock will avoid it becoming so,
+ * thus we can safely install the event.
*/
- goto again;
+ if (task_curr(task)) {
+ raw_spin_unlock_irq(&ctx->lock);
+ goto again;
+ }
+ add_event_to_ctx(event, ctx);
+ raw_spin_unlock_irq(&ctx->lock);
}
/*
@@ -4053,6 +4105,35 @@
static void perf_addr_filters_splice(struct perf_event *event,
struct list_head *head);
+static int
+perf_event_delete_kernel_shared(struct perf_event *event)
+{
+ int rc = -1, cpu = event->cpu;
+ struct shared_events_str *shrd_events;
+ unsigned long idx;
+
+ if (!shared_events || (u32)cpu >= nr_cpu_ids)
+ return 0;
+
+ shrd_events = per_cpu_ptr(shared_events, cpu);
+
+ mutex_lock(&shrd_events->list_mutex);
+
+ for_each_set_bit(idx, shrd_events->used_mask, SHARED_EVENTS_MAX) {
+ if (shrd_events->events[idx] == event) {
+ if (atomic_dec_and_test(&shrd_events->refcount[idx])) {
+ clear_bit(idx, shrd_events->used_mask);
+ shrd_events->events[idx] = NULL;
+ }
+ rc = (int)atomic_read(&shrd_events->refcount[idx]);
+ break;
+ }
+ }
+
+ mutex_unlock(&shrd_events->list_mutex);
+ return rc;
+}
+
static void _free_event(struct perf_event *event)
{
irq_work_sync(&event->pending);
@@ -4190,8 +4271,12 @@
goto no_ctx;
}
- if (!is_kernel_event(event))
+ if (!is_kernel_event(event)) {
perf_remove_from_owner(event);
+ } else {
+ if (perf_event_delete_kernel_shared(event) > 0)
+ return 0;
+ }
ctx = perf_event_ctx_lock(event);
WARN_ON_ONCE(ctx->parent_ctx);
@@ -7086,21 +7171,6 @@
perf_output_end(&handle);
}
-static bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs)
-{
- /*
- * Due to interrupt latency (AKA "skid"), we may enter the
- * kernel before taking an overflow, even if the PMU is only
- * counting user events.
- * To avoid leaking information to userspace, we must always
- * reject kernel samples when exclude_kernel is set.
- */
- if (event->attr.exclude_kernel && !user_mode(regs))
- return false;
-
- return true;
-}
-
/*
* Generic event overflow handling, sampling.
*/
@@ -7148,12 +7218,6 @@
}
/*
- * For security, drop the skid kernel samples if necessary.
- */
- if (!sample_is_allowed(event, regs))
- return ret;
-
- /*
* XXX event_limit might not quite work as expected on inherited
* events
*/
@@ -10038,6 +10102,103 @@
return err;
}
+static struct perf_event *
+perf_event_create_kernel_shared_check(struct perf_event_attr *attr, int cpu,
+ struct task_struct *task,
+ perf_overflow_handler_t overflow_handler,
+ void *context)
+{
+ unsigned long idx;
+ struct perf_event *event;
+ struct shared_events_str *shrd_events;
+
+ /*
+ * Have to be per cpu events for sharing
+ */
+ if (!shared_events || (u32)cpu >= nr_cpu_ids)
+ return NULL;
+
+ /*
+ * Can't handle these type requests for sharing right now.
+ */
+ if (task || context || overflow_handler ||
+ (attr->type != PERF_TYPE_HARDWARE &&
+ attr->type != PERF_TYPE_RAW))
+ return NULL;
+
+ /*
+ * Using per_cpu_ptr (or could do cross cpu call which is what most of
+ * perf does to access per cpu data structures
+ */
+ shrd_events = per_cpu_ptr(shared_events, cpu);
+
+ mutex_lock(&shrd_events->list_mutex);
+
+ event = NULL;
+ for_each_set_bit(idx, shrd_events->used_mask, SHARED_EVENTS_MAX) {
+ if (memcmp(attr, &shrd_events->attr[idx],
+ sizeof(shrd_events->attr[idx])) == 0) {
+ atomic_inc(&shrd_events->refcount[idx]);
+ event = shrd_events->events[idx];
+ break;
+ }
+ }
+ mutex_unlock(&shrd_events->list_mutex);
+ return event;
+}
+
+static void
+perf_event_create_kernel_shared_add(struct perf_event_attr *attr, int cpu,
+ struct task_struct *task,
+ perf_overflow_handler_t overflow_handler,
+ void *context,
+ struct perf_event *event)
+{
+ unsigned long idx;
+ struct shared_events_str *shrd_events;
+
+ /*
+ * Have to be per cpu events for sharing
+ */
+ if (!shared_events || (u32)cpu >= nr_cpu_ids)
+ return;
+
+ /*
+ * Can't handle these type requests for sharing right now.
+ */
+ if (task || context || overflow_handler ||
+ (attr->type != PERF_TYPE_HARDWARE &&
+ attr->type != PERF_TYPE_RAW))
+ return;
+
+ /*
+ * Using per_cpu_ptr (or could do cross cpu call which is what most of
+ * perf does to access per cpu data structures
+ */
+ shrd_events = per_cpu_ptr(shared_events, cpu);
+
+ mutex_lock(&shrd_events->list_mutex);
+
+ /*
+ * If we are in this routine, we know that this event isn't already in
+ * the shared list. Check if slot available in shared list
+ */
+ idx = find_first_zero_bit(shrd_events->used_mask, SHARED_EVENTS_MAX);
+
+ if (idx >= SHARED_EVENTS_MAX)
+ goto out;
+
+ /*
+ * The event isn't in the list and there is an empty slot so add it.
+ */
+ shrd_events->attr[idx] = *attr;
+ shrd_events->events[idx] = event;
+ set_bit(idx, shrd_events->used_mask);
+ atomic_set(&shrd_events->refcount[idx], 1);
+out:
+ mutex_unlock(&shrd_events->list_mutex);
+}
+
/**
* perf_event_create_kernel_counter
*
@@ -10056,6 +10217,14 @@
int err;
/*
+ * Check if the requested attributes match a shared event
+ */
+ event = perf_event_create_kernel_shared_check(attr, cpu,
+ task, overflow_handler, context);
+ if (event)
+ return event;
+
+ /*
* Get the target context (task or percpu):
*/
@@ -10091,6 +10260,11 @@
perf_unpin_context(ctx);
mutex_unlock(&ctx->mutex);
+ /*
+ * Check if can add event to shared list
+ */
+ perf_event_create_kernel_shared_add(attr, cpu,
+ task, overflow_handler, context, event);
return event;
err_unlock:
@@ -10914,10 +11088,21 @@
void __init perf_event_init(void)
{
- int ret;
+ int ret, cpu;
idr_init(&pmu_idr);
+ shared_events = alloc_percpu(struct shared_events_str);
+ if (!shared_events) {
+ WARN(1, "alloc_percpu failed for shared_events struct");
+ } else {
+ for_each_possible_cpu(cpu) {
+ struct shared_events_str *shrd_events =
+ per_cpu_ptr(shared_events, cpu);
+
+ mutex_init(&shrd_events->list_mutex);
+ }
+ }
perf_event_init_all_cpus();
init_srcu_struct(&pmus_srcu);
perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
diff --git a/kernel/exit.c b/kernel/exit.c
index 83e8afa..35ff283 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -465,6 +465,7 @@
{
struct mm_struct *mm = tsk->mm;
struct core_state *core_state;
+ int mm_released;
mm_release(tsk, mm);
if (!mm)
@@ -511,9 +512,12 @@
enter_lazy_tlb(mm, current);
task_unlock(tsk);
mm_update_next_owner(mm);
- mmput(mm);
+
+ mm_released = mmput(mm);
if (test_thread_flag(TIF_MEMDIE))
exit_oom_victim();
+ if (mm_released)
+ set_tsk_thread_flag(tsk, TIF_MM_RELEASED);
}
static struct task_struct *find_alive_thread(struct task_struct *p)
diff --git a/kernel/extable.c b/kernel/extable.c
index e820cce..4f06fc3 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -66,7 +66,7 @@
return 0;
}
-int core_kernel_text(unsigned long addr)
+int notrace core_kernel_text(unsigned long addr)
{
if (addr >= (unsigned long)_stext &&
addr < (unsigned long)_etext)
diff --git a/kernel/fork.c b/kernel/fork.c
index 33663b0..39c0709 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -877,12 +877,17 @@
/*
* Decrement the use count and release all resources for an mm.
*/
-void mmput(struct mm_struct *mm)
+int mmput(struct mm_struct *mm)
{
+ int mm_freed = 0;
might_sleep();
- if (atomic_dec_and_test(&mm->mm_users))
+ if (atomic_dec_and_test(&mm->mm_users)) {
__mmput(mm);
+ mm_freed = 1;
+ }
+
+ return mm_freed;
}
EXPORT_SYMBOL_GPL(mmput);
@@ -1010,8 +1015,7 @@
mm = get_task_mm(task);
if (mm && mm != current->mm &&
- !ptrace_may_access(task, mode) &&
- !capable(CAP_SYS_RESOURCE)) {
+ !ptrace_may_access(task, mode)) {
mmput(mm);
mm = ERR_PTR(-EACCES);
}
@@ -1513,6 +1517,18 @@
if (!p)
goto fork_out;
+ /*
+ * This _must_ happen before we call free_task(), i.e. before we jump
+ * to any of the bad_fork_* labels. This is to avoid freeing
+ * p->set_child_tid which is (ab)used as a kthread's data pointer for
+ * kernel threads (PF_KTHREAD).
+ */
+ p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
+ /*
+ * Clear TID on mm_release()?
+ */
+ p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
+
ftrace_graph_init_task(p);
rt_mutex_init_task(p);
@@ -1674,11 +1690,6 @@
}
}
- p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
- /*
- * Clear TID on mm_release()?
- */
- p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
#ifdef CONFIG_BLOCK
p->plug = NULL;
#endif
diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c
index dac3724..4684b75 100644
--- a/kernel/irq/cpuhotplug.c
+++ b/kernel/irq/cpuhotplug.c
@@ -36,10 +36,32 @@
affinity = &available_cpus;
if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+ /*
+ * The order of preference for selecting a fallback CPU is
+ *
+ * (1) online and un-isolated CPU from default affinity
+ * (2) online and un-isolated CPU
+ * (3) online CPU
+ */
cpumask_andnot(&available_cpus, cpu_online_mask,
cpu_isolated_mask);
- if (cpumask_empty(affinity))
+ if (cpumask_intersects(&available_cpus, irq_default_affinity))
+ cpumask_and(&available_cpus, &available_cpus,
+ irq_default_affinity);
+ else if (cpumask_empty(&available_cpus))
affinity = cpu_online_mask;
+
+ /*
+ * We are overriding the affinity with all online and
+ * un-isolated cpus. irq_set_affinity_locked() call
+ * below notify this mask to PM QOS affinity listener.
+ * That results in applying the CPU_DMA_LATENCY QOS
+ * to all the CPUs specified in the mask. But the low
+ * level irqchip driver sets the affinity of an irq
+ * to only one CPU. So pick only one CPU from the
+ * prepared mask while overriding the user affinity.
+ */
+ affinity = cpumask_of(cpumask_any(affinity));
ret = true;
}
@@ -47,7 +69,7 @@
if (!c->irq_set_affinity) {
pr_debug("IRQ%u: unable to set affinity\n", d->irq);
} else {
- int r = irq_do_set_affinity(d, affinity, false);
+ int r = irq_set_affinity_locked(d, affinity, false);
if (r)
pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
d->irq, r);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 4f64490..c1195eb 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1311,8 +1311,10 @@
ret = __irq_set_trigger(desc,
new->flags & IRQF_TRIGGER_MASK);
- if (ret)
+ if (ret) {
+ irq_release_resources(desc);
goto out_mask;
+ }
}
desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index feaa813..88a02e3 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -114,6 +114,11 @@
goto free_cpumask;
}
+ if (cpumask_subset(new_value, cpu_isolated_mask)) {
+ err = -EINVAL;
+ goto free_cpumask;
+ }
+
/*
* Do not allow disabling IRQs completely - it's a too easy
* way to make the system unusable accidentally :-) At least
diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
index 037c321..38f7665 100644
--- a/kernel/kexec_file.c
+++ b/kernel/kexec_file.c
@@ -25,13 +25,6 @@
#include <linux/vmalloc.h>
#include "kexec_internal.h"
-/*
- * Declare these symbols weak so that if architecture provides a purgatory,
- * these will be overridden.
- */
-char __weak kexec_purgatory[0];
-size_t __weak kexec_purgatory_size = 0;
-
static int kexec_calculate_store_digests(struct kimage *image);
/* Architectures can provide this probe function */
diff --git a/kernel/kexec_internal.h b/kernel/kexec_internal.h
index 0a52315..f95fd2c 100644
--- a/kernel/kexec_internal.h
+++ b/kernel/kexec_internal.h
@@ -37,6 +37,8 @@
};
void kimage_file_post_load_cleanup(struct kimage *image);
+extern char kexec_purgatory[];
+extern size_t kexec_purgatory_size;
#else /* CONFIG_KEXEC_FILE */
static inline void kimage_file_post_load_cleanup(struct kimage *image) { }
#endif /* CONFIG_KEXEC_FILE */
diff --git a/kernel/locking/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c
index 2bef4ab..a608f7a 100644
--- a/kernel/locking/rwsem-spinlock.c
+++ b/kernel/locking/rwsem-spinlock.c
@@ -233,8 +233,8 @@
out_nolock:
list_del(&waiter.list);
- if (!list_empty(&sem->wait_list))
- __rwsem_do_wake(sem, 1);
+ if (!list_empty(&sem->wait_list) && sem->count >= 0)
+ __rwsem_do_wake(sem, 0);
raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
return -EINTR;
diff --git a/kernel/panic.c b/kernel/panic.c
index e6480e2..dbec387 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -249,7 +249,7 @@
* Delay timeout seconds before rebooting the machine.
* We can't use the "normal" timers since we just panicked.
*/
- pr_emerg("Rebooting in %d seconds..", panic_timeout);
+ pr_emerg("Rebooting in %d seconds..\n", panic_timeout);
for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
touch_nmi_watchdog();
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 0854263..009f788 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -284,6 +284,9 @@
if (req->node.prio > qos_val[cpu])
qos_val[cpu] = req->node.prio;
break;
+ case PM_QOS_SUM:
+ qos_val[cpu] += req->node.prio;
+ break;
default:
BUG();
break;
@@ -585,7 +588,12 @@
if (irq_can_set_affinity(req->irq)) {
int ret = 0;
struct irq_desc *desc = irq_to_desc(req->irq);
- struct cpumask *mask = desc->irq_data.common->affinity;
+ struct cpumask *mask;
+
+ if (!desc)
+ break;
+
+ mask = desc->irq_data.common->affinity;
/* Get the current affinity */
cpumask_copy(&req->cpus_affine, mask);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 56583e7..e3944c4 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1767,6 +1767,7 @@
if (READ_ONCE(rdp_leader->nocb_leader_sleep) || force) {
/* Prior smp_mb__after_atomic() orders against prior enqueue. */
WRITE_ONCE(rdp_leader->nocb_leader_sleep, false);
+ smp_mb(); /* ->nocb_leader_sleep before swake_up(). */
swake_up(&rdp_leader->nocb_wq);
}
}
@@ -2021,6 +2022,7 @@
* nocb_gp_head, where they await a grace period.
*/
gotcbs = false;
+ smp_mb(); /* wakeup before ->nocb_head reads. */
for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
rdp->nocb_gp_head = READ_ONCE(rdp->nocb_head);
if (!rdp->nocb_gp_head)
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index 27a7574..f6cce95 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -17,9 +17,8 @@
obj-y += core.o loadavg.o clock.o cputime.o
obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
-obj-y += wait.o swait.o completion.o idle.o sched_avg.o
-obj-$(CONFIG_SCHED_HMP) += hmp.o boost.o
-obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o energy.o
+obj-y += wait.o swait.o completion.o idle.o
+obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o energy.o sched_avg.o
obj-$(CONFIG_SCHED_WALT) += walt.o boost.o
obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
obj-$(CONFIG_SCHEDSTATS) += stats.o
diff --git a/kernel/sched/boost.c b/kernel/sched/boost.c
index 1a3309b..1ccd19d 100644
--- a/kernel/sched/boost.c
+++ b/kernel/sched/boost.c
@@ -32,7 +32,7 @@
{
struct rq *rq = cpu_rq(cpu);
- if (!test_and_set_bit(BOOST_KICK, &rq->hmp_flags))
+ if (!test_and_set_bit(BOOST_KICK, &rq->walt_flags))
smp_send_reschedule(cpu);
}
@@ -57,14 +57,14 @@
int cpu = smp_processor_id();
struct rq *rq = cpu_rq(cpu);
- return test_bit(BOOST_KICK, &rq->hmp_flags);
+ return test_bit(BOOST_KICK, &rq->walt_flags);
}
void clear_boost_kick(int cpu)
{
struct rq *rq = cpu_rq(cpu);
- clear_bit(BOOST_KICK, &rq->hmp_flags);
+ clear_bit(BOOST_KICK, &rq->walt_flags);
}
/*
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 30a1b34..7722ade 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1074,7 +1074,6 @@
struct migration_arg *arg = data;
struct task_struct *p = arg->task;
struct rq *rq = this_rq();
- int src_cpu = cpu_of(rq);
bool moved = false;
/*
@@ -1109,9 +1108,6 @@
local_irq_enable();
- if (moved)
- notify_migration(src_cpu, arg->dest_cpu, false, p);
-
return 0;
}
@@ -1287,7 +1283,7 @@
#endif
#endif
- trace_sched_migrate_task(p, new_cpu, pct_task_load(p));
+ trace_sched_migrate_task(p, new_cpu, task_util(p));
if (task_cpu(p) != new_cpu) {
if (p->sched_class->migrate_task_rq)
@@ -1685,7 +1681,7 @@
return cpu;
}
-void update_avg(u64 *avg, u64 sample)
+static void update_avg(u64 *avg, u64 sample)
{
s64 diff = sample - *avg;
*avg += diff >> 3;
@@ -2091,12 +2087,9 @@
struct related_thread_group *grp = NULL;
int src_cpu;
bool notif_required = false;
- bool freq_notif_allowed = !(wake_flags & WF_NO_NOTIFIER);
bool check_group = false;
#endif
- wake_flags &= ~WF_NO_NOTIFIER;
-
/*
* If we are going to wake up a thread waiting for CONDITION we
* need to ensure that CONDITION=1 done by the caller can not be
@@ -2196,9 +2189,6 @@
notif_required = true;
}
- if (!__task_in_cum_window_demand(cpu_rq(cpu), p))
- inc_cum_window_demand(cpu_rq(cpu), p, task_load(p));
-
note_task_waking(p, wallclock);
#endif /* CONFIG_SMP */
@@ -2208,18 +2198,14 @@
out:
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-#ifdef CONFIG_SMP
- if (freq_notif_allowed) {
- if (notif_required && !same_freq_domain(src_cpu, cpu)) {
- check_for_freq_change(cpu_rq(cpu),
- false, check_group);
- check_for_freq_change(cpu_rq(src_cpu),
- false, check_group);
- } else if (success) {
- check_for_freq_change(cpu_rq(cpu), true, false);
- }
+ if (success && sched_predl) {
+ raw_spin_lock_irqsave(&cpu_rq(cpu)->lock, flags);
+ if (do_pl_notif(cpu_rq(cpu)))
+ cpufreq_update_util(cpu_rq(cpu),
+ SCHED_CPUFREQ_WALT |
+ SCHED_CPUFREQ_PL);
+ raw_spin_unlock_irqrestore(&cpu_rq(cpu)->lock, flags);
}
-#endif
return success;
}
@@ -2271,8 +2257,6 @@
update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
- if (!__task_in_cum_window_demand(rq, p))
- inc_cum_window_demand(rq, p, task_load(p));
cpufreq_update_util(rq, 0);
ttwu_activate(rq, p, ENQUEUE_WAKEUP);
note_task_waking(p, wallclock);
@@ -2302,26 +2286,6 @@
}
EXPORT_SYMBOL(wake_up_process);
-/**
- * wake_up_process_no_notif - Wake up a specific process without notifying
- * governor
- * @p: The process to be woken up.
- *
- * Attempt to wake up the nominated process and move it to the set of runnable
- * processes.
- *
- * Return: 1 if the process was woken up, 0 if it was already running.
- *
- * It may be assumed that this function implies a write memory barrier before
- * changing the task state if and only if any tasks are woken up.
- */
-int wake_up_process_no_notif(struct task_struct *p)
-{
- WARN_ON(task_is_stopped_or_traced(p));
- return try_to_wake_up(p, TASK_NORMAL, WF_NO_NOTIFIER);
-}
-EXPORT_SYMBOL(wake_up_process_no_notif);
-
int wake_up_state(struct task_struct *p, unsigned int state)
{
return try_to_wake_up(p, state, 0);
@@ -3165,7 +3129,7 @@
*load = rq->load.weight;
}
-#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_HMP)
+#ifdef CONFIG_SMP
/*
* sched_exec - execve() is a valuable balancing opportunity, because at
@@ -3692,15 +3656,10 @@
update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, 0);
update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, 0);
cpufreq_update_util(rq, 0);
- if (!is_idle_task(prev) && !prev->on_rq)
- update_avg_burst(prev);
-
rq->nr_switches++;
rq->curr = next;
++*switch_count;
- set_task_last_switch_out(prev, wallclock);
-
trace_sched_switch(preempt, prev, next);
rq = context_switch(rq, prev, next, cookie); /* unlocks the rq */
} else {
@@ -5787,7 +5746,7 @@
BUG_ON(cpu_online(smp_processor_id()));
if (mm != &init_mm) {
- switch_mm_irqs_off(mm, &init_mm, current);
+ switch_mm(mm, &init_mm, current);
finish_arch_post_lock_switch();
}
mmdrop(mm);
@@ -5953,7 +5912,6 @@
if (rq != dead_rq) {
raw_spin_unlock(&rq->lock);
raw_spin_unlock(&next->pi_lock);
- notify_migration(dead_rq->cpu, dest_cpu, true, next);
rq = dead_rq;
raw_spin_lock(&next->pi_lock);
raw_spin_lock(&rq->lock);
@@ -6000,13 +5958,7 @@
set_rq_online(rq);
raw_spin_unlock(&rq->lock);
- /*
- * We might have been in tickless state. Clear NOHZ flags to avoid
- * us being kicked for helping out with balancing
- */
- nohz_balance_clear_nohz_mask(cpu);
-
- clear_hmp_request(cpu);
+ clear_walt_request(cpu);
local_irq_enable();
return 0;
}
@@ -6739,6 +6691,9 @@
* Build an iteration mask that can exclude certain CPUs from the upwards
* domain traversal.
*
+ * Only CPUs that can arrive at this group should be considered to continue
+ * balancing.
+ *
* Asymmetric node setups can result in situations where the domain tree is of
* unequal depth, make sure to skip domains that already cover the entire
* range.
@@ -6750,18 +6705,31 @@
*/
static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
{
- const struct cpumask *span = sched_domain_span(sd);
+ const struct cpumask *sg_span = sched_group_cpus(sg);
struct sd_data *sdd = sd->private;
struct sched_domain *sibling;
int i;
- for_each_cpu(i, span) {
+ for_each_cpu(i, sg_span) {
sibling = *per_cpu_ptr(sdd->sd, i);
- if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
+
+ /*
+ * Can happen in the asymmetric case, where these siblings are
+ * unused. The mask will not be empty because those CPUs that
+ * do have the top domain _should_ span the domain.
+ */
+ if (!sibling->child)
+ continue;
+
+ /* If we would not end up here, we can't continue from here */
+ if (!cpumask_equal(sg_span, sched_domain_span(sibling->child)))
continue;
cpumask_set_cpu(i, sched_group_mask(sg));
}
+
+ /* We must not have empty masks here */
+ WARN_ON_ONCE(cpumask_empty(sched_group_mask(sg)));
}
/*
@@ -6785,7 +6753,7 @@
cpumask_clear(covered);
- for_each_cpu(i, span) {
+ for_each_cpu_wrap(i, span, cpu) {
struct cpumask *sg_span;
if (cpumask_test_cpu(i, covered))
@@ -8052,6 +8020,20 @@
int ret;
set_cpu_active(cpu, false);
+ /*
+ * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
+ * users of this state to go away such that all new such users will
+ * observe it.
+ *
+ * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
+ * not imply sync_sched(), so wait for both.
+ *
+ * Do sync before park smpboot threads to take care the rcu boost case.
+ */
+ if (IS_ENABLED(CONFIG_PREEMPT))
+ synchronize_rcu_mult(call_rcu, call_rcu_sched);
+ else
+ synchronize_rcu();
if (!sched_smp_initialized)
return 0;
@@ -8102,7 +8084,7 @@
BUG_ON(rq->nr_running != 1);
raw_spin_unlock_irqrestore(&rq->lock, flags);
- clear_hmp_request(cpu);
+ clear_walt_request(cpu);
calc_load_migrate(rq);
update_max_interval();
@@ -8112,22 +8094,6 @@
}
#endif
-#ifdef CONFIG_SCHED_SMT
-DEFINE_STATIC_KEY_FALSE(sched_smt_present);
-
-static void sched_init_smt(void)
-{
- /*
- * We've enumerated all CPUs and will assume that if any CPU
- * has SMT siblings, CPU0 will too.
- */
- if (cpumask_weight(cpu_smt_mask(0)) > 1)
- static_branch_enable(&sched_smt_present);
-}
-#else
-static inline void sched_init_smt(void) { }
-#endif
-
void __init sched_init_smp(void)
{
cpumask_var_t non_isolated_cpus;
@@ -8159,9 +8125,6 @@
init_sched_rt_class();
init_sched_dl_class();
-
- sched_init_smt();
-
sched_smp_initialized = true;
}
@@ -8222,9 +8185,6 @@
for (i = 0; i < WAIT_TABLE_SIZE; i++)
init_waitqueue_head(bit_wait_table + i);
-#ifdef CONFIG_SCHED_HMP
- pr_info("HMP scheduling enabled.\n");
-#endif
sched_boost_parse_dt();
init_clusters();
@@ -8345,56 +8305,8 @@
rq->avg_idle = 2*sysctl_sched_migration_cost;
rq->max_idle_balance_cost = sysctl_sched_migration_cost;
rq->push_task = NULL;
-#ifdef CONFIG_SCHED_WALT
- cpumask_set_cpu(i, &rq->freq_domain_cpumask);
- init_irq_work(&rq->irq_work, walt_irq_work);
- rq->hmp_stats.cumulative_runnable_avg = 0;
- rq->window_start = 0;
- rq->cum_window_start = 0;
- rq->hmp_stats.nr_big_tasks = 0;
- rq->hmp_flags = 0;
- rq->cur_irqload = 0;
- rq->avg_irqload = 0;
- rq->irqload_ts = 0;
- rq->static_cpu_pwr_cost = 0;
- rq->cc.cycles = 1;
- rq->cc.time = 1;
- rq->cstate = 0;
- rq->wakeup_latency = 0;
- rq->wakeup_energy = 0;
+ walt_sched_init(rq);
- /*
- * All cpus part of same cluster by default. This avoids the
- * need to check for rq->cluster being non-NULL in hot-paths
- * like select_best_cpu()
- */
- rq->cluster = &init_cluster;
- rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
- rq->nt_curr_runnable_sum = rq->nt_prev_runnable_sum = 0;
- memset(&rq->grp_time, 0, sizeof(struct group_cpu_time));
- rq->old_busy_time = 0;
- rq->old_estimated_time = 0;
- rq->old_busy_time_group = 0;
- rq->hmp_stats.pred_demands_sum = 0;
- rq->ed_task = NULL;
- rq->curr_table = 0;
- rq->prev_top = 0;
- rq->curr_top = 0;
-
- for (j = 0; j < NUM_TRACKED_WINDOWS; j++) {
- memset(&rq->load_subs[j], 0,
- sizeof(struct load_subtractions));
-
- rq->top_tasks[j] = kcalloc(NUM_LOAD_INDICES,
- sizeof(u8), GFP_NOWAIT);
-
- /* No other choice */
- BUG_ON(!rq->top_tasks[j]);
-
- clear_top_tasks_bitmap(rq->top_tasks_bitmap[j]);
- }
- rq->cum_window_demand = 0;
-#endif
INIT_LIST_HEAD(&rq->cfs_tasks);
rq_attach_root(rq, &def_root_domain);
@@ -8413,8 +8325,6 @@
i = alloc_related_thread_groups();
BUG_ON(i);
- set_hmp_defaults();
-
set_load_weight(&init_task);
/*
@@ -9123,6 +9033,32 @@
return ret;
}
+#ifdef CONFIG_PROC_SYSCTL
+int sched_updown_migrate_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ int ret;
+ unsigned int *data = (unsigned int *)table->data;
+ unsigned int old_val;
+ static DEFINE_MUTEX(mutex);
+
+ mutex_lock(&mutex);
+ old_val = *data;
+
+ ret = proc_douintvec_capacity(table, write, buffer, lenp, ppos);
+
+ if (!ret && write &&
+ sysctl_sched_capacity_margin > sysctl_sched_capacity_margin_down) {
+ ret = -EINVAL;
+ *data = old_val;
+ }
+ mutex_unlock(&mutex);
+
+ return ret;
+}
+#endif
+
#ifdef CONFIG_CGROUP_SCHED
inline struct task_group *css_tg(struct cgroup_subsys_state *css)
@@ -9512,13 +9448,6 @@
#endif /* CONFIG_RT_GROUP_SCHED */
static struct cftype cpu_files[] = {
-#ifdef CONFIG_SCHED_HMP
- {
- .name = "upmigrate_discourage",
- .read_u64 = cpu_upmigrate_discourage_read_u64,
- .write_u64 = cpu_upmigrate_discourage_write_u64,
- },
-#endif
#ifdef CONFIG_FAIR_GROUP_SCHED
{
.name = "shares",
@@ -9645,8 +9574,13 @@
wallclock = sched_ktime_clock();
update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
dequeue_task(rq, p, 0);
+ /*
+ * task's contribution is already removed from the
+ * cumulative window demand in dequeue. As the
+ * task's stats are reset, the next enqueue does
+ * not change the cumulative window demand.
+ */
reset_task_stats(p);
- dec_cum_window_demand(rq, p);
p->ravg.mark_start = wallclock;
p->ravg.sum_history[0] = EXITING_TASK_MARKER;
free_task_load_ptrs(p);
@@ -9656,3 +9590,5 @@
task_rq_unlock(rq, p, &rf);
}
#endif /* CONFIG_SCHED_WALT */
+
+__read_mostly bool sched_predl;
diff --git a/kernel/sched/core_ctl.c b/kernel/sched/core_ctl.c
index b140e55..e56af41 100644
--- a/kernel/sched/core_ctl.c
+++ b/kernel/sched/core_ctl.c
@@ -13,7 +13,6 @@
#define pr_fmt(fmt) "core_ctl: " fmt
#include <linux/init.h>
-#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
@@ -612,8 +611,7 @@
spin_lock_irqsave(&cluster->pending_lock, flags);
cluster->pending = true;
spin_unlock_irqrestore(&cluster->pending_lock, flags);
-
- wake_up_process_no_notif(cluster->core_ctl_thread);
+ wake_up_process(cluster->core_ctl_thread);
}
static u64 core_ctl_check_timestamp;
@@ -878,21 +876,18 @@
return 0;
}
-static int __ref cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+static int isolation_cpuhp_state(unsigned int cpu, bool online)
{
- uint32_t cpu = (uintptr_t)hcpu;
struct cpu_data *state = &per_cpu(cpu_state, cpu);
struct cluster_data *cluster = state->cluster;
unsigned int need;
- bool do_wakeup, unisolated = false;
+ bool do_wakeup = false, unisolated = false;
unsigned long flags;
if (unlikely(!cluster || !cluster->inited))
- return NOTIFY_DONE;
+ return 0;
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_ONLINE:
+ if (online) {
cluster->active_cpus = get_active_cpu_count(cluster);
/*
@@ -902,9 +897,7 @@
* reject trying to online CPUs.
*/
move_cpu_lru(state);
- break;
-
- case CPU_DEAD:
+ } else {
/*
* We don't want to have a CPU both offline and isolated.
* So unisolate a CPU that went down if it was isolated by us.
@@ -920,9 +913,6 @@
state->busy = 0;
cluster->active_cpus = get_active_cpu_count(cluster);
- break;
- default:
- return NOTIFY_DONE;
}
need = apply_limits(cluster, cluster->need_cpus);
@@ -934,12 +924,18 @@
if (do_wakeup)
wake_up_core_ctl_thread(cluster);
- return NOTIFY_OK;
+ return 0;
}
-static struct notifier_block __refdata cpu_notifier = {
- .notifier_call = cpu_callback,
-};
+static int core_ctl_isolation_online_cpu(unsigned int cpu)
+{
+ return isolation_cpuhp_state(cpu, true);
+}
+
+static int core_ctl_isolation_dead_cpu(unsigned int cpu)
+{
+ return isolation_cpuhp_state(cpu, false);
+}
/* ============================ init code ============================== */
@@ -1069,7 +1065,13 @@
if (should_skip(cpu_possible_mask))
return 0;
- register_cpu_notifier(&cpu_notifier);
+ cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "core_ctl/isolation:online",
+ core_ctl_isolation_online_cpu, NULL);
+
+ cpuhp_setup_state_nocalls(CPUHP_CORE_CTL_ISOLATION_DEAD,
+ "core_ctl/isolation:dead",
+ NULL, core_ctl_isolation_dead_cpu);
for_each_cpu(cpu, &cpus) {
int ret;
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 0a0e9aa..dce76d1 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -36,6 +36,10 @@
raw_spinlock_t update_lock; /* For shared policies */
u64 last_freq_update_time;
s64 freq_update_delay_ns;
+ u64 last_ws;
+ u64 curr_cycles;
+ u64 last_cyc_update_time;
+ unsigned long avg_cap;
unsigned int next_freq;
unsigned int cached_raw_freq;
unsigned long hispeed_util;
@@ -199,19 +203,63 @@
sg_cpu->iowait_boost >>= 1;
}
+static unsigned long freq_to_util(struct sugov_policy *sg_policy,
+ unsigned int freq)
+{
+ return mult_frac(sg_policy->max, freq,
+ sg_policy->policy->cpuinfo.max_freq);
+}
+
+#define KHZ 1000
+static void sugov_track_cycles(struct sugov_policy *sg_policy,
+ unsigned int prev_freq,
+ u64 upto)
+{
+ u64 delta_ns, cycles;
+ /* Track cycles in current window */
+ delta_ns = upto - sg_policy->last_cyc_update_time;
+ cycles = (prev_freq * delta_ns) / (NSEC_PER_SEC / KHZ);
+ sg_policy->curr_cycles += cycles;
+ sg_policy->last_cyc_update_time = upto;
+}
+
+static void sugov_calc_avg_cap(struct sugov_policy *sg_policy, u64 curr_ws,
+ unsigned int prev_freq)
+{
+ u64 last_ws = sg_policy->last_ws;
+ unsigned int avg_freq;
+
+ WARN_ON(curr_ws < last_ws);
+ if (curr_ws <= last_ws)
+ return;
+
+ /* If we skipped some windows */
+ if (curr_ws > (last_ws + sched_ravg_window)) {
+ avg_freq = prev_freq;
+ /* Reset tracking history */
+ sg_policy->last_cyc_update_time = curr_ws;
+ } else {
+ sugov_track_cycles(sg_policy, prev_freq, curr_ws);
+ avg_freq = sg_policy->curr_cycles;
+ avg_freq /= sched_ravg_window / (NSEC_PER_SEC / KHZ);
+ }
+ sg_policy->avg_cap = freq_to_util(sg_policy, avg_freq);
+ sg_policy->curr_cycles = 0;
+ sg_policy->last_ws = curr_ws;
+}
+
#define NL_RATIO 75
#define HISPEED_LOAD 90
static void sugov_walt_adjust(struct sugov_cpu *sg_cpu, unsigned long *util,
unsigned long *max)
{
struct sugov_policy *sg_policy = sg_cpu->sg_policy;
- unsigned long cap_cur = capacity_curr_of(sg_cpu->cpu);
bool is_migration = sg_cpu->flags & SCHED_CPUFREQ_INTERCLUSTER_MIG;
unsigned long nl = sg_cpu->walt_load.nl;
unsigned long cpu_util = sg_cpu->util;
bool is_hiload;
- is_hiload = (cpu_util >= mult_frac(cap_cur,
+ is_hiload = (cpu_util >= mult_frac(sg_policy->avg_cap,
HISPEED_LOAD,
100));
@@ -247,6 +295,8 @@
} else {
sugov_get_util(&util, &max, sg_cpu->cpu);
sugov_iowait_boost(sg_cpu, &util, &max);
+ sugov_calc_avg_cap(sg_policy, sg_cpu->walt_load.ws,
+ sg_policy->policy->cur);
sugov_walt_adjust(sg_cpu, &util, &max);
next_f = get_next_freq(sg_policy, util, max);
}
@@ -322,12 +372,11 @@
raw_spin_lock(&sg_policy->update_lock);
if (sg_policy->max != max) {
- hs_util = mult_frac(max,
- sg_policy->tunables->hispeed_freq,
- sg_policy->policy->cpuinfo.max_freq);
+ sg_policy->max = max;
+ hs_util = freq_to_util(sg_policy,
+ sg_policy->tunables->hispeed_freq);
hs_util = mult_frac(hs_util, TARGET_LOAD, 100);
sg_policy->hispeed_util = hs_util;
- sg_policy->max = max;
}
sg_cpu->util = util;
@@ -337,6 +386,9 @@
sugov_set_iowait_boost(sg_cpu, time, flags);
sg_cpu->last_update = time;
+ sugov_calc_avg_cap(sg_policy, sg_cpu->walt_load.ws,
+ sg_policy->policy->cur);
+
trace_sugov_util_update(sg_cpu->cpu, sg_cpu->util, max,
sg_cpu->walt_load.nl,
sg_cpu->walt_load.pl, flags);
@@ -352,8 +404,13 @@
static void sugov_work(struct kthread_work *work)
{
struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
+ unsigned long flags;
mutex_lock(&sg_policy->work_lock);
+ raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
+ sugov_track_cycles(sg_policy, sg_policy->policy->cur,
+ sched_ktime_clock());
+ raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
__cpufreq_driver_target(sg_policy->policy, sg_policy->next_freq,
CPUFREQ_RELATION_L);
mutex_unlock(&sg_policy->work_lock);
@@ -432,17 +489,19 @@
unsigned int val;
struct sugov_policy *sg_policy;
unsigned long hs_util;
+ unsigned long flags;
if (kstrtouint(buf, 10, &val))
return -EINVAL;
tunables->hispeed_freq = val;
list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) {
- hs_util = mult_frac(sg_policy->max,
- sg_policy->tunables->hispeed_freq,
- sg_policy->policy->cpuinfo.max_freq);
+ raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
+ hs_util = freq_to_util(sg_policy,
+ sg_policy->tunables->hispeed_freq);
hs_util = mult_frac(hs_util, TARGET_LOAD, 100);
sg_policy->hispeed_util = hs_util;
+ raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
}
return count;
@@ -695,6 +754,11 @@
sg_cpu->cpu = cpu;
sg_cpu->flags = SCHED_CPUFREQ_RT;
sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
+ }
+
+ for_each_cpu(cpu, policy->cpus) {
+ struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
+
cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
policy_is_shared(policy) ?
sugov_update_shared :
@@ -722,9 +786,14 @@
static void sugov_limits(struct cpufreq_policy *policy)
{
struct sugov_policy *sg_policy = policy->governor_data;
+ unsigned long flags;
if (!policy->fast_switch_enabled) {
mutex_lock(&sg_policy->work_lock);
+ raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
+ sugov_track_cycles(sg_policy, sg_policy->policy->cur,
+ sched_ktime_clock());
+ raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
cpufreq_policy_apply_limits(policy);
mutex_unlock(&sg_policy->work_lock);
}
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 10a807c..08d4511 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -19,41 +19,6 @@
#include <linux/slab.h>
-#ifdef CONFIG_SCHED_WALT
-
-static void
-inc_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p)
-{
- inc_cumulative_runnable_avg(&rq->hmp_stats, p);
-}
-
-static void
-dec_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p)
-{
- dec_cumulative_runnable_avg(&rq->hmp_stats, p);
-}
-
-static void
-fixup_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p,
- u32 new_task_load, u32 new_pred_demand)
-{
- s64 task_load_delta = (s64)new_task_load - task_load(p);
- s64 pred_demand_delta = PRED_DEMAND_DELTA;
-
- fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta,
- pred_demand_delta);
-}
-
-#else /* CONFIG_SCHED_WALT */
-
-static inline void
-inc_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p) { }
-
-static inline void
-dec_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p) { }
-
-#endif /* CONFIG_SCHED_WALT */
-
struct dl_bandwidth def_dl_bandwidth;
static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
@@ -865,7 +830,7 @@
WARN_ON(!dl_prio(prio));
dl_rq->dl_nr_running++;
add_nr_running(rq_of_dl_rq(dl_rq), 1);
- inc_hmp_sched_stats_dl(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));
+ walt_inc_cumulative_runnable_avg(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));
inc_dl_deadline(dl_rq, deadline);
inc_dl_migration(dl_se, dl_rq);
@@ -880,7 +845,7 @@
WARN_ON(!dl_rq->dl_nr_running);
dl_rq->dl_nr_running--;
sub_nr_running(rq_of_dl_rq(dl_rq), 1);
- dec_hmp_sched_stats_dl(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));
+ walt_dec_cumulative_runnable_avg(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));
dec_dl_deadline(dl_rq, dl_se->deadline);
dec_dl_migration(dl_se, dl_rq);
@@ -1845,7 +1810,7 @@
.update_curr = update_curr_dl,
#ifdef CONFIG_SCHED_WALT
- .fixup_hmp_sched_stats = fixup_hmp_sched_stats_dl,
+ .fixup_walt_sched_stats = fixup_walt_sched_stats_common,
#endif
};
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index ed9f6db..0f8c0b2 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -600,12 +600,6 @@
cfs_rq->throttle_count);
SEQ_printf(m, " .%-30s: %d\n", "runtime_enabled",
cfs_rq->runtime_enabled);
-#ifdef CONFIG_SCHED_WALT
- SEQ_printf(m, " .%-30s: %d\n", "nr_big_tasks",
- cfs_rq->hmp_stats.nr_big_tasks);
- SEQ_printf(m, " .%-30s: %llu\n", "cumulative_runnable_avg",
- cfs_rq->hmp_stats.cumulative_runnable_avg);
-#endif
#endif
#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -698,10 +692,6 @@
P(cpu_capacity);
#endif
#ifdef CONFIG_SCHED_WALT
-#ifdef CONFIG_SCHED_HMP
- P(static_cpu_pwr_cost);
- P(cluster->static_cluster_pwr_cost);
-#endif
P(cluster->load_scale_factor);
P(cluster->capacity);
P(cluster->max_possible_capacity);
@@ -710,10 +700,10 @@
P(cluster->max_freq);
P(cluster->exec_scale_factor);
#ifdef CONFIG_SCHED_WALT
- P(hmp_stats.nr_big_tasks);
+ P(walt_stats.nr_big_tasks);
#endif
- SEQ_printf(m, " .%-30s: %llu\n", "hmp_stats.cumulative_runnable_avg",
- rq->hmp_stats.cumulative_runnable_avg);
+ SEQ_printf(m, " .%-30s: %llu\n", "walt_stats.cumulative_runnable_avg",
+ rq->walt_stats.cumulative_runnable_avg);
#endif
#undef P
#undef PN
@@ -794,10 +784,6 @@
P(sysctl_sched_child_runs_first);
P(sysctl_sched_features);
#ifdef CONFIG_SCHED_WALT
-#ifdef CONFIG_SCHED_HMP
- P(sched_upmigrate);
- P(sched_downmigrate);
-#endif
P(sched_init_task_load_windows);
P(min_capacity);
P(max_capacity);
@@ -965,9 +951,6 @@
void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
unsigned long nr_switches;
- unsigned int load_avg;
-
- load_avg = pct_task_load(p);
SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr(p),
get_nr_threads(p));
@@ -1025,12 +1008,9 @@
P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
P_SCHEDSTAT(se.statistics.nr_wakeups_idle);
-#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
- __P(load_avg);
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
P(ravg.demand);
#endif
-#endif
avg_atom = p->se.sum_exec_runtime;
if (nr_switches)
avg_atom = div64_ul(avg_atom, nr_switches);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 45f404b..62a29ed 100755
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -37,98 +37,39 @@
#include "walt.h"
#include <trace/events/sched.h>
-/* QHMP/Zone forward declarations */
-
-struct lb_env;
-struct sd_lb_stats;
-struct sg_lb_stats;
-
#ifdef CONFIG_SCHED_WALT
-static void fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
- u32 new_task_load, u32 new_pred_demand);
-#endif
-#ifdef CONFIG_SCHED_HMP
-#ifdef CONFIG_CFS_BANDWIDTH
-static void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
- struct task_struct *p, int change_cra);
-static void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
- struct task_struct *p, int change_cra);
-
-static inline void dec_throttled_cfs_rq_hmp_stats(
- struct hmp_sched_stats *stats,
- struct cfs_rq *cfs_rq);
-
-static inline void inc_throttled_cfs_rq_hmp_stats(
- struct hmp_sched_stats *stats,
- struct cfs_rq *cfs_rq);
-
-static inline void init_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq);
-
-#else /* CONFIG_CFS_BANDWIDTH */
-static inline void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
- struct task_struct *p, int change_cra) { }
-static inline void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
- struct task_struct *p, int change_cra) { }
-#endif /* CONFIG_CFS_BANDWIDTH */
-
-#ifdef CONFIG_SMP
-
-static struct rq *find_busiest_queue_hmp(struct lb_env *env,
- struct sched_group *group);
-static int
-bail_inter_cluster_balance(struct lb_env *env, struct sd_lb_stats *sds);
-
-static bool update_sd_pick_busiest_active_balance(struct lb_env *env,
- struct sd_lb_stats *sds,
- struct sched_group *sg,
- struct sg_lb_stats *sgs);
-
-static int select_best_cpu(struct task_struct *p, int target, int reason,
- int sync);
-
-#ifdef CONFIG_NO_HZ_COMMON
-static int find_new_hmp_ilb(int type);
-static int _nohz_kick_needed_hmp(struct rq *rq, int cpu, int *type);
-#endif /* CONFIG_NO_HZ_COMMON */
-#endif /* CONFIG_SMP */
-#else /* CONFIG_SCHED_HMP */
-
-static inline void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
- struct task_struct *p, int change_cra) { }
-static inline void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
- struct task_struct *p, int change_cra) { }
-static inline void dec_throttled_cfs_rq_hmp_stats(
- struct hmp_sched_stats *stats,
- struct cfs_rq *cfs_rq) { }
-static inline void inc_throttled_cfs_rq_hmp_stats(
- struct hmp_sched_stats *stats,
- struct cfs_rq *cfs_rq) { }
-static inline void init_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq) { }
-
-#ifdef CONFIG_SMP
-
-static inline int
-bail_inter_cluster_balance(struct lb_env *env, struct sd_lb_stats *sds)
-{
- return 0;
-}
-
-static inline bool update_sd_pick_busiest_active_balance(struct lb_env *env,
- struct sd_lb_stats *sds,
- struct sched_group *sg,
- struct sg_lb_stats *sgs)
-{
- return false;
-}
-#endif /* CONFIG_SMP */
-
-#endif /* CONFIG_SCHED_HMP */
-
-#ifdef CONFIG_SCHED_WALT
static inline bool task_fits_max(struct task_struct *p, int cpu);
+static void walt_fixup_sched_stats_fair(struct rq *rq, struct task_struct *p,
+ u32 new_task_load, u32 new_pred_demand);
+static void walt_fixup_nr_big_tasks(struct rq *rq, struct task_struct *p,
+ int delta, bool inc);
+#endif /* CONFIG_SCHED_WALT */
+
+#if defined(CONFIG_SCHED_WALT) && defined(CONFIG_CFS_BANDWIDTH)
+
+static void walt_init_cfs_rq_stats(struct cfs_rq *cfs_rq);
+static void walt_inc_cfs_rq_stats(struct cfs_rq *cfs_rq,
+ struct task_struct *p);
+static void walt_dec_cfs_rq_stats(struct cfs_rq *cfs_rq,
+ struct task_struct *p);
+static void walt_inc_throttled_cfs_rq_stats(struct walt_sched_stats *stats,
+ struct cfs_rq *cfs_rq);
+static void walt_dec_throttled_cfs_rq_stats(struct walt_sched_stats *stats,
+ struct cfs_rq *cfs_rq);
+#else
+static inline void walt_init_cfs_rq_stats(struct cfs_rq *cfs_rq) {}
+static inline void
+walt_inc_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p) {}
+static inline void
+walt_dec_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p) {}
+
+#define walt_inc_throttled_cfs_rq_stats(...)
+#define walt_dec_throttled_cfs_rq_stats(...)
+
#endif
+
/*
* Targeted preemption latency for CPU-bound tasks:
* (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
@@ -221,8 +162,8 @@
* The margin used when comparing utilization with CPU capacity:
* util * 1024 < capacity * margin
*/
-unsigned int capacity_margin = 1078; /* ~5% margin */
-unsigned int capacity_margin_down = 1205; /* ~15% margin */
+unsigned int sysctl_sched_capacity_margin = 1078; /* ~5% margin */
+unsigned int sysctl_sched_capacity_margin_down = 1205; /* ~15% margin */
static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
@@ -4095,7 +4036,7 @@
if (dequeue)
dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
qcfs_rq->h_nr_running -= task_delta;
- dec_throttled_cfs_rq_hmp_stats(&qcfs_rq->hmp_stats, cfs_rq);
+ walt_dec_throttled_cfs_rq_stats(&qcfs_rq->walt_stats, cfs_rq);
if (qcfs_rq->load.weight)
dequeue = 0;
@@ -4103,7 +4044,7 @@
if (!se) {
sub_nr_running(rq, task_delta);
- dec_throttled_cfs_rq_hmp_stats(&rq->hmp_stats, cfs_rq);
+ walt_dec_throttled_cfs_rq_stats(&rq->walt_stats, cfs_rq);
}
cfs_rq->throttled = 1;
@@ -4125,12 +4066,6 @@
start_cfs_bandwidth(cfs_b);
raw_spin_unlock(&cfs_b->lock);
-
- /* Log effect on hmp stats after throttling */
- trace_sched_cpu_load_cgroup(rq, idle_cpu(cpu_of(rq)),
- sched_irqload(cpu_of(rq)),
- power_cost(cpu_of(rq), 0),
- cpu_temp(cpu_of(rq)));
}
void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
@@ -4168,7 +4103,7 @@
if (enqueue)
enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
cfs_rq->h_nr_running += task_delta;
- inc_throttled_cfs_rq_hmp_stats(&cfs_rq->hmp_stats, tcfs_rq);
+ walt_inc_throttled_cfs_rq_stats(&cfs_rq->walt_stats, tcfs_rq);
if (cfs_rq_throttled(cfs_rq))
break;
@@ -4176,18 +4111,12 @@
if (!se) {
add_nr_running(rq, task_delta);
- inc_throttled_cfs_rq_hmp_stats(&rq->hmp_stats, tcfs_rq);
+ walt_inc_throttled_cfs_rq_stats(&rq->walt_stats, tcfs_rq);
}
/* determine whether we need to wake up potentially idle cpu */
if (rq->curr == rq->idle && rq->cfs.nr_running)
resched_curr(rq);
-
- /* Log effect on hmp stats after un-throttling */
- trace_sched_cpu_load_cgroup(rq, idle_cpu(cpu_of(rq)),
- sched_irqload(cpu_of(rq)),
- power_cost(cpu_of(rq), 0),
- cpu_temp(cpu_of(rq)));
}
static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
@@ -4525,7 +4454,7 @@
{
cfs_rq->runtime_enabled = 0;
INIT_LIST_HEAD(&cfs_rq->throttled_list);
- init_cfs_rq_hmp_stats(cfs_rq);
+ walt_init_cfs_rq_stats(cfs_rq);
}
void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
@@ -4720,6 +4649,9 @@
int task_wakeup = flags & ENQUEUE_WAKEUP;
#endif
+#ifdef CONFIG_SCHED_WALT
+ p->misfit = !task_fits_max(p, rq->cpu);
+#endif
/*
* If in_iowait is set, the code below may not trigger any cpufreq
* utilization updates, so do it here explicitly with the IOWAIT flag
@@ -4743,7 +4675,7 @@
if (cfs_rq_throttled(cfs_rq))
break;
cfs_rq->h_nr_running++;
- inc_cfs_rq_hmp_stats(cfs_rq, p, 1);
+ walt_inc_cfs_rq_stats(cfs_rq, p);
flags = ENQUEUE_WAKEUP;
}
@@ -4751,7 +4683,7 @@
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
cfs_rq->h_nr_running++;
- inc_cfs_rq_hmp_stats(cfs_rq, p, 1);
+ walt_inc_cfs_rq_stats(cfs_rq, p);
if (cfs_rq_throttled(cfs_rq))
break;
@@ -4762,10 +4694,7 @@
if (!se) {
add_nr_running(rq, 1);
-#ifdef CONFIG_SCHED_WALT
- p->misfit = !task_fits_max(p, rq->cpu);
-#endif
- inc_rq_hmp_stats(rq, p, 1);
+ inc_rq_walt_stats(rq, p);
}
#ifdef CONFIG_SMP
@@ -4821,7 +4750,7 @@
if (cfs_rq_throttled(cfs_rq))
break;
cfs_rq->h_nr_running--;
- dec_cfs_rq_hmp_stats(cfs_rq, p, 1);
+ walt_dec_cfs_rq_stats(cfs_rq, p);
/* Don't dequeue parent if it has other entities besides us */
if (cfs_rq->load.weight) {
@@ -4841,7 +4770,7 @@
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
cfs_rq->h_nr_running--;
- dec_cfs_rq_hmp_stats(cfs_rq, p, 1);
+ walt_dec_cfs_rq_stats(cfs_rq, p);
if (cfs_rq_throttled(cfs_rq))
break;
@@ -4852,7 +4781,7 @@
if (!se) {
sub_nr_running(rq, 1);
- dec_rq_hmp_stats(rq, p, 1);
+ dec_rq_walt_stats(rq, p);
}
#ifdef CONFIG_SMP
@@ -5394,23 +5323,14 @@
return DIV_ROUND_UP(util << SCHED_CAPACITY_SHIFT, capacity);
}
-static inline int task_util(struct task_struct *p)
-{
-#ifdef CONFIG_SCHED_WALT
- if (!walt_disabled && sysctl_sched_use_walt_task_util) {
- u64 demand = p->ravg.demand;
-
- return (demand << 10) / sched_ravg_window;
- }
-#endif
- return p->se.avg.util_avg;
-}
-
static inline bool
-bias_to_waker_cpu(struct task_struct *p, int cpu)
+bias_to_waker_cpu(struct task_struct *p, int cpu, struct cpumask *rtg_target)
{
+ int rtg_target_cpu = rtg_target ? cpumask_first(rtg_target) : cpu;
+
return cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) &&
cpu_active(cpu) && !cpu_isolated(cpu) &&
+ capacity_orig_of(cpu) >= capacity_orig_of(rtg_target_cpu) &&
task_fits_max(p, cpu);
}
@@ -5918,9 +5838,9 @@
util += boosted_task_util(p);
if (capacity_orig_of(task_cpu(p)) > capacity_orig_of(cpu))
- margin = capacity_margin_down;
+ margin = sysctl_sched_capacity_margin_down;
else
- margin = capacity_margin;
+ margin = sysctl_sched_capacity_margin;
return (capacity_orig_of(cpu) * 1024) > (util * margin);
}
@@ -5948,7 +5868,7 @@
static bool __cpu_overutilized(int cpu, int delta)
{
return (capacity_orig_of(cpu) * 1024) <
- ((cpu_util(cpu) + delta) * capacity_margin);
+ ((cpu_util(cpu) + delta) * sysctl_sched_capacity_margin);
}
bool cpu_overutilized(int cpu)
@@ -6085,10 +6005,14 @@
struct sched_group *fit_group = NULL, *spare_group = NULL;
unsigned long min_load = ULONG_MAX, this_load = 0;
unsigned long fit_capacity = ULONG_MAX;
- unsigned long max_spare_capacity = capacity_margin - SCHED_CAPACITY_SCALE;
+ unsigned long max_spare_capacity;
+
int load_idx = sd->forkexec_idx;
int imbalance = 100 + (sd->imbalance_pct-100)/2;
+ max_spare_capacity = sysctl_sched_capacity_margin -
+ SCHED_CAPACITY_SCALE;
+
if (sd_flag & SD_BALANCE_WAKE)
load_idx = sd->wake_idx;
@@ -6220,43 +6144,6 @@
return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
}
-/*
- * Implement a for_each_cpu() variant that starts the scan at a given cpu
- * (@start), and wraps around.
- *
- * This is used to scan for idle CPUs; such that not all CPUs looking for an
- * idle CPU find the same CPU. The down-side is that tasks tend to cycle
- * through the LLC domain.
- *
- * Especially tbench is found sensitive to this.
- */
-
-static int cpumask_next_wrap(int n, const struct cpumask *mask, int start, int *wrapped)
-{
- int next;
-
-again:
- next = find_next_bit(cpumask_bits(mask), nr_cpumask_bits, n+1);
-
- if (*wrapped) {
- if (next >= start)
- return nr_cpumask_bits;
- } else {
- if (next >= nr_cpumask_bits) {
- *wrapped = 1;
- n = -1;
- goto again;
- }
- }
-
- return next;
-}
-
-#define for_each_cpu_wrap(cpu, mask, start, wrap) \
- for ((wrap) = 0, (cpu) = (start)-1; \
- (cpu) = cpumask_next_wrap((cpu), (mask), (start), &(wrap)), \
- (cpu) < nr_cpumask_bits; )
-
#ifdef CONFIG_SCHED_SMT
static inline void set_idle_cores(int cpu, int val)
@@ -6286,7 +6173,7 @@
* Since SMT siblings share all cache levels, inspecting this limited remote
* state should be fairly cheap.
*/
-void __update_idle_core(struct rq *rq)
+void update_idle_core(struct rq *rq)
{
int core = cpu_of(rq);
int cpu;
@@ -6316,17 +6203,14 @@
static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
{
struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
- int core, cpu, wrap;
-
- if (!static_branch_likely(&sched_smt_present))
- return -1;
+ int core, cpu;
if (!test_idle_cores(target, false))
return -1;
cpumask_and(cpus, sched_domain_span(sd), tsk_cpus_allowed(p));
- for_each_cpu_wrap(core, cpus, target, wrap) {
+ for_each_cpu_wrap(core, cpus, target) {
bool idle = true;
for_each_cpu(cpu, cpu_smt_mask(core)) {
@@ -6354,9 +6238,6 @@
{
int cpu;
- if (!static_branch_likely(&sched_smt_present))
- return -1;
-
for_each_cpu(cpu, cpu_smt_mask(target)) {
if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
continue;
@@ -6394,7 +6275,7 @@
u64 avg_cost, avg_idle = this_rq()->avg_idle;
u64 time, cost;
s64 delta;
- int cpu, wrap;
+ int cpu;
this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
if (!this_sd)
@@ -6411,7 +6292,7 @@
time = local_clock();
- for_each_cpu_wrap(cpu, sched_domain_span(sd), target, wrap) {
+ for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
continue;
if (cpu_isolated(cpu))
@@ -6726,6 +6607,7 @@
bool need_idle;
enum sched_boost_policy placement_boost = task_sched_boost(p) ?
sched_boost_policy() : SCHED_BOOST_NONE;
+ struct related_thread_group *grp;
sd = rcu_dereference(per_cpu(sd_ea, task_cpu(p)));
@@ -6741,22 +6623,17 @@
need_idle = wake_to_idle(p);
- if (sync && bias_to_waker_cpu(p, cpu)) {
+ grp = task_related_thread_group(p);
+ if (grp && grp->preferred_cluster)
+ rtg_target = &grp->preferred_cluster->cpus;
+
+ if (sync && bias_to_waker_cpu(p, cpu, rtg_target)) {
trace_sched_task_util_bias_to_waker(p, task_cpu(p),
task_util(p), cpu, cpu, 0, need_idle);
return cpu;
}
if (sysctl_sched_is_big_little) {
- struct related_thread_group *grp;
-
- rcu_read_lock();
- grp = task_related_thread_group(p);
- rcu_read_unlock();
-
- if (grp && grp->preferred_cluster)
- rtg_target = &grp->preferred_cluster->cpus;
-
task_util_boosted = boosted_task_util(p);
/*
@@ -7072,12 +6949,12 @@
int want_affine = 0;
int sync = wake_flags & WF_SYNC;
-#ifdef CONFIG_SCHED_HMP
- return select_best_cpu(p, prev_cpu, 0, sync);
-#endif
-
- if (energy_aware())
- return energy_aware_wake_cpu(p, prev_cpu, sync);
+ if (energy_aware()) {
+ rcu_read_lock();
+ new_cpu = energy_aware_wake_cpu(p, prev_cpu, sync);
+ rcu_read_unlock();
+ return new_cpu;
+ }
if (sd_flag & SD_BALANCE_WAKE) {
record_wakee(p);
@@ -7744,9 +7621,6 @@
enum fbq_type fbq_type;
enum group_type busiest_group_type;
struct list_head tasks;
-#ifdef CONFIG_SCHED_HMP
- enum sched_boost_policy boost_policy;
-#endif
};
/*
@@ -7844,9 +7718,6 @@
int can_migrate_task(struct task_struct *p, struct lb_env *env)
{
int tsk_cache_hot;
-#ifdef CONFIG_SCHED_HMP
- int twf, group_cpus;
-#endif
lockdep_assert_held(&env->src_rq->lock);
@@ -7917,37 +7788,6 @@
return 0;
#endif
-#ifdef CONFIG_SCHED_HMP
- if (cpu_capacity(env->dst_cpu) > cpu_capacity(env->src_cpu)) {
- if (nr_big_tasks(env->src_rq) && !is_big_task(p))
- return 0;
-
- if (env->boost_policy == SCHED_BOOST_ON_BIG &&
- !task_sched_boost(p))
- return 0;
- }
-
- twf = task_will_fit(p, env->dst_cpu);
-
- /*
- * Attempt to not pull tasks that don't fit. We may get lucky and find
- * one that actually fits.
- */
- if (env->flags & LBF_IGNORE_BIG_TASKS && !twf)
- return 0;
-
- /*
- * Group imbalance can sometimes cause work to be pulled across groups
- * even though the group could have managed the imbalance on its own.
- * Prevent inter-cluster migrations for big tasks when the number of
- * tasks is lower than the capacity of the group.
- */
- group_cpus = DIV_ROUND_UP(env->busiest_grp_capacity,
- SCHED_CAPACITY_SCALE);
- if (!twf && env->busiest_nr_running <= group_cpus)
- return 0;
-#endif
-
if (task_running(env->src_rq, p)) {
schedstat_inc(p->se.statistics.nr_failed_migrations_running);
return 0;
@@ -8288,10 +8128,6 @@
unsigned long group_capacity;
unsigned long group_util; /* Total utilization of the group */
unsigned int sum_nr_running; /* Nr tasks running in the group */
-#ifdef CONFIG_SCHED_HMP
- unsigned long sum_nr_big_tasks;
- u64 group_cpu_load; /* Scaled load of all CPUs of the group */
-#endif
unsigned int idle_cpus;
unsigned int group_weight;
enum group_type group_type;
@@ -8335,10 +8171,6 @@
.avg_load = 0UL,
.sum_nr_running = 0,
.group_type = group_other,
-#ifdef CONFIG_SCHED_HMP
- .sum_nr_big_tasks = 0UL,
- .group_cpu_load = 0ULL,
-#endif
},
};
}
@@ -8430,7 +8262,8 @@
mcc->cpu = cpu;
#ifdef CONFIG_SCHED_DEBUG
raw_spin_unlock_irqrestore(&mcc->lock, flags);
- pr_info("CPU%d: update max cpu_capacity %lu\n", cpu, capacity);
+ printk_deferred(KERN_INFO "CPU%d: update max cpu_capacity %lu\n",
+ cpu, capacity);
goto skip_unlock;
#endif
}
@@ -8668,8 +8501,7 @@
trace_sched_cpu_load_lb(cpu_rq(i), idle_cpu(i),
sched_irqload(i),
- power_cost(i, 0),
- cpu_temp(i));
+ power_cost(i, 0));
if (cpu_isolated(i))
continue;
@@ -8688,11 +8520,6 @@
if (nr_running > 1)
*overload = true;
-#ifdef CONFIG_SCHED_HMP
- sgs->sum_nr_big_tasks += rq->hmp_stats.nr_big_tasks;
- sgs->group_cpu_load += cpu_load(i);
-#endif
-
#ifdef CONFIG_NUMA_BALANCING
sgs->nr_numa_running += rq->nr_numa_running;
sgs->nr_preferred_running += rq->nr_preferred_running;
@@ -8754,9 +8581,6 @@
{
struct sg_lb_stats *busiest = &sds->busiest_stat;
- if (update_sd_pick_busiest_active_balance(env, sds, sg, sgs))
- return true;
-
if (sgs->group_type > busiest->group_type)
return true;
@@ -9220,9 +9044,6 @@
if (env->flags & LBF_BIG_TASK_ACTIVE_BALANCE)
goto force_balance;
- if (bail_inter_cluster_balance(env, &sds))
- goto out_balanced;
-
sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load)
/ sds.total_capacity;
@@ -9301,10 +9122,6 @@
unsigned long busiest_load = 0, busiest_capacity = 1;
int i;
-#ifdef CONFIG_SCHED_HMP
- return find_busiest_queue_hmp(env, group);
-#endif
-
for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
unsigned long capacity, wl;
enum fbq_type rt;
@@ -9510,9 +9327,6 @@
.loop = 0,
.busiest_nr_running = 0,
.busiest_grp_capacity = 0,
-#ifdef CONFIG_SCHED_HMP
- .boost_policy = sched_boost_policy(),
-#endif
};
/*
@@ -9719,21 +9533,9 @@
sd->nr_balance_failed = sd->cache_nice_tries +
NEED_ACTIVE_BALANCE_THRESHOLD - 1;
}
- } else {
+ } else
sd->nr_balance_failed = 0;
- /* Assumes one 'busiest' cpu that we pulled tasks from */
- if (!same_freq_domain(this_cpu, cpu_of(busiest))) {
- int check_groups = !!(env.flags &
- LBF_MOVED_RELATED_THREAD_GROUP_TASK);
-
- check_for_freq_change(this_rq, false, check_groups);
- check_for_freq_change(busiest, false, check_groups);
- } else {
- check_for_freq_change(this_rq, true, false);
- }
- }
-
if (likely(!active_balance)) {
/* We were unbalanced, so reset the balancing interval */
sd->balance_interval = sd->min_interval;
@@ -9968,9 +9770,6 @@
.busiest_grp_capacity = 0,
.flags = 0,
.loop = 0,
-#ifdef CONFIG_SCHED_HMP
- .boost_policy = sched_boost_policy(),
-#endif
};
bool moved = false;
@@ -10055,15 +9854,6 @@
local_irq_enable();
- if (moved && !same_freq_domain(busiest_cpu, target_cpu)) {
- int check_groups = !!(env.flags &
- LBF_MOVED_RELATED_THREAD_GROUP_TASK);
- check_for_freq_change(busiest_rq, false, check_groups);
- check_for_freq_change(target_rq, false, check_groups);
- } else if (moved) {
- check_for_freq_change(target_rq, true, false);
- }
-
return 0;
}
@@ -10093,15 +9883,13 @@
struct rq *rq = cpu_rq(cpu);
cpumask_t cpumask;
-#ifdef CONFIG_SCHED_HMP
- return find_new_hmp_ilb(type);
-#endif
-
rcu_read_lock();
sd = rcu_dereference_check_sched_domain(rq->sd);
if (sd) {
cpumask_and(&cpumask, nohz.idle_cpus_mask,
sched_domain_span(sd));
+ cpumask_andnot(&cpumask, &cpumask,
+ cpu_isolated_mask);
ilb = cpumask_first(&cpumask);
}
rcu_read_unlock();
@@ -10110,8 +9898,11 @@
if (!energy_aware() ||
(capacity_orig_of(cpu) ==
cpu_rq(cpu)->rd->max_cpu_capacity.val ||
- cpu_overutilized(cpu)))
- ilb = cpumask_first(nohz.idle_cpus_mask);
+ cpu_overutilized(cpu))) {
+ cpumask_andnot(&cpumask, nohz.idle_cpus_mask,
+ cpu_isolated_mask);
+ ilb = cpumask_first(&cpumask);
+ }
}
if (ilb < nr_cpu_ids && idle_cpu(ilb))
@@ -10148,21 +9939,16 @@
return;
}
-void nohz_balance_clear_nohz_mask(int cpu)
-{
- if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
- cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
- atomic_dec(&nohz.nr_cpus);
- }
-}
-
void nohz_balance_exit_idle(unsigned int cpu)
{
if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
/*
* Completely isolated CPUs don't ever set, so we must test.
*/
- nohz_balance_clear_nohz_mask(cpu);
+ if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
+ cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
+ atomic_dec(&nohz.nr_cpus);
+ }
clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
}
}
@@ -10432,13 +10218,12 @@
static inline bool nohz_kick_needed(struct rq *rq, int *type)
{
unsigned long now = jiffies;
-#ifndef CONFIG_SCHED_HMP
struct sched_domain_shared *sds;
struct sched_domain *sd;
int nr_busy;
-#endif
int cpu = rq->cpu;
bool kick = false;
+ cpumask_t cpumask;
if (unlikely(rq->idle_balance))
return false;
@@ -10454,20 +10239,16 @@
* None are in tickless mode and hence no need for NOHZ idle load
* balancing.
*/
- if (likely(!atomic_read(&nohz.nr_cpus)))
+ cpumask_andnot(&cpumask, nohz.idle_cpus_mask, cpu_isolated_mask);
+ if (cpumask_empty(&cpumask))
return false;
-#ifdef CONFIG_SCHED_HMP
- return _nohz_kick_needed_hmp(rq, cpu, type);
-#endif
-
if (time_before(now, nohz.next_balance))
return false;
if (energy_aware())
return rq->nr_running >= 2 && cpu_overutilized(cpu);
-#ifndef CONFIG_SCHED_HMP
rcu_read_lock();
sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
if (sds && !energy_aware()) {
@@ -10493,15 +10274,13 @@
}
sd = rcu_dereference(per_cpu(sd_asym, cpu));
- if (sd && (cpumask_first_and(nohz.idle_cpus_mask,
- sched_domain_span(sd)) < cpu)) {
+ if (sd && (cpumask_first_and(&cpumask, sched_domain_span(sd)) < cpu)) {
kick = true;
goto unlock;
}
unlock:
rcu_read_unlock();
-#endif
return kick;
}
#else
@@ -10519,6 +10298,13 @@
CPU_IDLE : CPU_NOT_IDLE;
/*
+ * Since core isolation doesn't update nohz.idle_cpus_mask, there
+ * is a possibility this nohz kicked cpu could be isolated. Hence
+ * return if the cpu is isolated.
+ */
+ if (cpu_isolated(this_rq->cpu))
+ return;
+ /*
* If this cpu has a pending nohz_balance_kick, then do the
* balancing on behalf of the other idle cpus whose ticks are
* stopped. Do nohz_idle_balance *before* rebalance_domains to
@@ -10598,7 +10384,7 @@
rq->misfit_task = misfit;
if (old_misfit != misfit) {
- adjust_nr_big_tasks(&rq->hmp_stats, 1, misfit);
+ walt_fixup_nr_big_tasks(rq, curr, 1, misfit);
curr->misfit = misfit;
}
#endif
@@ -11063,7 +10849,7 @@
.task_change_group = task_change_group_fair,
#endif
#ifdef CONFIG_SCHED_WALT
- .fixup_hmp_sched_stats = fixup_hmp_sched_stats_fair,
+ .fixup_walt_sched_stats = walt_fixup_sched_stats_fair,
#endif
};
@@ -11113,70 +10899,68 @@
}
/* WALT sched implementation begins here */
+#ifdef CONFIG_SCHED_WALT
-#if defined(CONFIG_SCHED_WALT) && defined(CONFIG_CFS_BANDWIDTH)
-static inline struct task_group *next_task_group(struct task_group *tg)
+#ifdef CONFIG_CFS_BANDWIDTH
+
+static void walt_init_cfs_rq_stats(struct cfs_rq *cfs_rq)
{
- tg = list_entry_rcu(tg->list.next, typeof(struct task_group), list);
-
- return (&tg->list == &task_groups) ? NULL : tg;
+ cfs_rq->walt_stats.nr_big_tasks = 0;
+ cfs_rq->walt_stats.cumulative_runnable_avg = 0;
+ cfs_rq->walt_stats.pred_demands_sum = 0;
}
-/* Iterate over all cfs_rq in a cpu */
-#define for_each_cfs_rq(cfs_rq, tg, cpu) \
- for (tg = container_of(&task_groups, struct task_group, list); \
- ((tg = next_task_group(tg)) && (cfs_rq = tg->cfs_rq[cpu]));)
-
-void reset_cfs_rq_hmp_stats(int cpu, int reset_cra)
+static void walt_inc_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p)
{
- struct task_group *tg;
- struct cfs_rq *cfs_rq;
-
- rcu_read_lock();
-
- for_each_cfs_rq(cfs_rq, tg, cpu)
- reset_hmp_stats(&cfs_rq->hmp_stats, reset_cra);
-
- rcu_read_unlock();
+ inc_nr_big_task(&cfs_rq->walt_stats, p);
+ fixup_cumulative_runnable_avg(&cfs_rq->walt_stats, p->ravg.demand,
+ p->ravg.pred_demand);
}
-static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq);
-
-static void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
- struct task_struct *p, int change_cra);
-static void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
- struct task_struct *p, int change_cra);
-
-/* Add task's contribution to a cpu' HMP statistics */
-void inc_hmp_sched_stats_fair(struct rq *rq,
- struct task_struct *p, int change_cra)
+static void walt_dec_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p)
{
- struct cfs_rq *cfs_rq;
- struct sched_entity *se = &p->se;
+ dec_nr_big_task(&cfs_rq->walt_stats, p);
+ fixup_cumulative_runnable_avg(&cfs_rq->walt_stats, -(s64)p->ravg.demand,
+ -(s64)p->ravg.pred_demand);
+}
+
+static void walt_inc_throttled_cfs_rq_stats(struct walt_sched_stats *stats,
+ struct cfs_rq *tcfs_rq)
+{
+ struct rq *rq = rq_of(tcfs_rq);
+
+ stats->nr_big_tasks += tcfs_rq->walt_stats.nr_big_tasks;
+ fixup_cumulative_runnable_avg(stats,
+ tcfs_rq->walt_stats.cumulative_runnable_avg,
+ tcfs_rq->walt_stats.pred_demands_sum);
+
+ if (stats == &rq->walt_stats)
+ walt_fixup_cum_window_demand(rq,
+ tcfs_rq->walt_stats.cumulative_runnable_avg);
+
+}
+
+static void walt_dec_throttled_cfs_rq_stats(struct walt_sched_stats *stats,
+ struct cfs_rq *tcfs_rq)
+{
+ struct rq *rq = rq_of(tcfs_rq);
+
+ stats->nr_big_tasks -= tcfs_rq->walt_stats.nr_big_tasks;
+ fixup_cumulative_runnable_avg(stats,
+ -tcfs_rq->walt_stats.cumulative_runnable_avg,
+ -tcfs_rq->walt_stats.pred_demands_sum);
/*
- * Although below check is not strictly required (as
- * inc/dec_nr_big_task and inc/dec_cumulative_runnable_avg called
- * from inc_cfs_rq_hmp_stats() have similar checks), we gain a bit on
- * efficiency by short-circuiting for_each_sched_entity() loop when
- * sched_disable_window_stats
+ * We remove the throttled cfs_rq's tasks's contribution from the
+ * cumulative window demand so that the same can be added
+ * unconditionally when the cfs_rq is unthrottled.
*/
- if (sched_disable_window_stats)
- return;
-
- for_each_sched_entity(se) {
- cfs_rq = cfs_rq_of(se);
- inc_cfs_rq_hmp_stats(cfs_rq, p, change_cra);
- if (cfs_rq_throttled(cfs_rq))
- break;
- }
-
- /* Update rq->hmp_stats only if we didn't find any throttled cfs_rq */
- if (!se)
- inc_rq_hmp_stats(rq, p, change_cra);
+ if (stats == &rq->walt_stats)
+ walt_fixup_cum_window_demand(rq,
+ -tcfs_rq->walt_stats.cumulative_runnable_avg);
}
-static void fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
+static void walt_fixup_sched_stats_fair(struct rq *rq, struct task_struct *p,
u32 new_task_load, u32 new_pred_demand)
{
struct cfs_rq *cfs_rq;
@@ -11187,1069 +10971,43 @@
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
- fixup_cumulative_runnable_avg(&cfs_rq->hmp_stats, p,
+ fixup_cumulative_runnable_avg(&cfs_rq->walt_stats,
task_load_delta,
pred_demand_delta);
- fixup_nr_big_tasks(&cfs_rq->hmp_stats, p, task_load_delta);
if (cfs_rq_throttled(cfs_rq))
break;
}
- /* Fix up rq->hmp_stats only if we didn't find any throttled cfs_rq */
+ /* Fix up rq->walt_stats only if we didn't find any throttled cfs_rq */
if (!se) {
- fixup_cumulative_runnable_avg(&rq->hmp_stats, p,
+ fixup_cumulative_runnable_avg(&rq->walt_stats,
task_load_delta,
pred_demand_delta);
- fixup_nr_big_tasks(&rq->hmp_stats, p, task_load_delta);
+ walt_fixup_cum_window_demand(rq, task_load_delta);
}
}
-#elif defined(CONFIG_SCHED_WALT)
-
-inline void reset_cfs_rq_hmp_stats(int cpu, int reset_cra) { }
-
-static void
-fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
- u32 new_task_load, u32 new_pred_demand)
+static void walt_fixup_nr_big_tasks(struct rq *rq, struct task_struct *p,
+ int delta, bool inc)
{
- s64 task_load_delta = (s64)new_task_load - task_load(p);
- s64 pred_demand_delta = PRED_DEMAND_DELTA;
+ struct cfs_rq *cfs_rq;
+ struct sched_entity *se = &p->se;
- fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta,
- pred_demand_delta);
- fixup_nr_big_tasks(&rq->hmp_stats, p, task_load_delta);
-}
+ for_each_sched_entity(se) {
+ cfs_rq = cfs_rq_of(se);
-static inline int task_will_be_throttled(struct task_struct *p)
-{
- return 0;
-}
+ cfs_rq->walt_stats.nr_big_tasks += inc ? delta : -delta;
+ BUG_ON(cfs_rq->walt_stats.nr_big_tasks < 0);
-void inc_hmp_sched_stats_fair(struct rq *rq,
- struct task_struct *p, int change_cra)
-{
- inc_nr_big_task(&rq->hmp_stats, p);
-}
-
-static inline int
-kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu)
-{
- unsigned long flags;
- int rc = 0;
-
- /* Invoke active balance to force migrate currently running task */
- raw_spin_lock_irqsave(&rq->lock, flags);
- if (!rq->active_balance) {
- rq->active_balance = 1;
- rq->push_cpu = new_cpu;
- get_task_struct(p);
- rq->push_task = p;
- rc = 1;
- }
- raw_spin_unlock_irqrestore(&rq->lock, flags);
-
- return rc;
-}
-
-#else
-
-static inline int task_will_be_throttled(struct task_struct *p)
-{
- return 0;
-}
-
-#endif
-
-/* QHMP/Zone sched implementation begins here */
-
-#ifdef CONFIG_SCHED_HMP
-#ifdef CONFIG_SMP
-
-/* CPU selection flag */
-#define SBC_FLAG_PREV_CPU 0x1
-#define SBC_FLAG_BEST_CAP_CPU 0x2
-#define SBC_FLAG_CPU_COST 0x4
-#define SBC_FLAG_MIN_COST 0x8
-#define SBC_FLAG_IDLE_LEAST_LOADED 0x10
-#define SBC_FLAG_IDLE_CSTATE 0x20
-#define SBC_FLAG_COST_CSTATE_TIE_BREAKER 0x40
-#define SBC_FLAG_COST_CSTATE_PREV_CPU_TIE_BREAKER 0x80
-#define SBC_FLAG_CSTATE_LOAD 0x100
-#define SBC_FLAG_BEST_SIBLING 0x200
-#define SBC_FLAG_WAKER_CPU 0x400
-#define SBC_FLAG_PACK_TASK 0x800
-
-/* Cluster selection flag */
-#define SBC_FLAG_COLOC_CLUSTER 0x10000
-#define SBC_FLAG_WAKER_CLUSTER 0x20000
-#define SBC_FLAG_BACKUP_CLUSTER 0x40000
-#define SBC_FLAG_BOOST_CLUSTER 0x80000
-
-struct cpu_select_env {
- struct task_struct *p;
- struct related_thread_group *rtg;
- u8 reason;
- u8 need_idle:1;
- u8 need_waker_cluster:1;
- u8 sync:1;
- enum sched_boost_policy boost_policy;
- u8 pack_task:1;
- int prev_cpu;
- DECLARE_BITMAP(candidate_list, NR_CPUS);
- DECLARE_BITMAP(backup_list, NR_CPUS);
- u64 task_load;
- u64 cpu_load;
- u32 sbc_best_flag;
- u32 sbc_best_cluster_flag;
- struct cpumask search_cpus;
-};
-
-struct cluster_cpu_stats {
- int best_idle_cpu, least_loaded_cpu;
- int best_capacity_cpu, best_cpu, best_sibling_cpu;
- int min_cost, best_sibling_cpu_cost;
- int best_cpu_wakeup_latency;
- u64 min_load, best_load, best_sibling_cpu_load;
- s64 highest_spare_capacity;
-};
-
-static int spill_threshold_crossed(struct cpu_select_env *env, struct rq *rq)
-{
- u64 total_load;
-
- total_load = env->task_load + env->cpu_load;
-
- if (total_load > sched_spill_load ||
- (rq->nr_running + 1) > sysctl_sched_spill_nr_run)
- return 1;
-
- return 0;
-}
-
-static int skip_cpu(int cpu, struct cpu_select_env *env)
-{
- int tcpu = task_cpu(env->p);
- int skip = 0;
-
- if (!env->reason)
- return 0;
-
- if (is_reserved(cpu))
- return 1;
-
- switch (env->reason) {
- case UP_MIGRATION:
- skip = !idle_cpu(cpu);
- break;
- case IRQLOAD_MIGRATION:
- /* Purposely fall through */
- default:
- skip = (cpu == tcpu);
- break;
- }
-
- return skip;
-}
-
-static inline int
-acceptable_capacity(struct sched_cluster *cluster, struct cpu_select_env *env)
-{
- int tcpu;
-
- if (!env->reason)
- return 1;
-
- tcpu = task_cpu(env->p);
- switch (env->reason) {
- case UP_MIGRATION:
- return cluster->capacity > cpu_capacity(tcpu);
-
- case DOWN_MIGRATION:
- return cluster->capacity < cpu_capacity(tcpu);
-
- default:
- break;
- }
-
- return 1;
-}
-
-static int
-skip_cluster(struct sched_cluster *cluster, struct cpu_select_env *env)
-{
- if (!test_bit(cluster->id, env->candidate_list))
- return 1;
-
- if (!acceptable_capacity(cluster, env)) {
- __clear_bit(cluster->id, env->candidate_list);
- return 1;
- }
-
- return 0;
-}
-
-static struct sched_cluster *
-select_least_power_cluster(struct cpu_select_env *env)
-{
- struct sched_cluster *cluster;
-
- if (env->rtg) {
- int cpu = cluster_first_cpu(env->rtg->preferred_cluster);
-
- env->task_load = scale_load_to_cpu(task_load(env->p), cpu);
-
- if (task_load_will_fit(env->p, env->task_load,
- cpu, env->boost_policy)) {
- env->sbc_best_cluster_flag |= SBC_FLAG_COLOC_CLUSTER;
-
- if (env->boost_policy == SCHED_BOOST_NONE)
- return env->rtg->preferred_cluster;
-
- for_each_sched_cluster(cluster) {
- if (cluster != env->rtg->preferred_cluster) {
- __set_bit(cluster->id,
- env->backup_list);
- __clear_bit(cluster->id,
- env->candidate_list);
- }
- }
-
- return env->rtg->preferred_cluster;
- }
-
- /*
- * Since the task load does not fit on the preferred
- * cluster anymore, pretend that the task does not
- * have any preferred cluster. This allows the waking
- * task to get the appropriate CPU it needs as per the
- * non co-location placement policy without having to
- * wait until the preferred cluster is updated.
- */
- env->rtg = NULL;
- }
-
- for_each_sched_cluster(cluster) {
- if (!skip_cluster(cluster, env)) {
- int cpu = cluster_first_cpu(cluster);
-
- env->task_load = scale_load_to_cpu(task_load(env->p),
- cpu);
- if (task_load_will_fit(env->p, env->task_load, cpu,
- env->boost_policy))
- return cluster;
-
- __set_bit(cluster->id, env->backup_list);
- __clear_bit(cluster->id, env->candidate_list);
- }
- }
-
- return NULL;
-}
-
-static struct sched_cluster *
-next_candidate(const unsigned long *list, int start, int end)
-{
- int cluster_id;
-
- cluster_id = find_next_bit(list, end, start - 1 + 1);
- if (cluster_id >= end)
- return NULL;
-
- return sched_cluster[cluster_id];
-}
-
-static void
-update_spare_capacity(struct cluster_cpu_stats *stats,
- struct cpu_select_env *env, int cpu, int capacity,
- u64 cpu_load)
-{
- s64 spare_capacity = sched_ravg_window - cpu_load;
-
- if (spare_capacity > 0 &&
- (spare_capacity > stats->highest_spare_capacity ||
- (spare_capacity == stats->highest_spare_capacity &&
- ((!env->need_waker_cluster &&
- capacity > cpu_capacity(stats->best_capacity_cpu)) ||
- (env->need_waker_cluster &&
- cpu_rq(cpu)->nr_running <
- cpu_rq(stats->best_capacity_cpu)->nr_running))))) {
- /*
- * If sync waker is the only runnable of CPU, cr_avg of the
- * CPU is 0 so we have high chance to place the wakee on the
- * waker's CPU which likely causes preemtion of the waker.
- * This can lead migration of preempted waker. Place the
- * wakee on the real idle CPU when it's possible by checking
- * nr_running to avoid such preemption.
- */
- stats->highest_spare_capacity = spare_capacity;
- stats->best_capacity_cpu = cpu;
- }
-}
-
-static inline void find_backup_cluster(
-struct cpu_select_env *env, struct cluster_cpu_stats *stats)
-{
- struct sched_cluster *next = NULL;
- int i;
- struct cpumask search_cpus;
-
- while (!bitmap_empty(env->backup_list, num_clusters)) {
- next = next_candidate(env->backup_list, 0, num_clusters);
- __clear_bit(next->id, env->backup_list);
-
- cpumask_and(&search_cpus, &env->search_cpus, &next->cpus);
- for_each_cpu(i, &search_cpus) {
- trace_sched_cpu_load_wakeup(cpu_rq(i), idle_cpu(i),
- sched_irqload(i), power_cost(i, task_load(env->p) +
- cpu_cravg_sync(i, env->sync)), 0);
-
- update_spare_capacity(stats, env, i, next->capacity,
- cpu_load_sync(i, env->sync));
- }
- env->sbc_best_cluster_flag = SBC_FLAG_BACKUP_CLUSTER;
- }
-}
-
-struct sched_cluster *
-next_best_cluster(struct sched_cluster *cluster, struct cpu_select_env *env,
- struct cluster_cpu_stats *stats)
-{
- struct sched_cluster *next = NULL;
-
- __clear_bit(cluster->id, env->candidate_list);
-
- if (env->rtg && preferred_cluster(cluster, env->p))
- return NULL;
-
- do {
- if (bitmap_empty(env->candidate_list, num_clusters))
- return NULL;
-
- next = next_candidate(env->candidate_list, 0, num_clusters);
- if (next) {
- if (next->min_power_cost > stats->min_cost) {
- clear_bit(next->id, env->candidate_list);
- next = NULL;
- continue;
- }
-
- if (skip_cluster(next, env))
- next = NULL;
- }
- } while (!next);
-
- env->task_load = scale_load_to_cpu(task_load(env->p),
- cluster_first_cpu(next));
- return next;
-}
-
-#ifdef CONFIG_SCHED_HMP_CSTATE_AWARE
-static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
- struct cpu_select_env *env, int cpu_cost)
-{
- int wakeup_latency;
- int prev_cpu = env->prev_cpu;
-
- wakeup_latency = cpu_rq(cpu)->wakeup_latency;
-
- if (env->need_idle) {
- stats->min_cost = cpu_cost;
- if (idle_cpu(cpu)) {
- if (wakeup_latency < stats->best_cpu_wakeup_latency ||
- (wakeup_latency == stats->best_cpu_wakeup_latency &&
- cpu == prev_cpu)) {
- stats->best_idle_cpu = cpu;
- stats->best_cpu_wakeup_latency = wakeup_latency;
- }
- } else {
- if (env->cpu_load < stats->min_load ||
- (env->cpu_load == stats->min_load &&
- cpu == prev_cpu)) {
- stats->least_loaded_cpu = cpu;
- stats->min_load = env->cpu_load;
- }
- }
-
- return;
- }
-
- if (cpu_cost < stats->min_cost) {
- stats->min_cost = cpu_cost;
- stats->best_cpu_wakeup_latency = wakeup_latency;
- stats->best_load = env->cpu_load;
- stats->best_cpu = cpu;
- env->sbc_best_flag = SBC_FLAG_CPU_COST;
- return;
- }
-
- /* CPU cost is the same. Start breaking the tie by C-state */
-
- if (wakeup_latency > stats->best_cpu_wakeup_latency)
- return;
-
- if (wakeup_latency < stats->best_cpu_wakeup_latency) {
- stats->best_cpu_wakeup_latency = wakeup_latency;
- stats->best_load = env->cpu_load;
- stats->best_cpu = cpu;
- env->sbc_best_flag = SBC_FLAG_COST_CSTATE_TIE_BREAKER;
- return;
- }
-
- /* C-state is the same. Use prev CPU to break the tie */
- if (cpu == prev_cpu) {
- stats->best_cpu = cpu;
- env->sbc_best_flag = SBC_FLAG_COST_CSTATE_PREV_CPU_TIE_BREAKER;
- return;
- }
-
- if (stats->best_cpu != prev_cpu &&
- ((wakeup_latency == 0 && env->cpu_load < stats->best_load) ||
- (wakeup_latency > 0 && env->cpu_load > stats->best_load))) {
- stats->best_load = env->cpu_load;
- stats->best_cpu = cpu;
- env->sbc_best_flag = SBC_FLAG_CSTATE_LOAD;
- }
-}
-#else /* CONFIG_SCHED_HMP_CSTATE_AWARE */
-static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
- struct cpu_select_env *env, int cpu_cost)
-{
- int prev_cpu = env->prev_cpu;
-
- if (cpu != prev_cpu && cpus_share_cache(prev_cpu, cpu)) {
- if (stats->best_sibling_cpu_cost > cpu_cost ||
- (stats->best_sibling_cpu_cost == cpu_cost &&
- stats->best_sibling_cpu_load > env->cpu_load)) {
- stats->best_sibling_cpu_cost = cpu_cost;
- stats->best_sibling_cpu_load = env->cpu_load;
- stats->best_sibling_cpu = cpu;
- }
- }
-
- if ((cpu_cost < stats->min_cost) ||
- ((stats->best_cpu != prev_cpu &&
- stats->min_load > env->cpu_load) || cpu == prev_cpu)) {
- if (env->need_idle) {
- if (idle_cpu(cpu)) {
- stats->min_cost = cpu_cost;
- stats->best_idle_cpu = cpu;
- }
- } else {
- stats->min_cost = cpu_cost;
- stats->min_load = env->cpu_load;
- stats->best_cpu = cpu;
- env->sbc_best_flag = SBC_FLAG_MIN_COST;
- }
- }
-}
-#endif /* CONFIG_SCHED_HMP_CSTATE_AWARE */
-
-static void update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
- struct cpu_select_env *env)
-{
- int cpu_cost;
-
- /*
- * We try to find the least loaded *busy* CPU irrespective
- * of the power cost.
- */
- if (env->pack_task)
- cpu_cost = cpu_min_power_cost(cpu);
-
- else
- cpu_cost = power_cost(cpu, task_load(env->p) +
- cpu_cravg_sync(cpu, env->sync));
-
- if (cpu_cost <= stats->min_cost)
- __update_cluster_stats(cpu, stats, env, cpu_cost);
-}
-
-static void find_best_cpu_in_cluster(struct sched_cluster *c,
- struct cpu_select_env *env, struct cluster_cpu_stats *stats)
-{
- int i;
- struct cpumask search_cpus;
-
- cpumask_and(&search_cpus, &env->search_cpus, &c->cpus);
-
- env->need_idle = wake_to_idle(env->p) || c->wake_up_idle;
-
- for_each_cpu(i, &search_cpus) {
- env->cpu_load = cpu_load_sync(i, env->sync);
-
- trace_sched_cpu_load_wakeup(cpu_rq(i), idle_cpu(i),
- sched_irqload(i),
- power_cost(i, task_load(env->p) +
- cpu_cravg_sync(i, env->sync)), 0);
-
- if (skip_cpu(i, env))
- continue;
-
- update_spare_capacity(stats, env, i, c->capacity,
- env->cpu_load);
-
- /*
- * need_idle takes precedence over sched boost but when both
- * are set, idlest CPU with in all the clusters is selected
- * when boost_policy = BOOST_ON_ALL whereas idlest CPU in the
- * big cluster is selected within boost_policy = BOOST_ON_BIG.
- */
- if ((!env->need_idle &&
- env->boost_policy != SCHED_BOOST_NONE) ||
- env->need_waker_cluster ||
- sched_cpu_high_irqload(i) ||
- spill_threshold_crossed(env, cpu_rq(i)))
- continue;
-
- update_cluster_stats(i, stats, env);
- }
-}
-
-static inline void init_cluster_cpu_stats(struct cluster_cpu_stats *stats)
-{
- stats->best_cpu = stats->best_idle_cpu = -1;
- stats->best_capacity_cpu = stats->best_sibling_cpu = -1;
- stats->min_cost = stats->best_sibling_cpu_cost = INT_MAX;
- stats->min_load = stats->best_sibling_cpu_load = ULLONG_MAX;
- stats->highest_spare_capacity = 0;
- stats->least_loaded_cpu = -1;
- stats->best_cpu_wakeup_latency = INT_MAX;
- /* No need to initialize stats->best_load */
-}
-
-static inline bool env_has_special_flags(struct cpu_select_env *env)
-{
- if (env->need_idle || env->boost_policy != SCHED_BOOST_NONE ||
- env->reason)
- return true;
-
- return false;
-}
-
-static inline bool
-bias_to_prev_cpu(struct cpu_select_env *env, struct cluster_cpu_stats *stats)
-{
- int prev_cpu;
- struct task_struct *task = env->p;
- struct sched_cluster *cluster;
-
- if (!task->ravg.mark_start || !sched_short_sleep_task_threshold)
- return false;
-
- prev_cpu = env->prev_cpu;
- if (!cpumask_test_cpu(prev_cpu, &env->search_cpus))
- return false;
-
- if (task->ravg.mark_start - task->last_cpu_selected_ts >=
- sched_long_cpu_selection_threshold)
- return false;
-
- /*
- * This function should be used by task wake up path only as it's
- * assuming p->last_switch_out_ts as last sleep time.
- * p->last_switch_out_ts can denote last preemption time as well as
- * last sleep time.
- */
- if (task->ravg.mark_start - task->last_switch_out_ts >=
- sched_short_sleep_task_threshold)
- return false;
-
- env->task_load = scale_load_to_cpu(task_load(task), prev_cpu);
- cluster = cpu_rq(prev_cpu)->cluster;
-
- if (!task_load_will_fit(task, env->task_load, prev_cpu,
- sched_boost_policy())) {
-
- __set_bit(cluster->id, env->backup_list);
- __clear_bit(cluster->id, env->candidate_list);
- return false;
- }
-
- env->cpu_load = cpu_load_sync(prev_cpu, env->sync);
- if (sched_cpu_high_irqload(prev_cpu) ||
- spill_threshold_crossed(env, cpu_rq(prev_cpu))) {
- update_spare_capacity(stats, env, prev_cpu,
- cluster->capacity, env->cpu_load);
- cpumask_clear_cpu(prev_cpu, &env->search_cpus);
- return false;
- }
-
- return true;
-}
-
-static inline bool
-wake_to_waker_cluster(struct cpu_select_env *env)
-{
- return env->sync &&
- task_load(current) > sched_big_waker_task_load &&
- task_load(env->p) < sched_small_wakee_task_load;
-}
-
-static inline bool
-bias_to_waker_cpu(struct cpu_select_env *env, int cpu)
-{
- return sysctl_sched_prefer_sync_wakee_to_waker &&
- cpu_rq(cpu)->nr_running == 1 &&
- cpumask_test_cpu(cpu, &env->search_cpus);
-}
-
-static inline int
-cluster_allowed(struct cpu_select_env *env, struct sched_cluster *cluster)
-{
- return cpumask_intersects(&env->search_cpus, &cluster->cpus);
-}
-
-/* return cheapest cpu that can fit this task */
-static int select_best_cpu(struct task_struct *p, int target, int reason,
- int sync)
-{
- struct sched_cluster *cluster, *pref_cluster = NULL;
- struct cluster_cpu_stats stats;
- struct related_thread_group *grp;
- unsigned int sbc_flag = 0;
- int cpu = raw_smp_processor_id();
- bool special;
-
- struct cpu_select_env env = {
- .p = p,
- .reason = reason,
- .need_idle = wake_to_idle(p),
- .need_waker_cluster = 0,
- .sync = sync,
- .prev_cpu = target,
- .rtg = NULL,
- .sbc_best_flag = 0,
- .sbc_best_cluster_flag = 0,
- .pack_task = false,
- };
-
- env.boost_policy = task_sched_boost(p) ?
- sched_boost_policy() : SCHED_BOOST_NONE;
-
- bitmap_copy(env.candidate_list, all_cluster_ids, NR_CPUS);
- bitmap_zero(env.backup_list, NR_CPUS);
-
- cpumask_and(&env.search_cpus, tsk_cpus_allowed(p), cpu_active_mask);
- cpumask_andnot(&env.search_cpus, &env.search_cpus, cpu_isolated_mask);
-
- init_cluster_cpu_stats(&stats);
- special = env_has_special_flags(&env);
-
- rcu_read_lock();
-
- grp = task_related_thread_group(p);
-
- if (grp && grp->preferred_cluster) {
- pref_cluster = grp->preferred_cluster;
- if (!cluster_allowed(&env, pref_cluster))
- clear_bit(pref_cluster->id, env.candidate_list);
- else
- env.rtg = grp;
- } else if (!special) {
- cluster = cpu_rq(cpu)->cluster;
- if (wake_to_waker_cluster(&env)) {
- if (bias_to_waker_cpu(&env, cpu)) {
- target = cpu;
- sbc_flag = SBC_FLAG_WAKER_CLUSTER |
- SBC_FLAG_WAKER_CPU;
- goto out;
- } else if (cluster_allowed(&env, cluster)) {
- env.need_waker_cluster = 1;
- bitmap_zero(env.candidate_list, NR_CPUS);
- __set_bit(cluster->id, env.candidate_list);
- env.sbc_best_cluster_flag =
- SBC_FLAG_WAKER_CLUSTER;
- }
- } else if (bias_to_prev_cpu(&env, &stats)) {
- sbc_flag = SBC_FLAG_PREV_CPU;
- goto out;
- }
- }
-
- if (!special && is_short_burst_task(p)) {
- env.pack_task = true;
- sbc_flag = SBC_FLAG_PACK_TASK;
- }
-retry:
- cluster = select_least_power_cluster(&env);
-
- if (!cluster)
- goto out;
-
- /*
- * 'cluster' now points to the minimum power cluster which can satisfy
- * task's perf goals. Walk down the cluster list starting with that
- * cluster. For non-small tasks, skip clusters that don't have
- * mostly_idle/idle cpus
- */
-
- do {
- find_best_cpu_in_cluster(cluster, &env, &stats);
-
- } while ((cluster = next_best_cluster(cluster, &env, &stats)));
-
- if (env.need_idle) {
- if (stats.best_idle_cpu >= 0) {
- target = stats.best_idle_cpu;
- sbc_flag |= SBC_FLAG_IDLE_CSTATE;
- } else if (stats.least_loaded_cpu >= 0) {
- target = stats.least_loaded_cpu;
- sbc_flag |= SBC_FLAG_IDLE_LEAST_LOADED;
- }
- } else if (stats.best_cpu >= 0) {
- if (stats.best_cpu != task_cpu(p) &&
- stats.min_cost == stats.best_sibling_cpu_cost) {
- stats.best_cpu = stats.best_sibling_cpu;
- sbc_flag |= SBC_FLAG_BEST_SIBLING;
- }
- sbc_flag |= env.sbc_best_flag;
- target = stats.best_cpu;
- } else {
- if (env.rtg && env.boost_policy == SCHED_BOOST_NONE) {
- env.rtg = NULL;
- goto retry;
- }
-
- /*
- * With boost_policy == SCHED_BOOST_ON_BIG, we reach here with
- * backup_list = little cluster, candidate_list = none and
- * stats->best_capacity_cpu points the best spare capacity
- * CPU among the CPUs in the big cluster.
- */
- if (env.boost_policy == SCHED_BOOST_ON_BIG &&
- stats.best_capacity_cpu >= 0)
- sbc_flag |= SBC_FLAG_BOOST_CLUSTER;
- else
- find_backup_cluster(&env, &stats);
-
- if (stats.best_capacity_cpu >= 0) {
- target = stats.best_capacity_cpu;
- sbc_flag |= SBC_FLAG_BEST_CAP_CPU;
- }
- }
- p->last_cpu_selected_ts = sched_ktime_clock();
-out:
- sbc_flag |= env.sbc_best_cluster_flag;
- rcu_read_unlock();
- trace_sched_task_load(p, sched_boost_policy() && task_sched_boost(p),
- env.reason, env.sync, env.need_idle, sbc_flag, target);
- return target;
-}
-
-/*
- * Reset balance_interval at all sched_domain levels of given cpu, so that it
- * honors kick.
- */
-static inline void reset_balance_interval(int cpu)
-{
- struct sched_domain *sd;
-
- if (cpu >= nr_cpu_ids)
- return;
-
- rcu_read_lock();
- for_each_domain(cpu, sd)
- sd->balance_interval = 0;
- rcu_read_unlock();
-}
-
-/*
- * Check if a task is on the "wrong" cpu (i.e its current cpu is not the ideal
- * cpu as per its demand or priority)
- *
- * Returns reason why task needs to be migrated
- */
-static inline int migration_needed(struct task_struct *p, int cpu)
-{
- int nice;
- struct related_thread_group *grp;
-
- if (p->state != TASK_RUNNING || p->nr_cpus_allowed == 1)
- return 0;
-
- /* No need to migrate task that is about to be throttled */
- if (task_will_be_throttled(p))
- return 0;
-
- if (sched_boost_policy() == SCHED_BOOST_ON_BIG &&
- cpu_capacity(cpu) != max_capacity && task_sched_boost(p))
- return UP_MIGRATION;
-
- if (sched_cpu_high_irqload(cpu))
- return IRQLOAD_MIGRATION;
-
- nice = task_nice(p);
- rcu_read_lock();
- grp = task_related_thread_group(p);
- /*
- * Don't assume higher capacity means higher power. If the task
- * is running on the power efficient CPU, avoid migrating it
- * to a lower capacity cluster.
- */
- if (!grp && (nice > SCHED_UPMIGRATE_MIN_NICE ||
- upmigrate_discouraged(p)) &&
- cpu_capacity(cpu) > min_capacity &&
- cpu_max_power_cost(cpu) == max_power_cost) {
- rcu_read_unlock();
- return DOWN_MIGRATION;
- }
-
- if (!task_will_fit(p, cpu)) {
- rcu_read_unlock();
- return UP_MIGRATION;
- }
- rcu_read_unlock();
-
- return 0;
-}
-
-static DEFINE_RAW_SPINLOCK(migration_lock);
-
-/*
- * Check if currently running task should be migrated to a better cpu.
- *
- * Todo: Effect this via changes to nohz_balancer_kick() and load balance?
- */
-void check_for_migration(struct rq *rq, struct task_struct *p)
-{
- int cpu = cpu_of(rq), new_cpu;
- int active_balance = 0, reason;
-
- reason = migration_needed(p, cpu);
- if (!reason)
- return;
-
- raw_spin_lock(&migration_lock);
- new_cpu = select_best_cpu(p, cpu, reason, 0);
-
- if (new_cpu != cpu) {
- active_balance = kick_active_balance(rq, p, new_cpu);
- if (active_balance)
- mark_reserved(new_cpu);
- }
-
- raw_spin_unlock(&migration_lock);
-
- if (active_balance)
- stop_one_cpu_nowait(cpu, active_load_balance_cpu_stop, rq,
- &rq->active_balance_work);
-}
-
-#ifdef CONFIG_CFS_BANDWIDTH
-static void init_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq)
-{
- cfs_rq->hmp_stats.nr_big_tasks = 0;
- cfs_rq->hmp_stats.cumulative_runnable_avg = 0;
- cfs_rq->hmp_stats.pred_demands_sum = 0;
-}
-
-static void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
- struct task_struct *p, int change_cra)
-{
- inc_nr_big_task(&cfs_rq->hmp_stats, p);
- if (change_cra)
- inc_cumulative_runnable_avg(&cfs_rq->hmp_stats, p);
-}
-
-static void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
- struct task_struct *p, int change_cra)
-{
- dec_nr_big_task(&cfs_rq->hmp_stats, p);
- if (change_cra)
- dec_cumulative_runnable_avg(&cfs_rq->hmp_stats, p);
-}
-
-static void inc_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
- struct cfs_rq *cfs_rq)
-{
- stats->nr_big_tasks += cfs_rq->hmp_stats.nr_big_tasks;
- stats->cumulative_runnable_avg +=
- cfs_rq->hmp_stats.cumulative_runnable_avg;
- stats->pred_demands_sum += cfs_rq->hmp_stats.pred_demands_sum;
-}
-
-static void dec_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
- struct cfs_rq *cfs_rq)
-{
- stats->nr_big_tasks -= cfs_rq->hmp_stats.nr_big_tasks;
- stats->cumulative_runnable_avg -=
- cfs_rq->hmp_stats.cumulative_runnable_avg;
- stats->pred_demands_sum -= cfs_rq->hmp_stats.pred_demands_sum;
-
- BUG_ON(stats->nr_big_tasks < 0 ||
- (s64)stats->cumulative_runnable_avg < 0);
- BUG_ON((s64)stats->pred_demands_sum < 0);
-}
-#endif /* CONFIG_CFS_BANDWIDTH */
-
-static int
-bail_inter_cluster_balance(struct lb_env *env, struct sd_lb_stats *sds)
-{
- int local_cpu, busiest_cpu;
- int local_capacity, busiest_capacity;
- int local_pwr_cost, busiest_pwr_cost;
- int nr_cpus;
- int boost = sched_boost();
-
- if (!sysctl_sched_restrict_cluster_spill ||
- boost == FULL_THROTTLE_BOOST || boost == CONSERVATIVE_BOOST)
- return 0;
-
- local_cpu = group_first_cpu(sds->local);
- busiest_cpu = group_first_cpu(sds->busiest);
-
- local_capacity = cpu_max_possible_capacity(local_cpu);
- busiest_capacity = cpu_max_possible_capacity(busiest_cpu);
-
- local_pwr_cost = cpu_max_power_cost(local_cpu);
- busiest_pwr_cost = cpu_max_power_cost(busiest_cpu);
-
- if (local_pwr_cost <= busiest_pwr_cost)
- return 0;
-
- if (local_capacity > busiest_capacity &&
- sds->busiest_stat.sum_nr_big_tasks)
- return 0;
-
- nr_cpus = cpumask_weight(sched_group_cpus(sds->busiest));
- if ((sds->busiest_stat.group_cpu_load < nr_cpus * sched_spill_load) &&
- (sds->busiest_stat.sum_nr_running <
- nr_cpus * sysctl_sched_spill_nr_run))
- return 1;
-
- return 0;
-}
-
-static bool update_sd_pick_busiest_active_balance(struct lb_env *env,
- struct sd_lb_stats *sds,
- struct sched_group *sg,
- struct sg_lb_stats *sgs)
-{
- if (env->idle != CPU_NOT_IDLE &&
- cpu_capacity(env->dst_cpu) > group_rq_capacity(sg)) {
- if (sgs->sum_nr_big_tasks >
- sds->busiest_stat.sum_nr_big_tasks) {
- env->flags |= LBF_BIG_TASK_ACTIVE_BALANCE;
- return true;
- }
- }
-
- return false;
-}
-
-static struct rq *find_busiest_queue_hmp(struct lb_env *env,
- struct sched_group *group)
-{
- struct rq *busiest = NULL, *busiest_big = NULL;
- u64 max_runnable_avg = 0, max_runnable_avg_big = 0;
- int max_nr_big = 0, nr_big;
- bool find_big = !!(env->flags & LBF_BIG_TASK_ACTIVE_BALANCE);
- int i;
- cpumask_t cpus;
-
- cpumask_andnot(&cpus, sched_group_cpus(group), cpu_isolated_mask);
-
- for_each_cpu(i, &cpus) {
- struct rq *rq = cpu_rq(i);
- u64 cumulative_runnable_avg =
- rq->hmp_stats.cumulative_runnable_avg;
-
- if (!cpumask_test_cpu(i, env->cpus))
- continue;
-
-
- if (find_big) {
- nr_big = nr_big_tasks(rq);
- if (nr_big > max_nr_big ||
- (nr_big > 0 && nr_big == max_nr_big &&
- cumulative_runnable_avg > max_runnable_avg_big)) {
- max_runnable_avg_big = cumulative_runnable_avg;
- busiest_big = rq;
- max_nr_big = nr_big;
- continue;
- }
- }
-
- if (cumulative_runnable_avg > max_runnable_avg) {
- max_runnable_avg = cumulative_runnable_avg;
- busiest = rq;
- }
- }
-
- if (busiest_big)
- return busiest_big;
-
- env->flags &= ~LBF_BIG_TASK_ACTIVE_BALANCE;
- return busiest;
-}
-
-#ifdef CONFIG_NO_HZ_COMMON
-static inline int find_new_hmp_ilb(int type)
-{
- int call_cpu = raw_smp_processor_id();
- struct sched_domain *sd;
- int ilb;
-
- rcu_read_lock();
-
- /* Pick an idle cpu "closest" to call_cpu */
- for_each_domain(call_cpu, sd) {
- for_each_cpu_and(ilb, nohz.idle_cpus_mask,
- sched_domain_span(sd)) {
- if (idle_cpu(ilb) && (type != NOHZ_KICK_RESTRICT ||
- cpu_max_power_cost(ilb) <=
- cpu_max_power_cost(call_cpu))) {
- rcu_read_unlock();
- reset_balance_interval(ilb);
- return ilb;
- }
- }
- }
-
- rcu_read_unlock();
- return nr_cpu_ids;
-}
-
-static inline int _nohz_kick_needed_hmp(struct rq *rq, int cpu, int *type)
-{
- struct sched_domain *sd;
- int i;
-
- if (rq->nr_running < 2)
- return 0;
-
- if (!sysctl_sched_restrict_cluster_spill ||
- sched_boost_policy() == SCHED_BOOST_ON_ALL)
- return 1;
-
- if (cpu_max_power_cost(cpu) == max_power_cost)
- return 1;
-
- rcu_read_lock();
- sd = rcu_dereference_check_sched_domain(rq->sd);
- if (!sd) {
- rcu_read_unlock();
- return 0;
- }
-
- for_each_cpu(i, sched_domain_span(sd)) {
- if (cpu_load(i) < sched_spill_load &&
- cpu_rq(i)->nr_running <
- sysctl_sched_spill_nr_run) {
- /* Change the kick type to limit to CPUs that
- * are of equal or lower capacity.
- */
- *type = NOHZ_KICK_RESTRICT;
+ if (cfs_rq_throttled(cfs_rq))
break;
- }
}
- rcu_read_unlock();
- return 1;
-}
-#endif /* CONFIG_NO_HZ_COMMON */
-#endif /* CONFIG_SMP */
-#ifdef CONFIG_CFS_BANDWIDTH
+ /* Fix up rq->walt_stats only if we didn't find any throttled cfs_rq */
+ if (!se)
+ walt_adjust_nr_big_tasks(rq, delta, inc);
+}
+
/*
* Check if task is part of a hierarchy where some cfs_rq does not have any
* runtime left.
@@ -12276,10 +11034,49 @@
return 0;
}
+
+#else /* CONFIG_CFS_BANDWIDTH */
+
+static void walt_fixup_sched_stats_fair(struct rq *rq, struct task_struct *p,
+ u32 new_task_load, u32 new_pred_demand)
+{
+ fixup_walt_sched_stats_common(rq, p, new_task_load, new_pred_demand);
+}
+
+static void walt_fixup_nr_big_tasks(struct rq *rq, struct task_struct *p,
+ int delta, bool inc)
+{
+ walt_adjust_nr_big_tasks(rq, delta, inc);
+}
+
+static int task_will_be_throttled(struct task_struct *p)
+{
+ return false;
+}
+
#endif /* CONFIG_CFS_BANDWIDTH */
-#elif defined(CONFIG_SCHED_WALT)
+static inline int
+kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu)
+{
+ unsigned long flags;
+ int rc = 0;
+ /* Invoke active balance to force migrate currently running task */
+ raw_spin_lock_irqsave(&rq->lock, flags);
+ if (!rq->active_balance) {
+ rq->active_balance = 1;
+ rq->push_cpu = new_cpu;
+ get_task_struct(p);
+ rq->push_task = p;
+ rc = 1;
+ }
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+
+ return rc;
+}
+
+static DEFINE_RAW_SPINLOCK(migration_lock);
void check_for_migration(struct rq *rq, struct task_struct *p)
{
int new_cpu;
@@ -12291,17 +11088,26 @@
rq->curr->nr_cpus_allowed == 1)
return;
+ if (task_will_be_throttled(p))
+ return;
+
+ raw_spin_lock(&migration_lock);
+ rcu_read_lock();
new_cpu = energy_aware_wake_cpu(p, cpu, 0);
+ rcu_read_unlock();
if (capacity_orig_of(new_cpu) > capacity_orig_of(cpu)) {
active_balance = kick_active_balance(rq, p, new_cpu);
if (active_balance) {
mark_reserved(new_cpu);
+ raw_spin_unlock(&migration_lock);
stop_one_cpu_nowait(cpu,
active_load_balance_cpu_stop, rq,
&rq->active_balance_work);
+ return;
}
}
+ raw_spin_unlock(&migration_lock);
}
}
-#endif /* CONFIG_SCHED_HMP */
+#endif /* CONFIG_SCHED_WALT */
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
deleted file mode 100644
index 24b60d7..0000000
--- a/kernel/sched/hmp.c
+++ /dev/null
@@ -1,1639 +0,0 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Implementation credits: Srivatsa Vaddagiri, Steve Muckle
- * Syed Rameez Mustafa, Olav haugan, Joonwoo Park, Pavan Kumar Kondeti
- * and Vikram Mulukutla
- */
-
-#include <linux/cpufreq.h>
-#include <linux/list_sort.h>
-#include <linux/syscore_ops.h>
-
-#include "sched.h"
-#include "walt.h"
-
-#include <trace/events/sched.h>
-
-#define CSTATE_LATENCY_GRANULARITY_SHIFT (6)
-
-inline void clear_ed_task(struct task_struct *p, struct rq *rq)
-{
- if (p == rq->ed_task)
- rq->ed_task = NULL;
-}
-
-inline void set_task_last_switch_out(struct task_struct *p, u64 wallclock)
-{
- p->last_switch_out_ts = wallclock;
-}
-
-/*
- * Note C-state for (idle) cpus.
- *
- * @cstate = cstate index, 0 -> active state
- * @wakeup_energy = energy spent in waking up cpu
- * @wakeup_latency = latency to wakeup from cstate
- *
- */
-void
-sched_set_cpu_cstate(int cpu, int cstate, int wakeup_energy, int wakeup_latency)
-{
- struct rq *rq = cpu_rq(cpu);
-
- rq->cstate = cstate; /* C1, C2 etc */
- rq->wakeup_energy = wakeup_energy;
- /* disregard small latency delta (64 us). */
- rq->wakeup_latency = ((wakeup_latency >>
- CSTATE_LATENCY_GRANULARITY_SHIFT) <<
- CSTATE_LATENCY_GRANULARITY_SHIFT);
-}
-
-/*
- * Note D-state for (idle) cluster.
- *
- * @dstate = dstate index, 0 -> active state
- * @wakeup_energy = energy spent in waking up cluster
- * @wakeup_latency = latency to wakeup from cluster
- *
- */
-void sched_set_cluster_dstate(const cpumask_t *cluster_cpus, int dstate,
- int wakeup_energy, int wakeup_latency)
-{
- struct sched_cluster *cluster =
- cpu_rq(cpumask_first(cluster_cpus))->cluster;
- cluster->dstate = dstate;
- cluster->dstate_wakeup_energy = wakeup_energy;
- cluster->dstate_wakeup_latency = wakeup_latency;
-}
-
-u32 __weak get_freq_max_load(int cpu, u32 freq)
-{
- /* 100% by default */
- return 100;
-}
-
-struct freq_max_load_entry {
- /* The maximum load which has accounted governor's headroom. */
- u64 hdemand;
-};
-
-struct freq_max_load {
- struct rcu_head rcu;
- int length;
- struct freq_max_load_entry freqs[0];
-};
-
-static DEFINE_PER_CPU(struct freq_max_load *, freq_max_load);
-static DEFINE_SPINLOCK(freq_max_load_lock);
-
-struct cpu_pwr_stats __weak *get_cpu_pwr_stats(void)
-{
- return NULL;
-}
-
-int sched_update_freq_max_load(const cpumask_t *cpumask)
-{
- int i, cpu, ret;
- unsigned int freq;
- struct cpu_pstate_pwr *costs;
- struct cpu_pwr_stats *per_cpu_info = get_cpu_pwr_stats();
- struct freq_max_load *max_load, *old_max_load;
- struct freq_max_load_entry *entry;
- u64 max_demand_capacity, max_demand;
- unsigned long flags;
- u32 hfreq;
- int hpct;
-
- if (!per_cpu_info)
- return 0;
-
- spin_lock_irqsave(&freq_max_load_lock, flags);
- max_demand_capacity = div64_u64(max_task_load(), max_possible_capacity);
- for_each_cpu(cpu, cpumask) {
- if (!per_cpu_info[cpu].ptable) {
- ret = -EINVAL;
- goto fail;
- }
-
- old_max_load = rcu_dereference(per_cpu(freq_max_load, cpu));
-
- /*
- * allocate len + 1 and leave the last power cost as 0 for
- * power_cost() can stop iterating index when
- * per_cpu_info[cpu].len > len of max_load due to race between
- * cpu power stats update and get_cpu_pwr_stats().
- */
- max_load = kzalloc(sizeof(struct freq_max_load) +
- sizeof(struct freq_max_load_entry) *
- (per_cpu_info[cpu].len + 1), GFP_ATOMIC);
- if (unlikely(!max_load)) {
- ret = -ENOMEM;
- goto fail;
- }
-
- max_load->length = per_cpu_info[cpu].len;
-
- max_demand = max_demand_capacity *
- cpu_max_possible_capacity(cpu);
-
- i = 0;
- costs = per_cpu_info[cpu].ptable;
- while (costs[i].freq) {
- entry = &max_load->freqs[i];
- freq = costs[i].freq;
- hpct = get_freq_max_load(cpu, freq);
- if (hpct <= 0 || hpct > 100)
- hpct = 100;
- hfreq = div64_u64((u64)freq * hpct, 100);
- entry->hdemand =
- div64_u64(max_demand * hfreq,
- cpu_max_possible_freq(cpu));
- i++;
- }
-
- rcu_assign_pointer(per_cpu(freq_max_load, cpu), max_load);
- if (old_max_load)
- kfree_rcu(old_max_load, rcu);
- }
-
- spin_unlock_irqrestore(&freq_max_load_lock, flags);
- return 0;
-
-fail:
- for_each_cpu(cpu, cpumask) {
- max_load = rcu_dereference(per_cpu(freq_max_load, cpu));
- if (max_load) {
- rcu_assign_pointer(per_cpu(freq_max_load, cpu), NULL);
- kfree_rcu(max_load, rcu);
- }
- }
-
- spin_unlock_irqrestore(&freq_max_load_lock, flags);
- return ret;
-}
-
-unsigned long __weak arch_get_cpu_efficiency(int cpu)
-{
- return SCHED_CAPACITY_SCALE;
-}
-
-int sched_set_static_cpu_pwr_cost(int cpu, unsigned int cost)
-{
- struct rq *rq = cpu_rq(cpu);
-
- rq->static_cpu_pwr_cost = cost;
- return 0;
-}
-
-unsigned int sched_get_static_cpu_pwr_cost(int cpu)
-{
- return cpu_rq(cpu)->static_cpu_pwr_cost;
-}
-
-int sched_set_static_cluster_pwr_cost(int cpu, unsigned int cost)
-{
- struct sched_cluster *cluster = cpu_rq(cpu)->cluster;
-
- cluster->static_cluster_pwr_cost = cost;
- return 0;
-}
-
-unsigned int sched_get_static_cluster_pwr_cost(int cpu)
-{
- return cpu_rq(cpu)->cluster->static_cluster_pwr_cost;
-}
-
-int sched_set_cluster_wake_idle(int cpu, unsigned int wake_idle)
-{
- struct sched_cluster *cluster = cpu_rq(cpu)->cluster;
-
- cluster->wake_up_idle = !!wake_idle;
- return 0;
-}
-
-unsigned int sched_get_cluster_wake_idle(int cpu)
-{
- return cpu_rq(cpu)->cluster->wake_up_idle;
-}
-
-/*
- * Tasks that are runnable continuously for a period greather than
- * EARLY_DETECTION_DURATION can be flagged early as potential
- * high load tasks.
- */
-#define EARLY_DETECTION_DURATION 9500000
-
-/*
- * For increase, send notification if
- * freq_required - cur_freq > sysctl_sched_freq_inc_notify
- */
-__read_mostly int sysctl_sched_freq_inc_notify = 10 * 1024 * 1024; /* + 10GHz */
-
-/*
- * For decrease, send notification if
- * cur_freq - freq_required > sysctl_sched_freq_dec_notify
- */
-__read_mostly int sysctl_sched_freq_dec_notify = 10 * 1024 * 1024; /* - 10GHz */
-__read_mostly unsigned int sysctl_sched_pred_alert_freq = 10 * 1024 * 1024;
-
-/* Maximum allowed threshold before freq aggregation must be enabled */
-#define MAX_FREQ_AGGR_THRESH 1000
-
-#define for_each_related_thread_group(grp) \
- list_for_each_entry(grp, &active_related_thread_groups, list)
-
-/* Size of bitmaps maintained to track top tasks */
-static const unsigned int top_tasks_bitmap_size =
- BITS_TO_LONGS(NUM_LOAD_INDICES + 1) * sizeof(unsigned long);
-
-__read_mostly unsigned int sysctl_sched_freq_aggregate = 1;
-
-/* A cpu can no longer accommodate more tasks if:
- *
- * rq->nr_running > sysctl_sched_spill_nr_run ||
- * rq->hmp_stats.cumulative_runnable_avg > sched_spill_load
- */
-unsigned int __read_mostly sysctl_sched_spill_nr_run = 10;
-
-/*
- * Place sync wakee tasks those have less than configured demand to the waker's
- * cluster.
- */
-unsigned int __read_mostly sched_small_wakee_task_load;
-unsigned int __read_mostly sysctl_sched_small_wakee_task_load_pct = 10;
-
-unsigned int __read_mostly sched_big_waker_task_load;
-unsigned int __read_mostly sysctl_sched_big_waker_task_load_pct = 25;
-
-/*
- * CPUs with load greater than the sched_spill_load_threshold are not
- * eligible for task placement. When all CPUs in a cluster achieve a
- * load higher than this level, tasks becomes eligible for inter
- * cluster migration.
- */
-unsigned int __read_mostly sched_spill_load;
-unsigned int __read_mostly sysctl_sched_spill_load_pct = 100;
-
-/*
- * Prefer the waker CPU for sync wakee task, if the CPU has only 1 runnable
- * task. This eliminates the LPM exit latency associated with the idle
- * CPUs in the waker cluster.
- */
-unsigned int __read_mostly sysctl_sched_prefer_sync_wakee_to_waker;
-
-/*
- * Tasks whose bandwidth consumption on a cpu is more than
- * sched_upmigrate are considered "big" tasks. Big tasks will be
- * considered for "up" migration, i.e migrating to a cpu with better
- * capacity.
- */
-unsigned int __read_mostly sched_upmigrate;
-unsigned int __read_mostly sysctl_sched_upmigrate_pct = 80;
-
-/*
- * Big tasks, once migrated, will need to drop their bandwidth
- * consumption to less than sched_downmigrate before they are "down"
- * migrated.
- */
-unsigned int __read_mostly sched_downmigrate;
-unsigned int __read_mostly sysctl_sched_downmigrate_pct = 60;
-
-/*
- * The load scale factor of a CPU gets boosted when its max frequency
- * is restricted due to which the tasks are migrating to higher capacity
- * CPUs early. The sched_upmigrate threshold is auto-upgraded by
- * rq->max_possible_freq/rq->max_freq of a lower capacity CPU.
- */
-unsigned int up_down_migrate_scale_factor = 1024;
-
-/*
- * Scheduler selects and places task to its previous CPU if sleep time is
- * less than sysctl_sched_select_prev_cpu_us.
- */
-unsigned int __read_mostly
-sched_short_sleep_task_threshold = 2000 * NSEC_PER_USEC;
-
-unsigned int __read_mostly sysctl_sched_select_prev_cpu_us = 2000;
-
-unsigned int __read_mostly
-sched_long_cpu_selection_threshold = 100 * NSEC_PER_MSEC;
-
-unsigned int __read_mostly sysctl_sched_restrict_cluster_spill;
-
-/*
- * Scheduler tries to avoid waking up idle CPUs for tasks running
- * in short bursts. If the task average burst is less than
- * sysctl_sched_short_burst nanoseconds and it sleeps on an average
- * for more than sysctl_sched_short_sleep nanoseconds, then the
- * task is eligible for packing.
- */
-unsigned int __read_mostly sysctl_sched_short_burst;
-unsigned int __read_mostly sysctl_sched_short_sleep = 1 * NSEC_PER_MSEC;
-
-static void _update_up_down_migrate(unsigned int *up_migrate,
- unsigned int *down_migrate, bool is_group)
-{
- unsigned int delta;
-
- if (up_down_migrate_scale_factor == 1024)
- return;
-
- delta = *up_migrate - *down_migrate;
-
- *up_migrate /= NSEC_PER_USEC;
- *up_migrate *= up_down_migrate_scale_factor;
- *up_migrate >>= 10;
- *up_migrate *= NSEC_PER_USEC;
-
- if (!is_group)
- *up_migrate = min(*up_migrate, sched_ravg_window);
-
- *down_migrate /= NSEC_PER_USEC;
- *down_migrate *= up_down_migrate_scale_factor;
- *down_migrate >>= 10;
- *down_migrate *= NSEC_PER_USEC;
-
- *down_migrate = min(*down_migrate, *up_migrate - delta);
-}
-
-static void update_up_down_migrate(void)
-{
- unsigned int up_migrate = pct_to_real(sysctl_sched_upmigrate_pct);
- unsigned int down_migrate = pct_to_real(sysctl_sched_downmigrate_pct);
-
- _update_up_down_migrate(&up_migrate, &down_migrate, false);
- sched_upmigrate = up_migrate;
- sched_downmigrate = down_migrate;
-
- up_migrate = pct_to_real(sysctl_sched_group_upmigrate_pct);
- down_migrate = pct_to_real(sysctl_sched_group_downmigrate_pct);
-
- _update_up_down_migrate(&up_migrate, &down_migrate, true);
- sched_group_upmigrate = up_migrate;
- sched_group_downmigrate = down_migrate;
-}
-
-void set_hmp_defaults(void)
-{
- sched_spill_load =
- pct_to_real(sysctl_sched_spill_load_pct);
-
- update_up_down_migrate();
-
- sched_init_task_load_windows =
- div64_u64((u64)sysctl_sched_init_task_load_pct *
- (u64)sched_ravg_window, 100);
-
- sched_short_sleep_task_threshold = sysctl_sched_select_prev_cpu_us *
- NSEC_PER_USEC;
-
- sched_small_wakee_task_load =
- div64_u64((u64)sysctl_sched_small_wakee_task_load_pct *
- (u64)sched_ravg_window, 100);
-
- sched_big_waker_task_load =
- div64_u64((u64)sysctl_sched_big_waker_task_load_pct *
- (u64)sched_ravg_window, 100);
-
- sched_freq_aggregate_threshold =
- pct_to_real(sysctl_sched_freq_aggregate_threshold_pct);
-}
-
-#ifdef CONFIG_CGROUP_SCHED
-
-int upmigrate_discouraged(struct task_struct *p)
-{
- return task_group(p)->upmigrate_discouraged;
-}
-
-#else
-
-static inline int upmigrate_discouraged(struct task_struct *p)
-{
- return 0;
-}
-
-#endif
-
-/* Is a task "big" on its current cpu */
-static inline int __is_big_task(struct task_struct *p, u64 scaled_load)
-{
- int nice = task_nice(p);
-
- if (nice > SCHED_UPMIGRATE_MIN_NICE || upmigrate_discouraged(p))
- return 0;
-
- return scaled_load > sched_upmigrate;
-}
-
-int is_big_task(struct task_struct *p)
-{
- return __is_big_task(p, scale_load_to_cpu(task_load(p), task_cpu(p)));
-}
-
-u64 cpu_load(int cpu)
-{
- struct rq *rq = cpu_rq(cpu);
-
- return scale_load_to_cpu(rq->hmp_stats.cumulative_runnable_avg, cpu);
-}
-
-u64 cpu_load_sync(int cpu, int sync)
-{
- return scale_load_to_cpu(cpu_cravg_sync(cpu, sync), cpu);
-}
-
-/*
- * Task will fit on a cpu if it's bandwidth consumption on that cpu
- * will be less than sched_upmigrate. A big task that was previously
- * "up" migrated will be considered fitting on "little" cpu if its
- * bandwidth consumption on "little" cpu will be less than
- * sched_downmigrate. This will help avoid frequenty migrations for
- * tasks with load close to the upmigrate threshold
- */
-int task_load_will_fit(struct task_struct *p, u64 task_load, int cpu,
- enum sched_boost_policy boost_policy)
-{
- int upmigrate = sched_upmigrate;
-
- if (cpu_capacity(cpu) == max_capacity)
- return 1;
-
- if (cpu_capacity(task_cpu(p)) > cpu_capacity(cpu))
- upmigrate = sched_downmigrate;
-
- if (boost_policy != SCHED_BOOST_ON_BIG) {
- if (task_nice(p) > SCHED_UPMIGRATE_MIN_NICE ||
- upmigrate_discouraged(p))
- return 1;
-
- if (task_load < upmigrate)
- return 1;
- } else {
- if (task_sched_boost(p) || task_load >= upmigrate)
- return 0;
-
- return 1;
- }
-
- return 0;
-}
-
-int task_will_fit(struct task_struct *p, int cpu)
-{
- u64 tload = scale_load_to_cpu(task_load(p), cpu);
-
- return task_load_will_fit(p, tload, cpu, sched_boost_policy());
-}
-
-/*
- * Return the cost of running task p on CPU cpu. This function
- * currently assumes that task p is the only task which will run on
- * the CPU.
- */
-unsigned int power_cost(int cpu, u64 demand)
-{
- int first, mid, last;
- struct cpu_pwr_stats *per_cpu_info = get_cpu_pwr_stats();
- struct cpu_pstate_pwr *costs;
- struct freq_max_load *max_load;
- int total_static_pwr_cost = 0;
- struct rq *rq = cpu_rq(cpu);
- unsigned int pc;
-
- if (!per_cpu_info || !per_cpu_info[cpu].ptable)
- /*
- * When power aware scheduling is not in use, or CPU
- * power data is not available, just use the CPU
- * capacity as a rough stand-in for real CPU power
- * numbers, assuming bigger CPUs are more power
- * hungry.
- */
- return cpu_max_possible_capacity(cpu);
-
- rcu_read_lock();
- max_load = rcu_dereference(per_cpu(freq_max_load, cpu));
- if (!max_load) {
- pc = cpu_max_possible_capacity(cpu);
- goto unlock;
- }
-
- costs = per_cpu_info[cpu].ptable;
-
- if (demand <= max_load->freqs[0].hdemand) {
- pc = costs[0].power;
- goto unlock;
- } else if (demand > max_load->freqs[max_load->length - 1].hdemand) {
- pc = costs[max_load->length - 1].power;
- goto unlock;
- }
-
- first = 0;
- last = max_load->length - 1;
- mid = (last - first) >> 1;
- while (1) {
- if (demand <= max_load->freqs[mid].hdemand)
- last = mid;
- else
- first = mid;
-
- if (last - first == 1)
- break;
- mid = first + ((last - first) >> 1);
- }
-
- pc = costs[last].power;
-
-unlock:
- rcu_read_unlock();
-
- if (idle_cpu(cpu) && rq->cstate) {
- total_static_pwr_cost += rq->static_cpu_pwr_cost;
- if (rq->cluster->dstate)
- total_static_pwr_cost +=
- rq->cluster->static_cluster_pwr_cost;
- }
-
- return pc + total_static_pwr_cost;
-
-}
-
-struct sched_cluster *rq_cluster(struct rq *rq)
-{
- return rq->cluster;
-}
-
-/*
- * reset_cpu_hmp_stats - reset HMP stats for a cpu
- * nr_big_tasks
- * cumulative_runnable_avg (iff reset_cra is true)
- */
-void reset_cpu_hmp_stats(int cpu, int reset_cra)
-{
- reset_cfs_rq_hmp_stats(cpu, reset_cra);
- reset_hmp_stats(&cpu_rq(cpu)->hmp_stats, reset_cra);
-}
-
-void fixup_nr_big_tasks(struct hmp_sched_stats *stats,
- struct task_struct *p, s64 delta)
-{
- u64 new_task_load;
- u64 old_task_load;
-
- if (sched_disable_window_stats)
- return;
-
- old_task_load = scale_load_to_cpu(task_load(p), task_cpu(p));
- new_task_load = scale_load_to_cpu(delta + task_load(p), task_cpu(p));
-
- if (__is_big_task(p, old_task_load) && !__is_big_task(p, new_task_load))
- stats->nr_big_tasks--;
- else if (!__is_big_task(p, old_task_load) &&
- __is_big_task(p, new_task_load))
- stats->nr_big_tasks++;
-
- BUG_ON(stats->nr_big_tasks < 0);
-}
-
-/*
- * Walk runqueue of cpu and re-initialize 'nr_big_tasks' counters.
- */
-static void update_nr_big_tasks(int cpu)
-{
- struct rq *rq = cpu_rq(cpu);
- struct task_struct *p;
-
- /* Do not reset cumulative_runnable_avg */
- reset_cpu_hmp_stats(cpu, 0);
-
- list_for_each_entry(p, &rq->cfs_tasks, se.group_node)
- inc_hmp_sched_stats_fair(rq, p, 0);
-}
-
-/* Disable interrupts and grab runqueue lock of all cpus listed in @cpus */
-void pre_big_task_count_change(const struct cpumask *cpus)
-{
- int i;
-
- local_irq_disable();
-
- for_each_cpu(i, cpus)
- raw_spin_lock(&cpu_rq(i)->lock);
-}
-
-/*
- * Reinitialize 'nr_big_tasks' counters on all affected cpus
- */
-void post_big_task_count_change(const struct cpumask *cpus)
-{
- int i;
-
- /* Assumes local_irq_disable() keeps online cpumap stable */
- for_each_cpu(i, cpus)
- update_nr_big_tasks(i);
-
- for_each_cpu(i, cpus)
- raw_spin_unlock(&cpu_rq(i)->lock);
-
- local_irq_enable();
-}
-
-static inline int invalid_value_freq_input(unsigned int *data)
-{
- if (data == &sysctl_sched_freq_aggregate)
- return !(*data == 0 || *data == 1);
-
- return 0;
-}
-
-static inline int invalid_value(unsigned int *data)
-{
- unsigned int val = *data;
-
- if (data == &sysctl_sched_ravg_hist_size)
- return (val < 2 || val > RAVG_HIST_SIZE_MAX);
-
- if (data == &sysctl_sched_window_stats_policy)
- return val >= WINDOW_STATS_INVALID_POLICY;
-
- return invalid_value_freq_input(data);
-}
-
-/*
- * Handle "atomic" update of sysctl_sched_window_stats_policy,
- * sysctl_sched_ravg_hist_size variables.
- */
-int sched_window_update_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos)
-{
- int ret;
- unsigned int *data = (unsigned int *)table->data;
- unsigned int old_val;
-
- mutex_lock(&policy_mutex);
-
- old_val = *data;
-
- ret = proc_dointvec(table, write, buffer, lenp, ppos);
- if (ret || !write || (write && (old_val == *data)))
- goto done;
-
- if (invalid_value(data)) {
- *data = old_val;
- ret = -EINVAL;
- goto done;
- }
-
- reset_all_window_stats(0, 0);
-
-done:
- mutex_unlock(&policy_mutex);
-
- return ret;
-}
-
-/*
- * Convert percentage value into absolute form. This will avoid div() operation
- * in fast path, to convert task load in percentage scale.
- */
-int sched_hmp_proc_update_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos)
-{
- int ret;
- unsigned int old_val;
- unsigned int *data = (unsigned int *)table->data;
- int update_task_count = 0;
-
- /*
- * The policy mutex is acquired with cpu_hotplug.lock
- * held from cpu_up()->cpufreq_governor_interactive()->
- * sched_set_window(). So enforce the same order here.
- */
- if (write && (data == &sysctl_sched_upmigrate_pct)) {
- update_task_count = 1;
- get_online_cpus();
- }
-
- mutex_lock(&policy_mutex);
-
- old_val = *data;
-
- ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
-
- if (ret || !write)
- goto done;
-
- if (write && (old_val == *data))
- goto done;
-
- if (sysctl_sched_downmigrate_pct > sysctl_sched_upmigrate_pct ||
- sysctl_sched_group_downmigrate_pct >
- sysctl_sched_group_upmigrate_pct) {
- *data = old_val;
- ret = -EINVAL;
- goto done;
- }
-
- /*
- * Big task tunable change will need to re-classify tasks on
- * runqueue as big and set their counters appropriately.
- * sysctl interface affects secondary variables (*_pct), which is then
- * "atomically" carried over to the primary variables. Atomic change
- * includes taking runqueue lock of all online cpus and re-initiatizing
- * their big counter values based on changed criteria.
- */
- if (update_task_count)
- pre_big_task_count_change(cpu_online_mask);
-
- set_hmp_defaults();
-
- if (update_task_count)
- post_big_task_count_change(cpu_online_mask);
-
-done:
- mutex_unlock(&policy_mutex);
- if (update_task_count)
- put_online_cpus();
- return ret;
-}
-
-inline int nr_big_tasks(struct rq *rq)
-{
- return rq->hmp_stats.nr_big_tasks;
-}
-
-unsigned int cpu_temp(int cpu)
-{
- struct cpu_pwr_stats *per_cpu_info = get_cpu_pwr_stats();
-
- if (per_cpu_info)
- return per_cpu_info[cpu].temp;
- else
- return 0;
-}
-
-/* Return task demand in percentage scale */
-unsigned int pct_task_load(struct task_struct *p)
-{
- unsigned int load;
-
- load = div64_u64((u64)task_load(p) * 100, (u64)max_task_load());
-
- return load;
-}
-
-static int __init set_sched_ravg_window(char *str)
-{
- unsigned int window_size;
-
- get_option(&str, &window_size);
-
- if (window_size < MIN_SCHED_RAVG_WINDOW ||
- window_size > MAX_SCHED_RAVG_WINDOW) {
- WARN_ON(1);
- return -EINVAL;
- }
-
- sched_ravg_window = window_size;
- return 0;
-}
-
-early_param("sched_ravg_window", set_sched_ravg_window);
-
-#define DIV64_U64_ROUNDUP(X, Y) div64_u64((X) + (Y - 1), Y)
-
-static inline u64 scale_exec_time(u64 delta, struct rq *rq)
-{
- u32 freq;
-
- freq = cpu_cycles_to_freq(rq->cc.cycles, rq->cc.time);
- delta = DIV64_U64_ROUNDUP(delta * freq, max_possible_freq);
- delta *= rq->cluster->exec_scale_factor;
- delta >>= 10;
-
- return delta;
-}
-
-/* Does freq_required sufficiently exceed or fall behind cur_freq? */
-static inline int
-nearly_same_freq(unsigned int cur_freq, unsigned int freq_required)
-{
- int delta = freq_required - cur_freq;
-
- if (freq_required > cur_freq)
- return delta < sysctl_sched_freq_inc_notify;
-
- delta = -delta;
-
- return delta < sysctl_sched_freq_dec_notify;
-}
-
-/* Convert busy time to frequency equivalent */
-static inline unsigned int load_to_freq(struct rq *rq, u64 load)
-{
- unsigned int freq;
-
- load = scale_load_to_cpu(load, cpu_of(rq));
- load *= 128;
- load = div64_u64(load, max_task_load());
-
- freq = load * cpu_max_possible_freq(cpu_of(rq));
- freq /= 128;
-
- return freq;
-}
-
-/*
- * Return load from all related groups in given frequency domain.
- */
-static void group_load_in_freq_domain(struct cpumask *cpus,
- u64 *grp_load, u64 *new_grp_load)
-{
- int j;
-
- for_each_cpu(j, cpus) {
- struct rq *rq = cpu_rq(j);
-
- *grp_load += rq->grp_time.prev_runnable_sum;
- *new_grp_load += rq->grp_time.nt_prev_runnable_sum;
- }
-}
-
-/*
- * Should scheduler alert governor for changing frequency?
- *
- * @check_pred - evaluate frequency based on the predictive demand
- * @check_groups - add load from all related groups on given cpu
- *
- * check_groups is set to 1 if a "related" task movement/wakeup is triggering
- * the notification check. To avoid "re-aggregation" of demand in such cases,
- * we check whether the migrated/woken tasks demand (along with demand from
- * existing tasks on the cpu) can be met on target cpu
- *
- */
-
-static int send_notification(struct rq *rq, int check_pred, int check_groups)
-{
- unsigned int cur_freq, freq_required;
- unsigned long flags;
- int rc = 0;
- u64 group_load = 0, new_load = 0;
-
- if (check_pred) {
- u64 prev = rq->old_busy_time;
- u64 predicted = rq->hmp_stats.pred_demands_sum;
-
- if (rq->cluster->cur_freq == cpu_max_freq(cpu_of(rq)))
- return 0;
-
- prev = max(prev, rq->old_estimated_time);
- if (prev > predicted)
- return 0;
-
- cur_freq = load_to_freq(rq, prev);
- freq_required = load_to_freq(rq, predicted);
-
- if (freq_required < cur_freq + sysctl_sched_pred_alert_freq)
- return 0;
- } else {
- /*
- * Protect from concurrent update of rq->prev_runnable_sum and
- * group cpu load
- */
- raw_spin_lock_irqsave(&rq->lock, flags);
- if (check_groups)
- group_load = rq->grp_time.prev_runnable_sum;
-
- new_load = rq->prev_runnable_sum + group_load;
- new_load = freq_policy_load(rq, new_load);
-
- raw_spin_unlock_irqrestore(&rq->lock, flags);
-
- cur_freq = load_to_freq(rq, rq->old_busy_time);
- freq_required = load_to_freq(rq, new_load);
-
- if (nearly_same_freq(cur_freq, freq_required))
- return 0;
- }
-
- raw_spin_lock_irqsave(&rq->lock, flags);
- if (!rq->cluster->notifier_sent) {
- rq->cluster->notifier_sent = 1;
- rc = 1;
- trace_sched_freq_alert(cpu_of(rq), check_pred, check_groups, rq,
- new_load);
- }
- raw_spin_unlock_irqrestore(&rq->lock, flags);
-
- return rc;
-}
-
-/* Alert governor if there is a need to change frequency */
-void check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups)
-{
- int cpu = cpu_of(rq);
-
- if (!send_notification(rq, check_pred, check_groups))
- return;
-
- atomic_notifier_call_chain(
- &load_alert_notifier_head, 0,
- (void *)(long)cpu);
-}
-
-void notify_migration(int src_cpu, int dest_cpu, bool src_cpu_dead,
- struct task_struct *p)
-{
- bool check_groups;
-
- rcu_read_lock();
- check_groups = task_in_related_thread_group(p);
- rcu_read_unlock();
-
- if (!same_freq_domain(src_cpu, dest_cpu)) {
- if (!src_cpu_dead)
- check_for_freq_change(cpu_rq(src_cpu), false,
- check_groups);
- check_for_freq_change(cpu_rq(dest_cpu), false, check_groups);
- } else {
- check_for_freq_change(cpu_rq(dest_cpu), true, check_groups);
- }
-}
-
-#define INC_STEP 8
-#define DEC_STEP 2
-#define CONSISTENT_THRES 16
-#define INC_STEP_BIG 16
-/*
- * bucket_increase - update the count of all buckets
- *
- * @buckets: array of buckets tracking busy time of a task
- * @idx: the index of bucket to be incremented
- *
- * Each time a complete window finishes, count of bucket that runtime
- * falls in (@idx) is incremented. Counts of all other buckets are
- * decayed. The rate of increase and decay could be different based
- * on current count in the bucket.
- */
-static inline void bucket_increase(u8 *buckets, int idx)
-{
- int i, step;
-
- for (i = 0; i < NUM_BUSY_BUCKETS; i++) {
- if (idx != i) {
- if (buckets[i] > DEC_STEP)
- buckets[i] -= DEC_STEP;
- else
- buckets[i] = 0;
- } else {
- step = buckets[i] >= CONSISTENT_THRES ?
- INC_STEP_BIG : INC_STEP;
- if (buckets[i] > U8_MAX - step)
- buckets[i] = U8_MAX;
- else
- buckets[i] += step;
- }
- }
-}
-
-static inline int busy_to_bucket(u32 normalized_rt)
-{
- int bidx;
-
- bidx = mult_frac(normalized_rt, NUM_BUSY_BUCKETS, max_task_load());
- bidx = min(bidx, NUM_BUSY_BUCKETS - 1);
-
- /*
- * Combine lowest two buckets. The lowest frequency falls into
- * 2nd bucket and thus keep predicting lowest bucket is not
- * useful.
- */
- if (!bidx)
- bidx++;
-
- return bidx;
-}
-
-/*
- * get_pred_busy - calculate predicted demand for a task on runqueue
- *
- * @rq: runqueue of task p
- * @p: task whose prediction is being updated
- * @start: starting bucket. returned prediction should not be lower than
- * this bucket.
- * @runtime: runtime of the task. returned prediction should not be lower
- * than this runtime.
- * Note: @start can be derived from @runtime. It's passed in only to
- * avoid duplicated calculation in some cases.
- *
- * A new predicted busy time is returned for task @p based on @runtime
- * passed in. The function searches through buckets that represent busy
- * time equal to or bigger than @runtime and attempts to find the bucket to
- * to use for prediction. Once found, it searches through historical busy
- * time and returns the latest that falls into the bucket. If no such busy
- * time exists, it returns the medium of that bucket.
- */
-static u32 get_pred_busy(struct rq *rq, struct task_struct *p,
- int start, u32 runtime)
-{
- int i;
- u8 *buckets = p->ravg.busy_buckets;
- u32 *hist = p->ravg.sum_history;
- u32 dmin, dmax;
- u64 cur_freq_runtime = 0;
- int first = NUM_BUSY_BUCKETS, final;
- u32 ret = runtime;
-
- /* skip prediction for new tasks due to lack of history */
- if (unlikely(is_new_task(p)))
- goto out;
-
- /* find minimal bucket index to pick */
- for (i = start; i < NUM_BUSY_BUCKETS; i++) {
- if (buckets[i]) {
- first = i;
- break;
- }
- }
- /* if no higher buckets are filled, predict runtime */
- if (first >= NUM_BUSY_BUCKETS)
- goto out;
-
- /* compute the bucket for prediction */
- final = first;
-
- /* determine demand range for the predicted bucket */
- if (final < 2) {
- /* lowest two buckets are combined */
- dmin = 0;
- final = 1;
- } else {
- dmin = mult_frac(final, max_task_load(), NUM_BUSY_BUCKETS);
- }
- dmax = mult_frac(final + 1, max_task_load(), NUM_BUSY_BUCKETS);
-
- /*
- * search through runtime history and return first runtime that falls
- * into the range of predicted bucket.
- */
- for (i = 0; i < sched_ravg_hist_size; i++) {
- if (hist[i] >= dmin && hist[i] < dmax) {
- ret = hist[i];
- break;
- }
- }
- /* no historical runtime within bucket found, use average of the bin */
- if (ret < dmin)
- ret = (dmin + dmax) / 2;
- /*
- * when updating in middle of a window, runtime could be higher
- * than all recorded history. Always predict at least runtime.
- */
- ret = max(runtime, ret);
-out:
- trace_sched_update_pred_demand(rq, p, runtime,
- mult_frac((unsigned int)cur_freq_runtime, 100,
- sched_ravg_window), ret);
- return ret;
-}
-
-static inline u32 calc_pred_demand(struct rq *rq, struct task_struct *p)
-{
- if (p->ravg.pred_demand >= p->ravg.curr_window)
- return p->ravg.pred_demand;
-
- return get_pred_busy(rq, p, busy_to_bucket(p->ravg.curr_window),
- p->ravg.curr_window);
-}
-
-static void reset_all_task_stats(void)
-{
- struct task_struct *g, *p;
-
- do_each_thread(g, p) {
- reset_task_stats(p);
- } while_each_thread(g, p);
-}
-
-enum reset_reason_code {
- WINDOW_CHANGE,
- POLICY_CHANGE,
- HIST_SIZE_CHANGE,
- FREQ_AGGREGATE_CHANGE,
-};
-
-const char *sched_window_reset_reasons[] = {
- "WINDOW_CHANGE",
- "POLICY_CHANGE",
- "HIST_SIZE_CHANGE",
-};
-
-/* Called with IRQs enabled */
-void reset_all_window_stats(u64 window_start, unsigned int window_size)
-{
- int cpu, i;
- unsigned long flags;
- u64 start_ts = sched_ktime_clock();
- int reason = WINDOW_CHANGE;
- unsigned int old = 0, new = 0;
-
- local_irq_save(flags);
-
- read_lock(&tasklist_lock);
-
- read_lock(&related_thread_group_lock);
-
- /* Taking all runqueue locks prevents race with sched_exit(). */
- for_each_possible_cpu(cpu)
- raw_spin_lock(&cpu_rq(cpu)->lock);
-
- sched_disable_window_stats = 1;
-
- reset_all_task_stats();
-
- read_unlock(&tasklist_lock);
-
- if (window_size) {
- sched_ravg_window = window_size * TICK_NSEC;
- set_hmp_defaults();
- sched_load_granule = sched_ravg_window / NUM_LOAD_INDICES;
- }
-
- sched_disable_window_stats = 0;
-
- for_each_possible_cpu(cpu) {
- struct rq *rq = cpu_rq(cpu);
-
- if (window_start)
- rq->window_start = window_start;
- rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
- rq->nt_curr_runnable_sum = rq->nt_prev_runnable_sum = 0;
- memset(&rq->grp_time, 0, sizeof(struct group_cpu_time));
- for (i = 0; i < NUM_TRACKED_WINDOWS; i++) {
- memset(&rq->load_subs[i], 0,
- sizeof(struct load_subtractions));
- clear_top_tasks_table(rq->top_tasks[i]);
- clear_top_tasks_bitmap(rq->top_tasks_bitmap[i]);
- }
-
- rq->curr_table = 0;
- rq->curr_top = 0;
- rq->prev_top = 0;
- reset_cpu_hmp_stats(cpu, 1);
- }
-
- if (sched_window_stats_policy != sysctl_sched_window_stats_policy) {
- reason = POLICY_CHANGE;
- old = sched_window_stats_policy;
- new = sysctl_sched_window_stats_policy;
- sched_window_stats_policy = sysctl_sched_window_stats_policy;
- } else if (sched_ravg_hist_size != sysctl_sched_ravg_hist_size) {
- reason = HIST_SIZE_CHANGE;
- old = sched_ravg_hist_size;
- new = sysctl_sched_ravg_hist_size;
- sched_ravg_hist_size = sysctl_sched_ravg_hist_size;
- } else if (sched_freq_aggregate !=
- sysctl_sched_freq_aggregate) {
- reason = FREQ_AGGREGATE_CHANGE;
- old = sched_freq_aggregate;
- new = sysctl_sched_freq_aggregate;
- sched_freq_aggregate = sysctl_sched_freq_aggregate;
- }
-
- for_each_possible_cpu(cpu)
- raw_spin_unlock(&cpu_rq(cpu)->lock);
-
- read_unlock(&related_thread_group_lock);
-
- local_irq_restore(flags);
-
- trace_sched_reset_all_window_stats(window_start, window_size,
- sched_ktime_clock() - start_ts, reason, old, new);
-}
-
-void sched_get_cpus_busy(struct sched_load *busy,
- const struct cpumask *query_cpus)
-{
- unsigned long flags;
- struct rq *rq;
- const int cpus = cpumask_weight(query_cpus);
- u64 load[cpus], group_load[cpus];
- u64 nload[cpus], ngload[cpus];
- u64 pload[cpus];
- unsigned int max_freq[cpus];
- int notifier_sent = 0;
- int early_detection[cpus];
- int cpu, i = 0;
- unsigned int window_size;
- u64 max_prev_sum = 0;
- int max_busy_cpu = cpumask_first(query_cpus);
- u64 total_group_load = 0, total_ngload = 0;
- bool aggregate_load = false;
- struct sched_cluster *cluster = cpu_cluster(cpumask_first(query_cpus));
-
- if (unlikely(cpus == 0))
- return;
-
- local_irq_save(flags);
-
- /*
- * This function could be called in timer context, and the
- * current task may have been executing for a long time. Ensure
- * that the window stats are current by doing an update.
- */
-
- for_each_cpu(cpu, query_cpus)
- raw_spin_lock(&cpu_rq(cpu)->lock);
-
- window_size = sched_ravg_window;
-
- /*
- * We don't really need the cluster lock for this entire for loop
- * block. However, there is no advantage in optimizing this as rq
- * locks are held regardless and would prevent migration anyways
- */
- raw_spin_lock(&cluster->load_lock);
-
- for_each_cpu(cpu, query_cpus) {
- rq = cpu_rq(cpu);
-
- update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_ktime_clock(),
- 0);
-
- /*
- * Ensure that we don't report load for 'cpu' again via the
- * cpufreq_update_util path in the window that started at
- * rq->window_start
- */
- rq->load_reported_window = rq->window_start;
-
- account_load_subtractions(rq);
- load[i] = rq->prev_runnable_sum;
- nload[i] = rq->nt_prev_runnable_sum;
- pload[i] = rq->hmp_stats.pred_demands_sum;
- rq->old_estimated_time = pload[i];
-
- if (load[i] > max_prev_sum) {
- max_prev_sum = load[i];
- max_busy_cpu = cpu;
- }
-
- /*
- * sched_get_cpus_busy() is called for all CPUs in a
- * frequency domain. So the notifier_sent flag per
- * cluster works even when a frequency domain spans
- * more than 1 cluster.
- */
- if (rq->cluster->notifier_sent) {
- notifier_sent = 1;
- rq->cluster->notifier_sent = 0;
- }
- early_detection[i] = (rq->ed_task != NULL);
- max_freq[i] = cpu_max_freq(cpu);
- i++;
- }
-
- raw_spin_unlock(&cluster->load_lock);
-
- group_load_in_freq_domain(
- &cpu_rq(max_busy_cpu)->freq_domain_cpumask,
- &total_group_load, &total_ngload);
- aggregate_load = !!(total_group_load > sched_freq_aggregate_threshold);
-
- i = 0;
- for_each_cpu(cpu, query_cpus) {
- group_load[i] = 0;
- ngload[i] = 0;
-
- if (early_detection[i])
- goto skip_early;
-
- rq = cpu_rq(cpu);
- if (aggregate_load) {
- if (cpu == max_busy_cpu) {
- group_load[i] = total_group_load;
- ngload[i] = total_ngload;
- }
- } else {
- group_load[i] = rq->grp_time.prev_runnable_sum;
- ngload[i] = rq->grp_time.nt_prev_runnable_sum;
- }
-
- load[i] += group_load[i];
- nload[i] += ngload[i];
-
- load[i] = freq_policy_load(rq, load[i]);
- rq->old_busy_time = load[i];
-
- /*
- * Scale load in reference to cluster max_possible_freq.
- *
- * Note that scale_load_to_cpu() scales load in reference to
- * the cluster max_freq.
- */
- load[i] = scale_load_to_cpu(load[i], cpu);
- nload[i] = scale_load_to_cpu(nload[i], cpu);
- pload[i] = scale_load_to_cpu(pload[i], cpu);
-skip_early:
- i++;
- }
-
- for_each_cpu(cpu, query_cpus)
- raw_spin_unlock(&(cpu_rq(cpu))->lock);
-
- local_irq_restore(flags);
-
- i = 0;
- for_each_cpu(cpu, query_cpus) {
- rq = cpu_rq(cpu);
-
- if (early_detection[i]) {
- busy[i].prev_load = div64_u64(sched_ravg_window,
- NSEC_PER_USEC);
- busy[i].new_task_load = 0;
- busy[i].predicted_load = 0;
- goto exit_early;
- }
-
- load[i] = scale_load_to_freq(load[i], max_freq[i],
- cpu_max_possible_freq(cpu));
- nload[i] = scale_load_to_freq(nload[i], max_freq[i],
- cpu_max_possible_freq(cpu));
-
- pload[i] = scale_load_to_freq(pload[i], max_freq[i],
- rq->cluster->max_possible_freq);
-
- busy[i].prev_load = div64_u64(load[i], NSEC_PER_USEC);
- busy[i].new_task_load = div64_u64(nload[i], NSEC_PER_USEC);
- busy[i].predicted_load = div64_u64(pload[i], NSEC_PER_USEC);
-
-exit_early:
- trace_sched_get_busy(cpu, busy[i].prev_load,
- busy[i].new_task_load,
- busy[i].predicted_load,
- early_detection[i]);
- i++;
- }
-}
-
-int sched_set_window(u64 window_start, unsigned int window_size)
-{
- u64 now, cur_jiffies, jiffy_ktime_ns;
- s64 ws;
- unsigned long flags;
-
- if (window_size * TICK_NSEC < MIN_SCHED_RAVG_WINDOW)
- return -EINVAL;
-
- mutex_lock(&policy_mutex);
-
- /*
- * Get a consistent view of ktime, jiffies, and the time
- * since the last jiffy (based on last_jiffies_update).
- */
- local_irq_save(flags);
- cur_jiffies = jiffy_to_ktime_ns(&now, &jiffy_ktime_ns);
- local_irq_restore(flags);
-
- /* translate window_start from jiffies to nanoseconds */
- ws = (window_start - cur_jiffies); /* jiffy difference */
- ws *= TICK_NSEC;
- ws += jiffy_ktime_ns;
-
- /*
- * Roll back calculated window start so that it is in
- * the past (window stats must have a current window).
- */
- while (ws > now)
- ws -= (window_size * TICK_NSEC);
-
- BUG_ON(sched_ktime_clock() < ws);
-
- reset_all_window_stats(ws, window_size);
-
- sched_update_freq_max_load(cpu_possible_mask);
-
- mutex_unlock(&policy_mutex);
-
- return 0;
-}
-
-static inline void create_subtraction_entry(struct rq *rq, u64 ws, int index)
-{
- rq->load_subs[index].window_start = ws;
- rq->load_subs[index].subs = 0;
- rq->load_subs[index].new_subs = 0;
-}
-
-#define sched_up_down_migrate_auto_update 1
-static void check_for_up_down_migrate_update(const struct cpumask *cpus)
-{
- int i = cpumask_first(cpus);
-
- if (!sched_up_down_migrate_auto_update)
- return;
-
- if (cpu_max_possible_capacity(i) == max_possible_capacity)
- return;
-
- if (cpu_max_possible_freq(i) == cpu_max_freq(i))
- up_down_migrate_scale_factor = 1024;
- else
- up_down_migrate_scale_factor = (1024 *
- cpu_max_possible_freq(i)) / cpu_max_freq(i);
-
- update_up_down_migrate();
-}
-
-void update_cpu_cluster_capacity(const cpumask_t *cpus)
-{
- int i;
- struct sched_cluster *cluster;
- struct cpumask cpumask;
-
- cpumask_copy(&cpumask, cpus);
- pre_big_task_count_change(cpu_possible_mask);
-
- for_each_cpu(i, &cpumask) {
- cluster = cpu_rq(i)->cluster;
- cpumask_andnot(&cpumask, &cpumask, &cluster->cpus);
-
- cluster->capacity = compute_capacity(cluster);
- cluster->load_scale_factor = compute_load_scale_factor(cluster);
-
- /* 'cpus' can contain cpumask more than one cluster */
- check_for_up_down_migrate_update(&cluster->cpus);
- }
-
- __update_min_max_capacity();
-
- post_big_task_count_change(cpu_possible_mask);
-}
-
-static DEFINE_SPINLOCK(cpu_freq_min_max_lock);
-void sched_update_cpu_freq_min_max(const cpumask_t *cpus, u32 fmin, u32 fmax)
-{
- struct cpumask cpumask;
- struct sched_cluster *cluster;
- int i, update_capacity = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&cpu_freq_min_max_lock, flags);
- cpumask_copy(&cpumask, cpus);
- for_each_cpu(i, &cpumask) {
- cluster = cpu_rq(i)->cluster;
- cpumask_andnot(&cpumask, &cpumask, &cluster->cpus);
-
- update_capacity += (cluster->max_mitigated_freq != fmax);
- cluster->max_mitigated_freq = fmax;
- }
- spin_unlock_irqrestore(&cpu_freq_min_max_lock, flags);
-
- if (update_capacity)
- update_cpu_cluster_capacity(cpus);
-}
-
-static int cpufreq_notifier_trans(struct notifier_block *nb,
- unsigned long val, void *data)
-{
- struct cpufreq_freqs *freq = (struct cpufreq_freqs *)data;
- unsigned int cpu = freq->cpu, new_freq = freq->new;
- unsigned long flags;
- struct sched_cluster *cluster;
- struct cpumask policy_cpus = cpu_rq(cpu)->freq_domain_cpumask;
- int i, j;
-
- if (val != CPUFREQ_POSTCHANGE)
- return 0;
-
- BUG_ON(!new_freq);
-
- if (cpu_cur_freq(cpu) == new_freq)
- return 0;
-
- for_each_cpu(i, &policy_cpus) {
- cluster = cpu_rq(i)->cluster;
-
- for_each_cpu(j, &cluster->cpus) {
- struct rq *rq = cpu_rq(j);
-
- raw_spin_lock_irqsave(&rq->lock, flags);
- update_task_ravg(rq->curr, rq, TASK_UPDATE,
- sched_ktime_clock(), 0);
- raw_spin_unlock_irqrestore(&rq->lock, flags);
- }
-
- cluster->cur_freq = new_freq;
- cpumask_andnot(&policy_cpus, &policy_cpus, &cluster->cpus);
- }
-
- return 0;
-}
-
-static int pwr_stats_ready_notifier(struct notifier_block *nb,
- unsigned long cpu, void *data)
-{
- cpumask_t mask = CPU_MASK_NONE;
-
- cpumask_set_cpu(cpu, &mask);
- sched_update_freq_max_load(&mask);
-
- mutex_lock(&cluster_lock);
- sort_clusters();
- mutex_unlock(&cluster_lock);
-
- return 0;
-}
-
-static struct notifier_block notifier_trans_block = {
- .notifier_call = cpufreq_notifier_trans
-};
-
-static struct notifier_block notifier_pwr_stats_ready = {
- .notifier_call = pwr_stats_ready_notifier
-};
-
-int __weak register_cpu_pwr_stats_ready_notifier(struct notifier_block *nb)
-{
- return -EINVAL;
-}
-
-static int register_sched_callback(void)
-{
- cpufreq_register_notifier(¬ifier_trans_block,
- CPUFREQ_TRANSITION_NOTIFIER);
-
- register_cpu_pwr_stats_ready_notifier(¬ifier_pwr_stats_ready);
-
- return 0;
-}
-
-/*
- * cpufreq callbacks can be registered at core_initcall or later time.
- * Any registration done prior to that is "forgotten" by cpufreq. See
- * initialization of variable init_cpufreq_transition_notifier_list_called
- * for further information.
- */
-core_initcall(register_sched_callback);
-
-void update_avg_burst(struct task_struct *p)
-{
- update_avg(&p->ravg.avg_burst, p->ravg.curr_burst);
- p->ravg.curr_burst = 0;
-}
-
-void note_task_waking(struct task_struct *p, u64 wallclock)
-{
- u64 sleep_time = wallclock - p->last_switch_out_ts;
-
- p->last_wake_ts = wallclock;
- update_avg(&p->ravg.avg_sleep_time, sleep_time);
-}
-
-#ifdef CONFIG_CGROUP_SCHED
-u64 cpu_upmigrate_discourage_read_u64(struct cgroup_subsys_state *css,
- struct cftype *cft)
-{
- struct task_group *tg = css_tg(css);
-
- return tg->upmigrate_discouraged;
-}
-
-int cpu_upmigrate_discourage_write_u64(struct cgroup_subsys_state *css,
- struct cftype *cft, u64 upmigrate_discourage)
-{
- struct task_group *tg = css_tg(css);
- int discourage = upmigrate_discourage > 0;
-
- if (tg->upmigrate_discouraged == discourage)
- return 0;
-
- /*
- * Revisit big-task classification for tasks of this cgroup. It would
- * have been efficient to walk tasks of just this cgroup in running
- * state, but we don't have easy means to do that. Walk all tasks in
- * running state on all cpus instead and re-visit their big task
- * classification.
- */
- get_online_cpus();
- pre_big_task_count_change(cpu_online_mask);
-
- tg->upmigrate_discouraged = discourage;
-
- post_big_task_count_change(cpu_online_mask);
- put_online_cpus();
-
- return 0;
-}
-#endif /* CONFIG_CGROUP_SCHED */
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
index b852cbe..5405d3f 100644
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -78,14 +78,6 @@
{
}
-#ifdef CONFIG_SCHED_WALT
-static void
-fixup_hmp_sched_stats_idle(struct rq *rq, struct task_struct *p,
- u32 new_task_load, u32 new_pred_demand)
-{
-}
-#endif
-
/*
* Simple, special scheduling class for the per-CPU idle tasks:
*/
@@ -114,7 +106,4 @@
.prio_changed = prio_changed_idle,
.switched_to = switched_to_idle,
.update_curr = update_curr_idle,
-#ifdef CONFIG_SCHED_WALT
- .fixup_hmp_sched_stats = fixup_hmp_sched_stats_idle,
-#endif
};
diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c
index a2d6eb7..ec91fcc 100644
--- a/kernel/sched/loadavg.c
+++ b/kernel/sched/loadavg.c
@@ -201,8 +201,9 @@
struct rq *this_rq = this_rq();
/*
- * If we're still before the sample window, we're done.
+ * If we're still before the pending sample window, we're done.
*/
+ this_rq->calc_load_update = calc_load_update;
if (time_before(jiffies, this_rq->calc_load_update))
return;
@@ -211,7 +212,6 @@
* accounted through the nohz accounting, so skip the entire deal and
* sync up for the next window.
*/
- this_rq->calc_load_update = calc_load_update;
if (time_before(jiffies, this_rq->calc_load_update + 10))
this_rq->calc_load_update += LOAD_FREQ;
}
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 65b34b4..6b935e7 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -11,60 +11,6 @@
#include <linux/irq_work.h>
#include <trace/events/sched.h>
-#ifdef CONFIG_SCHED_WALT
-
-static void
-inc_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p)
-{
- inc_cumulative_runnable_avg(&rq->hmp_stats, p);
-}
-
-static void
-dec_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p)
-{
- dec_cumulative_runnable_avg(&rq->hmp_stats, p);
-}
-
-static void
-fixup_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p,
- u32 new_task_load, u32 new_pred_demand)
-{
- s64 task_load_delta = (s64)new_task_load - task_load(p);
- s64 pred_demand_delta = PRED_DEMAND_DELTA;
-
- fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta,
- pred_demand_delta);
-}
-
-#ifdef CONFIG_SMP
-static int find_lowest_rq(struct task_struct *task);
-
-#ifdef CONFIG_SCHED_HMP
-static int
-select_task_rq_rt_hmp(struct task_struct *p, int cpu, int sd_flag, int flags)
-{
- int target;
-
- rcu_read_lock();
- target = find_lowest_rq(p);
- if (target != -1)
- cpu = target;
- rcu_read_unlock();
-
- return cpu;
-}
-#endif /* CONFIG_SCHED_HMP */
-#endif /* CONFIG_SMP */
-#else /* CONFIG_SCHED_WALT */
-
-static inline void
-inc_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p) { }
-
-static inline void
-dec_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p) { }
-
-#endif /* CONFIG_SCHED_HMP */
-
#include "walt.h"
int sched_rr_timeslice = RR_TIMESLICE;
@@ -1436,7 +1382,7 @@
rt_se->timeout = 0;
enqueue_rt_entity(rt_se, flags);
- inc_hmp_sched_stats_rt(rq, p);
+ walt_inc_cumulative_runnable_avg(rq, p);
if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1)
enqueue_pushable_task(rq, p);
@@ -1448,7 +1394,7 @@
update_curr_rt(rq);
dequeue_rt_entity(rt_se, flags);
- dec_hmp_sched_stats_rt(rq, p);
+ walt_dec_cumulative_runnable_avg(rq, p);
dequeue_pushable_task(rq, p);
}
@@ -1515,10 +1461,6 @@
struct rq *rq;
bool may_not_preempt;
-#ifdef CONFIG_SCHED_HMP
- return select_task_rq_rt_hmp(p, cpu, sd_flag, flags);
-#endif
-
/* For anything but wake ups, just return the task_cpu */
if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
goto out;
@@ -1771,105 +1713,6 @@
static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
-#ifdef CONFIG_SCHED_HMP
-static int find_lowest_rq_hmp(struct task_struct *task)
-{
- struct cpumask *lowest_mask = *this_cpu_ptr(&local_cpu_mask);
- struct cpumask candidate_mask = CPU_MASK_NONE;
- struct sched_cluster *cluster;
- int best_cpu = -1;
- int prev_cpu = task_cpu(task);
- u64 cpu_load, min_load = ULLONG_MAX;
- int i;
- int restrict_cluster;
- int boost_on_big;
- int pack_task, wakeup_latency, least_wakeup_latency = INT_MAX;
-
- boost_on_big = sched_boost() == FULL_THROTTLE_BOOST &&
- sched_boost_policy() == SCHED_BOOST_ON_BIG;
-
- restrict_cluster = sysctl_sched_restrict_cluster_spill;
-
- /* Make sure the mask is initialized first */
- if (unlikely(!lowest_mask))
- return best_cpu;
-
- if (task->nr_cpus_allowed == 1)
- return best_cpu; /* No other targets possible */
-
- if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
- return best_cpu; /* No targets found */
-
- pack_task = is_short_burst_task(task);
-
- /*
- * At this point we have built a mask of cpus representing the
- * lowest priority tasks in the system. Now we want to elect
- * the best one based on our affinity and topology.
- */
-
- for_each_sched_cluster(cluster) {
- if (boost_on_big && cluster->capacity != max_possible_capacity)
- continue;
-
- cpumask_and(&candidate_mask, &cluster->cpus, lowest_mask);
- cpumask_andnot(&candidate_mask, &candidate_mask,
- cpu_isolated_mask);
-
- if (cpumask_empty(&candidate_mask))
- continue;
-
- for_each_cpu(i, &candidate_mask) {
- if (sched_cpu_high_irqload(i))
- continue;
-
- cpu_load = cpu_rq(i)->hmp_stats.cumulative_runnable_avg;
- if (!restrict_cluster)
- cpu_load = scale_load_to_cpu(cpu_load, i);
-
- if (pack_task) {
- wakeup_latency = cpu_rq(i)->wakeup_latency;
-
- if (wakeup_latency > least_wakeup_latency)
- continue;
-
- if (wakeup_latency < least_wakeup_latency) {
- least_wakeup_latency = wakeup_latency;
- min_load = cpu_load;
- best_cpu = i;
- continue;
- }
- }
-
- if (cpu_load < min_load ||
- (cpu_load == min_load &&
- (i == prev_cpu || (best_cpu != prev_cpu &&
- cpus_share_cache(prev_cpu, i))))) {
- min_load = cpu_load;
- best_cpu = i;
- }
- }
-
- if (restrict_cluster && best_cpu != -1)
- break;
- }
-
- return best_cpu;
-}
-#endif /* CONFIG_SCHED_HMP */
-
-static inline unsigned long task_util(struct task_struct *p)
-{
-#ifdef CONFIG_SCHED_WALT
- if (!walt_disabled && sysctl_sched_use_walt_task_util) {
- u64 demand = p->ravg.demand;
-
- return (demand << 10) / sched_ravg_window;
- }
-#endif
- return p->se.avg.util_avg;
-}
-
static int find_lowest_rq(struct task_struct *task)
{
struct sched_domain *sd;
@@ -1888,10 +1731,6 @@
long max_spare_cap = -LONG_MAX;
bool placement_boost;
-#ifdef CONFIG_SCHED_HMP
- return find_lowest_rq_hmp(task);
-#endif
-
/* Make sure the mask is initialized first */
if (unlikely(!lowest_mask))
return -1;
@@ -2733,7 +2572,7 @@
.update_curr = update_curr_rt,
#ifdef CONFIG_SCHED_WALT
- .fixup_hmp_sched_stats = fixup_hmp_sched_stats_rt,
+ .fixup_walt_sched_stats = fixup_walt_sched_stats_common,
#endif
};
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 566e103..318d289 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -25,10 +25,12 @@
struct rq;
struct cpuidle_state;
+extern __read_mostly bool sched_predl;
+
#ifdef CONFIG_SCHED_WALT
extern unsigned int sched_ravg_window;
-struct hmp_sched_stats {
+struct walt_sched_stats {
int nr_big_tasks;
u64 cumulative_runnable_avg;
u64 pred_demands_sum;
@@ -106,6 +108,12 @@
static inline void cpu_load_update_active(struct rq *this_rq) { }
#endif
+#ifdef CONFIG_SCHED_SMT
+extern void update_idle_core(struct rq *rq);
+#else
+static inline void update_idle_core(struct rq *rq) { }
+#endif
+
/*
* Helpers for converting nanosecond timing to jiffy resolution
*/
@@ -318,10 +326,6 @@
struct task_group {
struct cgroup_subsys_state css;
-#ifdef CONFIG_SCHED_HMP
- bool upmigrate_discouraged;
-#endif
-
#ifdef CONFIG_FAIR_GROUP_SCHED
/* schedulable entities of this group on each cpu */
struct sched_entity **se;
@@ -507,13 +511,10 @@
struct list_head leaf_cfs_rq_list;
struct task_group *tg; /* group that "owns" this runqueue */
-#ifdef CONFIG_SCHED_WALT
- u64 cumulative_runnable_avg;
-#endif
-
#ifdef CONFIG_CFS_BANDWIDTH
+
#ifdef CONFIG_SCHED_WALT
- struct hmp_sched_stats hmp_stats;
+ struct walt_sched_stats walt_stats;
#endif
int runtime_enabled;
@@ -772,13 +773,13 @@
#ifdef CONFIG_SCHED_WALT
struct sched_cluster *cluster;
struct cpumask freq_domain_cpumask;
- struct hmp_sched_stats hmp_stats;
+ struct walt_sched_stats walt_stats;
int cstate, wakeup_latency, wakeup_energy;
u64 window_start;
s64 cum_window_start;
u64 load_reported_window;
- unsigned long hmp_flags;
+ unsigned long walt_flags;
u64 cur_irqload;
u64 avg_irqload;
@@ -801,7 +802,6 @@
u8 curr_table;
int prev_top;
int curr_top;
- struct irq_work irq_work;
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
@@ -864,23 +864,6 @@
#endif
}
-
-#ifdef CONFIG_SCHED_SMT
-
-extern struct static_key_false sched_smt_present;
-
-extern void __update_idle_core(struct rq *rq);
-
-static inline void update_idle_core(struct rq *rq)
-{
- if (static_branch_unlikely(&sched_smt_present))
- __update_idle_core(rq);
-}
-
-#else
-static inline void update_idle_core(struct rq *rq) { }
-#endif
-
DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
@@ -1312,7 +1295,6 @@
#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */
#define WF_FORK 0x02 /* child wakeup after fork */
#define WF_MIGRATED 0x4 /* internal use, task got migrated */
-#define WF_NO_NOTIFIER 0x08 /* do not notify governor */
/*
* To aid in avoiding the subversion of "niceness" due to uneven distribution
@@ -1430,7 +1412,7 @@
void (*task_change_group) (struct task_struct *p, int type);
#endif
#ifdef CONFIG_SCHED_WALT
- void (*fixup_hmp_sched_stats)(struct rq *rq, struct task_struct *p,
+ void (*fixup_walt_sched_stats)(struct rq *rq, struct task_struct *p,
u32 new_task_load, u32 new_pred_demand);
#endif
};
@@ -1462,7 +1444,6 @@
extern void update_group_capacity(struct sched_domain *sd, int cpu);
extern void trigger_load_balance(struct rq *rq);
-extern void nohz_balance_clear_nohz_mask(int cpu);
extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
@@ -1722,9 +1703,18 @@
return cpu_rq(cpu)->cpu_capacity_orig;
}
-extern unsigned int sysctl_sched_use_walt_cpu_util;
extern unsigned int walt_disabled;
+static inline unsigned long task_util(struct task_struct *p)
+{
+#ifdef CONFIG_SCHED_WALT
+ if (!walt_disabled && sysctl_sched_use_walt_task_util)
+ return p->ravg.demand /
+ (sched_ravg_window >> SCHED_CAPACITY_SHIFT);
+#endif
+ return p->se.avg.util_avg;
+}
+
/*
* cpu_util returns the amount of capacity of a CPU that is used by CFS
* tasks. The unit of the return value must be the one of capacity so we can
@@ -1758,7 +1748,7 @@
#ifdef CONFIG_SCHED_WALT
if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
- util = cpu_rq(cpu)->hmp_stats.cumulative_runnable_avg;
+ util = cpu_rq(cpu)->walt_stats.cumulative_runnable_avg;
util = div64_u64(util,
sched_ravg_window >> SCHED_CAPACITY_SHIFT);
}
@@ -1779,6 +1769,7 @@
unsigned long prev_window_util;
unsigned long nl;
unsigned long pl;
+ u64 ws;
};
static inline unsigned long cpu_util_cum(int cpu, int delta)
@@ -1821,13 +1812,19 @@
if (walt_load) {
u64 nl = cpu_rq(cpu)->nt_prev_runnable_sum +
rq->grp_time.nt_prev_runnable_sum;
+ u64 pl = rq->walt_stats.pred_demands_sum;
nl = div64_u64(nl, sched_ravg_window >>
SCHED_CAPACITY_SHIFT);
+ pl = div64_u64(pl, sched_ravg_window >>
+ SCHED_CAPACITY_SHIFT);
walt_load->prev_window_util = util;
walt_load->nl = nl;
- walt_load->pl = 0;
+ walt_load->pl = pl;
+ rq->old_busy_time = util;
+ rq->old_estimated_time = pl;
+ walt_load->ws = rq->window_start;
}
}
#endif
@@ -2207,6 +2204,17 @@
}
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
+#ifdef CONFIG_SCHED_WALT
+u64 sched_ktime_clock(void);
+void note_task_waking(struct task_struct *p, u64 wallclock);
+#else /* CONFIG_SCHED_WALT */
+static inline u64 sched_ktime_clock(void)
+{
+ return 0;
+}
+static inline void note_task_waking(struct task_struct *p, u64 wallclock) { }
+#endif /* CONFIG_SCHED_WALT */
+
#ifdef CONFIG_CPU_FREQ
DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
@@ -2237,13 +2245,18 @@
struct update_util_data *data;
#ifdef CONFIG_SCHED_WALT
+ unsigned int exception_flags = SCHED_CPUFREQ_INTERCLUSTER_MIG |
+ SCHED_CPUFREQ_PL;
+
/*
* Skip if we've already reported, but not if this is an inter-cluster
- * migration
+ * migration. Also only allow WALT update sites.
*/
+ if (!(flags & SCHED_CPUFREQ_WALT))
+ return;
if (!sched_disable_window_stats &&
(rq->load_reported_window == rq->window_start) &&
- !(flags & SCHED_CPUFREQ_INTERCLUSTER_MIG))
+ !(flags & exception_flags))
return;
rq->load_reported_window = rq->window_start;
#endif
@@ -2251,7 +2264,7 @@
data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
cpu_of(rq)));
if (data)
- data->func(data, sched_clock(), flags);
+ data->func(data, sched_ktime_clock(), flags);
}
static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags)
@@ -2336,15 +2349,12 @@
extern unsigned int __read_mostly sysctl_sched_spill_nr_run;
extern unsigned int __read_mostly sched_load_granule;
-extern u64 sched_ktime_clock(void);
extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
-extern void reset_cpu_hmp_stats(int cpu, int reset_cra);
extern int update_preferred_cluster(struct related_thread_group *grp,
struct task_struct *p, u32 old_load);
extern void set_preferred_cluster(struct related_thread_group *grp);
extern void add_new_task_to_grp(struct task_struct *new);
extern unsigned int update_freq_aggregate_threshold(unsigned int threshold);
-extern void update_avg(u64 *avg, u64 sample);
#define NO_BOOST 0
#define FULL_THROTTLE_BOOST 1
@@ -2472,6 +2482,11 @@
return cpu_max_possible_capacity(cpu) == max_possible_capacity;
}
+static inline bool is_min_capacity_cpu(int cpu)
+{
+ return cpu_max_possible_capacity(cpu) == min_max_possible_capacity;
+}
+
/*
* 'load' is in reference to "best cpu" at its best frequency.
* Scale that in reference to a given cpu, accounting for how bad it is
@@ -2526,6 +2541,11 @@
return p->ravg.demand;
}
+static inline unsigned int task_pl(struct task_struct *p)
+{
+ return p->ravg.pred_demand;
+}
+
#define pct_to_real(tunable) \
(div64_u64((u64)tunable * (u64)max_task_load(), 100))
@@ -2559,54 +2579,11 @@
#define BOOST_KICK 0
#define CPU_RESERVED 1
-static inline u64 cpu_cravg_sync(int cpu, int sync)
-{
- struct rq *rq = cpu_rq(cpu);
- u64 load;
-
- load = rq->hmp_stats.cumulative_runnable_avg;
-
- /*
- * If load is being checked in a sync wakeup environment,
- * we may want to discount the load of the currently running
- * task.
- */
- if (sync && cpu == smp_processor_id()) {
- if (load > rq->curr->ravg.demand)
- load -= rq->curr->ravg.demand;
- else
- load = 0;
- }
-
- return load;
-}
-
-extern int power_delta_exceeded(unsigned int cpu_cost, unsigned int base_cost);
-extern void reset_all_window_stats(u64 window_start, unsigned int window_size);
extern int sched_boost(void);
-extern int task_load_will_fit(struct task_struct *p, u64 task_load, int cpu,
- enum sched_boost_policy boost_policy);
-extern int task_will_fit(struct task_struct *p, int cpu);
-extern u64 cpu_load(int cpu);
-extern u64 cpu_load_sync(int cpu, int sync);
extern int preferred_cluster(struct sched_cluster *cluster,
struct task_struct *p);
-extern void inc_rq_hmp_stats(struct rq *rq,
- struct task_struct *p, int change_cra);
-extern void dec_rq_hmp_stats(struct rq *rq,
- struct task_struct *p, int change_cra);
-extern void reset_hmp_stats(struct hmp_sched_stats *stats, int reset_cra);
-extern int upmigrate_discouraged(struct task_struct *p);
extern struct sched_cluster *rq_cluster(struct rq *rq);
-extern int nr_big_tasks(struct rq *rq);
extern void reset_task_stats(struct task_struct *p);
-extern void reset_cfs_rq_hmp_stats(int cpu, int reset_cra);
-extern void inc_hmp_sched_stats_fair(struct rq *rq,
- struct task_struct *p, int change_cra);
-extern u64 cpu_upmigrate_discourage_read_u64(struct cgroup_subsys_state *css,
- struct cftype *cft);
-extern int cpu_upmigrate_discourage_write_u64(struct cgroup_subsys_state *css,
- struct cftype *cft, u64 upmigrate_discourage);
extern void clear_top_tasks_bitmap(unsigned long *bitmap);
#if defined(CONFIG_SCHED_TUNE) && defined(CONFIG_CGROUP_SCHEDTUNE)
@@ -2642,53 +2619,42 @@
{
struct rq *rq = cpu_rq(cpu);
- return test_bit(CPU_RESERVED, &rq->hmp_flags);
+ return test_bit(CPU_RESERVED, &rq->walt_flags);
}
static inline int mark_reserved(int cpu)
{
struct rq *rq = cpu_rq(cpu);
- return test_and_set_bit(CPU_RESERVED, &rq->hmp_flags);
+ return test_and_set_bit(CPU_RESERVED, &rq->walt_flags);
}
static inline void clear_reserved(int cpu)
{
struct rq *rq = cpu_rq(cpu);
- clear_bit(CPU_RESERVED, &rq->hmp_flags);
-}
-
-static inline bool
-__task_in_cum_window_demand(struct rq *rq, struct task_struct *p)
-{
- return (p->on_rq || p->last_sleep_ts >= rq->window_start);
+ clear_bit(CPU_RESERVED, &rq->walt_flags);
}
static inline bool
task_in_cum_window_demand(struct rq *rq, struct task_struct *p)
{
- return cpu_of(rq) == task_cpu(p) && __task_in_cum_window_demand(rq, p);
+ return cpu_of(rq) == task_cpu(p) && (p->on_rq || p->last_sleep_ts >=
+ rq->window_start);
}
-static inline void
-dec_cum_window_demand(struct rq *rq, struct task_struct *p)
-{
- rq->cum_window_demand -= p->ravg.demand;
- WARN_ON_ONCE(rq->cum_window_demand < 0);
-}
-
-static inline void
-inc_cum_window_demand(struct rq *rq, struct task_struct *p, s64 delta)
+static inline void walt_fixup_cum_window_demand(struct rq *rq, s64 delta)
{
rq->cum_window_demand += delta;
+ if (unlikely((s64)rq->cum_window_demand < 0))
+ rq->cum_window_demand = 0;
}
extern void update_cpu_cluster_capacity(const cpumask_t *cpus);
extern unsigned long thermal_cap(int cpu);
-extern void clear_hmp_request(int cpu);
+extern void clear_walt_request(int cpu);
extern int got_boost_kick(void);
extern void clear_boost_kick(int cpu);
@@ -2697,18 +2663,14 @@
extern void clear_ed_task(struct task_struct *p, struct rq *rq);
extern bool early_detection_notify(struct rq *rq, u64 wallclock);
-#ifdef CONFIG_SCHED_HMP
-extern unsigned int power_cost(int cpu, u64 demand);
-#else
static inline unsigned int power_cost(int cpu, u64 demand)
{
return cpu_max_possible_capacity(cpu);
}
-#endif
#else /* CONFIG_SCHED_WALT */
-struct hmp_sched_stats;
+struct walt_sched_stats;
struct related_thread_group;
struct sched_cluster;
@@ -2719,43 +2681,14 @@
static inline void check_for_migration(struct rq *rq, struct task_struct *p) { }
-static inline int task_will_fit(struct task_struct *p, int cpu)
-{
- return 1;
-}
-
static inline int sched_boost(void)
{
return 0;
}
-static inline int is_big_task(struct task_struct *p)
-{
- return 0;
-}
-
-static inline int nr_big_tasks(struct rq *rq)
-{
- return 0;
-}
-
-static inline int is_cpu_throttling_imminent(int cpu)
-{
- return 0;
-}
-
-static inline int is_task_migration_throttled(struct task_struct *p)
-{
- return 0;
-}
-
+static inline bool hmp_capable(void) { return false; }
static inline bool is_max_capacity_cpu(int cpu) { return true; }
-
-static inline void
-inc_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra) { }
-
-static inline void
-dec_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra) { }
+static inline bool is_min_capacity_cpu(int cpu) { return true; }
static inline int
preferred_cluster(struct sched_cluster *cluster, struct task_struct *p)
@@ -2792,6 +2725,7 @@
}
static inline u32 task_load(struct task_struct *p) { return 0; }
+static inline u32 task_pl(struct task_struct *p) { return 0; }
static inline int update_preferred_cluster(struct related_thread_group *grp,
struct task_struct *p, u32 old_load)
@@ -2816,17 +2750,7 @@
#define trace_sched_cpu_load_cgroup(...)
#define trace_sched_cpu_load_wakeup(...)
-static inline bool
-__task_in_cum_window_demand(struct rq *rq, struct task_struct *p)
-{
- return 0;
-}
-
-static inline void
-dec_cum_window_demand(struct rq *rq, struct task_struct *p) { }
-
-static inline void
-inc_cum_window_demand(struct rq *rq, struct task_struct *p, s64 delta) { }
+static inline void walt_fixup_cum_window_demand(struct rq *rq, s64 delta) { }
static inline void update_cpu_cluster_capacity(const cpumask_t *cpus) { }
@@ -2837,7 +2761,7 @@
}
#endif
-static inline void clear_hmp_request(int cpu) { }
+static inline void clear_walt_request(int cpu) { }
static inline int got_boost_kick(void)
{
@@ -2867,88 +2791,7 @@
#endif /* CONFIG_SCHED_WALT */
-#ifdef CONFIG_SCHED_HMP
-#define energy_aware() false
-
-extern int is_big_task(struct task_struct *p);
-extern unsigned int pct_task_load(struct task_struct *p);
-extern void notify_migration(int src_cpu, int dest_cpu,
- bool src_cpu_dead, struct task_struct *p);
-extern void note_task_waking(struct task_struct *p, u64 wallclock);
-extern void
-check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups);
-extern void fixup_nr_big_tasks(struct hmp_sched_stats *stats,
- struct task_struct *p, s64 delta);
-extern unsigned int cpu_temp(int cpu);
-extern void pre_big_task_count_change(const struct cpumask *cpus);
-extern void post_big_task_count_change(const struct cpumask *cpus);
-extern void set_hmp_defaults(void);
-extern void update_avg_burst(struct task_struct *p);
-extern void set_task_last_switch_out(struct task_struct *p, u64 wallclock);
-
-extern unsigned int nr_eligible_big_tasks(int cpu);
-
-static inline void
-inc_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p)
-{
- if (sched_disable_window_stats)
- return;
-
- if (is_big_task(p))
- stats->nr_big_tasks++;
-}
-
-static inline void
-dec_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p)
-{
- if (sched_disable_window_stats)
- return;
-
- if (is_big_task(p))
- stats->nr_big_tasks--;
-
- BUG_ON(stats->nr_big_tasks < 0);
-}
-
-static inline bool is_short_burst_task(struct task_struct *p)
-{
- return p->ravg.avg_burst < sysctl_sched_short_burst &&
- p->ravg.avg_sleep_time > sysctl_sched_short_sleep;
-}
-
-#else
static inline bool energy_aware(void)
{
return sched_feat(ENERGY_AWARE);
}
-
-static inline int pct_task_load(struct task_struct *p) { return 0; }
-
-static inline void notify_migration(int src_cpu, int dest_cpu,
- bool src_cpu_dead, struct task_struct *p) { }
-
-static inline void note_task_waking(struct task_struct *p, u64 wallclock) { }
-
-static inline void
-check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups) { }
-
-static inline void fixup_nr_big_tasks(struct hmp_sched_stats *stats,
- struct task_struct *p, s64 delta) { }
-
-static inline unsigned int cpu_temp(int cpu)
-{
- return 0;
-}
-
-static inline void pre_big_task_count_change(const struct cpumask *cpus) { }
-
-static inline void post_big_task_count_change(const struct cpumask *cpus) { }
-
-static inline void set_hmp_defaults(void) { }
-
-static inline void update_avg_burst(struct task_struct *p) { }
-
-static inline void set_task_last_switch_out(struct task_struct *p,
- u64 wallclock) { }
-
-#endif /* CONFIG_SCHED_HMP */
diff --git a/kernel/sched/sched_avg.c b/kernel/sched/sched_avg.c
index 7f86c0b..166c643 100644
--- a/kernel/sched/sched_avg.c
+++ b/kernel/sched/sched_avg.c
@@ -33,6 +33,8 @@
static DEFINE_PER_CPU(spinlock_t, nr_lock) = __SPIN_LOCK_UNLOCKED(nr_lock);
static s64 last_get_time;
+static DEFINE_PER_CPU(atomic64_t, last_busy_time) = ATOMIC64_INIT(0);
+
#define DIV64_U64_ROUNDUP(X, Y) div64_u64((X) + (Y - 1), Y)
/**
* sched_get_nr_running_avg
@@ -120,6 +122,27 @@
}
EXPORT_SYMBOL(sched_get_nr_running_avg);
+#define BUSY_NR_RUN 3
+#define BUSY_LOAD_FACTOR 2
+static inline void update_last_busy_time(int cpu, bool dequeue,
+ unsigned long prev_nr_run, u64 curr_time)
+{
+ bool nr_run_trigger = false, load_trigger = false;
+
+ if (!hmp_capable() || is_min_capacity_cpu(cpu))
+ return;
+
+ if (prev_nr_run >= BUSY_NR_RUN && per_cpu(nr, cpu) < BUSY_NR_RUN)
+ nr_run_trigger = true;
+
+ if (dequeue && (cpu_util(cpu) * BUSY_LOAD_FACTOR) >
+ capacity_orig_of(cpu))
+ load_trigger = true;
+
+ if (nr_run_trigger || load_trigger)
+ atomic64_set(&per_cpu(last_busy_time, cpu), curr_time);
+}
+
/**
* sched_update_nr_prod
* @cpu: The core id of the nr running driver.
@@ -148,6 +171,8 @@
if (per_cpu(nr, cpu) > per_cpu(nr_max, cpu))
per_cpu(nr_max, cpu) = per_cpu(nr, cpu);
+ update_last_busy_time(cpu, !inc, nr_running, curr_time);
+
per_cpu(nr_prod_sum, cpu) += nr_running * diff;
per_cpu(nr_big_prod_sum, cpu) += nr_eligible_big_tasks(cpu) * diff;
per_cpu(iowait_prod_sum, cpu) += nr_iowait_cpu(cpu) * diff;
@@ -162,16 +187,14 @@
unsigned int sched_get_cpu_util(int cpu)
{
struct rq *rq = cpu_rq(cpu);
- u64 util = 0;
- unsigned long capacity = SCHED_CAPACITY_SCALE, flags;
+ u64 util;
+ unsigned long capacity, flags;
unsigned int busy;
raw_spin_lock_irqsave(&rq->lock, flags);
-#ifdef CONFIG_SMP
util = rq->cfs.avg.util_avg;
capacity = capacity_orig_of(cpu);
-#endif
#ifdef CONFIG_SCHED_WALT
if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
@@ -186,3 +209,8 @@
busy = (util * 100) / capacity;
return busy;
}
+
+u64 sched_get_cpu_last_busy_time(int cpu)
+{
+ return atomic64_read(&per_cpu(last_busy_time, cpu));
+}
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index dcc4a36..11a1888 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -18,41 +18,6 @@
}
#endif /* CONFIG_SMP */
-#ifdef CONFIG_SCHED_WALT
-
-static void
-inc_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p)
-{
- inc_cumulative_runnable_avg(&rq->hmp_stats, p);
-}
-
-static void
-dec_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p)
-{
- dec_cumulative_runnable_avg(&rq->hmp_stats, p);
-}
-
-static void
-fixup_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p,
- u32 new_task_load, u32 new_pred_demand)
-{
- s64 task_load_delta = (s64)new_task_load - task_load(p);
- s64 pred_demand_delta = PRED_DEMAND_DELTA;
-
- fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta,
- pred_demand_delta);
-}
-
-#else /* CONFIG_SCHED_WALT */
-
-static inline void
-inc_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p) { }
-
-static inline void
-dec_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p) { }
-
-#endif /* CONFIG_SCHED_WALT */
-
static void
check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
{
@@ -78,14 +43,14 @@
enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
add_nr_running(rq, 1);
- inc_hmp_sched_stats_stop(rq, p);
+ walt_inc_cumulative_runnable_avg(rq, p);
}
static void
dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
sub_nr_running(rq, 1);
- dec_hmp_sched_stats_stop(rq, p);
+ walt_dec_cumulative_runnable_avg(rq, p);
}
static void yield_task_stop(struct rq *rq)
@@ -173,6 +138,6 @@
.switched_to = switched_to_stop,
.update_curr = update_curr_stop,
#ifdef CONFIG_SCHED_WALT
- .fixup_hmp_sched_stats = fixup_hmp_sched_stats_stop,
+ .fixup_walt_sched_stats = fixup_walt_sched_stats_common,
#endif
};
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index 50f889b..48f3512 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -49,6 +49,9 @@
DEFINE_MUTEX(cluster_lock);
static atomic64_t walt_irq_work_lastq_ws;
+static struct irq_work walt_cpufreq_irq_work;
+static struct irq_work walt_migration_irq_work;
+
u64 sched_ktime_clock(void)
{
if (unlikely(sched_ktime_suspended))
@@ -113,8 +116,6 @@
* IMPORTANT: Initialize both copies to same value!!
*/
-static __read_mostly bool sched_predl;
-
__read_mostly unsigned int sched_ravg_hist_size = 5;
__read_mostly unsigned int sysctl_sched_ravg_hist_size = 5;
@@ -204,27 +205,28 @@
}
early_param("sched_predl", set_sched_predl);
-void inc_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra)
+void inc_rq_walt_stats(struct rq *rq, struct task_struct *p)
{
- inc_nr_big_task(&rq->hmp_stats, p);
- if (change_cra)
- inc_cumulative_runnable_avg(&rq->hmp_stats, p);
+ inc_nr_big_task(&rq->walt_stats, p);
+ walt_inc_cumulative_runnable_avg(rq, p);
}
-void dec_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra)
+void dec_rq_walt_stats(struct rq *rq, struct task_struct *p)
{
- dec_nr_big_task(&rq->hmp_stats, p);
- if (change_cra)
- dec_cumulative_runnable_avg(&rq->hmp_stats, p);
+ dec_nr_big_task(&rq->walt_stats, p);
+ walt_dec_cumulative_runnable_avg(rq, p);
}
-void reset_hmp_stats(struct hmp_sched_stats *stats, int reset_cra)
+void fixup_walt_sched_stats_common(struct rq *rq, struct task_struct *p,
+ u32 new_task_load, u32 new_pred_demand)
{
- stats->nr_big_tasks = 0; /* never happens on EAS */
- if (reset_cra) {
- stats->cumulative_runnable_avg = 0;
- stats->pred_demands_sum = 0;
- }
+ s64 task_load_delta = (s64)new_task_load - task_load(p);
+ s64 pred_demand_delta = PRED_DEMAND_DELTA;
+
+ fixup_cumulative_runnable_avg(&rq->walt_stats, task_load_delta,
+ pred_demand_delta);
+
+ walt_fixup_cum_window_demand(rq, task_load_delta);
}
/*
@@ -292,9 +294,7 @@
nr_windows = div64_u64(delta, sched_ravg_window);
rq->window_start += (u64)nr_windows * (u64)sched_ravg_window;
- rq->cum_window_demand = rq->hmp_stats.cumulative_runnable_avg;
- if (event == PUT_PREV_TASK)
- rq->cum_window_demand += rq->curr->ravg.demand;
+ rq->cum_window_demand = rq->walt_stats.cumulative_runnable_avg;
return old_window_start;
}
@@ -376,12 +376,12 @@
struct rq *rq = cpu_rq(cpu);
if (!is_max_capacity_cpu(cpu))
- return rq->hmp_stats.nr_big_tasks;
+ return rq->walt_stats.nr_big_tasks;
return rq->nr_running;
}
-void clear_hmp_request(int cpu)
+void clear_walt_request(int cpu)
{
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
@@ -471,24 +471,28 @@
u64 freq_policy_load(struct rq *rq)
{
unsigned int reporting_policy = sysctl_sched_freq_reporting_policy;
+ int freq_aggr_thresh = sched_freq_aggregate_threshold;
struct sched_cluster *cluster = rq->cluster;
u64 aggr_grp_load = cluster->aggr_grp_load;
- u64 load;
+ u64 load, tt_load = 0;
- if (rq->ed_task != NULL)
- return sched_ravg_window;
+ if (rq->ed_task != NULL) {
+ load = sched_ravg_window;
+ goto done;
+ }
- if (aggr_grp_load > sched_freq_aggregate_threshold)
+ if (aggr_grp_load > freq_aggr_thresh)
load = rq->prev_runnable_sum + aggr_grp_load;
else
load = rq->prev_runnable_sum + rq->grp_time.prev_runnable_sum;
+ tt_load = top_task_load(rq);
switch (reporting_policy) {
case FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK:
- load = max_t(u64, load, top_task_load(rq));
+ load = max_t(u64, load, tt_load);
break;
case FREQ_REPORT_TOP_TASK:
- load = top_task_load(rq);
+ load = tt_load;
break;
case FREQ_REPORT_CPU_LOAD:
break;
@@ -496,6 +500,9 @@
break;
}
+done:
+ trace_sched_load_to_gov(rq, aggr_grp_load, tt_load, freq_aggr_thresh,
+ load, reporting_policy);
return load;
}
@@ -612,55 +619,6 @@
raw_spin_unlock(&cluster->load_lock);
}
-#ifdef CONFIG_SCHED_HMP
-static inline void
-init_new_task_load_hmp(struct task_struct *p, bool idle_task)
-{
- p->ravg.curr_burst = 0;
- /*
- * Initialize the avg_burst to twice the threshold, so that
- * a task would not be classified as short burst right away
- * after fork. It takes at least 6 sleep-wakeup cycles for
- * the avg_burst to go below the threshold.
- */
- p->ravg.avg_burst = 2 * (u64)sysctl_sched_short_burst;
- p->ravg.avg_sleep_time = 0;
-}
-
-static inline void
-update_task_burst(struct task_struct *p, struct rq *rq, int event, u64 runtime)
-{
- /*
- * update_task_demand() has checks for idle task and
- * exit task. The runtime may include the wait time,
- * so update the burst only for the cases where the
- * task is running.
- */
- if (event == PUT_PREV_TASK || (event == TASK_UPDATE &&
- rq->curr == p))
- p->ravg.curr_burst += runtime;
-}
-
-static void reset_task_stats_hmp(struct task_struct *p)
-{
- p->ravg.avg_burst = 2 * (u64)sysctl_sched_short_burst;
-}
-#else
-static inline void
-init_new_task_load_hmp(struct task_struct *p, bool idle_task)
-{
-}
-
-static inline void
-update_task_burst(struct task_struct *p, struct rq *rq, int event, int runtime)
-{
-}
-
-static void reset_task_stats_hmp(struct task_struct *p)
-{
-}
-#endif
-
static inline void inter_cluster_migration_fixup
(struct task_struct *p, int new_cpu, int task_cpu, bool new_task)
{
@@ -813,9 +771,15 @@
update_task_cpu_cycles(p, new_cpu);
- if (__task_in_cum_window_demand(src_rq, p)) {
- dec_cum_window_demand(src_rq, p);
- inc_cum_window_demand(dest_rq, p, p->ravg.demand);
+ /*
+ * When a task is migrating during the wakeup, adjust
+ * the task's contribution towards cumulative window
+ * demand.
+ */
+ if (p->state == TASK_WAKING && p->last_sleep_ts >=
+ src_rq->window_start) {
+ walt_fixup_cum_window_demand(src_rq, -(s64)p->ravg.demand);
+ walt_fixup_cum_window_demand(dest_rq, p->ravg.demand);
}
new_task = is_new_task(p);
@@ -870,10 +834,8 @@
migrate_top_tasks(p, src_rq, dest_rq);
- if (!same_freq_domain(new_cpu, task_cpu(p))) {
- cpufreq_update_util(dest_rq, SCHED_CPUFREQ_INTERCLUSTER_MIG);
- cpufreq_update_util(src_rq, SCHED_CPUFREQ_INTERCLUSTER_MIG);
- }
+ if (!same_freq_domain(new_cpu, task_cpu(p)))
+ irq_work_queue(&walt_migration_irq_work);
if (p == src_rq->ed_task) {
src_rq->ed_task = NULL;
@@ -1098,7 +1060,7 @@
if (task_on_rq_queued(p) && (!task_has_dl_policy(p) ||
!p->dl.dl_throttled))
- p->sched_class->fixup_hmp_sched_stats(rq, p,
+ p->sched_class->fixup_walt_sched_stats(rq, p,
p->ravg.demand,
new);
@@ -1315,6 +1277,33 @@
return delta;
}
+/* Convert busy time to frequency equivalent
+ * Assumes load is scaled to 1024
+ */
+static inline unsigned int load_to_freq(struct rq *rq, u64 load)
+{
+ return mult_frac(cpu_max_possible_freq(cpu_of(rq)), load,
+ capacity_orig_of(cpu_of(rq)));
+}
+
+bool do_pl_notif(struct rq *rq)
+{
+ u64 prev = rq->old_busy_time;
+ u64 pl = rq->walt_stats.pred_demands_sum;
+ int cpu = cpu_of(rq);
+
+ /* If already at max freq, bail out */
+ if (capacity_orig_of(cpu) == capacity_curr_of(cpu))
+ return false;
+
+ prev = max(prev, rq->old_estimated_time);
+
+ pl = div64_u64(pl, sched_ravg_window >> SCHED_CAPACITY_SHIFT);
+
+ /* 400 MHz filter. */
+ return (pl > prev) && (load_to_freq(rq, pl - prev) > 400000);
+}
+
static void rollover_cpu_window(struct rq *rq, bool full_window)
{
u64 curr_sum = rq->curr_runnable_sum;
@@ -1693,20 +1682,28 @@
/*
* A throttled deadline sched class task gets dequeued without
- * changing p->on_rq. Since the dequeue decrements hmp stats
+ * changing p->on_rq. Since the dequeue decrements walt stats
* avoid decrementing it here again.
+ *
+ * When window is rolled over, the cumulative window demand
+ * is reset to the cumulative runnable average (contribution from
+ * the tasks on the runqueue). If the current task is dequeued
+ * already, it's demand is not included in the cumulative runnable
+ * average. So add the task demand separately to cumulative window
+ * demand.
*/
- if (task_on_rq_queued(p) && (!task_has_dl_policy(p) ||
- !p->dl.dl_throttled))
- p->sched_class->fixup_hmp_sched_stats(rq, p, demand,
- pred_demand);
+ if (!task_has_dl_policy(p) || !p->dl.dl_throttled) {
+ if (task_on_rq_queued(p))
+ p->sched_class->fixup_walt_sched_stats(rq, p, demand,
+ pred_demand);
+ else if (rq->curr == p)
+ walt_fixup_cum_window_demand(rq, demand);
+ }
p->ravg.demand = demand;
+ p->ravg.coloc_demand = div64_u64(sum, sched_ravg_hist_size);
p->ravg.pred_demand = pred_demand;
- if (__task_in_cum_window_demand(rq, p))
- inc_cum_window_demand(rq, p, p->ravg.demand - prev_demand);
-
done:
trace_sched_update_history(rq, p, runtime, samples, event);
}
@@ -1898,14 +1895,14 @@
result = atomic64_cmpxchg(&walt_irq_work_lastq_ws, old_window_start,
rq->window_start);
if (result == old_window_start)
- irq_work_queue(&rq->irq_work);
+ irq_work_queue(&walt_cpufreq_irq_work);
}
/* Reflect task activity on its demand and cpu's busy time statistics */
void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
u64 wallclock, u64 irqtime)
{
- u64 runtime, old_window_start;
+ u64 old_window_start;
if (!rq->window_start || sched_disable_window_stats ||
p->ravg.mark_start == wallclock)
@@ -1921,9 +1918,7 @@
}
update_task_rq_cpu_cycles(p, rq, event, wallclock, irqtime);
- runtime = update_task_demand(p, rq, event, wallclock);
- if (runtime)
- update_task_burst(p, rq, event, runtime);
+ update_task_demand(p, rq, event, wallclock);
update_cpu_busy_time(p, rq, event, wallclock, irqtime);
update_task_pred_demand(rq, p, event);
done:
@@ -1964,8 +1959,6 @@
memset(&p->ravg, 0, sizeof(struct ravg));
p->cpu_cycles = 0;
- init_new_task_load_hmp(p, idle_task);
-
p->ravg.curr_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32), GFP_KERNEL);
p->ravg.prev_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32), GFP_KERNEL);
@@ -1980,6 +1973,7 @@
(u64)sched_ravg_window, 100);
p->ravg.demand = init_load_windows;
+ p->ravg.coloc_demand = init_load_windows;
p->ravg.pred_demand = 0;
for (i = 0; i < RAVG_HIST_SIZE_MAX; ++i)
p->ravg.sum_history[i] = init_load_windows;
@@ -2020,8 +2014,6 @@
p->ravg.curr_window_cpu = curr_window_ptr;
p->ravg.prev_window_cpu = prev_window_ptr;
- reset_task_stats_hmp(p);
-
/* Retain EXITING_TASK marker */
p->ravg.sum_history[0] = sum;
}
@@ -2503,7 +2495,7 @@
(sched_ravg_window * sched_ravg_hist_size))
continue;
- combined_demand += p->ravg.demand;
+ combined_demand += p->ravg.coloc_demand;
}
@@ -2892,6 +2884,11 @@
update_cpu_cluster_capacity(cpus);
}
+void note_task_waking(struct task_struct *p, u64 wallclock)
+{
+ p->last_wake_ts = wallclock;
+}
+
/*
* Task's cpu usage is accounted in:
* rq->curr/prev_runnable_sum, when its ->grp is NULL
@@ -3012,6 +3009,11 @@
struct rq *rq;
int cpu;
u64 wc;
+ int flag = SCHED_CPUFREQ_WALT;
+
+ /* Am I the window rollover work or the migration work? */
+ if (irq_work == &walt_migration_irq_work)
+ flag |= SCHED_CPUFREQ_INTERCLUSTER_MIG;
for_each_cpu(cpu, cpu_possible_mask)
raw_spin_lock(&cpu_rq(cpu)->lock);
@@ -3040,10 +3042,90 @@
for_each_sched_cluster(cluster)
for_each_cpu(cpu, &cluster->cpus)
- cpufreq_update_util(cpu_rq(cpu), 0);
+ cpufreq_update_util(cpu_rq(cpu), flag);
for_each_cpu(cpu, cpu_possible_mask)
raw_spin_unlock(&cpu_rq(cpu)->lock);
- core_ctl_check(this_rq()->window_start);
+ if (irq_work != &walt_migration_irq_work)
+ core_ctl_check(this_rq()->window_start);
+}
+
+int walt_proc_update_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ int ret;
+ unsigned int *data = (unsigned int *)table->data;
+ static DEFINE_MUTEX(mutex);
+
+ mutex_lock(&mutex);
+ ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+ if (ret || !write) {
+ mutex_unlock(&mutex);
+ return ret;
+ }
+
+ if (data == &sysctl_sched_group_upmigrate_pct)
+ sched_group_upmigrate =
+ pct_to_real(sysctl_sched_group_upmigrate_pct);
+ else if (data == &sysctl_sched_group_downmigrate_pct)
+ sched_group_downmigrate =
+ pct_to_real(sysctl_sched_group_downmigrate_pct);
+ else
+ ret = -EINVAL;
+ mutex_unlock(&mutex);
+
+ return ret;
+}
+
+void walt_sched_init(struct rq *rq)
+{
+ int j;
+
+ cpumask_set_cpu(cpu_of(rq), &rq->freq_domain_cpumask);
+ init_irq_work(&walt_migration_irq_work, walt_irq_work);
+ init_irq_work(&walt_cpufreq_irq_work, walt_irq_work);
+ rq->walt_stats.cumulative_runnable_avg = 0;
+ rq->window_start = 0;
+ rq->cum_window_start = 0;
+ rq->walt_stats.nr_big_tasks = 0;
+ rq->walt_flags = 0;
+ rq->cur_irqload = 0;
+ rq->avg_irqload = 0;
+ rq->irqload_ts = 0;
+ rq->static_cpu_pwr_cost = 0;
+ rq->cc.cycles = 1;
+ rq->cc.time = 1;
+ rq->cstate = 0;
+ rq->wakeup_latency = 0;
+ rq->wakeup_energy = 0;
+
+ /*
+ * All cpus part of same cluster by default. This avoids the
+ * need to check for rq->cluster being non-NULL in hot-paths
+ * like select_best_cpu()
+ */
+ rq->cluster = &init_cluster;
+ rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
+ rq->nt_curr_runnable_sum = rq->nt_prev_runnable_sum = 0;
+ memset(&rq->grp_time, 0, sizeof(struct group_cpu_time));
+ rq->old_busy_time = 0;
+ rq->old_estimated_time = 0;
+ rq->old_busy_time_group = 0;
+ rq->walt_stats.pred_demands_sum = 0;
+ rq->ed_task = NULL;
+ rq->curr_table = 0;
+ rq->prev_top = 0;
+ rq->curr_top = 0;
+ for (j = 0; j < NUM_TRACKED_WINDOWS; j++) {
+ memset(&rq->load_subs[j], 0,
+ sizeof(struct load_subtractions));
+ rq->top_tasks[j] = kcalloc(NUM_LOAD_INDICES,
+ sizeof(u8), GFP_NOWAIT);
+ /* No other choice */
+ BUG_ON(!rq->top_tasks[j]);
+ clear_top_tasks_bitmap(rq->top_tasks_bitmap[j]);
+ }
+ rq->cum_window_demand = 0;
}
diff --git a/kernel/sched/walt.h b/kernel/sched/walt.h
index f153332..535f14b 100644
--- a/kernel/sched/walt.h
+++ b/kernel/sched/walt.h
@@ -65,9 +65,8 @@
extern unsigned int nr_eligible_big_tasks(int cpu);
-#ifndef CONFIG_SCHED_HMP
static inline void
-inc_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p)
+inc_nr_big_task(struct walt_sched_stats *stats, struct task_struct *p)
{
if (sched_disable_window_stats)
return;
@@ -77,7 +76,7 @@
}
static inline void
-dec_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p)
+dec_nr_big_task(struct walt_sched_stats *stats, struct task_struct *p)
{
if (sched_disable_window_stats)
return;
@@ -87,60 +86,22 @@
BUG_ON(stats->nr_big_tasks < 0);
}
-#endif
static inline void
-adjust_nr_big_tasks(struct hmp_sched_stats *stats, int delta, bool inc)
+walt_adjust_nr_big_tasks(struct rq *rq, int delta, bool inc)
{
- struct rq *rq = container_of(stats, struct rq, hmp_stats);
-
if (sched_disable_window_stats)
return;
sched_update_nr_prod(cpu_of(rq), 0, true);
- stats->nr_big_tasks += inc ? delta : -delta;
+ rq->walt_stats.nr_big_tasks += inc ? delta : -delta;
- BUG_ON(stats->nr_big_tasks < 0);
+ BUG_ON(rq->walt_stats.nr_big_tasks < 0);
}
static inline void
-inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
- struct task_struct *p)
-{
- u32 task_load;
-
- if (sched_disable_window_stats)
- return;
-
- task_load = sched_disable_window_stats ? 0 : p->ravg.demand;
-
- stats->cumulative_runnable_avg += task_load;
- stats->pred_demands_sum += p->ravg.pred_demand;
-}
-
-static inline void
-dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,
- struct task_struct *p)
-{
- u32 task_load;
-
- if (sched_disable_window_stats)
- return;
-
- task_load = sched_disable_window_stats ? 0 : p->ravg.demand;
-
- stats->cumulative_runnable_avg -= task_load;
-
- BUG_ON((s64)stats->cumulative_runnable_avg < 0);
-
- stats->pred_demands_sum -= p->ravg.pred_demand;
- BUG_ON((s64)stats->pred_demands_sum < 0);
-}
-
-static inline void
-fixup_cumulative_runnable_avg(struct hmp_sched_stats *stats,
- struct task_struct *p, s64 task_load_delta,
- s64 pred_demand_delta)
+fixup_cumulative_runnable_avg(struct walt_sched_stats *stats,
+ s64 task_load_delta, s64 pred_demand_delta)
{
if (sched_disable_window_stats)
return;
@@ -152,17 +113,56 @@
BUG_ON((s64)stats->pred_demands_sum < 0);
}
-extern void inc_rq_hmp_stats(struct rq *rq,
- struct task_struct *p, int change_cra);
-extern void dec_rq_hmp_stats(struct rq *rq,
- struct task_struct *p, int change_cra);
-extern void reset_hmp_stats(struct hmp_sched_stats *stats, int reset_cra);
+static inline void
+walt_inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
+{
+ if (sched_disable_window_stats)
+ return;
+
+ fixup_cumulative_runnable_avg(&rq->walt_stats, p->ravg.demand,
+ p->ravg.pred_demand);
+
+ /*
+ * Add a task's contribution to the cumulative window demand when
+ *
+ * (1) task is enqueued with on_rq = 1 i.e migration,
+ * prio/cgroup/class change.
+ * (2) task is waking for the first time in this window.
+ */
+ if (p->on_rq || (p->last_sleep_ts < rq->window_start))
+ walt_fixup_cum_window_demand(rq, p->ravg.demand);
+}
+
+static inline void
+walt_dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
+{
+ if (sched_disable_window_stats)
+ return;
+
+ fixup_cumulative_runnable_avg(&rq->walt_stats, -(s64)p->ravg.demand,
+ -(s64)p->ravg.pred_demand);
+
+ /*
+ * on_rq will be 1 for sleeping tasks. So check if the task
+ * is migrating or dequeuing in RUNNING state to change the
+ * prio/cgroup/class.
+ */
+ if (task_on_rq_migrating(p) || p->state == TASK_RUNNING)
+ walt_fixup_cum_window_demand(rq, -(s64)p->ravg.demand);
+}
+
+extern void fixup_walt_sched_stats_common(struct rq *rq, struct task_struct *p,
+ u32 new_task_load,
+ u32 new_pred_demand);
+extern void inc_rq_walt_stats(struct rq *rq, struct task_struct *p);
+extern void dec_rq_walt_stats(struct rq *rq, struct task_struct *p);
extern void fixup_busy_time(struct task_struct *p, int new_cpu);
extern void init_new_task_load(struct task_struct *p, bool idle_task);
extern void mark_task_starting(struct task_struct *p);
extern void set_window_start(struct rq *rq);
void account_irqtime(int cpu, struct task_struct *curr, u64 delta,
u64 wallclock);
+extern bool do_pl_notif(struct rq *rq);
#define SCHED_HIGH_IRQ_TIMEOUT 3
static inline u64 sched_irqload(int cpu)
@@ -194,8 +194,6 @@
return (p->ravg.sum_history[0] == EXITING_TASK_MARKER);
}
-extern u64 sched_ktime_clock(void);
-
static inline struct sched_cluster *cpu_cluster(int cpu)
{
return cpu_rq(cpu)->cluster;
@@ -293,12 +291,16 @@
void walt_irq_work(struct irq_work *irq_work);
+void walt_sched_init(struct rq *rq);
+
#else /* CONFIG_SCHED_WALT */
+static inline void walt_sched_init(struct rq *rq) { }
+
static inline void update_task_ravg(struct task_struct *p, struct rq *rq,
int event, u64 wallclock, u64 irqtime) { }
-static inline void inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
- struct task_struct *p)
+static inline void walt_inc_cumulative_runnable_avg(struct rq *rq,
+ struct task_struct *p)
{
}
@@ -307,21 +309,21 @@
return 0;
}
-static inline void adjust_nr_big_tasks(struct hmp_sched_stats *stats,
+static inline void walt_adjust_nr_big_tasks(struct rq *rq,
int delta, bool inc)
{
}
-static inline void inc_nr_big_task(struct hmp_sched_stats *stats,
+static inline void inc_nr_big_task(struct walt_sched_stats *stats,
struct task_struct *p)
{
}
-static inline void dec_nr_big_task(struct hmp_sched_stats *stats,
+static inline void dec_nr_big_task(struct walt_sched_stats *stats,
struct task_struct *p)
{
}
-static inline void dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,
+static inline void walt_dec_cumulative_runnable_avg(struct rq *rq,
struct task_struct *p)
{
}
@@ -335,11 +337,6 @@
static inline void set_window_start(struct rq *rq) { }
static inline int sched_cpu_high_irqload(int cpu) { return 0; }
-static inline u64 sched_ktime_clock(void)
-{
- return 0;
-}
-
static inline void sched_account_irqstart(int cpu, struct task_struct *curr,
u64 wallclock)
{
@@ -353,6 +350,19 @@
}
static inline int same_cluster(int src_cpu, int dst_cpu) { return 1; }
+static inline bool do_pl_notif(struct rq *rq) { return false; }
+
+static inline void
+inc_rq_walt_stats(struct rq *rq, struct task_struct *p) { }
+
+static inline void
+dec_rq_walt_stats(struct rq *rq, struct task_struct *p) { }
+
+static inline void
+fixup_walt_sched_stats_common(struct rq *rq, struct task_struct *p,
+ u32 new_task_load, u32 new_pred_demand)
+{
+}
#endif /* CONFIG_SCHED_WALT */
diff --git a/kernel/signal.c b/kernel/signal.c
index 0b14157..deb04d5 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -503,7 +503,8 @@
return !tsk->ptrace;
}
-static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
+static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
+ bool *resched_timer)
{
struct sigqueue *q, *first = NULL;
@@ -525,6 +526,12 @@
still_pending:
list_del_init(&first->list);
copy_siginfo(info, &first->info);
+
+ *resched_timer =
+ (first->flags & SIGQUEUE_PREALLOC) &&
+ (info->si_code == SI_TIMER) &&
+ (info->si_sys_private);
+
__sigqueue_free(first);
} else {
/*
@@ -541,12 +548,12 @@
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
- siginfo_t *info)
+ siginfo_t *info, bool *resched_timer)
{
int sig = next_signal(pending, mask);
if (sig)
- collect_signal(sig, pending, info);
+ collect_signal(sig, pending, info, resched_timer);
return sig;
}
@@ -558,15 +565,16 @@
*/
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
+ bool resched_timer = false;
int signr;
/* We only dequeue private signals from ourselves, we don't let
* signalfd steal them
*/
- signr = __dequeue_signal(&tsk->pending, mask, info);
+ signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
if (!signr) {
signr = __dequeue_signal(&tsk->signal->shared_pending,
- mask, info);
+ mask, info, &resched_timer);
/*
* itimer signal ?
*
@@ -611,7 +619,7 @@
*/
current->jobctl |= JOBCTL_STOP_DEQUEUED;
}
- if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
+ if (resched_timer) {
/*
* Release the siglock to ensure proper locking order
* of timer locks outside of siglocks. Note, we leave
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 534431a..1d894fc 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -128,9 +128,6 @@
static unsigned long one_ul = 1;
static int one_hundred = 100;
static int one_thousand = 1000;
-#ifdef CONFIG_SCHED_HMP
-static int max_freq_reporting_policy = FREQ_REPORT_INVALID_POLICY - 1;
-#endif
#ifdef CONFIG_PRINTK
static int ten_thousand = 10000;
#endif
@@ -300,202 +297,24 @@
.mode = 0644,
.proc_handler = proc_dointvec,
},
-#endif
-#ifdef CONFIG_SCHED_HMP
- {
- .procname = "sched_freq_reporting_policy",
- .data = &sysctl_sched_freq_reporting_policy,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
- .extra2 = &max_freq_reporting_policy,
- },
- {
- .procname = "sched_freq_inc_notify",
- .data = &sysctl_sched_freq_inc_notify,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
- },
- {
- .procname = "sched_freq_dec_notify",
- .data = &sysctl_sched_freq_dec_notify,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
- },
- {
- .procname = "sched_ravg_hist_size",
- .data = &sysctl_sched_ravg_hist_size,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = sched_window_update_handler,
- },
- {
- .procname = "sched_window_stats_policy",
- .data = &sysctl_sched_window_stats_policy,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = sched_window_update_handler,
- },
- {
- .procname = "sched_spill_load",
- .data = &sysctl_sched_spill_load_pct,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = sched_hmp_proc_update_handler,
- .extra1 = &zero,
- .extra2 = &one_hundred,
- },
- {
- .procname = "sched_spill_nr_run",
- .data = &sysctl_sched_spill_nr_run,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
- },
- {
- .procname = "sched_upmigrate",
- .data = &sysctl_sched_upmigrate_pct,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = sched_hmp_proc_update_handler,
- .extra1 = &zero,
- .extra2 = &one_hundred,
- },
- {
- .procname = "sched_downmigrate",
- .data = &sysctl_sched_downmigrate_pct,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = sched_hmp_proc_update_handler,
- .extra1 = &zero,
- .extra2 = &one_hundred,
- },
{
.procname = "sched_group_upmigrate",
.data = &sysctl_sched_group_upmigrate_pct,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = sched_hmp_proc_update_handler,
- .extra1 = &zero,
+ .proc_handler = walt_proc_update_handler,
+ .extra1 = &sysctl_sched_group_downmigrate_pct,
},
{
.procname = "sched_group_downmigrate",
.data = &sysctl_sched_group_downmigrate_pct,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = sched_hmp_proc_update_handler,
+ .proc_handler = walt_proc_update_handler,
.extra1 = &zero,
+ .extra2 = &sysctl_sched_group_upmigrate_pct,
},
- {
- .procname = "sched_init_task_load",
- .data = &sysctl_sched_init_task_load_pct,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = sched_hmp_proc_update_handler,
- .extra1 = &zero,
- .extra2 = &one_hundred,
- },
- {
- .procname = "sched_select_prev_cpu_us",
- .data = &sysctl_sched_select_prev_cpu_us,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = sched_hmp_proc_update_handler,
- .extra1 = &zero,
- },
- {
- .procname = "sched_restrict_cluster_spill",
- .data = &sysctl_sched_restrict_cluster_spill,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
- .extra2 = &one,
- },
- {
- .procname = "sched_small_wakee_task_load",
- .data = &sysctl_sched_small_wakee_task_load_pct,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = sched_hmp_proc_update_handler,
- .extra1 = &zero,
- .extra2 = &one_hundred,
- },
- {
- .procname = "sched_big_waker_task_load",
- .data = &sysctl_sched_big_waker_task_load_pct,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = sched_hmp_proc_update_handler,
- .extra1 = &zero,
- .extra2 = &one_hundred,
- },
- {
- .procname = "sched_prefer_sync_wakee_to_waker",
- .data = &sysctl_sched_prefer_sync_wakee_to_waker,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
- .extra2 = &one,
- },
- {
- .procname = "sched_enable_thread_grouping",
- .data = &sysctl_sched_enable_thread_grouping,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {
- .procname = "sched_pred_alert_freq",
- .data = &sysctl_sched_pred_alert_freq,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
- },
- {
- .procname = "sched_freq_aggregate",
- .data = &sysctl_sched_freq_aggregate,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = sched_window_update_handler,
- },
- {
- .procname = "sched_freq_aggregate_threshold",
- .data = &sysctl_sched_freq_aggregate_threshold_pct,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = sched_hmp_proc_update_handler,
- .extra1 = &zero,
- /*
- * Special handling for sched_freq_aggregate_threshold_pct
- * which can be greater than 100. Use 1000 as an upper bound
- * value which works for all practical use cases.
- */
- .extra2 = &one_thousand,
- },
- {
- .procname = "sched_short_burst_ns",
- .data = &sysctl_sched_short_burst,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {
- .procname = "sched_short_sleep_ns",
- .data = &sysctl_sched_short_sleep,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
-#endif /* CONFIG_SCHED_HMP */
+#endif
#ifdef CONFIG_SCHED_DEBUG
{
.procname = "sched_min_granularity_ns",
@@ -577,6 +396,20 @@
.extra1 = &min_wakeup_granularity_ns,
.extra2 = &max_wakeup_granularity_ns,
},
+ {
+ .procname = "sched_upmigrate",
+ .data = &sysctl_sched_capacity_margin,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = sched_updown_migrate_handler,
+ },
+ {
+ .procname = "sched_downmigrate",
+ .data = &sysctl_sched_capacity_margin_down,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = sched_updown_migrate_handler,
+ },
#ifdef CONFIG_SMP
{
.procname = "sched_tunable_scaling",
@@ -1942,6 +1775,22 @@
.extra2 = (void *)&mmap_rnd_compat_bits_max,
},
#endif
+#ifdef CONFIG_SWAP
+ {
+ .procname = "swap_ratio",
+ .data = &sysctl_swap_ratio,
+ .maxlen = sizeof(sysctl_swap_ratio),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ },
+ {
+ .procname = "swap_ratio_enable",
+ .data = &sysctl_swap_ratio_enable,
+ .maxlen = sizeof(sysctl_swap_ratio_enable),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ },
+#endif
{ }
};
@@ -2440,9 +2289,12 @@
if (write) {
if (*negp)
return -EINVAL;
+ if (*lvalp > UINT_MAX)
+ return -EINVAL;
*valp = *lvalp;
} else {
unsigned int val = *valp;
+ *negp = false;
*lvalp = (unsigned long)val;
}
return 0;
@@ -3172,6 +3024,39 @@
}
}
+static int do_proc_douintvec_capacity_conv(bool *negp, unsigned long *lvalp,
+ int *valp, int write, void *data)
+{
+ if (write) {
+ if (*negp)
+ return -EINVAL;
+ *valp = SCHED_FIXEDPOINT_SCALE * 100 / *lvalp;
+ } else {
+ *negp = false;
+ *lvalp = SCHED_FIXEDPOINT_SCALE * 100 / *valp;
+ }
+
+ return 0;
+}
+
+/**
+ * proc_douintvec_capacity - read a vector of integers in percentage and convert
+ * into sched capacity
+ * @table: the sysctl table
+ * @write: %TRUE if this is a write to the sysctl file
+ * @buffer: the user buffer
+ * @lenp: the size of the user buffer
+ * @ppos: file position
+ *
+ * Returns 0 on success.
+ */
+int proc_douintvec_capacity(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ return do_proc_dointvec(table, write, buffer, lenp, ppos,
+ do_proc_douintvec_capacity_conv, NULL);
+}
+
#else /* CONFIG_PROC_SYSCTL */
int proc_dostring(struct ctl_table *table, int write,
@@ -3229,6 +3114,11 @@
return -ENOSYS;
}
+int proc_douintvec_capacity(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ return -ENOSYS;
+}
#endif /* CONFIG_PROC_SYSCTL */
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index fa80192..842928a 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -568,7 +568,7 @@
{
struct alarm_base *base = &alarm_bases[alarm->type];
- start = ktime_add(start, base->gettime());
+ start = ktime_add_safe(start, base->gettime());
alarm_start(alarm, start);
}
EXPORT_SYMBOL_GPL(alarm_start_relative);
@@ -654,7 +654,7 @@
overrun++;
}
- alarm->node.expires = ktime_add(alarm->node.expires, interval);
+ alarm->node.expires = ktime_add_safe(alarm->node.expires, interval);
return overrun;
}
EXPORT_SYMBOL_GPL(alarm_forward);
@@ -840,13 +840,22 @@
/* start the timer */
timr->it.alarm.interval = timespec_to_ktime(new_setting->it_interval);
+
+ /*
+ * Rate limit to the tick as a hot fix to prevent DOS. Will be
+ * mopped up later.
+ */
+ if (timr->it.alarm.interval.tv64 &&
+ ktime_to_ns(timr->it.alarm.interval) < TICK_NSEC)
+ timr->it.alarm.interval = ktime_set(0, TICK_NSEC);
+
exp = timespec_to_ktime(new_setting->it_value);
/* Convert (if necessary) to absolute time */
if (flags != TIMER_ABSTIME) {
ktime_t now;
now = alarm_bases[timr->it.alarm.alarmtimer.type].gettime();
- exp = ktime_add(now, exp);
+ exp = ktime_add_safe(now, exp);
}
alarm_start(&timr->it.alarm.alarmtimer, exp);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 9055429..44cc350 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -22,6 +22,7 @@
#include <linux/module.h>
#include <linux/irq_work.h>
#include <linux/posix-timers.h>
+#include <linux/timer.h>
#include <linux/context_tracking.h>
#include <linux/rq_stats.h>
@@ -933,6 +934,11 @@
now = tick_nohz_start_idle(ts);
+#ifdef CONFIG_SMP
+ if (check_pending_deferrable_timers(cpu))
+ raise_softirq_irqoff(TIMER_SOFTIRQ);
+#endif
+
if (can_stop_idle_tick(cpu, ts)) {
int was_stopped = ts->tick_stopped;
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index bfe589e..234d3e4 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -116,6 +116,26 @@
tk->offs_boot = ktime_add(tk->offs_boot, delta);
}
+/*
+ * tk_clock_read - atomic clocksource read() helper
+ *
+ * This helper is necessary to use in the read paths because, while the
+ * seqlock ensures we don't return a bad value while structures are updated,
+ * it doesn't protect from potential crashes. There is the possibility that
+ * the tkr's clocksource may change between the read reference, and the
+ * clock reference passed to the read function. This can cause crashes if
+ * the wrong clocksource is passed to the wrong read function.
+ * This isn't necessary to use when holding the timekeeper_lock or doing
+ * a read of the fast-timekeeper tkrs (which is protected by its own locking
+ * and update logic).
+ */
+static inline u64 tk_clock_read(struct tk_read_base *tkr)
+{
+ struct clocksource *clock = READ_ONCE(tkr->clock);
+
+ return clock->read(clock);
+}
+
#ifdef CONFIG_DEBUG_TIMEKEEPING
#define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */
@@ -173,7 +193,7 @@
*/
do {
seq = read_seqcount_begin(&tk_core.seq);
- now = tkr->read(tkr->clock);
+ now = tk_clock_read(tkr);
last = tkr->cycle_last;
mask = tkr->mask;
max = tkr->clock->max_cycles;
@@ -207,7 +227,7 @@
cycle_t cycle_now, delta;
/* read clocksource */
- cycle_now = tkr->read(tkr->clock);
+ cycle_now = tk_clock_read(tkr);
/* calculate the delta since the last update_wall_time */
delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
@@ -236,12 +256,10 @@
++tk->cs_was_changed_seq;
old_clock = tk->tkr_mono.clock;
tk->tkr_mono.clock = clock;
- tk->tkr_mono.read = clock->read;
tk->tkr_mono.mask = clock->mask;
- tk->tkr_mono.cycle_last = tk->tkr_mono.read(clock);
+ tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono);
tk->tkr_raw.clock = clock;
- tk->tkr_raw.read = clock->read;
tk->tkr_raw.mask = clock->mask;
tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;
@@ -260,8 +278,7 @@
/* Go back from cycles -> shifted ns */
tk->xtime_interval = (u64) interval * clock->mult;
tk->xtime_remainder = ntpinterval - tk->xtime_interval;
- tk->raw_interval =
- ((u64) interval * clock->mult) >> clock->shift;
+ tk->raw_interval = interval * clock->mult;
/* if changing clocks, convert xtime_nsec shift units */
if (old_clock) {
@@ -405,7 +422,7 @@
now += timekeeping_delta_to_ns(tkr,
clocksource_delta(
- tkr->read(tkr->clock),
+ tk_clock_read(tkr),
tkr->cycle_last,
tkr->mask));
} while (read_seqcount_retry(&tkf->seq, seq));
@@ -462,6 +479,10 @@
return cycles_at_suspend;
}
+static struct clocksource dummy_clock = {
+ .read = dummy_clock_read,
+};
+
/**
* halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
* @tk: Timekeeper to snapshot.
@@ -478,13 +499,13 @@
struct tk_read_base *tkr = &tk->tkr_mono;
memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
- cycles_at_suspend = tkr->read(tkr->clock);
- tkr_dummy.read = dummy_clock_read;
+ cycles_at_suspend = tk_clock_read(tkr);
+ tkr_dummy.clock = &dummy_clock;
update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);
tkr = &tk->tkr_raw;
memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
- tkr_dummy.read = dummy_clock_read;
+ tkr_dummy.clock = &dummy_clock;
update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
}
@@ -650,11 +671,10 @@
*/
static void timekeeping_forward_now(struct timekeeper *tk)
{
- struct clocksource *clock = tk->tkr_mono.clock;
cycle_t cycle_now, delta;
s64 nsec;
- cycle_now = tk->tkr_mono.read(clock);
+ cycle_now = tk_clock_read(&tk->tkr_mono);
delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
tk->tkr_mono.cycle_last = cycle_now;
tk->tkr_raw.cycle_last = cycle_now;
@@ -930,8 +950,7 @@
do {
seq = read_seqcount_begin(&tk_core.seq);
-
- now = tk->tkr_mono.read(tk->tkr_mono.clock);
+ now = tk_clock_read(&tk->tkr_mono);
systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq;
systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq;
base_real = ktime_add(tk->tkr_mono.base,
@@ -1110,7 +1129,7 @@
* Check whether the system counter value provided by the
* device driver is on the current timekeeping interval.
*/
- now = tk->tkr_mono.read(tk->tkr_mono.clock);
+ now = tk_clock_read(&tk->tkr_mono);
interval_start = tk->tkr_mono.cycle_last;
if (!cycle_between(interval_start, cycles, now)) {
clock_was_set_seq = tk->clock_was_set_seq;
@@ -1668,7 +1687,7 @@
* The less preferred source will only be tried if there is no better
* usable source. The rtc part is handled separately in rtc core code.
*/
- cycle_now = tk->tkr_mono.read(clock);
+ cycle_now = tk_clock_read(&tk->tkr_mono);
if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
cycle_now > tk->tkr_mono.cycle_last) {
u64 num, max = ULLONG_MAX;
@@ -2032,7 +2051,7 @@
unsigned int *clock_set)
{
cycle_t interval = tk->cycle_interval << shift;
- u64 raw_nsecs;
+ u64 snsec_per_sec;
/* If the offset is smaller than a shifted interval, do nothing */
if (offset < interval)
@@ -2047,14 +2066,15 @@
*clock_set |= accumulate_nsecs_to_secs(tk);
/* Accumulate raw time */
- raw_nsecs = (u64)tk->raw_interval << shift;
- raw_nsecs += tk->raw_time.tv_nsec;
- if (raw_nsecs >= NSEC_PER_SEC) {
- u64 raw_secs = raw_nsecs;
- raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
- tk->raw_time.tv_sec += raw_secs;
+ tk->tkr_raw.xtime_nsec += (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift;
+ tk->tkr_raw.xtime_nsec += tk->raw_interval << shift;
+ snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
+ while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) {
+ tk->tkr_raw.xtime_nsec -= snsec_per_sec;
+ tk->raw_time.tv_sec++;
}
- tk->raw_time.tv_nsec = raw_nsecs;
+ tk->raw_time.tv_nsec = tk->tkr_raw.xtime_nsec >> tk->tkr_raw.shift;
+ tk->tkr_raw.xtime_nsec -= (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift;
/* Accumulate error between NTP and clock interval */
tk->ntp_error += tk->ntp_tick << shift;
@@ -2086,7 +2106,7 @@
#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
offset = real_tk->cycle_interval;
#else
- offset = clocksource_delta(tk->tkr_mono.read(tk->tkr_mono.clock),
+ offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
#endif
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index adede73..3a2dd86 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -207,6 +207,7 @@
static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);
struct timer_base timer_base_deferrable;
+static atomic_t deferrable_pending;
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
unsigned int sysctl_timer_migration = 1;
@@ -1460,6 +1461,31 @@
return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC;
}
+
+#ifdef CONFIG_SMP
+/*
+ * check_pending_deferrable_timers - Check for unbound deferrable timer expiry
+ * @cpu - Current CPU
+ *
+ * The function checks whether any global deferrable pending timers
+ * are expired or not. This function does not check cpu bounded
+ * deferrable pending timers expiry.
+ *
+ * The function returns true when a cpu unbounded deferrable timer has expired.
+ */
+bool check_pending_deferrable_timers(int cpu)
+{
+ if (cpu == tick_do_timer_cpu ||
+ tick_do_timer_cpu == TICK_DO_TIMER_NONE) {
+ if (time_after_eq(jiffies, timer_base_deferrable.clk)
+ && !atomic_cmpxchg(&deferrable_pending, 0, 1)) {
+ return true;
+ }
+ }
+ return false;
+}
+#endif
+
/**
* get_next_timer_interrupt - return the time (clock mono) of the next timer
* @basej: base time jiffies
@@ -1619,10 +1645,13 @@
struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
__run_timers(base);
- if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active) {
- __run_timers(&timer_base_deferrable);
+ if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active)
__run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
- }
+
+ if ((atomic_cmpxchg(&deferrable_pending, 1, 0) &&
+ tick_do_timer_cpu == TICK_DO_TIMER_NONE) ||
+ tick_do_timer_cpu == smp_processor_id())
+ __run_timers(&timer_base_deferrable);
}
/*
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 221eb59..4f7ea84 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3590,7 +3590,7 @@
int exclude_mod = 0;
int found = 0;
int ret;
- int clear_filter;
+ int clear_filter = 0;
if (func) {
func_g.type = filter_parse_regex(func, len, &func_g.search,
diff --git a/kernel/trace/ipc_logging.c b/kernel/trace/ipc_logging.c
index 6d310ab..ec9bde7 100644
--- a/kernel/trace/ipc_logging.c
+++ b/kernel/trace/ipc_logging.c
@@ -314,8 +314,11 @@
ilctxt->write_page->hdr.end_time = t_now;
ilctxt->write_page = get_next_page(ilctxt, ilctxt->write_page);
- if (WARN_ON(ilctxt->write_page == NULL))
+ if (WARN_ON(ilctxt->write_page == NULL)) {
+ spin_unlock(&ilctxt->context_lock_lhb1);
+ read_unlock_irqrestore(&context_list_lock_lha1, flags);
return;
+ }
ilctxt->write_page->hdr.write_offset = 0;
ilctxt->write_page->hdr.start_time = t_now;
memcpy((ilctxt->write_page->data +
@@ -546,6 +549,7 @@
struct decode_context *dctxt);
struct ipc_log_context *ilctxt = (struct ipc_log_context *)ctxt;
unsigned long flags;
+ int ret;
if (size < MAX_MSG_DECODED_SIZE)
return -EINVAL;
@@ -555,6 +559,11 @@
dctxt.size = size;
read_lock_irqsave(&context_list_lock_lha1, flags);
spin_lock(&ilctxt->context_lock_lhb1);
+ if (ilctxt->destroyed) {
+ ret = -EIO;
+ goto done;
+ }
+
while (dctxt.size >= MAX_MSG_DECODED_SIZE &&
!is_nd_read_empty(ilctxt)) {
msg_read(ilctxt, &ectxt);
@@ -570,11 +579,17 @@
read_lock_irqsave(&context_list_lock_lha1, flags);
spin_lock(&ilctxt->context_lock_lhb1);
}
- if ((size - dctxt.size) == 0)
- reinit_completion(&ilctxt->read_avail);
+ ret = size - dctxt.size;
+ if (ret == 0) {
+ if (!ilctxt->destroyed)
+ reinit_completion(&ilctxt->read_avail);
+ else
+ ret = -EIO;
+ }
+done:
spin_unlock(&ilctxt->context_lock_lhb1);
read_unlock_irqrestore(&context_list_lock_lha1, flags);
- return size - dctxt.size;
+ return ret;
}
EXPORT_SYMBOL(ipc_log_extract);
@@ -832,6 +847,8 @@
ctxt->nd_read_page = ctxt->first_page;
ctxt->write_avail = max_num_pages * LOG_PAGE_DATA_SIZE;
ctxt->header_size = sizeof(struct ipc_log_page_header);
+ kref_init(&ctxt->refcount);
+ ctxt->destroyed = false;
create_ctx_debugfs(ctxt, mod_name);
/* set magic last to signal context init is complete */
@@ -854,6 +871,21 @@
}
EXPORT_SYMBOL(ipc_log_context_create);
+void ipc_log_context_free(struct kref *kref)
+{
+ struct ipc_log_context *ilctxt = container_of(kref,
+ struct ipc_log_context, refcount);
+ struct ipc_log_page *pg = NULL;
+
+ while (!list_empty(&ilctxt->page_list)) {
+ pg = get_first_page(ilctxt);
+ list_del(&pg->hdr.list);
+ kfree(pg);
+ }
+
+ kfree(ilctxt);
+}
+
/*
* Destroy debug log context
*
@@ -862,25 +894,24 @@
int ipc_log_context_destroy(void *ctxt)
{
struct ipc_log_context *ilctxt = (struct ipc_log_context *)ctxt;
- struct ipc_log_page *pg = NULL;
unsigned long flags;
if (!ilctxt)
return 0;
- while (!list_empty(&ilctxt->page_list)) {
- pg = get_first_page(ctxt);
- list_del(&pg->hdr.list);
- kfree(pg);
- }
+ debugfs_remove_recursive(ilctxt->dent);
+
+ spin_lock(&ilctxt->context_lock_lhb1);
+ ilctxt->destroyed = true;
+ complete_all(&ilctxt->read_avail);
+ spin_unlock(&ilctxt->context_lock_lhb1);
write_lock_irqsave(&context_list_lock_lha1, flags);
list_del(&ilctxt->list);
write_unlock_irqrestore(&context_list_lock_lha1, flags);
- debugfs_remove_recursive(ilctxt->dent);
+ ipc_log_context_put(ilctxt);
- kfree(ilctxt);
return 0;
}
EXPORT_SYMBOL(ipc_log_context_destroy);
diff --git a/kernel/trace/ipc_logging_debug.c b/kernel/trace/ipc_logging_debug.c
index a545387..d733724 100644
--- a/kernel/trace/ipc_logging_debug.c
+++ b/kernel/trace/ipc_logging_debug.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2015, 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -74,23 +74,42 @@
static ssize_t debug_read_helper(struct file *file, char __user *buff,
size_t count, loff_t *ppos, int cont)
{
- struct ipc_log_context *ilctxt = file->private_data;
+ struct ipc_log_context *ilctxt;
+ struct dentry *d = file->f_path.dentry;
char *buffer;
int bsize;
+ int srcu_idx;
+ int r;
+
+ r = debugfs_use_file_start(d, &srcu_idx);
+ if (!r) {
+ ilctxt = file->private_data;
+ r = kref_get_unless_zero(&ilctxt->refcount) ? 0 : -EIO;
+ }
+ debugfs_use_file_finish(srcu_idx);
+ if (r)
+ return r;
buffer = kmalloc(count, GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
+ if (!buffer) {
+ bsize = -ENOMEM;
+ goto done;
+ }
bsize = debug_log(ilctxt, buffer, count, cont);
+
if (bsize > 0) {
if (copy_to_user(buff, buffer, bsize)) {
+ bsize = -EFAULT;
kfree(buffer);
- return -EFAULT;
+ goto done;
}
*ppos += bsize;
}
kfree(buffer);
+
+done:
+ ipc_log_context_put(ilctxt);
return bsize;
}
@@ -127,7 +146,7 @@
struct ipc_log_context *ilctxt,
const struct file_operations *fops)
{
- debugfs_create_file(name, mode, dent, ilctxt, fops);
+ debugfs_create_file_unsafe(name, mode, dent, ilctxt, fops);
}
static void dfunc_string(struct encode_context *ectxt,
diff --git a/kernel/trace/ipc_logging_private.h b/kernel/trace/ipc_logging_private.h
index 594027a..47c41e9 100644
--- a/kernel/trace/ipc_logging_private.h
+++ b/kernel/trace/ipc_logging_private.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -119,6 +119,8 @@
struct list_head dfunc_info_list;
spinlock_t context_lock_lhb1;
struct completion read_avail;
+ struct kref refcount;
+ bool destroyed;
};
struct dfunc_info {
@@ -147,6 +149,13 @@
((x) < TSV_TYPE_MSG_END))
#define MAX_MSG_DECODED_SIZE (MAX_MSG_SIZE*4)
+void ipc_log_context_free(struct kref *kref);
+
+static inline void ipc_log_context_put(struct ipc_log_context *ilctxt)
+{
+ kref_put(&ilctxt->refcount, ipc_log_context_free);
+}
+
#if (defined(CONFIG_DEBUG_FS))
void check_and_create_debugfs(void);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index ebf9498..4a848f7 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1928,7 +1928,7 @@
#endif
((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
- ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
+ ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
}
@@ -7257,6 +7257,7 @@
}
kfree(tr->topts);
+ free_cpumask_var(tr->tracing_cpumask);
kfree(tr->name);
kfree(tr);
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 562fa69..997ac0b 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -259,7 +259,8 @@
void perf_trace_del(struct perf_event *p_event, int flags)
{
struct trace_event_call *tp_event = p_event->tp_event;
- hlist_del_rcu(&p_event->hlist_entry);
+ if (!hlist_unhashed(&p_event->hlist_entry))
+ hlist_del_rcu(&p_event->hlist_entry);
tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
}
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 8d2b4d8..5ff45ca 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -667,30 +667,25 @@
pr_info("Probe point is not specified.\n");
return -EINVAL;
}
- if (isdigit(argv[1][0])) {
- if (is_return) {
- pr_info("Return probe point must be a symbol.\n");
- return -EINVAL;
- }
- /* an address specified */
- ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr);
- if (ret) {
- pr_info("Failed to parse address.\n");
- return ret;
- }
- } else {
+
+ /* Try to parse an address. If that fails, try to read the
+ * input as a symbol. */
+ if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
/* a symbol specified */
symbol = argv[1];
/* TODO: support .init module functions */
ret = traceprobe_split_symbol_offset(symbol, &offset);
if (ret) {
- pr_info("Failed to parse symbol.\n");
+ pr_info("Failed to parse either an address or a symbol.\n");
return ret;
}
if (offset && is_return) {
pr_info("Return probe must be used without offset.\n");
return -EINVAL;
}
+ } else if (is_return) {
+ pr_info("Return probe point must be a symbol.\n");
+ return -EINVAL;
}
argc -= 2; argv += 2;
diff --git a/kernel/ucount.c b/kernel/ucount.c
index f4ac185..c761cdb 100644
--- a/kernel/ucount.c
+++ b/kernel/ucount.c
@@ -231,11 +231,10 @@
* properly.
*/
user_header = register_sysctl("user", empty);
+ kmemleak_ignore(user_header);
BUG_ON(!user_header);
BUG_ON(!setup_userns_sysctls(&init_user_ns));
#endif
return 0;
}
subsys_initcall(user_namespace_sysctl_init);
-
-
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 44ae68a..cffb5f2 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -25,33 +25,15 @@
#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
-#include <linux/perf_event.h>
#include <linux/kthread.h>
#include <soc/qcom/watchdog.h>
-/*
- * The run state of the lockup detectors is controlled by the content of the
- * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
- * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
- *
- * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled'
- * are variables that are only used as an 'interface' between the parameters
- * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The
- * 'watchdog_thresh' variable is handled differently because its value is not
- * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh'
- * is equal zero.
- */
-#define NMI_WATCHDOG_ENABLED_BIT 0
-#define SOFT_WATCHDOG_ENABLED_BIT 1
-#define NMI_WATCHDOG_ENABLED (1 << NMI_WATCHDOG_ENABLED_BIT)
-#define SOFT_WATCHDOG_ENABLED (1 << SOFT_WATCHDOG_ENABLED_BIT)
-
static DEFINE_MUTEX(watchdog_proc_mutex);
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
-static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED|NMI_WATCHDOG_ENABLED;
+#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
+unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED|NMI_WATCHDOG_ENABLED;
#else
-static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED;
+unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED;
#endif
int __read_mostly nmi_watchdog_enabled;
int __read_mostly soft_watchdog_enabled;
@@ -61,9 +43,6 @@
#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
-#else
-#define sysctl_softlockup_all_cpu_backtrace 0
-#define sysctl_hardlockup_all_cpu_backtrace 0
#endif
static struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
@@ -72,6 +51,8 @@
#define for_each_watchdog_cpu(cpu) \
for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)
+atomic_t watchdog_park_in_progress = ATOMIC_INIT(0);
+
/*
* The 'watchdog_running' variable is set to 1 when the watchdog threads
* are registered/started and is set to 0 when the watchdog threads are
@@ -103,55 +84,9 @@
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
-static DEFINE_PER_CPU(bool, hard_watchdog_warn);
-static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
-#endif
-#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
-static cpumask_t __read_mostly watchdog_cpus;
-#endif
-#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
-static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
-#endif
static unsigned long soft_lockup_nmi_warn;
-/* boot commands */
-/*
- * Should we panic when a soft-lockup or hard-lockup occurs:
- */
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
-unsigned int __read_mostly hardlockup_panic =
- CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
-static unsigned long __maybe_unused hardlockup_allcpu_dumped;
-/*
- * We may not want to enable hard lockup detection by default in all cases,
- * for example when running the kernel as a guest on a hypervisor. In these
- * cases this function can be called to disable hard lockup detection. This
- * function should only be executed once by the boot processor before the
- * kernel command line parameters are parsed, because otherwise it is not
- * possible to override this in hardlockup_panic_setup().
- */
-void hardlockup_detector_disable(void)
-{
- watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
-}
-
-static int __init hardlockup_panic_setup(char *str)
-{
- if (!strncmp(str, "panic", 5))
- hardlockup_panic = 1;
- else if (!strncmp(str, "nopanic", 7))
- hardlockup_panic = 0;
- else if (!strncmp(str, "0", 1))
- watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
- else if (!strncmp(str, "1", 1))
- watchdog_enabled |= NMI_WATCHDOG_ENABLED;
- return 1;
-}
-__setup("nmi_watchdog=", hardlockup_panic_setup);
-#endif
-
unsigned int __read_mostly softlockup_panic =
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;
@@ -272,32 +207,14 @@
wq_watchdog_touch(-1);
}
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
-void touch_nmi_watchdog(void)
-{
- /*
- * Using __raw here because some code paths have
- * preemption enabled. If preemption is enabled
- * then interrupts should be enabled too, in which
- * case we shouldn't have to worry about the watchdog
- * going off.
- */
- raw_cpu_write(watchdog_nmi_touch, true);
- touch_softlockup_watchdog();
-}
-EXPORT_SYMBOL(touch_nmi_watchdog);
-
-#endif
-
void touch_softlockup_watchdog_sync(void)
{
__this_cpu_write(softlockup_touch_sync, true);
__this_cpu_write(watchdog_touch_ts, 0);
}
-#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
/* watchdog detector functions */
-static bool is_hardlockup(void)
+bool is_hardlockup(void)
{
unsigned long hrint = __this_cpu_read(hrtimer_interrupts);
@@ -307,80 +224,6 @@
__this_cpu_write(hrtimer_interrupts_saved, hrint);
return false;
}
-#endif
-
-#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
-static unsigned int watchdog_next_cpu(unsigned int cpu)
-{
- cpumask_t cpus = watchdog_cpus;
- unsigned int next_cpu;
-
- next_cpu = cpumask_next(cpu, &cpus);
- if (next_cpu >= nr_cpu_ids)
- next_cpu = cpumask_first(&cpus);
-
- if (next_cpu == cpu)
- return nr_cpu_ids;
-
- return next_cpu;
-}
-
-static int is_hardlockup_other_cpu(unsigned int cpu)
-{
- unsigned long hrint = per_cpu(hrtimer_interrupts, cpu);
-
- if (per_cpu(hrtimer_interrupts_saved, cpu) == hrint)
- return 1;
-
- per_cpu(hrtimer_interrupts_saved, cpu) = hrint;
- return 0;
-}
-
-static void watchdog_check_hardlockup_other_cpu(void)
-{
- unsigned int next_cpu;
-
- /*
- * Test for hardlockups every 3 samples. The sample period is
- * watchdog_thresh * 2 / 5, so 3 samples gets us back to slightly over
- * watchdog_thresh (over by 20%).
- */
- if (__this_cpu_read(hrtimer_interrupts) % 3 != 0)
- return;
-
- /* check for a hardlockup on the next cpu */
- next_cpu = watchdog_next_cpu(smp_processor_id());
- if (next_cpu >= nr_cpu_ids)
- return;
-
- smp_rmb();
-
- if (per_cpu(watchdog_nmi_touch, next_cpu) == true) {
- per_cpu(watchdog_nmi_touch, next_cpu) = false;
- return;
- }
-
- if (is_hardlockup_other_cpu(next_cpu)) {
- /* only warn once */
- if (per_cpu(hard_watchdog_warn, next_cpu) == true)
- return;
-
- if (hardlockup_panic) {
- pr_err("Watchdog detected hard LOCKUP on cpu %u",
- next_cpu);
- msm_trigger_wdog_bite();
- }
- else
- WARN(1, "Watchdog detected hard LOCKUP on cpu %u", next_cpu);
-
- per_cpu(hard_watchdog_warn, next_cpu) = true;
- } else {
- per_cpu(hard_watchdog_warn, next_cpu) = false;
- }
-}
-#else
-static inline void watchdog_check_hardlockup_other_cpu(void) { return; }
-#endif
static int is_softlockup(unsigned long touch_ts)
{
@@ -394,80 +237,22 @@
return 0;
}
-#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
-
-static struct perf_event_attr wd_hw_attr = {
- .type = PERF_TYPE_HARDWARE,
- .config = PERF_COUNT_HW_CPU_CYCLES,
- .size = sizeof(struct perf_event_attr),
- .pinned = 1,
- .disabled = 1,
-};
-
-/* Callback function for perf event subsystem */
-static void watchdog_overflow_callback(struct perf_event *event,
- struct perf_sample_data *data,
- struct pt_regs *regs)
-{
- /* Ensure the watchdog never gets throttled */
- event->hw.interrupts = 0;
-
- if (__this_cpu_read(watchdog_nmi_touch) == true) {
- __this_cpu_write(watchdog_nmi_touch, false);
- return;
- }
-
- /* check for a hardlockup
- * This is done by making sure our timer interrupt
- * is incrementing. The timer interrupt should have
- * fired multiple times before we overflow'd. If it hasn't
- * then this is a good indication the cpu is stuck
- */
- if (is_hardlockup()) {
- int this_cpu = smp_processor_id();
-
- /* only print hardlockups once */
- if (__this_cpu_read(hard_watchdog_warn) == true)
- return;
-
- pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
- if (hardlockup_panic)
- msm_trigger_wdog_bite();
-
- print_modules();
- print_irqtrace_events(current);
- if (regs)
- show_regs(regs);
- else
- dump_stack();
-
- /*
- * Perform all-CPU dump only once to avoid multiple hardlockups
- * generating interleaving traces
- */
- if (sysctl_hardlockup_all_cpu_backtrace &&
- !test_and_set_bit(0, &hardlockup_allcpu_dumped))
- trigger_allbutself_cpu_backtrace();
-
- if (hardlockup_panic)
- nmi_panic(regs, "Hard LOCKUP");
-
- __this_cpu_write(hard_watchdog_warn, true);
- return;
- }
-
- __this_cpu_write(hard_watchdog_warn, false);
- return;
-}
-#endif /* CONFIG_HARDLOCKUP_DETECTOR_NMI */
-
static void watchdog_interrupt_count(void)
{
__this_cpu_inc(hrtimer_interrupts);
}
-static int watchdog_nmi_enable(unsigned int cpu);
-static void watchdog_nmi_disable(unsigned int cpu);
+/*
+ * These two functions are mostly architecture specific
+ * defining them as weak here.
+ */
+int __weak watchdog_nmi_enable(unsigned int cpu)
+{
+ return 0;
+}
+void __weak watchdog_nmi_disable(unsigned int cpu)
+{
+}
static int watchdog_enable_all_cpus(void);
static void watchdog_disable_all_cpus(void);
@@ -480,12 +265,12 @@
int duration;
int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
+ if (atomic_read(&watchdog_park_in_progress) != 0)
+ return HRTIMER_NORESTART;
+
/* kick the hardlockup detector */
watchdog_interrupt_count();
- /* test for hardlockups on the next cpu */
- watchdog_check_hardlockup_other_cpu();
-
/* kick the softlockup detector */
wake_up_process(__this_cpu_read(softlockup_watchdog));
@@ -694,144 +479,6 @@
watchdog_nmi_disable(cpu);
}
-#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
-/*
- * People like the simple clean cpu node info on boot.
- * Reduce the watchdog noise by only printing messages
- * that are different from what cpu0 displayed.
- */
-static unsigned long cpu0_err;
-
-static int watchdog_nmi_enable(unsigned int cpu)
-{
- struct perf_event_attr *wd_attr;
- struct perf_event *event = per_cpu(watchdog_ev, cpu);
-
- /* nothing to do if the hard lockup detector is disabled */
- if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
- goto out;
-
- /* is it already setup and enabled? */
- if (event && event->state > PERF_EVENT_STATE_OFF)
- goto out;
-
- /* it is setup but not enabled */
- if (event != NULL)
- goto out_enable;
-
- wd_attr = &wd_hw_attr;
- wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);
-
- /* Try to register using hardware perf events */
- event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);
-
- /* save cpu0 error for future comparision */
- if (cpu == 0 && IS_ERR(event))
- cpu0_err = PTR_ERR(event);
-
- if (!IS_ERR(event)) {
- /* only print for cpu0 or different than cpu0 */
- if (cpu == 0 || cpu0_err)
- pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
- goto out_save;
- }
-
- /*
- * Disable the hard lockup detector if _any_ CPU fails to set up
- * set up the hardware perf event. The watchdog() function checks
- * the NMI_WATCHDOG_ENABLED bit periodically.
- *
- * The barriers are for syncing up watchdog_enabled across all the
- * cpus, as clear_bit() does not use barriers.
- */
- smp_mb__before_atomic();
- clear_bit(NMI_WATCHDOG_ENABLED_BIT, &watchdog_enabled);
- smp_mb__after_atomic();
-
- /* skip displaying the same error again */
- if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
- return PTR_ERR(event);
-
- /* vary the KERN level based on the returned errno */
- if (PTR_ERR(event) == -EOPNOTSUPP)
- pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
- else if (PTR_ERR(event) == -ENOENT)
- pr_warn("disabled (cpu%i): hardware events not enabled\n",
- cpu);
- else
- pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
- cpu, PTR_ERR(event));
-
- pr_info("Shutting down hard lockup detector on all cpus\n");
-
- return PTR_ERR(event);
-
- /* success path */
-out_save:
- per_cpu(watchdog_ev, cpu) = event;
-out_enable:
- perf_event_enable(per_cpu(watchdog_ev, cpu));
-out:
- return 0;
-}
-
-static void watchdog_nmi_disable(unsigned int cpu)
-{
- struct perf_event *event = per_cpu(watchdog_ev, cpu);
-
- if (event) {
- perf_event_disable(event);
- per_cpu(watchdog_ev, cpu) = NULL;
-
- /* should be in cleanup, but blocks oprofile */
- perf_event_release_kernel(event);
- }
- if (cpu == 0) {
- /* watchdog_nmi_enable() expects this to be zero initially. */
- cpu0_err = 0;
- }
-}
-
-#else
-#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
-static int watchdog_nmi_enable(unsigned int cpu)
-{
- /*
- * The new cpu will be marked online before the first hrtimer interrupt
- * runs on it. If another cpu tests for a hardlockup on the new cpu
- * before it has run its first hrtimer, it will get a false positive.
- * Touch the watchdog on the new cpu to delay the first check for at
- * least 3 sampling periods to guarantee one hrtimer has run on the new
- * cpu.
- */
- per_cpu(watchdog_nmi_touch, cpu) = true;
- smp_wmb();
- cpumask_set_cpu(cpu, &watchdog_cpus);
- return 0;
-}
-
-static void watchdog_nmi_disable(unsigned int cpu)
-{
- unsigned int next_cpu = watchdog_next_cpu(cpu);
-
- /*
- * Offlining this cpu will cause the cpu before this one to start
- * checking the one after this one. If this cpu just finished checking
- * the next cpu and updating hrtimer_interrupts_saved, and then the
- * previous cpu checks it within one sample period, it will trigger a
- * false positive. Touch the watchdog on the next cpu to prevent it.
- */
- if (next_cpu < nr_cpu_ids)
- per_cpu(watchdog_nmi_touch, next_cpu) = true;
- smp_wmb();
- cpumask_clear_cpu(cpu, &watchdog_cpus);
-}
-#else
-static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
-static void watchdog_nmi_disable(unsigned int cpu) { return; }
-#endif /* CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU */
-#endif /* CONFIG_HARDLOCKUP_DETECTOR_NMI */
-
static struct smp_hotplug_thread watchdog_threads = {
.store = &softlockup_watchdog,
.thread_should_run = watchdog_should_run,
@@ -859,12 +506,16 @@
{
int cpu, ret = 0;
+ atomic_set(&watchdog_park_in_progress, 1);
+
for_each_watchdog_cpu(cpu) {
ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
if (ret)
break;
}
+ atomic_set(&watchdog_park_in_progress, 0);
+
return ret;
}
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
new file mode 100644
index 0000000..12b8dd6
--- /dev/null
+++ b/kernel/watchdog_hld.c
@@ -0,0 +1,230 @@
+/*
+ * Detect hard lockups on a system
+ *
+ * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
+ *
+ * Note: Most of this code is borrowed heavily from the original softlockup
+ * detector, so thanks to Ingo for the initial implementation.
+ * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
+ * to those contributors as well.
+ */
+
+#define pr_fmt(fmt) "NMI watchdog: " fmt
+
+#include <linux/nmi.h>
+#include <linux/module.h>
+#include <asm/irq_regs.h>
+#include <linux/perf_event.h>
+
+static DEFINE_PER_CPU(bool, hard_watchdog_warn);
+static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
+static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
+
+/* boot commands */
+/*
+ * Should we panic when a soft-lockup or hard-lockup occurs:
+ */
+unsigned int __read_mostly hardlockup_panic =
+ CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
+static unsigned long hardlockup_allcpu_dumped;
+/*
+ * We may not want to enable hard lockup detection by default in all cases,
+ * for example when running the kernel as a guest on a hypervisor. In these
+ * cases this function can be called to disable hard lockup detection. This
+ * function should only be executed once by the boot processor before the
+ * kernel command line parameters are parsed, because otherwise it is not
+ * possible to override this in hardlockup_panic_setup().
+ */
+void hardlockup_detector_disable(void)
+{
+ watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
+}
+
+static int __init hardlockup_panic_setup(char *str)
+{
+ if (!strncmp(str, "panic", 5))
+ hardlockup_panic = 1;
+ else if (!strncmp(str, "nopanic", 7))
+ hardlockup_panic = 0;
+ else if (!strncmp(str, "0", 1))
+ watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
+ else if (!strncmp(str, "1", 1))
+ watchdog_enabled |= NMI_WATCHDOG_ENABLED;
+ return 1;
+}
+__setup("nmi_watchdog=", hardlockup_panic_setup);
+
+void touch_nmi_watchdog(void)
+{
+ /*
+ * Using __raw here because some code paths have
+ * preemption enabled. If preemption is enabled
+ * then interrupts should be enabled too, in which
+ * case we shouldn't have to worry about the watchdog
+ * going off.
+ */
+ raw_cpu_write(watchdog_nmi_touch, true);
+ touch_softlockup_watchdog();
+}
+EXPORT_SYMBOL(touch_nmi_watchdog);
+
+static struct perf_event_attr wd_hw_attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+ .size = sizeof(struct perf_event_attr),
+ .pinned = 1,
+ .disabled = 1,
+};
+
+/* Callback function for perf event subsystem */
+static void watchdog_overflow_callback(struct perf_event *event,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
+{
+ /* Ensure the watchdog never gets throttled */
+ event->hw.interrupts = 0;
+
+ if (atomic_read(&watchdog_park_in_progress) != 0)
+ return;
+
+ if (__this_cpu_read(watchdog_nmi_touch) == true) {
+ __this_cpu_write(watchdog_nmi_touch, false);
+ return;
+ }
+
+ /* check for a hardlockup
+ * This is done by making sure our timer interrupt
+ * is incrementing. The timer interrupt should have
+ * fired multiple times before we overflow'd. If it hasn't
+ * then this is a good indication the cpu is stuck
+ */
+ if (is_hardlockup()) {
+ int this_cpu = smp_processor_id();
+
+ /* only print hardlockups once */
+ if (__this_cpu_read(hard_watchdog_warn) == true)
+ return;
+
+ pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
+ print_modules();
+ print_irqtrace_events(current);
+ if (regs)
+ show_regs(regs);
+ else
+ dump_stack();
+
+ /*
+ * Perform all-CPU dump only once to avoid multiple hardlockups
+ * generating interleaving traces
+ */
+ if (sysctl_hardlockup_all_cpu_backtrace &&
+ !test_and_set_bit(0, &hardlockup_allcpu_dumped))
+ trigger_allbutself_cpu_backtrace();
+
+ if (hardlockup_panic)
+ nmi_panic(regs, "Hard LOCKUP");
+
+ __this_cpu_write(hard_watchdog_warn, true);
+ return;
+ }
+
+ __this_cpu_write(hard_watchdog_warn, false);
+ return;
+}
+
+/*
+ * People like the simple clean cpu node info on boot.
+ * Reduce the watchdog noise by only printing messages
+ * that are different from what cpu0 displayed.
+ */
+static unsigned long cpu0_err;
+
+int watchdog_nmi_enable(unsigned int cpu)
+{
+ struct perf_event_attr *wd_attr;
+ struct perf_event *event = per_cpu(watchdog_ev, cpu);
+
+ /* nothing to do if the hard lockup detector is disabled */
+ if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
+ goto out;
+
+ /* is it already setup and enabled? */
+ if (event && event->state > PERF_EVENT_STATE_OFF)
+ goto out;
+
+ /* it is setup but not enabled */
+ if (event != NULL)
+ goto out_enable;
+
+ wd_attr = &wd_hw_attr;
+ wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);
+
+ /* Try to register using hardware perf events */
+ event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);
+
+ /* save cpu0 error for future comparison */
+ if (cpu == 0 && IS_ERR(event))
+ cpu0_err = PTR_ERR(event);
+
+ if (!IS_ERR(event)) {
+ /* only print for cpu0 or different than cpu0 */
+ if (cpu == 0 || cpu0_err)
+ pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
+ goto out_save;
+ }
+
+ /*
+ * Disable the hard lockup detector if _any_ CPU fails to set up
+ * the hardware perf event. The watchdog() function checks
+ * the NMI_WATCHDOG_ENABLED bit periodically.
+ *
+ * The barriers are for syncing up watchdog_enabled across all the
+ * cpus, as clear_bit() does not use barriers.
+ */
+ smp_mb__before_atomic();
+ clear_bit(NMI_WATCHDOG_ENABLED_BIT, &watchdog_enabled);
+ smp_mb__after_atomic();
+
+ /* skip displaying the same error again */
+ if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
+ return PTR_ERR(event);
+
+ /* vary the KERN level based on the returned errno */
+ if (PTR_ERR(event) == -EOPNOTSUPP)
+ pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
+ else if (PTR_ERR(event) == -ENOENT)
+ pr_warn("disabled (cpu%i): hardware events not enabled\n",
+ cpu);
+ else
+ pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
+ cpu, PTR_ERR(event));
+
+ pr_info("Shutting down hard lockup detector on all cpus\n");
+
+ return PTR_ERR(event);
+
+ /* success path */
+out_save:
+ per_cpu(watchdog_ev, cpu) = event;
+out_enable:
+ perf_event_enable(per_cpu(watchdog_ev, cpu));
+out:
+ return 0;
+}
+
+void watchdog_nmi_disable(unsigned int cpu)
+{
+ struct perf_event *event = per_cpu(watchdog_ev, cpu);
+
+ if (event) {
+ perf_event_disable(event);
+ per_cpu(watchdog_ev, cpu) = NULL;
+
+ /* should be in cleanup, but blocks oprofile */
+ perf_event_release_kernel(event);
+ }
+ if (cpu == 0) {
+ /* watchdog_nmi_enable() expects this to be zero initially. */
+ cpu0_err = 0;
+ }
+}
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 6878aa8..2f9f7aa 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -775,27 +775,15 @@
The overhead should be minimal. A periodic hrtimer runs to
generate interrupts and kick the watchdog task every 4 seconds.
An NMI is generated every 10 seconds or so to check for hardlockups.
- If NMIs are not available on the platform, every 12 seconds the
- hrtimer interrupt on one cpu will be used to check for hardlockups
- on the next cpu.
The frequency of hrtimer and NMI events and the soft and hard lockup
thresholds can be controlled through the sysctl watchdog_thresh.
-config HARDLOCKUP_DETECTOR_NMI
+config HARDLOCKUP_DETECTOR
def_bool y
depends on LOCKUP_DETECTOR && !HAVE_NMI_WATCHDOG
depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI
-config HARDLOCKUP_DETECTOR_OTHER_CPU
- def_bool y
- depends on LOCKUP_DETECTOR && SMP
- depends on !HARDLOCKUP_DETECTOR_NMI && !HAVE_NMI_WATCHDOG
-
-config HARDLOCKUP_DETECTOR
- def_bool y
- depends on HARDLOCKUP_DETECTOR_NMI || HARDLOCKUP_DETECTOR_OTHER_CPU
-
config BOOTPARAM_HARDLOCKUP_PANIC
bool "Panic (Reboot) On Hard Lockups"
depends on HARDLOCKUP_DETECTOR
diff --git a/lib/cmdline.c b/lib/cmdline.c
index 8f13cf7..79069d7 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -22,14 +22,14 @@
* the values[M, M+1, ..., N] into the ints array in get_options.
*/
-static int get_range(char **str, int *pint)
+static int get_range(char **str, int *pint, int n)
{
int x, inc_counter, upper_range;
(*str)++;
upper_range = simple_strtol((*str), NULL, 0);
inc_counter = upper_range - *pint;
- for (x = *pint; x < upper_range; x++)
+ for (x = *pint; n && x < upper_range; x++, n--)
*pint++ = x;
return inc_counter;
}
@@ -96,7 +96,7 @@
break;
if (res == 3) {
int range_nums;
- range_nums = get_range((char **)&str, ints + i);
+ range_nums = get_range((char **)&str, ints + i, nints - i);
if (range_nums < 0)
break;
/*
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 81dedaa..4731a08 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -43,6 +43,38 @@
}
EXPORT_SYMBOL(cpumask_any_but);
+/**
+ * cpumask_next_wrap - helper to implement for_each_cpu_wrap
+ * @n: the cpu prior to the place to search
+ * @mask: the cpumask pointer
+ * @start: the start point of the iteration
+ * @wrap: assume @n crossing @start terminates the iteration
+ *
+ * Returns >= nr_cpu_ids on completion
+ *
+ * Note: the @wrap argument is required for the start condition when
+ * we cannot assume @start is set in @mask.
+ */
+int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
+{
+ int next;
+
+again:
+ next = cpumask_next(n, mask);
+
+ if (wrap && n < start && next >= start) {
+ return nr_cpumask_bits;
+
+ } else if (next >= nr_cpumask_bits) {
+ wrap = true;
+ n = -1;
+ goto again;
+ }
+
+ return next;
+}
+EXPORT_SYMBOL(cpumask_next_wrap);
+
/* These are not inline because of header tangles. */
#ifdef CONFIG_CPUMASK_OFFSTACK
/**
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
index 74a54b7..9f79547 100644
--- a/lib/libcrc32c.c
+++ b/lib/libcrc32c.c
@@ -43,7 +43,7 @@
u32 crc32c(u32 crc, const void *address, unsigned int length)
{
SHASH_DESC_ON_STACK(shash, tfm);
- u32 *ctx = (u32 *)shash_desc_ctx(shash);
+ u32 ret, *ctx = (u32 *)shash_desc_ctx(shash);
int err;
shash->tfm = tfm;
@@ -53,7 +53,9 @@
err = crypto_shash_update(shash, address, length);
BUG_ON(err);
- return *ctx;
+ ret = *ctx;
+ barrier_data(ctx);
+ return ret;
}
EXPORT_SYMBOL(crc32c);
diff --git a/lib/string.c b/lib/string.c
index ed83562..ccabe16 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -952,3 +952,10 @@
return s;
}
EXPORT_SYMBOL(strreplace);
+
+void fortify_panic(const char *name)
+{
+ pr_emerg("detected buffer overflow in %s\n", name);
+ BUG();
+}
+EXPORT_SYMBOL(fortify_panic);
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index ad1d296..b7812df 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -456,11 +456,11 @@
: 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
/*
- * For mappings greater than a page, we limit the stride (and
- * hence alignment) to a page size.
+ * For mappings greater than or equal to a page, we limit the stride
+ * (and hence alignment) to a page size.
*/
nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
- if (size > PAGE_SIZE)
+ if (size >= PAGE_SIZE)
stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
else
stride = 1;
diff --git a/mm/Kconfig b/mm/Kconfig
index eb10c90..3363a70 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -730,3 +730,19 @@
always using ZONE_DMA memory.
If unsure, say "n".
+
+config PROCESS_RECLAIM
+ bool "Enable process reclaim"
+ depends on PROC_FS
+ default n
+ help
+ It allows reclaiming pages of the process via /proc/pid/reclaim.
+
+ (echo file > /proc/PID/reclaim) reclaims file-backed pages only.
+ (echo anon > /proc/PID/reclaim) reclaims anonymous pages only.
+ (echo all > /proc/PID/reclaim) reclaims all pages.
+
+ (echo addr size-byte > /proc/PID/reclaim) reclaims pages in
+ (addr, addr + size-bytes) of the process.
+
+ Any other value is ignored.
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
index b9019d4..78e68f9 100644
--- a/mm/Kconfig.debug
+++ b/mm/Kconfig.debug
@@ -65,6 +65,16 @@
If unsure, say N
+config PAGE_POISONING_ENABLE_DEFAULT
+ bool "Enable page poisoning by default?"
+ default n
+ depends on PAGE_POISONING
+ ---help---
+ Enable page poisoning of free pages by default? This value
+ can be overridden by page_poison=off|on. This can be used
+ to avoid passing the kernel parameter and let page poisoning
+ feature be enabled by default.
+
config PAGE_POISONING_NO_SANITY
depends on PAGE_POISONING
bool "Only poison, don't sanity check"
diff --git a/mm/Makefile b/mm/Makefile
index a7e9b6a..7a9642f 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -37,7 +37,7 @@
mm_init.o mmu_context.o percpu.o slab_common.o \
compaction.o vmacache.o \
interval_tree.o list_lru.o workingset.o \
- debug.o $(mmu-y) showmem.o
+ debug.o $(mmu-y) showmem.o vmpressure.o
obj-y += init-mm.o
@@ -53,7 +53,7 @@
endif
obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o
-obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o
+obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o swap_ratio.o
obj-$(CONFIG_FRONTSWAP) += frontswap.o
obj-$(CONFIG_ZSWAP) += zswap.o
obj-$(CONFIG_HAS_DMA) += dmapool.o
@@ -76,7 +76,7 @@
obj-$(CONFIG_QUICKLIST) += quicklist.o
obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += huge_memory.o khugepaged.o
obj-$(CONFIG_PAGE_COUNTER) += page_counter.o
-obj-$(CONFIG_MEMCG) += memcontrol.o vmpressure.o
+obj-$(CONFIG_MEMCG) += memcontrol.o
obj-$(CONFIG_MEMCG_SWAP) += swap_cgroup.o
obj-$(CONFIG_CGROUP_HUGETLB) += hugetlb_cgroup.o
obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o
@@ -100,3 +100,4 @@
obj-$(CONFIG_FRAME_VECTOR) += frame_vector.o
obj-$(CONFIG_DEBUG_PAGE_REF) += debug_page_ref.o
obj-$(CONFIG_HARDENED_USERCOPY) += usercopy.o
+obj-$(CONFIG_PROCESS_RECLAIM) += process_reclaim.o
diff --git a/mm/cma.c b/mm/cma.c
index 0306bab..2984dac 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -35,6 +35,8 @@
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/show_mem_notifier.h>
#include <trace/events/cma.h>
#include "cma.h"
@@ -53,6 +55,11 @@
return cma->count << PAGE_SHIFT;
}
+const char *cma_get_name(const struct cma *cma)
+{
+ return cma->name ? cma->name : "(undefined)";
+}
+
static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
int align_order)
{
@@ -94,6 +101,29 @@
mutex_unlock(&cma->lock);
}
+static int cma_showmem_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ int i;
+ unsigned long used;
+ struct cma *cma;
+
+ for (i = 0; i < cma_area_count; i++) {
+ cma = &cma_areas[i];
+ used = bitmap_weight(cma->bitmap,
+ (int)cma_bitmap_maxno(cma));
+ used <<= cma->order_per_bit;
+ pr_info("cma-%d pages: => %lu used of %lu total pages\n",
+ i, used, cma->count);
+ }
+
+ return 0;
+}
+
+static struct notifier_block cma_nb = {
+ .notifier_call = cma_showmem_notifier,
+};
+
static int __init cma_activate_area(struct cma *cma)
{
int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
@@ -157,6 +187,8 @@
return ret;
}
+ show_mem_notifier_register(&cma_nb);
+
return 0;
}
core_initcall(cma_init_reserved_areas);
@@ -172,6 +204,7 @@
*/
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
unsigned int order_per_bit,
+ const char *name,
struct cma **res_cma)
{
struct cma *cma;
@@ -202,6 +235,13 @@
* subsystems (like slab allocator) are available.
*/
cma = &cma_areas[cma_area_count];
+ if (name) {
+ cma->name = name;
+ } else {
+ cma->name = kasprintf(GFP_KERNEL, "cma%d\n", cma_area_count);
+ if (!cma->name)
+ return -ENOMEM;
+ }
cma->base_pfn = PFN_DOWN(base);
cma->count = size >> PAGE_SHIFT;
cma->order_per_bit = order_per_bit;
@@ -233,7 +273,7 @@
int __init cma_declare_contiguous(phys_addr_t base,
phys_addr_t size, phys_addr_t limit,
phys_addr_t alignment, unsigned int order_per_bit,
- bool fixed, struct cma **res_cma)
+ bool fixed, const char *name, struct cma **res_cma)
{
phys_addr_t memblock_end = memblock_end_of_DRAM();
phys_addr_t highmem_start;
@@ -344,7 +384,7 @@
base = addr;
}
- ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);
+ ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
if (ret)
goto err;
@@ -357,6 +397,32 @@
return ret;
}
+#ifdef CONFIG_CMA_DEBUG
+static void cma_debug_show_areas(struct cma *cma)
+{
+ unsigned long next_zero_bit, next_set_bit;
+ unsigned long start = 0;
+ unsigned int nr_zero, nr_total = 0;
+
+ mutex_lock(&cma->lock);
+ pr_info("number of available pages: ");
+ for (;;) {
+ next_zero_bit = find_next_zero_bit(cma->bitmap, cma->count, start);
+ if (next_zero_bit >= cma->count)
+ break;
+ next_set_bit = find_next_bit(cma->bitmap, cma->count, next_zero_bit);
+ nr_zero = next_set_bit - next_zero_bit;
+ pr_cont("%s%u@%lu", nr_total ? "+" : "", nr_zero, next_zero_bit);
+ nr_total += nr_zero;
+ start = next_zero_bit + nr_zero;
+ }
+ pr_cont("=> %u free of %lu total pages\n", nr_total, cma->count);
+ mutex_unlock(&cma->lock);
+}
+#else
+static inline void cma_debug_show_areas(struct cma *cma) { }
+#endif
+
/**
* cma_alloc() - allocate pages from contiguous area
* @cma: Contiguous memory region for which the allocation is performed.
@@ -373,7 +439,8 @@
unsigned long start = 0;
unsigned long bitmap_maxno, bitmap_no, bitmap_count;
struct page *page = NULL;
- int ret;
+ int retry_after_sleep = 0;
+ int ret = -ENOMEM;
if (!cma || !cma->count)
return NULL;
@@ -400,8 +467,24 @@
bitmap_maxno, start, bitmap_count, mask,
offset);
if (bitmap_no >= bitmap_maxno) {
- mutex_unlock(&cma->lock);
- break;
+ if (retry_after_sleep < 2) {
+ start = 0;
+ /*
+ * Page may be momentarily pinned by some other
+ * process which has been scheduled out, eg.
+ * in exit path, during unmap call, or process
+ * fork and so cannot be freed there. Sleep
+ * for 100ms and retry twice to see if it has
+ * been freed later.
+ */
+ mutex_unlock(&cma->lock);
+ msleep(100);
+ retry_after_sleep++;
+ continue;
+ } else {
+ mutex_unlock(&cma->lock);
+ break;
+ }
}
bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
/*
@@ -434,6 +517,12 @@
trace_cma_alloc(pfn, page, count, align);
+ if (ret) {
+ pr_info("%s: alloc failed, req-size: %zu pages, ret: %d\n",
+ __func__, count, ret);
+ cma_debug_show_areas(cma);
+ }
+
pr_debug("%s(): returned %p\n", __func__, page);
return page;
}
diff --git a/mm/cma.h b/mm/cma.h
index 17c75a4..4986128 100644
--- a/mm/cma.h
+++ b/mm/cma.h
@@ -11,6 +11,7 @@
struct hlist_head mem_head;
spinlock_t mem_head_lock;
#endif
+ const char *name;
};
extern struct cma cma_areas[MAX_CMA_AREAS];
diff --git a/mm/cma_debug.c b/mm/cma_debug.c
index f8e4b60..3f7f84c 100644
--- a/mm/cma_debug.c
+++ b/mm/cma_debug.c
@@ -167,7 +167,7 @@
char name[16];
int u32s;
- sprintf(name, "cma-%d", idx);
+ scnprintf(name, sizeof(name), "cma-%s", cma->name);
tmp = debugfs_create_dir(name, cma_debugfs_root);
diff --git a/mm/compaction.c b/mm/compaction.c
index 70e6bec..f002a7f 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -635,20 +635,52 @@
}
/* Similar to reclaim, but different enough that they don't share logic */
-static bool too_many_isolated(struct zone *zone)
+static bool __too_many_isolated(struct zone *zone, int safe)
{
unsigned long active, inactive, isolated;
- inactive = node_page_state(zone->zone_pgdat, NR_INACTIVE_FILE) +
+ if (safe) {
+ inactive = node_page_state_snapshot(zone->zone_pgdat,
+ NR_INACTIVE_FILE) +
+ node_page_state_snapshot(zone->zone_pgdat,
+ NR_INACTIVE_ANON);
+ active = node_page_state_snapshot(zone->zone_pgdat,
+ NR_ACTIVE_FILE) +
+ node_page_state_snapshot(zone->zone_pgdat,
+ NR_ACTIVE_ANON);
+ isolated = node_page_state_snapshot(zone->zone_pgdat,
+ NR_ISOLATED_FILE) +
+ node_page_state_snapshot(zone->zone_pgdat,
+ NR_ISOLATED_ANON);
+ } else {
+ inactive = node_page_state(zone->zone_pgdat, NR_INACTIVE_FILE) +
node_page_state(zone->zone_pgdat, NR_INACTIVE_ANON);
- active = node_page_state(zone->zone_pgdat, NR_ACTIVE_FILE) +
+ active = node_page_state(zone->zone_pgdat, NR_ACTIVE_FILE) +
node_page_state(zone->zone_pgdat, NR_ACTIVE_ANON);
- isolated = node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE) +
+ isolated = node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE) +
node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON);
+ }
return isolated > (inactive + active) / 2;
}
+/* Similar to reclaim, but different enough that they don't share logic */
+static bool too_many_isolated(struct compact_control *cc)
+{
+ /*
+ * __too_many_isolated(safe=0) is fast but inaccurate, because it
+ * doesn't account for the vm_stat_diff[] counters. So if it looks
+ * like too_many_isolated() is about to return true, fall back to the
+ * slower, more accurate zone_page_state_snapshot().
+ */
+ if (unlikely(__too_many_isolated(cc->zone, 0))) {
+ if (cc->mode != MIGRATE_ASYNC)
+ return __too_many_isolated(cc->zone, 1);
+ }
+
+ return false;
+}
+
/**
* isolate_migratepages_block() - isolate all migrate-able pages within
* a single pageblock
@@ -686,7 +718,7 @@
* list by either parallel reclaimers or compaction. If there are,
* delay for some time until fewer pages are isolated
*/
- while (unlikely(too_many_isolated(zone))) {
+ while (unlikely(too_many_isolated(cc))) {
/* async migration should just abort */
if (cc->mode == MIGRATE_ASYNC)
return 0;
diff --git a/mm/filemap.c b/mm/filemap.c
index edfb90e..b4c09ec 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -251,10 +251,12 @@
* invalidate any existing cleancache entries. We can't leave
* stale data around in the cleancache once our page is gone
*/
- if (PageUptodate(page) && PageMappedToDisk(page))
+ if (PageUptodate(page) && PageMappedToDisk(page)) {
+ count_vm_event(PGPGOUTCLEAN);
cleancache_put_page(page);
- else
+ } else {
cleancache_invalidate_page(mapping, page);
+ }
VM_BUG_ON_PAGE(PageTail(page), page);
VM_BUG_ON_PAGE(page_mapped(page), page);
diff --git a/mm/gup.c b/mm/gup.c
index ec4f827..c63a034 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -370,11 +370,6 @@
/* mlock all present pages, but do not fault in new pages */
if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
return -ENOENT;
- /* For mm_populate(), just skip the stack guard page. */
- if ((*flags & FOLL_POPULATE) &&
- (stack_guard_page_start(vma, address) ||
- stack_guard_page_end(vma, address + PAGE_SIZE)))
- return -ENOENT;
if (*flags & FOLL_WRITE)
fault_flags |= FAULT_FLAG_WRITE;
if (*flags & FOLL_REMOTE)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index d5b2b75..8258e9e 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1227,8 +1227,11 @@
*/
if (unlikely(pmd_trans_migrating(*fe->pmd))) {
page = pmd_page(*fe->pmd);
+ if (!get_page_unless_zero(page))
+ goto out_unlock;
spin_unlock(fe->ptl);
wait_on_page_locked(page);
+ put_page(page);
goto out;
}
@@ -1260,8 +1263,11 @@
/* Migration could have started since the pmd_trans_migrating check */
if (!page_locked) {
+ if (!get_page_unless_zero(page))
+ goto out_unlock;
spin_unlock(fe->ptl);
wait_on_page_locked(page);
+ put_page(page);
page_nid = -1;
goto out;
}
@@ -1367,8 +1373,8 @@
get_page(page);
spin_unlock(ptl);
split_huge_page(page);
- put_page(page);
unlock_page(page);
+ put_page(page);
goto out_unlocked;
}
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 35d2db8..5cbd2de 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -13,6 +13,7 @@
*
*/
+#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/printk.h>
@@ -359,10 +360,13 @@
if (likely(!kasan_report_enabled()))
return;
+ disable_trace_on_warning();
+
info.access_addr = (void *)addr;
info.access_size = size;
info.is_write = is_write;
info.ip = ip;
+ info.first_bad_addr = NULL;
kasan_report_error(&info);
}
diff --git a/mm/ksm.c b/mm/ksm.c
index 5f1855b..927aa34 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1964,6 +1964,7 @@
stable_node = page_stable_node(page);
if (!stable_node)
return ret;
+
again:
hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
struct anon_vma *anon_vma = rmap_item->anon_vma;
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 234676e..7a40fa2 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -117,6 +117,7 @@
l = list_lru_from_kmem(nlru, item);
list_add_tail(item, &l->list);
l->nr_items++;
+ nlru->nr_items++;
spin_unlock(&nlru->lock);
return true;
}
@@ -136,6 +137,7 @@
l = list_lru_from_kmem(nlru, item);
list_del_init(item);
l->nr_items--;
+ nlru->nr_items--;
spin_unlock(&nlru->lock);
return true;
}
@@ -183,15 +185,10 @@
unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
- long count = 0;
- int memcg_idx;
+ struct list_lru_node *nlru;
- count += __list_lru_count_one(lru, nid, -1);
- if (list_lru_memcg_aware(lru)) {
- for_each_memcg_cache_index(memcg_idx)
- count += __list_lru_count_one(lru, nid, memcg_idx);
- }
- return count;
+ nlru = &lru->node[nid];
+ return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);
@@ -226,6 +223,7 @@
assert_spin_locked(&nlru->lock);
case LRU_REMOVED:
isolated++;
+ nlru->nr_items--;
/*
* If the lru lock has been dropped, our list
* traversal is now invalid and so we have to
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index fdc790a..3b38b73 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5996,7 +5996,7 @@
VM_BUG_ON_PAGE(!PageLocked(page), page);
- if (vm_swap_full())
+ if (vm_swap_full(page_swap_info(page)))
return true;
if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
return false;
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 4bd4480..b335423 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -972,7 +972,7 @@
if (kill)
collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
- ret = try_to_unmap(hpage, ttu);
+ ret = try_to_unmap(hpage, ttu, NULL);
if (ret != SWAP_SUCCESS)
pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
pfn, page_mapcount(hpage));
@@ -1176,7 +1176,10 @@
* page_remove_rmap() in try_to_unmap_one(). So to determine page status
* correctly, we save a copy of the page flags at this time.
*/
- page_flags = p->flags;
+ if (PageHuge(p))
+ page_flags = hpage->flags;
+ else
+ page_flags = p->flags;
/*
* unpoison always clear PG_hwpoison inside page lock
diff --git a/mm/memory.c b/mm/memory.c
index 91e1653..49d9b42 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2699,40 +2699,6 @@
}
/*
- * This is like a special single-page "expand_{down|up}wards()",
- * except we must first make sure that 'address{-|+}PAGE_SIZE'
- * doesn't hit another vma.
- */
-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
-{
- address &= PAGE_MASK;
- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
- struct vm_area_struct *prev = vma->vm_prev;
-
- /*
- * Is there a mapping abutting this one below?
- *
- * That's only ok if it's the same stack mapping
- * that has gotten split..
- */
- if (prev && prev->vm_end == address)
- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
-
- return expand_downwards(vma, address - PAGE_SIZE);
- }
- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
- struct vm_area_struct *next = vma->vm_next;
-
- /* As VM_GROWSDOWN but s/below/above/ */
- if (next && next->vm_start == address + PAGE_SIZE)
- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
-
- return expand_upwards(vma, address + PAGE_SIZE);
- }
- return 0;
-}
-
-/*
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
* We return with mmap_sem still held, but pte unmapped and unlocked.
@@ -2748,10 +2714,6 @@
if (vma->vm_flags & VM_SHARED)
return VM_FAULT_SIGBUS;
- /* Check if we need to add a guard page to the stack */
- if (check_stack_guard_page(vma, fe->address) < 0)
- return VM_FAULT_SIGSEGV;
-
/*
* Use pte_alloc() instead of pte_alloc_map(). We can't run
* pte_offset_map() on pmds where a huge pmd might be created
diff --git a/mm/migrate.c b/mm/migrate.c
index 435f674..f0b786d 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1020,7 +1020,7 @@
VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
page);
try_to_unmap(page,
- TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+ TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS, NULL);
page_was_mapped = 1;
}
@@ -1238,7 +1238,7 @@
if (page_mapped(hpage)) {
try_to_unmap(hpage,
- TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+ TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS, NULL);
page_was_mapped = 1;
}
diff --git a/mm/mmap.c b/mm/mmap.c
index 143d62f..6f90f07 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -183,6 +183,7 @@
unsigned long retval;
unsigned long newbrk, oldbrk;
struct mm_struct *mm = current->mm;
+ struct vm_area_struct *next;
unsigned long min_brk;
bool populate;
@@ -228,7 +229,8 @@
}
/* Check against existing mmap mappings. */
- if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
+ next = find_vma(mm, oldbrk);
+ if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
goto out;
/* Ok, looks good - let it rip. */
@@ -251,10 +253,22 @@
static long vma_compute_subtree_gap(struct vm_area_struct *vma)
{
- unsigned long max, subtree_gap;
- max = vma->vm_start;
- if (vma->vm_prev)
- max -= vma->vm_prev->vm_end;
+ unsigned long max, prev_end, subtree_gap;
+
+ /*
+ * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we
+ * allow two stack_guard_gaps between them here, and when choosing
+ * an unmapped area; whereas when expanding we only require one.
+ * That's a little inconsistent, but keeps the code here simpler.
+ */
+ max = vm_start_gap(vma);
+ if (vma->vm_prev) {
+ prev_end = vm_end_gap(vma->vm_prev);
+ if (max > prev_end)
+ max -= prev_end;
+ else
+ max = 0;
+ }
if (vma->vm_rb.rb_left) {
subtree_gap = rb_entry(vma->vm_rb.rb_left,
struct vm_area_struct, vm_rb)->rb_subtree_gap;
@@ -350,7 +364,7 @@
anon_vma_unlock_read(anon_vma);
}
- highest_address = vma->vm_end;
+ highest_address = vm_end_gap(vma);
vma = vma->vm_next;
i++;
}
@@ -539,7 +553,7 @@
if (vma->vm_next)
vma_gap_update(vma->vm_next);
else
- mm->highest_vm_end = vma->vm_end;
+ mm->highest_vm_end = vm_end_gap(vma);
/*
* vma->vm_prev wasn't known when we followed the rbtree to find the
@@ -854,7 +868,7 @@
vma_gap_update(vma);
if (end_changed) {
if (!next)
- mm->highest_vm_end = end;
+ mm->highest_vm_end = vm_end_gap(vma);
else if (!adjust_next)
vma_gap_update(next);
}
@@ -939,7 +953,7 @@
* mm->highest_vm_end doesn't need any update
* in remove_next == 1 case.
*/
- VM_WARN_ON(mm->highest_vm_end != end);
+ VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma));
}
}
if (insert && file)
@@ -1792,7 +1806,7 @@
while (true) {
/* Visit left subtree if it looks promising */
- gap_end = vma->vm_start;
+ gap_end = vm_start_gap(vma);
if (gap_end >= low_limit && vma->vm_rb.rb_left) {
struct vm_area_struct *left =
rb_entry(vma->vm_rb.rb_left,
@@ -1803,12 +1817,13 @@
}
}
- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
+ gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
check_current:
/* Check if current node has a suitable gap */
if (gap_start > high_limit)
return -ENOMEM;
- if (gap_end >= low_limit && gap_end - gap_start >= length)
+ if (gap_end >= low_limit &&
+ gap_end > gap_start && gap_end - gap_start >= length)
goto found;
/* Visit right subtree if it looks promising */
@@ -1830,8 +1845,8 @@
vma = rb_entry(rb_parent(prev),
struct vm_area_struct, vm_rb);
if (prev == vma->vm_rb.rb_left) {
- gap_start = vma->vm_prev->vm_end;
- gap_end = vma->vm_start;
+ gap_start = vm_end_gap(vma->vm_prev);
+ gap_end = vm_start_gap(vma);
goto check_current;
}
}
@@ -1895,7 +1910,7 @@
while (true) {
/* Visit right subtree if it looks promising */
- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
+ gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
if (gap_start <= high_limit && vma->vm_rb.rb_right) {
struct vm_area_struct *right =
rb_entry(vma->vm_rb.rb_right,
@@ -1908,10 +1923,11 @@
check_current:
/* Check if current node has a suitable gap */
- gap_end = vma->vm_start;
+ gap_end = vm_start_gap(vma);
if (gap_end < low_limit)
return -ENOMEM;
- if (gap_start <= high_limit && gap_end - gap_start >= length)
+ if (gap_start <= high_limit &&
+ gap_end > gap_start && gap_end - gap_start >= length)
goto found;
/* Visit left subtree if it looks promising */
@@ -1934,7 +1950,7 @@
struct vm_area_struct, vm_rb);
if (prev == vma->vm_rb.rb_right) {
gap_start = vma->vm_prev ?
- vma->vm_prev->vm_end : 0;
+ vm_end_gap(vma->vm_prev) : 0;
goto check_current;
}
}
@@ -1972,7 +1988,7 @@
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
+ struct vm_area_struct *vma, *prev;
struct vm_unmapped_area_info info;
if (len > TASK_SIZE - mmap_min_addr)
@@ -1983,9 +1999,10 @@
if (addr) {
addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
+ vma = find_vma_prev(mm, addr, &prev);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)) &&
+ (!prev || addr >= vm_end_gap(prev)))
return addr;
}
@@ -2008,7 +2025,7 @@
const unsigned long len, const unsigned long pgoff,
const unsigned long flags)
{
- struct vm_area_struct *vma;
+ struct vm_area_struct *vma, *prev;
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
struct vm_unmapped_area_info info;
@@ -2023,9 +2040,10 @@
/* requesting a specific address */
if (addr) {
addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
+ vma = find_vma_prev(mm, addr, &prev);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)) &&
+ (!prev || addr >= vm_end_gap(prev)))
return addr;
}
@@ -2160,21 +2178,19 @@
* update accounting. This is shared with both the
* grow-up and grow-down cases.
*/
-static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
+static int acct_stack_growth(struct vm_area_struct *vma,
+ unsigned long size, unsigned long grow)
{
struct mm_struct *mm = vma->vm_mm;
struct rlimit *rlim = current->signal->rlim;
- unsigned long new_start, actual_size;
+ unsigned long new_start;
/* address space limit tests */
if (!may_expand_vm(mm, vma->vm_flags, grow))
return -ENOMEM;
/* Stack limit test */
- actual_size = size;
- if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
- actual_size -= PAGE_SIZE;
- if (actual_size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+ if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
return -ENOMEM;
/* mlock limit tests */
@@ -2212,16 +2228,32 @@
int expand_upwards(struct vm_area_struct *vma, unsigned long address)
{
struct mm_struct *mm = vma->vm_mm;
+ struct vm_area_struct *next;
+ unsigned long gap_addr;
int error = 0;
if (!(vma->vm_flags & VM_GROWSUP))
return -EFAULT;
- /* Guard against wrapping around to address 0. */
- if (address < PAGE_ALIGN(address+4))
- address = PAGE_ALIGN(address+4);
- else
+ /* Guard against exceeding limits of the address space. */
+ address &= PAGE_MASK;
+ if (address >= (TASK_SIZE & PAGE_MASK))
return -ENOMEM;
+ address += PAGE_SIZE;
+
+ /* Enforce stack_guard_gap */
+ gap_addr = address + stack_guard_gap;
+
+ /* Guard against overflow */
+ if (gap_addr < address || gap_addr > TASK_SIZE)
+ gap_addr = TASK_SIZE;
+
+ next = vma->vm_next;
+ if (next && next->vm_start < gap_addr) {
+ if (!(next->vm_flags & VM_GROWSUP))
+ return -ENOMEM;
+ /* Check that both stack segments have the same anon_vma? */
+ }
/* We must make sure the anon_vma is allocated. */
if (unlikely(anon_vma_prepare(vma)))
@@ -2266,7 +2298,7 @@
if (vma->vm_next)
vma_gap_update(vma->vm_next);
else
- mm->highest_vm_end = address;
+ mm->highest_vm_end = vm_end_gap(vma);
spin_unlock(&mm->page_table_lock);
perf_event_mmap(vma);
@@ -2287,6 +2319,8 @@
unsigned long address)
{
struct mm_struct *mm = vma->vm_mm;
+ struct vm_area_struct *prev;
+ unsigned long gap_addr;
int error;
address &= PAGE_MASK;
@@ -2294,6 +2328,17 @@
if (error)
return error;
+ /* Enforce stack_guard_gap */
+ gap_addr = address - stack_guard_gap;
+ if (gap_addr > address)
+ return -ENOMEM;
+ prev = vma->vm_prev;
+ if (prev && prev->vm_end > gap_addr) {
+ if (!(prev->vm_flags & VM_GROWSDOWN))
+ return -ENOMEM;
+ /* Check that both stack segments have the same anon_vma? */
+ }
+
/* We must make sure the anon_vma is allocated. */
if (unlikely(anon_vma_prepare(vma)))
return -ENOMEM;
@@ -2348,28 +2393,25 @@
return error;
}
-/*
- * Note how expand_stack() refuses to expand the stack all the way to
- * abut the next virtual mapping, *unless* that mapping itself is also
- * a stack mapping. We want to leave room for a guard page, after all
- * (the guard page itself is not added here, that is done by the
- * actual page faulting logic)
- *
- * This matches the behavior of the guard page logic (see mm/memory.c:
- * check_stack_guard_page()), which only allows the guard page to be
- * removed under these circumstances.
- */
+/* enforced gap between the expanding stack and other mappings. */
+unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
+
+static int __init cmdline_parse_stack_guard_gap(char *p)
+{
+ unsigned long val;
+ char *endptr;
+
+ val = simple_strtoul(p, &endptr, 10);
+ if (!*endptr)
+ stack_guard_gap = val << PAGE_SHIFT;
+
+ return 0;
+}
+__setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
+
#ifdef CONFIG_STACK_GROWSUP
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
- struct vm_area_struct *next;
-
- address &= PAGE_MASK;
- next = vma->vm_next;
- if (next && next->vm_start == address + PAGE_SIZE) {
- if (!(next->vm_flags & VM_GROWSUP))
- return -ENOMEM;
- }
return expand_upwards(vma, address);
}
@@ -2391,14 +2433,6 @@
#else
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
- struct vm_area_struct *prev;
-
- address &= PAGE_MASK;
- prev = vma->vm_prev;
- if (prev && prev->vm_end == address) {
- if (!(prev->vm_flags & VM_GROWSDOWN))
- return -ENOMEM;
- }
return expand_downwards(vma, address);
}
@@ -2496,7 +2530,7 @@
vma->vm_prev = prev;
vma_gap_update(vma);
} else
- mm->highest_vm_end = prev ? prev->vm_end : 0;
+ mm->highest_vm_end = prev ? vm_end_gap(prev) : 0;
tail_vma->vm_next = NULL;
/* Kill the cache */
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index ec9f11d..3a22b14 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -37,6 +37,7 @@
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/init.h>
+#include <linux/show_mem_notifier.h>
#include <asm/tlb.h>
#include "internal.h"
@@ -368,7 +369,7 @@
* State information includes task's pid, uid, tgid, vm size, rss, nr_ptes,
* swapents, oom_score_adj value, and name.
*/
-static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
+void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
struct task_struct *p;
struct task_struct *task;
@@ -416,8 +417,11 @@
dump_stack();
if (oc->memcg)
mem_cgroup_print_oom_info(oc->memcg, p);
- else
+ else {
show_mem(SHOW_MEM_FILTER_NODES);
+ show_mem_call_notifiers();
+ }
+
if (sysctl_oom_dump_tasks)
dump_tasks(oc->memcg, oc->nodemask);
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 27ddaae..44085b2 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -64,6 +64,7 @@
#include <linux/page_owner.h>
#include <linux/kthread.h>
#include <linux/memcontrol.h>
+#include <linux/show_mem_notifier.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
@@ -233,10 +234,10 @@
"Unmovable",
"Movable",
"Reclaimable",
- "HighAtomic",
#ifdef CONFIG_CMA
"CMA",
#endif
+ "HighAtomic",
#ifdef CONFIG_MEMORY_ISOLATION
"Isolate",
#endif
@@ -1706,10 +1707,10 @@
return 1;
}
-static inline bool free_pages_prezeroed(bool poisoned)
+static inline bool free_pages_prezeroed(void)
{
return IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
- page_poisoning_enabled() && poisoned;
+ page_poisoning_enabled();
}
#ifdef CONFIG_DEBUG_VM
@@ -1763,17 +1764,10 @@
unsigned int alloc_flags)
{
int i;
- bool poisoned = true;
-
- for (i = 0; i < (1 << order); i++) {
- struct page *p = page + i;
- if (poisoned)
- poisoned &= page_is_poisoned(p);
- }
post_alloc_hook(page, order, gfp_flags);
- if (!free_pages_prezeroed(poisoned) && (gfp_flags & __GFP_ZERO))
+ if (!free_pages_prezeroed() && (gfp_flags & __GFP_ZERO))
for (i = 0; i < (1 << order); i++)
clear_highpage(page + i);
@@ -1839,6 +1833,11 @@
#endif
};
+int *get_migratetype_fallbacks(int mtype)
+{
+ return fallbacks[mtype];
+}
+
#ifdef CONFIG_CMA
static struct page *__rmqueue_cma_fallback(struct zone *zone,
unsigned int order)
@@ -2209,17 +2208,30 @@
page = __rmqueue_smallest(zone, order, migratetype);
if (unlikely(!page)) {
- if (migratetype == MIGRATE_MOVABLE)
- page = __rmqueue_cma_fallback(zone, order);
-
- if (!page)
- page = __rmqueue_fallback(zone, order, migratetype);
+ page = __rmqueue_fallback(zone, order, migratetype);
}
trace_mm_page_alloc_zone_locked(page, order, migratetype);
return page;
}
+#ifdef CONFIG_CMA
+static struct page *__rmqueue_cma(struct zone *zone, unsigned int order)
+{
+ struct page *page = 0;
+ if (IS_ENABLED(CONFIG_CMA))
+ if (!zone->cma_alloc)
+ page = __rmqueue_cma_fallback(zone, order);
+ trace_mm_page_alloc_zone_locked(page, order, MIGRATE_CMA);
+ return page;
+}
+#else
+static inline struct page *__rmqueue_cma(struct zone *zone, unsigned int order)
+{
+ return NULL;
+}
+#endif
+
/*
* Obtain a specified number of elements from the buddy allocator, all under
* a single hold of the lock, for efficiency. Add them to the supplied list.
@@ -2233,7 +2245,17 @@
spin_lock(&zone->lock);
for (i = 0; i < count; ++i) {
- struct page *page = __rmqueue(zone, order, migratetype);
+ struct page *page;
+
+ /*
+ * If migrate type CMA is being requested only try to
+ * satisfy the request with CMA pages to try and increase
+ * CMA utilization.
+ */
+ if (is_migrate_cma(migratetype))
+ page = __rmqueue_cma(zone, order);
+ else
+ page = __rmqueue(zone, order, migratetype);
if (unlikely(page == NULL))
break;
@@ -2271,6 +2293,28 @@
return alloced;
}
+/*
+ * Return the pcp list that corresponds to the migrate type if that list isn't
+ * empty.
+ * If the list is empty return NULL.
+ */
+static struct list_head *get_populated_pcp_list(struct zone *zone,
+ unsigned int order, struct per_cpu_pages *pcp,
+ int migratetype, int cold)
+{
+ struct list_head *list = &pcp->lists[migratetype];
+
+ if (list_empty(list)) {
+ pcp->count += rmqueue_bulk(zone, order,
+ pcp->batch, list,
+ migratetype, cold);
+
+ if (list_empty(list))
+ list = NULL;
+ }
+ return list;
+}
+
#ifdef CONFIG_NUMA
/*
* Called from the vmstat counter updater to drain pagesets of this
@@ -2631,22 +2675,33 @@
int migratetype)
{
unsigned long flags;
- struct page *page;
+ struct page *page = NULL;
bool cold = ((gfp_flags & __GFP_COLD) != 0);
if (likely(order == 0)) {
struct per_cpu_pages *pcp;
- struct list_head *list;
+ struct list_head *list = NULL;
local_irq_save(flags);
do {
pcp = &this_cpu_ptr(zone->pageset)->pcp;
- list = &pcp->lists[migratetype];
- if (list_empty(list)) {
- pcp->count += rmqueue_bulk(zone, 0,
- pcp->batch, list,
- migratetype, cold);
- if (unlikely(list_empty(list)))
+
+ /* First try to get CMA pages */
+ if (migratetype == MIGRATE_MOVABLE &&
+ gfp_flags & __GFP_CMA) {
+ list = get_populated_pcp_list(zone, 0, pcp,
+ get_cma_migrate_type(), cold);
+ }
+
+ if (list == NULL) {
+ /*
+ * Either CMA is not suitable or there are no
+ * free CMA pages.
+ */
+ list = get_populated_pcp_list(zone, 0, pcp,
+ migratetype, cold);
+ if (unlikely(list == NULL) ||
+ unlikely(list_empty(list)))
goto failed;
}
@@ -2674,9 +2729,14 @@
if (page)
trace_mm_page_alloc_zone_locked(page, order, migratetype);
}
+ if (!page && migratetype == MIGRATE_MOVABLE &&
+ gfp_flags & __GFP_CMA)
+ page = __rmqueue_cma(zone, order);
+
if (!page)
page = __rmqueue(zone, order, migratetype);
} while (page && check_new_pages(page, order));
+
spin_unlock(&zone->lock);
if (!page)
goto failed;
@@ -2834,6 +2894,14 @@
return true;
for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
+#ifdef CONFIG_CMA
+ /*
+ * Note that this check is needed only
+ * when MIGRATE_CMA < MIGRATE_PCPTYPES.
+ */
+ if (mt == MIGRATE_CMA)
+ continue;
+#endif
if (!list_empty(&area->free_list[mt]))
return true;
}
@@ -3063,8 +3131,10 @@
pr_cont(", mode:%#x(%pGg)\n", gfp_mask, &gfp_mask);
dump_stack();
- if (!should_suppress_show_mem())
+ if (!should_suppress_show_mem()) {
show_mem(filter);
+ show_mem_call_notifiers();
+ }
}
static inline struct page *
@@ -7311,6 +7381,7 @@
if (ret)
return ret;
+ cc.zone->cma_alloc = 1;
/*
* In case of -EBUSY, we'd like to know which page causes problem.
* So, just fall through. We will check it in test_pages_isolated().
@@ -7386,6 +7457,7 @@
done:
undo_isolate_page_range(pfn_max_align_down(start),
pfn_max_align_up(end), migratetype);
+ cc.zone->cma_alloc = 0;
return ret;
}
diff --git a/mm/page_ext.c b/mm/page_ext.c
index 121dcff..fc3e7ff 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -59,9 +59,6 @@
static struct page_ext_operations *page_ext_ops[] = {
&debug_guardpage_ops,
-#ifdef CONFIG_PAGE_POISONING
- &page_poisoning_ops,
-#endif
#ifdef CONFIG_PAGE_OWNER
&page_owner_ops,
#endif
diff --git a/mm/page_owner.c b/mm/page_owner.c
index d2db436..65e24fb 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -285,7 +285,11 @@
continue;
if (PageBuddy(page)) {
- pfn += (1UL << page_order(page)) - 1;
+ unsigned long freepage_order;
+
+ freepage_order = page_order_unsafe(page);
+ if (freepage_order < MAX_ORDER)
+ pfn += (1UL << freepage_order) - 1;
continue;
}
diff --git a/mm/page_poison.c b/mm/page_poison.c
index 0abd75e..a2f6a4e 100644
--- a/mm/page_poison.c
+++ b/mm/page_poison.c
@@ -6,8 +6,8 @@
#include <linux/poison.h>
#include <linux/ratelimit.h>
-static bool __page_poisoning_enabled __read_mostly;
-static bool want_page_poisoning __read_mostly;
+static bool want_page_poisoning __read_mostly
+ = IS_ENABLED(CONFIG_PAGE_POISONING_ENABLE_DEFAULT);
static int early_page_poison_param(char *buf)
{
@@ -19,74 +19,21 @@
bool page_poisoning_enabled(void)
{
- return __page_poisoning_enabled;
-}
-
-static bool need_page_poisoning(void)
-{
- return want_page_poisoning;
-}
-
-static void init_page_poisoning(void)
-{
/*
- * page poisoning is debug page alloc for some arches. If either
- * of those options are enabled, enable poisoning
+ * Assumes that debug_pagealloc_enabled is set before
+ * free_all_bootmem.
+ * Page poisoning is debug page alloc for some arches. If
+ * either of those options are enabled, enable poisoning.
*/
- if (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC)) {
- if (!want_page_poisoning && !debug_pagealloc_enabled())
- return;
- } else {
- if (!want_page_poisoning)
- return;
- }
-
- __page_poisoning_enabled = true;
-}
-
-struct page_ext_operations page_poisoning_ops = {
- .need = need_page_poisoning,
- .init = init_page_poisoning,
-};
-
-static inline void set_page_poison(struct page *page)
-{
- struct page_ext *page_ext;
-
- page_ext = lookup_page_ext(page);
- if (unlikely(!page_ext))
- return;
-
- __set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
-}
-
-static inline void clear_page_poison(struct page *page)
-{
- struct page_ext *page_ext;
-
- page_ext = lookup_page_ext(page);
- if (unlikely(!page_ext))
- return;
-
- __clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
-}
-
-bool page_is_poisoned(struct page *page)
-{
- struct page_ext *page_ext;
-
- page_ext = lookup_page_ext(page);
- if (unlikely(!page_ext))
- return false;
-
- return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
+ return (want_page_poisoning ||
+ (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
+ debug_pagealloc_enabled()));
}
static void poison_page(struct page *page)
{
void *addr = kmap_atomic(page);
- set_page_poison(page);
memset(addr, PAGE_POISON, PAGE_SIZE);
kunmap_atomic(addr);
}
@@ -144,12 +91,13 @@
{
void *addr;
- if (!page_is_poisoned(page))
- return;
-
addr = kmap_atomic(page);
+ /*
+ * Page poisoning when enabled poisons each and every page
+ * that is freed to buddy. Thus no extra check is done to
+ * see if a page was poisoned.
+ */
check_poison_mem(page, addr, PAGE_SIZE);
- clear_page_poison(page);
kunmap_atomic(addr);
}
diff --git a/mm/process_reclaim.c b/mm/process_reclaim.c
new file mode 100644
index 0000000..36516eb
--- /dev/null
+++ b/mm/process_reclaim.c
@@ -0,0 +1,253 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/sort.h>
+#include <linux/oom.h>
+#include <linux/sched.h>
+#include <linux/rcupdate.h>
+#include <linux/notifier.h>
+#include <linux/vmpressure.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/process_reclaim.h>
+
+#define MAX_SWAP_TASKS SWAP_CLUSTER_MAX
+
+static void swap_fn(struct work_struct *work);
+DECLARE_WORK(swap_work, swap_fn);
+
+/* User knob to enable/disable process reclaim feature */
+static int enable_process_reclaim;
+module_param_named(enable_process_reclaim, enable_process_reclaim, int, 0644);
+
+/* The max number of pages tried to be reclaimed in a single run */
+int per_swap_size = SWAP_CLUSTER_MAX * 32;
+module_param_named(per_swap_size, per_swap_size, int, 0644);
+
+int reclaim_avg_efficiency;
+module_param_named(reclaim_avg_efficiency, reclaim_avg_efficiency, int, 0444);
+
+/* The vmpressure region where process reclaim operates */
+static unsigned long pressure_min = 50;
+static unsigned long pressure_max = 90;
+module_param_named(pressure_min, pressure_min, ulong, 0644);
+module_param_named(pressure_max, pressure_max, ulong, 0644);
+
+static short min_score_adj = 360;
+module_param_named(min_score_adj, min_score_adj, short, 0644);
+
+/*
+ * Scheduling process reclaim workqueue unnecessarily
+ * when the reclaim efficiency is low does not make
+ * sense. We try to detect a drop in efficiency and
+ * disable reclaim for a time period. This period and the
+ * period for which we monitor a drop in efficiency is
+ * defined by swap_eff_win. swap_opt_eff is the optimal
+ * efficiency used as threshold for this.
+ */
+static int swap_eff_win = 2;
+module_param_named(swap_eff_win, swap_eff_win, int, 0644);
+
+static int swap_opt_eff = 50;
+module_param_named(swap_opt_eff, swap_opt_eff, int, 0644);
+
+static atomic_t skip_reclaim = ATOMIC_INIT(0);
+/* Not atomic since only a single instance of swap_fn run at a time */
+static int monitor_eff;
+
+struct selected_task {
+ struct task_struct *p;
+ int tasksize;
+ short oom_score_adj;
+};
+
+int selected_cmp(const void *a, const void *b)
+{
+ const struct selected_task *x = a;
+ const struct selected_task *y = b;
+ int ret;
+
+ ret = x->tasksize < y->tasksize ? -1 : 1;
+
+ return ret;
+}
+
+static int test_task_flag(struct task_struct *p, int flag)
+{
+ struct task_struct *t = p;
+
+ rcu_read_lock();
+ for_each_thread(p, t) {
+ task_lock(t);
+ if (test_tsk_thread_flag(t, flag)) {
+ task_unlock(t);
+ rcu_read_unlock();
+ return 1;
+ }
+ task_unlock(t);
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static void swap_fn(struct work_struct *work)
+{
+ struct task_struct *tsk;
+ struct reclaim_param rp;
+
+ /* Pick the best MAX_SWAP_TASKS tasks in terms of anon size */
+ struct selected_task selected[MAX_SWAP_TASKS] = {{0, 0, 0},};
+ int si = 0;
+ int i;
+ int tasksize;
+ int total_sz = 0;
+ int total_scan = 0;
+ int total_reclaimed = 0;
+ int nr_to_reclaim;
+ int efficiency;
+
+ rcu_read_lock();
+ for_each_process(tsk) {
+ struct task_struct *p;
+ short oom_score_adj;
+
+ if (tsk->flags & PF_KTHREAD)
+ continue;
+
+ if (test_task_flag(tsk, TIF_MEMDIE))
+ continue;
+
+ p = find_lock_task_mm(tsk);
+ if (!p)
+ continue;
+
+ oom_score_adj = p->signal->oom_score_adj;
+ if (oom_score_adj < min_score_adj) {
+ task_unlock(p);
+ continue;
+ }
+
+ tasksize = get_mm_counter(p->mm, MM_ANONPAGES);
+ task_unlock(p);
+
+ if (tasksize <= 0)
+ continue;
+
+ if (si == MAX_SWAP_TASKS) {
+ sort(&selected[0], MAX_SWAP_TASKS,
+ sizeof(struct selected_task),
+ &selected_cmp, NULL);
+ if (tasksize < selected[0].tasksize)
+ continue;
+ selected[0].p = p;
+ selected[0].oom_score_adj = oom_score_adj;
+ selected[0].tasksize = tasksize;
+ } else {
+ selected[si].p = p;
+ selected[si].oom_score_adj = oom_score_adj;
+ selected[si].tasksize = tasksize;
+ si++;
+ }
+ }
+
+ for (i = 0; i < si; i++)
+ total_sz += selected[i].tasksize;
+
+ /* Skip reclaim if total size is too small */
+ if (total_sz < SWAP_CLUSTER_MAX) {
+ rcu_read_unlock();
+ return;
+ }
+
+ for (i = 0; i < si; i++)
+ get_task_struct(selected[i].p);
+
+ rcu_read_unlock();
+
+ while (si--) {
+ nr_to_reclaim =
+ (selected[si].tasksize * per_swap_size) / total_sz;
+ /* scan at least a page */
+ if (!nr_to_reclaim)
+ nr_to_reclaim = 1;
+
+ rp = reclaim_task_anon(selected[si].p, nr_to_reclaim);
+
+ trace_process_reclaim(selected[si].tasksize,
+ selected[si].oom_score_adj, rp.nr_scanned,
+ rp.nr_reclaimed, per_swap_size, total_sz,
+ nr_to_reclaim);
+ total_scan += rp.nr_scanned;
+ total_reclaimed += rp.nr_reclaimed;
+ put_task_struct(selected[si].p);
+ }
+
+ if (total_scan) {
+ efficiency = (total_reclaimed * 100) / total_scan;
+
+ if (efficiency < swap_opt_eff) {
+ if (++monitor_eff == swap_eff_win) {
+ atomic_set(&skip_reclaim, swap_eff_win);
+ monitor_eff = 0;
+ }
+ } else {
+ monitor_eff = 0;
+ }
+
+ reclaim_avg_efficiency =
+ (efficiency + reclaim_avg_efficiency) / 2;
+ trace_process_reclaim_eff(efficiency, reclaim_avg_efficiency);
+ }
+}
+
+static int vmpressure_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ unsigned long pressure = action;
+
+ if (!enable_process_reclaim)
+ return 0;
+
+ if (!current_is_kswapd())
+ return 0;
+
+ if (atomic_dec_if_positive(&skip_reclaim) >= 0)
+ return 0;
+
+ if ((pressure >= pressure_min) && (pressure < pressure_max))
+ if (!work_pending(&swap_work))
+ queue_work(system_unbound_wq, &swap_work);
+ return 0;
+}
+
+static struct notifier_block vmpr_nb = {
+ .notifier_call = vmpressure_notifier,
+};
+
+static int __init process_reclaim_init(void)
+{
+ vmpressure_notifier_register(&vmpr_nb);
+ return 0;
+}
+
+static void __exit process_reclaim_exit(void)
+{
+ vmpressure_notifier_unregister(&vmpr_nb);
+}
+
+module_init(process_reclaim_init);
+module_exit(process_reclaim_exit);
diff --git a/mm/rmap.c b/mm/rmap.c
index cd37c1c..dfb19f0 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1616,9 +1616,12 @@
* try_to_unmap - try to remove all page table mappings to a page
* @page: the page to get unmapped
* @flags: action and flags
+ * @vma : target vma for reclaim
*
* Tries to remove all the page table entries which are mapping this
* page, used in the pageout path. Caller must hold the page lock.
+ * If @vma is not NULL, this function tries to remove @page from only @vma
+ * without walking all other vmas that map @page.
* Return values are:
*
* SWAP_SUCCESS - we succeeded in removing all mappings
@@ -1626,7 +1629,8 @@
* SWAP_FAIL - the page is unswappable
* SWAP_MLOCK - page is mlocked.
*/
-int try_to_unmap(struct page *page, enum ttu_flags flags)
+int try_to_unmap(struct page *page, enum ttu_flags flags,
+ struct vm_area_struct *vma)
{
int ret;
struct rmap_private rp = {
@@ -1639,6 +1643,7 @@
.arg = &rp,
.done = page_mapcount_is_zero,
.anon_lock = page_lock_anon_vma_read,
+ .target_vma = vma,
};
/*
@@ -1698,6 +1703,7 @@
.arg = &rp,
.done = page_not_mapped,
.anon_lock = page_lock_anon_vma_read,
+ .target_vma = NULL,
};
@@ -1760,6 +1766,11 @@
struct anon_vma_chain *avc;
int ret = SWAP_AGAIN;
+ if (rwc->target_vma) {
+ unsigned long address = vma_address(page, rwc->target_vma);
+ return rwc->rmap_one(page, rwc->target_vma, address, rwc->arg);
+ }
+
if (locked) {
anon_vma = page_anon_vma(page);
/* anon_vma disappear under us? */
@@ -1767,6 +1778,7 @@
} else {
anon_vma = rmap_walk_anon_lock(page, rwc);
}
+
if (!anon_vma)
return ret;
@@ -1811,6 +1823,7 @@
struct address_space *mapping = page_mapping(page);
pgoff_t pgoff;
struct vm_area_struct *vma;
+ unsigned long address;
int ret = SWAP_AGAIN;
/*
@@ -1827,6 +1840,13 @@
pgoff = page_to_pgoff(page);
if (!locked)
i_mmap_lock_read(mapping);
+
+ if (rwc->target_vma) {
+ address = vma_address(page, rwc->target_vma);
+ ret = rwc->rmap_one(page, rwc->target_vma, address, rwc->arg);
+ goto done;
+ }
+
vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
unsigned long address = vma_address(page, vma);
diff --git a/mm/shmem.c b/mm/shmem.c
index e9c2b6e..142887f 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -412,6 +412,7 @@
struct shrink_control *sc, unsigned long nr_to_split)
{
LIST_HEAD(list), *pos, *next;
+ LIST_HEAD(to_remove);
struct inode *inode;
struct shmem_inode_info *info;
struct page *page;
@@ -438,9 +439,8 @@
/* Check if there's anything to gain */
if (round_up(inode->i_size, PAGE_SIZE) ==
round_up(inode->i_size, HPAGE_PMD_SIZE)) {
- list_del_init(&info->shrinklist);
+ list_move(&info->shrinklist, &to_remove);
removed++;
- iput(inode);
goto next;
}
@@ -451,6 +451,13 @@
}
spin_unlock(&sbinfo->shrinklist_lock);
+ list_for_each_safe(pos, next, &to_remove) {
+ info = list_entry(pos, struct shmem_inode_info, shrinklist);
+ inode = &info->vfs_inode;
+ list_del_init(&info->shrinklist);
+ iput(inode);
+ }
+
list_for_each_safe(pos, next, &list) {
int ret;
diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c
index 310ac0b..3405b4e 100644
--- a/mm/swap_cgroup.c
+++ b/mm/swap_cgroup.c
@@ -48,6 +48,9 @@
if (!page)
goto not_enough_page;
ctrl->map[idx] = page;
+
+ if (!(idx % SWAP_CLUSTER_MAX))
+ cond_resched();
}
return 0;
not_enough_page:
@@ -201,6 +204,8 @@
struct page *page = map[i];
if (page)
__free_page(page);
+ if (!(i % SWAP_CLUSTER_MAX))
+ cond_resched();
}
vfree(map);
}
diff --git a/mm/swap_ratio.c b/mm/swap_ratio.c
new file mode 100644
index 0000000..4ca5783
--- /dev/null
+++ b/mm/swap_ratio.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/mm_types.h>
+#include <linux/swapfile.h>
+#include <linux/swap.h>
+
+#define SWAP_RATIO_GROUP_START (SWAP_FLAG_PRIO_MASK - 9) /* 32758 */
+#define SWAP_RATIO_GROUP_END (SWAP_FLAG_PRIO_MASK) /* 32767 */
+#define SWAP_FAST_WRITES (SWAPFILE_CLUSTER * (SWAP_CLUSTER_MAX / 8))
+#define SWAP_SLOW_WRITES SWAPFILE_CLUSTER
+
+/*
+ * The fast/slow swap write ratio.
+ * 100 indicates that all writes should
+ * go to fast swap device.
+ */
+int sysctl_swap_ratio = 100;
+
+/* Enable the swap ratio feature */
+int sysctl_swap_ratio_enable;
+
+static bool is_same_group(struct swap_info_struct *a,
+ struct swap_info_struct *b)
+{
+ if (!sysctl_swap_ratio_enable)
+ return false;
+
+ if (!is_swap_ratio_group(a->prio))
+ return false;
+
+ if (a->prio == b->prio)
+ return true;
+
+ return false;
+}
+
+/* Caller must hold swap_avail_lock */
+static int calculate_write_pending(struct swap_info_struct *si,
+ struct swap_info_struct *n)
+{
+ int ratio = sysctl_swap_ratio;
+
+ if ((ratio < 0) || (ratio > 100))
+ return -EINVAL;
+
+ if (WARN_ON(!(si->flags & SWP_FAST)))
+ return -ENODEV;
+
+ if ((n->flags & SWP_FAST) || !is_same_group(si, n))
+ return -ENODEV;
+
+ si->max_writes = ratio ? SWAP_FAST_WRITES : 0;
+ n->max_writes = ratio ? (SWAP_FAST_WRITES * 100) /
+ ratio - SWAP_FAST_WRITES : SWAP_SLOW_WRITES;
+
+ si->write_pending = si->max_writes;
+ n->write_pending = n->max_writes;
+
+ return 0;
+}
+
+static int swap_ratio_slow(struct swap_info_struct **si)
+{
+ struct swap_info_struct *n = NULL;
+ int ret = 0;
+
+ spin_lock(&(*si)->lock);
+ spin_lock(&swap_avail_lock);
+ if (&(*si)->avail_list == plist_last(&swap_avail_head)) {
+ /* just to make skip work */
+ n = *si;
+ ret = -ENODEV;
+ goto skip;
+ }
+ n = plist_next_entry(&(*si)->avail_list,
+ struct swap_info_struct,
+ avail_list);
+ if (n == *si) {
+ /* No other swap device */
+ ret = -ENODEV;
+ goto skip;
+ }
+
+ spin_unlock(&swap_avail_lock);
+ spin_lock(&n->lock);
+ spin_lock(&swap_avail_lock);
+
+ if ((*si)->flags & SWP_FAST) {
+ if ((*si)->write_pending) {
+ (*si)->write_pending--;
+ goto exit;
+ } else {
+ if ((n->flags & SWP_FAST) || !is_same_group(*si, n)) {
+ /* Should never happen */
+ ret = -ENODEV;
+ } else if (n->write_pending) {
+ /*
+ * Requeue fast device, since there are pending
+ * writes for slow device.
+ */
+ plist_requeue(&(*si)->avail_list,
+ &swap_avail_head);
+ n->write_pending--;
+ spin_unlock(&(*si)->lock);
+ *si = n;
+ goto skip;
+ } else {
+ if (calculate_write_pending(*si, n) < 0) {
+ ret = -ENODEV;
+ goto exit;
+ }
+ /* Restart from fast device */
+ (*si)->write_pending--;
+ }
+ }
+ } else {
+ if (!(n->flags & SWP_FAST) || !is_same_group(*si, n)) {
+ /* Should never happen */
+ ret = -ENODEV;
+ } else if (n->write_pending) {
+ /*
+ * Pending writes for fast device.
+ * We reach here when slow device is swapped on first,
+ * before fast device.
+ */
+ /* requeue slow device to the end */
+ plist_requeue(&(*si)->avail_list, &swap_avail_head);
+ n->write_pending--;
+ spin_unlock(&(*si)->lock);
+ *si = n;
+ goto skip;
+ } else {
+ if ((*si)->write_pending) {
+ (*si)->write_pending--;
+ } else {
+ if (calculate_write_pending(n, *si) < 0) {
+ ret = -ENODEV;
+ goto exit;
+ }
+ n->write_pending--;
+ plist_requeue(&(*si)->avail_list,
+ &swap_avail_head);
+ spin_unlock(&(*si)->lock);
+ *si = n;
+ goto skip;
+ }
+ }
+ }
+exit:
+ spin_unlock(&(*si)->lock);
+skip:
+ spin_unlock(&swap_avail_lock);
+ /* n and si may have been interchanged above */
+ spin_unlock(&n->lock);
+ return ret;
+}
+
+bool is_swap_ratio_group(int prio)
+{
+ return ((prio >= SWAP_RATIO_GROUP_START) &&
+ (prio <= SWAP_RATIO_GROUP_END)) ? true : false;
+}
+
+void setup_swap_ratio(struct swap_info_struct *p, int prio)
+{
+ /* Used only if sysctl_swap_ratio_enable is set */
+ if (is_swap_ratio_group(prio)) {
+ if (p->flags & SWP_FAST)
+ p->write_pending = SWAP_FAST_WRITES;
+ else
+ p->write_pending = SWAP_SLOW_WRITES;
+ p->max_writes = p->write_pending;
+ }
+}
+
+int swap_ratio(struct swap_info_struct **si)
+{
+ if (!sysctl_swap_ratio_enable)
+ return -ENODEV;
+
+ if (is_swap_ratio_group((*si)->prio))
+ return swap_ratio_slow(si);
+ else
+ return -ENODEV;
+}
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 35d7e0e..5ac5846 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -478,7 +478,7 @@
unsigned long mask;
struct blk_plug plug;
- mask = swapin_nr_pages(offset) - 1;
+ mask = is_swap_fast(entry) ? 0 : swapin_nr_pages(offset) - 1;
if (!mask)
goto skip;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index d76b2a1..9cf2595 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -81,8 +81,8 @@
* is held and the locking order requires swap_lock to be taken
* before any swap_info_struct->lock.
*/
-static PLIST_HEAD(swap_avail_head);
-static DEFINE_SPINLOCK(swap_avail_lock);
+PLIST_HEAD(swap_avail_head);
+DEFINE_SPINLOCK(swap_avail_lock);
struct swap_info_struct *swap_info[MAX_SWAPFILES];
@@ -97,6 +97,26 @@
return ent & ~SWAP_HAS_CACHE; /* may include SWAP_HAS_CONT flag */
}
+bool is_swap_fast(swp_entry_t entry)
+{
+ struct swap_info_struct *p;
+ unsigned long type;
+
+ if (non_swap_entry(entry))
+ return false;
+
+ type = swp_type(entry);
+ if (type >= nr_swapfiles)
+ return false;
+
+ p = swap_info[type];
+
+ if (p->flags & SWP_FAST)
+ return true;
+
+ return false;
+}
+
/* returns 1 if swap entry is freed */
static int
__try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
@@ -196,7 +216,6 @@
}
}
-#define SWAPFILE_CLUSTER 256
#define LATENCY_LIMIT 256
static inline void cluster_set_flag(struct swap_cluster_info *info,
@@ -573,7 +592,7 @@
scan_base = offset = si->lowest_bit;
/* reuse swap entry of cache-only swap if not busy. */
- if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+ if (vm_swap_full(si) && si->swap_map[offset] == SWAP_HAS_CACHE) {
int swap_was_freed;
spin_unlock(&si->lock);
swap_was_freed = __try_to_reclaim_swap(si, offset);
@@ -613,7 +632,8 @@
spin_lock(&si->lock);
goto checks;
}
- if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+ if (vm_swap_full(si) &&
+ si->swap_map[offset] == SWAP_HAS_CACHE) {
spin_lock(&si->lock);
goto checks;
}
@@ -628,7 +648,8 @@
spin_lock(&si->lock);
goto checks;
}
- if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+ if (vm_swap_full(si) &&
+ si->swap_map[offset] == SWAP_HAS_CACHE) {
spin_lock(&si->lock);
goto checks;
}
@@ -649,18 +670,39 @@
{
struct swap_info_struct *si, *next;
pgoff_t offset;
+ int swap_ratio_off = 0;
if (atomic_long_read(&nr_swap_pages) <= 0)
goto noswap;
atomic_long_dec(&nr_swap_pages);
+lock_and_start:
spin_lock(&swap_avail_lock);
start_over:
plist_for_each_entry_safe(si, next, &swap_avail_head, avail_list) {
+
+ if (sysctl_swap_ratio && !swap_ratio_off) {
+ int ret;
+
+ spin_unlock(&swap_avail_lock);
+ ret = swap_ratio(&si);
+ if (ret < 0) {
+ /*
+ * Error. Start again with swap
+ * ratio disabled.
+ */
+ swap_ratio_off = 1;
+ goto lock_and_start;
+ } else {
+ goto start;
+ }
+ }
+
/* requeue si to after same-priority siblings */
plist_requeue(&si->avail_list, &swap_avail_head);
spin_unlock(&swap_avail_lock);
+start:
spin_lock(&si->lock);
if (!si->highest_bit || !(si->flags & SWP_WRITEOK)) {
spin_lock(&swap_avail_lock);
@@ -2539,11 +2581,16 @@
}
}
+ if (p->bdev && blk_queue_fast(bdev_get_queue(p->bdev)))
+ p->flags |= SWP_FAST;
+
mutex_lock(&swapon_mutex);
prio = -1;
- if (swap_flags & SWAP_FLAG_PREFER)
+ if (swap_flags & SWAP_FLAG_PREFER) {
prio =
(swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
+ setup_swap_ratio(p, prio);
+ }
enable_swap_info(p, prio, swap_map, cluster_info, frontswap_map);
pr_info("Adding %uk swap on %s. Priority:%d extents:%d across:%lluk %s%s%s%s%s\n",
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index f2481cb..ed89128 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -244,11 +244,21 @@
*/
VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
+ /*
+ * Don't dereference bad PUD or PMD (below) entries. This will also
+ * identify huge mappings, which we may encounter on architectures
+ * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be
+ * identified as vmalloc addresses by is_vmalloc_addr(), but are
+ * not [unambiguously] associated with a struct page, so there is
+ * no correct value to return for them.
+ */
if (!pgd_none(*pgd)) {
pud_t *pud = pud_offset(pgd, addr);
- if (!pud_none(*pud)) {
+ WARN_ON_ONCE(pud_bad(*pud));
+ if (!pud_none(*pud) && !pud_bad(*pud)) {
pmd_t *pmd = pmd_offset(pud, addr);
- if (!pmd_none(*pmd)) {
+ WARN_ON_ONCE(pmd_bad(*pmd));
+ if (!pmd_none(*pmd) && !pmd_bad(*pmd)) {
pte_t *ptep, pte;
ptep = pte_offset_map(pmd, addr);
@@ -291,6 +301,57 @@
static unsigned long vmap_area_pcpu_hole;
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+#define POSSIBLE_VMALLOC_START PAGE_OFFSET
+
+#define VMALLOC_BITMAP_SIZE ((VMALLOC_END - PAGE_OFFSET) >> \
+ PAGE_SHIFT)
+#define VMALLOC_TO_BIT(addr) ((addr - PAGE_OFFSET) >> PAGE_SHIFT)
+#define BIT_TO_VMALLOC(i) (PAGE_OFFSET + i * PAGE_SIZE)
+
+unsigned long total_vmalloc_size;
+unsigned long vmalloc_reserved;
+
+DECLARE_BITMAP(possible_areas, VMALLOC_BITMAP_SIZE);
+
+void mark_vmalloc_reserved_area(void *x, unsigned long size)
+{
+ unsigned long addr = (unsigned long)x;
+
+ bitmap_set(possible_areas, VMALLOC_TO_BIT(addr), size >> PAGE_SHIFT);
+ vmalloc_reserved += size;
+}
+
+int is_vmalloc_addr(const void *x)
+{
+ unsigned long addr = (unsigned long)x;
+
+ if (addr < POSSIBLE_VMALLOC_START || addr >= VMALLOC_END)
+ return 0;
+
+ if (test_bit(VMALLOC_TO_BIT(addr), possible_areas))
+ return 0;
+
+ return 1;
+}
+
+static void calc_total_vmalloc_size(void)
+{
+ total_vmalloc_size = VMALLOC_END - POSSIBLE_VMALLOC_START -
+ vmalloc_reserved;
+}
+#else
+int is_vmalloc_addr(const void *x)
+{
+ unsigned long addr = (unsigned long)x;
+
+ return addr >= VMALLOC_START && addr < VMALLOC_END;
+}
+
+static void calc_total_vmalloc_size(void) { }
+#endif
+EXPORT_SYMBOL(is_vmalloc_addr);
+
static struct vmap_area *__find_vmap_area(unsigned long addr)
{
struct rb_node *n = vmap_area_root.rb_node;
@@ -365,7 +426,7 @@
BUG_ON(offset_in_page(size));
BUG_ON(!is_power_of_2(align));
- might_sleep_if(gfpflags_allow_blocking(gfp_mask));
+ might_sleep();
va = kmalloc_node(sizeof(struct vmap_area),
gfp_mask & GFP_RECLAIM_MASK, node);
@@ -601,6 +662,13 @@
static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
+/*
+ * Serialize vmap purging. There is no actual critical section protected
+ * by this lock, but we want to avoid concurrent calls for performance
+ * reasons and to make the pcpu_get_vm_areas more deterministic.
+ */
+static DEFINE_MUTEX(vmap_purge_lock);
+
/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);
@@ -615,59 +683,40 @@
/*
* Purges all lazily-freed vmap areas.
- *
- * If sync is 0 then don't purge if there is already a purge in progress.
- * If force_flush is 1, then flush kernel TLBs between *start and *end even
- * if we found no lazy vmap areas to unmap (callers can use this to optimise
- * their own TLB flushing).
- * Returns with *start = min(*start, lowest purged address)
- * *end = max(*end, highest purged address)
*/
-static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
- int sync, int force_flush)
+static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
{
- static DEFINE_SPINLOCK(purge_lock);
struct llist_node *valist;
struct vmap_area *va;
struct vmap_area *n_va;
- int nr = 0;
+ bool do_free = false;
- /*
- * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
- * should not expect such behaviour. This just simplifies locking for
- * the case that isn't actually used at the moment anyway.
- */
- if (!sync && !force_flush) {
- if (!spin_trylock(&purge_lock))
- return;
- } else
- spin_lock(&purge_lock);
-
- if (sync)
- purge_fragmented_blocks_allcpus();
+ lockdep_assert_held(&vmap_purge_lock);
valist = llist_del_all(&vmap_purge_list);
llist_for_each_entry(va, valist, purge_list) {
- if (va->va_start < *start)
- *start = va->va_start;
- if (va->va_end > *end)
- *end = va->va_end;
- nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
+ if (va->va_start < start)
+ start = va->va_start;
+ if (va->va_end > end)
+ end = va->va_end;
+ do_free = true;
}
- if (nr)
+ if (!do_free)
+ return false;
+
+ flush_tlb_kernel_range(start, end);
+
+ spin_lock(&vmap_area_lock);
+ llist_for_each_entry_safe(va, n_va, valist, purge_list) {
+ int nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
+
+ __free_vmap_area(va);
atomic_sub(nr, &vmap_lazy_nr);
-
- if (nr || force_flush)
- flush_tlb_kernel_range(*start, *end);
-
- if (nr) {
- spin_lock(&vmap_area_lock);
- llist_for_each_entry_safe(va, n_va, valist, purge_list)
- __free_vmap_area(va);
- spin_unlock(&vmap_area_lock);
+ cond_resched_lock(&vmap_area_lock);
}
- spin_unlock(&purge_lock);
+ spin_unlock(&vmap_area_lock);
+ return true;
}
/*
@@ -676,9 +725,10 @@
*/
static void try_purge_vmap_area_lazy(void)
{
- unsigned long start = ULONG_MAX, end = 0;
-
- __purge_vmap_area_lazy(&start, &end, 0, 0);
+ if (mutex_trylock(&vmap_purge_lock)) {
+ __purge_vmap_area_lazy(ULONG_MAX, 0);
+ mutex_unlock(&vmap_purge_lock);
+ }
}
/*
@@ -686,9 +736,10 @@
*/
static void purge_vmap_area_lazy(void)
{
- unsigned long start = ULONG_MAX, end = 0;
-
- __purge_vmap_area_lazy(&start, &end, 1, 0);
+ mutex_lock(&vmap_purge_lock);
+ purge_fragmented_blocks_allcpus();
+ __purge_vmap_area_lazy(ULONG_MAX, 0);
+ mutex_unlock(&vmap_purge_lock);
}
/*
@@ -711,22 +762,13 @@
}
/*
- * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
- * called for the correct range previously.
- */
-static void free_unmap_vmap_area_noflush(struct vmap_area *va)
-{
- unmap_vmap_area(va);
- free_vmap_area_noflush(va);
-}
-
-/*
* Free and unmap a vmap area
*/
static void free_unmap_vmap_area(struct vmap_area *va)
{
flush_cache_vunmap(va->va_start, va->va_end);
- free_unmap_vmap_area_noflush(va);
+ unmap_vmap_area(va);
+ free_vmap_area_noflush(va);
}
static struct vmap_area *find_vmap_area(unsigned long addr)
@@ -740,16 +782,6 @@
return va;
}
-static void free_unmap_vmap_area_addr(unsigned long addr)
-{
- struct vmap_area *va;
-
- va = find_vmap_area(addr);
- BUG_ON(!va);
- free_unmap_vmap_area(va);
-}
-
-
/*** Per cpu kva allocator ***/
/*
@@ -1070,6 +1102,8 @@
if (unlikely(!vmap_initialized))
return;
+ might_sleep();
+
for_each_possible_cpu(cpu) {
struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
struct vmap_block *vb;
@@ -1094,7 +1128,11 @@
rcu_read_unlock();
}
- __purge_vmap_area_lazy(&start, &end, 1, flush);
+ mutex_lock(&vmap_purge_lock);
+ purge_fragmented_blocks_allcpus();
+ if (!__purge_vmap_area_lazy(start, end) && flush)
+ flush_tlb_kernel_range(start, end);
+ mutex_unlock(&vmap_purge_lock);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);
@@ -1107,7 +1145,9 @@
{
unsigned long size = (unsigned long)count << PAGE_SHIFT;
unsigned long addr = (unsigned long)mem;
+ struct vmap_area *va;
+ might_sleep();
BUG_ON(!addr);
BUG_ON(addr < VMALLOC_START);
BUG_ON(addr > VMALLOC_END);
@@ -1116,10 +1156,14 @@
debug_check_no_locks_freed(mem, size);
vmap_debug_free_range(addr, addr+size);
- if (likely(count <= VMAP_MAX_ALLOC))
+ if (likely(count <= VMAP_MAX_ALLOC)) {
vb_free(mem, size);
- else
- free_unmap_vmap_area_addr(addr);
+ return;
+ }
+
+ va = find_vmap_area(addr);
+ BUG_ON(!va);
+ free_unmap_vmap_area(va);
}
EXPORT_SYMBOL(vm_unmap_ram);
@@ -1168,6 +1212,33 @@
EXPORT_SYMBOL(vm_map_ram);
static struct vm_struct *vmlist __initdata;
+
+/**
+ * vm_area_check_early - check if vmap area is already mapped
+ * @vm: vm_struct to be checked
+ *
+ * This function is used to check if the vmap area has been
+ * mapped already. @vm->addr, @vm->size and @vm->flags should
+ * contain proper values.
+ *
+ */
+int __init vm_area_check_early(struct vm_struct *vm)
+{
+ struct vm_struct *tmp, **p;
+
+ BUG_ON(vmap_initialized);
+ for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
+ if (tmp->addr >= vm->addr) {
+ if (tmp->addr < vm->addr + vm->size)
+ return 1;
+ } else {
+ if (tmp->addr + tmp->size > vm->addr)
+ return 1;
+ }
+ }
+ return 0;
+}
+
/**
* vm_area_add_early - add vmap area early during boot
* @vm: vm_struct to add
@@ -1248,7 +1319,7 @@
}
vmap_area_pcpu_hole = VMALLOC_END;
-
+ calc_total_vmalloc_size();
vmap_initialized = true;
}
@@ -1412,16 +1483,27 @@
*/
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+ return __get_vm_area_node(size, 1, flags, PAGE_OFFSET, VMALLOC_END,
+ NUMA_NO_NODE, GFP_KERNEL,
+ __builtin_return_address(0));
+#else
return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
NUMA_NO_NODE, GFP_KERNEL,
__builtin_return_address(0));
+#endif
}
struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
const void *caller)
{
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+ return __get_vm_area_node(size, 1, flags, PAGE_OFFSET, VMALLOC_END,
+ NUMA_NO_NODE, GFP_KERNEL, caller);
+#else
return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
NUMA_NO_NODE, GFP_KERNEL, caller);
+#endif
}
/**
@@ -1455,6 +1537,8 @@
{
struct vmap_area *va;
+ might_sleep();
+
va = find_vmap_area((unsigned long)addr);
if (va && va->flags & VM_VM_AREA) {
struct vm_struct *vm = va->vm;
@@ -1510,7 +1594,39 @@
kfree(area);
return;
}
-
+
+static inline void __vfree_deferred(const void *addr)
+{
+ /*
+ * Use raw_cpu_ptr() because this can be called from preemptible
+ * context. Preemption is absolutely fine here, because the llist_add()
+ * implementation is lockless, so it works even if we are adding to
+ * another cpu's list. schedule_work() should be fine with this too.
+ */
+ struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
+
+ if (llist_add((struct llist_node *)addr, &p->list))
+ schedule_work(&p->wq);
+}
+
+/**
+ * vfree_atomic - release memory allocated by vmalloc()
+ * @addr: memory base address
+ *
+ * This one is just like vfree() but can be called in any atomic context
+ * except NMIs.
+ */
+void vfree_atomic(const void *addr)
+{
+ BUG_ON(in_nmi());
+
+ kmemleak_free(addr);
+
+ if (!addr)
+ return;
+ __vfree_deferred(addr);
+}
+
/**
* vfree - release memory allocated by vmalloc()
* @addr: memory base address
@@ -1533,11 +1649,9 @@
if (!addr)
return;
- if (unlikely(in_interrupt())) {
- struct vfree_deferred *p = this_cpu_ptr(&vfree_deferred);
- if (llist_add((struct llist_node *)addr, &p->list))
- schedule_work(&p->wq);
- } else
+ if (unlikely(in_interrupt()))
+ __vfree_deferred(addr);
+ else
__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);
@@ -2673,6 +2787,9 @@
if (is_vmalloc_addr(v->pages))
seq_puts(m, " vpages");
+ if (v->flags & VM_LOWMEM)
+ seq_puts(m, " lowmem");
+
show_numa_info(m, v);
seq_putc(m, '\n');
return 0;
diff --git a/mm/vmpressure.c b/mm/vmpressure.c
index 6063581..1306f32 100644
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -22,6 +22,9 @@
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/printk.h>
+#include <linux/notifier.h>
+#include <linux/init.h>
+#include <linux/module.h>
#include <linux/vmpressure.h>
/*
@@ -38,7 +41,7 @@
* TODO: Make the window size depend on machine size, as we do for vmstat
* thresholds. Currently we set it to 512 pages (2MB for 4KB pages).
*/
-static const unsigned long vmpressure_win = SWAP_CLUSTER_MAX * 16;
+static unsigned long vmpressure_win = SWAP_CLUSTER_MAX * 16;
/*
* These thresholds are used when we account memory pressure through
@@ -49,6 +52,33 @@
static const unsigned int vmpressure_level_med = 60;
static const unsigned int vmpressure_level_critical = 95;
+static unsigned long vmpressure_scale_max = 100;
+module_param_named(vmpressure_scale_max, vmpressure_scale_max,
+ ulong, 0644);
+
+/* vmpressure values >= this will be scaled based on allocstalls */
+static unsigned long allocstall_threshold = 70;
+module_param_named(allocstall_threshold, allocstall_threshold,
+ ulong, 0644);
+
+static struct vmpressure global_vmpressure;
+static BLOCKING_NOTIFIER_HEAD(vmpressure_notifier);
+
+int vmpressure_notifier_register(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&vmpressure_notifier, nb);
+}
+
+int vmpressure_notifier_unregister(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&vmpressure_notifier, nb);
+}
+
+static void vmpressure_notify(unsigned long pressure)
+{
+ blocking_notifier_call_chain(&vmpressure_notifier, pressure, NULL);
+}
+
/*
* When there are too little pages left to scan, vmpressure() may miss the
* critical pressure as number of pages will be less than "window size".
@@ -75,6 +105,7 @@
return container_of(work, struct vmpressure, work);
}
+#ifdef CONFIG_MEMCG
static struct vmpressure *vmpressure_parent(struct vmpressure *vmpr)
{
struct cgroup_subsys_state *css = vmpressure_to_css(vmpr);
@@ -85,6 +116,12 @@
return NULL;
return memcg_to_vmpressure(memcg);
}
+#else
+static struct vmpressure *vmpressure_parent(struct vmpressure *vmpr)
+{
+ return NULL;
+}
+#endif
enum vmpressure_levels {
VMPRESSURE_LOW = 0,
@@ -108,7 +145,7 @@
return VMPRESSURE_LOW;
}
-static enum vmpressure_levels vmpressure_calc_level(unsigned long scanned,
+static unsigned long vmpressure_calc_pressure(unsigned long scanned,
unsigned long reclaimed)
{
unsigned long scale = scanned + reclaimed;
@@ -135,7 +172,20 @@
pr_debug("%s: %3lu (s: %lu r: %lu)\n", __func__, pressure,
scanned, reclaimed);
- return vmpressure_level(pressure);
+ return pressure;
+}
+
+static unsigned long vmpressure_account_stall(unsigned long pressure,
+ unsigned long stall, unsigned long scanned)
+{
+ unsigned long scale;
+
+ if (pressure < allocstall_threshold)
+ return pressure;
+
+ scale = ((vmpressure_scale_max - pressure) * stall) / scanned;
+
+ return pressure + scale;
}
struct vmpressure_event {
@@ -169,6 +219,7 @@
struct vmpressure *vmpr = work_to_vmpressure(work);
unsigned long scanned;
unsigned long reclaimed;
+ unsigned long pressure;
enum vmpressure_levels level;
spin_lock(&vmpr->sr_lock);
@@ -191,7 +242,8 @@
vmpr->tree_reclaimed = 0;
spin_unlock(&vmpr->sr_lock);
- level = vmpressure_calc_level(scanned, reclaimed);
+ pressure = vmpressure_calc_pressure(scanned, reclaimed);
+ level = vmpressure_level(pressure);
do {
if (vmpressure_event(vmpr, level))
@@ -203,28 +255,8 @@
} while ((vmpr = vmpressure_parent(vmpr)));
}
-/**
- * vmpressure() - Account memory pressure through scanned/reclaimed ratio
- * @gfp: reclaimer's gfp mask
- * @memcg: cgroup memory controller handle
- * @tree: legacy subtree mode
- * @scanned: number of pages scanned
- * @reclaimed: number of pages reclaimed
- *
- * This function should be called from the vmscan reclaim path to account
- * "instantaneous" memory pressure (scanned/reclaimed ratio). The raw
- * pressure index is then further refined and averaged over time.
- *
- * If @tree is set, vmpressure is in traditional userspace reporting
- * mode: @memcg is considered the pressure root and userspace is
- * notified of the entire subtree's reclaim efficiency.
- *
- * If @tree is not set, reclaim efficiency is recorded for @memcg, and
- * only in-kernel users are notified.
- *
- * This function does not return any value.
- */
-void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
+#ifdef CONFIG_MEMCG
+static void vmpressure_memcg(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
unsigned long scanned, unsigned long reclaimed)
{
struct vmpressure *vmpr = memcg_to_vmpressure(memcg);
@@ -265,6 +297,7 @@
schedule_work(&vmpr->work);
} else {
enum vmpressure_levels level;
+ unsigned long pressure;
/* For now, no users for root-level efficiency */
if (!memcg || memcg == root_mem_cgroup)
@@ -280,7 +313,8 @@
vmpr->scanned = vmpr->reclaimed = 0;
spin_unlock(&vmpr->sr_lock);
- level = vmpressure_calc_level(scanned, reclaimed);
+ pressure = vmpressure_calc_pressure(scanned, reclaimed);
+ level = vmpressure_level(pressure);
if (level > VMPRESSURE_LOW) {
/*
@@ -295,6 +329,106 @@
}
}
}
+#else
+static void vmpressure_memcg(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
+ unsigned long scanned, unsigned long reclaimed) { }
+#endif
+
+static void calculate_vmpressure_win(void)
+{
+ long x;
+
+ x = global_node_page_state(NR_FILE_PAGES) -
+ global_node_page_state(NR_SHMEM) -
+ total_swapcache_pages() +
+ global_page_state(NR_FREE_PAGES);
+ if (x < 1)
+ x = 1;
+ /*
+ * For low (free + cached), vmpressure window should be
+ * small, and high for higher values of (free + cached).
+ * But it should not be linear as well. This ensures
+ * timely vmpressure notifications when system is under
+ * memory pressure, and optimal number of events when
+ * cached is high. The square root function is empirically
+ * found to serve the purpose.
+ */
+ x = int_sqrt(x);
+ vmpressure_win = x;
+}
+
+static void vmpressure_global(gfp_t gfp, unsigned long scanned,
+ unsigned long reclaimed)
+{
+ struct vmpressure *vmpr = &global_vmpressure;
+ unsigned long pressure;
+ unsigned long stall;
+
+ if (!(gfp & (__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_IO | __GFP_FS)))
+ return;
+
+ if (!scanned)
+ return;
+
+ spin_lock(&vmpr->sr_lock);
+ if (!vmpr->scanned)
+ calculate_vmpressure_win();
+
+ vmpr->scanned += scanned;
+ vmpr->reclaimed += reclaimed;
+
+ if (!current_is_kswapd())
+ vmpr->stall += scanned;
+
+ stall = vmpr->stall;
+ scanned = vmpr->scanned;
+ reclaimed = vmpr->reclaimed;
+ spin_unlock(&vmpr->sr_lock);
+
+ if (scanned < vmpressure_win)
+ return;
+
+ spin_lock(&vmpr->sr_lock);
+ vmpr->scanned = 0;
+ vmpr->reclaimed = 0;
+ vmpr->stall = 0;
+ spin_unlock(&vmpr->sr_lock);
+
+ pressure = vmpressure_calc_pressure(scanned, reclaimed);
+ pressure = vmpressure_account_stall(pressure, stall, scanned);
+ vmpressure_notify(pressure);
+}
+
+/**
+ * vmpressure() - Account memory pressure through scanned/reclaimed ratio
+ * @gfp: reclaimer's gfp mask
+ * @memcg: cgroup memory controller handle
+ * @tree: legacy subtree mode
+ * @scanned: number of pages scanned
+ * @reclaimed: number of pages reclaimed
+ *
+ * This function should be called from the vmscan reclaim path to account
+ * "instantaneous" memory pressure (scanned/reclaimed ratio). The raw
+ * pressure index is then further refined and averaged over time.
+ *
+ * If @tree is set, vmpressure is in traditional userspace reporting
+ * mode: @memcg is considered the pressure root and userspace is
+ * notified of the entire subtree's reclaim efficiency.
+ *
+ * If @tree is not set, reclaim efficiency is recorded for @memcg, and
+ * only in-kernel users are notified.
+ *
+ * This function does not return any value.
+ */
+void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
+ unsigned long scanned, unsigned long reclaimed)
+{
+ if (!memcg)
+ vmpressure_global(gfp, scanned, reclaimed);
+
+ if (IS_ENABLED(CONFIG_MEMCG))
+ vmpressure_memcg(gfp, memcg, tree, scanned, reclaimed);
+}
/**
* vmpressure_prio() - Account memory pressure through reclaimer priority level
@@ -427,3 +561,10 @@
*/
flush_work(&vmpr->work);
}
+
+static int vmpressure_global_init(void)
+{
+ vmpressure_init(&global_vmpressure);
+ return 0;
+}
+late_initcall(vmpressure_global_init);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b4d398b..7b5848cf 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -108,6 +108,13 @@
/* Number of pages freed so far during a call to shrink_zones() */
unsigned long nr_reclaimed;
+
+ /*
+ * Reclaim pages from a vma. If the page is shared by other tasks
+ * it is zapped from a vma without reclaim so it ends up remaining
+ * on memory until last task zap it.
+ */
+ struct vm_area_struct *target_vma;
};
#ifdef ARCH_HAS_PREFETCH
@@ -963,7 +970,7 @@
struct address_space *mapping;
struct page *page;
int may_enter_fs;
- enum page_references references = PAGEREF_RECLAIM_CLEAN;
+ enum page_references references = PAGEREF_RECLAIM;
bool dirty, writeback;
bool lazyfree = false;
int ret = SWAP_SUCCESS;
@@ -977,6 +984,8 @@
goto keep;
VM_BUG_ON_PAGE(PageActive(page), page);
+ if (pgdat)
+ VM_BUG_ON_PAGE(page_pgdat(page) != pgdat, page);
sc->nr_scanned++;
@@ -1055,7 +1064,7 @@
/* Case 1 above */
if (current_is_kswapd() &&
PageReclaim(page) &&
- test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
+ (pgdat && test_bit(PGDAT_WRITEBACK, &pgdat->flags))) {
nr_immediate++;
goto keep_locked;
@@ -1129,7 +1138,8 @@
if (page_mapped(page) && mapping) {
switch (ret = try_to_unmap(page, lazyfree ?
(ttu_flags | TTU_BATCH_FLUSH | TTU_LZFREE) :
- (ttu_flags | TTU_BATCH_FLUSH))) {
+ (ttu_flags | TTU_BATCH_FLUSH),
+ sc->target_vma)) {
case SWAP_FAIL:
goto activate_locked;
case SWAP_AGAIN:
@@ -1151,7 +1161,8 @@
*/
if (page_is_file_cache(page) &&
(!current_is_kswapd() ||
- !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
+ (pgdat &&
+ !test_bit(PGDAT_DIRTY, &pgdat->flags)))) {
/*
* Immediately reclaim when written back.
* Similar in principal to deactivate_page()
@@ -1267,6 +1278,13 @@
* appear not as the counts should be low
*/
list_add(&page->lru, &free_pages);
+ /*
+ * If pagelist are from multiple zones, we should decrease
+ * NR_ISOLATED_ANON + x on freed pages in here.
+ */
+ if (!pgdat)
+ dec_node_page_state(page, NR_ISOLATED_ANON +
+ page_is_file_cache(page));
continue;
cull_mlocked:
@@ -1312,6 +1330,8 @@
.gfp_mask = GFP_KERNEL,
.priority = DEF_PRIORITY,
.may_unmap = 1,
+		/* Do not allow writing out dirty pages */
+		.may_writepage = 0,
};
unsigned long ret, dummy1, dummy2, dummy3, dummy4, dummy5;
struct page *page, *next;
@@ -1333,6 +1353,42 @@
return ret;
}
+#ifdef CONFIG_PROCESS_RECLAIM
+unsigned long reclaim_pages_from_list(struct list_head *page_list,
+ struct vm_area_struct *vma)
+{
+ struct scan_control sc = {
+ .gfp_mask = GFP_KERNEL,
+ .priority = DEF_PRIORITY,
+ .may_writepage = 1,
+ .may_unmap = 1,
+ .may_swap = 1,
+ .target_vma = vma,
+ };
+
+ unsigned long nr_reclaimed;
+ struct page *page;
+ unsigned long dummy1, dummy2, dummy3, dummy4, dummy5;
+
+ list_for_each_entry(page, page_list, lru)
+ ClearPageActive(page);
+
+ nr_reclaimed = shrink_page_list(page_list, NULL, &sc,
+ TTU_UNMAP|TTU_IGNORE_ACCESS,
+ &dummy1, &dummy2, &dummy3, &dummy4, &dummy5, true);
+
+ while (!list_empty(page_list)) {
+ page = lru_to_page(page_list);
+ list_del(&page->lru);
+ dec_node_page_state(page, NR_ISOLATED_ANON +
+ page_is_file_cache(page));
+ putback_lru_page(page);
+ }
+
+ return nr_reclaimed;
+}
+#endif
+
/*
* Attempt to remove the specified page from its LRU. Only take this page
* if it is of the appropriate PageActive status. Pages which are being
@@ -1585,30 +1641,31 @@
return ret;
}
-/*
- * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
- * then get resheduled. When there are massive number of tasks doing page
- * allocation, such sleeping direct reclaimers may keep piling up on each CPU,
- * the LRU list will go small and be scanned faster than necessary, leading to
- * unnecessary swapping, thrashing and OOM.
- */
-static int too_many_isolated(struct pglist_data *pgdat, int file,
- struct scan_control *sc)
+static int __too_many_isolated(struct pglist_data *pgdat, int file,
+ struct scan_control *sc, int safe)
{
unsigned long inactive, isolated;
- if (current_is_kswapd())
- return 0;
-
- if (!sane_reclaim(sc))
- return 0;
-
if (file) {
- inactive = node_page_state(pgdat, NR_INACTIVE_FILE);
- isolated = node_page_state(pgdat, NR_ISOLATED_FILE);
+ if (safe) {
+ inactive = node_page_state_snapshot(pgdat,
+ NR_INACTIVE_FILE);
+ isolated = node_page_state_snapshot(pgdat,
+ NR_ISOLATED_FILE);
+ } else {
+ inactive = node_page_state(pgdat, NR_INACTIVE_FILE);
+ isolated = node_page_state(pgdat, NR_ISOLATED_FILE);
+ }
} else {
- inactive = node_page_state(pgdat, NR_INACTIVE_ANON);
- isolated = node_page_state(pgdat, NR_ISOLATED_ANON);
+ if (safe) {
+ inactive = node_page_state_snapshot(pgdat,
+ NR_INACTIVE_ANON);
+ isolated = node_page_state_snapshot(pgdat,
+ NR_ISOLATED_ANON);
+ } else {
+ inactive = node_page_state(pgdat, NR_INACTIVE_ANON);
+ isolated = node_page_state(pgdat, NR_ISOLATED_ANON);
+ }
}
/*
@@ -1622,6 +1679,32 @@
return isolated > inactive;
}
+/*
+ * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
+ * then get rescheduled. When there is a massive number of tasks doing page
+ * allocation, such sleeping direct reclaimers may keep piling up on each CPU,
+ * the LRU list will go small and be scanned faster than necessary, leading to
+ * unnecessary swapping, thrashing and OOM.
+ */
+static int too_many_isolated(struct pglist_data *pgdat, int file,
+ struct scan_control *sc, int safe)
+{
+ if (current_is_kswapd())
+ return 0;
+
+ if (!sane_reclaim(sc))
+ return 0;
+
+ if (unlikely(__too_many_isolated(pgdat, file, sc, 0))) {
+ if (safe)
+ return __too_many_isolated(pgdat, file, sc, safe);
+ else
+ return 1;
+ }
+
+ return 0;
+}
+
static noinline_for_stack void
putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
{
@@ -1733,18 +1816,21 @@
unsigned long nr_immediate = 0;
isolate_mode_t isolate_mode = 0;
int file = is_file_lru(lru);
+ int safe = 0;
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
if (!inactive_reclaimable_pages(lruvec, sc, lru))
return 0;
- while (unlikely(too_many_isolated(pgdat, file, sc))) {
+ while (unlikely(too_many_isolated(pgdat, file, sc, safe))) {
congestion_wait(BLK_RW_ASYNC, HZ/10);
/* We are about to die and free our memory. Return now. */
if (fatal_signal_pending(current))
return SWAP_CLUSTER_MAX;
+
+ safe = 1;
}
lru_add_drain();
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 25a1f39..513c37a 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -982,6 +982,7 @@
/* enum vm_event_item counters */
"pgpgin",
"pgpgout",
+ "pgpgoutclean",
"pswpin",
"pswpout",
@@ -1121,6 +1122,7 @@
/* Walk all the zones in a node and print using a callback */
static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
+ bool nolock,
void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
{
struct zone *zone;
@@ -1131,9 +1133,11 @@
if (!populated_zone(zone))
continue;
- spin_lock_irqsave(&zone->lock, flags);
+ if (!nolock)
+ spin_lock_irqsave(&zone->lock, flags);
print(m, pgdat, zone);
- spin_unlock_irqrestore(&zone->lock, flags);
+ if (!nolock)
+ spin_unlock_irqrestore(&zone->lock, flags);
}
}
#endif
@@ -1156,7 +1160,7 @@
static int frag_show(struct seq_file *m, void *arg)
{
pg_data_t *pgdat = (pg_data_t *)arg;
- walk_zones_in_node(m, pgdat, frag_show_print);
+ walk_zones_in_node(m, pgdat, false, frag_show_print);
return 0;
}
@@ -1197,7 +1201,7 @@
seq_printf(m, "%6d ", order);
seq_putc(m, '\n');
- walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);
+ walk_zones_in_node(m, pgdat, false, pagetypeinfo_showfree_print);
return 0;
}
@@ -1249,7 +1253,8 @@
for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
seq_printf(m, "%12s ", migratetype_names[mtype]);
seq_putc(m, '\n');
- walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);
+ walk_zones_in_node(m, pgdat, false,
+ pagetypeinfo_showblockcount_print);
return 0;
}
@@ -1275,7 +1280,8 @@
seq_printf(m, "%12s ", migratetype_names[mtype]);
seq_putc(m, '\n');
- walk_zones_in_node(m, pgdat, pagetypeinfo_showmixedcount_print);
+ walk_zones_in_node(m, pgdat, true,
+ pagetypeinfo_showmixedcount_print);
#endif /* CONFIG_PAGE_OWNER */
}
@@ -1432,7 +1438,7 @@
static int zoneinfo_show(struct seq_file *m, void *arg)
{
pg_data_t *pgdat = (pg_data_t *)arg;
- walk_zones_in_node(m, pgdat, zoneinfo_show_print);
+ walk_zones_in_node(m, pgdat, false, zoneinfo_show_print);
return 0;
}
@@ -1861,7 +1867,7 @@
if (!node_state(pgdat->node_id, N_MEMORY))
return 0;
- walk_zones_in_node(m, pgdat, unusable_show_print);
+ walk_zones_in_node(m, pgdat, false, unusable_show_print);
return 0;
}
@@ -1913,7 +1919,7 @@
{
pg_data_t *pgdat = (pg_data_t *)arg;
- walk_zones_in_node(m, pgdat, extfrag_show_print);
+ walk_zones_in_node(m, pgdat, false, extfrag_show_print);
return 0;
}
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index f2531ad..8d213f9 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -277,7 +277,8 @@
return 0;
out_free_newdev:
- free_netdev(new_dev);
+ if (new_dev->reg_state == NETREG_UNINITIALIZED)
+ free_netdev(new_dev);
return err;
}
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 43faf2a..658c900 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -23,6 +23,7 @@
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
+#include <crypto/algapi.h>
#include <crypto/b128ops.h>
#include <crypto/hash.h>
@@ -506,7 +507,7 @@
if (err)
return false;
- return !memcmp(bdaddr->b, hash, 3);
+ return !crypto_memneq(bdaddr->b, hash, 3);
}
int smp_generate_rpa(struct hci_dev *hdev, const u8 irk[16], bdaddr_t *rpa)
@@ -559,7 +560,7 @@
/* This is unlikely, but we need to check that
* we didn't accidentially generate a debug key.
*/
- if (memcmp(smp->local_sk, debug_sk, 32))
+ if (crypto_memneq(smp->local_sk, debug_sk, 32))
break;
}
smp->debug_key = false;
@@ -973,7 +974,7 @@
if (ret)
return SMP_UNSPECIFIED;
- if (memcmp(smp->pcnf, confirm, sizeof(smp->pcnf)) != 0) {
+ if (crypto_memneq(smp->pcnf, confirm, sizeof(smp->pcnf))) {
BT_ERR("Pairing failed (confirmation values mismatch)");
return SMP_CONFIRM_FAILED;
}
@@ -1473,7 +1474,7 @@
smp->rrnd, r, cfm))
return SMP_UNSPECIFIED;
- if (memcmp(smp->pcnf, cfm, 16))
+ if (crypto_memneq(smp->pcnf, cfm, 16))
return SMP_CONFIRM_FAILED;
smp->passkey_round++;
@@ -1857,7 +1858,7 @@
/* This is unlikely, but we need to check that
* we didn't accidentially generate a debug key.
*/
- if (memcmp(smp->local_sk, debug_sk, 32))
+ if (crypto_memneq(smp->local_sk, debug_sk, 32))
break;
}
}
@@ -2122,7 +2123,7 @@
if (err)
return SMP_UNSPECIFIED;
- if (memcmp(smp->pcnf, cfm, 16))
+ if (crypto_memneq(smp->pcnf, cfm, 16))
return SMP_CONFIRM_FAILED;
} else {
smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd),
@@ -2603,7 +2604,7 @@
if (err)
return SMP_UNSPECIFIED;
- if (memcmp(cfm.confirm_val, smp->pcnf, 16))
+ if (crypto_memneq(cfm.confirm_val, smp->pcnf, 16))
return SMP_CONFIRM_FAILED;
}
@@ -2636,7 +2637,7 @@
else
hcon->pending_sec_level = BT_SECURITY_FIPS;
- if (!memcmp(debug_pk, smp->remote_pk, 64))
+ if (!crypto_memneq(debug_pk, smp->remote_pk, 64))
set_bit(SMP_FLAG_DEBUG_KEY, &smp->flags);
if (smp->method == DSP_PASSKEY) {
@@ -2735,7 +2736,7 @@
if (err)
return SMP_UNSPECIFIED;
- if (memcmp(check->e, e, 16))
+ if (crypto_memneq(check->e, e, 16))
return SMP_DHKEY_CHECK_FAILED;
if (!hcon->out) {
@@ -3446,7 +3447,7 @@
if (err)
return err;
- if (memcmp(res, exp, 3))
+ if (crypto_memneq(res, exp, 3))
return -EINVAL;
return 0;
@@ -3476,7 +3477,7 @@
if (err)
return err;
- if (memcmp(res, exp, 16))
+ if (crypto_memneq(res, exp, 16))
return -EINVAL;
return 0;
@@ -3501,7 +3502,7 @@
if (err)
return err;
- if (memcmp(res, exp, 16))
+ if (crypto_memneq(res, exp, 16))
return -EINVAL;
return 0;
@@ -3533,7 +3534,7 @@
if (err)
return err;
- if (memcmp(res, exp, 16))
+ if (crypto_memneq(res, exp, 16))
return -EINVAL;
return 0;
@@ -3567,10 +3568,10 @@
if (err)
return err;
- if (memcmp(mackey, exp_mackey, 16))
+ if (crypto_memneq(mackey, exp_mackey, 16))
return -EINVAL;
- if (memcmp(ltk, exp_ltk, 16))
+ if (crypto_memneq(ltk, exp_ltk, 16))
return -EINVAL;
return 0;
@@ -3603,7 +3604,7 @@
if (err)
return err;
- if (memcmp(res, exp, 16))
+ if (crypto_memneq(res, exp, 16))
return -EINVAL;
return 0;
@@ -3657,7 +3658,7 @@
if (err)
return err;
- if (memcmp(res, exp, 16))
+ if (crypto_memneq(res, exp, 16))
return -EINVAL;
return 0;
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 7dbc80d..6406010e 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -323,7 +323,8 @@
__mdb_entry_to_br_ip(entry, &complete_info->ip);
mdb.obj.complete_priv = complete_info;
mdb.obj.complete = br_mdb_complete;
- switchdev_port_obj_add(port_dev, &mdb.obj);
+ if (switchdev_port_obj_add(port_dev, &mdb.obj))
+ kfree(complete_info);
}
} else if (port_dev && type == RTM_DELMDB) {
switchdev_port_obj_del(port_dev, &mdb.obj);
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index 59ce1fc..71b6ab2 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -81,11 +81,7 @@
{
struct sk_buff *skb;
- if (likely(in_interrupt()))
- skb = alloc_skb(len + pfx, GFP_ATOMIC);
- else
- skb = alloc_skb(len + pfx, GFP_KERNEL);
-
+ skb = alloc_skb(len + pfx, GFP_ATOMIC);
if (unlikely(skb == NULL))
return NULL;
diff --git a/net/core/dev.c b/net/core/dev.c
index dff8012..7e168d0 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -141,6 +141,8 @@
#include <linux/netfilter_ingress.h>
#include <linux/sctp.h>
#include <linux/crash_dump.h>
+#include <linux/tcp.h>
+#include <net/tcp.h>
#include "net-sysfs.h"
@@ -1250,8 +1252,9 @@
if (!new_ifalias)
return -ENOMEM;
dev->ifalias = new_ifalias;
+ memcpy(dev->ifalias, alias, len);
+ dev->ifalias[len] = 0;
- strlcpy(dev->ifalias, alias, len+1);
return len;
}
@@ -2988,6 +2991,10 @@
if (netif_needs_gso(skb, features)) {
struct sk_buff *segs;
+ __be16 src_port = tcp_hdr(skb)->source;
+ __be16 dest_port = tcp_hdr(skb)->dest;
+
+ trace_print_skb_gso(skb, src_port, dest_port);
segs = skb_gso_segment(skb, features);
if (IS_ERR(segs)) {
goto out_kfree_skb;
@@ -4400,6 +4407,7 @@
}
out:
+ __this_cpu_add(softnet_data.gro_coalesced, NAPI_GRO_CB(skb)->count > 1);
return netif_receive_skb_internal(skb);
}
@@ -4640,6 +4648,12 @@
}
EXPORT_SYMBOL(gro_find_complete_by_type);
+static void napi_skb_free_stolen_head(struct sk_buff *skb)
+{
+ skb_dst_drop(skb);
+ kmem_cache_free(skbuff_head_cache, skb);
+}
+
static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
{
switch (ret) {
@@ -4653,12 +4667,10 @@
break;
case GRO_MERGED_FREE:
- if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) {
- skb_dst_drop(skb);
- kmem_cache_free(skbuff_head_cache, skb);
- } else {
+ if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
+ napi_skb_free_stolen_head(skb);
+ else
__kfree_skb(skb);
- }
break;
case GRO_HELD:
@@ -4728,10 +4740,16 @@
break;
case GRO_DROP:
- case GRO_MERGED_FREE:
napi_reuse_skb(napi, skb);
break;
+ case GRO_MERGED_FREE:
+ if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
+ napi_skb_free_stolen_head(skb);
+ else
+ napi_reuse_skb(napi, skb);
+ break;
+
case GRO_MERGED:
break;
}
@@ -4836,9 +4854,15 @@
while (remsd) {
struct softnet_data *next = remsd->rps_ipi_next;
- if (cpu_online(remsd->cpu))
+ if (cpu_online(remsd->cpu)) {
smp_call_function_single_async(remsd->cpu,
&remsd->csd);
+ } else {
+ pr_err("%s() cpu offline\n", __func__);
+ rps_lock(remsd);
+ remsd->backlog.state = 0;
+ rps_unlock(remsd);
+ }
remsd = next;
}
} else
@@ -7537,7 +7561,7 @@
{
#if BITS_PER_LONG == 64
BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
- memcpy(stats64, netdev_stats, sizeof(*stats64));
+ memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
/* zero out counters that only exist in rtnl_link_stats64 */
memset((char *)stats64 + sizeof(*netdev_stats), 0,
sizeof(*stats64) - sizeof(*netdev_stats));
@@ -7579,9 +7603,9 @@
} else {
netdev_stats_to_stats64(storage, &dev->stats);
}
- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
- storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
- storage->rx_nohandler += atomic_long_read(&dev->rx_nohandler);
+ storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
+ storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
+ storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
return storage;
}
EXPORT_SYMBOL(dev_get_stats);
diff --git a/net/core/dst.c b/net/core/dst.c
index 656b70d..39cc119 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -470,6 +470,20 @@
spin_lock_bh(&dst_garbage.lock);
dst = dst_garbage.list;
dst_garbage.list = NULL;
+ /* The code in dst_ifdown places a hold on the loopback device.
+ * If the gc entry processing is set to expire after a lengthy
+ * interval, this hold can cause netdev_wait_allrefs() to hang
+	 * out and wait for a long time -- until the loopback
+ * interface is released. If we're really unlucky, it'll emit
+ * pr_emerg messages to console too. Reset the interval here,
+ * so dst cleanups occur in a more timely fashion.
+ */
+ if (dst_garbage.timer_inc > DST_GC_INC) {
+ dst_garbage.timer_inc = DST_GC_INC;
+ dst_garbage.timer_expires = DST_GC_MIN;
+ mod_delayed_work(system_wq, &dst_gc_work,
+ dst_garbage.timer_expires);
+ }
spin_unlock_bh(&dst_garbage.lock);
if (last)
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 047a175..e9989b8 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1394,9 +1394,12 @@
if (regs.len > reglen)
regs.len = reglen;
- regbuf = vzalloc(reglen);
- if (reglen && !regbuf)
- return -ENOMEM;
+ regbuf = NULL;
+ if (reglen) {
+ regbuf = vzalloc(reglen);
+ if (!regbuf)
+ return -ENOMEM;
+ }
ops->get_regs(dev, ®s, regbuf);
@@ -1701,7 +1704,7 @@
static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
void __user *useraddr)
{
- struct ethtool_channels channels, max;
+ struct ethtool_channels channels, max = { .cmd = ETHTOOL_GCHANNELS };
u32 max_rx_in_use = 0;
if (!dev->ethtool_ops->set_channels || !dev->ethtool_ops->get_channels)
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index f45f619..227c249 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -687,7 +687,7 @@
NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
if (!neigh->dead) {
- pr_warn("Destroying alive neighbour %p\n", neigh);
+ pr_warn("Destroying alive neighbour %pK\n", neigh);
dump_stack();
return;
}
diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
index 14d0934..699c4e7 100644
--- a/net/core/net-procfs.c
+++ b/net/core/net-procfs.c
@@ -158,12 +158,12 @@
rcu_read_unlock();
#endif
- seq_printf(seq,
- "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
- sd->processed, sd->dropped, sd->time_squeeze, 0,
- 0, 0, 0, 0, /* was fastroute */
- 0, /* was cpu_collision */
- sd->received_rps, flow_limit_count);
+ seq_printf
+ (seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ sd->processed, sd->dropped, sd->time_squeeze, 0,
+ 0, 0, 0, 0, /* was fastroute */
+ 0, /* was cpu_collision */
+ sd->received_rps, flow_limit_count, sd->gro_coalesced);
return 0;
}
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 1d91607..9c6fd7f 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -937,6 +937,7 @@
+ nla_total_size(1) /* IFLA_LINKMODE */
+ nla_total_size(4) /* IFLA_CARRIER_CHANGES */
+ nla_total_size(4) /* IFLA_LINK_NETNSID */
+ + nla_total_size(4) /* IFLA_GROUP */
+ nla_total_size(ext_filter_mask
& RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
+ rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
@@ -1130,6 +1131,8 @@
struct ifla_vf_mac vf_mac;
struct ifla_vf_info ivi;
+ memset(&ivi, 0, sizeof(ivi));
+
/* Not all SR-IOV capable drivers support the
* spoofcheck and "RSS query enable" query. Preset to
* -1 so the user space tool can detect that the driver
@@ -1138,7 +1141,6 @@
ivi.spoofchk = -1;
ivi.rss_query_en = -1;
ivi.trusted = -1;
- memset(ivi.mac, 0, sizeof(ivi.mac));
/* The default value for VF link state is "auto"
* IFLA_VF_LINK_STATE_AUTO which equals zero
*/
@@ -1464,6 +1466,7 @@
[IFLA_LINK_NETNSID] = { .type = NLA_S32 },
[IFLA_PROTO_DOWN] = { .type = NLA_U8 },
[IFLA_XDP] = { .type = NLA_NESTED },
+ [IFLA_GROUP] = { .type = NLA_U32 },
};
static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index b1dc096..403593b 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -188,12 +188,6 @@
call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
}
-static inline void dnrt_drop(struct dn_route *rt)
-{
- dst_release(&rt->dst);
- call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
-}
-
static void dn_dst_check_expire(unsigned long dummy)
{
int i;
@@ -248,7 +242,7 @@
}
*rtp = rt->dst.dn_next;
rt->dst.dn_next = NULL;
- dnrt_drop(rt);
+ dnrt_free(rt);
break;
}
spin_unlock_bh(&dn_rt_hash_table[i].lock);
@@ -350,7 +344,7 @@
dst_use(&rth->dst, now);
spin_unlock_bh(&dn_rt_hash_table[hash].lock);
- dnrt_drop(rt);
+ dst_free(&rt->dst);
*rp = rth;
return 0;
}
@@ -380,7 +374,7 @@
for(; rt; rt = next) {
next = rcu_dereference_raw(rt->dst.dn_next);
RCU_INIT_POINTER(rt->dst.dn_next, NULL);
- dst_free((struct dst_entry *)rt);
+ dnrt_free(rt);
}
nothing_to_declare:
@@ -1187,7 +1181,7 @@
if (dev_out->flags & IFF_LOOPBACK)
flags |= RTCF_LOCAL;
- rt = dst_alloc(&dn_dst_ops, dev_out, 1, DST_OBSOLETE_NONE, DST_HOST);
+ rt = dst_alloc(&dn_dst_ops, dev_out, 0, DST_OBSOLETE_NONE, DST_HOST);
if (rt == NULL)
goto e_nobufs;
diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
index 85f2fdc..29246bc 100644
--- a/net/decnet/netfilter/dn_rtmsg.c
+++ b/net/decnet/netfilter/dn_rtmsg.c
@@ -102,7 +102,9 @@
{
struct nlmsghdr *nlh = nlmsg_hdr(skb);
- if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len)
+ if (skb->len < sizeof(*nlh) ||
+ nlh->nlmsg_len < sizeof(*nlh) ||
+ skb->len < nlh->nlmsg_len)
return;
if (!netlink_capable(skb, CAP_NET_ADMIN))
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 3ff9d97..079d76b 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -1103,10 +1103,8 @@
/* Use already configured phy mode */
if (p->phy_interface == PHY_INTERFACE_MODE_NA)
p->phy_interface = p->phy->interface;
- phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
- p->phy_interface);
-
- return 0;
+ return phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
+ p->phy_interface);
}
static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
diff --git a/net/ipc_router/ipc_router_core.c b/net/ipc_router/ipc_router_core.c
index e057887..a28b1af 100644
--- a/net/ipc_router/ipc_router_core.c
+++ b/net/ipc_router/ipc_router_core.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -148,6 +148,7 @@
void *log_ctx;
struct kref ref;
struct completion ref_complete;
+ bool dynamic_ws;
};
#define RT_HASH_SIZE 4
@@ -215,6 +216,13 @@
UP,
};
+static bool is_wakeup_source_allowed;
+
+void msm_ipc_router_set_ws_allowed(bool flag)
+{
+ is_wakeup_source_allowed = flag;
+}
+
static void init_routing_table(void)
{
int i;
@@ -355,6 +363,8 @@
svc_id = rport_ptr->server->name.service;
svc_ins = rport_ptr->server->name.instance;
port_type = CLIENT_PORT;
+ port_ptr->last_served_svc_id =
+ rport_ptr->server->name.service;
} else if (port_ptr && (port_ptr->type == SERVER_PORT)) {
svc_id = port_ptr->port_name.service;
svc_ins = port_ptr->port_name.instance;
@@ -580,6 +590,7 @@
}
cloned_pkt->pkt_fragment_q = pkt_fragment_q;
cloned_pkt->length = pkt->length;
+ cloned_pkt->ws_need = pkt->ws_need;
return cloned_pkt;
fail_clone:
@@ -1162,7 +1173,8 @@
}
mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
- __pm_stay_awake(port_ptr->port_rx_ws);
+ if (pkt->ws_need)
+ __pm_stay_awake(port_ptr->port_rx_ws);
list_add_tail(&temp_pkt->list, &port_ptr->port_rx_q);
wake_up(&port_ptr->port_rx_wait_q);
notify = port_ptr->notify;
@@ -1320,8 +1332,9 @@
mutex_init(&port_ptr->port_rx_q_lock_lhc3);
init_waitqueue_head(&port_ptr->port_rx_wait_q);
snprintf(port_ptr->rx_ws_name, MAX_WS_NAME_SZ,
- "ipc%08x_%s",
+ "ipc%08x_%d_%s",
port_ptr->this_port.port_id,
+ task_pid_nr(current),
current->comm);
port_ptr->port_rx_ws = wakeup_source_register(port_ptr->rx_ws_name);
if (!port_ptr->port_rx_ws) {
@@ -3837,15 +3850,18 @@
int j;
struct msm_ipc_port *port_ptr;
- seq_printf(s, "%-11s|%-11s|\n", "Node_id", "Port_id");
+ seq_printf(s, "%-11s|%-11s|%-32s|%-11s|\n",
+ "Node_id", "Port_id", "Wakelock", "Last SVCID");
seq_puts(s, "------------------------------------------------------------\n");
down_read(&local_ports_lock_lhc2);
for (j = 0; j < LP_HASH_SIZE; j++) {
list_for_each_entry(port_ptr, &local_ports[j], list) {
mutex_lock(&port_ptr->port_lock_lhc3);
- seq_printf(s, "0x%08x |0x%08x |\n",
+ seq_printf(s, "0x%08x |0x%08x |%-32s|0x%08x |\n",
port_ptr->this_port.node_id,
- port_ptr->this_port.port_id);
+ port_ptr->this_port.port_id,
+ port_ptr->rx_ws_name,
+ port_ptr->last_served_svc_id);
mutex_unlock(&port_ptr->port_lock_lhc3);
}
}
@@ -4043,6 +4059,9 @@
INIT_LIST_HEAD(&xprt_info->list);
kref_init(&xprt_info->ref);
init_completion(&xprt_info->ref_complete);
+ xprt_info->dynamic_ws = 0;
+ if (xprt->get_ws_info)
+ xprt_info->dynamic_ws = xprt->get_ws_info(xprt);
xprt_info->workqueue = create_singlethread_workqueue(xprt->name);
if (!xprt_info->workqueue) {
@@ -4193,9 +4212,18 @@
if (!pkt)
return;
+ pkt->ws_need = false;
mutex_lock(&xprt_info->rx_lock_lhb2);
list_add_tail(&pkt->list, &xprt_info->pkt_list);
- __pm_stay_awake(&xprt_info->ws);
+ if (!xprt_info->dynamic_ws) {
+ __pm_stay_awake(&xprt_info->ws);
+ pkt->ws_need = true;
+ } else {
+ if (is_wakeup_source_allowed) {
+ __pm_stay_awake(&xprt_info->ws);
+ pkt->ws_need = true;
+ }
+ }
mutex_unlock(&xprt_info->rx_lock_lhb2);
queue_work(xprt_info->workqueue, &xprt_info->read_data);
}
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 89a8cac4..51b27ae 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1263,7 +1263,7 @@
/*
* ax25 -> ASCII conversion
*/
-static char *ax2asc2(ax25_address *a, char *buf)
+static void ax2asc2(ax25_address *a, char *buf)
{
char c, *s;
int n;
@@ -1285,10 +1285,10 @@
*s++ = n + '0';
*s++ = '\0';
- if (*buf == '\0' || *buf == '-')
- return "*";
-
- return buf;
+ if (*buf == '\0' || *buf == '-') {
+ buf[0] = '*';
+ buf[1] = '\0';
+ }
}
#endif /* CONFIG_AX25 */
@@ -1322,7 +1322,7 @@
}
#endif
sprintf(tbuf, "%pI4", n->primary_key);
- seq_printf(seq, "%-16s 0x%-10x0x%-10x%s * %s\n",
+ seq_printf(seq, "%-16s 0x%-10x0x%-10x%-17s * %s\n",
tbuf, hatype, arp_state_to_flags(n), hbuffer, dev->name);
read_unlock(&n->lock);
}
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 1bc623d..19930da 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1112,6 +1112,7 @@
pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
if (!pmc)
return;
+ spin_lock_init(&pmc->lock);
spin_lock_bh(&im->lock);
pmc->interface = im->interface;
in_dev_hold(in_dev);
@@ -2071,21 +2072,26 @@
static void ip_mc_clear_src(struct ip_mc_list *pmc)
{
- struct ip_sf_list *psf, *nextpsf;
+ struct ip_sf_list *psf, *nextpsf, *tomb, *sources;
- for (psf = pmc->tomb; psf; psf = nextpsf) {
- nextpsf = psf->sf_next;
- kfree(psf);
- }
+ spin_lock_bh(&pmc->lock);
+ tomb = pmc->tomb;
pmc->tomb = NULL;
- for (psf = pmc->sources; psf; psf = nextpsf) {
- nextpsf = psf->sf_next;
- kfree(psf);
- }
+ sources = pmc->sources;
pmc->sources = NULL;
pmc->sfmode = MCAST_EXCLUDE;
pmc->sfcount[MCAST_INCLUDE] = 0;
pmc->sfcount[MCAST_EXCLUDE] = 1;
+ spin_unlock_bh(&pmc->lock);
+
+ for (psf = tomb; psf; psf = nextpsf) {
+ nextpsf = psf->sf_next;
+ kfree(psf);
+ }
+ for (psf = sources; psf; psf = nextpsf) {
+ nextpsf = psf->sf_next;
+ kfree(psf);
+ }
}
/* Join a multicast group
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index b3cc133..c0cc6aa 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -23,7 +23,8 @@
struct rtable *rt;
struct flowi4 fl4 = {};
__be32 saddr = iph->saddr;
- __u8 flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0;
+ const struct sock *sk = skb_to_full_sk(skb);
+ __u8 flags = sk ? inet_sk_flowi_flags(sk) : 0;
struct net_device *dev = skb_dst(skb)->dev;
unsigned int hh_len;
@@ -40,7 +41,7 @@
fl4.daddr = iph->daddr;
fl4.saddr = saddr;
fl4.flowi4_tos = RT_TOS(iph->tos);
- fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0;
+ fl4.flowi4_oif = sk ? sk->sk_bound_dev_if : 0;
if (!fl4.flowi4_oif)
fl4.flowi4_oif = l3mdev_master_ifindex(dev);
fl4.flowi4_mark = skb->mark;
@@ -61,7 +62,7 @@
xfrm_decode_session(skb, flowi4_to_flowi(&fl4), AF_INET) == 0) {
struct dst_entry *dst = skb_dst(skb);
skb_dst_set(skb, NULL);
- dst = xfrm_lookup(net, dst, flowi4_to_flowi(&fl4), skb->sk, 0);
+ dst = xfrm_lookup(net, dst, flowi4_to_flowi(&fl4), sk, 0);
if (IS_ERR(dst))
return PTR_ERR(dst);
skb_dst_set(skb, dst);
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 08605a4..51ac77e 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -41,6 +41,10 @@
static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
static int ip_ping_group_range_min[] = { 0, 0 };
static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
+static int tcp_delack_seg_min = TCP_DELACK_MIN;
+static int tcp_delack_seg_max = 60;
+static int tcp_use_userconfig_min;
+static int tcp_use_userconfig_max = 1;
/* Update system visible IP port range */
static void set_local_port_range(struct net *net, int range[2])
@@ -684,6 +688,25 @@
.proc_handler = proc_dointvec_minmax,
.extra1 = &one
},
+ {
+ .procname = "tcp_delack_seg",
+ .data = &sysctl_tcp_delack_seg,
+ .maxlen = sizeof(sysctl_tcp_delack_seg),
+ .mode = 0644,
+ .proc_handler = tcp_proc_delayed_ack_control,
+ .extra1 = &tcp_delack_seg_min,
+ .extra2 = &tcp_delack_seg_max,
+ },
+ {
+ .procname = "tcp_use_userconfig",
+ .data = &sysctl_tcp_use_userconfig,
+ .maxlen = sizeof(sysctl_tcp_use_userconfig),
+ .mode = 0644,
+ .proc_handler = tcp_use_userconfig_sysctl_handler,
+ .extra1 = &tcp_use_userconfig_min,
+ .extra2 = &tcp_use_userconfig_max,
+ },
+
{ }
};
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 86fbf0f..7c90130 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -300,6 +300,12 @@
atomic_long_t tcp_memory_allocated; /* Current allocated memory. */
EXPORT_SYMBOL(tcp_memory_allocated);
+int sysctl_tcp_delack_seg __read_mostly = TCP_DELACK_SEG;
+EXPORT_SYMBOL(sysctl_tcp_delack_seg);
+
+int sysctl_tcp_use_userconfig __read_mostly;
+EXPORT_SYMBOL(sysctl_tcp_use_userconfig);
+
/*
* Current number of TCP sockets.
*/
@@ -1438,8 +1444,11 @@
/* Delayed ACKs frequently hit locked sockets during bulk
* receive. */
if (icsk->icsk_ack.blocked ||
- /* Once-per-two-segments ACK was not sent by tcp_input.c */
- tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
+ /* Once-per-sysctl_tcp_delack_seg segments
+ * ACK was not sent by tcp_input.c
+ */
+ tp->rcv_nxt - tp->rcv_wup > (icsk->icsk_ack.rcv_mss) *
+ sysctl_tcp_delack_seg ||
/*
* If this read emptied read buffer, we send ACK, if
* connection is not bidirectional, user drained
@@ -2300,6 +2309,8 @@
tcp_init_send_head(sk);
memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
__sk_dst_reset(sk);
+ dst_release(sk->sk_rx_dst);
+ sk->sk_rx_dst = NULL;
tcp_saved_syn_free(tp);
WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a03f1e8..3d980d6 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5104,7 +5104,8 @@
struct tcp_sock *tp = tcp_sk(sk);
/* More than one full frame received... */
- if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss &&
+ if (((tp->rcv_nxt - tp->rcv_wup) > (inet_csk(sk)->icsk_ack.rcv_mss) *
+ sysctl_tcp_delack_seg &&
/* ... and right edge of window advances far enough.
* (tcp_recvmsg() will send ACK otherwise). Or...
*/
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index f6c50af..3d063eb 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -117,7 +117,7 @@
(fwmark > 0 && skb->mark == fwmark)) &&
(full || tp->snd_cwnd != tcp_probe.lastcwnd)) {
- spin_lock(&tcp_probe.lock);
+ spin_lock_bh(&tcp_probe.lock);
/* If log fills, just silently drop */
if (tcp_probe_avail() > 1) {
struct tcp_log *p = tcp_probe.log + tcp_probe.head;
@@ -157,7 +157,7 @@
tcp_probe.head = (tcp_probe.head + 1) & (bufsize - 1);
}
tcp_probe.lastcwnd = tp->snd_cwnd;
- spin_unlock(&tcp_probe.lock);
+ spin_unlock_bh(&tcp_probe.lock);
wake_up(&tcp_probe.wait);
}
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index b1e65b3..732060d 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -24,13 +24,45 @@
int sysctl_tcp_thin_linear_timeouts __read_mostly;
+static void set_tcp_default(void)
+{
+ sysctl_tcp_delack_seg = TCP_DELACK_SEG;
+}
+
+/* sysctl handler for tcp_ack related master control */
+int tcp_proc_delayed_ack_control(struct ctl_table *table, int write,
+ void __user *buffer, size_t *length,
+ loff_t *ppos)
+{
+ int ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
+
+ /* The ret value will be 0 if the input validation is successful
+ * and the values are written to sysctl table. If not, the stack
+ * will continue to work with currently configured values
+ */
+ return ret;
+}
+
+/* sysctl handler for tcp_ack related master control */
+int tcp_use_userconfig_sysctl_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *length,
+ loff_t *ppos)
+{
+ int ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
+
+ if (write && ret == 0) {
+ if (!sysctl_tcp_use_userconfig)
+ set_tcp_default();
+ }
+ return ret;
+}
+
/**
* tcp_write_err() - close socket and save error info
* @sk: The socket the error has appeared on.
*
* Returns: Nothing (void)
*/
-
static void tcp_write_err(struct sock *sk)
{
sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 58d7c1d..fe5305a 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -319,9 +319,9 @@
static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
unsigned long delay)
{
- if (!delayed_work_pending(&ifp->dad_work))
- in6_ifa_hold(ifp);
- mod_delayed_work(addrconf_wq, &ifp->dad_work, delay);
+ in6_ifa_hold(ifp);
+ if (mod_delayed_work(addrconf_wq, &ifp->dad_work, delay))
+ in6_ifa_put(ifp);
}
static int snmp6_alloc_dev(struct inet6_dev *idev)
@@ -1879,15 +1879,7 @@
if (dad_failed)
ifp->flags |= IFA_F_DADFAILED;
- if (ifp->flags&IFA_F_PERMANENT) {
- spin_lock_bh(&ifp->lock);
- addrconf_del_dad_work(ifp);
- ifp->flags |= IFA_F_TENTATIVE;
- spin_unlock_bh(&ifp->lock);
- if (dad_failed)
- ipv6_ifa_notify(0, ifp);
- in6_ifa_put(ifp);
- } else if (ifp->flags&IFA_F_TEMPORARY) {
+ if (ifp->flags&IFA_F_TEMPORARY) {
struct inet6_ifaddr *ifpub;
spin_lock_bh(&ifp->lock);
ifpub = ifp->ifpub;
@@ -1900,6 +1892,14 @@
spin_unlock_bh(&ifp->lock);
}
ipv6_del_addr(ifp);
+ } else if (ifp->flags&IFA_F_PERMANENT || !dad_failed) {
+ spin_lock_bh(&ifp->lock);
+ addrconf_del_dad_work(ifp);
+ ifp->flags |= IFA_F_TENTATIVE;
+ spin_unlock_bh(&ifp->lock);
+ if (dad_failed)
+ ipv6_ifa_notify(0, ifp);
+ in6_ifa_put(ifp);
} else {
ipv6_del_addr(ifp);
}
@@ -3356,6 +3356,7 @@
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct netdev_notifier_changeupper_info *info;
struct inet6_dev *idev = __in6_dev_get(dev);
+ struct net *net = dev_net(dev);
int run_pending = 0;
int err;
@@ -3371,7 +3372,7 @@
case NETDEV_CHANGEMTU:
/* if MTU under IPV6_MIN_MTU stop IPv6 on this interface. */
if (dev->mtu < IPV6_MIN_MTU) {
- addrconf_ifdown(dev, 1);
+ addrconf_ifdown(dev, dev != net->loopback_dev);
break;
}
@@ -3427,9 +3428,15 @@
}
if (idev) {
- if (idev->if_flags & IF_READY)
- /* device is already configured. */
+ if (idev->if_flags & IF_READY) {
+ /* device is already configured -
+ * but resend MLD reports, we might
+ * have roamed and need to update
+ * multicast snooping switches
+ */
+ ipv6_mc_up(idev);
break;
+ }
idev->if_flags |= IF_READY;
}
@@ -3481,7 +3488,7 @@
* IPV6_MIN_MTU stop IPv6 on this interface.
*/
if (dev->mtu < IPV6_MIN_MTU)
- addrconf_ifdown(dev, 1);
+ addrconf_ifdown(dev, dev != net->loopback_dev);
}
break;
@@ -4044,6 +4051,12 @@
if (bump_id)
rt_genid_bump_ipv6(dev_net(dev));
+
+ /* Make sure that a new temporary address will be created
+ * before this temporary address becomes deprecated.
+ */
+ if (ifp->flags & IFA_F_TEMPORARY)
+ addrconf_verify_rtnl();
}
static void addrconf_dad_run(struct inet6_dev *idev)
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 9217390..edf15f0 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -167,18 +167,22 @@
if (np->sndflow)
fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
- addr_type = ipv6_addr_type(&usin->sin6_addr);
-
- if (addr_type == IPV6_ADDR_ANY) {
+ if (ipv6_addr_any(&usin->sin6_addr)) {
/*
* connect to self
*/
- usin->sin6_addr.s6_addr[15] = 0x01;
+ if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
+ ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
+ &usin->sin6_addr);
+ else
+ usin->sin6_addr = in6addr_loopback;
}
+ addr_type = ipv6_addr_type(&usin->sin6_addr);
+
daddr = &usin->sin6_addr;
- if (addr_type == IPV6_ADDR_MAPPED) {
+ if (addr_type & IPV6_ADDR_MAPPED) {
struct sockaddr_in sin;
if (__ipv6_only_sock(sk)) {
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index eea23b5..ec849d8 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -32,7 +32,6 @@
struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
int flags, pol_lookup_t lookup)
{
- struct rt6_info *rt;
struct fib_lookup_arg arg = {
.lookup_ptr = lookup,
.flags = FIB_LOOKUP_NOREF,
@@ -44,21 +43,11 @@
fib_rules_lookup(net->ipv6.fib6_rules_ops,
flowi6_to_flowi(fl6), flags, &arg);
- rt = arg.result;
+ if (arg.result)
+ return arg.result;
- if (!rt) {
- dst_hold(&net->ipv6.ip6_null_entry->dst);
- return &net->ipv6.ip6_null_entry->dst;
- }
-
- if (rt->rt6i_flags & RTF_REJECT &&
- rt->dst.error == -EAGAIN) {
- ip6_rt_put(rt);
- rt = net->ipv6.ip6_null_entry;
- dst_hold(&rt->dst);
- }
-
- return &rt->dst;
+ dst_hold(&net->ipv6.ip6_null_entry->dst);
+ return &net->ipv6.ip6_null_entry->dst;
}
static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
@@ -121,7 +110,8 @@
flp6->saddr = saddr;
}
err = rt->dst.error;
- goto out;
+ if (err != -EAGAIN)
+ goto out;
}
again:
ip6_rt_put(rt);
diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
index e604013..7a5b9812 100644
--- a/net/ipv6/ila/ila_xlat.c
+++ b/net/ipv6/ila/ila_xlat.c
@@ -68,6 +68,7 @@
{
u32 *v = (u32 *)loc.v32;
+ __ila_hash_secret_init();
return jhash_2words(v[0], v[1], hashrnd);
}
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 8c88a37..4345ee3 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -289,8 +289,7 @@
struct rt6_info *rt;
rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, flags);
- if (rt->rt6i_flags & RTF_REJECT &&
- rt->dst.error == -EAGAIN) {
+ if (rt->dst.error == -EAGAIN) {
ip6_rt_put(rt);
rt = net->ipv6.ip6_null_entry;
dst_hold(&rt->dst);
@@ -772,10 +771,7 @@
goto next_iter;
}
- if (iter->dst.dev == rt->dst.dev &&
- iter->rt6i_idev == rt->rt6i_idev &&
- ipv6_addr_equal(&iter->rt6i_gateway,
- &rt->rt6i_gateway)) {
+ if (rt6_duplicate_nexthop(iter, rt)) {
if (rt->rt6i_nsiblings)
rt->rt6i_nsiblings = 0;
if (!(iter->rt6i_flags & RTF_EXPIRES))
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index ffc83d4..c329a15 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -542,11 +542,10 @@
memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
- dsfield = ipv4_get_dsfield(iph);
-
if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
- fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
- & IPV6_TCLASS_MASK;
+ dsfield = ipv4_get_dsfield(iph);
+ else
+ dsfield = ip6_tclass(t->parms.flowinfo);
if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
fl6.flowi6_mark = skb->mark;
@@ -584,6 +583,9 @@
return -1;
offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
+ /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
+ ipv6h = ipv6_hdr(skb);
+
if (offset > 0) {
struct ipv6_tlv_tnl_enc_lim *tel;
tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
@@ -598,9 +600,11 @@
memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
- dsfield = ipv6_get_dsfield(ipv6h);
if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
- fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
+ dsfield = ipv6_get_dsfield(ipv6h);
+ else
+ dsfield = ip6_tclass(t->parms.flowinfo);
+
if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
fl6.flowlabel |= ip6_flowlabel(ipv6h);
if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index d472a5f..3e42221 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -67,9 +67,6 @@
struct in6_addr *nexthop;
int ret;
- skb->protocol = htons(ETH_P_IPV6);
- skb->dev = dev;
-
if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
@@ -153,6 +150,9 @@
struct net_device *dev = skb_dst(skb)->dev;
struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
+ skb->protocol = htons(ETH_P_IPV6);
+ skb->dev = dev;
+
if (unlikely(idev->cnf.disable_ipv6)) {
IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
kfree_skb(skb);
@@ -867,7 +867,6 @@
if (skb->sk && dst_allfrag(skb_dst(skb)))
sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);
- skb->dev = skb_dst(skb)->dev;
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
err = -EMSGSIZE;
@@ -1028,6 +1027,11 @@
}
}
#endif
+ if (ipv6_addr_v4mapped(&fl6->saddr) &&
+ !(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr))) {
+ err = -EAFNOSUPPORT;
+ goto out_err_release;
+ }
return 0;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 97e89a2..b44e9f5 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1196,7 +1196,7 @@
skb_push(skb, sizeof(struct ipv6hdr));
skb_reset_network_header(skb);
ipv6h = ipv6_hdr(skb);
- ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield),
+ ip6_flow_hdr(ipv6h, dsfield,
ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
ipv6h->hop_limit = hop_limit;
ipv6h->nexthdr = proto;
@@ -1231,8 +1231,6 @@
if (tproto != IPPROTO_IPIP && tproto != 0)
return -1;
- dsfield = ipv4_get_dsfield(iph);
-
if (t->parms.collect_md) {
struct ip_tunnel_info *tun_info;
const struct ip_tunnel_key *key;
@@ -1246,6 +1244,7 @@
fl6.flowi6_proto = IPPROTO_IPIP;
fl6.daddr = key->u.ipv6.dst;
fl6.flowlabel = key->label;
+ dsfield = ip6_tclass(key->label);
} else {
if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
encap_limit = t->parms.encap_limit;
@@ -1254,8 +1253,9 @@
fl6.flowi6_proto = IPPROTO_IPIP;
if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
- fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
- & IPV6_TCLASS_MASK;
+ dsfield = ipv4_get_dsfield(iph);
+ else
+ dsfield = ip6_tclass(t->parms.flowinfo);
if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
fl6.flowi6_mark = skb->mark;
}
@@ -1265,6 +1265,8 @@
if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
return -1;
+ dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));
+
skb_set_inner_ipproto(skb, IPPROTO_IPIP);
err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
@@ -1298,8 +1300,6 @@
ip6_tnl_addr_conflict(t, ipv6h))
return -1;
- dsfield = ipv6_get_dsfield(ipv6h);
-
if (t->parms.collect_md) {
struct ip_tunnel_info *tun_info;
const struct ip_tunnel_key *key;
@@ -1313,8 +1313,11 @@
fl6.flowi6_proto = IPPROTO_IPV6;
fl6.daddr = key->u.ipv6.dst;
fl6.flowlabel = key->label;
+ dsfield = ip6_tclass(key->label);
} else {
offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
+ /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
+ ipv6h = ipv6_hdr(skb);
if (offset > 0) {
struct ipv6_tlv_tnl_enc_lim *tel;
@@ -1333,7 +1336,9 @@
fl6.flowi6_proto = IPPROTO_IPV6;
if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
- fl6.flowlabel |= (*(__be32 *)ipv6h & IPV6_TCLASS_MASK);
+ dsfield = ipv6_get_dsfield(ipv6h);
+ else
+ dsfield = ip6_tclass(t->parms.flowinfo);
if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
fl6.flowlabel |= ip6_flowlabel(ipv6h);
if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
@@ -1345,6 +1350,8 @@
if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
return -1;
+ dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));
+
skb_set_inner_ipproto(skb, IPPROTO_IPV6);
err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index cc8e3ae..e88bcb8 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -219,7 +219,7 @@
u64 buff64[SNMP_MIB_MAX];
int i;
- memset(buff64, 0, sizeof(unsigned long) * SNMP_MIB_MAX);
+ memset(buff64, 0, sizeof(u64) * SNMP_MIB_MAX);
snmp_get_cpu_field64_batch(buff64, itemlist, mib, syncpoff);
for (i = 0; itemlist[i].name; i++)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 0b21d61..d8123f6 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2922,17 +2922,11 @@
struct rt6_info *rt, struct fib6_config *r_cfg)
{
struct rt6_nh *nh;
- struct rt6_info *rtnh;
int err = -EEXIST;
list_for_each_entry(nh, rt6_nh_list, next) {
/* check if rt6_info already exists */
- rtnh = nh->rt6_info;
-
- if (rtnh->dst.dev == rt->dst.dev &&
- rtnh->rt6i_idev == rt->rt6i_idev &&
- ipv6_addr_equal(&rtnh->rt6i_gateway,
- &rt->rt6i_gateway))
+ if (rt6_duplicate_nexthop(nh->rt6_info, rt))
return err;
}
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index f8a6036..9828dc2 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -148,8 +148,13 @@
* connect() to INADDR_ANY means loopback (BSD'ism).
*/
- if (ipv6_addr_any(&usin->sin6_addr))
- usin->sin6_addr.s6_addr[15] = 0x1;
+ if (ipv6_addr_any(&usin->sin6_addr)) {
+ if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
+ ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
+ &usin->sin6_addr);
+ else
+ usin->sin6_addr = in6addr_loopback;
+ }
addr_type = ipv6_addr_type(&usin->sin6_addr);
@@ -188,7 +193,7 @@
* TCP over IPv4
*/
- if (addr_type == IPV6_ADDR_MAPPED) {
+ if (addr_type & IPV6_ADDR_MAPPED) {
u32 exthdrlen = icsk->icsk_ext_hdr_len;
struct sockaddr_in sin;
@@ -1237,9 +1242,6 @@
if (skb->protocol == htons(ETH_P_IP))
return tcp_v4_do_rcv(sk, skb);
- if (tcp_filter(sk, skb))
- goto discard;
-
/*
* socket locking is here for SMP purposes as backlog rcv
* is currently called with bh processing disabled.
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 26d5718..c925fd9 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1049,6 +1049,10 @@
if (addr_len < SIN6_LEN_RFC2133)
return -EINVAL;
daddr = &sin6->sin6_addr;
+ if (ipv6_addr_any(daddr) &&
+ ipv6_addr_v4mapped(&np->saddr))
+ ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
+ daddr);
break;
case AF_INET:
goto do_udp_sendmsg;
diff --git a/net/key/af_key.c b/net/key/af_key.c
index f9c9ecb..d8d95b6 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -65,6 +65,10 @@
} dump;
};
+static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len,
+ xfrm_address_t *saddr, xfrm_address_t *daddr,
+ u16 *family);
+
static inline struct pfkey_sock *pfkey_sk(struct sock *sk)
{
return (struct pfkey_sock *)sk;
@@ -1135,6 +1139,7 @@
goto out;
}
+ err = -ENOBUFS;
key = ext_hdrs[SADB_EXT_KEY_AUTH - 1];
if (sa->sadb_sa_auth) {
int keysize = 0;
@@ -1146,8 +1151,10 @@
if (key)
keysize = (key->sadb_key_bits + 7) / 8;
x->aalg = kmalloc(sizeof(*x->aalg) + keysize, GFP_KERNEL);
- if (!x->aalg)
+ if (!x->aalg) {
+ err = -ENOMEM;
goto out;
+ }
strcpy(x->aalg->alg_name, a->name);
x->aalg->alg_key_len = 0;
if (key) {
@@ -1166,8 +1173,10 @@
goto out;
}
x->calg = kmalloc(sizeof(*x->calg), GFP_KERNEL);
- if (!x->calg)
+ if (!x->calg) {
+ err = -ENOMEM;
goto out;
+ }
strcpy(x->calg->alg_name, a->name);
x->props.calgo = sa->sadb_sa_encrypt;
} else {
@@ -1181,8 +1190,10 @@
if (key)
keysize = (key->sadb_key_bits + 7) / 8;
x->ealg = kmalloc(sizeof(*x->ealg) + keysize, GFP_KERNEL);
- if (!x->ealg)
+ if (!x->ealg) {
+ err = -ENOMEM;
goto out;
+ }
strcpy(x->ealg->alg_name, a->name);
x->ealg->alg_key_len = 0;
if (key) {
@@ -1227,8 +1238,10 @@
struct xfrm_encap_tmpl *natt;
x->encap = kmalloc(sizeof(*x->encap), GFP_KERNEL);
- if (!x->encap)
+ if (!x->encap) {
+ err = -ENOMEM;
goto out;
+ }
natt = x->encap;
n_type = ext_hdrs[SADB_X_EXT_NAT_T_TYPE-1];
@@ -1913,19 +1926,14 @@
/* addresses present only in tunnel mode */
if (t->mode == XFRM_MODE_TUNNEL) {
- u8 *sa = (u8 *) (rq + 1);
- int family, socklen;
+ int err;
- family = pfkey_sockaddr_extract((struct sockaddr *)sa,
- &t->saddr);
- if (!family)
- return -EINVAL;
-
- socklen = pfkey_sockaddr_len(family);
- if (pfkey_sockaddr_extract((struct sockaddr *)(sa + socklen),
- &t->id.daddr) != family)
- return -EINVAL;
- t->encap_family = family;
+ err = parse_sockaddr_pair(
+ (struct sockaddr *)(rq + 1),
+ rq->sadb_x_ipsecrequest_len - sizeof(*rq),
+ &t->saddr, &t->id.daddr, &t->encap_family);
+ if (err)
+ return err;
} else
t->encap_family = xp->family;
@@ -1945,7 +1953,11 @@
if (pol->sadb_x_policy_len * 8 < sizeof(struct sadb_x_policy))
return -EINVAL;
- while (len >= sizeof(struct sadb_x_ipsecrequest)) {
+ while (len >= sizeof(*rq)) {
+ if (len < rq->sadb_x_ipsecrequest_len ||
+ rq->sadb_x_ipsecrequest_len < sizeof(*rq))
+ return -EINVAL;
+
if ((err = parse_ipsecrequest(xp, rq)) < 0)
return err;
len -= rq->sadb_x_ipsecrequest_len;
@@ -2408,7 +2420,6 @@
return err;
}
-#ifdef CONFIG_NET_KEY_MIGRATE
static int pfkey_sockaddr_pair_size(sa_family_t family)
{
return PFKEY_ALIGN8(pfkey_sockaddr_len(family) * 2);
@@ -2420,7 +2431,7 @@
{
int af, socklen;
- if (ext_len < pfkey_sockaddr_pair_size(sa->sa_family))
+ if (ext_len < 2 || ext_len < pfkey_sockaddr_pair_size(sa->sa_family))
return -EINVAL;
af = pfkey_sockaddr_extract(sa, saddr);
@@ -2436,6 +2447,7 @@
return 0;
}
+#ifdef CONFIG_NET_KEY_MIGRATE
static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
struct xfrm_migrate *m)
{
@@ -2443,13 +2455,14 @@
struct sadb_x_ipsecrequest *rq2;
int mode;
- if (len <= sizeof(struct sadb_x_ipsecrequest) ||
- len < rq1->sadb_x_ipsecrequest_len)
+ if (len < sizeof(*rq1) ||
+ len < rq1->sadb_x_ipsecrequest_len ||
+ rq1->sadb_x_ipsecrequest_len < sizeof(*rq1))
return -EINVAL;
/* old endoints */
err = parse_sockaddr_pair((struct sockaddr *)(rq1 + 1),
- rq1->sadb_x_ipsecrequest_len,
+ rq1->sadb_x_ipsecrequest_len - sizeof(*rq1),
&m->old_saddr, &m->old_daddr,
&m->old_family);
if (err)
@@ -2458,13 +2471,14 @@
rq2 = (struct sadb_x_ipsecrequest *)((u8 *)rq1 + rq1->sadb_x_ipsecrequest_len);
len -= rq1->sadb_x_ipsecrequest_len;
- if (len <= sizeof(struct sadb_x_ipsecrequest) ||
- len < rq2->sadb_x_ipsecrequest_len)
+ if (len <= sizeof(*rq2) ||
+ len < rq2->sadb_x_ipsecrequest_len ||
+ rq2->sadb_x_ipsecrequest_len < sizeof(*rq2))
return -EINVAL;
/* new endpoints */
err = parse_sockaddr_pair((struct sockaddr *)(rq2 + 1),
- rq2->sadb_x_ipsecrequest_len,
+ rq2->sadb_x_ipsecrequest_len - sizeof(*rq2),
&m->new_saddr, &m->new_daddr,
&m->new_family);
if (err)
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index e702cb95..3bce651 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -278,6 +278,55 @@
}
EXPORT_SYMBOL_GPL(l2tp_session_find);
+/* Like l2tp_session_find() but takes a reference on the returned session.
+ * Optionally calls session->ref() too if do_ref is true.
+ */
+struct l2tp_session *l2tp_session_get(struct net *net,
+ struct l2tp_tunnel *tunnel,
+ u32 session_id, bool do_ref)
+{
+ struct hlist_head *session_list;
+ struct l2tp_session *session;
+
+ if (!tunnel) {
+ struct l2tp_net *pn = l2tp_pernet(net);
+
+ session_list = l2tp_session_id_hash_2(pn, session_id);
+
+ rcu_read_lock_bh();
+ hlist_for_each_entry_rcu(session, session_list, global_hlist) {
+ if (session->session_id == session_id) {
+ l2tp_session_inc_refcount(session);
+ if (do_ref && session->ref)
+ session->ref(session);
+ rcu_read_unlock_bh();
+
+ return session;
+ }
+ }
+ rcu_read_unlock_bh();
+
+ return NULL;
+ }
+
+ session_list = l2tp_session_id_hash(tunnel, session_id);
+ read_lock_bh(&tunnel->hlist_lock);
+ hlist_for_each_entry(session, session_list, hlist) {
+ if (session->session_id == session_id) {
+ l2tp_session_inc_refcount(session);
+ if (do_ref && session->ref)
+ session->ref(session);
+ read_unlock_bh(&tunnel->hlist_lock);
+
+ return session;
+ }
+ }
+ read_unlock_bh(&tunnel->hlist_lock);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(l2tp_session_get);
+
struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
bool do_ref)
{
@@ -307,7 +356,8 @@
/* Lookup a session by interface name.
* This is very inefficient but is only used by management interfaces.
*/
-struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
+struct l2tp_session *l2tp_session_get_by_ifname(struct net *net, char *ifname,
+ bool do_ref)
{
struct l2tp_net *pn = l2tp_pernet(net);
int hash;
@@ -317,7 +367,11 @@
for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) {
if (!strcmp(session->ifname, ifname)) {
+ l2tp_session_inc_refcount(session);
+ if (do_ref && session->ref)
+ session->ref(session);
rcu_read_unlock_bh();
+
return session;
}
}
@@ -327,7 +381,49 @@
return NULL;
}
-EXPORT_SYMBOL_GPL(l2tp_session_find_by_ifname);
+EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname);
+
+static int l2tp_session_add_to_tunnel(struct l2tp_tunnel *tunnel,
+ struct l2tp_session *session)
+{
+ struct l2tp_session *session_walk;
+ struct hlist_head *g_head;
+ struct hlist_head *head;
+ struct l2tp_net *pn;
+
+ head = l2tp_session_id_hash(tunnel, session->session_id);
+
+ write_lock_bh(&tunnel->hlist_lock);
+ hlist_for_each_entry(session_walk, head, hlist)
+ if (session_walk->session_id == session->session_id)
+ goto exist;
+
+ if (tunnel->version == L2TP_HDR_VER_3) {
+ pn = l2tp_pernet(tunnel->l2tp_net);
+ g_head = l2tp_session_id_hash_2(l2tp_pernet(tunnel->l2tp_net),
+ session->session_id);
+
+ spin_lock_bh(&pn->l2tp_session_hlist_lock);
+ hlist_for_each_entry(session_walk, g_head, global_hlist)
+ if (session_walk->session_id == session->session_id)
+ goto exist_glob;
+
+ hlist_add_head_rcu(&session->global_hlist, g_head);
+ spin_unlock_bh(&pn->l2tp_session_hlist_lock);
+ }
+
+ hlist_add_head(&session->hlist, head);
+ write_unlock_bh(&tunnel->hlist_lock);
+
+ return 0;
+
+exist_glob:
+ spin_unlock_bh(&pn->l2tp_session_hlist_lock);
+exist:
+ write_unlock_bh(&tunnel->hlist_lock);
+
+ return -EEXIST;
+}
/* Lookup a tunnel by id
*/
@@ -637,6 +733,9 @@
* a data (not control) frame before coming here. Fields up to the
* session-id have already been parsed and ptr points to the data
* after the session-id.
+ *
+ * session->ref() must have been called prior to l2tp_recv_common().
+ * session->deref() will be called automatically after skb is processed.
*/
void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
unsigned char *ptr, unsigned char *optr, u16 hdrflags,
@@ -646,14 +745,6 @@
int offset;
u32 ns, nr;
- /* The ref count is increased since we now hold a pointer to
- * the session. Take care to decrement the refcnt when exiting
- * this function from now on...
- */
- l2tp_session_inc_refcount(session);
- if (session->ref)
- (*session->ref)(session);
-
/* Parse and check optional cookie */
if (session->peer_cookie_len > 0) {
if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
@@ -806,8 +897,6 @@
/* Try to dequeue as many skbs from reorder_q as we can. */
l2tp_recv_dequeue(session);
- l2tp_session_dec_refcount(session);
-
return;
discard:
@@ -816,8 +905,6 @@
if (session->deref)
(*session->deref)(session);
-
- l2tp_session_dec_refcount(session);
}
EXPORT_SYMBOL(l2tp_recv_common);
@@ -924,8 +1011,14 @@
}
/* Find the session context */
- session = l2tp_session_find(tunnel->l2tp_net, tunnel, session_id);
+ session = l2tp_session_get(tunnel->l2tp_net, tunnel, session_id, true);
if (!session || !session->recv_skb) {
+ if (session) {
+ if (session->deref)
+ session->deref(session);
+ l2tp_session_dec_refcount(session);
+ }
+
/* Not found? Pass to userspace to deal with */
l2tp_info(tunnel, L2TP_MSG_DATA,
"%s: no session found (%u/%u). Passing up.\n",
@@ -934,6 +1027,7 @@
}
l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook);
+ l2tp_session_dec_refcount(session);
return 0;
@@ -1740,6 +1834,7 @@
struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
{
struct l2tp_session *session;
+ int err;
session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL);
if (session != NULL) {
@@ -1795,6 +1890,13 @@
l2tp_session_set_header_len(session, tunnel->version);
+ err = l2tp_session_add_to_tunnel(tunnel, session);
+ if (err) {
+ kfree(session);
+
+ return ERR_PTR(err);
+ }
+
/* Bump the reference count. The session context is deleted
* only when this drops to zero.
*/
@@ -1804,28 +1906,14 @@
/* Ensure tunnel socket isn't deleted */
sock_hold(tunnel->sock);
- /* Add session to the tunnel's hash list */
- write_lock_bh(&tunnel->hlist_lock);
- hlist_add_head(&session->hlist,
- l2tp_session_id_hash(tunnel, session_id));
- write_unlock_bh(&tunnel->hlist_lock);
-
- /* And to the global session list if L2TPv3 */
- if (tunnel->version != L2TP_HDR_VER_2) {
- struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
-
- spin_lock_bh(&pn->l2tp_session_hlist_lock);
- hlist_add_head_rcu(&session->global_hlist,
- l2tp_session_id_hash_2(pn, session_id));
- spin_unlock_bh(&pn->l2tp_session_hlist_lock);
- }
-
/* Ignore management session in session count value */
if (session->session_id != 0)
atomic_inc(&l2tp_session_count);
+
+ return session;
}
- return session;
+ return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(l2tp_session_create);
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index e7233ba..0095012 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -240,12 +240,16 @@
return tunnel;
}
+struct l2tp_session *l2tp_session_get(struct net *net,
+ struct l2tp_tunnel *tunnel,
+ u32 session_id, bool do_ref);
struct l2tp_session *l2tp_session_find(struct net *net,
struct l2tp_tunnel *tunnel,
u32 session_id);
struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
bool do_ref);
-struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
+struct l2tp_session *l2tp_session_get_by_ifname(struct net *net, char *ifname,
+ bool do_ref);
struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 965f7e3..eecc64e 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -223,12 +223,6 @@
goto out;
}
- session = l2tp_session_find(net, tunnel, session_id);
- if (session) {
- rc = -EEXIST;
- goto out;
- }
-
if (cfg->ifname) {
dev = dev_get_by_name(net, cfg->ifname);
if (dev) {
@@ -242,8 +236,8 @@
session = l2tp_session_create(sizeof(*spriv), tunnel, session_id,
peer_session_id, cfg);
- if (!session) {
- rc = -ENOMEM;
+ if (IS_ERR(session)) {
+ rc = PTR_ERR(session);
goto out;
}
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 2066953..3468d56 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -143,19 +143,19 @@
}
/* Ok, this is a data packet. Lookup the session. */
- session = l2tp_session_find(net, NULL, session_id);
- if (session == NULL)
+ session = l2tp_session_get(net, NULL, session_id, true);
+ if (!session)
goto discard;
tunnel = session->tunnel;
- if (tunnel == NULL)
- goto discard;
+ if (!tunnel)
+ goto discard_sess;
/* Trace packet contents, if enabled */
if (tunnel->debug & L2TP_MSG_DATA) {
length = min(32u, skb->len);
if (!pskb_may_pull(skb, length))
- goto discard;
+ goto discard_sess;
/* Point to L2TP header */
optr = ptr = skb->data;
@@ -165,6 +165,7 @@
}
l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook);
+ l2tp_session_dec_refcount(session);
return 0;
@@ -203,6 +204,12 @@
return sk_receive_skb(sk, skb, 1);
+discard_sess:
+ if (session->deref)
+ session->deref(session);
+ l2tp_session_dec_refcount(session);
+ goto discard;
+
discard_put:
sock_put(sk);
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 26cf4dc..74d0d33 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -156,19 +156,19 @@
}
/* Ok, this is a data packet. Lookup the session. */
- session = l2tp_session_find(net, NULL, session_id);
- if (session == NULL)
+ session = l2tp_session_get(net, NULL, session_id, true);
+ if (!session)
goto discard;
tunnel = session->tunnel;
- if (tunnel == NULL)
- goto discard;
+ if (!tunnel)
+ goto discard_sess;
/* Trace packet contents, if enabled */
if (tunnel->debug & L2TP_MSG_DATA) {
length = min(32u, skb->len);
if (!pskb_may_pull(skb, length))
- goto discard;
+ goto discard_sess;
/* Point to L2TP header */
optr = ptr = skb->data;
@@ -179,6 +179,8 @@
l2tp_recv_common(session, skb, ptr, optr, 0, skb->len,
tunnel->recv_payload_hook);
+ l2tp_session_dec_refcount(session);
+
return 0;
pass_up:
@@ -216,6 +218,12 @@
return sk_receive_skb(sk, skb, 1);
+discard_sess:
+ if (session->deref)
+ session->deref(session);
+ l2tp_session_dec_refcount(session);
+ goto discard;
+
discard_put:
sock_put(sk);
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 9f66272..1ccd310 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -55,7 +55,8 @@
/* Accessed under genl lock */
static const struct l2tp_nl_cmd_ops *l2tp_nl_cmd_ops[__L2TP_PWTYPE_MAX];
-static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info)
+static struct l2tp_session *l2tp_nl_session_get(struct genl_info *info,
+ bool do_ref)
{
u32 tunnel_id;
u32 session_id;
@@ -66,14 +67,15 @@
if (info->attrs[L2TP_ATTR_IFNAME]) {
ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]);
- session = l2tp_session_find_by_ifname(net, ifname);
+ session = l2tp_session_get_by_ifname(net, ifname, do_ref);
} else if ((info->attrs[L2TP_ATTR_SESSION_ID]) &&
(info->attrs[L2TP_ATTR_CONN_ID])) {
tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
tunnel = l2tp_tunnel_find(net, tunnel_id);
if (tunnel)
- session = l2tp_session_find(net, tunnel, session_id);
+ session = l2tp_session_get(net, tunnel, session_id,
+ do_ref);
}
return session;
@@ -634,10 +636,12 @@
session_id, peer_session_id, &cfg);
if (ret >= 0) {
- session = l2tp_session_find(net, tunnel, session_id);
- if (session)
+ session = l2tp_session_get(net, tunnel, session_id, false);
+ if (session) {
ret = l2tp_session_notify(&l2tp_nl_family, info, session,
L2TP_CMD_SESSION_CREATE);
+ l2tp_session_dec_refcount(session);
+ }
}
out:
@@ -650,7 +654,7 @@
struct l2tp_session *session;
u16 pw_type;
- session = l2tp_nl_session_find(info);
+ session = l2tp_nl_session_get(info, true);
if (session == NULL) {
ret = -ENODEV;
goto out;
@@ -664,6 +668,10 @@
if (l2tp_nl_cmd_ops[pw_type] && l2tp_nl_cmd_ops[pw_type]->session_delete)
ret = (*l2tp_nl_cmd_ops[pw_type]->session_delete)(session);
+ if (session->deref)
+ session->deref(session);
+ l2tp_session_dec_refcount(session);
+
out:
return ret;
}
@@ -673,7 +681,7 @@
int ret = 0;
struct l2tp_session *session;
- session = l2tp_nl_session_find(info);
+ session = l2tp_nl_session_get(info, false);
if (session == NULL) {
ret = -ENODEV;
goto out;
@@ -708,6 +716,8 @@
ret = l2tp_session_notify(&l2tp_nl_family, info,
session, L2TP_CMD_SESSION_MODIFY);
+ l2tp_session_dec_refcount(session);
+
out:
return ret;
}
@@ -803,29 +813,34 @@
struct sk_buff *msg;
int ret;
- session = l2tp_nl_session_find(info);
+ session = l2tp_nl_session_get(info, false);
if (session == NULL) {
ret = -ENODEV;
- goto out;
+ goto err;
}
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg) {
ret = -ENOMEM;
- goto out;
+ goto err_ref;
}
ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq,
0, session, L2TP_CMD_SESSION_GET);
if (ret < 0)
- goto err_out;
+ goto err_ref_msg;
- return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
+ ret = genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
-err_out:
+ l2tp_session_dec_refcount(session);
+
+ return ret;
+
+err_ref_msg:
nlmsg_free(msg);
-
-out:
+err_ref:
+ l2tp_session_dec_refcount(session);
+err:
return ret;
}
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 1387f54..1696f1f 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -583,6 +583,7 @@
int error = 0;
u32 tunnel_id, peer_tunnel_id;
u32 session_id, peer_session_id;
+ bool drop_refcnt = false;
int ver = 2;
int fd;
@@ -684,36 +685,36 @@
if (tunnel->peer_tunnel_id == 0)
tunnel->peer_tunnel_id = peer_tunnel_id;
- /* Create session if it doesn't already exist. We handle the
- * case where a session was previously created by the netlink
- * interface by checking that the session doesn't already have
- * a socket and its tunnel socket are what we expect. If any
- * of those checks fail, return EEXIST to the caller.
- */
- session = l2tp_session_find(sock_net(sk), tunnel, session_id);
- if (session == NULL) {
- /* Default MTU must allow space for UDP/L2TP/PPP
- * headers.
- */
- cfg.mtu = cfg.mru = 1500 - PPPOL2TP_HEADER_OVERHEAD;
+ session = l2tp_session_get(sock_net(sk), tunnel, session_id, false);
+ if (session) {
+ drop_refcnt = true;
+ ps = l2tp_session_priv(session);
- /* Allocate and initialize a new session context. */
- session = l2tp_session_create(sizeof(struct pppol2tp_session),
- tunnel, session_id,
- peer_session_id, &cfg);
- if (session == NULL) {
- error = -ENOMEM;
+ /* Using a pre-existing session is fine as long as it hasn't
+ * been connected yet.
+ */
+ if (ps->sock) {
+ error = -EEXIST;
+ goto end;
+ }
+
+ /* consistency checks */
+ if (ps->tunnel_sock != tunnel->sock) {
+ error = -EEXIST;
goto end;
}
} else {
- ps = l2tp_session_priv(session);
- error = -EEXIST;
- if (ps->sock != NULL)
- goto end;
+ /* Default MTU must allow space for UDP/L2TP/PPP headers */
+ cfg.mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD;
+ cfg.mru = cfg.mtu;
- /* consistency checks */
- if (ps->tunnel_sock != tunnel->sock)
+ session = l2tp_session_create(sizeof(struct pppol2tp_session),
+ tunnel, session_id,
+ peer_session_id, &cfg);
+ if (IS_ERR(session)) {
+ error = PTR_ERR(session);
goto end;
+ }
}
/* Associate session with its PPPoL2TP socket */
@@ -778,6 +779,8 @@
session->name);
end:
+ if (drop_refcnt)
+ l2tp_session_dec_refcount(session);
release_sock(sk);
return error;
@@ -805,12 +808,6 @@
if (tunnel->sock == NULL)
goto out;
- /* Check that this session doesn't already exist */
- error = -EEXIST;
- session = l2tp_session_find(net, tunnel, session_id);
- if (session != NULL)
- goto out;
-
/* Default MTU values. */
if (cfg->mtu == 0)
cfg->mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD;
@@ -818,12 +815,13 @@
cfg->mru = cfg->mtu;
/* Allocate and initialize a new session context. */
- error = -ENOMEM;
session = l2tp_session_create(sizeof(struct pppol2tp_session),
tunnel, session_id,
peer_session_id, cfg);
- if (session == NULL)
+ if (IS_ERR(session)) {
+ error = PTR_ERR(session);
goto out;
+ }
ps = l2tp_session_priv(session);
ps->tunnel_sock = tunnel->sock;
@@ -1141,11 +1139,18 @@
if (stats.session_id != 0) {
/* resend to session ioctl handler */
struct l2tp_session *session =
- l2tp_session_find(sock_net(sk), tunnel, stats.session_id);
- if (session != NULL)
- err = pppol2tp_session_ioctl(session, cmd, arg);
- else
+ l2tp_session_get(sock_net(sk), tunnel,
+ stats.session_id, true);
+
+ if (session) {
+ err = pppol2tp_session_ioctl(session, cmd,
+ arg);
+ if (session->deref)
+ session->deref(session);
+ l2tp_session_dec_refcount(session);
+ } else {
err = -EBADR;
+ }
break;
}
#ifdef CONFIG_XFRM
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index fd6541f..07001b6 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -865,6 +865,8 @@
default:
return -EINVAL;
}
+ sdata->u.ap.req_smps = sdata->smps_mode;
+
sdata->needed_rx_chains = sdata->local->rx_chains;
mutex_lock(&local->mtx);
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index a31d307..62d13ea 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -66,6 +66,8 @@
2 + (IEEE80211_MAX_SUPP_RATES - 8) +
2 + sizeof(struct ieee80211_ht_cap) +
2 + sizeof(struct ieee80211_ht_operation) +
+ 2 + sizeof(struct ieee80211_vht_cap) +
+ 2 + sizeof(struct ieee80211_vht_operation) +
ifibss->ie_len;
presp = kzalloc(sizeof(*presp) + frame_len, GFP_KERNEL);
if (!presp)
@@ -487,14 +489,14 @@
struct beacon_data *presp, *old_presp;
struct cfg80211_bss *cbss;
const struct cfg80211_bss_ies *ies;
- u16 capability = 0;
+ u16 capability = WLAN_CAPABILITY_IBSS;
u64 tsf;
int ret = 0;
sdata_assert_lock(sdata);
if (ifibss->privacy)
- capability = WLAN_CAPABILITY_PRIVACY;
+ capability |= WLAN_CAPABILITY_PRIVACY;
cbss = cfg80211_get_bss(sdata->local->hw.wiphy, ifibss->chandef.chan,
ifibss->bssid, ifibss->ssid,
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 8d7747e..37bec0f 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -6,6 +6,7 @@
* Copyright (c) 2006 Jiri Benc <jbenc@suse.cz>
* Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
+ * Copyright (c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -1307,6 +1308,26 @@
} else if (ieee80211_is_action(mgmt->frame_control) &&
mgmt->u.action.category == WLAN_CATEGORY_VHT) {
switch (mgmt->u.action.u.vht_group_notif.action_code) {
+ case WLAN_VHT_ACTION_OPMODE_NOTIF: {
+ struct ieee80211_rx_status *status;
+ enum nl80211_band band;
+ u8 opmode;
+
+ status = IEEE80211_SKB_RXCB(skb);
+ band = status->band;
+ opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode;
+
+ mutex_lock(&local->sta_mtx);
+ sta = sta_info_get_bss(sdata, mgmt->sa);
+
+ if (sta)
+ ieee80211_vht_handle_opmode(sdata, sta,
+ opmode,
+ band);
+
+ mutex_unlock(&local->sta_mtx);
+ break;
+ }
case WLAN_VHT_ACTION_GROUPID_MGMT:
ieee80211_process_mu_groups(sdata, mgmt);
break;
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 1075ac2..2bb6899 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -908,12 +908,17 @@
supp_ht = supp_ht || sband->ht_cap.ht_supported;
supp_vht = supp_vht || sband->vht_cap.vht_supported;
- if (sband->ht_cap.ht_supported)
- local->rx_chains =
- max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs),
- local->rx_chains);
+ if (!sband->ht_cap.ht_supported)
+ continue;
/* TODO: consider VHT for RX chains, hopefully it's the same */
+ local->rx_chains =
+ max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs),
+ local->rx_chains);
+
+ /* no need to mask, SM_PS_DISABLED has all bits set */
+ sband->ht_cap.cap |= WLAN_HT_CAP_SM_PS_DISABLED <<
+ IEEE80211_HT_CAP_SM_PS_SHIFT;
}
/* if low-level driver supports AP, we also support VLAN */
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 1118c61..b2c706c 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -4486,20 +4486,20 @@
return -EOPNOTSUPP;
}
- auth_data = kzalloc(sizeof(*auth_data) + req->sae_data_len +
+ auth_data = kzalloc(sizeof(*auth_data) + req->auth_data_len +
req->ie_len, GFP_KERNEL);
if (!auth_data)
return -ENOMEM;
auth_data->bss = req->bss;
- if (req->sae_data_len >= 4) {
- __le16 *pos = (__le16 *) req->sae_data;
+ if (req->auth_data_len >= 4) {
+ __le16 *pos = (__le16 *)req->auth_data;
auth_data->sae_trans = le16_to_cpu(pos[0]);
auth_data->sae_status = le16_to_cpu(pos[1]);
- memcpy(auth_data->data, req->sae_data + 4,
- req->sae_data_len - 4);
- auth_data->data_len += req->sae_data_len - 4;
+ memcpy(auth_data->data, req->auth_data + 4,
+ req->auth_data_len - 4);
+ auth_data->data_len += req->auth_data_len - 4;
}
if (req->ie && req->ie_len) {
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index acaaf61..439e597 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1585,12 +1585,16 @@
*/
if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
!ieee80211_has_morefrags(hdr->frame_control) &&
+ !ieee80211_is_back_req(hdr->frame_control) &&
!(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
(rx->sdata->vif.type == NL80211_IFTYPE_AP ||
rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
- /* PM bit is only checked in frames where it isn't reserved,
+ /*
+ * PM bit is only checked in frames where it isn't reserved,
* in AP mode it's reserved in non-bufferable management frames
* (cf. IEEE 802.11-2012 8.2.4.1.7 Power Management field)
+ * BAR frames should be ignored as specified in
+ * IEEE 802.11-2012 10.2.1.2.
*/
(!ieee80211_is_mgmt(hdr->frame_control) ||
ieee80211_is_bufferable_mmpdu(hdr->frame_control))) {
@@ -2467,7 +2471,8 @@
if (is_multicast_ether_addr(hdr->addr1)) {
mpp_addr = hdr->addr3;
proxied_addr = mesh_hdr->eaddr1;
- } else if (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6) {
+ } else if ((mesh_hdr->flags & MESH_FLAGS_AE) ==
+ MESH_FLAGS_AE_A5_A6) {
/* has_a4 already checked in ieee80211_rx_mesh_check */
mpp_addr = hdr->addr4;
proxied_addr = mesh_hdr->eaddr2;
@@ -2918,17 +2923,10 @@
switch (mgmt->u.action.u.vht_opmode_notif.action_code) {
case WLAN_VHT_ACTION_OPMODE_NOTIF: {
- u8 opmode;
-
/* verify opmode is present */
if (len < IEEE80211_MIN_ACTION_SIZE + 2)
goto invalid;
-
- opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode;
-
- ieee80211_vht_handle_opmode(rx->sdata, rx->sta,
- opmode, status->band);
- goto handled;
+ goto queue;
}
case WLAN_VHT_ACTION_GROUPID_MGMT: {
if (len < IEEE80211_MIN_ACTION_SIZE + 25)
@@ -3949,6 +3947,7 @@
stats->last_rate = sta_stats_encode_rate(status);
stats->fragments++;
+ stats->packets++;
if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
stats->last_signal = status->signal;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 8e05032..b2c823ff 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -2148,7 +2148,7 @@
struct ieee80211_sta_rx_stats *cpurxs;
cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
- sinfo->rx_packets += cpurxs->dropped;
+ sinfo->rx_dropped_misc += cpurxs->dropped;
}
}
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 545c79a..031273a 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -3308,10 +3308,11 @@
struct ieee80211_local *local = sdata->local;
struct ieee80211_sub_if_data *sdata_iter;
enum nl80211_iftype iftype = sdata->wdev.iftype;
- int num[NUM_NL80211_IFTYPES];
struct ieee80211_chanctx *ctx;
- int num_different_channels = 0;
int total = 1;
+ struct iface_combination_params params = {
+ .radar_detect = radar_detect,
+ };
lockdep_assert_held(&local->chanctx_mtx);
@@ -3322,9 +3323,6 @@
!chandef->chan))
return -EINVAL;
- if (chandef)
- num_different_channels = 1;
-
if (WARN_ON(iftype >= NUM_NL80211_IFTYPES))
return -EINVAL;
@@ -3335,24 +3333,26 @@
return 0;
}
- memset(num, 0, sizeof(num));
+ if (chandef)
+ params.num_different_channels = 1;
if (iftype != NL80211_IFTYPE_UNSPECIFIED)
- num[iftype] = 1;
+ params.iftype_num[iftype] = 1;
list_for_each_entry(ctx, &local->chanctx_list, list) {
if (ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED)
continue;
- radar_detect |= ieee80211_chanctx_radar_detect(local, ctx);
+ params.radar_detect |=
+ ieee80211_chanctx_radar_detect(local, ctx);
if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE) {
- num_different_channels++;
+ params.num_different_channels++;
continue;
}
if (chandef && chanmode == IEEE80211_CHANCTX_SHARED &&
cfg80211_chandef_compatible(chandef,
&ctx->conf.def))
continue;
- num_different_channels++;
+ params.num_different_channels++;
}
list_for_each_entry_rcu(sdata_iter, &local->interfaces, list) {
@@ -3365,16 +3365,14 @@
local->hw.wiphy->software_iftypes & BIT(wdev_iter->iftype))
continue;
- num[wdev_iter->iftype]++;
+ params.iftype_num[wdev_iter->iftype]++;
total++;
}
- if (total == 1 && !radar_detect)
+ if (total == 1 && !params.radar_detect)
return 0;
- return cfg80211_check_combinations(local->hw.wiphy,
- num_different_channels,
- radar_detect, num);
+ return cfg80211_check_combinations(local->hw.wiphy, ¶ms);
}
static void
@@ -3390,12 +3388,10 @@
int ieee80211_max_num_channels(struct ieee80211_local *local)
{
struct ieee80211_sub_if_data *sdata;
- int num[NUM_NL80211_IFTYPES] = {};
struct ieee80211_chanctx *ctx;
- int num_different_channels = 0;
- u8 radar_detect = 0;
u32 max_num_different_channels = 1;
int err;
+ struct iface_combination_params params = {0};
lockdep_assert_held(&local->chanctx_mtx);
@@ -3403,17 +3399,17 @@
if (ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED)
continue;
- num_different_channels++;
+ params.num_different_channels++;
- radar_detect |= ieee80211_chanctx_radar_detect(local, ctx);
+ params.radar_detect |=
+ ieee80211_chanctx_radar_detect(local, ctx);
}
list_for_each_entry_rcu(sdata, &local->interfaces, list)
- num[sdata->wdev.iftype]++;
+ params.iftype_num[sdata->wdev.iftype]++;
- err = cfg80211_iter_combinations(local->hw.wiphy,
- num_different_channels, radar_detect,
- num, ieee80211_iter_max_chans,
+ err = cfg80211_iter_combinations(local->hw.wiphy, ¶ms,
+ ieee80211_iter_max_chans,
&max_num_different_channels);
if (err < 0)
return err;
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index 6832bf6..43e45bb 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -527,8 +527,10 @@
u32 changed = __ieee80211_vht_handle_opmode(sdata, sta, opmode, band);
- if (changed > 0)
+ if (changed > 0) {
+ ieee80211_recalc_min_chandef(sdata);
rate_control_rate_update(local, sband, sta, changed);
+ }
}
void ieee80211_get_vht_mask_from_cap(__le16 vht_cap,
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 42ce9bd..5c71d60 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -17,6 +17,7 @@
#include <asm/unaligned.h>
#include <net/mac80211.h>
#include <crypto/aes.h>
+#include <crypto/algapi.h>
#include "ieee80211_i.h"
#include "michael.h"
@@ -153,7 +154,7 @@
data_len = skb->len - hdrlen - MICHAEL_MIC_LEN;
key = &rx->key->conf.key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
michael_mic(key, hdr, data, data_len, mic);
- if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0)
+ if (crypto_memneq(mic, data + data_len, MICHAEL_MIC_LEN))
goto mic_fail;
/* remove Michael MIC from payload */
@@ -1047,7 +1048,7 @@
bip_aad(skb, aad);
ieee80211_aes_cmac(key->u.aes_cmac.tfm, aad,
skb->data + 24, skb->len - 24, mic);
- if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) {
+ if (crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
key->u.aes_cmac.icverrors++;
return RX_DROP_UNUSABLE;
}
@@ -1097,7 +1098,7 @@
bip_aad(skb, aad);
ieee80211_aes_cmac_256(key->u.aes_cmac.tfm, aad,
skb->data + 24, skb->len - 24, mic);
- if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) {
+ if (crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
key->u.aes_cmac.icverrors++;
return RX_DROP_UNUSABLE;
}
@@ -1201,7 +1202,7 @@
if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce,
skb->data + 24, skb->len - 24,
mic) < 0 ||
- memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) {
+ crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
key->u.aes_gmac.icverrors++;
return RX_DROP_UNUSABLE;
}
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 2c1b498..e34d3f6 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -849,10 +849,8 @@
{
unsigned int verdict = NF_DROP;
- if (IP_VS_FWD_METHOD(cp) != 0) {
- pr_err("shouldn't reach here, because the box is on the "
- "half connection in the tun/dr module.\n");
- }
+ if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
+ goto ignore_cp;
/* Ensure the checksum is correct */
if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
@@ -886,6 +884,8 @@
ip_vs_notrack(skb);
else
ip_vs_update_conntrack(skb, cp, 0);
+
+ignore_cp:
verdict = NF_ACCEPT;
out:
@@ -1385,8 +1385,11 @@
*/
cp = pp->conn_out_get(ipvs, af, skb, &iph);
- if (likely(cp))
+ if (likely(cp)) {
+ if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
+ goto ignore_cp;
return handle_response(af, skb, pd, cp, &iph, hooknum);
+ }
/* Check for real-server-started requests */
if (atomic_read(&ipvs->conn_out_counter)) {
@@ -1444,9 +1447,15 @@
}
}
}
+
+out:
IP_VS_DBG_PKT(12, af, pp, skb, iph.off,
"ip_vs_out: packet continues traversal as normal");
return NF_ACCEPT;
+
+ignore_cp:
+ __ip_vs_conn_put(cp);
+ goto out;
}
/*
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 6bd1508..19b89b1 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1022,7 +1022,7 @@
static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
{
- INIT_DELAYED_WORK(&gc_work->dwork, gc_worker);
+ INIT_DEFERRABLE_WORK(&gc_work->dwork, gc_worker);
gc_work->next_gc_run = HZ;
gc_work->exiting = false;
}
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 2754045..04111c1 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -45,6 +45,8 @@
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <net/netfilter/nf_conntrack_labels.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
+#include <net/netfilter/nf_conntrack_synproxy.h>
#ifdef CONFIG_NF_NAT_NEEDED
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_l4proto.h>
@@ -1800,6 +1802,8 @@
nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
nf_ct_labels_ext_add(ct);
+ nfct_seqadj_ext_add(ct);
+ nfct_synproxy_ext_add(ct);
/* we must add conntrack extensions before confirmation. */
ct->status |= IPS_CONFIRMED;
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index c3fc14e..3a8dc39 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1630,8 +1630,6 @@
ports[ports_c++] = SIP_PORT;
for (i = 0; i < ports_c; i++) {
- memset(&sip[i], 0, sizeof(sip[i]));
-
nf_ct_helper_init(&sip[4 * i], AF_INET, IPPROTO_UDP, "sip",
SIP_PORT, ports[i], i, sip_exp_policy,
SIP_EXPECT_MAX,
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 3dca90d..ffb9e8a 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -13,7 +13,6 @@
/* Internal logging interface, which relies on the real
LOG target modules */
-#define NF_LOG_PREFIXLEN 128
#define NFLOGGER_NAME_LEN 64
static struct nf_logger __rcu *loggers[NFPROTO_NUMPROTO][NF_LOG_TYPE_MAX] __read_mostly;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index e5194f6f..778fcdb 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -3637,10 +3637,18 @@
goto err5;
}
+ if (set->size &&
+ !atomic_add_unless(&set->nelems, 1, set->size + set->ndeact)) {
+ err = -ENFILE;
+ goto err6;
+ }
+
nft_trans_elem(trans) = elem;
list_add_tail(&trans->list, &ctx->net->nft.commit_list);
return 0;
+err6:
+ set->ops->remove(set, &elem);
err5:
kfree(trans);
err4:
@@ -3687,15 +3695,9 @@
return -EBUSY;
nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
- if (set->size &&
- !atomic_add_unless(&set->nelems, 1, set->size + set->ndeact))
- return -ENFILE;
-
err = nft_add_set_elem(&ctx, set, attr, nlh->nlmsg_flags);
- if (err < 0) {
- atomic_dec(&set->nelems);
+ if (err < 0)
break;
- }
}
return err;
}
diff --git a/net/netfilter/nft_log.c b/net/netfilter/nft_log.c
index 1b01404..c7704e9 100644
--- a/net/netfilter/nft_log.c
+++ b/net/netfilter/nft_log.c
@@ -38,7 +38,8 @@
static const struct nla_policy nft_log_policy[NFTA_LOG_MAX + 1] = {
[NFTA_LOG_GROUP] = { .type = NLA_U16 },
- [NFTA_LOG_PREFIX] = { .type = NLA_STRING },
+ [NFTA_LOG_PREFIX] = { .type = NLA_STRING,
+ .len = NF_LOG_PREFIXLEN - 1 },
[NFTA_LOG_SNAPLEN] = { .type = NLA_U32 },
[NFTA_LOG_QTHRESHOLD] = { .type = NLA_U16 },
[NFTA_LOG_LEVEL] = { .type = NLA_U32 },
diff --git a/net/netfilter/xt_HARDIDLETIMER.c b/net/netfilter/xt_HARDIDLETIMER.c
index fc0b83f..c6f70da9 100644
--- a/net/netfilter/xt_HARDIDLETIMER.c
+++ b/net/netfilter/xt_HARDIDLETIMER.c
@@ -72,7 +72,7 @@
{
char iface_msg[NLMSG_MAX_SIZE];
char state_msg[NLMSG_MAX_SIZE];
- static const char * const envp[] = { iface_msg, state_msg, NULL };
+ char *envp[] = { iface_msg, state_msg, NULL };
int res;
res = snprintf(iface_msg, NLMSG_MAX_SIZE, "INTERFACE=%s",
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index 04a1b97..14e3d85 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -76,6 +76,7 @@
bool send_nl_msg;
bool active;
uid_t uid;
+ bool suspend_time_valid;
};
static LIST_HEAD(idletimer_tg_list);
@@ -245,8 +246,13 @@
switch (pm_event) {
case PM_SUSPEND_PREPARE:
get_monotonic_boottime(&timer->last_suspend_time);
+ timer->suspend_time_valid = true;
break;
case PM_POST_SUSPEND:
+ if (!timer->suspend_time_valid)
+ break;
+ timer->suspend_time_valid = false;
+
spin_lock_bh(×tamp_lock);
if (!timer->active) {
spin_unlock_bh(×tamp_lock);
@@ -281,7 +287,7 @@
{
int ret;
- info->timer = kmalloc(sizeof(*info->timer), GFP_KERNEL);
+ info->timer = kzalloc(sizeof(*info->timer), GFP_KERNEL);
if (!info->timer) {
ret = -ENOMEM;
goto out;
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 872db2d..119e51f 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -104,7 +104,7 @@
tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
tcp_hdrlen = tcph->doff * 4;
- if (len < tcp_hdrlen)
+ if (len < tcp_hdrlen || tcp_hdrlen < sizeof(struct tcphdr))
return -1;
if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
@@ -152,6 +152,10 @@
if (len > tcp_hdrlen)
return 0;
+ /* tcph->doff has 4 bits, do not wrap it to 0 */
+ if (tcp_hdrlen >= 15 * 4)
+ return 0;
+
/*
* MSS Option not found ?! add it..
*/
diff --git a/net/nfc/core.c b/net/nfc/core.c
index 122bb81..5cf33df 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -982,6 +982,8 @@
kfree(se);
}
+ ida_simple_remove(&nfc_index_ida, dev->idx);
+
kfree(dev);
}
@@ -1056,6 +1058,7 @@
int tx_headroom, int tx_tailroom)
{
struct nfc_dev *dev;
+ int rc;
if (!ops->start_poll || !ops->stop_poll || !ops->activate_target ||
!ops->deactivate_target || !ops->im_transceive)
@@ -1068,6 +1071,15 @@
if (!dev)
return NULL;
+ rc = ida_simple_get(&nfc_index_ida, 0, 0, GFP_KERNEL);
+ if (rc < 0)
+ goto err_free_dev;
+ dev->idx = rc;
+
+ dev->dev.class = &nfc_class;
+ dev_set_name(&dev->dev, "nfc%d", dev->idx);
+ device_initialize(&dev->dev);
+
dev->ops = ops;
dev->supported_protocols = supported_protocols;
dev->tx_headroom = tx_headroom;
@@ -1090,6 +1102,11 @@
}
return dev;
+
+err_free_dev:
+ kfree(dev);
+
+ return ERR_PTR(rc);
}
EXPORT_SYMBOL(nfc_allocate_device);
@@ -1104,14 +1121,6 @@
pr_debug("dev_name=%s\n", dev_name(&dev->dev));
- dev->idx = ida_simple_get(&nfc_index_ida, 0, 0, GFP_KERNEL);
- if (dev->idx < 0)
- return dev->idx;
-
- dev->dev.class = &nfc_class;
- dev_set_name(&dev->dev, "nfc%d", dev->idx);
- device_initialize(&dev->dev);
-
mutex_lock(&nfc_devlist_mutex);
nfc_devlist_generation++;
rc = device_add(&dev->dev);
@@ -1149,12 +1158,10 @@
*/
void nfc_unregister_device(struct nfc_dev *dev)
{
- int rc, id;
+ int rc;
pr_debug("dev_name=%s\n", dev_name(&dev->dev));
- id = dev->idx;
-
if (dev->rfkill) {
rfkill_unregister(dev->rfkill);
rfkill_destroy(dev->rfkill);
@@ -1179,8 +1186,6 @@
nfc_devlist_generation++;
device_del(&dev->dev);
mutex_unlock(&nfc_devlist_mutex);
-
- ida_simple_remove(&nfc_index_ida, id);
}
EXPORT_SYMBOL(nfc_unregister_device);
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index b9edf5f..e31dea1 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -76,7 +76,8 @@
struct sockaddr_nfc_llcp llcp_addr;
int len, ret = 0;
- if (!addr || addr->sa_family != AF_NFC)
+ if (!addr || alen < offsetofend(struct sockaddr, sa_family) ||
+ addr->sa_family != AF_NFC)
return -EINVAL;
pr_debug("sk %p addr %p family %d\n", sk, addr, addr->sa_family);
@@ -150,7 +151,8 @@
struct sockaddr_nfc_llcp llcp_addr;
int len, ret = 0;
- if (!addr || addr->sa_family != AF_NFC)
+ if (!addr || alen < offsetofend(struct sockaddr, sa_family) ||
+ addr->sa_family != AF_NFC)
return -EINVAL;
pr_debug("sk %p addr %p family %d\n", sk, addr, addr->sa_family);
@@ -661,8 +663,7 @@
pr_debug("sock %p sk %p flags 0x%x\n", sock, sk, flags);
- if (!addr || len < sizeof(struct sockaddr_nfc) ||
- addr->sa_family != AF_NFC)
+ if (!addr || len < sizeof(*addr) || addr->sa_family != AF_NFC)
return -EINVAL;
if (addr->service_name_len == 0 && addr->dsap == 0)
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 61fff42..85a3d9e 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -1173,8 +1173,7 @@
return ndev;
free_nfc:
- kfree(ndev->nfc_dev);
-
+ nfc_free_device(ndev->nfc_dev);
free_nci:
kfree(ndev);
return NULL;
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index ea023b3..102c681 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -910,7 +910,9 @@
u32 device_idx, target_idx, protocol;
int rc;
- if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
+ if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
+ !info->attrs[NFC_ATTR_TARGET_INDEX] ||
+ !info->attrs[NFC_ATTR_PROTOCOLS])
return -EINVAL;
device_idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index e0b23fb..525b624 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -129,7 +129,7 @@
if (!sock) /* module unload or netns delete in progress */
return -ENETUNREACH;
- ret = sock_create_kern(sock_net(sock->sk), sock->sk->sk_family,
+ ret = sock_create_lite(sock->sk->sk_family,
sock->sk->sk_type, sock->sk->sk_protocol,
&new_sock);
if (ret)
diff --git a/net/rmnet_data/rmnet_data_config.c b/net/rmnet_data/rmnet_data_config.c
index 2a30d55..50d9b51 100644
--- a/net/rmnet_data/rmnet_data_config.c
+++ b/net/rmnet_data/rmnet_data_config.c
@@ -1173,6 +1173,7 @@
{
int i, j;
struct net_device *vndev;
+ struct rmnet_phys_ep_config *config;
struct rmnet_logical_ep_conf_s *cfg;
struct rmnet_free_vnd_work *vnd_work;
@@ -1228,6 +1229,16 @@
kfree(vnd_work);
}
+ config = _rmnet_get_phys_ep_config(dev);
+
+ if (config) {
+ cfg = &config->local_ep;
+
+ if (cfg && cfg->refcount)
+ rmnet_unset_logical_endpoint_config
+ (cfg->egress_dev, RMNET_LOCAL_LOGICAL_ENDPOINT);
+ }
+
/* Clear the mappings on the phys ep */
trace_rmnet_unregister_cb_clear_lepcs(dev);
rmnet_unset_logical_endpoint_config(dev, RMNET_LOCAL_LOGICAL_ENDPOINT);
diff --git a/net/rmnet_data/rmnet_data_handlers.c b/net/rmnet_data/rmnet_data_handlers.c
index 46fdf5a..35be79e 100644
--- a/net/rmnet_data/rmnet_data_handlers.c
+++ b/net/rmnet_data/rmnet_data_handlers.c
@@ -552,12 +552,9 @@
LOGD("headroom of %d bytes", required_headroom);
if (skb_headroom(skb) < required_headroom) {
- if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL)) {
- LOGD("Failed to add headroom of %d bytes",
- required_headroom);
- kfree_skb(skb);
- return 1;
- }
+ LOGE("Not enough headroom for %d bytes", required_headroom);
+ kfree_skb(skb);
+ return 1;
}
if ((config->egress_data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV3) ||
diff --git a/net/rxrpc/key.c b/net/rxrpc/key.c
index 18c737a..7fc3407 100644
--- a/net/rxrpc/key.c
+++ b/net/rxrpc/key.c
@@ -217,7 +217,7 @@
unsigned int *_toklen)
{
const __be32 *xdr = *_xdr;
- unsigned int toklen = *_toklen, n_parts, loop, tmp;
+ unsigned int toklen = *_toklen, n_parts, loop, tmp, paddedlen;
/* there must be at least one name, and at least #names+1 length
* words */
@@ -247,16 +247,16 @@
toklen -= 4;
if (tmp <= 0 || tmp > AFSTOKEN_STRING_MAX)
return -EINVAL;
- if (tmp > toklen)
+ paddedlen = (tmp + 3) & ~3;
+ if (paddedlen > toklen)
return -EINVAL;
princ->name_parts[loop] = kmalloc(tmp + 1, GFP_KERNEL);
if (!princ->name_parts[loop])
return -ENOMEM;
memcpy(princ->name_parts[loop], xdr, tmp);
princ->name_parts[loop][tmp] = 0;
- tmp = (tmp + 3) & ~3;
- toklen -= tmp;
- xdr += tmp >> 2;
+ toklen -= paddedlen;
+ xdr += paddedlen >> 2;
}
if (toklen < 4)
@@ -265,16 +265,16 @@
toklen -= 4;
if (tmp <= 0 || tmp > AFSTOKEN_K5_REALM_MAX)
return -EINVAL;
- if (tmp > toklen)
+ paddedlen = (tmp + 3) & ~3;
+ if (paddedlen > toklen)
return -EINVAL;
princ->realm = kmalloc(tmp + 1, GFP_KERNEL);
if (!princ->realm)
return -ENOMEM;
memcpy(princ->realm, xdr, tmp);
princ->realm[tmp] = 0;
- tmp = (tmp + 3) & ~3;
- toklen -= tmp;
- xdr += tmp >> 2;
+ toklen -= paddedlen;
+ xdr += paddedlen >> 2;
_debug("%s/...@%s", princ->name_parts[0], princ->realm);
@@ -293,7 +293,7 @@
unsigned int *_toklen)
{
const __be32 *xdr = *_xdr;
- unsigned int toklen = *_toklen, len;
+ unsigned int toklen = *_toklen, len, paddedlen;
/* there must be at least one tag and one length word */
if (toklen <= 8)
@@ -307,15 +307,17 @@
toklen -= 8;
if (len > max_data_size)
return -EINVAL;
+ paddedlen = (len + 3) & ~3;
+ if (paddedlen > toklen)
+ return -EINVAL;
td->data_len = len;
if (len > 0) {
td->data = kmemdup(xdr, len, GFP_KERNEL);
if (!td->data)
return -ENOMEM;
- len = (len + 3) & ~3;
- toklen -= len;
- xdr += len >> 2;
+ toklen -= paddedlen;
+ xdr += paddedlen >> 2;
}
_debug("tag %x len %x", td->tag, td->data_len);
@@ -387,7 +389,7 @@
const __be32 **_xdr, unsigned int *_toklen)
{
const __be32 *xdr = *_xdr;
- unsigned int toklen = *_toklen, len;
+ unsigned int toklen = *_toklen, len, paddedlen;
/* there must be at least one length word */
if (toklen <= 4)
@@ -399,6 +401,9 @@
toklen -= 4;
if (len > AFSTOKEN_K5_TIX_MAX)
return -EINVAL;
+ paddedlen = (len + 3) & ~3;
+ if (paddedlen > toklen)
+ return -EINVAL;
*_tktlen = len;
_debug("ticket len %u", len);
@@ -407,9 +412,8 @@
*_ticket = kmemdup(xdr, len, GFP_KERNEL);
if (!*_ticket)
return -ENOMEM;
- len = (len + 3) & ~3;
- toklen -= len;
- xdr += len >> 2;
+ toklen -= paddedlen;
+ xdr += paddedlen >> 2;
}
*_xdr = xdr;
@@ -552,7 +556,7 @@
{
const __be32 *xdr = prep->data, *token;
const char *cp;
- unsigned int len, tmp, loop, ntoken, toklen, sec_ix;
+ unsigned int len, paddedlen, loop, ntoken, toklen, sec_ix;
size_t datalen = prep->datalen;
int ret;
@@ -578,22 +582,21 @@
if (len < 1 || len > AFSTOKEN_CELL_MAX)
goto not_xdr;
datalen -= 4;
- tmp = (len + 3) & ~3;
- if (tmp > datalen)
+ paddedlen = (len + 3) & ~3;
+ if (paddedlen > datalen)
goto not_xdr;
cp = (const char *) xdr;
for (loop = 0; loop < len; loop++)
if (!isprint(cp[loop]))
goto not_xdr;
- if (len < tmp)
- for (; loop < tmp; loop++)
- if (cp[loop])
- goto not_xdr;
+ for (; loop < paddedlen; loop++)
+ if (cp[loop])
+ goto not_xdr;
_debug("cellname: [%u/%u] '%*.*s'",
- len, tmp, len, len, (const char *) xdr);
- datalen -= tmp;
- xdr += tmp >> 2;
+ len, paddedlen, len, len, (const char *) xdr);
+ datalen -= paddedlen;
+ xdr += paddedlen >> 2;
/* get the token count */
if (datalen < 12)
@@ -614,10 +617,11 @@
sec_ix = ntohl(*xdr);
datalen -= 4;
_debug("token: [%x/%zx] %x", toklen, datalen, sec_ix);
- if (toklen < 20 || toklen > datalen)
+ paddedlen = (toklen + 3) & ~3;
+ if (toklen < 20 || toklen > datalen || paddedlen > datalen)
goto not_xdr;
- datalen -= (toklen + 3) & ~3;
- xdr += (toklen + 3) >> 2;
+ datalen -= paddedlen;
+ xdr += paddedlen >> 2;
} while (--loop > 0);
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 744cfe6c5..daf6624 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1008,6 +1008,9 @@
return sch;
}
+ /* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
+ if (ops->destroy)
+ ops->destroy(sch);
err_out3:
dev_put(dev);
kfree((char *) sch - sch->padded);
@@ -1206,12 +1209,7 @@
qdisc_len = q->q.qlen;
if (q->ops->change(q, &req.attr))
pr_err("%s(): qdisc change failed", __func__);
- } else {
- WARN_ONCE(1, "%s(): called on queue which does %s",
- __func__, "not support change() operation");
}
- } else {
- WARN_ONCE(1, "%s(): called on bad queue", __func__);
}
return qdisc_len;
}
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index e3d0458..2fae8b5 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -627,7 +627,9 @@
q->hhf_arrays[i] = hhf_zalloc(HHF_ARRAYS_LEN *
sizeof(u32));
if (!q->hhf_arrays[i]) {
- hhf_destroy(sch);
+ /* Note: hhf_destroy() will be called
+ * by our caller.
+ */
return -ENOMEM;
}
}
@@ -638,7 +640,9 @@
q->hhf_valid_bits[i] = hhf_zalloc(HHF_ARRAYS_LEN /
BITS_PER_BYTE);
if (!q->hhf_valid_bits[i]) {
- hhf_destroy(sch);
+ /* Note: hhf_destroy() will be called
+ * by our caller.
+ */
return -ENOMEM;
}
}
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index 2bc8d7f..20b7f16 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -52,7 +52,7 @@
/* pre-allocate qdiscs, attachment can't fail */
priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
GFP_KERNEL);
- if (priv->qdiscs == NULL)
+ if (!priv->qdiscs)
return -ENOMEM;
for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
@@ -60,18 +60,14 @@
qdisc = qdisc_create_dflt(dev_queue, get_default_qdisc_ops(dev, ntx),
TC_H_MAKE(TC_H_MAJ(sch->handle),
TC_H_MIN(ntx + 1)));
- if (qdisc == NULL)
- goto err;
+ if (!qdisc)
+ return -ENOMEM;
priv->qdiscs[ntx] = qdisc;
qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
}
sch->flags |= TCQ_F_MQROOT;
return 0;
-
-err:
- mq_destroy(sch);
- return -ENOMEM;
}
static void mq_attach(struct Qdisc *sch)
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index b5c502c..9226834 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -118,10 +118,8 @@
/* pre-allocate qdisc, attachment can't fail */
priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
GFP_KERNEL);
- if (priv->qdiscs == NULL) {
- err = -ENOMEM;
- goto err;
- }
+ if (!priv->qdiscs)
+ return -ENOMEM;
for (i = 0; i < dev->num_tx_queues; i++) {
dev_queue = netdev_get_tx_queue(dev, i);
@@ -129,10 +127,9 @@
get_default_qdisc_ops(dev, i),
TC_H_MAKE(TC_H_MAJ(sch->handle),
TC_H_MIN(i + 1)));
- if (qdisc == NULL) {
- err = -ENOMEM;
- goto err;
- }
+ if (!qdisc)
+ return -ENOMEM;
+
priv->qdiscs[i] = qdisc;
qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
}
@@ -148,7 +145,7 @@
priv->hw_owned = 1;
err = dev->netdev_ops->ndo_setup_tc(dev, sch->handle, 0, &tc);
if (err)
- goto err;
+ return err;
} else {
netdev_set_num_tc(dev, qopt->num_tc);
for (i = 0; i < qopt->num_tc; i++)
@@ -162,10 +159,6 @@
sch->flags |= TCQ_F_MQROOT;
return 0;
-
-err:
- mqprio_destroy(sch);
- return err;
}
static void mqprio_attach(struct Qdisc *sch)
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 7f195ed..bc5e995 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -742,9 +742,10 @@
q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor);
q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows);
if (!q->ht || !q->slots) {
- sfq_destroy(sch);
+ /* Note: sfq_destroy() will be called by our caller */
return -ENOMEM;
}
+
for (i = 0; i < q->divisor; i++)
q->ht[i] = SFQ_EMPTY_SLOT;
diff --git a/net/sctp/offload.c b/net/sctp/offload.c
index 7e869d0..4f5a2b5 100644
--- a/net/sctp/offload.c
+++ b/net/sctp/offload.c
@@ -68,7 +68,7 @@
goto out;
}
- segs = skb_segment(skb, features | NETIF_F_HW_CSUM);
+ segs = skb_segment(skb, features | NETIF_F_HW_CSUM | NETIF_F_SG);
if (IS_ERR(segs))
goto out;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 14346dc..9647e31 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -235,8 +235,12 @@
sctp_assoc_t id)
{
struct sctp_association *addr_asoc = NULL, *id_asoc = NULL;
- struct sctp_transport *transport;
+ struct sctp_af *af = sctp_get_af_specific(addr->ss_family);
union sctp_addr *laddr = (union sctp_addr *)addr;
+ struct sctp_transport *transport;
+
+ if (!af || sctp_verify_addr(sk, laddr, af->sockaddr_len))
+ return NULL;
addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
laddr,
@@ -4456,13 +4460,13 @@
for (head = sctp_ep_hashtable; hash < sctp_ep_hashsize;
hash++, head++) {
- read_lock(&head->lock);
+ read_lock_bh(&head->lock);
sctp_for_each_hentry(epb, &head->chain) {
err = cb(sctp_ep(epb), p);
if (err)
break;
}
- read_unlock(&head->lock);
+ read_unlock_bh(&head->lock);
}
return err;
@@ -4502,9 +4506,8 @@
if (err)
return err;
- sctp_transport_get_idx(net, &hti, pos);
- obj = sctp_transport_get_next(net, &hti);
- for (; obj && !IS_ERR(obj); obj = sctp_transport_get_next(net, &hti)) {
+ obj = sctp_transport_get_idx(net, &hti, pos + 1);
+ for (; !IS_ERR_OR_NULL(obj); obj = sctp_transport_get_next(net, &hti)) {
struct sctp_transport *transport = obj;
if (!sctp_transport_hold(transport))
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 90115ce..79aec90 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -34,6 +34,7 @@
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
+#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/err.h>
@@ -927,7 +928,7 @@
if (ret)
goto out_err;
- if (memcmp(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) {
+ if (crypto_memneq(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) {
ret = GSS_S_BAD_SIG;
goto out_err;
}
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 6b109a8..02462d6 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -169,7 +169,7 @@
/* Send response, if necessary */
if (respond && (mtyp == DSC_REQ_MSG)) {
- rskb = tipc_buf_acquire(MAX_H_SIZE);
+ rskb = tipc_buf_acquire(MAX_H_SIZE, GFP_ATOMIC);
if (!rskb)
return;
tipc_disc_init_msg(net, rskb, DSC_RESP_MSG, bearer);
@@ -278,7 +278,7 @@
req = kmalloc(sizeof(*req), GFP_ATOMIC);
if (!req)
return -ENOMEM;
- req->buf = tipc_buf_acquire(MAX_H_SIZE);
+ req->buf = tipc_buf_acquire(MAX_H_SIZE, GFP_ATOMIC);
if (!req->buf) {
kfree(req);
return -ENOMEM;
diff --git a/net/tipc/link.c b/net/tipc/link.c
index bda89bf..4e8647a 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1395,7 +1395,7 @@
msg_set_seqno(hdr, seqno++);
pktlen = msg_size(hdr);
msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
- tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE);
+ tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
if (!tnlskb) {
pr_warn("%sunable to send packet\n", link_co_err);
return;
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 17201aa..56ea0ad 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -58,12 +58,12 @@
* NOTE: Headroom is reserved to allow prepending of a data link header.
* There may also be unrequested tailroom present at the buffer's end.
*/
-struct sk_buff *tipc_buf_acquire(u32 size)
+struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
{
struct sk_buff *skb;
unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;
- skb = alloc_skb_fclone(buf_size, GFP_ATOMIC);
+ skb = alloc_skb_fclone(buf_size, gfp);
if (skb) {
skb_reserve(skb, BUF_HEADROOM);
skb_put(skb, size);
@@ -95,7 +95,7 @@
struct tipc_msg *msg;
struct sk_buff *buf;
- buf = tipc_buf_acquire(hdr_sz + data_sz);
+ buf = tipc_buf_acquire(hdr_sz + data_sz, GFP_ATOMIC);
if (unlikely(!buf))
return NULL;
@@ -261,7 +261,7 @@
/* No fragmentation needed? */
if (likely(msz <= pktmax)) {
- skb = tipc_buf_acquire(msz);
+ skb = tipc_buf_acquire(msz, GFP_KERNEL);
if (unlikely(!skb))
return -ENOMEM;
skb_orphan(skb);
@@ -282,7 +282,7 @@
msg_set_importance(&pkthdr, msg_importance(mhdr));
/* Prepare first fragment */
- skb = tipc_buf_acquire(pktmax);
+ skb = tipc_buf_acquire(pktmax, GFP_KERNEL);
if (!skb)
return -ENOMEM;
skb_orphan(skb);
@@ -313,7 +313,7 @@
pktsz = drem + INT_H_SIZE;
else
pktsz = pktmax;
- skb = tipc_buf_acquire(pktsz);
+ skb = tipc_buf_acquire(pktsz, GFP_KERNEL);
if (!skb) {
rc = -ENOMEM;
goto error;
@@ -448,7 +448,7 @@
if (msz > (max / 2))
return false;
- _skb = tipc_buf_acquire(max);
+ _skb = tipc_buf_acquire(max, GFP_ATOMIC);
if (!_skb)
return false;
@@ -496,7 +496,7 @@
/* Never return SHORT header; expand by replacing buffer if necessary */
if (msg_short(hdr)) {
- *skb = tipc_buf_acquire(BASIC_H_SIZE + dlen);
+ *skb = tipc_buf_acquire(BASIC_H_SIZE + dlen, GFP_ATOMIC);
if (!*skb)
goto exit;
memcpy((*skb)->data + BASIC_H_SIZE, msg_data(hdr), dlen);
@@ -508,7 +508,7 @@
}
if (skb_cloned(_skb) &&
- pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_KERNEL))
+ pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC))
goto exit;
/* Now reverse the concerned fields */
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 50a7398..6c0455c 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -820,7 +820,7 @@
return (msg_user(hdr) == LINK_PROTOCOL) && (msg_type(hdr) == RESET_MSG);
}
-struct sk_buff *tipc_buf_acquire(u32 size);
+struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp);
bool tipc_msg_validate(struct sk_buff *skb);
bool tipc_msg_reverse(u32 own_addr, struct sk_buff **skb, int err);
void tipc_msg_init(u32 own_addr, struct tipc_msg *m, u32 user, u32 type,
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index c1cfd92..23f8899 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -69,7 +69,7 @@
u32 dest)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
- struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size);
+ struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size, GFP_ATOMIC);
struct tipc_msg *msg;
if (buf != NULL) {
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 28bf4fe..ab8a2d5 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -110,6 +110,10 @@
char addr_string[16];
tn->own_addr = addr;
+
+ /* Ensure that the new address is visible before we reinit. */
+ smp_mb();
+
tipc_named_reinit(net);
tipc_sk_reinit(net);
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 9d2f4c2..2775332 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -263,6 +263,11 @@
write_lock_bh(&n->lock);
}
+static void tipc_node_write_unlock_fast(struct tipc_node *n)
+{
+ write_unlock_bh(&n->lock);
+}
+
static void tipc_node_write_unlock(struct tipc_node *n)
{
struct net *net = n->net;
@@ -417,7 +422,7 @@
}
tipc_node_write_lock(n);
list_add_tail(subscr, &n->publ_list);
- tipc_node_write_unlock(n);
+ tipc_node_write_unlock_fast(n);
tipc_node_put(n);
}
@@ -435,7 +440,7 @@
}
tipc_node_write_lock(n);
list_del_init(subscr);
- tipc_node_write_unlock(n);
+ tipc_node_write_unlock_fast(n);
tipc_node_put(n);
}
diff --git a/net/tipc/server.c b/net/tipc/server.c
index 215849c..f89c0c2 100644
--- a/net/tipc/server.c
+++ b/net/tipc/server.c
@@ -91,7 +91,8 @@
static void tipc_conn_kref_release(struct kref *kref)
{
struct tipc_conn *con = container_of(kref, struct tipc_conn, kref);
- struct sockaddr_tipc *saddr = con->server->saddr;
+ struct tipc_server *s = con->server;
+ struct sockaddr_tipc *saddr = s->saddr;
struct socket *sock = con->sock;
struct sock *sk;
@@ -106,6 +107,11 @@
tipc_sock_release(con);
sock_release(sock);
con->sock = NULL;
+
+ spin_lock_bh(&s->idr_lock);
+ idr_remove(&s->conn_idr, con->conid);
+ s->idr_in_use--;
+ spin_unlock_bh(&s->idr_lock);
}
tipc_clean_outqueues(con);
@@ -128,8 +134,10 @@
spin_lock_bh(&s->idr_lock);
con = idr_find(&s->conn_idr, conid);
- if (con)
+ if (con && test_bit(CF_CONNECTED, &con->flags))
conn_get(con);
+ else
+ con = NULL;
spin_unlock_bh(&s->idr_lock);
return con;
}
@@ -198,15 +206,8 @@
static void tipc_close_conn(struct tipc_conn *con)
{
- struct tipc_server *s = con->server;
-
if (test_and_clear_bit(CF_CONNECTED, &con->flags)) {
- spin_lock_bh(&s->idr_lock);
- idr_remove(&s->conn_idr, con->conid);
- s->idr_in_use--;
- spin_unlock_bh(&s->idr_lock);
-
/* We shouldn't flush pending works as we may be in the
* thread. In fact the races with pending rx/tx work structs
* are harmless for us here as we have already deleted this
@@ -458,6 +459,11 @@
if (!con)
return -EINVAL;
+ if (!test_bit(CF_CONNECTED, &con->flags)) {
+ conn_put(con);
+ return 0;
+ }
+
e = tipc_alloc_entry(data, len);
if (!e) {
conn_put(con);
@@ -471,12 +477,8 @@
list_add_tail(&e->list, &con->outqueue);
spin_unlock_bh(&con->outqueue_lock);
- if (test_bit(CF_CONNECTED, &con->flags)) {
- if (!queue_work(s->send_wq, &con->swork))
- conn_put(con);
- } else {
+ if (!queue_work(s->send_wq, &con->swork))
conn_put(con);
- }
return 0;
}
@@ -500,7 +502,7 @@
int ret;
spin_lock_bh(&con->outqueue_lock);
- while (1) {
+ while (test_bit(CF_CONNECTED, &con->flags)) {
e = list_entry(con->outqueue.next, struct outqueue_entry,
list);
if ((struct list_head *) e == &con->outqueue)
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 41f0138..25bc5c3 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -335,8 +335,6 @@
INIT_LIST_HEAD(&tsk->publications);
msg = &tsk->phdr;
tn = net_generic(sock_net(sk), tipc_net_id);
- tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
- NAMED_H_SIZE, 0);
/* Finish initializing socket data structures */
sock->ops = ops;
@@ -346,6 +344,13 @@
pr_warn("Socket create failed; port number exhausted\n");
return -EINVAL;
}
+
+ /* Ensure tsk is visible before we read own_addr. */
+ smp_mb();
+
+ tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
+ NAMED_H_SIZE, 0);
+
msg_set_origport(msg, tsk->portid);
setup_timer(&sk->sk_timer, tipc_sk_timeout, (unsigned long)tsk);
sk->sk_backlog_rcv = tipc_backlog_rcv;
@@ -2264,24 +2269,27 @@
void tipc_sk_reinit(struct net *net)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
- const struct bucket_table *tbl;
- struct rhash_head *pos;
+ struct rhashtable_iter iter;
struct tipc_sock *tsk;
struct tipc_msg *msg;
- int i;
- rcu_read_lock();
- tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
- for (i = 0; i < tbl->size; i++) {
- rht_for_each_entry_rcu(tsk, pos, tbl, i, node) {
+ rhashtable_walk_enter(&tn->sk_rht, &iter);
+
+ do {
+ tsk = ERR_PTR(rhashtable_walk_start(&iter));
+ if (tsk)
+ continue;
+
+ while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
spin_lock_bh(&tsk->sk.sk_lock.slock);
msg = &tsk->phdr;
msg_set_prevnode(msg, tn->own_addr);
msg_set_orignode(msg, tn->own_addr);
spin_unlock_bh(&tsk->sk.sk_lock.slock);
}
- }
- rcu_read_unlock();
+
+ rhashtable_walk_stop(&iter);
+ } while (tsk == ERR_PTR(-EAGAIN));
}
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 0dd0224..9d94e65 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -54,6 +54,8 @@
static void tipc_subscrp_delete(struct tipc_subscription *sub);
static void tipc_subscrb_put(struct tipc_subscriber *subscriber);
+static void tipc_subscrp_put(struct tipc_subscription *subscription);
+static void tipc_subscrp_get(struct tipc_subscription *subscription);
/**
* htohl - convert value to endianness used by destination
@@ -123,6 +125,7 @@
{
struct tipc_name_seq seq;
+ tipc_subscrp_get(sub);
tipc_subscrp_convert_seq(&sub->evt.s.seq, sub->swap, &seq);
if (!tipc_subscrp_check_overlap(&seq, found_lower, found_upper))
return;
@@ -132,30 +135,23 @@
tipc_subscrp_send_event(sub, found_lower, found_upper, event, port_ref,
node);
+ tipc_subscrp_put(sub);
}
static void tipc_subscrp_timeout(unsigned long data)
{
struct tipc_subscription *sub = (struct tipc_subscription *)data;
- struct tipc_subscriber *subscriber = sub->subscriber;
/* Notify subscriber of timeout */
tipc_subscrp_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper,
TIPC_SUBSCR_TIMEOUT, 0, 0);
- spin_lock_bh(&subscriber->lock);
- tipc_subscrp_delete(sub);
- spin_unlock_bh(&subscriber->lock);
-
- tipc_subscrb_put(subscriber);
+ tipc_subscrp_put(sub);
}
static void tipc_subscrb_kref_release(struct kref *kref)
{
- struct tipc_subscriber *subcriber = container_of(kref,
- struct tipc_subscriber, kref);
-
- kfree(subcriber);
+ kfree(container_of(kref,struct tipc_subscriber, kref));
}
static void tipc_subscrb_put(struct tipc_subscriber *subscriber)
@@ -168,6 +164,59 @@
kref_get(&subscriber->kref);
}
+static void tipc_subscrp_kref_release(struct kref *kref)
+{
+ struct tipc_subscription *sub = container_of(kref,
+ struct tipc_subscription,
+ kref);
+ struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
+ struct tipc_subscriber *subscriber = sub->subscriber;
+
+ spin_lock_bh(&subscriber->lock);
+ tipc_nametbl_unsubscribe(sub);
+ list_del(&sub->subscrp_list);
+ atomic_dec(&tn->subscription_count);
+ spin_unlock_bh(&subscriber->lock);
+ kfree(sub);
+ tipc_subscrb_put(subscriber);
+}
+
+static void tipc_subscrp_put(struct tipc_subscription *subscription)
+{
+ kref_put(&subscription->kref, tipc_subscrp_kref_release);
+}
+
+static void tipc_subscrp_get(struct tipc_subscription *subscription)
+{
+ kref_get(&subscription->kref);
+}
+
+/* tipc_subscrb_subscrp_delete - delete a specific subscription or all
+ * subscriptions for a given subscriber.
+ */
+static void tipc_subscrb_subscrp_delete(struct tipc_subscriber *subscriber,
+ struct tipc_subscr *s)
+{
+ struct list_head *subscription_list = &subscriber->subscrp_list;
+ struct tipc_subscription *sub, *temp;
+
+ spin_lock_bh(&subscriber->lock);
+ list_for_each_entry_safe(sub, temp, subscription_list, subscrp_list) {
+ if (s && memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr)))
+ continue;
+
+ tipc_subscrp_get(sub);
+ spin_unlock_bh(&subscriber->lock);
+ tipc_subscrp_delete(sub);
+ tipc_subscrp_put(sub);
+ spin_lock_bh(&subscriber->lock);
+
+ if (s)
+ break;
+ }
+ spin_unlock_bh(&subscriber->lock);
+}
+
static struct tipc_subscriber *tipc_subscrb_create(int conid)
{
struct tipc_subscriber *subscriber;
@@ -177,8 +226,8 @@
pr_warn("Subscriber rejected, no memory\n");
return NULL;
}
- kref_init(&subscriber->kref);
INIT_LIST_HEAD(&subscriber->subscrp_list);
+ kref_init(&subscriber->kref);
subscriber->conid = conid;
spin_lock_init(&subscriber->lock);
@@ -187,55 +236,22 @@
static void tipc_subscrb_delete(struct tipc_subscriber *subscriber)
{
- struct tipc_subscription *sub, *temp;
- u32 timeout;
-
- spin_lock_bh(&subscriber->lock);
- /* Destroy any existing subscriptions for subscriber */
- list_for_each_entry_safe(sub, temp, &subscriber->subscrp_list,
- subscrp_list) {
- timeout = htohl(sub->evt.s.timeout, sub->swap);
- if ((timeout == TIPC_WAIT_FOREVER) || del_timer(&sub->timer)) {
- tipc_subscrp_delete(sub);
- tipc_subscrb_put(subscriber);
- }
- }
- spin_unlock_bh(&subscriber->lock);
-
+ tipc_subscrb_subscrp_delete(subscriber, NULL);
tipc_subscrb_put(subscriber);
}
static void tipc_subscrp_delete(struct tipc_subscription *sub)
{
- struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
+ u32 timeout = htohl(sub->evt.s.timeout, sub->swap);
- tipc_nametbl_unsubscribe(sub);
- list_del(&sub->subscrp_list);
- kfree(sub);
- atomic_dec(&tn->subscription_count);
+ if (timeout == TIPC_WAIT_FOREVER || del_timer(&sub->timer))
+ tipc_subscrp_put(sub);
}
static void tipc_subscrp_cancel(struct tipc_subscr *s,
struct tipc_subscriber *subscriber)
{
- struct tipc_subscription *sub, *temp;
- u32 timeout;
-
- spin_lock_bh(&subscriber->lock);
- /* Find first matching subscription, exit if not found */
- list_for_each_entry_safe(sub, temp, &subscriber->subscrp_list,
- subscrp_list) {
- if (!memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) {
- timeout = htohl(sub->evt.s.timeout, sub->swap);
- if ((timeout == TIPC_WAIT_FOREVER) ||
- del_timer(&sub->timer)) {
- tipc_subscrp_delete(sub);
- tipc_subscrb_put(subscriber);
- }
- break;
- }
- }
- spin_unlock_bh(&subscriber->lock);
+ tipc_subscrb_subscrp_delete(subscriber, s);
}
static struct tipc_subscription *tipc_subscrp_create(struct net *net,
@@ -272,6 +288,7 @@
sub->swap = swap;
memcpy(&sub->evt.s, s, sizeof(*s));
atomic_inc(&tn->subscription_count);
+ kref_init(&sub->kref);
return sub;
}
@@ -288,17 +305,16 @@
spin_lock_bh(&subscriber->lock);
list_add(&sub->subscrp_list, &subscriber->subscrp_list);
- tipc_subscrb_get(subscriber);
sub->subscriber = subscriber;
tipc_nametbl_subscribe(sub);
+ tipc_subscrb_get(subscriber);
spin_unlock_bh(&subscriber->lock);
- timeout = htohl(sub->evt.s.timeout, swap);
- if (timeout == TIPC_WAIT_FOREVER)
- return;
-
setup_timer(&sub->timer, tipc_subscrp_timeout, (unsigned long)sub);
- mod_timer(&sub->timer, jiffies + msecs_to_jiffies(timeout));
+ timeout = htohl(sub->evt.s.timeout, swap);
+
+ if (timeout != TIPC_WAIT_FOREVER)
+ mod_timer(&sub->timer, jiffies + msecs_to_jiffies(timeout));
}
/* Handle one termination request for the subscriber */
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
index be60103..ffdc214 100644
--- a/net/tipc/subscr.h
+++ b/net/tipc/subscr.h
@@ -57,6 +57,7 @@
* @evt: template for events generated by subscription
*/
struct tipc_subscription {
+ struct kref kref;
struct tipc_subscriber *subscriber;
struct net *net;
struct timer_list timer;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 459577e..a3df5e1 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -998,7 +998,8 @@
struct path path = { NULL, NULL };
err = -EINVAL;
- if (sunaddr->sun_family != AF_UNIX)
+ if (addr_len < offsetofend(struct sockaddr_un, sun_family) ||
+ sunaddr->sun_family != AF_UNIX)
goto out;
if (addr_len == sizeof(short)) {
@@ -1109,6 +1110,10 @@
unsigned int hash;
int err;
+ err = -EINVAL;
+ if (alen < offsetofend(struct sockaddr, sa_family))
+ goto out;
+
if (addr->sa_family != AF_UNSPEC) {
err = unix_mkname(sunaddr, alen, &hash);
if (err < 0)
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 5f5867f..cf7063a 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -220,15 +220,7 @@
enum cfg80211_event_type type;
union {
- struct {
- u8 bssid[ETH_ALEN];
- const u8 *req_ie;
- const u8 *resp_ie;
- size_t req_ie_len;
- size_t resp_ie_len;
- struct cfg80211_bss *bss;
- int status; /* -1 = failed; 0..65535 = status code */
- } cr;
+ struct cfg80211_connect_resp_params cr;
struct {
const u8 *req_ie;
const u8 *resp_ie;
@@ -346,7 +338,7 @@
const u8 *ssid, int ssid_len,
const u8 *ie, int ie_len,
const u8 *key, int key_len, int key_idx,
- const u8 *sae_data, int sae_data_len);
+ const u8 *auth_data, int auth_data_len);
int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
struct net_device *dev,
struct ieee80211_channel *chan,
@@ -384,11 +376,9 @@
struct cfg80211_connect_params *connect,
struct cfg80211_cached_keys *connkeys,
const u8 *prev_bssid);
-void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
- const u8 *req_ie, size_t req_ie_len,
- const u8 *resp_ie, size_t resp_ie_len,
- int status, bool wextev,
- struct cfg80211_bss *bss);
+void __cfg80211_connect_result(struct net_device *dev,
+ struct cfg80211_connect_resp_params *params,
+ bool wextev);
void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
size_t ie_len, u16 reason, bool from_ap);
int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
@@ -477,7 +467,7 @@
u32 *mask);
int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
- u32 beacon_int);
+ enum nl80211_iftype iftype, u32 beacon_int);
void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev,
enum nl80211_iftype iftype, int num);
diff --git a/net/wireless/db.txt b/net/wireless/db.txt
index a2dff71..413deff 100644
--- a/net/wireless/db.txt
+++ b/net/wireless/db.txt
@@ -538,6 +538,12 @@
(5490 - 5730 @ 160), (24), DFS
(5735 - 5835 @ 80), (30)
+country GI: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
country GL: DFS-ETSI
(2402 - 2482 @ 40), (20)
(5170 - 5250 @ 80), (23), AUTO-BW
@@ -678,10 +684,6 @@
(5170 - 5330 @ 160), (23)
(5735 - 5835 @ 80), (30)
-country IR:
- (2402 - 2482 @ 40), (20)
- (5735 - 5835 @ 80), (30)
-
country IS: DFS-ETSI
(2402 - 2482 @ 40), (20)
(5170 - 5250 @ 80), (23), AUTO-BW
@@ -764,19 +766,12 @@
(2402 - 2482 @ 40), (20)
(5170 - 5250 @ 80), (20), AUTO-BW
(5250 - 5330 @ 80), (20), DFS, AUTO-BW
- (5490 - 5710 @ 160), (30), DFS
+ (5490 - 5730 @ 160), (30), DFS
(5735 - 5835 @ 80), (30)
# 60 GHz band channels 1-4,
# ref: http://www.law.go.kr/%ED%96%89%EC%A0%95%EA%B7%9C%EC%B9%99/%EB%AC%B4%EC%84%A0%EC%84%A4%EB%B9%84%EA%B7%9C%EC%B9%99
(57240 - 65880 @ 2160), (43)
-country KP: DFS-ETSI
- (2402 - 2482 @ 40), (20)
- (5170 - 5250 @ 80), (20)
- (5250 - 5330 @ 80), (20), DFS
- (5490 - 5630 @ 80), (30), DFS
- (5735 - 5815 @ 80), (30)
-
country KW: DFS-ETSI
(2402 - 2482 @ 40), (20)
(5170 - 5250 @ 80), (23), AUTO-BW
@@ -1022,7 +1017,7 @@
(2402 - 2482 @ 40), (20)
(5170 - 5250 @ 80), (23), AUTO-BW
(5250 - 5330 @ 80), (23), DFS, AUTO-BW
- (5490 - 5710 @ 160), (30), DFS
+ (5490 - 5730 @ 160), (30), DFS
(5735 - 5835 @ 80), (33)
country NG: DFS-ETSI
@@ -1338,9 +1333,6 @@
(5250 - 5330 @ 20), (23), DFS
(5735 - 5835 @ 20), (30)
-country SY:
- (2402 - 2482 @ 40), (20)
-
country TC: DFS-FCC
(2402 - 2482 @ 40), (20)
(5170 - 5250 @ 80), (24), AUTO-BW
@@ -1426,7 +1418,7 @@
country US: DFS-FCC
(2402 - 2472 @ 40), (30)
- (5170 - 5250 @ 80), (24), AUTO-BW
+ (5170 - 5250 @ 80), (30), AUTO-BW
(5250 - 5330 @ 80), (24), DFS, AUTO-BW
(5490 - 5730 @ 160), (24), DFS
(5735 - 5835 @ 80), (30)
@@ -1467,7 +1459,7 @@
(5490 - 5710 @ 160), (30), DFS
country VE: DFS-FCC
- (2402 - 2482 @ 40), (30)
+ (2402 - 2482 @ 40), (20)
(5170 - 5250 @ 80), (23), AUTO-BW
(5250 - 5330 @ 80), (23), DFS, AUTO-BW
(5735 - 5835 @ 80), (30)
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 76775a2..5499e9f 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -26,9 +26,16 @@
struct wiphy *wiphy = wdev->wiphy;
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
- u8 *ie = mgmt->u.assoc_resp.variable;
- int ieoffs = offsetof(struct ieee80211_mgmt, u.assoc_resp.variable);
- u16 status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
+ struct cfg80211_connect_resp_params cr;
+
+ memset(&cr, 0, sizeof(cr));
+ cr.status = (int)le16_to_cpu(mgmt->u.assoc_resp.status_code);
+ cr.bssid = mgmt->bssid;
+ cr.bss = bss;
+ cr.resp_ie = mgmt->u.assoc_resp.variable;
+ cr.resp_ie_len =
+ len - offsetof(struct ieee80211_mgmt, u.assoc_resp.variable);
+ cr.timeout_reason = NL80211_TIMEOUT_UNSPECIFIED;
trace_cfg80211_send_rx_assoc(dev, bss);
@@ -38,7 +45,7 @@
* and got a reject -- we only try again with an assoc
* frame instead of reassoc.
*/
- if (cfg80211_sme_rx_assoc_resp(wdev, status_code)) {
+ if (cfg80211_sme_rx_assoc_resp(wdev, cr.status)) {
cfg80211_unhold_bss(bss_from_pub(bss));
cfg80211_put_bss(wiphy, bss);
return;
@@ -46,9 +53,7 @@
nl80211_send_rx_assoc(rdev, dev, buf, len, GFP_KERNEL, uapsd_queues);
/* update current_bss etc., consumes the bss reference */
- __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, ie, len - ieoffs,
- status_code,
- status_code == WLAN_STATUS_SUCCESS, bss);
+ __cfg80211_connect_result(dev, &cr, cr.status == WLAN_STATUS_SUCCESS);
}
EXPORT_SYMBOL(cfg80211_rx_assoc_resp);
@@ -216,14 +221,14 @@
const u8 *ssid, int ssid_len,
const u8 *ie, int ie_len,
const u8 *key, int key_len, int key_idx,
- const u8 *sae_data, int sae_data_len)
+ const u8 *auth_data, int auth_data_len)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_auth_request req = {
.ie = ie,
.ie_len = ie_len,
- .sae_data = sae_data,
- .sae_data_len = sae_data_len,
+ .auth_data = auth_data,
+ .auth_data_len = auth_data_len,
.auth_type = auth_type,
.key = key,
.key_len = key_len,
@@ -657,8 +662,25 @@
return err;
}
- if (!ether_addr_equal(mgmt->sa, wdev_address(wdev)))
- return -EINVAL;
+ if (!ether_addr_equal(mgmt->sa, wdev_address(wdev))) {
+ /* Allow random TA to be used with Public Action frames if the
+ * driver has indicated support for this. Otherwise, only allow
+ * the local address to be used.
+ */
+ if (!ieee80211_is_action(mgmt->frame_control) ||
+ mgmt->u.action.category != WLAN_CATEGORY_PUBLIC)
+ return -EINVAL;
+ if (!wdev->current_bss &&
+ !wiphy_ext_feature_isset(
+ &rdev->wiphy,
+ NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA))
+ return -EINVAL;
+ if (wdev->current_bss &&
+ !wiphy_ext_feature_isset(
+ &rdev->wiphy,
+ NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED))
+ return -EINVAL;
+ }
/* Transmit the Action frame as requested by user space */
return rdev_mgmt_tx(rdev, wdev, params, cookie);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 9ed6b0f..ebd9a4b 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -305,8 +305,7 @@
[NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 },
[NL80211_ATTR_PID] = { .type = NLA_U32 },
[NL80211_ATTR_4ADDR] = { .type = NLA_U8 },
- [NL80211_ATTR_PMKID] = { .type = NLA_BINARY,
- .len = WLAN_PMKID_LEN },
+ [NL80211_ATTR_PMKID] = { .len = WLAN_PMKID_LEN },
[NL80211_ATTR_DURATION] = { .type = NLA_U32 },
[NL80211_ATTR_COOKIE] = { .type = NLA_U64 },
[NL80211_ATTR_TX_RATES] = { .type = NLA_NESTED },
@@ -357,11 +356,12 @@
[NL80211_ATTR_BG_SCAN_PERIOD] = { .type = NLA_U16 },
[NL80211_ATTR_WDEV] = { .type = NLA_U64 },
[NL80211_ATTR_USER_REG_HINT_TYPE] = { .type = NLA_U32 },
- [NL80211_ATTR_SAE_DATA] = { .type = NLA_BINARY, },
+ [NL80211_ATTR_AUTH_DATA] = { .type = NLA_BINARY, },
[NL80211_ATTR_VHT_CAPABILITY] = { .len = NL80211_VHT_CAPABILITY_LEN },
[NL80211_ATTR_SCAN_FLAGS] = { .type = NLA_U32 },
[NL80211_ATTR_P2P_CTWINDOW] = { .type = NLA_U8 },
[NL80211_ATTR_P2P_OPPPS] = { .type = NLA_U8 },
+ [NL80211_ATTR_LOCAL_MESH_POWER_MODE] = {. type = NLA_U32 },
[NL80211_ATTR_ACL_POLICY] = {. type = NLA_U32 },
[NL80211_ATTR_MAC_ADDRS] = { .type = NLA_NESTED },
[NL80211_ATTR_STA_CAPABILITY] = { .type = NLA_U16 },
@@ -414,7 +414,25 @@
[NL80211_ATTR_NAN_MASTER_PREF] = { .type = NLA_U8 },
[NL80211_ATTR_NAN_DUAL] = { .type = NLA_U8 },
[NL80211_ATTR_NAN_FUNC] = { .type = NLA_NESTED },
+ [NL80211_ATTR_FILS_KEK] = { .type = NLA_BINARY,
+ .len = FILS_MAX_KEK_LEN },
+ [NL80211_ATTR_FILS_NONCES] = { .len = 2 * FILS_NONCE_LEN },
+ [NL80211_ATTR_MULTICAST_TO_UNICAST_ENABLED] = { .type = NLA_FLAG, },
[NL80211_ATTR_BSSID] = { .len = ETH_ALEN },
+ [NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI] = { .type = NLA_S8 },
+ [NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST] = {
+ .len = sizeof(struct nl80211_bss_select_rssi_adjust)
+ },
+ [NL80211_ATTR_TIMEOUT_REASON] = { .type = NLA_U32 },
+ [NL80211_ATTR_FILS_ERP_USERNAME] = { .type = NLA_BINARY,
+ .len = FILS_ERP_MAX_USERNAME_LEN },
+ [NL80211_ATTR_FILS_ERP_REALM] = { .type = NLA_BINARY,
+ .len = FILS_ERP_MAX_REALM_LEN },
+ [NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] = { .type = NLA_U16 },
+ [NL80211_ATTR_FILS_ERP_RRK] = { .type = NLA_BINARY,
+ .len = FILS_ERP_MAX_RRK_LEN },
+ [NL80211_ATTR_FILS_CACHE_ID] = { .len = 2 },
+ [NL80211_ATTR_PMK] = { .type = NLA_BINARY, .len = PMK_MAX_LEN },
};
/* policy for the key attributes */
@@ -481,7 +499,8 @@
/* policy for GTK rekey offload attributes */
static const struct nla_policy
nl80211_rekey_policy[NUM_NL80211_REKEY_DATA] = {
- [NL80211_REKEY_DATA_KEK] = { .len = NL80211_KEK_LEN },
+ [NL80211_REKEY_DATA_KEK] = { .type = NLA_BINARY,
+ .len = FILS_MAX_KEK_LEN },
[NL80211_REKEY_DATA_KCK] = { .len = NL80211_KCK_LEN },
[NL80211_REKEY_DATA_REPLAY_CTR] = { .len = NL80211_REPLAY_CTR_LEN },
};
@@ -512,7 +531,7 @@
static const struct nla_policy
nl80211_nan_func_policy[NL80211_NAN_FUNC_ATTR_MAX + 1] = {
[NL80211_NAN_FUNC_TYPE] = { .type = NLA_U8 },
- [NL80211_NAN_FUNC_SERVICE_ID] = { .type = NLA_BINARY,
+ [NL80211_NAN_FUNC_SERVICE_ID] = {
.len = NL80211_NAN_FUNC_SERVICE_ID_LEN },
[NL80211_NAN_FUNC_PUBLISH_TYPE] = { .type = NLA_U8 },
[NL80211_NAN_FUNC_PUBLISH_BCAST] = { .type = NLA_FLAG },
@@ -1060,6 +1079,10 @@
nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_REGIONS,
c->radar_detect_regions)))
goto nla_put_failure;
+ if (c->beacon_int_min_gcd &&
+ nla_put_u32(msg, NL80211_IFACE_COMB_BI_MIN_GCD,
+ c->beacon_int_min_gcd))
+ goto nla_put_failure;
nla_nest_end(msg, nl_combi);
}
@@ -1605,6 +1628,8 @@
if (rdev->wiphy.features &
NL80211_FEATURE_SUPPORTS_WMM_ADMISSION)
CMD(add_tx_ts, ADD_TX_TS);
+ CMD(set_multicast_to_unicast, SET_MULTICAST_TO_UNICAST);
+ CMD(update_connect_params, UPDATE_CONNECT_PARAMS);
}
/* add into the if now */
#undef CMD
@@ -3743,12 +3768,36 @@
if (!(rdev->wiphy.features & NL80211_FEATURE_SAE) &&
auth_type == NL80211_AUTHTYPE_SAE)
return false;
+ if (!wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_FILS_STA) &&
+ (auth_type == NL80211_AUTHTYPE_FILS_SK ||
+ auth_type == NL80211_AUTHTYPE_FILS_SK_PFS ||
+ auth_type == NL80211_AUTHTYPE_FILS_PK))
+ return false;
return true;
case NL80211_CMD_CONNECT:
+ /* SAE not supported yet */
+ if (auth_type == NL80211_AUTHTYPE_SAE)
+ return false;
+ /* FILS with SK PFS or PK not supported yet */
+ if (auth_type == NL80211_AUTHTYPE_FILS_SK_PFS ||
+ auth_type == NL80211_AUTHTYPE_FILS_PK)
+ return false;
+ if (!wiphy_ext_feature_isset(
+ &rdev->wiphy,
+ NL80211_EXT_FEATURE_FILS_SK_OFFLOAD) &&
+ auth_type == NL80211_AUTHTYPE_FILS_SK)
+ return false;
+ return true;
case NL80211_CMD_START_AP:
/* SAE not supported yet */
if (auth_type == NL80211_AUTHTYPE_SAE)
return false;
+ /* FILS not supported yet */
+ if (auth_type == NL80211_AUTHTYPE_FILS_SK ||
+ auth_type == NL80211_AUTHTYPE_FILS_SK_PFS ||
+ auth_type == NL80211_AUTHTYPE_FILS_PK)
+ return false;
return true;
default:
return false;
@@ -3790,7 +3839,8 @@
params.dtim_period =
nla_get_u32(info->attrs[NL80211_ATTR_DTIM_PERIOD]);
- err = cfg80211_validate_beacon_int(rdev, params.beacon_interval);
+ err = cfg80211_validate_beacon_int(rdev, dev->ieee80211_ptr->iftype,
+ params.beacon_interval);
if (err)
return err;
@@ -6326,6 +6376,10 @@
struct nlattr *attr1, *attr2;
int n_channels = 0, tmp1, tmp2;
+ nla_for_each_nested(attr1, freqs, tmp1)
+ if (nla_len(attr1) != sizeof(u32))
+ return 0;
+
nla_for_each_nested(attr1, freqs, tmp1) {
n_channels++;
/*
@@ -6918,6 +6972,12 @@
if (!n_plans || n_plans > wiphy->max_sched_scan_plans)
return ERR_PTR(-EINVAL);
+ if (!wiphy_ext_feature_isset(
+ wiphy, NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI) &&
+ (attrs[NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI] ||
+ attrs[NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST]))
+ return ERR_PTR(-EINVAL);
+
request = kzalloc(sizeof(*request)
+ sizeof(*request->ssids) * n_ssids
+ sizeof(*request->match_sets) * n_match_sets
@@ -7124,6 +7184,26 @@
request->delay =
nla_get_u32(attrs[NL80211_ATTR_SCHED_SCAN_DELAY]);
+ if (attrs[NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI]) {
+ request->relative_rssi = nla_get_s8(
+ attrs[NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI]);
+ request->relative_rssi_set = true;
+ }
+
+ if (request->relative_rssi_set &&
+ attrs[NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST]) {
+ struct nl80211_bss_select_rssi_adjust *rssi_adjust;
+
+ rssi_adjust = nla_data(
+ attrs[NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST]);
+ request->rssi_adjust.band = rssi_adjust->band;
+ request->rssi_adjust.delta = rssi_adjust->delta;
+ if (!is_band_valid(wiphy, request->rssi_adjust.band)) {
+ err = -EINVAL;
+ goto out_free;
+ }
+ }
+
err = nl80211_parse_sched_scan_plans(wiphy, n_plans, request, attrs);
if (err)
goto out_free;
@@ -7719,8 +7799,8 @@
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct ieee80211_channel *chan;
- const u8 *bssid, *ssid, *ie = NULL, *sae_data = NULL;
- int err, ssid_len, ie_len = 0, sae_data_len = 0;
+ const u8 *bssid, *ssid, *ie = NULL, *auth_data = NULL;
+ int err, ssid_len, ie_len = 0, auth_data_len = 0;
enum nl80211_auth_type auth_type;
struct key_parse key;
bool local_state_change;
@@ -7800,17 +7880,23 @@
if (!nl80211_valid_auth_type(rdev, auth_type, NL80211_CMD_AUTHENTICATE))
return -EINVAL;
- if (auth_type == NL80211_AUTHTYPE_SAE &&
- !info->attrs[NL80211_ATTR_SAE_DATA])
+ if ((auth_type == NL80211_AUTHTYPE_SAE ||
+ auth_type == NL80211_AUTHTYPE_FILS_SK ||
+ auth_type == NL80211_AUTHTYPE_FILS_SK_PFS ||
+ auth_type == NL80211_AUTHTYPE_FILS_PK) &&
+ !info->attrs[NL80211_ATTR_AUTH_DATA])
return -EINVAL;
- if (info->attrs[NL80211_ATTR_SAE_DATA]) {
- if (auth_type != NL80211_AUTHTYPE_SAE)
+ if (info->attrs[NL80211_ATTR_AUTH_DATA]) {
+ if (auth_type != NL80211_AUTHTYPE_SAE &&
+ auth_type != NL80211_AUTHTYPE_FILS_SK &&
+ auth_type != NL80211_AUTHTYPE_FILS_SK_PFS &&
+ auth_type != NL80211_AUTHTYPE_FILS_PK)
return -EINVAL;
- sae_data = nla_data(info->attrs[NL80211_ATTR_SAE_DATA]);
- sae_data_len = nla_len(info->attrs[NL80211_ATTR_SAE_DATA]);
+ auth_data = nla_data(info->attrs[NL80211_ATTR_AUTH_DATA]);
+ auth_data_len = nla_len(info->attrs[NL80211_ATTR_AUTH_DATA]);
/* need to include at least Auth Transaction and Status Code */
- if (sae_data_len < 4)
+ if (auth_data_len < 4)
return -EINVAL;
}
@@ -7827,7 +7913,7 @@
err = cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid,
ssid, ssid_len, ie, ie_len,
key.p.key, key.p.key_len, key.idx,
- sae_data, sae_data_len);
+ auth_data, auth_data_len);
wdev_unlock(dev->ieee80211_ptr);
return err;
}
@@ -8006,6 +8092,15 @@
req.flags |= ASSOC_REQ_USE_RRM;
}
+ if (info->attrs[NL80211_ATTR_FILS_KEK]) {
+ req.fils_kek = nla_data(info->attrs[NL80211_ATTR_FILS_KEK]);
+ req.fils_kek_len = nla_len(info->attrs[NL80211_ATTR_FILS_KEK]);
+ if (!info->attrs[NL80211_ATTR_FILS_NONCES])
+ return -EINVAL;
+ req.fils_nonces =
+ nla_data(info->attrs[NL80211_ATTR_FILS_NONCES]);
+ }
+
err = nl80211_crypto_settings(rdev, info, &req.crypto, 1);
if (!err) {
wdev_lock(dev->ieee80211_ptr);
@@ -8163,7 +8258,8 @@
ibss.beacon_interval =
nla_get_u32(info->attrs[NL80211_ATTR_BEACON_INTERVAL]);
- err = cfg80211_validate_beacon_int(rdev, ibss.beacon_interval);
+ err = cfg80211_validate_beacon_int(rdev, NL80211_IFTYPE_ADHOC,
+ ibss.beacon_interval);
if (err)
return err;
@@ -8728,6 +8824,35 @@
}
}
+ if (wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_FILS_SK_OFFLOAD) &&
+ info->attrs[NL80211_ATTR_FILS_ERP_USERNAME] &&
+ info->attrs[NL80211_ATTR_FILS_ERP_REALM] &&
+ info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] &&
+ info->attrs[NL80211_ATTR_FILS_ERP_RRK]) {
+ connect.fils_erp_username =
+ nla_data(info->attrs[NL80211_ATTR_FILS_ERP_USERNAME]);
+ connect.fils_erp_username_len =
+ nla_len(info->attrs[NL80211_ATTR_FILS_ERP_USERNAME]);
+ connect.fils_erp_realm =
+ nla_data(info->attrs[NL80211_ATTR_FILS_ERP_REALM]);
+ connect.fils_erp_realm_len =
+ nla_len(info->attrs[NL80211_ATTR_FILS_ERP_REALM]);
+ connect.fils_erp_next_seq_num =
+ nla_get_u16(
+ info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM]);
+ connect.fils_erp_rrk =
+ nla_data(info->attrs[NL80211_ATTR_FILS_ERP_RRK]);
+ connect.fils_erp_rrk_len =
+ nla_len(info->attrs[NL80211_ATTR_FILS_ERP_RRK]);
+ } else if (info->attrs[NL80211_ATTR_FILS_ERP_USERNAME] ||
+ info->attrs[NL80211_ATTR_FILS_ERP_REALM] ||
+ info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] ||
+ info->attrs[NL80211_ATTR_FILS_ERP_RRK]) {
+ kzfree(connkeys);
+ return -EINVAL;
+ }
+
wdev_lock(dev->ieee80211_ptr);
err = cfg80211_connect(rdev, dev, &connect, connkeys,
connect.prev_bssid);
@@ -8737,6 +8862,76 @@
return err;
}
+static int nl80211_update_connect_params(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct cfg80211_connect_params connect = {};
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ u32 changed = 0;
+ int ret;
+
+ if (!rdev->ops->update_connect_params)
+ return -EOPNOTSUPP;
+
+ if (info->attrs[NL80211_ATTR_IE]) {
+ if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
+ return -EINVAL;
+ connect.ie = nla_data(info->attrs[NL80211_ATTR_IE]);
+ connect.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
+ changed |= UPDATE_ASSOC_IES;
+ }
+
+ if (wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_FILS_SK_OFFLOAD) &&
+ info->attrs[NL80211_ATTR_FILS_ERP_USERNAME] &&
+ info->attrs[NL80211_ATTR_FILS_ERP_REALM] &&
+ info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] &&
+ info->attrs[NL80211_ATTR_FILS_ERP_RRK]) {
+ connect.fils_erp_username =
+ nla_data(info->attrs[NL80211_ATTR_FILS_ERP_USERNAME]);
+ connect.fils_erp_username_len =
+ nla_len(info->attrs[NL80211_ATTR_FILS_ERP_USERNAME]);
+ connect.fils_erp_realm =
+ nla_data(info->attrs[NL80211_ATTR_FILS_ERP_REALM]);
+ connect.fils_erp_realm_len =
+ nla_len(info->attrs[NL80211_ATTR_FILS_ERP_REALM]);
+ connect.fils_erp_next_seq_num =
+ nla_get_u16(
+ info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM]);
+ connect.fils_erp_rrk =
+ nla_data(info->attrs[NL80211_ATTR_FILS_ERP_RRK]);
+ connect.fils_erp_rrk_len =
+ nla_len(info->attrs[NL80211_ATTR_FILS_ERP_RRK]);
+ changed |= UPDATE_FILS_ERP_INFO;
+ } else if (info->attrs[NL80211_ATTR_FILS_ERP_USERNAME] ||
+ info->attrs[NL80211_ATTR_FILS_ERP_REALM] ||
+ info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] ||
+ info->attrs[NL80211_ATTR_FILS_ERP_RRK]) {
+ return -EINVAL;
+ }
+
+ if (info->attrs[NL80211_ATTR_AUTH_TYPE]) {
+ u32 auth_type =
+ nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]);
+ if (!nl80211_valid_auth_type(rdev, auth_type,
+ NL80211_CMD_CONNECT))
+ return -EINVAL;
+ connect.auth_type = auth_type;
+ changed |= UPDATE_AUTH_TYPE;
+ }
+
+ wdev_lock(dev->ieee80211_ptr);
+ if (!wdev->current_bss)
+ ret = -ENOLINK;
+ else
+ ret = rdev_update_connect_params(rdev, dev, &connect, changed);
+ wdev_unlock(dev->ieee80211_ptr);
+
+ return ret;
+}
+
static int nl80211_disconnect(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -8803,14 +8998,28 @@
memset(&pmksa, 0, sizeof(struct cfg80211_pmksa));
- if (!info->attrs[NL80211_ATTR_MAC])
- return -EINVAL;
-
if (!info->attrs[NL80211_ATTR_PMKID])
return -EINVAL;
pmksa.pmkid = nla_data(info->attrs[NL80211_ATTR_PMKID]);
- pmksa.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
+
+ if (info->attrs[NL80211_ATTR_MAC]) {
+ pmksa.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
+ } else if (info->attrs[NL80211_ATTR_SSID] &&
+ info->attrs[NL80211_ATTR_FILS_CACHE_ID] &&
+ (info->genlhdr->cmd == NL80211_CMD_DEL_PMKSA ||
+ info->attrs[NL80211_ATTR_PMK])) {
+ pmksa.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
+ pmksa.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);
+ pmksa.cache_id =
+ nla_data(info->attrs[NL80211_ATTR_FILS_CACHE_ID]);
+ } else {
+ return -EINVAL;
+ }
+ if (info->attrs[NL80211_ATTR_PMK]) {
+ pmksa.pmk = nla_data(info->attrs[NL80211_ATTR_PMK]);
+ pmksa.pmk_len = nla_len(info->attrs[NL80211_ATTR_PMK]);
+ }
if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT)
@@ -9428,7 +9637,9 @@
setup.beacon_interval =
nla_get_u32(info->attrs[NL80211_ATTR_BEACON_INTERVAL]);
- err = cfg80211_validate_beacon_int(rdev, setup.beacon_interval);
+ err = cfg80211_validate_beacon_int(rdev,
+ NL80211_IFTYPE_MESH_POINT,
+ setup.beacon_interval);
if (err)
return err;
}
@@ -9596,6 +9807,20 @@
if (nla_put_u32(msg, NL80211_ATTR_SCHED_SCAN_DELAY, req->delay))
return -ENOBUFS;
+ if (req->relative_rssi_set) {
+ struct nl80211_bss_select_rssi_adjust rssi_adjust;
+
+ if (nla_put_s8(msg, NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI,
+ req->relative_rssi))
+ return -ENOBUFS;
+
+ rssi_adjust.band = req->rssi_adjust.band;
+ rssi_adjust.delta = req->rssi_adjust.delta;
+ if (nla_put(msg, NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST,
+ sizeof(rssi_adjust), &rssi_adjust))
+ return -ENOBUFS;
+ }
+
freqs = nla_nest_start(msg, NL80211_ATTR_SCAN_FREQUENCIES);
if (!freqs)
return -ENOBUFS;
@@ -10384,15 +10609,27 @@
if (err)
return err;
+ if (!tb[NL80211_REKEY_DATA_KEK] || !tb[NL80211_REKEY_DATA_REPLAY_CTR] ||
+ (!wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_FILS_SK_OFFLOAD) &&
+ !wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_FILS_STA) &&
+ !tb[NL80211_REKEY_DATA_KCK]))
+ return -EINVAL;
+
if (nla_len(tb[NL80211_REKEY_DATA_REPLAY_CTR]) != NL80211_REPLAY_CTR_LEN)
return -ERANGE;
- if (nla_len(tb[NL80211_REKEY_DATA_KEK]) != NL80211_KEK_LEN)
+ if (nla_len(tb[NL80211_REKEY_DATA_KEK]) < NL80211_KEK_LEN)
return -ERANGE;
- if (nla_len(tb[NL80211_REKEY_DATA_KCK]) != NL80211_KCK_LEN)
+ if (tb[NL80211_REKEY_DATA_KCK] &&
+ nla_len(tb[NL80211_REKEY_DATA_KCK]) != NL80211_KCK_LEN)
return -ERANGE;
+ memset(&rekey_data, 0, sizeof(rekey_data));
rekey_data.kek = nla_data(tb[NL80211_REKEY_DATA_KEK]);
- rekey_data.kck = nla_data(tb[NL80211_REKEY_DATA_KCK]);
+ rekey_data.kek_len = nla_len(tb[NL80211_REKEY_DATA_KEK]);
+ if (tb[NL80211_REKEY_DATA_KCK])
+ rekey_data.kck = nla_data(tb[NL80211_REKEY_DATA_KCK]);
rekey_data.replay_ctr = nla_data(tb[NL80211_REKEY_DATA_REPLAY_CTR]);
wdev_lock(wdev);
@@ -11731,6 +11968,31 @@
return 0;
}
+static int nl80211_set_multicast_to_unicast(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ const struct nlattr *nla;
+ bool enabled;
+
+ if (netif_running(dev))
+ return -EBUSY;
+
+ if (!rdev->ops->set_multicast_to_unicast)
+ return -EOPNOTSUPP;
+
+ if (wdev->iftype != NL80211_IFTYPE_AP &&
+ wdev->iftype != NL80211_IFTYPE_P2P_GO)
+ return -EOPNOTSUPP;
+
+ nla = info->attrs[NL80211_ATTR_MULTICAST_TO_UNICAST_ENABLED];
+ enabled = nla_get_flag(nla);
+
+ return rdev_set_multicast_to_unicast(rdev, dev, enabled);
+}
+
#define NL80211_FLAG_NEED_WIPHY 0x01
#define NL80211_FLAG_NEED_NETDEV 0x02
#define NL80211_FLAG_NEED_RTNL 0x04
@@ -12184,6 +12446,14 @@
NL80211_FLAG_NEED_RTNL,
},
{
+ .cmd = NL80211_CMD_UPDATE_CONNECT_PARAMS,
+ .doit = nl80211_update_connect_params,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
.cmd = NL80211_CMD_DISCONNECT,
.doit = nl80211_disconnect,
.policy = nl80211_policy,
@@ -12604,6 +12874,14 @@
.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
+ {
+ .cmd = NL80211_CMD_SET_MULTICAST_TO_UNICAST,
+ .doit = nl80211_set_multicast_to_unicast,
+ .policy = nl80211_policy,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_NETDEV |
+ NL80211_FLAG_NEED_RTNL,
+ },
};
/* notification functions */
@@ -12933,7 +13211,7 @@
struct sk_buff *msg;
void *hdr;
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+ msg = nlmsg_new(100 + len, gfp);
if (!msg)
return;
@@ -13077,15 +13355,16 @@
}
void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
- struct net_device *netdev, const u8 *bssid,
- const u8 *req_ie, size_t req_ie_len,
- const u8 *resp_ie, size_t resp_ie_len,
- int status, gfp_t gfp)
+ struct net_device *netdev,
+ struct cfg80211_connect_resp_params *cr,
+ gfp_t gfp)
{
struct sk_buff *msg;
void *hdr;
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+ msg = nlmsg_new(100 + cr->req_ie_len + cr->resp_ie_len +
+ cr->fils_kek_len + cr->pmk_len +
+ (cr->pmkid ? WLAN_PMKID_LEN : 0), gfp);
if (!msg)
return;
@@ -13097,15 +13376,31 @@
if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
- (bssid && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid)) ||
+ (cr->bssid &&
+ nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, cr->bssid)) ||
nla_put_u16(msg, NL80211_ATTR_STATUS_CODE,
- status < 0 ? WLAN_STATUS_UNSPECIFIED_FAILURE :
- status) ||
- (status < 0 && nla_put_flag(msg, NL80211_ATTR_TIMED_OUT)) ||
- (req_ie &&
- nla_put(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie)) ||
- (resp_ie &&
- nla_put(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie)))
+ cr->status < 0 ? WLAN_STATUS_UNSPECIFIED_FAILURE :
+ cr->status) ||
+ (cr->status < 0 &&
+ (nla_put_flag(msg, NL80211_ATTR_TIMED_OUT) ||
+ nla_put_u32(msg, NL80211_ATTR_TIMEOUT_REASON,
+ cr->timeout_reason))) ||
+ (cr->req_ie &&
+ nla_put(msg, NL80211_ATTR_REQ_IE, cr->req_ie_len, cr->req_ie)) ||
+ (cr->resp_ie &&
+ nla_put(msg, NL80211_ATTR_RESP_IE, cr->resp_ie_len,
+ cr->resp_ie)) ||
+ (cr->update_erp_next_seq_num &&
+ nla_put_u16(msg, NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM,
+ cr->fils_erp_next_seq_num)) ||
+ (cr->status == WLAN_STATUS_SUCCESS &&
+ ((cr->fils_kek &&
+ nla_put(msg, NL80211_ATTR_FILS_KEK, cr->fils_kek_len,
+ cr->fils_kek)) ||
+ (cr->pmk &&
+ nla_put(msg, NL80211_ATTR_PMK, cr->pmk_len, cr->pmk)) ||
+ (cr->pmkid &&
+ nla_put(msg, NL80211_ATTR_PMKID, WLAN_PMKID_LEN, cr->pmkid)))))
goto nla_put_failure;
genlmsg_end(msg, hdr);
@@ -13127,7 +13422,7 @@
struct sk_buff *msg;
void *hdr;
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+ msg = nlmsg_new(100 + req_ie_len + resp_ie_len, gfp);
if (!msg)
return;
@@ -13164,7 +13459,7 @@
struct sk_buff *msg;
void *hdr;
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ msg = nlmsg_new(100 + ie_len, GFP_KERNEL);
if (!msg)
return;
@@ -13240,7 +13535,7 @@
trace_cfg80211_notify_new_peer_candidate(dev, addr);
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+ msg = nlmsg_new(100 + ie_len, gfp);
if (!msg)
return;
@@ -13611,7 +13906,7 @@
struct sk_buff *msg;
void *hdr;
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+ msg = nlmsg_new(100 + len, gfp);
if (!msg)
return -ENOMEM;
@@ -13655,7 +13950,7 @@
trace_cfg80211_mgmt_tx_status(wdev, cookie, ack);
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+ msg = nlmsg_new(100 + len, gfp);
if (!msg)
return;
@@ -14464,7 +14759,7 @@
if (!ft_event->target_ap)
return;
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ msg = nlmsg_new(100 + ft_event->ric_ies_len, GFP_KERNEL);
if (!msg)
return;
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 7e3821d..2a84d18 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -55,10 +55,9 @@
struct net_device *netdev,
const u8 *addr, gfp_t gfp);
void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
- struct net_device *netdev, const u8 *bssid,
- const u8 *req_ie, size_t req_ie_len,
- const u8 *resp_ie, size_t resp_ie_len,
- int status, gfp_t gfp);
+ struct net_device *netdev,
+ struct cfg80211_connect_resp_params *params,
+ gfp_t gfp);
void nl80211_send_roamed(struct cfg80211_registered_device *rdev,
struct net_device *netdev, const u8 *bssid,
const u8 *req_ie, size_t req_ie_len,
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index 11cf83c..2f42507 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -490,6 +490,18 @@
return ret;
}
+static inline int
+rdev_update_connect_params(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ struct cfg80211_connect_params *sme, u32 changed)
+{
+ int ret;
+ trace_rdev_update_connect_params(&rdev->wiphy, dev, sme, changed);
+ ret = rdev->ops->update_connect_params(&rdev->wiphy, dev, sme, changed);
+ trace_rdev_return_int(&rdev->wiphy, ret);
+ return ret;
+}
+
static inline int rdev_disconnect(struct cfg80211_registered_device *rdev,
struct net_device *dev, u16 reason_code)
{
@@ -562,6 +574,18 @@
return ret;
}
+static inline int
+rdev_set_multicast_to_unicast(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ const bool enabled)
+{
+ int ret;
+ trace_rdev_set_multicast_to_unicast(&rdev->wiphy, dev, enabled);
+ ret = rdev->ops->set_multicast_to_unicast(&rdev->wiphy, dev, enabled);
+ trace_rdev_return_int(&rdev->wiphy, ret);
+ return ret;
+}
+
static inline void rdev_rfkill_poll(struct cfg80211_registered_device *rdev)
{
trace_rdev_rfkill_poll(&rdev->wiphy);
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 8ae2e20..d7e6abc 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -34,10 +34,11 @@
CFG80211_CONN_SCAN_AGAIN,
CFG80211_CONN_AUTHENTICATE_NEXT,
CFG80211_CONN_AUTHENTICATING,
- CFG80211_CONN_AUTH_FAILED,
+ CFG80211_CONN_AUTH_FAILED_TIMEOUT,
CFG80211_CONN_ASSOCIATE_NEXT,
CFG80211_CONN_ASSOCIATING,
CFG80211_CONN_ASSOC_FAILED,
+ CFG80211_CONN_ASSOC_FAILED_TIMEOUT,
CFG80211_CONN_DEAUTH,
CFG80211_CONN_ABANDON,
CFG80211_CONN_CONNECTED,
@@ -163,7 +164,8 @@
return err;
}
-static int cfg80211_conn_do_work(struct wireless_dev *wdev)
+static int cfg80211_conn_do_work(struct wireless_dev *wdev,
+ enum nl80211_timeout_reason *treason)
{
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
struct cfg80211_connect_params *params;
@@ -194,7 +196,8 @@
NULL, 0,
params->key, params->key_len,
params->key_idx, NULL, 0);
- case CFG80211_CONN_AUTH_FAILED:
+ case CFG80211_CONN_AUTH_FAILED_TIMEOUT:
+ *treason = NL80211_TIMEOUT_AUTH;
return -ENOTCONN;
case CFG80211_CONN_ASSOCIATE_NEXT:
if (WARN_ON(!rdev->ops->assoc))
@@ -221,6 +224,9 @@
WLAN_REASON_DEAUTH_LEAVING,
false);
return err;
+ case CFG80211_CONN_ASSOC_FAILED_TIMEOUT:
+ *treason = NL80211_TIMEOUT_ASSOC;
+ /* fall through */
case CFG80211_CONN_ASSOC_FAILED:
cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid,
NULL, 0,
@@ -246,6 +252,7 @@
container_of(work, struct cfg80211_registered_device, conn_work);
struct wireless_dev *wdev;
u8 bssid_buf[ETH_ALEN], *bssid = NULL;
+ enum nl80211_timeout_reason treason;
rtnl_lock();
@@ -267,10 +274,15 @@
memcpy(bssid_buf, wdev->conn->params.bssid, ETH_ALEN);
bssid = bssid_buf;
}
- if (cfg80211_conn_do_work(wdev)) {
- __cfg80211_connect_result(
- wdev->netdev, bssid,
- NULL, 0, NULL, 0, -1, false, NULL);
+ treason = NL80211_TIMEOUT_UNSPECIFIED;
+ if (cfg80211_conn_do_work(wdev, &treason)) {
+ struct cfg80211_connect_resp_params cr;
+
+ memset(&cr, 0, sizeof(cr));
+ cr.status = -1;
+ cr.bssid = bssid;
+ cr.timeout_reason = treason;
+ __cfg80211_connect_result(wdev->netdev, &cr, false);
}
wdev_unlock(wdev);
}
@@ -373,9 +385,13 @@
wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT;
schedule_work(&rdev->conn_work);
} else if (status_code != WLAN_STATUS_SUCCESS) {
- __cfg80211_connect_result(wdev->netdev, mgmt->bssid,
- NULL, 0, NULL, 0,
- status_code, false, NULL);
+ struct cfg80211_connect_resp_params cr;
+
+ memset(&cr, 0, sizeof(cr));
+ cr.status = status_code;
+ cr.bssid = mgmt->bssid;
+ cr.timeout_reason = NL80211_TIMEOUT_UNSPECIFIED;
+ __cfg80211_connect_result(wdev->netdev, &cr, false);
} else if (wdev->conn->state == CFG80211_CONN_AUTHENTICATING) {
wdev->conn->state = CFG80211_CONN_ASSOCIATE_NEXT;
schedule_work(&rdev->conn_work);
@@ -423,7 +439,7 @@
if (!wdev->conn)
return;
- wdev->conn->state = CFG80211_CONN_AUTH_FAILED;
+ wdev->conn->state = CFG80211_CONN_AUTH_FAILED_TIMEOUT;
schedule_work(&rdev->conn_work);
}
@@ -445,7 +461,7 @@
if (!wdev->conn)
return;
- wdev->conn->state = CFG80211_CONN_ASSOC_FAILED;
+ wdev->conn->state = CFG80211_CONN_ASSOC_FAILED_TIMEOUT;
schedule_work(&rdev->conn_work);
}
@@ -587,7 +603,9 @@
/* we're good if we have a matching bss struct */
if (bss) {
- err = cfg80211_conn_do_work(wdev);
+ enum nl80211_timeout_reason treason;
+
+ err = cfg80211_conn_do_work(wdev, &treason);
cfg80211_put_bss(wdev->wiphy, bss);
} else {
/* otherwise we'll need to scan for the AP first */
@@ -681,11 +699,9 @@
*/
/* This method must consume bss one way or another */
-void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
- const u8 *req_ie, size_t req_ie_len,
- const u8 *resp_ie, size_t resp_ie_len,
- int status, bool wextev,
- struct cfg80211_bss *bss)
+void __cfg80211_connect_result(struct net_device *dev,
+ struct cfg80211_connect_resp_params *cr,
+ bool wextev)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
const u8 *country_ie;
@@ -697,48 +713,48 @@
if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION &&
wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)) {
- cfg80211_put_bss(wdev->wiphy, bss);
+ cfg80211_put_bss(wdev->wiphy, cr->bss);
return;
}
- nl80211_send_connect_result(wiphy_to_rdev(wdev->wiphy), dev,
- bssid, req_ie, req_ie_len,
- resp_ie, resp_ie_len,
- status, GFP_KERNEL);
+ nl80211_send_connect_result(wiphy_to_rdev(wdev->wiphy), dev, cr,
+ GFP_KERNEL);
#ifdef CONFIG_CFG80211_WEXT
if (wextev) {
- if (req_ie && status == WLAN_STATUS_SUCCESS) {
+ if (cr->req_ie && cr->status == WLAN_STATUS_SUCCESS) {
memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.length = req_ie_len;
- wireless_send_event(dev, IWEVASSOCREQIE, &wrqu, req_ie);
+ wrqu.data.length = cr->req_ie_len;
+ wireless_send_event(dev, IWEVASSOCREQIE, &wrqu,
+ cr->req_ie);
}
- if (resp_ie && status == WLAN_STATUS_SUCCESS) {
+ if (cr->resp_ie && cr->status == WLAN_STATUS_SUCCESS) {
memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.length = resp_ie_len;
- wireless_send_event(dev, IWEVASSOCRESPIE, &wrqu, resp_ie);
+ wrqu.data.length = cr->resp_ie_len;
+ wireless_send_event(dev, IWEVASSOCRESPIE, &wrqu,
+ cr->resp_ie);
}
memset(&wrqu, 0, sizeof(wrqu));
wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- if (bssid && status == WLAN_STATUS_SUCCESS) {
- memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN);
- memcpy(wdev->wext.prev_bssid, bssid, ETH_ALEN);
+ if (cr->bssid && cr->status == WLAN_STATUS_SUCCESS) {
+ memcpy(wrqu.ap_addr.sa_data, cr->bssid, ETH_ALEN);
+ memcpy(wdev->wext.prev_bssid, cr->bssid, ETH_ALEN);
wdev->wext.prev_bssid_valid = true;
}
wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
}
#endif
- if (!bss && (status == WLAN_STATUS_SUCCESS)) {
+ if (!cr->bss && (cr->status == WLAN_STATUS_SUCCESS)) {
WARN_ON_ONCE(!wiphy_to_rdev(wdev->wiphy)->ops->connect);
- bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
- wdev->ssid, wdev->ssid_len,
- wdev->conn_bss_type,
- IEEE80211_PRIVACY_ANY);
- if (bss)
- cfg80211_hold_bss(bss_from_pub(bss));
+ cr->bss = cfg80211_get_bss(wdev->wiphy, NULL, cr->bssid,
+ wdev->ssid, wdev->ssid_len,
+ wdev->conn_bss_type,
+ IEEE80211_PRIVACY_ANY);
+ if (cr->bss)
+ cfg80211_hold_bss(bss_from_pub(cr->bss));
}
if (wdev->current_bss) {
@@ -747,28 +763,28 @@
wdev->current_bss = NULL;
}
- if (status != WLAN_STATUS_SUCCESS) {
+ if (cr->status != WLAN_STATUS_SUCCESS) {
kzfree(wdev->connect_keys);
wdev->connect_keys = NULL;
wdev->ssid_len = 0;
- if (bss) {
- cfg80211_unhold_bss(bss_from_pub(bss));
- cfg80211_put_bss(wdev->wiphy, bss);
+ if (cr->bss) {
+ cfg80211_unhold_bss(bss_from_pub(cr->bss));
+ cfg80211_put_bss(wdev->wiphy, cr->bss);
}
cfg80211_sme_free(wdev);
return;
}
- if (WARN_ON(!bss))
+ if (WARN_ON(!cr->bss))
return;
- wdev->current_bss = bss_from_pub(bss);
+ wdev->current_bss = bss_from_pub(cr->bss);
if (!(wdev->wiphy->flags & WIPHY_FLAG_HAS_STATIC_WEP))
cfg80211_upload_connect_keys(wdev);
rcu_read_lock();
- country_ie = ieee80211_bss_get_ie(bss, WLAN_EID_COUNTRY);
+ country_ie = ieee80211_bss_get_ie(cr->bss, WLAN_EID_COUNTRY);
if (!country_ie) {
rcu_read_unlock();
return;
@@ -785,62 +801,95 @@
* - country_ie + 2, the start of the country ie data, and
* - and country_ie[1] which is the IE length
*/
- regulatory_hint_country_ie(wdev->wiphy, bss->channel->band,
+ regulatory_hint_country_ie(wdev->wiphy, cr->bss->channel->band,
country_ie + 2, country_ie[1]);
kfree(country_ie);
}
/* Consumes bss object one way or another */
-void cfg80211_connect_bss(struct net_device *dev, const u8 *bssid,
- struct cfg80211_bss *bss, const u8 *req_ie,
- size_t req_ie_len, const u8 *resp_ie,
- size_t resp_ie_len, int status, gfp_t gfp)
+void cfg80211_connect_done(struct net_device *dev,
+ struct cfg80211_connect_resp_params *params,
+ gfp_t gfp)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
struct cfg80211_event *ev;
unsigned long flags;
+ u8 *next;
- if (bss) {
+ if (params->bss) {
/* Make sure the bss entry provided by the driver is valid. */
- struct cfg80211_internal_bss *ibss = bss_from_pub(bss);
+ struct cfg80211_internal_bss *ibss = bss_from_pub(params->bss);
if (WARN_ON(list_empty(&ibss->list))) {
- cfg80211_put_bss(wdev->wiphy, bss);
+ cfg80211_put_bss(wdev->wiphy, params->bss);
return;
}
}
- ev = kzalloc(sizeof(*ev) + req_ie_len + resp_ie_len, gfp);
+ ev = kzalloc(sizeof(*ev) + (params->bssid ? ETH_ALEN : 0) +
+ params->req_ie_len + params->resp_ie_len +
+ params->fils_kek_len + params->pmk_len +
+ (params->pmkid ? WLAN_PMKID_LEN : 0), gfp);
if (!ev) {
- cfg80211_put_bss(wdev->wiphy, bss);
+ cfg80211_put_bss(wdev->wiphy, params->bss);
return;
}
ev->type = EVENT_CONNECT_RESULT;
- if (bssid)
- memcpy(ev->cr.bssid, bssid, ETH_ALEN);
- if (req_ie_len) {
- ev->cr.req_ie = ((u8 *)ev) + sizeof(*ev);
- ev->cr.req_ie_len = req_ie_len;
- memcpy((void *)ev->cr.req_ie, req_ie, req_ie_len);
+ next = ((u8 *)ev) + sizeof(*ev);
+ if (params->bssid) {
+ ev->cr.bssid = next;
+ memcpy((void *)ev->cr.bssid, params->bssid, ETH_ALEN);
+ next += ETH_ALEN;
}
- if (resp_ie_len) {
- ev->cr.resp_ie = ((u8 *)ev) + sizeof(*ev) + req_ie_len;
- ev->cr.resp_ie_len = resp_ie_len;
- memcpy((void *)ev->cr.resp_ie, resp_ie, resp_ie_len);
+ if (params->req_ie_len) {
+ ev->cr.req_ie = next;
+ ev->cr.req_ie_len = params->req_ie_len;
+ memcpy((void *)ev->cr.req_ie, params->req_ie,
+ params->req_ie_len);
+ next += params->req_ie_len;
}
- if (bss)
- cfg80211_hold_bss(bss_from_pub(bss));
- ev->cr.bss = bss;
- ev->cr.status = status;
+ if (params->resp_ie_len) {
+ ev->cr.resp_ie = next;
+ ev->cr.resp_ie_len = params->resp_ie_len;
+ memcpy((void *)ev->cr.resp_ie, params->resp_ie,
+ params->resp_ie_len);
+ next += params->resp_ie_len;
+ }
+ if (params->fils_kek_len) {
+ ev->cr.fils_kek = next;
+ ev->cr.fils_kek_len = params->fils_kek_len;
+ memcpy((void *)ev->cr.fils_kek, params->fils_kek,
+ params->fils_kek_len);
+ next += params->fils_kek_len;
+ }
+ if (params->pmk_len) {
+ ev->cr.pmk = next;
+ ev->cr.pmk_len = params->pmk_len;
+ memcpy((void *)ev->cr.pmk, params->pmk, params->pmk_len);
+ next += params->pmk_len;
+ }
+ if (params->pmkid) {
+ ev->cr.pmkid = next;
+ memcpy((void *)ev->cr.pmkid, params->pmkid, WLAN_PMKID_LEN);
+ next += WLAN_PMKID_LEN;
+ }
+ ev->cr.update_erp_next_seq_num = params->update_erp_next_seq_num;
+ if (params->update_erp_next_seq_num)
+ ev->cr.fils_erp_next_seq_num = params->fils_erp_next_seq_num;
+ if (params->bss)
+ cfg80211_hold_bss(bss_from_pub(params->bss));
+ ev->cr.bss = params->bss;
+ ev->cr.status = params->status;
+ ev->cr.timeout_reason = params->timeout_reason;
spin_lock_irqsave(&wdev->event_lock, flags);
list_add_tail(&ev->list, &wdev->event_list);
spin_unlock_irqrestore(&wdev->event_lock, flags);
queue_work(cfg80211_wq, &rdev->event_work);
}
-EXPORT_SYMBOL(cfg80211_connect_bss);
+EXPORT_SYMBOL(cfg80211_connect_done);
/* Consumes bss object one way or another */
void __cfg80211_roamed(struct wireless_dev *wdev,
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index a3d0a91b..ea1b47e 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -1281,6 +1281,24 @@
__entry->wpa_versions, __entry->flags, MAC_PR_ARG(prev_bssid))
);
+TRACE_EVENT(rdev_update_connect_params,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_connect_params *sme, u32 changed),
+ TP_ARGS(wiphy, netdev, sme, changed),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ __field(u32, changed)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ __entry->changed = changed;
+ ),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", parameters changed: %u",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->changed)
+);
+
TRACE_EVENT(rdev_set_cqm_rssi_config,
TP_PROTO(struct wiphy *wiphy,
struct net_device *netdev, s32 rssi_thold,
@@ -3030,6 +3048,25 @@
TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev),
TP_ARGS(wiphy, wdev)
);
+
+TRACE_EVENT(rdev_set_multicast_to_unicast,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ const bool enabled),
+ TP_ARGS(wiphy, netdev, enabled),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ __field(bool, enabled)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ __entry->enabled = enabled;
+ ),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", unicast: %s",
+ WIPHY_PR_ARG, NETDEV_PR_ARG,
+ BOOL_TO_STR(__entry->enabled))
+);
#endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */
#undef TRACE_INCLUDE_PATH
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 60ee74c..8ac413f 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -15,6 +15,7 @@
#include <linux/mpls.h>
#include <net/ndisc.h>
#include <linux/if_arp.h>
+#include <linux/gcd.h>
#include "core.h"
#include "rdev-ops.h"
@@ -456,6 +457,8 @@
if (iftype == NL80211_IFTYPE_MESH_POINT)
skb_copy_bits(skb, hdrlen, &mesh_flags, 1);
+ mesh_flags &= MESH_FLAGS_AE;
+
switch (hdr->frame_control &
cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
case cpu_to_le16(IEEE80211_FCTL_TODS):
@@ -471,9 +474,9 @@
iftype != NL80211_IFTYPE_STATION))
return -1;
if (iftype == NL80211_IFTYPE_MESH_POINT) {
- if (mesh_flags & MESH_FLAGS_AE_A4)
+ if (mesh_flags == MESH_FLAGS_AE_A4)
return -1;
- if (mesh_flags & MESH_FLAGS_AE_A5_A6) {
+ if (mesh_flags == MESH_FLAGS_AE_A5_A6) {
skb_copy_bits(skb, hdrlen +
offsetof(struct ieee80211s_hdr, eaddr1),
tmp.h_dest, 2 * ETH_ALEN);
@@ -489,9 +492,9 @@
ether_addr_equal(tmp.h_source, addr)))
return -1;
if (iftype == NL80211_IFTYPE_MESH_POINT) {
- if (mesh_flags & MESH_FLAGS_AE_A5_A6)
+ if (mesh_flags == MESH_FLAGS_AE_A5_A6)
return -1;
- if (mesh_flags & MESH_FLAGS_AE_A4)
+ if (mesh_flags == MESH_FLAGS_AE_A4)
skb_copy_bits(skb, hdrlen +
offsetof(struct ieee80211s_hdr, eaddr1),
tmp.h_source, ETH_ALEN);
@@ -933,7 +936,6 @@
{
struct cfg80211_event *ev;
unsigned long flags;
- const u8 *bssid = NULL;
spin_lock_irqsave(&wdev->event_lock, flags);
while (!list_empty(&wdev->event_list)) {
@@ -945,15 +947,10 @@
wdev_lock(wdev);
switch (ev->type) {
case EVENT_CONNECT_RESULT:
- if (!is_zero_ether_addr(ev->cr.bssid))
- bssid = ev->cr.bssid;
__cfg80211_connect_result(
- wdev->netdev, bssid,
- ev->cr.req_ie, ev->cr.req_ie_len,
- ev->cr.resp_ie, ev->cr.resp_ie_len,
- ev->cr.status,
- ev->cr.status == WLAN_STATUS_SUCCESS,
- ev->cr.bss);
+ wdev->netdev,
+ &ev->cr,
+ ev->cr.status == WLAN_STATUS_SUCCESS);
break;
case EVENT_ROAMED:
__cfg80211_roamed(wdev, ev->rm.bss, ev->rm.req_ie,
@@ -1557,31 +1554,57 @@
}
EXPORT_SYMBOL(ieee80211_chandef_to_operating_class);
-int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
- u32 beacon_int)
+static void cfg80211_calculate_bi_data(struct wiphy *wiphy, u32 new_beacon_int,
+ u32 *beacon_int_gcd,
+ bool *beacon_int_different)
{
struct wireless_dev *wdev;
- int res = 0;
+
+ *beacon_int_gcd = 0;
+ *beacon_int_different = false;
+
+ list_for_each_entry(wdev, &wiphy->wdev_list, list) {
+ if (!wdev->beacon_interval)
+ continue;
+
+ if (!*beacon_int_gcd) {
+ *beacon_int_gcd = wdev->beacon_interval;
+ continue;
+ }
+
+ if (wdev->beacon_interval == *beacon_int_gcd)
+ continue;
+
+ *beacon_int_different = true;
+ *beacon_int_gcd = gcd(*beacon_int_gcd, wdev->beacon_interval);
+ }
+
+ if (new_beacon_int && *beacon_int_gcd != new_beacon_int) {
+ if (*beacon_int_gcd)
+ *beacon_int_different = true;
+ *beacon_int_gcd = gcd(*beacon_int_gcd, new_beacon_int);
+ }
+}
+
+int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
+ enum nl80211_iftype iftype, u32 beacon_int)
+{
+ /*
+ * This is just a basic pre-condition check; if interface combinations
+ * are possible the driver must already be checking those with a call
+ * to cfg80211_check_combinations(), in which case we'll validate more
+ * through the cfg80211_calculate_bi_data() call and code in
+ * cfg80211_iter_combinations().
+ */
if (beacon_int < 10 || beacon_int > 10000)
return -EINVAL;
- list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
- if (!wdev->beacon_interval)
- continue;
- if (wdev->beacon_interval != beacon_int) {
- res = -EINVAL;
- break;
- }
- }
-
- return res;
+ return 0;
}
int cfg80211_iter_combinations(struct wiphy *wiphy,
- const int num_different_channels,
- const u8 radar_detect,
- const int iftype_num[NUM_NL80211_IFTYPES],
+ struct iface_combination_params *params,
void (*iter)(const struct ieee80211_iface_combination *c,
void *data),
void *data)
@@ -1591,8 +1614,23 @@
int i, j, iftype;
int num_interfaces = 0;
u32 used_iftypes = 0;
+ u32 beacon_int_gcd;
+ bool beacon_int_different;
- if (radar_detect) {
+ /*
+ * This is a bit strange, since the iteration used to rely only on
+ * the data given by the driver, but here it now relies on context,
+ * in form of the currently operating interfaces.
+ * This is OK for all current users, and saves us from having to
+ * push the GCD calculations into all the drivers.
+ * In the future, this should probably rely more on data that's in
+ * cfg80211 already - the only thing not would appear to be any new
+ * interfaces (while being brought up) and channel/radar data.
+ */
+ cfg80211_calculate_bi_data(wiphy, params->new_beacon_int,
+ &beacon_int_gcd, &beacon_int_different);
+
+ if (params->radar_detect) {
rcu_read_lock();
regdom = rcu_dereference(cfg80211_regdomain);
if (regdom)
@@ -1601,8 +1639,8 @@
}
for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
- num_interfaces += iftype_num[iftype];
- if (iftype_num[iftype] > 0 &&
+ num_interfaces += params->iftype_num[iftype];
+ if (params->iftype_num[iftype] > 0 &&
!(wiphy->software_iftypes & BIT(iftype)))
used_iftypes |= BIT(iftype);
}
@@ -1616,7 +1654,7 @@
if (num_interfaces > c->max_interfaces)
continue;
- if (num_different_channels > c->num_different_channels)
+ if (params->num_different_channels > c->num_different_channels)
continue;
limits = kmemdup(c->limits, sizeof(limits[0]) * c->n_limits,
@@ -1631,16 +1669,17 @@
all_iftypes |= limits[j].types;
if (!(limits[j].types & BIT(iftype)))
continue;
- if (limits[j].max < iftype_num[iftype])
+ if (limits[j].max < params->iftype_num[iftype])
goto cont;
- limits[j].max -= iftype_num[iftype];
+ limits[j].max -= params->iftype_num[iftype];
}
}
- if (radar_detect != (c->radar_detect_widths & radar_detect))
+ if (params->radar_detect !=
+ (c->radar_detect_widths & params->radar_detect))
goto cont;
- if (radar_detect && c->radar_detect_regions &&
+ if (params->radar_detect && c->radar_detect_regions &&
!(c->radar_detect_regions & BIT(region)))
goto cont;
@@ -1652,6 +1691,14 @@
if ((all_iftypes & used_iftypes) != used_iftypes)
goto cont;
+ if (beacon_int_gcd) {
+ if (c->beacon_int_min_gcd &&
+ beacon_int_gcd < c->beacon_int_min_gcd)
+ goto cont;
+ if (!c->beacon_int_min_gcd && beacon_int_different)
+ goto cont;
+ }
+
/* This combination covered all interface types and
* supported the requested numbers, so we're good.
*/
@@ -1674,14 +1721,11 @@
}
int cfg80211_check_combinations(struct wiphy *wiphy,
- const int num_different_channels,
- const u8 radar_detect,
- const int iftype_num[NUM_NL80211_IFTYPES])
+ struct iface_combination_params *params)
{
int err, num = 0;
- err = cfg80211_iter_combinations(wiphy, num_different_channels,
- radar_detect, iftype_num,
+ err = cfg80211_iter_combinations(wiphy, params,
cfg80211_iter_sum_ifcombs, &num);
if (err)
return err;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index e0437a7..8da67f7 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1808,43 +1808,6 @@
goto out;
}
-#ifdef CONFIG_XFRM_SUB_POLICY
-static int xfrm_dst_alloc_copy(void **target, const void *src, int size)
-{
- if (!*target) {
- *target = kmalloc(size, GFP_ATOMIC);
- if (!*target)
- return -ENOMEM;
- }
-
- memcpy(*target, src, size);
- return 0;
-}
-#endif
-
-static int xfrm_dst_update_parent(struct dst_entry *dst,
- const struct xfrm_selector *sel)
-{
-#ifdef CONFIG_XFRM_SUB_POLICY
- struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
- return xfrm_dst_alloc_copy((void **)&(xdst->partner),
- sel, sizeof(*sel));
-#else
- return 0;
-#endif
-}
-
-static int xfrm_dst_update_origin(struct dst_entry *dst,
- const struct flowi *fl)
-{
-#ifdef CONFIG_XFRM_SUB_POLICY
- struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
- return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
-#else
- return 0;
-#endif
-}
-
static int xfrm_expand_policies(const struct flowi *fl, u16 family,
struct xfrm_policy **pols,
int *num_pols, int *num_xfrms)
@@ -1916,16 +1879,6 @@
xdst = (struct xfrm_dst *)dst;
xdst->num_xfrms = err;
- if (num_pols > 1)
- err = xfrm_dst_update_parent(dst, &pols[1]->selector);
- else
- err = xfrm_dst_update_origin(dst, fl);
- if (unlikely(err)) {
- dst_free(dst);
- XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
- return ERR_PTR(err);
- }
-
xdst->num_pols = num_pols;
memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
xdst->policy_genid = atomic_read(&pols[0]->genid);
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index a0d45ef..71ea359 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -3654,7 +3654,7 @@
$fixedline =~ s/\s*=\s*$/ = {/;
fix_insert_line($fixlinenr, $fixedline);
$fixedline = $line;
- $fixedline =~ s/^(.\s*){\s*/$1/;
+ $fixedline =~ s/^(.\s*)\{\s*/$1/;
fix_insert_line($fixlinenr, $fixedline);
}
}
@@ -3995,7 +3995,7 @@
my $fixedline = rtrim($prevrawline) . " {";
fix_insert_line($fixlinenr, $fixedline);
$fixedline = $rawline;
- $fixedline =~ s/^(.\s*){\s*/$1\t/;
+ $fixedline =~ s/^(.\s*)\{\s*/$1\t/;
if ($fixedline !~ /^\+\s*$/) {
fix_insert_line($fixlinenr, $fixedline);
}
@@ -4484,7 +4484,7 @@
if (ERROR("SPACING",
"space required before the open brace '{'\n" . $herecurr) &&
$fix) {
- $fixed[$fixlinenr] =~ s/^(\+.*(?:do|\))){/$1 {/;
+ $fixed[$fixlinenr] =~ s/^(\+.*(?:do|\)))\{/$1 {/;
}
}
diff --git a/scripts/dtc/libfdt/fdt.c b/scripts/dtc/libfdt/fdt.c
index 22286a1..14fb793 100644
--- a/scripts/dtc/libfdt/fdt.c
+++ b/scripts/dtc/libfdt/fdt.c
@@ -71,6 +71,20 @@
return -FDT_ERR_BADMAGIC;
}
+ if (fdt_off_dt_struct(fdt) > (UINT_MAX - fdt_size_dt_struct(fdt)))
+ return -FDT_ERR_BADOFFSET;
+
+ if (fdt_off_dt_strings(fdt) > (UINT_MAX - fdt_size_dt_strings(fdt)))
+ return -FDT_ERR_BADOFFSET;
+
+ if ((fdt_off_dt_struct(fdt) + fdt_size_dt_struct(fdt))
+ > fdt_totalsize(fdt))
+ return -FDT_ERR_BADOFFSET;
+
+ if ((fdt_off_dt_strings(fdt) + fdt_size_dt_strings(fdt))
+ > fdt_totalsize(fdt))
+ return -FDT_ERR_BADOFFSET;
+
return 0;
}
diff --git a/scripts/dtc/libfdt/fdt_rw.c b/scripts/dtc/libfdt/fdt_rw.c
index 8be02b1..468e169 100644
--- a/scripts/dtc/libfdt/fdt_rw.c
+++ b/scripts/dtc/libfdt/fdt_rw.c
@@ -396,7 +396,7 @@
static void _fdt_packblocks(const char *old, char *new,
int mem_rsv_size, int struct_size)
{
- int mem_rsv_off, struct_off, strings_off;
+ uint32_t mem_rsv_off, struct_off, strings_off;
mem_rsv_off = FDT_ALIGN(sizeof(struct fdt_header), 8);
struct_off = mem_rsv_off + mem_rsv_size;
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
old mode 100755
new mode 100644
index f742c65..1b9e67b
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -145,6 +145,25 @@
${CC} ${aflags} -c -o ${2} ${afile}
}
+# Generates ${2} .o file with RTIC MP's from the ${1} object file (vmlinux)
+# ${3} the file name where the sizes of the RTIC MP structure are stored
+# just in case, save copy of the RTIC mp to ${4}
+# Note: RTIC_MPGEN has to be set if MPGen is available
+rtic_mp()
+{
+ # assume that RTIC_MP_O generation may fail
+ RTIC_MP_O=
+
+ ${RTIC_MPGEN} --objcopy="${OBJCOPY}" --objdump="${OBJDUMP}" \
+ --binpath='' --vmlinux=${1} --config=${KCONFIG_CONFIG} && \
+ cat rtic_mp.c | ${CC} -c -o ${2} -x c - && \
+ cp rtic_mp.c ${4} && \
+ ${NM} --print-size --size-sort ${2} > ${3} && \
+ RTIC_MP_O=${2}
+ # NM - save generated variable sizes for verification
+ # RTIC_MP_O is our retval - great success if set to generated .o file
+}
+
# Create map file with all symbols from ${1}
# See mksymap for additional details
mksysmap()
@@ -169,6 +188,8 @@
rm -f System.map
rm -f vmlinux
rm -f vmlinux.o
+ rm -f .tmp_rtic_mp_sz*
+ rm -f rtic_mp.*
}
on_exit()
@@ -231,6 +252,15 @@
# final build of init/
${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init GCC_PLUGINS_CFLAGS="${GCC_PLUGINS_CFLAGS}"
+# Generate RTIC MP placeholder compile unit of the correct size
+# and add it to the list of link objects
+# this needs to be done before generating kallsyms
+if [ ! -z ${RTIC_MPGEN+x} ]; then
+ rtic_mp vmlinux.o rtic_mp.o .tmp_rtic_mp_sz1 .tmp_rtic_mp1.c
+ KBUILD_VMLINUX_MAIN+=" "
+ KBUILD_VMLINUX_MAIN+=$RTIC_MP_O
+fi
+
kallsymso=""
kallsyms_vmlinux=""
if [ -n "${CONFIG_KALLSYMS}" ]; then
@@ -276,6 +306,18 @@
fi
fi
+# Update RTIC MP object by replacing the place holder
+# with actual MP data of the same size
+# Also double check that object size did not change
+if [ ! -z ${RTIC_MPGEN+x} ]; then
+ rtic_mp "${kallsyms_vmlinux}" rtic_mp.o .tmp_rtic_mp_sz2 \
+ .tmp_rtic_mp2.c
+ if ! cmp -s .tmp_rtic_mp_sz1 .tmp_rtic_mp_sz2; then
+ echo >&2 'ERROR: RTIC MP object files size mismatch'
+ exit 1
+ fi
+fi
+
info LD vmlinux
vmlinux_link "${kallsymso}" vmlinux
diff --git a/security/Kconfig b/security/Kconfig
index 59aea7d..5693989 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -40,6 +40,11 @@
If you are unsure how to answer this question, answer N.
+config SECURITY_WRITABLE_HOOKS
+ depends on SECURITY
+ bool
+ default n
+
config SECURITYFS
bool "Enable the securityfs filesystem"
help
@@ -167,6 +172,13 @@
been removed. This config is intended to be used only while
trying to find such users.
+config FORTIFY_SOURCE
+ bool "Harden common str/mem functions against buffer overflows"
+ depends on ARCH_HAS_FORTIFY_SOURCE
+ help
+ Detect overflows of buffers in common string and memory functions
+ where the compiler can determine and validate the buffer sizes.
+
source security/selinux/Kconfig
source security/smack/Kconfig
source security/tomoyo/Kconfig
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 41b8cb1..57bc405 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -584,7 +584,7 @@
return error;
}
-static struct security_hook_list apparmor_hooks[] = {
+static struct security_hook_list apparmor_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(ptrace_access_check, apparmor_ptrace_access_check),
LSM_HOOK_INIT(ptrace_traceme, apparmor_ptrace_traceme),
LSM_HOOK_INIT(capget, apparmor_capget),
diff --git a/security/commoncap.c b/security/commoncap.c
index a8e4aac..3e44d01 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -1081,7 +1081,7 @@
#ifdef CONFIG_SECURITY
-struct security_hook_list capability_hooks[] = {
+struct security_hook_list capability_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(capable, cap_capable),
LSM_HOOK_INIT(settime, cap_settime),
LSM_HOOK_INIT(ptrace_access_check, cap_ptrace_access_check),
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
index 56c458d..8d9330a 100644
--- a/security/keys/encrypted-keys/encrypted.c
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -437,7 +437,7 @@
static struct key *request_master_key(struct encrypted_key_payload *epayload,
const u8 **master_key, size_t *master_keylen)
{
- struct key *mkey = NULL;
+ struct key *mkey = ERR_PTR(-EINVAL);
if (!strncmp(epayload->master_desc, KEY_TRUSTED_PREFIX,
KEY_TRUSTED_PREFIX_LEN)) {
diff --git a/security/loadpin/loadpin.c b/security/loadpin/loadpin.c
index 89a46f1..afd4ab9 100644
--- a/security/loadpin/loadpin.c
+++ b/security/loadpin/loadpin.c
@@ -174,7 +174,7 @@
return 0;
}
-static struct security_hook_list loadpin_hooks[] = {
+static struct security_hook_list loadpin_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(sb_free_security, loadpin_sb_free_security),
LSM_HOOK_INIT(kernel_read_file, loadpin_read_file),
};
diff --git a/security/security.c b/security/security.c
index 1ba5274..6a7b359 100644
--- a/security/security.c
+++ b/security/security.c
@@ -1590,7 +1590,7 @@
}
#endif /* CONFIG_AUDIT */
-struct security_hook_heads security_hook_heads = {
+struct security_hook_heads security_hook_heads __lsm_ro_after_init = {
.binder_set_context_mgr =
LIST_HEAD_INIT(security_hook_heads.binder_set_context_mgr),
.binder_transaction =
diff --git a/security/selinux/Kconfig b/security/selinux/Kconfig
index ea7e3ef..8af7a69 100644
--- a/security/selinux/Kconfig
+++ b/security/selinux/Kconfig
@@ -40,6 +40,7 @@
config SECURITY_SELINUX_DISABLE
bool "NSA SELinux runtime disable"
depends on SECURITY_SELINUX
+ select SECURITY_WRITABLE_HOOKS
default n
help
This option enables writing to a selinuxfs node 'disable', which
@@ -50,6 +51,11 @@
portability across platforms where boot parameters are difficult
to employ.
+ NOTE: selecting this option will disable the '__ro_after_init'
+ kernel hardening feature for security hooks. Please consider
+ using the selinux=0 boot parameter instead of enabling this
+ option.
+
If you are unsure how to answer this question, answer N.
config SECURITY_SELINUX_DEVELOP
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 8b918f8..e26ecb0 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -814,6 +814,7 @@
sbsec->flags |= SE_SBPROC | SE_SBGENFS;
if (!strcmp(sb->s_type->name, "debugfs") ||
+ !strcmp(sb->s_type->name, "tracefs") ||
!strcmp(sb->s_type->name, "sysfs") ||
!strcmp(sb->s_type->name, "pstore"))
sbsec->flags |= SE_SBGENFS;
@@ -6078,7 +6079,7 @@
#endif
-static struct security_hook_list selinux_hooks[] = {
+static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(binder_set_context_mgr, selinux_binder_set_context_mgr),
LSM_HOOK_INIT(binder_transaction, selinux_binder_transaction),
LSM_HOOK_INIT(binder_transfer_binder, selinux_binder_transfer_binder),
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 1cb0602..b75c31a 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -4611,7 +4611,7 @@
return 0;
}
-static struct security_hook_list smack_hooks[] = {
+static struct security_hook_list smack_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(ptrace_access_check, smack_ptrace_access_check),
LSM_HOOK_INIT(ptrace_traceme, smack_ptrace_traceme),
LSM_HOOK_INIT(syslog, smack_syslog),
diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
index 75c9987..f1dce33 100644
--- a/security/tomoyo/tomoyo.c
+++ b/security/tomoyo/tomoyo.c
@@ -496,7 +496,7 @@
* tomoyo_security_ops is a "struct security_operations" which is used for
* registering TOMOYO.
*/
-static struct security_hook_list tomoyo_hooks[] = {
+static struct security_hook_list tomoyo_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(cred_alloc_blank, tomoyo_cred_alloc_blank),
LSM_HOOK_INIT(cred_prepare, tomoyo_cred_prepare),
LSM_HOOK_INIT(cred_transfer, tomoyo_cred_transfer),
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index 0309f21..70aa64c 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -414,7 +414,7 @@
return rc;
}
-static struct security_hook_list yama_hooks[] = {
+static struct security_hook_list yama_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(ptrace_access_check, yama_ptrace_access_check),
LSM_HOOK_INIT(ptrace_traceme, yama_ptrace_traceme),
LSM_HOOK_INIT(task_prctl, yama_task_prctl),
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index bbba7be..6faddfb 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -2509,7 +2509,7 @@
struct snd_pcm_substream *substream;
const struct snd_pcm_chmap_elem *map;
- if (snd_BUG_ON(!info->chmap))
+ if (!info->chmap)
return -EINVAL;
substream = snd_pcm_chmap_substream(info, idx);
if (!substream)
@@ -2541,7 +2541,7 @@
unsigned int __user *dst;
int c, count = 0;
- if (snd_BUG_ON(!info->chmap))
+ if (!info->chmap)
return -EINVAL;
if (size < 8)
return -ENOMEM;
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
index 00060c4..9741757 100644
--- a/sound/firewire/amdtp-stream.c
+++ b/sound/firewire/amdtp-stream.c
@@ -606,7 +606,9 @@
cycle = increment_cycle_count(cycle, 1);
if (handle_out_packet(s, cycle, i) < 0) {
s->packet_index = -1;
- amdtp_stream_pcm_abort(s);
+ if (in_interrupt())
+ amdtp_stream_pcm_abort(s);
+ WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
return;
}
}
@@ -658,7 +660,9 @@
/* Queueing error or detecting invalid payload. */
if (i < packets) {
s->packet_index = -1;
- amdtp_stream_pcm_abort(s);
+ if (in_interrupt())
+ amdtp_stream_pcm_abort(s);
+ WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
return;
}
diff --git a/sound/firewire/amdtp-stream.h b/sound/firewire/amdtp-stream.h
index c1bc7fa..f7c054b 100644
--- a/sound/firewire/amdtp-stream.h
+++ b/sound/firewire/amdtp-stream.h
@@ -124,7 +124,7 @@
/* For a PCM substream processing. */
struct snd_pcm_substream *pcm;
struct tasklet_struct period_tasklet;
- unsigned int pcm_buffer_pointer;
+ snd_pcm_uframes_t pcm_buffer_pointer;
unsigned int pcm_period_pointer;
/* To wait for first packet. */
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 373fcad..776dffa 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -294,6 +294,8 @@
#define list_for_each_codec(c, bus) \
list_for_each_entry(c, &(bus)->core.codec_list, core.list)
+#define list_for_each_codec_safe(c, n, bus) \
+ list_for_each_entry_safe(c, n, &(bus)->core.codec_list, core.list)
/* snd_hda_codec_read/write optional flags */
#define HDA_RW_NO_RESPONSE_FALLBACK (1 << 0)
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index 5008785..0af1132 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -1333,8 +1333,12 @@
/* configure each codec instance */
int azx_codec_configure(struct azx *chip)
{
- struct hda_codec *codec;
- list_for_each_codec(codec, &chip->bus) {
+ struct hda_codec *codec, *next;
+
+ /* use _safe version here since snd_hda_codec_configure() deregisters
+ * the device upon error and deletes itself from the bus list.
+ */
+ list_for_each_codec_safe(codec, next, &chip->bus) {
snd_hda_codec_configure(codec);
}
return 0;
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index e7c8f4f..b0bd290 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -3169,6 +3169,7 @@
spec->input_paths[i][nums]);
spec->input_paths[i][nums] =
spec->input_paths[i][n];
+ spec->input_paths[i][n] = 0;
}
}
nums++;
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 17224de..01c67be 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -878,68 +878,60 @@
config SND_SOC_WCD934X_DSD
tristate
-config SND_SOC_WCD9320
- tristate
-
-config SND_SOC_WCD9330
- tristate
- depends on WCD9330_CODEC
-
config SND_SOC_WCD9335
tristate
- depends on WCD9335_CODEC
- select SND_SOC_WCD_MBHC
+ depends on WCD9XXX_CODEC_CORE
+ select SND_SOC_WCD9XXX
select SND_SOC_WCD_MBHC_LEGACY
+ select SND_SOC_WCD_CPE
config SND_SOC_WCD934X
tristate
- depends on WCD934X_CODEC
- select SND_SOC_WCD9XXX_V2
+ depends on WCD9XXX_CODEC_CORE
+ select SND_SOC_WCD9XXX
select AUDIO_EXT_CLK
select SND_SOC_WCD_DSP_MGR
select SND_SOC_WCD_SPI
select SND_SOC_WCD934X_MBHC
select SND_SOC_WCD934X_DSD
+ select WCD_DSP_GLINK
config SND_SOC_WCD934X_MBHC
tristate
depends on SND_SOC_WCD934X
- select SND_SOC_WCD_MBHC
select SND_SOC_WCD_MBHC_ADC
+config REGMAP_SWR
+ tristate
+ default y
+
config SND_SOC_WSA881X
tristate
+ depends on REGMAP_SWR
select MSM_CDC_PINCTRL
- select REGMAP_SWR
-
-config SND_SOC_WSA881X_ANALOG
- tristate
- select REGMAP_I2C
config SND_SOC_WCD9XXX
tristate
- default y if SND_SOC_WCD9320=y || SND_SOC_WCD9330=y || SND_SOC_WCD9335=y
-
-config SND_SOC_WCD9XXX_V2
- tristate
- default y if SND_SOC_WCD9335=y
+ default y if SND_SOC_WCD9335=y || SND_SOC_WCD934X=y
config SND_SOC_WCD_CPE
tristate
- default y if SND_SOC_WCD9330=y || SND_SOC_WCD9335=y
+ default y if SND_SOC_WCD9335=y
config AUDIO_EXT_CLK
tristate
- default y if SND_SOC_WCD9335=y || SND_SOC_WCD9330=y || SND_SOC_SDM660_CDC=y
+ default y if SND_SOC_WCD9335=y || SND_SOC_SDM660_CDC=y
config SND_SOC_WCD_MBHC
tristate
config SND_SOC_WCD_MBHC_LEGACY
tristate
+ select SND_SOC_WCD_MBHC
config SND_SOC_WCD_MBHC_ADC
tristate
+ select SND_SOC_WCD_MBHC
config SND_SOC_WCD_DSP_MGR
tristate
@@ -1167,7 +1159,7 @@
config SND_SOC_MSM_HDMI_CODEC_RX
bool "HDMI Audio Playback"
- depends on FB_MSM_MDSS_HDMI_PANEL && (SND_SOC_APQ8084 || SND_SOC_MSM8994 || SND_SOC_MSM8996 || SND_SOC_MSM8998 || SND_SOC_SDM660_COMMON)
+ depends on FB_MSM_MDSS_HDMI_PANEL && (SND_SOC_APQ8084 || SND_SOC_MSM8994 || SND_SOC_MSM8996 || SND_SOC_MSM8998 || SND_SOC_660)
help
HDMI audio drivers should be built only if the platform
supports hdmi panel.
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index 8c84460..daf05d8 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -158,30 +158,20 @@
snd-soc-twl6040-objs := twl6040.o
snd-soc-uda134x-objs := uda134x.o
snd-soc-uda1380-objs := uda1380.o
-snd-soc-wcd9320-objs := wcd9320.o wcd9320-tables.o
-snd-soc-wcd9330-objs := wcd9330.o wcd9330-tables.o
snd-soc-wcd9335-objs := wcd9335.o
snd-soc-wcd934x-objs := wcd934x.o
-snd-soc-wcd9xxx-objs := wcd9xxx-resmgr.o wcd9xxx-mbhc.o wcd9xxx-common.o wcdcal-hwdep.o
-snd-soc-wcd9xxx-v2-objs := wcd9xxx-common-v2.o wcd9xxx-resmgr-v2.o wcdcal-hwdep.o
-ifeq ($(CONFIG_COMMON_CLK_MSM), y)
- audio-ext-clock-objs := audio-ext-clk.o
-endif
-
-ifeq ($(CONFIG_COMMON_CLK_QCOM), y)
- audio-ext-clock-up-objs := audio-ext-clk-up.o
-endif
+snd-soc-wcd9xxx-objs := wcd9xxx-common-v2.o wcd9xxx-resmgr-v2.o \
+ wcdcal-hwdep.o wcd-dsp-mgr.o wcd-dsp-utils.o \
+ wcd9xxx-soc-init.o audio-ext-clk-up.o
snd-soc-wcd-cpe-objs := wcd_cpe_services.o wcd_cpe_core.o
snd-soc-wsa881x-objs := wsa881x.o wsa881x-tables.o wsa881x-regmap.o wsa881x-temp-sensor.o
-ifeq ($(CONFIG_SND_SOC_WCD_MBHC_LEGACY), y)
- snd-soc-wcd-mbhc-objs := wcd-mbhc-v2.o wcd-mbhc-legacy.o
-else ifeq ($(CONFIG_SND_SOC_WCD_MBHC_ADC), y)
- snd-soc-wcd-mbhc-objs := wcd-mbhc-v2.o wcd-mbhc-adc.o
+snd-soc-wcd-mbhc-objs := wcd-mbhc-v2.o
+ifneq (,$(filter $(CONFIG_SND_SOC_WCD_MBHC_LEGACY),y m))
+ snd-soc-wcd-mbhc-objs += wcd-mbhc-legacy.o
endif
-snd-soc-wsa881x-analog-objs := wsa881x-analog.o wsa881x-tables-analog.o
-snd-soc-wsa881x-analog-objs += wsa881x-regmap-analog.o wsa881x-irq.o
-snd-soc-wcd-dsp-utils-objs := wcd-dsp-utils.o
-snd-soc-wcd-dsp-mgr-objs := wcd-dsp-mgr.o
+ifneq (,$(filter $(CONFIG_SND_SOC_WCD_MBHC_ADC),y m))
+ snd-soc-wcd-mbhc-objs += wcd-mbhc-adc.o
+endif
snd-soc-wcd-spi-objs := wcd-spi.o
snd-soc-wl1273-objs := wl1273.o
snd-soc-wm-adsp-objs := wm_adsp.o
@@ -407,23 +397,13 @@
obj-$(CONFIG_SND_SOC_UDA134X) += snd-soc-uda134x.o
obj-$(CONFIG_SND_SOC_UDA1380) += snd-soc-uda1380.o
obj-$(CONFIG_SND_SOC_WCD9320) += snd-soc-wcd9320.o
-obj-$(CONFIG_SND_SOC_WCD9330) += snd-soc-wcd9330.o
obj-$(CONFIG_SND_SOC_WCD9335) += snd-soc-wcd9335.o
obj-$(CONFIG_SND_SOC_WCD934X) += wcd934x/
-ifeq ($(CONFIG_COMMON_CLK_MSM), y)
- obj-$(CONFIG_AUDIO_EXT_CLK) += audio-ext-clock.o
-endif
-ifeq ($(CONFIG_COMMON_CLK_QCOM), y)
- obj-$(CONFIG_AUDIO_EXT_CLK) += audio-ext-clock-up.o
-endif
-obj-$(CONFIG_SND_SOC_WCD9XXX) += snd-soc-wcd9xxx.o
-obj-$(CONFIG_SND_SOC_WCD9XXX_V2) += snd-soc-wcd9xxx-v2.o
+obj-$(CONFIG_SND_SOC_WCD9XXX) += snd-soc-wcd9xxx.o
obj-$(CONFIG_SND_SOC_WCD_CPE) += snd-soc-wcd-cpe.o
obj-$(CONFIG_SND_SOC_WCD_MBHC) += snd-soc-wcd-mbhc.o
obj-$(CONFIG_SND_SOC_WSA881X) += snd-soc-wsa881x.o
-obj-$(CONFIG_SND_SOC_WSA881X_ANALOG) += snd-soc-wsa881x-analog.o
obj-$(CONFIG_SND_SOC_WL1273) += snd-soc-wl1273.o
-obj-$(CONFIG_SND_SOC_WCD_DSP_MGR) += snd-soc-wcd-dsp-mgr.o snd-soc-wcd-dsp-utils.o
obj-$(CONFIG_SND_SOC_WCD_SPI) += snd-soc-wcd-spi.o
obj-$(CONFIG_SND_SOC_WM0010) += snd-soc-wm0010.o
obj-$(CONFIG_SND_SOC_WM1250_EV1) += snd-soc-wm1250-ev1.o
diff --git a/sound/soc/codecs/audio-ext-clk-up.c b/sound/soc/codecs/audio-ext-clk-up.c
index f12f4ca..31c063d 100644
--- a/sound/soc/codecs/audio-ext-clk-up.c
+++ b/sound/soc/codecs/audio-ext-clk-up.c
@@ -23,6 +23,7 @@
#include <linux/of_gpio.h>
#include <dt-bindings/clock/qcom,audio-ext-clk.h>
#include <sound/q6afe-v2.h>
+#include "audio-ext-clk-up.h"
enum audio_clk_mux {
AP_CLK2,
@@ -176,6 +177,15 @@
struct pinctrl_info *pnctrl_info = &audio_lpass_mclk->pnctrl_info;
int ret;
+ lpass_mclk.enable = 1;
+ ret = afe_set_lpass_clock_v2(AFE_PORT_ID_PRIMARY_MI2S_RX,
+ &lpass_mclk);
+ if (ret < 0) {
+ pr_err("%s afe_set_digital_codec_core_clock failed\n",
+ __func__);
+ return ret;
+ }
+
if (pnctrl_info->pinctrl) {
ret = pinctrl_select_state(pnctrl_info->pinctrl,
pnctrl_info->active);
@@ -186,15 +196,6 @@
}
}
- lpass_mclk.enable = 1;
- ret = afe_set_lpass_clock_v2(AFE_PORT_ID_PRIMARY_MI2S_RX,
- &lpass_mclk);
- if (ret < 0) {
- pr_err("%s afe_set_digital_codec_core_clock failed\n",
- __func__);
- return ret;
- }
-
if (pnctrl_info->base)
iowrite32(1, pnctrl_info->base);
return 0;
@@ -611,17 +612,15 @@
.remove = audio_ref_clk_remove,
};
-static int __init audio_ref_clk_platform_init(void)
+int audio_ref_clk_platform_init(void)
{
return platform_driver_register(&audio_ref_clk_driver);
}
-module_init(audio_ref_clk_platform_init);
-static void __exit audio_ref_clk_platform_exit(void)
+void audio_ref_clk_platform_exit(void)
{
platform_driver_unregister(&audio_ref_clk_driver);
}
-module_exit(audio_ref_clk_platform_exit);
MODULE_DESCRIPTION("Audio Ref Up Clock module platform driver");
MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/audio-ext-clk-up.h b/sound/soc/codecs/audio-ext-clk-up.h
new file mode 100644
index 0000000..8a0232e
--- /dev/null
+++ b/sound/soc/codecs/audio-ext-clk-up.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __AUDIO_EXT_CLK_UP_H_
+#define __AUDIO_EXT_CLK_UP_H_
+
+int audio_ref_clk_platform_init(void);
+void audio_ref_clk_platform_exit(void);
+
+#endif
diff --git a/sound/soc/codecs/audio-ext-clk.c b/sound/soc/codecs/audio-ext-clk.c
deleted file mode 100644
index ef795df..0000000
--- a/sound/soc/codecs/audio-ext-clk.c
+++ /dev/null
@@ -1,349 +0,0 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/clk.h>
-#include <linux/clk/msm-clk-provider.h>
-#include <linux/clk/msm-clk.h>
-#include <linux/platform_device.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
-#include <dt-bindings/clock/audio-ext-clk.h>
-#include <sound/q6afe-v2.h>
-
-struct pinctrl_info {
- struct pinctrl *pinctrl;
- struct pinctrl_state *sleep;
- struct pinctrl_state *active;
-};
-
-struct audio_ext_ap_clk {
- bool enabled;
- int gpio;
- struct clk c;
-};
-
-struct audio_ext_pmi_clk {
- int gpio;
- struct clk c;
-};
-
-struct audio_ext_ap_clk2 {
- bool enabled;
- struct pinctrl_info pnctrl_info;
- struct clk c;
-};
-
-static struct afe_clk_set clk2_config = {
- Q6AFE_LPASS_CLK_CONFIG_API_VERSION,
- Q6AFE_LPASS_CLK_ID_SPEAKER_I2S_OSR,
- Q6AFE_LPASS_IBIT_CLK_11_P2896_MHZ,
- Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_NO,
- Q6AFE_LPASS_CLK_ROOT_DEFAULT,
- 0,
-};
-
-static inline struct audio_ext_ap_clk *to_audio_ap_clk(struct clk *clk)
-{
- return container_of(clk, struct audio_ext_ap_clk, c);
-}
-
-static int audio_ext_clk_prepare(struct clk *clk)
-{
- struct audio_ext_ap_clk *audio_clk = to_audio_ap_clk(clk);
-
- pr_debug("%s: gpio: %d\n", __func__, audio_clk->gpio);
- if (gpio_is_valid(audio_clk->gpio))
- return gpio_direction_output(audio_clk->gpio, 1);
- return 0;
-}
-
-static void audio_ext_clk_unprepare(struct clk *clk)
-{
- struct audio_ext_ap_clk *audio_clk = to_audio_ap_clk(clk);
-
- pr_debug("%s: gpio: %d\n", __func__, audio_clk->gpio);
- if (gpio_is_valid(audio_clk->gpio))
- gpio_direction_output(audio_clk->gpio, 0);
-}
-
-static inline struct audio_ext_ap_clk2 *to_audio_ap_clk2(struct clk *clk)
-{
- return container_of(clk, struct audio_ext_ap_clk2, c);
-}
-
-static int audio_ext_clk2_prepare(struct clk *clk)
-{
- struct audio_ext_ap_clk2 *audio_clk2 = to_audio_ap_clk2(clk);
- struct pinctrl_info *pnctrl_info = &audio_clk2->pnctrl_info;
- int ret;
-
-
- if (!pnctrl_info->pinctrl || !pnctrl_info->active)
- return 0;
-
- ret = pinctrl_select_state(pnctrl_info->pinctrl,
- pnctrl_info->active);
- if (ret) {
- pr_err("%s: active state select failed with %d\n",
- __func__, ret);
- return -EIO;
- }
-
- clk2_config.enable = 1;
- ret = afe_set_lpass_clk_cfg(IDX_RSVD_3, &clk2_config);
- if (ret < 0) {
- pr_err("%s: failed to set clock, ret = %d\n", __func__, ret);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static void audio_ext_clk2_unprepare(struct clk *clk)
-{
- struct audio_ext_ap_clk2 *audio_clk2 = to_audio_ap_clk2(clk);
- struct pinctrl_info *pnctrl_info = &audio_clk2->pnctrl_info;
- int ret;
-
- if (!pnctrl_info->pinctrl || !pnctrl_info->sleep)
- return;
-
- ret = pinctrl_select_state(pnctrl_info->pinctrl,
- pnctrl_info->sleep);
- if (ret)
- pr_err("%s: sleep state select failed with %d\n",
- __func__, ret);
-
- clk2_config.enable = 0;
- ret = afe_set_lpass_clk_cfg(IDX_RSVD_3, &clk2_config);
- if (ret < 0)
- pr_err("%s: failed to reset clock, ret = %d\n", __func__, ret);
-}
-
-static const struct clk_ops audio_ext_ap_clk_ops = {
- .prepare = audio_ext_clk_prepare,
- .unprepare = audio_ext_clk_unprepare,
-};
-
-static const struct clk_ops audio_ext_ap_clk2_ops = {
- .prepare = audio_ext_clk2_prepare,
- .unprepare = audio_ext_clk2_unprepare,
-};
-
-static struct audio_ext_pmi_clk audio_pmi_clk = {
- .gpio = -EINVAL,
- .c = {
- .dbg_name = "audio_ext_pmi_clk",
- .ops = &clk_ops_dummy,
- CLK_INIT(audio_pmi_clk.c),
- },
-};
-
-static struct audio_ext_pmi_clk audio_pmi_lnbb_clk = {
- .gpio = -EINVAL,
- .c = {
- .dbg_name = "audio_ext_pmi_lnbb_clk",
- .ops = &clk_ops_dummy,
- CLK_INIT(audio_pmi_lnbb_clk.c),
- },
-};
-
-static struct audio_ext_ap_clk audio_ap_clk = {
- .gpio = -EINVAL,
- .c = {
- .dbg_name = "audio_ext_ap_clk",
- .ops = &audio_ext_ap_clk_ops,
- CLK_INIT(audio_ap_clk.c),
- },
-};
-
-static struct audio_ext_ap_clk2 audio_ap_clk2 = {
- .c = {
- .dbg_name = "audio_ext_ap_clk2",
- .ops = &audio_ext_ap_clk2_ops,
- CLK_INIT(audio_ap_clk2.c),
- },
-};
-
-static struct clk_lookup audio_ref_clock[] = {
- CLK_LIST(audio_ap_clk),
- CLK_LIST(audio_pmi_clk),
- CLK_LIST(audio_pmi_lnbb_clk),
- CLK_LIST(audio_ap_clk2),
-};
-
-static int audio_get_pinctrl(struct platform_device *pdev)
-{
- struct pinctrl_info *pnctrl_info;
- struct pinctrl *pinctrl;
- int ret;
-
- pnctrl_info = &audio_ap_clk2.pnctrl_info;
-
- if (pnctrl_info->pinctrl) {
- dev_dbg(&pdev->dev, "%s: already requested before\n",
- __func__);
- return -EINVAL;
- }
-
- pinctrl = devm_pinctrl_get(&pdev->dev);
- if (IS_ERR_OR_NULL(pinctrl)) {
- dev_dbg(&pdev->dev, "%s: Unable to get pinctrl handle\n",
- __func__);
- return -EINVAL;
- }
- pnctrl_info->pinctrl = pinctrl;
- /* get all state handles from Device Tree */
- pnctrl_info->sleep = pinctrl_lookup_state(pinctrl, "sleep");
- if (IS_ERR(pnctrl_info->sleep)) {
- dev_err(&pdev->dev, "%s: could not get sleep pinstate\n",
- __func__);
- goto err;
- }
- pnctrl_info->active = pinctrl_lookup_state(pinctrl, "active");
- if (IS_ERR(pnctrl_info->active)) {
- dev_err(&pdev->dev, "%s: could not get active pinstate\n",
- __func__);
- goto err;
- }
- /* Reset the TLMM pins to a default state */
- ret = pinctrl_select_state(pnctrl_info->pinctrl,
- pnctrl_info->sleep);
- if (ret) {
- dev_err(&pdev->dev, "%s: Disable TLMM pins failed with %d\n",
- __func__, ret);
- goto err;
- }
- return 0;
-
-err:
- devm_pinctrl_put(pnctrl_info->pinctrl);
- return -EINVAL;
-}
-
-static int audio_ref_clk_probe(struct platform_device *pdev)
-{
- int clk_gpio;
- int ret;
- struct clk *audio_clk;
-
- clk_gpio = of_get_named_gpio(pdev->dev.of_node,
- "qcom,audio-ref-clk-gpio", 0);
- if (clk_gpio > 0) {
- ret = gpio_request(clk_gpio, "EXT_CLK");
- if (ret) {
- dev_err(&pdev->dev,
- "Request ext clk gpio failed %d, err:%d\n",
- clk_gpio, ret);
- goto err;
- }
- if (of_property_read_bool(pdev->dev.of_node,
- "qcom,node_has_rpm_clock")) {
- audio_clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(audio_clk)) {
- dev_err(&pdev->dev, "Failed to get RPM div clk\n");
- ret = PTR_ERR(audio_clk);
- goto err_gpio;
- }
- audio_pmi_clk.c.parent = audio_clk;
- audio_pmi_clk.gpio = clk_gpio;
- } else
- audio_ap_clk.gpio = clk_gpio;
-
- } else {
- if (of_property_read_bool(pdev->dev.of_node,
- "qcom,node_has_rpm_clock")) {
- audio_clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(audio_clk)) {
- dev_err(&pdev->dev, "Failed to get lnbbclk2\n");
- ret = PTR_ERR(audio_clk);
- goto err;
- }
- audio_pmi_lnbb_clk.c.parent = audio_clk;
- audio_pmi_lnbb_clk.gpio = -EINVAL;
- }
- }
-
- ret = audio_get_pinctrl(pdev);
- if (ret)
- dev_dbg(&pdev->dev, "%s: Parsing pinctrl failed\n",
- __func__);
-
- ret = of_msm_clock_register(pdev->dev.of_node, audio_ref_clock,
- ARRAY_SIZE(audio_ref_clock));
- if (ret) {
- dev_err(&pdev->dev, "%s: audio ref clock register failed\n",
- __func__);
- goto err_gpio;
- }
-
- return 0;
-
-err_gpio:
- gpio_free(clk_gpio);
-
-err:
- return ret;
-}
-
-static int audio_ref_clk_remove(struct platform_device *pdev)
-{
- struct pinctrl_info *pnctrl_info = &audio_ap_clk2.pnctrl_info;
-
- if (audio_pmi_clk.gpio > 0)
- gpio_free(audio_pmi_clk.gpio);
- else if (audio_ap_clk.gpio > 0)
- gpio_free(audio_ap_clk.gpio);
-
- if (pnctrl_info->pinctrl) {
- devm_pinctrl_put(pnctrl_info->pinctrl);
- pnctrl_info->pinctrl = NULL;
- }
-
- return 0;
-}
-
-static const struct of_device_id audio_ref_clk_match[] = {
- {.compatible = "qcom,audio-ref-clk"},
- {}
-};
-MODULE_DEVICE_TABLE(of, audio_ref_clk_match);
-
-static struct platform_driver audio_ref_clk_driver = {
- .driver = {
- .name = "audio-ref-clk",
- .owner = THIS_MODULE,
- .of_match_table = audio_ref_clk_match,
- },
- .probe = audio_ref_clk_probe,
- .remove = audio_ref_clk_remove,
-};
-
-static int __init audio_ref_clk_platform_init(void)
-{
- return platform_driver_register(&audio_ref_clk_driver);
-}
-module_init(audio_ref_clk_platform_init);
-
-static void __exit audio_ref_clk_platform_exit(void)
-{
- platform_driver_unregister(&audio_ref_clk_driver);
-}
-module_exit(audio_ref_clk_platform_exit);
-
-MODULE_DESCRIPTION("Audio Ref Clock module platform driver");
-MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c b/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c
index 62fdb94..a253aea 100644
--- a/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c
+++ b/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c
@@ -937,9 +937,8 @@
static int msm_sdw_vi_feed_mixer_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
struct msm_sdw_priv *msm_sdw_p = snd_soc_codec_get_drvdata(codec);
@@ -951,9 +950,8 @@
static int msm_sdw_vi_feed_mixer_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
struct msm_sdw_priv *msm_sdw_p = snd_soc_codec_get_drvdata(codec);
struct soc_multi_mixer_control *mixer =
@@ -1039,7 +1037,6 @@
__func__, reg);
sdw_rd_addr_base = MSM_SDW_AHB_BRIDGE_RD_ADDR_0;
sdw_rd_data_base = MSM_SDW_AHB_BRIDGE_RD_DATA_0;
-
/*
* Add sleep as SWR slave access read takes time.
* Allow for RD_DONE to complete for previous register if any.
@@ -1054,6 +1051,8 @@
dev_err(msm_sdw->dev, "%s: RD Addr Failure\n", __func__);
goto err;
}
+ /* Add sleep for SWR register read value to get updated. */
+ usleep_range(100, 105);
/* Check for RD value */
ret = regmap_bulk_read(msm_sdw->regmap, sdw_rd_data_base,
(u8 *)&val, 4);
@@ -1079,12 +1078,12 @@
sdw_wr_addr_base = MSM_SDW_AHB_BRIDGE_WR_ADDR_0;
sdw_wr_data_base = MSM_SDW_AHB_BRIDGE_WR_DATA_0;
- /*
- * Add sleep as SWR slave write takes time.
- * Allow for any previous pending write to complete.
- */
- usleep_range(50, 55);
for (i = 0; i < len; i += 2) {
+ /*
+ * Add sleep as SWR slave write takes time.
+ * Allow for any previous pending write to complete.
+ */
+ usleep_range(100, 105);
/* First Write the Data to register */
ret = regmap_bulk_write(msm_sdw->regmap,
sdw_wr_data_base, bulk_reg[i].buf, 4);
@@ -1377,15 +1376,19 @@
struct snd_info_entry *version_entry;
struct msm_sdw_priv *msm_sdw;
struct snd_soc_card *card;
+ char name[80];
if (!codec_root || !codec)
return -EINVAL;
msm_sdw = snd_soc_codec_get_drvdata(codec);
card = codec->component.card;
- msm_sdw->entry = snd_register_module_info(codec_root->module,
- "152c1000.msm-sdw-codec",
- codec_root);
+
+ snprintf(name, sizeof(name), "%x.%s", (u32)msm_sdw->sdw_base_addr,
+ "msm-sdw-codec");
+ msm_sdw->entry = snd_info_create_subdir(codec_root->module,
+ (const char *)name,
+ codec_root);
if (!msm_sdw->entry) {
dev_err(codec->dev, "%s: failed to create msm_sdw entry\n",
__func__);
@@ -1761,13 +1764,15 @@
static struct snd_soc_codec_driver soc_codec_dev_msm_sdw = {
.probe = msm_sdw_codec_probe,
.remove = msm_sdw_codec_remove,
- .controls = msm_sdw_snd_controls,
- .num_controls = ARRAY_SIZE(msm_sdw_snd_controls),
- .dapm_widgets = msm_sdw_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(msm_sdw_dapm_widgets),
- .dapm_routes = audio_map,
- .num_dapm_routes = ARRAY_SIZE(audio_map),
.get_regmap = msm_sdw_get_regmap,
+ .component_driver = {
+ .controls = msm_sdw_snd_controls,
+ .num_controls = ARRAY_SIZE(msm_sdw_snd_controls),
+ .dapm_widgets = msm_sdw_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(msm_sdw_dapm_widgets),
+ .dapm_routes = audio_map,
+ .num_dapm_routes = ARRAY_SIZE(audio_map),
+ },
};
static void msm_sdw_add_child_devices(struct work_struct *work)
diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c
index 9c365a7..7899a2c 100644
--- a/sound/soc/codecs/rt286.c
+++ b/sound/soc/codecs/rt286.c
@@ -1108,6 +1108,13 @@
DMI_MATCH(DMI_PRODUCT_NAME, "Kabylake Client platform")
}
},
+ {
+ .ident = "Thinkpad Helix 2nd",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix 2nd")
+ }
+ },
{ }
};
diff --git a/sound/soc/codecs/sdm660_cdc/Kconfig b/sound/soc/codecs/sdm660_cdc/Kconfig
index 2f36c39..e618258 100644
--- a/sound/soc/codecs/sdm660_cdc/Kconfig
+++ b/sound/soc/codecs/sdm660_cdc/Kconfig
@@ -1,5 +1,4 @@
config SND_SOC_SDM660_CDC
tristate "MSM Internal PMIC based codec"
- select SND_SOC_WCD_MBHC
select SND_SOC_WCD_MBHC_LEGACY
diff --git a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
index a8fcd34..f126d35 100644
--- a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
+++ b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
@@ -2055,6 +2055,9 @@
"ZERO", "RX2", "RX1"
};
+static const struct snd_kcontrol_new adc1_switch =
+ SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0);
+
static const struct soc_enum rdac2_mux_enum =
SOC_ENUM_SINGLE(MSM89XX_PMIC_DIGITAL_CDC_CONN_HPHR_DAC_CTL,
0, 3, rdac2_mux_text);
@@ -3105,7 +3108,8 @@
{"ADC2 MUX", "INP2", "ADC2_INP2"},
{"ADC2 MUX", "INP3", "ADC2_INP3"},
- {"ADC1", NULL, "AMIC1"},
+ {"ADC1", NULL, "ADC1_INP1"},
+ {"ADC1_INP1", "Switch", "AMIC1"},
{"ADC2_INP2", NULL, "AMIC2"},
{"ADC2_INP3", NULL, "AMIC3"},
@@ -3446,6 +3450,8 @@
SND_SOC_DAPM_SPK("Ext Spk", msm_anlg_cdc_codec_enable_spk_ext_pa),
+ SND_SOC_DAPM_SWITCH("ADC1_INP1", SND_SOC_NOPM, 0, 0,
+ &adc1_switch),
SND_SOC_DAPM_SUPPLY("RX1 CLK", MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
0, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("RX2 CLK", MSM89XX_PMIC_DIGITAL_CDC_DIG_CLK_CTL,
@@ -4052,7 +4058,7 @@
sdm660_cdc_priv = snd_soc_codec_get_drvdata(codec);
card = codec->component.card;
- sdm660_cdc_priv->entry = snd_register_module_info(codec_root->module,
+ sdm660_cdc_priv->entry = snd_info_create_subdir(codec_root->module,
"spmi0-03",
codec_root);
if (!sdm660_cdc_priv->entry) {
@@ -4320,13 +4326,15 @@
.suspend = msm_anlg_cdc_suspend,
.resume = msm_anlg_cdc_resume,
.reg_word_size = 1,
- .controls = msm_anlg_cdc_snd_controls,
- .num_controls = ARRAY_SIZE(msm_anlg_cdc_snd_controls),
- .dapm_widgets = msm_anlg_cdc_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(msm_anlg_cdc_dapm_widgets),
- .dapm_routes = audio_map,
- .num_dapm_routes = ARRAY_SIZE(audio_map),
.get_regmap = msm_anlg_get_regmap,
+ .component_driver = {
+ .controls = msm_anlg_cdc_snd_controls,
+ .num_controls = ARRAY_SIZE(msm_anlg_cdc_snd_controls),
+ .dapm_widgets = msm_anlg_cdc_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(msm_anlg_cdc_dapm_widgets),
+ .dapm_routes = audio_map,
+ .num_dapm_routes = ARRAY_SIZE(audio_map),
+ },
};
static int msm_anlg_cdc_init_supplies(struct sdm660_cdc_priv *sdm660_cdc,
diff --git a/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c b/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c
index 3f9c0b4..5e0a104 100644
--- a/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c
+++ b/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c
@@ -122,9 +122,7 @@
static int msm_dig_cdc_put_dec_enum(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *w = wlist->widgets[0];
+ struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
unsigned int dec_mux, decimator;
@@ -1157,7 +1155,7 @@
msm_dig = snd_soc_codec_get_drvdata(codec);
card = codec->component.card;
- msm_dig->entry = snd_register_module_info(codec_root->module,
+ msm_dig->entry = snd_info_create_subdir(codec_root->module,
"msm_digital_codec",
codec_root);
if (!msm_dig->entry) {
@@ -2037,13 +2035,15 @@
.remove = msm_dig_cdc_soc_remove,
.suspend = msm_dig_cdc_suspend,
.resume = msm_dig_cdc_resume,
- .controls = msm_dig_snd_controls,
- .num_controls = ARRAY_SIZE(msm_dig_snd_controls),
- .dapm_widgets = msm_dig_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(msm_dig_dapm_widgets),
- .dapm_routes = audio_dig_map,
- .num_dapm_routes = ARRAY_SIZE(audio_dig_map),
.get_regmap = msm_digital_get_regmap,
+ .component_driver = {
+ .controls = msm_dig_snd_controls,
+ .num_controls = ARRAY_SIZE(msm_dig_snd_controls),
+ .dapm_widgets = msm_dig_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(msm_dig_dapm_widgets),
+ .dapm_routes = audio_dig_map,
+ .num_dapm_routes = ARRAY_SIZE(audio_dig_map),
+ },
};
const struct regmap_config msm_digital_regmap_config = {
diff --git a/sound/soc/codecs/wcd-dsp-mgr.c b/sound/soc/codecs/wcd-dsp-mgr.c
index 93c2fd1..6cc9f8c 100644
--- a/sound/soc/codecs/wcd-dsp-mgr.c
+++ b/sound/soc/codecs/wcd-dsp-mgr.c
@@ -26,7 +26,8 @@
static char *wdsp_get_cmpnt_type_string(enum wdsp_cmpnt_type);
/* Component related macros */
-#define WDSP_GET_COMPONENT(wdsp, x) (&(wdsp->cmpnts[x]))
+#define WDSP_GET_COMPONENT(wdsp, x) ((x >= WDSP_CMPNT_TYPE_MAX || x < 0) ? \
+ NULL : (&(wdsp->cmpnts[x])))
#define WDSP_GET_CMPNT_TYPE_STR(x) wdsp_get_cmpnt_type_string(x)
/*
@@ -416,22 +417,23 @@
/* Go through the list of segments and download one by one */
list_for_each_entry(seg, wdsp->seg_list, list) {
ret = wdsp_load_each_segment(wdsp, seg);
- if (ret < 0) {
- wdsp_broadcast_event_downseq(wdsp,
- WDSP_EVENT_DLOAD_FAILED,
- NULL);
+ if (ret)
goto dload_error;
- }
}
+ /* Flush the list before setting status and notifying components */
+ wdsp_flush_segment_list(wdsp->seg_list);
+
WDSP_SET_STATUS(wdsp, status);
/* Notify all components that image is downloaded */
wdsp_broadcast_event_downseq(wdsp, post, NULL);
+done:
+ return ret;
dload_error:
wdsp_flush_segment_list(wdsp->seg_list);
-done:
+ wdsp_broadcast_event_downseq(wdsp, WDSP_EVENT_DLOAD_FAILED, NULL);
return ret;
}
@@ -485,10 +487,14 @@
/* Make sure wdsp is in good state */
if (!WDSP_STATUS_IS_SET(wdsp, WDSP_STATUS_CODE_DLOADED)) {
WDSP_ERR(wdsp, "WDSP in invalid state 0x%x", wdsp->status);
- ret = -EINVAL;
- goto done;
+ return -EINVAL;
}
+ /*
+ * Acquire SSR mutex lock to make sure enablement of DSP
+ * does not race with SSR handling.
+ */
+ WDSP_MGR_MUTEX_LOCK(wdsp, wdsp->ssr_mutex);
/* Download the read-write sections of image */
ret = wdsp_download_segments(wdsp, WDSP_ELF_FLAG_WRITE);
if (ret < 0) {
@@ -509,6 +515,7 @@
wdsp_broadcast_event_downseq(wdsp, WDSP_EVENT_POST_BOOTUP, NULL);
WDSP_SET_STATUS(wdsp, WDSP_STATUS_BOOTED);
done:
+ WDSP_MGR_MUTEX_UNLOCK(wdsp, wdsp->ssr_mutex);
return ret;
}
@@ -610,6 +617,25 @@
return cmpnt->cdev;
}
+static int wdsp_get_devops_for_cmpnt(struct device *wdsp_dev,
+ enum wdsp_cmpnt_type type,
+ void *data)
+{
+ struct wdsp_mgr_priv *wdsp;
+ int ret = 0;
+
+ if (!wdsp_dev || type >= WDSP_CMPNT_TYPE_MAX)
+ return -EINVAL;
+
+ wdsp = dev_get_drvdata(wdsp_dev);
+ ret = wdsp_unicast_event(wdsp, type,
+ WDSP_EVENT_GET_DEVOPS, data);
+ if (ret)
+ WDSP_ERR(wdsp, "get_dev_ops failed for cmpnt type %d",
+ type);
+ return ret;
+}
+
static void wdsp_collect_ramdumps(struct wdsp_mgr_priv *wdsp)
{
struct wdsp_img_section img_section;
@@ -941,6 +967,7 @@
static struct wdsp_mgr_ops wdsp_ops = {
.register_cmpnt_ops = wdsp_register_cmpnt_ops,
.get_dev_for_cmpnt = wdsp_get_dev_for_cmpnt,
+ .get_devops_for_cmpnt = wdsp_get_devops_for_cmpnt,
.signal_handler = wdsp_signal_handler,
.vote_for_dsp = wdsp_vote_for_dsp,
.suspend = wdsp_suspend,
@@ -1217,7 +1244,16 @@
.probe = wdsp_mgr_probe,
.remove = wdsp_mgr_remove,
};
-module_platform_driver(wdsp_mgr_driver);
+
+int wcd_dsp_mgr_init(void)
+{
+ return platform_driver_register(&wdsp_mgr_driver);
+}
+
+void wcd_dsp_mgr_exit(void)
+{
+ platform_driver_unregister(&wdsp_mgr_driver);
+}
MODULE_DESCRIPTION("WCD DSP manager driver");
MODULE_DEVICE_TABLE(of, wdsp_mgr_dt_match);
diff --git a/sound/soc/codecs/wcd-mbhc-adc.h b/sound/soc/codecs/wcd-mbhc-adc.h
index 112d508..3116108 100644
--- a/sound/soc/codecs/wcd-mbhc-adc.h
+++ b/sound/soc/codecs/wcd-mbhc-adc.h
@@ -24,7 +24,7 @@
MUX_CTL_NONE,
};
-#ifdef CONFIG_SND_SOC_WCD_MBHC_ADC
+#if IS_ENABLED(CONFIG_SND_SOC_WCD_MBHC_ADC)
void wcd_mbhc_adc_init(struct wcd_mbhc *mbhc);
#else
static inline void wcd_mbhc_adc_init(struct wcd_mbhc *mbhc)
diff --git a/sound/soc/codecs/wcd-mbhc-v2-api.h b/sound/soc/codecs/wcd-mbhc-v2-api.h
index fab2b49..7b6e945 100644
--- a/sound/soc/codecs/wcd-mbhc-v2-api.h
+++ b/sound/soc/codecs/wcd-mbhc-v2-api.h
@@ -14,7 +14,7 @@
#include "wcd-mbhc-v2.h"
-#ifdef CONFIG_SND_SOC_WCD_MBHC
+#if IS_ENABLED(CONFIG_SND_SOC_WCD_MBHC)
int wcd_mbhc_start(struct wcd_mbhc *mbhc,
struct wcd_mbhc_config *mbhc_cfg);
void wcd_mbhc_stop(struct wcd_mbhc *mbhc);
diff --git a/sound/soc/codecs/wcd-mbhc-v2.c b/sound/soc/codecs/wcd-mbhc-v2.c
index cb96f2b..eb67de9 100644
--- a/sound/soc/codecs/wcd-mbhc-v2.c
+++ b/sound/soc/codecs/wcd-mbhc-v2.c
@@ -329,6 +329,7 @@
/* Disable micbias, pullup & enable cs */
wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_CS);
mutex_unlock(&mbhc->hphl_pa_lock);
+ clear_bit(WCD_MBHC_ANC0_OFF_ACK, &mbhc->hph_anc_state);
break;
case WCD_EVENT_PRE_HPHR_PA_OFF:
mutex_lock(&mbhc->hphr_pa_lock);
@@ -346,6 +347,7 @@
/* Disable micbias, pullup & enable cs */
wcd_enable_curr_micbias(mbhc, WCD_MBHC_EN_CS);
mutex_unlock(&mbhc->hphr_pa_lock);
+ clear_bit(WCD_MBHC_ANC1_OFF_ACK, &mbhc->hph_anc_state);
break;
case WCD_EVENT_PRE_HPHL_PA_ON:
set_bit(WCD_MBHC_EVENT_PA_HPHL, &mbhc->event_state);
@@ -439,6 +441,25 @@
__func__);
usleep_range(wg_time * 1000, wg_time * 1000 + 50);
}
+
+ if (test_and_clear_bit(WCD_MBHC_ANC0_OFF_ACK,
+ &mbhc->hph_anc_state)) {
+ usleep_range(20000, 20100);
+ pr_debug("%s: HPHL ANC clear flag and enable ANC_EN\n",
+ __func__);
+ if (mbhc->mbhc_cb->update_anc_state)
+ mbhc->mbhc_cb->update_anc_state(mbhc->codec, true, 0);
+ }
+
+ if (test_and_clear_bit(WCD_MBHC_ANC1_OFF_ACK,
+ &mbhc->hph_anc_state)) {
+ usleep_range(20000, 20100);
+ pr_debug("%s: HPHR ANC clear flag and enable ANC_EN\n",
+ __func__);
+ if (mbhc->mbhc_cb->update_anc_state)
+ mbhc->mbhc_cb->update_anc_state(mbhc->codec, true, 1);
+ }
+
}
static bool wcd_mbhc_is_hph_pa_on(struct wcd_mbhc *mbhc)
@@ -471,6 +492,20 @@
}
WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_HPH_PA_EN, 0);
usleep_range(wg_time * 1000, wg_time * 1000 + 50);
+
+
+ if (mbhc->mbhc_cb->is_anc_on && mbhc->mbhc_cb->is_anc_on(mbhc)) {
+ usleep_range(20000, 20100);
+ pr_debug("%s ANC is on, setting ANC_OFF_ACK\n", __func__);
+ set_bit(WCD_MBHC_ANC0_OFF_ACK, &mbhc->hph_anc_state);
+ set_bit(WCD_MBHC_ANC1_OFF_ACK, &mbhc->hph_anc_state);
+ if (mbhc->mbhc_cb->update_anc_state) {
+ mbhc->mbhc_cb->update_anc_state(mbhc->codec, false, 0);
+ mbhc->mbhc_cb->update_anc_state(mbhc->codec, false, 1);
+ } else {
+ pr_debug("%s ANC is off\n", __func__);
+ }
+ }
}
int wcd_mbhc_get_impedance(struct wcd_mbhc *mbhc, uint32_t *zl,
@@ -581,7 +616,8 @@
jack_type == SND_JACK_LINEOUT) &&
(mbhc->hph_status && mbhc->hph_status != jack_type)) {
- if (mbhc->micbias_enable) {
+ if (mbhc->micbias_enable &&
+ mbhc->hph_status == SND_JACK_HEADSET) {
if (mbhc->mbhc_cb->mbhc_micbias_control)
mbhc->mbhc_cb->mbhc_micbias_control(
codec, MIC_BIAS_2,
@@ -1108,7 +1144,7 @@
* For ADC MBHC, ADC_COMPLETE interrupt will be generated
* in this case. So skip the check here.
*/
- if (!WCD_MBHC_DETECTION &&
+ if (mbhc->mbhc_detection_logic == WCD_DETECTION_LEGACY &&
mbhc->current_plug == MBHC_PLUG_TYPE_HEADPHONE) {
wcd_mbhc_find_plug_and_report(mbhc, MBHC_PLUG_TYPE_HEADSET);
goto exit;
@@ -1922,7 +1958,7 @@
init_waitqueue_head(&mbhc->wait_btn_press);
mutex_init(&mbhc->codec_resource_lock);
- switch (WCD_MBHC_DETECTION) {
+ switch (mbhc->mbhc_detection_logic) {
case WCD_DETECTION_LEGACY:
wcd_mbhc_legacy_init(mbhc);
break;
@@ -1931,7 +1967,7 @@
break;
default:
pr_err("%s: Unknown detection logic type %d\n",
- __func__, WCD_MBHC_DETECTION);
+ __func__, mbhc->mbhc_detection_logic);
break;
}
@@ -2058,9 +2094,12 @@
mbhc->mbhc_cb->free_irq(codec, mbhc->intr_ids->hph_right_ocp, mbhc);
if (mbhc->mbhc_cb && mbhc->mbhc_cb->register_notifier)
mbhc->mbhc_cb->register_notifier(mbhc, &mbhc->nblock, false);
- if (mbhc->mbhc_fn->wcd_cancel_hs_detect_plug)
+ if (mbhc->mbhc_fn->wcd_cancel_hs_detect_plug) {
+ WCD_MBHC_RSC_LOCK(mbhc);
mbhc->mbhc_fn->wcd_cancel_hs_detect_plug(mbhc,
&mbhc->correct_plug_swch);
+ WCD_MBHC_RSC_UNLOCK(mbhc);
+ }
mutex_destroy(&mbhc->codec_resource_lock);
mutex_destroy(&mbhc->hphl_pa_lock);
mutex_destroy(&mbhc->hphr_pa_lock);
diff --git a/sound/soc/codecs/wcd-mbhc-v2.h b/sound/soc/codecs/wcd-mbhc-v2.h
index 7ed06c3..c8714fc 100644
--- a/sound/soc/codecs/wcd-mbhc-v2.h
+++ b/sound/soc/codecs/wcd-mbhc-v2.h
@@ -157,12 +157,6 @@
WCD_DETECTION_ADC,
};
-#ifdef CONFIG_SND_SOC_WCD_MBHC_ADC
-#define WCD_MBHC_DETECTION WCD_DETECTION_ADC
-#else
-#define WCD_MBHC_DETECTION WCD_DETECTION_LEGACY
-#endif
-
enum wcd_mbhc_cs_mb_en_flag {
WCD_MBHC_EN_CS = 0,
WCD_MBHC_EN_MB,
@@ -240,6 +234,11 @@
WCD_MBHC_HPHR_PA_OFF_ACK,
};
+enum anc_ack_flags {
+ WCD_MBHC_ANC0_OFF_ACK = 0,
+ WCD_MBHC_ANC1_OFF_ACK,
+};
+
enum wcd_mbhc_btn_det_mem {
WCD_MBHC_BTN_DET_V_BTN_LOW,
WCD_MBHC_BTN_DET_V_BTN_HIGH
@@ -488,6 +487,9 @@
void (*hph_pull_down_ctrl)(struct snd_soc_codec *, bool);
void (*mbhc_moisture_config)(struct wcd_mbhc *);
bool (*hph_register_recovery)(struct wcd_mbhc *);
+ void (*update_anc_state)(struct snd_soc_codec *codec,
+ bool enable, int anc_num);
+ bool (*is_anc_on)(struct wcd_mbhc *mbhc);
};
struct wcd_mbhc_fn {
@@ -538,6 +540,7 @@
/* track PA/DAC state to sync with userspace */
unsigned long hph_pa_dac_state;
+ unsigned long hph_anc_state;
unsigned long event_state;
unsigned long jiffies_atreport;
@@ -565,6 +568,9 @@
struct mutex hphl_pa_lock;
struct mutex hphr_pa_lock;
+ /* Holds mbhc detection method - ADC/Legacy */
+ unsigned int mbhc_detection_logic;
+
unsigned long intr_status;
bool is_hph_ocp_pending;
diff --git a/sound/soc/codecs/wcd-spi.c b/sound/soc/codecs/wcd-spi.c
index a08b598..957d642 100644
--- a/sound/soc/codecs/wcd-spi.c
+++ b/sound/soc/codecs/wcd-spi.c
@@ -837,7 +837,7 @@
* about the write are encapsulated in @msg. Write size should be multiple
* of 4 bytes and write address should be 4-byte aligned.
*/
-int wcd_spi_data_write(struct spi_device *spi,
+static int wcd_spi_data_write(struct spi_device *spi,
struct wcd_spi_msg *msg)
{
if (!spi || !msg) {
@@ -850,7 +850,6 @@
__func__, msg->remote_addr, msg->len);
return wcd_spi_data_xfer(spi, msg, WCD_SPI_XFER_WRITE);
}
-EXPORT_SYMBOL(wcd_spi_data_write);
/*
* wcd_spi_data_read: Read data from WCD SPI
@@ -861,7 +860,7 @@
* about the read are encapsulated in @msg. Read size should be multiple
* of 4 bytes and read address should be 4-byte aligned.
*/
-int wcd_spi_data_read(struct spi_device *spi,
+static int wcd_spi_data_read(struct spi_device *spi,
struct wcd_spi_msg *msg)
{
if (!spi || !msg) {
@@ -874,7 +873,6 @@
__func__, msg->remote_addr, msg->len);
return wcd_spi_data_xfer(spi, msg, WCD_SPI_XFER_READ);
}
-EXPORT_SYMBOL(wcd_spi_data_read);
static int wdsp_spi_dload_section(struct spi_device *spi,
void *data)
@@ -925,6 +923,7 @@
{
struct spi_device *spi = to_spi_device(dev);
struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
+ struct wcd_spi_ops *spi_ops;
int ret = 0;
dev_dbg(&spi->dev, "%s: event type %d\n",
@@ -979,6 +978,20 @@
ret = wcd_spi_wait_for_resume(wcd_spi);
break;
+ case WDSP_EVENT_GET_DEVOPS:
+ if (!data) {
+ dev_err(&spi->dev, "%s: invalid data\n",
+ __func__);
+ ret = -EINVAL;
+ break;
+ }
+
+ spi_ops = (struct wcd_spi_ops *) data;
+ spi_ops->spi_dev = spi;
+ spi_ops->read_dev = wcd_spi_data_read;
+ spi_ops->write_dev = wcd_spi_data_write;
+ break;
+
default:
dev_dbg(&spi->dev, "%s: Unhandled event %d\n",
__func__, event);
diff --git a/sound/soc/codecs/wcd9330-tables.c b/sound/soc/codecs/wcd9330-tables.c
deleted file mode 100644
index 1866fb3..0000000
--- a/sound/soc/codecs/wcd9330-tables.c
+++ /dev/null
@@ -1,1675 +0,0 @@
-/* Copyright (c) 2014, 2017 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/mfd/wcd9xxx/wcd9330_registers.h>
-#include "wcd9330.h"
-
-const u8 tomtom_reg_readable[WCD9330_MAX_REGISTER + 1] = {
- [TOMTOM_A_CHIP_CTL] = 1,
- [TOMTOM_A_CHIP_STATUS] = 1,
- [TOMTOM_A_CHIP_ID_BYTE_0] = 1,
- [TOMTOM_A_CHIP_ID_BYTE_1] = 1,
- [TOMTOM_A_CHIP_ID_BYTE_2] = 1,
- [TOMTOM_A_CHIP_ID_BYTE_3] = 1,
- [TOMTOM_A_CHIP_I2C_SLAVE_ID] = 1,
- [TOMTOM_A_SLAVE_ID_1] = 1,
- [TOMTOM_A_SLAVE_ID_2] = 1,
- [TOMTOM_A_SLAVE_ID_3] = 1,
- [TOMTOM_A_PIN_CTL_OE0] = 1,
- [TOMTOM_A_PIN_CTL_OE1] = 1,
- [TOMTOM_A_PIN_CTL_OE2] = 1,
- [TOMTOM_A_PIN_CTL_DATA0] = 1,
- [TOMTOM_A_PIN_CTL_DATA1] = 1,
- [TOMTOM_A_PIN_CTL_DATA2] = 1,
- [TOMTOM_A_HDRIVE_GENERIC] = 1,
- [TOMTOM_A_HDRIVE_OVERRIDE] = 1,
- [TOMTOM_A_ANA_CSR_WAIT_STATE] = 1,
- [TOMTOM_A_PROCESS_MONITOR_CTL0] = 1,
- [TOMTOM_A_PROCESS_MONITOR_CTL1] = 1,
- [TOMTOM_A_PROCESS_MONITOR_CTL2] = 1,
- [TOMTOM_A_PROCESS_MONITOR_CTL3] = 1,
- [TOMTOM_A_QFUSE_CTL] = 1,
- [TOMTOM_A_QFUSE_STATUS] = 1,
- [TOMTOM_A_QFUSE_DATA_OUT0] = 1,
- [TOMTOM_A_QFUSE_DATA_OUT1] = 1,
- [TOMTOM_A_QFUSE_DATA_OUT2] = 1,
- [TOMTOM_A_QFUSE_DATA_OUT3] = 1,
- [TOMTOM_A_QFUSE_DATA_OUT4] = 1,
- [TOMTOM_A_QFUSE_DATA_OUT5] = 1,
- [TOMTOM_A_QFUSE_DATA_OUT6] = 1,
- [TOMTOM_A_QFUSE_DATA_OUT7] = 1,
- [TOMTOM_A_CDC_CTL] = 1,
- [TOMTOM_A_LEAKAGE_CTL] = 1,
- [TOMTOM_A_SVASS_MEM_PTR0] = 1,
- [TOMTOM_A_SVASS_MEM_PTR1] = 1,
- [TOMTOM_A_SVASS_MEM_PTR2] = 1,
- [TOMTOM_A_SVASS_MEM_CTL] = 1,
- [TOMTOM_A_SVASS_MEM_BANK] = 1,
- [TOMTOM_A_DMIC_B1_CTL] = 1,
- [TOMTOM_A_DMIC_B2_CTL] = 1,
- [TOMTOM_A_SVASS_CLKRST_CTL] = 1,
- [TOMTOM_A_SVASS_CPAR_CFG] = 1,
- [TOMTOM_A_SVASS_BUF_RDY_INT_PERIOD] = 1,
- [TOMTOM_A_SVASS_CPAR_WDOG_CFG] = 1,
- [TOMTOM_A_SVASS_CFG] = 1,
- [TOMTOM_A_SVASS_SPE_CFG] = 1,
- [TOMTOM_A_SVASS_STATUS] = 1,
- [TOMTOM_A_SVASS_INT_MASK] = 1,
- [TOMTOM_A_SVASS_INT_STATUS] = 1,
- [TOMTOM_A_SVASS_INT_CLR] = 0,
- [TOMTOM_A_SVASS_DEBUG] = 1,
- [TOMTOM_A_SVASS_SPE_BKUP_INT] = 0,
- [TOMTOM_A_SVASS_MEM_ACC] = 1,
- [TOMTOM_A_MEM_LEAKAGE_CTL] = 1,
- [TOMTOM_A_SVASS_SPE_INBOX_TRG] = 0,
- [TOMTOM_A_SVASS_SPE_INBOX_0] = 0,
- [TOMTOM_A_SVASS_SPE_INBOX_1] = 0,
- [TOMTOM_A_SVASS_SPE_INBOX_2] = 0,
- [TOMTOM_A_SVASS_SPE_INBOX_3] = 0,
- [TOMTOM_A_SVASS_SPE_INBOX_4] = 0,
- [TOMTOM_A_SVASS_SPE_INBOX_5] = 0,
- [TOMTOM_A_SVASS_SPE_INBOX_6] = 0,
- [TOMTOM_A_SVASS_SPE_INBOX_7] = 0,
- [TOMTOM_A_SVASS_SPE_INBOX_8] = 0,
- [TOMTOM_A_SVASS_SPE_INBOX_9] = 0,
- [TOMTOM_A_SVASS_SPE_INBOX_10] = 0,
- [TOMTOM_A_SVASS_SPE_INBOX_11] = 0,
- [TOMTOM_A_SVASS_SPE_OUTBOX_0] = 1,
- [TOMTOM_A_SVASS_SPE_OUTBOX_1] = 1,
- [TOMTOM_A_SVASS_SPE_OUTBOX_2] = 1,
- [TOMTOM_A_SVASS_SPE_OUTBOX_3] = 1,
- [TOMTOM_A_SVASS_SPE_OUTBOX_4] = 1,
- [TOMTOM_A_SVASS_SPE_OUTBOX_5] = 1,
- [TOMTOM_A_SVASS_SPE_OUTBOX_6] = 1,
- [TOMTOM_A_SVASS_SPE_OUTBOX_7] = 1,
- [TOMTOM_A_SVASS_SPE_OUTBOX_8] = 1,
- [TOMTOM_A_SVASS_SPE_OUTBOX_9] = 1,
- [TOMTOM_A_SVASS_SPE_OUTBOX_10] = 1,
- [TOMTOM_A_SVASS_SPE_OUTBOX_11] = 1,
- [TOMTOM_A_INTR_MODE] = 1,
- [TOMTOM_A_INTR1_MASK0] = 1,
- [TOMTOM_A_INTR1_MASK1] = 1,
- [TOMTOM_A_INTR1_MASK2] = 1,
- [TOMTOM_A_INTR1_MASK3] = 1,
- [TOMTOM_A_INTR1_STATUS0] = 1,
- [TOMTOM_A_INTR1_STATUS1] = 1,
- [TOMTOM_A_INTR1_STATUS2] = 1,
- [TOMTOM_A_INTR1_STATUS3] = 1,
- [TOMTOM_A_INTR1_CLEAR0] = 0,
- [TOMTOM_A_INTR1_CLEAR1] = 0,
- [TOMTOM_A_INTR1_CLEAR2] = 0,
- [TOMTOM_A_INTR1_CLEAR3] = 0,
- [TOMTOM_A_INTR1_LEVEL0] = 1,
- [TOMTOM_A_INTR1_LEVEL1] = 1,
- [TOMTOM_A_INTR1_LEVEL2] = 1,
- [TOMTOM_A_INTR1_LEVEL3] = 1,
- [TOMTOM_A_INTR1_TEST0] = 1,
- [TOMTOM_A_INTR1_TEST1] = 1,
- [TOMTOM_A_INTR1_TEST2] = 1,
- [TOMTOM_A_INTR1_TEST3] = 1,
- [TOMTOM_A_INTR1_SET0] = 1,
- [TOMTOM_A_INTR1_SET1] = 1,
- [TOMTOM_A_INTR1_SET2] = 1,
- [TOMTOM_A_INTR1_SET3] = 1,
- [TOMTOM_A_INTR2_MASK0] = 1,
- [TOMTOM_A_INTR2_STATUS0] = 1,
- [TOMTOM_A_INTR2_CLEAR0] = 0,
- [TOMTOM_A_INTR2_LEVEL0] = 1,
- [TOMTOM_A_INTR2_TEST0] = 1,
- [TOMTOM_A_INTR2_SET0] = 1,
- [TOMTOM_A_CDC_TX_I2S_SCK_MODE] = 1,
- [TOMTOM_A_CDC_TX_I2S_WS_MODE] = 1,
- [TOMTOM_A_CDC_DMIC_DATA0_MODE] = 1,
- [TOMTOM_A_CDC_DMIC_CLK0_MODE] = 1,
- [TOMTOM_A_CDC_DMIC_DATA1_MODE] = 1,
- [TOMTOM_A_CDC_DMIC_CLK1_MODE] = 1,
- [TOMTOM_A_CDC_RX_I2S_SCK_MODE] = 1,
- [TOMTOM_A_CDC_RX_I2S_WS_MODE] = 1,
- [TOMTOM_A_CDC_DMIC_DATA2_MODE] = 1,
- [TOMTOM_A_CDC_DMIC_CLK2_MODE] = 1,
- [TOMTOM_A_CDC_INTR1_MODE] = 1,
- [TOMTOM_A_CDC_SB_NRZ_SEL_MODE] = 1,
- [TOMTOM_A_CDC_INTR2_MODE] = 1,
- [TOMTOM_A_CDC_RF_PA_ON_MODE] = 1,
- [TOMTOM_A_CDC_BOOST_MODE] = 1,
- [TOMTOM_A_CDC_JTCK_MODE] = 1,
- [TOMTOM_A_CDC_JTDI_MODE] = 1,
- [TOMTOM_A_CDC_JTMS_MODE] = 1,
- [TOMTOM_A_CDC_JTDO_MODE] = 1,
- [TOMTOM_A_CDC_JTRST_MODE] = 1,
- [TOMTOM_A_CDC_BIST_MODE_MODE] = 1,
- [TOMTOM_A_CDC_MAD_MAIN_CTL_1] = 1,
- [TOMTOM_A_CDC_MAD_MAIN_CTL_2] = 1,
- [TOMTOM_A_CDC_MAD_AUDIO_CTL_1] = 1,
- [TOMTOM_A_CDC_MAD_AUDIO_CTL_2] = 1,
- [TOMTOM_A_CDC_MAD_AUDIO_CTL_3] = 1,
- [TOMTOM_A_CDC_MAD_AUDIO_CTL_4] = 1,
- [TOMTOM_A_CDC_MAD_AUDIO_CTL_5] = 1,
- [TOMTOM_A_CDC_MAD_AUDIO_CTL_6] = 1,
- [TOMTOM_A_CDC_MAD_AUDIO_CTL_7] = 1,
- [TOMTOM_A_CDC_MAD_AUDIO_CTL_8] = 1,
- [TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_PTR] = 1,
- [TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_VAL] = 1,
- [TOMTOM_A_CDC_MAD_ULTR_CTL_1] = 1,
- [TOMTOM_A_CDC_MAD_ULTR_CTL_2] = 1,
- [TOMTOM_A_CDC_MAD_ULTR_CTL_3] = 1,
- [TOMTOM_A_CDC_MAD_ULTR_CTL_4] = 1,
- [TOMTOM_A_CDC_MAD_ULTR_CTL_5] = 1,
- [TOMTOM_A_CDC_MAD_ULTR_CTL_6] = 1,
- [TOMTOM_A_CDC_MAD_ULTR_CTL_7] = 1,
- [TOMTOM_A_CDC_MAD_BEACON_CTL_1] = 1,
- [TOMTOM_A_CDC_MAD_BEACON_CTL_2] = 1,
- [TOMTOM_A_CDC_MAD_BEACON_CTL_3] = 1,
- [TOMTOM_A_CDC_MAD_BEACON_CTL_4] = 1,
- [TOMTOM_A_CDC_MAD_BEACON_CTL_5] = 1,
- [TOMTOM_A_CDC_MAD_BEACON_CTL_6] = 1,
- [TOMTOM_A_CDC_MAD_BEACON_CTL_7] = 1,
- [TOMTOM_A_CDC_MAD_BEACON_CTL_8] = 1,
- [TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_PTR] = 1,
- [TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_VAL] = 1,
- [TOMTOM_A_CDC_MAD_INP_SEL] = 1,
- [TOMTOM_A_BIAS_REF_CTL] = 1,
- [TOMTOM_A_BIAS_CENTRAL_BG_CTL] = 1,
- [TOMTOM_A_BIAS_PRECHRG_CTL] = 1,
- [TOMTOM_A_BIAS_CURR_CTL_1] = 1,
- [TOMTOM_A_BIAS_CURR_CTL_2] = 1,
- [TOMTOM_A_BIAS_OSC_BG_CTL] = 1,
- [TOMTOM_A_CLK_BUFF_EN1] = 1,
- [TOMTOM_A_CLK_BUFF_EN2] = 1,
- [TOMTOM_A_LDO_L_MODE_1] = 1,
- [TOMTOM_A_LDO_L_MODE_2] = 1,
- [TOMTOM_A_LDO_L_CTRL_1] = 1,
- [TOMTOM_A_LDO_L_CTRL_2] = 1,
- [TOMTOM_A_LDO_L_CTRL_3] = 1,
- [TOMTOM_A_LDO_L_CTRL_4] = 1,
- [TOMTOM_A_LDO_H_MODE_1] = 1,
- [TOMTOM_A_LDO_H_MODE_2] = 1,
- [TOMTOM_A_LDO_H_LOOP_CTL] = 1,
- [TOMTOM_A_LDO_H_COMP_1] = 1,
- [TOMTOM_A_LDO_H_COMP_2] = 1,
- [TOMTOM_A_LDO_H_BIAS_1] = 1,
- [TOMTOM_A_LDO_H_BIAS_2] = 1,
- [TOMTOM_A_LDO_H_BIAS_3] = 1,
- [TOMTOM_A_VBAT_CLK] = 1,
- [TOMTOM_A_VBAT_LOOP] = 1,
- [TOMTOM_A_VBAT_REF] = 1,
- [TOMTOM_A_VBAT_ADC_TEST] = 1,
- [TOMTOM_A_VBAT_FE] = 1,
- [TOMTOM_A_VBAT_BIAS_1] = 1,
- [TOMTOM_A_VBAT_BIAS_2] = 1,
- [TOMTOM_A_VBAT_ADC_DATA_MSB] = 1,
- [TOMTOM_A_VBAT_ADC_DATA_LSB] = 1,
- [TOMTOM_A_FLL_NREF] = 1,
- [TOMTOM_A_FLL_KDCO_TUNE] = 1,
- [TOMTOM_A_FLL_LOCK_THRESH] = 1,
- [TOMTOM_A_FLL_LOCK_DET_COUNT] = 1,
- [TOMTOM_A_FLL_DAC_THRESHOLD] = 1,
- [TOMTOM_A_FLL_TEST_DCO_FREERUN] = 1,
- [TOMTOM_A_FLL_TEST_ENABLE] = 1,
- [TOMTOM_A_MICB_CFILT_1_CTL] = 1,
- [TOMTOM_A_MICB_CFILT_1_VAL] = 1,
- [TOMTOM_A_MICB_CFILT_1_PRECHRG] = 1,
- [TOMTOM_A_MICB_1_CTL] = 1,
- [TOMTOM_A_MICB_1_INT_RBIAS] = 1,
- [TOMTOM_A_MICB_1_MBHC] = 1,
- [TOMTOM_A_MICB_CFILT_2_CTL] = 1,
- [TOMTOM_A_MICB_CFILT_2_VAL] = 1,
- [TOMTOM_A_MICB_CFILT_2_PRECHRG] = 1,
- [TOMTOM_A_MICB_2_CTL] = 1,
- [TOMTOM_A_MICB_2_INT_RBIAS] = 1,
- [TOMTOM_A_MICB_2_MBHC] = 1,
- [TOMTOM_A_MICB_CFILT_3_CTL] = 1,
- [TOMTOM_A_MICB_CFILT_3_VAL] = 1,
- [TOMTOM_A_MICB_CFILT_3_PRECHRG] = 1,
- [TOMTOM_A_MICB_3_CTL] = 1,
- [TOMTOM_A_MICB_3_INT_RBIAS] = 1,
- [TOMTOM_A_MICB_3_MBHC] = 1,
- [TOMTOM_A_MICB_4_CTL] = 1,
- [TOMTOM_A_MICB_4_INT_RBIAS] = 1,
- [TOMTOM_A_MICB_4_MBHC] = 1,
- [TOMTOM_A_SPKR_DRV2_EN] = 1,
- [TOMTOM_A_SPKR_DRV2_GAIN] = 1,
- [TOMTOM_A_SPKR_DRV2_DAC_CTL] = 1,
- [TOMTOM_A_SPKR_DRV2_OCP_CTL] = 1,
- [TOMTOM_A_SPKR_DRV2_CLIP_DET] = 1,
- [TOMTOM_A_SPKR_DRV2_DBG_DAC] = 1,
- [TOMTOM_A_SPKR_DRV2_DBG_PA] = 1,
- [TOMTOM_A_SPKR_DRV2_DBG_PWRSTG] = 1,
- [TOMTOM_A_SPKR_DRV2_BIAS_LDO] = 1,
- [TOMTOM_A_SPKR_DRV2_BIAS_INT] = 1,
- [TOMTOM_A_SPKR_DRV2_BIAS_PA] = 1,
- [TOMTOM_A_SPKR_DRV2_STATUS_OCP] = 1,
- [TOMTOM_A_SPKR_DRV2_STATUS_PA] = 1,
- [TOMTOM_A_MBHC_INSERT_DETECT] = 1,
- [TOMTOM_A_MBHC_INSERT_DET_STATUS] = 1,
- [TOMTOM_A_TX_COM_BIAS] = 1,
- [TOMTOM_A_MBHC_INSERT_DETECT2] = 1,
- [TOMTOM_A_MBHC_SCALING_MUX_1] = 1,
- [TOMTOM_A_MBHC_SCALING_MUX_2] = 1,
- [TOMTOM_A_MAD_ANA_CTRL] = 1,
- [TOMTOM_A_TX_SUP_SWITCH_CTRL_1] = 1,
- [TOMTOM_A_TX_SUP_SWITCH_CTRL_2] = 1,
- [TOMTOM_A_TX_1_GAIN] = 1,
- [TOMTOM_A_TX_1_2_TEST_EN] = 1,
- [TOMTOM_A_TX_2_GAIN] = 1,
- [TOMTOM_A_TX_1_2_ADC_IB] = 1,
- [TOMTOM_A_TX_1_2_ATEST_REFCTRL] = 1,
- [TOMTOM_A_TX_1_2_TEST_CTL] = 1,
- [TOMTOM_A_TX_1_2_TEST_BLOCK_EN] = 1,
- [TOMTOM_A_TX_1_2_TXFE_CLKDIV] = 1,
- [TOMTOM_A_TX_1_2_SAR_ERR_CH1] = 1,
- [TOMTOM_A_TX_1_2_SAR_ERR_CH2] = 1,
- [TOMTOM_A_TX_3_GAIN] = 1,
- [TOMTOM_A_TX_3_4_TEST_EN] = 1,
- [TOMTOM_A_TX_4_GAIN] = 1,
- [TOMTOM_A_TX_3_4_ADC_IB] = 1,
- [TOMTOM_A_TX_3_4_ATEST_REFCTRL] = 1,
- [TOMTOM_A_TX_3_4_TEST_CTL] = 1,
- [TOMTOM_A_TX_3_4_TEST_BLOCK_EN] = 1,
- [TOMTOM_A_TX_3_4_TXFE_CKDIV] = 1,
- [TOMTOM_A_TX_3_4_SAR_ERR_CH3] = 1,
- [TOMTOM_A_TX_3_4_SAR_ERR_CH4] = 1,
- [TOMTOM_A_TX_5_GAIN] = 1,
- [TOMTOM_A_TX_5_6_TEST_EN] = 1,
- [TOMTOM_A_TX_6_GAIN] = 1,
- [TOMTOM_A_TX_5_6_ADC_IB] = 1,
- [TOMTOM_A_TX_5_6_ATEST_REFCTRL] = 1,
- [TOMTOM_A_TX_5_6_TEST_CTL] = 1,
- [TOMTOM_A_TX_5_6_TEST_BLOCK_EN] = 1,
- [TOMTOM_A_TX_5_6_TXFE_CKDIV] = 1,
- [TOMTOM_A_TX_5_6_SAR_ERR_CH5] = 1,
- [TOMTOM_A_TX_5_6_SAR_ERR_CH6] = 1,
- [TOMTOM_A_TX_7_MBHC_EN] = 1,
- [TOMTOM_A_TX_7_MBHC_ATEST_REFCTRL] = 1,
- [TOMTOM_A_TX_7_MBHC_ADC] = 1,
- [TOMTOM_A_TX_7_MBHC_TEST_CTL] = 1,
- [TOMTOM_A_TX_7_MBHC_SAR_ERR] = 1,
- [TOMTOM_A_TX_7_TXFE_CLKDIV] = 1,
- [TOMTOM_A_RCO_CTRL] = 1,
- [TOMTOM_A_RCO_CALIBRATION_CTRL1] = 1,
- [TOMTOM_A_RCO_CALIBRATION_CTRL2] = 1,
- [TOMTOM_A_RCO_CALIBRATION_CTRL3] = 1,
- [TOMTOM_A_RCO_TEST_CTRL] = 1,
- [TOMTOM_A_RCO_CALIBRATION_RESULT1] = 1,
- [TOMTOM_A_RCO_CALIBRATION_RESULT2] = 1,
- [TOMTOM_A_BUCK_MODE_1] = 1,
- [TOMTOM_A_BUCK_MODE_2] = 1,
- [TOMTOM_A_BUCK_MODE_3] = 1,
- [TOMTOM_A_BUCK_MODE_4] = 1,
- [TOMTOM_A_BUCK_MODE_5] = 1,
- [TOMTOM_A_BUCK_CTRL_VCL_1] = 1,
- [TOMTOM_A_BUCK_CTRL_VCL_2] = 1,
- [TOMTOM_A_BUCK_CTRL_VCL_3] = 1,
- [TOMTOM_A_BUCK_CTRL_CCL_1] = 1,
- [TOMTOM_A_BUCK_CTRL_CCL_2] = 1,
- [TOMTOM_A_BUCK_CTRL_CCL_3] = 1,
- [TOMTOM_A_BUCK_CTRL_CCL_4] = 1,
- [TOMTOM_A_BUCK_CTRL_PWM_DRVR_1] = 1,
- [TOMTOM_A_BUCK_CTRL_PWM_DRVR_2] = 1,
- [TOMTOM_A_BUCK_CTRL_PWM_DRVR_3] = 1,
- [TOMTOM_A_BUCK_TMUX_A_D] = 1,
- [TOMTOM_A_NCP_BUCKREF] = 1,
- [TOMTOM_A_NCP_EN] = 1,
- [TOMTOM_A_NCP_CLK] = 1,
- [TOMTOM_A_NCP_STATIC] = 1,
- [TOMTOM_A_NCP_VTH_LOW] = 1,
- [TOMTOM_A_NCP_VTH_HIGH] = 1,
- [TOMTOM_A_NCP_ATEST] = 1,
- [TOMTOM_A_NCP_DTEST] = 1,
- [TOMTOM_A_NCP_DLY1] = 1,
- [TOMTOM_A_NCP_DLY2] = 1,
- [TOMTOM_A_RX_AUX_SW_CTL] = 1,
- [TOMTOM_A_RX_PA_AUX_IN_CONN] = 1,
- [TOMTOM_A_RX_COM_TIMER_DIV] = 1,
- [TOMTOM_A_RX_COM_OCP_CTL] = 1,
- [TOMTOM_A_RX_COM_OCP_COUNT] = 1,
- [TOMTOM_A_RX_COM_DAC_CTL] = 1,
- [TOMTOM_A_RX_COM_BIAS] = 1,
- [TOMTOM_A_RX_HPH_AUTO_CHOP] = 1,
- [TOMTOM_A_RX_HPH_CHOP_CTL] = 1,
- [TOMTOM_A_RX_HPH_BIAS_PA] = 1,
- [TOMTOM_A_RX_HPH_BIAS_LDO] = 1,
- [TOMTOM_A_RX_HPH_BIAS_CNP] = 1,
- [TOMTOM_A_RX_HPH_BIAS_WG_OCP] = 1,
- [TOMTOM_A_RX_HPH_OCP_CTL] = 1,
- [TOMTOM_A_RX_HPH_CNP_EN] = 1,
- [TOMTOM_A_RX_HPH_CNP_WG_CTL] = 1,
- [TOMTOM_A_RX_HPH_CNP_WG_TIME] = 1,
- [TOMTOM_A_RX_HPH_L_GAIN] = 1,
- [TOMTOM_A_RX_HPH_L_TEST] = 1,
- [TOMTOM_A_RX_HPH_L_PA_CTL] = 1,
- [TOMTOM_A_RX_HPH_L_DAC_CTL] = 1,
- [TOMTOM_A_RX_HPH_L_ATEST] = 1,
- [TOMTOM_A_RX_HPH_L_STATUS] = 1,
- [TOMTOM_A_RX_HPH_R_GAIN] = 1,
- [TOMTOM_A_RX_HPH_R_TEST] = 1,
- [TOMTOM_A_RX_HPH_R_PA_CTL] = 1,
- [TOMTOM_A_RX_HPH_R_DAC_CTL] = 1,
- [TOMTOM_A_RX_HPH_R_ATEST] = 1,
- [TOMTOM_A_RX_HPH_R_STATUS] = 1,
- [TOMTOM_A_RX_EAR_BIAS_PA] = 1,
- [TOMTOM_A_RX_EAR_BIAS_CMBUFF] = 1,
- [TOMTOM_A_RX_EAR_EN] = 1,
- [TOMTOM_A_RX_EAR_GAIN] = 1,
- [TOMTOM_A_RX_EAR_CMBUFF] = 1,
- [TOMTOM_A_RX_EAR_ICTL] = 1,
- [TOMTOM_A_RX_EAR_CCOMP] = 1,
- [TOMTOM_A_RX_EAR_VCM] = 1,
- [TOMTOM_A_RX_EAR_CNP] = 1,
- [TOMTOM_A_RX_EAR_DAC_CTL_ATEST] = 1,
- [TOMTOM_A_RX_EAR_STATUS] = 1,
- [TOMTOM_A_RX_LINE_BIAS_PA] = 1,
- [TOMTOM_A_RX_BUCK_BIAS1] = 1,
- [TOMTOM_A_RX_BUCK_BIAS2] = 1,
- [TOMTOM_A_RX_LINE_COM] = 1,
- [TOMTOM_A_RX_LINE_CNP_EN] = 1,
- [TOMTOM_A_RX_LINE_CNP_WG_CTL] = 1,
- [TOMTOM_A_RX_LINE_CNP_WG_TIME] = 1,
- [TOMTOM_A_RX_LINE_1_GAIN] = 1,
- [TOMTOM_A_RX_LINE_1_TEST] = 1,
- [TOMTOM_A_RX_LINE_1_DAC_CTL] = 1,
- [TOMTOM_A_RX_LINE_1_STATUS] = 1,
- [TOMTOM_A_RX_LINE_2_GAIN] = 1,
- [TOMTOM_A_RX_LINE_2_TEST] = 1,
- [TOMTOM_A_RX_LINE_2_DAC_CTL] = 1,
- [TOMTOM_A_RX_LINE_2_STATUS] = 1,
- [TOMTOM_A_RX_LINE_3_GAIN] = 1,
- [TOMTOM_A_RX_LINE_3_TEST] = 1,
- [TOMTOM_A_RX_LINE_3_DAC_CTL] = 1,
- [TOMTOM_A_RX_LINE_3_STATUS] = 1,
- [TOMTOM_A_RX_LINE_4_GAIN] = 1,
- [TOMTOM_A_RX_LINE_4_TEST] = 1,
- [TOMTOM_A_RX_LINE_4_DAC_CTL] = 1,
- [TOMTOM_A_RX_LINE_4_STATUS] = 1,
- [TOMTOM_A_RX_LINE_CNP_DBG] = 1,
- [TOMTOM_A_SPKR_DRV1_EN] = 1,
- [TOMTOM_A_SPKR_DRV1_GAIN] = 1,
- [TOMTOM_A_SPKR_DRV1_DAC_CTL] = 1,
- [TOMTOM_A_SPKR_DRV1_OCP_CTL] = 1,
- [TOMTOM_A_SPKR_DRV1_CLIP_DET] = 1,
- [TOMTOM_A_SPKR_DRV1_IEC] = 1,
- [TOMTOM_A_SPKR_DRV1_DBG_DAC] = 1,
- [TOMTOM_A_SPKR_DRV1_DBG_PA] = 1,
- [TOMTOM_A_SPKR_DRV1_DBG_PWRSTG] = 1,
- [TOMTOM_A_SPKR_DRV1_BIAS_LDO] = 1,
- [TOMTOM_A_SPKR_DRV1_BIAS_INT] = 1,
- [TOMTOM_A_SPKR_DRV1_BIAS_PA] = 1,
- [TOMTOM_A_SPKR_DRV1_STATUS_OCP] = 1,
- [TOMTOM_A_SPKR_DRV1_STATUS_PA] = 1,
- [TOMTOM_A_SPKR1_PROT_EN] = 1,
- [TOMTOM_A_SPKR1_PROT_ADC_TEST_EN] = 1,
- [TOMTOM_A_SPKR1_PROT_ATEST] = 1,
- [TOMTOM_A_SPKR1_PROT_LDO_CTRL] = 1,
- [TOMTOM_A_SPKR1_PROT_ISENSE_CTRL] = 1,
- [TOMTOM_A_SPKR1_PROT_VSENSE_CTRL] = 1,
- [TOMTOM_A_SPKR2_PROT_EN] = 1,
- [TOMTOM_A_SPKR2_PROT_ADC_TEST_EN] = 1,
- [TOMTOM_A_SPKR2_PROT_ATEST] = 1,
- [TOMTOM_A_SPKR2_PROT_LDO_CTRL] = 1,
- [TOMTOM_A_SPKR2_PROT_ISENSE_CTRL] = 1,
- [TOMTOM_A_SPKR2_PROT_VSENSE_CTRL] = 1,
- [TOMTOM_A_MBHC_HPH] = 1,
- [TOMTOM_A_CDC_ANC1_B1_CTL] = 1,
- [TOMTOM_A_CDC_ANC2_B1_CTL] = 1,
- [TOMTOM_A_CDC_ANC1_SHIFT] = 1,
- [TOMTOM_A_CDC_ANC2_SHIFT] = 1,
- [TOMTOM_A_CDC_ANC1_IIR_B1_CTL] = 1,
- [TOMTOM_A_CDC_ANC2_IIR_B1_CTL] = 1,
- [TOMTOM_A_CDC_ANC1_IIR_B2_CTL] = 1,
- [TOMTOM_A_CDC_ANC2_IIR_B2_CTL] = 1,
- [TOMTOM_A_CDC_ANC1_IIR_B3_CTL] = 1,
- [TOMTOM_A_CDC_ANC2_IIR_B3_CTL] = 1,
- [TOMTOM_A_CDC_ANC1_LPF_B1_CTL] = 1,
- [TOMTOM_A_CDC_ANC2_LPF_B1_CTL] = 1,
- [TOMTOM_A_CDC_ANC1_LPF_B2_CTL] = 1,
- [TOMTOM_A_CDC_ANC2_LPF_B2_CTL] = 1,
- [TOMTOM_A_CDC_ANC1_SPARE] = 1,
- [TOMTOM_A_CDC_ANC2_SPARE] = 1,
- [TOMTOM_A_CDC_ANC1_SMLPF_CTL] = 1,
- [TOMTOM_A_CDC_ANC2_SMLPF_CTL] = 1,
- [TOMTOM_A_CDC_ANC1_DCFLT_CTL] = 1,
- [TOMTOM_A_CDC_ANC2_DCFLT_CTL] = 1,
- [TOMTOM_A_CDC_ANC1_GAIN_CTL] = 1,
- [TOMTOM_A_CDC_ANC2_GAIN_CTL] = 1,
- [TOMTOM_A_CDC_ANC1_B2_CTL] = 1,
- [TOMTOM_A_CDC_ANC2_B2_CTL] = 1,
- [TOMTOM_A_CDC_TX1_VOL_CTL_TIMER] = 1,
- [TOMTOM_A_CDC_TX2_VOL_CTL_TIMER] = 1,
- [TOMTOM_A_CDC_TX3_VOL_CTL_TIMER] = 1,
- [TOMTOM_A_CDC_TX4_VOL_CTL_TIMER] = 1,
- [TOMTOM_A_CDC_TX5_VOL_CTL_TIMER] = 1,
- [TOMTOM_A_CDC_TX6_VOL_CTL_TIMER] = 1,
- [TOMTOM_A_CDC_TX7_VOL_CTL_TIMER] = 1,
- [TOMTOM_A_CDC_TX8_VOL_CTL_TIMER] = 1,
- [TOMTOM_A_CDC_TX9_VOL_CTL_TIMER] = 1,
- [TOMTOM_A_CDC_TX10_VOL_CTL_TIMER] = 1,
- [TOMTOM_A_CDC_TX1_VOL_CTL_GAIN] = 1,
- [TOMTOM_A_CDC_TX2_VOL_CTL_GAIN] = 1,
- [TOMTOM_A_CDC_TX3_VOL_CTL_GAIN] = 1,
- [TOMTOM_A_CDC_TX4_VOL_CTL_GAIN] = 1,
- [TOMTOM_A_CDC_TX5_VOL_CTL_GAIN] = 1,
- [TOMTOM_A_CDC_TX6_VOL_CTL_GAIN] = 1,
- [TOMTOM_A_CDC_TX7_VOL_CTL_GAIN] = 1,
- [TOMTOM_A_CDC_TX8_VOL_CTL_GAIN] = 1,
- [TOMTOM_A_CDC_TX9_VOL_CTL_GAIN] = 1,
- [TOMTOM_A_CDC_TX10_VOL_CTL_GAIN] = 1,
- [TOMTOM_A_CDC_TX1_VOL_CTL_CFG] = 1,
- [TOMTOM_A_CDC_TX2_VOL_CTL_CFG] = 1,
- [TOMTOM_A_CDC_TX3_VOL_CTL_CFG] = 1,
- [TOMTOM_A_CDC_TX4_VOL_CTL_CFG] = 1,
- [TOMTOM_A_CDC_TX5_VOL_CTL_CFG] = 1,
- [TOMTOM_A_CDC_TX6_VOL_CTL_CFG] = 1,
- [TOMTOM_A_CDC_TX7_VOL_CTL_CFG] = 1,
- [TOMTOM_A_CDC_TX8_VOL_CTL_CFG] = 1,
- [TOMTOM_A_CDC_TX9_VOL_CTL_CFG] = 1,
- [TOMTOM_A_CDC_TX10_VOL_CTL_CFG] = 1,
- [TOMTOM_A_CDC_TX1_MUX_CTL] = 1,
- [TOMTOM_A_CDC_TX2_MUX_CTL] = 1,
- [TOMTOM_A_CDC_TX3_MUX_CTL] = 1,
- [TOMTOM_A_CDC_TX4_MUX_CTL] = 1,
- [TOMTOM_A_CDC_TX5_MUX_CTL] = 1,
- [TOMTOM_A_CDC_TX6_MUX_CTL] = 1,
- [TOMTOM_A_CDC_TX7_MUX_CTL] = 1,
- [TOMTOM_A_CDC_TX8_MUX_CTL] = 1,
- [TOMTOM_A_CDC_TX9_MUX_CTL] = 1,
- [TOMTOM_A_CDC_TX10_MUX_CTL] = 1,
- [TOMTOM_A_CDC_TX1_CLK_FS_CTL] = 1,
- [TOMTOM_A_CDC_TX2_CLK_FS_CTL] = 1,
- [TOMTOM_A_CDC_TX3_CLK_FS_CTL] = 1,
- [TOMTOM_A_CDC_TX4_CLK_FS_CTL] = 1,
- [TOMTOM_A_CDC_TX5_CLK_FS_CTL] = 1,
- [TOMTOM_A_CDC_TX6_CLK_FS_CTL] = 1,
- [TOMTOM_A_CDC_TX7_CLK_FS_CTL] = 1,
- [TOMTOM_A_CDC_TX8_CLK_FS_CTL] = 1,
- [TOMTOM_A_CDC_TX9_CLK_FS_CTL] = 1,
- [TOMTOM_A_CDC_TX10_CLK_FS_CTL] = 1,
- [TOMTOM_A_CDC_TX1_DMIC_CTL] = 1,
- [TOMTOM_A_CDC_TX2_DMIC_CTL] = 1,
- [TOMTOM_A_CDC_TX3_DMIC_CTL] = 1,
- [TOMTOM_A_CDC_TX4_DMIC_CTL] = 1,
- [TOMTOM_A_CDC_TX5_DMIC_CTL] = 1,
- [TOMTOM_A_CDC_TX6_DMIC_CTL] = 1,
- [TOMTOM_A_CDC_TX7_DMIC_CTL] = 1,
- [TOMTOM_A_CDC_TX8_DMIC_CTL] = 1,
- [TOMTOM_A_CDC_TX9_DMIC_CTL] = 1,
- [TOMTOM_A_CDC_TX10_DMIC_CTL] = 1,
- [TOMTOM_A_CDC_SPKR_CLIPDET_VAL0] = 1,
- [TOMTOM_A_CDC_SPKR_CLIPDET_VAL1] = 1,
- [TOMTOM_A_CDC_SPKR_CLIPDET_VAL2] = 1,
- [TOMTOM_A_CDC_SPKR_CLIPDET_VAL3] = 1,
- [TOMTOM_A_CDC_SPKR_CLIPDET_VAL4] = 1,
- [TOMTOM_A_CDC_SPKR_CLIPDET_VAL5] = 1,
- [TOMTOM_A_CDC_SPKR_CLIPDET_VAL6] = 1,
- [TOMTOM_A_CDC_SPKR_CLIPDET_VAL7] = 1,
- [TOMTOM_A_CDC_DEBUG_B1_CTL] = 1,
- [TOMTOM_A_CDC_DEBUG_B2_CTL] = 1,
- [TOMTOM_A_CDC_DEBUG_B3_CTL] = 1,
- [TOMTOM_A_CDC_DEBUG_B4_CTL] = 1,
- [TOMTOM_A_CDC_DEBUG_B5_CTL] = 1,
- [TOMTOM_A_CDC_DEBUG_B6_CTL] = 1,
- [TOMTOM_A_CDC_DEBUG_B7_CTL] = 1,
- [TOMTOM_A_CDC_SRC1_PDA_CFG] = 1,
- [TOMTOM_A_CDC_SRC2_PDA_CFG] = 1,
- [TOMTOM_A_CDC_SRC1_FS_CTL] = 1,
- [TOMTOM_A_CDC_SRC2_FS_CTL] = 1,
- [TOMTOM_A_CDC_RX1_B1_CTL] = 1,
- [TOMTOM_A_CDC_RX2_B1_CTL] = 1,
- [TOMTOM_A_CDC_RX3_B1_CTL] = 1,
- [TOMTOM_A_CDC_RX4_B1_CTL] = 1,
- [TOMTOM_A_CDC_RX5_B1_CTL] = 1,
- [TOMTOM_A_CDC_RX6_B1_CTL] = 1,
- [TOMTOM_A_CDC_RX7_B1_CTL] = 1,
- [TOMTOM_A_CDC_RX1_B2_CTL] = 1,
- [TOMTOM_A_CDC_RX2_B2_CTL] = 1,
- [TOMTOM_A_CDC_RX3_B2_CTL] = 1,
- [TOMTOM_A_CDC_RX4_B2_CTL] = 1,
- [TOMTOM_A_CDC_RX5_B2_CTL] = 1,
- [TOMTOM_A_CDC_RX6_B2_CTL] = 1,
- [TOMTOM_A_CDC_RX7_B2_CTL] = 1,
- [TOMTOM_A_CDC_RX1_B3_CTL] = 1,
- [TOMTOM_A_CDC_RX2_B3_CTL] = 1,
- [TOMTOM_A_CDC_RX3_B3_CTL] = 1,
- [TOMTOM_A_CDC_RX4_B3_CTL] = 1,
- [TOMTOM_A_CDC_RX5_B3_CTL] = 1,
- [TOMTOM_A_CDC_RX6_B3_CTL] = 1,
- [TOMTOM_A_CDC_RX7_B3_CTL] = 1,
- [TOMTOM_A_CDC_RX1_B4_CTL] = 1,
- [TOMTOM_A_CDC_RX2_B4_CTL] = 1,
- [TOMTOM_A_CDC_RX3_B4_CTL] = 1,
- [TOMTOM_A_CDC_RX4_B4_CTL] = 1,
- [TOMTOM_A_CDC_RX5_B4_CTL] = 1,
- [TOMTOM_A_CDC_RX6_B4_CTL] = 1,
- [TOMTOM_A_CDC_RX7_B4_CTL] = 1,
- [TOMTOM_A_CDC_RX1_B5_CTL] = 1,
- [TOMTOM_A_CDC_RX2_B5_CTL] = 1,
- [TOMTOM_A_CDC_RX3_B5_CTL] = 1,
- [TOMTOM_A_CDC_RX4_B5_CTL] = 1,
- [TOMTOM_A_CDC_RX5_B5_CTL] = 1,
- [TOMTOM_A_CDC_RX6_B5_CTL] = 1,
- [TOMTOM_A_CDC_RX7_B5_CTL] = 1,
- [TOMTOM_A_CDC_RX1_B6_CTL] = 1,
- [TOMTOM_A_CDC_RX2_B6_CTL] = 1,
- [TOMTOM_A_CDC_RX3_B6_CTL] = 1,
- [TOMTOM_A_CDC_RX4_B6_CTL] = 1,
- [TOMTOM_A_CDC_RX5_B6_CTL] = 1,
- [TOMTOM_A_CDC_RX6_B6_CTL] = 1,
- [TOMTOM_A_CDC_RX7_B6_CTL] = 1,
- [TOMTOM_A_CDC_RX1_VOL_CTL_B1_CTL] = 1,
- [TOMTOM_A_CDC_RX2_VOL_CTL_B1_CTL] = 1,
- [TOMTOM_A_CDC_RX3_VOL_CTL_B1_CTL] = 1,
- [TOMTOM_A_CDC_RX4_VOL_CTL_B1_CTL] = 1,
- [TOMTOM_A_CDC_RX5_VOL_CTL_B1_CTL] = 1,
- [TOMTOM_A_CDC_RX6_VOL_CTL_B1_CTL] = 1,
- [TOMTOM_A_CDC_RX7_VOL_CTL_B1_CTL] = 1,
- [TOMTOM_A_CDC_RX1_VOL_CTL_B2_CTL] = 1,
- [TOMTOM_A_CDC_RX2_VOL_CTL_B2_CTL] = 1,
- [TOMTOM_A_CDC_RX3_VOL_CTL_B2_CTL] = 1,
- [TOMTOM_A_CDC_RX4_VOL_CTL_B2_CTL] = 1,
- [TOMTOM_A_CDC_RX5_VOL_CTL_B2_CTL] = 1,
- [TOMTOM_A_CDC_RX6_VOL_CTL_B2_CTL] = 1,
- [TOMTOM_A_CDC_RX7_VOL_CTL_B2_CTL] = 1,
- [TOMTOM_A_CDC_VBAT_CFG] = 1,
- [TOMTOM_A_CDC_VBAT_ADC_CAL1] = 1,
- [TOMTOM_A_CDC_VBAT_ADC_CAL2] = 1,
- [TOMTOM_A_CDC_VBAT_ADC_CAL3] = 1,
- [TOMTOM_A_CDC_VBAT_PK_EST1] = 1,
- [TOMTOM_A_CDC_VBAT_PK_EST2] = 1,
- [TOMTOM_A_CDC_VBAT_PK_EST3] = 1,
- [TOMTOM_A_CDC_VBAT_RF_PROC1] = 1,
- [TOMTOM_A_CDC_VBAT_RF_PROC2] = 1,
- [TOMTOM_A_CDC_VBAT_TAC1] = 1,
- [TOMTOM_A_CDC_VBAT_TAC2] = 1,
- [TOMTOM_A_CDC_VBAT_TAC3] = 1,
- [TOMTOM_A_CDC_VBAT_TAC4] = 1,
- [TOMTOM_A_CDC_VBAT_GAIN_UPD1] = 1,
- [TOMTOM_A_CDC_VBAT_GAIN_UPD2] = 1,
- [TOMTOM_A_CDC_VBAT_GAIN_UPD3] = 1,
- [TOMTOM_A_CDC_VBAT_GAIN_UPD4] = 1,
- [TOMTOM_A_CDC_VBAT_DEBUG1] = 1,
- [TOMTOM_A_CDC_VBAT_GAIN_UPD_MON] = 0,
- [TOMTOM_A_CDC_VBAT_GAIN_MON_VAL] = 1,
- [TOMTOM_A_CDC_CLK_ANC_RESET_CTL] = 1,
- [TOMTOM_A_CDC_CLK_RX_RESET_CTL] = 1,
- [TOMTOM_A_CDC_CLK_TX_RESET_B1_CTL] = 1,
- [TOMTOM_A_CDC_CLK_TX_RESET_B2_CTL] = 1,
- [TOMTOM_A_CDC_CLK_RX_I2S_CTL] = 1,
- [TOMTOM_A_CDC_CLK_TX_I2S_CTL] = 1,
- [TOMTOM_A_CDC_CLK_OTHR_RESET_B1_CTL] = 1,
- [TOMTOM_A_CDC_CLK_OTHR_RESET_B2_CTL] = 1,
- [TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL] = 1,
- [TOMTOM_A_CDC_CLK_TX_CLK_EN_B2_CTL] = 1,
- [TOMTOM_A_CDC_CLK_OTHR_CTL] = 1,
- [TOMTOM_A_CDC_CLK_ANC_CLK_EN_CTL] = 1,
- [TOMTOM_A_CDC_CLK_RX_B1_CTL] = 1,
- [TOMTOM_A_CDC_CLK_RX_B2_CTL] = 1,
- [TOMTOM_A_CDC_CLK_MCLK_CTL] = 1,
- [TOMTOM_A_CDC_CLK_PDM_CTL] = 1,
- [TOMTOM_A_CDC_CLK_SD_CTL] = 1,
- [TOMTOM_A_CDC_CLSH_B1_CTL] = 1,
- [TOMTOM_A_CDC_CLSH_B2_CTL] = 1,
- [TOMTOM_A_CDC_CLSH_B3_CTL] = 1,
- [TOMTOM_A_CDC_CLSH_BUCK_NCP_VARS] = 1,
- [TOMTOM_A_CDC_CLSH_IDLE_HPH_THSD] = 1,
- [TOMTOM_A_CDC_CLSH_IDLE_EAR_THSD] = 1,
- [TOMTOM_A_CDC_CLSH_FCLKONLY_HPH_THSD] = 1,
- [TOMTOM_A_CDC_CLSH_FCLKONLY_EAR_THSD] = 1,
- [TOMTOM_A_CDC_CLSH_K_ADDR] = 1,
- [TOMTOM_A_CDC_CLSH_K_DATA] = 1,
- [TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_L] = 1,
- [TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_U] = 1,
- [TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_L] = 1,
- [TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_U] = 1,
- [TOMTOM_A_CDC_CLSH_V_PA_HD_EAR] = 1,
- [TOMTOM_A_CDC_CLSH_V_PA_HD_HPH] = 1,
- [TOMTOM_A_CDC_CLSH_V_PA_MIN_EAR] = 1,
- [TOMTOM_A_CDC_CLSH_V_PA_MIN_HPH] = 1,
- [TOMTOM_A_CDC_IIR1_GAIN_B1_CTL] = 1,
- [TOMTOM_A_CDC_IIR2_GAIN_B1_CTL] = 1,
- [TOMTOM_A_CDC_IIR1_GAIN_B2_CTL] = 1,
- [TOMTOM_A_CDC_IIR2_GAIN_B2_CTL] = 1,
- [TOMTOM_A_CDC_IIR1_GAIN_B3_CTL] = 1,
- [TOMTOM_A_CDC_IIR2_GAIN_B3_CTL] = 1,
- [TOMTOM_A_CDC_IIR1_GAIN_B4_CTL] = 1,
- [TOMTOM_A_CDC_IIR2_GAIN_B4_CTL] = 1,
- [TOMTOM_A_CDC_IIR1_GAIN_B5_CTL] = 1,
- [TOMTOM_A_CDC_IIR2_GAIN_B5_CTL] = 1,
- [TOMTOM_A_CDC_IIR1_GAIN_B6_CTL] = 1,
- [TOMTOM_A_CDC_IIR2_GAIN_B6_CTL] = 1,
- [TOMTOM_A_CDC_IIR1_GAIN_B7_CTL] = 1,
- [TOMTOM_A_CDC_IIR2_GAIN_B7_CTL] = 1,
- [TOMTOM_A_CDC_IIR1_GAIN_B8_CTL] = 1,
- [TOMTOM_A_CDC_IIR2_GAIN_B8_CTL] = 1,
- [TOMTOM_A_CDC_IIR1_CTL] = 1,
- [TOMTOM_A_CDC_IIR2_CTL] = 1,
- [TOMTOM_A_CDC_IIR1_GAIN_TIMER_CTL] = 1,
- [TOMTOM_A_CDC_IIR2_GAIN_TIMER_CTL] = 1,
- [TOMTOM_A_CDC_IIR1_COEF_B1_CTL] = 1,
- [TOMTOM_A_CDC_IIR2_COEF_B1_CTL] = 1,
- [TOMTOM_A_CDC_IIR1_COEF_B2_CTL] = 1,
- [TOMTOM_A_CDC_IIR2_COEF_B2_CTL] = 1,
- [TOMTOM_A_CDC_TOP_GAIN_UPDATE] = 1,
- [TOMTOM_A_CDC_PA_RAMP_B1_CTL] = 1,
- [TOMTOM_A_CDC_PA_RAMP_B2_CTL] = 1,
- [TOMTOM_A_CDC_PA_RAMP_B3_CTL] = 1,
- [TOMTOM_A_CDC_PA_RAMP_B4_CTL] = 1,
- [TOMTOM_A_CDC_SPKR_CLIPDET_B1_CTL] = 1,
- [TOMTOM_A_CDC_SPKR2_CLIPDET_B1_CTL] = 1,
- [TOMTOM_A_CDC_COMP0_B1_CTL] = 1,
- [TOMTOM_A_CDC_COMP1_B1_CTL] = 1,
- [TOMTOM_A_CDC_COMP2_B1_CTL] = 1,
- [TOMTOM_A_CDC_COMP0_B2_CTL] = 1,
- [TOMTOM_A_CDC_COMP1_B2_CTL] = 1,
- [TOMTOM_A_CDC_COMP2_B2_CTL] = 1,
- [TOMTOM_A_CDC_COMP0_B3_CTL] = 1,
- [TOMTOM_A_CDC_COMP1_B3_CTL] = 1,
- [TOMTOM_A_CDC_COMP2_B3_CTL] = 1,
- [TOMTOM_A_CDC_COMP0_B4_CTL] = 1,
- [TOMTOM_A_CDC_COMP1_B4_CTL] = 1,
- [TOMTOM_A_CDC_COMP2_B4_CTL] = 1,
- [TOMTOM_A_CDC_COMP0_B5_CTL] = 1,
- [TOMTOM_A_CDC_COMP1_B5_CTL] = 1,
- [TOMTOM_A_CDC_COMP2_B5_CTL] = 1,
- [TOMTOM_A_CDC_COMP0_B6_CTL] = 1,
- [TOMTOM_A_CDC_COMP1_B6_CTL] = 1,
- [TOMTOM_A_CDC_COMP2_B6_CTL] = 1,
- [TOMTOM_A_CDC_COMP0_SHUT_DOWN_STATUS] = 1,
- [TOMTOM_A_CDC_COMP1_SHUT_DOWN_STATUS] = 1,
- [TOMTOM_A_CDC_COMP2_SHUT_DOWN_STATUS] = 1,
- [TOMTOM_A_CDC_COMP0_FS_CFG] = 1,
- [TOMTOM_A_CDC_COMP1_FS_CFG] = 1,
- [TOMTOM_A_CDC_COMP2_FS_CFG] = 1,
- [TOMTOM_A_CDC_CONN_RX1_B1_CTL] = 1,
- [TOMTOM_A_CDC_CONN_RX1_B2_CTL] = 1,
- [TOMTOM_A_CDC_CONN_RX1_B3_CTL] = 1,
- [TOMTOM_A_CDC_CONN_RX2_B1_CTL] = 1,
- [TOMTOM_A_CDC_CONN_RX2_B2_CTL] = 1,
- [TOMTOM_A_CDC_CONN_RX2_B3_CTL] = 1,
- [TOMTOM_A_CDC_CONN_RX3_B1_CTL] = 1,
- [TOMTOM_A_CDC_CONN_RX3_B2_CTL] = 1,
- [TOMTOM_A_CDC_CONN_RX4_B1_CTL] = 1,
- [TOMTOM_A_CDC_CONN_RX4_B2_CTL] = 1,
- [TOMTOM_A_CDC_CONN_RX5_B1_CTL] = 1,
- [TOMTOM_A_CDC_CONN_RX5_B2_CTL] = 1,
- [TOMTOM_A_CDC_CONN_RX6_B1_CTL] = 1,
- [TOMTOM_A_CDC_CONN_RX6_B2_CTL] = 1,
- [TOMTOM_A_CDC_CONN_RX7_B1_CTL] = 1,
- [TOMTOM_A_CDC_CONN_RX7_B2_CTL] = 1,
- [TOMTOM_A_CDC_CONN_RX7_B3_CTL] = 1,
- [TOMTOM_A_CDC_CONN_ANC_B1_CTL] = 1,
- [TOMTOM_A_CDC_CONN_ANC_B2_CTL] = 1,
- [TOMTOM_A_CDC_CONN_TX_B1_CTL] = 1,
- [TOMTOM_A_CDC_CONN_TX_B2_CTL] = 1,
- [TOMTOM_A_CDC_CONN_TX_B3_CTL] = 1,
- [TOMTOM_A_CDC_CONN_TX_B4_CTL] = 1,
- [TOMTOM_A_CDC_CONN_EQ1_B1_CTL] = 1,
- [TOMTOM_A_CDC_CONN_EQ1_B2_CTL] = 1,
- [TOMTOM_A_CDC_CONN_EQ1_B3_CTL] = 1,
- [TOMTOM_A_CDC_CONN_EQ1_B4_CTL] = 1,
- [TOMTOM_A_CDC_CONN_EQ2_B1_CTL] = 1,
- [TOMTOM_A_CDC_CONN_EQ2_B2_CTL] = 1,
- [TOMTOM_A_CDC_CONN_EQ2_B3_CTL] = 1,
- [TOMTOM_A_CDC_CONN_EQ2_B4_CTL] = 1,
- [TOMTOM_A_CDC_CONN_SRC1_B1_CTL] = 1,
- [TOMTOM_A_CDC_CONN_SRC1_B2_CTL] = 1,
- [TOMTOM_A_CDC_CONN_SRC2_B1_CTL] = 1,
- [TOMTOM_A_CDC_CONN_SRC2_B2_CTL] = 1,
- [TOMTOM_A_CDC_CONN_TX_SB_B1_CTL] = 1,
- [TOMTOM_A_CDC_CONN_TX_SB_B2_CTL] = 1,
- [TOMTOM_A_CDC_CONN_TX_SB_B3_CTL] = 1,
- [TOMTOM_A_CDC_CONN_TX_SB_B4_CTL] = 1,
- [TOMTOM_A_CDC_CONN_TX_SB_B5_CTL] = 1,
- [TOMTOM_A_CDC_CONN_TX_SB_B6_CTL] = 1,
- [TOMTOM_A_CDC_CONN_TX_SB_B7_CTL] = 1,
- [TOMTOM_A_CDC_CONN_TX_SB_B8_CTL] = 1,
- [TOMTOM_A_CDC_CONN_TX_SB_B9_CTL] = 1,
- [TOMTOM_A_CDC_CONN_TX_SB_B10_CTL] = 1,
- [TOMTOM_A_CDC_CONN_TX_SB_B11_CTL] = 1,
- [TOMTOM_A_CDC_CONN_RX_SB_B1_CTL] = 1,
- [TOMTOM_A_CDC_CONN_RX_SB_B2_CTL] = 1,
- [TOMTOM_A_CDC_CONN_CLSH_CTL] = 1,
- [TOMTOM_A_CDC_CONN_MISC] = 1,
- [TOMTOM_A_CDC_CONN_RX8_B1_CTL] = 1,
- [TOMTOM_A_CDC_CLIP_ADJ_SPKR_B1_CTL] = 1,
- [TOMTOM_A_CDC_CLIP_ADJ_SPKR_CLIP_LEVEL_ADJUST] = 1,
- [TOMTOM_A_CDC_CLIP_ADJ_SPKR_MIN_CLIP_THRESHOLD] = 1,
- [TOMTOM_A_CDC_CLIP_ADJ_SPKR_THRESHOLD_STATUS] = 1,
- [TOMTOM_A_CDC_CLIP_ADJ_SPKR_SAMPLE_MARK] = 1,
- [TOMTOM_A_CDC_CLIP_ADJ_SPKR_BOOST_GATING] = 1,
- [TOMTOM_A_CDC_CLIP_ADJ_SPKR2_B1_CTL] = 1,
- [TOMTOM_A_CDC_CLIP_ADJ_SPKR2_CLIP_LEVEL_ADJUST] = 1,
- [TOMTOM_A_CDC_CLIP_ADJ_SPKR2_MIN_CLIP_THRESHOLD] = 1,
- [TOMTOM_A_CDC_CLIP_ADJ_SPKR2_THRESHOLD_STATUS] = 1,
- [TOMTOM_A_CDC_CLIP_ADJ_SPKR2_SAMPLE_MARK] = 1,
- [TOMTOM_A_CDC_CLIP_ADJ_SPKR2_BOOST_GATING] = 1,
- [TOMTOM_A_CDC_MBHC_EN_CTL] = 1,
- [TOMTOM_A_CDC_MBHC_FIR_B1_CFG] = 1,
- [TOMTOM_A_CDC_MBHC_FIR_B2_CFG] = 1,
- [TOMTOM_A_CDC_MBHC_TIMER_B1_CTL] = 1,
- [TOMTOM_A_CDC_MBHC_TIMER_B2_CTL] = 1,
- [TOMTOM_A_CDC_MBHC_TIMER_B3_CTL] = 1,
- [TOMTOM_A_CDC_MBHC_TIMER_B4_CTL] = 1,
- [TOMTOM_A_CDC_MBHC_TIMER_B5_CTL] = 1,
- [TOMTOM_A_CDC_MBHC_TIMER_B6_CTL] = 1,
- [TOMTOM_A_CDC_MBHC_B1_STATUS] = 1,
- [TOMTOM_A_CDC_MBHC_B2_STATUS] = 1,
- [TOMTOM_A_CDC_MBHC_B3_STATUS] = 1,
- [TOMTOM_A_CDC_MBHC_B4_STATUS] = 1,
- [TOMTOM_A_CDC_MBHC_B5_STATUS] = 1,
- [TOMTOM_A_CDC_MBHC_B1_CTL] = 1,
- [TOMTOM_A_CDC_MBHC_B2_CTL] = 1,
- [TOMTOM_A_CDC_MBHC_VOLT_B1_CTL] = 1,
- [TOMTOM_A_CDC_MBHC_VOLT_B2_CTL] = 1,
- [TOMTOM_A_CDC_MBHC_VOLT_B3_CTL] = 1,
- [TOMTOM_A_CDC_MBHC_VOLT_B4_CTL] = 1,
- [TOMTOM_A_CDC_MBHC_VOLT_B5_CTL] = 1,
- [TOMTOM_A_CDC_MBHC_VOLT_B6_CTL] = 1,
- [TOMTOM_A_CDC_MBHC_VOLT_B7_CTL] = 1,
- [TOMTOM_A_CDC_MBHC_VOLT_B8_CTL] = 1,
- [TOMTOM_A_CDC_MBHC_VOLT_B9_CTL] = 1,
- [TOMTOM_A_CDC_MBHC_VOLT_B10_CTL] = 1,
- [TOMTOM_A_CDC_MBHC_VOLT_B11_CTL] = 1,
- [TOMTOM_A_CDC_MBHC_VOLT_B12_CTL] = 1,
- [TOMTOM_A_CDC_MBHC_CLK_CTL] = 1,
- [TOMTOM_A_CDC_MBHC_INT_CTL] = 1,
- [TOMTOM_A_CDC_MBHC_DEBUG_CTL] = 1,
- [TOMTOM_A_CDC_MBHC_SPARE] = 1,
- [TOMTOM_A_CDC_RX8_B1_CTL] = 1,
- [TOMTOM_A_CDC_RX8_B2_CTL] = 1,
- [TOMTOM_A_CDC_RX8_B3_CTL] = 1,
- [TOMTOM_A_CDC_RX8_B4_CTL] = 1,
- [TOMTOM_A_CDC_RX8_B5_CTL] = 1,
- [TOMTOM_A_CDC_RX8_B6_CTL] = 1,
- [TOMTOM_A_CDC_RX8_VOL_CTL_B1_CTL] = 1,
- [TOMTOM_A_CDC_RX8_VOL_CTL_B2_CTL] = 1,
- [TOMTOM_A_CDC_SPKR2_CLIPDET_VAL0] = 1,
- [TOMTOM_A_CDC_SPKR2_CLIPDET_VAL1] = 1,
- [TOMTOM_A_CDC_SPKR2_CLIPDET_VAL2] = 1,
- [TOMTOM_A_CDC_SPKR2_CLIPDET_VAL3] = 1,
- [TOMTOM_A_CDC_SPKR2_CLIPDET_VAL4] = 1,
- [TOMTOM_A_CDC_SPKR2_CLIPDET_VAL5] = 1,
- [TOMTOM_A_CDC_SPKR2_CLIPDET_VAL6] = 1,
- [TOMTOM_A_CDC_SPKR2_CLIPDET_VAL7] = 1,
- [TOMTOM_A_CDC_BOOST_MODE_CTL] = 1,
- [TOMTOM_A_CDC_BOOST_THRESHOLD] = 1,
- [TOMTOM_A_CDC_BOOST_TAP_SEL] = 1,
- [TOMTOM_A_CDC_BOOST_HOLD_TIME] = 1,
- [TOMTOM_A_CDC_BOOST_TRGR_EN] = 1,
-};
-
-const u8 tomtom_reset_reg_defaults[TOMTOM_CACHE_SIZE] = {
- [TOMTOM_A_CHIP_CTL] = TOMTOM_A_CHIP_CTL__POR,
- [TOMTOM_A_CHIP_STATUS] = TOMTOM_A_CHIP_STATUS__POR,
- [TOMTOM_A_CHIP_ID_BYTE_0] = TOMTOM_A_CHIP_ID_BYTE_0__POR,
- [TOMTOM_A_CHIP_ID_BYTE_1] = TOMTOM_A_CHIP_ID_BYTE_1__POR,
- [TOMTOM_A_CHIP_ID_BYTE_2] = TOMTOM_A_CHIP_ID_BYTE_2__POR,
- [TOMTOM_A_CHIP_ID_BYTE_3] = TOMTOM_A_CHIP_ID_BYTE_3__POR,
- [TOMTOM_A_CHIP_I2C_SLAVE_ID] = TOMTOM_A_CHIP_I2C_SLAVE_ID__POR,
- [TOMTOM_A_SLAVE_ID_1] = TOMTOM_A_SLAVE_ID_1__POR,
- [TOMTOM_A_SLAVE_ID_2] = TOMTOM_A_SLAVE_ID_2__POR,
- [TOMTOM_A_SLAVE_ID_3] = TOMTOM_A_SLAVE_ID_3__POR,
- [TOMTOM_A_PIN_CTL_OE0] = TOMTOM_A_PIN_CTL_OE0__POR,
- [TOMTOM_A_PIN_CTL_OE1] = TOMTOM_A_PIN_CTL_OE1__POR,
- [TOMTOM_A_PIN_CTL_OE2] = TOMTOM_A_PIN_CTL_OE2__POR,
- [TOMTOM_A_PIN_CTL_DATA0] = TOMTOM_A_PIN_CTL_DATA0__POR,
- [TOMTOM_A_PIN_CTL_DATA1] = TOMTOM_A_PIN_CTL_DATA1__POR,
- [TOMTOM_A_PIN_CTL_DATA2] = TOMTOM_A_PIN_CTL_DATA2__POR,
- [TOMTOM_A_HDRIVE_GENERIC] = TOMTOM_A_HDRIVE_GENERIC__POR,
- [TOMTOM_A_HDRIVE_OVERRIDE] = TOMTOM_A_HDRIVE_OVERRIDE__POR,
- [TOMTOM_A_ANA_CSR_WAIT_STATE] = TOMTOM_A_ANA_CSR_WAIT_STATE__POR,
- [TOMTOM_A_PROCESS_MONITOR_CTL0] = TOMTOM_A_PROCESS_MONITOR_CTL0__POR,
- [TOMTOM_A_PROCESS_MONITOR_CTL1] = TOMTOM_A_PROCESS_MONITOR_CTL1__POR,
- [TOMTOM_A_PROCESS_MONITOR_CTL2] = TOMTOM_A_PROCESS_MONITOR_CTL2__POR,
- [TOMTOM_A_PROCESS_MONITOR_CTL3] = TOMTOM_A_PROCESS_MONITOR_CTL3__POR,
- [TOMTOM_A_QFUSE_CTL] = TOMTOM_A_QFUSE_CTL__POR,
- [TOMTOM_A_QFUSE_STATUS] = TOMTOM_A_QFUSE_STATUS__POR,
- [TOMTOM_A_QFUSE_DATA_OUT0] = TOMTOM_A_QFUSE_DATA_OUT0__POR,
- [TOMTOM_A_QFUSE_DATA_OUT1] = TOMTOM_A_QFUSE_DATA_OUT1__POR,
- [TOMTOM_A_QFUSE_DATA_OUT2] = TOMTOM_A_QFUSE_DATA_OUT2__POR,
- [TOMTOM_A_QFUSE_DATA_OUT3] = TOMTOM_A_QFUSE_DATA_OUT3__POR,
- [TOMTOM_A_QFUSE_DATA_OUT4] = TOMTOM_A_QFUSE_DATA_OUT4__POR,
- [TOMTOM_A_QFUSE_DATA_OUT5] = TOMTOM_A_QFUSE_DATA_OUT5__POR,
- [TOMTOM_A_QFUSE_DATA_OUT6] = TOMTOM_A_QFUSE_DATA_OUT6__POR,
- [TOMTOM_A_QFUSE_DATA_OUT7] = TOMTOM_A_QFUSE_DATA_OUT7__POR,
- [TOMTOM_A_CDC_CTL] = TOMTOM_A_CDC_CTL__POR,
- [TOMTOM_A_LEAKAGE_CTL] = TOMTOM_A_LEAKAGE_CTL__POR,
- [TOMTOM_A_SVASS_MEM_PTR0] = TOMTOM_A_SVASS_MEM_PTR0__POR,
- [TOMTOM_A_SVASS_MEM_PTR1] = TOMTOM_A_SVASS_MEM_PTR1__POR,
- [TOMTOM_A_SVASS_MEM_PTR2] = TOMTOM_A_SVASS_MEM_PTR2__POR,
- [TOMTOM_A_SVASS_MEM_CTL] = TOMTOM_A_SVASS_MEM_CTL__POR,
- [TOMTOM_A_SVASS_MEM_BANK] = TOMTOM_A_SVASS_MEM_BANK__POR,
- [TOMTOM_A_DMIC_B1_CTL] = TOMTOM_A_DMIC_B1_CTL__POR,
- [TOMTOM_A_DMIC_B2_CTL] = TOMTOM_A_DMIC_B2_CTL__POR,
- [TOMTOM_A_SVASS_CLKRST_CTL] = TOMTOM_A_SVASS_CLKRST_CTL__POR,
- [TOMTOM_A_SVASS_CPAR_CFG] = TOMTOM_A_SVASS_CPAR_CFG__POR,
- [TOMTOM_A_SVASS_BUF_RDY_INT_PERIOD] =
- TOMTOM_A_SVASS_BUF_RDY_INT_PERIOD__POR,
- [TOMTOM_A_SVASS_CPAR_WDOG_CFG] = TOMTOM_A_SVASS_CPAR_WDOG_CFG__POR,
- [TOMTOM_A_SVASS_CFG] = TOMTOM_A_SVASS_CFG__POR,
- [TOMTOM_A_SVASS_SPE_CFG] = TOMTOM_A_SVASS_SPE_CFG__POR,
- [TOMTOM_A_SVASS_STATUS] = TOMTOM_A_SVASS_STATUS__POR,
- [TOMTOM_A_SVASS_INT_MASK] = TOMTOM_A_SVASS_INT_MASK__POR,
- [TOMTOM_A_SVASS_INT_STATUS] = TOMTOM_A_SVASS_INT_STATUS__POR,
- [TOMTOM_A_SVASS_INT_CLR] = TOMTOM_A_SVASS_INT_CLR__POR,
- [TOMTOM_A_SVASS_DEBUG] = TOMTOM_A_SVASS_DEBUG__POR,
- [TOMTOM_A_SVASS_SPE_BKUP_INT] = TOMTOM_A_SVASS_SPE_BKUP_INT__POR,
- [TOMTOM_A_SVASS_MEM_ACC] = TOMTOM_A_SVASS_MEM_ACC__POR,
- [TOMTOM_A_MEM_LEAKAGE_CTL] = TOMTOM_A_MEM_LEAKAGE_CTL__POR,
- [TOMTOM_A_SVASS_SPE_INBOX_TRG] = TOMTOM_A_SVASS_SPE_INBOX_TRG__POR,
- [TOMTOM_A_SVASS_SPE_INBOX_0] = TOMTOM_A_SVASS_SPE_INBOX_0__POR,
- [TOMTOM_A_SVASS_SPE_INBOX_1] = TOMTOM_A_SVASS_SPE_INBOX_1__POR,
- [TOMTOM_A_SVASS_SPE_INBOX_2] = TOMTOM_A_SVASS_SPE_INBOX_2__POR,
- [TOMTOM_A_SVASS_SPE_INBOX_3] = TOMTOM_A_SVASS_SPE_INBOX_3__POR,
- [TOMTOM_A_SVASS_SPE_INBOX_4] = TOMTOM_A_SVASS_SPE_INBOX_4__POR,
- [TOMTOM_A_SVASS_SPE_INBOX_5] = TOMTOM_A_SVASS_SPE_INBOX_5__POR,
- [TOMTOM_A_SVASS_SPE_INBOX_6] = TOMTOM_A_SVASS_SPE_INBOX_6__POR,
- [TOMTOM_A_SVASS_SPE_INBOX_7] = TOMTOM_A_SVASS_SPE_INBOX_7__POR,
- [TOMTOM_A_SVASS_SPE_INBOX_8] = TOMTOM_A_SVASS_SPE_INBOX_8__POR,
- [TOMTOM_A_SVASS_SPE_INBOX_9] = TOMTOM_A_SVASS_SPE_INBOX_9__POR,
- [TOMTOM_A_SVASS_SPE_INBOX_10] = TOMTOM_A_SVASS_SPE_INBOX_10__POR,
- [TOMTOM_A_SVASS_SPE_INBOX_11] = TOMTOM_A_SVASS_SPE_INBOX_11__POR,
- [TOMTOM_A_SVASS_SPE_OUTBOX_0] = TOMTOM_A_SVASS_SPE_OUTBOX_0__POR,
- [TOMTOM_A_SVASS_SPE_OUTBOX_1] = TOMTOM_A_SVASS_SPE_OUTBOX_1__POR,
- [TOMTOM_A_SVASS_SPE_OUTBOX_2] = TOMTOM_A_SVASS_SPE_OUTBOX_2__POR,
- [TOMTOM_A_SVASS_SPE_OUTBOX_3] = TOMTOM_A_SVASS_SPE_OUTBOX_3__POR,
- [TOMTOM_A_SVASS_SPE_OUTBOX_4] = TOMTOM_A_SVASS_SPE_OUTBOX_4__POR,
- [TOMTOM_A_SVASS_SPE_OUTBOX_5] = TOMTOM_A_SVASS_SPE_OUTBOX_5__POR,
- [TOMTOM_A_SVASS_SPE_OUTBOX_6] = TOMTOM_A_SVASS_SPE_OUTBOX_6__POR,
- [TOMTOM_A_SVASS_SPE_OUTBOX_7] = TOMTOM_A_SVASS_SPE_OUTBOX_7__POR,
- [TOMTOM_A_SVASS_SPE_OUTBOX_8] = TOMTOM_A_SVASS_SPE_OUTBOX_8__POR,
- [TOMTOM_A_SVASS_SPE_OUTBOX_9] = TOMTOM_A_SVASS_SPE_OUTBOX_9__POR,
- [TOMTOM_A_SVASS_SPE_OUTBOX_10] = TOMTOM_A_SVASS_SPE_OUTBOX_10__POR,
- [TOMTOM_A_SVASS_SPE_OUTBOX_11] = TOMTOM_A_SVASS_SPE_OUTBOX_11__POR,
- [TOMTOM_A_INTR_MODE] = TOMTOM_A_INTR_MODE__POR,
- [TOMTOM_A_INTR1_MASK0] = TOMTOM_A_INTR1_MASK0__POR,
- [TOMTOM_A_INTR1_MASK1] = TOMTOM_A_INTR1_MASK1__POR,
- [TOMTOM_A_INTR1_MASK2] = TOMTOM_A_INTR1_MASK2__POR,
- [TOMTOM_A_INTR1_MASK3] = TOMTOM_A_INTR1_MASK3__POR,
- [TOMTOM_A_INTR1_STATUS0] = TOMTOM_A_INTR1_STATUS0__POR,
- [TOMTOM_A_INTR1_STATUS1] = TOMTOM_A_INTR1_STATUS1__POR,
- [TOMTOM_A_INTR1_STATUS2] = TOMTOM_A_INTR1_STATUS2__POR,
- [TOMTOM_A_INTR1_STATUS3] = TOMTOM_A_INTR1_STATUS3__POR,
- [TOMTOM_A_INTR1_CLEAR0] = TOMTOM_A_INTR1_CLEAR0__POR,
- [TOMTOM_A_INTR1_CLEAR1] = TOMTOM_A_INTR1_CLEAR1__POR,
- [TOMTOM_A_INTR1_CLEAR2] = TOMTOM_A_INTR1_CLEAR2__POR,
- [TOMTOM_A_INTR1_CLEAR3] = TOMTOM_A_INTR1_CLEAR3__POR,
- [TOMTOM_A_INTR1_LEVEL0] = TOMTOM_A_INTR1_LEVEL0__POR,
- [TOMTOM_A_INTR1_LEVEL1] = TOMTOM_A_INTR1_LEVEL1__POR,
- [TOMTOM_A_INTR1_LEVEL2] = TOMTOM_A_INTR1_LEVEL2__POR,
- [TOMTOM_A_INTR1_LEVEL3] = TOMTOM_A_INTR1_LEVEL3__POR,
- [TOMTOM_A_INTR1_TEST0] = TOMTOM_A_INTR1_TEST0__POR,
- [TOMTOM_A_INTR1_TEST1] = TOMTOM_A_INTR1_TEST1__POR,
- [TOMTOM_A_INTR1_TEST2] = TOMTOM_A_INTR1_TEST2__POR,
- [TOMTOM_A_INTR1_TEST3] = TOMTOM_A_INTR1_TEST3__POR,
- [TOMTOM_A_INTR1_SET0] = TOMTOM_A_INTR1_SET0__POR,
- [TOMTOM_A_INTR1_SET1] = TOMTOM_A_INTR1_SET1__POR,
- [TOMTOM_A_INTR1_SET2] = TOMTOM_A_INTR1_SET2__POR,
- [TOMTOM_A_INTR1_SET3] = TOMTOM_A_INTR1_SET3__POR,
- [TOMTOM_A_INTR2_MASK0] = TOMTOM_A_INTR2_MASK0__POR,
- [TOMTOM_A_INTR2_STATUS0] = TOMTOM_A_INTR2_STATUS0__POR,
- [TOMTOM_A_INTR2_CLEAR0] = TOMTOM_A_INTR2_CLEAR0__POR,
- [TOMTOM_A_INTR2_LEVEL0] = TOMTOM_A_INTR2_LEVEL0__POR,
- [TOMTOM_A_INTR2_TEST0] = TOMTOM_A_INTR2_TEST0__POR,
- [TOMTOM_A_INTR2_SET0] = TOMTOM_A_INTR2_SET0__POR,
- [TOMTOM_A_CDC_TX_I2S_SCK_MODE] = TOMTOM_A_CDC_TX_I2S_SCK_MODE__POR,
- [TOMTOM_A_CDC_TX_I2S_WS_MODE] = TOMTOM_A_CDC_TX_I2S_WS_MODE__POR,
- [TOMTOM_A_CDC_DMIC_DATA0_MODE] = TOMTOM_A_CDC_DMIC_DATA0_MODE__POR,
- [TOMTOM_A_CDC_DMIC_CLK0_MODE] = TOMTOM_A_CDC_DMIC_CLK0_MODE__POR,
- [TOMTOM_A_CDC_DMIC_DATA1_MODE] = TOMTOM_A_CDC_DMIC_DATA1_MODE__POR,
- [TOMTOM_A_CDC_DMIC_CLK1_MODE] = TOMTOM_A_CDC_DMIC_CLK1_MODE__POR,
- [TOMTOM_A_CDC_RX_I2S_SCK_MODE] = TOMTOM_A_CDC_RX_I2S_SCK_MODE__POR,
- [TOMTOM_A_CDC_RX_I2S_WS_MODE] = TOMTOM_A_CDC_RX_I2S_WS_MODE__POR,
- [TOMTOM_A_CDC_DMIC_DATA2_MODE] = TOMTOM_A_CDC_DMIC_DATA2_MODE__POR,
- [TOMTOM_A_CDC_DMIC_CLK2_MODE] = TOMTOM_A_CDC_DMIC_CLK2_MODE__POR,
- [TOMTOM_A_CDC_INTR1_MODE] = TOMTOM_A_CDC_INTR1_MODE__POR,
- [TOMTOM_A_CDC_SB_NRZ_SEL_MODE] = TOMTOM_A_CDC_SB_NRZ_SEL_MODE__POR,
- [TOMTOM_A_CDC_INTR2_MODE] = TOMTOM_A_CDC_INTR2_MODE__POR,
- [TOMTOM_A_CDC_RF_PA_ON_MODE] = TOMTOM_A_CDC_RF_PA_ON_MODE__POR,
- [TOMTOM_A_CDC_BOOST_MODE] = TOMTOM_A_CDC_BOOST_MODE__POR,
- [TOMTOM_A_CDC_JTCK_MODE] = TOMTOM_A_CDC_JTCK_MODE__POR,
- [TOMTOM_A_CDC_JTDI_MODE] = TOMTOM_A_CDC_JTDI_MODE__POR,
- [TOMTOM_A_CDC_JTMS_MODE] = TOMTOM_A_CDC_JTMS_MODE__POR,
- [TOMTOM_A_CDC_JTDO_MODE] = TOMTOM_A_CDC_JTDO_MODE__POR,
- [TOMTOM_A_CDC_JTRST_MODE] = TOMTOM_A_CDC_JTRST_MODE__POR,
- [TOMTOM_A_CDC_BIST_MODE_MODE] = TOMTOM_A_CDC_BIST_MODE_MODE__POR,
- [TOMTOM_A_CDC_MAD_MAIN_CTL_1] = TOMTOM_A_CDC_MAD_MAIN_CTL_1__POR,
- [TOMTOM_A_CDC_MAD_MAIN_CTL_2] = TOMTOM_A_CDC_MAD_MAIN_CTL_2__POR,
- [TOMTOM_A_CDC_MAD_AUDIO_CTL_1] = TOMTOM_A_CDC_MAD_AUDIO_CTL_1__POR,
- [TOMTOM_A_CDC_MAD_AUDIO_CTL_2] = TOMTOM_A_CDC_MAD_AUDIO_CTL_2__POR,
- [TOMTOM_A_CDC_MAD_AUDIO_CTL_3] = TOMTOM_A_CDC_MAD_AUDIO_CTL_3__POR,
- [TOMTOM_A_CDC_MAD_AUDIO_CTL_4] = TOMTOM_A_CDC_MAD_AUDIO_CTL_4__POR,
- [TOMTOM_A_CDC_MAD_AUDIO_CTL_5] = TOMTOM_A_CDC_MAD_AUDIO_CTL_5__POR,
- [TOMTOM_A_CDC_MAD_AUDIO_CTL_6] = TOMTOM_A_CDC_MAD_AUDIO_CTL_6__POR,
- [TOMTOM_A_CDC_MAD_AUDIO_CTL_7] = TOMTOM_A_CDC_MAD_AUDIO_CTL_7__POR,
- [TOMTOM_A_CDC_MAD_AUDIO_CTL_8] = TOMTOM_A_CDC_MAD_AUDIO_CTL_8__POR,
- [TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_PTR] =
- TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_PTR__POR,
- [TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_VAL] =
- TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_VAL__POR,
- [TOMTOM_A_CDC_MAD_ULTR_CTL_1] = TOMTOM_A_CDC_MAD_ULTR_CTL_1__POR,
- [TOMTOM_A_CDC_MAD_ULTR_CTL_2] = TOMTOM_A_CDC_MAD_ULTR_CTL_2__POR,
- [TOMTOM_A_CDC_MAD_ULTR_CTL_3] = TOMTOM_A_CDC_MAD_ULTR_CTL_3__POR,
- [TOMTOM_A_CDC_MAD_ULTR_CTL_4] = TOMTOM_A_CDC_MAD_ULTR_CTL_4__POR,
- [TOMTOM_A_CDC_MAD_ULTR_CTL_5] = TOMTOM_A_CDC_MAD_ULTR_CTL_5__POR,
- [TOMTOM_A_CDC_MAD_ULTR_CTL_6] = TOMTOM_A_CDC_MAD_ULTR_CTL_6__POR,
- [TOMTOM_A_CDC_MAD_ULTR_CTL_7] = TOMTOM_A_CDC_MAD_ULTR_CTL_7__POR,
- [TOMTOM_A_CDC_MAD_BEACON_CTL_1] = TOMTOM_A_CDC_MAD_BEACON_CTL_1__POR,
- [TOMTOM_A_CDC_MAD_BEACON_CTL_2] = TOMTOM_A_CDC_MAD_BEACON_CTL_2__POR,
- [TOMTOM_A_CDC_MAD_BEACON_CTL_3] = TOMTOM_A_CDC_MAD_BEACON_CTL_3__POR,
- [TOMTOM_A_CDC_MAD_BEACON_CTL_4] = TOMTOM_A_CDC_MAD_BEACON_CTL_4__POR,
- [TOMTOM_A_CDC_MAD_BEACON_CTL_5] = TOMTOM_A_CDC_MAD_BEACON_CTL_5__POR,
- [TOMTOM_A_CDC_MAD_BEACON_CTL_6] = TOMTOM_A_CDC_MAD_BEACON_CTL_6__POR,
- [TOMTOM_A_CDC_MAD_BEACON_CTL_7] = TOMTOM_A_CDC_MAD_BEACON_CTL_7__POR,
- [TOMTOM_A_CDC_MAD_BEACON_CTL_8] = TOMTOM_A_CDC_MAD_BEACON_CTL_8__POR,
- [TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_PTR] =
- TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_PTR__POR,
- [TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_VAL] =
- TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_VAL__POR,
- [TOMTOM_A_CDC_MAD_INP_SEL] = TOMTOM_A_CDC_MAD_INP_SEL__POR,
- [TOMTOM_A_BIAS_REF_CTL] = TOMTOM_A_BIAS_REF_CTL__POR,
- [TOMTOM_A_BIAS_CENTRAL_BG_CTL] = TOMTOM_A_BIAS_CENTRAL_BG_CTL__POR,
- [TOMTOM_A_BIAS_PRECHRG_CTL] = TOMTOM_A_BIAS_PRECHRG_CTL__POR,
- [TOMTOM_A_BIAS_CURR_CTL_1] = TOMTOM_A_BIAS_CURR_CTL_1__POR,
- [TOMTOM_A_BIAS_CURR_CTL_2] = TOMTOM_A_BIAS_CURR_CTL_2__POR,
- [TOMTOM_A_BIAS_OSC_BG_CTL] = TOMTOM_A_BIAS_OSC_BG_CTL__POR,
- [TOMTOM_A_CLK_BUFF_EN1] = TOMTOM_A_CLK_BUFF_EN1__POR,
- [TOMTOM_A_CLK_BUFF_EN2] = TOMTOM_A_CLK_BUFF_EN2__POR,
- [TOMTOM_A_LDO_L_MODE_1] = TOMTOM_A_LDO_L_MODE_1__POR,
- [TOMTOM_A_LDO_L_MODE_2] = TOMTOM_A_LDO_L_MODE_2__POR,
- [TOMTOM_A_LDO_L_CTRL_1] = TOMTOM_A_LDO_L_CTRL_1__POR,
- [TOMTOM_A_LDO_L_CTRL_2] = TOMTOM_A_LDO_L_CTRL_2__POR,
- [TOMTOM_A_LDO_L_CTRL_3] = TOMTOM_A_LDO_L_CTRL_3__POR,
- [TOMTOM_A_LDO_L_CTRL_4] = TOMTOM_A_LDO_L_CTRL_4__POR,
- [TOMTOM_A_LDO_H_MODE_1] = TOMTOM_A_LDO_H_MODE_1__POR,
- [TOMTOM_A_LDO_H_MODE_2] = TOMTOM_A_LDO_H_MODE_2__POR,
- [TOMTOM_A_LDO_H_LOOP_CTL] = TOMTOM_A_LDO_H_LOOP_CTL__POR,
- [TOMTOM_A_LDO_H_COMP_1] = TOMTOM_A_LDO_H_COMP_1__POR,
- [TOMTOM_A_LDO_H_COMP_2] = TOMTOM_A_LDO_H_COMP_2__POR,
- [TOMTOM_A_LDO_H_BIAS_1] = TOMTOM_A_LDO_H_BIAS_1__POR,
- [TOMTOM_A_LDO_H_BIAS_2] = TOMTOM_A_LDO_H_BIAS_2__POR,
- [TOMTOM_A_LDO_H_BIAS_3] = TOMTOM_A_LDO_H_BIAS_3__POR,
- [TOMTOM_A_VBAT_CLK] = TOMTOM_A_VBAT_CLK__POR,
- [TOMTOM_A_VBAT_LOOP] = TOMTOM_A_VBAT_LOOP__POR,
- [TOMTOM_A_VBAT_REF] = TOMTOM_A_VBAT_REF__POR,
- [TOMTOM_A_VBAT_ADC_TEST] = TOMTOM_A_VBAT_ADC_TEST__POR,
- [TOMTOM_A_VBAT_FE] = TOMTOM_A_VBAT_FE__POR,
- [TOMTOM_A_VBAT_BIAS_1] = TOMTOM_A_VBAT_BIAS_1__POR,
- [TOMTOM_A_VBAT_BIAS_2] = TOMTOM_A_VBAT_BIAS_2__POR,
- [TOMTOM_A_VBAT_ADC_DATA_MSB] = TOMTOM_A_VBAT_ADC_DATA_MSB__POR,
- [TOMTOM_A_VBAT_ADC_DATA_LSB] = TOMTOM_A_VBAT_ADC_DATA_LSB__POR,
- [TOMTOM_A_FLL_NREF] = TOMTOM_A_FLL_NREF__POR,
- [TOMTOM_A_FLL_KDCO_TUNE] = TOMTOM_A_FLL_KDCO_TUNE__POR,
- [TOMTOM_A_FLL_LOCK_THRESH] = TOMTOM_A_FLL_LOCK_THRESH__POR,
- [TOMTOM_A_FLL_LOCK_DET_COUNT] = TOMTOM_A_FLL_LOCK_DET_COUNT__POR,
- [TOMTOM_A_FLL_DAC_THRESHOLD] = TOMTOM_A_FLL_DAC_THRESHOLD__POR,
- [TOMTOM_A_FLL_TEST_DCO_FREERUN] = TOMTOM_A_FLL_TEST_DCO_FREERUN__POR,
- [TOMTOM_A_FLL_TEST_ENABLE] = TOMTOM_A_FLL_TEST_ENABLE__POR,
- [TOMTOM_A_MICB_CFILT_1_CTL] = TOMTOM_A_MICB_CFILT_1_CTL__POR,
- [TOMTOM_A_MICB_CFILT_1_VAL] = TOMTOM_A_MICB_CFILT_1_VAL__POR,
- [TOMTOM_A_MICB_CFILT_1_PRECHRG] = TOMTOM_A_MICB_CFILT_1_PRECHRG__POR,
- [TOMTOM_A_MICB_1_CTL] = TOMTOM_A_MICB_1_CTL__POR,
- [TOMTOM_A_MICB_1_INT_RBIAS] = TOMTOM_A_MICB_1_INT_RBIAS__POR,
- [TOMTOM_A_MICB_1_MBHC] = TOMTOM_A_MICB_1_MBHC__POR,
- [TOMTOM_A_MICB_CFILT_2_CTL] = TOMTOM_A_MICB_CFILT_2_CTL__POR,
- [TOMTOM_A_MICB_CFILT_2_VAL] = TOMTOM_A_MICB_CFILT_2_VAL__POR,
- [TOMTOM_A_MICB_CFILT_2_PRECHRG] = TOMTOM_A_MICB_CFILT_2_PRECHRG__POR,
- [TOMTOM_A_MICB_2_CTL] = TOMTOM_A_MICB_2_CTL__POR,
- [TOMTOM_A_MICB_2_INT_RBIAS] = TOMTOM_A_MICB_2_INT_RBIAS__POR,
- [TOMTOM_A_MICB_2_MBHC] = TOMTOM_A_MICB_2_MBHC__POR,
- [TOMTOM_A_MICB_CFILT_3_CTL] = TOMTOM_A_MICB_CFILT_3_CTL__POR,
- [TOMTOM_A_MICB_CFILT_3_VAL] = TOMTOM_A_MICB_CFILT_3_VAL__POR,
- [TOMTOM_A_MICB_CFILT_3_PRECHRG] = TOMTOM_A_MICB_CFILT_3_PRECHRG__POR,
- [TOMTOM_A_MICB_3_CTL] = TOMTOM_A_MICB_3_CTL__POR,
- [TOMTOM_A_MICB_3_INT_RBIAS] = TOMTOM_A_MICB_3_INT_RBIAS__POR,
- [TOMTOM_A_MICB_3_MBHC] = TOMTOM_A_MICB_3_MBHC__POR,
- [TOMTOM_A_MICB_4_CTL] = TOMTOM_A_MICB_4_CTL__POR,
- [TOMTOM_A_MICB_4_INT_RBIAS] = TOMTOM_A_MICB_4_INT_RBIAS__POR,
- [TOMTOM_A_MICB_4_MBHC] = TOMTOM_A_MICB_4_MBHC__POR,
- [TOMTOM_A_SPKR_DRV2_EN] = TOMTOM_A_SPKR_DRV2_EN__POR,
- [TOMTOM_A_SPKR_DRV2_GAIN] = TOMTOM_A_SPKR_DRV2_GAIN__POR,
- [TOMTOM_A_SPKR_DRV2_DAC_CTL] = TOMTOM_A_SPKR_DRV2_DAC_CTL__POR,
- [TOMTOM_A_SPKR_DRV2_OCP_CTL] = TOMTOM_A_SPKR_DRV2_OCP_CTL__POR,
- [TOMTOM_A_SPKR_DRV2_CLIP_DET] = TOMTOM_A_SPKR_DRV2_CLIP_DET__POR,
- [TOMTOM_A_SPKR_DRV2_DBG_DAC] = TOMTOM_A_SPKR_DRV2_DBG_DAC__POR,
- [TOMTOM_A_SPKR_DRV2_DBG_PA] = TOMTOM_A_SPKR_DRV2_DBG_PA__POR,
- [TOMTOM_A_SPKR_DRV2_DBG_PWRSTG] = TOMTOM_A_SPKR_DRV2_DBG_PWRSTG__POR,
- [TOMTOM_A_SPKR_DRV2_BIAS_LDO] = TOMTOM_A_SPKR_DRV2_BIAS_LDO__POR,
- [TOMTOM_A_SPKR_DRV2_BIAS_INT] = TOMTOM_A_SPKR_DRV2_BIAS_INT__POR,
- [TOMTOM_A_SPKR_DRV2_BIAS_PA] = TOMTOM_A_SPKR_DRV2_BIAS_PA__POR,
- [TOMTOM_A_SPKR_DRV2_STATUS_OCP] = TOMTOM_A_SPKR_DRV2_STATUS_OCP__POR,
- [TOMTOM_A_SPKR_DRV2_STATUS_PA] = TOMTOM_A_SPKR_DRV2_STATUS_PA__POR,
- [TOMTOM_A_MBHC_INSERT_DETECT] = TOMTOM_A_MBHC_INSERT_DETECT__POR,
- [TOMTOM_A_MBHC_INSERT_DET_STATUS] =
- TOMTOM_A_MBHC_INSERT_DET_STATUS__POR,
- [TOMTOM_A_TX_COM_BIAS] = TOMTOM_A_TX_COM_BIAS__POR,
- [TOMTOM_A_MBHC_INSERT_DETECT2] = TOMTOM_A_MBHC_INSERT_DETECT2__POR,
- [TOMTOM_A_MBHC_SCALING_MUX_1] = TOMTOM_A_MBHC_SCALING_MUX_1__POR,
- [TOMTOM_A_MBHC_SCALING_MUX_2] = TOMTOM_A_MBHC_SCALING_MUX_2__POR,
- [TOMTOM_A_MAD_ANA_CTRL] = TOMTOM_A_MAD_ANA_CTRL__POR,
- [TOMTOM_A_TX_SUP_SWITCH_CTRL_1] = TOMTOM_A_TX_SUP_SWITCH_CTRL_1__POR,
- [TOMTOM_A_TX_SUP_SWITCH_CTRL_2] = TOMTOM_A_TX_SUP_SWITCH_CTRL_2__POR,
- [TOMTOM_A_TX_1_GAIN] = TOMTOM_A_TX_1_GAIN__POR,
- [TOMTOM_A_TX_1_2_TEST_EN] = TOMTOM_A_TX_1_2_TEST_EN__POR,
- [TOMTOM_A_TX_2_GAIN] = TOMTOM_A_TX_2_GAIN__POR,
- [TOMTOM_A_TX_1_2_ADC_IB] = TOMTOM_A_TX_1_2_ADC_IB__POR,
- [TOMTOM_A_TX_1_2_ATEST_REFCTRL] = TOMTOM_A_TX_1_2_ATEST_REFCTRL__POR,
- [TOMTOM_A_TX_1_2_TEST_CTL] = TOMTOM_A_TX_1_2_TEST_CTL__POR,
- [TOMTOM_A_TX_1_2_TEST_BLOCK_EN] = TOMTOM_A_TX_1_2_TEST_BLOCK_EN__POR,
- [TOMTOM_A_TX_1_2_TXFE_CLKDIV] = TOMTOM_A_TX_1_2_TXFE_CLKDIV__POR,
- [TOMTOM_A_TX_1_2_SAR_ERR_CH1] = TOMTOM_A_TX_1_2_SAR_ERR_CH1__POR,
- [TOMTOM_A_TX_1_2_SAR_ERR_CH2] = TOMTOM_A_TX_1_2_SAR_ERR_CH2__POR,
- [TOMTOM_A_TX_3_GAIN] = TOMTOM_A_TX_3_GAIN__POR,
- [TOMTOM_A_TX_3_4_TEST_EN] = TOMTOM_A_TX_3_4_TEST_EN__POR,
- [TOMTOM_A_TX_4_GAIN] = TOMTOM_A_TX_4_GAIN__POR,
- [TOMTOM_A_TX_3_4_ADC_IB] = TOMTOM_A_TX_3_4_ADC_IB__POR,
- [TOMTOM_A_TX_3_4_ATEST_REFCTRL] = TOMTOM_A_TX_3_4_ATEST_REFCTRL__POR,
- [TOMTOM_A_TX_3_4_TEST_CTL] = TOMTOM_A_TX_3_4_TEST_CTL__POR,
- [TOMTOM_A_TX_3_4_TEST_BLOCK_EN] = TOMTOM_A_TX_3_4_TEST_BLOCK_EN__POR,
- [TOMTOM_A_TX_3_4_TXFE_CKDIV] = TOMTOM_A_TX_3_4_TXFE_CKDIV__POR,
- [TOMTOM_A_TX_3_4_SAR_ERR_CH3] = TOMTOM_A_TX_3_4_SAR_ERR_CH3__POR,
- [TOMTOM_A_TX_3_4_SAR_ERR_CH4] = TOMTOM_A_TX_3_4_SAR_ERR_CH4__POR,
- [TOMTOM_A_TX_5_GAIN] = TOMTOM_A_TX_5_GAIN__POR,
- [TOMTOM_A_TX_5_6_TEST_EN] = TOMTOM_A_TX_5_6_TEST_EN__POR,
- [TOMTOM_A_TX_6_GAIN] = TOMTOM_A_TX_6_GAIN__POR,
- [TOMTOM_A_TX_5_6_ADC_IB] = TOMTOM_A_TX_5_6_ADC_IB__POR,
- [TOMTOM_A_TX_5_6_ATEST_REFCTRL] = TOMTOM_A_TX_5_6_ATEST_REFCTRL__POR,
- [TOMTOM_A_TX_5_6_TEST_CTL] = TOMTOM_A_TX_5_6_TEST_CTL__POR,
- [TOMTOM_A_TX_5_6_TEST_BLOCK_EN] = TOMTOM_A_TX_5_6_TEST_BLOCK_EN__POR,
- [TOMTOM_A_TX_5_6_TXFE_CKDIV] = TOMTOM_A_TX_5_6_TXFE_CKDIV__POR,
- [TOMTOM_A_TX_5_6_SAR_ERR_CH5] = TOMTOM_A_TX_5_6_SAR_ERR_CH5__POR,
- [TOMTOM_A_TX_5_6_SAR_ERR_CH6] = TOMTOM_A_TX_5_6_SAR_ERR_CH6__POR,
- [TOMTOM_A_TX_7_MBHC_EN] = TOMTOM_A_TX_7_MBHC_EN__POR,
- [TOMTOM_A_TX_7_MBHC_ATEST_REFCTRL] =
- TOMTOM_A_TX_7_MBHC_ATEST_REFCTRL__POR,
- [TOMTOM_A_TX_7_MBHC_ADC] = TOMTOM_A_TX_7_MBHC_ADC__POR,
- [TOMTOM_A_TX_7_MBHC_TEST_CTL] = TOMTOM_A_TX_7_MBHC_TEST_CTL__POR,
- [TOMTOM_A_TX_7_MBHC_SAR_ERR] = TOMTOM_A_TX_7_MBHC_SAR_ERR__POR,
- [TOMTOM_A_TX_7_TXFE_CLKDIV] = TOMTOM_A_TX_7_TXFE_CLKDIV__POR,
- [TOMTOM_A_RCO_CTRL] = TOMTOM_A_RCO_CTRL__POR,
- [TOMTOM_A_RCO_CALIBRATION_CTRL1] = TOMTOM_A_RCO_CALIBRATION_CTRL1__POR,
- [TOMTOM_A_RCO_CALIBRATION_CTRL2] = TOMTOM_A_RCO_CALIBRATION_CTRL2__POR,
- [TOMTOM_A_RCO_CALIBRATION_CTRL3] = TOMTOM_A_RCO_CALIBRATION_CTRL3__POR,
- [TOMTOM_A_RCO_TEST_CTRL] = TOMTOM_A_RCO_TEST_CTRL__POR,
- [TOMTOM_A_RCO_CALIBRATION_RESULT1] =
- TOMTOM_A_RCO_CALIBRATION_RESULT1__POR,
- [TOMTOM_A_RCO_CALIBRATION_RESULT2] =
- TOMTOM_A_RCO_CALIBRATION_RESULT2__POR,
- [TOMTOM_A_BUCK_MODE_1] = TOMTOM_A_BUCK_MODE_1__POR,
- [TOMTOM_A_BUCK_MODE_2] = TOMTOM_A_BUCK_MODE_2__POR,
- [TOMTOM_A_BUCK_MODE_3] = TOMTOM_A_BUCK_MODE_3__POR,
- [TOMTOM_A_BUCK_MODE_4] = TOMTOM_A_BUCK_MODE_4__POR,
- [TOMTOM_A_BUCK_MODE_5] = TOMTOM_A_BUCK_MODE_5__POR,
- [TOMTOM_A_BUCK_CTRL_VCL_1] = TOMTOM_A_BUCK_CTRL_VCL_1__POR,
- [TOMTOM_A_BUCK_CTRL_VCL_2] = TOMTOM_A_BUCK_CTRL_VCL_2__POR,
- [TOMTOM_A_BUCK_CTRL_VCL_3] = TOMTOM_A_BUCK_CTRL_VCL_3__POR,
- [TOMTOM_A_BUCK_CTRL_CCL_1] = TOMTOM_A_BUCK_CTRL_CCL_1__POR,
- [TOMTOM_A_BUCK_CTRL_CCL_2] = TOMTOM_A_BUCK_CTRL_CCL_2__POR,
- [TOMTOM_A_BUCK_CTRL_CCL_3] = TOMTOM_A_BUCK_CTRL_CCL_3__POR,
- [TOMTOM_A_BUCK_CTRL_CCL_4] = TOMTOM_A_BUCK_CTRL_CCL_4__POR,
- [TOMTOM_A_BUCK_CTRL_PWM_DRVR_1] = TOMTOM_A_BUCK_CTRL_PWM_DRVR_1__POR,
- [TOMTOM_A_BUCK_CTRL_PWM_DRVR_2] = TOMTOM_A_BUCK_CTRL_PWM_DRVR_2__POR,
- [TOMTOM_A_BUCK_CTRL_PWM_DRVR_3] = TOMTOM_A_BUCK_CTRL_PWM_DRVR_3__POR,
- [TOMTOM_A_BUCK_TMUX_A_D] = TOMTOM_A_BUCK_TMUX_A_D__POR,
- [TOMTOM_A_NCP_BUCKREF] = TOMTOM_A_NCP_BUCKREF__POR,
- [TOMTOM_A_NCP_EN] = TOMTOM_A_NCP_EN__POR,
- [TOMTOM_A_NCP_CLK] = TOMTOM_A_NCP_CLK__POR,
- [TOMTOM_A_NCP_STATIC] = TOMTOM_A_NCP_STATIC__POR,
- [TOMTOM_A_NCP_VTH_LOW] = TOMTOM_A_NCP_VTH_LOW__POR,
- [TOMTOM_A_NCP_VTH_HIGH] = TOMTOM_A_NCP_VTH_HIGH__POR,
- [TOMTOM_A_NCP_ATEST] = TOMTOM_A_NCP_ATEST__POR,
- [TOMTOM_A_NCP_DTEST] = TOMTOM_A_NCP_DTEST__POR,
- [TOMTOM_A_NCP_DLY1] = TOMTOM_A_NCP_DLY1__POR,
- [TOMTOM_A_NCP_DLY2] = TOMTOM_A_NCP_DLY2__POR,
- [TOMTOM_A_RX_AUX_SW_CTL] = TOMTOM_A_RX_AUX_SW_CTL__POR,
- [TOMTOM_A_RX_PA_AUX_IN_CONN] = TOMTOM_A_RX_PA_AUX_IN_CONN__POR,
- [TOMTOM_A_RX_COM_TIMER_DIV] = TOMTOM_A_RX_COM_TIMER_DIV__POR,
- [TOMTOM_A_RX_COM_OCP_CTL] = TOMTOM_A_RX_COM_OCP_CTL__POR,
- [TOMTOM_A_RX_COM_OCP_COUNT] = TOMTOM_A_RX_COM_OCP_COUNT__POR,
- [TOMTOM_A_RX_COM_DAC_CTL] = TOMTOM_A_RX_COM_DAC_CTL__POR,
- [TOMTOM_A_RX_COM_BIAS] = TOMTOM_A_RX_COM_BIAS__POR,
- [TOMTOM_A_RX_HPH_AUTO_CHOP] = TOMTOM_A_RX_HPH_AUTO_CHOP__POR,
- [TOMTOM_A_RX_HPH_CHOP_CTL] = TOMTOM_A_RX_HPH_CHOP_CTL__POR,
- [TOMTOM_A_RX_HPH_BIAS_PA] = TOMTOM_A_RX_HPH_BIAS_PA__POR,
- [TOMTOM_A_RX_HPH_BIAS_LDO] = TOMTOM_A_RX_HPH_BIAS_LDO__POR,
- [TOMTOM_A_RX_HPH_BIAS_CNP] = TOMTOM_A_RX_HPH_BIAS_CNP__POR,
- [TOMTOM_A_RX_HPH_BIAS_WG_OCP] = TOMTOM_A_RX_HPH_BIAS_WG_OCP__POR,
- [TOMTOM_A_RX_HPH_OCP_CTL] = TOMTOM_A_RX_HPH_OCP_CTL__POR,
- [TOMTOM_A_RX_HPH_CNP_EN] = TOMTOM_A_RX_HPH_CNP_EN__POR,
- [TOMTOM_A_RX_HPH_CNP_WG_CTL] = TOMTOM_A_RX_HPH_CNP_WG_CTL__POR,
- [TOMTOM_A_RX_HPH_CNP_WG_TIME] = TOMTOM_A_RX_HPH_CNP_WG_TIME__POR,
- [TOMTOM_A_RX_HPH_L_GAIN] = TOMTOM_A_RX_HPH_L_GAIN__POR,
- [TOMTOM_A_RX_HPH_L_TEST] = TOMTOM_A_RX_HPH_L_TEST__POR,
- [TOMTOM_A_RX_HPH_L_PA_CTL] = TOMTOM_A_RX_HPH_L_PA_CTL__POR,
- [TOMTOM_A_RX_HPH_L_DAC_CTL] = TOMTOM_A_RX_HPH_L_DAC_CTL__POR,
- [TOMTOM_A_RX_HPH_L_ATEST] = TOMTOM_A_RX_HPH_L_ATEST__POR,
- [TOMTOM_A_RX_HPH_L_STATUS] = TOMTOM_A_RX_HPH_L_STATUS__POR,
- [TOMTOM_A_RX_HPH_R_GAIN] = TOMTOM_A_RX_HPH_R_GAIN__POR,
- [TOMTOM_A_RX_HPH_R_TEST] = TOMTOM_A_RX_HPH_R_TEST__POR,
- [TOMTOM_A_RX_HPH_R_PA_CTL] = TOMTOM_A_RX_HPH_R_PA_CTL__POR,
- [TOMTOM_A_RX_HPH_R_DAC_CTL] = TOMTOM_A_RX_HPH_R_DAC_CTL__POR,
- [TOMTOM_A_RX_HPH_R_ATEST] = TOMTOM_A_RX_HPH_R_ATEST__POR,
- [TOMTOM_A_RX_HPH_R_STATUS] = TOMTOM_A_RX_HPH_R_STATUS__POR,
- [TOMTOM_A_RX_EAR_BIAS_PA] = TOMTOM_A_RX_EAR_BIAS_PA__POR,
- [TOMTOM_A_RX_EAR_BIAS_CMBUFF] = TOMTOM_A_RX_EAR_BIAS_CMBUFF__POR,
- [TOMTOM_A_RX_EAR_EN] = TOMTOM_A_RX_EAR_EN__POR,
- [TOMTOM_A_RX_EAR_GAIN] = TOMTOM_A_RX_EAR_GAIN__POR,
- [TOMTOM_A_RX_EAR_CMBUFF] = TOMTOM_A_RX_EAR_CMBUFF__POR,
- [TOMTOM_A_RX_EAR_ICTL] = TOMTOM_A_RX_EAR_ICTL__POR,
- [TOMTOM_A_RX_EAR_CCOMP] = TOMTOM_A_RX_EAR_CCOMP__POR,
- [TOMTOM_A_RX_EAR_VCM] = TOMTOM_A_RX_EAR_VCM__POR,
- [TOMTOM_A_RX_EAR_CNP] = TOMTOM_A_RX_EAR_CNP__POR,
- [TOMTOM_A_RX_EAR_DAC_CTL_ATEST] = TOMTOM_A_RX_EAR_DAC_CTL_ATEST__POR,
- [TOMTOM_A_RX_EAR_STATUS] = TOMTOM_A_RX_EAR_STATUS__POR,
- [TOMTOM_A_RX_LINE_BIAS_PA] = TOMTOM_A_RX_LINE_BIAS_PA__POR,
- [TOMTOM_A_RX_BUCK_BIAS1] = TOMTOM_A_RX_BUCK_BIAS1__POR,
- [TOMTOM_A_RX_BUCK_BIAS2] = TOMTOM_A_RX_BUCK_BIAS2__POR,
- [TOMTOM_A_RX_LINE_COM] = TOMTOM_A_RX_LINE_COM__POR,
- [TOMTOM_A_RX_LINE_CNP_EN] = TOMTOM_A_RX_LINE_CNP_EN__POR,
- [TOMTOM_A_RX_LINE_CNP_WG_CTL] = TOMTOM_A_RX_LINE_CNP_WG_CTL__POR,
- [TOMTOM_A_RX_LINE_CNP_WG_TIME] = TOMTOM_A_RX_LINE_CNP_WG_TIME__POR,
- [TOMTOM_A_RX_LINE_1_GAIN] = TOMTOM_A_RX_LINE_1_GAIN__POR,
- [TOMTOM_A_RX_LINE_1_TEST] = TOMTOM_A_RX_LINE_1_TEST__POR,
- [TOMTOM_A_RX_LINE_1_DAC_CTL] = TOMTOM_A_RX_LINE_1_DAC_CTL__POR,
- [TOMTOM_A_RX_LINE_1_STATUS] = TOMTOM_A_RX_LINE_1_STATUS__POR,
- [TOMTOM_A_RX_LINE_2_GAIN] = TOMTOM_A_RX_LINE_2_GAIN__POR,
- [TOMTOM_A_RX_LINE_2_TEST] = TOMTOM_A_RX_LINE_2_TEST__POR,
- [TOMTOM_A_RX_LINE_2_DAC_CTL] = TOMTOM_A_RX_LINE_2_DAC_CTL__POR,
- [TOMTOM_A_RX_LINE_2_STATUS] = TOMTOM_A_RX_LINE_2_STATUS__POR,
- [TOMTOM_A_RX_LINE_3_GAIN] = TOMTOM_A_RX_LINE_3_GAIN__POR,
- [TOMTOM_A_RX_LINE_3_TEST] = TOMTOM_A_RX_LINE_3_TEST__POR,
- [TOMTOM_A_RX_LINE_3_DAC_CTL] = TOMTOM_A_RX_LINE_3_DAC_CTL__POR,
- [TOMTOM_A_RX_LINE_3_STATUS] = TOMTOM_A_RX_LINE_3_STATUS__POR,
- [TOMTOM_A_RX_LINE_4_GAIN] = TOMTOM_A_RX_LINE_4_GAIN__POR,
- [TOMTOM_A_RX_LINE_4_TEST] = TOMTOM_A_RX_LINE_4_TEST__POR,
- [TOMTOM_A_RX_LINE_4_DAC_CTL] = TOMTOM_A_RX_LINE_4_DAC_CTL__POR,
- [TOMTOM_A_RX_LINE_4_STATUS] = TOMTOM_A_RX_LINE_4_STATUS__POR,
- [TOMTOM_A_RX_LINE_CNP_DBG] = TOMTOM_A_RX_LINE_CNP_DBG__POR,
- [TOMTOM_A_SPKR_DRV1_EN] = TOMTOM_A_SPKR_DRV1_EN__POR,
- [TOMTOM_A_SPKR_DRV1_GAIN] = TOMTOM_A_SPKR_DRV1_GAIN__POR,
- [TOMTOM_A_SPKR_DRV1_DAC_CTL] = TOMTOM_A_SPKR_DRV1_DAC_CTL__POR,
- [TOMTOM_A_SPKR_DRV1_OCP_CTL] = TOMTOM_A_SPKR_DRV1_OCP_CTL__POR,
- [TOMTOM_A_SPKR_DRV1_CLIP_DET] = TOMTOM_A_SPKR_DRV1_CLIP_DET__POR,
- [TOMTOM_A_SPKR_DRV1_IEC] = TOMTOM_A_SPKR_DRV1_IEC__POR,
- [TOMTOM_A_SPKR_DRV1_DBG_DAC] = TOMTOM_A_SPKR_DRV1_DBG_DAC__POR,
- [TOMTOM_A_SPKR_DRV1_DBG_PA] = TOMTOM_A_SPKR_DRV1_DBG_PA__POR,
- [TOMTOM_A_SPKR_DRV1_DBG_PWRSTG] = TOMTOM_A_SPKR_DRV1_DBG_PWRSTG__POR,
- [TOMTOM_A_SPKR_DRV1_BIAS_LDO] = TOMTOM_A_SPKR_DRV1_BIAS_LDO__POR,
- [TOMTOM_A_SPKR_DRV1_BIAS_INT] = TOMTOM_A_SPKR_DRV1_BIAS_INT__POR,
- [TOMTOM_A_SPKR_DRV1_BIAS_PA] = TOMTOM_A_SPKR_DRV1_BIAS_PA__POR,
- [TOMTOM_A_SPKR_DRV1_STATUS_OCP] = TOMTOM_A_SPKR_DRV1_STATUS_OCP__POR,
- [TOMTOM_A_SPKR_DRV1_STATUS_PA] = TOMTOM_A_SPKR_DRV1_STATUS_PA__POR,
- [TOMTOM_A_SPKR1_PROT_EN] = TOMTOM_A_SPKR1_PROT_EN__POR,
- [TOMTOM_A_SPKR1_PROT_ADC_TEST_EN] =
- TOMTOM_A_SPKR1_PROT_ADC_TEST_EN__POR,
- [TOMTOM_A_SPKR1_PROT_ATEST] = TOMTOM_A_SPKR1_PROT_ATEST__POR,
- [TOMTOM_A_SPKR1_PROT_LDO_CTRL] = TOMTOM_A_SPKR1_PROT_LDO_CTRL__POR,
- [TOMTOM_A_SPKR1_PROT_ISENSE_CTRL] =
- TOMTOM_A_SPKR1_PROT_ISENSE_CTRL__POR,
- [TOMTOM_A_SPKR1_PROT_VSENSE_CTRL] =
- TOMTOM_A_SPKR1_PROT_VSENSE_CTRL__POR,
- [TOMTOM_A_SPKR2_PROT_EN] = TOMTOM_A_SPKR2_PROT_EN__POR,
- [TOMTOM_A_SPKR2_PROT_ADC_TEST_EN] =
- TOMTOM_A_SPKR2_PROT_ADC_TEST_EN__POR,
- [TOMTOM_A_SPKR2_PROT_ATEST] = TOMTOM_A_SPKR2_PROT_ATEST__POR,
- [TOMTOM_A_SPKR2_PROT_LDO_CTRL] = TOMTOM_A_SPKR2_PROT_LDO_CTRL__POR,
- [TOMTOM_A_SPKR2_PROT_ISENSE_CTRL] =
- TOMTOM_A_SPKR2_PROT_ISENSE_CTRL__POR,
- [TOMTOM_A_SPKR2_PROT_VSENSE_CTRL] =
- TOMTOM_A_SPKR2_PROT_VSENSE_CTRL__POR,
- [TOMTOM_A_MBHC_HPH] = TOMTOM_A_MBHC_HPH__POR,
- [TOMTOM_A_CDC_ANC1_B1_CTL] = TOMTOM_A_CDC_ANC1_B1_CTL__POR,
- [TOMTOM_A_CDC_ANC2_B1_CTL] = TOMTOM_A_CDC_ANC2_B1_CTL__POR,
- [TOMTOM_A_CDC_ANC1_SHIFT] = TOMTOM_A_CDC_ANC1_SHIFT__POR,
- [TOMTOM_A_CDC_ANC2_SHIFT] = TOMTOM_A_CDC_ANC2_SHIFT__POR,
- [TOMTOM_A_CDC_ANC1_IIR_B1_CTL] = TOMTOM_A_CDC_ANC1_IIR_B1_CTL__POR,
- [TOMTOM_A_CDC_ANC2_IIR_B1_CTL] = TOMTOM_A_CDC_ANC2_IIR_B1_CTL__POR,
- [TOMTOM_A_CDC_ANC1_IIR_B2_CTL] = TOMTOM_A_CDC_ANC1_IIR_B2_CTL__POR,
- [TOMTOM_A_CDC_ANC2_IIR_B2_CTL] = TOMTOM_A_CDC_ANC2_IIR_B2_CTL__POR,
- [TOMTOM_A_CDC_ANC1_IIR_B3_CTL] = TOMTOM_A_CDC_ANC1_IIR_B3_CTL__POR,
- [TOMTOM_A_CDC_ANC2_IIR_B3_CTL] = TOMTOM_A_CDC_ANC2_IIR_B3_CTL__POR,
- [TOMTOM_A_CDC_ANC1_LPF_B1_CTL] = TOMTOM_A_CDC_ANC1_LPF_B1_CTL__POR,
- [TOMTOM_A_CDC_ANC2_LPF_B1_CTL] = TOMTOM_A_CDC_ANC2_LPF_B1_CTL__POR,
- [TOMTOM_A_CDC_ANC1_LPF_B2_CTL] = TOMTOM_A_CDC_ANC1_LPF_B2_CTL__POR,
- [TOMTOM_A_CDC_ANC2_LPF_B2_CTL] = TOMTOM_A_CDC_ANC2_LPF_B2_CTL__POR,
- [TOMTOM_A_CDC_ANC1_SPARE] = TOMTOM_A_CDC_ANC1_SPARE__POR,
- [TOMTOM_A_CDC_ANC2_SPARE] = TOMTOM_A_CDC_ANC2_SPARE__POR,
- [TOMTOM_A_CDC_ANC1_SMLPF_CTL] = TOMTOM_A_CDC_ANC1_SMLPF_CTL__POR,
- [TOMTOM_A_CDC_ANC2_SMLPF_CTL] = TOMTOM_A_CDC_ANC2_SMLPF_CTL__POR,
- [TOMTOM_A_CDC_ANC1_DCFLT_CTL] = TOMTOM_A_CDC_ANC1_DCFLT_CTL__POR,
- [TOMTOM_A_CDC_ANC2_DCFLT_CTL] = TOMTOM_A_CDC_ANC2_DCFLT_CTL__POR,
- [TOMTOM_A_CDC_ANC1_GAIN_CTL] = TOMTOM_A_CDC_ANC1_GAIN_CTL__POR,
- [TOMTOM_A_CDC_ANC2_GAIN_CTL] = TOMTOM_A_CDC_ANC2_GAIN_CTL__POR,
- [TOMTOM_A_CDC_ANC1_B2_CTL] = TOMTOM_A_CDC_ANC1_B2_CTL__POR,
- [TOMTOM_A_CDC_ANC2_B2_CTL] = TOMTOM_A_CDC_ANC2_B2_CTL__POR,
- [TOMTOM_A_CDC_TX1_VOL_CTL_TIMER] = TOMTOM_A_CDC_TX1_VOL_CTL_TIMER__POR,
- [TOMTOM_A_CDC_TX2_VOL_CTL_TIMER] = TOMTOM_A_CDC_TX2_VOL_CTL_TIMER__POR,
- [TOMTOM_A_CDC_TX3_VOL_CTL_TIMER] = TOMTOM_A_CDC_TX3_VOL_CTL_TIMER__POR,
- [TOMTOM_A_CDC_TX4_VOL_CTL_TIMER] = TOMTOM_A_CDC_TX4_VOL_CTL_TIMER__POR,
- [TOMTOM_A_CDC_TX5_VOL_CTL_TIMER] = TOMTOM_A_CDC_TX5_VOL_CTL_TIMER__POR,
- [TOMTOM_A_CDC_TX6_VOL_CTL_TIMER] = TOMTOM_A_CDC_TX6_VOL_CTL_TIMER__POR,
- [TOMTOM_A_CDC_TX7_VOL_CTL_TIMER] = TOMTOM_A_CDC_TX7_VOL_CTL_TIMER__POR,
- [TOMTOM_A_CDC_TX8_VOL_CTL_TIMER] = TOMTOM_A_CDC_TX8_VOL_CTL_TIMER__POR,
- [TOMTOM_A_CDC_TX9_VOL_CTL_TIMER] = TOMTOM_A_CDC_TX9_VOL_CTL_TIMER__POR,
- [TOMTOM_A_CDC_TX10_VOL_CTL_TIMER] =
- TOMTOM_A_CDC_TX10_VOL_CTL_TIMER__POR,
- [TOMTOM_A_CDC_TX1_VOL_CTL_GAIN] = TOMTOM_A_CDC_TX1_VOL_CTL_GAIN__POR,
- [TOMTOM_A_CDC_TX2_VOL_CTL_GAIN] = TOMTOM_A_CDC_TX2_VOL_CTL_GAIN__POR,
- [TOMTOM_A_CDC_TX3_VOL_CTL_GAIN] = TOMTOM_A_CDC_TX3_VOL_CTL_GAIN__POR,
- [TOMTOM_A_CDC_TX4_VOL_CTL_GAIN] = TOMTOM_A_CDC_TX4_VOL_CTL_GAIN__POR,
- [TOMTOM_A_CDC_TX5_VOL_CTL_GAIN] = TOMTOM_A_CDC_TX5_VOL_CTL_GAIN__POR,
- [TOMTOM_A_CDC_TX6_VOL_CTL_GAIN] = TOMTOM_A_CDC_TX6_VOL_CTL_GAIN__POR,
- [TOMTOM_A_CDC_TX7_VOL_CTL_GAIN] = TOMTOM_A_CDC_TX7_VOL_CTL_GAIN__POR,
- [TOMTOM_A_CDC_TX8_VOL_CTL_GAIN] = TOMTOM_A_CDC_TX8_VOL_CTL_GAIN__POR,
- [TOMTOM_A_CDC_TX9_VOL_CTL_GAIN] = TOMTOM_A_CDC_TX9_VOL_CTL_GAIN__POR,
- [TOMTOM_A_CDC_TX10_VOL_CTL_GAIN] = TOMTOM_A_CDC_TX10_VOL_CTL_GAIN__POR,
- [TOMTOM_A_CDC_TX1_VOL_CTL_CFG] = TOMTOM_A_CDC_TX1_VOL_CTL_CFG__POR,
- [TOMTOM_A_CDC_TX2_VOL_CTL_CFG] = TOMTOM_A_CDC_TX2_VOL_CTL_CFG__POR,
- [TOMTOM_A_CDC_TX3_VOL_CTL_CFG] = TOMTOM_A_CDC_TX3_VOL_CTL_CFG__POR,
- [TOMTOM_A_CDC_TX4_VOL_CTL_CFG] = TOMTOM_A_CDC_TX4_VOL_CTL_CFG__POR,
- [TOMTOM_A_CDC_TX5_VOL_CTL_CFG] = TOMTOM_A_CDC_TX5_VOL_CTL_CFG__POR,
- [TOMTOM_A_CDC_TX6_VOL_CTL_CFG] = TOMTOM_A_CDC_TX6_VOL_CTL_CFG__POR,
- [TOMTOM_A_CDC_TX7_VOL_CTL_CFG] = TOMTOM_A_CDC_TX7_VOL_CTL_CFG__POR,
- [TOMTOM_A_CDC_TX8_VOL_CTL_CFG] = TOMTOM_A_CDC_TX8_VOL_CTL_CFG__POR,
- [TOMTOM_A_CDC_TX9_VOL_CTL_CFG] = TOMTOM_A_CDC_TX9_VOL_CTL_CFG__POR,
- [TOMTOM_A_CDC_TX10_VOL_CTL_CFG] = TOMTOM_A_CDC_TX10_VOL_CTL_CFG__POR,
- [TOMTOM_A_CDC_TX1_MUX_CTL] = TOMTOM_A_CDC_TX1_MUX_CTL__POR,
- [TOMTOM_A_CDC_TX2_MUX_CTL] = TOMTOM_A_CDC_TX2_MUX_CTL__POR,
- [TOMTOM_A_CDC_TX3_MUX_CTL] = TOMTOM_A_CDC_TX3_MUX_CTL__POR,
- [TOMTOM_A_CDC_TX4_MUX_CTL] = TOMTOM_A_CDC_TX4_MUX_CTL__POR,
- [TOMTOM_A_CDC_TX5_MUX_CTL] = TOMTOM_A_CDC_TX5_MUX_CTL__POR,
- [TOMTOM_A_CDC_TX6_MUX_CTL] = TOMTOM_A_CDC_TX6_MUX_CTL__POR,
- [TOMTOM_A_CDC_TX7_MUX_CTL] = TOMTOM_A_CDC_TX7_MUX_CTL__POR,
- [TOMTOM_A_CDC_TX8_MUX_CTL] = TOMTOM_A_CDC_TX8_MUX_CTL__POR,
- [TOMTOM_A_CDC_TX9_MUX_CTL] = TOMTOM_A_CDC_TX9_MUX_CTL__POR,
- [TOMTOM_A_CDC_TX10_MUX_CTL] = TOMTOM_A_CDC_TX10_MUX_CTL__POR,
- [TOMTOM_A_CDC_TX1_CLK_FS_CTL] = TOMTOM_A_CDC_TX1_CLK_FS_CTL__POR,
- [TOMTOM_A_CDC_TX2_CLK_FS_CTL] = TOMTOM_A_CDC_TX2_CLK_FS_CTL__POR,
- [TOMTOM_A_CDC_TX3_CLK_FS_CTL] = TOMTOM_A_CDC_TX3_CLK_FS_CTL__POR,
- [TOMTOM_A_CDC_TX4_CLK_FS_CTL] = TOMTOM_A_CDC_TX4_CLK_FS_CTL__POR,
- [TOMTOM_A_CDC_TX5_CLK_FS_CTL] = TOMTOM_A_CDC_TX5_CLK_FS_CTL__POR,
- [TOMTOM_A_CDC_TX6_CLK_FS_CTL] = TOMTOM_A_CDC_TX6_CLK_FS_CTL__POR,
- [TOMTOM_A_CDC_TX7_CLK_FS_CTL] = TOMTOM_A_CDC_TX7_CLK_FS_CTL__POR,
- [TOMTOM_A_CDC_TX8_CLK_FS_CTL] = TOMTOM_A_CDC_TX8_CLK_FS_CTL__POR,
- [TOMTOM_A_CDC_TX9_CLK_FS_CTL] = TOMTOM_A_CDC_TX9_CLK_FS_CTL__POR,
- [TOMTOM_A_CDC_TX10_CLK_FS_CTL] = TOMTOM_A_CDC_TX10_CLK_FS_CTL__POR,
- [TOMTOM_A_CDC_TX1_DMIC_CTL] = TOMTOM_A_CDC_TX1_DMIC_CTL__POR,
- [TOMTOM_A_CDC_TX2_DMIC_CTL] = TOMTOM_A_CDC_TX2_DMIC_CTL__POR,
- [TOMTOM_A_CDC_TX3_DMIC_CTL] = TOMTOM_A_CDC_TX3_DMIC_CTL__POR,
- [TOMTOM_A_CDC_TX4_DMIC_CTL] = TOMTOM_A_CDC_TX4_DMIC_CTL__POR,
- [TOMTOM_A_CDC_TX5_DMIC_CTL] = TOMTOM_A_CDC_TX5_DMIC_CTL__POR,
- [TOMTOM_A_CDC_TX6_DMIC_CTL] = TOMTOM_A_CDC_TX6_DMIC_CTL__POR,
- [TOMTOM_A_CDC_TX7_DMIC_CTL] = TOMTOM_A_CDC_TX7_DMIC_CTL__POR,
- [TOMTOM_A_CDC_TX8_DMIC_CTL] = TOMTOM_A_CDC_TX8_DMIC_CTL__POR,
- [TOMTOM_A_CDC_TX9_DMIC_CTL] = TOMTOM_A_CDC_TX9_DMIC_CTL__POR,
- [TOMTOM_A_CDC_TX10_DMIC_CTL] = TOMTOM_A_CDC_TX10_DMIC_CTL__POR,
- [TOMTOM_A_CDC_SPKR_CLIPDET_VAL0] = TOMTOM_A_CDC_SPKR_CLIPDET_VAL0__POR,
- [TOMTOM_A_CDC_SPKR_CLIPDET_VAL1] = TOMTOM_A_CDC_SPKR_CLIPDET_VAL1__POR,
- [TOMTOM_A_CDC_SPKR_CLIPDET_VAL2] = TOMTOM_A_CDC_SPKR_CLIPDET_VAL2__POR,
- [TOMTOM_A_CDC_SPKR_CLIPDET_VAL3] = TOMTOM_A_CDC_SPKR_CLIPDET_VAL3__POR,
- [TOMTOM_A_CDC_SPKR_CLIPDET_VAL4] = TOMTOM_A_CDC_SPKR_CLIPDET_VAL4__POR,
- [TOMTOM_A_CDC_SPKR_CLIPDET_VAL5] = TOMTOM_A_CDC_SPKR_CLIPDET_VAL5__POR,
- [TOMTOM_A_CDC_SPKR_CLIPDET_VAL6] = TOMTOM_A_CDC_SPKR_CLIPDET_VAL6__POR,
- [TOMTOM_A_CDC_SPKR_CLIPDET_VAL7] = TOMTOM_A_CDC_SPKR_CLIPDET_VAL7__POR,
- [TOMTOM_A_CDC_DEBUG_B1_CTL] = TOMTOM_A_CDC_DEBUG_B1_CTL__POR,
- [TOMTOM_A_CDC_DEBUG_B2_CTL] = TOMTOM_A_CDC_DEBUG_B2_CTL__POR,
- [TOMTOM_A_CDC_DEBUG_B3_CTL] = TOMTOM_A_CDC_DEBUG_B3_CTL__POR,
- [TOMTOM_A_CDC_DEBUG_B4_CTL] = TOMTOM_A_CDC_DEBUG_B4_CTL__POR,
- [TOMTOM_A_CDC_DEBUG_B5_CTL] = TOMTOM_A_CDC_DEBUG_B5_CTL__POR,
- [TOMTOM_A_CDC_DEBUG_B6_CTL] = TOMTOM_A_CDC_DEBUG_B6_CTL__POR,
- [TOMTOM_A_CDC_DEBUG_B7_CTL] = TOMTOM_A_CDC_DEBUG_B7_CTL__POR,
- [TOMTOM_A_CDC_SRC1_PDA_CFG] = TOMTOM_A_CDC_SRC1_PDA_CFG__POR,
- [TOMTOM_A_CDC_SRC2_PDA_CFG] = TOMTOM_A_CDC_SRC2_PDA_CFG__POR,
- [TOMTOM_A_CDC_SRC1_FS_CTL] = TOMTOM_A_CDC_SRC1_FS_CTL__POR,
- [TOMTOM_A_CDC_SRC2_FS_CTL] = TOMTOM_A_CDC_SRC2_FS_CTL__POR,
- [TOMTOM_A_CDC_RX1_B1_CTL] = TOMTOM_A_CDC_RX1_B1_CTL__POR,
- [TOMTOM_A_CDC_RX2_B1_CTL] = TOMTOM_A_CDC_RX2_B1_CTL__POR,
- [TOMTOM_A_CDC_RX3_B1_CTL] = TOMTOM_A_CDC_RX3_B1_CTL__POR,
- [TOMTOM_A_CDC_RX4_B1_CTL] = TOMTOM_A_CDC_RX4_B1_CTL__POR,
- [TOMTOM_A_CDC_RX5_B1_CTL] = TOMTOM_A_CDC_RX5_B1_CTL__POR,
- [TOMTOM_A_CDC_RX6_B1_CTL] = TOMTOM_A_CDC_RX6_B1_CTL__POR,
- [TOMTOM_A_CDC_RX7_B1_CTL] = TOMTOM_A_CDC_RX7_B1_CTL__POR,
- [TOMTOM_A_CDC_RX1_B2_CTL] = TOMTOM_A_CDC_RX1_B2_CTL__POR,
- [TOMTOM_A_CDC_RX2_B2_CTL] = TOMTOM_A_CDC_RX2_B2_CTL__POR,
- [TOMTOM_A_CDC_RX3_B2_CTL] = TOMTOM_A_CDC_RX3_B2_CTL__POR,
- [TOMTOM_A_CDC_RX4_B2_CTL] = TOMTOM_A_CDC_RX4_B2_CTL__POR,
- [TOMTOM_A_CDC_RX5_B2_CTL] = TOMTOM_A_CDC_RX5_B2_CTL__POR,
- [TOMTOM_A_CDC_RX6_B2_CTL] = TOMTOM_A_CDC_RX6_B2_CTL__POR,
- [TOMTOM_A_CDC_RX7_B2_CTL] = TOMTOM_A_CDC_RX7_B2_CTL__POR,
- [TOMTOM_A_CDC_RX1_B3_CTL] = TOMTOM_A_CDC_RX1_B3_CTL__POR,
- [TOMTOM_A_CDC_RX2_B3_CTL] = TOMTOM_A_CDC_RX2_B3_CTL__POR,
- [TOMTOM_A_CDC_RX3_B3_CTL] = TOMTOM_A_CDC_RX3_B3_CTL__POR,
- [TOMTOM_A_CDC_RX4_B3_CTL] = TOMTOM_A_CDC_RX4_B3_CTL__POR,
- [TOMTOM_A_CDC_RX5_B3_CTL] = TOMTOM_A_CDC_RX5_B3_CTL__POR,
- [TOMTOM_A_CDC_RX6_B3_CTL] = TOMTOM_A_CDC_RX6_B3_CTL__POR,
- [TOMTOM_A_CDC_RX7_B3_CTL] = TOMTOM_A_CDC_RX7_B3_CTL__POR,
- [TOMTOM_A_CDC_RX1_B4_CTL] = TOMTOM_A_CDC_RX1_B4_CTL__POR,
- [TOMTOM_A_CDC_RX2_B4_CTL] = TOMTOM_A_CDC_RX2_B4_CTL__POR,
- [TOMTOM_A_CDC_RX3_B4_CTL] = TOMTOM_A_CDC_RX3_B4_CTL__POR,
- [TOMTOM_A_CDC_RX4_B4_CTL] = TOMTOM_A_CDC_RX4_B4_CTL__POR,
- [TOMTOM_A_CDC_RX5_B4_CTL] = TOMTOM_A_CDC_RX5_B4_CTL__POR,
- [TOMTOM_A_CDC_RX6_B4_CTL] = TOMTOM_A_CDC_RX6_B4_CTL__POR,
- [TOMTOM_A_CDC_RX7_B4_CTL] = TOMTOM_A_CDC_RX7_B4_CTL__POR,
- [TOMTOM_A_CDC_RX1_B5_CTL] = TOMTOM_A_CDC_RX1_B5_CTL__POR,
- [TOMTOM_A_CDC_RX2_B5_CTL] = TOMTOM_A_CDC_RX2_B5_CTL__POR,
- [TOMTOM_A_CDC_RX3_B5_CTL] = TOMTOM_A_CDC_RX3_B5_CTL__POR,
- [TOMTOM_A_CDC_RX4_B5_CTL] = TOMTOM_A_CDC_RX4_B5_CTL__POR,
- [TOMTOM_A_CDC_RX5_B5_CTL] = TOMTOM_A_CDC_RX5_B5_CTL__POR,
- [TOMTOM_A_CDC_RX6_B5_CTL] = TOMTOM_A_CDC_RX6_B5_CTL__POR,
- [TOMTOM_A_CDC_RX7_B5_CTL] = TOMTOM_A_CDC_RX7_B5_CTL__POR,
- [TOMTOM_A_CDC_RX1_B6_CTL] = TOMTOM_A_CDC_RX1_B6_CTL__POR,
- [TOMTOM_A_CDC_RX2_B6_CTL] = TOMTOM_A_CDC_RX2_B6_CTL__POR,
- [TOMTOM_A_CDC_RX3_B6_CTL] = TOMTOM_A_CDC_RX3_B6_CTL__POR,
- [TOMTOM_A_CDC_RX4_B6_CTL] = TOMTOM_A_CDC_RX4_B6_CTL__POR,
- [TOMTOM_A_CDC_RX5_B6_CTL] = TOMTOM_A_CDC_RX5_B6_CTL__POR,
- [TOMTOM_A_CDC_RX6_B6_CTL] = TOMTOM_A_CDC_RX6_B6_CTL__POR,
- [TOMTOM_A_CDC_RX7_B6_CTL] = TOMTOM_A_CDC_RX7_B6_CTL__POR,
- [TOMTOM_A_CDC_RX1_VOL_CTL_B1_CTL] =
- TOMTOM_A_CDC_RX1_VOL_CTL_B1_CTL__POR,
- [TOMTOM_A_CDC_RX2_VOL_CTL_B1_CTL] =
- TOMTOM_A_CDC_RX2_VOL_CTL_B1_CTL__POR,
- [TOMTOM_A_CDC_RX3_VOL_CTL_B1_CTL] =
- TOMTOM_A_CDC_RX3_VOL_CTL_B1_CTL__POR,
- [TOMTOM_A_CDC_RX4_VOL_CTL_B1_CTL] =
- TOMTOM_A_CDC_RX4_VOL_CTL_B1_CTL__POR,
- [TOMTOM_A_CDC_RX5_VOL_CTL_B1_CTL] =
- TOMTOM_A_CDC_RX5_VOL_CTL_B1_CTL__POR,
- [TOMTOM_A_CDC_RX6_VOL_CTL_B1_CTL] =
- TOMTOM_A_CDC_RX6_VOL_CTL_B1_CTL__POR,
- [TOMTOM_A_CDC_RX7_VOL_CTL_B1_CTL] =
- TOMTOM_A_CDC_RX7_VOL_CTL_B1_CTL__POR,
- [TOMTOM_A_CDC_RX1_VOL_CTL_B2_CTL] =
- TOMTOM_A_CDC_RX1_VOL_CTL_B2_CTL__POR,
- [TOMTOM_A_CDC_RX2_VOL_CTL_B2_CTL] =
- TOMTOM_A_CDC_RX2_VOL_CTL_B2_CTL__POR,
- [TOMTOM_A_CDC_RX3_VOL_CTL_B2_CTL] =
- TOMTOM_A_CDC_RX3_VOL_CTL_B2_CTL__POR,
- [TOMTOM_A_CDC_RX4_VOL_CTL_B2_CTL] =
- TOMTOM_A_CDC_RX4_VOL_CTL_B2_CTL__POR,
- [TOMTOM_A_CDC_RX5_VOL_CTL_B2_CTL] =
- TOMTOM_A_CDC_RX5_VOL_CTL_B2_CTL__POR,
- [TOMTOM_A_CDC_RX6_VOL_CTL_B2_CTL] =
- TOMTOM_A_CDC_RX6_VOL_CTL_B2_CTL__POR,
- [TOMTOM_A_CDC_RX7_VOL_CTL_B2_CTL] =
- TOMTOM_A_CDC_RX7_VOL_CTL_B2_CTL__POR,
- [TOMTOM_A_CDC_VBAT_CFG] = TOMTOM_A_CDC_VBAT_CFG__POR,
- [TOMTOM_A_CDC_VBAT_ADC_CAL1] = TOMTOM_A_CDC_VBAT_ADC_CAL1__POR,
- [TOMTOM_A_CDC_VBAT_ADC_CAL2] = TOMTOM_A_CDC_VBAT_ADC_CAL2__POR,
- [TOMTOM_A_CDC_VBAT_ADC_CAL3] = TOMTOM_A_CDC_VBAT_ADC_CAL3__POR,
- [TOMTOM_A_CDC_VBAT_PK_EST1] = TOMTOM_A_CDC_VBAT_PK_EST1__POR,
- [TOMTOM_A_CDC_VBAT_PK_EST2] = TOMTOM_A_CDC_VBAT_PK_EST2__POR,
- [TOMTOM_A_CDC_VBAT_PK_EST3] = TOMTOM_A_CDC_VBAT_PK_EST3__POR,
- [TOMTOM_A_CDC_VBAT_RF_PROC1] = TOMTOM_A_CDC_VBAT_RF_PROC1__POR,
- [TOMTOM_A_CDC_VBAT_RF_PROC2] = TOMTOM_A_CDC_VBAT_RF_PROC2__POR,
- [TOMTOM_A_CDC_VBAT_TAC1] = TOMTOM_A_CDC_VBAT_TAC1__POR,
- [TOMTOM_A_CDC_VBAT_TAC2] = TOMTOM_A_CDC_VBAT_TAC2__POR,
- [TOMTOM_A_CDC_VBAT_TAC3] = TOMTOM_A_CDC_VBAT_TAC3__POR,
- [TOMTOM_A_CDC_VBAT_TAC4] = TOMTOM_A_CDC_VBAT_TAC4__POR,
- [TOMTOM_A_CDC_VBAT_GAIN_UPD1] = TOMTOM_A_CDC_VBAT_GAIN_UPD1__POR,
- [TOMTOM_A_CDC_VBAT_GAIN_UPD2] = TOMTOM_A_CDC_VBAT_GAIN_UPD2__POR,
- [TOMTOM_A_CDC_VBAT_GAIN_UPD3] = TOMTOM_A_CDC_VBAT_GAIN_UPD3__POR,
- [TOMTOM_A_CDC_VBAT_GAIN_UPD4] = TOMTOM_A_CDC_VBAT_GAIN_UPD4__POR,
- [TOMTOM_A_CDC_VBAT_DEBUG1] = TOMTOM_A_CDC_VBAT_DEBUG1__POR,
- [TOMTOM_A_CDC_VBAT_GAIN_UPD_MON] = TOMTOM_A_CDC_VBAT_GAIN_UPD_MON__POR,
- [TOMTOM_A_CDC_VBAT_GAIN_MON_VAL] = TOMTOM_A_CDC_VBAT_GAIN_MON_VAL__POR,
- [TOMTOM_A_CDC_CLK_ANC_RESET_CTL] = TOMTOM_A_CDC_CLK_ANC_RESET_CTL__POR,
- [TOMTOM_A_CDC_CLK_RX_RESET_CTL] = TOMTOM_A_CDC_CLK_RX_RESET_CTL__POR,
- [TOMTOM_A_CDC_CLK_TX_RESET_B1_CTL] =
- TOMTOM_A_CDC_CLK_TX_RESET_B1_CTL__POR,
- [TOMTOM_A_CDC_CLK_TX_RESET_B2_CTL] =
- TOMTOM_A_CDC_CLK_TX_RESET_B2_CTL__POR,
- [TOMTOM_A_CDC_CLK_RX_I2S_CTL] = TOMTOM_A_CDC_CLK_RX_I2S_CTL__POR,
- [TOMTOM_A_CDC_CLK_TX_I2S_CTL] = TOMTOM_A_CDC_CLK_TX_I2S_CTL__POR,
- [TOMTOM_A_CDC_CLK_OTHR_RESET_B1_CTL] =
- TOMTOM_A_CDC_CLK_OTHR_RESET_B1_CTL__POR,
- [TOMTOM_A_CDC_CLK_OTHR_RESET_B2_CTL] =
- TOMTOM_A_CDC_CLK_OTHR_RESET_B2_CTL__POR,
- [TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL] =
- TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL__POR,
- [TOMTOM_A_CDC_CLK_TX_CLK_EN_B2_CTL] =
- TOMTOM_A_CDC_CLK_TX_CLK_EN_B2_CTL__POR,
- [TOMTOM_A_CDC_CLK_OTHR_CTL] = TOMTOM_A_CDC_CLK_OTHR_CTL__POR,
- [TOMTOM_A_CDC_CLK_ANC_CLK_EN_CTL] =
- TOMTOM_A_CDC_CLK_ANC_CLK_EN_CTL__POR,
- [TOMTOM_A_CDC_CLK_RX_B1_CTL] = TOMTOM_A_CDC_CLK_RX_B1_CTL__POR,
- [TOMTOM_A_CDC_CLK_RX_B2_CTL] = TOMTOM_A_CDC_CLK_RX_B2_CTL__POR,
- [TOMTOM_A_CDC_CLK_MCLK_CTL] = TOMTOM_A_CDC_CLK_MCLK_CTL__POR,
- [TOMTOM_A_CDC_CLK_PDM_CTL] = TOMTOM_A_CDC_CLK_PDM_CTL__POR,
- [TOMTOM_A_CDC_CLK_SD_CTL] = TOMTOM_A_CDC_CLK_SD_CTL__POR,
- [TOMTOM_A_CDC_CLSH_B1_CTL] = TOMTOM_A_CDC_CLSH_B1_CTL__POR,
- [TOMTOM_A_CDC_CLSH_B2_CTL] = TOMTOM_A_CDC_CLSH_B2_CTL__POR,
- [TOMTOM_A_CDC_CLSH_B3_CTL] = TOMTOM_A_CDC_CLSH_B3_CTL__POR,
- [TOMTOM_A_CDC_CLSH_BUCK_NCP_VARS] =
- TOMTOM_A_CDC_CLSH_BUCK_NCP_VARS__POR,
- [TOMTOM_A_CDC_CLSH_IDLE_HPH_THSD] =
- TOMTOM_A_CDC_CLSH_IDLE_HPH_THSD__POR,
- [TOMTOM_A_CDC_CLSH_IDLE_EAR_THSD] =
- TOMTOM_A_CDC_CLSH_IDLE_EAR_THSD__POR,
- [TOMTOM_A_CDC_CLSH_FCLKONLY_HPH_THSD] =
- TOMTOM_A_CDC_CLSH_FCLKONLY_HPH_THSD__POR,
- [TOMTOM_A_CDC_CLSH_FCLKONLY_EAR_THSD] =
- TOMTOM_A_CDC_CLSH_FCLKONLY_EAR_THSD__POR,
- [TOMTOM_A_CDC_CLSH_K_ADDR] = TOMTOM_A_CDC_CLSH_K_ADDR__POR,
- [TOMTOM_A_CDC_CLSH_K_DATA] = TOMTOM_A_CDC_CLSH_K_DATA__POR,
- [TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_L] =
- TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_L__POR,
- [TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_U] =
- TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_U__POR,
- [TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_L] =
- TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_L__POR,
- [TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_U] =
- TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_U__POR,
- [TOMTOM_A_CDC_CLSH_V_PA_HD_EAR] = TOMTOM_A_CDC_CLSH_V_PA_HD_EAR__POR,
- [TOMTOM_A_CDC_CLSH_V_PA_HD_HPH] = TOMTOM_A_CDC_CLSH_V_PA_HD_HPH__POR,
- [TOMTOM_A_CDC_CLSH_V_PA_MIN_EAR] = TOMTOM_A_CDC_CLSH_V_PA_MIN_EAR__POR,
- [TOMTOM_A_CDC_CLSH_V_PA_MIN_HPH] = TOMTOM_A_CDC_CLSH_V_PA_MIN_HPH__POR,
- [TOMTOM_A_CDC_IIR1_GAIN_B1_CTL] = TOMTOM_A_CDC_IIR1_GAIN_B1_CTL__POR,
- [TOMTOM_A_CDC_IIR2_GAIN_B1_CTL] = TOMTOM_A_CDC_IIR2_GAIN_B1_CTL__POR,
- [TOMTOM_A_CDC_IIR1_GAIN_B2_CTL] = TOMTOM_A_CDC_IIR1_GAIN_B2_CTL__POR,
- [TOMTOM_A_CDC_IIR2_GAIN_B2_CTL] = TOMTOM_A_CDC_IIR2_GAIN_B2_CTL__POR,
- [TOMTOM_A_CDC_IIR1_GAIN_B3_CTL] = TOMTOM_A_CDC_IIR1_GAIN_B3_CTL__POR,
- [TOMTOM_A_CDC_IIR2_GAIN_B3_CTL] = TOMTOM_A_CDC_IIR2_GAIN_B3_CTL__POR,
- [TOMTOM_A_CDC_IIR1_GAIN_B4_CTL] = TOMTOM_A_CDC_IIR1_GAIN_B4_CTL__POR,
- [TOMTOM_A_CDC_IIR2_GAIN_B4_CTL] = TOMTOM_A_CDC_IIR2_GAIN_B4_CTL__POR,
- [TOMTOM_A_CDC_IIR1_GAIN_B5_CTL] = TOMTOM_A_CDC_IIR1_GAIN_B5_CTL__POR,
- [TOMTOM_A_CDC_IIR2_GAIN_B5_CTL] = TOMTOM_A_CDC_IIR2_GAIN_B5_CTL__POR,
- [TOMTOM_A_CDC_IIR1_GAIN_B6_CTL] = TOMTOM_A_CDC_IIR1_GAIN_B6_CTL__POR,
- [TOMTOM_A_CDC_IIR2_GAIN_B6_CTL] = TOMTOM_A_CDC_IIR2_GAIN_B6_CTL__POR,
- [TOMTOM_A_CDC_IIR1_GAIN_B7_CTL] = TOMTOM_A_CDC_IIR1_GAIN_B7_CTL__POR,
- [TOMTOM_A_CDC_IIR2_GAIN_B7_CTL] = TOMTOM_A_CDC_IIR2_GAIN_B7_CTL__POR,
- [TOMTOM_A_CDC_IIR1_GAIN_B8_CTL] = TOMTOM_A_CDC_IIR1_GAIN_B8_CTL__POR,
- [TOMTOM_A_CDC_IIR2_GAIN_B8_CTL] = TOMTOM_A_CDC_IIR2_GAIN_B8_CTL__POR,
- [TOMTOM_A_CDC_IIR1_CTL] = TOMTOM_A_CDC_IIR1_CTL__POR,
- [TOMTOM_A_CDC_IIR2_CTL] = TOMTOM_A_CDC_IIR2_CTL__POR,
- [TOMTOM_A_CDC_IIR1_GAIN_TIMER_CTL] =
- TOMTOM_A_CDC_IIR1_GAIN_TIMER_CTL__POR,
- [TOMTOM_A_CDC_IIR2_GAIN_TIMER_CTL] =
- TOMTOM_A_CDC_IIR2_GAIN_TIMER_CTL__POR,
- [TOMTOM_A_CDC_IIR1_COEF_B1_CTL] = TOMTOM_A_CDC_IIR1_COEF_B1_CTL__POR,
- [TOMTOM_A_CDC_IIR2_COEF_B1_CTL] = TOMTOM_A_CDC_IIR2_COEF_B1_CTL__POR,
- [TOMTOM_A_CDC_IIR1_COEF_B2_CTL] = TOMTOM_A_CDC_IIR1_COEF_B2_CTL__POR,
- [TOMTOM_A_CDC_IIR2_COEF_B2_CTL] = TOMTOM_A_CDC_IIR2_COEF_B2_CTL__POR,
- [TOMTOM_A_CDC_TOP_GAIN_UPDATE] = TOMTOM_A_CDC_TOP_GAIN_UPDATE__POR,
- [TOMTOM_A_CDC_PA_RAMP_B1_CTL] = TOMTOM_A_CDC_PA_RAMP_B1_CTL__POR,
- [TOMTOM_A_CDC_PA_RAMP_B2_CTL] = TOMTOM_A_CDC_PA_RAMP_B2_CTL__POR,
- [TOMTOM_A_CDC_PA_RAMP_B3_CTL] = TOMTOM_A_CDC_PA_RAMP_B3_CTL__POR,
- [TOMTOM_A_CDC_PA_RAMP_B4_CTL] = TOMTOM_A_CDC_PA_RAMP_B4_CTL__POR,
- [TOMTOM_A_CDC_SPKR_CLIPDET_B1_CTL] =
- TOMTOM_A_CDC_SPKR_CLIPDET_B1_CTL__POR,
- [TOMTOM_A_CDC_SPKR2_CLIPDET_B1_CTL] =
- TOMTOM_A_CDC_SPKR2_CLIPDET_B1_CTL__POR,
- [TOMTOM_A_CDC_COMP0_B1_CTL] = TOMTOM_A_CDC_COMP0_B1_CTL__POR,
- [TOMTOM_A_CDC_COMP1_B1_CTL] = TOMTOM_A_CDC_COMP1_B1_CTL__POR,
- [TOMTOM_A_CDC_COMP2_B1_CTL] = TOMTOM_A_CDC_COMP2_B1_CTL__POR,
- [TOMTOM_A_CDC_COMP0_B2_CTL] = TOMTOM_A_CDC_COMP0_B2_CTL__POR,
- [TOMTOM_A_CDC_COMP1_B2_CTL] = TOMTOM_A_CDC_COMP1_B2_CTL__POR,
- [TOMTOM_A_CDC_COMP2_B2_CTL] = TOMTOM_A_CDC_COMP2_B2_CTL__POR,
- [TOMTOM_A_CDC_COMP0_B3_CTL] = TOMTOM_A_CDC_COMP0_B3_CTL__POR,
- [TOMTOM_A_CDC_COMP1_B3_CTL] = TOMTOM_A_CDC_COMP1_B3_CTL__POR,
- [TOMTOM_A_CDC_COMP2_B3_CTL] = TOMTOM_A_CDC_COMP2_B3_CTL__POR,
- [TOMTOM_A_CDC_COMP0_B4_CTL] = TOMTOM_A_CDC_COMP0_B4_CTL__POR,
- [TOMTOM_A_CDC_COMP1_B4_CTL] = TOMTOM_A_CDC_COMP1_B4_CTL__POR,
- [TOMTOM_A_CDC_COMP2_B4_CTL] = TOMTOM_A_CDC_COMP2_B4_CTL__POR,
- [TOMTOM_A_CDC_COMP0_B5_CTL] = TOMTOM_A_CDC_COMP0_B5_CTL__POR,
- [TOMTOM_A_CDC_COMP1_B5_CTL] = TOMTOM_A_CDC_COMP1_B5_CTL__POR,
- [TOMTOM_A_CDC_COMP2_B5_CTL] = TOMTOM_A_CDC_COMP2_B5_CTL__POR,
- [TOMTOM_A_CDC_COMP0_B6_CTL] = TOMTOM_A_CDC_COMP0_B6_CTL__POR,
- [TOMTOM_A_CDC_COMP1_B6_CTL] = TOMTOM_A_CDC_COMP1_B6_CTL__POR,
- [TOMTOM_A_CDC_COMP2_B6_CTL] = TOMTOM_A_CDC_COMP2_B6_CTL__POR,
- [TOMTOM_A_CDC_COMP0_SHUT_DOWN_STATUS] =
- TOMTOM_A_CDC_COMP0_SHUT_DOWN_STATUS__POR,
- [TOMTOM_A_CDC_COMP1_SHUT_DOWN_STATUS] =
- TOMTOM_A_CDC_COMP1_SHUT_DOWN_STATUS__POR,
- [TOMTOM_A_CDC_COMP2_SHUT_DOWN_STATUS] =
- TOMTOM_A_CDC_COMP2_SHUT_DOWN_STATUS__POR,
- [TOMTOM_A_CDC_COMP0_FS_CFG] = TOMTOM_A_CDC_COMP0_FS_CFG__POR,
- [TOMTOM_A_CDC_COMP1_FS_CFG] = TOMTOM_A_CDC_COMP1_FS_CFG__POR,
- [TOMTOM_A_CDC_COMP2_FS_CFG] = TOMTOM_A_CDC_COMP2_FS_CFG__POR,
- [TOMTOM_A_CDC_CONN_RX1_B1_CTL] = TOMTOM_A_CDC_CONN_RX1_B1_CTL__POR,
- [TOMTOM_A_CDC_CONN_RX1_B2_CTL] = TOMTOM_A_CDC_CONN_RX1_B2_CTL__POR,
- [TOMTOM_A_CDC_CONN_RX1_B3_CTL] = TOMTOM_A_CDC_CONN_RX1_B3_CTL__POR,
- [TOMTOM_A_CDC_CONN_RX2_B1_CTL] = TOMTOM_A_CDC_CONN_RX2_B1_CTL__POR,
- [TOMTOM_A_CDC_CONN_RX2_B2_CTL] = TOMTOM_A_CDC_CONN_RX2_B2_CTL__POR,
- [TOMTOM_A_CDC_CONN_RX2_B3_CTL] = TOMTOM_A_CDC_CONN_RX2_B3_CTL__POR,
- [TOMTOM_A_CDC_CONN_RX3_B1_CTL] = TOMTOM_A_CDC_CONN_RX3_B1_CTL__POR,
- [TOMTOM_A_CDC_CONN_RX3_B2_CTL] = TOMTOM_A_CDC_CONN_RX3_B2_CTL__POR,
- [TOMTOM_A_CDC_CONN_RX4_B1_CTL] = TOMTOM_A_CDC_CONN_RX4_B1_CTL__POR,
- [TOMTOM_A_CDC_CONN_RX4_B2_CTL] = TOMTOM_A_CDC_CONN_RX4_B2_CTL__POR,
- [TOMTOM_A_CDC_CONN_RX5_B1_CTL] = TOMTOM_A_CDC_CONN_RX5_B1_CTL__POR,
- [TOMTOM_A_CDC_CONN_RX5_B2_CTL] = TOMTOM_A_CDC_CONN_RX5_B2_CTL__POR,
- [TOMTOM_A_CDC_CONN_RX6_B1_CTL] = TOMTOM_A_CDC_CONN_RX6_B1_CTL__POR,
- [TOMTOM_A_CDC_CONN_RX6_B2_CTL] = TOMTOM_A_CDC_CONN_RX6_B2_CTL__POR,
- [TOMTOM_A_CDC_CONN_RX7_B1_CTL] = TOMTOM_A_CDC_CONN_RX7_B1_CTL__POR,
- [TOMTOM_A_CDC_CONN_RX7_B2_CTL] = TOMTOM_A_CDC_CONN_RX7_B2_CTL__POR,
- [TOMTOM_A_CDC_CONN_RX7_B3_CTL] = TOMTOM_A_CDC_CONN_RX7_B3_CTL__POR,
- [TOMTOM_A_CDC_CONN_ANC_B1_CTL] = TOMTOM_A_CDC_CONN_ANC_B1_CTL__POR,
- [TOMTOM_A_CDC_CONN_ANC_B2_CTL] = TOMTOM_A_CDC_CONN_ANC_B2_CTL__POR,
- [TOMTOM_A_CDC_CONN_TX_B1_CTL] = TOMTOM_A_CDC_CONN_TX_B1_CTL__POR,
- [TOMTOM_A_CDC_CONN_TX_B2_CTL] = TOMTOM_A_CDC_CONN_TX_B2_CTL__POR,
- [TOMTOM_A_CDC_CONN_TX_B3_CTL] = TOMTOM_A_CDC_CONN_TX_B3_CTL__POR,
- [TOMTOM_A_CDC_CONN_TX_B4_CTL] = TOMTOM_A_CDC_CONN_TX_B4_CTL__POR,
- [TOMTOM_A_CDC_CONN_EQ1_B1_CTL] = TOMTOM_A_CDC_CONN_EQ1_B1_CTL__POR,
- [TOMTOM_A_CDC_CONN_EQ1_B2_CTL] = TOMTOM_A_CDC_CONN_EQ1_B2_CTL__POR,
- [TOMTOM_A_CDC_CONN_EQ1_B3_CTL] = TOMTOM_A_CDC_CONN_EQ1_B3_CTL__POR,
- [TOMTOM_A_CDC_CONN_EQ1_B4_CTL] = TOMTOM_A_CDC_CONN_EQ1_B4_CTL__POR,
- [TOMTOM_A_CDC_CONN_EQ2_B1_CTL] = TOMTOM_A_CDC_CONN_EQ2_B1_CTL__POR,
- [TOMTOM_A_CDC_CONN_EQ2_B2_CTL] = TOMTOM_A_CDC_CONN_EQ2_B2_CTL__POR,
- [TOMTOM_A_CDC_CONN_EQ2_B3_CTL] = TOMTOM_A_CDC_CONN_EQ2_B3_CTL__POR,
- [TOMTOM_A_CDC_CONN_EQ2_B4_CTL] = TOMTOM_A_CDC_CONN_EQ2_B4_CTL__POR,
- [TOMTOM_A_CDC_CONN_SRC1_B1_CTL] = TOMTOM_A_CDC_CONN_SRC1_B1_CTL__POR,
- [TOMTOM_A_CDC_CONN_SRC1_B2_CTL] = TOMTOM_A_CDC_CONN_SRC1_B2_CTL__POR,
- [TOMTOM_A_CDC_CONN_SRC2_B1_CTL] = TOMTOM_A_CDC_CONN_SRC2_B1_CTL__POR,
- [TOMTOM_A_CDC_CONN_SRC2_B2_CTL] = TOMTOM_A_CDC_CONN_SRC2_B2_CTL__POR,
- [TOMTOM_A_CDC_CONN_TX_SB_B1_CTL] = TOMTOM_A_CDC_CONN_TX_SB_B1_CTL__POR,
- [TOMTOM_A_CDC_CONN_TX_SB_B2_CTL] = TOMTOM_A_CDC_CONN_TX_SB_B2_CTL__POR,
- [TOMTOM_A_CDC_CONN_TX_SB_B3_CTL] = TOMTOM_A_CDC_CONN_TX_SB_B3_CTL__POR,
- [TOMTOM_A_CDC_CONN_TX_SB_B4_CTL] = TOMTOM_A_CDC_CONN_TX_SB_B4_CTL__POR,
- [TOMTOM_A_CDC_CONN_TX_SB_B5_CTL] = TOMTOM_A_CDC_CONN_TX_SB_B5_CTL__POR,
- [TOMTOM_A_CDC_CONN_TX_SB_B6_CTL] = TOMTOM_A_CDC_CONN_TX_SB_B6_CTL__POR,
- [TOMTOM_A_CDC_CONN_TX_SB_B7_CTL] = TOMTOM_A_CDC_CONN_TX_SB_B7_CTL__POR,
- [TOMTOM_A_CDC_CONN_TX_SB_B8_CTL] = TOMTOM_A_CDC_CONN_TX_SB_B8_CTL__POR,
- [TOMTOM_A_CDC_CONN_TX_SB_B9_CTL] = TOMTOM_A_CDC_CONN_TX_SB_B9_CTL__POR,
- [TOMTOM_A_CDC_CONN_TX_SB_B10_CTL] =
- TOMTOM_A_CDC_CONN_TX_SB_B10_CTL__POR,
- [TOMTOM_A_CDC_CONN_TX_SB_B11_CTL] =
- TOMTOM_A_CDC_CONN_TX_SB_B11_CTL__POR,
- [TOMTOM_A_CDC_CONN_RX_SB_B1_CTL] = TOMTOM_A_CDC_CONN_RX_SB_B1_CTL__POR,
- [TOMTOM_A_CDC_CONN_RX_SB_B2_CTL] = TOMTOM_A_CDC_CONN_RX_SB_B2_CTL__POR,
- [TOMTOM_A_CDC_CONN_CLSH_CTL] = TOMTOM_A_CDC_CONN_CLSH_CTL__POR,
- [TOMTOM_A_CDC_CONN_MISC] = TOMTOM_A_CDC_CONN_MISC__POR,
- [TOMTOM_A_CDC_CONN_RX8_B1_CTL] = TOMTOM_A_CDC_CONN_RX8_B1_CTL__POR,
- [TOMTOM_A_CDC_CLIP_ADJ_SPKR_B1_CTL] =
- TOMTOM_A_CDC_CLIP_ADJ_SPKR_B1_CTL__POR,
- [TOMTOM_A_CDC_CLIP_ADJ_SPKR_CLIP_LEVEL_ADJUST] =
- TOMTOM_A_CDC_CLIP_ADJ_SPKR_CLIP_LEVEL_ADJUST__POR,
- [TOMTOM_A_CDC_CLIP_ADJ_SPKR_MIN_CLIP_THRESHOLD] =
- TOMTOM_A_CDC_CLIP_ADJ_SPKR_MIN_CLIP_THRESHOLD__POR,
- [TOMTOM_A_CDC_CLIP_ADJ_SPKR_THRESHOLD_STATUS] =
- TOMTOM_A_CDC_CLIP_ADJ_SPKR_THRESHOLD_STATUS__POR,
- [TOMTOM_A_CDC_CLIP_ADJ_SPKR_SAMPLE_MARK] =
- TOMTOM_A_CDC_CLIP_ADJ_SPKR_SAMPLE_MARK__POR,
- [TOMTOM_A_CDC_CLIP_ADJ_SPKR_BOOST_GATING] =
- TOMTOM_A_CDC_CLIP_ADJ_SPKR_BOOST_GATING__POR,
- [TOMTOM_A_CDC_CLIP_ADJ_SPKR2_B1_CTL] =
- TOMTOM_A_CDC_CLIP_ADJ_SPKR2_B1_CTL__POR,
- [TOMTOM_A_CDC_CLIP_ADJ_SPKR2_CLIP_LEVEL_ADJUST] =
- TOMTOM_A_CDC_CLIP_ADJ_SPKR2_CLIP_LEVEL_ADJUST__POR,
- [TOMTOM_A_CDC_CLIP_ADJ_SPKR2_MIN_CLIP_THRESHOLD] =
- TOMTOM_A_CDC_CLIP_ADJ_SPKR2_MIN_CLIP_THRESHOLD__POR,
- [TOMTOM_A_CDC_CLIP_ADJ_SPKR2_THRESHOLD_STATUS] =
- TOMTOM_A_CDC_CLIP_ADJ_SPKR2_THRESHOLD_STATUS__POR,
- [TOMTOM_A_CDC_CLIP_ADJ_SPKR2_SAMPLE_MARK] =
- TOMTOM_A_CDC_CLIP_ADJ_SPKR2_SAMPLE_MARK__POR,
- [TOMTOM_A_CDC_CLIP_ADJ_SPKR2_BOOST_GATING] =
- TOMTOM_A_CDC_CLIP_ADJ_SPKR2_BOOST_GATING__POR,
- [TOMTOM_A_CDC_MBHC_EN_CTL] = TOMTOM_A_CDC_MBHC_EN_CTL__POR,
- [TOMTOM_A_CDC_MBHC_FIR_B1_CFG] = TOMTOM_A_CDC_MBHC_FIR_B1_CFG__POR,
- [TOMTOM_A_CDC_MBHC_FIR_B2_CFG] = TOMTOM_A_CDC_MBHC_FIR_B2_CFG__POR,
- [TOMTOM_A_CDC_MBHC_TIMER_B1_CTL] = TOMTOM_A_CDC_MBHC_TIMER_B1_CTL__POR,
- [TOMTOM_A_CDC_MBHC_TIMER_B2_CTL] = TOMTOM_A_CDC_MBHC_TIMER_B2_CTL__POR,
- [TOMTOM_A_CDC_MBHC_TIMER_B3_CTL] = TOMTOM_A_CDC_MBHC_TIMER_B3_CTL__POR,
- [TOMTOM_A_CDC_MBHC_TIMER_B4_CTL] = TOMTOM_A_CDC_MBHC_TIMER_B4_CTL__POR,
- [TOMTOM_A_CDC_MBHC_TIMER_B5_CTL] = TOMTOM_A_CDC_MBHC_TIMER_B5_CTL__POR,
- [TOMTOM_A_CDC_MBHC_TIMER_B6_CTL] = TOMTOM_A_CDC_MBHC_TIMER_B6_CTL__POR,
- [TOMTOM_A_CDC_MBHC_B1_STATUS] = TOMTOM_A_CDC_MBHC_B1_STATUS__POR,
- [TOMTOM_A_CDC_MBHC_B2_STATUS] = TOMTOM_A_CDC_MBHC_B2_STATUS__POR,
- [TOMTOM_A_CDC_MBHC_B3_STATUS] = TOMTOM_A_CDC_MBHC_B3_STATUS__POR,
- [TOMTOM_A_CDC_MBHC_B4_STATUS] = TOMTOM_A_CDC_MBHC_B4_STATUS__POR,
- [TOMTOM_A_CDC_MBHC_B5_STATUS] = TOMTOM_A_CDC_MBHC_B5_STATUS__POR,
- [TOMTOM_A_CDC_MBHC_B1_CTL] = TOMTOM_A_CDC_MBHC_B1_CTL__POR,
- [TOMTOM_A_CDC_MBHC_B2_CTL] = TOMTOM_A_CDC_MBHC_B2_CTL__POR,
- [TOMTOM_A_CDC_MBHC_VOLT_B1_CTL] = TOMTOM_A_CDC_MBHC_VOLT_B1_CTL__POR,
- [TOMTOM_A_CDC_MBHC_VOLT_B2_CTL] = TOMTOM_A_CDC_MBHC_VOLT_B2_CTL__POR,
- [TOMTOM_A_CDC_MBHC_VOLT_B3_CTL] = TOMTOM_A_CDC_MBHC_VOLT_B3_CTL__POR,
- [TOMTOM_A_CDC_MBHC_VOLT_B4_CTL] = TOMTOM_A_CDC_MBHC_VOLT_B4_CTL__POR,
- [TOMTOM_A_CDC_MBHC_VOLT_B5_CTL] = TOMTOM_A_CDC_MBHC_VOLT_B5_CTL__POR,
- [TOMTOM_A_CDC_MBHC_VOLT_B6_CTL] = TOMTOM_A_CDC_MBHC_VOLT_B6_CTL__POR,
- [TOMTOM_A_CDC_MBHC_VOLT_B7_CTL] = TOMTOM_A_CDC_MBHC_VOLT_B7_CTL__POR,
- [TOMTOM_A_CDC_MBHC_VOLT_B8_CTL] = TOMTOM_A_CDC_MBHC_VOLT_B8_CTL__POR,
- [TOMTOM_A_CDC_MBHC_VOLT_B9_CTL] = TOMTOM_A_CDC_MBHC_VOLT_B9_CTL__POR,
- [TOMTOM_A_CDC_MBHC_VOLT_B10_CTL] = TOMTOM_A_CDC_MBHC_VOLT_B10_CTL__POR,
- [TOMTOM_A_CDC_MBHC_VOLT_B11_CTL] = TOMTOM_A_CDC_MBHC_VOLT_B11_CTL__POR,
- [TOMTOM_A_CDC_MBHC_VOLT_B12_CTL] = TOMTOM_A_CDC_MBHC_VOLT_B12_CTL__POR,
- [TOMTOM_A_CDC_MBHC_CLK_CTL] = TOMTOM_A_CDC_MBHC_CLK_CTL__POR,
- [TOMTOM_A_CDC_MBHC_INT_CTL] = TOMTOM_A_CDC_MBHC_INT_CTL__POR,
- [TOMTOM_A_CDC_MBHC_DEBUG_CTL] = TOMTOM_A_CDC_MBHC_DEBUG_CTL__POR,
- [TOMTOM_A_CDC_MBHC_SPARE] = TOMTOM_A_CDC_MBHC_SPARE__POR,
- [TOMTOM_A_CDC_RX8_B1_CTL] = TOMTOM_A_CDC_RX8_B1_CTL__POR,
- [TOMTOM_A_CDC_RX8_B2_CTL] = TOMTOM_A_CDC_RX8_B2_CTL__POR,
- [TOMTOM_A_CDC_RX8_B3_CTL] = TOMTOM_A_CDC_RX8_B3_CTL__POR,
- [TOMTOM_A_CDC_RX8_B4_CTL] = TOMTOM_A_CDC_RX8_B4_CTL__POR,
- [TOMTOM_A_CDC_RX8_B5_CTL] = TOMTOM_A_CDC_RX8_B5_CTL__POR,
- [TOMTOM_A_CDC_RX8_B6_CTL] = TOMTOM_A_CDC_RX8_B6_CTL__POR,
- [TOMTOM_A_CDC_RX8_VOL_CTL_B1_CTL] =
- TOMTOM_A_CDC_RX8_VOL_CTL_B1_CTL__POR,
- [TOMTOM_A_CDC_RX8_VOL_CTL_B2_CTL] =
- TOMTOM_A_CDC_RX8_VOL_CTL_B2_CTL__POR,
- [TOMTOM_A_CDC_SPKR2_CLIPDET_VAL0] =
- TOMTOM_A_CDC_SPKR2_CLIPDET_VAL0__POR,
- [TOMTOM_A_CDC_SPKR2_CLIPDET_VAL1] =
- TOMTOM_A_CDC_SPKR2_CLIPDET_VAL1__POR,
- [TOMTOM_A_CDC_SPKR2_CLIPDET_VAL2] =
- TOMTOM_A_CDC_SPKR2_CLIPDET_VAL2__POR,
- [TOMTOM_A_CDC_SPKR2_CLIPDET_VAL3] =
- TOMTOM_A_CDC_SPKR2_CLIPDET_VAL3__POR,
- [TOMTOM_A_CDC_SPKR2_CLIPDET_VAL4] =
- TOMTOM_A_CDC_SPKR2_CLIPDET_VAL4__POR,
- [TOMTOM_A_CDC_SPKR2_CLIPDET_VAL5] =
- TOMTOM_A_CDC_SPKR2_CLIPDET_VAL5__POR,
- [TOMTOM_A_CDC_SPKR2_CLIPDET_VAL6] =
- TOMTOM_A_CDC_SPKR2_CLIPDET_VAL6__POR,
- [TOMTOM_A_CDC_SPKR2_CLIPDET_VAL7] =
- TOMTOM_A_CDC_SPKR2_CLIPDET_VAL7__POR,
- [TOMTOM_A_CDC_BOOST_MODE_CTL] = TOMTOM_A_CDC_BOOST_MODE_CTL__POR,
- [TOMTOM_A_CDC_BOOST_THRESHOLD] = TOMTOM_A_CDC_BOOST_THRESHOLD__POR,
- [TOMTOM_A_CDC_BOOST_TAP_SEL] = TOMTOM_A_CDC_BOOST_TAP_SEL__POR,
- [TOMTOM_A_CDC_BOOST_HOLD_TIME] = TOMTOM_A_CDC_BOOST_HOLD_TIME__POR,
- [TOMTOM_A_CDC_BOOST_TRGR_EN] = TOMTOM_A_CDC_BOOST_TRGR_EN__POR,
-};
diff --git a/sound/soc/codecs/wcd9330.c b/sound/soc/codecs/wcd9330.c
deleted file mode 100644
index 4278e36..0000000
--- a/sound/soc/codecs/wcd9330.c
+++ /dev/null
@@ -1,9113 +0,0 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/firmware.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-#include <linux/device.h>
-#include <linux/printk.h>
-#include <linux/ratelimit.h>
-#include <linux/debugfs.h>
-#include <linux/wait.h>
-#include <linux/bitops.h>
-#include <linux/regmap.h>
-#include <linux/mfd/wcd9xxx/core.h>
-#include <linux/mfd/wcd9xxx/wcd9xxx-irq.h>
-#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
-#include <linux/mfd/wcd9xxx/wcd9330_registers.h>
-#include <linux/mfd/wcd9xxx/pdata.h>
-#include <linux/regulator/consumer.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-#include <sound/soc-dapm.h>
-#include <sound/tlv.h>
-#include <linux/delay.h>
-#include <linux/pm_runtime.h>
-#include <linux/kernel.h>
-#include <linux/gpio.h>
-#include <linux/clk.h>
-#include "wcd9330.h"
-#include "wcd9xxx-resmgr.h"
-#include "wcd9xxx-common.h"
-#include "wcdcal-hwdep.h"
-#include "wcd_cpe_core.h"
-
-enum {
- BUS_DOWN,
- ADC1_TXFE,
- ADC2_TXFE,
- ADC3_TXFE,
- ADC4_TXFE,
- ADC5_TXFE,
- ADC6_TXFE,
- HPH_DELAY,
-};
-
-#define TOMTOM_MAD_SLIMBUS_TX_PORT 12
-#define TOMTOM_MAD_AUDIO_FIRMWARE_PATH "wcd9320/wcd9320_mad_audio.bin"
-#define TOMTOM_VALIDATE_RX_SBPORT_RANGE(port) ((port >= 16) && (port <= 23))
-#define TOMTOM_VALIDATE_TX_SBPORT_RANGE(port) ((port >= 0) && (port <= 15))
-#define TOMTOM_CONVERT_RX_SBPORT_ID(port) (port - 16) /* RX1 port ID = 0 */
-#define TOMTOM_BIT_ADJ_SHIFT_PORT1_6 4
-#define TOMTOM_BIT_ADJ_SHIFT_PORT7_10 5
-
-#define TOMTOM_HPH_PA_SETTLE_COMP_ON 10000
-#define TOMTOM_HPH_PA_SETTLE_COMP_OFF 13000
-#define TOMTOM_HPH_PA_RAMP_DELAY 30000
-
-#define TOMTOM_SVASS_INT_STATUS_RCO_WDOG 0x20
-#define TOMTOM_SVASS_INT_STATUS_WDOG_BITE 0x02
-
-/* Add any SVA IRQs that are to be treated as FATAL */
-#define TOMTOM_CPE_FATAL_IRQS \
- (TOMTOM_SVASS_INT_STATUS_RCO_WDOG | \
- TOMTOM_SVASS_INT_STATUS_WDOG_BITE)
-
-#define DAPM_MICBIAS2_EXTERNAL_STANDALONE "MIC BIAS2 External Standalone"
-
-/* RX_HPH_CNP_WG_TIME increases by 0.24ms */
-#define TOMTOM_WG_TIME_FACTOR_US 240
-
-#define RX8_PATH 8
-#define HPH_PA_ENABLE true
-#define HPH_PA_DISABLE false
-
-#define SLIM_BW_CLK_GEAR_9 6200000
-#define SLIM_BW_UNVOTE 0
-
-static int cpe_debug_mode;
-module_param(cpe_debug_mode, int, 0664);
-MODULE_PARM_DESC(cpe_debug_mode, "boot cpe in debug mode");
-
-static atomic_t kp_tomtom_priv;
-
-static int high_perf_mode;
-module_param(high_perf_mode, int, 0664);
-MODULE_PARM_DESC(high_perf_mode, "enable/disable class AB config for hph");
-
-static struct afe_param_slimbus_slave_port_cfg tomtom_slimbus_slave_port_cfg = {
- .minor_version = 1,
- .slimbus_dev_id = AFE_SLIMBUS_DEVICE_1,
- .slave_dev_pgd_la = 0,
- .slave_dev_intfdev_la = 0,
- .bit_width = 16,
- .data_format = 0,
- .num_channels = 1
-};
-
-static struct afe_param_cdc_reg_cfg audio_reg_cfg[] = {
- {
- 1,
- (TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_CDC_MAD_MAIN_CTL_1),
- HW_MAD_AUDIO_ENABLE, 0x1, 8, 0
- },
- {
- 1,
- (TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_CDC_MAD_AUDIO_CTL_3),
- HW_MAD_AUDIO_SLEEP_TIME, 0xF, 8, 0
- },
- {
- 1,
- (TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_CDC_MAD_AUDIO_CTL_4),
- HW_MAD_TX_AUDIO_SWITCH_OFF, 0x1, 8, 0
- },
- {
- 1,
- (TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_INTR_MODE),
- MAD_AUDIO_INT_DEST_SELECT_REG, 0x4, 8, 0
- },
- {
- 1,
- (TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_INTR2_MASK0),
- MAD_AUDIO_INT_MASK_REG, 0x2, 8, 0
- },
- {
- 1,
- (TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_INTR2_STATUS0),
- MAD_AUDIO_INT_STATUS_REG, 0x2, 8, 0
- },
- {
- 1,
- (TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_INTR2_CLEAR0),
- MAD_AUDIO_INT_CLEAR_REG, 0x2, 8, 0
- },
- {
- 1,
- (TOMTOM_REGISTER_START_OFFSET + TOMTOM_SB_PGD_PORT_TX_BASE),
- SB_PGD_PORT_TX_WATERMARK_N, 0x1E, 8, 0x1
- },
- {
- 1,
- (TOMTOM_REGISTER_START_OFFSET + TOMTOM_SB_PGD_PORT_TX_BASE),
- SB_PGD_PORT_TX_ENABLE_N, 0x1, 8, 0x1
- },
- {
- 1,
- (TOMTOM_REGISTER_START_OFFSET + TOMTOM_SB_PGD_PORT_RX_BASE),
- SB_PGD_PORT_RX_WATERMARK_N, 0x1E, 8, 0x1
- },
- {
- 1,
- (TOMTOM_REGISTER_START_OFFSET + TOMTOM_SB_PGD_PORT_RX_BASE),
- SB_PGD_PORT_RX_ENABLE_N, 0x1, 8, 0x1
- },
- { 1,
- (TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_CDC_ANC1_IIR_B1_CTL),
- AANC_FF_GAIN_ADAPTIVE, 0x4, 8, 0
- },
- { 1,
- (TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_CDC_ANC1_IIR_B1_CTL),
- AANC_FFGAIN_ADAPTIVE_EN, 0x8, 8, 0
- },
- {
- 1,
- (TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_CDC_ANC1_GAIN_CTL),
- AANC_GAIN_CONTROL, 0xFF, 8, 0
- },
- {
- 1,
- (TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_INTR2_MASK0),
- MAD_CLIP_INT_MASK_REG, 0x10, 8, 0
- },
- {
- 1,
- (TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_INTR2_MASK0),
- MAD2_CLIP_INT_MASK_REG, 0x20, 8, 0
- },
- {
- 1,
- (TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_INTR2_STATUS0),
- MAD_CLIP_INT_STATUS_REG, 0x10, 8, 0
- },
- {
- 1,
- (TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_INTR2_STATUS0),
- MAD2_CLIP_INT_STATUS_REG, 0x20, 8, 0
- },
- {
- 1,
- (TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_INTR2_CLEAR0),
- MAD_CLIP_INT_CLEAR_REG, 0x10, 8, 0
- },
- {
- 1,
- (TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_INTR2_CLEAR0),
- MAD2_CLIP_INT_CLEAR_REG, 0x20, 8, 0
- },
-};
-
-static struct afe_param_cdc_reg_cfg clip_reg_cfg[] = {
- {
- 1,
- (TOMTOM_REGISTER_START_OFFSET +
- TOMTOM_A_CDC_SPKR_CLIPDET_B1_CTL),
- SPKR_CLIP_PIPE_BANK_SEL, 0x3, 8, 0
- },
- {
- 1,
- (TOMTOM_REGISTER_START_OFFSET +
- TOMTOM_A_CDC_SPKR_CLIPDET_VAL0),
- SPKR_CLIPDET_VAL0, 0xff, 8, 0
- },
- {
- 1,
- (TOMTOM_REGISTER_START_OFFSET +
- TOMTOM_A_CDC_SPKR_CLIPDET_VAL1),
- SPKR_CLIPDET_VAL1, 0xff, 8, 0
- },
- {
- 1,
- (TOMTOM_REGISTER_START_OFFSET +
- TOMTOM_A_CDC_SPKR_CLIPDET_VAL2),
- SPKR_CLIPDET_VAL2, 0xff, 8, 0
- },
- {
- 1,
- (TOMTOM_REGISTER_START_OFFSET +
- TOMTOM_A_CDC_SPKR_CLIPDET_VAL3),
- SPKR_CLIPDET_VAL3, 0xff, 8, 0
- },
- {
- 1,
- (TOMTOM_REGISTER_START_OFFSET +
- TOMTOM_A_CDC_SPKR_CLIPDET_VAL4),
- SPKR_CLIPDET_VAL4, 0xff, 8, 0
- },
- {
- 1,
- (TOMTOM_REGISTER_START_OFFSET +
- TOMTOM_A_CDC_SPKR_CLIPDET_VAL5),
- SPKR_CLIPDET_VAL5, 0xff, 8, 0
- },
- {
- 1,
- (TOMTOM_REGISTER_START_OFFSET +
- TOMTOM_A_CDC_SPKR_CLIPDET_VAL6),
- SPKR_CLIPDET_VAL6, 0xff, 8, 0
- },
- {
- 1,
- (TOMTOM_REGISTER_START_OFFSET +
- TOMTOM_A_CDC_SPKR_CLIPDET_VAL7),
- SPKR_CLIPDET_VAL7, 0xff, 8, 0
- },
- {
- 1,
- (TOMTOM_REGISTER_START_OFFSET +
- TOMTOM_A_CDC_SPKR2_CLIPDET_B1_CTL),
- SPKR2_CLIP_PIPE_BANK_SEL, 0x3, 8, 0
- },
- {
- 1,
- (TOMTOM_REGISTER_START_OFFSET +
- TOMTOM_A_CDC_SPKR2_CLIPDET_VAL0),
- SPKR2_CLIPDET_VAL0, 0xff, 8, 0
- },
- {
- 1,
- (TOMTOM_REGISTER_START_OFFSET +
- TOMTOM_A_CDC_SPKR2_CLIPDET_VAL1),
- SPKR2_CLIPDET_VAL1, 0xff, 8, 0
- },
- {
- 1,
- (TOMTOM_REGISTER_START_OFFSET +
- TOMTOM_A_CDC_SPKR2_CLIPDET_VAL2),
- SPKR2_CLIPDET_VAL2, 0xff, 8, 0
- },
- {
- 1,
- (TOMTOM_REGISTER_START_OFFSET +
- TOMTOM_A_CDC_SPKR2_CLIPDET_VAL3),
- SPKR2_CLIPDET_VAL3, 0xff, 8, 0
- },
- {
- 1,
- (TOMTOM_REGISTER_START_OFFSET +
- TOMTOM_A_CDC_SPKR2_CLIPDET_VAL4),
- SPKR2_CLIPDET_VAL4, 0xff, 8, 0
- },
- {
- 1,
- (TOMTOM_REGISTER_START_OFFSET +
- TOMTOM_A_CDC_SPKR2_CLIPDET_VAL5),
- SPKR2_CLIPDET_VAL5, 0xff, 8, 0
- },
- {
- 1,
- (TOMTOM_REGISTER_START_OFFSET +
- TOMTOM_A_CDC_SPKR2_CLIPDET_VAL6),
- SPKR2_CLIPDET_VAL6, 0xff, 8, 0
- },
- {
- 1,
- (TOMTOM_REGISTER_START_OFFSET +
- TOMTOM_A_CDC_SPKR2_CLIPDET_VAL7),
- SPKR2_CLIPDET_VAL7, 0xff, 8, 0
- },
-};
-
-static struct afe_param_cdc_reg_cfg_data tomtom_audio_reg_cfg = {
- .num_registers = ARRAY_SIZE(audio_reg_cfg),
- .reg_data = audio_reg_cfg,
-};
-
-static struct afe_param_cdc_reg_cfg_data tomtom_clip_reg_cfg = {
- .num_registers = ARRAY_SIZE(clip_reg_cfg),
- .reg_data = clip_reg_cfg,
-};
-
-static struct afe_param_id_cdc_aanc_version tomtom_cdc_aanc_version = {
- .cdc_aanc_minor_version = AFE_API_VERSION_CDC_AANC_VERSION,
- .aanc_hw_version = AANC_HW_BLOCK_VERSION_2,
-};
-
-static struct afe_param_id_clip_bank_sel clip_bank_sel = {
- .minor_version = AFE_API_VERSION_CLIP_BANK_SEL_CFG,
- .num_banks = AFE_CLIP_MAX_BANKS,
- .bank_map = {0, 1, 2, 3},
-};
-
-#define WCD9330_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |\
- SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_192000)
-
-#define NUM_DECIMATORS 10
-#define NUM_INTERPOLATORS 8
-#define BITS_PER_REG 8
-#define TOMTOM_TX_PORT_NUMBER 16
-#define TOMTOM_RX_PORT_START_NUMBER 16
-
-#define TOMTOM_I2S_MASTER_MODE_MASK 0x08
-
-#define TOMTOM_SLIM_CLOSE_TIMEOUT 1000
-#define TOMTOM_SLIM_IRQ_OVERFLOW (1 << 0)
-#define TOMTOM_SLIM_IRQ_UNDERFLOW (1 << 1)
-#define TOMTOM_SLIM_IRQ_PORT_CLOSED (1 << 2)
-#define TOMTOM_MCLK_CLK_12P288MHZ 12288000
-#define TOMTOM_MCLK_CLK_9P6MHZ 9600000
-
-#define TOMTOM_FORMATS_S16_S24_LE (SNDRV_PCM_FMTBIT_S16_LE | \
- SNDRV_PCM_FORMAT_S24_LE)
-
-#define TOMTOM_FORMATS (SNDRV_PCM_FMTBIT_S16_LE)
-
-#define TOMTOM_SLIM_PGD_PORT_INT_TX_EN0 (TOMTOM_SLIM_PGD_PORT_INT_EN0 + 2)
-#define TOMTOM_ZDET_BOX_CAR_AVG_LOOP_COUNT 1
-#define TOMTOM_ZDET_MUL_FACTOR_1X 7218
-#define TOMTOM_ZDET_MUL_FACTOR_10X (TOMTOM_ZDET_MUL_FACTOR_1X * 10)
-#define TOMTOM_ZDET_MUL_FACTOR_100X (TOMTOM_ZDET_MUL_FACTOR_1X * 100)
-#define TOMTOM_ZDET_ERROR_APPROX_MUL_FACTOR 655
-#define TOMTOM_ZDET_ERROR_APPROX_SHIFT 16
-#define TOMTOM_ZDET_ZONE_3_DEFAULT_VAL 1000000
-
-enum {
- AIF1_PB = 0,
- AIF1_CAP,
- AIF2_PB,
- AIF2_CAP,
- AIF3_PB,
- AIF3_CAP,
- AIF4_VIFEED,
- AIF4_MAD_TX,
- NUM_CODEC_DAIS,
-};
-
-enum {
- RX_MIX1_INP_SEL_ZERO = 0,
- RX_MIX1_INP_SEL_SRC1,
- RX_MIX1_INP_SEL_SRC2,
- RX_MIX1_INP_SEL_IIR1,
- RX_MIX1_INP_SEL_IIR2,
- RX_MIX1_INP_SEL_RX1,
- RX_MIX1_INP_SEL_RX2,
- RX_MIX1_INP_SEL_RX3,
- RX_MIX1_INP_SEL_RX4,
- RX_MIX1_INP_SEL_RX5,
- RX_MIX1_INP_SEL_RX6,
- RX_MIX1_INP_SEL_RX7,
- RX_MIX1_INP_SEL_AUXRX,
-};
-enum {
- RX8_MIX1_INP_SEL_ZERO = 0,
- RX8_MIX1_INP_SEL_IIR1,
- RX8_MIX1_INP_SEL_IIR2,
- RX8_MIX1_INP_SEL_RX1,
- RX8_MIX1_INP_SEL_RX2,
- RX8_MIX1_INP_SEL_RX3,
- RX8_MIX1_INP_SEL_RX4,
- RX8_MIX1_INP_SEL_RX5,
- RX8_MIX1_INP_SEL_RX6,
- RX8_MIX1_INP_SEL_RX7,
- RX8_MIX1_INP_SEL_RX8,
-};
-
-#define TOMTOM_COMP_DIGITAL_GAIN_OFFSET 3
-
-static const DECLARE_TLV_DB_SCALE(digital_gain, 0, 1, 0);
-static const DECLARE_TLV_DB_SCALE(line_gain, 0, 7, 1);
-static const DECLARE_TLV_DB_SCALE(analog_gain, 0, 25, 1);
-static struct snd_soc_dai_driver tomtom_dai[];
-static const DECLARE_TLV_DB_SCALE(aux_pga_gain, 0, 2, 0);
-
-/* Codec supports 2 IIR filters */
-enum {
- IIR1 = 0,
- IIR2,
- IIR_MAX,
-};
-/* Codec supports 5 bands */
-enum {
- BAND1 = 0,
- BAND2,
- BAND3,
- BAND4,
- BAND5,
- BAND_MAX,
-};
-
-enum {
- COMPANDER_0,
- COMPANDER_1,
- COMPANDER_2,
- COMPANDER_MAX,
-};
-
-enum {
- COMPANDER_FS_8KHZ = 0,
- COMPANDER_FS_16KHZ,
- COMPANDER_FS_32KHZ,
- COMPANDER_FS_48KHZ,
- COMPANDER_FS_96KHZ,
- COMPANDER_FS_192KHZ,
- COMPANDER_FS_MAX,
-};
-
-struct comp_sample_dependent_params {
- u32 peak_det_timeout;
- u32 rms_meter_div_fact;
- u32 rms_meter_resamp_fact;
-};
-
-struct hpf_work {
- struct tomtom_priv *tomtom;
- u32 decimator;
- u8 tx_hpf_cut_of_freq;
- bool tx_hpf_bypass;
- struct delayed_work dwork;
-};
-
-static struct hpf_work tx_hpf_work[NUM_DECIMATORS];
-
-static const struct wcd9xxx_ch tomtom_rx_chs[TOMTOM_RX_MAX] = {
- WCD9XXX_CH(TOMTOM_RX_PORT_START_NUMBER, 0),
- WCD9XXX_CH(TOMTOM_RX_PORT_START_NUMBER + 1, 1),
- WCD9XXX_CH(TOMTOM_RX_PORT_START_NUMBER + 2, 2),
- WCD9XXX_CH(TOMTOM_RX_PORT_START_NUMBER + 3, 3),
- WCD9XXX_CH(TOMTOM_RX_PORT_START_NUMBER + 4, 4),
- WCD9XXX_CH(TOMTOM_RX_PORT_START_NUMBER + 5, 5),
- WCD9XXX_CH(TOMTOM_RX_PORT_START_NUMBER + 6, 6),
- WCD9XXX_CH(TOMTOM_RX_PORT_START_NUMBER + 7, 7),
- WCD9XXX_CH(TOMTOM_RX_PORT_START_NUMBER + 8, 8),
- WCD9XXX_CH(TOMTOM_RX_PORT_START_NUMBER + 9, 9),
- WCD9XXX_CH(TOMTOM_RX_PORT_START_NUMBER + 10, 10),
- WCD9XXX_CH(TOMTOM_RX_PORT_START_NUMBER + 11, 11),
- WCD9XXX_CH(TOMTOM_RX_PORT_START_NUMBER + 12, 12),
-};
-
-static const struct wcd9xxx_ch tomtom_tx_chs[TOMTOM_TX_MAX] = {
- WCD9XXX_CH(0, 0),
- WCD9XXX_CH(1, 1),
- WCD9XXX_CH(2, 2),
- WCD9XXX_CH(3, 3),
- WCD9XXX_CH(4, 4),
- WCD9XXX_CH(5, 5),
- WCD9XXX_CH(6, 6),
- WCD9XXX_CH(7, 7),
- WCD9XXX_CH(8, 8),
- WCD9XXX_CH(9, 9),
- WCD9XXX_CH(10, 10),
- WCD9XXX_CH(11, 11),
- WCD9XXX_CH(12, 12),
- WCD9XXX_CH(13, 13),
- WCD9XXX_CH(14, 14),
- WCD9XXX_CH(15, 15),
-};
-
-static const u32 vport_check_table[NUM_CODEC_DAIS] = {
- 0, /* AIF1_PB */
- (1 << AIF2_CAP) | (1 << AIF3_CAP), /* AIF1_CAP */
- 0, /* AIF2_PB */
- (1 << AIF1_CAP) | (1 << AIF3_CAP), /* AIF2_CAP */
- 0, /* AIF3_PB */
- (1 << AIF1_CAP) | (1 << AIF2_CAP), /* AIF3_CAP */
-};
-
-static const u32 vport_i2s_check_table[NUM_CODEC_DAIS] = {
- 0, /* AIF1_PB */
- 0, /* AIF1_CAP */
- 0, /* AIF2_PB */
- 0, /* AIF2_CAP */
-};
-
-/*
- * Interrupt table for v3 corresponds to newer version
- * codecs (wcd9330)
- */
-static const struct intr_data wcd9330_intr_tbl[] = {
- {WCD9XXX_IRQ_SLIMBUS, false},
- {WCD9XXX_IRQ_MBHC_INSERTION, true},
- {WCD9XXX_IRQ_MBHC_POTENTIAL, true},
- {WCD9XXX_IRQ_MBHC_RELEASE, true},
- {WCD9XXX_IRQ_MBHC_PRESS, true},
- {WCD9XXX_IRQ_MBHC_SHORT_TERM, true},
- {WCD9XXX_IRQ_MBHC_REMOVAL, true},
- {WCD9330_IRQ_MBHC_JACK_SWITCH, true},
- {WCD9XXX_IRQ_BG_PRECHARGE, false},
- {WCD9XXX_IRQ_PA1_STARTUP, false},
- {WCD9XXX_IRQ_PA2_STARTUP, false},
- {WCD9XXX_IRQ_PA3_STARTUP, false},
- {WCD9XXX_IRQ_PA4_STARTUP, false},
- {WCD9XXX_IRQ_PA5_STARTUP, false},
- {WCD9XXX_IRQ_MICBIAS1_PRECHARGE, false},
- {WCD9XXX_IRQ_MICBIAS2_PRECHARGE, false},
- {WCD9XXX_IRQ_MICBIAS3_PRECHARGE, false},
- {WCD9XXX_IRQ_HPH_PA_OCPL_FAULT, false},
- {WCD9XXX_IRQ_HPH_PA_OCPR_FAULT, false},
- {WCD9XXX_IRQ_EAR_PA_OCPL_FAULT, false},
- {WCD9XXX_IRQ_HPH_L_PA_STARTUP, false},
- {WCD9XXX_IRQ_HPH_R_PA_STARTUP, false},
- {WCD9320_IRQ_EAR_PA_STARTUP, false},
- {WCD9330_IRQ_SVASS_ERR_EXCEPTION, false},
- {WCD9330_IRQ_SVASS_ENGINE, true},
- {WCD9330_IRQ_MAD_AUDIO, false},
- {WCD9330_IRQ_MAD_BEACON, false},
- {WCD9330_IRQ_MAD_ULTRASOUND, false},
- {WCD9330_IRQ_SPEAKER1_CLIPPING, false},
- {WCD9330_IRQ_SPEAKER2_CLIPPING, false},
- {WCD9330_IRQ_VBAT_MONITOR_ATTACK, false},
- {WCD9330_IRQ_VBAT_MONITOR_RELEASE, false},
-};
-
-struct tomtom_priv {
- struct snd_soc_codec *codec;
- u32 adc_count;
- u32 rx_bias_count;
- s32 dmic_1_2_clk_cnt;
- s32 dmic_3_4_clk_cnt;
- s32 dmic_5_6_clk_cnt;
- s32 ldo_h_users;
- s32 micb_2_users;
-
- u32 anc_slot;
- bool anc_func;
-
- /* cal info for codec */
- struct fw_info *fw_data;
-
- /*track tomtom interface type*/
- u8 intf_type;
-
- /* num of slim ports required */
- struct wcd9xxx_codec_dai_data dai[NUM_CODEC_DAIS];
-
- /*compander*/
- int comp_enabled[COMPANDER_MAX];
- u32 comp_fs[COMPANDER_MAX];
-
- /* Maintain the status of AUX PGA */
- int aux_pga_cnt;
- u8 aux_l_gain;
- u8 aux_r_gain;
-
- bool spkr_pa_widget_on;
- struct regulator *spkdrv_reg;
- struct regulator *spkdrv2_reg;
-
- bool mbhc_started;
-
- struct afe_param_cdc_slimbus_slave_cfg slimbus_slave_cfg;
-
- /* resmgr module */
- struct wcd9xxx_resmgr resmgr;
- /* mbhc module */
- struct wcd9xxx_mbhc mbhc;
-
- /* class h specific data */
- struct wcd9xxx_clsh_cdc_data clsh_d;
-
- int (*machine_codec_event_cb)(struct snd_soc_codec *codec,
- enum wcd9xxx_codec_event);
- int (*codec_ext_clk_en_cb)(struct snd_soc_codec *codec,
- int enable, bool dapm);
- int (*codec_get_ext_clk_cnt)(void);
- /*
- * list used to save/restore registers at start and
- * end of impedance measurement
- */
- struct list_head reg_save_restore;
-
- /* handle to cpe core */
- struct wcd_cpe_core *cpe_core;
-
- /* UHQA (class AB) mode */
- u8 uhqa_mode;
-
- /* Multiplication factor used for impedance detection */
- int zdet_gain_mul_fact;
-
- /* to track the status */
- unsigned long status_mask;
-
- int ext_clk_users;
- struct clk *wcd_ext_clk;
-
- /* Port values for Rx and Tx codec_dai */
- unsigned int rx_port_value;
- unsigned int tx_port_value;
-
- struct mutex codec_mutex;
-};
-
-static const u32 comp_shift[] = {
- 4, /* Compander 0's clock source is on interpolator 7 */
- 0,
- 2,
-};
-
-static const int comp_rx_path[] = {
- COMPANDER_1,
- COMPANDER_1,
- COMPANDER_2,
- COMPANDER_2,
- COMPANDER_2,
- COMPANDER_2,
- COMPANDER_0,
- COMPANDER_0,
- COMPANDER_MAX,
-};
-
-static const struct comp_sample_dependent_params comp_samp_params[] = {
- {
- /* 8 Khz */
- .peak_det_timeout = 0x06,
- .rms_meter_div_fact = 0x09,
- .rms_meter_resamp_fact = 0x06,
- },
- {
- /* 16 Khz */
- .peak_det_timeout = 0x07,
- .rms_meter_div_fact = 0x0A,
- .rms_meter_resamp_fact = 0x0C,
- },
- {
- /* 32 Khz */
- .peak_det_timeout = 0x08,
- .rms_meter_div_fact = 0x0B,
- .rms_meter_resamp_fact = 0x1E,
- },
- {
- /* 48 Khz */
- .peak_det_timeout = 0x09,
- .rms_meter_div_fact = 0x0B,
- .rms_meter_resamp_fact = 0x28,
- },
- {
- /* 96 Khz */
- .peak_det_timeout = 0x0A,
- .rms_meter_div_fact = 0x0C,
- .rms_meter_resamp_fact = 0x50,
- },
- {
- /* 192 Khz */
- .peak_det_timeout = 0x0B,
- .rms_meter_div_fact = 0xC,
- .rms_meter_resamp_fact = 0xA0,
- },
-};
-
-static unsigned short rx_digital_gain_reg[] = {
- TOMTOM_A_CDC_RX1_VOL_CTL_B2_CTL,
- TOMTOM_A_CDC_RX2_VOL_CTL_B2_CTL,
- TOMTOM_A_CDC_RX3_VOL_CTL_B2_CTL,
- TOMTOM_A_CDC_RX4_VOL_CTL_B2_CTL,
- TOMTOM_A_CDC_RX5_VOL_CTL_B2_CTL,
- TOMTOM_A_CDC_RX6_VOL_CTL_B2_CTL,
- TOMTOM_A_CDC_RX7_VOL_CTL_B2_CTL,
- TOMTOM_A_CDC_RX8_VOL_CTL_B2_CTL,
-};
-
-
-static unsigned short tx_digital_gain_reg[] = {
- TOMTOM_A_CDC_TX1_VOL_CTL_GAIN,
- TOMTOM_A_CDC_TX2_VOL_CTL_GAIN,
- TOMTOM_A_CDC_TX3_VOL_CTL_GAIN,
- TOMTOM_A_CDC_TX4_VOL_CTL_GAIN,
- TOMTOM_A_CDC_TX5_VOL_CTL_GAIN,
- TOMTOM_A_CDC_TX6_VOL_CTL_GAIN,
- TOMTOM_A_CDC_TX7_VOL_CTL_GAIN,
- TOMTOM_A_CDC_TX8_VOL_CTL_GAIN,
- TOMTOM_A_CDC_TX9_VOL_CTL_GAIN,
- TOMTOM_A_CDC_TX10_VOL_CTL_GAIN,
-};
-
-/*
- * wcd9330_get_codec_info: Get codec specific information
- *
- * @wcd9xxx: pointer to wcd9xxx structure
- * @wcd_type: pointer to wcd9xxx_codec_type structure
- *
- * Returns 0 for success or negative error code for failure
- */
-int wcd9330_get_codec_info(struct wcd9xxx *wcd9xxx,
- struct wcd9xxx_codec_type *wcd_type)
-{
- u16 id_minor, id_major;
- struct regmap *wcd_regmap;
- int rc, val, version = 0;
-
- if (!wcd9xxx || !wcd_type)
- return -EINVAL;
-
- if (!wcd9xxx->regmap) {
- dev_err(wcd9xxx->dev, "%s: wcd9xxx regmap is null!\n",
- __func__);
- return -EINVAL;
- }
- wcd_regmap = wcd9xxx->regmap;
- rc = regmap_bulk_read(wcd_regmap, TOMTOM_A_CHIP_ID_BYTE_0,
- (u8 *)&id_minor, sizeof(u16));
- if (rc)
- return -EINVAL;
-
- rc = regmap_bulk_read(wcd_regmap, TOMTOM_A_CHIP_ID_BYTE_2,
- (u8 *)&id_major, sizeof(u16));
- if (rc)
- return -EINVAL;
-
- dev_info(wcd9xxx->dev, "%s: wcd9xxx chip id major 0x%x, minor 0x%x\n",
- __func__, id_major, id_minor);
-
- if (id_minor == cpu_to_le16(0x1))
- version = 2;
- else if (id_minor == cpu_to_le16(0x0))
- version = 1;
- else
- dev_err(wcd9xxx->dev, "%s: wcd9330 version unknown (major 0x%x, minor 0x%x)\n",
- __func__, id_major, id_minor);
-
- /* Fill codec type info */
- wcd_type->id_major = id_major;
- wcd_type->id_minor = id_minor;
- wcd_type->num_irqs = WCD9330_NUM_IRQS;
- wcd_type->version = version;
- wcd_type->slim_slave_type = WCD9XXX_SLIM_SLAVE_ADDR_TYPE_1;
- wcd_type->i2c_chip_status = 0x01;
- wcd_type->intr_tbl = wcd9330_intr_tbl;
- wcd_type->intr_tbl_size = ARRAY_SIZE(wcd9330_intr_tbl);
-
- wcd_type->intr_reg[WCD9XXX_INTR_STATUS_BASE] =
- TOMTOM_A_INTR1_STATUS0;
- wcd_type->intr_reg[WCD9XXX_INTR_CLEAR_BASE] =
- TOMTOM_A_INTR1_CLEAR0;
- wcd_type->intr_reg[WCD9XXX_INTR_MASK_BASE] =
- TOMTOM_A_INTR1_MASK0;
- wcd_type->intr_reg[WCD9XXX_INTR_LEVEL_BASE] =
- TOMTOM_A_INTR1_LEVEL0;
- wcd_type->intr_reg[WCD9XXX_INTR_CLR_COMMIT] =
- TOMTOM_A_INTR_MODE;
-
- return rc;
-}
-EXPORT_SYMBOL(wcd9330_get_codec_info);
-
-/*
- * wcd9330_bringdown: Bringdown WCD Codec
- *
- * @wcd9xxx: Pointer to wcd9xxx structure
- *
- * Returns 0 for success or negative error code for failure
- */
-int wcd9330_bringdown(struct wcd9xxx *wcd9xxx)
-{
- if (!wcd9xxx || !wcd9xxx->regmap)
- return -EINVAL;
-
- regmap_write(wcd9xxx->regmap, TOMTOM_A_LEAKAGE_CTL, 0x7);
- regmap_write(wcd9xxx->regmap, TOMTOM_A_LEAKAGE_CTL, 0x6);
- regmap_write(wcd9xxx->regmap, TOMTOM_A_LEAKAGE_CTL, 0xe);
- regmap_write(wcd9xxx->regmap, TOMTOM_A_LEAKAGE_CTL, 0x8);
-
- return 0;
-}
-EXPORT_SYMBOL(wcd9330_bringdown);
-
-/*
- * wcd9330_bringup: Bring up WCD Codec
- *
- * @wcd9xxx: Pointer to wcd9xxx structure
- *
- * Returns 0 for success or negative error code for failure
- */
-int wcd9330_bringup(struct wcd9xxx *wcd9xxx)
-{
- if (!wcd9xxx || !wcd9xxx->regmap)
- return -EINVAL;
-
- regmap_write(wcd9xxx->regmap, TOMTOM_A_LEAKAGE_CTL, 0x4);
- regmap_write(wcd9xxx->regmap, TOMTOM_A_CDC_CTL, 0x0);
- /* wait for 5ms after codec reset for it to complete */
- usleep_range(5000, 5100);
- regmap_write(wcd9xxx->regmap, TOMTOM_A_CDC_CTL, 0x1);
- regmap_write(wcd9xxx->regmap, TOMTOM_A_LEAKAGE_CTL, 0x3);
- regmap_write(wcd9xxx->regmap, TOMTOM_A_CDC_CTL, 0x3);
-
- return 0;
-}
-EXPORT_SYMBOL(wcd9330_bringup);
-
-int tomtom_enable_qfuse_sensing(struct snd_soc_codec *codec)
-{
- struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-
- if (tomtom->wcd_ext_clk)
- tomtom_codec_mclk_enable(codec, true, false);
-
- snd_soc_write(codec, TOMTOM_A_QFUSE_CTL, 0x03);
- /*
- * 5ms sleep required after enabling qfuse control
- * before checking the status.
- */
- usleep_range(5000, 5500);
- if ((snd_soc_read(codec, TOMTOM_A_QFUSE_STATUS) & (0x03)) != 0x03)
- WARN(1, "%s: Qfuse sense is not complete\n", __func__);
-
- if (tomtom->wcd_ext_clk)
- tomtom_codec_mclk_enable(codec, false, false);
- return 0;
-}
-EXPORT_SYMBOL(tomtom_enable_qfuse_sensing);
-
-static int tomtom_get_sample_rate(struct snd_soc_codec *codec, int path)
-{
- if (path == RX8_PATH)
- return snd_soc_read(codec, TOMTOM_A_CDC_RX8_B5_CTL);
- else
- return snd_soc_read(codec,
- (TOMTOM_A_CDC_RX1_B5_CTL + 8 * (path - 1)));
-}
-
-static int tomtom_compare_bit_format(struct snd_soc_codec *codec,
- int bit_format)
-{
- int i = 0;
- int ret = 0;
- struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec);
-
- for (i = 0; i < NUM_CODEC_DAIS; i++) {
- if (tomtom_p->dai[i].bit_width == bit_format) {
- ret = 1;
- break;
- }
- }
- return ret;
-}
-
-static int tomtom_update_uhqa_mode(struct snd_soc_codec *codec, int path)
-{
- int ret = 0;
- struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec);
-
- /* UHQA path has fs=192KHz & bit=24 bit */
- if (((tomtom_get_sample_rate(codec, path) & 0xE0) == 0xA0) &&
- (tomtom_compare_bit_format(codec, 24))) {
- tomtom_p->uhqa_mode = 1;
- } else {
- tomtom_p->uhqa_mode = 0;
- }
- dev_dbg(codec->dev, "%s: uhqa_mode=%d", __func__, tomtom_p->uhqa_mode);
- return ret;
-}
-
-static int tomtom_get_anc_slot(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-
- ucontrol->value.integer.value[0] = tomtom->anc_slot;
- return 0;
-}
-
-static int tomtom_put_anc_slot(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-
- tomtom->anc_slot = ucontrol->value.integer.value[0];
- return 0;
-}
-
-static int tomtom_get_anc_func(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-
- ucontrol->value.integer.value[0] = (tomtom->anc_func == true ? 1 : 0);
- return 0;
-}
-
-static int tomtom_put_anc_func(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
- struct snd_soc_dapm_context *dapm =
- snd_soc_codec_get_dapm(codec);
-
- mutex_lock(&tomtom->codec_mutex);
- tomtom->anc_func = (!ucontrol->value.integer.value[0] ? false : true);
-
- dev_dbg(codec->dev, "%s: anc_func %x", __func__, tomtom->anc_func);
-
- if (tomtom->anc_func == true) {
- snd_soc_dapm_enable_pin(dapm, "ANC HPHR");
- snd_soc_dapm_enable_pin(dapm, "ANC HPHL");
- snd_soc_dapm_enable_pin(dapm, "ANC HEADPHONE");
- snd_soc_dapm_enable_pin(dapm, "ANC EAR PA");
- snd_soc_dapm_enable_pin(dapm, "ANC EAR");
- snd_soc_dapm_disable_pin(dapm, "HPHR");
- snd_soc_dapm_disable_pin(dapm, "HPHL");
- snd_soc_dapm_disable_pin(dapm, "HEADPHONE");
- snd_soc_dapm_disable_pin(dapm, "EAR PA");
- snd_soc_dapm_disable_pin(dapm, "EAR");
- } else {
- snd_soc_dapm_disable_pin(dapm, "ANC HPHR");
- snd_soc_dapm_disable_pin(dapm, "ANC HPHL");
- snd_soc_dapm_disable_pin(dapm, "ANC HEADPHONE");
- snd_soc_dapm_disable_pin(dapm, "ANC EAR PA");
- snd_soc_dapm_disable_pin(dapm, "ANC EAR");
- snd_soc_dapm_enable_pin(dapm, "HPHR");
- snd_soc_dapm_enable_pin(dapm, "HPHL");
- snd_soc_dapm_enable_pin(dapm, "HEADPHONE");
- snd_soc_dapm_enable_pin(dapm, "EAR PA");
- snd_soc_dapm_enable_pin(dapm, "EAR");
- }
- mutex_unlock(&tomtom->codec_mutex);
- snd_soc_dapm_sync(dapm);
- return 0;
-}
-
-static int tomtom_get_iir_enable_audio_mixer(
- struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- int iir_idx = ((struct soc_multi_mixer_control *)
- kcontrol->private_value)->reg;
- int band_idx = ((struct soc_multi_mixer_control *)
- kcontrol->private_value)->shift;
-
- ucontrol->value.integer.value[0] =
- (snd_soc_read(codec, (TOMTOM_A_CDC_IIR1_CTL + 16 * iir_idx)) &
- (1 << band_idx)) != 0;
-
- pr_debug("%s: IIR #%d band #%d enable %d\n", __func__,
- iir_idx, band_idx,
- (uint32_t)ucontrol->value.integer.value[0]);
- return 0;
-}
-
-static int tomtom_put_iir_enable_audio_mixer(
- struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- int iir_idx = ((struct soc_multi_mixer_control *)
- kcontrol->private_value)->reg;
- int band_idx = ((struct soc_multi_mixer_control *)
- kcontrol->private_value)->shift;
- int value = ucontrol->value.integer.value[0];
-
- /* Mask first 5 bits, 6-8 are reserved */
- snd_soc_update_bits(codec, (TOMTOM_A_CDC_IIR1_CTL + 16 * iir_idx),
- (1 << band_idx), (value << band_idx));
-
- pr_debug("%s: IIR #%d band #%d enable %d\n", __func__,
- iir_idx, band_idx,
- ((snd_soc_read(codec, (TOMTOM_A_CDC_IIR1_CTL + 16 * iir_idx)) &
- (1 << band_idx)) != 0));
- return 0;
-}
-static uint32_t get_iir_band_coeff(struct snd_soc_codec *codec,
- int iir_idx, int band_idx,
- int coeff_idx)
-{
- uint32_t value = 0;
-
- /* Address does not automatically update if reading */
- snd_soc_write(codec,
- (TOMTOM_A_CDC_IIR1_COEF_B1_CTL + 16 * iir_idx),
- ((band_idx * BAND_MAX + coeff_idx)
- * sizeof(uint32_t)) & 0x7F);
-
- value |= snd_soc_read(codec,
- (TOMTOM_A_CDC_IIR1_COEF_B2_CTL + 16 * iir_idx));
-
- snd_soc_write(codec,
- (TOMTOM_A_CDC_IIR1_COEF_B1_CTL + 16 * iir_idx),
- ((band_idx * BAND_MAX + coeff_idx)
- * sizeof(uint32_t) + 1) & 0x7F);
-
- value |= (snd_soc_read(codec,
- (TOMTOM_A_CDC_IIR1_COEF_B2_CTL + 16 * iir_idx)) << 8);
-
- snd_soc_write(codec,
- (TOMTOM_A_CDC_IIR1_COEF_B1_CTL + 16 * iir_idx),
- ((band_idx * BAND_MAX + coeff_idx)
- * sizeof(uint32_t) + 2) & 0x7F);
-
- value |= (snd_soc_read(codec,
- (TOMTOM_A_CDC_IIR1_COEF_B2_CTL + 16 * iir_idx)) << 16);
-
- snd_soc_write(codec,
- (TOMTOM_A_CDC_IIR1_COEF_B1_CTL + 16 * iir_idx),
- ((band_idx * BAND_MAX + coeff_idx)
- * sizeof(uint32_t) + 3) & 0x7F);
-
- /* Mask bits top 2 bits since they are reserved */
- value |= ((snd_soc_read(codec,
- (TOMTOM_A_CDC_IIR1_COEF_B2_CTL + 16 * iir_idx)) & 0x3F) << 24);
-
- return value;
-}
-
-static int tomtom_get_iir_band_audio_mixer(
- struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- int iir_idx = ((struct soc_multi_mixer_control *)
- kcontrol->private_value)->reg;
- int band_idx = ((struct soc_multi_mixer_control *)
- kcontrol->private_value)->shift;
-
- ucontrol->value.integer.value[0] =
- get_iir_band_coeff(codec, iir_idx, band_idx, 0);
- ucontrol->value.integer.value[1] =
- get_iir_band_coeff(codec, iir_idx, band_idx, 1);
- ucontrol->value.integer.value[2] =
- get_iir_band_coeff(codec, iir_idx, band_idx, 2);
- ucontrol->value.integer.value[3] =
- get_iir_band_coeff(codec, iir_idx, band_idx, 3);
- ucontrol->value.integer.value[4] =
- get_iir_band_coeff(codec, iir_idx, band_idx, 4);
-
- pr_debug("%s: IIR #%d band #%d b0 = 0x%x\n"
- "%s: IIR #%d band #%d b1 = 0x%x\n"
- "%s: IIR #%d band #%d b2 = 0x%x\n"
- "%s: IIR #%d band #%d a1 = 0x%x\n"
- "%s: IIR #%d band #%d a2 = 0x%x\n",
- __func__, iir_idx, band_idx,
- (uint32_t)ucontrol->value.integer.value[0],
- __func__, iir_idx, band_idx,
- (uint32_t)ucontrol->value.integer.value[1],
- __func__, iir_idx, band_idx,
- (uint32_t)ucontrol->value.integer.value[2],
- __func__, iir_idx, band_idx,
- (uint32_t)ucontrol->value.integer.value[3],
- __func__, iir_idx, band_idx,
- (uint32_t)ucontrol->value.integer.value[4]);
- return 0;
-}
-
-static void set_iir_band_coeff(struct snd_soc_codec *codec,
- int iir_idx, int band_idx,
- uint32_t value)
-{
- snd_soc_write(codec,
- (TOMTOM_A_CDC_IIR1_COEF_B2_CTL + 16 * iir_idx),
- (value & 0xFF));
-
- snd_soc_write(codec,
- (TOMTOM_A_CDC_IIR1_COEF_B2_CTL + 16 * iir_idx),
- (value >> 8) & 0xFF);
-
- snd_soc_write(codec,
- (TOMTOM_A_CDC_IIR1_COEF_B2_CTL + 16 * iir_idx),
- (value >> 16) & 0xFF);
-
- /* Mask top 2 bits, 7-8 are reserved */
- snd_soc_write(codec,
- (TOMTOM_A_CDC_IIR1_COEF_B2_CTL + 16 * iir_idx),
- (value >> 24) & 0x3F);
-}
-
-static int tomtom_put_iir_band_audio_mixer(
- struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- int iir_idx = ((struct soc_multi_mixer_control *)
- kcontrol->private_value)->reg;
- int band_idx = ((struct soc_multi_mixer_control *)
- kcontrol->private_value)->shift;
-
- /* Mask top bit it is reserved */
- /* Updates addr automatically for each B2 write */
- snd_soc_write(codec,
- (TOMTOM_A_CDC_IIR1_COEF_B1_CTL + 16 * iir_idx),
- (band_idx * BAND_MAX * sizeof(uint32_t)) & 0x7F);
-
- set_iir_band_coeff(codec, iir_idx, band_idx,
- ucontrol->value.integer.value[0]);
- set_iir_band_coeff(codec, iir_idx, band_idx,
- ucontrol->value.integer.value[1]);
- set_iir_band_coeff(codec, iir_idx, band_idx,
- ucontrol->value.integer.value[2]);
- set_iir_band_coeff(codec, iir_idx, band_idx,
- ucontrol->value.integer.value[3]);
- set_iir_band_coeff(codec, iir_idx, band_idx,
- ucontrol->value.integer.value[4]);
-
- pr_debug("%s: IIR #%d band #%d b0 = 0x%x\n"
- "%s: IIR #%d band #%d b1 = 0x%x\n"
- "%s: IIR #%d band #%d b2 = 0x%x\n"
- "%s: IIR #%d band #%d a1 = 0x%x\n"
- "%s: IIR #%d band #%d a2 = 0x%x\n",
- __func__, iir_idx, band_idx,
- get_iir_band_coeff(codec, iir_idx, band_idx, 0),
- __func__, iir_idx, band_idx,
- get_iir_band_coeff(codec, iir_idx, band_idx, 1),
- __func__, iir_idx, band_idx,
- get_iir_band_coeff(codec, iir_idx, band_idx, 2),
- __func__, iir_idx, band_idx,
- get_iir_band_coeff(codec, iir_idx, band_idx, 3),
- __func__, iir_idx, band_idx,
- get_iir_band_coeff(codec, iir_idx, band_idx, 4));
- return 0;
-}
-
-static int tomtom_get_compander(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
-
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- int comp = ((struct soc_multi_mixer_control *)
- kcontrol->private_value)->shift;
- struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-
- ucontrol->value.integer.value[0] = tomtom->comp_enabled[comp];
- return 0;
-}
-
-static int tomtom_set_compander(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
- int comp = ((struct soc_multi_mixer_control *)
- kcontrol->private_value)->shift;
- int value = ucontrol->value.integer.value[0];
-
- pr_debug("%s: Compander %d enable current %d, new %d\n",
- __func__, comp, tomtom->comp_enabled[comp], value);
- tomtom->comp_enabled[comp] = value;
-
- if (comp == COMPANDER_1 &&
- tomtom->comp_enabled[comp] == 1) {
- /* Wavegen to 5 msec */
- snd_soc_write(codec, TOMTOM_A_RX_HPH_CNP_WG_CTL, 0xDB);
- snd_soc_write(codec, TOMTOM_A_RX_HPH_CNP_WG_TIME, 0x2A);
- snd_soc_write(codec, TOMTOM_A_RX_HPH_BIAS_WG_OCP, 0x2A);
-
- /* Enable Chopper */
- snd_soc_update_bits(codec,
- TOMTOM_A_RX_HPH_CHOP_CTL, 0x80, 0x80);
-
- snd_soc_write(codec, TOMTOM_A_NCP_DTEST, 0x20);
- pr_debug("%s: Enabled Chopper and set wavegen to 5 msec\n",
- __func__);
- } else if (comp == COMPANDER_1 &&
- tomtom->comp_enabled[comp] == 0) {
- /* Wavegen to 20 msec */
- snd_soc_write(codec, TOMTOM_A_RX_HPH_CNP_WG_CTL, 0xDB);
- snd_soc_write(codec, TOMTOM_A_RX_HPH_CNP_WG_TIME, 0x58);
- snd_soc_write(codec, TOMTOM_A_RX_HPH_BIAS_WG_OCP, 0x1A);
-
- /* Disable CHOPPER block */
- snd_soc_update_bits(codec,
- TOMTOM_A_RX_HPH_CHOP_CTL, 0x80, 0x00);
-
- snd_soc_write(codec, TOMTOM_A_NCP_DTEST, 0x10);
- pr_debug("%s: Disabled Chopper and set wavegen to 20 msec\n",
- __func__);
- }
- return 0;
-}
-
-static int tomtom_config_gain_compander(struct snd_soc_codec *codec,
- int comp, bool enable)
-{
- int ret = 0;
-
- switch (comp) {
- case COMPANDER_0:
- snd_soc_update_bits(codec, TOMTOM_A_SPKR_DRV1_GAIN,
- 1 << 2, !enable << 2);
- snd_soc_update_bits(codec, TOMTOM_A_SPKR_DRV2_GAIN,
- 1 << 2, !enable << 2);
- break;
- case COMPANDER_1:
- snd_soc_update_bits(codec, TOMTOM_A_RX_HPH_L_GAIN,
- 1 << 5, !enable << 5);
- snd_soc_update_bits(codec, TOMTOM_A_RX_HPH_R_GAIN,
- 1 << 5, !enable << 5);
- break;
- case COMPANDER_2:
- snd_soc_update_bits(codec, TOMTOM_A_RX_LINE_1_GAIN,
- 1 << 5, !enable << 5);
- snd_soc_update_bits(codec, TOMTOM_A_RX_LINE_3_GAIN,
- 1 << 5, !enable << 5);
- snd_soc_update_bits(codec, TOMTOM_A_RX_LINE_2_GAIN,
- 1 << 5, !enable << 5);
- snd_soc_update_bits(codec, TOMTOM_A_RX_LINE_4_GAIN,
- 1 << 5, !enable << 5);
- break;
- default:
- WARN_ON(1);
- ret = -EINVAL;
- }
-
- return ret;
-}
-
-static void tomtom_discharge_comp(struct snd_soc_codec *codec, int comp)
-{
- /* Level meter DIV Factor to 5*/
- snd_soc_update_bits(codec, TOMTOM_A_CDC_COMP0_B2_CTL + (comp * 8), 0xF0,
- 0x05 << 4);
- /* RMS meter Sampling to 0x01 */
- snd_soc_write(codec, TOMTOM_A_CDC_COMP0_B3_CTL + (comp * 8), 0x01);
-
- /* Worst case timeout for compander CnP sleep timeout */
- usleep_range(3000, 3100);
-}
-
-static enum wcd9xxx_buck_volt tomtom_codec_get_buck_mv(
- struct snd_soc_codec *codec)
-{
- int buck_volt = WCD9XXX_CDC_BUCK_UNSUPPORTED;
- struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
- struct wcd9xxx_pdata *pdata = tomtom->resmgr.pdata;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(pdata->regulator); i++) {
- if (!strcmp(pdata->regulator[i].name,
- WCD9XXX_SUPPLY_BUCK_NAME)) {
- if ((pdata->regulator[i].min_uV ==
- WCD9XXX_CDC_BUCK_MV_1P8) ||
- (pdata->regulator[i].min_uV ==
- WCD9XXX_CDC_BUCK_MV_2P15))
- buck_volt = pdata->regulator[i].min_uV;
- break;
- }
- }
- return buck_volt;
-}
-
-static int tomtom_config_compander(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- int mask, enable_mask;
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
- const int comp = w->shift;
- const u32 rate = tomtom->comp_fs[comp];
- const struct comp_sample_dependent_params *comp_params =
- &comp_samp_params[rate];
- enum wcd9xxx_buck_volt buck_mv;
-
- pr_debug("%s: %s event %d compander %d, enabled %d", __func__,
- w->name, event, comp, tomtom->comp_enabled[comp]);
-
- if (!tomtom->comp_enabled[comp])
- return 0;
-
- /* Compander 0 has two channels */
- mask = enable_mask = 0x03;
- buck_mv = tomtom_codec_get_buck_mv(codec);
-
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- /* Set compander Sample rate */
- snd_soc_update_bits(codec,
- TOMTOM_A_CDC_COMP0_FS_CFG + (comp * 8),
- 0x07, rate);
- /* Set the static gain offset for HPH Path */
- if (comp == COMPANDER_1) {
- if (buck_mv == WCD9XXX_CDC_BUCK_MV_2P15) {
- snd_soc_update_bits(codec,
- TOMTOM_A_CDC_COMP0_B4_CTL + (comp * 8),
- 0x80, 0x00);
- } else {
- snd_soc_update_bits(codec,
- TOMTOM_A_CDC_COMP0_B4_CTL + (comp * 8),
- 0x80, 0x80);
- }
- }
- /* Enable RX interpolation path compander clocks */
- snd_soc_update_bits(codec, TOMTOM_A_CDC_CLK_RX_B2_CTL,
- mask << comp_shift[comp],
- mask << comp_shift[comp]);
- /* Toggle compander reset bits */
- snd_soc_update_bits(codec, TOMTOM_A_CDC_CLK_OTHR_RESET_B2_CTL,
- mask << comp_shift[comp],
- mask << comp_shift[comp]);
- snd_soc_update_bits(codec, TOMTOM_A_CDC_CLK_OTHR_RESET_B2_CTL,
- mask << comp_shift[comp], 0);
-
- /* Set gain source to compander */
- tomtom_config_gain_compander(codec, comp, true);
-
- /* Compander enable */
- snd_soc_update_bits(codec, TOMTOM_A_CDC_COMP0_B1_CTL +
- (comp * 8), enable_mask, enable_mask);
-
- tomtom_discharge_comp(codec, comp);
-
- /* Set sample rate dependent parameter */
- snd_soc_write(codec, TOMTOM_A_CDC_COMP0_B3_CTL + (comp * 8),
- comp_params->rms_meter_resamp_fact);
- snd_soc_update_bits(codec,
- TOMTOM_A_CDC_COMP0_B2_CTL + (comp * 8),
- 0xF0, comp_params->rms_meter_div_fact << 4);
- snd_soc_update_bits(codec,
- TOMTOM_A_CDC_COMP0_B2_CTL + (comp * 8),
- 0x0F, comp_params->peak_det_timeout);
- break;
- case SND_SOC_DAPM_PRE_PMD:
- /* Disable compander */
- snd_soc_update_bits(codec,
- TOMTOM_A_CDC_COMP0_B1_CTL + (comp * 8),
- enable_mask, 0x00);
-
- /* Toggle compander reset bits */
- snd_soc_update_bits(codec, TOMTOM_A_CDC_CLK_OTHR_RESET_B2_CTL,
- mask << comp_shift[comp],
- mask << comp_shift[comp]);
- snd_soc_update_bits(codec, TOMTOM_A_CDC_CLK_OTHR_RESET_B2_CTL,
- mask << comp_shift[comp], 0);
-
- /* Turn off the clock for compander in pair */
- snd_soc_update_bits(codec, TOMTOM_A_CDC_CLK_RX_B2_CTL,
- mask << comp_shift[comp], 0);
-
- /* Set gain source to register */
- tomtom_config_gain_compander(codec, comp, false);
- break;
- }
- return 0;
-}
-
-
-
-static const char *const tomtom_anc_func_text[] = {"OFF", "ON"};
-static const struct soc_enum tomtom_anc_func_enum =
- SOC_ENUM_SINGLE_EXT(2, tomtom_anc_func_text);
-
-static const char *const tabla_ear_pa_gain_text[] = {"POS_6_DB", "POS_2_DB"};
-static const struct soc_enum tabla_ear_pa_gain_enum[] = {
- SOC_ENUM_SINGLE_EXT(2, tabla_ear_pa_gain_text),
-};
-
-/*cut of frequency for high pass filter*/
-static const char * const cf_text[] = {
- "MIN_3DB_4Hz", "MIN_3DB_75Hz", "MIN_3DB_150Hz"
-};
-
-static const char * const rx_cf_text[] = {
- "MIN_3DB_4Hz", "MIN_3DB_75Hz", "MIN_3DB_150Hz",
- "MIN_3DB_0P48Hz"
-};
-
-static const struct soc_enum cf_dec1_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_TX1_MUX_CTL, 4, 3, cf_text);
-
-static const struct soc_enum cf_dec2_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_TX2_MUX_CTL, 4, 3, cf_text);
-
-static const struct soc_enum cf_dec3_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_TX3_MUX_CTL, 4, 3, cf_text);
-
-static const struct soc_enum cf_dec4_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_TX4_MUX_CTL, 4, 3, cf_text);
-
-static const struct soc_enum cf_dec5_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_TX5_MUX_CTL, 4, 3, cf_text);
-
-static const struct soc_enum cf_dec6_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_TX6_MUX_CTL, 4, 3, cf_text);
-
-static const struct soc_enum cf_dec7_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_TX7_MUX_CTL, 4, 3, cf_text);
-
-static const struct soc_enum cf_dec8_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_TX8_MUX_CTL, 4, 3, cf_text);
-
-static const struct soc_enum cf_dec9_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_TX9_MUX_CTL, 4, 3, cf_text);
-
-static const struct soc_enum cf_dec10_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_TX10_MUX_CTL, 4, 3, cf_text);
-
-static const struct soc_enum cf_rxmix1_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_RX1_B4_CTL, 0, 4, rx_cf_text);
-
-static const struct soc_enum cf_rxmix2_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_RX2_B4_CTL, 0, 4, rx_cf_text);
-
-static const struct soc_enum cf_rxmix3_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_RX3_B4_CTL, 0, 4, rx_cf_text);
-
-static const struct soc_enum cf_rxmix4_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_RX4_B4_CTL, 0, 4, rx_cf_text);
-
-static const struct soc_enum cf_rxmix5_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_RX5_B4_CTL, 0, 4, rx_cf_text)
-;
-static const struct soc_enum cf_rxmix6_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_RX6_B4_CTL, 0, 4, rx_cf_text);
-
-static const struct soc_enum cf_rxmix7_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_RX7_B4_CTL, 0, 4, rx_cf_text);
-
-static const struct soc_enum cf_rxmix8_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_RX8_B4_CTL, 0, 4, rx_cf_text);
-
-static const char * const class_h_dsm_text[] = {
- "ZERO", "DSM_HPHL_RX1", "DSM_SPKR_RX7"
-};
-
-static const struct soc_enum class_h_dsm_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_CLSH_CTL, 4, 3, class_h_dsm_text);
-
-static const struct snd_kcontrol_new class_h_dsm_mux =
- SOC_DAPM_ENUM("CLASS_H_DSM MUX Mux", class_h_dsm_enum);
-
-static const char * const rx1_interp_text[] = {
- "ZERO", "RX1 MIX2"
-};
-
-static const struct soc_enum rx1_interp_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CLK_RX_B1_CTL, 0, 2, rx1_interp_text);
-
-static const struct snd_kcontrol_new rx1_interp_mux =
- SOC_DAPM_ENUM("RX1 INTERP MUX Mux", rx1_interp_enum);
-
-static const char * const rx2_interp_text[] = {
- "ZERO", "RX2 MIX2"
-};
-
-static const struct soc_enum rx2_interp_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CLK_RX_B1_CTL, 1, 2, rx2_interp_text);
-
-static const struct snd_kcontrol_new rx2_interp_mux =
- SOC_DAPM_ENUM("RX2 INTERP MUX Mux", rx2_interp_enum);
-
-static const char *const tomtom_conn_mad_text[] = {
- "ADC_MB", "ADC1", "ADC2", "ADC3", "ADC4", "ADC5", "ADC6", "NOTUSED1",
- "DMIC1", "DMIC2", "DMIC3", "DMIC4", "DMIC5", "DMIC6", "NOTUSED2",
- "NOTUSED3"};
-
-static const struct soc_enum tomtom_conn_mad_enum =
- SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(tomtom_conn_mad_text),
- tomtom_conn_mad_text);
-
-
-static int tomtom_mad_input_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- u8 tomtom_mad_input;
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-
- tomtom_mad_input = snd_soc_read(codec, TOMTOM_A_CDC_MAD_INP_SEL);
-
- tomtom_mad_input = tomtom_mad_input & 0x0F;
-
- ucontrol->value.integer.value[0] = tomtom_mad_input;
-
- pr_debug("%s: tomtom_mad_input = %s\n", __func__,
- tomtom_conn_mad_text[tomtom_mad_input]);
-
- return 0;
-}
-
-static int tomtom_mad_input_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- u8 tomtom_mad_input;
- u16 micb_int_reg, micb_4_int_reg;
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- struct snd_soc_card *card = codec->component.card;
- char mad_amic_input_widget[6];
- u32 adc;
- const char *mad_input_widget;
- const char *source_widget = NULL;
- u32 mic_bias_found = 0;
- u32 i;
- struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
- int ret = 0;
- char *mad_input;
-
- tomtom_mad_input = ucontrol->value.integer.value[0];
- micb_4_int_reg = tomtom->resmgr.reg_addr->micb_4_int_rbias;
-
- if (tomtom_mad_input >= ARRAY_SIZE(tomtom_conn_mad_text)) {
- dev_err(codec->dev,
- "%s: tomtom_mad_input = %d out of bounds\n",
- __func__, tomtom_mad_input);
- return -EINVAL;
- }
-
- pr_debug("%s: tomtom_mad_input = %s\n", __func__,
- tomtom_conn_mad_text[tomtom_mad_input]);
-
- if (!strcmp(tomtom_conn_mad_text[tomtom_mad_input], "NOTUSED1") ||
- !strcmp(tomtom_conn_mad_text[tomtom_mad_input], "NOTUSED2") ||
- !strcmp(tomtom_conn_mad_text[tomtom_mad_input], "NOTUSED3") ||
- !strcmp(tomtom_conn_mad_text[tomtom_mad_input], "ADC_MB")) {
- pr_info("%s: tomtom mad input is set to unsupported input = %s\n",
- __func__, tomtom_conn_mad_text[tomtom_mad_input]);
- return -EINVAL;
- }
-
- if (strnstr(tomtom_conn_mad_text[tomtom_mad_input],
- "ADC", sizeof("ADC"))) {
- mad_input = strpbrk(tomtom_conn_mad_text[tomtom_mad_input],
- "123456");
- if (!mad_input) {
- dev_err(codec->dev, "%s: Invalid MAD input %s\n",
- __func__, tomtom_conn_mad_text[tomtom_mad_input]);
- return -EINVAL;
- }
- ret = kstrtouint(mad_input, 10, &adc);
- if ((ret < 0) || (adc > 6)) {
- pr_err("%s: Invalid ADC = %s\n", __func__,
- tomtom_conn_mad_text[tomtom_mad_input]);
- ret = -EINVAL;
- }
-
- snprintf(mad_amic_input_widget, 6, "%s%u", "AMIC", adc);
-
- mad_input_widget = mad_amic_input_widget;
- pr_debug("%s: tomtom amic input widget = %s\n", __func__,
- mad_amic_input_widget);
- } else {
- /* DMIC type input widget*/
- mad_input_widget = tomtom_conn_mad_text[tomtom_mad_input];
- }
-
- pr_debug("%s: tomtom input widget = %s\n", __func__, mad_input_widget);
-
- for (i = 0; i < card->num_dapm_routes; i++) {
-
- if (!strcmp(card->dapm_routes[i].sink, mad_input_widget)) {
-
- source_widget = card->dapm_routes[i].source;
- if (!source_widget) {
- dev_err(codec->dev,
- "%s: invalid source widget\n",
- __func__);
- return -EINVAL;
- }
-
- if (strnstr(source_widget,
- "MIC BIAS1", sizeof("MIC BIAS1"))) {
- mic_bias_found = 1;
- micb_int_reg = TOMTOM_A_MICB_1_INT_RBIAS;
- break;
- } else if (strnstr(source_widget,
- "MIC BIAS2", sizeof("MIC BIAS2"))) {
- mic_bias_found = 2;
- micb_int_reg = TOMTOM_A_MICB_2_INT_RBIAS;
- break;
- } else if (strnstr(source_widget,
- "MIC BIAS3", sizeof("MIC BIAS3"))) {
- mic_bias_found = 3;
- micb_int_reg = TOMTOM_A_MICB_3_INT_RBIAS;
- break;
- } else if (strnstr(source_widget,
- "MIC BIAS4", sizeof("MIC BIAS4"))) {
- mic_bias_found = 4;
- micb_int_reg = micb_4_int_reg;
- break;
- }
- }
- }
-
- if (mic_bias_found) {
- pr_debug("%s: source mic bias = %s. sink = %s\n", __func__,
- card->dapm_routes[i].source,
- card->dapm_routes[i].sink);
-
- snd_soc_update_bits(codec, TOMTOM_A_CDC_MAD_INP_SEL,
- 0x0F, tomtom_mad_input);
- snd_soc_update_bits(codec, TOMTOM_A_MAD_ANA_CTRL,
- 0x07, mic_bias_found);
-
- /* Setup internal micbias */
-
- if (strnstr(source_widget, "Internal1", strlen(source_widget)))
- snd_soc_update_bits(codec,
- micb_int_reg,
- 0xE0, 0xE0);
- else if (strnstr(source_widget, "Internal2",
- strlen(source_widget)))
- snd_soc_update_bits(codec,
- micb_int_reg,
- 0x1C, 0x1C);
- else if (strnstr(source_widget, "Internal3",
- strlen(source_widget)))
- snd_soc_update_bits(codec,
- micb_int_reg,
- 0x3, 0x3);
- else
- /*
- * If not internal, make sure to write the
- * register to default value
- */
- snd_soc_write(codec, micb_int_reg, 0x24);
- return 0;
- }
-
- pr_err("%s: mic bias source not found for input = %s\n",
- __func__, mad_input_widget);
- return -EINVAL;
-}
-
-static int tomtom_tx_hpf_bypass_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- u32 tx_index;
-
- tx_index = (u32)kcontrol->private_value;
-
- if (tx_index > NUM_DECIMATORS) {
- pr_err("%s: Invalid TX decimator %d\n", __func__,
- tx_index);
- return -EINVAL;
- }
-
- ucontrol->value.integer.value[0] =
- tx_hpf_work[tx_index-1].tx_hpf_bypass;
-
- return 0;
-}
-
-static int tomtom_tx_hpf_bypass_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- bool tx_hpf_bypass_cfg;
- u32 tx_index;
-
- tx_hpf_bypass_cfg = (bool)ucontrol->value.integer.value[0];
-
- pr_debug("%s: tx_hpf_bypass = %d\n", __func__,
- tx_hpf_bypass_cfg);
-
- tx_index = (u32)kcontrol->private_value;
-
- if (tx_index > NUM_DECIMATORS) {
- pr_err("%s: Invalid TX decimator %d\n", __func__,
- tx_index);
- return -EINVAL;
- }
- if (tx_hpf_work[tx_index-1].tx_hpf_bypass != tx_hpf_bypass_cfg)
- tx_hpf_work[tx_index-1].tx_hpf_bypass = tx_hpf_bypass_cfg;
-
- pr_debug("%s: Set TX%d HPF bypass configuration %d",
- __func__, tx_index,
- tx_hpf_work[tx_index-1].tx_hpf_bypass);
-
- return 0;
-}
-
-static const struct snd_kcontrol_new tomtom_snd_controls[] = {
-
- SOC_SINGLE_SX_TLV("RX1 Digital Volume", TOMTOM_A_CDC_RX1_VOL_CTL_B2_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX2 Digital Volume", TOMTOM_A_CDC_RX2_VOL_CTL_B2_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX3 Digital Volume", TOMTOM_A_CDC_RX3_VOL_CTL_B2_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX4 Digital Volume", TOMTOM_A_CDC_RX4_VOL_CTL_B2_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX5 Digital Volume", TOMTOM_A_CDC_RX5_VOL_CTL_B2_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX6 Digital Volume", TOMTOM_A_CDC_RX6_VOL_CTL_B2_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX7 Digital Volume", TOMTOM_A_CDC_RX7_VOL_CTL_B2_CTL,
- 0, -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("RX8 Digital Volume", TOMTOM_A_CDC_RX8_VOL_CTL_B2_CTL,
- 0, -84, 40, digital_gain),
-
- SOC_SINGLE_SX_TLV("DEC1 Volume", TOMTOM_A_CDC_TX1_VOL_CTL_GAIN, 0,
- -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("DEC2 Volume", TOMTOM_A_CDC_TX2_VOL_CTL_GAIN, 0,
- -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("DEC3 Volume", TOMTOM_A_CDC_TX3_VOL_CTL_GAIN, 0,
- -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("DEC4 Volume", TOMTOM_A_CDC_TX4_VOL_CTL_GAIN, 0,
- -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("DEC5 Volume", TOMTOM_A_CDC_TX5_VOL_CTL_GAIN, 0,
- -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("DEC6 Volume", TOMTOM_A_CDC_TX6_VOL_CTL_GAIN, 0,
- -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("DEC7 Volume", TOMTOM_A_CDC_TX7_VOL_CTL_GAIN, 0,
- -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("DEC8 Volume", TOMTOM_A_CDC_TX8_VOL_CTL_GAIN, 0,
- -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("DEC9 Volume", TOMTOM_A_CDC_TX9_VOL_CTL_GAIN, 0,
- -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("DEC10 Volume", TOMTOM_A_CDC_TX10_VOL_CTL_GAIN, 0,
- -84, 40, digital_gain),
-
- SOC_SINGLE_SX_TLV("IIR1 INP1 Volume", TOMTOM_A_CDC_IIR1_GAIN_B1_CTL, 0,
- -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("IIR1 INP2 Volume", TOMTOM_A_CDC_IIR1_GAIN_B2_CTL, 0,
- -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("IIR1 INP3 Volume", TOMTOM_A_CDC_IIR1_GAIN_B3_CTL, 0,
- -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("IIR1 INP4 Volume", TOMTOM_A_CDC_IIR1_GAIN_B4_CTL, 0,
- -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("IIR2 INP1 Volume", TOMTOM_A_CDC_IIR2_GAIN_B1_CTL, 0,
- -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("IIR2 INP2 Volume", TOMTOM_A_CDC_IIR2_GAIN_B2_CTL, 0,
- -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("IIR2 INP3 Volume", TOMTOM_A_CDC_IIR2_GAIN_B3_CTL, 0,
- -84, 40, digital_gain),
- SOC_SINGLE_SX_TLV("IIR2 INP4 Volume", TOMTOM_A_CDC_IIR2_GAIN_B4_CTL, 0,
- -84, 40, digital_gain),
-
- SOC_SINGLE_EXT("ANC Slot", SND_SOC_NOPM, 0, 100, 0, tomtom_get_anc_slot,
- tomtom_put_anc_slot),
- SOC_ENUM_EXT("ANC Function", tomtom_anc_func_enum, tomtom_get_anc_func,
- tomtom_put_anc_func),
-
- SOC_ENUM("TX1 HPF cut off", cf_dec1_enum),
- SOC_ENUM("TX2 HPF cut off", cf_dec2_enum),
- SOC_ENUM("TX3 HPF cut off", cf_dec3_enum),
- SOC_ENUM("TX4 HPF cut off", cf_dec4_enum),
- SOC_ENUM("TX5 HPF cut off", cf_dec5_enum),
- SOC_ENUM("TX6 HPF cut off", cf_dec6_enum),
- SOC_ENUM("TX7 HPF cut off", cf_dec7_enum),
- SOC_ENUM("TX8 HPF cut off", cf_dec8_enum),
- SOC_ENUM("TX9 HPF cut off", cf_dec9_enum),
- SOC_ENUM("TX10 HPF cut off", cf_dec10_enum),
-
- SOC_SINGLE_BOOL_EXT("TX1 HPF Switch", 1,
- tomtom_tx_hpf_bypass_get,
- tomtom_tx_hpf_bypass_put),
- SOC_SINGLE_BOOL_EXT("TX2 HPF Switch", 2,
- tomtom_tx_hpf_bypass_get,
- tomtom_tx_hpf_bypass_put),
- SOC_SINGLE_BOOL_EXT("TX3 HPF Switch", 3,
- tomtom_tx_hpf_bypass_get,
- tomtom_tx_hpf_bypass_put),
- SOC_SINGLE_BOOL_EXT("TX4 HPF Switch", 4,
- tomtom_tx_hpf_bypass_get,
- tomtom_tx_hpf_bypass_put),
- SOC_SINGLE_BOOL_EXT("TX5 HPF Switch", 5,
- tomtom_tx_hpf_bypass_get,
- tomtom_tx_hpf_bypass_put),
- SOC_SINGLE_BOOL_EXT("TX6 HPF Switch", 6,
- tomtom_tx_hpf_bypass_get,
- tomtom_tx_hpf_bypass_put),
- SOC_SINGLE_BOOL_EXT("TX7 HPF Switch", 7,
- tomtom_tx_hpf_bypass_get,
- tomtom_tx_hpf_bypass_put),
- SOC_SINGLE_BOOL_EXT("TX8 HPF Switch", 8,
- tomtom_tx_hpf_bypass_get,
- tomtom_tx_hpf_bypass_put),
- SOC_SINGLE_BOOL_EXT("TX9 HPF Switch", 9,
- tomtom_tx_hpf_bypass_get,
- tomtom_tx_hpf_bypass_put),
- SOC_SINGLE_BOOL_EXT("TX10 HPF Switch", 10,
- tomtom_tx_hpf_bypass_get,
- tomtom_tx_hpf_bypass_put),
-
- SOC_SINGLE("RX1 HPF Switch", TOMTOM_A_CDC_RX1_B5_CTL, 2, 1, 0),
- SOC_SINGLE("RX2 HPF Switch", TOMTOM_A_CDC_RX2_B5_CTL, 2, 1, 0),
- SOC_SINGLE("RX3 HPF Switch", TOMTOM_A_CDC_RX3_B5_CTL, 2, 1, 0),
- SOC_SINGLE("RX4 HPF Switch", TOMTOM_A_CDC_RX4_B5_CTL, 2, 1, 0),
- SOC_SINGLE("RX5 HPF Switch", TOMTOM_A_CDC_RX5_B5_CTL, 2, 1, 0),
- SOC_SINGLE("RX6 HPF Switch", TOMTOM_A_CDC_RX6_B5_CTL, 2, 1, 0),
- SOC_SINGLE("RX7 HPF Switch", TOMTOM_A_CDC_RX7_B5_CTL, 2, 1, 0),
- SOC_SINGLE("RX8 HPF Switch", TOMTOM_A_CDC_RX8_B5_CTL, 2, 1, 0),
-
- SOC_ENUM("RX1 HPF cut off", cf_rxmix1_enum),
- SOC_ENUM("RX2 HPF cut off", cf_rxmix2_enum),
- SOC_ENUM("RX3 HPF cut off", cf_rxmix3_enum),
- SOC_ENUM("RX4 HPF cut off", cf_rxmix4_enum),
- SOC_ENUM("RX5 HPF cut off", cf_rxmix5_enum),
- SOC_ENUM("RX6 HPF cut off", cf_rxmix6_enum),
- SOC_ENUM("RX7 HPF cut off", cf_rxmix7_enum),
- SOC_ENUM("RX8 HPF cut off", cf_rxmix8_enum),
-
- SOC_SINGLE_EXT("IIR1 Enable Band1", IIR1, BAND1, 1, 0,
- tomtom_get_iir_enable_audio_mixer, tomtom_put_iir_enable_audio_mixer),
- SOC_SINGLE_EXT("IIR1 Enable Band2", IIR1, BAND2, 1, 0,
- tomtom_get_iir_enable_audio_mixer, tomtom_put_iir_enable_audio_mixer),
- SOC_SINGLE_EXT("IIR1 Enable Band3", IIR1, BAND3, 1, 0,
- tomtom_get_iir_enable_audio_mixer, tomtom_put_iir_enable_audio_mixer),
- SOC_SINGLE_EXT("IIR1 Enable Band4", IIR1, BAND4, 1, 0,
- tomtom_get_iir_enable_audio_mixer, tomtom_put_iir_enable_audio_mixer),
- SOC_SINGLE_EXT("IIR1 Enable Band5", IIR1, BAND5, 1, 0,
- tomtom_get_iir_enable_audio_mixer, tomtom_put_iir_enable_audio_mixer),
- SOC_SINGLE_EXT("IIR2 Enable Band1", IIR2, BAND1, 1, 0,
- tomtom_get_iir_enable_audio_mixer, tomtom_put_iir_enable_audio_mixer),
- SOC_SINGLE_EXT("IIR2 Enable Band2", IIR2, BAND2, 1, 0,
- tomtom_get_iir_enable_audio_mixer, tomtom_put_iir_enable_audio_mixer),
- SOC_SINGLE_EXT("IIR2 Enable Band3", IIR2, BAND3, 1, 0,
- tomtom_get_iir_enable_audio_mixer, tomtom_put_iir_enable_audio_mixer),
- SOC_SINGLE_EXT("IIR2 Enable Band4", IIR2, BAND4, 1, 0,
- tomtom_get_iir_enable_audio_mixer, tomtom_put_iir_enable_audio_mixer),
- SOC_SINGLE_EXT("IIR2 Enable Band5", IIR2, BAND5, 1, 0,
- tomtom_get_iir_enable_audio_mixer, tomtom_put_iir_enable_audio_mixer),
-
- SOC_SINGLE_MULTI_EXT("IIR1 Band1", IIR1, BAND1, 255, 0, 5,
- tomtom_get_iir_band_audio_mixer, tomtom_put_iir_band_audio_mixer),
- SOC_SINGLE_MULTI_EXT("IIR1 Band2", IIR1, BAND2, 255, 0, 5,
- tomtom_get_iir_band_audio_mixer, tomtom_put_iir_band_audio_mixer),
- SOC_SINGLE_MULTI_EXT("IIR1 Band3", IIR1, BAND3, 255, 0, 5,
- tomtom_get_iir_band_audio_mixer, tomtom_put_iir_band_audio_mixer),
- SOC_SINGLE_MULTI_EXT("IIR1 Band4", IIR1, BAND4, 255, 0, 5,
- tomtom_get_iir_band_audio_mixer, tomtom_put_iir_band_audio_mixer),
- SOC_SINGLE_MULTI_EXT("IIR1 Band5", IIR1, BAND5, 255, 0, 5,
- tomtom_get_iir_band_audio_mixer, tomtom_put_iir_band_audio_mixer),
- SOC_SINGLE_MULTI_EXT("IIR2 Band1", IIR2, BAND1, 255, 0, 5,
- tomtom_get_iir_band_audio_mixer, tomtom_put_iir_band_audio_mixer),
- SOC_SINGLE_MULTI_EXT("IIR2 Band2", IIR2, BAND2, 255, 0, 5,
- tomtom_get_iir_band_audio_mixer, tomtom_put_iir_band_audio_mixer),
- SOC_SINGLE_MULTI_EXT("IIR2 Band3", IIR2, BAND3, 255, 0, 5,
- tomtom_get_iir_band_audio_mixer, tomtom_put_iir_band_audio_mixer),
- SOC_SINGLE_MULTI_EXT("IIR2 Band4", IIR2, BAND4, 255, 0, 5,
- tomtom_get_iir_band_audio_mixer, tomtom_put_iir_band_audio_mixer),
- SOC_SINGLE_MULTI_EXT("IIR2 Band5", IIR2, BAND5, 255, 0, 5,
- tomtom_get_iir_band_audio_mixer, tomtom_put_iir_band_audio_mixer),
-
- SOC_SINGLE_EXT("COMP0 Switch", SND_SOC_NOPM, COMPANDER_0, 1, 0,
- tomtom_get_compander, tomtom_set_compander),
- SOC_SINGLE_EXT("COMP1 Switch", SND_SOC_NOPM, COMPANDER_1, 1, 0,
- tomtom_get_compander, tomtom_set_compander),
- SOC_SINGLE_EXT("COMP2 Switch", SND_SOC_NOPM, COMPANDER_2, 1, 0,
- tomtom_get_compander, tomtom_set_compander),
-
- SOC_ENUM_EXT("MAD Input", tomtom_conn_mad_enum,
- tomtom_mad_input_get, tomtom_mad_input_put),
-
-};
-
-static int tomtom_pa_gain_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- u8 ear_pa_gain;
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-
- ear_pa_gain = snd_soc_read(codec, TOMTOM_A_RX_EAR_GAIN);
-
- ear_pa_gain = ear_pa_gain >> 5;
-
- ucontrol->value.integer.value[0] = ear_pa_gain;
-
- pr_debug("%s: ear_pa_gain = 0x%x\n", __func__, ear_pa_gain);
-
- return 0;
-}
-
-static int tomtom_pa_gain_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- u8 ear_pa_gain;
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-
- pr_debug("%s: ucontrol->value.integer.value[0] = %ld\n", __func__,
- ucontrol->value.integer.value[0]);
-
- ear_pa_gain = ucontrol->value.integer.value[0] << 5;
-
- snd_soc_update_bits(codec, TOMTOM_A_RX_EAR_GAIN, 0xE0, ear_pa_gain);
- return 0;
-}
-
-static const char * const tomtom_1_x_ear_pa_gain_text[] = {
- "POS_6_DB", "POS_4P5_DB", "POS_3_DB", "POS_1P5_DB",
- "POS_0_DB", "NEG_2P5_DB", "UNDEFINED", "NEG_12_DB"
-};
-
-static const struct soc_enum tomtom_1_x_ear_pa_gain_enum =
- SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(tomtom_1_x_ear_pa_gain_text),
- tomtom_1_x_ear_pa_gain_text);
-
-static const struct snd_kcontrol_new tomtom_1_x_analog_gain_controls[] = {
-
- SOC_ENUM_EXT("EAR PA Gain", tomtom_1_x_ear_pa_gain_enum,
- tomtom_pa_gain_get, tomtom_pa_gain_put),
-
- SOC_SINGLE_TLV("HPHL Volume", TOMTOM_A_RX_HPH_L_GAIN, 0, 20, 1,
- line_gain),
- SOC_SINGLE_TLV("HPHR Volume", TOMTOM_A_RX_HPH_R_GAIN, 0, 20, 1,
- line_gain),
-
- SOC_SINGLE_TLV("LINEOUT1 Volume", TOMTOM_A_RX_LINE_1_GAIN, 0, 20, 1,
- line_gain),
- SOC_SINGLE_TLV("LINEOUT2 Volume", TOMTOM_A_RX_LINE_2_GAIN, 0, 20, 1,
- line_gain),
- SOC_SINGLE_TLV("LINEOUT3 Volume", TOMTOM_A_RX_LINE_3_GAIN, 0, 20, 1,
- line_gain),
- SOC_SINGLE_TLV("LINEOUT4 Volume", TOMTOM_A_RX_LINE_4_GAIN, 0, 20, 1,
- line_gain),
-
- SOC_SINGLE_TLV("SPK DRV Volume", TOMTOM_A_SPKR_DRV1_GAIN, 3, 8, 1,
- line_gain),
- SOC_SINGLE_TLV("SPK DRV2 Volume", TOMTOM_A_SPKR_DRV2_GAIN, 3, 8, 1,
- line_gain),
-
- SOC_SINGLE_TLV("ADC1 Volume", TOMTOM_A_TX_1_GAIN, 2, 19, 0,
- analog_gain),
- SOC_SINGLE_TLV("ADC2 Volume", TOMTOM_A_TX_2_GAIN, 2, 19, 0,
- analog_gain),
- SOC_SINGLE_TLV("ADC3 Volume", TOMTOM_A_TX_3_GAIN, 2, 19, 0,
- analog_gain),
- SOC_SINGLE_TLV("ADC4 Volume", TOMTOM_A_TX_4_GAIN, 2, 19, 0,
- analog_gain),
- SOC_SINGLE_TLV("ADC5 Volume", TOMTOM_A_TX_5_GAIN, 2, 19, 0,
- analog_gain),
- SOC_SINGLE_TLV("ADC6 Volume", TOMTOM_A_TX_6_GAIN, 2, 19, 0,
- analog_gain),
-};
-
-static int tomtom_hph_impedance_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- uint32_t zl, zr;
- bool hphr;
- struct soc_multi_mixer_control *mc;
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- struct tomtom_priv *priv = snd_soc_codec_get_drvdata(codec);
-
- mc = (struct soc_multi_mixer_control *)(kcontrol->private_value);
-
- hphr = mc->shift;
- wcd9xxx_mbhc_get_impedance(&priv->mbhc, &zl, &zr);
- pr_debug("%s: zl %u, zr %u\n", __func__, zl, zr);
- ucontrol->value.integer.value[0] = hphr ? zr : zl;
-
- return 0;
-}
-
-static const struct snd_kcontrol_new impedance_detect_controls[] = {
- SOC_SINGLE_EXT("HPHL Impedance", 0, 0, UINT_MAX, 0,
- tomtom_hph_impedance_get, NULL),
- SOC_SINGLE_EXT("HPHR Impedance", 0, 1, UINT_MAX, 0,
- tomtom_hph_impedance_get, NULL),
-};
-
-static int tomtom_get_hph_type(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- struct tomtom_priv *priv = snd_soc_codec_get_drvdata(codec);
- struct wcd9xxx_mbhc *mbhc;
-
- if (!priv) {
- pr_debug("%s: wcd9330 private data is NULL\n", __func__);
- return 0;
- }
-
- mbhc = &priv->mbhc;
- if (!mbhc) {
- pr_debug("%s: mbhc not initialized\n", __func__);
- return 0;
- }
-
- ucontrol->value.integer.value[0] = (u32) mbhc->hph_type;
- pr_debug("%s: hph_type = %u\n", __func__, mbhc->hph_type);
-
- return 0;
-}
-
-static const struct snd_kcontrol_new hph_type_detect_controls[] = {
- SOC_SINGLE_EXT("HPH Type", 0, 0, UINT_MAX, 0,
- tomtom_get_hph_type, NULL),
-};
-
-static const char * const rx_mix1_text[] = {
- "ZERO", "SRC1", "SRC2", "IIR1", "IIR2", "RX1", "RX2", "RX3", "RX4",
- "RX5", "RX6", "RX7"
-};
-
-static const char * const rx8_mix1_text[] = {
- "ZERO", "IIR1", "IIR2", "RX1", "RX2", "RX3", "RX4",
- "RX5", "RX6", "RX7", "RX8"
-};
-
-static const char * const rx_mix2_text[] = {
- "ZERO", "SRC1", "SRC2", "IIR1", "IIR2"
-};
-
-static const char * const rx_rdac5_text[] = {
- "DEM4", "DEM3_INV"
-};
-
-static const char * const rx_rdac7_text[] = {
- "DEM6", "DEM5_INV"
-};
-
-static const char * const mad_sel_text[] = {
- "SPE", "MSM"
-};
-
-static const char * const sb_tx1_mux_text[] = {
- "ZERO", "RMIX1", "RMIX2", "RMIX3", "RMIX4", "RMIX5", "RMIX6", "RMIX7",
- "DEC1", "RMIX8"
-};
-
-static const char * const sb_tx2_mux_text[] = {
- "ZERO", "RMIX1", "RMIX2", "RMIX3", "RMIX4", "RMIX5", "RMIX6", "RMIX7",
- "DEC2", "RMIX8"
-};
-
-static const char * const sb_tx3_mux_text[] = {
- "ZERO", "RMIX1", "RMIX2", "RMIX3", "RMIX4", "RMIX5", "RMIX6", "RMIX7",
- "DEC3", "RMIX8"
-};
-
-static const char * const sb_tx4_mux_text[] = {
- "ZERO", "RMIX1", "RMIX2", "RMIX3", "RMIX4", "RMIX5", "RMIX6", "RMIX7",
- "DEC4", "RMIX8"
-};
-
-static const char * const sb_tx5_mux_text[] = {
- "ZERO", "RMIX1", "RMIX2", "RMIX3", "RMIX4", "RMIX5", "RMIX6", "RMIX7",
- "DEC5", "RMIX8"
-};
-
-static const char * const sb_tx6_mux_text[] = {
- "ZERO", "RMIX1", "RMIX2", "RMIX3", "RMIX4", "RMIX5", "RMIX6", "RMIX7",
- "DEC6", "RMIX8"
-};
-
-static const char * const sb_tx7_to_tx10_mux_text[] = {
- "ZERO", "RMIX1", "RMIX2", "RMIX3", "RMIX4", "RMIX5", "RMIX6", "RMIX7",
- "DEC1", "DEC2", "DEC3", "DEC4", "DEC5", "DEC6", "DEC7", "DEC8",
- "DEC9", "DEC10"
-};
-
-static const char * const dec1_mux_text[] = {
- "ZERO", "DMIC1", "ADC6",
-};
-
-static const char * const dec2_mux_text[] = {
- "ZERO", "DMIC2", "ADC5",
-};
-
-static const char * const dec3_mux_text[] = {
- "ZERO", "DMIC3", "ADC4",
-};
-
-static const char * const dec4_mux_text[] = {
- "ZERO", "DMIC4", "ADC3",
-};
-
-static const char * const dec5_mux_text[] = {
- "ZERO", "DMIC5", "ADC2",
-};
-
-static const char * const dec6_mux_text[] = {
- "ZERO", "DMIC6", "ADC1",
-};
-
-static const char * const dec7_mux_text[] = {
- "ZERO", "DMIC1", "DMIC6", "ADC1", "ADC6", "ANC1_FB", "ANC2_FB",
-};
-
-static const char * const dec8_mux_text[] = {
- "ZERO", "DMIC2", "DMIC5", "ADC2", "ADC5", "ANC1_FB", "ANC2_FB",
-};
-
-static const char * const dec9_mux_text[] = {
- "ZERO", "DMIC4", "DMIC5", "ADC2", "ADC3", "ADCMB", "ANC1_FB", "ANC2_FB",
-};
-
-static const char * const dec10_mux_text[] = {
- "ZERO", "DMIC3", "DMIC6", "ADC1", "ADC4", "ADCMB", "ANC1_FB", "ANC2_FB",
-};
-
-static const char * const anc_mux_text[] = {
- "ZERO", "ADC1", "ADC2", "ADC3", "ADC4", "ADC5", "ADC6", "ADC_MB",
- "RSVD_1", "DMIC1", "DMIC2", "DMIC3", "DMIC4", "DMIC5", "DMIC6"
-};
-
-static const char * const anc1_fb_mux_text[] = {
- "ZERO", "EAR_HPH_L", "EAR_LINE_1",
-};
-
-static const char * const iir_inp1_text[] = {
- "ZERO", "DEC1", "DEC2", "DEC3", "DEC4", "DEC5", "DEC6", "DEC7", "DEC8",
- "DEC9", "DEC10", "RX1", "RX2", "RX3", "RX4", "RX5", "RX6", "RX7"
-};
-
-static const char * const iir_inp2_text[] = {
- "ZERO", "DEC1", "DEC2", "DEC3", "DEC4", "DEC5", "DEC6", "DEC7", "DEC8",
- "DEC9", "DEC10", "RX1", "RX2", "RX3", "RX4", "RX5", "RX6", "RX7"
-};
-
-static const char * const iir_inp3_text[] = {
- "ZERO", "DEC1", "DEC2", "DEC3", "DEC4", "DEC5", "DEC6", "DEC7", "DEC8",
- "DEC9", "DEC10", "RX1", "RX2", "RX3", "RX4", "RX5", "RX6", "RX7"
-};
-
-static const char * const iir_inp4_text[] = {
- "ZERO", "DEC1", "DEC2", "DEC3", "DEC4", "DEC5", "DEC6", "DEC7", "DEC8",
- "DEC9", "DEC10", "RX1", "RX2", "RX3", "RX4", "RX5", "RX6", "RX7"
-};
-
-static const struct soc_enum rx_mix1_inp1_chain_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX1_B1_CTL, 0, 12, rx_mix1_text);
-
-static const struct soc_enum rx_mix1_inp2_chain_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX1_B1_CTL, 4, 12, rx_mix1_text);
-
-static const struct soc_enum rx_mix1_inp3_chain_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX1_B2_CTL, 0, 12, rx_mix1_text);
-
-static const struct soc_enum rx2_mix1_inp1_chain_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX2_B1_CTL, 0, 12, rx_mix1_text);
-
-static const struct soc_enum rx2_mix1_inp2_chain_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX2_B1_CTL, 4, 12, rx_mix1_text);
-
-static const struct soc_enum rx3_mix1_inp1_chain_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX3_B1_CTL, 0, 12, rx_mix1_text);
-
-static const struct soc_enum rx3_mix1_inp2_chain_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX3_B1_CTL, 4, 12, rx_mix1_text);
-
-static const struct soc_enum rx4_mix1_inp1_chain_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX4_B1_CTL, 0, 12, rx_mix1_text);
-
-static const struct soc_enum rx4_mix1_inp2_chain_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX4_B1_CTL, 4, 12, rx_mix1_text);
-
-static const struct soc_enum rx5_mix1_inp1_chain_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX5_B1_CTL, 0, 12, rx_mix1_text);
-
-static const struct soc_enum rx5_mix1_inp2_chain_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX5_B1_CTL, 4, 12, rx_mix1_text);
-
-static const struct soc_enum rx6_mix1_inp1_chain_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX6_B1_CTL, 0, 12, rx_mix1_text);
-
-static const struct soc_enum rx6_mix1_inp2_chain_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX6_B1_CTL, 4, 12, rx_mix1_text);
-
-static const struct soc_enum rx7_mix1_inp1_chain_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX7_B1_CTL, 0, 12, rx_mix1_text);
-
-static const struct soc_enum rx7_mix1_inp2_chain_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX7_B1_CTL, 4, 12, rx_mix1_text);
-
-static const struct soc_enum rx8_mix1_inp1_chain_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX8_B1_CTL, 0, 11, rx8_mix1_text);
-
-static const struct soc_enum rx8_mix1_inp2_chain_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX8_B1_CTL, 4, 11, rx8_mix1_text);
-
-static const struct soc_enum rx1_mix2_inp1_chain_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX1_B3_CTL, 0, 5, rx_mix2_text);
-
-static const struct soc_enum rx1_mix2_inp2_chain_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX1_B3_CTL, 3, 5, rx_mix2_text);
-
-static const struct soc_enum rx2_mix2_inp1_chain_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX2_B3_CTL, 0, 5, rx_mix2_text);
-
-static const struct soc_enum rx2_mix2_inp2_chain_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX2_B3_CTL, 3, 5, rx_mix2_text);
-
-static const struct soc_enum rx7_mix2_inp1_chain_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX7_B3_CTL, 0, 5, rx_mix2_text);
-
-static const struct soc_enum rx7_mix2_inp2_chain_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX7_B3_CTL, 3, 5, rx_mix2_text);
-
-static const struct soc_enum rx_rdac5_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_MISC, 2, 2, rx_rdac5_text);
-
-static const struct soc_enum rx_rdac7_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_MISC, 1, 2, rx_rdac7_text);
-
-static const struct soc_enum mad_sel_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_SVASS_CFG, 0, 2, mad_sel_text);
-
-static const struct soc_enum sb_tx1_mux_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_SB_B1_CTL, 0, 10, sb_tx1_mux_text);
-
-static const struct soc_enum sb_tx2_mux_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_SB_B2_CTL, 0, 10, sb_tx2_mux_text);
-
-static const struct soc_enum sb_tx3_mux_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_SB_B3_CTL, 0, 10, sb_tx3_mux_text);
-
-static const struct soc_enum sb_tx4_mux_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_SB_B4_CTL, 0, 10, sb_tx4_mux_text);
-
-static const struct soc_enum sb_tx5_mux_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_SB_B5_CTL, 0, 10, sb_tx5_mux_text);
-
-static const struct soc_enum sb_tx6_mux_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_SB_B6_CTL, 0, 10, sb_tx6_mux_text);
-
-static const struct soc_enum sb_tx7_mux_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_SB_B7_CTL, 0, 18,
- sb_tx7_to_tx10_mux_text);
-
-static const struct soc_enum sb_tx8_mux_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_SB_B8_CTL, 0, 18,
- sb_tx7_to_tx10_mux_text);
-
-static const struct soc_enum sb_tx9_mux_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_SB_B9_CTL, 0, 18,
- sb_tx7_to_tx10_mux_text);
-
-static const struct soc_enum sb_tx10_mux_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_SB_B10_CTL, 0, 18,
- sb_tx7_to_tx10_mux_text);
-
-static const struct soc_enum dec1_mux_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_B1_CTL, 0, 3, dec1_mux_text);
-
-static const struct soc_enum dec2_mux_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_B1_CTL, 2, 3, dec2_mux_text);
-
-static const struct soc_enum dec3_mux_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_B1_CTL, 4, 3, dec3_mux_text);
-
-static const struct soc_enum dec4_mux_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_B1_CTL, 6, 3, dec4_mux_text);
-
-static const struct soc_enum dec5_mux_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_B2_CTL, 0, 3, dec5_mux_text);
-
-static const struct soc_enum dec6_mux_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_B2_CTL, 2, 3, dec6_mux_text);
-
-static const struct soc_enum dec7_mux_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_B2_CTL, 4, 7, dec7_mux_text);
-
-static const struct soc_enum dec8_mux_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_B3_CTL, 0, 7, dec8_mux_text);
-
-static const struct soc_enum dec9_mux_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_B3_CTL, 3, 8, dec9_mux_text);
-
-static const struct soc_enum dec10_mux_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_B4_CTL, 0, 8, dec10_mux_text);
-
-static const struct soc_enum anc1_mux_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_ANC_B1_CTL, 0, 15, anc_mux_text);
-
-static const struct soc_enum anc2_mux_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_ANC_B1_CTL, 4, 15, anc_mux_text);
-
-static const struct soc_enum anc1_fb_mux_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_ANC_B2_CTL, 0, 3, anc1_fb_mux_text);
-
-static const struct soc_enum iir1_inp1_mux_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_EQ1_B1_CTL, 0, 18, iir_inp1_text);
-
-static const struct soc_enum iir2_inp1_mux_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_EQ2_B1_CTL, 0, 18, iir_inp1_text);
-
-static const struct soc_enum iir1_inp2_mux_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_EQ1_B2_CTL, 0, 18, iir_inp2_text);
-
-static const struct soc_enum iir2_inp2_mux_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_EQ2_B2_CTL, 0, 18, iir_inp2_text);
-
-static const struct soc_enum iir1_inp3_mux_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_EQ1_B3_CTL, 0, 18, iir_inp3_text);
-
-static const struct soc_enum iir2_inp3_mux_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_EQ2_B3_CTL, 0, 18, iir_inp3_text);
-
-static const struct soc_enum iir1_inp4_mux_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_EQ1_B4_CTL, 0, 18, iir_inp4_text);
-
-static const struct soc_enum iir2_inp4_mux_enum =
- SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_EQ2_B4_CTL, 0, 18, iir_inp4_text);
-
-static const struct snd_kcontrol_new rx_mix1_inp1_mux =
- SOC_DAPM_ENUM("RX1 MIX1 INP1 Mux", rx_mix1_inp1_chain_enum);
-
-static const struct snd_kcontrol_new rx_mix1_inp2_mux =
- SOC_DAPM_ENUM("RX1 MIX1 INP2 Mux", rx_mix1_inp2_chain_enum);
-
-static const struct snd_kcontrol_new rx_mix1_inp3_mux =
- SOC_DAPM_ENUM("RX1 MIX1 INP3 Mux", rx_mix1_inp3_chain_enum);
-
-static const struct snd_kcontrol_new rx2_mix1_inp1_mux =
- SOC_DAPM_ENUM("RX2 MIX1 INP1 Mux", rx2_mix1_inp1_chain_enum);
-
-static const struct snd_kcontrol_new rx2_mix1_inp2_mux =
- SOC_DAPM_ENUM("RX2 MIX1 INP2 Mux", rx2_mix1_inp2_chain_enum);
-
-static const struct snd_kcontrol_new rx3_mix1_inp1_mux =
- SOC_DAPM_ENUM("RX3 MIX1 INP1 Mux", rx3_mix1_inp1_chain_enum);
-
-static const struct snd_kcontrol_new rx3_mix1_inp2_mux =
- SOC_DAPM_ENUM("RX3 MIX1 INP2 Mux", rx3_mix1_inp2_chain_enum);
-
-static const struct snd_kcontrol_new rx4_mix1_inp1_mux =
- SOC_DAPM_ENUM("RX4 MIX1 INP1 Mux", rx4_mix1_inp1_chain_enum);
-
-static const struct snd_kcontrol_new rx4_mix1_inp2_mux =
- SOC_DAPM_ENUM("RX4 MIX1 INP2 Mux", rx4_mix1_inp2_chain_enum);
-
-static const struct snd_kcontrol_new rx5_mix1_inp1_mux =
- SOC_DAPM_ENUM("RX5 MIX1 INP1 Mux", rx5_mix1_inp1_chain_enum);
-
-static const struct snd_kcontrol_new rx5_mix1_inp2_mux =
- SOC_DAPM_ENUM("RX5 MIX1 INP2 Mux", rx5_mix1_inp2_chain_enum);
-
-static const struct snd_kcontrol_new rx6_mix1_inp1_mux =
- SOC_DAPM_ENUM("RX6 MIX1 INP1 Mux", rx6_mix1_inp1_chain_enum);
-
-static const struct snd_kcontrol_new rx6_mix1_inp2_mux =
- SOC_DAPM_ENUM("RX6 MIX1 INP2 Mux", rx6_mix1_inp2_chain_enum);
-
-static const struct snd_kcontrol_new rx7_mix1_inp1_mux =
- SOC_DAPM_ENUM("RX7 MIX1 INP1 Mux", rx7_mix1_inp1_chain_enum);
-
-static const struct snd_kcontrol_new rx7_mix1_inp2_mux =
- SOC_DAPM_ENUM("RX7 MIX1 INP2 Mux", rx7_mix1_inp2_chain_enum);
-
-static const struct snd_kcontrol_new rx8_mix1_inp1_mux =
- SOC_DAPM_ENUM("RX8 MIX1 INP1 Mux", rx8_mix1_inp1_chain_enum);
-
-static const struct snd_kcontrol_new rx8_mix1_inp2_mux =
- SOC_DAPM_ENUM("RX8 MIX1 INP2 Mux", rx8_mix1_inp2_chain_enum);
-
-static const struct snd_kcontrol_new rx1_mix2_inp1_mux =
- SOC_DAPM_ENUM("RX1 MIX2 INP1 Mux", rx1_mix2_inp1_chain_enum);
-
-static const struct snd_kcontrol_new rx1_mix2_inp2_mux =
- SOC_DAPM_ENUM("RX1 MIX2 INP2 Mux", rx1_mix2_inp2_chain_enum);
-
-static const struct snd_kcontrol_new rx2_mix2_inp1_mux =
- SOC_DAPM_ENUM("RX2 MIX2 INP1 Mux", rx2_mix2_inp1_chain_enum);
-
-static const struct snd_kcontrol_new rx2_mix2_inp2_mux =
- SOC_DAPM_ENUM("RX2 MIX2 INP2 Mux", rx2_mix2_inp2_chain_enum);
-
-static const struct snd_kcontrol_new rx7_mix2_inp1_mux =
- SOC_DAPM_ENUM("RX7 MIX2 INP1 Mux", rx7_mix2_inp1_chain_enum);
-
-static const struct snd_kcontrol_new rx7_mix2_inp2_mux =
- SOC_DAPM_ENUM("RX7 MIX2 INP2 Mux", rx7_mix2_inp2_chain_enum);
-
-static const struct snd_kcontrol_new rx_dac5_mux =
- SOC_DAPM_ENUM("RDAC5 MUX Mux", rx_rdac5_enum);
-
-static const struct snd_kcontrol_new rx_dac7_mux =
- SOC_DAPM_ENUM("RDAC7 MUX Mux", rx_rdac7_enum);
-
-static const struct snd_kcontrol_new mad_sel_mux =
- SOC_DAPM_ENUM("MAD_SEL MUX Mux", mad_sel_enum);
-
-static const struct snd_kcontrol_new sb_tx1_mux =
- SOC_DAPM_ENUM("SLIM TX1 MUX Mux", sb_tx1_mux_enum);
-
-static const struct snd_kcontrol_new sb_tx2_mux =
- SOC_DAPM_ENUM("SLIM TX2 MUX Mux", sb_tx2_mux_enum);
-
-static const struct snd_kcontrol_new sb_tx3_mux =
- SOC_DAPM_ENUM("SLIM TX3 MUX Mux", sb_tx3_mux_enum);
-
-static const struct snd_kcontrol_new sb_tx4_mux =
- SOC_DAPM_ENUM("SLIM TX4 MUX Mux", sb_tx4_mux_enum);
-
-static const struct snd_kcontrol_new sb_tx5_mux =
- SOC_DAPM_ENUM("SLIM TX5 MUX Mux", sb_tx5_mux_enum);
-
-static const struct snd_kcontrol_new sb_tx6_mux =
- SOC_DAPM_ENUM("SLIM TX6 MUX Mux", sb_tx6_mux_enum);
-
-static const struct snd_kcontrol_new sb_tx7_mux =
- SOC_DAPM_ENUM("SLIM TX7 MUX Mux", sb_tx7_mux_enum);
-
-static const struct snd_kcontrol_new sb_tx8_mux =
- SOC_DAPM_ENUM("SLIM TX8 MUX Mux", sb_tx8_mux_enum);
-
-static const struct snd_kcontrol_new sb_tx9_mux =
- SOC_DAPM_ENUM("SLIM TX9 MUX Mux", sb_tx9_mux_enum);
-
-static const struct snd_kcontrol_new sb_tx10_mux =
- SOC_DAPM_ENUM("SLIM TX10 MUX Mux", sb_tx10_mux_enum);
-
-
-static int wcd9330_put_dec_enum(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *w = wlist->widgets[0];
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
- unsigned int dec_mux, decimator;
- char *dec_name = NULL;
- char *widget_name = NULL;
- char *temp;
- u16 tx_mux_ctl_reg;
- u8 adc_dmic_sel = 0x0;
- int ret = 0;
- char *dec;
-
- if (ucontrol->value.enumerated.item[0] >= e->items)
- return -EINVAL;
-
- dec_mux = ucontrol->value.enumerated.item[0];
-
- widget_name = kstrndup(w->name, 15, GFP_KERNEL);
- if (!widget_name)
- return -ENOMEM;
- temp = widget_name;
-
- dec_name = strsep(&widget_name, " ");
- widget_name = temp;
- if (!dec_name) {
- pr_err("%s: Invalid decimator = %s\n", __func__, w->name);
- ret = -EINVAL;
- goto out;
- }
- dec = strpbrk(dec_name, "123456789");
- if (!dec) {
- dev_err(w->dapm->dev, "%s: decimator index not found\n",
- __func__);
- ret = -EINVAL;
- goto out;
- }
- ret = kstrtouint(dec, 10, &decimator);
- if (ret < 0) {
- pr_err("%s: Invalid decimator = %s\n", __func__, dec_name);
- ret = -EINVAL;
- goto out;
- }
-
- dev_dbg(w->dapm->dev, "%s(): widget = %s decimator = %u dec_mux = %u\n"
- , __func__, w->name, decimator, dec_mux);
-
-
- switch (decimator) {
- case 1:
- case 2:
- case 3:
- case 4:
- case 5:
- case 6:
- if (dec_mux == 1)
- adc_dmic_sel = 0x1;
- else
- adc_dmic_sel = 0x0;
- break;
- case 7:
- case 8:
- case 9:
- case 10:
- if ((dec_mux == 1) || (dec_mux == 2))
- adc_dmic_sel = 0x1;
- else
- adc_dmic_sel = 0x0;
- break;
- default:
- pr_err("%s: Invalid Decimator = %u\n", __func__, decimator);
- ret = -EINVAL;
- goto out;
- }
-
- tx_mux_ctl_reg = TOMTOM_A_CDC_TX1_MUX_CTL + 8 * (decimator - 1);
-
- snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x1, adc_dmic_sel);
-
- ret = snd_soc_dapm_put_enum_double(kcontrol, ucontrol);
-
-out:
- kfree(widget_name);
- return ret;
-}
-
-#define WCD9330_DEC_ENUM(xname, xenum) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
- .info = snd_soc_info_enum_double, \
- .get = snd_soc_dapm_get_enum_double, \
- .put = wcd9330_put_dec_enum, \
- .private_value = (unsigned long)&xenum }
-
-static const struct snd_kcontrol_new dec1_mux =
- WCD9330_DEC_ENUM("DEC1 MUX Mux", dec1_mux_enum);
-
-static const struct snd_kcontrol_new dec2_mux =
- WCD9330_DEC_ENUM("DEC2 MUX Mux", dec2_mux_enum);
-
-static const struct snd_kcontrol_new dec3_mux =
- WCD9330_DEC_ENUM("DEC3 MUX Mux", dec3_mux_enum);
-
-static const struct snd_kcontrol_new dec4_mux =
- WCD9330_DEC_ENUM("DEC4 MUX Mux", dec4_mux_enum);
-
-static const struct snd_kcontrol_new dec5_mux =
- WCD9330_DEC_ENUM("DEC5 MUX Mux", dec5_mux_enum);
-
-static const struct snd_kcontrol_new dec6_mux =
- WCD9330_DEC_ENUM("DEC6 MUX Mux", dec6_mux_enum);
-
-static const struct snd_kcontrol_new dec7_mux =
- WCD9330_DEC_ENUM("DEC7 MUX Mux", dec7_mux_enum);
-
-static const struct snd_kcontrol_new dec8_mux =
- WCD9330_DEC_ENUM("DEC8 MUX Mux", dec8_mux_enum);
-
-static const struct snd_kcontrol_new dec9_mux =
- WCD9330_DEC_ENUM("DEC9 MUX Mux", dec9_mux_enum);
-
-static const struct snd_kcontrol_new dec10_mux =
- WCD9330_DEC_ENUM("DEC10 MUX Mux", dec10_mux_enum);
-
-static const struct snd_kcontrol_new iir1_inp1_mux =
- SOC_DAPM_ENUM("IIR1 INP1 Mux", iir1_inp1_mux_enum);
-
-static const struct snd_kcontrol_new iir2_inp1_mux =
- SOC_DAPM_ENUM("IIR2 INP1 Mux", iir2_inp1_mux_enum);
-
-static const struct snd_kcontrol_new iir1_inp2_mux =
- SOC_DAPM_ENUM("IIR1 INP2 Mux", iir1_inp2_mux_enum);
-
-static const struct snd_kcontrol_new iir2_inp2_mux =
- SOC_DAPM_ENUM("IIR2 INP2 Mux", iir2_inp2_mux_enum);
-
-static const struct snd_kcontrol_new iir1_inp3_mux =
- SOC_DAPM_ENUM("IIR1 INP3 Mux", iir1_inp3_mux_enum);
-
-static const struct snd_kcontrol_new iir2_inp3_mux =
- SOC_DAPM_ENUM("IIR2 INP3 Mux", iir2_inp3_mux_enum);
-
-static const struct snd_kcontrol_new iir1_inp4_mux =
- SOC_DAPM_ENUM("IIR1 INP4 Mux", iir1_inp4_mux_enum);
-
-static const struct snd_kcontrol_new iir2_inp4_mux =
- SOC_DAPM_ENUM("IIR2 INP4 Mux", iir2_inp4_mux_enum);
-
-static const struct snd_kcontrol_new anc1_mux =
- SOC_DAPM_ENUM("ANC1 MUX Mux", anc1_mux_enum);
-
-static const struct snd_kcontrol_new anc2_mux =
- SOC_DAPM_ENUM("ANC2 MUX Mux", anc2_mux_enum);
-
-static const struct snd_kcontrol_new anc1_fb_mux =
- SOC_DAPM_ENUM("ANC1 FB MUX Mux", anc1_fb_mux_enum);
-
-static const struct snd_kcontrol_new dac1_switch[] = {
- SOC_DAPM_SINGLE("Switch", TOMTOM_A_RX_EAR_EN, 5, 1, 0)
-};
-static const struct snd_kcontrol_new hphl_switch[] = {
- SOC_DAPM_SINGLE("Switch", TOMTOM_A_RX_HPH_L_DAC_CTL, 6, 1, 0)
-};
-
-static const struct snd_kcontrol_new hphl_pa_mix[] = {
- SOC_DAPM_SINGLE("AUX_PGA_L Switch", TOMTOM_A_RX_PA_AUX_IN_CONN,
- 7, 1, 0),
-};
-
-static const struct snd_kcontrol_new hphr_pa_mix[] = {
- SOC_DAPM_SINGLE("AUX_PGA_R Switch", TOMTOM_A_RX_PA_AUX_IN_CONN,
- 6, 1, 0),
-};
-
-static const struct snd_kcontrol_new ear_pa_mix[] = {
- SOC_DAPM_SINGLE("AUX_PGA_L Switch", TOMTOM_A_RX_PA_AUX_IN_CONN,
- 5, 1, 0),
-};
-static const struct snd_kcontrol_new lineout1_pa_mix[] = {
- SOC_DAPM_SINGLE("AUX_PGA_L Switch", TOMTOM_A_RX_PA_AUX_IN_CONN,
- 4, 1, 0),
-};
-
-static const struct snd_kcontrol_new lineout2_pa_mix[] = {
- SOC_DAPM_SINGLE("AUX_PGA_R Switch", TOMTOM_A_RX_PA_AUX_IN_CONN,
- 3, 1, 0),
-};
-
-static const struct snd_kcontrol_new lineout3_pa_mix[] = {
- SOC_DAPM_SINGLE("AUX_PGA_L Switch", TOMTOM_A_RX_PA_AUX_IN_CONN,
- 2, 1, 0),
-};
-
-static const struct snd_kcontrol_new lineout4_pa_mix[] = {
- SOC_DAPM_SINGLE("AUX_PGA_R Switch", TOMTOM_A_RX_PA_AUX_IN_CONN,
- 1, 1, 0),
-};
-
-static const struct snd_kcontrol_new lineout3_ground_switch =
- SOC_DAPM_SINGLE("Switch", TOMTOM_A_RX_LINE_3_DAC_CTL, 6, 1, 0);
-
-static const struct snd_kcontrol_new lineout4_ground_switch =
- SOC_DAPM_SINGLE("Switch", TOMTOM_A_RX_LINE_4_DAC_CTL, 6, 1, 0);
-
-static const struct snd_kcontrol_new aif4_mad_switch =
- SOC_DAPM_SINGLE("Switch", TOMTOM_A_SVASS_CLKRST_CTL, 0, 1, 0);
-
-static const struct snd_kcontrol_new aif4_vi_switch =
- SOC_DAPM_SINGLE("Switch", TOMTOM_A_SPKR1_PROT_EN, 3, 1, 0);
-
-/* virtual port entries */
-static int slim_tx_mixer_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
- struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec);
-
- ucontrol->value.integer.value[0] = tomtom_p->tx_port_value;
- return 0;
-}
-
-static int slim_tx_mixer_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
- struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec);
- struct wcd9xxx *core = dev_get_drvdata(codec->dev->parent);
- struct snd_soc_dapm_update *update = NULL;
- struct soc_multi_mixer_control *mixer =
- ((struct soc_multi_mixer_control *)kcontrol->private_value);
- u32 dai_id = widget->shift;
- u32 port_id = mixer->shift;
- u32 enable = ucontrol->value.integer.value[0];
- u32 vtable = vport_check_table[dai_id];
-
-
- pr_debug("%s: wname %s cname %s value %u shift %d item %ld\n", __func__,
- widget->name, ucontrol->id.name, tomtom_p->tx_port_value,
- widget->shift, ucontrol->value.integer.value[0]);
-
- mutex_lock(&tomtom_p->codec_mutex);
-
- if (tomtom_p->intf_type != WCD9XXX_INTERFACE_TYPE_SLIMBUS) {
- if (dai_id != AIF1_CAP) {
- dev_err(codec->dev, "%s: invalid AIF for I2C mode\n",
- __func__);
- mutex_unlock(&tomtom_p->codec_mutex);
- return -EINVAL;
- }
- }
- switch (dai_id) {
- case AIF1_CAP:
- case AIF2_CAP:
- case AIF3_CAP:
- /* only add to the list if value not set
- */
- if (enable && !(tomtom_p->tx_port_value & 1 << port_id)) {
-
- if (tomtom_p->intf_type ==
- WCD9XXX_INTERFACE_TYPE_SLIMBUS)
- vtable = vport_check_table[dai_id];
- if (tomtom_p->intf_type ==
- WCD9XXX_INTERFACE_TYPE_I2C)
- vtable = vport_i2s_check_table[dai_id];
-
- if (wcd9xxx_tx_vport_validation(
- vtable,
- port_id,
- tomtom_p->dai, NUM_CODEC_DAIS)) {
- dev_dbg(codec->dev, "%s: TX%u is used by other virtual port\n",
- __func__, port_id + 1);
- mutex_unlock(&tomtom_p->codec_mutex);
- return 0;
- }
- tomtom_p->tx_port_value |= 1 << port_id;
- list_add_tail(&core->tx_chs[port_id].list,
- &tomtom_p->dai[dai_id].wcd9xxx_ch_list
- );
- } else if (!enable && (tomtom_p->tx_port_value &
- 1 << port_id)) {
- tomtom_p->tx_port_value &= ~(1 << port_id);
- list_del_init(&core->tx_chs[port_id].list);
- } else {
- if (enable)
- dev_dbg(codec->dev, "%s: TX%u port is used by\n"
- "this virtual port\n",
- __func__, port_id + 1);
- else
- dev_dbg(codec->dev, "%s: TX%u port is not used by\n"
- "this virtual port\n",
- __func__, port_id + 1);
- /* avoid update power function */
- mutex_unlock(&tomtom_p->codec_mutex);
- return 0;
- }
- break;
- default:
- pr_err("Unknown AIF %d\n", dai_id);
- mutex_unlock(&tomtom_p->codec_mutex);
- return -EINVAL;
- }
- pr_debug("%s: name %s sname %s updated value %u shift %d\n", __func__,
- widget->name, widget->sname, tomtom_p->tx_port_value,
- widget->shift);
-
- mutex_unlock(&tomtom_p->codec_mutex);
- snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, enable, update);
-
- return 0;
-}
-
-static int slim_rx_mux_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
- struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec);
-
- ucontrol->value.enumerated.item[0] = tomtom_p->rx_port_value;
- return 0;
-}
-
-static const char *const slim_rx_mux_text[] = {
- "ZERO", "AIF1_PB", "AIF2_PB", "AIF3_PB"
-};
-
-static int slim_rx_mux_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
- struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec);
- struct wcd9xxx *core = dev_get_drvdata(codec->dev->parent);
- struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
- struct snd_soc_dapm_update *update = NULL;
- u32 port_id = widget->shift;
-
- pr_debug("%s: wname %s cname %s value %u shift %d item %ld\n", __func__,
- widget->name, ucontrol->id.name, tomtom_p->rx_port_value,
- widget->shift, ucontrol->value.integer.value[0]);
-
- tomtom_p->rx_port_value = ucontrol->value.enumerated.item[0];
-
- mutex_lock(&tomtom_p->codec_mutex);
-
- if (tomtom_p->intf_type != WCD9XXX_INTERFACE_TYPE_SLIMBUS) {
- if (tomtom_p->rx_port_value > 2) {
- dev_err(codec->dev, "%s: invalid AIF for I2C mode\n",
- __func__);
- goto err;
- }
- }
- /* value need to match the Virtual port and AIF number
- */
- switch (tomtom_p->rx_port_value) {
- case 0:
- list_del_init(&core->rx_chs[port_id].list);
- break;
- case 1:
- if (wcd9xxx_rx_vport_validation(port_id +
- TOMTOM_RX_PORT_START_NUMBER,
- &tomtom_p->dai[AIF1_PB].wcd9xxx_ch_list)) {
- dev_dbg(codec->dev, "%s: RX%u is used by current requesting AIF_PB itself\n",
- __func__, port_id + 1);
- goto rtn;
- }
- list_add_tail(&core->rx_chs[port_id].list,
- &tomtom_p->dai[AIF1_PB].wcd9xxx_ch_list);
- break;
- case 2:
- if (wcd9xxx_rx_vport_validation(port_id +
- TOMTOM_RX_PORT_START_NUMBER,
- &tomtom_p->dai[AIF2_PB].wcd9xxx_ch_list)) {
- dev_dbg(codec->dev, "%s: RX%u is used by current requesting AIF_PB itself\n",
- __func__, port_id + 1);
- goto rtn;
- }
- list_add_tail(&core->rx_chs[port_id].list,
- &tomtom_p->dai[AIF2_PB].wcd9xxx_ch_list);
- break;
- case 3:
- if (wcd9xxx_rx_vport_validation(port_id +
- TOMTOM_RX_PORT_START_NUMBER,
- &tomtom_p->dai[AIF3_PB].wcd9xxx_ch_list)) {
- dev_dbg(codec->dev, "%s: RX%u is used by current requesting AIF_PB itself\n",
- __func__, port_id + 1);
- goto rtn;
- }
- list_add_tail(&core->rx_chs[port_id].list,
- &tomtom_p->dai[AIF3_PB].wcd9xxx_ch_list);
- break;
- default:
- pr_err("Unknown AIF %d\n", tomtom_p->rx_port_value);
- goto err;
- }
-rtn:
- mutex_unlock(&tomtom_p->codec_mutex);
- snd_soc_dapm_mux_update_power(widget->dapm, kcontrol,
- tomtom_p->rx_port_value, e, update);
-
- return 0;
-err:
- mutex_unlock(&tomtom_p->codec_mutex);
- return -EINVAL;
-}
-
-static const struct soc_enum slim_rx_mux_enum =
- SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(slim_rx_mux_text), slim_rx_mux_text);
-
-static const struct snd_kcontrol_new slim_rx_mux[TOMTOM_RX_MAX] = {
- SOC_DAPM_ENUM_EXT("SLIM RX1 Mux", slim_rx_mux_enum,
- slim_rx_mux_get, slim_rx_mux_put),
- SOC_DAPM_ENUM_EXT("SLIM RX2 Mux", slim_rx_mux_enum,
- slim_rx_mux_get, slim_rx_mux_put),
- SOC_DAPM_ENUM_EXT("SLIM RX3 Mux", slim_rx_mux_enum,
- slim_rx_mux_get, slim_rx_mux_put),
- SOC_DAPM_ENUM_EXT("SLIM RX4 Mux", slim_rx_mux_enum,
- slim_rx_mux_get, slim_rx_mux_put),
- SOC_DAPM_ENUM_EXT("SLIM RX5 Mux", slim_rx_mux_enum,
- slim_rx_mux_get, slim_rx_mux_put),
- SOC_DAPM_ENUM_EXT("SLIM RX6 Mux", slim_rx_mux_enum,
- slim_rx_mux_get, slim_rx_mux_put),
- SOC_DAPM_ENUM_EXT("SLIM RX7 Mux", slim_rx_mux_enum,
- slim_rx_mux_get, slim_rx_mux_put),
- SOC_DAPM_ENUM_EXT("SLIM RX8 Mux", slim_rx_mux_enum,
- slim_rx_mux_get, slim_rx_mux_put),
-};
-
-static const struct snd_kcontrol_new aif1_cap_mixer[] = {
- SOC_SINGLE_EXT("SLIM TX1", SND_SOC_NOPM, TOMTOM_TX1, 1, 0,
- slim_tx_mixer_get, slim_tx_mixer_put),
- SOC_SINGLE_EXT("SLIM TX2", SND_SOC_NOPM, TOMTOM_TX2, 1, 0,
- slim_tx_mixer_get, slim_tx_mixer_put),
- SOC_SINGLE_EXT("SLIM TX3", SND_SOC_NOPM, TOMTOM_TX3, 1, 0,
- slim_tx_mixer_get, slim_tx_mixer_put),
- SOC_SINGLE_EXT("SLIM TX4", SND_SOC_NOPM, TOMTOM_TX4, 1, 0,
- slim_tx_mixer_get, slim_tx_mixer_put),
- SOC_SINGLE_EXT("SLIM TX5", SND_SOC_NOPM, TOMTOM_TX5, 1, 0,
- slim_tx_mixer_get, slim_tx_mixer_put),
- SOC_SINGLE_EXT("SLIM TX6", SND_SOC_NOPM, TOMTOM_TX6, 1, 0,
- slim_tx_mixer_get, slim_tx_mixer_put),
- SOC_SINGLE_EXT("SLIM TX7", SND_SOC_NOPM, TOMTOM_TX7, 1, 0,
- slim_tx_mixer_get, slim_tx_mixer_put),
- SOC_SINGLE_EXT("SLIM TX8", SND_SOC_NOPM, TOMTOM_TX8, 1, 0,
- slim_tx_mixer_get, slim_tx_mixer_put),
- SOC_SINGLE_EXT("SLIM TX9", SND_SOC_NOPM, TOMTOM_TX9, 1, 0,
- slim_tx_mixer_get, slim_tx_mixer_put),
- SOC_SINGLE_EXT("SLIM TX10", SND_SOC_NOPM, TOMTOM_TX10, 1, 0,
- slim_tx_mixer_get, slim_tx_mixer_put),
-};
-
-static const struct snd_kcontrol_new aif2_cap_mixer[] = {
- SOC_SINGLE_EXT("SLIM TX1", SND_SOC_NOPM, TOMTOM_TX1, 1, 0,
- slim_tx_mixer_get, slim_tx_mixer_put),
- SOC_SINGLE_EXT("SLIM TX2", SND_SOC_NOPM, TOMTOM_TX2, 1, 0,
- slim_tx_mixer_get, slim_tx_mixer_put),
- SOC_SINGLE_EXT("SLIM TX3", SND_SOC_NOPM, TOMTOM_TX3, 1, 0,
- slim_tx_mixer_get, slim_tx_mixer_put),
- SOC_SINGLE_EXT("SLIM TX4", SND_SOC_NOPM, TOMTOM_TX4, 1, 0,
- slim_tx_mixer_get, slim_tx_mixer_put),
- SOC_SINGLE_EXT("SLIM TX5", SND_SOC_NOPM, TOMTOM_TX5, 1, 0,
- slim_tx_mixer_get, slim_tx_mixer_put),
- SOC_SINGLE_EXT("SLIM TX6", SND_SOC_NOPM, TOMTOM_TX6, 1, 0,
- slim_tx_mixer_get, slim_tx_mixer_put),
- SOC_SINGLE_EXT("SLIM TX7", SND_SOC_NOPM, TOMTOM_TX7, 1, 0,
- slim_tx_mixer_get, slim_tx_mixer_put),
- SOC_SINGLE_EXT("SLIM TX8", SND_SOC_NOPM, TOMTOM_TX8, 1, 0,
- slim_tx_mixer_get, slim_tx_mixer_put),
- SOC_SINGLE_EXT("SLIM TX9", SND_SOC_NOPM, TOMTOM_TX9, 1, 0,
- slim_tx_mixer_get, slim_tx_mixer_put),
- SOC_SINGLE_EXT("SLIM TX10", SND_SOC_NOPM, TOMTOM_TX10, 1, 0,
- slim_tx_mixer_get, slim_tx_mixer_put),
-};
-
-static const struct snd_kcontrol_new aif3_cap_mixer[] = {
- SOC_SINGLE_EXT("SLIM TX1", SND_SOC_NOPM, TOMTOM_TX1, 1, 0,
- slim_tx_mixer_get, slim_tx_mixer_put),
- SOC_SINGLE_EXT("SLIM TX2", SND_SOC_NOPM, TOMTOM_TX2, 1, 0,
- slim_tx_mixer_get, slim_tx_mixer_put),
- SOC_SINGLE_EXT("SLIM TX3", SND_SOC_NOPM, TOMTOM_TX3, 1, 0,
- slim_tx_mixer_get, slim_tx_mixer_put),
- SOC_SINGLE_EXT("SLIM TX4", SND_SOC_NOPM, TOMTOM_TX4, 1, 0,
- slim_tx_mixer_get, slim_tx_mixer_put),
- SOC_SINGLE_EXT("SLIM TX5", SND_SOC_NOPM, TOMTOM_TX5, 1, 0,
- slim_tx_mixer_get, slim_tx_mixer_put),
- SOC_SINGLE_EXT("SLIM TX6", SND_SOC_NOPM, TOMTOM_TX6, 1, 0,
- slim_tx_mixer_get, slim_tx_mixer_put),
- SOC_SINGLE_EXT("SLIM TX7", SND_SOC_NOPM, TOMTOM_TX7, 1, 0,
- slim_tx_mixer_get, slim_tx_mixer_put),
- SOC_SINGLE_EXT("SLIM TX8", SND_SOC_NOPM, TOMTOM_TX8, 1, 0,
- slim_tx_mixer_get, slim_tx_mixer_put),
- SOC_SINGLE_EXT("SLIM TX9", SND_SOC_NOPM, TOMTOM_TX9, 1, 0,
- slim_tx_mixer_get, slim_tx_mixer_put),
- SOC_SINGLE_EXT("SLIM TX10", SND_SOC_NOPM, TOMTOM_TX10, 1, 0,
- slim_tx_mixer_get, slim_tx_mixer_put),
-};
-
-static void tomtom_codec_enable_adc_block(struct snd_soc_codec *codec,
- int enable)
-{
- struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-
- pr_debug("%s %d\n", __func__, enable);
-
- if (enable) {
- tomtom->adc_count++;
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLK_OTHR_CTL,
- 0x2, 0x2);
- } else {
- tomtom->adc_count--;
- if (!tomtom->adc_count)
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLK_OTHR_CTL,
- 0x2, 0x0);
- }
-}
-
-static int tomtom_codec_enable_adc(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- struct tomtom_priv *priv = snd_soc_codec_get_drvdata(codec);
- u16 adc_reg;
- u16 tx_fe_clkdiv_reg;
- u8 tx_fe_clkdiv_mask;
- u8 init_bit_shift;
- u8 bit_pos;
-
- pr_debug("%s %d\n", __func__, event);
-
- switch (w->reg) {
- case TOMTOM_A_TX_1_GAIN:
- adc_reg = TOMTOM_A_TX_1_2_TEST_CTL;
- tx_fe_clkdiv_reg = TOMTOM_A_TX_1_2_TXFE_CLKDIV;
- tx_fe_clkdiv_mask = 0x0F;
- init_bit_shift = 7;
- bit_pos = ADC1_TXFE;
- break;
- case TOMTOM_A_TX_2_GAIN:
- adc_reg = TOMTOM_A_TX_1_2_TEST_CTL;
- tx_fe_clkdiv_reg = TOMTOM_A_TX_1_2_TXFE_CLKDIV;
- tx_fe_clkdiv_mask = 0xF0;
- init_bit_shift = 6;
- bit_pos = ADC2_TXFE;
- break;
- case TOMTOM_A_TX_3_GAIN:
- adc_reg = TOMTOM_A_TX_3_4_TEST_CTL;
- init_bit_shift = 7;
- tx_fe_clkdiv_reg = TOMTOM_A_TX_3_4_TXFE_CKDIV;
- tx_fe_clkdiv_mask = 0x0F;
- bit_pos = ADC3_TXFE;
- break;
- case TOMTOM_A_TX_4_GAIN:
- adc_reg = TOMTOM_A_TX_3_4_TEST_CTL;
- init_bit_shift = 6;
- tx_fe_clkdiv_reg = TOMTOM_A_TX_3_4_TXFE_CKDIV;
- tx_fe_clkdiv_mask = 0xF0;
- bit_pos = ADC4_TXFE;
- break;
- case TOMTOM_A_TX_5_GAIN:
- adc_reg = TOMTOM_A_TX_5_6_TEST_CTL;
- init_bit_shift = 7;
- tx_fe_clkdiv_reg = TOMTOM_A_TX_5_6_TXFE_CKDIV;
- tx_fe_clkdiv_mask = 0x0F;
- bit_pos = ADC5_TXFE;
- break;
- case TOMTOM_A_TX_6_GAIN:
- adc_reg = TOMTOM_A_TX_5_6_TEST_CTL;
- init_bit_shift = 6;
- tx_fe_clkdiv_reg = TOMTOM_A_TX_5_6_TXFE_CKDIV;
- tx_fe_clkdiv_mask = 0xF0;
- bit_pos = ADC6_TXFE;
- break;
- default:
- pr_err("%s: Error, invalid adc register\n", __func__);
- return -EINVAL;
- }
-
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- snd_soc_update_bits(codec, tx_fe_clkdiv_reg, tx_fe_clkdiv_mask,
- 0x0);
- set_bit(bit_pos, &priv->status_mask);
- tomtom_codec_enable_adc_block(codec, 1);
- snd_soc_update_bits(codec, adc_reg, 1 << init_bit_shift,
- 1 << init_bit_shift);
- break;
- case SND_SOC_DAPM_POST_PMU:
- snd_soc_update_bits(codec, adc_reg, 1 << init_bit_shift, 0x00);
- break;
- case SND_SOC_DAPM_POST_PMD:
- tomtom_codec_enable_adc_block(codec, 0);
- break;
- }
- return 0;
-}
-
-static int tomtom_codec_ext_clk_en(struct snd_soc_codec *codec,
- int enable, bool dapm)
-{
- struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-
- if (!tomtom->codec_ext_clk_en_cb) {
- dev_err(codec->dev,
- "%s: Invalid ext_clk_callback\n",
- __func__);
- return -EINVAL;
- }
-
- return tomtom->codec_ext_clk_en_cb(codec, enable, dapm);
-}
-
-static int __tomtom_mclk_enable(struct tomtom_priv *tomtom, int mclk_enable)
-{
- int ret = 0;
-
- WCD9XXX_BG_CLK_LOCK(&tomtom->resmgr);
- if (mclk_enable) {
- tomtom->ext_clk_users++;
- if (tomtom->ext_clk_users > 1)
- goto bg_clk_unlock;
- ret = clk_prepare_enable(tomtom->wcd_ext_clk);
- if (ret) {
- pr_err("%s: ext clk enable failed\n",
- __func__);
- tomtom->ext_clk_users--;
- goto bg_clk_unlock;
- }
- wcd9xxx_resmgr_get_bandgap(&tomtom->resmgr,
- WCD9XXX_BANDGAP_AUDIO_MODE);
- wcd9xxx_resmgr_get_clk_block(&tomtom->resmgr, WCD9XXX_CLK_MCLK);
- } else {
- tomtom->ext_clk_users--;
- if (tomtom->ext_clk_users == 0) {
- /* Put clock and BG */
- wcd9xxx_resmgr_put_clk_block(&tomtom->resmgr,
- WCD9XXX_CLK_MCLK);
- wcd9xxx_resmgr_put_bandgap(&tomtom->resmgr,
- WCD9XXX_BANDGAP_AUDIO_MODE);
- clk_disable_unprepare(tomtom->wcd_ext_clk);
- }
- }
-bg_clk_unlock:
- WCD9XXX_BG_CLK_UNLOCK(&tomtom->resmgr);
-
- return ret;
-}
-
-int tomtom_codec_mclk_enable(struct snd_soc_codec *codec,
- int enable, bool dapm)
-{
- struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-
- if (tomtom->wcd_ext_clk) {
- dev_dbg(codec->dev, "%s: mclk_enable = %u, dapm = %d\n",
- __func__, enable, dapm);
- return __tomtom_mclk_enable(tomtom, enable);
- } else if (tomtom->codec_ext_clk_en_cb)
- return tomtom_codec_ext_clk_en(codec, enable, dapm);
- else {
- dev_err(codec->dev,
- "%s: Cannot turn on MCLK\n",
- __func__);
- return -EINVAL;
- }
-}
-EXPORT_SYMBOL(tomtom_codec_mclk_enable);
-
-static int tomtom_codec_get_ext_clk_users(struct tomtom_priv *tomtom)
-{
- if (tomtom->wcd_ext_clk)
- return tomtom->ext_clk_users;
- else if (tomtom->codec_get_ext_clk_cnt)
- return tomtom->codec_get_ext_clk_cnt();
- else
- return 0;
-}
-
-/* tomtom_codec_internal_rco_ctrl( )
- * Make sure that BG_CLK_LOCK is not acquired. Exit if acquired to avoid
- * potential deadlock as ext_clk_en_cb() also tries to acquire the same
- * lock to enable MCLK for RCO calibration
- */
-static int tomtom_codec_internal_rco_ctrl(struct snd_soc_codec *codec,
- bool enable)
-{
- struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
- int ret = 0;
-
- if (enable) {
- if (wcd9xxx_resmgr_get_clk_type(&tomtom->resmgr) ==
- WCD9XXX_CLK_RCO) {
- WCD9XXX_BG_CLK_LOCK(&tomtom->resmgr);
- wcd9xxx_resmgr_get_clk_block(&tomtom->resmgr,
- WCD9XXX_CLK_RCO);
- WCD9XXX_BG_CLK_UNLOCK(&tomtom->resmgr);
- } else {
- tomtom_codec_mclk_enable(codec, true, false);
- WCD9XXX_BG_CLK_LOCK(&tomtom->resmgr);
- tomtom->resmgr.ext_clk_users =
- tomtom_codec_get_ext_clk_users(tomtom);
- wcd9xxx_resmgr_get_clk_block(&tomtom->resmgr,
- WCD9XXX_CLK_RCO);
- WCD9XXX_BG_CLK_UNLOCK(&tomtom->resmgr);
- tomtom_codec_mclk_enable(codec, false, false);
- }
-
- } else {
- WCD9XXX_BG_CLK_LOCK(&tomtom->resmgr);
- wcd9xxx_resmgr_put_clk_block(&tomtom->resmgr,
- WCD9XXX_CLK_RCO);
- WCD9XXX_BG_CLK_UNLOCK(&tomtom->resmgr);
- }
-
- return ret;
-}
-
-static int tomtom_codec_enable_aux_pga(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-
- pr_debug("%s: %d\n", __func__, event);
-
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- WCD9XXX_BG_CLK_LOCK(&tomtom->resmgr);
- wcd9xxx_resmgr_get_bandgap(&tomtom->resmgr,
- WCD9XXX_BANDGAP_AUDIO_MODE);
- WCD9XXX_BG_CLK_UNLOCK(&tomtom->resmgr);
- /* AUX PGA requires RCO or MCLK */
- tomtom_codec_internal_rco_ctrl(codec, true);
- WCD9XXX_BG_CLK_LOCK(&tomtom->resmgr);
- wcd9xxx_resmgr_enable_rx_bias(&tomtom->resmgr, 1);
- WCD9XXX_BG_CLK_UNLOCK(&tomtom->resmgr);
- break;
-
- case SND_SOC_DAPM_POST_PMD:
- WCD9XXX_BG_CLK_LOCK(&tomtom->resmgr);
- wcd9xxx_resmgr_enable_rx_bias(&tomtom->resmgr, 0);
- WCD9XXX_BG_CLK_UNLOCK(&tomtom->resmgr);
- tomtom_codec_internal_rco_ctrl(codec, false);
- WCD9XXX_BG_CLK_LOCK(&tomtom->resmgr);
- wcd9xxx_resmgr_put_bandgap(&tomtom->resmgr,
- WCD9XXX_BANDGAP_AUDIO_MODE);
- WCD9XXX_BG_CLK_UNLOCK(&tomtom->resmgr);
- break;
- }
- return 0;
-}
-
-static int tomtom_codec_enable_lineout(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
- u16 lineout_gain_reg;
-
- pr_debug("%s %d %s\n", __func__, event, w->name);
-
- switch (w->shift) {
- case 0:
- lineout_gain_reg = TOMTOM_A_RX_LINE_1_GAIN;
- break;
- case 1:
- lineout_gain_reg = TOMTOM_A_RX_LINE_2_GAIN;
- break;
- case 2:
- lineout_gain_reg = TOMTOM_A_RX_LINE_3_GAIN;
- break;
- case 3:
- lineout_gain_reg = TOMTOM_A_RX_LINE_4_GAIN;
- break;
- default:
- pr_err("%s: Error, incorrect lineout register value\n",
- __func__);
- return -EINVAL;
- }
-
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- snd_soc_update_bits(codec, lineout_gain_reg, 0x40, 0x40);
- break;
- case SND_SOC_DAPM_POST_PMU:
- wcd9xxx_clsh_fsm(codec, &tomtom->clsh_d,
- WCD9XXX_CLSH_STATE_LO,
- WCD9XXX_CLSH_REQ_ENABLE,
- WCD9XXX_CLSH_EVENT_POST_PA);
- pr_debug("%s: sleeping 5 ms after %s PA turn on\n",
- __func__, w->name);
- /* Wait for CnP time after PA enable */
- usleep_range(5000, 5100);
- break;
- case SND_SOC_DAPM_POST_PMD:
- snd_soc_update_bits(codec, lineout_gain_reg, 0x40, 0x00);
- pr_debug("%s: sleeping 5 ms after %s PA turn off\n",
- __func__, w->name);
- /* Wait for CnP time after PA disable */
- usleep_range(5000, 5100);
- break;
- }
- return 0;
-}
-
-static int tomtom_codec_enable_spk_pa(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
- u16 spk_drv_reg;
-
- pr_debug("%s: %d %s\n", __func__, event, w->name);
- if (strnstr(w->name, "SPK2 PA", sizeof("SPK2 PA")))
- spk_drv_reg = TOMTOM_A_SPKR_DRV2_EN;
- else
- spk_drv_reg = TOMTOM_A_SPKR_DRV1_EN;
-
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- tomtom->spkr_pa_widget_on = true;
- snd_soc_update_bits(codec, spk_drv_reg, 0x80, 0x80);
- break;
- case SND_SOC_DAPM_POST_PMD:
- tomtom->spkr_pa_widget_on = false;
- snd_soc_update_bits(codec, spk_drv_reg, 0x80, 0x00);
- break;
- }
- return 0;
-}
-
-static u8 tomtom_get_dmic_clk_val(struct snd_soc_codec *codec,
- u32 mclk_rate, u32 dmic_clk_rate)
-{
- u32 div_factor;
- u8 dmic_ctl_val;
-
- dev_dbg(codec->dev,
- "%s: mclk_rate = %d, dmic_sample_rate = %d\n",
- __func__, mclk_rate, dmic_clk_rate);
-
- /* Default value to return in case of error */
- if (mclk_rate == TOMTOM_MCLK_CLK_9P6MHZ)
- dmic_ctl_val = WCD9330_DMIC_CLK_DIV_2;
- else
- dmic_ctl_val = WCD9330_DMIC_CLK_DIV_3;
-
- if (dmic_clk_rate == 0) {
- dev_err(codec->dev,
- "%s: dmic_sample_rate cannot be 0\n",
- __func__);
- goto done;
- }
-
- div_factor = mclk_rate / dmic_clk_rate;
- switch (div_factor) {
- case 2:
- dmic_ctl_val = WCD9330_DMIC_CLK_DIV_2;
- break;
- case 3:
- dmic_ctl_val = WCD9330_DMIC_CLK_DIV_3;
- break;
- case 4:
- dmic_ctl_val = WCD9330_DMIC_CLK_DIV_4;
- break;
- case 6:
- dmic_ctl_val = WCD9330_DMIC_CLK_DIV_6;
- break;
- case 16:
- dmic_ctl_val = WCD9330_DMIC_CLK_DIV_16;
- break;
- default:
- dev_err(codec->dev,
- "%s: Invalid div_factor %u, clk_rate(%u), dmic_rate(%u)\n",
- __func__, div_factor, mclk_rate, dmic_clk_rate);
- break;
- }
-
-done:
- return dmic_ctl_val;
-}
-
-static int tomtom_codec_enable_dmic(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
- struct wcd9xxx_pdata *pdata = tomtom->resmgr.pdata;
- u8 dmic_clk_en;
- u16 dmic_clk_reg;
- s32 *dmic_clk_cnt;
- u8 dmic_rate_val, dmic_rate_shift;
- unsigned int dmic;
- int ret;
- char *wname;
-
- wname = strpbrk(w->name, "123456");
- if (!wname) {
- dev_err(codec->dev, "%s: widget not found\n", __func__);
- return -EINVAL;
- }
-
- ret = kstrtouint(wname, 10, &dmic);
- if (ret < 0) {
- pr_err("%s: Invalid DMIC line on the codec\n", __func__);
- return -EINVAL;
- }
-
- switch (dmic) {
- case 1:
- case 2:
- dmic_clk_en = 0x01;
- dmic_clk_cnt = &(tomtom->dmic_1_2_clk_cnt);
- dmic_clk_reg = TOMTOM_A_DMIC_B1_CTL;
- dmic_rate_shift = 5;
- pr_debug("%s() event %d DMIC%d dmic_1_2_clk_cnt %d\n",
- __func__, event, dmic, *dmic_clk_cnt);
-
- break;
-
- case 3:
- case 4:
- dmic_clk_en = 0x02;
- dmic_clk_cnt = &(tomtom->dmic_3_4_clk_cnt);
- dmic_clk_reg = TOMTOM_A_DMIC_B2_CTL;
- dmic_rate_shift = 1;
- pr_debug("%s() event %d DMIC%d dmic_3_4_clk_cnt %d\n",
- __func__, event, dmic, *dmic_clk_cnt);
- break;
-
- case 5:
- case 6:
- dmic_clk_en = 0x04;
- dmic_clk_cnt = &(tomtom->dmic_5_6_clk_cnt);
- dmic_clk_reg = TOMTOM_A_DMIC_B2_CTL;
- dmic_rate_shift = 4;
- pr_debug("%s() event %d DMIC%d dmic_5_6_clk_cnt %d\n",
- __func__, event, dmic, *dmic_clk_cnt);
-
- break;
-
- default:
- pr_err("%s: Invalid DMIC Selection\n", __func__);
- return -EINVAL;
- }
-
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
-
- dmic_rate_val =
- tomtom_get_dmic_clk_val(codec,
- pdata->mclk_rate,
- pdata->dmic_sample_rate);
-
- (*dmic_clk_cnt)++;
- if (*dmic_clk_cnt == 1) {
- snd_soc_update_bits(codec, dmic_clk_reg,
- 0x07 << dmic_rate_shift,
- dmic_rate_val << dmic_rate_shift);
- snd_soc_update_bits(codec, TOMTOM_A_DMIC_B1_CTL,
- dmic_clk_en, dmic_clk_en);
- }
-
- break;
- case SND_SOC_DAPM_POST_PMD:
-
- dmic_rate_val =
- tomtom_get_dmic_clk_val(codec,
- pdata->mclk_rate,
- pdata->mad_dmic_sample_rate);
- (*dmic_clk_cnt)--;
- if (*dmic_clk_cnt == 0) {
- snd_soc_update_bits(codec, TOMTOM_A_DMIC_B1_CTL,
- dmic_clk_en, 0);
- snd_soc_update_bits(codec, dmic_clk_reg,
- 0x07 << dmic_rate_shift,
- dmic_rate_val << dmic_rate_shift);
- }
- break;
- }
- return 0;
-}
-
-static int tomtom_codec_config_mad(struct snd_soc_codec *codec)
-{
- int ret = 0;
- const struct firmware *fw;
- struct firmware_cal *hwdep_cal = NULL;
- struct mad_audio_cal *mad_cal;
- const void *data;
- const char *filename = TOMTOM_MAD_AUDIO_FIRMWARE_PATH;
- struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
- size_t cal_size;
- int idx;
-
- pr_debug("%s: enter\n", __func__);
-
- if (!tomtom->fw_data) {
- dev_err(codec->dev, "%s: invalid cal data\n",
- __func__);
- return -ENODEV;
- }
-
- hwdep_cal = wcdcal_get_fw_cal(tomtom->fw_data, WCD9XXX_MAD_CAL);
- if (hwdep_cal) {
- data = hwdep_cal->data;
- cal_size = hwdep_cal->size;
- dev_dbg(codec->dev, "%s: using hwdep calibration\n",
- __func__);
- } else {
- ret = request_firmware(&fw, filename, codec->dev);
- if (ret != 0) {
- pr_err("Failed to acquire MAD firwmare data %s: %d\n",
- filename, ret);
- return -ENODEV;
- }
- if (!fw) {
- dev_err(codec->dev, "failed to get mad fw");
- return -ENODEV;
- }
- data = fw->data;
- cal_size = fw->size;
- dev_dbg(codec->dev, "%s: using request_firmware calibration\n",
- __func__);
- }
- if (cal_size < sizeof(struct mad_audio_cal)) {
- pr_err("%s: incorrect hwdep cal size %zu\n",
- __func__, cal_size);
- ret = -ENOMEM;
- goto err;
- }
-
- mad_cal = (struct mad_audio_cal *)(data);
- if (!mad_cal) {
- dev_err(codec->dev, "%s: Invalid calibration data\n",
- __func__);
- ret = -EINVAL;
- goto err;
- }
-
- snd_soc_write(codec, TOMTOM_A_CDC_MAD_MAIN_CTL_2,
- mad_cal->microphone_info.cycle_time);
- snd_soc_update_bits(codec, TOMTOM_A_CDC_MAD_MAIN_CTL_1, 0xFF << 3,
- ((uint16_t)mad_cal->microphone_info.settle_time)
- << 3);
-
- /* Audio */
- snd_soc_write(codec, TOMTOM_A_CDC_MAD_AUDIO_CTL_8,
- mad_cal->audio_info.rms_omit_samples);
- snd_soc_update_bits(codec, TOMTOM_A_CDC_MAD_AUDIO_CTL_1,
- 0x07 << 4, mad_cal->audio_info.rms_comp_time << 4);
- snd_soc_update_bits(codec, TOMTOM_A_CDC_MAD_AUDIO_CTL_2, 0x03 << 2,
- mad_cal->audio_info.detection_mechanism << 2);
- snd_soc_write(codec, TOMTOM_A_CDC_MAD_AUDIO_CTL_7,
- mad_cal->audio_info.rms_diff_threshold & 0x3F);
- snd_soc_write(codec, TOMTOM_A_CDC_MAD_AUDIO_CTL_5,
- mad_cal->audio_info.rms_threshold_lsb);
- snd_soc_write(codec, TOMTOM_A_CDC_MAD_AUDIO_CTL_6,
- mad_cal->audio_info.rms_threshold_msb);
-
- for (idx = 0; idx < ARRAY_SIZE(mad_cal->audio_info.iir_coefficients);
- idx++) {
- snd_soc_update_bits(codec, TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_PTR,
- 0x3F, idx);
- snd_soc_write(codec, TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_VAL,
- mad_cal->audio_info.iir_coefficients[idx]);
- dev_dbg(codec->dev, "%s:MAD Audio IIR Coef[%d] = 0X%x",
- __func__, idx,
- mad_cal->audio_info.iir_coefficients[idx]);
- }
-
- /* Beacon */
- snd_soc_write(codec, TOMTOM_A_CDC_MAD_BEACON_CTL_8,
- mad_cal->beacon_info.rms_omit_samples);
- snd_soc_update_bits(codec, TOMTOM_A_CDC_MAD_BEACON_CTL_1,
- 0x07 << 4, mad_cal->beacon_info.rms_comp_time);
- snd_soc_update_bits(codec, TOMTOM_A_CDC_MAD_BEACON_CTL_2, 0x03 << 2,
- mad_cal->beacon_info.detection_mechanism << 2);
- snd_soc_write(codec, TOMTOM_A_CDC_MAD_BEACON_CTL_7,
- mad_cal->beacon_info.rms_diff_threshold & 0x1F);
- snd_soc_write(codec, TOMTOM_A_CDC_MAD_BEACON_CTL_5,
- mad_cal->beacon_info.rms_threshold_lsb);
- snd_soc_write(codec, TOMTOM_A_CDC_MAD_BEACON_CTL_6,
- mad_cal->beacon_info.rms_threshold_msb);
-
- /* Ultrasound */
- snd_soc_update_bits(codec, TOMTOM_A_CDC_MAD_BEACON_CTL_1,
- 0x07 << 4, mad_cal->beacon_info.rms_comp_time);
- snd_soc_update_bits(codec, TOMTOM_A_CDC_MAD_ULTR_CTL_2, 0x03 << 2,
- mad_cal->ultrasound_info.detection_mechanism);
- snd_soc_write(codec, TOMTOM_A_CDC_MAD_ULTR_CTL_7,
- mad_cal->ultrasound_info.rms_diff_threshold & 0x1F);
- snd_soc_write(codec, TOMTOM_A_CDC_MAD_ULTR_CTL_5,
- mad_cal->ultrasound_info.rms_threshold_lsb);
- snd_soc_write(codec, TOMTOM_A_CDC_MAD_ULTR_CTL_6,
- mad_cal->ultrasound_info.rms_threshold_msb);
-
- /* Set MAD intr time to 20 msec */
- snd_soc_update_bits(codec, 0x4E, 0x01F, 0x13);
-
- pr_debug("%s: leave ret %d\n", __func__, ret);
-err:
- if (!hwdep_cal)
- release_firmware(fw);
- return ret;
-}
-
-static int tomtom_codec_enable_mad(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
- int ret = 0;
- u8 mad_micb, mad_cfilt;
- u16 mad_cfilt_reg;
-
- mad_micb = snd_soc_read(codec, TOMTOM_A_MAD_ANA_CTRL) & 0x07;
- switch (mad_micb) {
- case 1:
- mad_cfilt = tomtom->resmgr.pdata->micbias.bias1_cfilt_sel;
- break;
- case 2:
- mad_cfilt = tomtom->resmgr.pdata->micbias.bias2_cfilt_sel;
- break;
- case 3:
- mad_cfilt = tomtom->resmgr.pdata->micbias.bias3_cfilt_sel;
- break;
- case 4:
- mad_cfilt = tomtom->resmgr.pdata->micbias.bias4_cfilt_sel;
- break;
- default:
- dev_err(codec->dev,
- "%s: Invalid micbias selection 0x%x\n",
- __func__, mad_micb);
- return -EINVAL;
- }
-
- switch (mad_cfilt) {
- case WCD9XXX_CFILT1_SEL:
- mad_cfilt_reg = TOMTOM_A_MICB_CFILT_1_VAL;
- break;
- case WCD9XXX_CFILT2_SEL:
- mad_cfilt_reg = TOMTOM_A_MICB_CFILT_2_VAL;
- break;
- case WCD9XXX_CFILT3_SEL:
- mad_cfilt_reg = TOMTOM_A_MICB_CFILT_3_VAL;
- break;
- default:
- dev_err(codec->dev,
- "%s: invalid cfilt 0x%x for micb 0x%x\n",
- __func__, mad_cfilt, mad_micb);
- return -EINVAL;
- }
-
- dev_dbg(codec->dev,
- "%s event = %d, mad_cfilt_reg = 0x%x\n",
- __func__, event, mad_cfilt_reg);
-
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- /* Undo reset for MAD */
- snd_soc_update_bits(codec, TOMTOM_A_SVASS_CLKRST_CTL,
- 0x02, 0x00);
-
- ret = tomtom_codec_config_mad(codec);
- if (ret) {
- pr_err("%s: Failed to config MAD\n", __func__);
- break;
- }
-
- /* setup MAD micbias to VDDIO */
- snd_soc_update_bits(codec, mad_cfilt_reg,
- 0x02, 0x02);
- break;
- case SND_SOC_DAPM_POST_PMD:
- /* Reset the MAD block */
- snd_soc_update_bits(codec, TOMTOM_A_SVASS_CLKRST_CTL,
- 0x02, 0x02);
-
- /* Undo setup of MAD micbias to VDDIO */
- snd_soc_update_bits(codec, mad_cfilt_reg,
- 0x02, 0x00);
- }
- return ret;
-}
-
-static int tomtom_codec_enable_micbias(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
- u16 micb_int_reg = 0, micb_ctl_reg = 0;
- u8 cfilt_sel_val = 0;
- char *internal1_text = "Internal1";
- char *internal2_text = "Internal2";
- char *internal3_text = "Internal3";
- enum wcd9xxx_notify_event e_post_off, e_pre_on, e_post_on;
-
- pr_debug("%s: w->name %s event %d\n", __func__, w->name, event);
- if (strnstr(w->name, "MIC BIAS1", sizeof("MIC BIAS1"))) {
- micb_ctl_reg = TOMTOM_A_MICB_1_CTL;
- micb_int_reg = TOMTOM_A_MICB_1_INT_RBIAS;
- cfilt_sel_val = tomtom->resmgr.pdata->micbias.bias1_cfilt_sel;
- e_pre_on = WCD9XXX_EVENT_PRE_MICBIAS_1_ON;
- e_post_on = WCD9XXX_EVENT_POST_MICBIAS_1_ON;
- e_post_off = WCD9XXX_EVENT_POST_MICBIAS_1_OFF;
- } else if (strnstr(w->name, "MIC BIAS2", sizeof("MIC BIAS2"))) {
- micb_ctl_reg = TOMTOM_A_MICB_2_CTL;
- micb_int_reg = TOMTOM_A_MICB_2_INT_RBIAS;
- cfilt_sel_val = tomtom->resmgr.pdata->micbias.bias2_cfilt_sel;
- e_pre_on = WCD9XXX_EVENT_PRE_MICBIAS_2_ON;
- e_post_on = WCD9XXX_EVENT_POST_MICBIAS_2_ON;
- e_post_off = WCD9XXX_EVENT_POST_MICBIAS_2_OFF;
- } else if (strnstr(w->name, "MIC BIAS3", sizeof("MIC BIAS3"))) {
- micb_ctl_reg = TOMTOM_A_MICB_3_CTL;
- micb_int_reg = TOMTOM_A_MICB_3_INT_RBIAS;
- cfilt_sel_val = tomtom->resmgr.pdata->micbias.bias3_cfilt_sel;
- e_pre_on = WCD9XXX_EVENT_PRE_MICBIAS_3_ON;
- e_post_on = WCD9XXX_EVENT_POST_MICBIAS_3_ON;
- e_post_off = WCD9XXX_EVENT_POST_MICBIAS_3_OFF;
- } else if (strnstr(w->name, "MIC BIAS4", sizeof("MIC BIAS4"))) {
- micb_ctl_reg = TOMTOM_A_MICB_4_CTL;
- micb_int_reg = tomtom->resmgr.reg_addr->micb_4_int_rbias;
- cfilt_sel_val = tomtom->resmgr.pdata->micbias.bias4_cfilt_sel;
- e_pre_on = WCD9XXX_EVENT_PRE_MICBIAS_4_ON;
- e_post_on = WCD9XXX_EVENT_POST_MICBIAS_4_ON;
- e_post_off = WCD9XXX_EVENT_POST_MICBIAS_4_OFF;
- } else {
- pr_err("%s: Error, invalid micbias %s\n", __func__, w->name);
- return -EINVAL;
- }
-
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- /* Let MBHC module know so micbias switch to be off */
- wcd9xxx_resmgr_notifier_call(&tomtom->resmgr, e_pre_on);
-
- /* Get cfilt */
- wcd9xxx_resmgr_cfilt_get(&tomtom->resmgr, cfilt_sel_val);
-
- if (strnstr(w->name, internal1_text, 30))
- snd_soc_update_bits(codec, micb_int_reg, 0xE0, 0xE0);
- else if (strnstr(w->name, internal2_text, 30))
- snd_soc_update_bits(codec, micb_int_reg, 0x1C, 0x1C);
- else if (strnstr(w->name, internal3_text, 30))
- snd_soc_update_bits(codec, micb_int_reg, 0x3, 0x3);
- else
- /*
- * If not internal, make sure to write the
- * register to default value
- */
- snd_soc_write(codec, micb_int_reg, 0x24);
- if (tomtom->mbhc_started && micb_ctl_reg ==
- TOMTOM_A_MICB_2_CTL) {
- if (++tomtom->micb_2_users == 1) {
- if (tomtom->resmgr.pdata->
- micbias.bias2_is_headset_only)
- wcd9xxx_resmgr_add_cond_update_bits(
- &tomtom->resmgr,
- WCD9XXX_COND_HPH_MIC,
- micb_ctl_reg, w->shift,
- false);
- else
- snd_soc_update_bits(codec, micb_ctl_reg,
- 1 << w->shift,
- 1 << w->shift);
- }
- pr_debug("%s: micb_2_users %d\n", __func__,
- tomtom->micb_2_users);
- } else {
- snd_soc_update_bits(codec, micb_ctl_reg, 1 << w->shift,
- 1 << w->shift);
- }
- break;
- case SND_SOC_DAPM_POST_PMU:
- usleep_range(5000, 5100);
- /* Let MBHC module know so micbias is on */
- wcd9xxx_resmgr_notifier_call(&tomtom->resmgr, e_post_on);
- break;
- case SND_SOC_DAPM_POST_PMD:
- if (tomtom->mbhc_started && micb_ctl_reg ==
- TOMTOM_A_MICB_2_CTL) {
- if (--tomtom->micb_2_users == 0) {
- if (tomtom->resmgr.pdata->
- micbias.bias2_is_headset_only)
- wcd9xxx_resmgr_rm_cond_update_bits(
- &tomtom->resmgr,
- WCD9XXX_COND_HPH_MIC,
- micb_ctl_reg, 7, false);
- else
- snd_soc_update_bits(codec, micb_ctl_reg,
- 1 << w->shift, 0);
- }
- pr_debug("%s: micb_2_users %d\n", __func__,
- tomtom->micb_2_users);
- WARN(tomtom->micb_2_users < 0,
- "Unexpected micbias users %d\n",
- tomtom->micb_2_users);
- } else {
- snd_soc_update_bits(codec, micb_ctl_reg, 1 << w->shift,
- 0);
- }
-
- /* Let MBHC module know so micbias switch to be off */
- wcd9xxx_resmgr_notifier_call(&tomtom->resmgr, e_post_off);
-
- if (strnstr(w->name, internal1_text, 30))
- snd_soc_update_bits(codec, micb_int_reg, 0x80, 0x00);
- else if (strnstr(w->name, internal2_text, 30))
- snd_soc_update_bits(codec, micb_int_reg, 0x10, 0x00);
- else if (strnstr(w->name, internal3_text, 30))
- snd_soc_update_bits(codec, micb_int_reg, 0x2, 0x0);
-
- /* Put cfilt */
- wcd9xxx_resmgr_cfilt_put(&tomtom->resmgr, cfilt_sel_val);
- break;
- }
-
- return 0;
-}
-
-/* called under codec_resource_lock acquisition */
-static int tomtom_enable_mbhc_micbias(struct snd_soc_codec *codec, bool enable,
- enum wcd9xxx_micbias_num micb_num)
-{
- int rc;
-
- if (micb_num != MBHC_MICBIAS2) {
- dev_err(codec->dev, "%s: Unsupported micbias, micb_num=%d\n",
- __func__, micb_num);
- return -EINVAL;
- }
-
- if (enable)
- rc = snd_soc_dapm_force_enable_pin(
- snd_soc_codec_get_dapm(codec),
- DAPM_MICBIAS2_EXTERNAL_STANDALONE);
- else
- rc = snd_soc_dapm_disable_pin(snd_soc_codec_get_dapm(codec),
- DAPM_MICBIAS2_EXTERNAL_STANDALONE);
- if (!rc)
- snd_soc_dapm_sync(snd_soc_codec_get_dapm(codec));
- pr_debug("%s: leave ret %d\n", __func__, rc);
- return rc;
-}
-
-static void txfe_clkdiv_update(struct snd_soc_codec *codec)
-{
- struct tomtom_priv *priv = snd_soc_codec_get_drvdata(codec);
-
- if (test_bit(ADC1_TXFE, &priv->status_mask)) {
- snd_soc_update_bits(codec, TOMTOM_A_TX_1_2_TXFE_CLKDIV,
- 0x0F, 0x05);
- clear_bit(ADC1_TXFE, &priv->status_mask);
- }
- if (test_bit(ADC2_TXFE, &priv->status_mask)) {
- snd_soc_update_bits(codec, TOMTOM_A_TX_1_2_TXFE_CLKDIV,
- 0xF0, 0x50);
- clear_bit(ADC2_TXFE, &priv->status_mask);
- }
- if (test_bit(ADC3_TXFE, &priv->status_mask)) {
- snd_soc_update_bits(codec, TOMTOM_A_TX_3_4_TXFE_CKDIV,
- 0x0F, 0x05);
- clear_bit(ADC3_TXFE, &priv->status_mask);
- }
- if (test_bit(ADC4_TXFE, &priv->status_mask)) {
- snd_soc_update_bits(codec, TOMTOM_A_TX_3_4_TXFE_CKDIV,
- 0xF0, 0x50);
- clear_bit(ADC4_TXFE, &priv->status_mask);
- }
- if (test_bit(ADC5_TXFE, &priv->status_mask)) {
- snd_soc_update_bits(codec, TOMTOM_A_TX_5_6_TXFE_CKDIV,
- 0x0F, 0x05);
- clear_bit(ADC5_TXFE, &priv->status_mask);
- }
- if (test_bit(ADC6_TXFE, &priv->status_mask)) {
- snd_soc_update_bits(codec, TOMTOM_A_TX_5_6_TXFE_CKDIV,
- 0xF0, 0x50);
- clear_bit(ADC6_TXFE, &priv->status_mask);
- }
-}
-
-static void tx_hpf_corner_freq_callback(struct work_struct *work)
-{
- struct delayed_work *hpf_delayed_work;
- struct hpf_work *hpf_work;
- struct tomtom_priv *tomtom;
- struct snd_soc_codec *codec;
- u16 tx_mux_ctl_reg;
- u8 hpf_cut_of_freq;
-
- hpf_delayed_work = to_delayed_work(work);
- hpf_work = container_of(hpf_delayed_work, struct hpf_work, dwork);
- tomtom = hpf_work->tomtom;
- codec = hpf_work->tomtom->codec;
- hpf_cut_of_freq = hpf_work->tx_hpf_cut_of_freq;
-
- tx_mux_ctl_reg = TOMTOM_A_CDC_TX1_MUX_CTL +
- (hpf_work->decimator - 1) * 8;
-
- pr_debug("%s(): decimator %u hpf_cut_of_freq 0x%x\n", __func__,
- hpf_work->decimator, (unsigned int)hpf_cut_of_freq);
-
- /*
- * Restore TXFE ClkDiv registers to default.
- * If any of these registers are modified during analog
- * front-end enablement, they will be restored back to the
- * default
- */
- txfe_clkdiv_update(codec);
-
- snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x30, hpf_cut_of_freq << 4);
-}
-
-#define TX_MUX_CTL_CUT_OFF_FREQ_MASK 0x30
-#define CF_MIN_3DB_4HZ 0x0
-#define CF_MIN_3DB_75HZ 0x1
-#define CF_MIN_3DB_150HZ 0x2
-
-static int tomtom_codec_enable_dec(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- unsigned int decimator;
- char *dec_name = NULL;
- char *widget_name = NULL;
- char *temp;
- int ret = 0;
- u16 dec_reset_reg, tx_vol_ctl_reg, tx_mux_ctl_reg;
- u8 dec_hpf_cut_of_freq;
- int offset;
- char *dec;
-
- pr_debug("%s %d\n", __func__, event);
-
- widget_name = kstrndup(w->name, 15, GFP_KERNEL);
- if (!widget_name)
- return -ENOMEM;
- temp = widget_name;
-
- dec_name = strsep(&widget_name, " ");
- widget_name = temp;
- if (!dec_name) {
- pr_err("%s: Invalid decimator = %s\n", __func__, w->name);
- ret = -EINVAL;
- goto out;
- }
-
- dec = strpbrk(dec_name, "123456789");
- if (!dec) {
- dev_err(codec->dev, "%s: decimator index not found\n",
- __func__);
- ret = -EINVAL;
- goto out;
- }
-
- ret = kstrtouint(dec, 10, &decimator);
- if (ret < 0) {
- pr_err("%s: Invalid decimator = %s\n", __func__, dec_name);
- ret = -EINVAL;
- goto out;
- }
-
- pr_debug("%s(): widget = %s dec_name = %s decimator = %u\n", __func__,
- w->name, dec_name, decimator);
-
- if (w->reg == TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL) {
- dec_reset_reg = TOMTOM_A_CDC_CLK_TX_RESET_B1_CTL;
- offset = 0;
- } else if (w->reg == TOMTOM_A_CDC_CLK_TX_CLK_EN_B2_CTL) {
- dec_reset_reg = TOMTOM_A_CDC_CLK_TX_RESET_B2_CTL;
- offset = 8;
- } else {
- pr_err("%s: Error, incorrect dec\n", __func__);
- return -EINVAL;
- }
-
- tx_vol_ctl_reg = TOMTOM_A_CDC_TX1_VOL_CTL_CFG + 8 * (decimator - 1);
- tx_mux_ctl_reg = TOMTOM_A_CDC_TX1_MUX_CTL + 8 * (decimator - 1);
-
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
-
- /* Enableable TX digital mute */
- snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x01, 0x01);
-
- snd_soc_update_bits(codec, dec_reset_reg, 1 << w->shift,
- 1 << w->shift);
- snd_soc_update_bits(codec, dec_reset_reg, 1 << w->shift, 0x0);
-
- pr_debug("%s: decimator = %u, bypass = %d\n", __func__,
- decimator, tx_hpf_work[decimator - 1].tx_hpf_bypass);
- if (tx_hpf_work[decimator - 1].tx_hpf_bypass != true) {
- dec_hpf_cut_of_freq = snd_soc_read(codec,
- tx_mux_ctl_reg);
-
- dec_hpf_cut_of_freq = (dec_hpf_cut_of_freq & 0x30) >> 4;
-
- tx_hpf_work[decimator - 1].tx_hpf_cut_of_freq =
- dec_hpf_cut_of_freq;
-
- if (dec_hpf_cut_of_freq != CF_MIN_3DB_150HZ) {
-
- /* set cut of freq to CF_MIN_3DB_150HZ (0x1); */
- snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x30,
- CF_MIN_3DB_150HZ << 4);
- }
-
- /* enable HPF */
- snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x08, 0x00);
- } else
- /* bypass HPF */
- snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x08, 0x08);
-
- break;
-
- case SND_SOC_DAPM_POST_PMU:
-
- /* Disable TX digital mute */
- snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x01, 0x00);
-
- if ((tx_hpf_work[decimator - 1].tx_hpf_cut_of_freq !=
- CF_MIN_3DB_150HZ) &&
- (tx_hpf_work[decimator - 1].tx_hpf_bypass != true)) {
-
- schedule_delayed_work(&tx_hpf_work[decimator - 1].dwork,
- msecs_to_jiffies(300));
- }
- /* apply the digital gain after the decimator is enabled*/
- if ((w->shift + offset) < ARRAY_SIZE(tx_digital_gain_reg))
- snd_soc_write(codec,
- tx_digital_gain_reg[w->shift + offset],
- snd_soc_read(codec,
- tx_digital_gain_reg[w->shift + offset])
- );
-
- break;
-
- case SND_SOC_DAPM_PRE_PMD:
-
- snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x01, 0x01);
- cancel_delayed_work_sync(&tx_hpf_work[decimator - 1].dwork);
- break;
-
- case SND_SOC_DAPM_POST_PMD:
-
- snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x08, 0x08);
- snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x30,
- (tx_hpf_work[decimator - 1].tx_hpf_cut_of_freq) << 4);
-
- break;
- }
-out:
- kfree(widget_name);
- return ret;
-}
-
-static int tomtom_codec_enable_vdd_spkr(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- int ret = 0;
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- struct tomtom_priv *priv = snd_soc_codec_get_drvdata(codec);
-
- pr_debug("%s: %d %s\n", __func__, event, w->name);
-
- WARN_ONCE(!priv->spkdrv_reg, "SPKDRV supply %s isn't defined\n",
- WCD9XXX_VDD_SPKDRV_NAME);
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- if (priv->spkdrv_reg) {
- ret = regulator_enable(priv->spkdrv_reg);
- if (ret)
- pr_err("%s: Failed to enable spkdrv_reg %s\n",
- __func__, WCD9XXX_VDD_SPKDRV_NAME);
- }
- break;
- case SND_SOC_DAPM_POST_PMD:
- if (priv->spkdrv_reg) {
- ret = regulator_disable(priv->spkdrv_reg);
- if (ret)
- pr_err("%s: Failed to disable spkdrv_reg %s\n",
- __func__, WCD9XXX_VDD_SPKDRV_NAME);
- }
- break;
- }
-
- return ret;
-}
-
-static int tomtom_codec_enable_vdd_spkr2(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- int ret = 0;
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- struct tomtom_priv *priv = snd_soc_codec_get_drvdata(codec);
-
- pr_debug("%s: %d %s\n", __func__, event, w->name);
-
- /*
- * If on-demand voltage regulators of spkr1 and spkr2 has been derived
- * from same power rail then same on-demand voltage regulator can be
- * used by both spkr1 and spkr2, if a separate device tree entry has
- * not been defined for on-demand voltage regulator for spkr2.
- */
- if (!priv->spkdrv2_reg) {
- if (priv->spkdrv_reg) {
- priv->spkdrv2_reg = priv->spkdrv_reg;
- } else {
- WARN_ONCE(!priv->spkdrv2_reg,
- "SPKDRV2 supply %s isn't defined\n",
- WCD9XXX_VDD_SPKDRV2_NAME);
- return 0;
- }
- }
-
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- if (priv->spkdrv2_reg) {
- ret = regulator_enable(priv->spkdrv2_reg);
- if (ret)
- pr_err("%s: Failed to enable spkdrv2_reg %s ret:%d\n",
- __func__, WCD9XXX_VDD_SPKDRV2_NAME, ret);
- }
- break;
- case SND_SOC_DAPM_POST_PMD:
- if (priv->spkdrv2_reg) {
- ret = regulator_disable(priv->spkdrv2_reg);
- if (ret)
- pr_err("%s: Failed to disable spkdrv2_reg %s ret:%d\n",
- __func__, WCD9XXX_VDD_SPKDRV2_NAME, ret);
- }
- break;
- }
-
- return ret;
-}
-
-static int tomtom_codec_enable_interpolator(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-
- pr_debug("%s %d %s\n", __func__, event, w->name);
-
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- snd_soc_update_bits(codec, TOMTOM_A_CDC_CLK_RX_RESET_CTL,
- 1 << w->shift, 1 << w->shift);
- snd_soc_update_bits(codec, TOMTOM_A_CDC_CLK_RX_RESET_CTL,
- 1 << w->shift, 0x0);
- break;
- case SND_SOC_DAPM_POST_PMU:
- /* apply the digital gain after the interpolator is enabled*/
- if ((w->shift) < ARRAY_SIZE(rx_digital_gain_reg))
- snd_soc_write(codec,
- rx_digital_gain_reg[w->shift],
- snd_soc_read(codec,
- rx_digital_gain_reg[w->shift])
- );
- /* Check for Rx1 and Rx2 paths for uhqa mode update */
- if (w->shift == 0 || w->shift == 1)
- tomtom_update_uhqa_mode(codec, (1 << w->shift));
-
- break;
- }
- return 0;
-}
-
-/* called under codec_resource_lock acquisition */
-static int __tomtom_codec_enable_ldo_h(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- struct tomtom_priv *priv = snd_soc_codec_get_drvdata(codec);
-
- pr_debug("%s: enter\n", __func__);
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- /*
- * ldo_h_users is protected by tomtom->codec_mutex, don't need
- * additional mutex
- */
- if (++priv->ldo_h_users == 1) {
- WCD9XXX_BG_CLK_LOCK(&priv->resmgr);
- wcd9xxx_resmgr_get_bandgap(&priv->resmgr,
- WCD9XXX_BANDGAP_AUDIO_MODE);
- WCD9XXX_BG_CLK_UNLOCK(&priv->resmgr);
- tomtom_codec_internal_rco_ctrl(codec, true);
- snd_soc_update_bits(codec, TOMTOM_A_LDO_H_MODE_1,
- 1 << 7, 1 << 7);
- tomtom_codec_internal_rco_ctrl(codec, false);
- pr_debug("%s: ldo_h_users %d\n", __func__,
- priv->ldo_h_users);
- /* LDO enable requires 1ms to settle down */
- usleep_range(1000, 1100);
- }
- break;
- case SND_SOC_DAPM_POST_PMD:
- if (--priv->ldo_h_users == 0) {
- tomtom_codec_internal_rco_ctrl(codec, true);
- snd_soc_update_bits(codec, TOMTOM_A_LDO_H_MODE_1,
- 1 << 7, 0);
- tomtom_codec_internal_rco_ctrl(codec, false);
- WCD9XXX_BG_CLK_LOCK(&priv->resmgr);
- wcd9xxx_resmgr_put_bandgap(&priv->resmgr,
- WCD9XXX_BANDGAP_AUDIO_MODE);
- WCD9XXX_BG_CLK_UNLOCK(&priv->resmgr);
- pr_debug("%s: ldo_h_users %d\n", __func__,
- priv->ldo_h_users);
- }
- WARN(priv->ldo_h_users < 0, "Unexpected ldo_h users %d\n",
- priv->ldo_h_users);
- break;
- }
- pr_debug("%s: leave\n", __func__);
- return 0;
-}
-
-static int tomtom_codec_enable_ldo_h(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- int rc;
-
- rc = __tomtom_codec_enable_ldo_h(w, kcontrol, event);
- return rc;
-}
-
-static int tomtom_codec_enable_rx_bias(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-
- pr_debug("%s %d\n", __func__, event);
-
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- wcd9xxx_resmgr_enable_rx_bias(&tomtom->resmgr, 1);
- break;
- case SND_SOC_DAPM_POST_PMD:
- wcd9xxx_resmgr_enable_rx_bias(&tomtom->resmgr, 0);
- break;
- }
- return 0;
-}
-
-static int tomtom_codec_enable_anc(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- const char *filename;
- const struct firmware *fw;
- int i;
- int ret = 0;
- int num_anc_slots;
- struct wcd9xxx_anc_header *anc_head;
- struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
- struct firmware_cal *hwdep_cal = NULL;
- u32 anc_writes_size = 0;
- u32 anc_cal_size = 0;
- int anc_size_remaining;
- u32 *anc_ptr;
- u16 reg;
- u8 mask, val, old_val;
- size_t cal_size;
- const void *data;
-
- if (tomtom->anc_func == 0)
- return 0;
-
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- filename = "wcd9320/wcd9320_anc.bin";
-
- hwdep_cal = wcdcal_get_fw_cal(tomtom->fw_data, WCD9XXX_ANC_CAL);
- if (hwdep_cal) {
- data = hwdep_cal->data;
- cal_size = hwdep_cal->size;
- dev_dbg(codec->dev, "%s: using hwdep calibration\n",
- __func__);
- } else {
- ret = request_firmware(&fw, filename, codec->dev);
- if (ret != 0) {
- dev_err(codec->dev, "Failed to acquire ANC data: %d\n",
- ret);
- return -ENODEV;
- }
- if (!fw) {
- dev_err(codec->dev, "failed to get anc fw");
- return -ENODEV;
- }
- data = fw->data;
- cal_size = fw->size;
- dev_dbg(codec->dev, "%s: using request_firmware calibration\n",
- __func__);
- }
- if (cal_size < sizeof(struct wcd9xxx_anc_header)) {
- dev_err(codec->dev, "Not enough data\n");
- ret = -ENOMEM;
- goto err;
- }
- /* First number is the number of register writes */
- anc_head = (struct wcd9xxx_anc_header *)(data);
- anc_ptr = (u32 *)(data +
- sizeof(struct wcd9xxx_anc_header));
- anc_size_remaining = cal_size -
- sizeof(struct wcd9xxx_anc_header);
- num_anc_slots = anc_head->num_anc_slots;
-
- if (tomtom->anc_slot >= num_anc_slots) {
- dev_err(codec->dev, "Invalid ANC slot selected\n");
- ret = -EINVAL;
- goto err;
- }
- for (i = 0; i < num_anc_slots; i++) {
- if (anc_size_remaining < TOMTOM_PACKED_REG_SIZE) {
- dev_err(codec->dev, "Invalid register format\n");
- ret = -EINVAL;
- goto err;
- }
- anc_writes_size = (u32)(*anc_ptr);
- anc_size_remaining -= sizeof(u32);
- anc_ptr += 1;
-
- if (anc_writes_size * TOMTOM_PACKED_REG_SIZE
- > anc_size_remaining) {
- dev_err(codec->dev, "Invalid register format\n");
- ret = -EINVAL;
- goto err;
- }
-
- if (tomtom->anc_slot == i)
- break;
-
- anc_size_remaining -= (anc_writes_size *
- TOMTOM_PACKED_REG_SIZE);
- anc_ptr += anc_writes_size;
- }
- if (i == num_anc_slots) {
- dev_err(codec->dev, "Selected ANC slot not present\n");
- ret = -EINVAL;
- goto err;
- }
-
- i = 0;
- anc_cal_size = anc_writes_size;
- if (w->reg == TOMTOM_A_RX_HPH_L_DAC_CTL) {
- snd_soc_update_bits(codec,
- TOMTOM_A_CDC_CLK_ANC_RESET_CTL, 0x03, 0x03);
- anc_writes_size = (anc_cal_size/2);
- }
-
- if (w->reg == TOMTOM_A_RX_HPH_R_DAC_CTL) {
- snd_soc_update_bits(codec,
- TOMTOM_A_CDC_CLK_ANC_RESET_CTL, 0x0C, 0x0C);
- i = (anc_cal_size/2);
- anc_writes_size = anc_cal_size;
- }
-
- for (; i < anc_writes_size; i++) {
- TOMTOM_CODEC_UNPACK_ENTRY(anc_ptr[i], reg,
- mask, val);
- /*
- * ANC Soft reset register is ignored from ACDB
- * because ANC left soft reset bits will be called
- * while enabling ANC HPH Right DAC.
- */
- if ((reg == TOMTOM_A_CDC_CLK_ANC_RESET_CTL) &&
- ((w->reg == TOMTOM_A_RX_HPH_L_DAC_CTL) ||
- (w->reg == TOMTOM_A_RX_HPH_R_DAC_CTL))) {
- continue;
- }
- old_val = snd_soc_read(codec, reg);
- snd_soc_write(codec, reg, (old_val & ~mask) |
- (val & mask));
- }
- if (w->reg == TOMTOM_A_RX_HPH_L_DAC_CTL)
- snd_soc_update_bits(codec,
- TOMTOM_A_CDC_CLK_ANC_RESET_CTL, 0x03, 0x00);
-
- if (w->reg == TOMTOM_A_RX_HPH_R_DAC_CTL)
- snd_soc_update_bits(codec,
- TOMTOM_A_CDC_CLK_ANC_RESET_CTL, 0x0C, 0x00);
- if (!hwdep_cal)
- release_firmware(fw);
- txfe_clkdiv_update(codec);
- break;
- case SND_SOC_DAPM_PRE_PMD:
- msleep(40);
- snd_soc_update_bits(codec, TOMTOM_A_CDC_ANC1_B1_CTL, 0x01,
- 0x00);
- snd_soc_update_bits(codec, TOMTOM_A_CDC_ANC2_B1_CTL, 0x02,
- 0x00);
- msleep(20);
- snd_soc_write(codec, TOMTOM_A_CDC_CLK_ANC_RESET_CTL, 0x0F);
- snd_soc_write(codec, TOMTOM_A_CDC_CLK_ANC_CLK_EN_CTL, 0);
- snd_soc_write(codec, TOMTOM_A_CDC_CLK_ANC_RESET_CTL, 0x00);
- break;
- }
- return 0;
-err:
- if (!hwdep_cal)
- release_firmware(fw);
- return ret;
-}
-
-static int tomtom_hphl_dac_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec);
- uint32_t impedl, impedr;
- int ret = 0;
-
- pr_debug("%s %s %d\n", __func__, w->name, event);
-
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- if (tomtom_p->anc_func) {
- tomtom_codec_enable_anc(w, kcontrol, event);
- msleep(50);
- }
-
- if (!high_perf_mode && !tomtom_p->uhqa_mode) {
- wcd9xxx_clsh_fsm(codec, &tomtom_p->clsh_d,
- WCD9XXX_CLSH_STATE_HPHL,
- WCD9XXX_CLSH_REQ_ENABLE,
- WCD9XXX_CLSH_EVENT_PRE_DAC);
- } else {
- wcd9xxx_enable_high_perf_mode(codec, &tomtom_p->clsh_d,
- tomtom_p->uhqa_mode,
- WCD9XXX_CLSAB_STATE_HPHL,
- WCD9XXX_CLSAB_REQ_ENABLE);
- }
- ret = wcd9xxx_mbhc_get_impedance(&tomtom_p->mbhc,
- &impedl, &impedr);
- if (!ret)
- wcd9xxx_clsh_imped_config(codec, impedl);
- else
- dev_dbg(codec->dev, "%s: Failed to get mbhc impedance %d\n",
- __func__, ret);
- break;
- case SND_SOC_DAPM_POST_PMU:
- snd_soc_update_bits(codec, TOMTOM_A_CDC_RX1_B3_CTL, 0xBC, 0x94);
- snd_soc_update_bits(codec, TOMTOM_A_CDC_RX1_B4_CTL, 0x30, 0x10);
- break;
- case SND_SOC_DAPM_PRE_PMD:
- snd_soc_update_bits(codec, TOMTOM_A_CDC_RX1_B3_CTL, 0xBC, 0x00);
- snd_soc_update_bits(codec, TOMTOM_A_CDC_RX1_B4_CTL, 0x30, 0x00);
- break;
- case SND_SOC_DAPM_POST_PMD:
- if (!high_perf_mode && !tomtom_p->uhqa_mode) {
- wcd9xxx_clsh_fsm(codec, &tomtom_p->clsh_d,
- WCD9XXX_CLSH_STATE_HPHL,
- WCD9XXX_CLSH_REQ_DISABLE,
- WCD9XXX_CLSH_EVENT_POST_PA);
- } else {
- wcd9xxx_enable_high_perf_mode(codec, &tomtom_p->clsh_d,
- tomtom_p->uhqa_mode,
- WCD9XXX_CLSAB_STATE_HPHL,
- WCD9XXX_CLSAB_REQ_DISABLE);
- }
- break;
- }
- return 0;
-}
-
-static int tomtom_hphr_dac_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec);
-
- pr_debug("%s %s %d\n", __func__, w->name, event);
-
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- if (tomtom_p->anc_func) {
- tomtom_codec_enable_anc(w, kcontrol, event);
- msleep(50);
- }
-
- snd_soc_update_bits(codec, w->reg, 0x40, 0x40);
- if (!high_perf_mode && !tomtom_p->uhqa_mode) {
- wcd9xxx_clsh_fsm(codec, &tomtom_p->clsh_d,
- WCD9XXX_CLSH_STATE_HPHR,
- WCD9XXX_CLSH_REQ_ENABLE,
- WCD9XXX_CLSH_EVENT_PRE_DAC);
- } else {
- wcd9xxx_enable_high_perf_mode(codec, &tomtom_p->clsh_d,
- tomtom_p->uhqa_mode,
- WCD9XXX_CLSAB_STATE_HPHR,
- WCD9XXX_CLSAB_REQ_ENABLE);
- }
- break;
- case SND_SOC_DAPM_POST_PMU:
- snd_soc_update_bits(codec, TOMTOM_A_CDC_RX2_B3_CTL, 0xBC, 0x94);
- snd_soc_update_bits(codec, TOMTOM_A_CDC_RX2_B4_CTL, 0x30, 0x10);
- break;
- case SND_SOC_DAPM_PRE_PMD:
- snd_soc_update_bits(codec, TOMTOM_A_CDC_RX2_B3_CTL, 0xBC, 0x00);
- snd_soc_update_bits(codec, TOMTOM_A_CDC_RX2_B4_CTL, 0x30, 0x00);
- break;
- case SND_SOC_DAPM_POST_PMD:
- snd_soc_update_bits(codec, w->reg, 0x40, 0x00);
- if (!high_perf_mode && !tomtom_p->uhqa_mode) {
- wcd9xxx_clsh_fsm(codec, &tomtom_p->clsh_d,
- WCD9XXX_CLSH_STATE_HPHR,
- WCD9XXX_CLSH_REQ_DISABLE,
- WCD9XXX_CLSH_EVENT_POST_PA);
- } else {
- wcd9xxx_enable_high_perf_mode(codec, &tomtom_p->clsh_d,
- tomtom_p->uhqa_mode,
- WCD9XXX_CLSAB_STATE_HPHR,
- WCD9XXX_CLSAB_REQ_DISABLE);
- }
- break;
- }
- return 0;
-}
-
-static int tomtom_hph_pa_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
- enum wcd9xxx_notify_event e_pre_on, e_post_off;
- u8 req_clsh_state;
- u32 pa_settle_time = TOMTOM_HPH_PA_SETTLE_COMP_OFF;
-
- pr_debug("%s: %s event = %d\n", __func__, w->name, event);
- if (w->shift == 5) {
- e_pre_on = WCD9XXX_EVENT_PRE_HPHL_PA_ON;
- e_post_off = WCD9XXX_EVENT_POST_HPHL_PA_OFF;
- req_clsh_state = WCD9XXX_CLSH_STATE_HPHL;
- } else if (w->shift == 4) {
- e_pre_on = WCD9XXX_EVENT_PRE_HPHR_PA_ON;
- e_post_off = WCD9XXX_EVENT_POST_HPHR_PA_OFF;
- req_clsh_state = WCD9XXX_CLSH_STATE_HPHR;
- } else {
- pr_err("%s: Invalid w->shift %d\n", __func__, w->shift);
- return -EINVAL;
- }
-
- if (tomtom->comp_enabled[COMPANDER_1])
- pa_settle_time = TOMTOM_HPH_PA_SETTLE_COMP_ON;
-
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- set_bit(HPH_DELAY, &tomtom->status_mask);
- /* Let MBHC module know PA is turning on */
- wcd9xxx_resmgr_notifier_call(&tomtom->resmgr, e_pre_on);
- break;
-
- case SND_SOC_DAPM_POST_PMU:
- if (test_bit(HPH_DELAY, &tomtom->status_mask)) {
- /*
- * Make sure to wait 10ms after enabling HPHR_HPHL
- * in register 0x1AB
- */
- usleep_range(pa_settle_time, pa_settle_time + 1000);
- clear_bit(HPH_DELAY, &tomtom->status_mask);
- pr_debug("%s: sleep %d us after %s PA enable\n",
- __func__, pa_settle_time, w->name);
- }
- if (!high_perf_mode && !tomtom->uhqa_mode) {
- wcd9xxx_clsh_fsm(codec, &tomtom->clsh_d,
- req_clsh_state,
- WCD9XXX_CLSH_REQ_ENABLE,
- WCD9XXX_CLSH_EVENT_POST_PA);
- }
- break;
-
- case SND_SOC_DAPM_PRE_PMD:
- set_bit(HPH_DELAY, &tomtom->status_mask);
- break;
-
- case SND_SOC_DAPM_POST_PMD:
- /* Let MBHC module know PA turned off */
- wcd9xxx_resmgr_notifier_call(&tomtom->resmgr, e_post_off);
- if (test_bit(HPH_DELAY, &tomtom->status_mask)) {
- /*
- * Make sure to wait 10ms after disabling HPHR_HPHL
- * in register 0x1AB
- */
- usleep_range(pa_settle_time, pa_settle_time + 1000);
- clear_bit(HPH_DELAY, &tomtom->status_mask);
- pr_debug("%s: sleep %d us after %s PA disable\n",
- __func__, pa_settle_time, w->name);
- }
-
- break;
- }
- return 0;
-}
-
-static int tomtom_codec_enable_anc_hph(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- int ret = 0;
-
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- ret = tomtom_hph_pa_event(w, kcontrol, event);
- break;
- case SND_SOC_DAPM_POST_PMU:
- if ((snd_soc_read(codec, TOMTOM_A_RX_HPH_L_DAC_CTL) & 0x80) &&
- (snd_soc_read(codec, TOMTOM_A_RX_HPH_R_DAC_CTL)
- & 0x80)) {
- snd_soc_update_bits(codec,
- TOMTOM_A_RX_HPH_CNP_EN, 0x30, 0x30);
- msleep(30);
- }
- ret = tomtom_hph_pa_event(w, kcontrol, event);
- break;
- case SND_SOC_DAPM_PRE_PMD:
- if (w->shift == 5) {
- snd_soc_update_bits(codec,
- TOMTOM_A_RX_HPH_CNP_EN, 0x30, 0x00);
- msleep(40);
- snd_soc_update_bits(codec,
- TOMTOM_A_TX_7_MBHC_EN, 0x80, 00);
- ret |= tomtom_codec_enable_anc(w, kcontrol, event);
- }
- break;
- case SND_SOC_DAPM_POST_PMD:
- ret = tomtom_hph_pa_event(w, kcontrol, event);
- break;
- }
- return ret;
-}
-
-static const struct snd_soc_dapm_widget tomtom_dapm_i2s_widgets[] = {
- SND_SOC_DAPM_SUPPLY("RX_I2S_CLK", TOMTOM_A_CDC_CLK_RX_I2S_CTL,
- 4, 0, NULL, 0),
- SND_SOC_DAPM_SUPPLY("TX_I2S_CLK", TOMTOM_A_CDC_CLK_TX_I2S_CTL, 4,
- 0, NULL, 0),
-};
-
-static int tomtom_lineout_dac_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-
- pr_debug("%s %s %d\n", __func__, w->name, event);
-
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- wcd9xxx_clsh_fsm(codec, &tomtom->clsh_d,
- WCD9XXX_CLSH_STATE_LO,
- WCD9XXX_CLSH_REQ_ENABLE,
- WCD9XXX_CLSH_EVENT_PRE_DAC);
- snd_soc_update_bits(codec, w->reg, 0x40, 0x40);
- break;
-
- case SND_SOC_DAPM_POST_PMD:
- snd_soc_update_bits(codec, w->reg, 0x40, 0x00);
- wcd9xxx_clsh_fsm(codec, &tomtom->clsh_d,
- WCD9XXX_CLSH_STATE_LO,
- WCD9XXX_CLSH_REQ_DISABLE,
- WCD9XXX_CLSH_EVENT_POST_PA);
- break;
- }
- return 0;
-}
-
-static int tomtom_spk_dac_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-
- pr_debug("%s %s %d\n", __func__, w->name, event);
-
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLK_OTHR_CTL,
- 0x80, 0x80);
- break;
- case SND_SOC_DAPM_POST_PMD:
- if ((snd_soc_read(codec, w->reg) & 0x03) == 0)
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLK_OTHR_CTL,
- 0x80, 0x00);
- break;
- }
- return 0;
-}
-
-static const struct snd_soc_dapm_route audio_i2s_map[] = {
- {"SLIM RX1", NULL, "RX_I2S_CLK"},
- {"SLIM RX2", NULL, "RX_I2S_CLK"},
- {"SLIM RX3", NULL, "RX_I2S_CLK"},
- {"SLIM RX4", NULL, "RX_I2S_CLK"},
-
- {"SLIM TX7 MUX", NULL, "TX_I2S_CLK"},
- {"SLIM TX8 MUX", NULL, "TX_I2S_CLK"},
- {"SLIM TX9 MUX", NULL, "TX_I2S_CLK"},
- {"SLIM TX10 MUX", NULL, "TX_I2S_CLK"},
-
- {"RX_I2S_CLK", NULL, "CDC_I2S_RX_CONN"},
-};
-
-static const struct snd_soc_dapm_route audio_map[] = {
- /* SLIMBUS Connections */
- {"AIF1 CAP", NULL, "AIF1_CAP Mixer"},
- {"AIF2 CAP", NULL, "AIF2_CAP Mixer"},
- {"AIF3 CAP", NULL, "AIF3_CAP Mixer"},
-
- /* VI Feedback */
- {"AIF4 VI", NULL, "VIONOFF"},
- {"VIONOFF", "Switch", "VIINPUT"},
-
- /* MAD */
- {"MAD_SEL MUX", "SPE", "MAD_CPE_INPUT"},
- {"MAD_SEL MUX", "MSM", "MADINPUT"},
- {"MADONOFF", "Switch", "MAD_SEL MUX"},
- {"AIF4 MAD", NULL, "MADONOFF"},
-
- /* SLIM_MIXER("AIF1_CAP Mixer"),*/
- {"AIF1_CAP Mixer", "SLIM TX1", "SLIM TX1 MUX"},
- {"AIF1_CAP Mixer", "SLIM TX2", "SLIM TX2 MUX"},
- {"AIF1_CAP Mixer", "SLIM TX3", "SLIM TX3 MUX"},
- {"AIF1_CAP Mixer", "SLIM TX4", "SLIM TX4 MUX"},
- {"AIF1_CAP Mixer", "SLIM TX5", "SLIM TX5 MUX"},
- {"AIF1_CAP Mixer", "SLIM TX6", "SLIM TX6 MUX"},
- {"AIF1_CAP Mixer", "SLIM TX7", "SLIM TX7 MUX"},
- {"AIF1_CAP Mixer", "SLIM TX8", "SLIM TX8 MUX"},
- {"AIF1_CAP Mixer", "SLIM TX9", "SLIM TX9 MUX"},
- {"AIF1_CAP Mixer", "SLIM TX10", "SLIM TX10 MUX"},
- /* SLIM_MIXER("AIF2_CAP Mixer"),*/
- {"AIF2_CAP Mixer", "SLIM TX1", "SLIM TX1 MUX"},
- {"AIF2_CAP Mixer", "SLIM TX2", "SLIM TX2 MUX"},
- {"AIF2_CAP Mixer", "SLIM TX3", "SLIM TX3 MUX"},
- {"AIF2_CAP Mixer", "SLIM TX4", "SLIM TX4 MUX"},
- {"AIF2_CAP Mixer", "SLIM TX5", "SLIM TX5 MUX"},
- {"AIF2_CAP Mixer", "SLIM TX6", "SLIM TX6 MUX"},
- {"AIF2_CAP Mixer", "SLIM TX7", "SLIM TX7 MUX"},
- {"AIF2_CAP Mixer", "SLIM TX8", "SLIM TX8 MUX"},
- {"AIF2_CAP Mixer", "SLIM TX9", "SLIM TX9 MUX"},
- {"AIF2_CAP Mixer", "SLIM TX10", "SLIM TX10 MUX"},
- /* SLIM_MIXER("AIF3_CAP Mixer"),*/
- {"AIF3_CAP Mixer", "SLIM TX1", "SLIM TX1 MUX"},
- {"AIF3_CAP Mixer", "SLIM TX2", "SLIM TX2 MUX"},
- {"AIF3_CAP Mixer", "SLIM TX3", "SLIM TX3 MUX"},
- {"AIF3_CAP Mixer", "SLIM TX4", "SLIM TX4 MUX"},
- {"AIF3_CAP Mixer", "SLIM TX5", "SLIM TX5 MUX"},
- {"AIF3_CAP Mixer", "SLIM TX6", "SLIM TX6 MUX"},
- {"AIF3_CAP Mixer", "SLIM TX7", "SLIM TX7 MUX"},
- {"AIF3_CAP Mixer", "SLIM TX8", "SLIM TX8 MUX"},
- {"AIF3_CAP Mixer", "SLIM TX9", "SLIM TX9 MUX"},
- {"AIF3_CAP Mixer", "SLIM TX10", "SLIM TX10 MUX"},
-
- {"SLIM TX1 MUX", "DEC1", "DEC1 MUX"},
- {"SLIM TX1 MUX", "RMIX1", "RX1 MIX1"},
- {"SLIM TX1 MUX", "RMIX2", "RX2 MIX1"},
- {"SLIM TX1 MUX", "RMIX3", "RX3 MIX1"},
- {"SLIM TX1 MUX", "RMIX4", "RX4 MIX1"},
- {"SLIM TX1 MUX", "RMIX5", "RX5 MIX1"},
- {"SLIM TX1 MUX", "RMIX6", "RX6 MIX1"},
- {"SLIM TX1 MUX", "RMIX7", "RX7 MIX1"},
- {"SLIM TX1 MUX", "RMIX8", "RX8 MIX1"},
-
- {"SLIM TX2 MUX", "DEC2", "DEC2 MUX"},
- {"SLIM TX2 MUX", "RMIX1", "RX1 MIX1"},
- {"SLIM TX2 MUX", "RMIX2", "RX2 MIX1"},
- {"SLIM TX2 MUX", "RMIX3", "RX3 MIX1"},
- {"SLIM TX2 MUX", "RMIX4", "RX4 MIX1"},
- {"SLIM TX2 MUX", "RMIX5", "RX5 MIX1"},
- {"SLIM TX2 MUX", "RMIX6", "RX6 MIX1"},
- {"SLIM TX2 MUX", "RMIX7", "RX7 MIX1"},
- {"SLIM TX2 MUX", "RMIX8", "RX8 MIX1"},
-
- {"SLIM TX3 MUX", "DEC3", "DEC3 MUX"},
- {"SLIM TX3 MUX", "RMIX1", "RX1 MIX1"},
- {"SLIM TX3 MUX", "RMIX2", "RX2 MIX1"},
- {"SLIM TX3 MUX", "RMIX3", "RX3 MIX1"},
- {"SLIM TX3 MUX", "RMIX4", "RX4 MIX1"},
- {"SLIM TX3 MUX", "RMIX5", "RX5 MIX1"},
- {"SLIM TX3 MUX", "RMIX6", "RX6 MIX1"},
- {"SLIM TX3 MUX", "RMIX7", "RX7 MIX1"},
- {"SLIM TX3 MUX", "RMIX8", "RX8 MIX1"},
-
- {"SLIM TX4 MUX", "DEC4", "DEC4 MUX"},
- {"SLIM TX4 MUX", "RMIX1", "RX1 MIX1"},
- {"SLIM TX4 MUX", "RMIX2", "RX2 MIX1"},
- {"SLIM TX4 MUX", "RMIX3", "RX3 MIX1"},
- {"SLIM TX4 MUX", "RMIX4", "RX4 MIX1"},
- {"SLIM TX4 MUX", "RMIX5", "RX5 MIX1"},
- {"SLIM TX4 MUX", "RMIX6", "RX6 MIX1"},
- {"SLIM TX4 MUX", "RMIX7", "RX7 MIX1"},
- {"SLIM TX4 MUX", "RMIX8", "RX8 MIX1"},
-
- {"SLIM TX5 MUX", "DEC5", "DEC5 MUX"},
- {"SLIM TX5 MUX", "RMIX1", "RX1 MIX1"},
- {"SLIM TX5 MUX", "RMIX2", "RX2 MIX1"},
- {"SLIM TX5 MUX", "RMIX3", "RX3 MIX1"},
- {"SLIM TX5 MUX", "RMIX4", "RX4 MIX1"},
- {"SLIM TX5 MUX", "RMIX5", "RX5 MIX1"},
- {"SLIM TX5 MUX", "RMIX6", "RX6 MIX1"},
- {"SLIM TX5 MUX", "RMIX7", "RX7 MIX1"},
- {"SLIM TX5 MUX", "RMIX8", "RX8 MIX1"},
-
- {"SLIM TX6 MUX", "DEC6", "DEC6 MUX"},
-
- {"SLIM TX7 MUX", "DEC1", "DEC1 MUX"},
- {"SLIM TX7 MUX", "DEC2", "DEC2 MUX"},
- {"SLIM TX7 MUX", "DEC3", "DEC3 MUX"},
- {"SLIM TX7 MUX", "DEC4", "DEC4 MUX"},
- {"SLIM TX7 MUX", "DEC5", "DEC5 MUX"},
- {"SLIM TX7 MUX", "DEC6", "DEC6 MUX"},
- {"SLIM TX7 MUX", "DEC7", "DEC7 MUX"},
- {"SLIM TX7 MUX", "DEC8", "DEC8 MUX"},
- {"SLIM TX7 MUX", "DEC9", "DEC9 MUX"},
- {"SLIM TX7 MUX", "DEC10", "DEC10 MUX"},
- {"SLIM TX7 MUX", "RMIX1", "RX1 MIX1"},
- {"SLIM TX7 MUX", "RMIX2", "RX2 MIX1"},
- {"SLIM TX7 MUX", "RMIX3", "RX3 MIX1"},
- {"SLIM TX7 MUX", "RMIX4", "RX4 MIX1"},
- {"SLIM TX7 MUX", "RMIX5", "RX5 MIX1"},
- {"SLIM TX7 MUX", "RMIX6", "RX6 MIX1"},
- {"SLIM TX7 MUX", "RMIX7", "RX7 MIX1"},
-
- {"SLIM TX8 MUX", "DEC1", "DEC1 MUX"},
- {"SLIM TX8 MUX", "DEC2", "DEC2 MUX"},
- {"SLIM TX8 MUX", "DEC3", "DEC3 MUX"},
- {"SLIM TX8 MUX", "DEC4", "DEC4 MUX"},
- {"SLIM TX8 MUX", "DEC5", "DEC5 MUX"},
- {"SLIM TX8 MUX", "DEC6", "DEC6 MUX"},
- {"SLIM TX8 MUX", "DEC7", "DEC7 MUX"},
- {"SLIM TX8 MUX", "DEC8", "DEC8 MUX"},
- {"SLIM TX8 MUX", "DEC9", "DEC9 MUX"},
- {"SLIM TX8 MUX", "DEC10", "DEC10 MUX"},
-
- {"SLIM TX9 MUX", "DEC1", "DEC1 MUX"},
- {"SLIM TX9 MUX", "DEC2", "DEC2 MUX"},
- {"SLIM TX9 MUX", "DEC3", "DEC3 MUX"},
- {"SLIM TX9 MUX", "DEC4", "DEC4 MUX"},
- {"SLIM TX9 MUX", "DEC5", "DEC5 MUX"},
- {"SLIM TX9 MUX", "DEC6", "DEC6 MUX"},
- {"SLIM TX9 MUX", "DEC7", "DEC7 MUX"},
- {"SLIM TX9 MUX", "DEC8", "DEC8 MUX"},
- {"SLIM TX9 MUX", "DEC9", "DEC9 MUX"},
- {"SLIM TX9 MUX", "DEC10", "DEC10 MUX"},
-
- {"SLIM TX10 MUX", "DEC1", "DEC1 MUX"},
- {"SLIM TX10 MUX", "DEC2", "DEC2 MUX"},
- {"SLIM TX10 MUX", "DEC3", "DEC3 MUX"},
- {"SLIM TX10 MUX", "DEC4", "DEC4 MUX"},
- {"SLIM TX10 MUX", "DEC5", "DEC5 MUX"},
- {"SLIM TX10 MUX", "DEC6", "DEC6 MUX"},
- {"SLIM TX10 MUX", "DEC7", "DEC7 MUX"},
- {"SLIM TX10 MUX", "DEC8", "DEC8 MUX"},
- {"SLIM TX10 MUX", "DEC9", "DEC9 MUX"},
- {"SLIM TX10 MUX", "DEC10", "DEC10 MUX"},
-
- /* Earpiece (RX MIX1) */
- {"EAR", NULL, "EAR PA"},
- {"EAR PA", NULL, "EAR_PA_MIXER"},
- {"EAR_PA_MIXER", NULL, "DAC1"},
- {"DAC1", NULL, "RX_BIAS"},
-
- {"ANC EAR", NULL, "ANC EAR PA"},
- {"ANC EAR PA", NULL, "EAR_PA_MIXER"},
- {"ANC1 FB MUX", "EAR_HPH_L", "RX1 MIX2"},
- {"ANC1 FB MUX", "EAR_LINE_1", "RX2 MIX2"},
-
- /* Headset (RX MIX1 and RX MIX2) */
- {"HEADPHONE", NULL, "HPHL"},
- {"HEADPHONE", NULL, "HPHR"},
-
- {"HPHL", NULL, "HPHL_PA_MIXER"},
- {"HPHL_PA_MIXER", NULL, "HPHL DAC"},
- {"HPHL DAC", NULL, "RX_BIAS"},
-
- {"HPHR", NULL, "HPHR_PA_MIXER"},
- {"HPHR_PA_MIXER", NULL, "HPHR DAC"},
- {"HPHR DAC", NULL, "RX_BIAS"},
-
- {"ANC HEADPHONE", NULL, "ANC HPHL"},
- {"ANC HEADPHONE", NULL, "ANC HPHR"},
-
- {"ANC HPHL", NULL, "HPHL_PA_MIXER"},
- {"ANC HPHR", NULL, "HPHR_PA_MIXER"},
-
- {"ANC1 MUX", "ADC1", "ADC1"},
- {"ANC1 MUX", "ADC2", "ADC2"},
- {"ANC1 MUX", "ADC3", "ADC3"},
- {"ANC1 MUX", "ADC4", "ADC4"},
- {"ANC1 MUX", "ADC5", "ADC5"},
- {"ANC1 MUX", "ADC6", "ADC6"},
- {"ANC1 MUX", "DMIC1", "DMIC1"},
- {"ANC1 MUX", "DMIC2", "DMIC2"},
- {"ANC1 MUX", "DMIC3", "DMIC3"},
- {"ANC1 MUX", "DMIC4", "DMIC4"},
- {"ANC1 MUX", "DMIC5", "DMIC5"},
- {"ANC1 MUX", "DMIC6", "DMIC6"},
- {"ANC2 MUX", "ADC1", "ADC1"},
- {"ANC2 MUX", "ADC2", "ADC2"},
- {"ANC2 MUX", "ADC3", "ADC3"},
- {"ANC2 MUX", "ADC4", "ADC4"},
- {"ANC2 MUX", "ADC5", "ADC5"},
- {"ANC2 MUX", "ADC6", "ADC6"},
- {"ANC2 MUX", "DMIC1", "DMIC1"},
- {"ANC2 MUX", "DMIC2", "DMIC2"},
- {"ANC2 MUX", "DMIC3", "DMIC3"},
- {"ANC2 MUX", "DMIC4", "DMIC4"},
- {"ANC2 MUX", "DMIC5", "DMIC5"},
- {"ANC2 MUX", "DMIC6", "DMIC6"},
-
- {"ANC HPHR", NULL, "CDC_CONN"},
-
- {"DAC1", "Switch", "CLASS_H_DSM MUX"},
- {"HPHL DAC", "Switch", "CLASS_H_DSM MUX"},
- {"HPHR DAC", NULL, "RX2 CHAIN"},
-
- {"LINEOUT1", NULL, "LINEOUT1 PA"},
- {"LINEOUT2", NULL, "LINEOUT2 PA"},
- {"LINEOUT3", NULL, "LINEOUT3 PA"},
- {"LINEOUT4", NULL, "LINEOUT4 PA"},
- {"SPK_OUT", NULL, "SPK PA"},
- {"SPK_OUT", NULL, "SPK2 PA"},
-
- {"LINEOUT1 PA", NULL, "LINEOUT1_PA_MIXER"},
- {"LINEOUT1_PA_MIXER", NULL, "LINEOUT1 DAC"},
-
- {"LINEOUT2 PA", NULL, "LINEOUT2_PA_MIXER"},
- {"LINEOUT2_PA_MIXER", NULL, "LINEOUT2 DAC"},
-
- {"LINEOUT3 PA", NULL, "LINEOUT3_PA_MIXER"},
- {"LINEOUT3_PA_MIXER", NULL, "LINEOUT3 DAC"},
-
- {"LINEOUT4 PA", NULL, "LINEOUT4_PA_MIXER"},
- {"LINEOUT4_PA_MIXER", NULL, "LINEOUT4 DAC"},
-
- {"LINEOUT1 DAC", NULL, "RX3 MIX1"},
-
- {"RDAC5 MUX", "DEM3_INV", "RX3 MIX1"},
- {"RDAC5 MUX", "DEM4", "RX4 MIX1"},
-
- {"LINEOUT3 DAC", NULL, "RDAC5 MUX"},
-
- {"LINEOUT2 DAC", NULL, "RX5 MIX1"},
-
- {"RDAC7 MUX", "DEM5_INV", "RX5 MIX1"},
- {"RDAC7 MUX", "DEM6", "RX6 MIX1"},
-
- {"LINEOUT4 DAC", NULL, "RDAC7 MUX"},
-
- {"SPK PA", NULL, "SPK DAC"},
- {"SPK DAC", NULL, "RX7 MIX2"},
- {"SPK DAC", NULL, "VDD_SPKDRV"},
-
- {"SPK2 PA", NULL, "SPK2 DAC"},
- {"SPK2 DAC", NULL, "RX8 MIX1"},
- {"SPK2 DAC", NULL, "VDD_SPKDRV2"},
-
- {"CLASS_H_DSM MUX", "DSM_HPHL_RX1", "RX1 CHAIN"},
-
- {"RX1 INTERP", NULL, "RX1 MIX2"},
- {"RX1 CHAIN", NULL, "RX1 INTERP"},
- {"RX2 INTERP", NULL, "RX2 MIX2"},
- {"RX2 CHAIN", NULL, "RX2 INTERP"},
- {"RX1 MIX2", NULL, "ANC1 MUX"},
- {"RX2 MIX2", NULL, "ANC2 MUX"},
-
- {"LINEOUT1 DAC", NULL, "RX_BIAS"},
- {"LINEOUT2 DAC", NULL, "RX_BIAS"},
- {"LINEOUT3 DAC", NULL, "RX_BIAS"},
- {"LINEOUT4 DAC", NULL, "RX_BIAS"},
- {"SPK DAC", NULL, "RX_BIAS"},
- {"SPK2 DAC", NULL, "RX_BIAS"},
-
- {"RX7 MIX1", NULL, "COMP0_CLK"},
- {"RX8 MIX1", NULL, "COMP0_CLK"},
- {"RX1 MIX1", NULL, "COMP1_CLK"},
- {"RX2 MIX1", NULL, "COMP1_CLK"},
- {"RX3 MIX1", NULL, "COMP2_CLK"},
- {"RX5 MIX1", NULL, "COMP2_CLK"},
-
- {"RX1 MIX1", NULL, "RX1 MIX1 INP1"},
- {"RX1 MIX1", NULL, "RX1 MIX1 INP2"},
- {"RX1 MIX1", NULL, "RX1 MIX1 INP3"},
- {"RX2 MIX1", NULL, "RX2 MIX1 INP1"},
- {"RX2 MIX1", NULL, "RX2 MIX1 INP2"},
- {"RX3 MIX1", NULL, "RX3 MIX1 INP1"},
- {"RX3 MIX1", NULL, "RX3 MIX1 INP2"},
- {"RX4 MIX1", NULL, "RX4 MIX1 INP1"},
- {"RX4 MIX1", NULL, "RX4 MIX1 INP2"},
- {"RX5 MIX1", NULL, "RX5 MIX1 INP1"},
- {"RX5 MIX1", NULL, "RX5 MIX1 INP2"},
- {"RX6 MIX1", NULL, "RX6 MIX1 INP1"},
- {"RX6 MIX1", NULL, "RX6 MIX1 INP2"},
- {"RX7 MIX1", NULL, "RX7 MIX1 INP1"},
- {"RX7 MIX1", NULL, "RX7 MIX1 INP2"},
- {"RX8 MIX1", NULL, "RX8 MIX1 INP1"},
- {"RX8 MIX1", NULL, "RX8 MIX1 INP2"},
- {"RX1 MIX2", NULL, "RX1 MIX1"},
- {"RX1 MIX2", NULL, "RX1 MIX2 INP1"},
- {"RX1 MIX2", NULL, "RX1 MIX2 INP2"},
- {"RX2 MIX2", NULL, "RX2 MIX1"},
- {"RX2 MIX2", NULL, "RX2 MIX2 INP1"},
- {"RX2 MIX2", NULL, "RX2 MIX2 INP2"},
- {"RX7 MIX2", NULL, "RX7 MIX1"},
- {"RX7 MIX2", NULL, "RX7 MIX2 INP1"},
- {"RX7 MIX2", NULL, "RX7 MIX2 INP2"},
-
- /* SLIM_MUX("AIF1_PB", "AIF1 PB"),*/
- {"SLIM RX1 MUX", "AIF1_PB", "AIF1 PB"},
- {"SLIM RX2 MUX", "AIF1_PB", "AIF1 PB"},
- {"SLIM RX3 MUX", "AIF1_PB", "AIF1 PB"},
- {"SLIM RX4 MUX", "AIF1_PB", "AIF1 PB"},
- {"SLIM RX5 MUX", "AIF1_PB", "AIF1 PB"},
- {"SLIM RX6 MUX", "AIF1_PB", "AIF1 PB"},
- {"SLIM RX7 MUX", "AIF1_PB", "AIF1 PB"},
- {"SLIM RX8 MUX", "AIF1_PB", "AIF1 PB"},
- /* SLIM_MUX("AIF2_PB", "AIF2 PB"),*/
- {"SLIM RX1 MUX", "AIF2_PB", "AIF2 PB"},
- {"SLIM RX2 MUX", "AIF2_PB", "AIF2 PB"},
- {"SLIM RX3 MUX", "AIF2_PB", "AIF2 PB"},
- {"SLIM RX4 MUX", "AIF2_PB", "AIF2 PB"},
- {"SLIM RX5 MUX", "AIF2_PB", "AIF2 PB"},
- {"SLIM RX6 MUX", "AIF2_PB", "AIF2 PB"},
- {"SLIM RX7 MUX", "AIF2_PB", "AIF2 PB"},
- {"SLIM RX8 MUX", "AIF2_PB", "AIF2 PB"},
- /* SLIM_MUX("AIF3_PB", "AIF3 PB"),*/
- {"SLIM RX1 MUX", "AIF3_PB", "AIF3 PB"},
- {"SLIM RX2 MUX", "AIF3_PB", "AIF3 PB"},
- {"SLIM RX3 MUX", "AIF3_PB", "AIF3 PB"},
- {"SLIM RX4 MUX", "AIF3_PB", "AIF3 PB"},
- {"SLIM RX5 MUX", "AIF3_PB", "AIF3 PB"},
- {"SLIM RX6 MUX", "AIF3_PB", "AIF3 PB"},
- {"SLIM RX7 MUX", "AIF3_PB", "AIF3 PB"},
- {"SLIM RX8 MUX", "AIF3_PB", "AIF3 PB"},
-
- {"SLIM RX1", NULL, "SLIM RX1 MUX"},
- {"SLIM RX2", NULL, "SLIM RX2 MUX"},
- {"SLIM RX3", NULL, "SLIM RX3 MUX"},
- {"SLIM RX4", NULL, "SLIM RX4 MUX"},
- {"SLIM RX5", NULL, "SLIM RX5 MUX"},
- {"SLIM RX6", NULL, "SLIM RX6 MUX"},
- {"SLIM RX7", NULL, "SLIM RX7 MUX"},
- {"SLIM RX8", NULL, "SLIM RX8 MUX"},
-
- {"RX1 MIX1 INP1", "RX1", "SLIM RX1"},
- {"RX1 MIX1 INP1", "RX2", "SLIM RX2"},
- {"RX1 MIX1 INP1", "RX3", "SLIM RX3"},
- {"RX1 MIX1 INP1", "RX4", "SLIM RX4"},
- {"RX1 MIX1 INP1", "RX5", "SLIM RX5"},
- {"RX1 MIX1 INP1", "RX6", "SLIM RX6"},
- {"RX1 MIX1 INP1", "RX7", "SLIM RX7"},
- {"RX1 MIX1 INP1", "IIR1", "IIR1"},
- {"RX1 MIX1 INP1", "IIR2", "IIR2"},
- {"RX1 MIX1 INP2", "RX1", "SLIM RX1"},
- {"RX1 MIX1 INP2", "RX2", "SLIM RX2"},
- {"RX1 MIX1 INP2", "RX3", "SLIM RX3"},
- {"RX1 MIX1 INP2", "RX4", "SLIM RX4"},
- {"RX1 MIX1 INP2", "RX5", "SLIM RX5"},
- {"RX1 MIX1 INP2", "RX6", "SLIM RX6"},
- {"RX1 MIX1 INP2", "RX7", "SLIM RX7"},
- {"RX1 MIX1 INP2", "IIR1", "IIR1"},
- {"RX1 MIX1 INP2", "IIR2", "IIR2"},
- {"RX1 MIX1 INP3", "RX1", "SLIM RX1"},
- {"RX1 MIX1 INP3", "RX2", "SLIM RX2"},
- {"RX1 MIX1 INP3", "RX3", "SLIM RX3"},
- {"RX1 MIX1 INP3", "RX4", "SLIM RX4"},
- {"RX1 MIX1 INP3", "RX5", "SLIM RX5"},
- {"RX1 MIX1 INP3", "RX6", "SLIM RX6"},
- {"RX1 MIX1 INP3", "RX7", "SLIM RX7"},
- {"RX2 MIX1 INP1", "RX1", "SLIM RX1"},
- {"RX2 MIX1 INP1", "RX2", "SLIM RX2"},
- {"RX2 MIX1 INP1", "RX3", "SLIM RX3"},
- {"RX2 MIX1 INP1", "RX4", "SLIM RX4"},
- {"RX2 MIX1 INP1", "RX5", "SLIM RX5"},
- {"RX2 MIX1 INP1", "RX6", "SLIM RX6"},
- {"RX2 MIX1 INP1", "RX7", "SLIM RX7"},
- {"RX2 MIX1 INP1", "IIR1", "IIR1"},
- {"RX2 MIX1 INP1", "IIR2", "IIR2"},
- {"RX2 MIX1 INP2", "RX1", "SLIM RX1"},
- {"RX2 MIX1 INP2", "RX2", "SLIM RX2"},
- {"RX2 MIX1 INP2", "RX3", "SLIM RX3"},
- {"RX2 MIX1 INP2", "RX4", "SLIM RX4"},
- {"RX2 MIX1 INP2", "RX5", "SLIM RX5"},
- {"RX2 MIX1 INP2", "RX6", "SLIM RX6"},
- {"RX2 MIX1 INP2", "RX7", "SLIM RX7"},
- {"RX2 MIX1 INP2", "IIR1", "IIR1"},
- {"RX2 MIX1 INP2", "IIR2", "IIR2"},
- {"RX3 MIX1 INP1", "RX1", "SLIM RX1"},
- {"RX3 MIX1 INP1", "RX2", "SLIM RX2"},
- {"RX3 MIX1 INP1", "RX3", "SLIM RX3"},
- {"RX3 MIX1 INP1", "RX4", "SLIM RX4"},
- {"RX3 MIX1 INP1", "RX5", "SLIM RX5"},
- {"RX3 MIX1 INP1", "RX6", "SLIM RX6"},
- {"RX3 MIX1 INP1", "RX7", "SLIM RX7"},
- {"RX3 MIX1 INP1", "IIR1", "IIR1"},
- {"RX3 MIX1 INP1", "IIR2", "IIR2"},
- {"RX3 MIX1 INP2", "RX1", "SLIM RX1"},
- {"RX3 MIX1 INP2", "RX2", "SLIM RX2"},
- {"RX3 MIX1 INP2", "RX3", "SLIM RX3"},
- {"RX3 MIX1 INP2", "RX4", "SLIM RX4"},
- {"RX3 MIX1 INP2", "RX5", "SLIM RX5"},
- {"RX3 MIX1 INP2", "RX6", "SLIM RX6"},
- {"RX3 MIX1 INP2", "RX7", "SLIM RX7"},
- {"RX3 MIX1 INP2", "IIR1", "IIR1"},
- {"RX3 MIX1 INP2", "IIR2", "IIR2"},
- {"RX4 MIX1 INP1", "RX1", "SLIM RX1"},
- {"RX4 MIX1 INP1", "RX2", "SLIM RX2"},
- {"RX4 MIX1 INP1", "RX3", "SLIM RX3"},
- {"RX4 MIX1 INP1", "RX4", "SLIM RX4"},
- {"RX4 MIX1 INP1", "RX5", "SLIM RX5"},
- {"RX4 MIX1 INP1", "RX6", "SLIM RX6"},
- {"RX4 MIX1 INP1", "RX7", "SLIM RX7"},
- {"RX4 MIX1 INP1", "IIR1", "IIR1"},
- {"RX4 MIX1 INP1", "IIR2", "IIR2"},
- {"RX4 MIX1 INP2", "RX1", "SLIM RX1"},
- {"RX4 MIX1 INP2", "RX2", "SLIM RX2"},
- {"RX4 MIX1 INP2", "RX3", "SLIM RX3"},
- {"RX4 MIX1 INP2", "RX5", "SLIM RX5"},
- {"RX4 MIX1 INP2", "RX4", "SLIM RX4"},
- {"RX4 MIX1 INP2", "RX6", "SLIM RX6"},
- {"RX4 MIX1 INP2", "RX7", "SLIM RX7"},
- {"RX4 MIX1 INP2", "IIR1", "IIR1"},
- {"RX4 MIX1 INP2", "IIR2", "IIR2"},
- {"RX5 MIX1 INP1", "RX1", "SLIM RX1"},
- {"RX5 MIX1 INP1", "RX2", "SLIM RX2"},
- {"RX5 MIX1 INP1", "RX3", "SLIM RX3"},
- {"RX5 MIX1 INP1", "RX4", "SLIM RX4"},
- {"RX5 MIX1 INP1", "RX5", "SLIM RX5"},
- {"RX5 MIX1 INP1", "RX6", "SLIM RX6"},
- {"RX5 MIX1 INP1", "RX7", "SLIM RX7"},
- {"RX5 MIX1 INP1", "IIR1", "IIR1"},
- {"RX5 MIX1 INP1", "IIR2", "IIR2"},
- {"RX5 MIX1 INP2", "RX1", "SLIM RX1"},
- {"RX5 MIX1 INP2", "RX2", "SLIM RX2"},
- {"RX5 MIX1 INP2", "RX3", "SLIM RX3"},
- {"RX5 MIX1 INP2", "RX4", "SLIM RX4"},
- {"RX5 MIX1 INP2", "RX5", "SLIM RX5"},
- {"RX5 MIX1 INP2", "RX6", "SLIM RX6"},
- {"RX5 MIX1 INP2", "RX7", "SLIM RX7"},
- {"RX5 MIX1 INP2", "IIR1", "IIR1"},
- {"RX5 MIX1 INP2", "IIR2", "IIR2"},
- {"RX6 MIX1 INP1", "RX1", "SLIM RX1"},
- {"RX6 MIX1 INP1", "RX2", "SLIM RX2"},
- {"RX6 MIX1 INP1", "RX3", "SLIM RX3"},
- {"RX6 MIX1 INP1", "RX4", "SLIM RX4"},
- {"RX6 MIX1 INP1", "RX5", "SLIM RX5"},
- {"RX6 MIX1 INP1", "RX6", "SLIM RX6"},
- {"RX6 MIX1 INP1", "RX7", "SLIM RX7"},
- {"RX6 MIX1 INP1", "IIR1", "IIR1"},
- {"RX6 MIX1 INP1", "IIR2", "IIR2"},
- {"RX6 MIX1 INP2", "RX1", "SLIM RX1"},
- {"RX6 MIX1 INP2", "RX2", "SLIM RX2"},
- {"RX6 MIX1 INP2", "RX3", "SLIM RX3"},
- {"RX6 MIX1 INP2", "RX4", "SLIM RX4"},
- {"RX6 MIX1 INP2", "RX5", "SLIM RX5"},
- {"RX6 MIX1 INP2", "RX6", "SLIM RX6"},
- {"RX6 MIX1 INP2", "RX7", "SLIM RX7"},
- {"RX6 MIX1 INP2", "IIR1", "IIR1"},
- {"RX6 MIX1 INP2", "IIR2", "IIR2"},
- {"RX7 MIX1 INP1", "RX1", "SLIM RX1"},
- {"RX7 MIX1 INP1", "RX2", "SLIM RX2"},
- {"RX7 MIX1 INP1", "RX3", "SLIM RX3"},
- {"RX7 MIX1 INP1", "RX4", "SLIM RX4"},
- {"RX7 MIX1 INP1", "RX5", "SLIM RX5"},
- {"RX7 MIX1 INP1", "RX6", "SLIM RX6"},
- {"RX7 MIX1 INP1", "RX7", "SLIM RX7"},
- {"RX7 MIX1 INP1", "IIR1", "IIR1"},
- {"RX7 MIX1 INP1", "IIR2", "IIR2"},
- {"RX7 MIX1 INP2", "RX1", "SLIM RX1"},
- {"RX7 MIX1 INP2", "RX2", "SLIM RX2"},
- {"RX7 MIX1 INP2", "RX3", "SLIM RX3"},
- {"RX7 MIX1 INP2", "RX4", "SLIM RX4"},
- {"RX7 MIX1 INP2", "RX5", "SLIM RX5"},
- {"RX7 MIX1 INP2", "RX6", "SLIM RX6"},
- {"RX7 MIX1 INP2", "RX7", "SLIM RX7"},
- {"RX7 MIX1 INP2", "IIR1", "IIR1"},
- {"RX7 MIX1 INP2", "IIR2", "IIR2"},
- {"RX8 MIX1 INP1", "RX1", "SLIM RX1"},
- {"RX8 MIX1 INP1", "RX2", "SLIM RX2"},
- {"RX8 MIX1 INP1", "RX3", "SLIM RX3"},
- {"RX8 MIX1 INP1", "RX4", "SLIM RX4"},
- {"RX8 MIX1 INP1", "RX5", "SLIM RX5"},
- {"RX8 MIX1 INP1", "RX6", "SLIM RX6"},
- {"RX8 MIX1 INP1", "RX7", "SLIM RX7"},
- {"RX8 MIX1 INP1", "RX8", "SLIM RX8"},
- {"RX8 MIX1 INP1", "IIR1", "IIR1"},
- {"RX8 MIX1 INP1", "IIR2", "IIR2"},
- {"RX8 MIX1 INP2", "RX1", "SLIM RX1"},
- {"RX8 MIX1 INP2", "RX2", "SLIM RX2"},
- {"RX8 MIX1 INP2", "RX3", "SLIM RX3"},
- {"RX8 MIX1 INP2", "RX4", "SLIM RX4"},
- {"RX8 MIX1 INP2", "RX5", "SLIM RX5"},
- {"RX8 MIX1 INP2", "RX6", "SLIM RX6"},
- {"RX8 MIX1 INP2", "RX7", "SLIM RX7"},
- {"RX8 MIX1 INP2", "RX8", "SLIM RX8"},
- {"RX8 MIX1 INP2", "IIR1", "IIR1"},
- {"RX8 MIX1 INP2", "IIR2", "IIR2"},
-
- /* IIR1, IIR2 inputs to Second RX Mixer on RX1, RX2 and RX7 chains. */
- {"RX1 MIX2 INP1", "IIR1", "IIR1"},
- {"RX1 MIX2 INP2", "IIR1", "IIR1"},
- {"RX2 MIX2 INP1", "IIR1", "IIR1"},
- {"RX2 MIX2 INP2", "IIR1", "IIR1"},
- {"RX7 MIX2 INP1", "IIR1", "IIR1"},
- {"RX7 MIX2 INP2", "IIR1", "IIR1"},
- {"RX1 MIX2 INP1", "IIR2", "IIR2"},
- {"RX1 MIX2 INP2", "IIR2", "IIR2"},
- {"RX2 MIX2 INP1", "IIR2", "IIR2"},
- {"RX2 MIX2 INP2", "IIR2", "IIR2"},
- {"RX7 MIX2 INP1", "IIR2", "IIR2"},
- {"RX7 MIX2 INP2", "IIR2", "IIR2"},
-
- /* Decimator Inputs */
- {"DEC1 MUX", "DMIC1", "DMIC1"},
- {"DEC1 MUX", "ADC6", "ADC6"},
- {"DEC1 MUX", NULL, "CDC_CONN"},
- {"DEC2 MUX", "DMIC2", "DMIC2"},
- {"DEC2 MUX", "ADC5", "ADC5"},
- {"DEC2 MUX", NULL, "CDC_CONN"},
- {"DEC3 MUX", "DMIC3", "DMIC3"},
- {"DEC3 MUX", "ADC4", "ADC4"},
- {"DEC3 MUX", NULL, "CDC_CONN"},
- {"DEC4 MUX", "DMIC4", "DMIC4"},
- {"DEC4 MUX", "ADC3", "ADC3"},
- {"DEC4 MUX", NULL, "CDC_CONN"},
- {"DEC5 MUX", "DMIC5", "DMIC5"},
- {"DEC5 MUX", "ADC2", "ADC2"},
- {"DEC5 MUX", NULL, "CDC_CONN"},
- {"DEC6 MUX", "DMIC6", "DMIC6"},
- {"DEC6 MUX", "ADC1", "ADC1"},
- {"DEC6 MUX", NULL, "CDC_CONN"},
- {"DEC7 MUX", "DMIC1", "DMIC1"},
- {"DEC7 MUX", "DMIC6", "DMIC6"},
- {"DEC7 MUX", "ADC1", "ADC1"},
- {"DEC7 MUX", "ADC6", "ADC6"},
- {"DEC7 MUX", "ANC1_FB", "ANC1 MUX"},
- {"DEC7 MUX", "ANC2_FB", "ANC2 MUX"},
- {"DEC7 MUX", NULL, "CDC_CONN"},
- {"DEC8 MUX", "DMIC2", "DMIC2"},
- {"DEC8 MUX", "DMIC5", "DMIC5"},
- {"DEC8 MUX", "ADC2", "ADC2"},
- {"DEC8 MUX", "ADC5", "ADC5"},
- {"DEC8 MUX", "ANC1_FB", "ANC1 MUX"},
- {"DEC8 MUX", "ANC2_FB", "ANC2 MUX"},
- {"DEC8 MUX", NULL, "CDC_CONN"},
- {"DEC9 MUX", "DMIC4", "DMIC4"},
- {"DEC9 MUX", "DMIC5", "DMIC5"},
- {"DEC9 MUX", "ADC2", "ADC2"},
- {"DEC9 MUX", "ADC3", "ADC3"},
- {"DEC9 MUX", "ANC1_FB", "ANC1 MUX"},
- {"DEC9 MUX", "ANC2_FB", "ANC2 MUX"},
- {"DEC9 MUX", NULL, "CDC_CONN"},
- {"DEC10 MUX", "DMIC3", "DMIC3"},
- {"DEC10 MUX", "DMIC6", "DMIC6"},
- {"DEC10 MUX", "ADC1", "ADC1"},
- {"DEC10 MUX", "ADC4", "ADC4"},
- {"DEC10 MUX", "ANC1_FB", "ANC1 MUX"},
- {"DEC10 MUX", "ANC2_FB", "ANC2 MUX"},
- {"DEC10 MUX", NULL, "CDC_CONN"},
-
- /* ADC Connections */
- {"ADC1", NULL, "AMIC1"},
- {"ADC2", NULL, "AMIC2"},
- {"ADC3", NULL, "AMIC3"},
- {"ADC4", NULL, "AMIC4"},
- {"ADC5", NULL, "AMIC5"},
- {"ADC6", NULL, "AMIC6"},
-
- /* AUX PGA Connections */
- {"EAR_PA_MIXER", "AUX_PGA_L Switch", "AUX_PGA_Left"},
- {"HPHL_PA_MIXER", "AUX_PGA_L Switch", "AUX_PGA_Left"},
- {"HPHR_PA_MIXER", "AUX_PGA_R Switch", "AUX_PGA_Right"},
- {"LINEOUT1_PA_MIXER", "AUX_PGA_L Switch", "AUX_PGA_Left"},
- {"LINEOUT2_PA_MIXER", "AUX_PGA_R Switch", "AUX_PGA_Right"},
- {"LINEOUT3_PA_MIXER", "AUX_PGA_L Switch", "AUX_PGA_Left"},
- {"LINEOUT4_PA_MIXER", "AUX_PGA_R Switch", "AUX_PGA_Right"},
- {"AUX_PGA_Left", NULL, "AMIC5"},
- {"AUX_PGA_Right", NULL, "AMIC6"},
-
- {"IIR1", NULL, "IIR1 INP1 MUX"},
- {"IIR1 INP1 MUX", "DEC1", "DEC1 MUX"},
- {"IIR1 INP1 MUX", "DEC2", "DEC2 MUX"},
- {"IIR1 INP1 MUX", "DEC3", "DEC3 MUX"},
- {"IIR1 INP1 MUX", "DEC4", "DEC4 MUX"},
- {"IIR1 INP1 MUX", "DEC5", "DEC5 MUX"},
- {"IIR1 INP1 MUX", "DEC6", "DEC6 MUX"},
- {"IIR1 INP1 MUX", "DEC7", "DEC7 MUX"},
- {"IIR1 INP1 MUX", "DEC8", "DEC8 MUX"},
- {"IIR1 INP1 MUX", "DEC9", "DEC9 MUX"},
- {"IIR1 INP1 MUX", "DEC10", "DEC10 MUX"},
- {"IIR1 INP1 MUX", "RX1", "SLIM RX1"},
- {"IIR1 INP1 MUX", "RX2", "SLIM RX2"},
- {"IIR1 INP1 MUX", "RX3", "SLIM RX3"},
- {"IIR1 INP1 MUX", "RX4", "SLIM RX4"},
- {"IIR1 INP1 MUX", "RX5", "SLIM RX5"},
- {"IIR1 INP1 MUX", "RX6", "SLIM RX6"},
- {"IIR1 INP1 MUX", "RX7", "SLIM RX7"},
-
- {"IIR2", NULL, "IIR2 INP1 MUX"},
- {"IIR2 INP1 MUX", "DEC1", "DEC1 MUX"},
- {"IIR2 INP1 MUX", "DEC2", "DEC2 MUX"},
- {"IIR2 INP1 MUX", "DEC3", "DEC3 MUX"},
- {"IIR2 INP1 MUX", "DEC4", "DEC4 MUX"},
- {"IIR2 INP1 MUX", "DEC5", "DEC5 MUX"},
- {"IIR2 INP1 MUX", "DEC6", "DEC6 MUX"},
- {"IIR2 INP1 MUX", "DEC7", "DEC7 MUX"},
- {"IIR2 INP1 MUX", "DEC8", "DEC8 MUX"},
- {"IIR2 INP1 MUX", "DEC9", "DEC9 MUX"},
- {"IIR2 INP1 MUX", "DEC10", "DEC10 MUX"},
- {"IIR2 INP1 MUX", "RX1", "SLIM RX1"},
- {"IIR2 INP1 MUX", "RX2", "SLIM RX2"},
- {"IIR2 INP1 MUX", "RX3", "SLIM RX3"},
- {"IIR2 INP1 MUX", "RX4", "SLIM RX4"},
- {"IIR2 INP1 MUX", "RX5", "SLIM RX5"},
- {"IIR2 INP1 MUX", "RX6", "SLIM RX6"},
- {"IIR2 INP1 MUX", "RX7", "SLIM RX7"},
-
- {"IIR1", NULL, "IIR1 INP2 MUX"},
- {"IIR1 INP2 MUX", "DEC1", "DEC1 MUX"},
- {"IIR1 INP2 MUX", "DEC2", "DEC2 MUX"},
- {"IIR1 INP2 MUX", "DEC3", "DEC3 MUX"},
- {"IIR1 INP2 MUX", "DEC4", "DEC4 MUX"},
- {"IIR1 INP2 MUX", "DEC5", "DEC5 MUX"},
- {"IIR1 INP2 MUX", "DEC6", "DEC6 MUX"},
- {"IIR1 INP2 MUX", "DEC7", "DEC7 MUX"},
- {"IIR1 INP2 MUX", "DEC8", "DEC8 MUX"},
- {"IIR1 INP2 MUX", "DEC9", "DEC9 MUX"},
- {"IIR1 INP2 MUX", "DEC10", "DEC10 MUX"},
- {"IIR1 INP2 MUX", "RX1", "SLIM RX1"},
- {"IIR1 INP2 MUX", "RX2", "SLIM RX2"},
- {"IIR1 INP2 MUX", "RX3", "SLIM RX3"},
- {"IIR1 INP2 MUX", "RX4", "SLIM RX4"},
- {"IIR1 INP2 MUX", "RX5", "SLIM RX5"},
- {"IIR1 INP2 MUX", "RX6", "SLIM RX6"},
- {"IIR1 INP2 MUX", "RX7", "SLIM RX7"},
-
- {"IIR2", NULL, "IIR2 INP2 MUX"},
- {"IIR2 INP2 MUX", "DEC1", "DEC1 MUX"},
- {"IIR2 INP2 MUX", "DEC2", "DEC2 MUX"},
- {"IIR2 INP2 MUX", "DEC3", "DEC3 MUX"},
- {"IIR2 INP2 MUX", "DEC4", "DEC4 MUX"},
- {"IIR2 INP2 MUX", "DEC5", "DEC5 MUX"},
- {"IIR2 INP2 MUX", "DEC6", "DEC6 MUX"},
- {"IIR2 INP2 MUX", "DEC7", "DEC7 MUX"},
- {"IIR2 INP2 MUX", "DEC8", "DEC8 MUX"},
- {"IIR2 INP2 MUX", "DEC9", "DEC9 MUX"},
- {"IIR2 INP2 MUX", "DEC10", "DEC10 MUX"},
- {"IIR2 INP2 MUX", "RX1", "SLIM RX1"},
- {"IIR2 INP2 MUX", "RX2", "SLIM RX2"},
- {"IIR2 INP2 MUX", "RX3", "SLIM RX3"},
- {"IIR2 INP2 MUX", "RX4", "SLIM RX4"},
- {"IIR2 INP2 MUX", "RX5", "SLIM RX5"},
- {"IIR2 INP2 MUX", "RX6", "SLIM RX6"},
- {"IIR2 INP2 MUX", "RX7", "SLIM RX7"},
-
- {"IIR1", NULL, "IIR1 INP3 MUX"},
- {"IIR1 INP3 MUX", "DEC1", "DEC1 MUX"},
- {"IIR1 INP3 MUX", "DEC2", "DEC2 MUX"},
- {"IIR1 INP3 MUX", "DEC3", "DEC3 MUX"},
- {"IIR1 INP3 MUX", "DEC4", "DEC4 MUX"},
- {"IIR1 INP3 MUX", "DEC5", "DEC5 MUX"},
- {"IIR1 INP3 MUX", "DEC6", "DEC6 MUX"},
- {"IIR1 INP3 MUX", "DEC7", "DEC7 MUX"},
- {"IIR1 INP3 MUX", "DEC8", "DEC8 MUX"},
- {"IIR1 INP3 MUX", "DEC9", "DEC9 MUX"},
- {"IIR1 INP3 MUX", "DEC10", "DEC10 MUX"},
- {"IIR1 INP3 MUX", "RX1", "SLIM RX1"},
- {"IIR1 INP3 MUX", "RX2", "SLIM RX2"},
- {"IIR1 INP3 MUX", "RX3", "SLIM RX3"},
- {"IIR1 INP3 MUX", "RX4", "SLIM RX4"},
- {"IIR1 INP3 MUX", "RX5", "SLIM RX5"},
- {"IIR1 INP3 MUX", "RX6", "SLIM RX6"},
- {"IIR1 INP3 MUX", "RX7", "SLIM RX7"},
-
- {"IIR2", NULL, "IIR2 INP3 MUX"},
- {"IIR2 INP3 MUX", "DEC1", "DEC1 MUX"},
- {"IIR2 INP3 MUX", "DEC2", "DEC2 MUX"},
- {"IIR2 INP3 MUX", "DEC3", "DEC3 MUX"},
- {"IIR2 INP3 MUX", "DEC4", "DEC4 MUX"},
- {"IIR2 INP3 MUX", "DEC5", "DEC5 MUX"},
- {"IIR2 INP3 MUX", "DEC6", "DEC6 MUX"},
- {"IIR2 INP3 MUX", "DEC7", "DEC7 MUX"},
- {"IIR2 INP3 MUX", "DEC8", "DEC8 MUX"},
- {"IIR2 INP3 MUX", "DEC9", "DEC9 MUX"},
- {"IIR2 INP3 MUX", "DEC10", "DEC10 MUX"},
- {"IIR2 INP3 MUX", "RX1", "SLIM RX1"},
- {"IIR2 INP3 MUX", "RX2", "SLIM RX2"},
- {"IIR2 INP3 MUX", "RX3", "SLIM RX3"},
- {"IIR2 INP3 MUX", "RX4", "SLIM RX4"},
- {"IIR2 INP3 MUX", "RX5", "SLIM RX5"},
- {"IIR2 INP3 MUX", "RX6", "SLIM RX6"},
- {"IIR2 INP3 MUX", "RX7", "SLIM RX7"},
-
- {"IIR1", NULL, "IIR1 INP4 MUX"},
- {"IIR1 INP4 MUX", "DEC1", "DEC1 MUX"},
- {"IIR1 INP4 MUX", "DEC2", "DEC2 MUX"},
- {"IIR1 INP4 MUX", "DEC3", "DEC3 MUX"},
- {"IIR1 INP4 MUX", "DEC4", "DEC4 MUX"},
- {"IIR1 INP4 MUX", "DEC5", "DEC5 MUX"},
- {"IIR1 INP4 MUX", "DEC6", "DEC6 MUX"},
- {"IIR1 INP4 MUX", "DEC7", "DEC7 MUX"},
- {"IIR1 INP4 MUX", "DEC8", "DEC8 MUX"},
- {"IIR1 INP4 MUX", "DEC9", "DEC9 MUX"},
- {"IIR1 INP4 MUX", "DEC10", "DEC10 MUX"},
- {"IIR1 INP4 MUX", "RX1", "SLIM RX1"},
- {"IIR1 INP4 MUX", "RX2", "SLIM RX2"},
- {"IIR1 INP4 MUX", "RX3", "SLIM RX3"},
- {"IIR1 INP4 MUX", "RX4", "SLIM RX4"},
- {"IIR1 INP4 MUX", "RX5", "SLIM RX5"},
- {"IIR1 INP4 MUX", "RX6", "SLIM RX6"},
- {"IIR1 INP4 MUX", "RX7", "SLIM RX7"},
-
- {"IIR2", NULL, "IIR2 INP4 MUX"},
- {"IIR2 INP4 MUX", "DEC1", "DEC1 MUX"},
- {"IIR2 INP4 MUX", "DEC2", "DEC2 MUX"},
- {"IIR2 INP4 MUX", "DEC3", "DEC3 MUX"},
- {"IIR2 INP4 MUX", "DEC4", "DEC4 MUX"},
- {"IIR2 INP4 MUX", "DEC5", "DEC5 MUX"},
- {"IIR2 INP4 MUX", "DEC6", "DEC6 MUX"},
- {"IIR2 INP4 MUX", "DEC7", "DEC7 MUX"},
- {"IIR2 INP4 MUX", "DEC8", "DEC8 MUX"},
- {"IIR2 INP4 MUX", "DEC9", "DEC9 MUX"},
- {"IIR2 INP4 MUX", "DEC10", "DEC10 MUX"},
- {"IIR2 INP4 MUX", "RX1", "SLIM RX1"},
- {"IIR2 INP4 MUX", "RX2", "SLIM RX2"},
- {"IIR2 INP4 MUX", "RX3", "SLIM RX3"},
- {"IIR2 INP4 MUX", "RX4", "SLIM RX4"},
- {"IIR2 INP4 MUX", "RX5", "SLIM RX5"},
- {"IIR2 INP4 MUX", "RX6", "SLIM RX6"},
- {"IIR2 INP4 MUX", "RX7", "SLIM RX7"},
-
- {"MIC BIAS1 Internal1", NULL, "LDO_H"},
- {"MIC BIAS1 Internal2", NULL, "LDO_H"},
- {"MIC BIAS1 External", NULL, "LDO_H"},
- {"MIC BIAS2 Internal1", NULL, "LDO_H"},
- {"MIC BIAS2 Internal2", NULL, "LDO_H"},
- {"MIC BIAS2 Internal3", NULL, "LDO_H"},
- {"MIC BIAS2 External", NULL, "LDO_H"},
- {"MIC BIAS3 Internal1", NULL, "LDO_H"},
- {"MIC BIAS3 Internal2", NULL, "LDO_H"},
- {"MIC BIAS3 External", NULL, "LDO_H"},
- {"MIC BIAS4 External", NULL, "LDO_H"},
- {DAPM_MICBIAS2_EXTERNAL_STANDALONE, NULL, "LDO_H Standalone"},
-};
-
-static int tomtom_startup(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- pr_debug("%s(): substream = %s stream = %d\n", __func__,
- substream->name, substream->stream);
-
- return 0;
-}
-
-static void tomtom_shutdown(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- pr_debug("%s(): substream = %s stream = %d\n", __func__,
- substream->name, substream->stream);
-}
-
-int tomtom_mclk_enable(struct snd_soc_codec *codec, int mclk_enable, bool dapm)
-{
- struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-
- pr_debug("%s: mclk_enable = %u, dapm = %d\n", __func__, mclk_enable,
- dapm);
-
- WCD9XXX_BG_CLK_LOCK(&tomtom->resmgr);
- if (mclk_enable) {
- wcd9xxx_resmgr_get_bandgap(&tomtom->resmgr,
- WCD9XXX_BANDGAP_AUDIO_MODE);
- wcd9xxx_resmgr_get_clk_block(&tomtom->resmgr, WCD9XXX_CLK_MCLK);
- } else {
- /* Put clock and BG */
- wcd9xxx_resmgr_put_clk_block(&tomtom->resmgr, WCD9XXX_CLK_MCLK);
- wcd9xxx_resmgr_put_bandgap(&tomtom->resmgr,
- WCD9XXX_BANDGAP_AUDIO_MODE);
- }
- WCD9XXX_BG_CLK_UNLOCK(&tomtom->resmgr);
-
- return 0;
-}
-
-static int tomtom_set_dai_sysclk(struct snd_soc_dai *dai,
- int clk_id, unsigned int freq, int dir)
-{
- pr_debug("%s\n", __func__);
- return 0;
-}
-
-static int tomtom_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
-{
- u8 val = 0;
- struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(dai->codec);
-
- pr_debug("%s\n", __func__);
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBS_CFS:
- /* CPU is master */
- if (tomtom->intf_type == WCD9XXX_INTERFACE_TYPE_I2C) {
- if (dai->id == AIF1_CAP)
- snd_soc_update_bits(dai->codec,
- TOMTOM_A_CDC_CLK_TX_I2S_CTL,
- TOMTOM_I2S_MASTER_MODE_MASK, 0);
- else if (dai->id == AIF1_PB)
- snd_soc_update_bits(dai->codec,
- TOMTOM_A_CDC_CLK_RX_I2S_CTL,
- TOMTOM_I2S_MASTER_MODE_MASK, 0);
- }
- break;
- case SND_SOC_DAIFMT_CBM_CFM:
- /* CPU is slave */
- if (tomtom->intf_type == WCD9XXX_INTERFACE_TYPE_I2C) {
- val = TOMTOM_I2S_MASTER_MODE_MASK;
- if (dai->id == AIF1_CAP)
- snd_soc_update_bits(dai->codec,
- TOMTOM_A_CDC_CLK_TX_I2S_CTL, val, val);
- else if (dai->id == AIF1_PB)
- snd_soc_update_bits(dai->codec,
- TOMTOM_A_CDC_CLK_RX_I2S_CTL, val, val);
- }
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int tomtom_set_channel_map(struct snd_soc_dai *dai,
- unsigned int tx_num, unsigned int *tx_slot,
- unsigned int rx_num, unsigned int *rx_slot)
-
-{
- struct wcd9xxx_codec_dai_data *dai_data = NULL;
- struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(dai->codec);
- struct wcd9xxx *core = dev_get_drvdata(dai->codec->dev->parent);
-
- if (!tx_slot || !rx_slot) {
- pr_err("%s: Invalid tx_slot=%pK, rx_slot=%pK\n",
- __func__, tx_slot, rx_slot);
- return -EINVAL;
- }
- pr_debug("%s(): dai_name = %s DAI-ID %x tx_ch %d rx_ch %d\n"
- "tomtom->intf_type %d\n",
- __func__, dai->name, dai->id, tx_num, rx_num,
- tomtom->intf_type);
-
- if (tomtom->intf_type == WCD9XXX_INTERFACE_TYPE_SLIMBUS) {
- wcd9xxx_init_slimslave(core, core->slim->laddr,
- tx_num, tx_slot, rx_num, rx_slot);
- /*Reserve tx11 and tx12 for VI feedback path*/
- dai_data = &tomtom->dai[AIF4_VIFEED];
- if (dai_data) {
- list_add_tail(&core->tx_chs[TOMTOM_TX11].list,
- &dai_data->wcd9xxx_ch_list);
- list_add_tail(&core->tx_chs[TOMTOM_TX12].list,
- &dai_data->wcd9xxx_ch_list);
- }
-
- /* Reserve TX13 for MAD data channel */
- dai_data = &tomtom->dai[AIF4_MAD_TX];
- if (dai_data)
- list_add_tail(&core->tx_chs[TOMTOM_TX13].list,
- &dai_data->wcd9xxx_ch_list);
- }
-
- return 0;
-}
-
-static int tomtom_get_channel_map(struct snd_soc_dai *dai,
- unsigned int *tx_num, unsigned int *tx_slot,
- unsigned int *rx_num, unsigned int *rx_slot)
-
-{
- struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(dai->codec);
- u32 i = 0;
- struct wcd9xxx_ch *ch;
-
- switch (dai->id) {
- case AIF1_PB:
- case AIF2_PB:
- case AIF3_PB:
- if (!rx_slot || !rx_num) {
- pr_err("%s: Invalid rx_slot %pK or rx_num %pK\n",
- __func__, rx_slot, rx_num);
- return -EINVAL;
- }
- list_for_each_entry(ch, &tomtom_p->dai[dai->id].wcd9xxx_ch_list,
- list) {
- pr_debug("%s: slot_num %u ch->ch_num %d\n",
- __func__, i, ch->ch_num);
- rx_slot[i++] = ch->ch_num;
- }
- pr_debug("%s: rx_num %d\n", __func__, i);
- *rx_num = i;
- break;
- case AIF1_CAP:
- case AIF2_CAP:
- case AIF3_CAP:
- case AIF4_VIFEED:
- case AIF4_MAD_TX:
- if (!tx_slot || !tx_num) {
- pr_err("%s: Invalid tx_slot %pK or tx_num %pK\n",
- __func__, tx_slot, tx_num);
- return -EINVAL;
- }
- list_for_each_entry(ch, &tomtom_p->dai[dai->id].wcd9xxx_ch_list,
- list) {
- pr_debug("%s: slot_num %u ch->ch_num %d\n",
- __func__, i, ch->ch_num);
- tx_slot[i++] = ch->ch_num;
- }
- pr_debug("%s: tx_num %d\n", __func__, i);
- *tx_num = i;
- break;
-
- default:
- pr_err("%s: Invalid DAI ID %x\n", __func__, dai->id);
- break;
- }
-
- return 0;
-}
-
-static int tomtom_set_interpolator_rate(struct snd_soc_dai *dai,
- u8 rx_fs_rate_reg_val, u32 compander_fs, u32 sample_rate)
-{
- u32 j;
- u8 rx_mix1_inp, rx8_mix1_inp;
- u16 rx_mix_1_reg_1, rx_mix_1_reg_2;
- u16 rx_fs_reg;
- u8 rx_mix_1_reg_1_val, rx_mix_1_reg_2_val;
- struct snd_soc_codec *codec = dai->codec;
- struct wcd9xxx_ch *ch;
- struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
- int port_rx_8 = TOMTOM_RX_PORT_START_NUMBER + NUM_INTERPOLATORS - 1;
-
- list_for_each_entry(ch, &tomtom->dai[dai->id].wcd9xxx_ch_list, list) {
- /* for RX port starting from 16 instead of 10 like tabla */
- rx_mix1_inp = ch->port + RX_MIX1_INP_SEL_RX1 -
- TOMTOM_TX_PORT_NUMBER;
- rx8_mix1_inp = ch->port + RX8_MIX1_INP_SEL_RX1 -
- TOMTOM_RX_PORT_START_NUMBER;
- if (((ch->port < port_rx_8) &&
- ((rx_mix1_inp < RX_MIX1_INP_SEL_RX1) ||
- (rx_mix1_inp > RX_MIX1_INP_SEL_RX7))) ||
- ((rx8_mix1_inp < RX8_MIX1_INP_SEL_RX1) ||
- (rx8_mix1_inp > RX8_MIX1_INP_SEL_RX8))) {
- pr_err("%s: Invalid TOMTOM_RX%u port. Dai ID is %d\n",
- __func__, rx8_mix1_inp - 2,
- dai->id);
- return -EINVAL;
- }
-
- rx_mix_1_reg_1 = TOMTOM_A_CDC_CONN_RX1_B1_CTL;
-
- for (j = 0; j < NUM_INTERPOLATORS - 1; j++) {
- rx_mix_1_reg_2 = rx_mix_1_reg_1 + 1;
-
- rx_mix_1_reg_1_val = snd_soc_read(codec,
- rx_mix_1_reg_1);
- rx_mix_1_reg_2_val = snd_soc_read(codec,
- rx_mix_1_reg_2);
-
- if (((rx_mix_1_reg_1_val & 0x0F) == rx_mix1_inp) ||
- (((rx_mix_1_reg_1_val >> 4) & 0x0F)
- == rx_mix1_inp) ||
- ((rx_mix_1_reg_2_val & 0x0F) == rx_mix1_inp)) {
-
- rx_fs_reg = TOMTOM_A_CDC_RX1_B5_CTL + 8 * j;
-
- pr_debug("%s: AIF_PB DAI(%d) connected to RX%u\n",
- __func__, dai->id, j + 1);
-
- pr_debug("%s: set RX%u sample rate to %u\n",
- __func__, j + 1, sample_rate);
-
- snd_soc_update_bits(codec, rx_fs_reg,
- 0xE0, rx_fs_rate_reg_val);
-
- if (comp_rx_path[j] < COMPANDER_MAX)
- tomtom->comp_fs[comp_rx_path[j]]
- = compander_fs;
- }
- if (j < 2)
- rx_mix_1_reg_1 += 3;
- else
- rx_mix_1_reg_1 += 2;
- }
-
- /* RX8 interpolator path */
- rx_mix_1_reg_1_val = snd_soc_read(codec,
- TOMTOM_A_CDC_CONN_RX8_B1_CTL);
- if (((rx_mix_1_reg_1_val & 0x0F) == rx8_mix1_inp) ||
- (((rx_mix_1_reg_1_val >> 4) & 0x0F) == rx8_mix1_inp)) {
- snd_soc_update_bits(codec, TOMTOM_A_CDC_RX8_B5_CTL,
- 0xE0, rx_fs_rate_reg_val);
- pr_debug("%s: AIF_PB DAI(%d) connected to RX%u\n",
- __func__, dai->id, NUM_INTERPOLATORS);
-
- pr_debug("%s: set RX%u sample rate to %u\n",
- __func__, NUM_INTERPOLATORS,
- sample_rate);
- if (comp_rx_path[NUM_INTERPOLATORS - 1] < COMPANDER_MAX)
- tomtom->comp_fs[comp_rx_path[j]] =
- compander_fs;
- }
- }
- return 0;
-}
-
-static int tomtom_set_decimator_rate(struct snd_soc_dai *dai,
- u8 tx_fs_rate_reg_val, u32 sample_rate)
-{
- struct snd_soc_codec *codec = dai->codec;
- struct wcd9xxx_ch *ch;
- struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
- u32 tx_port;
- u16 tx_port_reg, tx_fs_reg;
- u8 tx_port_reg_val;
- s8 decimator;
-
- list_for_each_entry(ch, &tomtom->dai[dai->id].wcd9xxx_ch_list, list) {
-
- tx_port = ch->port + 1;
- pr_debug("%s: dai->id = %d, tx_port = %d",
- __func__, dai->id, tx_port);
-
- if ((tx_port < 1) || (tx_port > NUM_DECIMATORS)) {
- pr_err("%s: Invalid SLIM TX%u port. DAI ID is %d\n",
- __func__, tx_port, dai->id);
- return -EINVAL;
- }
-
- tx_port_reg = TOMTOM_A_CDC_CONN_TX_SB_B1_CTL + (tx_port - 1);
- tx_port_reg_val = snd_soc_read(codec, tx_port_reg);
-
- decimator = 0;
-
- if ((tx_port >= 1) && (tx_port <= 6)) {
-
- tx_port_reg_val = tx_port_reg_val & 0x0F;
- if (tx_port_reg_val == 0x8)
- decimator = tx_port;
-
- } else if ((tx_port >= 7) && (tx_port <= NUM_DECIMATORS)) {
-
- tx_port_reg_val = tx_port_reg_val & 0x1F;
-
- if ((tx_port_reg_val >= 0x8) &&
- (tx_port_reg_val <= 0x11)) {
-
- decimator = (tx_port_reg_val - 0x8) + 1;
- }
- }
-
- if (decimator) { /* SLIM_TX port has a DEC as input */
-
- tx_fs_reg = TOMTOM_A_CDC_TX1_CLK_FS_CTL +
- 8 * (decimator - 1);
-
- pr_debug("%s: set DEC%u (-> SLIM_TX%u) rate to %u\n",
- __func__, decimator, tx_port, sample_rate);
-
- snd_soc_update_bits(codec, tx_fs_reg, 0x07,
- tx_fs_rate_reg_val);
-
- } else {
- if ((tx_port_reg_val >= 0x1) &&
- (tx_port_reg_val <= 0x7)) {
-
- pr_debug("%s: RMIX%u going to SLIM TX%u\n",
- __func__, tx_port_reg_val, tx_port);
-
- } else if ((tx_port_reg_val >= 0x8) &&
- (tx_port_reg_val <= 0x11)) {
-
- pr_err("%s: ERROR: Should not be here\n",
- __func__);
- pr_err("%s: ERROR: DEC connected to SLIM TX%u\n",
- __func__, tx_port);
- return -EINVAL;
-
- } else if (tx_port_reg_val == 0) {
- pr_debug("%s: no signal to SLIM TX%u\n",
- __func__, tx_port);
- } else {
- pr_err("%s: ERROR: wrong signal to SLIM TX%u\n",
- __func__, tx_port);
- pr_err("%s: ERROR: wrong signal = %u\n",
- __func__, tx_port_reg_val);
- return -EINVAL;
- }
- }
- }
- return 0;
-}
-
-static void tomtom_set_rxsb_port_format(struct snd_pcm_hw_params *params,
- struct snd_soc_dai *dai)
-{
- struct snd_soc_codec *codec = dai->codec;
- struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec);
- struct wcd9xxx_codec_dai_data *cdc_dai;
- struct wcd9xxx_ch *ch;
- int port;
- u8 bit_sel;
- u16 sb_ctl_reg, field_shift;
-
- switch (params_width(params)) {
- case 16:
- bit_sel = 0x2;
- tomtom_p->dai[dai->id].bit_width = 16;
- break;
- case 24:
- bit_sel = 0x0;
- tomtom_p->dai[dai->id].bit_width = 24;
- break;
- default:
- dev_err(codec->dev, "Invalid format\n");
- return;
- }
-
- cdc_dai = &tomtom_p->dai[dai->id];
-
- list_for_each_entry(ch, &cdc_dai->wcd9xxx_ch_list, list) {
- port = wcd9xxx_get_slave_port(ch->ch_num);
- if (port < 0 ||
- !TOMTOM_VALIDATE_RX_SBPORT_RANGE(port)) {
- dev_warn(codec->dev,
- "%s: invalid port ID %d returned for RX DAI\n",
- __func__, port);
- return;
- }
-
- port = TOMTOM_CONVERT_RX_SBPORT_ID(port);
-
- if (port <= 3) {
- sb_ctl_reg = TOMTOM_A_CDC_CONN_RX_SB_B1_CTL;
- field_shift = port << 1;
- } else if (port <= 7) {
- sb_ctl_reg = TOMTOM_A_CDC_CONN_RX_SB_B2_CTL;
- field_shift = (port - 4) << 1;
- } else { /* should not happen */
- dev_warn(codec->dev,
- "%s: bad port ID %d\n", __func__, port);
- return;
- }
-
- dev_dbg(codec->dev, "%s: sb_ctl_reg %x field_shift %x\n",
- __func__, sb_ctl_reg, field_shift);
- snd_soc_update_bits(codec, sb_ctl_reg, 0x3 << field_shift,
- bit_sel << field_shift);
- }
-}
-
-static void tomtom_set_tx_sb_port_format(struct snd_pcm_hw_params *params,
- struct snd_soc_dai *dai)
-{
- struct snd_soc_codec *codec = dai->codec;
- struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec);
- struct wcd9xxx_codec_dai_data *cdc_dai;
- struct wcd9xxx_ch *ch;
- int port;
- u8 bit_sel, bit_shift;
- u16 sb_ctl_reg;
-
- switch (params_width(params)) {
- case 16:
- bit_sel = 0x2;
- tomtom_p->dai[dai->id].bit_width = 16;
- break;
- case 24:
- bit_sel = 0x0;
- tomtom_p->dai[dai->id].bit_width = 24;
- break;
- default:
- dev_err(codec->dev, "%s: Invalid format %d\n", __func__,
- params_width(params));
- return;
- }
-
- cdc_dai = &tomtom_p->dai[dai->id];
-
- list_for_each_entry(ch, &cdc_dai->wcd9xxx_ch_list, list) {
- port = wcd9xxx_get_slave_port(ch->ch_num);
- if (port < 0 ||
- !TOMTOM_VALIDATE_TX_SBPORT_RANGE(port)) {
- dev_warn(codec->dev,
- "%s: invalid port ID %d returned for TX DAI\n",
- __func__, port);
- return;
- }
-
- if (port < 6) /* 6 = SLIMBUS TX7 */
- bit_shift = TOMTOM_BIT_ADJ_SHIFT_PORT1_6;
- else if (port < 10)
- bit_shift = TOMTOM_BIT_ADJ_SHIFT_PORT7_10;
- else {
- dev_warn(codec->dev,
- "%s: port ID %d bitwidth is fixed\n",
- __func__, port);
- return;
- }
-
- sb_ctl_reg = (TOMTOM_A_CDC_CONN_TX_SB_B1_CTL + port);
-
- dev_dbg(codec->dev, "%s: reg %x bit_sel %x bit_shift %x\n",
- __func__, sb_ctl_reg, bit_sel, bit_shift);
- snd_soc_update_bits(codec, sb_ctl_reg, 0x3 <<
- bit_shift, bit_sel << bit_shift);
- }
-}
-
-static int tomtom_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params,
- struct snd_soc_dai *dai)
-{
- struct snd_soc_codec *codec = dai->codec;
- struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(dai->codec);
- u8 tx_fs_rate, rx_fs_rate, i2s_bit_mode;
- u32 compander_fs;
- int ret;
-
- pr_debug("%s: dai_name = %s DAI-ID %x rate %d num_ch %d\n", __func__,
- dai->name, dai->id, params_rate(params),
- params_channels(params));
-
- switch (params_rate(params)) {
- case 8000:
- tx_fs_rate = 0x00;
- rx_fs_rate = 0x00;
- compander_fs = COMPANDER_FS_8KHZ;
- break;
- case 16000:
- tx_fs_rate = 0x01;
- rx_fs_rate = 0x20;
- compander_fs = COMPANDER_FS_16KHZ;
- break;
- case 32000:
- tx_fs_rate = 0x02;
- rx_fs_rate = 0x40;
- compander_fs = COMPANDER_FS_32KHZ;
- break;
- case 48000:
- tx_fs_rate = 0x03;
- rx_fs_rate = 0x60;
- compander_fs = COMPANDER_FS_48KHZ;
- break;
- case 96000:
- tx_fs_rate = 0x04;
- rx_fs_rate = 0x80;
- compander_fs = COMPANDER_FS_96KHZ;
- break;
- case 192000:
- tx_fs_rate = 0x05;
- rx_fs_rate = 0xA0;
- compander_fs = COMPANDER_FS_192KHZ;
- break;
- default:
- pr_err("%s: Invalid sampling rate %d\n", __func__,
- params_rate(params));
- return -EINVAL;
- }
-
- switch (substream->stream) {
- case SNDRV_PCM_STREAM_CAPTURE:
- if (dai->id != AIF4_VIFEED &&
- dai->id != AIF4_MAD_TX) {
- ret = tomtom_set_decimator_rate(dai, tx_fs_rate,
- params_rate(params));
- if (ret < 0) {
- pr_err("%s: set decimator rate failed %d\n",
- __func__, ret);
- return ret;
- }
- }
-
- tomtom->dai[dai->id].rate = params_rate(params);
-
- switch (params_format(params)) {
- case SNDRV_PCM_FORMAT_S16_LE:
- i2s_bit_mode = 0x01;
- tomtom->dai[dai->id].bit_width = 16;
- break;
- case SNDRV_PCM_FORMAT_S24_LE:
- tomtom->dai[dai->id].bit_width = 24;
- i2s_bit_mode = 0x00;
- break;
- case SNDRV_PCM_FORMAT_S32_LE:
- tomtom->dai[dai->id].bit_width = 32;
- i2s_bit_mode = 0x00;
- break;
- default:
- dev_err(codec->dev,
- "%s: Invalid format 0x%x\n",
- __func__, params_format(params));
- return -EINVAL;
- }
-
- if (tomtom->intf_type == WCD9XXX_INTERFACE_TYPE_I2C) {
- snd_soc_update_bits(codec, TOMTOM_A_CDC_CLK_TX_I2S_CTL,
- 0x20, i2s_bit_mode << 5);
- snd_soc_update_bits(codec, TOMTOM_A_CDC_CLK_TX_I2S_CTL,
- 0x07, tx_fs_rate);
- } else {
- /* only generic ports can have sample bit adjustment */
- if (dai->id != AIF4_VIFEED &&
- dai->id != AIF4_MAD_TX)
- tomtom_set_tx_sb_port_format(params, dai);
- }
-
- break;
-
- case SNDRV_PCM_STREAM_PLAYBACK:
- ret = tomtom_set_interpolator_rate(dai, rx_fs_rate,
- compander_fs,
- params_rate(params));
- if (ret < 0) {
- pr_err("%s: set decimator rate failed %d\n", __func__,
- ret);
- return ret;
- }
- if (tomtom->intf_type == WCD9XXX_INTERFACE_TYPE_I2C) {
- switch (params_format(params)) {
- case SNDRV_PCM_FORMAT_S16_LE:
- snd_soc_update_bits(codec,
- TOMTOM_A_CDC_CLK_RX_I2S_CTL,
- 0x20, 0x20);
- break;
- case SNDRV_PCM_FORMAT_S32_LE:
- snd_soc_update_bits(codec,
- TOMTOM_A_CDC_CLK_RX_I2S_CTL,
- 0x20, 0x00);
- break;
- default:
- pr_err("invalid format\n");
- break;
- }
- snd_soc_update_bits(codec, TOMTOM_A_CDC_CLK_RX_I2S_CTL,
- 0x03, (rx_fs_rate >> 0x05));
- } else {
- tomtom_set_rxsb_port_format(params, dai);
- tomtom->dai[dai->id].rate = params_rate(params);
- }
- break;
- default:
- pr_err("%s: Invalid stream type %d\n", __func__,
- substream->stream);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static struct snd_soc_dai_ops tomtom_dai_ops = {
- .startup = tomtom_startup,
- .shutdown = tomtom_shutdown,
- .hw_params = tomtom_hw_params,
- .set_sysclk = tomtom_set_dai_sysclk,
- .set_fmt = tomtom_set_dai_fmt,
- .set_channel_map = tomtom_set_channel_map,
- .get_channel_map = tomtom_get_channel_map,
-};
-
-static struct snd_soc_dai_driver tomtom_dai[] = {
- {
- .name = "tomtom_rx1",
- .id = AIF1_PB,
- .playback = {
- .stream_name = "AIF1 Playback",
- .rates = WCD9330_RATES,
- .formats = TOMTOM_FORMATS_S16_S24_LE,
- .rate_max = 192000,
- .rate_min = 8000,
- .channels_min = 1,
- .channels_max = 2,
- },
- .ops = &tomtom_dai_ops,
- },
- {
- .name = "tomtom_tx1",
- .id = AIF1_CAP,
- .capture = {
- .stream_name = "AIF1 Capture",
- .rates = WCD9330_RATES,
- .formats = TOMTOM_FORMATS,
- .rate_max = 192000,
- .rate_min = 8000,
- .channels_min = 1,
- .channels_max = 4,
- },
- .ops = &tomtom_dai_ops,
- },
- {
- .name = "tomtom_rx2",
- .id = AIF2_PB,
- .playback = {
- .stream_name = "AIF2 Playback",
- .rates = WCD9330_RATES,
- .formats = TOMTOM_FORMATS_S16_S24_LE,
- .rate_min = 8000,
- .rate_max = 192000,
- .channels_min = 1,
- .channels_max = 2,
- },
- .ops = &tomtom_dai_ops,
- },
- {
- .name = "tomtom_tx2",
- .id = AIF2_CAP,
- .capture = {
- .stream_name = "AIF2 Capture",
- .rates = WCD9330_RATES,
- .formats = TOMTOM_FORMATS,
- .rate_max = 192000,
- .rate_min = 8000,
- .channels_min = 1,
- .channels_max = 8,
- },
- .ops = &tomtom_dai_ops,
- },
- {
- .name = "tomtom_rx3",
- .id = AIF3_PB,
- .playback = {
- .stream_name = "AIF3 Playback",
- .rates = WCD9330_RATES,
- .formats = TOMTOM_FORMATS_S16_S24_LE,
- .rate_min = 8000,
- .rate_max = 192000,
- .channels_min = 1,
- .channels_max = 2,
- },
- .ops = &tomtom_dai_ops,
- },
- {
- .name = "tomtom_tx3",
- .id = AIF3_CAP,
- .capture = {
- .stream_name = "AIF3 Capture",
- .rates = WCD9330_RATES,
- .formats = TOMTOM_FORMATS,
- .rate_max = 48000,
- .rate_min = 8000,
- .channels_min = 1,
- .channels_max = 2,
- },
- .ops = &tomtom_dai_ops,
- },
- {
- .name = "tomtom_vifeedback",
- .id = AIF4_VIFEED,
- .capture = {
- .stream_name = "VIfeed",
- .rates = SNDRV_PCM_RATE_48000,
- .formats = TOMTOM_FORMATS,
- .rate_max = 48000,
- .rate_min = 48000,
- .channels_min = 2,
- .channels_max = 2,
- },
- .ops = &tomtom_dai_ops,
- },
- {
- .name = "tomtom_mad1",
- .id = AIF4_MAD_TX,
- .capture = {
- .stream_name = "AIF4 MAD TX",
- .rates = SNDRV_PCM_RATE_16000,
- .formats = TOMTOM_FORMATS_S16_S24_LE,
- .rate_min = 16000,
- .rate_max = 16000,
- .channels_min = 1,
- .channels_max = 1,
- },
- .ops = &tomtom_dai_ops,
- },
-};
-
-static struct snd_soc_dai_driver tomtom_i2s_dai[] = {
- {
- .name = "tomtom_i2s_rx1",
- .id = AIF1_PB,
- .playback = {
- .stream_name = "AIF1 Playback",
- .rates = WCD9330_RATES,
- .formats = TOMTOM_FORMATS,
- .rate_max = 192000,
- .rate_min = 8000,
- .channels_min = 1,
- .channels_max = 4,
- },
- .ops = &tomtom_dai_ops,
- },
- {
- .name = "tomtom_i2s_tx1",
- .id = AIF1_CAP,
- .capture = {
- .stream_name = "AIF1 Capture",
- .rates = WCD9330_RATES,
- .formats = TOMTOM_FORMATS,
- .rate_max = 192000,
- .rate_min = 8000,
- .channels_min = 1,
- .channels_max = 4,
- },
- .ops = &tomtom_dai_ops,
- },
- {
- .name = "tomtom_i2s_rx2",
- .id = AIF1_PB,
- .playback = {
- .stream_name = "AIF2 Playback",
- .rates = WCD9330_RATES,
- .formats = TOMTOM_FORMATS,
- .rate_max = 192000,
- .rate_min = 8000,
- .channels_min = 1,
- .channels_max = 4,
- },
- .ops = &tomtom_dai_ops,
- },
- {
- .name = "tomtom_i2s_tx2",
- .id = AIF1_CAP,
- .capture = {
- .stream_name = "AIF2 Capture",
- .rates = WCD9330_RATES,
- .formats = TOMTOM_FORMATS,
- .rate_max = 192000,
- .rate_min = 8000,
- .channels_min = 1,
- .channels_max = 4,
- },
- .ops = &tomtom_dai_ops,
- },
-};
-
-static int tomtom_codec_enable_slim_chmask(struct wcd9xxx_codec_dai_data *dai,
- bool up)
-{
- int ret = 0;
- struct wcd9xxx_ch *ch;
-
- if (up) {
- list_for_each_entry(ch, &dai->wcd9xxx_ch_list, list) {
- ret = wcd9xxx_get_slave_port(ch->ch_num);
- if (ret < 0) {
- pr_err("%s: Invalid slave port ID: %d\n",
- __func__, ret);
- ret = -EINVAL;
- } else {
- set_bit(ret, &dai->ch_mask);
- }
- }
- } else {
- ret = wait_event_timeout(dai->dai_wait, (dai->ch_mask == 0),
- msecs_to_jiffies(
- TOMTOM_SLIM_CLOSE_TIMEOUT));
- if (!ret) {
- pr_err("%s: Slim close tx/rx wait timeout\n", __func__);
- ret = -ETIMEDOUT;
- } else {
- ret = 0;
- }
- }
- return ret;
-}
-
-static void tomtom_codec_enable_int_port(struct wcd9xxx_codec_dai_data *dai,
- struct snd_soc_codec *codec)
-{
- struct wcd9xxx_ch *ch;
- struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
- int port_num = 0;
- unsigned short reg = 0;
- u8 val = 0;
-
- if (!dai || !codec) {
- pr_err("%s: Invalid params\n", __func__);
- return;
- }
- list_for_each_entry(ch, &dai->wcd9xxx_ch_list, list) {
- if (ch->port >= TOMTOM_RX_PORT_START_NUMBER) {
- port_num = ch->port - TOMTOM_RX_PORT_START_NUMBER;
- reg = TOMTOM_SLIM_PGD_PORT_INT_EN0 + (port_num / 8);
- val = wcd9xxx_interface_reg_read(wcd9xxx,
- reg);
- if (!(val & (1 << (port_num % 8)))) {
- val |= (1 << (port_num % 8));
- wcd9xxx_interface_reg_write(
- wcd9xxx, reg, val);
- val = wcd9xxx_interface_reg_read(
- wcd9xxx, reg);
- }
- } else {
- port_num = ch->port;
- reg = TOMTOM_SLIM_PGD_PORT_INT_TX_EN0 + (port_num / 8);
- val = wcd9xxx_interface_reg_read(wcd9xxx,
- reg);
- if (!(val & (1 << (port_num % 8)))) {
- val |= (1 << (port_num % 8));
- wcd9xxx_interface_reg_write(wcd9xxx,
- reg, val);
- val = wcd9xxx_interface_reg_read(
- wcd9xxx, reg);
- }
- }
- }
-}
-
-static int tomtom_codec_enable_slimrx(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol,
- int event)
-{
- struct wcd9xxx *core;
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec);
- int ret = 0;
- struct wcd9xxx_codec_dai_data *dai;
-
- core = dev_get_drvdata(codec->dev->parent);
-
- pr_debug("%s: event called! codec name %s num_dai %d\n"
- "stream name %s event %d\n",
- __func__, codec->component.name,
- codec->component.num_dai, w->sname, event);
-
- /* Execute the callback only if interface type is slimbus */
- if (tomtom_p->intf_type != WCD9XXX_INTERFACE_TYPE_SLIMBUS)
- return 0;
-
- dai = &tomtom_p->dai[w->shift];
- pr_debug("%s: w->name %s w->shift %d event %d\n",
- __func__, w->name, w->shift, event);
-
- switch (event) {
- case SND_SOC_DAPM_POST_PMU:
- dai->bus_down_in_recovery = false;
- tomtom_codec_enable_int_port(dai, codec);
- (void) tomtom_codec_enable_slim_chmask(dai, true);
- ret = wcd9xxx_cfg_slim_sch_rx(core, &dai->wcd9xxx_ch_list,
- dai->rate, dai->bit_width,
- &dai->grph);
- break;
- case SND_SOC_DAPM_POST_PMD:
- ret = wcd9xxx_close_slim_sch_rx(core, &dai->wcd9xxx_ch_list,
- dai->grph);
- if (!dai->bus_down_in_recovery)
- ret = tomtom_codec_enable_slim_chmask(dai, false);
- else
- pr_debug("%s: bus in recovery skip enable slim_chmask",
- __func__);
- if (ret < 0) {
- ret = wcd9xxx_disconnect_port(core,
- &dai->wcd9xxx_ch_list,
- dai->grph);
- pr_debug("%s: Disconnect RX port, ret = %d\n",
- __func__, ret);
- }
- break;
- }
- return ret;
-}
-
-static int tomtom_codec_enable_slimvi_feedback(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol,
- int event)
-{
- struct wcd9xxx *core = NULL;
- struct snd_soc_codec *codec = NULL;
- struct tomtom_priv *tomtom_p = NULL;
- u32 ret = 0;
- struct wcd9xxx_codec_dai_data *dai = NULL;
-
- if (!w) {
- pr_err("%s invalid params\n", __func__);
- return -EINVAL;
- }
- codec = snd_soc_dapm_to_codec(w->dapm);
- tomtom_p = snd_soc_codec_get_drvdata(codec);
- core = dev_get_drvdata(codec->dev->parent);
-
- pr_debug("%s: event called! codec name %s num_dai %d stream name %s\n",
- __func__, codec->component.name,
- codec->component.num_dai, w->sname);
-
- /* Execute the callback only if interface type is slimbus */
- if (tomtom_p->intf_type != WCD9XXX_INTERFACE_TYPE_SLIMBUS) {
- pr_err("%s Interface is not correct", __func__);
- return 0;
- }
-
- pr_debug("%s(): w->name %s event %d w->shift %d\n",
- __func__, w->name, event, w->shift);
- if (w->shift != AIF4_VIFEED) {
- pr_err("%s Error in enabling the tx path\n", __func__);
- ret = -EINVAL;
- goto out_vi;
- }
- dai = &tomtom_p->dai[w->shift];
- switch (event) {
- case SND_SOC_DAPM_POST_PMU:
- /*Enable V&I sensing*/
- snd_soc_update_bits(codec, TOMTOM_A_SPKR1_PROT_EN,
- 0x88, 0x88);
- /*Enable spkr VI clocks*/
- snd_soc_update_bits(codec,
- TOMTOM_A_CDC_CLK_TX_CLK_EN_B2_CTL, 0xC, 0xC);
- dai->bus_down_in_recovery = false;
- tomtom_codec_enable_int_port(dai, codec);
- (void) tomtom_codec_enable_slim_chmask(dai, true);
- ret = wcd9xxx_cfg_slim_sch_tx(core, &dai->wcd9xxx_ch_list,
- dai->rate, dai->bit_width,
- &dai->grph);
- break;
- case SND_SOC_DAPM_POST_PMD:
- ret = wcd9xxx_close_slim_sch_tx(core, &dai->wcd9xxx_ch_list,
- dai->grph);
- if (ret)
- pr_err("%s error in close_slim_sch_tx %d\n",
- __func__, ret);
- if (!dai->bus_down_in_recovery)
- ret = tomtom_codec_enable_slim_chmask(dai, false);
- if (ret < 0) {
- ret = wcd9xxx_disconnect_port(core,
- &dai->wcd9xxx_ch_list,
- dai->grph);
- pr_debug("%s: Disconnect TX port, ret = %d\n",
- __func__, ret);
- }
-
- snd_soc_update_bits(codec, TOMTOM_A_CDC_CLK_TX_CLK_EN_B2_CTL,
- 0xC, 0x0);
- /*Disable V&I sensing*/
- snd_soc_update_bits(codec, TOMTOM_A_SPKR1_PROT_EN,
- 0x88, 0x00);
- break;
- }
-out_vi:
- return ret;
-}
-
-/* __tomtom_codec_enable_slimtx: Enable the slimbus slave port
- * for TX path
- * @codec: Handle to the codec for which the slave port is to be
- * enabled.
- * @dai_data: The dai specific data for dai which is enabled.
- */
-static int __tomtom_codec_enable_slimtx(struct snd_soc_codec *codec,
- int event, struct wcd9xxx_codec_dai_data *dai_data)
-{
- struct wcd9xxx *core;
- int ret = 0;
-
- core = dev_get_drvdata(codec->dev->parent);
-
- switch (event) {
- case SND_SOC_DAPM_POST_PMU:
- dai_data->bus_down_in_recovery = false;
- tomtom_codec_enable_int_port(dai_data, codec);
- (void) tomtom_codec_enable_slim_chmask(dai_data, true);
- ret = wcd9xxx_cfg_slim_sch_tx(core, &dai_data->wcd9xxx_ch_list,
- dai_data->rate,
- dai_data->bit_width,
- &dai_data->grph);
- break;
- case SND_SOC_DAPM_POST_PMD:
- ret = wcd9xxx_close_slim_sch_tx(core,
- &dai_data->wcd9xxx_ch_list,
- dai_data->grph);
- if (!dai_data->bus_down_in_recovery)
- ret = tomtom_codec_enable_slim_chmask(dai_data, false);
- if (ret < 0) {
- ret = wcd9xxx_disconnect_port(core,
- &dai_data->wcd9xxx_ch_list,
- dai_data->grph);
- dev_dbg(codec->dev,
- "%s: Disconnect TX port, ret = %d\n",
- __func__, ret);
- }
- break;
- }
-
- return ret;
-}
-
-/*
- * tomtom_codec_enable_slimtx_mad: Callback function that will be invoked
- * to setup the slave port for MAD.
- * @codec: Handle to the codec
- * @event: Indicates whether to enable or disable the slave port
- */
-static int tomtom_codec_enable_slimtx_mad(struct snd_soc_codec *codec,
- u8 event)
-{
- struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec);
- struct wcd9xxx_codec_dai_data *dai;
- int dapm_event = SND_SOC_DAPM_POST_PMU;
-
- dai = &tomtom_p->dai[AIF4_MAD_TX];
-
- if (event == 0)
- dapm_event = SND_SOC_DAPM_POST_PMD;
-
- dev_dbg(codec->dev,
- "%s: mad_channel, event = 0x%x\n",
- __func__, event);
- return __tomtom_codec_enable_slimtx(codec, dapm_event, dai);
-}
-
-/*
- * tomtom_codec_enable_slimtx: DAPM widget allback for TX widgets
- * @w: widget for which this callback is invoked
- * @kcontrol: kcontrol associated with this widget
- * @event: DAPM supplied event indicating enable/disable
- */
-static int tomtom_codec_enable_slimtx(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol,
- int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec);
- struct wcd9xxx_codec_dai_data *dai;
-
- dev_dbg(codec->dev, "%s: event called! codec name %s num_dai %d stream name %s\n",
- __func__, codec->component.name,
- codec->component.num_dai, w->sname);
-
- /* Execute the callback only if interface type is slimbus */
- if (tomtom_p->intf_type != WCD9XXX_INTERFACE_TYPE_SLIMBUS)
- return 0;
-
- dev_dbg(codec->dev,
- "%s(): w->name %s event %d w->shift %d\n",
- __func__, w->name, event, w->shift);
-
- dai = &tomtom_p->dai[w->shift];
- return __tomtom_codec_enable_slimtx(codec, event, dai);
-}
-
-static int tomtom_codec_enable_ear_pa(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec);
-
- pr_debug("%s %s %d\n", __func__, w->name, event);
-
- switch (event) {
- case SND_SOC_DAPM_POST_PMU:
- wcd9xxx_clsh_fsm(codec, &tomtom_p->clsh_d,
- WCD9XXX_CLSH_STATE_EAR,
- WCD9XXX_CLSH_REQ_ENABLE,
- WCD9XXX_CLSH_EVENT_POST_PA);
-
- usleep_range(5000, 5100);
- break;
- }
- return 0;
-}
-
-static int tomtom_codec_ear_dac_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec);
-
- pr_debug("%s %s %d\n", __func__, w->name, event);
-
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- wcd9xxx_clsh_fsm(codec, &tomtom_p->clsh_d,
- WCD9XXX_CLSH_STATE_EAR,
- WCD9XXX_CLSH_REQ_ENABLE,
- WCD9XXX_CLSH_EVENT_PRE_DAC);
- break;
- case SND_SOC_DAPM_POST_PMD:
- wcd9xxx_clsh_fsm(codec, &tomtom_p->clsh_d,
- WCD9XXX_CLSH_STATE_EAR,
- WCD9XXX_CLSH_REQ_DISABLE,
- WCD9XXX_CLSH_EVENT_POST_PA);
- usleep_range(5000, 5100);
- break;
- default:
- break;
- }
- return 0;
-}
-
-static int tomtom_codec_set_iir_gain(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-
- pr_debug("%s: event = %d\n", __func__, event);
-
- switch (event) {
- case SND_SOC_DAPM_POST_PMU: /* fall through */
- case SND_SOC_DAPM_PRE_PMD:
- if (strnstr(w->name, "IIR1", sizeof("IIR1"))) {
- snd_soc_write(codec, TOMTOM_A_CDC_IIR1_GAIN_B1_CTL,
- snd_soc_read(codec,
- TOMTOM_A_CDC_IIR1_GAIN_B1_CTL));
- snd_soc_write(codec, TOMTOM_A_CDC_IIR1_GAIN_B2_CTL,
- snd_soc_read(codec,
- TOMTOM_A_CDC_IIR1_GAIN_B2_CTL));
- snd_soc_write(codec, TOMTOM_A_CDC_IIR1_GAIN_B3_CTL,
- snd_soc_read(codec,
- TOMTOM_A_CDC_IIR1_GAIN_B3_CTL));
- snd_soc_write(codec, TOMTOM_A_CDC_IIR1_GAIN_B4_CTL,
- snd_soc_read(codec,
- TOMTOM_A_CDC_IIR1_GAIN_B4_CTL));
- } else {
- snd_soc_write(codec, TOMTOM_A_CDC_IIR2_GAIN_B1_CTL,
- snd_soc_read(codec,
- TOMTOM_A_CDC_IIR2_GAIN_B1_CTL));
- snd_soc_write(codec, TOMTOM_A_CDC_IIR2_GAIN_B2_CTL,
- snd_soc_read(codec,
- TOMTOM_A_CDC_IIR2_GAIN_B2_CTL));
- snd_soc_write(codec, TOMTOM_A_CDC_IIR2_GAIN_B3_CTL,
- snd_soc_read(codec,
- TOMTOM_A_CDC_IIR2_GAIN_B3_CTL));
- snd_soc_write(codec, TOMTOM_A_CDC_IIR2_GAIN_B4_CTL,
- snd_soc_read(codec,
- TOMTOM_A_CDC_IIR2_GAIN_B4_CTL));
- }
- break;
- }
- return 0;
-}
-
-static int tomtom_codec_dsm_mux_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- u8 reg_val, zoh_mux_val = 0x00;
-
- pr_debug("%s: event = %d\n", __func__, event);
-
- switch (event) {
- case SND_SOC_DAPM_POST_PMU:
- reg_val = snd_soc_read(codec, TOMTOM_A_CDC_CONN_CLSH_CTL);
-
- if ((reg_val & 0x30) == 0x10)
- zoh_mux_val = 0x04;
- else if ((reg_val & 0x30) == 0x20)
- zoh_mux_val = 0x08;
-
- if (zoh_mux_val != 0x00)
- snd_soc_update_bits(codec,
- TOMTOM_A_CDC_CONN_CLSH_CTL,
- 0x0C, zoh_mux_val);
- break;
-
- case SND_SOC_DAPM_POST_PMD:
- snd_soc_update_bits(codec, TOMTOM_A_CDC_CONN_CLSH_CTL,
- 0x0C, 0x00);
- break;
- }
- return 0;
-}
-
-static int tomtom_codec_enable_anc_ear(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- int ret = 0;
-
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- ret = tomtom_codec_enable_anc(w, kcontrol, event);
- msleep(50);
- snd_soc_update_bits(codec, TOMTOM_A_RX_EAR_EN, 0x10, 0x10);
- break;
- case SND_SOC_DAPM_POST_PMU:
- ret = tomtom_codec_enable_ear_pa(w, kcontrol, event);
- break;
- case SND_SOC_DAPM_PRE_PMD:
- snd_soc_update_bits(codec, TOMTOM_A_RX_EAR_EN, 0x10, 0x00);
- msleep(40);
- ret |= tomtom_codec_enable_anc(w, kcontrol, event);
- break;
- case SND_SOC_DAPM_POST_PMD:
- ret = tomtom_codec_enable_ear_pa(w, kcontrol, event);
- break;
- }
- return ret;
-}
-
-/* Todo: Have separate dapm widgets for I2S and Slimbus.
- * Might Need to have callbacks registered only for slimbus
- */
-static const struct snd_soc_dapm_widget tomtom_dapm_widgets[] = {
- /*RX stuff */
- SND_SOC_DAPM_OUTPUT("EAR"),
-
- SND_SOC_DAPM_PGA_E("EAR PA", TOMTOM_A_RX_EAR_EN, 4, 0, NULL, 0,
- tomtom_codec_enable_ear_pa, SND_SOC_DAPM_POST_PMU),
-
- SND_SOC_DAPM_MIXER_E("DAC1", TOMTOM_A_RX_EAR_EN, 6, 0, dac1_switch,
- ARRAY_SIZE(dac1_switch), tomtom_codec_ear_dac_event,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_AIF_IN_E("AIF1 PB", "AIF1 Playback", 0, SND_SOC_NOPM,
- AIF1_PB, 0, tomtom_codec_enable_slimrx,
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_AIF_IN_E("AIF2 PB", "AIF2 Playback", 0, SND_SOC_NOPM,
- AIF2_PB, 0, tomtom_codec_enable_slimrx,
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_AIF_IN_E("AIF3 PB", "AIF3 Playback", 0, SND_SOC_NOPM,
- AIF3_PB, 0, tomtom_codec_enable_slimrx,
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_MUX("SLIM RX1 MUX", SND_SOC_NOPM, TOMTOM_RX1, 0,
- &slim_rx_mux[TOMTOM_RX1]),
- SND_SOC_DAPM_MUX("SLIM RX2 MUX", SND_SOC_NOPM, TOMTOM_RX2, 0,
- &slim_rx_mux[TOMTOM_RX2]),
- SND_SOC_DAPM_MUX("SLIM RX3 MUX", SND_SOC_NOPM, TOMTOM_RX3, 0,
- &slim_rx_mux[TOMTOM_RX3]),
- SND_SOC_DAPM_MUX("SLIM RX4 MUX", SND_SOC_NOPM, TOMTOM_RX4, 0,
- &slim_rx_mux[TOMTOM_RX4]),
- SND_SOC_DAPM_MUX("SLIM RX5 MUX", SND_SOC_NOPM, TOMTOM_RX5, 0,
- &slim_rx_mux[TOMTOM_RX5]),
- SND_SOC_DAPM_MUX("SLIM RX6 MUX", SND_SOC_NOPM, TOMTOM_RX6, 0,
- &slim_rx_mux[TOMTOM_RX6]),
- SND_SOC_DAPM_MUX("SLIM RX7 MUX", SND_SOC_NOPM, TOMTOM_RX7, 0,
- &slim_rx_mux[TOMTOM_RX7]),
- SND_SOC_DAPM_MUX("SLIM RX8 MUX", SND_SOC_NOPM, TOMTOM_RX8, 0,
- &slim_rx_mux[TOMTOM_RX8]),
-
- SND_SOC_DAPM_MIXER("SLIM RX1", SND_SOC_NOPM, 0, 0, NULL, 0),
- SND_SOC_DAPM_MIXER("SLIM RX2", SND_SOC_NOPM, 0, 0, NULL, 0),
- SND_SOC_DAPM_MIXER("SLIM RX3", SND_SOC_NOPM, 0, 0, NULL, 0),
- SND_SOC_DAPM_MIXER("SLIM RX4", SND_SOC_NOPM, 0, 0, NULL, 0),
- SND_SOC_DAPM_MIXER("SLIM RX5", SND_SOC_NOPM, 0, 0, NULL, 0),
- SND_SOC_DAPM_MIXER("SLIM RX6", SND_SOC_NOPM, 0, 0, NULL, 0),
- SND_SOC_DAPM_MIXER("SLIM RX7", SND_SOC_NOPM, 0, 0, NULL, 0),
- SND_SOC_DAPM_MIXER("SLIM RX8", SND_SOC_NOPM, 0, 0, NULL, 0),
-
- /* Headphone */
- SND_SOC_DAPM_OUTPUT("HEADPHONE"),
- SND_SOC_DAPM_PGA_E("HPHL", TOMTOM_A_RX_HPH_CNP_EN, 5, 0, NULL, 0,
- tomtom_hph_pa_event, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD |
- SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_MIXER_E("HPHL DAC", TOMTOM_A_RX_HPH_L_DAC_CTL, 7, 0,
- hphl_switch, ARRAY_SIZE(hphl_switch), tomtom_hphl_dac_event,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_PGA_E("HPHR", TOMTOM_A_RX_HPH_CNP_EN, 4, 0, NULL, 0,
- tomtom_hph_pa_event, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD |
- SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_DAC_E("HPHR DAC", NULL, TOMTOM_A_RX_HPH_R_DAC_CTL, 7, 0,
- tomtom_hphr_dac_event,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
-
- /* Speaker */
- SND_SOC_DAPM_OUTPUT("LINEOUT1"),
- SND_SOC_DAPM_OUTPUT("LINEOUT2"),
- SND_SOC_DAPM_OUTPUT("LINEOUT3"),
- SND_SOC_DAPM_OUTPUT("LINEOUT4"),
- SND_SOC_DAPM_OUTPUT("SPK_OUT"),
-
- SND_SOC_DAPM_PGA_E("LINEOUT1 PA", TOMTOM_A_RX_LINE_CNP_EN, 0, 0, NULL,
- 0, tomtom_codec_enable_lineout, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_PGA_E("LINEOUT2 PA", TOMTOM_A_RX_LINE_CNP_EN, 1, 0, NULL,
- 0, tomtom_codec_enable_lineout, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_PGA_E("LINEOUT3 PA", TOMTOM_A_RX_LINE_CNP_EN, 2, 0, NULL,
- 0, tomtom_codec_enable_lineout, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_PGA_E("LINEOUT4 PA", TOMTOM_A_RX_LINE_CNP_EN, 3, 0, NULL,
- 0, tomtom_codec_enable_lineout, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_PGA_E("SPK PA", SND_SOC_NOPM, 0, 0, NULL,
- 0, tomtom_codec_enable_spk_pa,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_PGA_E("SPK2 PA", SND_SOC_NOPM, 0, 0, NULL,
- 0, tomtom_codec_enable_spk_pa,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_DAC_E("LINEOUT1 DAC", NULL, TOMTOM_A_RX_LINE_1_DAC_CTL, 7,
- 0, tomtom_lineout_dac_event,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_DAC_E("LINEOUT2 DAC", NULL, TOMTOM_A_RX_LINE_2_DAC_CTL, 7,
- 0, tomtom_lineout_dac_event,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_DAC_E("LINEOUT3 DAC", NULL, TOMTOM_A_RX_LINE_3_DAC_CTL, 7,
- 0, tomtom_lineout_dac_event,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_SWITCH("LINEOUT3 DAC GROUND", SND_SOC_NOPM, 0, 0,
- &lineout3_ground_switch),
- SND_SOC_DAPM_DAC_E("LINEOUT4 DAC", NULL, TOMTOM_A_RX_LINE_4_DAC_CTL, 7,
- 0, tomtom_lineout_dac_event,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_SWITCH("LINEOUT4 DAC GROUND", SND_SOC_NOPM, 0, 0,
- &lineout4_ground_switch),
-
- SND_SOC_DAPM_DAC_E("SPK DAC", NULL, TOMTOM_A_CDC_BOOST_TRGR_EN, 0, 0,
- tomtom_spk_dac_event,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_DAC_E("SPK2 DAC", NULL, TOMTOM_A_CDC_BOOST_TRGR_EN, 1, 0,
- tomtom_spk_dac_event,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_SUPPLY("VDD_SPKDRV", SND_SOC_NOPM, 0, 0,
- tomtom_codec_enable_vdd_spkr,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_SUPPLY("VDD_SPKDRV2", SND_SOC_NOPM, 0, 0,
- tomtom_codec_enable_vdd_spkr2,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_MIXER("RX1 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
- SND_SOC_DAPM_MIXER("RX2 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
- SND_SOC_DAPM_MIXER("RX7 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
-
- SND_SOC_DAPM_MIXER("RX1 MIX2", SND_SOC_NOPM, 0, 0, NULL, 0),
- SND_SOC_DAPM_MIXER("RX2 MIX2", SND_SOC_NOPM, 0, 0, NULL, 0),
-
- SND_SOC_DAPM_MIXER_E("RX3 MIX1", TOMTOM_A_CDC_CLK_RX_B1_CTL, 2, 0, NULL,
- 0, tomtom_codec_enable_interpolator, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMU),
- SND_SOC_DAPM_MIXER_E("RX4 MIX1", TOMTOM_A_CDC_CLK_RX_B1_CTL, 3, 0, NULL,
- 0, tomtom_codec_enable_interpolator, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMU),
- SND_SOC_DAPM_MIXER_E("RX5 MIX1", TOMTOM_A_CDC_CLK_RX_B1_CTL, 4, 0, NULL,
- 0, tomtom_codec_enable_interpolator, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMU),
- SND_SOC_DAPM_MIXER_E("RX6 MIX1", TOMTOM_A_CDC_CLK_RX_B1_CTL, 5, 0, NULL,
- 0, tomtom_codec_enable_interpolator, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMU),
- SND_SOC_DAPM_MIXER_E("RX7 MIX2", TOMTOM_A_CDC_CLK_RX_B1_CTL, 6, 0, NULL,
- 0, tomtom_codec_enable_interpolator, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMU),
- SND_SOC_DAPM_MIXER_E("RX8 MIX1", TOMTOM_A_CDC_CLK_RX_B1_CTL, 7, 0, NULL,
- 0, tomtom_codec_enable_interpolator, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMU),
-
- SND_SOC_DAPM_MUX_E("RX1 INTERP", TOMTOM_A_CDC_CLK_RX_B1_CTL, 0, 0,
- &rx1_interp_mux, tomtom_codec_enable_interpolator,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
- SND_SOC_DAPM_MUX_E("RX2 INTERP", TOMTOM_A_CDC_CLK_RX_B1_CTL, 1, 0,
- &rx2_interp_mux, tomtom_codec_enable_interpolator,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
-
-
- SND_SOC_DAPM_MIXER("RX1 CHAIN", TOMTOM_A_CDC_RX1_B6_CTL, 5, 0, NULL, 0),
- SND_SOC_DAPM_MIXER("RX2 CHAIN", TOMTOM_A_CDC_RX2_B6_CTL, 5, 0, NULL, 0),
-
- SND_SOC_DAPM_MUX("RX1 MIX1 INP1", SND_SOC_NOPM, 0, 0,
- &rx_mix1_inp1_mux),
- SND_SOC_DAPM_MUX("RX1 MIX1 INP2", SND_SOC_NOPM, 0, 0,
- &rx_mix1_inp2_mux),
- SND_SOC_DAPM_MUX("RX1 MIX1 INP3", SND_SOC_NOPM, 0, 0,
- &rx_mix1_inp3_mux),
- SND_SOC_DAPM_MUX("RX2 MIX1 INP1", SND_SOC_NOPM, 0, 0,
- &rx2_mix1_inp1_mux),
- SND_SOC_DAPM_MUX("RX2 MIX1 INP2", SND_SOC_NOPM, 0, 0,
- &rx2_mix1_inp2_mux),
- SND_SOC_DAPM_MUX("RX3 MIX1 INP1", SND_SOC_NOPM, 0, 0,
- &rx3_mix1_inp1_mux),
- SND_SOC_DAPM_MUX("RX3 MIX1 INP2", SND_SOC_NOPM, 0, 0,
- &rx3_mix1_inp2_mux),
- SND_SOC_DAPM_MUX("RX4 MIX1 INP1", SND_SOC_NOPM, 0, 0,
- &rx4_mix1_inp1_mux),
- SND_SOC_DAPM_MUX("RX4 MIX1 INP2", SND_SOC_NOPM, 0, 0,
- &rx4_mix1_inp2_mux),
- SND_SOC_DAPM_MUX("RX5 MIX1 INP1", SND_SOC_NOPM, 0, 0,
- &rx5_mix1_inp1_mux),
- SND_SOC_DAPM_MUX("RX5 MIX1 INP2", SND_SOC_NOPM, 0, 0,
- &rx5_mix1_inp2_mux),
- SND_SOC_DAPM_MUX("RX6 MIX1 INP1", SND_SOC_NOPM, 0, 0,
- &rx6_mix1_inp1_mux),
- SND_SOC_DAPM_MUX("RX6 MIX1 INP2", SND_SOC_NOPM, 0, 0,
- &rx6_mix1_inp2_mux),
- SND_SOC_DAPM_MUX("RX7 MIX1 INP1", SND_SOC_NOPM, 0, 0,
- &rx7_mix1_inp1_mux),
- SND_SOC_DAPM_MUX("RX7 MIX1 INP2", SND_SOC_NOPM, 0, 0,
- &rx7_mix1_inp2_mux),
- SND_SOC_DAPM_MUX("RX8 MIX1 INP1", SND_SOC_NOPM, 0, 0,
- &rx8_mix1_inp1_mux),
- SND_SOC_DAPM_MUX("RX8 MIX1 INP2", SND_SOC_NOPM, 0, 0,
- &rx8_mix1_inp2_mux),
- SND_SOC_DAPM_MUX("RX1 MIX2 INP1", SND_SOC_NOPM, 0, 0,
- &rx1_mix2_inp1_mux),
- SND_SOC_DAPM_MUX("RX1 MIX2 INP2", SND_SOC_NOPM, 0, 0,
- &rx1_mix2_inp2_mux),
- SND_SOC_DAPM_MUX("RX2 MIX2 INP1", SND_SOC_NOPM, 0, 0,
- &rx2_mix2_inp1_mux),
- SND_SOC_DAPM_MUX("RX2 MIX2 INP2", SND_SOC_NOPM, 0, 0,
- &rx2_mix2_inp2_mux),
- SND_SOC_DAPM_MUX("RX7 MIX2 INP1", SND_SOC_NOPM, 0, 0,
- &rx7_mix2_inp1_mux),
- SND_SOC_DAPM_MUX("RX7 MIX2 INP2", SND_SOC_NOPM, 0, 0,
- &rx7_mix2_inp2_mux),
-
- SND_SOC_DAPM_MUX("RDAC5 MUX", SND_SOC_NOPM, 0, 0,
- &rx_dac5_mux),
- SND_SOC_DAPM_MUX("RDAC7 MUX", SND_SOC_NOPM, 0, 0,
- &rx_dac7_mux),
-
- SND_SOC_DAPM_MUX("MAD_SEL MUX", SND_SOC_NOPM, 0, 0,
- &mad_sel_mux),
-
- SND_SOC_DAPM_MUX_E("CLASS_H_DSM MUX", SND_SOC_NOPM, 0, 0,
- &class_h_dsm_mux, tomtom_codec_dsm_mux_event,
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_SUPPLY("RX_BIAS", SND_SOC_NOPM, 0, 0,
- tomtom_codec_enable_rx_bias, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_SUPPLY("CDC_I2S_RX_CONN", WCD9XXX_A_CDC_CLK_OTHR_CTL, 5, 0,
- NULL, 0),
-
- /* TX */
-
- SND_SOC_DAPM_SUPPLY("CDC_CONN", WCD9XXX_A_CDC_CLK_OTHR_CTL, 2, 0, NULL,
- 0),
-
- SND_SOC_DAPM_SUPPLY("LDO_H", SND_SOC_NOPM, 7, 0,
- tomtom_codec_enable_ldo_h,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
- /*
- * DAPM 'LDO_H Standalone' is to be powered by mbhc driver after
- * acquring codec_resource lock.
- * So call __tomtom_codec_enable_ldo_h instead and avoid deadlock.
- */
- SND_SOC_DAPM_SUPPLY("LDO_H Standalone", SND_SOC_NOPM, 7, 0,
- __tomtom_codec_enable_ldo_h,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_SUPPLY("COMP0_CLK", SND_SOC_NOPM, 0, 0,
- tomtom_config_compander, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_PRE_PMD),
- SND_SOC_DAPM_SUPPLY("COMP1_CLK", SND_SOC_NOPM, 1, 0,
- tomtom_config_compander, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_PRE_PMD),
- SND_SOC_DAPM_SUPPLY("COMP2_CLK", SND_SOC_NOPM, 2, 0,
- tomtom_config_compander, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_PRE_PMD),
-
-
- SND_SOC_DAPM_INPUT("AMIC1"),
- SND_SOC_DAPM_MICBIAS_E("MIC BIAS1 External", SND_SOC_NOPM, 7, 0,
- tomtom_codec_enable_micbias,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_MICBIAS_E("MIC BIAS1 Internal1", SND_SOC_NOPM, 7, 0,
- tomtom_codec_enable_micbias,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_MICBIAS_E("MIC BIAS1 Internal2", SND_SOC_NOPM, 7, 0,
- tomtom_codec_enable_micbias,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_INPUT("AMIC3"),
-
- SND_SOC_DAPM_INPUT("AMIC4"),
-
- SND_SOC_DAPM_INPUT("AMIC5"),
-
- SND_SOC_DAPM_INPUT("AMIC6"),
-
- SND_SOC_DAPM_MUX_E("DEC1 MUX", TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL, 0, 0,
- &dec1_mux, tomtom_codec_enable_dec,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_MUX_E("DEC2 MUX", TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL, 1, 0,
- &dec2_mux, tomtom_codec_enable_dec,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_MUX_E("DEC3 MUX", TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL, 2, 0,
- &dec3_mux, tomtom_codec_enable_dec,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_MUX_E("DEC4 MUX", TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL, 3, 0,
- &dec4_mux, tomtom_codec_enable_dec,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_MUX_E("DEC5 MUX", TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL, 4, 0,
- &dec5_mux, tomtom_codec_enable_dec,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_MUX_E("DEC6 MUX", TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL, 5, 0,
- &dec6_mux, tomtom_codec_enable_dec,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_MUX_E("DEC7 MUX", TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL, 6, 0,
- &dec7_mux, tomtom_codec_enable_dec,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_MUX_E("DEC8 MUX", TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL, 7, 0,
- &dec8_mux, tomtom_codec_enable_dec,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_MUX_E("DEC9 MUX", TOMTOM_A_CDC_CLK_TX_CLK_EN_B2_CTL, 0, 0,
- &dec9_mux, tomtom_codec_enable_dec,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_MUX_E("DEC10 MUX", TOMTOM_A_CDC_CLK_TX_CLK_EN_B2_CTL, 1, 0,
- &dec10_mux, tomtom_codec_enable_dec,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_MUX("ANC1 MUX", SND_SOC_NOPM, 0, 0, &anc1_mux),
- SND_SOC_DAPM_MUX("ANC2 MUX", SND_SOC_NOPM, 0, 0, &anc2_mux),
-
- SND_SOC_DAPM_OUTPUT("ANC HEADPHONE"),
- SND_SOC_DAPM_PGA_E("ANC HPHL", SND_SOC_NOPM, 5, 0, NULL, 0,
- tomtom_codec_enable_anc_hph,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD |
- SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_POST_PMU),
- SND_SOC_DAPM_PGA_E("ANC HPHR", SND_SOC_NOPM, 4, 0, NULL, 0,
- tomtom_codec_enable_anc_hph, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
- SND_SOC_DAPM_POST_PMU),
- SND_SOC_DAPM_OUTPUT("ANC EAR"),
- SND_SOC_DAPM_PGA_E("ANC EAR PA", SND_SOC_NOPM, 0, 0, NULL, 0,
- tomtom_codec_enable_anc_ear,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD |
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_MUX("ANC1 FB MUX", SND_SOC_NOPM, 0, 0, &anc1_fb_mux),
-
- SND_SOC_DAPM_INPUT("AMIC2"),
- SND_SOC_DAPM_MICBIAS_E(DAPM_MICBIAS2_EXTERNAL_STANDALONE, SND_SOC_NOPM,
- 7, 0, tomtom_codec_enable_micbias,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_MICBIAS_E("MIC BIAS2 External", SND_SOC_NOPM, 7, 0,
- tomtom_codec_enable_micbias,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_MICBIAS_E("MIC BIAS2 Internal1", SND_SOC_NOPM, 7, 0,
- tomtom_codec_enable_micbias,
- SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_MICBIAS_E("MIC BIAS2 Internal2", SND_SOC_NOPM, 7, 0,
- tomtom_codec_enable_micbias,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_MICBIAS_E("MIC BIAS2 Internal3", SND_SOC_NOPM, 7, 0,
- tomtom_codec_enable_micbias,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_MICBIAS_E("MIC BIAS3 External", SND_SOC_NOPM, 7, 0,
- tomtom_codec_enable_micbias,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_MICBIAS_E("MIC BIAS3 Internal1", SND_SOC_NOPM, 7, 0,
- tomtom_codec_enable_micbias,
- SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_MICBIAS_E("MIC BIAS3 Internal2", SND_SOC_NOPM, 7, 0,
- tomtom_codec_enable_micbias,
- SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_MICBIAS_E("MIC BIAS4 External", SND_SOC_NOPM, 7,
- 0, tomtom_codec_enable_micbias,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_AIF_OUT_E("AIF1 CAP", "AIF1 Capture", 0, SND_SOC_NOPM,
- AIF1_CAP, 0, tomtom_codec_enable_slimtx,
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_AIF_OUT_E("AIF2 CAP", "AIF2 Capture", 0, SND_SOC_NOPM,
- AIF2_CAP, 0, tomtom_codec_enable_slimtx,
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_AIF_OUT_E("AIF3 CAP", "AIF3 Capture", 0, SND_SOC_NOPM,
- AIF3_CAP, 0, tomtom_codec_enable_slimtx,
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_AIF_OUT_E("AIF4 VI", "VIfeed", 0, SND_SOC_NOPM,
- AIF4_VIFEED, 0, tomtom_codec_enable_slimvi_feedback,
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_AIF_OUT_E("AIF4 MAD", "AIF4 MAD TX", 0,
- SND_SOC_NOPM, 0, 0,
- tomtom_codec_enable_mad,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_SWITCH("MADONOFF", SND_SOC_NOPM, 0, 0,
- &aif4_mad_switch),
- SND_SOC_DAPM_INPUT("MADINPUT"),
- SND_SOC_DAPM_INPUT("MAD_CPE_INPUT"),
-
- SND_SOC_DAPM_MIXER("AIF1_CAP Mixer", SND_SOC_NOPM, AIF1_CAP, 0,
- aif1_cap_mixer, ARRAY_SIZE(aif1_cap_mixer)),
-
- SND_SOC_DAPM_MIXER("AIF2_CAP Mixer", SND_SOC_NOPM, AIF2_CAP, 0,
- aif2_cap_mixer, ARRAY_SIZE(aif2_cap_mixer)),
-
- SND_SOC_DAPM_MIXER("AIF3_CAP Mixer", SND_SOC_NOPM, AIF3_CAP, 0,
- aif3_cap_mixer, ARRAY_SIZE(aif3_cap_mixer)),
-
- SND_SOC_DAPM_MUX("SLIM TX1 MUX", SND_SOC_NOPM, TOMTOM_TX1, 0,
- &sb_tx1_mux),
- SND_SOC_DAPM_MUX("SLIM TX2 MUX", SND_SOC_NOPM, TOMTOM_TX2, 0,
- &sb_tx2_mux),
- SND_SOC_DAPM_MUX("SLIM TX3 MUX", SND_SOC_NOPM, TOMTOM_TX3, 0,
- &sb_tx3_mux),
- SND_SOC_DAPM_MUX("SLIM TX4 MUX", SND_SOC_NOPM, TOMTOM_TX4, 0,
- &sb_tx4_mux),
- SND_SOC_DAPM_MUX("SLIM TX5 MUX", SND_SOC_NOPM, TOMTOM_TX5, 0,
- &sb_tx5_mux),
- SND_SOC_DAPM_MUX("SLIM TX6 MUX", SND_SOC_NOPM, TOMTOM_TX6, 0,
- &sb_tx6_mux),
- SND_SOC_DAPM_MUX("SLIM TX7 MUX", SND_SOC_NOPM, TOMTOM_TX7, 0,
- &sb_tx7_mux),
- SND_SOC_DAPM_MUX("SLIM TX8 MUX", SND_SOC_NOPM, TOMTOM_TX8, 0,
- &sb_tx8_mux),
- SND_SOC_DAPM_MUX("SLIM TX9 MUX", SND_SOC_NOPM, TOMTOM_TX9, 0,
- &sb_tx9_mux),
- SND_SOC_DAPM_MUX("SLIM TX10 MUX", SND_SOC_NOPM, TOMTOM_TX10, 0,
- &sb_tx10_mux),
-
- /* Digital Mic Inputs */
- SND_SOC_DAPM_ADC_E("DMIC1", NULL, SND_SOC_NOPM, 0, 0,
- tomtom_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_ADC_E("DMIC2", NULL, SND_SOC_NOPM, 0, 0,
- tomtom_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_ADC_E("DMIC3", NULL, SND_SOC_NOPM, 0, 0,
- tomtom_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_ADC_E("DMIC4", NULL, SND_SOC_NOPM, 0, 0,
- tomtom_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_ADC_E("DMIC5", NULL, SND_SOC_NOPM, 0, 0,
- tomtom_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_ADC_E("DMIC6", NULL, SND_SOC_NOPM, 0, 0,
- tomtom_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMD),
-
- /* Sidetone */
- SND_SOC_DAPM_MUX("IIR1 INP1 MUX", SND_SOC_NOPM, 0, 0, &iir1_inp1_mux),
-
- SND_SOC_DAPM_MUX("IIR1 INP2 MUX", SND_SOC_NOPM, 0, 0, &iir1_inp2_mux),
-
- SND_SOC_DAPM_MUX("IIR1 INP3 MUX", SND_SOC_NOPM, 0, 0, &iir1_inp3_mux),
-
- SND_SOC_DAPM_MUX("IIR1 INP4 MUX", SND_SOC_NOPM, 0, 0, &iir1_inp4_mux),
-
- SND_SOC_DAPM_MIXER_E("IIR1", TOMTOM_A_CDC_CLK_SD_CTL, 0, 0, NULL, 0,
- tomtom_codec_set_iir_gain, SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_PRE_PMD),
-
- SND_SOC_DAPM_MUX("IIR2 INP1 MUX", SND_SOC_NOPM, 0, 0, &iir2_inp1_mux),
-
- SND_SOC_DAPM_MUX("IIR2 INP2 MUX", SND_SOC_NOPM, 0, 0, &iir2_inp2_mux),
-
- SND_SOC_DAPM_MUX("IIR2 INP3 MUX", SND_SOC_NOPM, 0, 0, &iir2_inp3_mux),
-
- SND_SOC_DAPM_MUX("IIR2 INP4 MUX", SND_SOC_NOPM, 0, 0, &iir2_inp4_mux),
-
- SND_SOC_DAPM_MIXER_E("IIR2", TOMTOM_A_CDC_CLK_SD_CTL, 1, 0, NULL, 0,
- tomtom_codec_set_iir_gain, SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_PRE_PMD),
-
- /* AUX PGA */
- SND_SOC_DAPM_ADC_E("AUX_PGA_Left", NULL, TOMTOM_A_RX_AUX_SW_CTL, 7, 0,
- tomtom_codec_enable_aux_pga, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_ADC_E("AUX_PGA_Right", NULL, TOMTOM_A_RX_AUX_SW_CTL, 6, 0,
- tomtom_codec_enable_aux_pga, SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMD),
-
- /* Lineout, ear and HPH PA Mixers */
-
- SND_SOC_DAPM_MIXER("EAR_PA_MIXER", SND_SOC_NOPM, 0, 0,
- ear_pa_mix, ARRAY_SIZE(ear_pa_mix)),
-
- SND_SOC_DAPM_MIXER("HPHL_PA_MIXER", SND_SOC_NOPM, 0, 0,
- hphl_pa_mix, ARRAY_SIZE(hphl_pa_mix)),
-
- SND_SOC_DAPM_MIXER("HPHR_PA_MIXER", SND_SOC_NOPM, 0, 0,
- hphr_pa_mix, ARRAY_SIZE(hphr_pa_mix)),
-
- SND_SOC_DAPM_MIXER("LINEOUT1_PA_MIXER", SND_SOC_NOPM, 0, 0,
- lineout1_pa_mix, ARRAY_SIZE(lineout1_pa_mix)),
-
- SND_SOC_DAPM_MIXER("LINEOUT2_PA_MIXER", SND_SOC_NOPM, 0, 0,
- lineout2_pa_mix, ARRAY_SIZE(lineout2_pa_mix)),
-
- SND_SOC_DAPM_MIXER("LINEOUT3_PA_MIXER", SND_SOC_NOPM, 0, 0,
- lineout3_pa_mix, ARRAY_SIZE(lineout3_pa_mix)),
-
- SND_SOC_DAPM_MIXER("LINEOUT4_PA_MIXER", SND_SOC_NOPM, 0, 0,
- lineout4_pa_mix, ARRAY_SIZE(lineout4_pa_mix)),
-
- SND_SOC_DAPM_SWITCH("VIONOFF", SND_SOC_NOPM, 0, 0,
- &aif4_vi_switch),
-
- SND_SOC_DAPM_INPUT("VIINPUT"),
-};
-
-static irqreturn_t tomtom_slimbus_irq(int irq, void *data)
-{
- struct tomtom_priv *priv = data;
- struct snd_soc_codec *codec = priv->codec;
- struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
- unsigned long status = 0;
- int i, j, port_id, k;
- u32 bit;
- u8 val, int_val = 0;
- bool tx, cleared;
- unsigned short reg = 0;
-
- for (i = TOMTOM_SLIM_PGD_PORT_INT_STATUS_RX_0, j = 0;
- i <= TOMTOM_SLIM_PGD_PORT_INT_STATUS_TX_1; i++, j++) {
- val = wcd9xxx_interface_reg_read(wcd9xxx, i);
- status |= ((u32)val << (8 * j));
- }
-
- for_each_set_bit(j, &status, 32) {
- tx = (j >= 16 ? true : false);
- port_id = (tx ? j - 16 : j);
- val = wcd9xxx_interface_reg_read(wcd9xxx,
- TOMTOM_SLIM_PGD_PORT_INT_RX_SOURCE0 + j);
- if (val) {
- if (!tx)
- reg = TOMTOM_SLIM_PGD_PORT_INT_EN0 +
- (port_id / 8);
- else
- reg = TOMTOM_SLIM_PGD_PORT_INT_TX_EN0 +
- (port_id / 8);
- int_val = wcd9xxx_interface_reg_read(
- wcd9xxx, reg);
- /*
- * Ignore interrupts for ports for which the
- * interrupts are not specifically enabled.
- */
- if (!(int_val & (1 << (port_id % 8))))
- continue;
- }
- if (val & TOMTOM_SLIM_IRQ_OVERFLOW)
- pr_err_ratelimited(
- "%s: overflow error on %s port %d, value %x\n",
- __func__, (tx ? "TX" : "RX"), port_id, val);
- if (val & TOMTOM_SLIM_IRQ_UNDERFLOW)
- pr_err_ratelimited(
- "%s: underflow error on %s port %d, value %x\n",
- __func__, (tx ? "TX" : "RX"), port_id, val);
- if ((val & TOMTOM_SLIM_IRQ_OVERFLOW) ||
- (val & TOMTOM_SLIM_IRQ_UNDERFLOW)) {
- if (!tx)
- reg = TOMTOM_SLIM_PGD_PORT_INT_EN0 +
- (port_id / 8);
- else
- reg = TOMTOM_SLIM_PGD_PORT_INT_TX_EN0 +
- (port_id / 8);
- int_val = wcd9xxx_interface_reg_read(wcd9xxx, reg);
- if (int_val & (1 << (port_id % 8))) {
- int_val = int_val ^ (1 << (port_id % 8));
- wcd9xxx_interface_reg_write(wcd9xxx, reg,
- int_val);
- }
- }
- if (val & TOMTOM_SLIM_IRQ_PORT_CLOSED) {
- /*
- * INT SOURCE register starts from RX to TX
- * but port number in the ch_mask is in opposite way
- */
- bit = (tx ? j - 16 : j + 16);
- pr_debug("%s: %s port %d closed value %x, bit %u\n",
- __func__, (tx ? "TX" : "RX"), port_id, val,
- bit);
- for (k = 0, cleared = false; k < NUM_CODEC_DAIS; k++) {
- pr_debug("%s: priv->dai[%d].ch_mask = 0x%lx\n",
- __func__, k, priv->dai[k].ch_mask);
- if (test_and_clear_bit(bit,
- &priv->dai[k].ch_mask)) {
- cleared = true;
- if (!priv->dai[k].ch_mask)
- wake_up(&priv->dai[k].dai_wait);
- /*
- * There are cases when multiple DAIs
- * might be using the same slimbus
- * channel. Hence don't break here.
- */
- }
- }
- WARN(!cleared,
- "Couldn't find slimbus %s port %d for closing\n",
- (tx ? "TX" : "RX"), port_id);
- }
- wcd9xxx_interface_reg_write(wcd9xxx,
- TOMTOM_SLIM_PGD_PORT_INT_CLR_RX_0 +
- (j / 8),
- 1 << (j % 8));
- }
-
- return IRQ_HANDLED;
-}
-
-static int tomtom_handle_pdata(struct tomtom_priv *tomtom)
-{
- struct snd_soc_codec *codec = tomtom->codec;
- struct wcd9xxx_pdata *pdata = tomtom->resmgr.pdata;
- int k1, k2, k3, dec, rc = 0;
- u8 leg_mode, txfe_bypass, txfe_buff, flag;
- u8 i = 0, j = 0;
- u8 val_txfe = 0, value = 0;
- u8 dmic_ctl_val, mad_dmic_ctl_val;
- u8 anc_ctl_value = 0;
- u32 def_dmic_rate;
- u16 tx_dmic_ctl_reg;
-
- if (!pdata) {
- pr_err("%s: NULL pdata\n", __func__);
- rc = -ENODEV;
- goto done;
- }
-
- leg_mode = pdata->amic_settings.legacy_mode;
- txfe_bypass = pdata->amic_settings.txfe_enable;
- txfe_buff = pdata->amic_settings.txfe_buff;
- flag = pdata->amic_settings.use_pdata;
-
- /* Make sure settings are correct */
- if ((pdata->micbias.ldoh_v > WCD9XXX_LDOH_3P0_V) ||
- (pdata->micbias.bias1_cfilt_sel > WCD9XXX_CFILT3_SEL) ||
- (pdata->micbias.bias2_cfilt_sel > WCD9XXX_CFILT3_SEL) ||
- (pdata->micbias.bias3_cfilt_sel > WCD9XXX_CFILT3_SEL) ||
- (pdata->micbias.bias4_cfilt_sel > WCD9XXX_CFILT3_SEL)) {
- rc = -EINVAL;
- goto done;
- }
- /* figure out k value */
- k1 = wcd9xxx_resmgr_get_k_val(&tomtom->resmgr,
- pdata->micbias.cfilt1_mv);
- k2 = wcd9xxx_resmgr_get_k_val(&tomtom->resmgr,
- pdata->micbias.cfilt2_mv);
- k3 = wcd9xxx_resmgr_get_k_val(&tomtom->resmgr,
- pdata->micbias.cfilt3_mv);
- if (k1 < 0 || k2 < 0 || k3 < 0) {
- rc = -EINVAL;
- goto done;
- }
- /* Set voltage level and always use LDO */
- snd_soc_update_bits(codec, TOMTOM_A_LDO_H_MODE_1, 0x0C,
- (pdata->micbias.ldoh_v << 2));
-
- snd_soc_update_bits(codec, TOMTOM_A_MICB_CFILT_1_VAL, 0xFC, (k1 << 2));
- snd_soc_update_bits(codec, TOMTOM_A_MICB_CFILT_2_VAL, 0xFC, (k2 << 2));
- snd_soc_update_bits(codec, TOMTOM_A_MICB_CFILT_3_VAL, 0xFC, (k3 << 2));
-
- snd_soc_update_bits(codec, TOMTOM_A_MICB_1_CTL, 0x60,
- (pdata->micbias.bias1_cfilt_sel << 5));
- snd_soc_update_bits(codec, TOMTOM_A_MICB_2_CTL, 0x60,
- (pdata->micbias.bias2_cfilt_sel << 5));
- snd_soc_update_bits(codec, TOMTOM_A_MICB_3_CTL, 0x60,
- (pdata->micbias.bias3_cfilt_sel << 5));
- snd_soc_update_bits(codec, tomtom->resmgr.reg_addr->micb_4_ctl, 0x60,
- (pdata->micbias.bias4_cfilt_sel << 5));
-
- for (i = 0; i < 6; j++, i += 2) {
- if (flag & (0x01 << i)) {
- val_txfe = (txfe_bypass & (0x01 << i)) ? 0x20 : 0x00;
- val_txfe = val_txfe |
- ((txfe_buff & (0x01 << i)) ? 0x10 : 0x00);
- snd_soc_update_bits(codec,
- TOMTOM_A_TX_1_2_TEST_EN + j * 10,
- 0x30, val_txfe);
- }
- if (flag & (0x01 << (i + 1))) {
- val_txfe = (txfe_bypass &
- (0x01 << (i + 1))) ? 0x02 : 0x00;
- val_txfe |= (txfe_buff &
- (0x01 << (i + 1))) ? 0x01 : 0x00;
- snd_soc_update_bits(codec,
- TOMTOM_A_TX_1_2_TEST_EN + j * 10,
- 0x03, val_txfe);
- }
- }
- if (flag & 0x40) {
- value = (leg_mode & 0x40) ? 0x10 : 0x00;
- value = value | ((txfe_bypass & 0x40) ? 0x02 : 0x00);
- value = value | ((txfe_buff & 0x40) ? 0x01 : 0x00);
- snd_soc_update_bits(codec, TOMTOM_A_TX_7_MBHC_EN,
- 0x13, value);
- }
-
- if (pdata->ocp.use_pdata) {
- /* not defined in CODEC specification */
- if (pdata->ocp.hph_ocp_limit == 1 ||
- pdata->ocp.hph_ocp_limit == 5) {
- rc = -EINVAL;
- goto done;
- }
- snd_soc_update_bits(codec, TOMTOM_A_RX_COM_OCP_CTL,
- 0x0F, pdata->ocp.num_attempts);
- snd_soc_write(codec, TOMTOM_A_RX_COM_OCP_COUNT,
- ((pdata->ocp.run_time << 4) | pdata->ocp.wait_time));
- snd_soc_update_bits(codec, TOMTOM_A_RX_HPH_OCP_CTL,
- 0xE0, (pdata->ocp.hph_ocp_limit << 5));
- }
-
- for (i = 0; i < ARRAY_SIZE(pdata->regulator); i++) {
- if (pdata->regulator[i].name &&
- !strcmp(pdata->regulator[i].name, "CDC_VDDA_RX")) {
- if (pdata->regulator[i].min_uV == 1800000 &&
- pdata->regulator[i].max_uV == 1800000) {
- snd_soc_write(codec, TOMTOM_A_BIAS_REF_CTL,
- 0x1C);
- } else if (pdata->regulator[i].min_uV == 2200000 &&
- pdata->regulator[i].max_uV == 2200000) {
- snd_soc_write(codec, TOMTOM_A_BIAS_REF_CTL,
- 0x1E);
- } else {
- pr_err("%s: unsupported CDC_VDDA_RX voltage\n"
- "min %d, max %d\n", __func__,
- pdata->regulator[i].min_uV,
- pdata->regulator[i].max_uV);
- rc = -EINVAL;
- }
- break;
- }
- }
-
- /* Set micbias capless mode with tail current */
- value = (pdata->micbias.bias1_cap_mode == MICBIAS_EXT_BYP_CAP ?
- 0x00 : 0x16);
- snd_soc_update_bits(codec, TOMTOM_A_MICB_1_CTL, 0x1E, value);
- value = (pdata->micbias.bias2_cap_mode == MICBIAS_EXT_BYP_CAP ?
- 0x00 : 0x16);
- snd_soc_update_bits(codec, TOMTOM_A_MICB_2_CTL, 0x1E, value);
- value = (pdata->micbias.bias3_cap_mode == MICBIAS_EXT_BYP_CAP ?
- 0x00 : 0x16);
- snd_soc_update_bits(codec, TOMTOM_A_MICB_3_CTL, 0x1E, value);
- value = (pdata->micbias.bias4_cap_mode == MICBIAS_EXT_BYP_CAP ?
- 0x00 : 0x16);
- snd_soc_update_bits(codec, TOMTOM_A_MICB_4_CTL, 0x1E, value);
-
- /* Set the DMIC sample rate */
- switch (pdata->mclk_rate) {
- case TOMTOM_MCLK_CLK_9P6MHZ:
- def_dmic_rate =
- WCD9XXX_DMIC_SAMPLE_RATE_4P8MHZ;
- break;
- case TOMTOM_MCLK_CLK_12P288MHZ:
- def_dmic_rate =
- WCD9XXX_DMIC_SAMPLE_RATE_4P096MHZ;
- break;
- default:
- /* should never happen */
- pr_err("%s: Invalid mclk_rate %d\n",
- __func__, pdata->mclk_rate);
- rc = -EINVAL;
- goto done;
- }
-
- if (pdata->dmic_sample_rate ==
- WCD9XXX_DMIC_SAMPLE_RATE_UNDEFINED) {
- pr_info("%s: dmic_rate invalid default = %d\n",
- __func__, def_dmic_rate);
- pdata->dmic_sample_rate = def_dmic_rate;
- }
-
- if (pdata->mad_dmic_sample_rate ==
- WCD9XXX_DMIC_SAMPLE_RATE_UNDEFINED) {
- pr_info("%s: mad_dmic_rate invalid default = %d\n",
- __func__, def_dmic_rate);
- /*
- * use dmic_sample_rate as the default for MAD
- * if mad dmic sample rate is undefined
- */
- pdata->mad_dmic_sample_rate = pdata->dmic_sample_rate;
- }
-
- /*
- * Default the DMIC clk rates to mad_dmic_sample_rate,
- * whereas, the anc/txfe dmic rates to dmic_sample_rate
- * since the anc/txfe are independent of mad block.
- */
- mad_dmic_ctl_val = tomtom_get_dmic_clk_val(tomtom->codec,
- pdata->mclk_rate,
- pdata->mad_dmic_sample_rate);
- snd_soc_update_bits(codec, TOMTOM_A_DMIC_B1_CTL,
- 0xE0, mad_dmic_ctl_val << 5);
- snd_soc_update_bits(codec, TOMTOM_A_DMIC_B2_CTL,
- 0x70, mad_dmic_ctl_val << 4);
- snd_soc_update_bits(codec, TOMTOM_A_DMIC_B2_CTL,
- 0x0E, mad_dmic_ctl_val << 1);
-
- dmic_ctl_val = tomtom_get_dmic_clk_val(tomtom->codec,
- pdata->mclk_rate,
- pdata->dmic_sample_rate);
-
- if (dmic_ctl_val == WCD9330_DMIC_CLK_DIV_2)
- anc_ctl_value = WCD9XXX_ANC_DMIC_X2_ON;
- else
- anc_ctl_value = WCD9XXX_ANC_DMIC_X2_OFF;
-
- for (dec = 0; dec < NUM_DECIMATORS; dec++) {
- tx_dmic_ctl_reg =
- TOMTOM_A_CDC_TX1_DMIC_CTL + (8 * dec);
- snd_soc_update_bits(codec, tx_dmic_ctl_reg,
- 0x07, dmic_ctl_val);
- }
- snd_soc_update_bits(codec, TOMTOM_A_CDC_ANC1_B2_CTL,
- 0x1, anc_ctl_value);
- snd_soc_update_bits(codec, TOMTOM_A_CDC_ANC2_B2_CTL,
- 0x1, anc_ctl_value);
-done:
- return rc;
-}
-
-static const struct wcd9xxx_reg_mask_val tomtom_reg_defaults[] = {
-
- /* set MCLk to 9.6 */
- TOMTOM_REG_VAL(TOMTOM_A_CHIP_CTL, 0x02),
-
- /* EAR PA deafults */
- TOMTOM_REG_VAL(TOMTOM_A_RX_EAR_CMBUFF, 0x05),
-
- /* RX deafults */
- TOMTOM_REG_VAL(TOMTOM_A_CDC_RX1_B5_CTL, 0x79),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_RX2_B5_CTL, 0x79),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_RX3_B5_CTL, 0x79),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_RX4_B5_CTL, 0x79),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_RX5_B5_CTL, 0x79),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_RX6_B5_CTL, 0x79),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_RX7_B5_CTL, 0x79),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_RX8_B5_CTL, 0x79),
-
- /* RX1 and RX2 defaults */
- TOMTOM_REG_VAL(TOMTOM_A_CDC_RX1_B6_CTL, 0xA0),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_RX2_B6_CTL, 0xA0),
-
- /* RX3 to RX7 defaults */
- TOMTOM_REG_VAL(TOMTOM_A_CDC_RX3_B6_CTL, 0x80),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_RX4_B6_CTL, 0x80),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_RX5_B6_CTL, 0x80),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_RX6_B6_CTL, 0x80),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_RX7_B6_CTL, 0x80),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_RX8_B6_CTL, 0x80),
-
- /* MAD registers */
- TOMTOM_REG_VAL(TOMTOM_A_MAD_ANA_CTRL, 0xF1),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_MAD_MAIN_CTL_1, 0x00),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_MAD_MAIN_CTL_2, 0x00),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_MAD_AUDIO_CTL_1, 0x00),
- /* Set SAMPLE_TX_EN bit */
- TOMTOM_REG_VAL(TOMTOM_A_CDC_MAD_AUDIO_CTL_2, 0x03),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_MAD_AUDIO_CTL_3, 0x00),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_MAD_AUDIO_CTL_4, 0x00),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_MAD_AUDIO_CTL_5, 0x00),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_MAD_AUDIO_CTL_6, 0x00),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_MAD_AUDIO_CTL_7, 0x00),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_MAD_AUDIO_CTL_8, 0x00),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_PTR, 0x00),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_VAL, 0x40),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_DEBUG_B7_CTL, 0x00),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_CLK_OTHR_RESET_B1_CTL, 0x00),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_CLK_OTHR_CTL, 0x00),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_MAD_INP_SEL, 0x01),
-
- /* Set HPH Path to low power mode */
- TOMTOM_REG_VAL(TOMTOM_A_RX_HPH_BIAS_PA, 0x57),
-
- /* BUCK default */
- TOMTOM_REG_VAL(TOMTOM_A_BUCK_CTRL_CCL_4, 0x51),
- TOMTOM_REG_VAL(TOMTOM_A_BUCK_CTRL_CCL_1, 0x5B),
-};
-
-/*
- * Don't update TOMTOM_A_CHIP_CTL, TOMTOM_A_BUCK_CTRL_CCL_1 and
- * TOMTOM_A_RX_EAR_CMBUFF as those are updated in tomtom_reg_defaults
- */
-static const struct wcd9xxx_reg_mask_val tomtom_1_0_reg_defaults[] = {
- TOMTOM_REG_VAL(TOMTOM_A_TX_1_GAIN, 0x2),
- TOMTOM_REG_VAL(TOMTOM_A_TX_2_GAIN, 0x2),
- TOMTOM_REG_VAL(TOMTOM_A_TX_1_2_ADC_IB, 0x44),
- TOMTOM_REG_VAL(TOMTOM_A_TX_3_GAIN, 0x2),
- TOMTOM_REG_VAL(TOMTOM_A_TX_4_GAIN, 0x2),
- TOMTOM_REG_VAL(TOMTOM_A_TX_3_4_ADC_IB, 0x44),
- TOMTOM_REG_VAL(TOMTOM_A_TX_5_GAIN, 0x2),
- TOMTOM_REG_VAL(TOMTOM_A_TX_6_GAIN, 0x2),
- TOMTOM_REG_VAL(TOMTOM_A_TX_5_6_ADC_IB, 0x44),
- TOMTOM_REG_VAL(WCD9XXX_A_BUCK_MODE_3, 0xCE),
- TOMTOM_REG_VAL(WCD9XXX_A_BUCK_CTRL_VCL_1, 0x8),
- TOMTOM_REG_VAL(TOMTOM_A_BUCK_CTRL_CCL_4, 0x51),
- TOMTOM_REG_VAL(TOMTOM_A_NCP_DTEST, 0x10),
- TOMTOM_REG_VAL(TOMTOM_A_RX_HPH_CHOP_CTL, 0xA4),
- TOMTOM_REG_VAL(TOMTOM_A_RX_HPH_OCP_CTL, 0x69),
- TOMTOM_REG_VAL(TOMTOM_A_RX_HPH_CNP_WG_CTL, 0xDA),
- TOMTOM_REG_VAL(TOMTOM_A_RX_HPH_CNP_WG_TIME, 0x15),
- TOMTOM_REG_VAL(TOMTOM_A_RX_EAR_BIAS_PA, 0x76),
- TOMTOM_REG_VAL(TOMTOM_A_RX_EAR_CNP, 0xC0),
- TOMTOM_REG_VAL(TOMTOM_A_RX_LINE_BIAS_PA, 0x78),
- TOMTOM_REG_VAL(TOMTOM_A_RX_LINE_1_TEST, 0x2),
- TOMTOM_REG_VAL(TOMTOM_A_RX_LINE_2_TEST, 0x2),
- TOMTOM_REG_VAL(TOMTOM_A_RX_LINE_3_TEST, 0x2),
- TOMTOM_REG_VAL(TOMTOM_A_RX_LINE_4_TEST, 0x2),
- TOMTOM_REG_VAL(TOMTOM_A_SPKR_DRV1_OCP_CTL, 0x97),
- TOMTOM_REG_VAL(TOMTOM_A_SPKR_DRV1_CLIP_DET, 0x1),
- TOMTOM_REG_VAL(TOMTOM_A_SPKR_DRV1_IEC, 0x0),
- TOMTOM_REG_VAL(TOMTOM_A_SPKR_DRV2_OCP_CTL, 0x97),
- TOMTOM_REG_VAL(TOMTOM_A_SPKR_DRV2_CLIP_DET, 0x1),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_TX1_MUX_CTL, 0x4A),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_TX2_MUX_CTL, 0x4A),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_TX3_MUX_CTL, 0x4A),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_TX4_MUX_CTL, 0x4A),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_TX5_MUX_CTL, 0x4A),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_TX6_MUX_CTL, 0x4A),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_TX7_MUX_CTL, 0x4A),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_TX8_MUX_CTL, 0x4A),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_TX9_MUX_CTL, 0x4A),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_TX10_MUX_CTL, 0x4A),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_RX1_B4_CTL, 0xB),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_RX2_B4_CTL, 0xB),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_RX3_B4_CTL, 0xB),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_RX4_B4_CTL, 0xB),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_RX5_B4_CTL, 0xB),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_RX6_B4_CTL, 0xB),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_RX7_B4_CTL, 0xB),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_RX8_B4_CTL, 0xB),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_VBAT_GAIN_UPD_MON, 0x0),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_PA_RAMP_B1_CTL, 0x0),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_PA_RAMP_B2_CTL, 0x0),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_PA_RAMP_B3_CTL, 0x0),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_PA_RAMP_B4_CTL, 0x0),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_SPKR_CLIPDET_B1_CTL, 0x0),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_SPKR2_CLIPDET_B1_CTL, 0x0),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_COMP0_B4_CTL, 0x37),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_COMP0_B5_CTL, 0x7f),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_COMP0_B5_CTL, 0x7f),
-};
-
-static const struct wcd9xxx_reg_mask_val tomtom_2_0_reg_defaults[] = {
- TOMTOM_REG_VAL(TOMTOM_A_CDC_MAD_MAIN_CTL_2, 0x32),
- TOMTOM_REG_VAL(TOMTOM_A_RCO_CTRL, 0x10),
- TOMTOM_REG_VAL(TOMTOM_A_RX_HPH_L_TEST, 0x0A),
- TOMTOM_REG_VAL(TOMTOM_A_RX_HPH_R_TEST, 0x0A),
- TOMTOM_REG_VAL(TOMTOM_A_PIN_CTL_OE0, 0xC3),
- TOMTOM_REG_VAL(TOMTOM_A_PIN_CTL_DATA0, 0x00),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_TX_I2S_SCK_MODE, 0x04),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_TX_I2S_WS_MODE, 0x04),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_RX_I2S_SCK_MODE, 0x04),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_RX_I2S_WS_MODE, 0x04),
- TOMTOM_REG_VAL(TOMTOM_A_PIN_CTL_OE1, 0xE0),
- TOMTOM_REG_VAL(TOMTOM_A_PIN_CTL_OE2, 0x03),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_JTCK_MODE, 0x04),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_JTDI_MODE, 0x04),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_JTMS_MODE, 0x04),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_JTDO_MODE, 0x04),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_JTRST_MODE, 0x04),
-};
-
-static const struct wcd9xxx_reg_mask_val tomtom_2_0_reg_i2c_defaults[] = {
- TOMTOM_REG_VAL(TOMTOM_A_PIN_CTL_OE0, 0x00),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_TX_I2S_SCK_MODE, 0x0),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_TX_I2S_WS_MODE, 0x0),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_RX_I2S_SCK_MODE, 0x0),
- TOMTOM_REG_VAL(TOMTOM_A_CDC_RX_I2S_WS_MODE, 0x0),
- TOMTOM_REG_VAL(TOMTOM_A_PIN_CTL_OE1, 0x0),
- TOMTOM_REG_VAL(TOMTOM_A_PIN_CTL_OE2, 0x0),
-};
-
-static void tomtom_update_reg_defaults(struct snd_soc_codec *codec)
-{
- u32 i;
- struct wcd9xxx *tomtom_core = dev_get_drvdata(codec->dev->parent);
- struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-
- for (i = 0; i < ARRAY_SIZE(tomtom_reg_defaults); i++)
- snd_soc_write(codec, tomtom_reg_defaults[i].reg,
- tomtom_reg_defaults[i].val);
-
- for (i = 0; i < ARRAY_SIZE(tomtom_1_0_reg_defaults); i++)
- snd_soc_write(codec, tomtom_1_0_reg_defaults[i].reg,
- tomtom_1_0_reg_defaults[i].val);
-
- if (!TOMTOM_IS_1_0(tomtom_core->version)) {
- for (i = 0; i < ARRAY_SIZE(tomtom_2_0_reg_defaults); i++)
- snd_soc_write(codec, tomtom_2_0_reg_defaults[i].reg,
- tomtom_2_0_reg_defaults[i].val);
-
- if (tomtom->intf_type == WCD9XXX_INTERFACE_TYPE_I2C) {
- for (i = 0; i < ARRAY_SIZE(tomtom_2_0_reg_i2c_defaults);
- i++)
- snd_soc_write(codec,
- tomtom_2_0_reg_i2c_defaults[i].reg,
- tomtom_2_0_reg_i2c_defaults[i].val);
- }
- }
-}
-
-static const struct wcd9xxx_reg_mask_val tomtom_codec_reg_init_val[] = {
- /* Initialize current threshold to 350MA
- * number of wait and run cycles to 4096
- */
- {TOMTOM_A_RX_HPH_OCP_CTL, 0xE1, 0x61},
- {TOMTOM_A_RX_COM_OCP_COUNT, 0xFF, 0xFF},
- {TOMTOM_A_RX_HPH_L_TEST, 0x01, 0x01},
- {TOMTOM_A_RX_HPH_R_TEST, 0x01, 0x01},
-
- /* Initialize gain registers to use register gain */
- {TOMTOM_A_RX_HPH_L_GAIN, 0x20, 0x20},
- {TOMTOM_A_RX_HPH_R_GAIN, 0x20, 0x20},
- {TOMTOM_A_RX_LINE_1_GAIN, 0x20, 0x20},
- {TOMTOM_A_RX_LINE_2_GAIN, 0x20, 0x20},
- {TOMTOM_A_RX_LINE_3_GAIN, 0x20, 0x20},
- {TOMTOM_A_RX_LINE_4_GAIN, 0x20, 0x20},
- {TOMTOM_A_SPKR_DRV1_GAIN, 0x04, 0x04},
- {TOMTOM_A_SPKR_DRV2_GAIN, 0x04, 0x04},
-
- /* Use 16 bit sample size for TX1 to TX6 */
- {TOMTOM_A_CDC_CONN_TX_SB_B1_CTL, 0x30, 0x20},
- {TOMTOM_A_CDC_CONN_TX_SB_B2_CTL, 0x30, 0x20},
- {TOMTOM_A_CDC_CONN_TX_SB_B3_CTL, 0x30, 0x20},
- {TOMTOM_A_CDC_CONN_TX_SB_B4_CTL, 0x30, 0x20},
- {TOMTOM_A_CDC_CONN_TX_SB_B5_CTL, 0x30, 0x20},
- {TOMTOM_A_CDC_CONN_TX_SB_B6_CTL, 0x30, 0x20},
-
- /* Use 16 bit sample size for TX7 to TX10 */
- {TOMTOM_A_CDC_CONN_TX_SB_B7_CTL, 0x60, 0x40},
- {TOMTOM_A_CDC_CONN_TX_SB_B8_CTL, 0x60, 0x40},
- {TOMTOM_A_CDC_CONN_TX_SB_B9_CTL, 0x60, 0x40},
- {TOMTOM_A_CDC_CONN_TX_SB_B10_CTL, 0x60, 0x40},
-
- /*enable HPF filter for TX paths */
- {TOMTOM_A_CDC_TX1_MUX_CTL, 0x8, 0x0},
- {TOMTOM_A_CDC_TX2_MUX_CTL, 0x8, 0x0},
- {TOMTOM_A_CDC_TX3_MUX_CTL, 0x8, 0x0},
- {TOMTOM_A_CDC_TX4_MUX_CTL, 0x8, 0x0},
- {TOMTOM_A_CDC_TX5_MUX_CTL, 0x8, 0x0},
- {TOMTOM_A_CDC_TX6_MUX_CTL, 0x8, 0x0},
- {TOMTOM_A_CDC_TX7_MUX_CTL, 0x8, 0x0},
- {TOMTOM_A_CDC_TX8_MUX_CTL, 0x8, 0x0},
- {TOMTOM_A_CDC_TX9_MUX_CTL, 0x8, 0x0},
- {TOMTOM_A_CDC_TX10_MUX_CTL, 0x8, 0x0},
-
- /* Compander zone selection */
- {TOMTOM_A_CDC_COMP0_B4_CTL, 0x3F, 0x37},
- {TOMTOM_A_CDC_COMP1_B4_CTL, 0x3F, 0x37},
- {TOMTOM_A_CDC_COMP2_B4_CTL, 0x3F, 0x37},
- {TOMTOM_A_CDC_COMP0_B5_CTL, 0x7F, 0x7F},
- {TOMTOM_A_CDC_COMP1_B5_CTL, 0x7F, 0x7F},
- {TOMTOM_A_CDC_COMP2_B5_CTL, 0x7F, 0x7F},
-
- /*
- * Setup wavegen timer to 20msec and disable chopper
- * as default. This corresponds to Compander OFF
- */
- {TOMTOM_A_RX_HPH_CNP_WG_CTL, 0xFF, 0xDB},
- {TOMTOM_A_RX_HPH_CNP_WG_TIME, 0xFF, 0x58},
- {TOMTOM_A_RX_HPH_BIAS_WG_OCP, 0xFF, 0x1A},
- {TOMTOM_A_RX_HPH_CHOP_CTL, 0xFF, 0x24},
-
- /* Choose max non-overlap time for NCP */
- {TOMTOM_A_NCP_CLK, 0xFF, 0xFC},
-
- /* Program the 0.85 volt VBG_REFERENCE */
- {TOMTOM_A_BIAS_CURR_CTL_2, 0xFF, 0x04},
-
- /* set MAD input MIC to DMIC1 */
- {TOMTOM_A_CDC_MAD_INP_SEL, 0x0F, 0x08},
-
- {TOMTOM_A_INTR_MODE, 0x04, 0x04},
-};
-
-static const struct wcd9xxx_reg_mask_val tomtom_codec_2_0_reg_init_val[] = {
- {TOMTOM_A_RX_HPH_L_TEST, 0x08, 0x00},
- {TOMTOM_A_RX_HPH_R_TEST, 0x08, 0x00},
- {TOMTOM_A_CDC_CLIP_ADJ_SPKR_MIN_CLIP_THRESHOLD, 0xFF, 0x00},
- {TOMTOM_A_CDC_CLIP_ADJ_SPKR2_MIN_CLIP_THRESHOLD, 0xFF, 0x00},
- {TOMTOM_A_CDC_CLIP_ADJ_SPKR_BOOST_GATING, 0x01, 0x01},
- {TOMTOM_A_CDC_CLIP_ADJ_SPKR2_BOOST_GATING, 0x01, 0x01},
- {TOMTOM_A_CDC_CLIP_ADJ_SPKR_B1_CTL, 0x01, 0x00},
- {TOMTOM_A_CDC_CLIP_ADJ_SPKR2_B1_CTL, 0x01, 0x00},
-};
-
-static void tomtom_codec_init_reg(struct snd_soc_codec *codec)
-{
- u32 i;
- struct wcd9xxx *tomtom_core = dev_get_drvdata(codec->dev->parent);
-
- for (i = 0; i < ARRAY_SIZE(tomtom_codec_reg_init_val); i++)
- snd_soc_update_bits(codec, tomtom_codec_reg_init_val[i].reg,
- tomtom_codec_reg_init_val[i].mask,
- tomtom_codec_reg_init_val[i].val);
-
- if (!TOMTOM_IS_1_0(tomtom_core->version)) {
- for (i = 0; i < ARRAY_SIZE(tomtom_codec_2_0_reg_init_val); i++)
- snd_soc_update_bits(codec,
- tomtom_codec_2_0_reg_init_val[i].reg,
- tomtom_codec_2_0_reg_init_val[i].mask,
- tomtom_codec_2_0_reg_init_val[i].val);
- }
-
-}
-
-static void tomtom_slim_interface_init_reg(struct snd_soc_codec *codec)
-{
- int i;
- struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
-
- for (i = 0; i < WCD9XXX_SLIM_NUM_PORT_REG; i++)
- wcd9xxx_interface_reg_write(wcd9xxx,
- TOMTOM_SLIM_PGD_PORT_INT_EN0 + i,
- 0xFF);
-}
-
-static int tomtom_setup_irqs(struct tomtom_priv *tomtom)
-{
- int ret = 0;
- struct snd_soc_codec *codec = tomtom->codec;
- struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
- struct wcd9xxx_core_resource *core_res =
- &wcd9xxx->core_res;
-
- ret = wcd9xxx_request_irq(core_res, WCD9XXX_IRQ_SLIMBUS,
- tomtom_slimbus_irq, "SLIMBUS Slave", tomtom);
- if (ret)
- pr_err("%s: Failed to request irq %d\n", __func__,
- WCD9XXX_IRQ_SLIMBUS);
- else
- tomtom_slim_interface_init_reg(codec);
-
- return ret;
-}
-
-static void tomtom_cleanup_irqs(struct tomtom_priv *tomtom)
-{
- struct snd_soc_codec *codec = tomtom->codec;
- struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
- struct wcd9xxx_core_resource *core_res =
- &wcd9xxx->core_res;
-
- wcd9xxx_free_irq(core_res, WCD9XXX_IRQ_SLIMBUS, tomtom);
-}
-
-static
-struct firmware_cal *tomtom_get_hwdep_fw_cal(struct snd_soc_codec *codec,
- enum wcd_cal_type type)
-{
- struct tomtom_priv *tomtom;
- struct firmware_cal *hwdep_cal;
-
- if (!codec) {
- pr_err("%s: NULL codec pointer\n", __func__);
- return NULL;
- }
- tomtom = snd_soc_codec_get_drvdata(codec);
- hwdep_cal = wcdcal_get_fw_cal(tomtom->fw_data, type);
- if (!hwdep_cal) {
- dev_err(codec->dev, "%s: cal not sent by %d\n",
- __func__, type);
- return NULL;
- } else {
- return hwdep_cal;
- }
-}
-
-int tomtom_hs_detect(struct snd_soc_codec *codec,
- struct wcd9xxx_mbhc_config *mbhc_cfg)
-{
- int rc;
- struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-
- if (mbhc_cfg->insert_detect) {
- rc = wcd9xxx_mbhc_start(&tomtom->mbhc, mbhc_cfg);
- if (!rc)
- tomtom->mbhc_started = true;
- } else {
- /* MBHC is disabled, so disable Auto pulldown */
- snd_soc_update_bits(codec, TOMTOM_A_MBHC_INSERT_DETECT2, 0xC0,
- 0x00);
- snd_soc_update_bits(codec, TOMTOM_A_MICB_CFILT_2_CTL, 0x01,
- 0x00);
- tomtom->mbhc.mbhc_cfg = NULL;
- rc = 0;
- }
- return rc;
-}
-EXPORT_SYMBOL(tomtom_hs_detect);
-
-void tomtom_hs_detect_exit(struct snd_soc_codec *codec)
-{
- struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-
- wcd9xxx_mbhc_stop(&tomtom->mbhc);
- tomtom->mbhc_started = false;
-}
-EXPORT_SYMBOL(tomtom_hs_detect_exit);
-
-void tomtom_event_register(
- int (*machine_event_cb)(struct snd_soc_codec *codec,
- enum wcd9xxx_codec_event),
- struct snd_soc_codec *codec)
-{
- struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-
- tomtom->machine_codec_event_cb = machine_event_cb;
-}
-EXPORT_SYMBOL(tomtom_event_register);
-
-void tomtom_register_ext_clk_cb(
- int (*codec_ext_clk_en)(struct snd_soc_codec *codec,
- int enable, bool dapm),
- int (*get_ext_clk_cnt)(void),
- struct snd_soc_codec *codec)
-{
- struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-
- tomtom->codec_ext_clk_en_cb = codec_ext_clk_en;
- tomtom->codec_get_ext_clk_cnt = get_ext_clk_cnt;
-}
-EXPORT_SYMBOL(tomtom_register_ext_clk_cb);
-
-static void tomtom_init_slim_slave_cfg(struct snd_soc_codec *codec)
-{
- struct tomtom_priv *priv = snd_soc_codec_get_drvdata(codec);
- struct afe_param_cdc_slimbus_slave_cfg *cfg;
- struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
- uint64_t eaddr = 0;
-
- cfg = &priv->slimbus_slave_cfg;
- cfg->minor_version = 1;
- cfg->tx_slave_port_offset = 0;
- cfg->rx_slave_port_offset = 16;
-
- memcpy(&eaddr, &wcd9xxx->slim->e_addr, sizeof(wcd9xxx->slim->e_addr));
- WARN_ON(sizeof(wcd9xxx->slim->e_addr) != 6);
- cfg->device_enum_addr_lsw = eaddr & 0xFFFFFFFF;
- cfg->device_enum_addr_msw = eaddr >> 32;
-
- pr_debug("%s: slimbus logical address 0x%llx\n", __func__, eaddr);
-}
-
-static int tomtom_device_down(struct wcd9xxx *wcd9xxx)
-{
- int count;
- struct snd_soc_codec *codec;
- struct tomtom_priv *priv;
-
- codec = (struct snd_soc_codec *)(wcd9xxx->ssr_priv);
- priv = snd_soc_codec_get_drvdata(codec);
- wcd_cpe_ssr_event(priv->cpe_core, WCD_CPE_BUS_DOWN_EVENT);
- snd_soc_card_change_online_state(codec->component.card, 0);
- set_bit(BUS_DOWN, &priv->status_mask);
-
- for (count = 0; count < NUM_CODEC_DAIS; count++)
- priv->dai[count].bus_down_in_recovery = true;
- return 0;
-}
-
-static int wcd9xxx_prepare_static_pa(struct wcd9xxx_mbhc *mbhc,
- struct list_head *lh)
-{
- int i;
- struct snd_soc_codec *codec = mbhc->codec;
- u32 delay;
-
- const struct wcd9xxx_reg_mask_val reg_set_paon[] = {
- {TOMTOM_A_TX_COM_BIAS, 0xff, 0xF0},
- {WCD9XXX_A_CDC_RX1_B6_CTL, 0xff, 0x81},
- {WCD9XXX_A_CDC_CLK_RX_B1_CTL, 0x01, 0x01},
- {WCD9XXX_A_BUCK_MODE_2, 0xff, 0xEF},
- {WCD9XXX_A_BUCK_MODE_2, 0xff, 0xEE},
- {TOMTOM_A_NCP_DTEST, 0xff, 0x20},
- {WCD9XXX_A_CDC_CLK_OTHR_CTL, 0xff, 0x21},
- {WCD9XXX_A_CDC_RX2_B6_CTL, 0xff, 0x81},
- {WCD9XXX_A_CDC_CLK_RX_B1_CTL, 0x02, 0x02},
-
- {WCD9XXX_A_BUCK_MODE_2, 0xff, 0xAE},
- {WCD9XXX_A_BUCK_MODE_2, 0xff, 0xAA},
- {WCD9XXX_A_NCP_CLK, 0xff, 0x9C},
- {WCD9XXX_A_NCP_CLK, 0xff, 0xFC},
- {WCD9XXX_A_RX_COM_BIAS, 0xff, 0xA0},
- {WCD9XXX_A_BUCK_MODE_3, 0xff, 0xC6},
- {WCD9XXX_A_BUCK_MODE_4, 0xff, 0xE6},
- {WCD9XXX_A_BUCK_MODE_5, 0xff, 0x02},
- {WCD9XXX_A_BUCK_MODE_1, 0xff, 0xA1},
- /* Add a delay of 1ms after this reg write */
-
- {WCD9XXX_A_NCP_STATIC, 0xff, 0x28},
- {WCD9XXX_A_NCP_EN, 0xff, 0xFF},
- /* Add a delay of 1ms after this reg write */
-
- /* set HPHL */
- {WCD9XXX_A_RX_HPH_L_TEST, 0xff, 0x00},
- {TOMTOM_A_RX_HPH_L_PA_CTL, 0xff, 0x42},
- {TOMTOM_A_RX_HPH_BIAS_LDO, 0xff, 0x8C},
- {TOMTOM_A_RX_HPH_CHOP_CTL, 0xff, 0xA4},
- {WCD9XXX_A_RX_HPH_L_GAIN, 0xff, 0xE0},
- {WCD9XXX_A_RX_HPH_L_GAIN, 0xff, 0xEC},
-
- /* set HPHR */
- {WCD9XXX_A_RX_HPH_R_TEST, 0xff, 0x00},
- {TOMTOM_A_RX_HPH_R_PA_CTL, 0xff, 0x42},
- {WCD9XXX_A_RX_HPH_R_GAIN, 0xff, 0x20},
- {WCD9XXX_A_RX_HPH_R_GAIN, 0xff, 0x2C},
-
- /* set HPH PAs */
- {WCD9XXX_A_RX_HPH_BIAS_WG_OCP, 0xff, 0x2A},
- {WCD9XXX_A_RX_HPH_CNP_WG_CTL, 0xff, 0xDA},
- {WCD9XXX_A_RX_HPH_CNP_WG_TIME, 0xff, 0x15},
- {WCD9XXX_A_CDC_CLSH_B1_CTL, 0xff, 0xE6},
- {WCD9XXX_A_RX_HPH_L_DAC_CTL, 0xff, 0x40},
- {WCD9XXX_A_RX_HPH_L_DAC_CTL, 0xff, 0xC0},
- {WCD9XXX_A_RX_HPH_R_DAC_CTL, 0xff, 0x40},
- {WCD9XXX_A_RX_HPH_R_DAC_CTL, 0xff, 0xC0},
-
- {TOMTOM_A_RX_HPH_L_ATEST, 0xff, 0x00},
- {TOMTOM_A_RX_HPH_R_ATEST, 0xff, 0x00},
- };
-
- for (i = 0; i < ARRAY_SIZE(reg_set_paon); i++) {
- /*
- * Some of the codec registers like BUCK_MODE_1
- * and NCP_EN requires 1ms wait time for them
- * to take effect. Other register writes for
- * PA configuration do not require any wait time.
- */
- if (reg_set_paon[i].reg == WCD9XXX_A_BUCK_MODE_1 ||
- reg_set_paon[i].reg == WCD9XXX_A_NCP_EN)
- delay = 1000;
- else
- delay = 0;
- wcd9xxx_soc_update_bits_push(codec, lh,
- reg_set_paon[i].reg,
- reg_set_paon[i].mask,
- reg_set_paon[i].val, delay);
- }
- pr_debug("%s: PAs are prepared\n", __func__);
-
- return 0;
-}
-
-static int wcd9xxx_enable_static_pa(struct wcd9xxx_mbhc *mbhc, bool enable,
- u8 hph_pa)
-{
- struct snd_soc_codec *codec = mbhc->codec;
- const int wg_time = snd_soc_read(codec, WCD9XXX_A_RX_HPH_CNP_WG_TIME) *
- TOMTOM_WG_TIME_FACTOR_US;
- u8 mask = (hph_pa << 4);
- u8 pa_en = enable ? mask : ~mask;
-
- snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_CNP_EN, mask, pa_en);
- /* Wait for wave gen time to avoid pop noise */
- usleep_range(wg_time, wg_time + WCD9XXX_USLEEP_RANGE_MARGIN_US);
- pr_debug("%s: PAs are %s as static mode (wg_time %d)\n", __func__,
- enable ? "enabled" : "disabled", wg_time);
- return 0;
-}
-
-static int tomtom_setup_zdet(struct wcd9xxx_mbhc *mbhc,
- enum mbhc_impedance_detect_stages stage)
-{
- int ret = 0;
- struct snd_soc_codec *codec = mbhc->codec;
- struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
-
-#define __wr(reg, mask, value) \
- do { \
- ret = wcd9xxx_soc_update_bits_push(codec, \
- &tomtom->reg_save_restore, \
- reg, mask, value, 0); \
- if (ret < 0) \
- return ret; \
- } while (0)
-
- switch (stage) {
-
- case MBHC_ZDET_PRE_MEASURE:
- INIT_LIST_HEAD(&tomtom->reg_save_restore);
- wcd9xxx_prepare_static_pa(mbhc, &tomtom->reg_save_restore);
- /* Set HPH_MBHC for zdet */
- __wr(WCD9XXX_A_MBHC_HPH, 0xff, 0xC4);
- usleep_range(10, 10 + WCD9XXX_USLEEP_RANGE_MARGIN_US);
- wcd9xxx_enable_static_pa(mbhc, HPH_PA_ENABLE, HPH_PA_L_R);
-
- /* save old value of registers and write the new value */
- __wr(WCD9XXX_A_RX_HPH_OCP_CTL, 0xff, 0x69);
- __wr(WCD9XXX_A_CDC_RX1_B6_CTL, 0xff, 0x80);
- __wr(WCD9XXX_A_CDC_RX2_B6_CTL, 0xff, 0x80);
- /* Enable MBHC MUX, Set MUX current to 37.5uA and ADC7 */
- __wr(WCD9XXX_A_MBHC_SCALING_MUX_1, 0xff, 0xC0);
- __wr(WCD9XXX_A_MBHC_SCALING_MUX_2, 0xff, 0xF0);
- __wr(TOMTOM_A_TX_7_TXFE_CLKDIV, 0xff, 0x8B);
- __wr(WCD9XXX_A_TX_7_MBHC_TEST_CTL, 0xff, 0x78);
- __wr(WCD9XXX_A_TX_7_MBHC_EN, 0xff, 0x8C);
- __wr(WCD9XXX_A_CDC_MBHC_B1_CTL, 0xff, 0xDC);
- /* Reset MBHC and set it up for STA */
- __wr(WCD9XXX_A_CDC_MBHC_CLK_CTL, 0xff, 0x0A);
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x00);
- __wr(WCD9XXX_A_CDC_MBHC_CLK_CTL, 0xff, 0x02);
- __wr(WCD9XXX_A_CDC_MBHC_TIMER_B5_CTL, 0xff, 0x80);
- __wr(WCD9XXX_A_CDC_MBHC_TIMER_B4_CTL, 0xff, 0x25);
- /* Wait for ~50us to let MBHC hardware settle down */
- usleep_range(50, 50 + WCD9XXX_USLEEP_RANGE_MARGIN_US);
- break;
- case MBHC_ZDET_POST_MEASURE:
- /* 0x69 for 105 number of samples for PA RAMP */
- snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B4_CTL, 0x69);
- /* Program the PA Ramp to FS_16K, L shift 1 */
- snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B3_CTL,
- 0x1 << 4 | 0x6);
- /* Reset the PA Ramp */
- snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B1_CTL, 0x1C);
- /*
- * Connect the PA Ramp to PA chain and release reset with
- * keep it connected.
- */
- snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B1_CTL, 0x1F);
- snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B1_CTL, 0x03);
-
- /* Start the PA ramp on HPH L and R */
- snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B2_CTL, 0x05);
- /* Ramp generator takes ~30ms */
- usleep_range(TOMTOM_HPH_PA_RAMP_DELAY,
- TOMTOM_HPH_PA_RAMP_DELAY +
- WCD9XXX_USLEEP_RANGE_MARGIN_US);
-
- /*
- * Set the multiplication factor for zdet calculation
- * based on the Ramp voltage and Gain used
- */
- tomtom->zdet_gain_mul_fact = TOMTOM_ZDET_MUL_FACTOR_1X;
- break;
- case MBHC_ZDET_GAIN_0:
- /* Set Gain at 1x */
- snd_soc_write(codec, TOMTOM_A_RX_HPH_L_ATEST, 0x00);
- snd_soc_write(codec, TOMTOM_A_RX_HPH_R_ATEST, 0x00);
- snd_soc_write(codec, TOMTOM_A_RX_HPH_L_PA_CTL, 0x42);
- /* Allow 100us for gain registers to settle */
- usleep_range(100,
- 100 + WCD9XXX_USLEEP_RANGE_MARGIN_US);
- break;
- case MBHC_ZDET_GAIN_UPDATE_1X:
- /*
- * Set the multiplication factor for zdet calculation
- * based on the Gain value used
- */
- tomtom->zdet_gain_mul_fact = TOMTOM_ZDET_MUL_FACTOR_1X;
- break;
- case MBHC_ZDET_GAIN_1:
- /* Set Gain at 10x */
- snd_soc_write(codec, TOMTOM_A_RX_HPH_L_ATEST, 0x10);
- snd_soc_write(codec, TOMTOM_A_RX_HPH_R_ATEST, 0x00);
- snd_soc_write(codec, TOMTOM_A_RX_HPH_L_PA_CTL, 0x42);
- /* Allow 100us for gain registers to settle */
- usleep_range(100,
- 100 + WCD9XXX_USLEEP_RANGE_MARGIN_US);
-
- /*
- * Set the multiplication factor for zdet calculation
- * based on the Gain value used
- */
- tomtom->zdet_gain_mul_fact = TOMTOM_ZDET_MUL_FACTOR_10X;
- break;
- case MBHC_ZDET_GAIN_2:
- /* Set Gain at 100x */
- snd_soc_write(codec, TOMTOM_A_RX_HPH_L_ATEST, 0x00);
- snd_soc_write(codec, TOMTOM_A_RX_HPH_R_ATEST, 0x10);
- snd_soc_write(codec, TOMTOM_A_RX_HPH_L_PA_CTL, 0x43);
- /* Allow 100us for gain registers to settle */
- usleep_range(100,
- 100 + WCD9XXX_USLEEP_RANGE_MARGIN_US);
-
- /*
- * Set the multiplication factor for zdet calculation
- * based on the Gain value used
- */
- tomtom->zdet_gain_mul_fact = TOMTOM_ZDET_MUL_FACTOR_100X;
- break;
- case MBHC_ZDET_RAMP_DISABLE:
- /* Ramp HPH L & R back to Zero */
- snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B2_CTL, 0x00);
- /* 0x69 for 105 number of samples for PA RAMP */
- snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B4_CTL, 0x69);
- /* Program the PA Ramp to FS_16K, L shift 1 */
- snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B3_CTL,
- 0x1 << 4 | 0x6);
- /* Reset the PA Ramp */
- snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B1_CTL, 0x17);
- /*
- * Connect the PA Ramp to PA chain and release reset with
- * keep it connected.
- */
- snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B1_CTL, 0x03);
- /* Start the PA ramp on HPH L and R */
- snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B2_CTL, 0x0A);
- /* Ramp generator takes ~30ms to settle down */
- usleep_range(TOMTOM_HPH_PA_RAMP_DELAY,
- TOMTOM_HPH_PA_RAMP_DELAY +
- WCD9XXX_USLEEP_RANGE_MARGIN_US);
- break;
- case MBHC_ZDET_HPHR_RAMP_DISABLE:
- /* Ramp HPHR back to Zero */
- snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B2_CTL, 0x00);
- snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B4_CTL, 0x69);
- snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B3_CTL,
- 0x1 << 4 | 0x6);
- /* Reset the PA Ramp */
- snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B1_CTL, 0x17);
- /*
- * Connect the PA Ramp to PA chain and release reset with
- * keep it connected.
- */
- snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B1_CTL, 0x03);
- snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B2_CTL, 0x08);
- /* Ramp generator takes ~30ms to settle down */
- usleep_range(TOMTOM_HPH_PA_RAMP_DELAY,
- TOMTOM_HPH_PA_RAMP_DELAY +
- WCD9XXX_USLEEP_RANGE_MARGIN_US);
- break;
- case MBHC_ZDET_HPHL_RAMP_DISABLE:
- /* Ramp back to Zero */
- snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B2_CTL, 0x00);
- snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B4_CTL, 0x69);
- snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B3_CTL,
- 0x1 << 4 | 0x6);
- /* Reset the PA Ramp */
- snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B1_CTL, 0x17);
- /*
- * Connect the PA Ramp to PA chain and release reset with
- * keep it connected.
- */
- snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B1_CTL, 0x03);
- snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B2_CTL, 0x02);
- /* Ramp generator takes ~30ms to settle down */
- usleep_range(TOMTOM_HPH_PA_RAMP_DELAY,
- TOMTOM_HPH_PA_RAMP_DELAY +
- WCD9XXX_USLEEP_RANGE_MARGIN_US);
- break;
- case MBHC_ZDET_HPHR_PA_DISABLE:
- /* Disable PA */
- wcd9xxx_enable_static_pa(mbhc, HPH_PA_DISABLE, HPH_PA_R);
- break;
- case MBHC_ZDET_PA_DISABLE:
- /* Disable PA */
- if (!mbhc->hph_pa_dac_state &&
- (!(test_bit(MBHC_EVENT_PA_HPHL, &mbhc->event_state) ||
- test_bit(MBHC_EVENT_PA_HPHR, &mbhc->event_state))))
- wcd9xxx_enable_static_pa(mbhc, HPH_PA_DISABLE,
- HPH_PA_L_R);
- else if (!(snd_soc_read(codec, WCD9XXX_A_RX_HPH_CNP_EN) & 0x10))
- wcd9xxx_enable_static_pa(mbhc, HPH_PA_ENABLE, HPH_PA_R);
-
- /* Turn off PA ramp generator */
- snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B1_CTL, 0x00);
- snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B2_CTL, 0x00);
- snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B3_CTL, 0x00);
- snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B4_CTL, 0x00);
-
- /* Restore registers */
- wcd9xxx_restore_registers(codec, &tomtom->reg_save_restore);
- break;
- }
-#undef __wr
-
- return ret;
-}
-
-/* Calculate final impedance values for HPH left and right based on formulae */
-static void tomtom_compute_impedance(struct wcd9xxx_mbhc *mbhc, s16 *l, s16 *r,
- uint32_t *zl, uint32_t *zr)
-{
- s64 zln, zrn;
- int zld, zrd;
- s64 rl = 0, rr = 0;
- struct snd_soc_codec *codec;
- struct tomtom_priv *tomtom;
-
- if (!mbhc) {
- pr_err("%s: Invalid parameters mbhc = %pK\n",
- __func__, mbhc);
- return;
- }
- codec = mbhc->codec;
- tomtom = snd_soc_codec_get_drvdata(codec);
-
- if (l && zl) {
- zln = (s64) (l[1] - l[0]) * tomtom->zdet_gain_mul_fact;
- zld = (l[2] - l[0]);
- if (zld)
- rl = div_s64(zln, zld);
- else
- /* If L0 and L2 are same, Z has to be on Zone 3.
- * Assign a default value so that atleast the value
- * is read again with Ramp-up
- */
- rl = TOMTOM_ZDET_ZONE_3_DEFAULT_VAL;
-
- /* 32-bit LSBs are enough to hold Impedance values */
- *zl = (u32) rl;
- }
- if (r && zr) {
- zrn = (s64) (r[1] - r[0]) * tomtom->zdet_gain_mul_fact;
- zrd = (r[2] - r[0]);
- if (zrd)
- rr = div_s64(zrn, zrd);
- else
- /* If R0 and R2 are same, Z has to be on Zone 3.
- * Assign a default value so that atleast the value
- * is read again with Ramp-up
- */
- rr = TOMTOM_ZDET_ZONE_3_DEFAULT_VAL;
-
- /* 32-bit LSBs are enough to hold Impedance values */
- *zr = (u32) rr;
- }
-}
-
-/*
- * Calculate error approximation of impedance values for HPH left
- * and HPH right based on QFuse values
- */
-static void tomtom_zdet_error_approx(struct wcd9xxx_mbhc *mbhc, uint32_t *zl,
- uint32_t *zr)
-{
- struct snd_soc_codec *codec;
- struct tomtom_priv *tomtom;
- s8 q1_t, q2_t;
- s8 q1_m, q2_m;
- s8 q1, q2;
- u8 div_shift;
- int rl_alpha = 0, rr_alpha = 0;
- int rl_beta = 0, rr_beta = 0;
- u64 rl = 0, rr = 0;
- const int mult_factor = TOMTOM_ZDET_ERROR_APPROX_MUL_FACTOR;
- const int shift = TOMTOM_ZDET_ERROR_APPROX_SHIFT;
-
- if (!zl || !zr || !mbhc) {
- pr_err("%s: Invalid parameters zl = %pK zr = %pK, mbhc = %pK\n",
- __func__, zl, zr, mbhc);
- return;
- }
- codec = mbhc->codec;
- tomtom = snd_soc_codec_get_drvdata(codec);
-
- if ((tomtom->zdet_gain_mul_fact == TOMTOM_ZDET_MUL_FACTOR_1X) ||
- (tomtom->zdet_gain_mul_fact == TOMTOM_ZDET_MUL_FACTOR_10X)) {
- q1_t = ((snd_soc_read(codec, TOMTOM_A_QFUSE_DATA_OUT0) &
- 0x3) << 0x5);
- q1_t |= ((snd_soc_read(codec, TOMTOM_A_QFUSE_DATA_OUT1) &
- 0xF8) >> 0x3);
- q2_t = ((snd_soc_read(codec, TOMTOM_A_QFUSE_DATA_OUT1) &
- 0x7) << 0x4);
- q2_t |= ((snd_soc_read(codec, TOMTOM_A_QFUSE_DATA_OUT2) &
- 0xF0) >> 0x4);
- /* Take out the numeric part of the Qfuse value */
- q1_m = q1_t & 0x3F;
- q2_m = q2_t & 0x3F;
- /* Check the sign part of the Qfuse and adjust value */
- q1 = (q1_t & 0x40) ? -q1_m : q1_m;
- q2 = (q2_t & 0x40) ? -q2_m : q2_m;
- div_shift = 1;
- } else {
- q1_t = ((snd_soc_read(codec, TOMTOM_A_QFUSE_DATA_OUT2) &
- 0xF) << 0x2);
- q1_t |= ((snd_soc_read(codec, TOMTOM_A_QFUSE_DATA_OUT3) &
- 0xC0) >> 0x6);
- q2_t = (snd_soc_read(codec, TOMTOM_A_QFUSE_DATA_OUT3) & 0x3F);
- /* Take out the numeric part of the Qfuse value */
- q1_m = q1_t & 0x1F;
- q2_m = q2_t & 0x1F;
- /* Check the sign part of the Qfuse and adjust value */
- q1 = (q1_t & 0x20) ? -q1_m : q1_m;
- q2 = (q2_t & 0x20) ? -q2_m : q2_m;
- div_shift = 0;
- }
-
- dev_dbg(codec->dev, "%s: qfuse1 = %d, qfuse2 = %d\n",
- __func__, q1, q2);
- if (!q1 && !q2) {
- dev_dbg(codec->dev, "%s: qfuse1 and qfuse2 are 0. Exiting\n",
- __func__);
- return;
- }
-
- /*
- * Use multiplication and shift to avoid floating point math
- * The Z value is calculated with the below formulae using
- * the Qfuse value-
- * zl = zl * [1 - {(Q1 / div) / 100}] (Include sign for Q1)
- * zr = zr * [1 - {(Q2 / div) / 100}] (Include sign for Q2)
- * We multiply by 65536 and shift 16 times to get the approx result
- * div = 4 for 1x gain, div = 2 for 10x/100x gain
- */
- /* Q1/4 */
- rl_alpha = q1 >> div_shift;
- rl_alpha = 100 - rl_alpha;
- /* {rl_alpha/100} * 65536 */
- rl_beta = rl_alpha * mult_factor;
- rl = (u64) *zl * rl_beta;
- /* rl/65536 */
- rl = (u64) rl >> shift;
-
- rr_alpha = q2 >> div_shift;
- rr_alpha = 100 - rr_alpha;
- rr_beta = rr_alpha * mult_factor;
- rr = (u64) *zr * rr_beta;
- rr = (u64) rr >> shift;
-
- dev_dbg(codec->dev, "%s: rl = 0x%llx (%lld) \t rr = 0x%llx (%lld)\n",
- __func__, rl, rl, rr, rr);
-
- *zl = (u32) rl;
- *zr = (u32) rr;
-}
-
-static enum wcd9xxx_cdc_type tomtom_get_cdc_type(void)
-{
- return WCD9XXX_CDC_TYPE_TOMTOM;
-}
-
-static bool tomtom_mbhc_ins_rem_status(struct snd_soc_codec *codec)
-{
- return !(snd_soc_read(codec, WCD9XXX_A_MBHC_INSERT_DET_STATUS) &
- (1 << 4));
-}
-
-static void tomtom_mbhc_micb_pulldown_ctrl(struct wcd9xxx_mbhc *mbhc,
- bool enable)
-{
- struct snd_soc_codec *codec = mbhc->codec;
-
- if (!enable) {
- /* Remove automatic pulldown on micbias */
- snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.cfilt_ctl,
- 0x01, 0x00);
- } else {
- /* Enable automatic pulldown on micbias */
- snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.cfilt_ctl,
- 0x01, 0x01);
- }
-}
-
-static void tomtom_codec_hph_auto_pull_down(struct snd_soc_codec *codec,
- bool enable)
-{
- struct wcd9xxx *tomtom_core = dev_get_drvdata(codec->dev->parent);
-
- if (TOMTOM_IS_1_0(tomtom_core->version))
- return;
-
- dev_dbg(codec->dev, "%s: %s auto pull down\n", __func__,
- enable ? "enable" : "disable");
- if (enable) {
- snd_soc_update_bits(codec, TOMTOM_A_RX_HPH_L_TEST, 0x08, 0x08);
- snd_soc_update_bits(codec, TOMTOM_A_RX_HPH_R_TEST, 0x08, 0x08);
- } else {
- snd_soc_update_bits(codec, TOMTOM_A_RX_HPH_L_TEST, 0x08, 0x00);
- snd_soc_update_bits(codec, TOMTOM_A_RX_HPH_R_TEST, 0x08, 0x00);
- }
-}
-
-static const struct wcd9xxx_mbhc_cb mbhc_cb = {
- .get_cdc_type = tomtom_get_cdc_type,
- .setup_zdet = tomtom_setup_zdet,
- .compute_impedance = tomtom_compute_impedance,
- .zdet_error_approx = tomtom_zdet_error_approx,
- .insert_rem_status = tomtom_mbhc_ins_rem_status,
- .micbias_pulldown_ctrl = tomtom_mbhc_micb_pulldown_ctrl,
- .codec_rco_ctrl = tomtom_codec_internal_rco_ctrl,
- .hph_auto_pulldown_ctrl = tomtom_codec_hph_auto_pull_down,
- .get_hwdep_fw_cal = tomtom_get_hwdep_fw_cal,
-};
-
-static const struct wcd9xxx_mbhc_intr cdc_intr_ids = {
- .poll_plug_rem = WCD9XXX_IRQ_MBHC_REMOVAL,
- .shortavg_complete = WCD9XXX_IRQ_MBHC_SHORT_TERM,
- .potential_button_press = WCD9XXX_IRQ_MBHC_PRESS,
- .button_release = WCD9XXX_IRQ_MBHC_RELEASE,
- .dce_est_complete = WCD9XXX_IRQ_MBHC_POTENTIAL,
- .insertion = WCD9XXX_IRQ_MBHC_INSERTION,
- .hph_left_ocp = WCD9XXX_IRQ_HPH_PA_OCPL_FAULT,
- .hph_right_ocp = WCD9XXX_IRQ_HPH_PA_OCPR_FAULT,
- .hs_jack_switch = WCD9330_IRQ_MBHC_JACK_SWITCH,
-};
-
-static int tomtom_post_reset_cb(struct wcd9xxx *wcd9xxx)
-{
- int ret = 0;
- struct snd_soc_codec *codec;
- struct tomtom_priv *tomtom;
- int rco_clk_rate;
-
- codec = (struct snd_soc_codec *)(wcd9xxx->ssr_priv);
- tomtom = snd_soc_codec_get_drvdata(codec);
-
- snd_soc_card_change_online_state(codec->component.card, 1);
- clear_bit(BUS_DOWN, &tomtom->status_mask);
-
- mutex_lock(&tomtom->codec_mutex);
-
- tomtom_update_reg_defaults(codec);
- if (wcd9xxx->mclk_rate == TOMTOM_MCLK_CLK_12P288MHZ)
- snd_soc_update_bits(codec, TOMTOM_A_CHIP_CTL, 0x06, 0x0);
- else if (wcd9xxx->mclk_rate == TOMTOM_MCLK_CLK_9P6MHZ)
- snd_soc_update_bits(codec, TOMTOM_A_CHIP_CTL, 0x06, 0x2);
- tomtom_codec_init_reg(codec);
-
- snd_soc_cache_sync(codec);
-
- ret = tomtom_handle_pdata(tomtom);
- if (ret < 0)
- pr_err("%s: bad pdata\n", __func__);
-
- tomtom_init_slim_slave_cfg(codec);
- tomtom_slim_interface_init_reg(codec);
- wcd_cpe_ssr_event(tomtom->cpe_core, WCD_CPE_BUS_UP_EVENT);
- wcd9xxx_resmgr_post_ssr(&tomtom->resmgr);
-
- if (tomtom->mbhc_started) {
- wcd9xxx_mbhc_deinit(&tomtom->mbhc);
- tomtom->mbhc_started = false;
-
- if (wcd9xxx->mclk_rate == TOMTOM_MCLK_CLK_12P288MHZ)
- rco_clk_rate = TOMTOM_MCLK_CLK_12P288MHZ;
- else
- rco_clk_rate = TOMTOM_MCLK_CLK_9P6MHZ;
-
- ret = wcd9xxx_mbhc_init(&tomtom->mbhc, &tomtom->resmgr, codec,
- tomtom_enable_mbhc_micbias,
- &mbhc_cb, &cdc_intr_ids,
- rco_clk_rate, TOMTOM_ZDET_SUPPORTED);
- if (ret)
- pr_err("%s: mbhc init failed %d\n", __func__, ret);
- else
- tomtom_hs_detect(codec, tomtom->mbhc.mbhc_cfg);
- }
-
- if (tomtom->machine_codec_event_cb)
- tomtom->machine_codec_event_cb(codec,
- WCD9XXX_CODEC_EVENT_CODEC_UP);
-
- tomtom_cleanup_irqs(tomtom);
- ret = tomtom_setup_irqs(tomtom);
- if (ret)
- pr_err("%s: Failed to setup irq: %d\n", __func__, ret);
-
- /*
- * After SSR, the qfuse sensing is lost.
- * Perform qfuse sensing again after SSR
- * handling is finished.
- */
- tomtom_enable_qfuse_sensing(codec);
- mutex_unlock(&tomtom->codec_mutex);
- return ret;
-}
-
-void *tomtom_get_afe_config(struct snd_soc_codec *codec,
- enum afe_config_type config_type)
-{
- struct tomtom_priv *priv = snd_soc_codec_get_drvdata(codec);
-
- switch (config_type) {
- case AFE_SLIMBUS_SLAVE_CONFIG:
- return &priv->slimbus_slave_cfg;
- case AFE_CDC_REGISTERS_CONFIG:
- return &tomtom_audio_reg_cfg;
- case AFE_SLIMBUS_SLAVE_PORT_CONFIG:
- return &tomtom_slimbus_slave_port_cfg;
- case AFE_AANC_VERSION:
- return &tomtom_cdc_aanc_version;
- case AFE_CLIP_BANK_SEL:
- return &clip_bank_sel;
- case AFE_CDC_CLIP_REGISTERS_CONFIG:
- return &tomtom_clip_reg_cfg;
- default:
- pr_err("%s: Unknown config_type 0x%x\n", __func__, config_type);
- return NULL;
- }
-}
-
-static struct wcd9xxx_reg_address tomtom_reg_address = {
- .micb_4_mbhc = TOMTOM_A_MICB_4_MBHC,
- .micb_4_int_rbias = TOMTOM_A_MICB_4_INT_RBIAS,
- .micb_4_ctl = TOMTOM_A_MICB_4_CTL,
-};
-
-static int wcd9xxx_ssr_register(struct wcd9xxx *control,
- int (*device_down_cb)(struct wcd9xxx *wcd9xxx),
- int (*device_up_cb)(struct wcd9xxx *wcd9xxx),
- void *priv)
-{
- control->dev_down = device_down_cb;
- control->post_reset = device_up_cb;
- control->ssr_priv = priv;
- return 0;
-}
-
-static const struct snd_soc_dapm_widget tomtom_1_dapm_widgets[] = {
- SND_SOC_DAPM_ADC_E("ADC1", NULL, TOMTOM_A_TX_1_GAIN, 7, 0,
- tomtom_codec_enable_adc,
- SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_ADC_E("ADC2", NULL, TOMTOM_A_TX_2_GAIN, 7, 0,
- tomtom_codec_enable_adc,
- SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_ADC_E("ADC3", NULL, TOMTOM_A_TX_3_GAIN, 7, 0,
- tomtom_codec_enable_adc,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_ADC_E("ADC4", NULL, TOMTOM_A_TX_4_GAIN, 7, 0,
- tomtom_codec_enable_adc,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_ADC_E("ADC5", NULL, TOMTOM_A_TX_5_GAIN, 7, 0,
- tomtom_codec_enable_adc,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_ADC_E("ADC6", NULL, TOMTOM_A_TX_6_GAIN, 7, 0,
- tomtom_codec_enable_adc,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_POST_PMD),
-};
-
-static struct regulator *tomtom_codec_find_regulator(struct snd_soc_codec *cdc,
- const char *name)
-{
- int i;
- struct wcd9xxx *core = dev_get_drvdata(cdc->dev->parent);
-
- for (i = 0; i < core->num_of_supplies; i++) {
- if (core->supplies[i].supply &&
- !strcmp(core->supplies[i].supply, name))
- return core->supplies[i].consumer;
- }
-
- return NULL;
-}
-
-static struct wcd_cpe_core *tomtom_codec_get_cpe_core(
- struct snd_soc_codec *codec)
-{
- struct tomtom_priv *priv = snd_soc_codec_get_drvdata(codec);
-
- return priv->cpe_core;
-}
-
-static int tomtom_codec_fll_enable(struct snd_soc_codec *codec,
- bool enable)
-{
- struct wcd9xxx *wcd9xxx;
-
- if (!codec || !codec->control_data) {
- pr_err("%s: Invalid codec handle, %pK\n",
- __func__, codec);
- return -EINVAL;
- }
-
- wcd9xxx = codec->control_data;
-
- dev_dbg(codec->dev, "%s: %s, mclk_rate = %d\n",
- __func__, (enable ? "enable" : "disable"),
- wcd9xxx->mclk_rate);
-
- switch (wcd9xxx->mclk_rate) {
- case TOMTOM_MCLK_CLK_9P6MHZ:
- snd_soc_update_bits(codec, TOMTOM_A_FLL_NREF,
- 0x1F, 0x15);
- snd_soc_update_bits(codec, TOMTOM_A_FLL_KDCO_TUNE,
- 0x07, 0x06);
- snd_soc_write(codec, TOMTOM_A_FLL_LOCK_THRESH, 0xD1);
- snd_soc_write(codec, TOMTOM_A_FLL_LOCK_DET_COUNT,
- 0x40);
- break;
- case TOMTOM_MCLK_CLK_12P288MHZ:
- snd_soc_update_bits(codec, TOMTOM_A_FLL_NREF,
- 0x1F, 0x11);
- snd_soc_update_bits(codec, TOMTOM_A_FLL_KDCO_TUNE,
- 0x07, 0x05);
- snd_soc_write(codec, TOMTOM_A_FLL_LOCK_THRESH, 0xB1);
- snd_soc_write(codec, TOMTOM_A_FLL_LOCK_DET_COUNT,
- 0x40);
- break;
- }
-
- return 0;
-}
-
-static int tomtom_codec_slim_reserve_bw(struct snd_soc_codec *codec,
- u32 bw_ops, bool commit)
-{
- struct wcd9xxx *wcd9xxx;
-
- if (!codec) {
- pr_err("%s: Invalid handle to codec\n",
- __func__);
- return -EINVAL;
- }
-
- wcd9xxx = dev_get_drvdata(codec->dev->parent);
-
- if (!wcd9xxx) {
- dev_err(codec->dev, "%s: Invalid parent drv_data\n",
- __func__);
- return -EINVAL;
- }
-
- return wcd9xxx_slim_reserve_bw(wcd9xxx, bw_ops, commit);
-}
-
-static int tomtom_codec_vote_max_bw(struct snd_soc_codec *codec,
- bool vote)
-{
- u32 bw_ops;
-
- if (vote)
- bw_ops = SLIM_BW_CLK_GEAR_9;
- else
- bw_ops = SLIM_BW_UNVOTE;
-
- return tomtom_codec_slim_reserve_bw(codec,
- bw_ops, true);
-}
-
-static const struct wcd9xxx_resmgr_cb resmgr_cb = {
- .cdc_rco_ctrl = tomtom_codec_internal_rco_ctrl,
-};
-
-static int tomtom_cpe_err_irq_control(struct snd_soc_codec *codec,
- enum cpe_err_irq_cntl_type cntl_type, u8 *status)
-{
- switch (cntl_type) {
- case CPE_ERR_IRQ_MASK:
- snd_soc_update_bits(codec,
- TOMTOM_A_SVASS_INT_MASK,
- 0x3F, 0x3F);
- break;
- case CPE_ERR_IRQ_UNMASK:
- snd_soc_update_bits(codec,
- TOMTOM_A_SVASS_INT_MASK,
- 0x3F, 0x0C);
- break;
- case CPE_ERR_IRQ_CLEAR:
- snd_soc_update_bits(codec,
- TOMTOM_A_SVASS_INT_CLR,
- 0x3F, 0x3F);
- break;
- case CPE_ERR_IRQ_STATUS:
- if (!status)
- return -EINVAL;
- *status = snd_soc_read(codec,
- TOMTOM_A_SVASS_INT_STATUS);
- break;
- }
-
- return 0;
-}
-
-static const struct wcd_cpe_cdc_cb cpe_cb = {
- .cdc_clk_en = tomtom_codec_internal_rco_ctrl,
- .cpe_clk_en = tomtom_codec_fll_enable,
- .lab_cdc_ch_ctl = tomtom_codec_enable_slimtx_mad,
- .cdc_ext_clk = tomtom_codec_ext_clk_en,
- .bus_vote_bw = tomtom_codec_vote_max_bw,
- .cpe_err_irq_control = tomtom_cpe_err_irq_control,
-};
-
-static struct cpe_svc_init_param cpe_svc_params = {
- .version = 0,
- .query_freq_plans_cb = NULL,
- .change_freq_plan_cb = NULL,
-};
-
-static int tomtom_cpe_initialize(struct snd_soc_codec *codec)
-{
- struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
- struct wcd_cpe_params cpe_params;
-
- memset(&cpe_params, 0,
- sizeof(struct wcd_cpe_params));
- cpe_params.codec = codec;
- cpe_params.get_cpe_core = tomtom_codec_get_cpe_core;
- cpe_params.cdc_cb = &cpe_cb;
- cpe_params.dbg_mode = cpe_debug_mode;
- cpe_params.cdc_major_ver = CPE_SVC_CODEC_TOMTOM;
- cpe_params.cdc_minor_ver = CPE_SVC_CODEC_V1P0;
- cpe_params.cdc_id = CPE_SVC_CODEC_TOMTOM;
-
- cpe_params.cdc_irq_info.cpe_engine_irq =
- WCD9330_IRQ_SVASS_ENGINE;
- cpe_params.cdc_irq_info.cpe_err_irq =
- WCD9330_IRQ_SVASS_ERR_EXCEPTION;
- cpe_params.cdc_irq_info.cpe_fatal_irqs =
- TOMTOM_CPE_FATAL_IRQS;
-
- cpe_svc_params.context = codec;
- cpe_params.cpe_svc_params = &cpe_svc_params;
-
- tomtom->cpe_core = wcd_cpe_init("cpe", codec,
- &cpe_params);
- if (IS_ERR_OR_NULL(tomtom->cpe_core)) {
- dev_err(codec->dev,
- "%s: Failed to enable CPE\n",
- __func__);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int tomtom_codec_probe(struct snd_soc_codec *codec)
-{
- struct wcd9xxx *control;
- struct tomtom_priv *tomtom;
- struct wcd9xxx_pdata *pdata;
- struct wcd9xxx *wcd9xxx;
- struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
- int ret = 0;
- int i, rco_clk_rate;
- void *ptr = NULL;
- struct wcd9xxx_core_resource *core_res;
- struct clk *wcd_ext_clk = NULL;
-
- dev_info(codec->dev, "%s()\n", __func__);
-
- control = dev_get_drvdata(codec->dev->parent);
-
- tomtom = snd_soc_codec_get_drvdata(codec);
-
- wcd9xxx_ssr_register(control, tomtom_device_down,
- tomtom_post_reset_cb, (void *)codec);
-
- for (i = 0; i < NUM_DECIMATORS; i++) {
- tx_hpf_work[i].tomtom = tomtom;
- tx_hpf_work[i].decimator = i + 1;
- tx_hpf_work[i].tx_hpf_bypass = false;
- INIT_DELAYED_WORK(&tx_hpf_work[i].dwork,
- tx_hpf_corner_freq_callback);
- }
-
- wcd9xxx = control;
- if (!of_find_property(wcd9xxx->dev->of_node, "clock-names", NULL)) {
- dev_dbg(wcd9xxx->dev, "%s: codec not using audio-ext-clk driver\n",
- __func__);
- } else {
- wcd_ext_clk = clk_get(wcd9xxx->dev, "wcd_clk");
- if (IS_ERR(wcd_ext_clk)) {
- dev_err(codec->dev, "%s: clk get %s failed\n",
- __func__, "wcd_ext_clk");
- goto err_nomem_slimch;
- }
- }
- tomtom->wcd_ext_clk = wcd_ext_clk;
- core_res = &wcd9xxx->core_res;
- pdata = dev_get_platdata(codec->dev->parent);
- /* codec resmgr module init */
- ret = wcd9xxx_resmgr_init(&tomtom->resmgr, codec, core_res, pdata,
- &pdata->micbias, &tomtom_reg_address,
- &resmgr_cb, WCD9XXX_CDC_TYPE_TOMTOM);
- if (ret) {
- pr_err("%s: wcd9xxx init failed %d\n", __func__, ret);
- goto err_nomem_slimch;
- }
-
- tomtom->clsh_d.buck_mv = tomtom_codec_get_buck_mv(codec);
- /* TomTom does not support dynamic switching of vdd_cp */
- tomtom->clsh_d.is_dynamic_vdd_cp = false;
- wcd9xxx_clsh_init(&tomtom->clsh_d, &tomtom->resmgr);
-
- if (wcd9xxx->mclk_rate == TOMTOM_MCLK_CLK_12P288MHZ)
- rco_clk_rate = TOMTOM_MCLK_CLK_12P288MHZ;
- else
- rco_clk_rate = TOMTOM_MCLK_CLK_9P6MHZ;
-
- tomtom->fw_data = kzalloc(sizeof(*(tomtom->fw_data)), GFP_KERNEL);
- if (!tomtom->fw_data)
- goto err_nomem_slimch;
- set_bit(WCD9XXX_ANC_CAL, tomtom->fw_data->cal_bit);
- set_bit(WCD9XXX_MAD_CAL, tomtom->fw_data->cal_bit);
- set_bit(WCD9XXX_MBHC_CAL, tomtom->fw_data->cal_bit);
- ret = wcd_cal_create_hwdep(tomtom->fw_data,
- WCD9XXX_CODEC_HWDEP_NODE, codec);
- if (ret < 0) {
- dev_err(codec->dev, "%s hwdep failed %d\n", __func__, ret);
- goto err_hwdep;
- }
-
- /* init and start mbhc */
- ret = wcd9xxx_mbhc_init(&tomtom->mbhc, &tomtom->resmgr, codec,
- tomtom_enable_mbhc_micbias,
- &mbhc_cb, &cdc_intr_ids,
- rco_clk_rate, TOMTOM_ZDET_SUPPORTED);
- if (ret) {
- pr_err("%s: mbhc init failed %d\n", __func__, ret);
- goto err_hwdep;
- }
-
- tomtom->codec = codec;
- for (i = 0; i < COMPANDER_MAX; i++) {
- tomtom->comp_enabled[i] = 0;
- tomtom->comp_fs[i] = COMPANDER_FS_48KHZ;
- }
- tomtom->intf_type = wcd9xxx_get_intf_type();
- tomtom->aux_pga_cnt = 0;
- tomtom->aux_l_gain = 0x1F;
- tomtom->aux_r_gain = 0x1F;
- tomtom->ldo_h_users = 0;
- tomtom->micb_2_users = 0;
- tomtom_update_reg_defaults(codec);
- pr_debug("%s: MCLK Rate = %x\n", __func__, wcd9xxx->mclk_rate);
- if (wcd9xxx->mclk_rate == TOMTOM_MCLK_CLK_12P288MHZ)
- snd_soc_update_bits(codec, TOMTOM_A_CHIP_CTL, 0x06, 0x0);
- else if (wcd9xxx->mclk_rate == TOMTOM_MCLK_CLK_9P6MHZ)
- snd_soc_update_bits(codec, TOMTOM_A_CHIP_CTL, 0x06, 0x2);
- tomtom_codec_init_reg(codec);
-
- ret = tomtom_handle_pdata(tomtom);
- if (ret < 0) {
- pr_err("%s: bad pdata\n", __func__);
- goto err_hwdep;
- }
-
- tomtom->spkdrv_reg = tomtom_codec_find_regulator(codec,
- WCD9XXX_VDD_SPKDRV_NAME);
- tomtom->spkdrv2_reg = tomtom_codec_find_regulator(codec,
- WCD9XXX_VDD_SPKDRV2_NAME);
-
- ptr = kmalloc((sizeof(tomtom_rx_chs) +
- sizeof(tomtom_tx_chs)), GFP_KERNEL);
- if (!ptr) {
- ret = -ENOMEM;
- goto err_hwdep;
- }
-
- if (tomtom->intf_type == WCD9XXX_INTERFACE_TYPE_I2C) {
- snd_soc_dapm_new_controls(dapm, tomtom_dapm_i2s_widgets,
- ARRAY_SIZE(tomtom_dapm_i2s_widgets));
- snd_soc_dapm_add_routes(dapm, audio_i2s_map,
- ARRAY_SIZE(audio_i2s_map));
- for (i = 0; i < ARRAY_SIZE(tomtom_i2s_dai); i++)
- INIT_LIST_HEAD(&tomtom->dai[i].wcd9xxx_ch_list);
- } else if (tomtom->intf_type == WCD9XXX_INTERFACE_TYPE_SLIMBUS) {
- for (i = 0; i < NUM_CODEC_DAIS; i++) {
- INIT_LIST_HEAD(&tomtom->dai[i].wcd9xxx_ch_list);
- init_waitqueue_head(&tomtom->dai[i].dai_wait);
- }
- tomtom_slimbus_slave_port_cfg.slave_dev_intfdev_la =
- control->slim_slave->laddr;
- tomtom_slimbus_slave_port_cfg.slave_dev_pgd_la =
- control->slim->laddr;
- tomtom_slimbus_slave_port_cfg.slave_port_mapping[0] =
- TOMTOM_MAD_SLIMBUS_TX_PORT;
-
- tomtom_init_slim_slave_cfg(codec);
- }
-
- snd_soc_dapm_new_controls(dapm, tomtom_1_dapm_widgets,
- ARRAY_SIZE(tomtom_1_dapm_widgets));
- snd_soc_add_codec_controls(codec,
- tomtom_1_x_analog_gain_controls,
- ARRAY_SIZE(tomtom_1_x_analog_gain_controls));
-
- snd_soc_add_codec_controls(codec, impedance_detect_controls,
- ARRAY_SIZE(impedance_detect_controls));
- snd_soc_add_codec_controls(codec, hph_type_detect_controls,
- ARRAY_SIZE(hph_type_detect_controls));
-
- control->num_rx_port = TOMTOM_RX_MAX;
- control->rx_chs = ptr;
- memcpy(control->rx_chs, tomtom_rx_chs, sizeof(tomtom_rx_chs));
- control->num_tx_port = TOMTOM_TX_MAX;
- control->tx_chs = ptr + sizeof(tomtom_rx_chs);
- memcpy(control->tx_chs, tomtom_tx_chs, sizeof(tomtom_tx_chs));
-
- snd_soc_dapm_sync(dapm);
-
- ret = tomtom_setup_irqs(tomtom);
- if (ret) {
- pr_err("%s: tomtom irq setup failed %d\n", __func__, ret);
- goto err_pdata;
- }
-
- atomic_set(&kp_tomtom_priv, (unsigned long)tomtom);
- mutex_lock(&tomtom->codec_mutex);
- snd_soc_dapm_disable_pin(dapm, "ANC HPHL");
- snd_soc_dapm_disable_pin(dapm, "ANC HPHR");
- snd_soc_dapm_disable_pin(dapm, "ANC HEADPHONE");
- snd_soc_dapm_disable_pin(dapm, "ANC EAR PA");
- snd_soc_dapm_disable_pin(dapm, "ANC EAR");
- mutex_unlock(&tomtom->codec_mutex);
- snd_soc_dapm_sync(dapm);
-
- codec->component.ignore_pmdown_time = 1;
- ret = tomtom_cpe_initialize(codec);
- if (ret) {
- dev_info(codec->dev,
- "%s: cpe initialization failed, ret = %d\n",
- __func__, ret);
- /* Do not fail probe if CPE failed */
- ret = 0;
- }
- return ret;
-
-err_pdata:
- kfree(ptr);
- control->rx_chs = NULL;
- control->tx_chs = NULL;
-err_hwdep:
- kfree(tomtom->fw_data);
- tomtom->fw_data = NULL;
-err_nomem_slimch:
- devm_kfree(codec->dev, tomtom);
- return ret;
-}
-static int tomtom_codec_remove(struct snd_soc_codec *codec)
-{
- struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
- struct wcd9xxx *control;
-
- WCD9XXX_BG_CLK_LOCK(&tomtom->resmgr);
- atomic_set(&kp_tomtom_priv, 0);
-
- WCD9XXX_BG_CLK_UNLOCK(&tomtom->resmgr);
-
- control = dev_get_drvdata(codec->dev->parent);
- control->rx_chs = NULL;
- control->tx_chs = NULL;
-
- if (tomtom->wcd_ext_clk)
- clk_put(tomtom->wcd_ext_clk);
- tomtom_cleanup_irqs(tomtom);
-
- /* cleanup MBHC */
- wcd9xxx_mbhc_deinit(&tomtom->mbhc);
- /* cleanup resmgr */
- wcd9xxx_resmgr_deinit(&tomtom->resmgr);
-
- tomtom->spkdrv_reg = NULL;
- tomtom->spkdrv2_reg = NULL;
-
- devm_kfree(codec->dev, tomtom);
- return 0;
-}
-
-static struct regmap *tomtom_get_regmap(struct device *dev)
-{
- struct wcd9xxx *control = dev_get_drvdata(dev->parent);
-
- return control->regmap;
-}
-
-static struct snd_soc_codec_driver soc_codec_dev_tomtom = {
- .probe = tomtom_codec_probe,
- .remove = tomtom_codec_remove,
- .get_regmap = tomtom_get_regmap,
- .component_driver = {
- .controls = tomtom_snd_controls,
- .num_controls = ARRAY_SIZE(tomtom_snd_controls),
- .dapm_widgets = tomtom_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(tomtom_dapm_widgets),
- .dapm_routes = audio_map,
- .num_dapm_routes = ARRAY_SIZE(audio_map),
- },
-};
-
-#ifdef CONFIG_PM
-static int tomtom_suspend(struct device *dev)
-{
- dev_dbg(dev, "%s: system suspend\n", __func__);
- return 0;
-}
-
-static int tomtom_resume(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct tomtom_priv *tomtom = platform_get_drvdata(pdev);
-
- if (!tomtom) {
- dev_err(dev, "%s: tomtom private data is NULL\n", __func__);
- return -EINVAL;
- }
- dev_dbg(dev, "%s: system resume\n", __func__);
- /* Notify */
- wcd9xxx_resmgr_notifier_call(&tomtom->resmgr,
- WCD9XXX_EVENT_POST_RESUME);
- return 0;
-}
-
-static const struct dev_pm_ops tomtom_pm_ops = {
- .suspend = tomtom_suspend,
- .resume = tomtom_resume,
-};
-#endif
-
-static int tomtom_probe(struct platform_device *pdev)
-{
- int ret = 0;
- struct tomtom_priv *tomtom;
-
- tomtom = devm_kzalloc(&pdev->dev, sizeof(struct tomtom_priv),
- GFP_KERNEL);
- if (!tomtom)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, tomtom);
-
- if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_SLIMBUS)
- ret = snd_soc_register_codec(&pdev->dev, &soc_codec_dev_tomtom,
- tomtom_dai, ARRAY_SIZE(tomtom_dai));
- else if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
- ret = snd_soc_register_codec(&pdev->dev, &soc_codec_dev_tomtom,
- tomtom_i2s_dai, ARRAY_SIZE(tomtom_i2s_dai));
- mutex_init(&tomtom->codec_mutex);
- return ret;
-}
-static int tomtom_remove(struct platform_device *pdev)
-{
- struct tomtom_priv *tomtom = platform_get_drvdata(pdev);
-
- mutex_destroy(&tomtom->codec_mutex);
- snd_soc_unregister_codec(&pdev->dev);
- return 0;
-}
-static struct platform_driver tomtom_codec_driver = {
- .probe = tomtom_probe,
- .remove = tomtom_remove,
- .driver = {
- .name = "tomtom_codec",
- .owner = THIS_MODULE,
-#ifdef CONFIG_PM
- .pm = &tomtom_pm_ops,
-#endif
- },
-};
-
-static int __init tomtom_codec_init(void)
-{
- return platform_driver_register(&tomtom_codec_driver);
-}
-
-static void __exit tomtom_codec_exit(void)
-{
- platform_driver_unregister(&tomtom_codec_driver);
-}
-
-module_init(tomtom_codec_init);
-module_exit(tomtom_codec_exit);
-
-MODULE_DESCRIPTION("TomTom codec driver");
-MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/wcd9330.h b/sound/soc/codecs/wcd9330.h
deleted file mode 100644
index 8679d01..0000000
--- a/sound/soc/codecs/wcd9330.h
+++ /dev/null
@@ -1,128 +0,0 @@
-/* Copyright (c) 2012-2015, 2017 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#ifndef WCD9330_H
-#define WCD9330_H
-
-#include <sound/soc.h>
-#include <sound/jack.h>
-#include <sound/apr_audio-v2.h>
-#include <linux/mfd/wcd9xxx/wcd9xxx-slimslave.h>
-#include "wcd9xxx-mbhc.h"
-#include "wcd9xxx-resmgr.h"
-#include "wcd9xxx-common.h"
-
-#define TOMTOM_NUM_REGISTERS 0x400
-#define TOMTOM_MAX_REGISTER (TOMTOM_NUM_REGISTERS-1)
-#define TOMTOM_CACHE_SIZE TOMTOM_NUM_REGISTERS
-
-#define TOMTOM_REG_VAL(reg, val) {reg, 0, val}
-#define TOMTOM_MCLK_ID 0
-
-#define TOMTOM_REGISTER_START_OFFSET 0x800
-#define TOMTOM_SB_PGD_PORT_RX_BASE 0x40
-#define TOMTOM_SB_PGD_PORT_TX_BASE 0x50
-
-#define WCD9330_DMIC_CLK_DIV_2 0x00
-#define WCD9330_DMIC_CLK_DIV_3 0x01
-#define WCD9330_DMIC_CLK_DIV_4 0x02
-#define WCD9330_DMIC_CLK_DIV_6 0x03
-#define WCD9330_DMIC_CLK_DIV_16 0x04
-
-#define TOMTOM_ZDET_SUPPORTED true
-
-extern const u8 tomtom_reset_reg_defaults[TOMTOM_CACHE_SIZE];
-struct tomtom_codec_dai_data {
- u32 rate;
- u32 *ch_num;
- u32 ch_act;
- u32 ch_tot;
-};
-
-enum tomtom_pid_current {
- TOMTOM_PID_MIC_2P5_UA,
- TOMTOM_PID_MIC_5_UA,
- TOMTOM_PID_MIC_10_UA,
- TOMTOM_PID_MIC_20_UA,
-};
-
-enum tomtom_mbhc_analog_pwr_cfg {
- TOMTOM_ANALOG_PWR_COLLAPSED = 0,
- TOMTOM_ANALOG_PWR_ON,
- TOMTOM_NUM_ANALOG_PWR_CONFIGS,
-};
-
-enum {
- HPH_PA_NONE = 0,
- HPH_PA_R,
- HPH_PA_L,
- HPH_PA_L_R,
-};
-
-/* Number of input and output Slimbus port */
-enum {
- TOMTOM_RX1 = 0,
- TOMTOM_RX2,
- TOMTOM_RX3,
- TOMTOM_RX4,
- TOMTOM_RX5,
- TOMTOM_RX6,
- TOMTOM_RX7,
- TOMTOM_RX8,
- TOMTOM_RX9,
- TOMTOM_RX10,
- TOMTOM_RX11,
- TOMTOM_RX12,
- TOMTOM_RX13,
- TOMTOM_RX_MAX,
-};
-
-enum {
- TOMTOM_TX1 = 0,
- TOMTOM_TX2,
- TOMTOM_TX3,
- TOMTOM_TX4,
- TOMTOM_TX5,
- TOMTOM_TX6,
- TOMTOM_TX7,
- TOMTOM_TX8,
- TOMTOM_TX9,
- TOMTOM_TX10,
- TOMTOM_TX11,
- TOMTOM_TX12,
- TOMTOM_TX13,
- TOMTOM_TX14,
- TOMTOM_TX15,
- TOMTOM_TX16,
- TOMTOM_TX_MAX,
-};
-
-extern int tomtom_mclk_enable(struct snd_soc_codec *codec, int mclk_enable,
- bool dapm);
-extern int tomtom_codec_mclk_enable(struct snd_soc_codec *codec,
- int mclk_enable, bool dapm);
-extern int tomtom_hs_detect(struct snd_soc_codec *codec,
- struct wcd9xxx_mbhc_config *mbhc_cfg);
-extern void tomtom_hs_detect_exit(struct snd_soc_codec *codec);
-extern void *tomtom_get_afe_config(struct snd_soc_codec *codec,
- enum afe_config_type config_type);
-
-extern void tomtom_event_register(
- int (*machine_event_cb)(struct snd_soc_codec *codec,
- enum wcd9xxx_codec_event),
- struct snd_soc_codec *codec);
-extern void tomtom_register_ext_clk_cb(
- int (*codec_ext_clk_en)(struct snd_soc_codec *codec,
- int enable, bool dapm),
- int (*get_ext_clk_cnt)(void),
- struct snd_soc_codec *codec);
-extern int tomtom_enable_qfuse_sensing(struct snd_soc_codec *codec);
-#endif
diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c
index 90d16fb..a6a5350 100644
--- a/sound/soc/codecs/wcd9335.c
+++ b/sound/soc/codecs/wcd9335.c
@@ -26,6 +26,7 @@
#include <linux/mfd/wcd9xxx/wcd9xxx-irq.h>
#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
#include <linux/mfd/wcd9335/registers.h>
+#include <linux/mfd/wcd9335/irq.h>
#include <linux/mfd/wcd9xxx/pdata.h>
#include <linux/regulator/consumer.h>
#include <linux/clk.h>
@@ -347,7 +348,6 @@
AUDIO_NOMINAL,
CPE_NOMINAL,
HPH_PA_DELAY,
- SB_CLK_GEAR,
ANC_MIC_AMIC1,
ANC_MIC_AMIC2,
ANC_MIC_AMIC3,
@@ -546,38 +546,6 @@
SPLINE_SRC_MAX,
};
-/* wcd9335 interrupt table */
-static const struct intr_data wcd9335_intr_table[] = {
- {WCD9XXX_IRQ_SLIMBUS, false},
- {WCD9335_IRQ_MBHC_SW_DET, true},
- {WCD9335_IRQ_MBHC_BUTTON_PRESS_DET, true},
- {WCD9335_IRQ_MBHC_BUTTON_RELEASE_DET, true},
- {WCD9335_IRQ_MBHC_ELECT_INS_REM_DET, true},
- {WCD9335_IRQ_MBHC_ELECT_INS_REM_LEG_DET, true},
- {WCD9335_IRQ_FLL_LOCK_LOSS, false},
- {WCD9335_IRQ_HPH_PA_CNPL_COMPLETE, false},
- {WCD9335_IRQ_HPH_PA_CNPR_COMPLETE, false},
- {WCD9335_IRQ_EAR_PA_CNP_COMPLETE, false},
- {WCD9335_IRQ_LINE_PA1_CNP_COMPLETE, false},
- {WCD9335_IRQ_LINE_PA2_CNP_COMPLETE, false},
- {WCD9335_IRQ_LINE_PA3_CNP_COMPLETE, false},
- {WCD9335_IRQ_LINE_PA4_CNP_COMPLETE, false},
- {WCD9335_IRQ_HPH_PA_OCPL_FAULT, false},
- {WCD9335_IRQ_HPH_PA_OCPR_FAULT, false},
- {WCD9335_IRQ_EAR_PA_OCP_FAULT, false},
- {WCD9335_IRQ_SOUNDWIRE, false},
- {WCD9335_IRQ_VDD_DIG_RAMP_COMPLETE, false},
- {WCD9335_IRQ_RCO_ERROR, false},
- {WCD9335_IRQ_SVA_ERROR, false},
- {WCD9335_IRQ_MAD_AUDIO, false},
- {WCD9335_IRQ_MAD_BEACON, false},
- {WCD9335_IRQ_SVA_OUTBOX1, true},
- {WCD9335_IRQ_SVA_OUTBOX2, true},
- {WCD9335_IRQ_MAD_ULTRASOUND, false},
- {WCD9335_IRQ_VBAT_ATTACK, false},
- {WCD9335_IRQ_VBAT_RESTORE, false},
-};
-
static const DECLARE_TLV_DB_SCALE(digital_gain, 0, 1, 0);
static const DECLARE_TLV_DB_SCALE(line_gain, 0, 7, 1);
static const DECLARE_TLV_DB_SCALE(analog_gain, 0, 25, 1);
@@ -849,7 +817,10 @@
int rx_8_count;
bool clk_mode;
bool clk_internal;
-
+ /* Lock to prevent multiple functions voting at same time */
+ struct mutex sb_clk_gear_lock;
+ /* Count for functions voting or un-voting */
+ u32 ref_count;
/* Lock to protect mclk enablement */
struct mutex mclk_lock;
};
@@ -875,176 +846,6 @@
{WCD9335_CDC_BOOST1_BOOST_CTL, 0x7C, 0x44},
};
-/*
- * wcd9335_get_codec_info: Get codec specific information
- *
- * @wcd9xxx: pointer to wcd9xxx structure
- * @wcd_type: pointer to wcd9xxx_codec_type structure
- *
- * Returns 0 for success or negative error code for failure
- */
-int wcd9335_get_codec_info(struct wcd9xxx *wcd9xxx,
- struct wcd9xxx_codec_type *wcd_type)
-{
- u16 id_minor, id_major;
- struct regmap *wcd_regmap;
- int rc, val, version = 0;
-
- if (!wcd9xxx || !wcd_type)
- return -EINVAL;
-
- if (!wcd9xxx->regmap) {
- dev_err(wcd9xxx->dev, "%s: wcd9xxx regmap is null!\n",
- __func__);
- return -EINVAL;
- }
- wcd_regmap = wcd9xxx->regmap;
-
- rc = regmap_bulk_read(wcd_regmap, WCD9335_CHIP_TIER_CTRL_CHIP_ID_BYTE0,
- (u8 *)&id_minor, sizeof(u16));
- if (rc)
- return -EINVAL;
-
- rc = regmap_bulk_read(wcd_regmap, WCD9335_CHIP_TIER_CTRL_CHIP_ID_BYTE2,
- (u8 *)&id_major, sizeof(u16));
- if (rc)
- return -EINVAL;
-
- dev_info(wcd9xxx->dev, "%s: wcd9xxx chip id major 0x%x, minor 0x%x\n",
- __func__, id_major, id_minor);
-
- /* Version detection */
- if (id_major == TASHA_MAJOR) {
- regmap_read(wcd_regmap, WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT0,
- &val);
- version = ((u8)val & 0x80) >> 7;
- } else if (id_major == TASHA2P0_MAJOR)
- version = 2;
- else
- dev_err(wcd9xxx->dev, "%s: wcd9335 version unknown (major 0x%x, minor 0x%x)\n",
- __func__, id_major, id_minor);
-
- /* Fill codec type info */
- wcd_type->id_major = id_major;
- wcd_type->id_minor = id_minor;
- wcd_type->num_irqs = WCD9335_NUM_IRQS;
- wcd_type->version = version;
- wcd_type->slim_slave_type = WCD9XXX_SLIM_SLAVE_ADDR_TYPE_1;
- wcd_type->i2c_chip_status = 0x01;
- wcd_type->intr_tbl = wcd9335_intr_table;
- wcd_type->intr_tbl_size = ARRAY_SIZE(wcd9335_intr_table);
-
- wcd_type->intr_reg[WCD9XXX_INTR_STATUS_BASE] =
- WCD9335_INTR_PIN1_STATUS0;
- wcd_type->intr_reg[WCD9XXX_INTR_CLEAR_BASE] =
- WCD9335_INTR_PIN1_CLEAR0;
- wcd_type->intr_reg[WCD9XXX_INTR_MASK_BASE] =
- WCD9335_INTR_PIN1_MASK0;
- wcd_type->intr_reg[WCD9XXX_INTR_LEVEL_BASE] =
- WCD9335_INTR_LEVEL0;
- wcd_type->intr_reg[WCD9XXX_INTR_CLR_COMMIT] =
- WCD9335_INTR_CLR_COMMIT;
-
- return rc;
-}
-EXPORT_SYMBOL(wcd9335_get_codec_info);
-
-/*
- * wcd9335_bringdown: Bringdown WCD Codec
- *
- * @wcd9xxx: Pointer to wcd9xxx structure
- *
- * Returns 0 for success or negative error code for failure
- */
-int wcd9335_bringdown(struct wcd9xxx *wcd9xxx)
-{
- if (!wcd9xxx || !wcd9xxx->regmap)
- return -EINVAL;
-
- regmap_write(wcd9xxx->regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
- 0x04);
-
- return 0;
-}
-EXPORT_SYMBOL(wcd9335_bringdown);
-
-/*
- * wcd9335_bringup: Bringup WCD Codec
- *
- * @wcd9xxx: Pointer to the wcd9xxx structure
- *
- * Returns 0 for success or negative error code for failure
- */
-int wcd9335_bringup(struct wcd9xxx *wcd9xxx)
-{
- int ret = 0;
- int val, byte0;
- struct regmap *wcd_regmap;
-
- if (!wcd9xxx)
- return -EINVAL;
-
- if (!wcd9xxx->regmap) {
- dev_err(wcd9xxx->dev, "%s: wcd9xxx regmap is null!\n",
- __func__);
- return -EINVAL;
- }
- wcd_regmap = wcd9xxx->regmap;
-
- regmap_read(wcd_regmap, WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT0, &val);
- regmap_read(wcd_regmap, WCD9335_CHIP_TIER_CTRL_CHIP_ID_BYTE0, &byte0);
-
- if ((val < 0) || (byte0 < 0)) {
- dev_err(wcd9xxx->dev, "%s: tasha codec version detection fail!\n",
- __func__);
- return -EINVAL;
- }
- if ((val & 0x80) && (byte0 == 0x0)) {
- dev_info(wcd9xxx->dev, "%s: wcd9335 codec version is v1.1\n",
- __func__);
- regmap_write(wcd_regmap, WCD9335_CODEC_RPM_RST_CTL, 0x01);
- regmap_write(wcd_regmap, WCD9335_SIDO_SIDO_CCL_2, 0xFC);
- regmap_write(wcd_regmap, WCD9335_SIDO_SIDO_CCL_4, 0x21);
- regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
- 0x5);
- regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
- 0x7);
- regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
- 0x3);
- regmap_write(wcd_regmap, WCD9335_CODEC_RPM_RST_CTL, 0x3);
- } else if (byte0 == 0x1) {
- dev_info(wcd9xxx->dev, "%s: wcd9335 codec version is v2.0\n",
- __func__);
- regmap_write(wcd_regmap, WCD9335_CODEC_RPM_RST_CTL, 0x01);
- regmap_write(wcd_regmap, WCD9335_SIDO_SIDO_TEST_2, 0x00);
- regmap_write(wcd_regmap, WCD9335_SIDO_SIDO_CCL_8, 0x6F);
- regmap_write(wcd_regmap, WCD9335_BIAS_VBG_FINE_ADJ, 0x65);
- regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
- 0x5);
- regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
- 0x7);
- regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
- 0x3);
- regmap_write(wcd_regmap, WCD9335_CODEC_RPM_RST_CTL, 0x3);
- } else if ((byte0 == 0) && (!(val & 0x80))) {
- dev_info(wcd9xxx->dev, "%s: wcd9335 codec version is v1.0\n",
- __func__);
- regmap_write(wcd_regmap, WCD9335_CODEC_RPM_RST_CTL, 0x01);
- regmap_write(wcd_regmap, WCD9335_SIDO_SIDO_CCL_2, 0xFC);
- regmap_write(wcd_regmap, WCD9335_SIDO_SIDO_CCL_4, 0x21);
- regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
- 0x3);
- regmap_write(wcd_regmap, WCD9335_CODEC_RPM_RST_CTL, 0x3);
- } else {
- dev_err(wcd9xxx->dev, "%s: tasha codec version unknown\n",
- __func__);
- ret = -EINVAL;
- }
-
- return ret;
-}
-EXPORT_SYMBOL(wcd9335_bringup);
-
/**
* tasha_set_spkr_gain_offset - offset the speaker path
* gain with the given offset value.
@@ -2213,6 +2014,32 @@
tasha_mbhc_hph_l_pull_up_control(codec, mbhc->moist_iref);
}
+static void tasha_update_anc_state(struct snd_soc_codec *codec, bool enable,
+ int anc_num)
+{
+ if (enable)
+ snd_soc_update_bits(codec, WCD9335_CDC_RX1_RX_PATH_CFG0 +
+ (20 * anc_num), 0x10, 0x10);
+ else
+ snd_soc_update_bits(codec, WCD9335_CDC_RX1_RX_PATH_CFG0 +
+ (20 * anc_num), 0x10, 0x00);
+}
+
+static bool tasha_is_anc_on(struct wcd_mbhc *mbhc)
+{
+ bool anc_on = false;
+ u16 ancl, ancr;
+
+ ancl =
+ (snd_soc_read(mbhc->codec, WCD9335_CDC_RX1_RX_PATH_CFG0)) & 0x10;
+ ancr =
+ (snd_soc_read(mbhc->codec, WCD9335_CDC_RX2_RX_PATH_CFG0)) & 0x10;
+
+ anc_on = !!(ancl | ancr);
+
+ return anc_on;
+}
+
static const struct wcd_mbhc_cb mbhc_cb = {
.request_irq = tasha_mbhc_request_irq,
.irq_control = tasha_mbhc_irq_control,
@@ -2235,6 +2062,8 @@
.mbhc_gnd_det_ctrl = tasha_mbhc_gnd_det_ctrl,
.hph_pull_down_ctrl = tasha_mbhc_hph_pull_down_ctrl,
.mbhc_moisture_config = tasha_mbhc_moisture_config,
+ .update_anc_state = tasha_update_anc_state,
+ .is_anc_on = tasha_is_anc_on,
};
static int tasha_get_anc_slot(struct snd_kcontrol *kcontrol,
@@ -2432,9 +2261,8 @@
static int tasha_vi_feed_mixer_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
struct tasha_priv *tasha_p = snd_soc_codec_get_drvdata(codec);
@@ -2446,9 +2274,8 @@
static int tasha_vi_feed_mixer_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
struct tasha_priv *tasha_p = snd_soc_codec_get_drvdata(codec);
struct wcd9xxx *core = tasha_p->wcd9xxx;
@@ -2499,9 +2326,8 @@
static int slim_tx_mixer_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
struct tasha_priv *tasha_p = snd_soc_codec_get_drvdata(codec);
@@ -2512,9 +2338,8 @@
static int slim_tx_mixer_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
struct tasha_priv *tasha_p = snd_soc_codec_get_drvdata(codec);
struct wcd9xxx *core = dev_get_drvdata(codec->dev->parent);
@@ -2607,9 +2432,8 @@
static int slim_rx_mux_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
struct tasha_priv *tasha_p = snd_soc_codec_get_drvdata(codec);
@@ -2625,9 +2449,8 @@
static int slim_rx_mux_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
struct tasha_priv *tasha_p = snd_soc_codec_get_drvdata(codec);
struct wcd9xxx *core = dev_get_drvdata(codec->dev->parent);
@@ -3149,10 +2972,7 @@
&dai->grph);
break;
case SND_SOC_DAPM_PRE_PMD:
- if (!test_bit(SB_CLK_GEAR, &tasha_p->status_mask)) {
- tasha_codec_vote_max_bw(codec, true);
- set_bit(SB_CLK_GEAR, &tasha_p->status_mask);
- }
+ tasha_codec_vote_max_bw(codec, true);
break;
case SND_SOC_DAPM_POST_PMD:
ret = wcd9xxx_disconnect_port(core, &dai->wcd9xxx_ch_list,
@@ -5464,10 +5284,7 @@
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
- if (!test_bit(SB_CLK_GEAR, &tasha->status_mask)) {
- tasha_codec_vote_max_bw(codec, true);
- set_bit(SB_CLK_GEAR, &tasha->status_mask);
- }
+ tasha_codec_vote_max_bw(codec, true);
/* Reset if needed */
tasha_codec_enable_prim_interpolator(codec, reg, event);
break;
@@ -8682,9 +8499,8 @@
static int tasha_put_dec_enum(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
unsigned int val;
@@ -8742,9 +8558,8 @@
static int tasha_int_dem_inp_mux_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
unsigned int val;
@@ -9076,9 +8891,8 @@
static int tasha_codec_aif4_mixer_switch_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
struct tasha_priv *tasha_p = snd_soc_codec_get_drvdata(codec);
@@ -9095,9 +8909,8 @@
static int tasha_codec_aif4_mixer_switch_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_dapm_update *update = NULL;
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
struct tasha_priv *tasha_p = snd_soc_codec_get_drvdata(codec);
@@ -11328,11 +11141,8 @@
if (tasha->intf_type == WCD9XXX_INTERFACE_TYPE_I2C)
return;
- if ((substream->stream == SNDRV_PCM_STREAM_PLAYBACK) &&
- test_bit(SB_CLK_GEAR, &tasha->status_mask)) {
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
tasha_codec_vote_max_bw(dai->codec, false);
- clear_bit(SB_CLK_GEAR, &tasha->status_mask);
- }
}
static int tasha_set_decimator_rate(struct snd_soc_dai *dai,
@@ -11567,15 +11377,11 @@
static int tasha_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
- struct tasha_priv *tasha = snd_soc_codec_get_drvdata(dai->codec);
-
pr_debug("%s(): substream = %s stream = %d\n", __func__,
substream->name, substream->stream);
- if ((substream->stream == SNDRV_PCM_STREAM_PLAYBACK) &&
- test_bit(SB_CLK_GEAR, &tasha->status_mask)) {
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
tasha_codec_vote_max_bw(dai->codec, false);
- clear_bit(SB_CLK_GEAR, &tasha->status_mask);
- }
return 0;
}
@@ -13283,13 +13089,29 @@
if (tasha->intf_type == WCD9XXX_INTERFACE_TYPE_I2C)
return 0;
- if (vote)
- bw_ops = SLIM_BW_CLK_GEAR_9;
- else
- bw_ops = SLIM_BW_UNVOTE;
+ mutex_lock(&tasha->sb_clk_gear_lock);
+ if (vote) {
+ tasha->ref_count++;
+ if (tasha->ref_count == 1) {
+ bw_ops = SLIM_BW_CLK_GEAR_9;
+ tasha_codec_slim_reserve_bw(codec,
+ bw_ops, true);
+ }
+ } else if (!vote && tasha->ref_count > 0) {
+ tasha->ref_count--;
+ if (tasha->ref_count == 0) {
+ bw_ops = SLIM_BW_UNVOTE;
+ tasha_codec_slim_reserve_bw(codec,
+ bw_ops, true);
+ }
+ };
- return tasha_codec_slim_reserve_bw(codec,
- bw_ops, true);
+ dev_dbg(codec->dev, "%s Value of counter after vote or un-vote is %d\n",
+ __func__, tasha->ref_count);
+
+ mutex_unlock(&tasha->sb_clk_gear_lock);
+
+ return 0;
}
static int tasha_cpe_err_irq_control(struct snd_soc_codec *codec,
@@ -13472,6 +13294,8 @@
if (ret < 0)
dev_err(codec->dev, "%s: invalid pdata\n", __func__);
+ /* Reset reference counter for voting for max bw */
+ tasha->ref_count = 0;
/* MBHC Init */
wcd_mbhc_deinit(&tasha->mbhc);
tasha->mbhc_started = false;
@@ -14254,6 +14078,7 @@
mutex_init(&tasha->swr_read_lock);
mutex_init(&tasha->swr_write_lock);
mutex_init(&tasha->swr_clk_lock);
+ mutex_init(&tasha->sb_clk_gear_lock);
mutex_init(&tasha->mclk_lock);
cdc_pwr = devm_kzalloc(&pdev->dev, sizeof(struct wcd9xxx_power_region),
@@ -14358,6 +14183,7 @@
mutex_destroy(&tasha->mclk_lock);
devm_kfree(&pdev->dev, tasha);
snd_soc_unregister_codec(&pdev->dev);
+ mutex_destroy(&tasha->sb_clk_gear_lock);
return 0;
}
diff --git a/sound/soc/codecs/wcd9335.h b/sound/soc/codecs/wcd9335.h
index d27bb96..c76461e 100644
--- a/sound/soc/codecs/wcd9335.h
+++ b/sound/soc/codecs/wcd9335.h
@@ -83,44 +83,6 @@
TASHA_TX_MAX,
};
-enum {
- /* INTR_REG 0 */
- WCD9335_IRQ_FLL_LOCK_LOSS = 1,
- WCD9335_IRQ_HPH_PA_OCPL_FAULT,
- WCD9335_IRQ_HPH_PA_OCPR_FAULT,
- WCD9335_IRQ_EAR_PA_OCP_FAULT,
- WCD9335_IRQ_HPH_PA_CNPL_COMPLETE,
- WCD9335_IRQ_HPH_PA_CNPR_COMPLETE,
- WCD9335_IRQ_EAR_PA_CNP_COMPLETE,
- /* INTR_REG 1 */
- WCD9335_IRQ_MBHC_SW_DET,
- WCD9335_IRQ_MBHC_ELECT_INS_REM_DET,
- WCD9335_IRQ_MBHC_BUTTON_PRESS_DET,
- WCD9335_IRQ_MBHC_BUTTON_RELEASE_DET,
- WCD9335_IRQ_MBHC_ELECT_INS_REM_LEG_DET,
- WCD9335_IRQ_RESERVED_0,
- WCD9335_IRQ_RESERVED_1,
- WCD9335_IRQ_RESERVED_2,
- /* INTR_REG 2 */
- WCD9335_IRQ_LINE_PA1_CNP_COMPLETE,
- WCD9335_IRQ_LINE_PA2_CNP_COMPLETE,
- WCD9335_IRQ_LINE_PA3_CNP_COMPLETE,
- WCD9335_IRQ_LINE_PA4_CNP_COMPLETE,
- WCD9335_IRQ_SOUNDWIRE,
- WCD9335_IRQ_VDD_DIG_RAMP_COMPLETE,
- WCD9335_IRQ_RCO_ERROR,
- WCD9335_IRQ_SVA_ERROR,
- /* INTR_REG 3 */
- WCD9335_IRQ_MAD_AUDIO,
- WCD9335_IRQ_MAD_BEACON,
- WCD9335_IRQ_MAD_ULTRASOUND,
- WCD9335_IRQ_VBAT_ATTACK,
- WCD9335_IRQ_VBAT_RESTORE,
- WCD9335_IRQ_SVA_OUTBOX1,
- WCD9335_IRQ_SVA_OUTBOX2,
- WCD9335_NUM_IRQS,
-};
-
enum wcd9335_codec_event {
WCD9335_CODEC_EVENT_CODEC_UP = 0,
};
diff --git a/sound/soc/codecs/wcd934x/Makefile b/sound/soc/codecs/wcd934x/Makefile
index 2843fa1..12781f6 100644
--- a/sound/soc/codecs/wcd934x/Makefile
+++ b/sound/soc/codecs/wcd934x/Makefile
@@ -1,9 +1,6 @@
#
# Makefile for wcd934x codec driver.
#
-snd-soc-wcd934x-objs := wcd934x.o wcd934x-dsp-cntl.o
+snd-soc-wcd934x-objs := wcd934x.o wcd934x-dsp-cntl.o \
+ wcd934x-mbhc.o wcd934x-dsd.o
obj-$(CONFIG_SND_SOC_WCD934X) += snd-soc-wcd934x.o
-snd-soc-wcd934x-mbhc-objs := wcd934x-mbhc.o
-obj-$(CONFIG_SND_SOC_WCD934X_MBHC) += snd-soc-wcd934x-mbhc.o
-snd-soc-wcd934x-dsd-objs := wcd934x-dsd.o
-obj-$(CONFIG_SND_SOC_WCD934X_DSD) += snd-soc-wcd934x-dsd.o
diff --git a/sound/soc/codecs/wcd934x/wcd934x-dsd.h b/sound/soc/codecs/wcd934x/wcd934x-dsd.h
index 4982883..834b96c 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-dsd.h
+++ b/sound/soc/codecs/wcd934x/wcd934x-dsd.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -43,7 +43,7 @@
int version;
};
-#ifdef CONFIG_SND_SOC_WCD934X_DSD
+#if IS_ENABLED(CONFIG_SND_SOC_WCD934X_DSD)
int tavil_dsd_set_mixer_value(struct tavil_dsd_config *dsd_conf,
int interp_num, int sw_value);
int tavil_dsd_get_current_mixer_value(struct tavil_dsd_config *dsd_conf,
diff --git a/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c b/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c
index 8da0425..b62f26c 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c
+++ b/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c
@@ -763,10 +763,6 @@
case WDSP_EVENT_DLOAD_FAILED:
case WDSP_EVENT_POST_SHUTDOWN:
- if (event == WDSP_EVENT_POST_DLOAD_CODE)
- /* Mark DSP online since code download is complete */
- wcd_cntl_change_online_state(cntl, 1);
-
/* Disable CPAR */
wcd_cntl_cpar_ctrl(cntl, false);
/* Disable all the clocks */
@@ -775,6 +771,10 @@
dev_err(codec->dev,
"%s: Failed to disable clocks, err = %d\n",
__func__, ret);
+
+ if (event == WDSP_EVENT_POST_DLOAD_CODE)
+ /* Mark DSP online since code download is complete */
+ wcd_cntl_change_online_state(cntl, 1);
break;
case WDSP_EVENT_PRE_DLOAD_DATA:
diff --git a/sound/soc/codecs/wcd934x/wcd934x-mbhc.c b/sound/soc/codecs/wcd934x/wcd934x-mbhc.c
index 578c347..ea19caa 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-mbhc.c
+++ b/sound/soc/codecs/wcd934x/wcd934x-mbhc.c
@@ -24,6 +24,7 @@
#include <linux/mfd/wcd9xxx/wcd9xxx-irq.h>
#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
#include <linux/mfd/wcd934x/registers.h>
+#include <linux/mfd/wcd934x/irq.h>
#include <linux/mfd/wcd9xxx/pdata.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -828,6 +829,32 @@
return wcd934x_mbhc->is_hph_recover;
}
+static void tavil_update_anc_state(struct snd_soc_codec *codec, bool enable,
+ int anc_num)
+{
+ if (enable)
+ snd_soc_update_bits(codec, WCD934X_CDC_RX1_RX_PATH_CFG0 +
+ (20 * anc_num), 0x10, 0x10);
+ else
+ snd_soc_update_bits(codec, WCD934X_CDC_RX1_RX_PATH_CFG0 +
+ (20 * anc_num), 0x10, 0x00);
+}
+
+static bool tavil_is_anc_on(struct wcd_mbhc *mbhc)
+{
+ bool anc_on = false;
+ u16 ancl, ancr;
+
+ ancl =
+ (snd_soc_read(mbhc->codec, WCD934X_CDC_RX1_RX_PATH_CFG0)) & 0x10;
+ ancr =
+ (snd_soc_read(mbhc->codec, WCD934X_CDC_RX2_RX_PATH_CFG0)) & 0x10;
+
+ anc_on = !!(ancl | ancr);
+
+ return anc_on;
+}
+
static const struct wcd_mbhc_cb mbhc_cb = {
.request_irq = tavil_mbhc_request_irq,
.irq_control = tavil_mbhc_irq_control,
@@ -851,6 +878,8 @@
.hph_pull_down_ctrl = tavil_mbhc_hph_pull_down_ctrl,
.mbhc_moisture_config = tavil_mbhc_moisture_config,
.hph_register_recovery = tavil_hph_register_recovery,
+ .update_anc_state = tavil_update_anc_state,
+ .is_anc_on = tavil_is_anc_on,
};
static struct regulator *tavil_codec_find_ondemand_regulator(
@@ -997,19 +1026,26 @@
struct snd_soc_codec *codec)
{
int ret;
+ struct wcd_mbhc *wcd_mbhc;
if (!mbhc || !codec)
return -EINVAL;
- wcd_mbhc_deinit(&mbhc->wcd_mbhc);
- ret = wcd_mbhc_init(&mbhc->wcd_mbhc, codec, &mbhc_cb, &intr_ids,
+ wcd_mbhc = &mbhc->wcd_mbhc;
+ if (wcd_mbhc == NULL) {
+ pr_err("%s: wcd_mbhc is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ wcd_mbhc_deinit(wcd_mbhc);
+ ret = wcd_mbhc_init(wcd_mbhc, codec, &mbhc_cb, &intr_ids,
wcd_mbhc_registers, TAVIL_ZDET_SUPPORTED);
if (ret) {
dev_err(codec->dev, "%s: mbhc initialization failed\n",
__func__);
goto done;
}
- if (!WCD_MBHC_DETECTION) {
+ if (wcd_mbhc->mbhc_detection_logic == WCD_DETECTION_LEGACY) {
snd_soc_update_bits(codec, WCD934X_MBHC_NEW_CTL_1, 0x04, 0x04);
snd_soc_update_bits(codec, WCD934X_MBHC_CTL_BCS, 0x01, 0x01);
}
@@ -1032,6 +1068,7 @@
{
struct regulator *supply;
struct wcd934x_mbhc *wcd934x_mbhc;
+ struct wcd_mbhc *wcd_mbhc;
int ret;
wcd934x_mbhc = devm_kzalloc(codec->dev, sizeof(struct wcd934x_mbhc),
@@ -1042,8 +1079,18 @@
wcd934x_mbhc->wcd9xxx = dev_get_drvdata(codec->dev->parent);
wcd934x_mbhc->fw_data = fw_data;
BLOCKING_INIT_NOTIFIER_HEAD(&wcd934x_mbhc->notifier);
+ wcd_mbhc = &wcd934x_mbhc->wcd_mbhc;
+ if (wcd_mbhc == NULL) {
+ pr_err("%s: wcd_mbhc is NULL\n", __func__);
+ ret = -EINVAL;
+ goto err;
+ }
- ret = wcd_mbhc_init(&wcd934x_mbhc->wcd_mbhc, codec, &mbhc_cb,
+
+ /* Setting default mbhc detection logic to ADC for Tavil */
+ wcd_mbhc->mbhc_detection_logic = WCD_DETECTION_ADC;
+
+ ret = wcd_mbhc_init(wcd_mbhc, codec, &mbhc_cb,
&intr_ids, wcd_mbhc_registers,
TAVIL_ZDET_SUPPORTED);
if (ret) {
@@ -1069,7 +1116,7 @@
snd_soc_add_codec_controls(codec, hph_type_detect_controls,
ARRAY_SIZE(hph_type_detect_controls));
- if (!WCD_MBHC_DETECTION) {
+ if (wcd_mbhc->mbhc_detection_logic == WCD_DETECTION_LEGACY) {
snd_soc_update_bits(codec, WCD934X_MBHC_NEW_CTL_1, 0x04, 0x04);
snd_soc_update_bits(codec, WCD934X_MBHC_CTL_BCS, 0x01, 0x01);
}
diff --git a/sound/soc/codecs/wcd934x/wcd934x-mbhc.h b/sound/soc/codecs/wcd934x/wcd934x-mbhc.h
index d40546a..53c886d 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-mbhc.h
+++ b/sound/soc/codecs/wcd934x/wcd934x-mbhc.h
@@ -35,7 +35,7 @@
bool is_hph_recover;
};
-#ifdef CONFIG_SND_SOC_WCD934X_MBHC
+#if IS_ENABLED(CONFIG_SND_SOC_WCD934X_MBHC)
extern int tavil_mbhc_init(struct wcd934x_mbhc **mbhc,
struct snd_soc_codec *codec,
struct fw_info *fw_data);
diff --git a/sound/soc/codecs/wcd934x/wcd934x-routing.h b/sound/soc/codecs/wcd934x/wcd934x-routing.h
index afd93b2..93a1ad3 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-routing.h
+++ b/sound/soc/codecs/wcd934x/wcd934x-routing.h
@@ -117,6 +117,70 @@
const struct snd_soc_dapm_route tavil_audio_map[] = {
+ /* WDMA3 */
+ {"WDMA3 PORT0 MUX", "DEC0", "ADC MUX0"},
+ {"WDMA3 PORT0 MUX", "RX_MIX_TX0", "RX MIX TX0 MUX"},
+ {"WDMA3 PORT1 MUX", "DEC1", "ADC MUX1"},
+ {"WDMA3 PORT1 MUX", "RX_MIX_TX1", "RX MIX TX1 MUX"},
+ {"WDMA3 PORT2 MUX", "DEC2", "ADC MUX2"},
+ {"WDMA3 PORT2 MUX", "RX_MIX_TX2", "RX MIX TX2 MUX"},
+ {"WDMA3 PORT3 MUX", "DEC3", "ADC MUX3"},
+ {"WDMA3 PORT3 MUX", "RX_MIX_TX3", "RX MIX TX3 MUX"},
+ {"WDMA3 PORT4 MUX", "DEC4", "ADC MUX4"},
+ {"WDMA3 PORT4 MUX", "RX_MIX_TX4", "RX MIX TX4 MUX"},
+ {"WDMA3 PORT5 MUX", "DEC5", "ADC MUX5"},
+ {"WDMA3 PORT5 MUX", "RX_MIX_TX5", "RX MIX TX5 MUX"},
+ {"WDMA3 PORT6 MUX", "DEC6", "ADC MUX6"},
+ {"WDMA3 PORT6 MUX", "RX_MIX_TX6", "RX MIX TX6 MUX"},
+
+ {"WDMA3 CH0 MUX", "PORT_0", "WDMA3 PORT0 MUX"},
+ {"WDMA3 CH0 MUX", "PORT_1", "WDMA3 PORT1 MUX"},
+ {"WDMA3 CH0 MUX", "PORT_2", "WDMA3 PORT2 MUX"},
+ {"WDMA3 CH0 MUX", "PORT_3", "WDMA3 PORT3 MUX"},
+ {"WDMA3 CH0 MUX", "PORT_4", "WDMA3 PORT4 MUX"},
+ {"WDMA3 CH0 MUX", "PORT_5", "WDMA3 PORT5 MUX"},
+ {"WDMA3 CH0 MUX", "PORT_6", "WDMA3 PORT6 MUX"},
+ {"WDMA3 CH0 MUX", "PORT_7", "ADC MUX7"},
+ {"WDMA3 CH0 MUX", "PORT_8", "ADC MUX8"},
+
+ {"WDMA3 CH1 MUX", "PORT_0", "WDMA3 PORT0 MUX"},
+ {"WDMA3 CH1 MUX", "PORT_1", "WDMA3 PORT1 MUX"},
+ {"WDMA3 CH1 MUX", "PORT_2", "WDMA3 PORT2 MUX"},
+ {"WDMA3 CH1 MUX", "PORT_3", "WDMA3 PORT3 MUX"},
+ {"WDMA3 CH1 MUX", "PORT_4", "WDMA3 PORT4 MUX"},
+ {"WDMA3 CH1 MUX", "PORT_5", "WDMA3 PORT5 MUX"},
+ {"WDMA3 CH1 MUX", "PORT_6", "WDMA3 PORT6 MUX"},
+ {"WDMA3 CH1 MUX", "PORT_7", "ADC MUX7"},
+ {"WDMA3 CH1 MUX", "PORT_8", "ADC MUX8"},
+
+ {"WDMA3 CH2 MUX", "PORT_0", "WDMA3 PORT0 MUX"},
+ {"WDMA3 CH2 MUX", "PORT_1", "WDMA3 PORT1 MUX"},
+ {"WDMA3 CH2 MUX", "PORT_2", "WDMA3 PORT2 MUX"},
+ {"WDMA3 CH2 MUX", "PORT_3", "WDMA3 PORT3 MUX"},
+ {"WDMA3 CH2 MUX", "PORT_4", "WDMA3 PORT4 MUX"},
+ {"WDMA3 CH2 MUX", "PORT_5", "WDMA3 PORT5 MUX"},
+ {"WDMA3 CH2 MUX", "PORT_6", "WDMA3 PORT6 MUX"},
+ {"WDMA3 CH2 MUX", "PORT_7", "ADC MUX7"},
+ {"WDMA3 CH2 MUX", "PORT_8", "ADC MUX8"},
+
+ {"WDMA3 CH3 MUX", "PORT_0", "WDMA3 PORT0 MUX"},
+ {"WDMA3 CH3 MUX", "PORT_1", "WDMA3 PORT1 MUX"},
+ {"WDMA3 CH3 MUX", "PORT_2", "WDMA3 PORT2 MUX"},
+ {"WDMA3 CH3 MUX", "PORT_3", "WDMA3 PORT3 MUX"},
+ {"WDMA3 CH3 MUX", "PORT_4", "WDMA3 PORT4 MUX"},
+ {"WDMA3 CH3 MUX", "PORT_5", "WDMA3 PORT5 MUX"},
+ {"WDMA3 CH3 MUX", "PORT_6", "WDMA3 PORT6 MUX"},
+ {"WDMA3 CH3 MUX", "PORT_7", "ADC MUX7"},
+ {"WDMA3 CH3 MUX", "PORT_8", "ADC MUX8"},
+
+ {"WDMA3_CH_MIXER", NULL, "WDMA3 CH0 MUX"},
+ {"WDMA3_CH_MIXER", NULL, "WDMA3 CH1 MUX"},
+ {"WDMA3_CH_MIXER", NULL, "WDMA3 CH2 MUX"},
+ {"WDMA3_CH_MIXER", NULL, "WDMA3 CH3 MUX"},
+
+ {"WDMA3_ON_OFF", "Switch", "WDMA3_CH_MIXER"},
+ {"WDMA3_OUT", NULL, "WDMA3_ON_OFF"},
+
/* MAD */
{"MAD_SEL MUX", "SPE", "MAD_CPE_INPUT"},
{"MAD_SEL MUX", "MSM", "MADINPUT"},
diff --git a/sound/soc/codecs/wcd934x/wcd934x.c b/sound/soc/codecs/wcd934x/wcd934x.c
index ff08ccb..fe1ce45 100644
--- a/sound/soc/codecs/wcd934x/wcd934x.c
+++ b/sound/soc/codecs/wcd934x/wcd934x.c
@@ -31,6 +31,7 @@
#include <linux/mfd/wcd9xxx/wcd9xxx-irq.h>
#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
#include <linux/mfd/wcd934x/registers.h>
+#include <linux/mfd/wcd934x/irq.h>
#include <linux/mfd/wcd9xxx/pdata.h>
#include <linux/regulator/consumer.h>
#include <linux/soundwire/swr-wcd.h>
@@ -179,6 +180,8 @@
ANC_MIC_AMIC2,
ANC_MIC_AMIC3,
ANC_MIC_AMIC4,
+ CLK_INTERNAL,
+ CLK_MODE,
};
enum {
@@ -233,37 +236,6 @@
u8 hph_idle_detect_en;
};
-static const struct intr_data wcd934x_intr_table[] = {
- {WCD9XXX_IRQ_SLIMBUS, false},
- {WCD934X_IRQ_MBHC_SW_DET, true},
- {WCD934X_IRQ_MBHC_BUTTON_PRESS_DET, true},
- {WCD934X_IRQ_MBHC_BUTTON_RELEASE_DET, true},
- {WCD934X_IRQ_MBHC_ELECT_INS_REM_DET, true},
- {WCD934X_IRQ_MBHC_ELECT_INS_REM_LEG_DET, true},
- {WCD934X_IRQ_MISC, false},
- {WCD934X_IRQ_HPH_PA_CNPL_COMPLETE, false},
- {WCD934X_IRQ_HPH_PA_CNPR_COMPLETE, false},
- {WCD934X_IRQ_EAR_PA_CNP_COMPLETE, false},
- {WCD934X_IRQ_LINE_PA1_CNP_COMPLETE, false},
- {WCD934X_IRQ_LINE_PA2_CNP_COMPLETE, false},
- {WCD934X_IRQ_SLNQ_ANALOG_ERROR, false},
- {WCD934X_IRQ_RESERVED_3, false},
- {WCD934X_IRQ_HPH_PA_OCPL_FAULT, false},
- {WCD934X_IRQ_HPH_PA_OCPR_FAULT, false},
- {WCD934X_IRQ_EAR_PA_OCP_FAULT, false},
- {WCD934X_IRQ_SOUNDWIRE, false},
- {WCD934X_IRQ_VDD_DIG_RAMP_COMPLETE, false},
- {WCD934X_IRQ_RCO_ERROR, false},
- {WCD934X_IRQ_CPE_ERROR, false},
- {WCD934X_IRQ_MAD_AUDIO, false},
- {WCD934X_IRQ_MAD_BEACON, false},
- {WCD934X_IRQ_CPE1_INTR, true},
- {WCD934X_IRQ_RESERVED_4, false},
- {WCD934X_IRQ_MAD_ULTRASOUND, false},
- {WCD934X_IRQ_VBAT_ATTACK, false},
- {WCD934X_IRQ_VBAT_RESTORE, false},
-};
-
struct tavil_cpr_reg_defaults {
int wr_data;
int wr_addr;
@@ -676,140 +648,6 @@
static int __tavil_enable_efuse_sensing(struct tavil_priv *tavil);
-/*
- * wcd934x_get_codec_info: Get codec specific information
- *
- * @wcd9xxx: pointer to wcd9xxx structure
- * @wcd_type: pointer to wcd9xxx_codec_type structure
- *
- * Returns 0 for success or negative error code for failure
- */
-int wcd934x_get_codec_info(struct wcd9xxx *wcd9xxx,
- struct wcd9xxx_codec_type *wcd_type)
-{
- u16 id_minor, id_major;
- struct regmap *wcd_regmap;
- int rc, version = -1;
-
- if (!wcd9xxx || !wcd_type)
- return -EINVAL;
-
- if (!wcd9xxx->regmap) {
- dev_err(wcd9xxx->dev, "%s: wcd9xxx regmap is null\n", __func__);
- return -EINVAL;
- }
- wcd_regmap = wcd9xxx->regmap;
-
- rc = regmap_bulk_read(wcd_regmap, WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE0,
- (u8 *)&id_minor, sizeof(u16));
- if (rc)
- return -EINVAL;
-
- rc = regmap_bulk_read(wcd_regmap, WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE2,
- (u8 *)&id_major, sizeof(u16));
- if (rc)
- return -EINVAL;
-
- dev_info(wcd9xxx->dev, "%s: wcd9xxx chip id major 0x%x, minor 0x%x\n",
- __func__, id_major, id_minor);
-
- if (id_major != TAVIL_MAJOR)
- goto version_unknown;
-
- /*
- * As fine version info cannot be retrieved before tavil probe.
- * Assign coarse versions for possible future use before tavil probe.
- */
- if (id_minor == cpu_to_le16(0))
- version = TAVIL_VERSION_1_0;
- else if (id_minor == cpu_to_le16(0x01))
- version = TAVIL_VERSION_1_1;
-
-version_unknown:
- if (version < 0)
- dev_err(wcd9xxx->dev, "%s: wcd934x version unknown\n",
- __func__);
-
- /* Fill codec type info */
- wcd_type->id_major = id_major;
- wcd_type->id_minor = id_minor;
- wcd_type->num_irqs = WCD934X_NUM_IRQS;
- wcd_type->version = version;
- wcd_type->slim_slave_type = WCD9XXX_SLIM_SLAVE_ADDR_TYPE_1;
- wcd_type->i2c_chip_status = 0x01;
- wcd_type->intr_tbl = wcd934x_intr_table;
- wcd_type->intr_tbl_size = ARRAY_SIZE(wcd934x_intr_table);
-
- wcd_type->intr_reg[WCD9XXX_INTR_STATUS_BASE] =
- WCD934X_INTR_PIN1_STATUS0;
- wcd_type->intr_reg[WCD9XXX_INTR_CLEAR_BASE] =
- WCD934X_INTR_PIN1_CLEAR0;
- wcd_type->intr_reg[WCD9XXX_INTR_MASK_BASE] =
- WCD934X_INTR_PIN1_MASK0;
- wcd_type->intr_reg[WCD9XXX_INTR_LEVEL_BASE] =
- WCD934X_INTR_LEVEL0;
- wcd_type->intr_reg[WCD9XXX_INTR_CLR_COMMIT] =
- WCD934X_INTR_CLR_COMMIT;
-
- return rc;
-}
-EXPORT_SYMBOL(wcd934x_get_codec_info);
-
-/*
- * wcd934x_bringdown: Bringdown WCD Codec
- *
- * @wcd9xxx: Pointer to wcd9xxx structure
- *
- * Returns 0 for success or negative error code for failure
- */
-int wcd934x_bringdown(struct wcd9xxx *wcd9xxx)
-{
- if (!wcd9xxx || !wcd9xxx->regmap)
- return -EINVAL;
-
- regmap_write(wcd9xxx->regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL,
- 0x04);
-
- return 0;
-}
-EXPORT_SYMBOL(wcd934x_bringdown);
-
-/*
- * wcd934x_bringup: Bringup WCD Codec
- *
- * @wcd9xxx: Pointer to the wcd9xxx structure
- *
- * Returns 0 for success or negative error code for failure
- */
-int wcd934x_bringup(struct wcd9xxx *wcd9xxx)
-{
- struct regmap *wcd_regmap;
-
- if (!wcd9xxx)
- return -EINVAL;
-
- if (!wcd9xxx->regmap) {
- dev_err(wcd9xxx->dev, "%s: wcd9xxx regmap is null!\n",
- __func__);
- return -EINVAL;
- }
- wcd_regmap = wcd9xxx->regmap;
-
- regmap_write(wcd_regmap, WCD934X_CODEC_RPM_RST_CTL, 0x01);
- regmap_write(wcd_regmap, WCD934X_SIDO_NEW_VOUT_A_STARTUP, 0x19);
- regmap_write(wcd_regmap, WCD934X_SIDO_NEW_VOUT_D_STARTUP, 0x15);
- /* Add 1msec delay for VOUT to settle */
- usleep_range(1000, 1100);
- regmap_write(wcd_regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x5);
- regmap_write(wcd_regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x7);
- regmap_write(wcd_regmap, WCD934X_CODEC_RPM_RST_CTL, 0x3);
- regmap_write(wcd_regmap, WCD934X_CODEC_RPM_RST_CTL, 0x7);
- regmap_write(wcd_regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x3);
-
- return 0;
-}
-EXPORT_SYMBOL(wcd934x_bringup);
-
/**
* tavil_set_spkr_gain_offset - offset the speaker path
* gain with the given offset value.
@@ -1235,12 +1073,45 @@
return ret;
}
+static int tavil_get_clkmode(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct tavil_priv *tavil_p = snd_soc_codec_get_drvdata(codec);
+
+ if (test_bit(CLK_MODE, &tavil_p->status_mask))
+ ucontrol->value.enumerated.item[0] = 1;
+ else
+ ucontrol->value.enumerated.item[0] = 0;
+
+ dev_dbg(codec->dev, "%s: is_low_power_clock: %s\n", __func__,
+ test_bit(CLK_MODE, &tavil_p->status_mask) ? "true" : "false");
+
+ return 0;
+}
+
+static int tavil_put_clkmode(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct tavil_priv *tavil_p = snd_soc_codec_get_drvdata(codec);
+
+ if (ucontrol->value.enumerated.item[0])
+ set_bit(CLK_MODE, &tavil_p->status_mask);
+ else
+ clear_bit(CLK_MODE, &tavil_p->status_mask);
+
+ dev_dbg(codec->dev, "%s: is_low_power_clock: %s\n", __func__,
+ test_bit(CLK_MODE, &tavil_p->status_mask) ? "true" : "false");
+
+ return 0;
+}
+
static int tavil_vi_feed_mixer_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
struct tavil_priv *tavil_p = snd_soc_codec_get_drvdata(codec);
@@ -1252,9 +1123,8 @@
static int tavil_vi_feed_mixer_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
struct tavil_priv *tavil_p = snd_soc_codec_get_drvdata(codec);
struct wcd9xxx *core = dev_get_drvdata(codec->dev->parent);
@@ -1304,9 +1174,8 @@
static int slim_tx_mixer_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
struct tavil_priv *tavil_p = snd_soc_codec_get_drvdata(codec);
@@ -1317,9 +1186,8 @@
static int slim_tx_mixer_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
struct tavil_priv *tavil_p = snd_soc_codec_get_drvdata(codec);
struct wcd9xxx *core = dev_get_drvdata(codec->dev->parent);
@@ -1399,9 +1267,8 @@
static int slim_rx_mux_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
struct tavil_priv *tavil_p = snd_soc_codec_get_drvdata(codec);
@@ -1413,9 +1280,8 @@
static int slim_rx_mux_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
struct tavil_priv *tavil_p = snd_soc_codec_get_drvdata(codec);
struct wcd9xxx *core = dev_get_drvdata(codec->dev->parent);
@@ -2973,6 +2839,35 @@
return asrc_mode;
}
+static int tavil_codec_wdma3_ctl(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ /* Fix to 16KHz */
+ snd_soc_update_bits(codec, WCD934X_DMA_WDMA_CTL_3,
+ 0xF0, 0x10);
+ /* Select mclk_1 */
+ snd_soc_update_bits(codec, WCD934X_DMA_WDMA_CTL_3,
+ 0x02, 0x00);
+ /* Enable DMA */
+ snd_soc_update_bits(codec, WCD934X_DMA_WDMA_CTL_3,
+ 0x01, 0x01);
+ break;
+
+ case SND_SOC_DAPM_POST_PMD:
+ /* Disable DMA */
+ snd_soc_update_bits(codec, WCD934X_DMA_WDMA_CTL_3,
+ 0x01, 0x00);
+ break;
+
+	}
+
+ return 0;
+}
+
static int tavil_codec_enable_asrc(struct snd_soc_codec *codec,
int asrc_in, int event)
{
@@ -5711,6 +5606,9 @@
static const struct soc_enum tavil_anc_func_enum =
SOC_ENUM_SINGLE_EXT(2, tavil_anc_func_text);
+static const char *const tavil_clkmode_text[] = {"EXTERNAL", "INTERNAL"};
+static SOC_ENUM_SINGLE_EXT_DECL(tavil_clkmode_enum, tavil_clkmode_text);
+
/* Cutoff frequency for high pass filter */
static const char * const cf_text[] = {
"CF_NEG_3DB_4HZ", "CF_NEG_3DB_75HZ", "CF_NEG_3DB_150HZ"
@@ -5890,6 +5788,9 @@
SOC_ENUM_EXT("ANC Function", tavil_anc_func_enum, tavil_get_anc_func,
tavil_put_anc_func),
+ SOC_ENUM_EXT("CLK MODE", tavil_clkmode_enum, tavil_get_clkmode,
+ tavil_put_clkmode),
+
SOC_ENUM("TX0 HPF cut off", cf_dec0_enum),
SOC_ENUM("TX1 HPF cut off", cf_dec1_enum),
SOC_ENUM("TX2 HPF cut off", cf_dec2_enum),
@@ -6022,9 +5923,8 @@
static int tavil_dec_enum_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
unsigned int val;
@@ -6082,9 +5982,8 @@
static int tavil_int_dem_inp_mux_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
unsigned int val;
@@ -6329,6 +6228,39 @@
"OFF", "ON",
};
+static const char *const wdma3_port0_text[] = {
+ "RX_MIX_TX0", "DEC0"
+};
+
+static const char *const wdma3_port1_text[] = {
+ "RX_MIX_TX1", "DEC1"
+};
+
+static const char *const wdma3_port2_text[] = {
+ "RX_MIX_TX2", "DEC2"
+};
+
+static const char *const wdma3_port3_text[] = {
+ "RX_MIX_TX3", "DEC3"
+};
+
+static const char *const wdma3_port4_text[] = {
+ "RX_MIX_TX4", "DEC4"
+};
+
+static const char *const wdma3_port5_text[] = {
+ "RX_MIX_TX5", "DEC5"
+};
+
+static const char *const wdma3_port6_text[] = {
+ "RX_MIX_TX6", "DEC6"
+};
+
+static const char *const wdma3_ch_text[] = {
+ "PORT_0", "PORT_1", "PORT_2", "PORT_3", "PORT_4",
+ "PORT_5", "PORT_6", "PORT_7", "PORT_8",
+};
+
static const struct snd_kcontrol_new aif4_vi_mixer[] = {
SOC_SINGLE_EXT("SPKR_VI_1", SND_SOC_NOPM, WCD934X_TX14, 1, 0,
tavil_vi_feed_mixer_get, tavil_vi_feed_mixer_put),
@@ -6734,6 +6666,20 @@
WCD_DAPM_ENUM(anc0_fb, WCD934X_CDC_RX_INP_MUX_ANC_CFG0, 0, anc0_fb_mux_text);
WCD_DAPM_ENUM(anc1_fb, WCD934X_CDC_RX_INP_MUX_ANC_CFG0, 3, anc1_fb_mux_text);
+
+WCD_DAPM_ENUM(wdma3_port0, WCD934X_DMA_WDMA3_PRT_CFG, 0, wdma3_port0_text);
+WCD_DAPM_ENUM(wdma3_port1, WCD934X_DMA_WDMA3_PRT_CFG, 1, wdma3_port1_text);
+WCD_DAPM_ENUM(wdma3_port2, WCD934X_DMA_WDMA3_PRT_CFG, 2, wdma3_port2_text);
+WCD_DAPM_ENUM(wdma3_port3, WCD934X_DMA_WDMA3_PRT_CFG, 3, wdma3_port3_text);
+WCD_DAPM_ENUM(wdma3_port4, WCD934X_DMA_WDMA3_PRT_CFG, 4, wdma3_port4_text);
+WCD_DAPM_ENUM(wdma3_port5, WCD934X_DMA_WDMA3_PRT_CFG, 5, wdma3_port5_text);
+WCD_DAPM_ENUM(wdma3_port6, WCD934X_DMA_WDMA3_PRT_CFG, 6, wdma3_port6_text);
+
+WCD_DAPM_ENUM(wdma3_ch0, WCD934X_DMA_CH_0_1_CFG_WDMA_3, 0, wdma3_ch_text);
+WCD_DAPM_ENUM(wdma3_ch1, WCD934X_DMA_CH_0_1_CFG_WDMA_3, 4, wdma3_ch_text);
+WCD_DAPM_ENUM(wdma3_ch2, WCD934X_DMA_CH_2_3_CFG_WDMA_3, 0, wdma3_ch_text);
+WCD_DAPM_ENUM(wdma3_ch3, WCD934X_DMA_CH_2_3_CFG_WDMA_3, 4, wdma3_ch_text);
+
static const struct snd_kcontrol_new anc_ear_switch =
SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0);
@@ -6801,6 +6747,9 @@
SOC_DAPM_SINGLE("LO2 Switch", SND_SOC_NOPM, 0, 1, 0),
};
+static const struct snd_kcontrol_new wdma3_onoff_switch =
+ SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0);
+
static int tavil_dsd_mixer_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -7483,6 +7432,28 @@
SND_SOC_DAPM_MUX_E("ASRC3 MUX", SND_SOC_NOPM, ASRC3, 0,
&asrc3_mux, tavil_codec_enable_asrc_resampler,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+ /* WDMA3 widgets */
+ WCD_DAPM_MUX("WDMA3 PORT0 MUX", 0, wdma3_port0),
+ WCD_DAPM_MUX("WDMA3 PORT1 MUX", 1, wdma3_port1),
+ WCD_DAPM_MUX("WDMA3 PORT2 MUX", 2, wdma3_port2),
+ WCD_DAPM_MUX("WDMA3 PORT3 MUX", 3, wdma3_port3),
+ WCD_DAPM_MUX("WDMA3 PORT4 MUX", 4, wdma3_port4),
+ WCD_DAPM_MUX("WDMA3 PORT5 MUX", 5, wdma3_port5),
+ WCD_DAPM_MUX("WDMA3 PORT6 MUX", 6, wdma3_port6),
+
+ WCD_DAPM_MUX("WDMA3 CH0 MUX", 0, wdma3_ch0),
+ WCD_DAPM_MUX("WDMA3 CH1 MUX", 4, wdma3_ch1),
+ WCD_DAPM_MUX("WDMA3 CH2 MUX", 0, wdma3_ch2),
+ WCD_DAPM_MUX("WDMA3 CH3 MUX", 4, wdma3_ch3),
+
+ SND_SOC_DAPM_MIXER("WDMA3_CH_MIXER", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ SND_SOC_DAPM_SWITCH_E("WDMA3_ON_OFF", SND_SOC_NOPM, 0, 0,
+ &wdma3_onoff_switch, tavil_codec_wdma3_ctl,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_OUTPUT("WDMA3_OUT"),
};
static int tavil_get_channel_map(struct snd_soc_dai *dai,
@@ -8480,6 +8451,50 @@
return ret;
}
+/*
+ * tavil_cdc_mclk_tx_enable: Enable/Disable codec's clock for TX path
+ * @codec: Handle to codec
+ * @enable: Indicates whether clock should be enabled or disabled
+ */
+int tavil_cdc_mclk_tx_enable(struct snd_soc_codec *codec, bool enable)
+{
+ struct tavil_priv *tavil_p;
+ int ret = 0;
+ bool clk_mode;
+ bool clk_internal;
+
+ if (!codec)
+ return -EINVAL;
+
+ tavil_p = snd_soc_codec_get_drvdata(codec);
+ clk_mode = test_bit(CLK_MODE, &tavil_p->status_mask);
+ clk_internal = test_bit(CLK_INTERNAL, &tavil_p->status_mask);
+
+ dev_dbg(codec->dev, "%s: clkmode: %d, enable: %d, clk_internal: %d\n",
+ __func__, clk_mode, enable, clk_internal);
+
+ if (clk_mode || clk_internal) {
+ if (enable) {
+ wcd_resmgr_enable_master_bias(tavil_p->resmgr);
+ tavil_dig_core_power_collapse(tavil_p, POWER_RESUME);
+ tavil_vote_svs(tavil_p, true);
+ ret = tavil_codec_internal_rco_ctrl(codec, enable);
+ set_bit(CLK_INTERNAL, &tavil_p->status_mask);
+ } else {
+ clear_bit(CLK_INTERNAL, &tavil_p->status_mask);
+ tavil_codec_internal_rco_ctrl(codec, enable);
+ tavil_vote_svs(tavil_p, false);
+ tavil_dig_core_power_collapse(tavil_p, POWER_COLLAPSE);
+ wcd_resmgr_disable_master_bias(tavil_p->resmgr);
+ }
+ } else {
+ ret = __tavil_cdc_mclk_enable(tavil_p, enable);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(tavil_cdc_mclk_tx_enable);
+
static const struct wcd_resmgr_cb tavil_resmgr_cb = {
.cdc_rco_ctrl = __tavil_codec_internal_rco_ctrl,
};
diff --git a/sound/soc/codecs/wcd934x/wcd934x.h b/sound/soc/codecs/wcd934x/wcd934x.h
index ae70175..27c21f1 100644
--- a/sound/soc/codecs/wcd934x/wcd934x.h
+++ b/sound/soc/codecs/wcd934x/wcd934x.h
@@ -95,45 +95,6 @@
INTERP_MAX,
};
-enum {
- /* INTR_REG 0 */
- WCD934X_IRQ_MISC = 1,
- WCD934X_IRQ_HPH_PA_OCPL_FAULT,
- WCD934X_IRQ_HPH_PA_OCPR_FAULT,
- WCD934X_IRQ_EAR_PA_OCP_FAULT,
- WCD934X_IRQ_HPH_PA_CNPL_COMPLETE,
- WCD934X_IRQ_HPH_PA_CNPR_COMPLETE,
- WCD934X_IRQ_EAR_PA_CNP_COMPLETE,
- /* INTR_REG 1 */
- WCD934X_IRQ_MBHC_SW_DET,
- WCD934X_IRQ_MBHC_ELECT_INS_REM_DET,
- WCD934X_IRQ_MBHC_BUTTON_PRESS_DET,
- WCD934X_IRQ_MBHC_BUTTON_RELEASE_DET,
- WCD934X_IRQ_MBHC_ELECT_INS_REM_LEG_DET,
- WCD934X_IRQ_RESERVED_0,
- WCD934X_IRQ_RESERVED_1,
- WCD934X_IRQ_RESERVED_2,
- /* INTR_REG 2 */
- WCD934X_IRQ_LINE_PA1_CNP_COMPLETE,
- WCD934X_IRQ_LINE_PA2_CNP_COMPLETE,
- WCD934X_IRQ_SLNQ_ANALOG_ERROR,
- WCD934X_IRQ_RESERVED_3,
- WCD934X_IRQ_SOUNDWIRE,
- WCD934X_IRQ_VDD_DIG_RAMP_COMPLETE,
- WCD934X_IRQ_RCO_ERROR,
- WCD934X_IRQ_CPE_ERROR,
- /* INTR_REG 3 */
- WCD934X_IRQ_MAD_AUDIO,
- WCD934X_IRQ_MAD_BEACON,
- WCD934X_IRQ_MAD_ULTRASOUND,
- WCD934X_IRQ_VBAT_ATTACK,
- WCD934X_IRQ_VBAT_RESTORE,
- WCD934X_IRQ_CPE1_INTR,
- WCD934X_IRQ_RESERVED_4,
- WCD934X_IRQ_SLNQ_DIGITAL,
- WCD934X_NUM_IRQS,
-};
-
/*
* Selects compander and smart boost settings
* for a given speaker mode
@@ -176,6 +137,7 @@
extern void *tavil_get_afe_config(struct snd_soc_codec *codec,
enum afe_config_type config_type);
extern int tavil_cdc_mclk_enable(struct snd_soc_codec *codec, bool enable);
+extern int tavil_cdc_mclk_tx_enable(struct snd_soc_codec *codec, bool enable);
extern int tavil_set_spkr_mode(struct snd_soc_codec *codec, int mode);
extern int tavil_set_spkr_gain_offset(struct snd_soc_codec *codec, int offset);
extern struct wcd_dsp_cntl *tavil_get_wcd_dsp_cntl(struct device *dev);
diff --git a/sound/soc/codecs/wcd9xxx-common-v2.c b/sound/soc/codecs/wcd9xxx-common-v2.c
index 9ac38c2..6216657 100644
--- a/sound/soc/codecs/wcd9xxx-common-v2.c
+++ b/sound/soc/codecs/wcd9xxx-common-v2.c
@@ -1316,6 +1316,7 @@
break;
};
}
+EXPORT_SYMBOL(wcd_clsh_fsm);
int wcd_clsh_get_clsh_state(struct wcd_clsh_cdc_data *clsh)
{
diff --git a/sound/soc/codecs/wcd9xxx-common.c b/sound/soc/codecs/wcd9xxx-common.c
deleted file mode 100644
index 7b2e68a..0000000
--- a/sound/soc/codecs/wcd9xxx-common.c
+++ /dev/null
@@ -1,1480 +0,0 @@
-/* Copyright (c) 2013-2015, 2017 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <sound/soc.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
-#include "wcd9xxx-common.h"
-
-#define CLSH_COMPUTE_EAR 0x01
-#define CLSH_COMPUTE_HPH_L 0x02
-#define CLSH_COMPUTE_HPH_R 0x03
-
-#define BUCK_VREF_0P494V 0x3F
-#define BUCK_VREF_2V 0xFF
-#define BUCK_VREF_0P494V 0x3F
-#define BUCK_VREF_1P8V 0xE6
-
-#define BUCK_SETTLE_TIME_US 50
-#define NCP_SETTLE_TIME_US 50
-
-#define MAX_IMPED_PARAMS 13
-
-#define USLEEP_RANGE_MARGIN_US 100
-
-struct wcd9xxx_imped_val {
- u32 imped_val;
- u8 index;
-};
-
-static const struct wcd9xxx_reg_mask_val imped_table[][MAX_IMPED_PARAMS] = {
- {
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x46},
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x04},
- {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x11},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x02},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x9B},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x02},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x15},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1C},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x04},
- {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x0C},
- },
- {
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x47},
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x05},
- {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x11},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x02},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x9B},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x02},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x15},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1C},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x05},
- {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x0C},
- },
- {
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x49},
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x07},
- {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x02},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x12},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x35},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x4E},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x06},
- {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x0E},
- },
- {
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x49},
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x16},
- {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xAC},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x02},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x17},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x5F},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xCF},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x06},
- {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x0F},
- },
- {
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x59},
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x15},
- {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x9C},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x02},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1B},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xCE},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xBD},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x07},
- {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x10},
- },
- {
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x66},
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x04},
- {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x9A},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x02},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2E},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xBD},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xA6},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x07},
- {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x11},
- },
- {
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x79},
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x04},
- {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x11},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x37},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xA6},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xAD},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x08},
- {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x12},
- },
- {
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x76},
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x04},
- {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x11},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x4E},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xAD},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xAC},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x09},
- {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x12},
- },
- {
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x78},
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x05},
- {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x12},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xD0},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xAC},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x13},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x0A},
- {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x13},
- },
- {
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x7A},
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x06},
- {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x14},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xB7},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x13},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x14},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x0B},
- {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x14},
- },
- {
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x60},
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x09},
- {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1C},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xA4},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x14},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1F},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x0C},
- {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x14},
- },
- {
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x79},
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x17},
- {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x25},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xAE},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1F},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1D},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x0D},
- {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x15},
- },
- {
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x78},
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x16},
- {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2C},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xAC},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1D},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1C},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x0E},
- {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x16},
- },
- {
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x89},
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x05},
- {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x40},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x13},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1C},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1B},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x10},
- {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x16},
- },
- {
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x97},
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x05},
- {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xD0},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x14},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1B},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1B},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x12},
- {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x17},
- },
- {
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x8A},
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x06},
- {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xB7},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x10},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1B},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x24},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x13},
- {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x17},
- },
- {
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x8A},
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x07},
- {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xA4},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1D},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x24},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x25},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x15},
- {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x18},
- },
- {
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x9A},
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x08},
- {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xAE},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1C},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x25},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x27},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x18},
- {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x19},
- },
- {
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x8B},
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x18},
- {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xAC},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1B},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x20},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2E},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x1A},
- {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x19},
- },
- {
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x9A},
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x17},
- {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x13},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1B},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2E},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2D},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x1D},
- {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x1A},
- },
- {
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0xA9},
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x06},
- {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x14},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x24},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2D},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2C},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x1F},
- {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x19},
- },
- {
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0xB9},
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x06},
- {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x10},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x25},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2C},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2C},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x23},
- {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x18},
- },
- {
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0xA9},
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x07},
- {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1D},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x27},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2C},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x35},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x26},
- {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x16},
- },
-};
-
-static const struct wcd9xxx_imped_val imped_index[] = {
- {4000, 0},
- {4500, 1},
- {5000, 2},
- {5500, 3},
- {6000, 4},
- {6500, 5},
- {7000, 6},
- {7700, 7},
- {8470, 8},
- {9317, 9},
- {10248, 10},
- {11273, 11},
- {12400, 12},
- {13641, 13},
- {15005, 14},
- {16505, 15},
- {18156, 16},
- {19971, 17},
- {21969, 18},
- {24165, 19},
- {26582, 20},
- {29240, 21},
- {32164, 22},
-};
-
-static inline void
-wcd9xxx_enable_clsh_block(struct snd_soc_codec *codec,
- struct wcd9xxx_clsh_cdc_data *clsh_d, bool enable)
-{
- if ((enable && ++clsh_d->clsh_users == 1) ||
- (!enable && --clsh_d->clsh_users == 0))
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLSH_B1_CTL,
- 0x01, enable ? 0x01 : 0x00);
- dev_dbg(codec->dev, "%s: clsh_users %d, enable %d", __func__,
- clsh_d->clsh_users, enable);
-}
-
-static inline void wcd9xxx_enable_anc_delay(
- struct snd_soc_codec *codec,
- bool on)
-{
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLSH_B1_CTL,
- 0x02, on ? 0x02 : 0x00);
-}
-
-static inline void
-wcd9xxx_enable_buck(struct snd_soc_codec *codec,
- struct wcd9xxx_clsh_cdc_data *clsh_d, bool enable)
-{
- if ((enable && ++clsh_d->buck_users == 1) ||
- (!enable && --clsh_d->buck_users == 0))
- snd_soc_update_bits(codec, WCD9XXX_A_BUCK_MODE_1,
- 0x80, enable ? 0x80 : 0x00);
- dev_dbg(codec->dev, "%s: buck_users %d, enable %d", __func__,
- clsh_d->buck_users, enable);
-}
-
-static void (*clsh_state_fp[NUM_CLSH_STATES])(struct snd_soc_codec *,
- struct wcd9xxx_clsh_cdc_data *,
- u8 req_state, bool req_type);
-
-static const char *state_to_str(u8 state, char *buf, size_t buflen)
-{
- int i;
- int cnt = 0;
- /*
- * This array of strings should match with enum wcd9xxx_clsh_state_bit.
- */
- static const char *const states[] = {
- "STATE_EAR",
- "STATE_HPH_L",
- "STATE_HPH_R",
- "STATE_LO",
- };
-
- if (state == WCD9XXX_CLSH_STATE_IDLE) {
- snprintf(buf, buflen, "[STATE_IDLE]");
- goto done;
- }
-
- buf[0] = '\0';
- for (i = 0; i < ARRAY_SIZE(states); i++) {
- if (!(state & (1 << i)))
- continue;
- cnt = snprintf(buf, buflen - cnt - 1, "%s%s%s", buf,
- buf[0] == '\0' ? "[" : "|",
- states[i]);
- }
- if (cnt > 0)
- strlcat(buf + cnt, "]", buflen);
-
-done:
- if (buf[0] == '\0')
- snprintf(buf, buflen, "[STATE_UNKNOWN]");
- return buf;
-}
-
-static void wcd9xxx_cfg_clsh_param_common(
- struct snd_soc_codec *codec)
-{
- int i;
- const struct wcd9xxx_reg_mask_val reg_set[] = {
- {WCD9XXX_A_CDC_CLSH_BUCK_NCP_VARS, 0x3 << 0, 0},
- {WCD9XXX_A_CDC_CLSH_BUCK_NCP_VARS, 0x3 << 2, 1 << 2},
- {WCD9XXX_A_CDC_CLSH_BUCK_NCP_VARS, (0x1 << 4), 0},
- {WCD9XXX_A_CDC_CLSH_B2_CTL, (0x3 << 0), 0x01},
- {WCD9XXX_A_CDC_CLSH_B2_CTL, (0x3 << 2), (0x01 << 2)},
- {WCD9XXX_A_CDC_CLSH_B2_CTL, (0xf << 4), (0x03 << 4)},
- {WCD9XXX_A_CDC_CLSH_B3_CTL, (0xf << 4), (0x03 << 4)},
- {WCD9XXX_A_CDC_CLSH_B3_CTL, (0xf << 0), (0x0B)},
- {WCD9XXX_A_CDC_CLSH_B1_CTL, (0x1 << 5), (0x01 << 5)},
- {WCD9XXX_A_CDC_CLSH_B1_CTL, (0x1 << 1), (0x01 << 1)},
- };
-
- for (i = 0; i < ARRAY_SIZE(reg_set); i++)
- snd_soc_update_bits(codec, reg_set[i].reg, reg_set[i].mask,
- reg_set[i].val);
-
- dev_dbg(codec->dev, "%s: Programmed class H controller common parameters",
- __func__);
-}
-
-static void wcd9xxx_chargepump_request(struct snd_soc_codec *codec, bool on)
-{
- static int cp_count;
-
- if (on && (++cp_count == 1)) {
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLK_OTHR_CTL,
- 0x01, 0x01);
- dev_dbg(codec->dev, "%s: Charge Pump enabled, count = %d\n",
- __func__, cp_count);
- } else if (!on) {
- if (--cp_count < 0) {
- dev_dbg(codec->dev,
- "%s: Unbalanced disable for charge pump\n",
- __func__);
- if (snd_soc_read(codec, WCD9XXX_A_CDC_CLK_OTHR_CTL) &
- 0x01) {
- dev_dbg(codec->dev,
- "%s: Actual chargepump is ON\n",
- __func__);
- }
- cp_count = 0;
- WARN_ON(1);
- }
-
- if (cp_count == 0) {
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLK_OTHR_CTL,
- 0x01, 0x00);
- dev_dbg(codec->dev,
- "%s: Charge pump disabled, count = %d\n",
- __func__, cp_count);
- }
- }
-}
-
-void wcd9xxx_enable_high_perf_mode(struct snd_soc_codec *codec,
- struct wcd9xxx_clsh_cdc_data *clsh_d,
- u8 uhqa_mode, u8 req_state, bool req_type)
-{
- dev_dbg(codec->dev, "%s: users fclk8 %d, fclk5 %d", __func__,
- clsh_d->ncp_users[NCP_FCLK_LEVEL_8],
- clsh_d->ncp_users[NCP_FCLK_LEVEL_5]);
-
- if (req_type == WCD9XXX_CLSAB_REQ_ENABLE) {
- clsh_d->ncp_users[NCP_FCLK_LEVEL_8]++;
- snd_soc_write(codec, WCD9XXX_A_RX_HPH_BIAS_PA,
- WCD9XXX_A_RX_HPH_BIAS_PA__POR);
- snd_soc_write(codec, WCD9XXX_A_RX_HPH_L_PA_CTL, 0x48);
- snd_soc_write(codec, WCD9XXX_A_RX_HPH_R_PA_CTL, 0x48);
- if (uhqa_mode)
- snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_CHOP_CTL,
- 0x20, 0x00);
- wcd9xxx_chargepump_request(codec, true);
- wcd9xxx_enable_anc_delay(codec, true);
- wcd9xxx_enable_buck(codec, clsh_d, false);
- if (clsh_d->ncp_users[NCP_FCLK_LEVEL_8] > 0)
- snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC,
- 0x0F, 0x08);
- snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC, 0x30, 0x30);
-
- /* Enable NCP and wait until settles down */
- if (snd_soc_update_bits(codec, WCD9XXX_A_NCP_EN, 0x01, 0x01))
- usleep_range(NCP_SETTLE_TIME_US, NCP_SETTLE_TIME_US+10);
- } else {
- snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_CHOP_CTL,
- 0x20, 0x20);
- snd_soc_write(codec, WCD9XXX_A_RX_HPH_L_PA_CTL,
- WCD9XXX_A_RX_HPH_L_PA_CTL__POR);
- snd_soc_write(codec, WCD9XXX_A_RX_HPH_R_PA_CTL,
- WCD9XXX_A_RX_HPH_R_PA_CTL__POR);
- snd_soc_write(codec, WCD9XXX_A_RX_HPH_BIAS_PA, 0x57);
- wcd9xxx_enable_buck(codec, clsh_d, true);
- wcd9xxx_chargepump_request(codec, false);
- wcd9xxx_enable_anc_delay(codec, false);
- clsh_d->ncp_users[NCP_FCLK_LEVEL_8]--;
- if (clsh_d->ncp_users[NCP_FCLK_LEVEL_8] == 0 &&
- clsh_d->ncp_users[NCP_FCLK_LEVEL_5] == 0)
- snd_soc_update_bits(codec, WCD9XXX_A_NCP_EN,
- 0x01, 0x00);
- else if (clsh_d->ncp_users[NCP_FCLK_LEVEL_8] == 0)
- snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC,
- 0x0F, 0x05);
- }
- dev_dbg(codec->dev, "%s: leave\n", __func__);
-}
-EXPORT_SYMBOL(wcd9xxx_enable_high_perf_mode);
-
-static int get_impedance_index(u32 imped)
-{
- int i = 0;
-
- if (imped < imped_index[i].imped_val) {
- pr_debug("%s, detected impedance is less than 4 Ohm\n",
- __func__);
- goto ret;
- }
- if (imped >= imped_index[ARRAY_SIZE(imped_index) - 1].imped_val) {
- pr_debug("%s, detected impedance is greater than 32164 Ohm\n",
- __func__);
- i = ARRAY_SIZE(imped_index) - 1;
- goto ret;
- }
- for (i = 0; i < ARRAY_SIZE(imped_index) - 1; i++) {
- if (imped >= imped_index[i].imped_val &&
- imped < imped_index[i + 1].imped_val)
- break;
- }
-ret:
- pr_debug("%s: selected impedance index = %d\n",
- __func__, imped_index[i].index);
- return imped_index[i].index;
-}
-
-void wcd9xxx_clsh_imped_config(struct snd_soc_codec *codec,
- int imped)
-{
- int i = 0;
- int index = 0;
-
- index = get_impedance_index(imped);
- if (index >= ARRAY_SIZE(imped_index)) {
- pr_err("%s, invalid imped = %d\n", __func__, imped);
- return;
- }
- for (i = 0; i < MAX_IMPED_PARAMS; i++)
- snd_soc_write(codec, imped_table[index][i].reg,
- imped_table[index][i].val);
-}
-
-static void wcd9xxx_clsh_comp_req(struct snd_soc_codec *codec,
- struct wcd9xxx_clsh_cdc_data *clsh_d,
- int compute_pa, bool on)
-{
- u8 shift;
-
- if (compute_pa == CLSH_COMPUTE_EAR) {
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLSH_B1_CTL, 0x10,
- (on ? 0x10 : 0));
- } else {
- if (compute_pa == CLSH_COMPUTE_HPH_L) {
- shift = 3;
- } else if (compute_pa == CLSH_COMPUTE_HPH_R) {
- shift = 2;
- } else {
- dev_dbg(codec->dev,
- "%s: classh computation request is incorrect\n",
- __func__);
- return;
- }
-
- if (on)
- wcd9xxx_resmgr_add_cond_update_bits(clsh_d->resmgr,
- WCD9XXX_COND_HPH,
- WCD9XXX_A_CDC_CLSH_B1_CTL,
- shift, false);
- else
- wcd9xxx_resmgr_rm_cond_update_bits(clsh_d->resmgr,
- WCD9XXX_COND_HPH,
- WCD9XXX_A_CDC_CLSH_B1_CTL,
- shift, false);
- }
-}
-
-int wcd9xxx_soc_update_bits_push(struct snd_soc_codec *codec,
- struct list_head *list,
- uint16_t reg, uint8_t mask,
- uint8_t value, int delay)
-{
- int rc;
- struct wcd9xxx_register_save_node *node;
-
- node = kmalloc(sizeof(*node), GFP_KERNEL);
- if (unlikely(!node)) {
- pr_err("%s: Not enough memory\n", __func__);
- return -ENOMEM;
- }
- node->reg = reg;
- node->value = snd_soc_read(codec, reg);
- list_add(&node->lh, list);
- if (mask == 0xFF)
- rc = snd_soc_write(codec, reg, value);
- else
- rc = snd_soc_update_bits(codec, reg, mask, value);
- if (delay)
- usleep_range(delay, delay + USLEEP_RANGE_MARGIN_US);
- return rc;
-}
-EXPORT_SYMBOL(wcd9xxx_soc_update_bits_push);
-
-void wcd9xxx_restore_registers(struct snd_soc_codec *codec,
- struct list_head *lh)
-{
- struct wcd9xxx_register_save_node *node, *nodetmp;
-
- list_for_each_entry_safe(node, nodetmp, lh, lh) {
- snd_soc_write(codec, node->reg, node->value);
- list_del(&node->lh);
- kfree(node);
- }
-}
-EXPORT_SYMBOL(wcd9xxx_restore_registers);
-
-static void wcd9xxx_dynamic_bypass_buck_ctrl_lo(struct snd_soc_codec *cdc,
- bool enable)
-{
- int i;
- const struct wcd9xxx_reg_mask_val reg_set[] = {
- {WCD9XXX_A_BUCK_MODE_3, (0x1 << 3), (enable << 3)},
- {WCD9XXX_A_BUCK_MODE_5, enable ? 0xFF : 0x02, 0x02},
- {WCD9XXX_A_BUCK_MODE_5, 0x1, 0x01}
- };
-
- if (!enable) {
- snd_soc_update_bits(cdc, WCD9XXX_A_BUCK_MODE_1,
- (0x1 << 3), 0x00);
- snd_soc_update_bits(cdc, WCD9XXX_A_BUCK_MODE_4,
- 0xFF, BUCK_VREF_2V);
- }
- for (i = 0; i < ARRAY_SIZE(reg_set); i++)
- snd_soc_update_bits(cdc, reg_set[i].reg, reg_set[i].mask,
- reg_set[i].val);
-
- /* 50us sleep is reqd. as per the class H HW design sequence */
- usleep_range(BUCK_SETTLE_TIME_US, BUCK_SETTLE_TIME_US+10);
-}
-
-static void wcd9xxx_dynamic_bypass_buck_ctrl(struct snd_soc_codec *cdc,
- bool enable)
-{
- int i;
- const struct wcd9xxx_reg_mask_val reg_set[] = {
- {WCD9XXX_A_BUCK_MODE_3, (0x1 << 3), (enable << 3)},
- {WCD9XXX_A_BUCK_MODE_5, (0x1 << 1), ((!enable) << 1)},
- {WCD9XXX_A_BUCK_MODE_5, 0x1, !enable}
- };
- if (!enable) {
- snd_soc_update_bits(cdc, WCD9XXX_A_BUCK_MODE_1,
- (0x1 << 3), 0x00);
- snd_soc_update_bits(cdc, WCD9XXX_A_BUCK_MODE_4,
- 0xFF, BUCK_VREF_2V);
- }
- for (i = 0; i < ARRAY_SIZE(reg_set); i++)
- snd_soc_update_bits(cdc, reg_set[i].reg, reg_set[i].mask,
- reg_set[i].val);
-
- /* 50us sleep is reqd. as per the class H HW design sequence */
- usleep_range(BUCK_SETTLE_TIME_US, BUCK_SETTLE_TIME_US+10);
-}
-
-static void wcd9xxx_set_buck_mode(struct snd_soc_codec *codec, u8 buck_vref)
-{
- int i;
- const struct wcd9xxx_reg_mask_val reg_set[] = {
- {WCD9XXX_A_BUCK_MODE_5, 0x02, 0x02},
- {WCD9XXX_A_BUCK_MODE_4, 0xFF, buck_vref},
- {WCD9XXX_A_BUCK_MODE_1, 0x04, 0x04},
- {WCD9XXX_A_BUCK_MODE_3, 0x04, 0x00},
- {WCD9XXX_A_BUCK_MODE_3, 0x08, 0x00},
- };
-
- for (i = 0; i < ARRAY_SIZE(reg_set); i++)
- snd_soc_update_bits(codec, reg_set[i].reg,
- reg_set[i].mask, reg_set[i].val);
-
- dev_dbg(codec->dev, "%s: Done\n", __func__);
- usleep_range(BUCK_SETTLE_TIME_US, BUCK_SETTLE_TIME_US + 10);
-}
-
-
-/* This will be called for all states except Lineout */
-static void wcd9xxx_clsh_enable_post_pa(struct snd_soc_codec *codec,
- struct wcd9xxx_clsh_cdc_data *cdc_clsh_d)
-{
- int i;
- const struct wcd9xxx_reg_mask_val reg_set[] = {
- {WCD9XXX_A_BUCK_MODE_5, 0x02, 0x00},
- {WCD9XXX_A_NCP_STATIC, 0x20, 0x00},
- {WCD9XXX_A_BUCK_MODE_3, 0x04, 0x04},
- };
-
- for (i = 0; i < ARRAY_SIZE(reg_set); i++)
- snd_soc_update_bits(codec, reg_set[i].reg,
- reg_set[i].mask, reg_set[i].val);
-
- if (!cdc_clsh_d->is_dynamic_vdd_cp)
- snd_soc_update_bits(codec, WCD9XXX_A_BUCK_MODE_3,
- 0x08, 0x08);
-
- dev_dbg(codec->dev, "%s: completed clsh mode settings after PA enable\n",
- __func__);
-
-}
-
-static void wcd9xxx_set_fclk_get_ncp(struct snd_soc_codec *codec,
- struct wcd9xxx_clsh_cdc_data *clsh_d,
- enum ncp_fclk_level fclk_level)
-{
- clsh_d->ncp_users[fclk_level]++;
-
- pr_debug("%s: enter ncp type %d users fclk8 %d, fclk5 %d\n", __func__,
- fclk_level, clsh_d->ncp_users[NCP_FCLK_LEVEL_8],
- clsh_d->ncp_users[NCP_FCLK_LEVEL_5]);
-
- snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC, 0x10, 0x00);
- /* fclk level 8 dominates level 5 */
- if (clsh_d->ncp_users[NCP_FCLK_LEVEL_8] > 0)
- snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC, 0x0F, 0x08);
- else if (clsh_d->ncp_users[NCP_FCLK_LEVEL_5] > 0)
- snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC, 0x0F, 0x05);
- else
- WARN_ONCE(1, "Unexpected users %d,%d\n",
- clsh_d->ncp_users[NCP_FCLK_LEVEL_8],
- clsh_d->ncp_users[NCP_FCLK_LEVEL_5]);
- snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC, 0x20, 0x20);
-
- /* enable NCP and wait until settles down */
- if (snd_soc_update_bits(codec, WCD9XXX_A_NCP_EN, 0x01, 0x01))
- usleep_range(NCP_SETTLE_TIME_US, NCP_SETTLE_TIME_US + 50);
- pr_debug("%s: leave\n", __func__);
-}
-
-static void wcd9xxx_set_fclk_put_ncp(struct snd_soc_codec *codec,
- struct wcd9xxx_clsh_cdc_data *clsh_d,
- enum ncp_fclk_level fclk_level)
-{
- clsh_d->ncp_users[fclk_level]--;
-
- pr_debug("%s: enter ncp type %d users fclk8 %d, fclk5 %d\n", __func__,
- fclk_level, clsh_d->ncp_users[NCP_FCLK_LEVEL_8],
- clsh_d->ncp_users[NCP_FCLK_LEVEL_5]);
-
- if (clsh_d->ncp_users[NCP_FCLK_LEVEL_8] == 0 &&
- clsh_d->ncp_users[NCP_FCLK_LEVEL_5] == 0)
- snd_soc_update_bits(codec, WCD9XXX_A_NCP_EN, 0x01, 0x00);
- else if (clsh_d->ncp_users[NCP_FCLK_LEVEL_8] == 0)
- /* if dominating level 8 has gone, switch to 5 */
- snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC, 0x0F, 0x05);
- pr_debug("%s: leave\n", __func__);
-}
-
-static void wcd9xxx_cfg_clsh_param_ear(struct snd_soc_codec *codec)
-{
- int i;
- const struct wcd9xxx_reg_mask_val reg_set[] = {
- {WCD9XXX_A_CDC_CLSH_B1_CTL, (0x1 << 7), 0},
- {WCD9XXX_A_CDC_CLSH_V_PA_HD_EAR, (0x3f << 0), 0x0D},
- {WCD9XXX_A_CDC_CLSH_V_PA_MIN_EAR, (0x3f << 0), 0x3A},
-
- /* Under assumption that EAR load is 10.7ohm */
- {WCD9XXX_A_CDC_CLSH_IDLE_EAR_THSD, (0x3f << 0), 0x26},
- {WCD9XXX_A_CDC_CLSH_FCLKONLY_EAR_THSD, (0x3f << 0), 0x2C},
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_EAR_L, 0xff, 0xA9},
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_EAR_U, 0xff, 0x07},
- {WCD9XXX_A_CDC_CLSH_K_ADDR, (0x1 << 7), 0},
- {WCD9XXX_A_CDC_CLSH_K_ADDR, (0xf << 0), 0x08},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1b},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2d},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x36},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x37},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- };
-
- for (i = 0; i < ARRAY_SIZE(reg_set); i++)
- snd_soc_update_bits(codec, reg_set[i].reg,
- reg_set[i].mask, reg_set[i].val);
-
- dev_dbg(codec->dev, "%s: Programmed Class H controller EAR specific params\n",
- __func__);
-}
-
-static void wcd9xxx_cfg_clsh_param_hph(struct snd_soc_codec *codec)
-{
- int i;
- const struct wcd9xxx_reg_mask_val reg_set[] = {
- {WCD9XXX_A_CDC_CLSH_B1_CTL, (0x1 << 6), 0},
- {WCD9XXX_A_CDC_CLSH_V_PA_HD_HPH, 0x3f, 0x0D},
- {WCD9XXX_A_CDC_CLSH_V_PA_MIN_HPH, 0x3f, 0x1D},
-
- /* Under assumption that HPH load is 16ohm per channel */
- {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0x3f, 0x13},
- {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0x1f, 0x19},
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x97},
- {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x05},
- {WCD9XXX_A_CDC_CLSH_K_ADDR, (0x1 << 7), 0},
- {WCD9XXX_A_CDC_CLSH_K_ADDR, 0x0f, 0},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xAE},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1C},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x24},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x25},
- {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
- };
-
- for (i = 0; i < ARRAY_SIZE(reg_set); i++)
- snd_soc_update_bits(codec, reg_set[i].reg, reg_set[i].mask,
- reg_set[i].val);
- dev_dbg(codec->dev, "%s: Programmed Class H controller HPH specific params\n",
- __func__);
-}
-
-static void wcd9xxx_ncp_bypass_enable(struct snd_soc_codec *cdc, bool enable)
-{
- snd_soc_update_bits(cdc, WCD9XXX_A_NCP_STATIC, 0x10, (enable << 4));
- /* 50us sleep is reqd. as per the class H HW design sequence */
- usleep_range(BUCK_SETTLE_TIME_US, BUCK_SETTLE_TIME_US+10);
-}
-
-static void wcd9xxx_clsh_set_Iest(struct snd_soc_codec *codec,
- u8 value)
-{
- snd_soc_update_bits(codec, WCD9XXX_A_BUCK_MODE_5,
- 0x01, (0x01 & 0x03));
- snd_soc_update_bits(codec, WCD9XXX_A_BUCK_MODE_5,
- 0xFC, (value << 2));
-}
-
-static void wcd9xxx_clsh_state_hph_ear(struct snd_soc_codec *codec,
- struct wcd9xxx_clsh_cdc_data *clsh_d,
- u8 req_state, bool is_enable)
-{
- int compute_pa = 0;
-
- dev_dbg(codec->dev, "%s: enter %s\n", __func__,
- is_enable ? "enable" : "disable");
-
- if (is_enable) {
- /*
- * The below check condition is required to make sure
- * functions inside if condition will execute only once.
- */
- if ((clsh_d->state == WCD9XXX_CLSH_STATE_EAR) ||
- (req_state == WCD9XXX_CLSH_STATE_EAR)) {
- wcd9xxx_dynamic_bypass_buck_ctrl(codec, false);
- wcd9xxx_ncp_bypass_enable(codec, true);
- }
- switch (req_state) {
- case WCD9XXX_CLSH_STATE_HPHL:
- compute_pa = CLSH_COMPUTE_HPH_L;
- break;
- case WCD9XXX_CLSH_STATE_HPHR:
- compute_pa = CLSH_COMPUTE_HPH_R;
- break;
- case WCD9XXX_CLSH_STATE_EAR:
- compute_pa = CLSH_COMPUTE_EAR;
- break;
- default:
- dev_dbg(codec->dev,
- "%s:Invalid state:0x%x,enable:0x%x\n",
- __func__, req_state, is_enable);
- break;
- }
- wcd9xxx_clsh_comp_req(codec, clsh_d, compute_pa, true);
-
- dev_dbg(codec->dev, "%s: Enabled hph+ear mode clsh\n",
- __func__);
- } else {
- switch (req_state) {
- case WCD9XXX_CLSH_STATE_HPHL:
- compute_pa = CLSH_COMPUTE_HPH_L;
- break;
- case WCD9XXX_CLSH_STATE_HPHR:
- compute_pa = CLSH_COMPUTE_HPH_R;
- break;
- case WCD9XXX_CLSH_STATE_EAR:
- compute_pa = CLSH_COMPUTE_EAR;
- break;
- default:
- dev_dbg(codec->dev,
- "%s:Invalid state:0x%x,enable:0x%x\n",
- __func__, req_state, is_enable);
- break;
- }
- wcd9xxx_clsh_comp_req(codec, clsh_d, compute_pa, false);
-
- if (((clsh_d->state & (~req_state)) ==
- WCD9XXX_CLSH_STATE_EAR) ||
- (req_state == WCD9XXX_CLSH_STATE_EAR)) {
- wcd9xxx_ncp_bypass_enable(codec, false);
- wcd9xxx_dynamic_bypass_buck_ctrl(codec, true);
- }
- }
-}
-
-static void wcd9xxx_clsh_state_hph_lo(struct snd_soc_codec *codec,
- struct wcd9xxx_clsh_cdc_data *clsh_d,
- u8 req_state, bool is_enable)
-{
-
- dev_dbg(codec->dev, "%s: enter %s\n", __func__,
- is_enable ? "enable" : "disable");
- if (is_enable) {
- if ((clsh_d->state == WCD9XXX_CLSH_STATE_LO) ||
- (req_state == WCD9XXX_CLSH_STATE_LO)) {
- wcd9xxx_dynamic_bypass_buck_ctrl_lo(codec, false);
- wcd9xxx_enable_buck(codec, clsh_d, true);
- wcd9xxx_ncp_bypass_enable(codec, true);
- if (req_state & WCD9XXX_CLSH_STATE_HPH_ST) {
- wcd9xxx_set_fclk_get_ncp(codec, clsh_d,
- NCP_FCLK_LEVEL_8);
- wcd9xxx_set_fclk_put_ncp(codec, clsh_d,
- NCP_FCLK_LEVEL_5);
- wcd9xxx_enable_clsh_block(codec, clsh_d, true);
- wcd9xxx_chargepump_request(codec, true);
- wcd9xxx_enable_anc_delay(codec, true);
- }
- }
- if (req_state == WCD9XXX_CLSH_STATE_HPHL)
- wcd9xxx_clsh_comp_req(codec, clsh_d,
- CLSH_COMPUTE_HPH_L, true);
- if (req_state == WCD9XXX_CLSH_STATE_HPHR)
- wcd9xxx_clsh_comp_req(codec, clsh_d,
- CLSH_COMPUTE_HPH_R, true);
- } else {
- switch (req_state) {
- case WCD9XXX_CLSH_STATE_LO:
- snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC,
- 0x20, 0x00);
- wcd9xxx_dynamic_bypass_buck_ctrl_lo(codec, true);
- break;
- case WCD9XXX_CLSH_STATE_HPHL:
- wcd9xxx_clsh_comp_req(codec, clsh_d,
- CLSH_COMPUTE_HPH_L, false);
- break;
- case WCD9XXX_CLSH_STATE_HPHR:
- wcd9xxx_clsh_comp_req(codec, clsh_d,
- CLSH_COMPUTE_HPH_R, false);
- break;
- default:
- dev_dbg(codec->dev,
- "%s:Invalid state:0x%x,enable:0x%x\n",
- __func__, req_state, is_enable);
- break;
- }
- if ((req_state == WCD9XXX_CLSH_STATE_LO) ||
- ((clsh_d->state & (~req_state)) == WCD9XXX_CLSH_STATE_LO)) {
- wcd9xxx_ncp_bypass_enable(codec, false);
-
- if ((clsh_d->state & (~req_state)) ==
- WCD9XXX_CLSH_STATE_LO) {
- wcd9xxx_set_fclk_get_ncp(codec, clsh_d,
- NCP_FCLK_LEVEL_5);
- wcd9xxx_set_fclk_put_ncp(codec, clsh_d,
- NCP_FCLK_LEVEL_8);
- }
-
- if (req_state & WCD9XXX_CLSH_STATE_HPH_ST) {
- usleep_range(BUCK_SETTLE_TIME_US,
- BUCK_SETTLE_TIME_US + 10);
- if (clsh_d->buck_mv ==
- WCD9XXX_CDC_BUCK_MV_1P8) {
- wcd9xxx_enable_buck(codec, clsh_d,
- false);
- wcd9xxx_ncp_bypass_enable(codec, true);
- } else {
- /*
- *NCP settle time recommended by codec
- *specification
- */
- usleep_range(NCP_SETTLE_TIME_US,
- NCP_SETTLE_TIME_US + 10);
- wcd9xxx_clsh_set_Iest(codec, 0x02);
- }
- snd_soc_update_bits(codec,
- WCD9XXX_A_BUCK_MODE_1,
- 0x04, 0x00);
- snd_soc_update_bits(codec,
- WCD9XXX_A_BUCK_MODE_4,
- 0xFF, BUCK_VREF_1P8V);
- }
- }
- }
-}
-
-static void wcd9xxx_clsh_state_ear_lo(struct snd_soc_codec *codec,
- struct wcd9xxx_clsh_cdc_data *clsh_d,
- u8 req_state, bool is_enable)
-{
-
- dev_dbg(codec->dev, "%s: enter %s\n", __func__,
- is_enable ? "enable" : "disable");
- if (is_enable) {
- wcd9xxx_dynamic_bypass_buck_ctrl(codec, false);
- wcd9xxx_enable_buck(codec, clsh_d, true);
- wcd9xxx_ncp_bypass_enable(codec, true);
- if (req_state & WCD9XXX_CLSH_STATE_EAR) {
- wcd9xxx_set_fclk_get_ncp(codec, clsh_d,
- NCP_FCLK_LEVEL_8);
- wcd9xxx_set_fclk_put_ncp(codec, clsh_d,
- NCP_FCLK_LEVEL_5);
- wcd9xxx_enable_clsh_block(codec, clsh_d, true);
- wcd9xxx_chargepump_request(codec, true);
- wcd9xxx_enable_anc_delay(codec, true);
- wcd9xxx_clsh_comp_req(codec, clsh_d,
- CLSH_COMPUTE_EAR, true);
- }
- } else {
- wcd9xxx_ncp_bypass_enable(codec, false);
-
- if ((clsh_d->state & (~req_state)) == WCD9XXX_CLSH_STATE_LO) {
- wcd9xxx_set_fclk_get_ncp(codec, clsh_d,
- NCP_FCLK_LEVEL_5);
- wcd9xxx_set_fclk_put_ncp(codec, clsh_d,
- NCP_FCLK_LEVEL_8);
- }
-
- if (req_state & WCD9XXX_CLSH_STATE_LO) {
- snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC,
- 0x20, 0x00);
- wcd9xxx_dynamic_bypass_buck_ctrl(codec, true);
- } else if (req_state & WCD9XXX_CLSH_STATE_EAR) {
- wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_EAR,
- false);
- /*sleep 5ms*/
- if (clsh_d->buck_mv == WCD9XXX_CDC_BUCK_MV_1P8) {
- wcd9xxx_enable_buck(codec, clsh_d, false);
- wcd9xxx_ncp_bypass_enable(codec, true);
- } else {
- /* NCP settle time recommended by codec spec */
- usleep_range(NCP_SETTLE_TIME_US,
- NCP_SETTLE_TIME_US + 10);
- wcd9xxx_clsh_set_Iest(codec, 0x02);
- }
- snd_soc_update_bits(codec, WCD9XXX_A_BUCK_MODE_1,
- 0x04, 0x00);
- snd_soc_update_bits(codec, WCD9XXX_A_BUCK_MODE_4,
- 0xFF, BUCK_VREF_1P8V);
- }
- }
-}
-
-static void wcd9xxx_clsh_state_hph_ear_lo(struct snd_soc_codec *codec,
- struct wcd9xxx_clsh_cdc_data *clsh_d,
- u8 req_state, bool is_enable)
-{
- dev_dbg(codec->dev, "%s: enter %s\n", __func__,
- is_enable ? "enable" : "disable");
-
- if (req_state & WCD9XXX_CLSH_STATE_HPHL)
- wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_HPH_L,
- is_enable);
-
- if (req_state & WCD9XXX_CLSH_STATE_HPHR)
- wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_HPH_R,
- is_enable);
-
- if (req_state & WCD9XXX_CLSH_STATE_EAR)
- wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_EAR,
- is_enable);
-}
-
-static void wcd9xxx_clsh_state_ear(struct snd_soc_codec *codec,
- struct wcd9xxx_clsh_cdc_data *clsh_d,
- u8 req_state, bool is_enable)
-{
- pr_debug("%s: enter %s\n", __func__, is_enable ? "enable" : "disable");
- if (is_enable) {
- wcd9xxx_cfg_clsh_param_common(codec);
- wcd9xxx_cfg_clsh_param_ear(codec);
- wcd9xxx_enable_clsh_block(codec, clsh_d, true);
- wcd9xxx_chargepump_request(codec, true);
- wcd9xxx_enable_anc_delay(codec, true);
- wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_EAR, true);
- wcd9xxx_set_buck_mode(codec, BUCK_VREF_2V);
- wcd9xxx_enable_buck(codec, clsh_d, true);
- wcd9xxx_set_fclk_get_ncp(codec, clsh_d, NCP_FCLK_LEVEL_8);
-
- dev_dbg(codec->dev, "%s: Enabled ear mode class h\n", __func__);
- } else {
- dev_dbg(codec->dev, "%s: stub fallback to ear\n", __func__);
- wcd9xxx_set_fclk_put_ncp(codec, clsh_d, NCP_FCLK_LEVEL_8);
- wcd9xxx_enable_buck(codec, clsh_d, false);
- wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_EAR, false);
- wcd9xxx_chargepump_request(codec, false);
- wcd9xxx_enable_clsh_block(codec, clsh_d, false);
- }
-}
-
-static void wcd9xxx_clsh_state_hph_l(struct snd_soc_codec *codec,
- struct wcd9xxx_clsh_cdc_data *clsh_d,
- u8 req_state, bool is_enable)
-{
- pr_debug("%s: enter %s\n", __func__, is_enable ? "enable" : "disable");
-
- if (is_enable) {
- wcd9xxx_cfg_clsh_param_common(codec);
- wcd9xxx_cfg_clsh_param_hph(codec);
- wcd9xxx_enable_clsh_block(codec, clsh_d, true);
- wcd9xxx_chargepump_request(codec, true);
- wcd9xxx_enable_anc_delay(codec, true);
- wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_HPH_L, true);
- wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_HPH_R, true);
- wcd9xxx_set_buck_mode(codec, BUCK_VREF_0P494V);
- wcd9xxx_enable_buck(codec, clsh_d, true);
- wcd9xxx_set_fclk_get_ncp(codec, clsh_d, NCP_FCLK_LEVEL_8);
-
- dev_dbg(codec->dev, "%s: Done\n", __func__);
- } else {
- wcd9xxx_set_fclk_put_ncp(codec, clsh_d, NCP_FCLK_LEVEL_8);
- wcd9xxx_enable_buck(codec, clsh_d, false);
- wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_HPH_L, false);
- wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_HPH_R, false);
- wcd9xxx_enable_clsh_block(codec, clsh_d, false);
- wcd9xxx_chargepump_request(codec, false);
- }
-}
-
-static void wcd9xxx_clsh_state_hph_r(struct snd_soc_codec *codec,
- struct wcd9xxx_clsh_cdc_data *clsh_d,
- u8 req_state, bool is_enable)
-{
- pr_debug("%s: enter %s\n", __func__, is_enable ? "enable" : "disable");
-
- if (is_enable) {
- wcd9xxx_cfg_clsh_param_common(codec);
- wcd9xxx_cfg_clsh_param_hph(codec);
- wcd9xxx_enable_clsh_block(codec, clsh_d, true);
- wcd9xxx_chargepump_request(codec, true);
- wcd9xxx_enable_anc_delay(codec, true);
- wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_HPH_L, true);
- wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_HPH_R, true);
- wcd9xxx_set_buck_mode(codec, BUCK_VREF_0P494V);
- wcd9xxx_enable_buck(codec, clsh_d, true);
- wcd9xxx_set_fclk_get_ncp(codec, clsh_d, NCP_FCLK_LEVEL_8);
-
- dev_dbg(codec->dev, "%s: Done\n", __func__);
- } else {
- wcd9xxx_set_fclk_put_ncp(codec, clsh_d, NCP_FCLK_LEVEL_8);
- wcd9xxx_enable_buck(codec, clsh_d, false);
- wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_HPH_L, false);
- wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_HPH_R, false);
- wcd9xxx_enable_clsh_block(codec, clsh_d, false);
- wcd9xxx_chargepump_request(codec, false);
- }
-}
-
-static void wcd9xxx_clsh_state_hph_st(struct snd_soc_codec *codec,
- struct wcd9xxx_clsh_cdc_data *clsh_d,
- u8 req_state, bool is_enable)
-{
- pr_debug("%s: enter %s\n", __func__, is_enable ? "enable" : "disable");
-
- if (is_enable)
- dev_dbg(codec->dev, "%s: stub fallback to hph_st\n", __func__);
- else
- dev_dbg(codec->dev, "%s: stub fallback to hph_st\n", __func__);
-}
-
-static void wcd9xxx_clsh_state_lo(struct snd_soc_codec *codec,
- struct wcd9xxx_clsh_cdc_data *clsh_d,
- u8 req_state, bool is_enable)
-{
- pr_debug("%s: enter %s, buck_mv %d\n", __func__,
- is_enable ? "enable" : "disable", clsh_d->buck_mv);
-
- if (is_enable) {
- wcd9xxx_set_buck_mode(codec, BUCK_VREF_1P8V);
- wcd9xxx_enable_buck(codec, clsh_d, true);
- wcd9xxx_set_fclk_get_ncp(codec, clsh_d, NCP_FCLK_LEVEL_5);
-
- if (clsh_d->buck_mv == WCD9XXX_CDC_BUCK_MV_1P8) {
- wcd9xxx_enable_buck(codec, clsh_d, false);
- snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC,
- 1 << 4, 1 << 4);
- /* NCP settle time recommended by codec specification */
- usleep_range(NCP_SETTLE_TIME_US,
- NCP_SETTLE_TIME_US + 10);
- } else {
- /* NCP settle time recommended by codec specification */
- usleep_range(NCP_SETTLE_TIME_US,
- NCP_SETTLE_TIME_US + 10);
- snd_soc_update_bits(codec, WCD9XXX_A_BUCK_MODE_5,
- 0x01, (0x01 & 0x03));
- snd_soc_update_bits(codec, WCD9XXX_A_BUCK_MODE_5,
- 0xFC, (0xFC & 0xB));
- }
- snd_soc_update_bits(codec, WCD9XXX_A_BUCK_MODE_1, 0x04, 0x00);
- } else {
- dev_dbg(codec->dev, "%s: stub fallback to lineout\n", __func__);
- wcd9xxx_set_fclk_put_ncp(codec, clsh_d, NCP_FCLK_LEVEL_5);
- if (clsh_d->buck_mv != WCD9XXX_CDC_BUCK_MV_1P8)
- wcd9xxx_enable_buck(codec, clsh_d, false);
- }
-}
-
-static void wcd9xxx_clsh_state_err(struct snd_soc_codec *codec,
- struct wcd9xxx_clsh_cdc_data *clsh_d,
- u8 req_state, bool is_enable)
-{
- char msg[128];
-
- dev_dbg(codec->dev,
- "%s Wrong request for class H state machine requested to %s %s",
- __func__, is_enable ? "enable" : "disable",
- state_to_str(req_state, msg, sizeof(msg)));
- WARN_ON(1);
-}
-
-/*
- * Function: wcd9xxx_clsh_is_state_valid
- * Params: state
- * Description:
- * Provides information on valid states of Class H configuration
- */
-static int wcd9xxx_clsh_is_state_valid(u8 state)
-{
- switch (state) {
- case WCD9XXX_CLSH_STATE_IDLE:
- case WCD9XXX_CLSH_STATE_EAR:
- case WCD9XXX_CLSH_STATE_HPHL:
- case WCD9XXX_CLSH_STATE_HPHR:
- case WCD9XXX_CLSH_STATE_HPH_ST:
- case WCD9XXX_CLSH_STATE_LO:
- case WCD9XXX_CLSH_STATE_HPHL_EAR:
- case WCD9XXX_CLSH_STATE_HPHR_EAR:
- case WCD9XXX_CLSH_STATE_HPH_ST_EAR:
- case WCD9XXX_CLSH_STATE_HPHL_LO:
- case WCD9XXX_CLSH_STATE_HPHR_LO:
- case WCD9XXX_CLSH_STATE_HPH_ST_LO:
- case WCD9XXX_CLSH_STATE_EAR_LO:
- case WCD9XXX_CLSH_STATE_HPHL_EAR_LO:
- case WCD9XXX_CLSH_STATE_HPHR_EAR_LO:
- case WCD9XXX_CLSH_STATE_HPH_ST_EAR_LO:
- return 1;
- default:
- break;
- }
- return 0;
-}
-
-/*
- * Function: wcd9xxx_clsh_fsm
- * Params: codec, cdc_clsh_d, req_state, req_type, clsh_event
- * Description:
- * This function handles PRE DAC and POST DAC conditions of different devices
- * and updates class H configuration of different combination of devices
- * based on validity of their states. cdc_clsh_d will contain current
- * class h state information
- */
-void wcd9xxx_clsh_fsm(struct snd_soc_codec *codec,
- struct wcd9xxx_clsh_cdc_data *cdc_clsh_d,
- u8 req_state, bool req_type, u8 clsh_event)
-{
- u8 old_state, new_state;
- char msg0[128], msg1[128];
-
- switch (clsh_event) {
- case WCD9XXX_CLSH_EVENT_PRE_DAC:
- /* PRE_DAC event should be used only for Enable */
- BUG_ON(req_type != WCD9XXX_CLSH_REQ_ENABLE);
-
- old_state = cdc_clsh_d->state;
- new_state = old_state | req_state;
-
- if (!wcd9xxx_clsh_is_state_valid(new_state)) {
- dev_dbg(codec->dev,
- "%s: classH not a valid new state: %s\n",
- __func__,
- state_to_str(new_state, msg0, sizeof(msg0)));
- return;
- }
- if (new_state == old_state) {
- dev_dbg(codec->dev,
- "%s: classH already in requested state: %s\n",
- __func__,
- state_to_str(new_state, msg0, sizeof(msg0)));
- return;
- }
- (*clsh_state_fp[new_state]) (codec, cdc_clsh_d, req_state,
- req_type);
- cdc_clsh_d->state = new_state;
- dev_dbg(codec->dev,
- "%s: ClassH state transition from %s to %s\n",
- __func__, state_to_str(old_state, msg0, sizeof(msg0)),
- state_to_str(cdc_clsh_d->state, msg1, sizeof(msg1)));
-
- break;
- case WCD9XXX_CLSH_EVENT_POST_PA:
- if (req_type == WCD9XXX_CLSH_REQ_DISABLE) {
- old_state = cdc_clsh_d->state;
- new_state = old_state & (~req_state);
-
- if (new_state < NUM_CLSH_STATES) {
- if (!wcd9xxx_clsh_is_state_valid(old_state)) {
- dev_dbg(codec->dev,
- "%s:Invalid old state:%s\n",
- __func__,
- state_to_str(old_state, msg0,
- sizeof(msg0)));
- return;
- }
- if (new_state == old_state) {
- dev_dbg(codec->dev,
- "%s: clsH already in old state: %s\n",
- __func__,
- state_to_str(new_state, msg0,
- sizeof(msg0)));
- return;
- }
- (*clsh_state_fp[old_state]) (codec, cdc_clsh_d,
- req_state,
- req_type);
- cdc_clsh_d->state = new_state;
- dev_dbg(codec->dev, "%s: ClassH state transition from %s to %s\n",
- __func__, state_to_str(old_state, msg0,
- sizeof(msg0)),
- state_to_str(cdc_clsh_d->state, msg1,
- sizeof(msg1)));
-
- } else {
- dev_dbg(codec->dev, "%s:wrong new state=0x%x\n",
- __func__, new_state);
- }
- } else if (!(cdc_clsh_d->state & WCD9XXX_CLSH_STATE_LO)) {
- wcd9xxx_clsh_enable_post_pa(codec, cdc_clsh_d);
- }
-
- break;
- }
-
-}
-EXPORT_SYMBOL(wcd9xxx_clsh_fsm);
-
-void wcd9xxx_clsh_init(struct wcd9xxx_clsh_cdc_data *clsh,
- struct wcd9xxx_resmgr *resmgr)
-{
- int i;
-
- clsh->state = WCD9XXX_CLSH_STATE_IDLE;
- clsh->resmgr = resmgr;
-
- for (i = 0; i < NUM_CLSH_STATES; i++)
- clsh_state_fp[i] = wcd9xxx_clsh_state_err;
-
- clsh_state_fp[WCD9XXX_CLSH_STATE_EAR] = wcd9xxx_clsh_state_ear;
- clsh_state_fp[WCD9XXX_CLSH_STATE_HPHL] =
- wcd9xxx_clsh_state_hph_l;
- clsh_state_fp[WCD9XXX_CLSH_STATE_HPHR] =
- wcd9xxx_clsh_state_hph_r;
- clsh_state_fp[WCD9XXX_CLSH_STATE_HPH_ST] =
- wcd9xxx_clsh_state_hph_st;
- clsh_state_fp[WCD9XXX_CLSH_STATE_LO] = wcd9xxx_clsh_state_lo;
- clsh_state_fp[WCD9XXX_CLSH_STATE_HPHL_EAR] =
- wcd9xxx_clsh_state_hph_ear;
- clsh_state_fp[WCD9XXX_CLSH_STATE_HPHR_EAR] =
- wcd9xxx_clsh_state_hph_ear;
- clsh_state_fp[WCD9XXX_CLSH_STATE_HPH_ST_EAR] =
- wcd9xxx_clsh_state_hph_ear;
- clsh_state_fp[WCD9XXX_CLSH_STATE_HPHL_LO] = wcd9xxx_clsh_state_hph_lo;
- clsh_state_fp[WCD9XXX_CLSH_STATE_HPHR_LO] = wcd9xxx_clsh_state_hph_lo;
- clsh_state_fp[WCD9XXX_CLSH_STATE_HPH_ST_LO] =
- wcd9xxx_clsh_state_hph_lo;
- clsh_state_fp[WCD9XXX_CLSH_STATE_EAR_LO] = wcd9xxx_clsh_state_ear_lo;
- clsh_state_fp[WCD9XXX_CLSH_STATE_HPHL_EAR_LO] =
- wcd9xxx_clsh_state_hph_ear_lo;
- clsh_state_fp[WCD9XXX_CLSH_STATE_HPHR_EAR_LO] =
- wcd9xxx_clsh_state_hph_ear_lo;
- clsh_state_fp[WCD9XXX_CLSH_STATE_HPH_ST_EAR_LO] =
- wcd9xxx_clsh_state_hph_ear_lo;
-
-}
-EXPORT_SYMBOL(wcd9xxx_clsh_init);
-
-MODULE_DESCRIPTION("WCD9XXX Common");
-MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/wcd9xxx-common.h b/sound/soc/codecs/wcd9xxx-common.h
deleted file mode 100644
index 5c0c4a9..0000000
--- a/sound/soc/codecs/wcd9xxx-common.h
+++ /dev/null
@@ -1,286 +0,0 @@
-/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef WCD9XXX_CODEC_COMMON
-
-#define WCD9XXX_CODEC_COMMON
-
-#include "wcd9xxx-resmgr.h"
-
-#define WCD9XXX_CLSH_REQ_ENABLE true
-#define WCD9XXX_CLSH_REQ_DISABLE false
-
-#define WCD9XXX_CLSH_EVENT_PRE_DAC 0x01
-#define WCD9XXX_CLSH_EVENT_POST_PA 0x02
-
-/* Basic states for Class H state machine.
- * represented as a bit mask within a u8 data type
- * bit 0: EAR mode
- * bit 1: HPH Left mode
- * bit 2: HPH Right mode
- * bit 3: Lineout mode
- * bit 4: Ultrasound mode
- */
-#define WCD9XXX_CLSH_STATE_IDLE 0x00
-#define WCD9XXX_CLSH_STATE_EAR (0x01 << 0)
-#define WCD9XXX_CLSH_STATE_HPHL (0x01 << 1)
-#define WCD9XXX_CLSH_STATE_HPHR (0x01 << 2)
-#define WCD9XXX_CLSH_STATE_LO (0x01 << 3)
-#define NUM_CLSH_STATES (0x01 << 4)
-
-#define WCD9XXX_CLSAB_STATE_IDLE 0x00
-#define WCD9XXX_CLSAB_STATE_HPHL (0x01 << 1)
-#define WCD9XXX_CLSAB_STATE_HPHR (0x01 << 2)
-
-#define WCD9XXX_CLSAB_REQ_ENABLE true
-#define WCD9XXX_CLSAB_REQ_DISABLE false
-
-#define WCD9XXX_NON_UHQA_MODE 0
-
-#define WCD9XXX_DMIC_SAMPLE_RATE_DIV_2 0x0
-#define WCD9XXX_DMIC_SAMPLE_RATE_DIV_3 0x1
-#define WCD9XXX_DMIC_SAMPLE_RATE_DIV_4 0x2
-
-#define WCD9XXX_DMIC_B1_CTL_DIV_2 0x00
-#define WCD9XXX_DMIC_B1_CTL_DIV_3 0x22
-#define WCD9XXX_DMIC_B1_CTL_DIV_4 0x44
-
-#define WCD9XXX_DMIC_B2_CTL_DIV_2 0x00
-#define WCD9XXX_DMIC_B2_CTL_DIV_3 0x02
-#define WCD9XXX_DMIC_B2_CTL_DIV_4 0x04
-
-#define WCD9XXX_ANC_DMIC_X2_ON 0x1
-#define WCD9XXX_ANC_DMIC_X2_OFF 0x0
-
-/* Derived State: Bits 1 and 2 should be set for Headphone stereo */
-#define WCD9XXX_CLSH_STATE_HPH_ST (WCD9XXX_CLSH_STATE_HPHL | \
- WCD9XXX_CLSH_STATE_HPHR)
-
-#define WCD9XXX_CLSH_STATE_HPHL_EAR (WCD9XXX_CLSH_STATE_HPHL | \
- WCD9XXX_CLSH_STATE_EAR)
-#define WCD9XXX_CLSH_STATE_HPHR_EAR (WCD9XXX_CLSH_STATE_HPHR | \
- WCD9XXX_CLSH_STATE_EAR)
-
-#define WCD9XXX_CLSH_STATE_HPH_ST_EAR (WCD9XXX_CLSH_STATE_HPH_ST | \
- WCD9XXX_CLSH_STATE_EAR)
-
-#define WCD9XXX_CLSH_STATE_HPHL_LO (WCD9XXX_CLSH_STATE_HPHL | \
- WCD9XXX_CLSH_STATE_LO)
-#define WCD9XXX_CLSH_STATE_HPHR_LO (WCD9XXX_CLSH_STATE_HPHR | \
- WCD9XXX_CLSH_STATE_LO)
-
-#define WCD9XXX_CLSH_STATE_HPH_ST_LO (WCD9XXX_CLSH_STATE_HPH_ST | \
- WCD9XXX_CLSH_STATE_LO)
-
-#define WCD9XXX_CLSH_STATE_EAR_LO (WCD9XXX_CLSH_STATE_EAR | \
- WCD9XXX_CLSH_STATE_LO)
-
-#define WCD9XXX_CLSH_STATE_HPHL_EAR_LO (WCD9XXX_CLSH_STATE_HPHL | \
- WCD9XXX_CLSH_STATE_EAR | \
- WCD9XXX_CLSH_STATE_LO)
-#define WCD9XXX_CLSH_STATE_HPHR_EAR_LO (WCD9XXX_CLSH_STATE_HPHR | \
- WCD9XXX_CLSH_STATE_EAR | \
- WCD9XXX_CLSH_STATE_LO)
-#define WCD9XXX_CLSH_STATE_HPH_ST_EAR_LO (WCD9XXX_CLSH_STATE_HPH_ST | \
- WCD9XXX_CLSH_STATE_EAR | \
- WCD9XXX_CLSH_STATE_LO)
-
-struct wcd9xxx_reg_mask_val {
- u16 reg;
- u8 mask;
- u8 val;
-};
-
-enum ncp_fclk_level {
- NCP_FCLK_LEVEL_8,
- NCP_FCLK_LEVEL_5,
- NCP_FCLK_LEVEL_MAX,
-};
-
-/* Class H data that the codec driver will maintain */
-struct wcd9xxx_clsh_cdc_data {
- u8 state;
- int buck_mv;
- bool is_dynamic_vdd_cp;
- int clsh_users;
- int buck_users;
- int ncp_users[NCP_FCLK_LEVEL_MAX];
- struct wcd9xxx_resmgr *resmgr;
-};
-
-struct wcd9xxx_anc_header {
- u32 reserved[3];
- u32 num_anc_slots;
-};
-
-enum wcd9xxx_buck_volt {
- WCD9XXX_CDC_BUCK_UNSUPPORTED = 0,
- WCD9XXX_CDC_BUCK_MV_1P8 = 1800000,
- WCD9XXX_CDC_BUCK_MV_2P15 = 2150000,
-};
-
-struct mad_audio_header {
- u32 reserved[3];
- u32 num_reg_cfg;
-};
-
-struct mad_microphone_info {
- uint8_t input_microphone;
- uint8_t cycle_time;
- uint8_t settle_time;
- uint8_t padding;
-} __packed;
-
-struct mad_micbias_info {
- uint8_t micbias;
- uint8_t k_factor;
- uint8_t external_bypass_capacitor;
- uint8_t internal_biasing;
- uint8_t cfilter;
- uint8_t padding[3];
-} __packed;
-
-struct mad_rms_audio_beacon_info {
- uint8_t rms_omit_samples;
- uint8_t rms_comp_time;
- uint8_t detection_mechanism;
- uint8_t rms_diff_threshold;
- uint8_t rms_threshold_lsb;
- uint8_t rms_threshold_msb;
- uint8_t padding[2];
- uint8_t iir_coefficients[36];
-} __packed;
-
-struct mad_rms_ultrasound_info {
- uint8_t rms_comp_time;
- uint8_t detection_mechanism;
- uint8_t rms_diff_threshold;
- uint8_t rms_threshold_lsb;
- uint8_t rms_threshold_msb;
- uint8_t padding[3];
- uint8_t iir_coefficients[36];
-} __packed;
-
-struct mad_audio_cal {
- uint32_t version;
- struct mad_microphone_info microphone_info;
- struct mad_micbias_info micbias_info;
- struct mad_rms_audio_beacon_info audio_info;
- struct mad_rms_audio_beacon_info beacon_info;
- struct mad_rms_ultrasound_info ultrasound_info;
-} __packed;
-
-extern void wcd9xxx_clsh_fsm(struct snd_soc_codec *codec,
- struct wcd9xxx_clsh_cdc_data *cdc_clsh_d,
- u8 req_state, bool req_type, u8 clsh_event);
-
-extern void wcd9xxx_enable_high_perf_mode(struct snd_soc_codec *codec,
- struct wcd9xxx_clsh_cdc_data *clsh_d,
- u8 uhqa_mode, u8 req_state, bool req_type);
-
-extern void wcd9xxx_clsh_init(struct wcd9xxx_clsh_cdc_data *clsh,
- struct wcd9xxx_resmgr *resmgr);
-
-extern void wcd9xxx_clsh_imped_config(struct snd_soc_codec *codec,
- int imped);
-
-enum wcd9xxx_codec_event {
- WCD9XXX_CODEC_EVENT_CODEC_UP = 0,
-};
-
-struct wcd9xxx_register_save_node {
- struct list_head lh;
- u16 reg;
- u16 value;
-};
-
-extern int wcd9xxx_soc_update_bits_push(struct snd_soc_codec *codec,
- struct list_head *lh,
- uint16_t reg, uint8_t mask,
- uint8_t value, int delay);
-extern void wcd9xxx_restore_registers(struct snd_soc_codec *codec,
- struct list_head *lh);
-enum {
- RESERVED = 0,
- AANC_LPF_FF_FB = 1,
- AANC_LPF_COEFF_MSB,
- AANC_LPF_COEFF_LSB,
- HW_MAD_AUDIO_ENABLE,
- HW_MAD_ULTR_ENABLE,
- HW_MAD_BEACON_ENABLE,
- HW_MAD_AUDIO_SLEEP_TIME,
- HW_MAD_ULTR_SLEEP_TIME,
- HW_MAD_BEACON_SLEEP_TIME,
- HW_MAD_TX_AUDIO_SWITCH_OFF,
- HW_MAD_TX_ULTR_SWITCH_OFF,
- HW_MAD_TX_BEACON_SWITCH_OFF,
- MAD_AUDIO_INT_DEST_SELECT_REG,
- MAD_ULT_INT_DEST_SELECT_REG,
- MAD_BEACON_INT_DEST_SELECT_REG,
- MAD_CLIP_INT_DEST_SELECT_REG,
- MAD_VBAT_INT_DEST_SELECT_REG,
- MAD_AUDIO_INT_MASK_REG,
- MAD_ULT_INT_MASK_REG,
- MAD_BEACON_INT_MASK_REG,
- MAD_CLIP_INT_MASK_REG,
- MAD_VBAT_INT_MASK_REG,
- MAD_AUDIO_INT_STATUS_REG,
- MAD_ULT_INT_STATUS_REG,
- MAD_BEACON_INT_STATUS_REG,
- MAD_CLIP_INT_STATUS_REG,
- MAD_VBAT_INT_STATUS_REG,
- MAD_AUDIO_INT_CLEAR_REG,
- MAD_ULT_INT_CLEAR_REG,
- MAD_BEACON_INT_CLEAR_REG,
- MAD_CLIP_INT_CLEAR_REG,
- MAD_VBAT_INT_CLEAR_REG,
- SB_PGD_PORT_TX_WATERMARK_N,
- SB_PGD_PORT_TX_ENABLE_N,
- SB_PGD_PORT_RX_WATERMARK_N,
- SB_PGD_PORT_RX_ENABLE_N,
- SB_PGD_TX_PORTn_MULTI_CHNL_0,
- SB_PGD_TX_PORTn_MULTI_CHNL_1,
- SB_PGD_RX_PORTn_MULTI_CHNL_0,
- SB_PGD_RX_PORTn_MULTI_CHNL_1,
- AANC_FF_GAIN_ADAPTIVE,
- AANC_FFGAIN_ADAPTIVE_EN,
- AANC_GAIN_CONTROL,
- SPKR_CLIP_PIPE_BANK_SEL,
- SPKR_CLIPDET_VAL0,
- SPKR_CLIPDET_VAL1,
- SPKR_CLIPDET_VAL2,
- SPKR_CLIPDET_VAL3,
- SPKR_CLIPDET_VAL4,
- SPKR_CLIPDET_VAL5,
- SPKR_CLIPDET_VAL6,
- SPKR_CLIPDET_VAL7,
- VBAT_RELEASE_INT_DEST_SELECT_REG,
- VBAT_RELEASE_INT_MASK_REG,
- VBAT_RELEASE_INT_STATUS_REG,
- VBAT_RELEASE_INT_CLEAR_REG,
- MAD2_CLIP_INT_DEST_SELECT_REG,
- MAD2_CLIP_INT_MASK_REG,
- MAD2_CLIP_INT_STATUS_REG,
- MAD2_CLIP_INT_CLEAR_REG,
- SPKR2_CLIP_PIPE_BANK_SEL,
- SPKR2_CLIPDET_VAL0,
- SPKR2_CLIPDET_VAL1,
- SPKR2_CLIPDET_VAL2,
- SPKR2_CLIPDET_VAL3,
- SPKR2_CLIPDET_VAL4,
- SPKR2_CLIPDET_VAL5,
- SPKR2_CLIPDET_VAL6,
- SPKR2_CLIPDET_VAL7,
- MAX_CFG_REGISTERS,
-};
-
-#endif
diff --git a/sound/soc/codecs/wcd9xxx-mbhc.c b/sound/soc/codecs/wcd9xxx-mbhc.c
deleted file mode 100644
index 3754b57..0000000
--- a/sound/soc/codecs/wcd9xxx-mbhc.c
+++ /dev/null
@@ -1,5671 +0,0 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/firmware.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-#include <linux/device.h>
-#include <linux/printk.h>
-#include <linux/ratelimit.h>
-#include <linux/debugfs.h>
-#include <linux/list.h>
-#include <linux/mfd/wcd9xxx/core.h>
-#include <linux/mfd/wcd9xxx/wcd9xxx-irq.h>
-#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
-#include <linux/mfd/wcd9xxx/wcd9320_registers.h>
-#include <linux/mfd/wcd9xxx/pdata.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-#include <sound/soc-dapm.h>
-#include <sound/jack.h>
-#include <sound/tlv.h>
-#include <linux/bitops.h>
-#include <linux/delay.h>
-#include <linux/pm_runtime.h>
-#include <linux/kernel.h>
-#include <linux/gpio.h>
-#include <linux/input.h>
-#include "wcd9xxx-mbhc.h"
-#include "wcdcal-hwdep.h"
-#include "wcd9xxx-resmgr.h"
-#include "wcd9xxx-common.h"
-
-#define WCD9XXX_JACK_MASK (SND_JACK_HEADSET | SND_JACK_OC_HPHL | \
- SND_JACK_OC_HPHR | SND_JACK_LINEOUT | \
- SND_JACK_UNSUPPORTED | SND_JACK_MICROPHONE2 | \
- SND_JACK_MECHANICAL)
-#define WCD9XXX_JACK_BUTTON_MASK (SND_JACK_BTN_0 | SND_JACK_BTN_1 | \
- SND_JACK_BTN_2 | SND_JACK_BTN_3 | \
- SND_JACK_BTN_4 | SND_JACK_BTN_5)
-
-#define NUM_DCE_PLUG_DETECT 3
-#define NUM_DCE_PLUG_INS_DETECT 5
-#define NUM_ATTEMPTS_INSERT_DETECT 25
-#define NUM_ATTEMPTS_TO_REPORT 5
-
-#define FAKE_INS_LOW 10
-#define FAKE_INS_HIGH 80
-#define FAKE_INS_HIGH_NO_SWCH 150
-#define FAKE_REMOVAL_MIN_PERIOD_MS 50
-#define FAKE_INS_DELTA_SCALED_MV 300
-
-#define BUTTON_MIN 0x8000
-#define STATUS_REL_DETECTION 0x0C
-
-#define HS_DETECT_PLUG_TIME_MS (5 * 1000)
-#define ANC_HPH_DETECT_PLUG_TIME_MS (5 * 1000)
-#define HS_DETECT_PLUG_INERVAL_MS 100
-#define SWCH_REL_DEBOUNCE_TIME_MS 50
-#define SWCH_IRQ_DEBOUNCE_TIME_US 5000
-#define BTN_RELEASE_DEBOUNCE_TIME_MS 25
-
-#define GND_MIC_SWAP_THRESHOLD 2
-#define OCP_ATTEMPT 1
-
-#define FW_READ_ATTEMPTS 15
-#define FW_READ_TIMEOUT 4000000
-
-#define BUTTON_POLLING_SUPPORTED true
-
-#define MCLK_RATE_12288KHZ 12288000
-#define MCLK_RATE_9600KHZ 9600000
-
-#define DEFAULT_DCE_STA_WAIT 55
-#define DEFAULT_DCE_WAIT 60000
-#define DEFAULT_STA_WAIT 5000
-
-#define VDDIO_MICBIAS_MV 1800
-
-#define WCD9XXX_MICBIAS_PULLDOWN_SETTLE_US 5000
-
-#define WCD9XXX_HPHL_STATUS_READY_WAIT_US 1000
-#define WCD9XXX_MUX_SWITCH_READY_WAIT_MS 50
-#define WCD9XXX_MEAS_DELTA_MAX_MV 120
-#define WCD9XXX_MEAS_INVALD_RANGE_LOW_MV 20
-#define WCD9XXX_MEAS_INVALD_RANGE_HIGH_MV 80
-
-/* Threshold in milliohm used for mono/stereo
- * plug classification
- */
-#define WCD9XXX_MONO_HS_DIFF_THR 20000000
-#define WCD9XXX_MONO_HS_MIN_THR 2000
-
-/*
- * Invalid voltage range for the detection
- * of plug type with current source
- */
-#define WCD9XXX_CS_MEAS_INVALD_RANGE_LOW_MV 160
-#define WCD9XXX_CS_MEAS_INVALD_RANGE_HIGH_MV 265
-
-/*
- * Threshold used to detect euro headset
- * with current source
- */
-#define WCD9XXX_CS_GM_SWAP_THRES_MIN_MV 10
-#define WCD9XXX_CS_GM_SWAP_THRES_MAX_MV 40
-
-#define WCD9XXX_MBHC_NSC_CS 9
-#define WCD9XXX_GM_SWAP_THRES_MIN_MV 150
-#define WCD9XXX_GM_SWAP_THRES_MAX_MV 650
-#define WCD9XXX_THRESHOLD_MIC_THRESHOLD 200
-
-#define WCD9XXX_USLEEP_RANGE_MARGIN_US 100
-
-/* RX_HPH_CNP_WG_TIME increases by 0.24ms */
-#define WCD9XXX_WG_TIME_FACTOR_US 240
-
-#define WCD9XXX_V_CS_HS_MAX 500
-#define WCD9XXX_V_CS_NO_MIC 5
-#define WCD9XXX_MB_MEAS_DELTA_MAX_MV 80
-#define WCD9XXX_CS_MEAS_DELTA_MAX_MV 12
-
-#define WCD9XXX_ZDET_ZONE_1 80000
-#define WCD9XXX_ZDET_ZONE_2 800000
-
-#define WCD9XXX_IS_IN_ZDET_ZONE_1(x) (x < WCD9XXX_ZDET_ZONE_1 ? 1 : 0)
-#define WCD9XXX_IS_IN_ZDET_ZONE_2(x) ((x > WCD9XXX_ZDET_ZONE_1 && \
- x < WCD9XXX_ZDET_ZONE_2) ? 1 : 0)
-#define WCD9XXX_IS_IN_ZDET_ZONE_3(x) (x > WCD9XXX_ZDET_ZONE_2 ? 1 : 0)
-#define WCD9XXX_BOX_CAR_AVRG_MIN 1
-#define WCD9XXX_BOX_CAR_AVRG_MAX 10
-
-/*
- * Need to report LINEIN if H/L impedance
- * is larger than 5K ohm
- */
-#define WCD9XXX_LINEIN_THRESHOLD 5000000
-
-static int impedance_detect_en;
-module_param(impedance_detect_en, int, 0664);
-MODULE_PARM_DESC(impedance_detect_en, "enable/disable impedance detect");
-static unsigned int z_det_box_car_avg = 1;
-module_param(z_det_box_car_avg, int, 0664);
-MODULE_PARM_DESC(z_det_box_car_avg,
- "Number of samples for impedance detection");
-
-static bool detect_use_vddio_switch;
-
-struct wcd9xxx_mbhc_detect {
- u16 dce;
- u16 sta;
- u16 hphl_status;
- bool swap_gnd;
- bool vddio;
- bool hwvalue;
- bool mic_bias;
- /* internal purpose from here */
- bool _above_no_mic;
- bool _below_v_hs_max;
- s16 _vdces;
- enum wcd9xxx_mbhc_plug_type _type;
-};
-
-enum meas_type {
- STA = 0,
- DCE,
-};
-
-enum {
- MBHC_USE_HPHL_TRIGGER = 1,
- MBHC_USE_MB_TRIGGER = 2
-};
-
-/*
- * Flags to track of PA and DAC state.
- * PA and DAC should be tracked separately as AUXPGA loopback requires
- * only PA to be turned on without DAC being on.
- */
-enum pa_dac_ack_flags {
- WCD9XXX_HPHL_PA_OFF_ACK = 0,
- WCD9XXX_HPHR_PA_OFF_ACK,
- WCD9XXX_HPHL_DAC_OFF_ACK,
- WCD9XXX_HPHR_DAC_OFF_ACK
-};
-
-enum wcd9xxx_current_v_idx {
- WCD9XXX_CURRENT_V_INS_H,
- WCD9XXX_CURRENT_V_INS_HU,
- WCD9XXX_CURRENT_V_B1_H,
- WCD9XXX_CURRENT_V_B1_HU,
- WCD9XXX_CURRENT_V_BR_H,
-};
-
-static int wcd9xxx_detect_impedance(struct wcd9xxx_mbhc *mbhc, uint32_t *zl,
- uint32_t *zr);
-static s16 wcd9xxx_get_current_v(struct wcd9xxx_mbhc *mbhc,
- const enum wcd9xxx_current_v_idx idx);
-static void wcd9xxx_get_z(struct wcd9xxx_mbhc *mbhc, s16 *dce_z, s16 *sta_z,
- struct mbhc_micbias_regs *micb_regs,
- bool norel);
-
-static void wcd9xxx_mbhc_calc_thres(struct wcd9xxx_mbhc *mbhc);
-
-static u16 wcd9xxx_codec_v_sta_dce(struct wcd9xxx_mbhc *mbhc,
- enum meas_type dce, s16 vin_mv,
- bool cs_enable);
-
-static bool wcd9xxx_mbhc_polling(struct wcd9xxx_mbhc *mbhc)
-{
- return snd_soc_read(mbhc->codec, WCD9XXX_A_CDC_MBHC_EN_CTL) & 0x1;
-}
-
-static void wcd9xxx_turn_onoff_override(struct wcd9xxx_mbhc *mbhc, bool on)
-{
- struct snd_soc_codec *codec = mbhc->codec;
-
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL,
- 0x04, on ? 0x04 : 0x00);
-}
-
-/* called under codec_resource_lock acquisition */
-static void wcd9xxx_pause_hs_polling(struct wcd9xxx_mbhc *mbhc)
-{
- struct snd_soc_codec *codec = mbhc->codec;
-
- pr_debug("%s: enter\n", __func__);
- if (!mbhc->polling_active) {
- pr_debug("polling not active, nothing to pause\n");
- return;
- }
-
- /* Soft reset MBHC block */
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8, 0x8);
- pr_debug("%s: leave\n", __func__);
-}
-
-/* called under codec_resource_lock acquisition */
-static void wcd9xxx_start_hs_polling(struct wcd9xxx_mbhc *mbhc)
-{
- struct snd_soc_codec *codec = mbhc->codec;
- int mbhc_state = mbhc->mbhc_state;
-
- pr_debug("%s: enter\n", __func__);
- if (!mbhc->polling_active) {
- pr_debug("Polling is not active, do not start polling\n");
- return;
- }
-
- /*
- * setup internal micbias if codec uses internal micbias for
- * headset detection
- */
- if (mbhc->mbhc_cfg->use_int_rbias) {
- if (mbhc->mbhc_cb && mbhc->mbhc_cb->setup_int_rbias)
- mbhc->mbhc_cb->setup_int_rbias(codec, true);
- else
- pr_err("%s: internal bias requested but codec did not provide callback\n",
- __func__);
- }
-
- snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0x04);
- if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mux_bias_block)
- mbhc->mbhc_cb->enable_mux_bias_block(codec);
- else
- snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_1,
- 0x80, 0x80);
-
- if (!mbhc->no_mic_headset_override &&
- mbhc_state == MBHC_STATE_POTENTIAL) {
- pr_debug("%s recovering MBHC state machine\n", __func__);
- mbhc->mbhc_state = MBHC_STATE_POTENTIAL_RECOVERY;
- /* set to max button press threshold */
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B2_CTL, 0x7F);
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B1_CTL, 0xFF);
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B4_CTL, 0x7F);
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B3_CTL, 0xFF);
- /* set to max */
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B6_CTL, 0x7F);
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B5_CTL, 0xFF);
- }
-
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x1);
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8, 0x0);
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x1);
- pr_debug("%s: leave\n", __func__);
-}
-
-static int __wcd9xxx_resmgr_get_k_val(struct wcd9xxx_mbhc *mbhc,
- unsigned int cfilt_mv)
-{
- return wcd9xxx_resmgr_get_k_val(mbhc->resmgr, cfilt_mv);
-}
-
-/*
- * called under codec_resource_lock acquisition
- * return old status
- */
-static bool __wcd9xxx_switch_micbias(struct wcd9xxx_mbhc *mbhc,
- int vddio_switch, bool restartpolling,
- bool checkpolling)
-{
- bool ret;
- int cfilt_k_val;
- bool override;
- struct snd_soc_codec *codec;
- struct mbhc_internal_cal_data *d = &mbhc->mbhc_data;
-
- codec = mbhc->codec;
-
- if (mbhc->micbias_enable) {
- pr_debug("%s: micbias is already on\n", __func__);
- ret = mbhc->mbhc_micbias_switched;
- return ret;
- }
-
- ret = mbhc->mbhc_micbias_switched;
- if (vddio_switch && !mbhc->mbhc_micbias_switched &&
- (!checkpolling || mbhc->polling_active)) {
- if (restartpolling)
- wcd9xxx_pause_hs_polling(mbhc);
- override = snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B1_CTL) &
- 0x04;
- if (!override)
- wcd9xxx_turn_onoff_override(mbhc, true);
-
- snd_soc_update_bits(codec, WCD9XXX_A_MAD_ANA_CTRL,
- 0x10, 0x00);
- snd_soc_update_bits(codec, WCD9XXX_A_LDO_H_MODE_1,
- 0x20, 0x00);
- /* Adjust threshold if Mic Bias voltage changes */
- if (d->micb_mv != VDDIO_MICBIAS_MV) {
- cfilt_k_val = __wcd9xxx_resmgr_get_k_val(mbhc,
- VDDIO_MICBIAS_MV);
- usleep_range(10000, 10100);
- snd_soc_update_bits(codec,
- mbhc->mbhc_bias_regs.cfilt_val,
- 0xFC, (cfilt_k_val << 2));
- usleep_range(10000, 10100);
- /* Threshods for insertion/removal */
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B1_CTL,
- d->v_ins_hu[MBHC_V_IDX_VDDIO] & 0xFF);
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B2_CTL,
- (d->v_ins_hu[MBHC_V_IDX_VDDIO] >> 8) &
- 0xFF);
-
- if (mbhc->mbhc_state != MBHC_STATE_POTENTIAL_RECOVERY) {
- /* Threshods for button press */
- snd_soc_write(codec,
- WCD9XXX_A_CDC_MBHC_VOLT_B3_CTL,
- d->v_b1_hu[MBHC_V_IDX_VDDIO] & 0xFF);
- snd_soc_write(codec,
- WCD9XXX_A_CDC_MBHC_VOLT_B4_CTL,
- (d->v_b1_hu[MBHC_V_IDX_VDDIO] >> 8) &
- 0xFF);
- snd_soc_write(codec,
- WCD9XXX_A_CDC_MBHC_VOLT_B5_CTL,
- d->v_b1_h[MBHC_V_IDX_VDDIO] & 0xFF);
- snd_soc_write(codec,
- WCD9XXX_A_CDC_MBHC_VOLT_B6_CTL,
- (d->v_b1_h[MBHC_V_IDX_VDDIO] >> 8) &
- 0xFF);
- /* Threshods for button release */
- snd_soc_write(codec,
- WCD9XXX_A_CDC_MBHC_VOLT_B9_CTL,
- d->v_brh[MBHC_V_IDX_VDDIO] & 0xFF);
- snd_soc_write(codec,
- WCD9XXX_A_CDC_MBHC_VOLT_B10_CTL,
- (d->v_brh[MBHC_V_IDX_VDDIO] >> 8) &
- 0xFF);
- }
- pr_debug("%s: Programmed MBHC thresholds to VDDIO\n",
- __func__);
- }
-
- /* Enable MIC BIAS Switch to VDDIO */
- snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.mbhc_reg,
- 0x80, 0x80);
- snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.mbhc_reg,
- 0x10, 0x00);
- if (!override)
- wcd9xxx_turn_onoff_override(mbhc, false);
- if (restartpolling)
- wcd9xxx_start_hs_polling(mbhc);
-
- mbhc->mbhc_micbias_switched = true;
- pr_debug("%s: VDDIO switch enabled\n", __func__);
- } else if (!vddio_switch && mbhc->mbhc_micbias_switched) {
- if ((!checkpolling || mbhc->polling_active) &&
- restartpolling)
- wcd9xxx_pause_hs_polling(mbhc);
-
- snd_soc_update_bits(codec, WCD9XXX_A_MAD_ANA_CTRL,
- 0x10, 0x10);
- snd_soc_update_bits(codec, WCD9XXX_A_LDO_H_MODE_1,
- 0x20, 0x20);
- /* Reprogram thresholds */
- if (d->micb_mv != VDDIO_MICBIAS_MV) {
- cfilt_k_val =
- __wcd9xxx_resmgr_get_k_val(mbhc,
- d->micb_mv);
- snd_soc_update_bits(codec,
- mbhc->mbhc_bias_regs.cfilt_val,
- 0xFC, (cfilt_k_val << 2));
- usleep_range(10000, 10100);
- /* Revert threshods for insertion/removal */
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B1_CTL,
- d->v_ins_hu[MBHC_V_IDX_CFILT] & 0xFF);
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B2_CTL,
- (d->v_ins_hu[MBHC_V_IDX_CFILT] >> 8) &
- 0xFF);
- if (mbhc->mbhc_state != MBHC_STATE_POTENTIAL_RECOVERY) {
- /* Revert threshods for button press */
- snd_soc_write(codec,
- WCD9XXX_A_CDC_MBHC_VOLT_B3_CTL,
- d->v_b1_hu[MBHC_V_IDX_CFILT] & 0xFF);
- snd_soc_write(codec,
- WCD9XXX_A_CDC_MBHC_VOLT_B4_CTL,
- (d->v_b1_hu[MBHC_V_IDX_CFILT] >> 8) &
- 0xFF);
- snd_soc_write(codec,
- WCD9XXX_A_CDC_MBHC_VOLT_B5_CTL,
- d->v_b1_h[MBHC_V_IDX_CFILT] & 0xFF);
- snd_soc_write(codec,
- WCD9XXX_A_CDC_MBHC_VOLT_B6_CTL,
- (d->v_b1_h[MBHC_V_IDX_CFILT] >> 8) &
- 0xFF);
- /* Revert threshods for button release */
- snd_soc_write(codec,
- WCD9XXX_A_CDC_MBHC_VOLT_B9_CTL,
- d->v_brh[MBHC_V_IDX_CFILT] & 0xFF);
- snd_soc_write(codec,
- WCD9XXX_A_CDC_MBHC_VOLT_B10_CTL,
- (d->v_brh[MBHC_V_IDX_CFILT] >> 8) &
- 0xFF);
- }
- pr_debug("%s: Programmed MBHC thresholds to MICBIAS\n",
- __func__);
- }
-
- /* Disable MIC BIAS Switch to VDDIO */
- snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.mbhc_reg, 0x80,
- 0x00);
- snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.mbhc_reg, 0x10,
- 0x00);
-
- if ((!checkpolling || mbhc->polling_active) && restartpolling)
- wcd9xxx_start_hs_polling(mbhc);
-
- mbhc->mbhc_micbias_switched = false;
- pr_debug("%s: VDDIO switch disabled\n", __func__);
- }
-
- return ret;
-}
-
-static void wcd9xxx_switch_micbias(struct wcd9xxx_mbhc *mbhc, int vddio_switch)
-{
- __wcd9xxx_switch_micbias(mbhc, vddio_switch, true, true);
-}
-
-static s16 wcd9xxx_get_current_v(struct wcd9xxx_mbhc *mbhc,
- const enum wcd9xxx_current_v_idx idx)
-{
- enum mbhc_v_index vidx;
- s16 ret = -EINVAL;
-
- if ((mbhc->mbhc_data.micb_mv != VDDIO_MICBIAS_MV) &&
- mbhc->mbhc_micbias_switched)
- vidx = MBHC_V_IDX_VDDIO;
- else
- vidx = MBHC_V_IDX_CFILT;
-
- switch (idx) {
- case WCD9XXX_CURRENT_V_INS_H:
- ret = (s16)mbhc->mbhc_data.v_ins_h[vidx];
- break;
- case WCD9XXX_CURRENT_V_INS_HU:
- ret = (s16)mbhc->mbhc_data.v_ins_hu[vidx];
- break;
- case WCD9XXX_CURRENT_V_B1_H:
- ret = (s16)mbhc->mbhc_data.v_b1_h[vidx];
- break;
- case WCD9XXX_CURRENT_V_B1_HU:
- ret = (s16)mbhc->mbhc_data.v_b1_hu[vidx];
- break;
- case WCD9XXX_CURRENT_V_BR_H:
- ret = (s16)mbhc->mbhc_data.v_brh[vidx];
- break;
- }
-
- return ret;
-}
-
-void *wcd9xxx_mbhc_cal_btn_det_mp(
- const struct wcd9xxx_mbhc_btn_detect_cfg *btn_det,
- const enum wcd9xxx_mbhc_btn_det_mem mem)
-{
- void *ret = (void *)&btn_det->_v_btn_low;
-
- switch (mem) {
- case MBHC_BTN_DET_GAIN:
- ret += sizeof(btn_det->_n_cic);
- /* fallthrough */
- case MBHC_BTN_DET_N_CIC:
- ret += sizeof(btn_det->_n_ready);
- /* fallthrough */
- case MBHC_BTN_DET_N_READY:
- ret += sizeof(btn_det->_v_btn_high[0]) * btn_det->num_btn;
- /* fallthrough */
- case MBHC_BTN_DET_V_BTN_HIGH:
- ret += sizeof(btn_det->_v_btn_low[0]) * btn_det->num_btn;
- /* fallthrough */
- case MBHC_BTN_DET_V_BTN_LOW:
- /* do nothing */
- break;
- default:
- ret = NULL;
- }
-
- return ret;
-}
-EXPORT_SYMBOL(wcd9xxx_mbhc_cal_btn_det_mp);
-
-static void wcd9xxx_calibrate_hs_polling(struct wcd9xxx_mbhc *mbhc)
-{
- struct snd_soc_codec *codec = mbhc->codec;
- const s16 v_ins_hu = wcd9xxx_get_current_v(mbhc,
- WCD9XXX_CURRENT_V_INS_HU);
- const s16 v_b1_hu = wcd9xxx_get_current_v(mbhc,
- WCD9XXX_CURRENT_V_B1_HU);
- const s16 v_b1_h = wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_B1_H);
- const s16 v_brh = wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_BR_H);
-
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B1_CTL, v_ins_hu & 0xFF);
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B2_CTL,
- (v_ins_hu >> 8) & 0xFF);
-
- if (mbhc->mbhc_state != MBHC_STATE_POTENTIAL_RECOVERY) {
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B3_CTL, v_b1_hu &
- 0xFF);
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B4_CTL,
- (v_b1_hu >> 8) & 0xFF);
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B5_CTL, v_b1_h &
- 0xFF);
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B6_CTL,
- (v_b1_h >> 8) & 0xFF);
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B9_CTL, v_brh &
- 0xFF);
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B10_CTL,
- (v_brh >> 8) & 0xFF);
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B11_CTL,
- mbhc->mbhc_data.v_brl & 0xFF);
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B12_CTL,
- (mbhc->mbhc_data.v_brl >> 8) & 0xFF);
- }
-}
-
-static void wcd9xxx_codec_switch_cfilt_mode(struct wcd9xxx_mbhc *mbhc,
- bool fast)
-{
- struct snd_soc_codec *codec = mbhc->codec;
- struct wcd9xxx_cfilt_mode cfilt_mode;
-
- if (mbhc->mbhc_cb && mbhc->mbhc_cb->switch_cfilt_mode) {
- cfilt_mode = mbhc->mbhc_cb->switch_cfilt_mode(mbhc, fast);
- } else {
- if (fast)
- cfilt_mode.reg_mode_val = WCD9XXX_CFILT_FAST_MODE;
- else
- cfilt_mode.reg_mode_val = WCD9XXX_CFILT_SLOW_MODE;
-
- cfilt_mode.reg_mask = 0x40;
- cfilt_mode.cur_mode_val =
- snd_soc_read(codec, mbhc->mbhc_bias_regs.cfilt_ctl) & 0x40;
- }
-
- if (cfilt_mode.cur_mode_val
- != cfilt_mode.reg_mode_val) {
- if (mbhc->polling_active && wcd9xxx_mbhc_polling(mbhc))
- wcd9xxx_pause_hs_polling(mbhc);
- snd_soc_update_bits(codec,
- mbhc->mbhc_bias_regs.cfilt_ctl,
- cfilt_mode.reg_mask,
- cfilt_mode.reg_mode_val);
- if (mbhc->polling_active && wcd9xxx_mbhc_polling(mbhc))
- wcd9xxx_start_hs_polling(mbhc);
- pr_debug("%s: CFILT mode change (%x to %x)\n", __func__,
- cfilt_mode.cur_mode_val,
- cfilt_mode.reg_mode_val);
- } else {
- pr_debug("%s: CFILT Value is already %x\n",
- __func__, cfilt_mode.cur_mode_val);
- }
-}
-
-static void wcd9xxx_jack_report(struct wcd9xxx_mbhc *mbhc,
- struct snd_soc_jack *jack, int status, int mask)
-{
- if (jack == &mbhc->headset_jack) {
- wcd9xxx_resmgr_cond_update_cond(mbhc->resmgr,
- WCD9XXX_COND_HPH_MIC,
- status & SND_JACK_MICROPHONE);
- wcd9xxx_resmgr_cond_update_cond(mbhc->resmgr,
- WCD9XXX_COND_HPH,
- status & SND_JACK_HEADPHONE);
- }
-
- snd_soc_jack_report(jack, status, mask);
-}
-
-static void __hphocp_off_report(struct wcd9xxx_mbhc *mbhc, u32 jack_status,
- int irq)
-{
- struct snd_soc_codec *codec;
-
- pr_debug("%s: clear ocp status %x\n", __func__, jack_status);
- codec = mbhc->codec;
- if (mbhc->hph_status & jack_status) {
- mbhc->hph_status &= ~jack_status;
- wcd9xxx_jack_report(mbhc, &mbhc->headset_jack,
- mbhc->hph_status, WCD9XXX_JACK_MASK);
- snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_OCP_CTL, 0x10,
- 0x00);
- snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_OCP_CTL, 0x10,
- 0x10);
- /*
- * reset retry counter as PA is turned off signifying
- * start of new OCP detection session
- */
- if (mbhc->intr_ids->hph_left_ocp)
- mbhc->hphlocp_cnt = 0;
- else
- mbhc->hphrocp_cnt = 0;
- wcd9xxx_enable_irq(mbhc->resmgr->core_res, irq);
- }
-}
-
-static void hphrocp_off_report(struct wcd9xxx_mbhc *mbhc, u32 jack_status)
-{
- __hphocp_off_report(mbhc, SND_JACK_OC_HPHR,
- mbhc->intr_ids->hph_right_ocp);
-}
-
-static void hphlocp_off_report(struct wcd9xxx_mbhc *mbhc, u32 jack_status)
-{
- __hphocp_off_report(mbhc, SND_JACK_OC_HPHL,
- mbhc->intr_ids->hph_left_ocp);
-}
-
-static void wcd9xxx_get_mbhc_micbias_regs(struct wcd9xxx_mbhc *mbhc,
- enum wcd9xxx_mbhc_micbias_type mb_type)
-{
- unsigned int cfilt;
- struct wcd9xxx_micbias_setting *micbias_pdata =
- mbhc->resmgr->micbias_pdata;
- struct mbhc_micbias_regs *micbias_regs;
- enum wcd9xxx_micbias_num mb_num;
-
- if (mb_type == MBHC_ANC_MIC_MB) {
- micbias_regs = &mbhc->mbhc_anc_bias_regs;
- mb_num = mbhc->mbhc_cfg->anc_micbias;
- } else {
- micbias_regs = &mbhc->mbhc_bias_regs;
- mb_num = mbhc->mbhc_cfg->micbias;
- }
-
- switch (mb_num) {
- case MBHC_MICBIAS1:
- cfilt = micbias_pdata->bias1_cfilt_sel;
- micbias_regs->mbhc_reg = WCD9XXX_A_MICB_1_MBHC;
- micbias_regs->int_rbias = WCD9XXX_A_MICB_1_INT_RBIAS;
- micbias_regs->ctl_reg = WCD9XXX_A_MICB_1_CTL;
- break;
- case MBHC_MICBIAS2:
- cfilt = micbias_pdata->bias2_cfilt_sel;
- micbias_regs->mbhc_reg = WCD9XXX_A_MICB_2_MBHC;
- micbias_regs->int_rbias = WCD9XXX_A_MICB_2_INT_RBIAS;
- micbias_regs->ctl_reg = WCD9XXX_A_MICB_2_CTL;
- break;
- case MBHC_MICBIAS3:
- cfilt = micbias_pdata->bias3_cfilt_sel;
- micbias_regs->mbhc_reg = WCD9XXX_A_MICB_3_MBHC;
- micbias_regs->int_rbias = WCD9XXX_A_MICB_3_INT_RBIAS;
- micbias_regs->ctl_reg = WCD9XXX_A_MICB_3_CTL;
- break;
- case MBHC_MICBIAS4:
- cfilt = micbias_pdata->bias4_cfilt_sel;
- micbias_regs->mbhc_reg = mbhc->resmgr->reg_addr->micb_4_mbhc;
- micbias_regs->int_rbias =
- mbhc->resmgr->reg_addr->micb_4_int_rbias;
- micbias_regs->ctl_reg = mbhc->resmgr->reg_addr->micb_4_ctl;
- break;
- default:
- /* Should never reach here */
- pr_err("%s: Invalid MIC BIAS for MBHC\n", __func__);
- return;
- }
-
- micbias_regs->cfilt_sel = cfilt;
-
- switch (cfilt) {
- case WCD9XXX_CFILT1_SEL:
- micbias_regs->cfilt_val = WCD9XXX_A_MICB_CFILT_1_VAL;
- micbias_regs->cfilt_ctl = WCD9XXX_A_MICB_CFILT_1_CTL;
- break;
- case WCD9XXX_CFILT2_SEL:
- micbias_regs->cfilt_val = WCD9XXX_A_MICB_CFILT_2_VAL;
- micbias_regs->cfilt_ctl = WCD9XXX_A_MICB_CFILT_2_CTL;
- break;
- case WCD9XXX_CFILT3_SEL:
- micbias_regs->cfilt_val = WCD9XXX_A_MICB_CFILT_3_VAL;
- micbias_regs->cfilt_ctl = WCD9XXX_A_MICB_CFILT_3_CTL;
- break;
- }
-
- if (mb_type == MBHC_PRIMARY_MIC_MB) {
- switch (cfilt) {
- case WCD9XXX_CFILT1_SEL:
- mbhc->mbhc_data.micb_mv = micbias_pdata->cfilt1_mv;
- break;
- case WCD9XXX_CFILT2_SEL:
- mbhc->mbhc_data.micb_mv = micbias_pdata->cfilt2_mv;
- break;
- case WCD9XXX_CFILT3_SEL:
- mbhc->mbhc_data.micb_mv = micbias_pdata->cfilt3_mv;
- break;
- }
- }
-
-}
-
-static void wcd9xxx_clr_and_turnon_hph_padac(struct wcd9xxx_mbhc *mbhc)
-{
- bool pa_turned_on = false;
- struct snd_soc_codec *codec = mbhc->codec;
- u8 wg_time;
-
- wg_time = snd_soc_read(codec, WCD9XXX_A_RX_HPH_CNP_WG_TIME);
- wg_time += 1;
-
- if (test_and_clear_bit(WCD9XXX_HPHR_DAC_OFF_ACK,
- &mbhc->hph_pa_dac_state)) {
- pr_debug("%s: HPHR clear flag and enable DAC\n", __func__);
- snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_R_DAC_CTL,
- 0xC0, 0xC0);
- }
- if (test_and_clear_bit(WCD9XXX_HPHL_DAC_OFF_ACK,
- &mbhc->hph_pa_dac_state)) {
- pr_debug("%s: HPHL clear flag and enable DAC\n", __func__);
- snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_L_DAC_CTL,
- 0x80, 0x80);
- }
-
- if (test_and_clear_bit(WCD9XXX_HPHR_PA_OFF_ACK,
- &mbhc->hph_pa_dac_state)) {
- pr_debug("%s: HPHR clear flag and enable PA\n", __func__);
- snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_CNP_EN, 0x10,
- 1 << 4);
- pa_turned_on = true;
- }
- if (test_and_clear_bit(WCD9XXX_HPHL_PA_OFF_ACK,
- &mbhc->hph_pa_dac_state)) {
- pr_debug("%s: HPHL clear flag and enable PA\n", __func__);
- snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_CNP_EN, 0x20, 1
- << 5);
- pa_turned_on = true;
- }
-
- if (pa_turned_on) {
- pr_debug("%s: PA was turned on by MBHC and not by DAPM\n",
- __func__);
- usleep_range(wg_time * 1000, wg_time * 1000 + 50);
- }
-}
-
-static int wcd9xxx_cancel_btn_work(struct wcd9xxx_mbhc *mbhc)
-{
- int r;
-
- r = cancel_delayed_work_sync(&mbhc->mbhc_btn_dwork);
- if (r)
- /* if scheduled mbhc.mbhc_btn_dwork is canceled from here,
- * we have to unlock from here instead btn_work
- */
- wcd9xxx_unlock_sleep(mbhc->resmgr->core_res);
- return r;
-}
-
-static bool wcd9xxx_is_hph_dac_on(struct snd_soc_codec *codec, int left)
-{
- u8 hph_reg_val = 0;
-
- if (left)
- hph_reg_val = snd_soc_read(codec, WCD9XXX_A_RX_HPH_L_DAC_CTL);
- else
- hph_reg_val = snd_soc_read(codec, WCD9XXX_A_RX_HPH_R_DAC_CTL);
-
- return (hph_reg_val & 0xC0) ? true : false;
-}
-
-static bool wcd9xxx_is_hph_pa_on(struct snd_soc_codec *codec)
-{
- u8 hph_reg_val = 0;
-
- hph_reg_val = snd_soc_read(codec, WCD9XXX_A_RX_HPH_CNP_EN);
-
- return (hph_reg_val & 0x30) ? true : false;
-}
-
-/* called under codec_resource_lock acquisition */
-static void wcd9xxx_set_and_turnoff_hph_padac(struct wcd9xxx_mbhc *mbhc)
-{
- u8 wg_time;
- struct snd_soc_codec *codec = mbhc->codec;
-
- wg_time = snd_soc_read(codec, WCD9XXX_A_RX_HPH_CNP_WG_TIME);
- wg_time += 1;
-
- /* If headphone PA is on, check if userspace receives
- * removal event to sync-up PA's state
- */
- if (wcd9xxx_is_hph_pa_on(codec)) {
- pr_debug("%s PA is on, setting PA_OFF_ACK\n", __func__);
- set_bit(WCD9XXX_HPHL_PA_OFF_ACK, &mbhc->hph_pa_dac_state);
- set_bit(WCD9XXX_HPHR_PA_OFF_ACK, &mbhc->hph_pa_dac_state);
- } else {
- pr_debug("%s PA is off\n", __func__);
- }
-
- if (wcd9xxx_is_hph_dac_on(codec, 1))
- set_bit(WCD9XXX_HPHL_DAC_OFF_ACK, &mbhc->hph_pa_dac_state);
- if (wcd9xxx_is_hph_dac_on(codec, 0))
- set_bit(WCD9XXX_HPHR_DAC_OFF_ACK, &mbhc->hph_pa_dac_state);
-
- snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_CNP_EN, 0x30, 0x00);
- snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_L_DAC_CTL, 0x80, 0x00);
- snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_R_DAC_CTL, 0xC0, 0x00);
- usleep_range(wg_time * 1000, wg_time * 1000 + 50);
-}
-
-static void wcd9xxx_insert_detect_setup(struct wcd9xxx_mbhc *mbhc, bool ins)
-{
- if (!mbhc->mbhc_cfg->insert_detect)
- return;
- pr_debug("%s: Setting up %s detection\n", __func__,
- ins ? "insert" : "removal");
- /* Disable detection to avoid glitch */
- snd_soc_update_bits(mbhc->codec, WCD9XXX_A_MBHC_INSERT_DETECT, 1, 0);
- if (mbhc->mbhc_cfg->gpio_level_insert)
- snd_soc_write(mbhc->codec, WCD9XXX_A_MBHC_INSERT_DETECT,
- (0x68 | (ins ? (1 << 1) : 0)));
- else
- snd_soc_write(mbhc->codec, WCD9XXX_A_MBHC_INSERT_DETECT,
- (0x6C | (ins ? (1 << 1) : 0)));
- /* Re-enable detection */
- snd_soc_update_bits(mbhc->codec, WCD9XXX_A_MBHC_INSERT_DETECT, 1, 1);
-}
-
-/* called under codec_resource_lock acquisition */
-static void wcd9xxx_report_plug(struct wcd9xxx_mbhc *mbhc, int insertion,
- enum snd_jack_types jack_type)
-{
- WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
-
- pr_debug("%s: enter insertion %d hph_status %x\n",
- __func__, insertion, mbhc->hph_status);
- if (!insertion) {
- /* Report removal */
- mbhc->hph_status &= ~jack_type;
- /*
- * cancel possibly scheduled btn work and
- * report release if we reported button press
- */
- if (wcd9xxx_cancel_btn_work(mbhc))
- pr_debug("%s: button press is canceled\n", __func__);
- else if (mbhc->buttons_pressed) {
- pr_debug("%s: release of button press%d\n",
- __func__, jack_type);
- wcd9xxx_jack_report(mbhc, &mbhc->button_jack, 0,
- mbhc->buttons_pressed);
- mbhc->buttons_pressed &=
- ~WCD9XXX_JACK_BUTTON_MASK;
- }
-
- if (mbhc->micbias_enable && mbhc->micbias_enable_cb) {
- pr_debug("%s: Disabling micbias\n", __func__);
- mbhc->micbias_enable = false;
- mbhc->micbias_enable_cb(mbhc->codec, false,
- mbhc->mbhc_cfg->micbias);
- }
- mbhc->zl = mbhc->zr = 0;
- mbhc->hph_type = MBHC_HPH_NONE;
- pr_debug("%s: Reporting removal %d(%x)\n", __func__,
- jack_type, mbhc->hph_status);
- wcd9xxx_jack_report(mbhc, &mbhc->headset_jack, mbhc->hph_status,
- WCD9XXX_JACK_MASK);
- wcd9xxx_set_and_turnoff_hph_padac(mbhc);
- hphrocp_off_report(mbhc, SND_JACK_OC_HPHR);
- hphlocp_off_report(mbhc, SND_JACK_OC_HPHL);
- mbhc->current_plug = PLUG_TYPE_NONE;
- mbhc->polling_active = false;
- if (mbhc->mbhc_cb && mbhc->mbhc_cb->hph_auto_pulldown_ctrl)
- mbhc->mbhc_cb->hph_auto_pulldown_ctrl(mbhc->codec,
- false);
- } else {
- /*
- * Report removal of current jack type.
- * Headphone to headset shouldn't report headphone
- * removal.
- */
- if (mbhc->mbhc_cfg->detect_extn_cable &&
- !(mbhc->current_plug == PLUG_TYPE_HEADPHONE &&
- jack_type == SND_JACK_HEADSET) &&
- (mbhc->hph_status && mbhc->hph_status != jack_type)) {
- if (mbhc->micbias_enable && mbhc->micbias_enable_cb &&
- mbhc->hph_status == SND_JACK_HEADSET) {
- pr_debug("%s: Disabling micbias\n", __func__);
- mbhc->micbias_enable = false;
- mbhc->micbias_enable_cb(mbhc->codec, false,
- mbhc->mbhc_cfg->micbias);
- }
-
- pr_debug("%s: Reporting removal (%x)\n",
- __func__, mbhc->hph_status);
- mbhc->zl = mbhc->zr = 0;
- wcd9xxx_jack_report(mbhc, &mbhc->headset_jack,
- 0, WCD9XXX_JACK_MASK);
- mbhc->hph_status &= ~(SND_JACK_HEADSET |
- SND_JACK_LINEOUT |
- SND_JACK_ANC_HEADPHONE |
- SND_JACK_UNSUPPORTED);
- if (mbhc->mbhc_cb &&
- mbhc->mbhc_cb->hph_auto_pulldown_ctrl)
- mbhc->mbhc_cb->hph_auto_pulldown_ctrl(
- mbhc->codec,
- false);
- }
-
- /* Report insertion */
- if (jack_type == SND_JACK_HEADPHONE) {
- mbhc->current_plug = PLUG_TYPE_HEADPHONE;
- } else if (jack_type == SND_JACK_UNSUPPORTED) {
- mbhc->current_plug = PLUG_TYPE_GND_MIC_SWAP;
- } else if (jack_type == SND_JACK_HEADSET) {
- mbhc->polling_active = BUTTON_POLLING_SUPPORTED;
- mbhc->current_plug = PLUG_TYPE_HEADSET;
- mbhc->update_z = true;
- } else if (jack_type == SND_JACK_LINEOUT) {
- mbhc->current_plug = PLUG_TYPE_HIGH_HPH;
- } else if (jack_type == SND_JACK_ANC_HEADPHONE) {
- mbhc->polling_active = BUTTON_POLLING_SUPPORTED;
- mbhc->current_plug = PLUG_TYPE_ANC_HEADPHONE;
- }
-
- if (mbhc->impedance_detect && impedance_detect_en) {
- wcd9xxx_detect_impedance(mbhc,
- &mbhc->zl, &mbhc->zr);
- if ((mbhc->zl > WCD9XXX_LINEIN_THRESHOLD) &&
- (mbhc->zr > WCD9XXX_LINEIN_THRESHOLD)) {
- jack_type = SND_JACK_LINEOUT;
- mbhc->current_plug = PLUG_TYPE_HIGH_HPH;
- pr_debug("%s: Replace with SND_JACK_LINEOUT\n",
- __func__);
- }
- }
-
- mbhc->hph_status |= jack_type;
-
- if (mbhc->micbias_enable && mbhc->micbias_enable_cb) {
- pr_debug("%s: Enabling micbias\n", __func__);
- mbhc->micbias_enable_cb(mbhc->codec, true,
- mbhc->mbhc_cfg->micbias);
- }
-
- pr_debug("%s: Reporting insertion %d(%x)\n", __func__,
- jack_type, mbhc->hph_status);
- wcd9xxx_jack_report(mbhc, &mbhc->headset_jack,
- (mbhc->hph_status | SND_JACK_MECHANICAL),
- WCD9XXX_JACK_MASK);
- /*
- * if PA is already on, switch micbias
- * source to VDDIO
- */
- if (((mbhc->current_plug == PLUG_TYPE_HEADSET) ||
- (mbhc->current_plug == PLUG_TYPE_ANC_HEADPHONE)) &&
- ((mbhc->event_state & (1 << MBHC_EVENT_PA_HPHL |
- 1 << MBHC_EVENT_PA_HPHR))))
- __wcd9xxx_switch_micbias(mbhc, 1, false,
- false);
- wcd9xxx_clr_and_turnon_hph_padac(mbhc);
- }
- /* Setup insert detect */
- wcd9xxx_insert_detect_setup(mbhc, !insertion);
-
- pr_debug("%s: leave hph_status %x\n", __func__, mbhc->hph_status);
-}
-
-/* should be called under interrupt context that hold suspend */
-static void wcd9xxx_schedule_hs_detect_plug(struct wcd9xxx_mbhc *mbhc,
- struct work_struct *work)
-{
- pr_debug("%s: scheduling wcd9xxx_correct_swch_plug\n", __func__);
- WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
- mbhc->hs_detect_work_stop = false;
- wcd9xxx_lock_sleep(mbhc->resmgr->core_res);
- schedule_work(work);
-}
-
-/* called under codec_resource_lock acquisition */
-static void wcd9xxx_cancel_hs_detect_plug(struct wcd9xxx_mbhc *mbhc,
- struct work_struct *work)
-{
- pr_debug("%s: Canceling correct_plug_swch\n", __func__);
- WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
- mbhc->hs_detect_work_stop = true;
-
- /* Make sure mbhc state update complete before unlocking. */
- wmb();
- WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
- if (cancel_work_sync(work)) {
- pr_debug("%s: correct_plug_swch is canceled\n",
- __func__);
- wcd9xxx_unlock_sleep(mbhc->resmgr->core_res);
- }
- WCD9XXX_BCL_LOCK(mbhc->resmgr);
-}
-
-static s16 scale_v_micb_vddio(struct wcd9xxx_mbhc *mbhc, int v, bool tovddio)
-{
- int r;
- int vddio_k, mb_k;
-
- vddio_k = __wcd9xxx_resmgr_get_k_val(mbhc, VDDIO_MICBIAS_MV);
- mb_k = __wcd9xxx_resmgr_get_k_val(mbhc, mbhc->mbhc_data.micb_mv);
- if (tovddio)
- r = v * (vddio_k + 4) / (mb_k + 4);
- else
- r = v * (mb_k + 4) / (vddio_k + 4);
- return r;
-}
-
-static s16 wcd9xxx_get_current_v_hs_max(struct wcd9xxx_mbhc *mbhc)
-{
- s16 v_hs_max;
- struct wcd9xxx_mbhc_plug_type_cfg *plug_type;
-
- plug_type = WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(mbhc->mbhc_cfg->calibration);
- if ((mbhc->mbhc_data.micb_mv != VDDIO_MICBIAS_MV) &&
- mbhc->mbhc_micbias_switched)
- v_hs_max = scale_v_micb_vddio(mbhc, plug_type->v_hs_max, true);
- else
- v_hs_max = plug_type->v_hs_max;
- return v_hs_max;
-}
-
-static short wcd9xxx_read_sta_result(struct snd_soc_codec *codec)
-{
- u8 bias_msb, bias_lsb;
- short bias_value;
-
- bias_msb = snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B3_STATUS);
- bias_lsb = snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B2_STATUS);
- bias_value = (bias_msb << 8) | bias_lsb;
- return bias_value;
-}
-
-static short wcd9xxx_read_dce_result(struct snd_soc_codec *codec)
-{
- u8 bias_msb, bias_lsb;
- short bias_value;
-
- bias_msb = snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B5_STATUS);
- bias_lsb = snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B4_STATUS);
- bias_value = (bias_msb << 8) | bias_lsb;
- return bias_value;
-}
-
-static void wcd9xxx_turn_onoff_rel_detection(struct snd_soc_codec *codec,
- bool on)
-{
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x02, on << 1);
-}
-
-static short __wcd9xxx_codec_sta_dce(struct wcd9xxx_mbhc *mbhc, int dce,
- bool override_bypass, bool noreldetection)
-{
- short bias_value;
- struct snd_soc_codec *codec = mbhc->codec;
-
- wcd9xxx_disable_irq(mbhc->resmgr->core_res,
- mbhc->intr_ids->dce_est_complete);
- if (noreldetection)
- wcd9xxx_turn_onoff_rel_detection(codec, false);
-
- if (mbhc->mbhc_cfg->do_recalibration)
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x2,
- 0x0);
- /* Turn on the override */
- if (!override_bypass)
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x4, 0x4);
- if (dce) {
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8,
- 0x8);
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x4);
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8,
- 0x0);
- if (mbhc->mbhc_cfg->do_recalibration)
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL,
- 0x2, 0x2);
- usleep_range(mbhc->mbhc_data.t_sta_dce,
- mbhc->mbhc_data.t_sta_dce + 50);
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x4);
- usleep_range(mbhc->mbhc_data.t_dce, mbhc->mbhc_data.t_dce + 50);
- bias_value = wcd9xxx_read_dce_result(codec);
- } else {
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8,
- 0x8);
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x2);
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8,
- 0x0);
- if (mbhc->mbhc_cfg->do_recalibration)
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL,
- 0x2, 0x2);
- usleep_range(mbhc->mbhc_data.t_sta_dce,
- mbhc->mbhc_data.t_sta_dce + 50);
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x2);
- usleep_range(mbhc->mbhc_data.t_sta,
- mbhc->mbhc_data.t_sta + 50);
- bias_value = wcd9xxx_read_sta_result(codec);
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8,
- 0x8);
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x0);
- }
- /* Turn off the override after measuring mic voltage */
- if (!override_bypass)
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x04,
- 0x00);
-
- if (noreldetection)
- wcd9xxx_turn_onoff_rel_detection(codec, true);
- wcd9xxx_enable_irq(mbhc->resmgr->core_res,
- mbhc->intr_ids->dce_est_complete);
-
- return bias_value;
-}
-
-static short wcd9xxx_codec_sta_dce(struct wcd9xxx_mbhc *mbhc, int dce,
- bool norel)
-{
- bool override_bypass;
-
- /* Bypass override if it is already enabled */
- override_bypass = (snd_soc_read(mbhc->codec,
- WCD9XXX_A_CDC_MBHC_B1_CTL) &
- 0x04) ? true : false;
-
- return __wcd9xxx_codec_sta_dce(mbhc, dce, override_bypass, norel);
-}
-
-static s32 __wcd9xxx_codec_sta_dce_v(struct wcd9xxx_mbhc *mbhc, s8 dce,
- u16 bias_value, s16 z, u32 micb_mv)
-{
- s16 value, mb;
- s32 mv = 0;
-
- value = bias_value;
- if (dce) {
- mb = (mbhc->mbhc_data.dce_mb);
- if (mb - z)
- mv = (value - z) * (s32)micb_mv / (mb - z);
- } else {
- mb = (mbhc->mbhc_data.sta_mb);
- if (mb - z)
- mv = (value - z) * (s32)micb_mv / (mb - z);
- }
-
- return mv;
-}
-
-static s32 wcd9xxx_codec_sta_dce_v(struct wcd9xxx_mbhc *mbhc, s8 dce,
- u16 bias_value)
-{
- s16 z;
-
- z = dce ? (s16)mbhc->mbhc_data.dce_z : (s16)mbhc->mbhc_data.sta_z;
- return __wcd9xxx_codec_sta_dce_v(mbhc, dce, bias_value, z,
- mbhc->mbhc_data.micb_mv);
-}
-
-/* To enable/disable bandgap and RC oscillator */
-static void wcd9xxx_mbhc_ctrl_clk_bandgap(struct wcd9xxx_mbhc *mbhc,
- bool enable)
-{
- if (enable) {
- WCD9XXX_BG_CLK_LOCK(mbhc->resmgr);
- wcd9xxx_resmgr_get_bandgap(mbhc->resmgr,
- WCD9XXX_BANDGAP_AUDIO_MODE);
- if (mbhc->mbhc_cb && mbhc->mbhc_cb->codec_rco_ctrl) {
- WCD9XXX_BG_CLK_UNLOCK(mbhc->resmgr);
- mbhc->mbhc_cb->codec_rco_ctrl(mbhc->codec, true);
- } else {
- wcd9xxx_resmgr_get_clk_block(mbhc->resmgr,
- WCD9XXX_CLK_RCO);
- WCD9XXX_BG_CLK_UNLOCK(mbhc->resmgr);
- }
- } else {
- if (mbhc->mbhc_cb && mbhc->mbhc_cb->codec_rco_ctrl) {
- mbhc->mbhc_cb->codec_rco_ctrl(mbhc->codec, false);
- WCD9XXX_BG_CLK_LOCK(mbhc->resmgr);
- } else {
- WCD9XXX_BG_CLK_LOCK(mbhc->resmgr);
- wcd9xxx_resmgr_put_clk_block(mbhc->resmgr,
- WCD9XXX_CLK_RCO);
- }
- wcd9xxx_resmgr_put_bandgap(mbhc->resmgr,
- WCD9XXX_BANDGAP_AUDIO_MODE);
- WCD9XXX_BG_CLK_UNLOCK(mbhc->resmgr);
- }
-}
-
-/* called only from interrupt which is under codec_resource_lock acquisition */
-static short wcd9xxx_mbhc_setup_hs_polling(struct wcd9xxx_mbhc *mbhc,
- struct mbhc_micbias_regs *mbhc_micb_regs,
- bool is_cs_enable)
-{
- struct snd_soc_codec *codec = mbhc->codec;
- short bias_value;
- u8 cfilt_mode;
-
- WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
-
- pr_debug("%s: enter\n", __func__);
- if (!mbhc->mbhc_cfg->calibration) {
- pr_err("%s: Error, no calibration exists\n", __func__);
- return -ENODEV;
- }
-
- /* Enable external voltage source to micbias if present */
- if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mb_source)
- mbhc->mbhc_cb->enable_mb_source(codec, true, true);
-
- /*
- * setup internal micbias if codec uses internal micbias for
- * headset detection
- */
- if (mbhc->mbhc_cfg->use_int_rbias) {
- if (mbhc->mbhc_cb && mbhc->mbhc_cb->setup_int_rbias)
- mbhc->mbhc_cb->setup_int_rbias(codec, true);
- else
- pr_err("%s: internal bias requested but codec did not provide callback\n",
- __func__);
- }
-
- snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1, 0x05, 0x01);
-
- /* Make sure CFILT is in fast mode, save current mode */
- cfilt_mode = snd_soc_read(codec, mbhc_micb_regs->cfilt_ctl);
- if (mbhc->mbhc_cb && mbhc->mbhc_cb->cfilt_fast_mode)
- mbhc->mbhc_cb->cfilt_fast_mode(codec, mbhc);
- else
- snd_soc_update_bits(codec, mbhc_micb_regs->cfilt_ctl,
- 0x70, 0x00);
-
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x2, 0x2);
- snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1,
- mbhc->scaling_mux_in);
- pr_debug("%s: scaling_mux_input: %d\n", __func__,
- mbhc->scaling_mux_in);
-
- if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mux_bias_block)
- mbhc->mbhc_cb->enable_mux_bias_block(codec);
- else
- snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_1,
- 0x80, 0x80);
-
- snd_soc_update_bits(codec, WCD9XXX_A_TX_7_MBHC_EN, 0x80, 0x80);
- snd_soc_update_bits(codec, WCD9XXX_A_TX_7_MBHC_EN, 0x1F, 0x1C);
- snd_soc_update_bits(codec, WCD9XXX_A_TX_7_MBHC_TEST_CTL, 0x40, 0x40);
-
- snd_soc_update_bits(codec, WCD9XXX_A_TX_7_MBHC_EN, 0x80, 0x00);
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8, 0x8);
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8, 0x00);
-
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x2, 0x2);
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8, 0x8);
-
- if (!mbhc->mbhc_cfg->do_recalibration) {
- if (!is_cs_enable)
- wcd9xxx_calibrate_hs_polling(mbhc);
- }
-
- /* don't flip override */
- bias_value = __wcd9xxx_codec_sta_dce(mbhc, 1, true, true);
- snd_soc_write(codec, mbhc_micb_regs->cfilt_ctl, cfilt_mode);
- snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x13, 0x00);
-
- return bias_value;
-}
-
-static void wcd9xxx_recalibrate(struct wcd9xxx_mbhc *mbhc,
- struct mbhc_micbias_regs *mbhc_micb_regs,
- bool is_cs_enable)
-{
- struct snd_soc_codec *codec = mbhc->codec;
- s16 reg;
- int change;
- struct wcd9xxx_mbhc_btn_detect_cfg *btn_det;
- s16 sta_z = 0, dce_z = 0;
-
- btn_det = WCD9XXX_MBHC_CAL_BTN_DET_PTR(mbhc->mbhc_cfg->calibration);
-
- if (mbhc->mbhc_cfg->do_recalibration) {
- /* recalibrate dce_z and sta_z */
- reg = snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B1_CTL);
- change = snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL,
- 0x78, btn_det->mbhc_nsc << 3);
- wcd9xxx_get_z(mbhc, &dce_z, &sta_z, mbhc_micb_regs, true);
- if (change)
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, reg);
- if (dce_z && sta_z) {
- pr_debug("%s: sta_z 0x%x -> 0x%x, dce_z 0x%x -> 0x%x\n",
- __func__,
- mbhc->mbhc_data.sta_z, sta_z & 0xffff,
- mbhc->mbhc_data.dce_z, dce_z & 0xffff);
- mbhc->mbhc_data.dce_z = dce_z;
- mbhc->mbhc_data.sta_z = sta_z;
- wcd9xxx_mbhc_calc_thres(mbhc);
- wcd9xxx_calibrate_hs_polling(mbhc);
- } else {
- pr_warn("%s: failed get new dce_z/sta_z 0x%x/0x%x\n",
- __func__, dce_z, sta_z);
- }
-
- if (is_cs_enable) {
- /* recalibrate dce_nsc_cs_z */
- reg = snd_soc_read(mbhc->codec,
- WCD9XXX_A_CDC_MBHC_B1_CTL);
- snd_soc_update_bits(mbhc->codec,
- WCD9XXX_A_CDC_MBHC_B1_CTL,
- 0x78, WCD9XXX_MBHC_NSC_CS << 3);
- wcd9xxx_get_z(mbhc, &dce_z, NULL, mbhc_micb_regs,
- true);
- snd_soc_write(mbhc->codec, WCD9XXX_A_CDC_MBHC_B1_CTL,
- reg);
- if (dce_z) {
- mbhc->mbhc_data.dce_nsc_cs_z = dce_z;
- /* update v_cs_ins_h with new dce_nsc_cs_z */
- mbhc->mbhc_data.v_cs_ins_h =
- wcd9xxx_codec_v_sta_dce(
- mbhc, DCE,
- WCD9XXX_V_CS_HS_MAX,
- is_cs_enable);
- pr_debug("%s: dce_nsc_cs_z 0x%x -> 0x%x, v_cs_ins_h 0x%x\n",
- __func__,
- mbhc->mbhc_data.dce_nsc_cs_z,
- dce_z & 0xffff,
- mbhc->mbhc_data.v_cs_ins_h);
- } else {
- pr_debug("%s: failed get new dce_nsc_cs_z\n",
- __func__);
- }
- }
- }
-}
-
-static void wcd9xxx_shutdown_hs_removal_detect(struct wcd9xxx_mbhc *mbhc)
-{
- struct snd_soc_codec *codec = mbhc->codec;
- const struct wcd9xxx_mbhc_general_cfg *generic =
- WCD9XXX_MBHC_CAL_GENERAL_PTR(mbhc->mbhc_cfg->calibration);
-
- /* Need MBHC clock */
- if (mbhc->mbhc_cb && mbhc->mbhc_cb->codec_rco_ctrl)
- mbhc->mbhc_cb->codec_rco_ctrl(mbhc->codec, true);
- else {
- WCD9XXX_BG_CLK_LOCK(mbhc->resmgr);
- wcd9xxx_resmgr_get_clk_block(mbhc->resmgr, WCD9XXX_CLK_RCO);
- WCD9XXX_BG_CLK_UNLOCK(mbhc->resmgr);
- }
-
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x2, 0x2);
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x6, 0x0);
- __wcd9xxx_switch_micbias(mbhc, 0, false, false);
-
- usleep_range(generic->t_shutdown_plug_rem,
- generic->t_shutdown_plug_rem + 50);
-
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0xA, 0x8);
-
- if (mbhc->mbhc_cb && mbhc->mbhc_cb->codec_rco_ctrl)
- mbhc->mbhc_cb->codec_rco_ctrl(mbhc->codec, false);
- else {
- WCD9XXX_BG_CLK_LOCK(mbhc->resmgr);
- /* Put requested CLK back */
- wcd9xxx_resmgr_put_clk_block(mbhc->resmgr, WCD9XXX_CLK_RCO);
- WCD9XXX_BG_CLK_UNLOCK(mbhc->resmgr);
- }
-
- snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0x00);
-}
-
-static void wcd9xxx_cleanup_hs_polling(struct wcd9xxx_mbhc *mbhc)
-{
-
- pr_debug("%s: enter\n", __func__);
- WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
-
- wcd9xxx_shutdown_hs_removal_detect(mbhc);
-
-
- /* Disable external voltage source to micbias if present */
- if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mb_source)
- mbhc->mbhc_cb->enable_mb_source(mbhc->codec, false, true);
-
- mbhc->polling_active = false;
- mbhc->mbhc_state = MBHC_STATE_NONE;
- pr_debug("%s: leave\n", __func__);
-}
-
-/* called under codec_resource_lock acquisition */
-static void wcd9xxx_codec_hphr_gnd_switch(struct snd_soc_codec *codec, bool on)
-{
- snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x01, on);
- if (on)
- usleep_range(5000, 5100);
-}
-
-static void wcd9xxx_onoff_vddio_switch(struct wcd9xxx_mbhc *mbhc, bool on)
-{
- pr_debug("%s: vddio %d\n", __func__, on);
-
- if (mbhc->mbhc_cb && mbhc->mbhc_cb->pull_mb_to_vddio) {
- mbhc->mbhc_cb->pull_mb_to_vddio(mbhc->codec, on);
- goto exit;
- }
-
- if (on) {
- snd_soc_update_bits(mbhc->codec, mbhc->mbhc_bias_regs.mbhc_reg,
- 1 << 7, 1 << 7);
- snd_soc_update_bits(mbhc->codec, WCD9XXX_A_MAD_ANA_CTRL,
- 1 << 4, 0);
- } else {
- snd_soc_update_bits(mbhc->codec, WCD9XXX_A_MAD_ANA_CTRL,
- 1 << 4, 1 << 4);
- snd_soc_update_bits(mbhc->codec, mbhc->mbhc_bias_regs.mbhc_reg,
- 1 << 7, 0);
- }
-
-exit:
- /*
- * Wait for the micbias to settle down to vddio
- * when the micbias to vddio switch is enabled.
- */
- if (on)
- usleep_range(10000, 10100);
-}
-
-static int wcd9xxx_hphl_status(struct wcd9xxx_mbhc *mbhc)
-{
- u16 hph, status;
- struct snd_soc_codec *codec = mbhc->codec;
-
- WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
- hph = snd_soc_read(codec, WCD9XXX_A_MBHC_HPH);
- snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x12, 0x02);
- usleep_range(WCD9XXX_HPHL_STATUS_READY_WAIT_US,
- WCD9XXX_HPHL_STATUS_READY_WAIT_US +
- WCD9XXX_USLEEP_RANGE_MARGIN_US);
- status = snd_soc_read(codec, WCD9XXX_A_RX_HPH_L_STATUS);
- snd_soc_write(codec, WCD9XXX_A_MBHC_HPH, hph);
- return status;
-}
-
-static enum wcd9xxx_mbhc_plug_type
-wcd9xxx_cs_find_plug_type(struct wcd9xxx_mbhc *mbhc,
- struct wcd9xxx_mbhc_detect *dt, const int size,
- bool highhph,
- unsigned long event_state)
-{
- int i;
- int vdce, mb_mv;
- int ch, sz, delta_thr;
- int minv = 0, maxv = INT_MIN;
- struct wcd9xxx_mbhc_detect *d = dt;
- struct wcd9xxx_mbhc_detect *dprev = d, *dmicbias = NULL, *dgnd = NULL;
- enum wcd9xxx_mbhc_plug_type type = PLUG_TYPE_INVALID;
-
- const struct wcd9xxx_mbhc_plug_type_cfg *plug_type =
- WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(mbhc->mbhc_cfg->calibration);
- s16 hs_max, no_mic, dce_z;
- int highhph_cnt = 0;
-
- pr_debug("%s: enter\n", __func__);
- pr_debug("%s: event_state 0x%lx\n", __func__, event_state);
-
- sz = size - 1;
- for (i = 0, d = dt, ch = 0; i < sz; i++, d++) {
- if (d->mic_bias) {
- dce_z = mbhc->mbhc_data.dce_z;
- mb_mv = mbhc->mbhc_data.micb_mv;
- hs_max = plug_type->v_hs_max;
- no_mic = plug_type->v_no_mic;
- } else {
- dce_z = mbhc->mbhc_data.dce_nsc_cs_z;
- mb_mv = VDDIO_MICBIAS_MV;
- hs_max = WCD9XXX_V_CS_HS_MAX;
- no_mic = WCD9XXX_V_CS_NO_MIC;
- }
-
- vdce = __wcd9xxx_codec_sta_dce_v(mbhc, true, d->dce,
- dce_z, (u32)mb_mv);
- d->_vdces = vdce;
- if (d->_vdces < no_mic)
- d->_type = PLUG_TYPE_HEADPHONE;
- else if (d->_vdces >= hs_max) {
- d->_type = PLUG_TYPE_HIGH_HPH;
- highhph_cnt++;
- } else
- d->_type = PLUG_TYPE_HEADSET;
-
- pr_debug("%s: DCE #%d, %04x, V %04d(%04d), HPHL %d TYPE %d\n",
- __func__, i, d->dce, vdce, d->_vdces,
- d->hphl_status & 0x01,
- d->_type);
-
- ch += d->hphl_status & 0x01;
- if (!d->swap_gnd && !d->mic_bias) {
- if (maxv < d->_vdces)
- maxv = d->_vdces;
- if (!minv || minv > d->_vdces)
- minv = d->_vdces;
- }
- if ((!d->mic_bias &&
- (d->_vdces >= WCD9XXX_CS_MEAS_INVALD_RANGE_LOW_MV &&
- d->_vdces <= WCD9XXX_CS_MEAS_INVALD_RANGE_HIGH_MV)) ||
- (d->mic_bias &&
- (d->_vdces >= WCD9XXX_MEAS_INVALD_RANGE_LOW_MV &&
- d->_vdces <= WCD9XXX_MEAS_INVALD_RANGE_HIGH_MV))) {
- pr_debug("%s: within invalid range\n", __func__);
- type = PLUG_TYPE_INVALID;
- goto exit;
- }
- }
-
- delta_thr = ((highhph_cnt == sz) || highhph) ?
- WCD9XXX_MB_MEAS_DELTA_MAX_MV :
- WCD9XXX_CS_MEAS_DELTA_MAX_MV;
-
- for (i = 0, d = dt; i < sz; i++, d++) {
- if ((i > 0) && !d->mic_bias && !d->swap_gnd &&
- (d->_type != dprev->_type)) {
- pr_debug("%s: Invalid, inconsistent types\n", __func__);
- type = PLUG_TYPE_INVALID;
- goto exit;
- }
-
- if (!d->swap_gnd && !d->mic_bias &&
- (abs(minv - d->_vdces) > delta_thr ||
- abs(maxv - d->_vdces) > delta_thr)) {
- pr_debug("%s: Invalid, delta %dmv, %dmv and %dmv\n",
- __func__, d->_vdces, minv, maxv);
- type = PLUG_TYPE_INVALID;
- goto exit;
- } else if (d->swap_gnd) {
- dgnd = d;
- }
-
- if (!d->mic_bias && !d->swap_gnd)
- dprev = d;
- else if (d->mic_bias)
- dmicbias = d;
- }
- if (dgnd && dt->_type != PLUG_TYPE_HEADSET &&
- dt->_type != dgnd->_type) {
- pr_debug("%s: Invalid, inconsistent types\n", __func__);
- type = PLUG_TYPE_INVALID;
- goto exit;
- }
-
- type = dt->_type;
- if (dmicbias) {
- if (dmicbias->_type == PLUG_TYPE_HEADSET &&
- (dt->_type == PLUG_TYPE_HIGH_HPH ||
- dt->_type == PLUG_TYPE_HEADSET)) {
- type = PLUG_TYPE_HEADSET;
- if (dt->_type == PLUG_TYPE_HIGH_HPH) {
- pr_debug("%s: Headset with threshold on MIC detected\n",
- __func__);
- if (mbhc->mbhc_cfg->micbias_enable_flags &
- (1 << MBHC_MICBIAS_ENABLE_THRESHOLD_HEADSET))
- mbhc->micbias_enable = true;
- }
- }
- }
-
- if (type == PLUG_TYPE_HEADSET && dgnd && !dgnd->mic_bias) {
- /* if plug type is Headphone report as GND_MIC_SWAP */
- if (dgnd->_type == PLUG_TYPE_HEADPHONE) {
- pr_debug("%s: GND_MIC_SWAP\n", __func__);
- type = PLUG_TYPE_GND_MIC_SWAP;
- /*
- * if type is GND_MIC_SWAP we should not check
- * HPHL status hence goto exit
- */
- goto exit;
- } else if (dgnd->_type != PLUG_TYPE_HEADSET && !dmicbias) {
- pr_debug("%s: Invalid, inconsistent types\n", __func__);
- type = PLUG_TYPE_INVALID;
- }
- }
-
- if (event_state & (1 << MBHC_EVENT_PA_HPHL)) {
- pr_debug("%s: HPHL PA was ON\n", __func__);
- } else if (ch != sz && ch > 0) {
- pr_debug("%s: Invalid, inconsistent HPHL..\n", __func__);
- type = PLUG_TYPE_INVALID;
- goto exit;
- }
-
- if (!(event_state & (1UL << MBHC_EVENT_PA_HPHL))) {
- if (((type == PLUG_TYPE_HEADSET ||
- type == PLUG_TYPE_HEADPHONE) && ch != sz)) {
- pr_debug("%s: Invalid, not fully inserted, TYPE %d\n",
- __func__, type);
- type = PLUG_TYPE_INVALID;
- }
- }
-
- if (type == PLUG_TYPE_HEADSET &&
- (mbhc->mbhc_cfg->micbias_enable_flags &
- (1 << MBHC_MICBIAS_ENABLE_REGULAR_HEADSET)))
- mbhc->micbias_enable = true;
-
-exit:
- pr_debug("%s: Plug type %d detected\n", __func__, type);
- return type;
-}
-
-/*
- * wcd9xxx_find_plug_type : Find out and return the best plug type with given
- * list of wcd9xxx_mbhc_detect structure.
- * param mbhc wcd9xxx_mbhc structure
- * param dt collected measurements
- * param size array size of dt
- * param event_state mbhc->event_state when dt is collected
- */
-static enum wcd9xxx_mbhc_plug_type
-wcd9xxx_find_plug_type(struct wcd9xxx_mbhc *mbhc,
- struct wcd9xxx_mbhc_detect *dt, const int size,
- unsigned long event_state)
-{
- int i;
- int ch;
- enum wcd9xxx_mbhc_plug_type type;
- int vdce;
- struct wcd9xxx_mbhc_detect *d, *dprev, *dgnd = NULL, *dvddio = NULL;
- int maxv = 0, minv = 0;
- const struct wcd9xxx_mbhc_plug_type_cfg *plug_type =
- WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(mbhc->mbhc_cfg->calibration);
- const s16 hs_max = plug_type->v_hs_max;
- const s16 no_mic = plug_type->v_no_mic;
-
- pr_debug("%s: event_state 0x%lx\n", __func__, event_state);
-
- for (i = 0, d = dt, ch = 0; i < size; i++, d++) {
- vdce = wcd9xxx_codec_sta_dce_v(mbhc, true, d->dce);
- if (d->vddio)
- d->_vdces = scale_v_micb_vddio(mbhc, vdce, false);
- else
- d->_vdces = vdce;
-
- if (d->_vdces >= no_mic && d->_vdces < hs_max)
- d->_type = PLUG_TYPE_HEADSET;
- else if (d->_vdces < no_mic)
- d->_type = PLUG_TYPE_HEADPHONE;
- else
- d->_type = PLUG_TYPE_HIGH_HPH;
-
- ch += d->hphl_status & 0x01;
- if (!d->swap_gnd && !d->hwvalue && !d->vddio) {
- if (maxv < d->_vdces)
- maxv = d->_vdces;
- if (!minv || minv > d->_vdces)
- minv = d->_vdces;
- }
-
- pr_debug("%s: DCE #%d, %04x, V %04d(%04d), GND %d, VDDIO %d, HPHL %d TYPE %d\n",
- __func__, i, d->dce, vdce, d->_vdces,
- d->swap_gnd, d->vddio, d->hphl_status & 0x01,
- d->_type);
-
-
- /*
- * If GND and MIC prongs are aligned to HPHR and GND of
- * headphone, codec measures the voltage based on
- * impedance between HPHR and GND which results in ~80mv.
- * Avoid this.
- */
- if (d->_vdces >= WCD9XXX_MEAS_INVALD_RANGE_LOW_MV &&
- d->_vdces <= WCD9XXX_MEAS_INVALD_RANGE_HIGH_MV) {
- pr_debug("%s: within invalid range\n", __func__);
- type = PLUG_TYPE_INVALID;
- goto exit;
- }
- }
-
- if (event_state & (1 << MBHC_EVENT_PA_HPHL)) {
- pr_debug("%s: HPHL PA was ON\n", __func__);
- } else if (ch != size && ch > 0) {
- pr_debug("%s: Invalid, inconsistent HPHL\n", __func__);
- type = PLUG_TYPE_INVALID;
- goto exit;
- }
-
- for (i = 0, dprev = NULL, d = dt; i < size; i++, d++) {
- if (d->vddio) {
- dvddio = d;
- continue;
- }
-
- if ((i > 0) && (dprev != NULL) && (d->_type != dprev->_type)) {
- pr_debug("%s: Invalid, inconsistent types\n", __func__);
- type = PLUG_TYPE_INVALID;
- goto exit;
- }
-
- if (!d->swap_gnd && !d->hwvalue &&
- (abs(minv - d->_vdces) > WCD9XXX_MEAS_DELTA_MAX_MV ||
- abs(maxv - d->_vdces) > WCD9XXX_MEAS_DELTA_MAX_MV)) {
- pr_debug("%s: Invalid, delta %dmv, %dmv and %dmv\n",
- __func__, d->_vdces, minv, maxv);
- type = PLUG_TYPE_INVALID;
- goto exit;
- } else if (d->swap_gnd) {
- dgnd = d;
- }
- dprev = d;
- }
-
- WARN_ON(i != size);
- type = dt->_type;
- if (type == PLUG_TYPE_HEADSET && dgnd) {
- if ((dgnd->_vdces + WCD9XXX_GM_SWAP_THRES_MIN_MV <
- minv) &&
- (dgnd->_vdces + WCD9XXX_GM_SWAP_THRES_MAX_MV >
- maxv))
- type = PLUG_TYPE_GND_MIC_SWAP;
- }
-
- /* if HPHL PA was on, we cannot use hphl status */
- if (!(event_state & (1UL << MBHC_EVENT_PA_HPHL))) {
- if (((type == PLUG_TYPE_HEADSET ||
- type == PLUG_TYPE_HEADPHONE) && ch != size) ||
- (type == PLUG_TYPE_GND_MIC_SWAP && ch)) {
- pr_debug("%s: Invalid, not fully inserted, TYPE %d\n",
- __func__, type);
- type = PLUG_TYPE_INVALID;
- }
- }
-
- if (type == PLUG_TYPE_HEADSET) {
- if (dvddio && ((dvddio->_vdces > hs_max) ||
- (dvddio->_vdces > minv + WCD9XXX_THRESHOLD_MIC_THRESHOLD))) {
- pr_debug("%s: Headset with threshold on MIC detected\n",
- __func__);
- if (mbhc->mbhc_cfg->micbias_enable_flags &
- (1 << MBHC_MICBIAS_ENABLE_THRESHOLD_HEADSET))
- mbhc->micbias_enable = true;
- } else {
- pr_debug("%s: Headset with regular MIC detected\n",
- __func__);
- if (mbhc->mbhc_cfg->micbias_enable_flags &
- (1 << MBHC_MICBIAS_ENABLE_REGULAR_HEADSET))
- mbhc->micbias_enable = true;
- }
- }
-exit:
- pr_debug("%s: Plug type %d detected, micbias_enable %d\n", __func__,
- type, mbhc->micbias_enable);
- return type;
-}
-
-/*
- * Pull down MBHC micbias for provided duration in microsecond.
- */
-static int wcd9xxx_pull_down_micbias(struct wcd9xxx_mbhc *mbhc, int us)
-{
- bool micbiasconn = false;
- struct snd_soc_codec *codec = mbhc->codec;
- const u16 ctlreg = mbhc->mbhc_bias_regs.ctl_reg;
-
- /*
- * Disable MBHC to micbias connection to pull down
- * micbias and pull down micbias for a moment.
- */
- if ((snd_soc_read(mbhc->codec, ctlreg) & 0x01)) {
- WARN_ONCE(1, "MBHC micbias is already pulled down unexpectedly\n");
- return -EFAULT;
- }
-
- if ((snd_soc_read(mbhc->codec, WCD9XXX_A_MAD_ANA_CTRL) & 1 << 4)) {
- snd_soc_update_bits(mbhc->codec, WCD9XXX_A_MAD_ANA_CTRL,
- 1 << 4, 0);
- micbiasconn = true;
- }
-
- snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 0x01, 0x01);
-
- /*
- * Pull down for 1ms to discharge bias. Give small margin (10us) to be
- * able to get consistent result across DCEs.
- */
- usleep_range(1000, 1000 + 10);
-
- if (micbiasconn)
- snd_soc_update_bits(mbhc->codec, WCD9XXX_A_MAD_ANA_CTRL,
- 1 << 4, 1 << 4);
- snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 0x01, 0x00);
- usleep_range(us, us + WCD9XXX_USLEEP_RANGE_MARGIN_US);
-
- return 0;
-}
-
-/* Called under codec resource lock acquisition */
-void wcd9xxx_turn_onoff_current_source(struct wcd9xxx_mbhc *mbhc,
- struct mbhc_micbias_regs *mbhc_micb_regs,
- bool on, bool highhph)
-{
- struct snd_soc_codec *codec;
- struct wcd9xxx_mbhc_btn_detect_cfg *btn_det;
- const struct wcd9xxx_mbhc_plug_detect_cfg *plug_det =
- WCD9XXX_MBHC_CAL_PLUG_DET_PTR(mbhc->mbhc_cfg->calibration);
-
- btn_det = WCD9XXX_MBHC_CAL_BTN_DET_PTR(mbhc->mbhc_cfg->calibration);
- codec = mbhc->codec;
-
- WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
-
- if ((on && mbhc->is_cs_enabled) ||
- (!on && !mbhc->is_cs_enabled)) {
- pr_debug("%s: Current source is already %s\n",
- __func__, on ? "ON" : "OFF");
- return;
- }
-
- if (on) {
- pr_debug("%s: enabling current source\n", __func__);
- /* Nsc to 9 */
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL,
- 0x78, 0x48);
- /* pull down diode bit to 0 */
- snd_soc_update_bits(codec, mbhc_micb_regs->mbhc_reg,
- 0x01, 0x00);
- /*
- * Keep the low power insertion/removal
- * detection (reg 0x3DD) disabled
- */
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_INT_CTL,
- 0x01, 0x00);
- /*
- * Enable the Mic Bias current source
- * Write bits[6:5] of register MICB_2_MBHC to 0x3 (V_20_UA)
- * Write bit[7] of register MICB_2_MBHC to 1
- * (INS_DET_ISRC_EN__ENABLE)
- * MICB_2_MBHC__SCHT_TRIG_EN to 1
- */
- snd_soc_update_bits(codec, mbhc_micb_regs->mbhc_reg,
- 0xF0, 0xF0);
- /* Disconnect MBHC Override from MicBias and LDOH */
- snd_soc_update_bits(codec, WCD9XXX_A_MAD_ANA_CTRL, 0x10, 0x00);
- mbhc->is_cs_enabled = true;
- } else {
- pr_debug("%s: disabling current source\n", __func__);
- /* Connect MBHC Override from MicBias and LDOH */
- snd_soc_update_bits(codec, WCD9XXX_A_MAD_ANA_CTRL, 0x10, 0x10);
- /* INS_DET_ISRC_CTL to acdb value */
- snd_soc_update_bits(codec, mbhc_micb_regs->mbhc_reg,
- 0x60, plug_det->mic_current << 5);
- if (!highhph) {
- /* INS_DET_ISRC_EN__ENABLE to 0 */
- snd_soc_update_bits(codec,
- mbhc_micb_regs->mbhc_reg,
- 0x80, 0x00);
- /* MICB_2_MBHC__SCHT_TRIG_EN to 0 */
- snd_soc_update_bits(codec,
- mbhc_micb_regs->mbhc_reg,
- 0x10, 0x00);
- }
- /* Nsc to acdb value */
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x78,
- btn_det->mbhc_nsc << 3);
- mbhc->is_cs_enabled = false;
- }
-}
-
-static enum wcd9xxx_mbhc_plug_type
-wcd9xxx_codec_cs_get_plug_type(struct wcd9xxx_mbhc *mbhc, bool highhph)
-{
- struct snd_soc_codec *codec = mbhc->codec;
- struct wcd9xxx_mbhc_detect rt[NUM_DCE_PLUG_INS_DETECT];
- enum wcd9xxx_mbhc_plug_type type = PLUG_TYPE_INVALID;
- int i;
-
- pr_debug("%s: enter\n", __func__);
- WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
-
- BUG_ON(NUM_DCE_PLUG_INS_DETECT < 4);
-
- wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc, true);
- rt[0].swap_gnd = false;
- rt[0].vddio = false;
- rt[0].hwvalue = true;
- rt[0].hphl_status = wcd9xxx_hphl_status(mbhc);
- rt[0].dce = wcd9xxx_mbhc_setup_hs_polling(mbhc, &mbhc->mbhc_bias_regs,
- true);
- rt[0].mic_bias = false;
-
- for (i = 1; i < NUM_DCE_PLUG_INS_DETECT - 1; i++) {
- rt[i].swap_gnd = (i == NUM_DCE_PLUG_INS_DETECT - 3);
- rt[i].mic_bias = ((i == NUM_DCE_PLUG_INS_DETECT - 4) &&
- highhph);
- rt[i].hphl_status = wcd9xxx_hphl_status(mbhc);
- if (rt[i].swap_gnd)
- wcd9xxx_codec_hphr_gnd_switch(codec, true);
-
- if (rt[i].mic_bias)
- wcd9xxx_turn_onoff_current_source(mbhc,
- &mbhc->mbhc_bias_regs,
- false, false);
-
- rt[i].dce = __wcd9xxx_codec_sta_dce(mbhc, 1, !highhph, true);
- if (rt[i].mic_bias)
- wcd9xxx_turn_onoff_current_source(mbhc,
- &mbhc->mbhc_bias_regs,
- true, false);
- if (rt[i].swap_gnd)
- wcd9xxx_codec_hphr_gnd_switch(codec, false);
- }
-
- /* recalibrate DCE/STA GND voltages */
- wcd9xxx_recalibrate(mbhc, &mbhc->mbhc_bias_regs, true);
-
- type = wcd9xxx_cs_find_plug_type(mbhc, rt, ARRAY_SIZE(rt), highhph,
- mbhc->event_state);
-
- wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc, false);
- pr_debug("%s: plug_type:%d\n", __func__, type);
-
- return type;
-}
-
-static enum wcd9xxx_mbhc_plug_type
-wcd9xxx_codec_get_plug_type(struct wcd9xxx_mbhc *mbhc, bool highhph)
-{
- int i;
- bool vddioon;
- struct wcd9xxx_mbhc_plug_type_cfg *plug_type_ptr;
- struct wcd9xxx_mbhc_detect rt[NUM_DCE_PLUG_INS_DETECT];
- enum wcd9xxx_mbhc_plug_type type = PLUG_TYPE_INVALID;
- struct snd_soc_codec *codec = mbhc->codec;
-
- pr_debug("%s: enter\n", __func__);
- WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
-
- /* make sure override is on */
- WARN_ON(!(snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B1_CTL) & 0x04));
-
- /* GND and MIC swap detection requires at least 2 rounds of DCE */
- BUG_ON(NUM_DCE_PLUG_INS_DETECT < 2);
- detect_use_vddio_switch = mbhc->mbhc_cfg->use_vddio_meas;
-
- /*
- * There are chances vddio switch is on and cfilt voltage is adjusted
- * to vddio voltage even after plug type removal reported.
- */
- vddioon = __wcd9xxx_switch_micbias(mbhc, 0, false, false);
- pr_debug("%s: vddio switch was %s\n", __func__, vddioon ? "on" : "off");
-
- plug_type_ptr =
- WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(mbhc->mbhc_cfg->calibration);
-
- /*
- * cfilter in fast mode requires 1ms to charge up and down micbias
- * fully.
- */
- (void) wcd9xxx_pull_down_micbias(mbhc,
- WCD9XXX_MICBIAS_PULLDOWN_SETTLE_US);
-
- wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc, true);
- rt[0].hphl_status = wcd9xxx_hphl_status(mbhc);
- rt[0].dce = wcd9xxx_mbhc_setup_hs_polling(mbhc, &mbhc->mbhc_bias_regs,
- false);
- rt[0].swap_gnd = false;
- rt[0].vddio = false;
- rt[0].hwvalue = true;
- for (i = 1; i < NUM_DCE_PLUG_INS_DETECT; i++) {
- rt[i].swap_gnd = (i == NUM_DCE_PLUG_INS_DETECT - 2);
- if (detect_use_vddio_switch)
- rt[i].vddio = (i == 1);
- else
- rt[i].vddio = false;
- rt[i].hphl_status = wcd9xxx_hphl_status(mbhc);
- rt[i].hwvalue = false;
- if (rt[i].swap_gnd)
- wcd9xxx_codec_hphr_gnd_switch(codec, true);
- if (rt[i].vddio)
- wcd9xxx_onoff_vddio_switch(mbhc, true);
- /*
- * Pull down micbias to detect headset with mic which has
- * threshold and to have more consistent voltage measurements.
- *
- * cfilter in fast mode requires 1ms to charge up and down
- * micbias fully.
- */
- (void) wcd9xxx_pull_down_micbias(mbhc,
- WCD9XXX_MICBIAS_PULLDOWN_SETTLE_US);
- rt[i].dce = __wcd9xxx_codec_sta_dce(mbhc, 1, true, true);
- if (rt[i].vddio)
- wcd9xxx_onoff_vddio_switch(mbhc, false);
- if (rt[i].swap_gnd)
- wcd9xxx_codec_hphr_gnd_switch(codec, false);
- }
- /* recalibrate DCE/STA GND voltages */
- wcd9xxx_recalibrate(mbhc, &mbhc->mbhc_bias_regs, false);
-
- if (vddioon)
- __wcd9xxx_switch_micbias(mbhc, 1, false, false);
-
- type = wcd9xxx_find_plug_type(mbhc, rt, ARRAY_SIZE(rt),
- mbhc->event_state);
-
- wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc, false);
- pr_debug("%s: leave\n", __func__);
- return type;
-}
-
-static bool wcd9xxx_swch_level_remove(struct wcd9xxx_mbhc *mbhc)
-{
- if (mbhc->mbhc_cfg->gpio)
- return (gpio_get_value_cansleep(mbhc->mbhc_cfg->gpio) !=
- mbhc->mbhc_cfg->gpio_level_insert);
- else if (mbhc->mbhc_cfg->insert_detect) {
- if (mbhc->mbhc_cb && mbhc->mbhc_cb->insert_rem_status)
- return mbhc->mbhc_cb->insert_rem_status(mbhc->codec);
- else
- return snd_soc_read(mbhc->codec,
- WCD9XXX_A_MBHC_INSERT_DET_STATUS) &
- (1 << 2);
- } else
- WARN(1, "Invalid jack detection configuration\n");
-
- return true;
-}
-
-static bool is_clk_active(struct snd_soc_codec *codec)
-{
- return !!(snd_soc_read(codec, WCD9XXX_A_CDC_CLK_MCLK_CTL) & 0x05);
-}
-
-static int wcd9xxx_enable_hs_detect(struct wcd9xxx_mbhc *mbhc,
- int insertion, int trigger, bool padac_off)
-{
- struct snd_soc_codec *codec = mbhc->codec;
- int central_bias_enabled = 0;
- const struct wcd9xxx_mbhc_general_cfg *generic =
- WCD9XXX_MBHC_CAL_GENERAL_PTR(mbhc->mbhc_cfg->calibration);
- const struct wcd9xxx_mbhc_plug_detect_cfg *plug_det =
- WCD9XXX_MBHC_CAL_PLUG_DET_PTR(mbhc->mbhc_cfg->calibration);
-
- pr_debug("%s: enter insertion(%d) trigger(0x%x)\n",
- __func__, insertion, trigger);
-
- if (!mbhc->mbhc_cfg->calibration) {
- pr_err("Error, no wcd9xxx calibration\n");
- return -EINVAL;
- }
-
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_INT_CTL, 0x1, 0);
-
- /*
- * Make sure mic bias and Mic line schmitt trigger
- * are turned OFF
- */
- snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 0x01, 0x01);
- snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.mbhc_reg, 0x90, 0x00);
-
- if (insertion) {
- wcd9xxx_switch_micbias(mbhc, 0);
-
- /* DAPM can manipulate PA/DAC bits concurrently */
- if (padac_off == true)
- wcd9xxx_set_and_turnoff_hph_padac(mbhc);
-
- if (trigger & MBHC_USE_HPHL_TRIGGER) {
- /* Enable HPH Schmitt Trigger */
- snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x11,
- 0x11);
- snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x0C,
- plug_det->hph_current << 2);
- snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x02,
- 0x02);
- }
- if (trigger & MBHC_USE_MB_TRIGGER) {
- /* enable the mic line schmitt trigger */
- snd_soc_update_bits(codec,
- mbhc->mbhc_bias_regs.mbhc_reg,
- 0x60, plug_det->mic_current << 5);
- snd_soc_update_bits(codec,
- mbhc->mbhc_bias_regs.mbhc_reg,
- 0x80, 0x80);
- usleep_range(plug_det->t_mic_pid, plug_det->t_mic_pid +
- WCD9XXX_USLEEP_RANGE_MARGIN_US);
- snd_soc_update_bits(codec,
- mbhc->mbhc_bias_regs.ctl_reg, 0x01,
- 0x00);
- snd_soc_update_bits(codec,
- mbhc->mbhc_bias_regs.mbhc_reg,
- 0x10, 0x10);
- }
-
- /* setup for insetion detection */
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_INT_CTL, 0x2, 0);
- } else {
- pr_debug("setup for removal detection\n");
- /* Make sure the HPH schmitt trigger is OFF */
- snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x12, 0x00);
-
- /* enable the mic line schmitt trigger */
- snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg,
- 0x01, 0x00);
- snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.mbhc_reg, 0x60,
- plug_det->mic_current << 5);
- snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.mbhc_reg,
- 0x80, 0x80);
- usleep_range(plug_det->t_mic_pid, plug_det->t_mic_pid +
- WCD9XXX_USLEEP_RANGE_MARGIN_US);
- snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.mbhc_reg,
- 0x10, 0x10);
-
- /* Setup for low power removal detection */
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_INT_CTL, 0x2,
- 0x2);
- }
-
- if (snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B1_CTL) & 0x4) {
- /* called by interrupt */
- if (!is_clk_active(codec)) {
- wcd9xxx_resmgr_enable_config_mode(mbhc->resmgr, 1);
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL,
- 0x06, 0);
- usleep_range(generic->t_shutdown_plug_rem,
- generic->t_shutdown_plug_rem +
- WCD9XXX_USLEEP_RANGE_MARGIN_US);
- wcd9xxx_resmgr_enable_config_mode(mbhc->resmgr, 0);
- } else
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL,
- 0x06, 0);
- }
-
- snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.int_rbias, 0x80, 0);
-
- /* If central bandgap disabled */
- if (!(snd_soc_read(codec, WCD9XXX_A_PIN_CTL_OE1) & 1)) {
- snd_soc_update_bits(codec, WCD9XXX_A_PIN_CTL_OE1, 0x3, 0x3);
- usleep_range(generic->t_bg_fast_settle,
- generic->t_bg_fast_settle +
- WCD9XXX_USLEEP_RANGE_MARGIN_US);
- central_bias_enabled = 1;
- }
-
- /* If LDO_H disabled */
- if (snd_soc_read(codec, WCD9XXX_A_PIN_CTL_OE0) & 0x80) {
- snd_soc_update_bits(codec, WCD9XXX_A_PIN_CTL_OE0, 0x10, 0);
- snd_soc_update_bits(codec, WCD9XXX_A_PIN_CTL_OE0, 0x80, 0x80);
- usleep_range(generic->t_ldoh, generic->t_ldoh +
- WCD9XXX_USLEEP_RANGE_MARGIN_US);
- snd_soc_update_bits(codec, WCD9XXX_A_PIN_CTL_OE0, 0x80, 0);
-
- if (central_bias_enabled)
- snd_soc_update_bits(codec, WCD9XXX_A_PIN_CTL_OE1, 0x1,
- 0);
- }
-
- if (mbhc->resmgr->reg_addr && mbhc->resmgr->reg_addr->micb_4_mbhc)
- snd_soc_update_bits(codec, mbhc->resmgr->reg_addr->micb_4_mbhc,
- 0x3, mbhc->mbhc_cfg->micbias);
-
- wcd9xxx_enable_irq(mbhc->resmgr->core_res, mbhc->intr_ids->insertion);
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_INT_CTL, 0x1, 0x1);
- pr_debug("%s: leave\n", __func__);
-
- return 0;
-}
-
-/*
- * Function to determine whether anc microphone is preset or not.
- * Return true if anc microphone is detected or false if not detected.
- */
-static bool wcd9xxx_detect_anc_plug_type(struct wcd9xxx_mbhc *mbhc)
-{
- struct wcd9xxx_mbhc_detect rt[NUM_DCE_PLUG_INS_DETECT - 1];
- bool anc_mic_found = true;
- int i, mb_mv;
- const struct wcd9xxx_mbhc_plug_type_cfg *plug_type =
- WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(mbhc->mbhc_cfg->calibration);
- s16 hs_max, dce_z;
- s16 no_mic;
- bool override_en;
- bool timedout;
- unsigned long timeout, retry = 0;
- enum wcd9xxx_mbhc_plug_type type;
- bool cs_enable;
-
- if (mbhc->mbhc_cfg->anc_micbias != MBHC_MICBIAS3 &&
- mbhc->mbhc_cfg->anc_micbias != MBHC_MICBIAS2)
- return false;
-
- pr_debug("%s: enter\n", __func__);
-
- override_en = (snd_soc_read(mbhc->codec, WCD9XXX_A_CDC_MBHC_B1_CTL) &
- 0x04) ? true : false;
- cs_enable = ((mbhc->mbhc_cfg->cs_enable_flags &
- (1 << MBHC_CS_ENABLE_DET_ANC)) != 0) &&
- (!(snd_soc_read(mbhc->codec,
- mbhc->mbhc_anc_bias_regs.ctl_reg) & 0x80)) &&
- (mbhc->mbhc_cfg->micbias != mbhc->mbhc_cfg->anc_micbias);
-
- if (cs_enable) {
- wcd9xxx_turn_onoff_current_source(mbhc,
- &mbhc->mbhc_anc_bias_regs,
- true, false);
- } else {
- if (mbhc->mbhc_cfg->anc_micbias == MBHC_MICBIAS3) {
- if (mbhc->micbias_enable_cb)
- mbhc->micbias_enable_cb(mbhc->codec, true,
- mbhc->mbhc_cfg->anc_micbias);
- else
- return false;
- } else {
- /* Enable override */
- if (!override_en)
- wcd9xxx_turn_onoff_override(mbhc, true);
- }
- }
-
- if (!cs_enable) {
- hs_max = plug_type->v_hs_max;
- no_mic = plug_type->v_no_mic;
- dce_z = mbhc->mbhc_data.dce_z;
- mb_mv = mbhc->mbhc_data.micb_mv;
- } else {
- hs_max = WCD9XXX_V_CS_HS_MAX;
- no_mic = WCD9XXX_V_CS_NO_MIC;
- mb_mv = VDDIO_MICBIAS_MV;
- dce_z = mbhc->mbhc_data.dce_nsc_cs_z;
- }
-
- wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc, true);
-
- timeout = jiffies + msecs_to_jiffies(ANC_HPH_DETECT_PLUG_TIME_MS);
- anc_mic_found = true;
-
- while (!(timedout = time_after(jiffies, timeout))) {
- retry++;
-
- if (wcd9xxx_swch_level_remove(mbhc)) {
- pr_debug("%s: Switch level is low\n", __func__);
- anc_mic_found = false;
- break;
- }
-
- pr_debug("%s: Retry attempt %lu", __func__, retry - 1);
-
- rt[0].hphl_status = wcd9xxx_hphl_status(mbhc);
- rt[0].dce = wcd9xxx_mbhc_setup_hs_polling(mbhc,
- &mbhc->mbhc_anc_bias_regs,
- cs_enable);
- rt[0]._vdces = __wcd9xxx_codec_sta_dce_v(mbhc, true, rt[0].dce,
- dce_z, (u32)mb_mv);
-
- if (rt[0]._vdces >= no_mic && rt[0]._vdces < hs_max)
- rt[0]._type = PLUG_TYPE_HEADSET;
- else if (rt[0]._vdces < no_mic)
- rt[0]._type = PLUG_TYPE_HEADPHONE;
- else
- rt[0]._type = PLUG_TYPE_HIGH_HPH;
-
- pr_debug("%s: DCE #%d, V %04d, HPHL %d TYPE %d\n",
- __func__, 0, rt[0]._vdces,
- rt[0].hphl_status & 0x01,
- rt[0]._type);
-
- for (i = 1; i < NUM_DCE_PLUG_INS_DETECT - 1; i++) {
- rt[i].dce = __wcd9xxx_codec_sta_dce(mbhc, 1,
- true, true);
- rt[i]._vdces = __wcd9xxx_codec_sta_dce_v(mbhc, true,
- rt[i].dce, dce_z,
- (u32) mb_mv);
-
- if (rt[i]._vdces >= no_mic && rt[i]._vdces < hs_max)
- rt[i]._type = PLUG_TYPE_HEADSET;
- else if (rt[i]._vdces < no_mic)
- rt[i]._type = PLUG_TYPE_HEADPHONE;
- else
- rt[i]._type = PLUG_TYPE_HIGH_HPH;
-
- rt[i].hphl_status = wcd9xxx_hphl_status(mbhc);
-
- pr_debug("%s: DCE #%d, V %04d, HPHL %d TYPE %d\n",
- __func__, i, rt[i]._vdces,
- rt[i].hphl_status & 0x01,
- rt[i]._type);
- }
-
- /*
- * Check for the "type" of all the 4 measurements
- * If all 4 measurements have the Type as PLUG_TYPE_HEADSET
- * then it is proper mic and declare that the plug has two mics
- */
- for (i = 0; i < NUM_DCE_PLUG_INS_DETECT - 1; i++) {
- if (i > 0 && (rt[i - 1]._type != rt[i]._type)) {
- type = PLUG_TYPE_INVALID;
- break;
- } else {
- type = rt[0]._type;
- }
- }
-
- pr_debug("%s: Plug type found in ANC detection :%d",
- __func__, type);
-
- if (type != PLUG_TYPE_HEADSET)
- anc_mic_found = false;
- if (anc_mic_found || (type == PLUG_TYPE_HEADPHONE &&
- mbhc->mbhc_cfg->hw_jack_type == FIVE_POLE_JACK) ||
- (type == PLUG_TYPE_HIGH_HPH &&
- mbhc->mbhc_cfg->hw_jack_type == SIX_POLE_JACK))
- break;
- }
-
- wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc, false);
- if (cs_enable) {
- wcd9xxx_turn_onoff_current_source(mbhc,
- &mbhc->mbhc_anc_bias_regs,
- false, false);
- } else {
- if (mbhc->mbhc_cfg->anc_micbias == MBHC_MICBIAS3) {
- if (mbhc->micbias_enable_cb)
- mbhc->micbias_enable_cb(mbhc->codec, false,
- mbhc->mbhc_cfg->anc_micbias);
- } else {
- /* Disable override */
- if (!override_en)
- wcd9xxx_turn_onoff_override(mbhc, false);
- }
- }
- pr_debug("%s: leave\n", __func__);
- return anc_mic_found;
-}
-
-/* called under codec_resource_lock acquisition */
-static void wcd9xxx_find_plug_and_report(struct wcd9xxx_mbhc *mbhc,
- enum wcd9xxx_mbhc_plug_type plug_type)
-{
- bool anc_mic_found = false;
-
- pr_debug("%s: enter current_plug(%d) new_plug(%d)\n",
- __func__, mbhc->current_plug, plug_type);
-
- WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
-
- if (plug_type == PLUG_TYPE_HEADPHONE &&
- mbhc->current_plug == PLUG_TYPE_NONE) {
- /*
- * Nothing was reported previously
- * report a headphone or unsupported
- */
- wcd9xxx_report_plug(mbhc, 1, SND_JACK_HEADPHONE);
- wcd9xxx_cleanup_hs_polling(mbhc);
- } else if (plug_type == PLUG_TYPE_GND_MIC_SWAP) {
- if (!mbhc->mbhc_cfg->detect_extn_cable) {
- if (mbhc->current_plug == PLUG_TYPE_HEADSET)
- wcd9xxx_report_plug(mbhc, 0,
- SND_JACK_HEADSET);
- else if (mbhc->current_plug == PLUG_TYPE_HEADPHONE)
- wcd9xxx_report_plug(mbhc, 0,
- SND_JACK_HEADPHONE);
- }
- wcd9xxx_report_plug(mbhc, 1, SND_JACK_UNSUPPORTED);
- wcd9xxx_cleanup_hs_polling(mbhc);
- } else if (plug_type == PLUG_TYPE_HEADSET) {
-
- if (mbhc->mbhc_cfg->enable_anc_mic_detect) {
- /*
- * Do not report Headset, because at this point
- * it could be a ANC headphone having two mics.
- * So, proceed further to detect if there is a
- * second mic.
- */
- mbhc->scaling_mux_in = 0x08;
- anc_mic_found = wcd9xxx_detect_anc_plug_type(mbhc);
- }
-
- if (anc_mic_found) {
- /* Report ANC headphone */
- wcd9xxx_report_plug(mbhc, 1, SND_JACK_ANC_HEADPHONE);
- } else {
- /*
- * If Headphone was reported previously, this will
- * only report the mic line
- */
- wcd9xxx_report_plug(mbhc, 1, SND_JACK_HEADSET);
- }
- /* Button detection required RC oscillator */
- wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc, true);
- /*
- * sleep so that audio path completely tears down
- * before report plug insertion to the user space
- */
- msleep(100);
-
- wcd9xxx_start_hs_polling(mbhc);
- } else if (plug_type == PLUG_TYPE_HIGH_HPH) {
- if (mbhc->mbhc_cfg->detect_extn_cable) {
- /* High impedance device found. Report as LINEOUT*/
- if (mbhc->current_plug == PLUG_TYPE_NONE)
- wcd9xxx_report_plug(mbhc, 1, SND_JACK_LINEOUT);
- wcd9xxx_cleanup_hs_polling(mbhc);
- pr_debug("%s: setup mic trigger for further detection\n",
- __func__);
- mbhc->lpi_enabled = true;
- /*
- * Do not enable HPHL trigger. If playback is active,
- * it might lead to continuous false HPHL triggers
- */
- wcd9xxx_enable_hs_detect(mbhc, 1, MBHC_USE_MB_TRIGGER,
- false);
- } else {
- if (mbhc->current_plug == PLUG_TYPE_NONE)
- wcd9xxx_report_plug(mbhc, 1,
- SND_JACK_HEADPHONE);
- wcd9xxx_cleanup_hs_polling(mbhc);
- pr_debug("setup mic trigger for further detection\n");
- mbhc->lpi_enabled = true;
- wcd9xxx_enable_hs_detect(mbhc, 1, MBHC_USE_MB_TRIGGER |
- MBHC_USE_HPHL_TRIGGER,
- false);
- }
- } else {
- WARN(1, "Unexpected current plug_type %d, plug_type %d\n",
- mbhc->current_plug, plug_type);
- }
- pr_debug("%s: leave\n", __func__);
-}
-
-/* called under codec_resource_lock acquisition */
-static void wcd9xxx_mbhc_decide_swch_plug(struct wcd9xxx_mbhc *mbhc)
-{
- enum wcd9xxx_mbhc_plug_type plug_type;
- bool current_source_enable;
-
- pr_debug("%s: enter\n", __func__);
-
- WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
-
- current_source_enable = (((mbhc->mbhc_cfg->cs_enable_flags &
- (1 << MBHC_CS_ENABLE_INSERTION)) != 0) &&
- (!(snd_soc_read(mbhc->codec,
- mbhc->mbhc_bias_regs.ctl_reg) & 0x80)));
-
- mbhc->scaling_mux_in = 0x04;
-
- if (current_source_enable) {
- wcd9xxx_turn_onoff_current_source(mbhc, &mbhc->mbhc_bias_regs,
- true, false);
- plug_type = wcd9xxx_codec_cs_get_plug_type(mbhc, false);
- /*
- * For other plug types, the current source disable
- * will be done from wcd9xxx_correct_swch_plug
- */
- if (plug_type == PLUG_TYPE_HEADSET)
- wcd9xxx_turn_onoff_current_source(mbhc,
- &mbhc->mbhc_bias_regs,
- false, false);
- } else {
- wcd9xxx_turn_onoff_override(mbhc, true);
- plug_type = wcd9xxx_codec_get_plug_type(mbhc, true);
- wcd9xxx_turn_onoff_override(mbhc, false);
- }
-
- if (wcd9xxx_swch_level_remove(mbhc)) {
- if (current_source_enable && mbhc->is_cs_enabled) {
- wcd9xxx_turn_onoff_current_source(mbhc,
- &mbhc->mbhc_bias_regs,
- false, false);
- }
- pr_debug("%s: Switch level is low when determining plug\n",
- __func__);
- return;
- }
-
- if (plug_type == PLUG_TYPE_INVALID ||
- plug_type == PLUG_TYPE_GND_MIC_SWAP) {
- wcd9xxx_cleanup_hs_polling(mbhc);
- wcd9xxx_schedule_hs_detect_plug(mbhc,
- &mbhc->correct_plug_swch);
- } else if (plug_type == PLUG_TYPE_HEADPHONE) {
- wcd9xxx_report_plug(mbhc, 1, SND_JACK_HEADPHONE);
- wcd9xxx_cleanup_hs_polling(mbhc);
- wcd9xxx_schedule_hs_detect_plug(mbhc,
- &mbhc->correct_plug_swch);
- } else if (plug_type == PLUG_TYPE_HIGH_HPH) {
- wcd9xxx_cleanup_hs_polling(mbhc);
- wcd9xxx_schedule_hs_detect_plug(mbhc,
- &mbhc->correct_plug_swch);
- } else {
- pr_debug("%s: Valid plug found, determine plug type %d\n",
- __func__, plug_type);
- wcd9xxx_find_plug_and_report(mbhc, plug_type);
- }
- pr_debug("%s: leave\n", __func__);
-}
-
-/* called under codec_resource_lock acquisition */
-static void wcd9xxx_mbhc_detect_plug_type(struct wcd9xxx_mbhc *mbhc)
-{
- pr_debug("%s: enter\n", __func__);
- WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
-
- if (wcd9xxx_swch_level_remove(mbhc))
- pr_debug("%s: Switch level low when determining plug\n",
- __func__);
- else
- wcd9xxx_mbhc_decide_swch_plug(mbhc);
- pr_debug("%s: leave\n", __func__);
-}
-
-/* called only from interrupt which is under codec_resource_lock acquisition */
-static void wcd9xxx_hs_insert_irq_swch(struct wcd9xxx_mbhc *mbhc,
- bool is_removal)
-{
- if (!is_removal) {
- pr_debug("%s: MIC trigger insertion interrupt\n", __func__);
-
- /* Make sure memory read is completed before reading
- * lpi_enabled.
- */
- rmb();
- if (mbhc->lpi_enabled)
- msleep(100);
-
- /* Make sure memory read is completed before reading
- * lpi_enabled.
- */
- rmb();
- if (!mbhc->lpi_enabled) {
- pr_debug("%s: lpi is disabled\n", __func__);
- } else if (!wcd9xxx_swch_level_remove(mbhc)) {
- pr_debug("%s: Valid insertion, detect plug type\n",
- __func__);
- wcd9xxx_mbhc_decide_swch_plug(mbhc);
- } else {
- pr_debug("%s: Invalid insertion stop plug detection\n",
- __func__);
- }
- } else if (mbhc->mbhc_cfg->detect_extn_cable) {
- pr_debug("%s: Removal\n", __func__);
- if (!wcd9xxx_swch_level_remove(mbhc)) {
- /*
- * Switch indicates, something is still inserted.
- * This could be extension cable i.e. headset is
- * removed from extension cable.
- */
- /* cancel detect plug */
- wcd9xxx_cancel_hs_detect_plug(mbhc,
- &mbhc->correct_plug_swch);
- wcd9xxx_mbhc_decide_swch_plug(mbhc);
- }
- } else {
- pr_err("%s: Switch IRQ used, invalid MBHC Removal\n", __func__);
- }
-}
-
-static bool is_valid_mic_voltage(struct wcd9xxx_mbhc *mbhc, s32 mic_mv,
- bool cs_enable)
-{
- const struct wcd9xxx_mbhc_plug_type_cfg *plug_type =
- WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(mbhc->mbhc_cfg->calibration);
- const s16 v_hs_max = wcd9xxx_get_current_v_hs_max(mbhc);
-
- if (cs_enable)
- return ((mic_mv > WCD9XXX_V_CS_NO_MIC) &&
- (mic_mv < WCD9XXX_V_CS_HS_MAX)) ? true : false;
- else
- return (!(mic_mv > WCD9XXX_MEAS_INVALD_RANGE_LOW_MV &&
- mic_mv < WCD9XXX_MEAS_INVALD_RANGE_HIGH_MV) &&
- (mic_mv > plug_type->v_no_mic) &&
- (mic_mv < v_hs_max)) ? true : false;
-}
-
-/*
- * called under codec_resource_lock acquisition
- * returns true if mic voltage range is back to normal insertion
- * returns false either if timedout or removed
- */
-static bool wcd9xxx_hs_remove_settle(struct wcd9xxx_mbhc *mbhc)
-{
- int i;
- bool timedout, settled = false;
- s32 mic_mv[NUM_DCE_PLUG_DETECT];
- short mb_v[NUM_DCE_PLUG_DETECT];
- unsigned long retry = 0, timeout;
- bool cs_enable;
-
- cs_enable = (((mbhc->mbhc_cfg->cs_enable_flags &
- (1 << MBHC_CS_ENABLE_REMOVAL)) != 0) &&
- (!(snd_soc_read(mbhc->codec,
- mbhc->mbhc_bias_regs.ctl_reg) & 0x80)));
- if (cs_enable)
- wcd9xxx_turn_onoff_current_source(mbhc, &mbhc->mbhc_bias_regs,
- true, false);
-
- timeout = jiffies + msecs_to_jiffies(HS_DETECT_PLUG_TIME_MS);
- while (!(timedout = time_after(jiffies, timeout))) {
- retry++;
- if (wcd9xxx_swch_level_remove(mbhc)) {
- pr_debug("%s: Switch indicates removal\n", __func__);
- break;
- }
-
- if (retry > 1)
- msleep(250);
- else
- msleep(50);
-
- if (wcd9xxx_swch_level_remove(mbhc)) {
- pr_debug("%s: Switch indicates removal\n", __func__);
- break;
- }
-
- if (cs_enable) {
- for (i = 0; i < NUM_DCE_PLUG_DETECT; i++) {
- mb_v[i] = __wcd9xxx_codec_sta_dce(mbhc, 1,
- true, true);
- mic_mv[i] = __wcd9xxx_codec_sta_dce_v(mbhc,
- true,
- mb_v[i],
- mbhc->mbhc_data.dce_nsc_cs_z,
- (u32)VDDIO_MICBIAS_MV);
- pr_debug("%s : DCE run %lu, mic_mv = %d(%x)\n",
- __func__, retry, mic_mv[i], mb_v[i]);
- }
- } else {
- for (i = 0; i < NUM_DCE_PLUG_DETECT; i++) {
- mb_v[i] = wcd9xxx_codec_sta_dce(mbhc, 1,
- true);
- mic_mv[i] = wcd9xxx_codec_sta_dce_v(mbhc, 1,
- mb_v[i]);
- pr_debug("%s : DCE run %lu, mic_mv = %d(%x)\n",
- __func__, retry, mic_mv[i],
- mb_v[i]);
- }
- }
-
- if (wcd9xxx_swch_level_remove(mbhc)) {
- pr_debug("%s: Switcn indicates removal\n", __func__);
- break;
- }
-
- if (mbhc->current_plug == PLUG_TYPE_NONE) {
- pr_debug("%s : headset/headphone is removed\n",
- __func__);
- break;
- }
-
- for (i = 0; i < NUM_DCE_PLUG_DETECT; i++)
- if (!is_valid_mic_voltage(mbhc, mic_mv[i], cs_enable))
- break;
-
- if (i == NUM_DCE_PLUG_DETECT) {
- pr_debug("%s: MIC voltage settled\n", __func__);
- settled = true;
- msleep(200);
- break;
- }
- }
-
- if (cs_enable)
- wcd9xxx_turn_onoff_current_source(mbhc, &mbhc->mbhc_bias_regs,
- false, false);
-
- if (timedout)
- pr_debug("%s: Microphone did not settle in %d seconds\n",
- __func__, HS_DETECT_PLUG_TIME_MS);
- return settled;
-}
-
-/* called only from interrupt which is under codec_resource_lock acquisition */
-static void wcd9xxx_hs_remove_irq_swch(struct wcd9xxx_mbhc *mbhc)
-{
- pr_debug("%s: enter\n", __func__);
- if (wcd9xxx_hs_remove_settle(mbhc))
- wcd9xxx_start_hs_polling(mbhc);
- pr_debug("%s: leave\n", __func__);
-}
-
-/* called only from interrupt which is under codec_resource_lock acquisition */
-static void wcd9xxx_hs_remove_irq_noswch(struct wcd9xxx_mbhc *mbhc)
-{
- s16 dce, dcez;
- unsigned long timeout;
- bool removed = true;
- struct snd_soc_codec *codec = mbhc->codec;
- const struct wcd9xxx_mbhc_general_cfg *generic =
- WCD9XXX_MBHC_CAL_GENERAL_PTR(mbhc->mbhc_cfg->calibration);
- bool cs_enable;
- s16 cur_v_ins_h;
- u32 mb_mv;
-
- pr_debug("%s: enter\n", __func__);
- if (mbhc->current_plug != PLUG_TYPE_HEADSET &&
- mbhc->current_plug != PLUG_TYPE_ANC_HEADPHONE) {
- pr_debug("%s(): Headset is not inserted, ignore removal\n",
- __func__);
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL,
- 0x08, 0x08);
- return;
- }
-
- usleep_range(generic->t_shutdown_plug_rem,
- generic->t_shutdown_plug_rem +
- WCD9XXX_USLEEP_RANGE_MARGIN_US);
-
- /* If micbias is enabled, don't enable current source */
- cs_enable = (((mbhc->mbhc_cfg->cs_enable_flags &
- (1 << MBHC_CS_ENABLE_REMOVAL)) != 0) &&
- (!(snd_soc_read(codec,
- mbhc->mbhc_bias_regs.ctl_reg) & 0x80)));
- if (cs_enable)
- wcd9xxx_turn_onoff_current_source(mbhc, &mbhc->mbhc_bias_regs,
- true, false);
-
- timeout = jiffies + msecs_to_jiffies(FAKE_REMOVAL_MIN_PERIOD_MS);
- do {
- if (cs_enable) {
- dce = __wcd9xxx_codec_sta_dce(mbhc, 1, true, true);
- dcez = mbhc->mbhc_data.dce_nsc_cs_z;
- mb_mv = VDDIO_MICBIAS_MV;
- } else {
- dce = wcd9xxx_codec_sta_dce(mbhc, 1, true);
- dcez = mbhc->mbhc_data.dce_z;
- mb_mv = mbhc->mbhc_data.micb_mv;
- }
-
- pr_debug("%s: DCE 0x%x,%d\n", __func__, dce,
- __wcd9xxx_codec_sta_dce_v(mbhc, true, dce,
- dcez, mb_mv));
-
- cur_v_ins_h = cs_enable ? (s16) mbhc->mbhc_data.v_cs_ins_h :
- (wcd9xxx_get_current_v(mbhc,
- WCD9XXX_CURRENT_V_INS_H));
-
- if (dce < cur_v_ins_h) {
- removed = false;
- break;
- }
- } while (!time_after(jiffies, timeout));
- pr_debug("%s: headset %sactually removed\n", __func__,
- removed ? "" : "not ");
-
- if (cs_enable)
- wcd9xxx_turn_onoff_current_source(mbhc, &mbhc->mbhc_bias_regs,
- false, false);
-
- if (removed) {
- if (mbhc->mbhc_cfg->detect_extn_cable) {
- if (!wcd9xxx_swch_level_remove(mbhc)) {
- /*
- * extension cable is still plugged in
- * report it as LINEOUT device
- */
- if (mbhc->hph_status == SND_JACK_HEADSET)
- wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc,
- false);
- wcd9xxx_report_plug(mbhc, 1, SND_JACK_LINEOUT);
- wcd9xxx_cleanup_hs_polling(mbhc);
- wcd9xxx_enable_hs_detect(mbhc, 1,
- MBHC_USE_MB_TRIGGER,
- false);
- }
- } else {
- /* Cancel possibly running hs_detect_work */
- wcd9xxx_cancel_hs_detect_plug(mbhc,
- &mbhc->correct_plug_noswch);
- /*
- * If this removal is not false, first check the micbias
- * switch status and switch it to LDOH if it is already
- * switched to VDDIO.
- */
- wcd9xxx_switch_micbias(mbhc, 0);
-
- wcd9xxx_report_plug(mbhc, 0, SND_JACK_HEADSET);
- wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc, false);
- wcd9xxx_cleanup_hs_polling(mbhc);
- wcd9xxx_enable_hs_detect(mbhc, 1, MBHC_USE_MB_TRIGGER |
- MBHC_USE_HPHL_TRIGGER,
- true);
- }
- } else {
- wcd9xxx_start_hs_polling(mbhc);
- }
- pr_debug("%s: leave\n", __func__);
-}
-
-/* called only from interrupt which is under codec_resource_lock acquisition */
-static void wcd9xxx_hs_insert_irq_extn(struct wcd9xxx_mbhc *mbhc,
- bool is_mb_trigger)
-{
- /* Cancel possibly running hs_detect_work */
- wcd9xxx_cancel_hs_detect_plug(mbhc, &mbhc->correct_plug_swch);
-
- if (is_mb_trigger) {
- pr_debug("%s: Waiting for Headphone left trigger\n", __func__);
- wcd9xxx_enable_hs_detect(mbhc, 1, MBHC_USE_HPHL_TRIGGER, false);
- } else {
- pr_debug("%s: HPHL trigger received, detecting plug type\n",
- __func__);
- wcd9xxx_mbhc_detect_plug_type(mbhc);
- }
-}
-
-static irqreturn_t wcd9xxx_hs_remove_irq(int irq, void *data)
-{
- struct wcd9xxx_mbhc *mbhc = data;
-
- pr_debug("%s: enter, removal interrupt\n", __func__);
- WCD9XXX_BCL_LOCK(mbhc->resmgr);
- /*
- * While we don't know whether MIC is there or not, let the resmgr know
- * so micbias can be disabled temporarily
- */
- if (mbhc->current_plug == PLUG_TYPE_HEADSET) {
- wcd9xxx_resmgr_cond_update_cond(mbhc->resmgr,
- WCD9XXX_COND_HPH_MIC, false);
- wcd9xxx_resmgr_cond_update_cond(mbhc->resmgr,
- WCD9XXX_COND_HPH, false);
- } else if (mbhc->current_plug == PLUG_TYPE_HEADPHONE) {
- wcd9xxx_resmgr_cond_update_cond(mbhc->resmgr,
- WCD9XXX_COND_HPH, false);
- }
-
- if (mbhc->mbhc_cfg->detect_extn_cable &&
- !wcd9xxx_swch_level_remove(mbhc))
- wcd9xxx_hs_remove_irq_noswch(mbhc);
- else
- wcd9xxx_hs_remove_irq_swch(mbhc);
-
- if (mbhc->current_plug == PLUG_TYPE_HEADSET) {
- wcd9xxx_resmgr_cond_update_cond(mbhc->resmgr,
- WCD9XXX_COND_HPH, true);
- wcd9xxx_resmgr_cond_update_cond(mbhc->resmgr,
- WCD9XXX_COND_HPH_MIC, true);
- } else if (mbhc->current_plug == PLUG_TYPE_HEADPHONE) {
- wcd9xxx_resmgr_cond_update_cond(mbhc->resmgr,
- WCD9XXX_COND_HPH, true);
- }
- WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t wcd9xxx_hs_insert_irq(int irq, void *data)
-{
- bool is_mb_trigger, is_removal;
- struct wcd9xxx_mbhc *mbhc = data;
- struct snd_soc_codec *codec = mbhc->codec;
-
- pr_debug("%s: enter\n", __func__);
- WCD9XXX_BCL_LOCK(mbhc->resmgr);
- wcd9xxx_disable_irq(mbhc->resmgr->core_res, mbhc->intr_ids->insertion);
-
- is_mb_trigger = !!(snd_soc_read(codec, mbhc->mbhc_bias_regs.mbhc_reg) &
- 0x10);
- is_removal = !!(snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_INT_CTL) & 0x02);
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_INT_CTL, 0x03, 0x00);
-
- /* Turn off both HPH and MIC line schmitt triggers */
- snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.mbhc_reg, 0x90, 0x00);
- snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x13, 0x00);
- snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 0x01, 0x00);
-
- if (mbhc->mbhc_cfg->detect_extn_cable &&
- mbhc->current_plug == PLUG_TYPE_HIGH_HPH)
- wcd9xxx_hs_insert_irq_extn(mbhc, is_mb_trigger);
- else
- wcd9xxx_hs_insert_irq_swch(mbhc, is_removal);
-
- WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
- return IRQ_HANDLED;
-}
-
-static void wcd9xxx_btn_lpress_fn(struct work_struct *work)
-{
- struct delayed_work *dwork;
- short bias_value;
- int dce_mv, sta_mv;
- struct wcd9xxx_mbhc *mbhc;
-
- pr_debug("%s:\n", __func__);
-
- dwork = to_delayed_work(work);
- mbhc = container_of(dwork, struct wcd9xxx_mbhc, mbhc_btn_dwork);
-
- bias_value = wcd9xxx_read_sta_result(mbhc->codec);
- sta_mv = wcd9xxx_codec_sta_dce_v(mbhc, 0, bias_value);
-
- bias_value = wcd9xxx_read_dce_result(mbhc->codec);
- dce_mv = wcd9xxx_codec_sta_dce_v(mbhc, 1, bias_value);
- pr_debug("%s: STA: %d, DCE: %d\n", __func__, sta_mv, dce_mv);
-
- pr_debug("%s: Reporting long button press event\n", __func__);
- wcd9xxx_jack_report(mbhc, &mbhc->button_jack, mbhc->buttons_pressed,
- mbhc->buttons_pressed);
-
- pr_debug("%s: leave\n", __func__);
- wcd9xxx_unlock_sleep(mbhc->resmgr->core_res);
-}
-
-static void wcd9xxx_mbhc_insert_work(struct work_struct *work)
-{
- struct delayed_work *dwork;
- struct wcd9xxx_mbhc *mbhc;
- struct snd_soc_codec *codec;
- struct wcd9xxx_core_resource *core_res;
-
- dwork = to_delayed_work(work);
- mbhc = container_of(dwork, struct wcd9xxx_mbhc, mbhc_insert_dwork);
- codec = mbhc->codec;
- core_res = mbhc->resmgr->core_res;
-
- pr_debug("%s:\n", __func__);
-
- /* Turn off both HPH and MIC line schmitt triggers */
- snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.mbhc_reg, 0x90, 0x00);
- snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x13, 0x00);
- snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 0x01, 0x00);
- wcd9xxx_disable_irq_sync(core_res, mbhc->intr_ids->insertion);
- wcd9xxx_mbhc_detect_plug_type(mbhc);
- wcd9xxx_unlock_sleep(core_res);
-}
-
-static bool wcd9xxx_mbhc_fw_validate(const void *data, size_t size)
-{
- u32 cfg_offset;
- struct wcd9xxx_mbhc_imped_detect_cfg *imped_cfg;
- struct wcd9xxx_mbhc_btn_detect_cfg *btn_cfg;
- struct firmware_cal fw;
-
- fw.data = (void *)data;
- fw.size = size;
-
- if (fw.size < WCD9XXX_MBHC_CAL_MIN_SIZE)
- return false;
-
- /*
- * Previous check guarantees that there is enough fw data up
- * to num_btn
- */
- btn_cfg = WCD9XXX_MBHC_CAL_BTN_DET_PTR(fw.data);
- cfg_offset = (u32) ((void *) btn_cfg - (void *) fw.data);
- if (fw.size < (cfg_offset + WCD9XXX_MBHC_CAL_BTN_SZ(btn_cfg)))
- return false;
-
- /*
- * Previous check guarantees that there is enough fw data up
- * to start of impedance detection configuration
- */
- imped_cfg = WCD9XXX_MBHC_CAL_IMPED_DET_PTR(fw.data);
- cfg_offset = (u32) ((void *) imped_cfg - (void *) fw.data);
-
- if (fw.size < (cfg_offset + WCD9XXX_MBHC_CAL_IMPED_MIN_SZ))
- return false;
-
- if (fw.size < (cfg_offset + WCD9XXX_MBHC_CAL_IMPED_SZ(imped_cfg)))
- return false;
-
- return true;
-}
-
-static u16 wcd9xxx_codec_v_sta_dce(struct wcd9xxx_mbhc *mbhc,
- enum meas_type dce, s16 vin_mv,
- bool cs_enable)
-{
- s16 diff, zero;
- u32 mb_mv, in;
- u16 value;
- s16 dce_z;
-
- mb_mv = mbhc->mbhc_data.micb_mv;
- dce_z = mbhc->mbhc_data.dce_z;
-
- if (mb_mv == 0) {
- pr_err("%s: Mic Bias voltage is set to zero\n", __func__);
- return -EINVAL;
- }
- if (cs_enable) {
- mb_mv = VDDIO_MICBIAS_MV;
- dce_z = mbhc->mbhc_data.dce_nsc_cs_z;
- }
-
- if (dce) {
- diff = (mbhc->mbhc_data.dce_mb) - (dce_z);
- zero = (dce_z);
- } else {
- diff = (mbhc->mbhc_data.sta_mb) - (mbhc->mbhc_data.sta_z);
- zero = (mbhc->mbhc_data.sta_z);
- }
- in = (u32) diff * vin_mv;
-
- value = (u16) (in / mb_mv) + zero;
- return value;
-}
-
-static void wcd9xxx_mbhc_calc_thres(struct wcd9xxx_mbhc *mbhc)
-{
- struct snd_soc_codec *codec;
- s16 adj_v_hs_max;
- s16 btn_mv = 0, btn_mv_sta[MBHC_V_IDX_NUM], btn_mv_dce[MBHC_V_IDX_NUM];
- struct wcd9xxx_mbhc_btn_detect_cfg *btn_det;
- struct wcd9xxx_mbhc_plug_type_cfg *plug_type;
- u16 *btn_high;
- int i;
-
- pr_debug("%s: enter\n", __func__);
- codec = mbhc->codec;
- btn_det = WCD9XXX_MBHC_CAL_BTN_DET_PTR(mbhc->mbhc_cfg->calibration);
- plug_type = WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(mbhc->mbhc_cfg->calibration);
-
- mbhc->mbhc_data.v_ins_hu[MBHC_V_IDX_CFILT] =
- wcd9xxx_codec_v_sta_dce(mbhc, STA, plug_type->v_hs_max, false);
- mbhc->mbhc_data.v_ins_h[MBHC_V_IDX_CFILT] =
- wcd9xxx_codec_v_sta_dce(mbhc, DCE, plug_type->v_hs_max, false);
-
- mbhc->mbhc_data.v_inval_ins_low = FAKE_INS_LOW;
- mbhc->mbhc_data.v_inval_ins_high = FAKE_INS_HIGH;
-
- if (mbhc->mbhc_data.micb_mv != VDDIO_MICBIAS_MV) {
- adj_v_hs_max = scale_v_micb_vddio(mbhc, plug_type->v_hs_max,
- true);
- mbhc->mbhc_data.v_ins_hu[MBHC_V_IDX_VDDIO] =
- wcd9xxx_codec_v_sta_dce(mbhc, STA, adj_v_hs_max, false);
- mbhc->mbhc_data.v_ins_h[MBHC_V_IDX_VDDIO] =
- wcd9xxx_codec_v_sta_dce(mbhc, DCE, adj_v_hs_max, false);
- mbhc->mbhc_data.v_inval_ins_low =
- scale_v_micb_vddio(mbhc, mbhc->mbhc_data.v_inval_ins_low,
- false);
- mbhc->mbhc_data.v_inval_ins_high =
- scale_v_micb_vddio(mbhc, mbhc->mbhc_data.v_inval_ins_high,
- false);
- }
- mbhc->mbhc_data.v_cs_ins_h = wcd9xxx_codec_v_sta_dce(mbhc, DCE,
- WCD9XXX_V_CS_HS_MAX,
- true);
- pr_debug("%s: v_ins_h for current source: 0x%x\n", __func__,
- mbhc->mbhc_data.v_cs_ins_h);
-
- btn_high = wcd9xxx_mbhc_cal_btn_det_mp(btn_det,
- MBHC_BTN_DET_V_BTN_HIGH);
- for (i = 0; i < btn_det->num_btn; i++)
- btn_mv = btn_high[i] > btn_mv ? btn_high[i] : btn_mv;
-
- btn_mv_sta[MBHC_V_IDX_CFILT] = btn_mv + btn_det->v_btn_press_delta_sta;
- btn_mv_dce[MBHC_V_IDX_CFILT] = btn_mv + btn_det->v_btn_press_delta_cic;
- btn_mv_sta[MBHC_V_IDX_VDDIO] =
- scale_v_micb_vddio(mbhc, btn_mv_sta[MBHC_V_IDX_CFILT], true);
- btn_mv_dce[MBHC_V_IDX_VDDIO] =
- scale_v_micb_vddio(mbhc, btn_mv_dce[MBHC_V_IDX_CFILT], true);
-
- mbhc->mbhc_data.v_b1_hu[MBHC_V_IDX_CFILT] =
- wcd9xxx_codec_v_sta_dce(mbhc, STA, btn_mv_sta[MBHC_V_IDX_CFILT],
- false);
- mbhc->mbhc_data.v_b1_h[MBHC_V_IDX_CFILT] =
- wcd9xxx_codec_v_sta_dce(mbhc, DCE, btn_mv_dce[MBHC_V_IDX_CFILT],
- false);
- mbhc->mbhc_data.v_b1_hu[MBHC_V_IDX_VDDIO] =
- wcd9xxx_codec_v_sta_dce(mbhc, STA, btn_mv_sta[MBHC_V_IDX_VDDIO],
- false);
- mbhc->mbhc_data.v_b1_h[MBHC_V_IDX_VDDIO] =
- wcd9xxx_codec_v_sta_dce(mbhc, DCE, btn_mv_dce[MBHC_V_IDX_VDDIO],
- false);
-
- mbhc->mbhc_data.v_brh[MBHC_V_IDX_CFILT] =
- mbhc->mbhc_data.v_b1_h[MBHC_V_IDX_CFILT];
- mbhc->mbhc_data.v_brh[MBHC_V_IDX_VDDIO] =
- mbhc->mbhc_data.v_b1_h[MBHC_V_IDX_VDDIO];
-
- mbhc->mbhc_data.v_brl = BUTTON_MIN;
-
- mbhc->mbhc_data.v_no_mic =
- wcd9xxx_codec_v_sta_dce(mbhc, STA, plug_type->v_no_mic, false);
- pr_debug("%s: leave\n", __func__);
-}
-
-static void wcd9xxx_onoff_ext_mclk(struct wcd9xxx_mbhc *mbhc, bool on)
-{
- /*
- * XXX: {codec}_mclk_enable holds WCD9XXX_BCL_LOCK,
- * therefore wcd9xxx_onoff_ext_mclk caller SHOULDN'T hold
- * WCD9XXX_BCL_LOCK when it calls wcd9xxx_onoff_ext_mclk()
- */
- if (mbhc && mbhc->mbhc_cfg && mbhc->mbhc_cfg->mclk_cb_fn)
- mbhc->mbhc_cfg->mclk_cb_fn(mbhc->codec, on, false);
-}
-
-/*
- * Mic Bias Enable Decision
- * Return true if high_hph_cnt is a power of 2 (!= 2)
- * otherwise return false
- */
-static bool wcd9xxx_mbhc_enable_mb_decision(int high_hph_cnt)
-{
- return (high_hph_cnt > 2) && !(high_hph_cnt & (high_hph_cnt - 1));
-}
-
-static inline void wcd9xxx_handle_gnd_mic_swap(struct wcd9xxx_mbhc *mbhc,
- int pt_gnd_mic_swap_cnt,
- enum wcd9xxx_mbhc_plug_type plug_type)
-{
- if (mbhc->mbhc_cfg->swap_gnd_mic &&
- (pt_gnd_mic_swap_cnt == GND_MIC_SWAP_THRESHOLD)) {
- /*
- * if switch is toggled, check again,
- * otherwise report unsupported plug
- */
- mbhc->mbhc_cfg->swap_gnd_mic(mbhc->codec);
- } else if (pt_gnd_mic_swap_cnt >= GND_MIC_SWAP_THRESHOLD) {
- /* Report UNSUPPORTED plug
- * and continue polling
- */
- WCD9XXX_BCL_LOCK(mbhc->resmgr);
- if (!mbhc->mbhc_cfg->detect_extn_cable) {
- if (mbhc->current_plug == PLUG_TYPE_HEADPHONE)
- wcd9xxx_report_plug(mbhc, 0,
- SND_JACK_HEADPHONE);
- else if (mbhc->current_plug == PLUG_TYPE_HEADSET)
- wcd9xxx_report_plug(mbhc, 0,
- SND_JACK_HEADSET);
- }
- if (mbhc->current_plug != plug_type)
- wcd9xxx_report_plug(mbhc, 1,
- SND_JACK_UNSUPPORTED);
- WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
- }
-}
-
-static void wcd9xxx_correct_swch_plug(struct work_struct *work)
-{
- struct wcd9xxx_mbhc *mbhc;
- struct snd_soc_codec *codec;
- enum wcd9xxx_mbhc_plug_type plug_type = PLUG_TYPE_INVALID;
- unsigned long timeout;
- int retry = 0, pt_gnd_mic_swap_cnt = 0;
- int highhph_cnt = 0;
- bool correction = false;
- bool current_source_enable;
- bool wrk_complete = true, highhph = false;
-
- pr_debug("%s: enter\n", __func__);
-
- mbhc = container_of(work, struct wcd9xxx_mbhc, correct_plug_swch);
- codec = mbhc->codec;
-
- current_source_enable = (((mbhc->mbhc_cfg->cs_enable_flags &
- (1 << MBHC_CS_ENABLE_POLLING)) != 0) &&
- (!(snd_soc_read(codec,
- mbhc->mbhc_bias_regs.ctl_reg) & 0x80)));
-
- wcd9xxx_onoff_ext_mclk(mbhc, true);
-
- /*
- * Keep override on during entire plug type correction work.
- *
- * This is okay under the assumption that any switch irqs which use
- * MBHC block cancel and sync this work so override is off again
- * prior to switch interrupt handler's MBHC block usage.
- * Also while this correction work is running, we can guarantee
- * DAPM doesn't use any MBHC block as this work only runs with
- * headphone detection.
- */
- if (current_source_enable) {
- WCD9XXX_BCL_LOCK(mbhc->resmgr);
- wcd9xxx_turn_onoff_current_source(mbhc, &mbhc->mbhc_bias_regs,
- true, false);
- WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
- } else {
- wcd9xxx_turn_onoff_override(mbhc, true);
- }
-
- timeout = jiffies + msecs_to_jiffies(HS_DETECT_PLUG_TIME_MS);
- while (!time_after(jiffies, timeout)) {
- ++retry;
-
- /* Make sure any pending memory read is completed, before
- * hs_detect_work_stop value is read.
- */
- rmb();
- if (mbhc->hs_detect_work_stop) {
- wrk_complete = false;
- pr_debug("%s: stop requested\n", __func__);
- break;
- }
-
- msleep(HS_DETECT_PLUG_INERVAL_MS);
- if (wcd9xxx_swch_level_remove(mbhc)) {
- wrk_complete = false;
- pr_debug("%s: Switch level is low\n", __func__);
- break;
- }
-
- /* can race with removal interrupt */
- WCD9XXX_BCL_LOCK(mbhc->resmgr);
- if (current_source_enable)
- plug_type = wcd9xxx_codec_cs_get_plug_type(mbhc,
- highhph);
- else
- plug_type = wcd9xxx_codec_get_plug_type(mbhc, true);
- WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
-
- pr_debug("%s: attempt(%d) current_plug(%d) new_plug(%d)\n",
- __func__, retry, mbhc->current_plug, plug_type);
-
- highhph_cnt = (plug_type == PLUG_TYPE_HIGH_HPH) ?
- (highhph_cnt + 1) :
- 0;
- highhph = wcd9xxx_mbhc_enable_mb_decision(highhph_cnt);
- if (plug_type == PLUG_TYPE_INVALID) {
- pr_debug("Invalid plug in attempt # %d\n", retry);
- if (!mbhc->mbhc_cfg->detect_extn_cable &&
- retry == NUM_ATTEMPTS_TO_REPORT &&
- mbhc->current_plug == PLUG_TYPE_NONE) {
- WCD9XXX_BCL_LOCK(mbhc->resmgr);
- wcd9xxx_report_plug(mbhc, 1,
- SND_JACK_HEADPHONE);
- WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
- }
- } else if (plug_type == PLUG_TYPE_HEADPHONE) {
- pr_debug("Good headphone detected, continue polling\n");
- WCD9XXX_BCL_LOCK(mbhc->resmgr);
- if (mbhc->mbhc_cfg->detect_extn_cable) {
- if (mbhc->current_plug != plug_type)
- wcd9xxx_report_plug(mbhc, 1,
- SND_JACK_HEADPHONE);
- } else if (mbhc->current_plug == PLUG_TYPE_NONE) {
- wcd9xxx_report_plug(mbhc, 1,
- SND_JACK_HEADPHONE);
- }
- WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
- } else if (plug_type == PLUG_TYPE_HIGH_HPH) {
- pr_debug("%s: High HPH detected, continue polling\n",
- __func__);
- WCD9XXX_BCL_LOCK(mbhc->resmgr);
- if (mbhc->mbhc_cfg->detect_extn_cable) {
- if (mbhc->current_plug != plug_type)
- wcd9xxx_report_plug(mbhc, 1,
- SND_JACK_LINEOUT);
- } else if (mbhc->current_plug == PLUG_TYPE_NONE) {
- wcd9xxx_report_plug(mbhc, 1,
- SND_JACK_HEADPHONE);
- }
- WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
- } else {
- if (plug_type == PLUG_TYPE_GND_MIC_SWAP) {
- pt_gnd_mic_swap_cnt++;
- if (pt_gnd_mic_swap_cnt >=
- GND_MIC_SWAP_THRESHOLD)
- wcd9xxx_handle_gnd_mic_swap(mbhc,
- pt_gnd_mic_swap_cnt,
- plug_type);
- pr_debug("%s: unsupported HS detected, continue polling\n",
- __func__);
- continue;
- } else {
- pt_gnd_mic_swap_cnt = 0;
-
- WCD9XXX_BCL_LOCK(mbhc->resmgr);
- /* Turn off override/current source */
- if (current_source_enable)
- wcd9xxx_turn_onoff_current_source(mbhc,
- &mbhc->mbhc_bias_regs,
- false, false);
- else
- wcd9xxx_turn_onoff_override(mbhc,
- false);
- /*
- * The valid plug also includes
- * PLUG_TYPE_GND_MIC_SWAP
- */
- wcd9xxx_find_plug_and_report(mbhc, plug_type);
- WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
- pr_debug("Attempt %d found correct plug %d\n",
- retry,
- plug_type);
- correction = true;
- }
- break;
- }
- }
-
- highhph = false;
- if (wrk_complete && plug_type == PLUG_TYPE_HIGH_HPH) {
- pr_debug("%s: polling is done, still HPH, so enabling MIC trigger\n",
- __func__);
- WCD9XXX_BCL_LOCK(mbhc->resmgr);
- wcd9xxx_find_plug_and_report(mbhc, plug_type);
- highhph = true;
- WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
- }
-
- if (plug_type == PLUG_TYPE_HEADPHONE) {
- if (mbhc->mbhc_cb && mbhc->mbhc_cb->hph_auto_pulldown_ctrl)
- mbhc->mbhc_cb->hph_auto_pulldown_ctrl(codec, true);
- }
-
- if (!correction && current_source_enable) {
- WCD9XXX_BCL_LOCK(mbhc->resmgr);
- wcd9xxx_turn_onoff_current_source(mbhc, &mbhc->mbhc_bias_regs,
- false, highhph);
- WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
- } else if (!correction) {
- wcd9xxx_turn_onoff_override(mbhc, false);
- }
-
- wcd9xxx_onoff_ext_mclk(mbhc, false);
-
- if (mbhc->mbhc_cfg->detect_extn_cable) {
- WCD9XXX_BCL_LOCK(mbhc->resmgr);
- if ((mbhc->current_plug == PLUG_TYPE_HEADPHONE &&
- wrk_complete) ||
- mbhc->current_plug == PLUG_TYPE_GND_MIC_SWAP ||
- mbhc->current_plug == PLUG_TYPE_INVALID ||
- (plug_type == PLUG_TYPE_INVALID && wrk_complete)) {
- /* Enable removal detection */
- wcd9xxx_cleanup_hs_polling(mbhc);
- wcd9xxx_enable_hs_detect(mbhc, 0, 0, false);
- }
- WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
- }
- pr_debug("%s: leave current_plug(%d)\n", __func__, mbhc->current_plug);
- /* unlock sleep */
- wcd9xxx_unlock_sleep(mbhc->resmgr->core_res);
-}
-
-static void wcd9xxx_swch_irq_handler(struct wcd9xxx_mbhc *mbhc)
-{
- bool insert;
- bool is_removed = false;
- struct snd_soc_codec *codec = mbhc->codec;
-
- pr_debug("%s: enter\n", __func__);
-
- mbhc->in_swch_irq_handler = true;
- /* Wait here for debounce time */
- usleep_range(SWCH_IRQ_DEBOUNCE_TIME_US, SWCH_IRQ_DEBOUNCE_TIME_US +
- WCD9XXX_USLEEP_RANGE_MARGIN_US);
-
- WCD9XXX_BCL_LOCK(mbhc->resmgr);
-
- /* cancel pending button press */
- if (wcd9xxx_cancel_btn_work(mbhc))
- pr_debug("%s: button press is canceled\n", __func__);
-
- insert = !wcd9xxx_swch_level_remove(mbhc);
- pr_debug("%s: Current plug type %d, insert %d\n", __func__,
- mbhc->current_plug, insert);
- if ((mbhc->current_plug == PLUG_TYPE_NONE) && insert) {
-
- mbhc->lpi_enabled = false;
-
- /* Make sure mbhc state update complete before cancel detect
- * plug.
- */
- wmb();
- /* cancel detect plug */
- wcd9xxx_cancel_hs_detect_plug(mbhc,
- &mbhc->correct_plug_swch);
-
- if ((mbhc->current_plug != PLUG_TYPE_NONE) &&
- (mbhc->current_plug != PLUG_TYPE_HIGH_HPH) &&
- !(snd_soc_read(codec, WCD9XXX_A_MBHC_INSERT_DETECT) &
- (1 << 1))) {
- pr_debug("%s: current plug: %d\n", __func__,
- mbhc->current_plug);
- goto exit;
- }
-
- /* Disable Mic Bias pull down and HPH Switch to GND */
- snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 0x01,
- 0x00);
- snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x01, 0x00);
- wcd9xxx_mbhc_detect_plug_type(mbhc);
- } else if ((mbhc->current_plug != PLUG_TYPE_NONE) && !insert) {
- mbhc->lpi_enabled = false;
-
- /* Make sure mbhc state update complete before cancel detect
- * plug.
- */
- wmb();
- /* cancel detect plug */
- wcd9xxx_cancel_hs_detect_plug(mbhc,
- &mbhc->correct_plug_swch);
-
- if (mbhc->current_plug == PLUG_TYPE_HEADPHONE) {
- wcd9xxx_report_plug(mbhc, 0, SND_JACK_HEADPHONE);
- is_removed = true;
- } else if (mbhc->current_plug == PLUG_TYPE_GND_MIC_SWAP) {
- wcd9xxx_report_plug(mbhc, 0, SND_JACK_UNSUPPORTED);
- is_removed = true;
- } else if (mbhc->current_plug == PLUG_TYPE_HEADSET) {
- wcd9xxx_pause_hs_polling(mbhc);
- wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc, false);
- wcd9xxx_cleanup_hs_polling(mbhc);
- wcd9xxx_report_plug(mbhc, 0, SND_JACK_HEADSET);
- is_removed = true;
- } else if (mbhc->current_plug == PLUG_TYPE_HIGH_HPH) {
- wcd9xxx_report_plug(mbhc, 0, SND_JACK_LINEOUT);
- is_removed = true;
- } else if (mbhc->current_plug == PLUG_TYPE_ANC_HEADPHONE) {
- wcd9xxx_pause_hs_polling(mbhc);
- wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc, false);
- wcd9xxx_cleanup_hs_polling(mbhc);
- wcd9xxx_report_plug(mbhc, 0, SND_JACK_ANC_HEADPHONE);
- is_removed = true;
- }
-
- if (is_removed) {
- snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1,
- 0x00);
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL,
- 0x02, 0x00);
-
- /* Enable Mic Bias pull down and HPH Switch to GND */
- snd_soc_update_bits(codec,
- mbhc->mbhc_bias_regs.ctl_reg, 0x01,
- 0x01);
- snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x01,
- 0x01);
- /* Make sure mic trigger is turned off */
- snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg,
- 0x01, 0x01);
- snd_soc_update_bits(codec,
- mbhc->mbhc_bias_regs.mbhc_reg,
- 0x90, 0x00);
- /* Reset MBHC State Machine */
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL,
- 0x08, 0x08);
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL,
- 0x08, 0x00);
- /* Turn off override */
- wcd9xxx_turn_onoff_override(mbhc, false);
- }
- }
-exit:
- mbhc->in_swch_irq_handler = false;
- WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
- pr_debug("%s: leave\n", __func__);
-}
-
-static irqreturn_t wcd9xxx_mech_plug_detect_irq(int irq, void *data)
-{
- int r = IRQ_HANDLED;
- struct wcd9xxx_mbhc *mbhc = data;
-
- pr_debug("%s: enter\n", __func__);
- if (unlikely(wcd9xxx_lock_sleep(mbhc->resmgr->core_res) == false)) {
- pr_warn("%s: failed to hold suspend\n", __func__);
- r = IRQ_NONE;
- } else {
- /* Call handler */
- wcd9xxx_swch_irq_handler(mbhc);
- wcd9xxx_unlock_sleep(mbhc->resmgr->core_res);
- }
-
- pr_debug("%s: leave %d\n", __func__, r);
- return r;
-}
-
-static int wcd9xxx_is_false_press(struct wcd9xxx_mbhc *mbhc)
-{
- s16 mb_v;
- int i = 0;
- int r = 0;
- const s16 v_ins_hu =
- wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_INS_HU);
- const s16 v_ins_h =
- wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_INS_H);
- const s16 v_b1_hu =
- wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_B1_HU);
- const s16 v_b1_h =
- wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_B1_H);
- const unsigned long timeout =
- jiffies + msecs_to_jiffies(BTN_RELEASE_DEBOUNCE_TIME_MS);
-
- while (time_before(jiffies, timeout)) {
- /*
- * This function needs to run measurements just few times during
- * release debounce time. Make 1ms interval to avoid
- * unnecessary excessive measurements.
- */
- usleep_range(1000, 1000 + WCD9XXX_USLEEP_RANGE_MARGIN_US);
- if (i == 0) {
- mb_v = wcd9xxx_codec_sta_dce(mbhc, 0, true);
- pr_debug("%s: STA[0]: %d,%d\n", __func__, mb_v,
- wcd9xxx_codec_sta_dce_v(mbhc, 0, mb_v));
- if (mb_v < v_b1_hu || mb_v > v_ins_hu) {
- r = 1;
- break;
- }
- } else {
- mb_v = wcd9xxx_codec_sta_dce(mbhc, 1, true);
- pr_debug("%s: DCE[%d]: %d,%d\n", __func__, i, mb_v,
- wcd9xxx_codec_sta_dce_v(mbhc, 1, mb_v));
- if (mb_v < v_b1_h || mb_v > v_ins_h) {
- r = 1;
- break;
- }
- }
- i++;
- }
-
- return r;
-}
-
-/* called under codec_resource_lock acquisition */
-static int wcd9xxx_determine_button(const struct wcd9xxx_mbhc *mbhc,
- const s32 micmv)
-{
- s16 *v_btn_low, *v_btn_high;
- struct wcd9xxx_mbhc_btn_detect_cfg *btn_det;
- int i, btn = -1;
-
- btn_det = WCD9XXX_MBHC_CAL_BTN_DET_PTR(mbhc->mbhc_cfg->calibration);
- v_btn_low = wcd9xxx_mbhc_cal_btn_det_mp(btn_det,
- MBHC_BTN_DET_V_BTN_LOW);
- v_btn_high = wcd9xxx_mbhc_cal_btn_det_mp(btn_det,
- MBHC_BTN_DET_V_BTN_HIGH);
-
- for (i = 0; i < btn_det->num_btn; i++) {
- if ((v_btn_low[i] <= micmv) && (v_btn_high[i] >= micmv)) {
- btn = i;
- break;
- }
- }
-
- if (btn == -1)
- pr_debug("%s: couldn't find button number for mic mv %d\n",
- __func__, micmv);
-
- return btn;
-}
-
-static int wcd9xxx_get_button_mask(const int btn)
-{
- int mask = 0;
-
- switch (btn) {
- case 0:
- mask = SND_JACK_BTN_0;
- break;
- case 1:
- mask = SND_JACK_BTN_1;
- break;
- case 2:
- mask = SND_JACK_BTN_2;
- break;
- case 3:
- mask = SND_JACK_BTN_3;
- break;
- case 4:
- mask = SND_JACK_BTN_4;
- break;
- case 5:
- mask = SND_JACK_BTN_5;
- break;
- }
- return mask;
-}
-
-static void wcd9xxx_get_z(struct wcd9xxx_mbhc *mbhc, s16 *dce_z, s16 *sta_z,
- struct mbhc_micbias_regs *micb_regs,
- bool norel_detection)
-{
- s16 reg0, reg1;
- int change;
- struct snd_soc_codec *codec = mbhc->codec;
-
- WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
- /* Pull down micbias to ground and disconnect vddio switch */
- reg0 = snd_soc_read(codec, micb_regs->ctl_reg);
- snd_soc_update_bits(codec, micb_regs->ctl_reg, 0x81, 0x1);
- reg1 = snd_soc_read(codec, micb_regs->mbhc_reg);
- snd_soc_update_bits(codec, micb_regs->mbhc_reg, 1 << 7, 0);
-
- /* Disconnect override from micbias */
- change = snd_soc_update_bits(codec, WCD9XXX_A_MAD_ANA_CTRL, 1 << 4,
- 1 << 0);
- usleep_range(1000, 1000 + 1000);
- if (sta_z) {
- *sta_z = wcd9xxx_codec_sta_dce(mbhc, 0, norel_detection);
- pr_debug("%s: sta_z 0x%x\n", __func__, *sta_z & 0xFFFF);
- }
- if (dce_z) {
- *dce_z = wcd9xxx_codec_sta_dce(mbhc, 1, norel_detection);
- pr_debug("%s: dce_z 0x%x\n", __func__, *dce_z & 0xFFFF);
- }
-
- /* Connect override from micbias */
- if (change)
- snd_soc_update_bits(codec, WCD9XXX_A_MAD_ANA_CTRL, 1 << 4,
- 1 << 4);
- /* Disable pull down micbias to ground */
- snd_soc_write(codec, micb_regs->mbhc_reg, reg1);
- snd_soc_write(codec, micb_regs->ctl_reg, reg0);
-}
-
-/*
- * This function recalibrates dce_z and sta_z parameters.
- * No release detection will be false when this function is
- * used.
- */
-void wcd9xxx_update_z(struct wcd9xxx_mbhc *mbhc)
-{
- const u16 sta_z = mbhc->mbhc_data.sta_z;
- const u16 dce_z = mbhc->mbhc_data.dce_z;
-
- wcd9xxx_get_z(mbhc, &mbhc->mbhc_data.dce_z, &mbhc->mbhc_data.sta_z,
- &mbhc->mbhc_bias_regs, false);
- pr_debug("%s: sta_z 0x%x,dce_z 0x%x -> sta_z 0x%x,dce_z 0x%x\n",
- __func__, sta_z & 0xFFFF, dce_z & 0xFFFF,
- mbhc->mbhc_data.sta_z & 0xFFFF,
- mbhc->mbhc_data.dce_z & 0xFFFF);
-
- wcd9xxx_mbhc_calc_thres(mbhc);
- wcd9xxx_calibrate_hs_polling(mbhc);
-}
-
-/*
- * wcd9xxx_update_rel_threshold : update mbhc release upper bound threshold
- * to ceilmv + buffer
- */
-static int wcd9xxx_update_rel_threshold(struct wcd9xxx_mbhc *mbhc, int ceilmv,
- bool vddio)
-{
- u16 v_brh, v_b1_hu;
- int mv;
- struct wcd9xxx_mbhc_btn_detect_cfg *btn_det;
- void *calibration = mbhc->mbhc_cfg->calibration;
- struct snd_soc_codec *codec = mbhc->codec;
-
- btn_det = WCD9XXX_MBHC_CAL_BTN_DET_PTR(calibration);
- mv = ceilmv + btn_det->v_btn_press_delta_cic;
- if (vddio)
- mv = scale_v_micb_vddio(mbhc, mv, true);
- pr_debug("%s: reprogram vb1hu/vbrh to %dmv\n", __func__, mv);
-
- if (mbhc->mbhc_state != MBHC_STATE_POTENTIAL_RECOVERY) {
- /*
- * update LSB first so mbhc hardware block
- * doesn't see too low value.
- */
- v_b1_hu = wcd9xxx_codec_v_sta_dce(mbhc, STA, mv, false);
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B3_CTL, v_b1_hu &
- 0xFF);
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B4_CTL,
- (v_b1_hu >> 8) & 0xFF);
- v_brh = wcd9xxx_codec_v_sta_dce(mbhc, DCE, mv, false);
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B9_CTL, v_brh &
- 0xFF);
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B10_CTL,
- (v_brh >> 8) & 0xFF);
- }
- return 0;
-}
-
-irqreturn_t wcd9xxx_dce_handler(int irq, void *data)
-{
- int i, mask;
- bool vddio;
- u8 mbhc_status;
- s16 dce_z, sta_z;
- s32 stamv, stamv_s;
- s16 *v_btn_high;
- struct wcd9xxx_mbhc_btn_detect_cfg *btn_det;
- int btn = -1, meas = 0;
- struct wcd9xxx_mbhc *mbhc = data;
- const struct wcd9xxx_mbhc_btn_detect_cfg *d =
- WCD9XXX_MBHC_CAL_BTN_DET_PTR(mbhc->mbhc_cfg->calibration);
- short btnmeas[d->n_btn_meas + 1];
- short dce[d->n_btn_meas + 1], sta;
- s32 mv[d->n_btn_meas + 1], mv_s[d->n_btn_meas + 1];
- struct snd_soc_codec *codec = mbhc->codec;
- struct wcd9xxx_core_resource *core_res = mbhc->resmgr->core_res;
- int n_btn_meas = d->n_btn_meas;
- void *calibration = mbhc->mbhc_cfg->calibration;
-
- pr_debug("%s: enter\n", __func__);
-
- WCD9XXX_BCL_LOCK(mbhc->resmgr);
- mutex_lock(&mbhc->mbhc_lock);
- mbhc_status = snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B1_STATUS) & 0x3E;
-
- if (mbhc->mbhc_state == MBHC_STATE_POTENTIAL_RECOVERY) {
- pr_debug("%s: mbhc is being recovered, skip button press\n",
- __func__);
- goto done;
- }
-
- mbhc->mbhc_state = MBHC_STATE_POTENTIAL;
-
- if (!mbhc->polling_active) {
- pr_warn("%s: mbhc polling is not active, skip button press\n",
- __func__);
- goto done;
- }
-
- /* If switch nterrupt already kicked in, ignore button press */
- if (mbhc->in_swch_irq_handler) {
- pr_debug("%s: Swtich level changed, ignore button press\n",
- __func__);
- btn = -1;
- goto done;
- }
-
- /*
- * setup internal micbias if codec uses internal micbias for
- * headset detection
- */
- if (mbhc->mbhc_cfg->use_int_rbias) {
- if (mbhc->mbhc_cb && mbhc->mbhc_cb->setup_int_rbias)
- mbhc->mbhc_cb->setup_int_rbias(codec, true);
- else
- pr_err("%s: internal bias requested but codec did not provide callback\n",
- __func__);
- }
-
-
- /* Measure scaled HW DCE */
- vddio = (mbhc->mbhc_data.micb_mv != VDDIO_MICBIAS_MV &&
- mbhc->mbhc_micbias_switched);
-
- dce_z = mbhc->mbhc_data.dce_z;
- sta_z = mbhc->mbhc_data.sta_z;
-
- /* Measure scaled HW STA */
- dce[0] = wcd9xxx_read_dce_result(codec);
- sta = wcd9xxx_read_sta_result(codec);
- if (mbhc_status != STATUS_REL_DETECTION) {
- if (mbhc->mbhc_last_resume &&
- !time_after(jiffies, mbhc->mbhc_last_resume + HZ)) {
- pr_debug("%s: Button is released after resume\n",
- __func__);
- n_btn_meas = 0;
- } else {
- pr_debug("%s: Button is released without resume",
- __func__);
- if (mbhc->update_z) {
- wcd9xxx_update_z(mbhc);
- dce_z = mbhc->mbhc_data.dce_z;
- sta_z = mbhc->mbhc_data.sta_z;
- mbhc->update_z = true;
- }
- stamv = __wcd9xxx_codec_sta_dce_v(mbhc, 0, sta, sta_z,
- mbhc->mbhc_data.micb_mv);
- if (vddio)
- stamv_s = scale_v_micb_vddio(mbhc, stamv,
- false);
- else
- stamv_s = stamv;
- mv[0] = __wcd9xxx_codec_sta_dce_v(mbhc, 1, dce[0],
- dce_z, mbhc->mbhc_data.micb_mv);
- mv_s[0] = vddio ? scale_v_micb_vddio(mbhc, mv[0],
- false) : mv[0];
- btn = wcd9xxx_determine_button(mbhc, mv_s[0]);
- if (btn != wcd9xxx_determine_button(mbhc, stamv_s))
- btn = -1;
- goto done;
- }
- }
-
- for (meas = 1; ((d->n_btn_meas) && (meas < (d->n_btn_meas + 1)));
- meas++)
- dce[meas] = wcd9xxx_codec_sta_dce(mbhc, 1, false);
-
- if (mbhc->update_z) {
- wcd9xxx_update_z(mbhc);
- dce_z = mbhc->mbhc_data.dce_z;
- sta_z = mbhc->mbhc_data.sta_z;
- mbhc->update_z = true;
- }
-
- stamv = __wcd9xxx_codec_sta_dce_v(mbhc, 0, sta, sta_z,
- mbhc->mbhc_data.micb_mv);
- if (vddio)
- stamv_s = scale_v_micb_vddio(mbhc, stamv, false);
- else
- stamv_s = stamv;
- pr_debug("%s: Meas HW - STA 0x%x,%d,%d\n", __func__,
- sta & 0xFFFF, stamv, stamv_s);
-
- /* determine pressed button */
- mv[0] = __wcd9xxx_codec_sta_dce_v(mbhc, 1, dce[0], dce_z,
- mbhc->mbhc_data.micb_mv);
- mv_s[0] = vddio ? scale_v_micb_vddio(mbhc, mv[0], false) : mv[0];
- btnmeas[0] = wcd9xxx_determine_button(mbhc, mv_s[0]);
- pr_debug("%s: Meas HW - DCE 0x%x,%d,%d button %d\n", __func__,
- dce[0] & 0xFFFF, mv[0], mv_s[0], btnmeas[0]);
- if (n_btn_meas == 0)
- btn = btnmeas[0];
- for (meas = 1; (n_btn_meas && d->n_btn_meas &&
- (meas < (d->n_btn_meas + 1))); meas++) {
- mv[meas] = __wcd9xxx_codec_sta_dce_v(mbhc, 1, dce[meas], dce_z,
- mbhc->mbhc_data.micb_mv);
- mv_s[meas] = vddio ? scale_v_micb_vddio(mbhc, mv[meas], false) :
- mv[meas];
- btnmeas[meas] = wcd9xxx_determine_button(mbhc, mv_s[meas]);
- pr_debug("%s: Meas %d - DCE 0x%x,%d,%d button %d\n",
- __func__, meas, dce[meas] & 0xFFFF, mv[meas],
- mv_s[meas], btnmeas[meas]);
- /*
- * if large enough measurements are collected,
- * start to check if last all n_btn_con measurements were
- * in same button low/high range
- */
- if (meas + 1 >= d->n_btn_con) {
- for (i = 0; i < d->n_btn_con; i++)
- if ((btnmeas[meas] < 0) ||
- (btnmeas[meas] != btnmeas[meas - i]))
- break;
- if (i == d->n_btn_con) {
- /* button pressed */
- btn = btnmeas[meas];
- break;
- } else if ((n_btn_meas - meas) < (d->n_btn_con - 1)) {
- /*
- * if left measurements are less than n_btn_con,
- * it's impossible to find button number
- */
- break;
- }
- }
- }
-
- if (btn >= 0) {
- if (mbhc->in_swch_irq_handler) {
- pr_debug(
- "%s: Switch irq triggered, ignore button press\n",
- __func__);
- goto done;
- }
- btn_det = WCD9XXX_MBHC_CAL_BTN_DET_PTR(calibration);
- v_btn_high = wcd9xxx_mbhc_cal_btn_det_mp(btn_det,
- MBHC_BTN_DET_V_BTN_HIGH);
- WARN_ON(btn >= btn_det->num_btn);
- /* reprogram release threshold to catch voltage ramp up early */
- wcd9xxx_update_rel_threshold(mbhc, v_btn_high[btn], vddio);
-
- mask = wcd9xxx_get_button_mask(btn);
- mbhc->buttons_pressed |= mask;
- wcd9xxx_lock_sleep(core_res);
- if (schedule_delayed_work(&mbhc->mbhc_btn_dwork,
- msecs_to_jiffies(400)) == 0) {
- WARN(1, "Button pressed twice without release event\n");
- wcd9xxx_unlock_sleep(core_res);
- }
- } else {
- pr_debug("%s: bogus button press, too short press?\n",
- __func__);
- }
-
- done:
- pr_debug("%s: leave\n", __func__);
- mutex_unlock(&mbhc->mbhc_lock);
- WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t wcd9xxx_release_handler(int irq, void *data)
-{
- int ret;
- bool waitdebounce = true;
- struct wcd9xxx_mbhc *mbhc = data;
-
- pr_debug("%s: enter\n", __func__);
- WCD9XXX_BCL_LOCK(mbhc->resmgr);
- mbhc->mbhc_state = MBHC_STATE_RELEASE;
-
- if (mbhc->buttons_pressed & WCD9XXX_JACK_BUTTON_MASK) {
- ret = wcd9xxx_cancel_btn_work(mbhc);
- if (ret == 0) {
- pr_debug("%s: Reporting long button release event\n",
- __func__);
- wcd9xxx_jack_report(mbhc, &mbhc->button_jack, 0,
- mbhc->buttons_pressed);
- } else {
- if (wcd9xxx_is_false_press(mbhc)) {
- pr_debug("%s: Fake button press interrupt\n",
- __func__);
- } else {
- if (mbhc->in_swch_irq_handler) {
- pr_debug("%s: Switch irq kicked in, ignore\n",
- __func__);
- } else {
- pr_debug("%s: Reporting btn press\n",
- __func__);
- wcd9xxx_jack_report(mbhc,
- &mbhc->button_jack,
- mbhc->buttons_pressed,
- mbhc->buttons_pressed);
- pr_debug("%s: Reporting btn release\n",
- __func__);
- wcd9xxx_jack_report(mbhc,
- &mbhc->button_jack,
- 0, mbhc->buttons_pressed);
- waitdebounce = false;
- }
- }
- }
-
- mbhc->buttons_pressed &= ~WCD9XXX_JACK_BUTTON_MASK;
- }
-
- wcd9xxx_calibrate_hs_polling(mbhc);
-
- if (waitdebounce)
- msleep(SWCH_REL_DEBOUNCE_TIME_MS);
- wcd9xxx_start_hs_polling(mbhc);
-
- pr_debug("%s: leave\n", __func__);
- WCD9XXX_BCL_UNLOCK(mbhc->resmgr);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t wcd9xxx_hphl_ocp_irq(int irq, void *data)
-{
- struct wcd9xxx_mbhc *mbhc = data;
- struct snd_soc_codec *codec;
-
- pr_info("%s: received HPHL OCP irq\n", __func__);
-
- if (mbhc) {
- codec = mbhc->codec;
- if ((mbhc->hphlocp_cnt < OCP_ATTEMPT) &&
- (!mbhc->hphrocp_cnt)) {
- pr_info("%s: retry\n", __func__);
- mbhc->hphlocp_cnt++;
- snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_OCP_CTL,
- 0x10, 0x00);
- snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_OCP_CTL,
- 0x10, 0x10);
- } else {
- wcd9xxx_disable_irq(mbhc->resmgr->core_res,
- mbhc->intr_ids->hph_left_ocp);
- mbhc->hph_status |= SND_JACK_OC_HPHL;
- wcd9xxx_jack_report(mbhc, &mbhc->headset_jack,
- mbhc->hph_status,
- WCD9XXX_JACK_MASK);
- }
- } else {
- pr_err("%s: Bad wcd9xxx private data\n", __func__);
- }
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t wcd9xxx_hphr_ocp_irq(int irq, void *data)
-{
- struct wcd9xxx_mbhc *mbhc = data;
- struct snd_soc_codec *codec;
-
- pr_info("%s: received HPHR OCP irq\n", __func__);
- codec = mbhc->codec;
- if ((mbhc->hphrocp_cnt < OCP_ATTEMPT) &&
- (!mbhc->hphlocp_cnt)) {
- pr_info("%s: retry\n", __func__);
- mbhc->hphrocp_cnt++;
- snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_OCP_CTL, 0x10,
- 0x00);
- snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_OCP_CTL, 0x10,
- 0x10);
- } else {
- wcd9xxx_disable_irq(mbhc->resmgr->core_res,
- mbhc->intr_ids->hph_right_ocp);
- mbhc->hph_status |= SND_JACK_OC_HPHR;
- wcd9xxx_jack_report(mbhc, &mbhc->headset_jack,
- mbhc->hph_status, WCD9XXX_JACK_MASK);
- }
-
- return IRQ_HANDLED;
-}
-
-static int wcd9xxx_acdb_mclk_index(const int rate)
-{
- if (rate == MCLK_RATE_12288KHZ)
- return 0;
- else if (rate == MCLK_RATE_9600KHZ)
- return 1;
- else {
- BUG_ON(1);
- return -EINVAL;
- }
-}
-
-static void wcd9xxx_update_mbhc_clk_rate(struct wcd9xxx_mbhc *mbhc, u32 rate)
-{
- u32 dce_wait, sta_wait;
- u8 ncic, nmeas, navg;
- void *calibration;
- u8 *n_cic, *n_ready;
- struct wcd9xxx_mbhc_btn_detect_cfg *btn_det;
- u8 npoll = 4, nbounce_wait = 30;
- struct snd_soc_codec *codec = mbhc->codec;
- int idx = wcd9xxx_acdb_mclk_index(rate);
- int idxmclk = wcd9xxx_acdb_mclk_index(mbhc->mbhc_cfg->mclk_rate);
-
- pr_debug("%s: Updating clock rate dependents, rate = %u\n", __func__,
- rate);
- calibration = mbhc->mbhc_cfg->calibration;
-
- /*
- * First compute the DCE / STA wait times depending on tunable
- * parameters. The value is computed in microseconds
- */
- btn_det = WCD9XXX_MBHC_CAL_BTN_DET_PTR(calibration);
- n_ready = wcd9xxx_mbhc_cal_btn_det_mp(btn_det, MBHC_BTN_DET_N_READY);
- n_cic = wcd9xxx_mbhc_cal_btn_det_mp(btn_det, MBHC_BTN_DET_N_CIC);
- nmeas = WCD9XXX_MBHC_CAL_BTN_DET_PTR(calibration)->n_meas;
- navg = WCD9XXX_MBHC_CAL_GENERAL_PTR(calibration)->mbhc_navg;
-
- /* ncic stays with the same what we had during calibration */
- ncic = n_cic[idxmclk];
- dce_wait = (1000 * 512 * ncic * (nmeas + 1)) / (rate / 1000);
- sta_wait = (1000 * 128 * (navg + 1)) / (rate / 1000);
- mbhc->mbhc_data.t_dce = dce_wait;
- /* give extra margin to sta for safety */
- mbhc->mbhc_data.t_sta = sta_wait + 250;
- mbhc->mbhc_data.t_sta_dce = ((1000 * 256) / (rate / 1000) *
- n_ready[idx]) + 10;
-
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_TIMER_B1_CTL, n_ready[idx]);
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_TIMER_B6_CTL, ncic);
-
- if (rate == MCLK_RATE_12288KHZ) {
- npoll = 4;
- nbounce_wait = 30;
- } else if (rate == MCLK_RATE_9600KHZ) {
- npoll = 3;
- nbounce_wait = 23;
- }
-
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_TIMER_B2_CTL, npoll);
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_TIMER_B3_CTL, nbounce_wait);
- pr_debug("%s: leave\n", __func__);
-}
-
-static void wcd9xxx_mbhc_cal(struct wcd9xxx_mbhc *mbhc)
-{
- u8 cfilt_mode;
- u16 reg0, reg1, reg2;
- struct snd_soc_codec *codec = mbhc->codec;
-
- pr_debug("%s: enter\n", __func__);
- wcd9xxx_disable_irq(mbhc->resmgr->core_res,
- mbhc->intr_ids->dce_est_complete);
- wcd9xxx_turn_onoff_rel_detection(codec, false);
-
- /* t_dce and t_sta are updated by wcd9xxx_update_mbhc_clk_rate() */
- WARN_ON(!mbhc->mbhc_data.t_dce);
- WARN_ON(!mbhc->mbhc_data.t_sta);
-
- /*
- * LDOH and CFILT are already configured during pdata handling.
- * Only need to make sure CFILT and bandgap are in Fast mode.
- * Need to restore defaults once calculation is done.
- *
- * In case when Micbias is powered by external source, request
- * turn on the external voltage source for Calibration.
- */
- if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mb_source)
- mbhc->mbhc_cb->enable_mb_source(codec, true, false);
-
- cfilt_mode = snd_soc_read(codec, mbhc->mbhc_bias_regs.cfilt_ctl);
- if (mbhc->mbhc_cb && mbhc->mbhc_cb->cfilt_fast_mode)
- mbhc->mbhc_cb->cfilt_fast_mode(codec, mbhc);
- else
- snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.cfilt_ctl,
- 0x40, 0x00);
-
- if (mbhc->mbhc_cb && mbhc->mbhc_cb->micbias_pulldown_ctrl)
- mbhc->mbhc_cb->micbias_pulldown_ctrl(mbhc, false);
-
- /*
- * Micbias, CFILT, LDOH, MBHC MUX mode settings
- * to perform ADC calibration
- */
- if (mbhc->mbhc_cb && mbhc->mbhc_cb->select_cfilt)
- mbhc->mbhc_cb->select_cfilt(codec, mbhc);
- else
- snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 0x60,
- mbhc->mbhc_cfg->micbias << 5);
- snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 0x01, 0x00);
- snd_soc_update_bits(codec, WCD9XXX_A_LDO_H_MODE_1, 0x60, 0x60);
- snd_soc_write(codec, WCD9XXX_A_TX_7_MBHC_TEST_CTL, 0x78);
- if (mbhc->mbhc_cb && mbhc->mbhc_cb->codec_specific_cal)
- mbhc->mbhc_cb->codec_specific_cal(codec, mbhc);
- else
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL,
- 0x04, 0x04);
-
- /* Pull down micbias to ground */
- reg0 = snd_soc_read(codec, mbhc->mbhc_bias_regs.ctl_reg);
- snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 1, 1);
- /* Disconnect override from micbias */
- reg1 = snd_soc_read(codec, WCD9XXX_A_MAD_ANA_CTRL);
- snd_soc_update_bits(codec, WCD9XXX_A_MAD_ANA_CTRL, 1 << 4, 1 << 0);
- /* Connect the MUX to micbias */
- snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0x02);
- if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mux_bias_block)
- mbhc->mbhc_cb->enable_mux_bias_block(codec);
- else
- snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_1,
- 0x80, 0x80);
- /*
- * Hardware that has external cap can delay mic bias ramping down up
- * to 50ms.
- */
- msleep(WCD9XXX_MUX_SWITCH_READY_WAIT_MS);
- /* DCE measurement for 0 voltage */
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x0A);
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x02);
- mbhc->mbhc_data.dce_z = __wcd9xxx_codec_sta_dce(mbhc, 1, true, false);
-
- /* compute dce_z for current source */
- reg2 = snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B1_CTL);
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x78,
- WCD9XXX_MBHC_NSC_CS << 3);
-
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x0A);
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x02);
- mbhc->mbhc_data.dce_nsc_cs_z = __wcd9xxx_codec_sta_dce(mbhc, 1, true,
- false);
- pr_debug("%s: dce_z with nsc cs: 0x%x\n", __func__,
- mbhc->mbhc_data.dce_nsc_cs_z);
-
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, reg2);
-
- /* STA measurement for 0 voltage */
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x0A);
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x02);
- mbhc->mbhc_data.sta_z = __wcd9xxx_codec_sta_dce(mbhc, 0, true, false);
-
- /* Restore registers */
- snd_soc_write(codec, mbhc->mbhc_bias_regs.ctl_reg, reg0);
- snd_soc_write(codec, WCD9XXX_A_MAD_ANA_CTRL, reg1);
-
- /* DCE measurment for MB voltage */
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x0A);
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x02);
- snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0x02);
- if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mux_bias_block)
- mbhc->mbhc_cb->enable_mux_bias_block(codec);
- else
- snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_1,
- 0x80, 0x80);
- /*
- * Hardware that has external cap can delay mic bias ramping down up
- * to 50ms.
- */
- msleep(WCD9XXX_MUX_SWITCH_READY_WAIT_MS);
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x04);
- usleep_range(mbhc->mbhc_data.t_dce, mbhc->mbhc_data.t_dce +
- WCD9XXX_USLEEP_RANGE_MARGIN_US);
- mbhc->mbhc_data.dce_mb = wcd9xxx_read_dce_result(codec);
-
- /* STA Measurement for MB Voltage */
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x0A);
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x02);
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x02);
- snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0x02);
- if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mux_bias_block)
- mbhc->mbhc_cb->enable_mux_bias_block(codec);
- else
- snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_1,
- 0x80, 0x80);
- /*
- * Hardware that has external cap can delay mic bias ramping down up
- * to 50ms.
- */
- msleep(WCD9XXX_MUX_SWITCH_READY_WAIT_MS);
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x02);
- usleep_range(mbhc->mbhc_data.t_sta, mbhc->mbhc_data.t_sta +
- WCD9XXX_USLEEP_RANGE_MARGIN_US);
- mbhc->mbhc_data.sta_mb = wcd9xxx_read_sta_result(codec);
-
- /* Restore default settings. */
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x04, 0x00);
- snd_soc_write(codec, mbhc->mbhc_bias_regs.cfilt_ctl, cfilt_mode);
- snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0x04);
- if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mux_bias_block)
- mbhc->mbhc_cb->enable_mux_bias_block(codec);
- else
- snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_1,
- 0x80, 0x80);
- usleep_range(100, 110);
-
- if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mb_source)
- mbhc->mbhc_cb->enable_mb_source(codec, false, false);
-
- if (mbhc->mbhc_cb && mbhc->mbhc_cb->micbias_pulldown_ctrl)
- mbhc->mbhc_cb->micbias_pulldown_ctrl(mbhc, true);
-
- wcd9xxx_enable_irq(mbhc->resmgr->core_res,
- mbhc->intr_ids->dce_est_complete);
- wcd9xxx_turn_onoff_rel_detection(codec, true);
-
- pr_debug("%s: leave\n", __func__);
-}
-
-static void wcd9xxx_mbhc_setup(struct wcd9xxx_mbhc *mbhc)
-{
- int n;
- u8 *gain;
- struct wcd9xxx_mbhc_general_cfg *generic;
- struct wcd9xxx_mbhc_btn_detect_cfg *btn_det;
- struct snd_soc_codec *codec = mbhc->codec;
- const int idx = wcd9xxx_acdb_mclk_index(mbhc->mbhc_cfg->mclk_rate);
-
- pr_debug("%s: enter\n", __func__);
- generic = WCD9XXX_MBHC_CAL_GENERAL_PTR(mbhc->mbhc_cfg->calibration);
- btn_det = WCD9XXX_MBHC_CAL_BTN_DET_PTR(mbhc->mbhc_cfg->calibration);
-
- for (n = 0; n < 8; n++) {
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_FIR_B1_CFG,
- 0x07, n);
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_FIR_B2_CFG,
- btn_det->c[n]);
- }
-
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B2_CTL, 0x07,
- btn_det->nc);
-
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_TIMER_B4_CTL, 0x70,
- generic->mbhc_nsa << 4);
-
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_TIMER_B4_CTL, 0x0F,
- btn_det->n_meas);
-
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_TIMER_B5_CTL,
- generic->mbhc_navg);
-
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x80, 0x80);
-
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x78,
- btn_det->mbhc_nsc << 3);
-
- if (mbhc->mbhc_cb && mbhc->mbhc_cb->get_cdc_type &&
- mbhc->mbhc_cb->get_cdc_type() !=
- WCD9XXX_CDC_TYPE_HELICON) {
- if (mbhc->resmgr->reg_addr->micb_4_mbhc)
- snd_soc_update_bits(codec,
- mbhc->resmgr->reg_addr->micb_4_mbhc,
- 0x03, MBHC_MICBIAS2);
- }
-
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x02, 0x02);
-
- snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_2, 0xF0, 0xF0);
-
- gain = wcd9xxx_mbhc_cal_btn_det_mp(btn_det, MBHC_BTN_DET_GAIN);
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B2_CTL, 0x78,
- gain[idx] << 3);
- snd_soc_update_bits(codec, WCD9XXX_A_MICB_2_MBHC, 0x04, 0x04);
-
- pr_debug("%s: leave\n", __func__);
-}
-
-static int wcd9xxx_setup_jack_detect_irq(struct wcd9xxx_mbhc *mbhc)
-{
- int ret = 0;
- void *core_res = mbhc->resmgr->core_res;
-
- if (mbhc->mbhc_cfg->gpio) {
- ret = request_threaded_irq(mbhc->mbhc_cfg->gpio_irq, NULL,
- wcd9xxx_mech_plug_detect_irq,
- (IRQF_TRIGGER_RISING |
- IRQF_TRIGGER_FALLING),
- "headset detect", mbhc);
- if (ret) {
- pr_err("%s: Failed to request gpio irq %d\n", __func__,
- mbhc->mbhc_cfg->gpio_irq);
- } else {
- ret = enable_irq_wake(mbhc->mbhc_cfg->gpio_irq);
- if (ret)
- pr_err("%s: Failed to enable wake up irq %d\n",
- __func__, mbhc->mbhc_cfg->gpio_irq);
- }
- } else if (mbhc->mbhc_cfg->insert_detect) {
- /* Enable HPHL_10K_SW */
- snd_soc_update_bits(mbhc->codec, WCD9XXX_A_RX_HPH_OCP_CTL,
- 1 << 1, 1 << 1);
-
- ret = wcd9xxx_request_irq(core_res,
- mbhc->intr_ids->hs_jack_switch,
- wcd9xxx_mech_plug_detect_irq,
- "Jack Detect",
- mbhc);
- if (ret)
- pr_err("%s: Failed to request insert detect irq %d\n",
- __func__, mbhc->intr_ids->hs_jack_switch);
- }
-
- return ret;
-}
-
-static int wcd9xxx_init_and_calibrate(struct wcd9xxx_mbhc *mbhc)
-{
- int ret = 0;
- struct snd_soc_codec *codec = mbhc->codec;
-
- pr_debug("%s: enter\n", __func__);
-
- /* Enable MCLK during calibration */
- wcd9xxx_onoff_ext_mclk(mbhc, true);
- wcd9xxx_mbhc_setup(mbhc);
- wcd9xxx_mbhc_cal(mbhc);
- wcd9xxx_mbhc_calc_thres(mbhc);
- wcd9xxx_onoff_ext_mclk(mbhc, false);
- wcd9xxx_calibrate_hs_polling(mbhc);
-
- /* Enable Mic Bias pull down and HPH Switch to GND */
- snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 0x01, 0x01);
- snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x01, 0x01);
- INIT_WORK(&mbhc->correct_plug_swch, wcd9xxx_correct_swch_plug);
-
- snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_OCP_CTL, 0x10,
- 0x10);
- wcd9xxx_enable_irq(mbhc->resmgr->core_res,
- mbhc->intr_ids->hph_left_ocp);
- wcd9xxx_enable_irq(mbhc->resmgr->core_res,
- mbhc->intr_ids->hph_right_ocp);
-
- /* Initialize mechanical mbhc */
- ret = wcd9xxx_setup_jack_detect_irq(mbhc);
-
- if (!ret && mbhc->mbhc_cfg->gpio) {
- /* Requested with IRQF_DISABLED */
- enable_irq(mbhc->mbhc_cfg->gpio_irq);
-
- /* Bootup time detection */
- wcd9xxx_swch_irq_handler(mbhc);
- } else if (!ret && mbhc->mbhc_cfg->insert_detect) {
- pr_debug("%s: Setting up codec own insert detection\n",
- __func__);
- /* Setup for insertion detection */
- wcd9xxx_insert_detect_setup(mbhc, true);
- }
-
- pr_debug("%s: leave\n", __func__);
-
- return ret;
-}
-
-static void wcd9xxx_mbhc_fw_read(struct work_struct *work)
-{
- struct delayed_work *dwork;
- struct wcd9xxx_mbhc *mbhc;
- struct snd_soc_codec *codec;
- const struct firmware *fw;
- struct firmware_cal *fw_data = NULL;
- int ret = -1, retry = 0;
- bool use_default_cal = false;
-
- dwork = to_delayed_work(work);
- mbhc = container_of(dwork, struct wcd9xxx_mbhc, mbhc_firmware_dwork);
- codec = mbhc->codec;
-
- while (retry < FW_READ_ATTEMPTS) {
- retry++;
- pr_info("%s:Attempt %d to request MBHC firmware\n",
- __func__, retry);
- if (mbhc->mbhc_cb->get_hwdep_fw_cal)
- fw_data = mbhc->mbhc_cb->get_hwdep_fw_cal(codec,
- WCD9XXX_MBHC_CAL);
- if (!fw_data)
- ret = request_firmware(&fw, "wcd9320/wcd9320_mbhc.bin",
- codec->dev);
- /*
- * if request_firmware and hwdep cal both fail then
- * retry for few times before bailing out
- */
- if ((ret != 0) && !fw_data) {
- usleep_range(FW_READ_TIMEOUT, FW_READ_TIMEOUT +
- WCD9XXX_USLEEP_RANGE_MARGIN_US);
- } else {
- pr_info("%s: MBHC Firmware read successful\n",
- __func__);
- break;
- }
- }
- if (!fw_data)
- pr_info("%s: using request_firmware\n", __func__);
- else
- pr_info("%s: using hwdep cal\n", __func__);
- if (ret != 0 && !fw_data) {
- pr_err("%s: Cannot load MBHC firmware use default cal\n",
- __func__);
- use_default_cal = true;
- }
- if (!use_default_cal) {
- const void *data;
- size_t size;
-
- if (fw_data) {
- data = fw_data->data;
- size = fw_data->size;
- } else {
- data = fw->data;
- size = fw->size;
- }
- if (wcd9xxx_mbhc_fw_validate(data, size) == false) {
- pr_err("%s: Invalid MBHC cal data size use default cal\n",
- __func__);
- if (!fw_data)
- release_firmware(fw);
- } else {
- if (fw_data) {
- mbhc->mbhc_cfg->calibration =
- (void *)fw_data->data;
- mbhc->mbhc_cal = fw_data;
- } else {
- mbhc->mbhc_cfg->calibration =
- (void *)fw->data;
- mbhc->mbhc_fw = fw;
- }
- }
- }
-
- (void) wcd9xxx_init_and_calibrate(mbhc);
-}
-
-#ifdef CONFIG_DEBUG_FS
-ssize_t codec_mbhc_debug_read(struct file *file, char __user *buf,
- size_t count, loff_t *pos)
-{
- const int size = 768;
- char buffer[size];
- int n = 0;
- struct wcd9xxx_mbhc *mbhc = file->private_data;
- const struct mbhc_internal_cal_data *p = &mbhc->mbhc_data;
- const s16 v_ins_hu =
- wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_INS_HU);
- const s16 v_ins_h =
- wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_INS_H);
- const s16 v_b1_hu =
- wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_B1_HU);
- const s16 v_b1_h =
- wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_B1_H);
- const s16 v_br_h =
- wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_BR_H);
-
- n = scnprintf(buffer, size - n, "dce_z = %x(%dmv)\n",
- p->dce_z, wcd9xxx_codec_sta_dce_v(mbhc, 1, p->dce_z));
- n += scnprintf(buffer + n, size - n, "dce_mb = %x(%dmv)\n",
- p->dce_mb, wcd9xxx_codec_sta_dce_v(mbhc, 1, p->dce_mb));
- n += scnprintf(buffer + n, size - n, "dce_nsc_cs_z = %x(%dmv)\n",
- p->dce_nsc_cs_z,
- __wcd9xxx_codec_sta_dce_v(mbhc, 1, p->dce_nsc_cs_z,
- p->dce_nsc_cs_z,
- VDDIO_MICBIAS_MV));
- n += scnprintf(buffer + n, size - n, "sta_z = %x(%dmv)\n",
- p->sta_z, wcd9xxx_codec_sta_dce_v(mbhc, 0, p->sta_z));
- n += scnprintf(buffer + n, size - n, "sta_mb = %x(%dmv)\n",
- p->sta_mb, wcd9xxx_codec_sta_dce_v(mbhc, 0, p->sta_mb));
- n += scnprintf(buffer + n, size - n, "t_dce = %d\n", p->t_dce);
- n += scnprintf(buffer + n, size - n, "t_sta = %d\n", p->t_sta);
- n += scnprintf(buffer + n, size - n, "micb_mv = %dmv\n", p->micb_mv);
- n += scnprintf(buffer + n, size - n, "v_ins_hu = %x(%dmv)\n",
- v_ins_hu, wcd9xxx_codec_sta_dce_v(mbhc, 0, v_ins_hu));
- n += scnprintf(buffer + n, size - n, "v_ins_h = %x(%dmv)\n",
- v_ins_h, wcd9xxx_codec_sta_dce_v(mbhc, 1, v_ins_h));
- n += scnprintf(buffer + n, size - n, "v_b1_hu = %x(%dmv)\n",
- v_b1_hu, wcd9xxx_codec_sta_dce_v(mbhc, 0, v_b1_hu));
- n += scnprintf(buffer + n, size - n, "v_b1_h = %x(%dmv)\n",
- v_b1_h, wcd9xxx_codec_sta_dce_v(mbhc, 1, v_b1_h));
- n += scnprintf(buffer + n, size - n, "v_brh = %x(%dmv)\n",
- v_br_h, wcd9xxx_codec_sta_dce_v(mbhc, 1, v_br_h));
- n += scnprintf(buffer + n, size - n, "v_brl = %x(%dmv)\n", p->v_brl,
- wcd9xxx_codec_sta_dce_v(mbhc, 0, p->v_brl));
- n += scnprintf(buffer + n, size - n, "v_no_mic = %x(%dmv)\n",
- p->v_no_mic,
- wcd9xxx_codec_sta_dce_v(mbhc, 0, p->v_no_mic));
- n += scnprintf(buffer + n, size - n, "v_inval_ins_low = %d\n",
- p->v_inval_ins_low);
- n += scnprintf(buffer + n, size - n, "v_inval_ins_high = %d\n",
- p->v_inval_ins_high);
- n += scnprintf(buffer + n, size - n, "Insert detect insert = %d\n",
- !wcd9xxx_swch_level_remove(mbhc));
- buffer[n] = 0;
-
- return simple_read_from_buffer(buf, count, pos, buffer, n);
-}
-
-static int codec_debug_open(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
- return 0;
-}
-
-static ssize_t codec_debug_write(struct file *filp,
- const char __user *ubuf, size_t cnt,
- loff_t *ppos)
-{
- char lbuf[32];
- char *buf;
- int rc;
- struct wcd9xxx_mbhc *mbhc = filp->private_data;
-
- if (cnt > sizeof(lbuf) - 1)
- return -EINVAL;
-
- rc = copy_from_user(lbuf, ubuf, cnt);
- if (rc)
- return -EFAULT;
-
- lbuf[cnt] = '\0';
- buf = (char *)lbuf;
- mbhc->no_mic_headset_override = (*strsep(&buf, " ") == '0') ?
- false : true;
- return rc;
-}
-
-static const struct file_operations mbhc_trrs_debug_ops = {
- .open = codec_debug_open,
- .write = codec_debug_write,
-};
-
-static const struct file_operations mbhc_debug_ops = {
- .open = codec_debug_open,
- .read = codec_mbhc_debug_read,
-};
-
-static void wcd9xxx_init_debugfs(struct wcd9xxx_mbhc *mbhc)
-{
- mbhc->debugfs_poke =
- debugfs_create_file("TRRS", S_IFREG | 0444, NULL, mbhc,
- &mbhc_trrs_debug_ops);
- mbhc->debugfs_mbhc =
- debugfs_create_file("wcd9xxx_mbhc", S_IFREG | 0444,
- NULL, mbhc, &mbhc_debug_ops);
-}
-
-static void wcd9xxx_cleanup_debugfs(struct wcd9xxx_mbhc *mbhc)
-{
- debugfs_remove(mbhc->debugfs_poke);
- debugfs_remove(mbhc->debugfs_mbhc);
-}
-#else
-static void wcd9xxx_init_debugfs(struct wcd9xxx_mbhc *mbhc)
-{
-}
-
-static void wcd9xxx_cleanup_debugfs(struct wcd9xxx_mbhc *mbhc)
-{
-}
-#endif
-
-int wcd9xxx_mbhc_set_keycode(struct wcd9xxx_mbhc *mbhc)
-{
- enum snd_jack_types type = SND_JACK_BTN_0;
- int i, ret, result = 0;
- int *btn_key_code;
-
- btn_key_code = mbhc->mbhc_cfg->key_code;
-
- for (i = 0 ; i < 8 ; i++) {
- if (btn_key_code[i] != 0) {
- switch (i) {
- case 0:
- type = SND_JACK_BTN_0;
- break;
- case 1:
- type = SND_JACK_BTN_1;
- break;
- case 2:
- type = SND_JACK_BTN_2;
- break;
- case 3:
- type = SND_JACK_BTN_3;
- break;
- case 4:
- type = SND_JACK_BTN_4;
- break;
- case 5:
- type = SND_JACK_BTN_5;
- break;
- default:
- WARN_ONCE(1, "Wrong button number:%d\n", i);
- result = -1;
- break;
- }
- ret = snd_jack_set_key(mbhc->button_jack.jack,
- type,
- btn_key_code[i]);
- if (ret) {
- pr_err("%s: Failed to set code for %d\n",
- __func__, btn_key_code[i]);
- result = -1;
- }
- input_set_capability(
- mbhc->button_jack.jack->input_dev,
- EV_KEY, btn_key_code[i]);
- pr_debug("%s: set btn%d key code:%d\n", __func__,
- i, btn_key_code[i]);
- }
- }
- return result;
-}
-
-int wcd9xxx_mbhc_start(struct wcd9xxx_mbhc *mbhc,
- struct wcd9xxx_mbhc_config *mbhc_cfg)
-{
- int rc = 0;
- struct snd_soc_codec *codec = mbhc->codec;
-
- pr_debug("%s: enter\n", __func__);
-
- if (!codec) {
- pr_err("%s: no codec\n", __func__);
- return -EINVAL;
- }
-
- if (mbhc_cfg->mclk_rate != MCLK_RATE_12288KHZ &&
- mbhc_cfg->mclk_rate != MCLK_RATE_9600KHZ) {
- pr_err("Error: unsupported clock rate %d\n",
- mbhc_cfg->mclk_rate);
- return -EINVAL;
- }
-
- /* Save mbhc config */
- mbhc->mbhc_cfg = mbhc_cfg;
-
- /* Set btn key code */
- if (wcd9xxx_mbhc_set_keycode(mbhc))
- pr_err("Set btn key code error!!!\n");
-
- /* Get HW specific mbhc registers' address */
- wcd9xxx_get_mbhc_micbias_regs(mbhc, MBHC_PRIMARY_MIC_MB);
-
- /* Get HW specific mbhc registers' address for anc */
- wcd9xxx_get_mbhc_micbias_regs(mbhc, MBHC_ANC_MIC_MB);
-
- /* Put CFILT in fast mode by default */
- if (mbhc->mbhc_cb && mbhc->mbhc_cb->cfilt_fast_mode)
- mbhc->mbhc_cb->cfilt_fast_mode(codec, mbhc);
- else
- snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.cfilt_ctl,
- 0x40, WCD9XXX_CFILT_FAST_MODE);
-
- /*
- * setup internal micbias if codec uses internal micbias for
- * headset detection
- */
- if (mbhc->mbhc_cfg->use_int_rbias) {
- if (mbhc->mbhc_cb && mbhc->mbhc_cb->setup_int_rbias) {
- mbhc->mbhc_cb->setup_int_rbias(codec, true);
- } else {
- pr_info("%s: internal bias requested but codec did not provide callback\n",
- __func__);
- }
- }
-
- /*
- * If codec has specific clock gating for MBHC,
- * remove the clock gate
- */
- if (mbhc->mbhc_cb &&
- mbhc->mbhc_cb->enable_clock_gate)
- mbhc->mbhc_cb->enable_clock_gate(mbhc->codec, true);
-
- if (!mbhc->mbhc_cfg->read_fw_bin ||
- (mbhc->mbhc_cfg->read_fw_bin && mbhc->mbhc_fw) ||
- (mbhc->mbhc_cfg->read_fw_bin && mbhc->mbhc_cal)) {
- rc = wcd9xxx_init_and_calibrate(mbhc);
- } else {
- if (!mbhc->mbhc_fw || !mbhc->mbhc_cal)
- schedule_delayed_work(&mbhc->mbhc_firmware_dwork,
- usecs_to_jiffies(FW_READ_TIMEOUT));
- else
- pr_debug("%s: Skipping to read mbhc fw, 0x%pK %pK\n",
- __func__, mbhc->mbhc_fw, mbhc->mbhc_cal);
- }
-
- pr_debug("%s: leave %d\n", __func__, rc);
- return rc;
-}
-EXPORT_SYMBOL(wcd9xxx_mbhc_start);
-
-void wcd9xxx_mbhc_stop(struct wcd9xxx_mbhc *mbhc)
-{
- if (mbhc->mbhc_fw || mbhc->mbhc_cal) {
- cancel_delayed_work_sync(&mbhc->mbhc_firmware_dwork);
- if (!mbhc->mbhc_cal)
- release_firmware(mbhc->mbhc_fw);
- mbhc->mbhc_fw = NULL;
- mbhc->mbhc_cal = NULL;
- }
-}
-EXPORT_SYMBOL(wcd9xxx_mbhc_stop);
-
-static enum wcd9xxx_micbias_num
-wcd9xxx_event_to_micbias(const enum wcd9xxx_notify_event event)
-{
- enum wcd9xxx_micbias_num ret;
-
- switch (event) {
- case WCD9XXX_EVENT_PRE_MICBIAS_1_ON:
- case WCD9XXX_EVENT_PRE_MICBIAS_1_OFF:
- case WCD9XXX_EVENT_POST_MICBIAS_1_ON:
- case WCD9XXX_EVENT_POST_MICBIAS_1_OFF:
- ret = MBHC_MICBIAS1;
- break;
- case WCD9XXX_EVENT_PRE_MICBIAS_2_ON:
- case WCD9XXX_EVENT_PRE_MICBIAS_2_OFF:
- case WCD9XXX_EVENT_POST_MICBIAS_2_ON:
- case WCD9XXX_EVENT_POST_MICBIAS_2_OFF:
- ret = MBHC_MICBIAS2;
- break;
- case WCD9XXX_EVENT_PRE_MICBIAS_3_ON:
- case WCD9XXX_EVENT_PRE_MICBIAS_3_OFF:
- case WCD9XXX_EVENT_POST_MICBIAS_3_ON:
- case WCD9XXX_EVENT_POST_MICBIAS_3_OFF:
- ret = MBHC_MICBIAS3;
- break;
- case WCD9XXX_EVENT_PRE_MICBIAS_4_ON:
- case WCD9XXX_EVENT_PRE_MICBIAS_4_OFF:
- case WCD9XXX_EVENT_POST_MICBIAS_4_ON:
- case WCD9XXX_EVENT_POST_MICBIAS_4_OFF:
- ret = MBHC_MICBIAS4;
- break;
- default:
- WARN_ONCE(1, "Cannot convert event %d to micbias\n", event);
- ret = MBHC_MICBIAS_INVALID;
- break;
- }
- return ret;
-}
-
-static int wcd9xxx_event_to_cfilt(const enum wcd9xxx_notify_event event)
-{
- int ret;
-
- switch (event) {
- case WCD9XXX_EVENT_PRE_CFILT_1_OFF:
- case WCD9XXX_EVENT_POST_CFILT_1_OFF:
- case WCD9XXX_EVENT_PRE_CFILT_1_ON:
- case WCD9XXX_EVENT_POST_CFILT_1_ON:
- ret = WCD9XXX_CFILT1_SEL;
- break;
- case WCD9XXX_EVENT_PRE_CFILT_2_OFF:
- case WCD9XXX_EVENT_POST_CFILT_2_OFF:
- case WCD9XXX_EVENT_PRE_CFILT_2_ON:
- case WCD9XXX_EVENT_POST_CFILT_2_ON:
- ret = WCD9XXX_CFILT2_SEL;
- break;
- case WCD9XXX_EVENT_PRE_CFILT_3_OFF:
- case WCD9XXX_EVENT_POST_CFILT_3_OFF:
- case WCD9XXX_EVENT_PRE_CFILT_3_ON:
- case WCD9XXX_EVENT_POST_CFILT_3_ON:
- ret = WCD9XXX_CFILT3_SEL;
- break;
- default:
- ret = -1;
- }
- return ret;
-}
-
-static int wcd9xxx_get_mbhc_cfilt_sel(struct wcd9xxx_mbhc *mbhc)
-{
- int cfilt;
- const struct wcd9xxx_micbias_setting *mb_pdata =
- mbhc->resmgr->micbias_pdata;
-
- switch (mbhc->mbhc_cfg->micbias) {
- case MBHC_MICBIAS1:
- cfilt = mb_pdata->bias1_cfilt_sel;
- break;
- case MBHC_MICBIAS2:
- cfilt = mb_pdata->bias2_cfilt_sel;
- break;
- case MBHC_MICBIAS3:
- cfilt = mb_pdata->bias3_cfilt_sel;
- break;
- case MBHC_MICBIAS4:
- cfilt = mb_pdata->bias4_cfilt_sel;
- break;
- default:
- cfilt = MBHC_MICBIAS_INVALID;
- break;
- }
- return cfilt;
-}
-
-static void wcd9xxx_enable_mbhc_txfe(struct wcd9xxx_mbhc *mbhc, bool on)
-{
- if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mbhc_txfe)
- mbhc->mbhc_cb->enable_mbhc_txfe(mbhc->codec, on);
- else
- snd_soc_update_bits(mbhc->codec, WCD9XXX_A_TX_7_MBHC_TEST_CTL,
- 0x40, on ? 0x40 : 0x00);
-}
-
-static int wcd9xxx_event_notify(struct notifier_block *self, unsigned long val,
- void *data)
-{
- int ret = 0;
- struct wcd9xxx_mbhc *mbhc = ((struct wcd9xxx_resmgr *)data)->mbhc;
- struct snd_soc_codec *codec;
- enum wcd9xxx_notify_event event = (enum wcd9xxx_notify_event)val;
-
- pr_debug("%s: enter event %s(%d)\n", __func__,
- wcd9xxx_get_event_string(event), event);
-
- if (!mbhc || !mbhc->mbhc_cfg) {
- pr_debug("mbhc not initialized\n");
- return 0;
- }
- codec = mbhc->codec;
- mutex_lock(&mbhc->mbhc_lock);
- switch (event) {
- /* MICBIAS usage change */
- case WCD9XXX_EVENT_PRE_MICBIAS_1_ON:
- case WCD9XXX_EVENT_PRE_MICBIAS_2_ON:
- case WCD9XXX_EVENT_PRE_MICBIAS_3_ON:
- case WCD9XXX_EVENT_PRE_MICBIAS_4_ON:
- if (mbhc->mbhc_cfg && mbhc->mbhc_cfg->micbias ==
- wcd9xxx_event_to_micbias(event)) {
- wcd9xxx_switch_micbias(mbhc, 0);
- /*
- * Enable MBHC TxFE whenever micbias is
- * turned ON and polling is active
- */
- if (mbhc->polling_active)
- wcd9xxx_enable_mbhc_txfe(mbhc, true);
- }
- break;
- case WCD9XXX_EVENT_POST_MICBIAS_1_ON:
- case WCD9XXX_EVENT_POST_MICBIAS_2_ON:
- case WCD9XXX_EVENT_POST_MICBIAS_3_ON:
- case WCD9XXX_EVENT_POST_MICBIAS_4_ON:
- if (mbhc->mbhc_cfg && mbhc->mbhc_cfg->micbias ==
- wcd9xxx_event_to_micbias(event) &&
- wcd9xxx_mbhc_polling(mbhc)) {
- /* if polling is on, restart it */
- wcd9xxx_pause_hs_polling(mbhc);
- wcd9xxx_start_hs_polling(mbhc);
- }
- break;
- case WCD9XXX_EVENT_POST_MICBIAS_1_OFF:
- case WCD9XXX_EVENT_POST_MICBIAS_2_OFF:
- case WCD9XXX_EVENT_POST_MICBIAS_3_OFF:
- case WCD9XXX_EVENT_POST_MICBIAS_4_OFF:
- if (mbhc->mbhc_cfg && mbhc->mbhc_cfg->micbias ==
- wcd9xxx_event_to_micbias(event)) {
- if (mbhc->event_state &
- (1 << MBHC_EVENT_PA_HPHL | 1 << MBHC_EVENT_PA_HPHR))
- wcd9xxx_switch_micbias(mbhc, 1);
- /*
- * Disable MBHC TxFE, in case it was enabled earlier
- * when micbias was enabled and polling is not active.
- */
- if (!mbhc->polling_active)
- wcd9xxx_enable_mbhc_txfe(mbhc, false);
- }
- if (mbhc->micbias_enable && mbhc->polling_active &&
- !(snd_soc_read(mbhc->codec, mbhc->mbhc_bias_regs.ctl_reg)
- & 0x80)) {
- pr_debug("%s:Micbias turned off by recording, set up again",
- __func__);
- snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg,
- 0x80, 0x80);
- }
- break;
- /* PA usage change */
- case WCD9XXX_EVENT_PRE_HPHL_PA_ON:
- set_bit(MBHC_EVENT_PA_HPHL, &mbhc->event_state);
- if (!(snd_soc_read(codec, mbhc->mbhc_bias_regs.ctl_reg) & 0x80))
- /* if micbias is not enabled, switch to vddio */
- wcd9xxx_switch_micbias(mbhc, 1);
- break;
- case WCD9XXX_EVENT_PRE_HPHR_PA_ON:
- set_bit(MBHC_EVENT_PA_HPHR, &mbhc->event_state);
- break;
- case WCD9XXX_EVENT_POST_HPHL_PA_OFF:
- clear_bit(MBHC_EVENT_PA_HPHL, &mbhc->event_state);
- /* if HPH PAs are off, report OCP and switch back to CFILT */
- clear_bit(WCD9XXX_HPHL_PA_OFF_ACK, &mbhc->hph_pa_dac_state);
- clear_bit(WCD9XXX_HPHL_DAC_OFF_ACK, &mbhc->hph_pa_dac_state);
- if (mbhc->hph_status & SND_JACK_OC_HPHL)
- hphlocp_off_report(mbhc, SND_JACK_OC_HPHL);
- if (!(mbhc->event_state &
- (1 << MBHC_EVENT_PA_HPHL | 1 << MBHC_EVENT_PA_HPHR |
- 1 << MBHC_EVENT_PRE_TX_3_ON)))
- wcd9xxx_switch_micbias(mbhc, 0);
- break;
- case WCD9XXX_EVENT_POST_HPHR_PA_OFF:
- clear_bit(MBHC_EVENT_PA_HPHR, &mbhc->event_state);
- /* if HPH PAs are off, report OCP and switch back to CFILT */
- clear_bit(WCD9XXX_HPHR_PA_OFF_ACK, &mbhc->hph_pa_dac_state);
- clear_bit(WCD9XXX_HPHR_DAC_OFF_ACK, &mbhc->hph_pa_dac_state);
- if (mbhc->hph_status & SND_JACK_OC_HPHR)
- hphrocp_off_report(mbhc, SND_JACK_OC_HPHL);
- if (!(mbhc->event_state &
- (1 << MBHC_EVENT_PA_HPHL | 1 << MBHC_EVENT_PA_HPHR |
- 1 << MBHC_EVENT_PRE_TX_3_ON)))
- wcd9xxx_switch_micbias(mbhc, 0);
- break;
- /* Clock usage change */
- case WCD9XXX_EVENT_PRE_MCLK_ON:
- break;
- case WCD9XXX_EVENT_POST_MCLK_ON:
- /* Change to lower TxAAF frequency */
- snd_soc_update_bits(codec, WCD9XXX_A_TX_COM_BIAS, 1 << 4,
- 1 << 4);
- /* Re-calibrate clock rate dependent values */
- wcd9xxx_update_mbhc_clk_rate(mbhc, mbhc->mbhc_cfg->mclk_rate);
- /* If clock source changes, stop and restart polling */
- if (wcd9xxx_mbhc_polling(mbhc)) {
- wcd9xxx_calibrate_hs_polling(mbhc);
- wcd9xxx_start_hs_polling(mbhc);
- }
- break;
- case WCD9XXX_EVENT_PRE_MCLK_OFF:
- /* If clock source changes, stop and restart polling */
- if (wcd9xxx_mbhc_polling(mbhc))
- wcd9xxx_pause_hs_polling(mbhc);
- break;
- case WCD9XXX_EVENT_POST_MCLK_OFF:
- break;
- case WCD9XXX_EVENT_PRE_RCO_ON:
- break;
- case WCD9XXX_EVENT_POST_RCO_ON:
- /* Change to higher TxAAF frequency */
- snd_soc_update_bits(codec, WCD9XXX_A_TX_COM_BIAS, 1 << 4,
- 0 << 4);
- /* Re-calibrate clock rate dependent values */
- wcd9xxx_update_mbhc_clk_rate(mbhc, mbhc->rco_clk_rate);
- /* If clock source changes, stop and restart polling */
- if (wcd9xxx_mbhc_polling(mbhc)) {
- wcd9xxx_calibrate_hs_polling(mbhc);
- wcd9xxx_start_hs_polling(mbhc);
- }
- break;
- case WCD9XXX_EVENT_PRE_RCO_OFF:
- /* If clock source changes, stop and restart polling */
- if (wcd9xxx_mbhc_polling(mbhc))
- wcd9xxx_pause_hs_polling(mbhc);
- break;
- case WCD9XXX_EVENT_POST_RCO_OFF:
- break;
- /* CFILT usage change */
- case WCD9XXX_EVENT_PRE_CFILT_1_ON:
- case WCD9XXX_EVENT_PRE_CFILT_2_ON:
- case WCD9XXX_EVENT_PRE_CFILT_3_ON:
- if (wcd9xxx_get_mbhc_cfilt_sel(mbhc) ==
- wcd9xxx_event_to_cfilt(event))
- /*
- * Switch CFILT to slow mode if MBHC CFILT is being
- * used.
- */
- wcd9xxx_codec_switch_cfilt_mode(mbhc, false);
- break;
- case WCD9XXX_EVENT_POST_CFILT_1_OFF:
- case WCD9XXX_EVENT_POST_CFILT_2_OFF:
- case WCD9XXX_EVENT_POST_CFILT_3_OFF:
- if (wcd9xxx_get_mbhc_cfilt_sel(mbhc) ==
- wcd9xxx_event_to_cfilt(event))
- /*
- * Switch CFILT to fast mode if MBHC CFILT is not
- * used anymore.
- */
- wcd9xxx_codec_switch_cfilt_mode(mbhc, true);
- break;
- /* System resume */
- case WCD9XXX_EVENT_POST_RESUME:
- mbhc->mbhc_last_resume = jiffies;
- break;
- /* BG mode chage */
- case WCD9XXX_EVENT_PRE_BG_OFF:
- case WCD9XXX_EVENT_POST_BG_OFF:
- case WCD9XXX_EVENT_PRE_BG_AUDIO_ON:
- case WCD9XXX_EVENT_POST_BG_AUDIO_ON:
- case WCD9XXX_EVENT_PRE_BG_MBHC_ON:
- case WCD9XXX_EVENT_POST_BG_MBHC_ON:
- /* Not used for now */
- break;
- case WCD9XXX_EVENT_PRE_TX_3_ON:
- /*
- * if polling is ON, mbhc micbias not enabled
- * switch micbias source to VDDIO
- */
- set_bit(MBHC_EVENT_PRE_TX_3_ON, &mbhc->event_state);
- if (!(snd_soc_read(codec, mbhc->mbhc_bias_regs.ctl_reg)
- & 0x80) &&
- mbhc->polling_active && !mbhc->mbhc_micbias_switched)
- wcd9xxx_switch_micbias(mbhc, 1);
- break;
- case WCD9XXX_EVENT_POST_TX_3_OFF:
- /*
- * Switch back to micbias if HPH PA or TX3 path
- * is disabled
- */
- clear_bit(MBHC_EVENT_PRE_TX_3_ON, &mbhc->event_state);
- if (mbhc->polling_active && mbhc->mbhc_micbias_switched &&
- !(mbhc->event_state & (1 << MBHC_EVENT_PA_HPHL |
- 1 << MBHC_EVENT_PA_HPHR)))
- wcd9xxx_switch_micbias(mbhc, 0);
- break;
- default:
- WARN(1, "Unknown event %d\n", event);
- ret = -EINVAL;
- }
- mutex_unlock(&mbhc->mbhc_lock);
-
- pr_debug("%s: leave\n", __func__);
-
- return ret;
-}
-
-static s16 wcd9xxx_read_impedance_regs(struct wcd9xxx_mbhc *mbhc)
-{
- struct snd_soc_codec *codec = mbhc->codec;
- short bias_value;
- int i;
- s32 z_t = 0;
- s32 z_loop = z_det_box_car_avg;
-
- /* Box Car avrg of less than a particular loop count will not be
- * accomodated. Similarly if the count is more than a particular number
- * it will not be counted. Set z_loop counter to a limit, if its more
- * or less than the value in WCD9XXX_BOX_CAR_AVRG_MAX or
- * WCD9XXX_BOX_CAR_AVRG_MIN
- */
- if (z_loop < WCD9XXX_BOX_CAR_AVRG_MIN) {
- dev_dbg(codec->dev,
- "%s: Box Car avrg counter < %d. Limiting it to %d\n",
- __func__, WCD9XXX_BOX_CAR_AVRG_MIN,
- WCD9XXX_BOX_CAR_AVRG_MIN);
- z_loop = WCD9XXX_BOX_CAR_AVRG_MIN;
- } else if (z_loop > WCD9XXX_BOX_CAR_AVRG_MAX) {
- dev_dbg(codec->dev,
- "%s: Box Car avrg counter > %d. Limiting it to %d\n",
- __func__, WCD9XXX_BOX_CAR_AVRG_MAX,
- WCD9XXX_BOX_CAR_AVRG_MAX);
- z_loop = WCD9XXX_BOX_CAR_AVRG_MAX;
- }
-
- /* Take box car average if needed */
- for (i = 0; i < z_loop; i++) {
- snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x2);
- /* Wait for atleast 1800uS to let register write to settle */
- usleep_range(1800, 1800 + WCD9XXX_USLEEP_RANGE_MARGIN_US);
- z_t += wcd9xxx_read_sta_result(codec);
- }
- /* Take average of the Z values read */
- bias_value = (s16) (z_t / z_loop);
- return bias_value;
-}
-
-static int wcd9xxx_remeasure_z_values(struct wcd9xxx_mbhc *mbhc,
- s16 l[3], s16 r[3],
- uint32_t *zl, uint32_t *zr,
- u32 *zl_stereo, u32 *zl_mono)
-{
- s16 l_t[3] = {0}, r_t[3] = {0};
- s16 l2_stereo, l2_mono;
- bool left, right;
- struct snd_soc_codec *codec = mbhc->codec;
-
- if (!mbhc->mbhc_cb || !mbhc->mbhc_cb->setup_zdet ||
- !mbhc->mbhc_cb->compute_impedance) {
- dev_err(codec->dev, "%s: Invalid parameters\n", __func__);
- return -EINVAL;
- }
-
- left = !!(l);
- right = !!(r);
-
- dev_dbg(codec->dev, "%s: Remeasuring impedance values\n", __func__);
- dev_dbg(codec->dev, "%s: l: %pK, r: %pK, left=%d, right=%d\n", __func__,
- l, r, left, right);
-
- /* Remeasure V2 values */
- snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_2, 0xFF, 0xF0);
- if (right)
- r_t[2] = wcd9xxx_read_impedance_regs(mbhc);
- snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0xFF, 0xC0);
- if (left)
- l_t[2] = wcd9xxx_read_impedance_regs(mbhc);
-
- /* Ramp down HPHR */
- mbhc->mbhc_cb->setup_zdet(mbhc, MBHC_ZDET_HPHR_RAMP_DISABLE);
-
- if (right) {
- /* Take R0'/R1' */
- snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_2,
- 0xFF, 0xF8);
- snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_1,
- 0xFF, 0xA0);
- r_t[1] = wcd9xxx_read_impedance_regs(mbhc);
- snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_2,
- 0xFF, 0xF0);
- r_t[0] = wcd9xxx_read_impedance_regs(mbhc);
- }
-
- /* Put back gain to 1x */
- if (!left && right)
- mbhc->mbhc_cb->setup_zdet(mbhc, MBHC_ZDET_GAIN_0);
-
- snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0xFF, 0xC0);
- /* Take L2'' measurement */
- l2_stereo = wcd9xxx_read_impedance_regs(mbhc);
-
- /* Turn off HPHR PA and take L2''' */
- mbhc->mbhc_cb->setup_zdet(mbhc, MBHC_ZDET_HPHR_PA_DISABLE);
- l2_mono = wcd9xxx_read_impedance_regs(mbhc);
-
- /* Ramp HPHL from -15mV to 0V */
- mbhc->mbhc_cb->setup_zdet(mbhc, MBHC_ZDET_HPHL_RAMP_DISABLE);
-
- /* Take L0' and L1' with iCal */
- l_t[0] = wcd9xxx_read_impedance_regs(mbhc);
- snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_2, 0xFF, 0xF8);
- l_t[1] = wcd9xxx_read_impedance_regs(mbhc);
-
- if (left) {
- l[0] = l_t[0];
- l[1] = l_t[1];
- l[2] = l_t[2];
- }
- if (right) {
- r[0] = r_t[0];
- r[1] = r_t[1];
- r[2] = r_t[2];
- }
-
- /* compute the new impedance values */
- mbhc->mbhc_cb->compute_impedance(mbhc, l, r, zl, zr);
-
- if (!left && right)
- mbhc->mbhc_cb->setup_zdet(mbhc, MBHC_ZDET_GAIN_UPDATE_1X);
- /* compute the new ZL'' value */
- l_t[2] = l2_stereo;
- mbhc->mbhc_cb->compute_impedance(mbhc, l_t, NULL, zl_stereo, NULL);
- /* compute the new ZL''' value */
- l_t[2] = l2_mono;
- mbhc->mbhc_cb->compute_impedance(mbhc, l_t, NULL, zl_mono, NULL);
-
- pr_debug("%s: L0': 0x%x, L1': 0x%x L2_stereo: 0x%x, L2_mono: 0x%x\n",
- __func__, l_t[0] & 0xffff, l_t[1] & 0xffff,
- l2_stereo & 0xffff, l2_mono & 0xffff);
- pr_debug("%s: ZL_stereo = %u, ZL_mono = %u\n",
- __func__, *zl_stereo, *zl_mono);
-
- return 0;
-}
-
-static enum mbhc_zdet_zones wcd9xxx_assign_zdet_zone(uint32_t zl, uint32_t zr,
- int32_t *gain)
-{
- enum mbhc_zdet_zones zdet_zone;
-
- if (WCD9XXX_IS_IN_ZDET_ZONE_1(zl) &&
- WCD9XXX_IS_IN_ZDET_ZONE_1(zr)) {
- zdet_zone = ZL_ZONE1__ZR_ZONE1;
- *gain = 0;
- } else if (WCD9XXX_IS_IN_ZDET_ZONE_2(zl) &&
- WCD9XXX_IS_IN_ZDET_ZONE_2(zr)) {
- zdet_zone = ZL_ZONE2__ZR_ZONE2;
- *gain = MBHC_ZDET_GAIN_1;
- } else if (WCD9XXX_IS_IN_ZDET_ZONE_3(zl) &&
- WCD9XXX_IS_IN_ZDET_ZONE_3(zr)) {
- zdet_zone = ZL_ZONE3__ZR_ZONE3;
- *gain = MBHC_ZDET_GAIN_2;
- } else if (WCD9XXX_IS_IN_ZDET_ZONE_2(zl) &&
- WCD9XXX_IS_IN_ZDET_ZONE_1(zr)) {
- zdet_zone = ZL_ZONE2__ZR_ZONE1;
- *gain = MBHC_ZDET_GAIN_1;
- } else if (WCD9XXX_IS_IN_ZDET_ZONE_3(zl) &&
- WCD9XXX_IS_IN_ZDET_ZONE_1(zr)) {
- zdet_zone = ZL_ZONE3__ZR_ZONE1;
- *gain = MBHC_ZDET_GAIN_2;
- } else if (WCD9XXX_IS_IN_ZDET_ZONE_1(zl) &&
- WCD9XXX_IS_IN_ZDET_ZONE_2(zr)) {
- zdet_zone = ZL_ZONE1__ZR_ZONE2;
- *gain = MBHC_ZDET_GAIN_1;
- } else if (WCD9XXX_IS_IN_ZDET_ZONE_1(zl) &&
- WCD9XXX_IS_IN_ZDET_ZONE_3(zr)) {
- zdet_zone = ZL_ZONE1__ZR_ZONE3;
- *gain = MBHC_ZDET_GAIN_2;
- } else {
- zdet_zone = ZL_ZR_NOT_IN_ZONE1;
- *gain = MBHC_ZDET_GAIN_1;
- }
-
- return zdet_zone;
-}
-
-static int wcd9xxx_detect_impedance(struct wcd9xxx_mbhc *mbhc, uint32_t *zl,
- uint32_t *zr)
-{
- int i;
- int ret = 0;
- u8 micb_mbhc_val;
- s16 l[3], r[3];
- s16 *z[] = {
- &l[0], &r[0], &r[1], &l[1], &l[2], &r[2],
- };
- u32 zl_stereo, zl_mono;
- u32 zl_diff_1, zl_diff_2;
- bool override_en;
- struct snd_soc_codec *codec = mbhc->codec;
- const int mux_wait_us = 25;
- const struct wcd9xxx_reg_mask_val reg_set_mux[] = {
- /* Phase 1 */
- /* Set MBHC_MUX for HPHL without ical */
- {WCD9XXX_A_MBHC_SCALING_MUX_2, 0xFF, 0xF0},
- /* Set MBHC_MUX for HPHR without ical */
- {WCD9XXX_A_MBHC_SCALING_MUX_1, 0xFF, 0xA0},
- /* Set MBHC_MUX for HPHR with ical */
- {WCD9XXX_A_MBHC_SCALING_MUX_2, 0xFF, 0xF8},
- /* Set MBHC_MUX for HPHL with ical */
- {WCD9XXX_A_MBHC_SCALING_MUX_1, 0xFF, 0xC0},
-
- /* Phase 2 */
- {WCD9XXX_A_MBHC_SCALING_MUX_2, 0xFF, 0xF0},
- /* Set MBHC_MUX for HPHR without ical and wait for 25us */
- {WCD9XXX_A_MBHC_SCALING_MUX_1, 0xFF, 0xA0},
- };
-
- pr_debug("%s: enter\n", __func__);
- WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr);
-
- if (!mbhc->mbhc_cb || !mbhc->mbhc_cb->setup_zdet ||
- !mbhc->mbhc_cb->compute_impedance || !zl || !zr) {
- return -EINVAL;
- }
-
- /*
- * Impedance detection is an intrusive function as it mutes RX paths,
- * enable PAs and etc. Therefore codec drvier including ALSA
- * shouldn't read and write hardware registers during detection.
- */
- wcd9xxx_onoff_ext_mclk(mbhc, true);
-
- /*
- * For impedance detection, make sure to disable micbias from
- * override signal so that override does not cause micbias
- * to be enabled. This setting will be undone after completing
- * impedance measurement.
- */
- micb_mbhc_val = snd_soc_read(codec, WCD9XXX_A_MAD_ANA_CTRL);
- snd_soc_update_bits(codec, WCD9XXX_A_MAD_ANA_CTRL,
- 0x10, 0x00);
-
- override_en = (snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B1_CTL) & 0x04) ?
- true : false;
- if (!override_en)
- wcd9xxx_turn_onoff_override(mbhc, true);
- pr_debug("%s: Setting impedance detection\n", __func__);
-
- /* Codec specific setup for L0, R0, L1 and R1 measurements */
- mbhc->mbhc_cb->setup_zdet(mbhc, MBHC_ZDET_PRE_MEASURE);
-
- pr_debug("%s: Performing impedance detection\n", __func__);
- for (i = 0; i < ARRAY_SIZE(reg_set_mux) - 2; i++) {
- snd_soc_update_bits(codec, reg_set_mux[i].reg,
- reg_set_mux[i].mask,
- reg_set_mux[i].val);
- if (mbhc->mbhc_cb->get_cdc_type &&
- mbhc->mbhc_cb->get_cdc_type() ==
- WCD9XXX_CDC_TYPE_TOMTOM) {
- *(z[i]) = wcd9xxx_read_impedance_regs(mbhc);
- } else {
- if (mbhc->mbhc_cb->enable_mux_bias_block)
- mbhc->mbhc_cb->enable_mux_bias_block(codec);
- else
- snd_soc_update_bits(codec,
- WCD9XXX_A_MBHC_SCALING_MUX_1,
- 0x80, 0x80);
- /* 25us is required after mux change to settle down */
- usleep_range(mux_wait_us,
- mux_wait_us + WCD9XXX_USLEEP_RANGE_MARGIN_US);
- *(z[i]) = __wcd9xxx_codec_sta_dce(mbhc, 0,
- true, false);
- }
- }
-
- /* Codec specific setup for L2 and R2 measurements */
- mbhc->mbhc_cb->setup_zdet(mbhc, MBHC_ZDET_POST_MEASURE);
-
- for (; i < ARRAY_SIZE(reg_set_mux); i++) {
- snd_soc_update_bits(codec, reg_set_mux[i].reg,
- reg_set_mux[i].mask,
- reg_set_mux[i].val);
- if (mbhc->mbhc_cb->get_cdc_type &&
- mbhc->mbhc_cb->get_cdc_type() ==
- WCD9XXX_CDC_TYPE_TOMTOM) {
- *(z[i]) = wcd9xxx_read_impedance_regs(mbhc);
- } else {
- if (mbhc->mbhc_cb->enable_mux_bias_block)
- mbhc->mbhc_cb->enable_mux_bias_block(codec);
- else
- snd_soc_update_bits(codec,
- WCD9XXX_A_MBHC_SCALING_MUX_1,
- 0x80, 0x80);
- /* 25us is required after mux change to settle down */
- usleep_range(mux_wait_us,
- mux_wait_us + WCD9XXX_USLEEP_RANGE_MARGIN_US);
- *(z[i]) = __wcd9xxx_codec_sta_dce(mbhc, 0,
- true, false);
- }
- }
-
- mbhc->mbhc_cb->compute_impedance(mbhc, l, r, zl, zr);
-
- /*
- * For some codecs, an additional step of zdet is needed
- * to overcome effects of noise and for better accuracy of
- * z values
- */
- if (mbhc->mbhc_cb->get_cdc_type &&
- mbhc->mbhc_cb->get_cdc_type() == WCD9XXX_CDC_TYPE_TOMTOM) {
- uint32_t zl_t = 0, zr_t = 0;
- s16 *l_p, *r_p;
- enum mbhc_zdet_zones zdet_zone;
- int32_t gain;
-
- zdet_zone = wcd9xxx_assign_zdet_zone(*zl, *zr, &gain);
- switch (zdet_zone) {
- case ZL_ZONE1__ZR_ZONE1:
- l_p = NULL;
- r_p = NULL;
- break;
- case ZL_ZONE2__ZR_ZONE2:
- case ZL_ZONE3__ZR_ZONE3:
- case ZL_ZR_NOT_IN_ZONE1:
- l_p = l;
- r_p = r;
- break;
- case ZL_ZONE2__ZR_ZONE1:
- case ZL_ZONE3__ZR_ZONE1:
- /* If ZR falls in Zone 1, further computations with
- * gain update are not required
- */
- l_p = l;
- r_p = NULL;
- break;
- case ZL_ZONE1__ZR_ZONE2:
- case ZL_ZONE1__ZR_ZONE3:
- /* If ZL falls in Zone 1, further computations with
- * gain update are not required
- */
- l_p = NULL;
- r_p = r;
- break;
- }
- pr_debug("%s:zdet_zone = %d, gain = %d\n", __func__,
- zdet_zone, gain);
- if (gain)
- mbhc->mbhc_cb->setup_zdet(mbhc, gain);
-
- wcd9xxx_remeasure_z_values(mbhc, l_p, r_p, &zl_t, &zr_t,
- &zl_stereo, &zl_mono);
-
- *zl = (zl_t) ? zl_t : *zl;
- *zr = (zr_t) ? zr_t : *zr;
-
- /* Check for Mono/Stereo Type
- * Conditions to classify Mono/Stereo
- * i. Difference of zl_stereo and zl_mono > (1/2) of zl_mono
- * ii. Absolute difference of zl and zr above a threshold
- */
- zl_diff_1 = (zl_mono > zl_stereo) ? (zl_mono - zl_stereo) :
- (zl_stereo - zl_mono);
- zl_diff_2 = (*zl > *zr) ? (*zl - *zr) : (*zr - *zl);
-
- mbhc->hph_type = MBHC_HPH_NONE;
- if (mbhc->current_plug != PLUG_TYPE_HIGH_HPH) {
- if ((zl_diff_1 > (zl_mono >> 1)) ||
- (zl_diff_2 > WCD9XXX_MONO_HS_DIFF_THR) ||
- ((*zl < WCD9XXX_MONO_HS_MIN_THR) &&
- (*zr > WCD9XXX_MONO_HS_MIN_THR)) ||
- ((*zr < WCD9XXX_MONO_HS_MIN_THR) &&
- (*zl > WCD9XXX_MONO_HS_MIN_THR))) {
- pr_debug("%s: MONO plug type detected\n",
- __func__);
- mbhc->hph_type = MBHC_HPH_MONO;
- *zl = zl_mono;
- } else {
- pr_debug("%s: STEREO plug type detected\n",
- __func__);
- mbhc->hph_type = MBHC_HPH_STEREO;
- }
- }
- }
-
- mbhc->mbhc_cb->setup_zdet(mbhc, MBHC_ZDET_PA_DISABLE);
-
- /* Calculate z values based on the Q-fuse registers, if used */
- if (mbhc->mbhc_cb->zdet_error_approx)
- mbhc->mbhc_cb->zdet_error_approx(mbhc, zl, zr);
-
- wcd9xxx_onoff_ext_mclk(mbhc, false);
-
- if (!override_en)
- wcd9xxx_turn_onoff_override(mbhc, false);
-
- /* Undo the micbias disable for override */
- snd_soc_write(codec, WCD9XXX_A_MAD_ANA_CTRL, micb_mbhc_val);
-
- pr_debug("%s: L0: 0x%x(%d), L1: 0x%x(%d), L2: 0x%x(%d)\n",
- __func__,
- l[0] & 0xffff, l[0], l[1] & 0xffff, l[1], l[2] & 0xffff, l[2]);
- pr_debug("%s: R0: 0x%x(%d), R1: 0x%x(%d), R2: 0x%x(%d)\n",
- __func__,
- r[0] & 0xffff, r[0], r[1] & 0xffff, r[1], r[2] & 0xffff, r[2]);
- pr_debug("%s: RL %u milliohm, RR %u milliohm\n", __func__, *zl, *zr);
- pr_debug("%s: Impedance detection completed\n", __func__);
-
- return ret;
-}
-
-int wcd9xxx_mbhc_get_impedance(struct wcd9xxx_mbhc *mbhc, uint32_t *zl,
- uint32_t *zr)
-{
- *zl = mbhc->zl;
- *zr = mbhc->zr;
-
- if (*zl && *zr)
- return 0;
- else
- return -EINVAL;
-}
-
-/*
- * wcd9xxx_mbhc_init : initialize MBHC internal structures.
- *
- * NOTE: mbhc->mbhc_cfg is not YET configure so shouldn't be used
- */
-int wcd9xxx_mbhc_init(struct wcd9xxx_mbhc *mbhc, struct wcd9xxx_resmgr *resmgr,
- struct snd_soc_codec *codec,
- int (*micbias_enable_cb)(struct snd_soc_codec*, bool,
- enum wcd9xxx_micbias_num),
- const struct wcd9xxx_mbhc_cb *mbhc_cb,
- const struct wcd9xxx_mbhc_intr *mbhc_cdc_intr_ids,
- int rco_clk_rate,
- bool impedance_det_en)
-{
- int ret;
- void *core_res;
-
- pr_debug("%s: enter\n", __func__);
- memset(&mbhc->mbhc_bias_regs, 0, sizeof(struct mbhc_micbias_regs));
- memset(&mbhc->mbhc_data, 0, sizeof(struct mbhc_internal_cal_data));
-
- mbhc->mbhc_data.t_sta_dce = DEFAULT_DCE_STA_WAIT;
- mbhc->mbhc_data.t_dce = DEFAULT_DCE_WAIT;
- mbhc->mbhc_data.t_sta = DEFAULT_STA_WAIT;
- mbhc->mbhc_micbias_switched = false;
- mbhc->polling_active = false;
- mbhc->mbhc_state = MBHC_STATE_NONE;
- mbhc->in_swch_irq_handler = false;
- mbhc->current_plug = PLUG_TYPE_NONE;
- mbhc->lpi_enabled = false;
- mbhc->no_mic_headset_override = false;
- mbhc->mbhc_last_resume = 0;
- mbhc->codec = codec;
- mbhc->resmgr = resmgr;
- mbhc->resmgr->mbhc = mbhc;
- mbhc->micbias_enable_cb = micbias_enable_cb;
- mbhc->rco_clk_rate = rco_clk_rate;
- mbhc->mbhc_cb = mbhc_cb;
- mbhc->intr_ids = mbhc_cdc_intr_ids;
- mbhc->impedance_detect = impedance_det_en;
- mbhc->hph_type = MBHC_HPH_NONE;
-
- if (mbhc->intr_ids == NULL) {
- pr_err("%s: Interrupt mapping not provided\n", __func__);
- return -EINVAL;
- }
-
- if (mbhc->headset_jack.jack == NULL) {
- ret = snd_soc_card_jack_new(codec->component.card,
- "Headset Jack", WCD9XXX_JACK_MASK,
- &mbhc->headset_jack, NULL, 0);
- if (ret) {
- pr_err("%s: Failed to create new jack\n", __func__);
- return ret;
- }
-
- ret = snd_soc_card_jack_new(codec->component.card,
- "Button Jack",
- WCD9XXX_JACK_BUTTON_MASK,
- &mbhc->button_jack, NULL, 0);
- if (ret) {
- pr_err("Failed to create new jack\n");
- return ret;
- }
-
- ret = snd_jack_set_key(mbhc->button_jack.jack,
- SND_JACK_BTN_0,
- KEY_MEDIA);
- if (ret) {
- pr_err("%s: Failed to set code for btn-0\n",
- __func__);
- return ret;
- }
-
- INIT_DELAYED_WORK(&mbhc->mbhc_firmware_dwork,
- wcd9xxx_mbhc_fw_read);
- INIT_DELAYED_WORK(&mbhc->mbhc_btn_dwork, wcd9xxx_btn_lpress_fn);
- INIT_DELAYED_WORK(&mbhc->mbhc_insert_dwork,
- wcd9xxx_mbhc_insert_work);
- }
-
- mutex_init(&mbhc->mbhc_lock);
-
- /* Register event notifier */
- mbhc->nblock.notifier_call = wcd9xxx_event_notify;
- ret = wcd9xxx_resmgr_register_notifier(mbhc->resmgr, &mbhc->nblock);
- if (ret) {
- pr_err("%s: Failed to register notifier %d\n", __func__, ret);
- mutex_destroy(&mbhc->mbhc_lock);
- return ret;
- }
-
- wcd9xxx_init_debugfs(mbhc);
-
- /* Disable Impedance detection by default for certain codec types */
- if (mbhc->mbhc_cb && mbhc->mbhc_cb->get_cdc_type &&
- (mbhc->mbhc_cb->get_cdc_type() == WCD9XXX_CDC_TYPE_HELICON))
- impedance_detect_en = 0;
- else
- impedance_detect_en = impedance_det_en ? 1 : 0;
-
- core_res = mbhc->resmgr->core_res;
- ret = wcd9xxx_request_irq(core_res, mbhc->intr_ids->insertion,
- wcd9xxx_hs_insert_irq,
- "Headset insert detect", mbhc);
- if (ret) {
- pr_err("%s: Failed to request irq %d, ret = %d\n", __func__,
- mbhc->intr_ids->insertion, ret);
- goto err_insert_irq;
- }
- wcd9xxx_disable_irq(core_res, mbhc->intr_ids->insertion);
-
- ret = wcd9xxx_request_irq(core_res, mbhc->intr_ids->poll_plug_rem,
- wcd9xxx_hs_remove_irq,
- "Headset remove detect", mbhc);
- if (ret) {
- pr_err("%s: Failed to request irq %d\n", __func__,
- mbhc->intr_ids->poll_plug_rem);
- goto err_remove_irq;
- }
-
- ret = wcd9xxx_request_irq(core_res, mbhc->intr_ids->dce_est_complete,
- wcd9xxx_dce_handler, "DC Estimation detect",
- mbhc);
- if (ret) {
- pr_err("%s: Failed to request irq %d\n", __func__,
- mbhc->intr_ids->dce_est_complete);
- goto err_potential_irq;
- }
-
- ret = wcd9xxx_request_irq(core_res, mbhc->intr_ids->button_release,
- wcd9xxx_release_handler,
- "Button Release detect", mbhc);
- if (ret) {
- pr_err("%s: Failed to request irq %d\n", __func__,
- mbhc->intr_ids->button_release);
- goto err_release_irq;
- }
-
- ret = wcd9xxx_request_irq(core_res, mbhc->intr_ids->hph_left_ocp,
- wcd9xxx_hphl_ocp_irq, "HPH_L OCP detect",
- mbhc);
- if (ret) {
- pr_err("%s: Failed to request irq %d\n", __func__,
- mbhc->intr_ids->hph_left_ocp);
- goto err_hphl_ocp_irq;
- }
- wcd9xxx_disable_irq(core_res, mbhc->intr_ids->hph_left_ocp);
-
- ret = wcd9xxx_request_irq(core_res, mbhc->intr_ids->hph_right_ocp,
- wcd9xxx_hphr_ocp_irq, "HPH_R OCP detect",
- mbhc);
- if (ret) {
- pr_err("%s: Failed to request irq %d\n", __func__,
- mbhc->intr_ids->hph_right_ocp);
- goto err_hphr_ocp_irq;
- }
- wcd9xxx_disable_irq(core_res, mbhc->intr_ids->hph_right_ocp);
-
- wcd9xxx_regmgr_cond_register(resmgr, 1 << WCD9XXX_COND_HPH_MIC |
- 1 << WCD9XXX_COND_HPH);
-
- pr_debug("%s: leave ret %d\n", __func__, ret);
- return ret;
-
-err_hphr_ocp_irq:
- wcd9xxx_free_irq(core_res, mbhc->intr_ids->hph_left_ocp, mbhc);
-err_hphl_ocp_irq:
- wcd9xxx_free_irq(core_res, mbhc->intr_ids->button_release, mbhc);
-err_release_irq:
- wcd9xxx_free_irq(core_res, mbhc->intr_ids->dce_est_complete, mbhc);
-err_potential_irq:
- wcd9xxx_free_irq(core_res, mbhc->intr_ids->poll_plug_rem, mbhc);
-err_remove_irq:
- wcd9xxx_free_irq(core_res, mbhc->intr_ids->insertion, mbhc);
-err_insert_irq:
- wcd9xxx_resmgr_unregister_notifier(mbhc->resmgr, &mbhc->nblock);
-
- mutex_destroy(&mbhc->mbhc_lock);
-
- pr_debug("%s: leave ret %d\n", __func__, ret);
- return ret;
-}
-EXPORT_SYMBOL(wcd9xxx_mbhc_init);
-
-void wcd9xxx_mbhc_deinit(struct wcd9xxx_mbhc *mbhc)
-{
- struct wcd9xxx_core_resource *core_res =
- mbhc->resmgr->core_res;
-
- wcd9xxx_regmgr_cond_deregister(mbhc->resmgr, 1 << WCD9XXX_COND_HPH_MIC |
- 1 << WCD9XXX_COND_HPH);
-
- wcd9xxx_free_irq(core_res, mbhc->intr_ids->button_release, mbhc);
- wcd9xxx_free_irq(core_res, mbhc->intr_ids->dce_est_complete, mbhc);
- wcd9xxx_free_irq(core_res, mbhc->intr_ids->poll_plug_rem, mbhc);
- wcd9xxx_free_irq(core_res, mbhc->intr_ids->insertion, mbhc);
- wcd9xxx_free_irq(core_res, mbhc->intr_ids->hs_jack_switch, mbhc);
- wcd9xxx_free_irq(core_res, mbhc->intr_ids->hph_left_ocp, mbhc);
- wcd9xxx_free_irq(core_res, mbhc->intr_ids->hph_right_ocp, mbhc);
-
- mutex_destroy(&mbhc->mbhc_lock);
- wcd9xxx_resmgr_unregister_notifier(mbhc->resmgr, &mbhc->nblock);
- wcd9xxx_cleanup_debugfs(mbhc);
-}
-EXPORT_SYMBOL(wcd9xxx_mbhc_deinit);
-
-MODULE_DESCRIPTION("wcd9xxx MBHC module");
-MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/wcd9xxx-mbhc.h b/sound/soc/codecs/wcd9xxx-mbhc.h
deleted file mode 100644
index e35f7d4..0000000
--- a/sound/soc/codecs/wcd9xxx-mbhc.h
+++ /dev/null
@@ -1,492 +0,0 @@
-/* Copyright (c) 2012-2015, 2017 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#ifndef __WCD9XXX_MBHC_H__
-#define __WCD9XXX_MBHC_H__
-
-#include "wcd9xxx-resmgr.h"
-#include "wcdcal-hwdep.h"
-
-#define WCD9XXX_CFILT_FAST_MODE 0x00
-#define WCD9XXX_CFILT_SLOW_MODE 0x40
-#define WCD9XXX_CFILT_EXT_PRCHG_EN 0x30
-#define WCD9XXX_CFILT_EXT_PRCHG_DSBL 0x00
-
-#define WCD9XXX_USLEEP_RANGE_MARGIN_US 100
-
-struct mbhc_micbias_regs {
- u16 cfilt_val;
- u16 cfilt_ctl;
- u16 mbhc_reg;
- u16 int_rbias;
- u16 ctl_reg;
- u8 cfilt_sel;
-};
-
-enum mbhc_v_index {
- MBHC_V_IDX_CFILT,
- MBHC_V_IDX_VDDIO,
- MBHC_V_IDX_NUM,
-};
-
-enum mbhc_cal_type {
- MBHC_CAL_MCLK,
- MBHC_CAL_RCO,
- MBHC_CAL_NUM,
-};
-
-enum mbhc_impedance_detect_stages {
- MBHC_ZDET_PRE_MEASURE,
- MBHC_ZDET_POST_MEASURE,
- MBHC_ZDET_GAIN_0,
- MBHC_ZDET_GAIN_1,
- MBHC_ZDET_GAIN_2,
- MBHC_ZDET_HPHR_RAMP_DISABLE,
- MBHC_ZDET_HPHL_RAMP_DISABLE,
- MBHC_ZDET_RAMP_DISABLE,
- MBHC_ZDET_HPHR_PA_DISABLE,
- MBHC_ZDET_PA_DISABLE,
- MBHC_ZDET_GAIN_UPDATE_1X,
-};
-
-/* Zone assignments used in WCD9330 for Zdet */
-enum mbhc_zdet_zones {
- ZL_ZONE1__ZR_ZONE1,
- ZL_ZONE2__ZR_ZONE2,
- ZL_ZONE3__ZR_ZONE3,
- ZL_ZONE2__ZR_ZONE1,
- ZL_ZONE3__ZR_ZONE1,
- ZL_ZONE1__ZR_ZONE2,
- ZL_ZONE1__ZR_ZONE3,
- ZL_ZR_NOT_IN_ZONE1,
-};
-
-/* Data used by MBHC */
-struct mbhc_internal_cal_data {
- u16 dce_z;
- u16 dce_nsc_cs_z;
- u16 dce_mb;
- u16 sta_z;
- u16 sta_mb;
- u32 t_sta_dce;
- u32 t_dce;
- u32 t_sta;
- u32 micb_mv;
- u16 v_ins_hu[MBHC_V_IDX_NUM];
- u16 v_ins_h[MBHC_V_IDX_NUM];
- u16 v_b1_hu[MBHC_V_IDX_NUM];
- u16 v_b1_h[MBHC_V_IDX_NUM];
- u16 v_brh[MBHC_V_IDX_NUM];
- u16 v_brl;
- u16 v_no_mic;
- s16 v_inval_ins_low;
- s16 v_inval_ins_high;
- u16 v_cs_ins_h;
-};
-
-enum wcd9xxx_mbhc_plug_type {
- PLUG_TYPE_INVALID = -1,
- PLUG_TYPE_NONE,
- PLUG_TYPE_HEADSET,
- PLUG_TYPE_HEADPHONE,
- PLUG_TYPE_HIGH_HPH,
- PLUG_TYPE_GND_MIC_SWAP,
- PLUG_TYPE_ANC_HEADPHONE,
-};
-
-enum wcd9xxx_mbhc_micbias_type {
- MBHC_PRIMARY_MIC_MB,
- MBHC_ANC_MIC_MB,
-};
-
-enum wcd9xxx_micbias_num {
- MBHC_MICBIAS_INVALID = -1,
- MBHC_MICBIAS1,
- MBHC_MICBIAS2,
- MBHC_MICBIAS3,
- MBHC_MICBIAS4,
-};
-
-enum hw_jack_type {
- FOUR_POLE_JACK = 0,
- FIVE_POLE_JACK,
- SIX_POLE_JACK,
-};
-
-enum wcd9xx_mbhc_micbias_enable_bits {
- MBHC_MICBIAS_ENABLE_THRESHOLD_HEADSET,
- MBHC_MICBIAS_ENABLE_REGULAR_HEADSET,
-};
-
-enum wcd9xx_mbhc_cs_enable_bits {
- MBHC_CS_ENABLE_POLLING,
- MBHC_CS_ENABLE_INSERTION,
- MBHC_CS_ENABLE_REMOVAL,
- MBHC_CS_ENABLE_DET_ANC,
-};
-
-enum wcd9xxx_mbhc_state {
- MBHC_STATE_NONE = -1,
- MBHC_STATE_POTENTIAL,
- MBHC_STATE_POTENTIAL_RECOVERY,
- MBHC_STATE_RELEASE,
-};
-
-enum wcd9xxx_mbhc_btn_det_mem {
- MBHC_BTN_DET_V_BTN_LOW,
- MBHC_BTN_DET_V_BTN_HIGH,
- MBHC_BTN_DET_N_READY,
- MBHC_BTN_DET_N_CIC,
- MBHC_BTN_DET_GAIN
-};
-
-enum wcd9xxx_mbhc_clk_freq {
- TAIKO_MCLK_12P2MHZ = 0,
- TAIKO_MCLK_9P6MHZ,
- TAIKO_NUM_CLK_FREQS,
-};
-
-enum wcd9xxx_mbhc_event_state {
- MBHC_EVENT_PA_HPHL,
- MBHC_EVENT_PA_HPHR,
- MBHC_EVENT_PRE_TX_3_ON,
- MBHC_EVENT_POST_TX_3_OFF,
-};
-
-enum mbhc_hph_type {
- MBHC_HPH_NONE = 0,
- MBHC_HPH_MONO,
- MBHC_HPH_STEREO,
-};
-
-struct wcd9xxx_mbhc_general_cfg {
- u8 t_ldoh;
- u8 t_bg_fast_settle;
- u8 t_shutdown_plug_rem;
- u8 mbhc_nsa;
- u8 mbhc_navg;
- u8 v_micbias_l;
- u8 v_micbias;
- u8 mbhc_reserved;
- u16 settle_wait;
- u16 t_micbias_rampup;
- u16 t_micbias_rampdown;
- u16 t_supply_bringup;
-} __packed;
-
-struct wcd9xxx_mbhc_plug_detect_cfg {
- u32 mic_current;
- u32 hph_current;
- u16 t_mic_pid;
- u16 t_ins_complete;
- u16 t_ins_retry;
- u16 v_removal_delta;
- u8 micbias_slow_ramp;
- u8 reserved0;
- u8 reserved1;
- u8 reserved2;
-} __packed;
-
-struct wcd9xxx_mbhc_plug_type_cfg {
- u8 av_detect;
- u8 mono_detect;
- u8 num_ins_tries;
- u8 reserved0;
- s16 v_no_mic;
- s16 v_av_min;
- s16 v_av_max;
- s16 v_hs_min;
- s16 v_hs_max;
- u16 reserved1;
-} __packed;
-
-struct wcd9xxx_mbhc_btn_detect_cfg {
- s8 c[8];
- u8 nc;
- u8 n_meas;
- u8 mbhc_nsc;
- u8 n_btn_meas;
- u8 n_btn_con;
- u8 num_btn;
- u8 reserved0;
- u8 reserved1;
- u16 t_poll;
- u16 t_bounce_wait;
- u16 t_rel_timeout;
- s16 v_btn_press_delta_sta;
- s16 v_btn_press_delta_cic;
- u16 t_btn0_timeout;
- s16 _v_btn_low[0]; /* v_btn_low[num_btn] */
- s16 _v_btn_high[0]; /* v_btn_high[num_btn] */
- u8 _n_ready[TAIKO_NUM_CLK_FREQS];
- u8 _n_cic[TAIKO_NUM_CLK_FREQS];
- u8 _gain[TAIKO_NUM_CLK_FREQS];
-} __packed;
-
-struct wcd9xxx_mbhc_imped_detect_cfg {
- u8 _hs_imped_detect;
- u8 _n_rload;
- u8 _hph_keep_on;
- u8 _repeat_rload_calc;
- u16 _t_dac_ramp_time;
- u16 _rhph_high;
- u16 _rhph_low;
- u16 _rload[0]; /* rload[n_rload] */
- u16 _alpha[0]; /* alpha[n_rload] */
- u16 _beta[3];
-} __packed;
-
-struct wcd9xxx_mbhc_config {
- bool read_fw_bin;
- /*
- * void* calibration contains:
- * struct wcd9xxx_mbhc_general_cfg generic;
- * struct wcd9xxx_mbhc_plug_detect_cfg plug_det;
- * struct wcd9xxx_mbhc_plug_type_cfg plug_type;
- * struct wcd9xxx_mbhc_btn_detect_cfg btn_det;
- * struct wcd9xxx_mbhc_imped_detect_cfg imped_det;
- * Note: various size depends on btn_det->num_btn
- */
- void *calibration;
- enum wcd9xxx_micbias_num micbias;
- enum wcd9xxx_micbias_num anc_micbias;
- int (*mclk_cb_fn)(struct snd_soc_codec*, int, bool);
- unsigned int mclk_rate;
- unsigned int gpio;
- unsigned int gpio_irq;
- int gpio_level_insert;
- bool insert_detect; /* codec has own MBHC_INSERT_DETECT */
- bool detect_extn_cable;
- /* bit mask of enum wcd9xx_mbhc_micbias_enable_bits */
- unsigned long micbias_enable_flags;
- /* swap_gnd_mic returns true if extern GND/MIC swap switch toggled */
- bool (*swap_gnd_mic)(struct snd_soc_codec *);
- unsigned long cs_enable_flags;
- bool use_int_rbias;
- bool do_recalibration;
- bool use_vddio_meas;
- bool enable_anc_mic_detect;
- enum hw_jack_type hw_jack_type;
- int key_code[8];
-};
-
-struct wcd9xxx_cfilt_mode {
- u8 reg_mode_val;
- u8 cur_mode_val;
- u8 reg_mask;
-};
-
-struct wcd9xxx_mbhc_intr {
- int poll_plug_rem;
- int shortavg_complete;
- int potential_button_press;
- int button_release;
- int dce_est_complete;
- int insertion;
- int hph_left_ocp;
- int hph_right_ocp;
- int hs_jack_switch;
-};
-
-struct wcd9xxx_mbhc_cb {
- void (*enable_mux_bias_block)(struct snd_soc_codec *);
- void (*cfilt_fast_mode)(struct snd_soc_codec *, struct wcd9xxx_mbhc *);
- void (*codec_specific_cal)(struct snd_soc_codec *,
- struct wcd9xxx_mbhc *);
- struct wcd9xxx_cfilt_mode (*switch_cfilt_mode)(struct wcd9xxx_mbhc *,
- bool);
- void (*select_cfilt)(struct snd_soc_codec *, struct wcd9xxx_mbhc *);
- enum wcd9xxx_cdc_type (*get_cdc_type)(void);
- void (*enable_clock_gate)(struct snd_soc_codec *, bool);
- int (*setup_zdet)(struct wcd9xxx_mbhc *,
- enum mbhc_impedance_detect_stages stage);
- void (*compute_impedance)(struct wcd9xxx_mbhc *, s16 *, s16 *,
- uint32_t *, uint32_t *);
- void (*zdet_error_approx)(struct wcd9xxx_mbhc *, uint32_t *,
- uint32_t *);
- void (*enable_mbhc_txfe)(struct snd_soc_codec *, bool);
- int (*enable_mb_source)(struct snd_soc_codec *, bool, bool);
- void (*setup_int_rbias)(struct snd_soc_codec *, bool);
- void (*pull_mb_to_vddio)(struct snd_soc_codec *, bool);
- bool (*insert_rem_status)(struct snd_soc_codec *);
- void (*micbias_pulldown_ctrl)(struct wcd9xxx_mbhc *, bool);
- int (*codec_rco_ctrl)(struct snd_soc_codec *, bool);
- void (*hph_auto_pulldown_ctrl)(struct snd_soc_codec *, bool);
- struct firmware_cal * (*get_hwdep_fw_cal)(struct snd_soc_codec *,
- enum wcd_cal_type);
-};
-
-struct wcd9xxx_mbhc {
- bool polling_active;
- /* Delayed work to report long button press */
- struct delayed_work mbhc_btn_dwork;
- int buttons_pressed;
- enum wcd9xxx_mbhc_state mbhc_state;
- struct wcd9xxx_mbhc_config *mbhc_cfg;
- const struct wcd9xxx_mbhc_cb *mbhc_cb;
-
- struct mbhc_internal_cal_data mbhc_data;
-
- struct mbhc_micbias_regs mbhc_bias_regs;
- struct mbhc_micbias_regs mbhc_anc_bias_regs;
-
- bool mbhc_micbias_switched;
-
- u32 hph_status; /* track headhpone status */
- u8 hphlocp_cnt; /* headphone left ocp retry */
- u8 hphrocp_cnt; /* headphone right ocp retry */
-
- /* Work to perform MBHC Firmware Read */
- struct delayed_work mbhc_firmware_dwork;
- const struct firmware *mbhc_fw;
- struct firmware_cal *mbhc_cal;
-
- struct delayed_work mbhc_insert_dwork;
-
- u8 current_plug;
- struct work_struct correct_plug_swch;
- /*
- * Work to perform polling on microphone voltage
- * in order to correct plug type once plug type
- * is detected as headphone
- */
- struct work_struct correct_plug_noswch;
- bool hs_detect_work_stop;
-
- bool lpi_enabled; /* low power insertion detection */
- bool in_swch_irq_handler;
-
- struct wcd9xxx_resmgr *resmgr;
- struct snd_soc_codec *codec;
-
- bool no_mic_headset_override;
-
- /* track PA/DAC state to sync with userspace */
- unsigned long hph_pa_dac_state;
- /*
- * save codec's state with resmgr event notification
- * bit flags of enum wcd9xxx_mbhc_event_state
- */
- unsigned long event_state;
-
- unsigned long mbhc_last_resume; /* in jiffies */
-
- bool insert_detect_level_insert;
-
- struct snd_soc_jack headset_jack;
- struct snd_soc_jack button_jack;
-
- struct notifier_block nblock;
-
- bool micbias_enable;
- int (*micbias_enable_cb)(struct snd_soc_codec*, bool,
- enum wcd9xxx_micbias_num);
-
- bool impedance_detect;
- /* impedance of hphl and hphr */
- uint32_t zl, zr;
-
- u32 rco_clk_rate;
-
- bool update_z;
-
- u8 scaling_mux_in;
- /* Holds codec specific interrupt mapping */
- const struct wcd9xxx_mbhc_intr *intr_ids;
-
- /* Indicates status of current source switch */
- bool is_cs_enabled;
-
- /* Holds type of Headset - Mono/Stereo */
- enum mbhc_hph_type hph_type;
-
-#ifdef CONFIG_DEBUG_FS
- struct dentry *debugfs_poke;
- struct dentry *debugfs_mbhc;
-#endif
-
- struct mutex mbhc_lock;
-};
-
-#define WCD9XXX_MBHC_CAL_SIZE(buttons, rload) ( \
- sizeof(enum wcd9xxx_micbias_num) + \
- sizeof(struct wcd9xxx_mbhc_general_cfg) + \
- sizeof(struct wcd9xxx_mbhc_plug_detect_cfg) + \
- ((sizeof(s16) + sizeof(s16)) * buttons) + \
- sizeof(struct wcd9xxx_mbhc_plug_type_cfg) + \
- sizeof(struct wcd9xxx_mbhc_btn_detect_cfg) + \
- sizeof(struct wcd9xxx_mbhc_imped_detect_cfg) + \
- ((sizeof(u16) + sizeof(u16)) * rload) \
- )
-
-#define WCD9XXX_MBHC_CAL_GENERAL_PTR(cali) ( \
- (struct wcd9xxx_mbhc_general_cfg *) cali)
-#define WCD9XXX_MBHC_CAL_PLUG_DET_PTR(cali) ( \
- (struct wcd9xxx_mbhc_plug_detect_cfg *) \
- &(WCD9XXX_MBHC_CAL_GENERAL_PTR(cali)[1]))
-#define WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(cali) ( \
- (struct wcd9xxx_mbhc_plug_type_cfg *) \
- &(WCD9XXX_MBHC_CAL_PLUG_DET_PTR(cali)[1]))
-#define WCD9XXX_MBHC_CAL_BTN_DET_PTR(cali) ( \
- (struct wcd9xxx_mbhc_btn_detect_cfg *) \
- &(WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(cali)[1]))
-#define WCD9XXX_MBHC_CAL_IMPED_DET_PTR(cali) ( \
- (struct wcd9xxx_mbhc_imped_detect_cfg *) \
- (((void *)&WCD9XXX_MBHC_CAL_BTN_DET_PTR(cali)[1]) + \
- (WCD9XXX_MBHC_CAL_BTN_DET_PTR(cali)->num_btn * \
- (sizeof(WCD9XXX_MBHC_CAL_BTN_DET_PTR(cali)->_v_btn_low[0]) + \
- sizeof(WCD9XXX_MBHC_CAL_BTN_DET_PTR(cali)->_v_btn_high[0])))) \
- )
-
-/* minimum size of calibration data assuming there is only one button and
- * one rload.
- */
-#define WCD9XXX_MBHC_CAL_MIN_SIZE ( \
- sizeof(struct wcd9xxx_mbhc_general_cfg) + \
- sizeof(struct wcd9xxx_mbhc_plug_detect_cfg) + \
- sizeof(struct wcd9xxx_mbhc_plug_type_cfg) + \
- sizeof(struct wcd9xxx_mbhc_btn_detect_cfg) + \
- sizeof(struct wcd9xxx_mbhc_imped_detect_cfg) + \
- (sizeof(u16) * 2) \
- )
-
-#define WCD9XXX_MBHC_CAL_BTN_SZ(cfg_ptr) ( \
- sizeof(struct wcd9xxx_mbhc_btn_detect_cfg) + \
- (cfg_ptr->num_btn * (sizeof(cfg_ptr->_v_btn_low[0]) + \
- sizeof(cfg_ptr->_v_btn_high[0]))))
-
-#define WCD9XXX_MBHC_CAL_IMPED_MIN_SZ ( \
- sizeof(struct wcd9xxx_mbhc_imped_detect_cfg) + sizeof(u16) * 2)
-
-#define WCD9XXX_MBHC_CAL_IMPED_SZ(cfg_ptr) ( \
- sizeof(struct wcd9xxx_mbhc_imped_detect_cfg) + \
- (cfg_ptr->_n_rload * \
- (sizeof(cfg_ptr->_rload[0]) + sizeof(cfg_ptr->_alpha[0]))))
-
-int wcd9xxx_mbhc_set_keycode(struct wcd9xxx_mbhc *mbhc);
-int wcd9xxx_mbhc_start(struct wcd9xxx_mbhc *mbhc,
- struct wcd9xxx_mbhc_config *mbhc_cfg);
-void wcd9xxx_mbhc_stop(struct wcd9xxx_mbhc *mbhc);
-int wcd9xxx_mbhc_init(struct wcd9xxx_mbhc *mbhc, struct wcd9xxx_resmgr *resmgr,
- struct snd_soc_codec *codec,
- int (*micbias_enable_cb)(struct snd_soc_codec*, bool,
- enum wcd9xxx_micbias_num),
- const struct wcd9xxx_mbhc_cb *mbhc_cb,
- const struct wcd9xxx_mbhc_intr *mbhc_cdc_intr_ids,
- int rco_clk_rate,
- bool impedance_det_en);
-void wcd9xxx_mbhc_deinit(struct wcd9xxx_mbhc *mbhc);
-void *wcd9xxx_mbhc_cal_btn_det_mp(
- const struct wcd9xxx_mbhc_btn_detect_cfg *btn_det,
- const enum wcd9xxx_mbhc_btn_det_mem mem);
-int wcd9xxx_mbhc_get_impedance(struct wcd9xxx_mbhc *mbhc, uint32_t *zl,
- uint32_t *zr);
-#endif /* __WCD9XXX_MBHC_H__ */
diff --git a/sound/soc/codecs/wcd9xxx-resmgr-v2.c b/sound/soc/codecs/wcd9xxx-resmgr-v2.c
index 825aaee..feef0a4 100644
--- a/sound/soc/codecs/wcd9xxx-resmgr-v2.c
+++ b/sound/soc/codecs/wcd9xxx-resmgr-v2.c
@@ -108,6 +108,7 @@
}
return resmgr->clk_type;
}
+EXPORT_SYMBOL(wcd_resmgr_get_clk_type);
static void wcd_resmgr_cdc_specific_get_clk(struct wcd9xxx_resmgr_v2 *resmgr,
int clk_users)
@@ -123,6 +124,10 @@
}
}
+/*
+ * wcd_resmgr_post_ssr_v2
+ * @resmgr: handle to struct wcd9xxx_resmgr_v2
+ */
void wcd_resmgr_post_ssr_v2(struct wcd9xxx_resmgr_v2 *resmgr)
{
int old_bg_audio_users;
@@ -157,7 +162,7 @@
WCD9XXX_V2_BG_CLK_UNLOCK(resmgr);
}
-
+EXPORT_SYMBOL(wcd_resmgr_post_ssr_v2);
/*
* wcd_resmgr_enable_master_bias: enable codec master bias
@@ -190,6 +195,7 @@
mutex_unlock(&resmgr->master_bias_lock);
return 0;
}
+EXPORT_SYMBOL(wcd_resmgr_enable_master_bias);
/*
* wcd_resmgr_disable_master_bias: disable codec master bias
@@ -213,6 +219,7 @@
mutex_unlock(&resmgr->master_bias_lock);
return 0;
}
+EXPORT_SYMBOL(wcd_resmgr_disable_master_bias);
static int wcd_resmgr_enable_clk_mclk(struct wcd9xxx_resmgr_v2 *resmgr)
{
@@ -511,6 +518,7 @@
return ret;
}
+EXPORT_SYMBOL(wcd_resmgr_enable_clk_block);
void wcd_resmgr_set_sido_input_src(struct wcd9xxx_resmgr_v2 *resmgr,
int sido_src)
@@ -601,6 +609,7 @@
return ret;
}
+EXPORT_SYMBOL(wcd_resmgr_disable_clk_block);
/*
* wcd_resmgr_init: initialize wcd resource manager
@@ -639,6 +648,7 @@
return resmgr;
}
+EXPORT_SYMBOL(wcd_resmgr_init);
/*
* wcd_resmgr_remove: Clean-up wcd resource manager
@@ -649,6 +659,7 @@
mutex_destroy(&resmgr->master_bias_lock);
kfree(resmgr);
}
+EXPORT_SYMBOL(wcd_resmgr_remove);
/*
* wcd_resmgr_post_init: post init call to assign codec handle
@@ -676,5 +687,7 @@
return 0;
}
+EXPORT_SYMBOL(wcd_resmgr_post_init);
+
MODULE_DESCRIPTION("wcd9xxx resmgr v2 module");
MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/wcd9xxx-resmgr.c b/sound/soc/codecs/wcd9xxx-resmgr.c
deleted file mode 100644
index 4b02652..0000000
--- a/sound/soc/codecs/wcd9xxx-resmgr.c
+++ /dev/null
@@ -1,1099 +0,0 @@
-/* Copyright (c) 2012-2014, 2016-2017 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/firmware.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-#include <linux/device.h>
-#include <linux/printk.h>
-#include <linux/ratelimit.h>
-#include <linux/debugfs.h>
-#include <linux/mfd/wcd9xxx/core.h>
-#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
-#include <uapi/linux/mfd/wcd9xxx/wcd9320_registers.h>
-#include <linux/mfd/wcd9xxx/wcd9330_registers.h>
-#include <linux/mfd/wcd9xxx/pdata.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-#include <sound/soc-dapm.h>
-#include <sound/tlv.h>
-#include <linux/bitops.h>
-#include <linux/delay.h>
-#include <linux/pm_runtime.h>
-#include <linux/kernel.h>
-#include <linux/gpio.h>
-#include "wcd9xxx-resmgr.h"
-
-static char wcd9xxx_event_string[][64] = {
- "WCD9XXX_EVENT_INVALID",
-
- "WCD9XXX_EVENT_PRE_RCO_ON",
- "WCD9XXX_EVENT_POST_RCO_ON",
- "WCD9XXX_EVENT_PRE_RCO_OFF",
- "WCD9XXX_EVENT_POST_RCO_OFF",
-
- "WCD9XXX_EVENT_PRE_MCLK_ON",
- "WCD9XXX_EVENT_POST_MCLK_ON",
- "WCD9XXX_EVENT_PRE_MCLK_OFF",
- "WCD9XXX_EVENT_POST_MCLK_OFF",
-
- "WCD9XXX_EVENT_PRE_BG_OFF",
- "WCD9XXX_EVENT_POST_BG_OFF",
- "WCD9XXX_EVENT_PRE_BG_AUDIO_ON",
- "WCD9XXX_EVENT_POST_BG_AUDIO_ON",
- "WCD9XXX_EVENT_PRE_BG_MBHC_ON",
- "WCD9XXX_EVENT_POST_BG_MBHC_ON",
-
- "WCD9XXX_EVENT_PRE_MICBIAS_1_OFF",
- "WCD9XXX_EVENT_POST_MICBIAS_1_OFF",
- "WCD9XXX_EVENT_PRE_MICBIAS_2_OFF",
- "WCD9XXX_EVENT_POST_MICBIAS_2_OFF",
- "WCD9XXX_EVENT_PRE_MICBIAS_3_OFF",
- "WCD9XXX_EVENT_POST_MICBIAS_3_OFF",
- "WCD9XXX_EVENT_PRE_MICBIAS_4_OFF",
- "WCD9XXX_EVENT_POST_MICBIAS_4_OFF",
- "WCD9XXX_EVENT_PRE_MICBIAS_1_ON",
- "WCD9XXX_EVENT_POST_MICBIAS_1_ON",
- "WCD9XXX_EVENT_PRE_MICBIAS_2_ON",
- "WCD9XXX_EVENT_POST_MICBIAS_2_ON",
- "WCD9XXX_EVENT_PRE_MICBIAS_3_ON",
- "WCD9XXX_EVENT_POST_MICBIAS_3_ON",
- "WCD9XXX_EVENT_PRE_MICBIAS_4_ON",
- "WCD9XXX_EVENT_POST_MICBIAS_4_ON",
-
- "WCD9XXX_EVENT_PRE_CFILT_1_OFF",
- "WCD9XXX_EVENT_POST_CFILT_1_OFF",
- "WCD9XXX_EVENT_PRE_CFILT_2_OFF",
- "WCD9XXX_EVENT_POST_CFILT_2_OFF",
- "WCD9XXX_EVENT_PRE_CFILT_3_OFF",
- "WCD9XXX_EVENT_POST_CFILT_3_OFF",
- "WCD9XXX_EVENT_PRE_CFILT_1_ON",
- "WCD9XXX_EVENT_POST_CFILT_1_ON",
- "WCD9XXX_EVENT_PRE_CFILT_2_ON",
- "WCD9XXX_EVENT_POST_CFILT_2_ON",
- "WCD9XXX_EVENT_PRE_CFILT_3_ON",
- "WCD9XXX_EVENT_POST_CFILT_3_ON",
-
- "WCD9XXX_EVENT_PRE_HPHL_PA_ON",
- "WCD9XXX_EVENT_POST_HPHL_PA_OFF",
- "WCD9XXX_EVENT_PRE_HPHR_PA_ON",
- "WCD9XXX_EVENT_POST_HPHR_PA_OFF",
-
- "WCD9XXX_EVENT_POST_RESUME",
-
- "WCD9XXX_EVENT_PRE_TX_3_ON",
- "WCD9XXX_EVENT_POST_TX_3_OFF",
-
- "WCD9XXX_EVENT_LAST",
-};
-
-#define WCD9XXX_RCO_CALIBRATION_RETRY_COUNT 5
-#define WCD9XXX_RCO_CALIBRATION_DELAY_US 5000
-#define WCD9XXX_USLEEP_RANGE_MARGIN_US 100
-#define WCD9XXX_RCO_CALIBRATION_DELAY_INC_US 1000
-
-struct wcd9xxx_resmgr_cond_entry {
- unsigned short reg;
- int shift;
- bool invert;
- enum wcd9xxx_resmgr_cond cond;
- struct list_head list;
-};
-
-static enum wcd9xxx_clock_type wcd9xxx_save_clock(struct wcd9xxx_resmgr
- *resmgr);
-static void wcd9xxx_restore_clock(struct wcd9xxx_resmgr *resmgr,
- enum wcd9xxx_clock_type type);
-
-const char *wcd9xxx_get_event_string(enum wcd9xxx_notify_event type)
-{
- return wcd9xxx_event_string[type];
-}
-
-void wcd9xxx_resmgr_notifier_call(struct wcd9xxx_resmgr *resmgr,
- const enum wcd9xxx_notify_event e)
-{
- pr_debug("%s: notifier call event %d\n", __func__, e);
- blocking_notifier_call_chain(&resmgr->notifier, e, resmgr);
-}
-
-static void wcd9xxx_disable_bg(struct wcd9xxx_resmgr *resmgr)
-{
- /* Notify bg mode change */
- wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_PRE_BG_OFF);
- /* Disable bg */
- snd_soc_update_bits(resmgr->codec, WCD9XXX_A_BIAS_CENTRAL_BG_CTL,
- 0x03, 0x00);
- usleep_range(100, 110);
- /* Notify bg mode change */
- wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_POST_BG_OFF);
-}
-
-/*
- * BG enablement should always enable in slow mode.
- * The fast mode doesn't need to be enabled as fast mode BG is to be driven
- * by MBHC override.
- */
-static void wcd9xxx_enable_bg(struct wcd9xxx_resmgr *resmgr)
-{
- struct snd_soc_codec *codec = resmgr->codec;
-
- /* Enable BG in slow mode and precharge */
- snd_soc_update_bits(codec, WCD9XXX_A_BIAS_CENTRAL_BG_CTL, 0x80, 0x80);
- snd_soc_update_bits(codec, WCD9XXX_A_BIAS_CENTRAL_BG_CTL, 0x04, 0x04);
- snd_soc_update_bits(codec, WCD9XXX_A_BIAS_CENTRAL_BG_CTL, 0x01, 0x01);
- usleep_range(1000, 1100);
- snd_soc_update_bits(codec, WCD9XXX_A_BIAS_CENTRAL_BG_CTL, 0x80, 0x00);
-}
-
-static void wcd9xxx_enable_bg_audio(struct wcd9xxx_resmgr *resmgr)
-{
- /* Notify bandgap mode change */
- wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_PRE_BG_AUDIO_ON);
- wcd9xxx_enable_bg(resmgr);
- /* Notify bandgap mode change */
- wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_POST_BG_AUDIO_ON);
-}
-
-static void wcd9xxx_enable_bg_mbhc(struct wcd9xxx_resmgr *resmgr)
-{
- struct snd_soc_codec *codec = resmgr->codec;
-
- /* Notify bandgap mode change */
- wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_PRE_BG_MBHC_ON);
-
- /*
- * mclk should be off or clk buff source souldn't be VBG
- * Let's turn off mclk always
- */
- WARN_ON(snd_soc_read(codec, WCD9XXX_A_CLK_BUFF_EN2) & (1 << 2));
-
- wcd9xxx_enable_bg(resmgr);
- /* Notify bandgap mode change */
- wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_POST_BG_MBHC_ON);
-}
-
-static void wcd9xxx_disable_clock_block(struct wcd9xxx_resmgr *resmgr)
-{
- struct snd_soc_codec *codec = resmgr->codec;
-
- pr_debug("%s: enter\n", __func__);
- WCD9XXX_BG_CLK_ASSERT_LOCKED(resmgr);
-
- /* Notify */
- if (resmgr->clk_type == WCD9XXX_CLK_RCO)
- wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_PRE_RCO_OFF);
- else
- wcd9xxx_resmgr_notifier_call(resmgr,
- WCD9XXX_EVENT_PRE_MCLK_OFF);
-
- switch (resmgr->codec_type) {
- case WCD9XXX_CDC_TYPE_TOMTOM:
- snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN2, 0x04, 0x00);
- usleep_range(50, 55);
- snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN2, 0x02, 0x02);
- snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1, 0x40, 0x40);
- snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1, 0x40, 0x00);
- snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1, 0x01, 0x00);
- break;
- default:
- snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN2, 0x04, 0x00);
- usleep_range(50, 55);
- snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN2, 0x02, 0x02);
- snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1, 0x05, 0x00);
- break;
- }
- usleep_range(50, 55);
- /* Notify */
- if (resmgr->clk_type == WCD9XXX_CLK_RCO) {
- wcd9xxx_resmgr_notifier_call(resmgr,
- WCD9XXX_EVENT_POST_RCO_OFF);
- } else {
- wcd9xxx_resmgr_notifier_call(resmgr,
- WCD9XXX_EVENT_POST_MCLK_OFF);
- }
- pr_debug("%s: leave\n", __func__);
-}
-
-static void wcd9xxx_resmgr_cdc_specific_get_clk(struct wcd9xxx_resmgr *resmgr,
- int clk_users)
-{
- /* Caller of this function should have acquired
- * BG_CLK lock
- */
- WCD9XXX_BG_CLK_UNLOCK(resmgr);
- if (clk_users) {
- if (resmgr->resmgr_cb &&
- resmgr->resmgr_cb->cdc_rco_ctrl) {
- while (clk_users--)
- resmgr->resmgr_cb->cdc_rco_ctrl(resmgr->codec,
- true);
- }
- }
- /* Acquire BG_CLK lock before return */
- WCD9XXX_BG_CLK_LOCK(resmgr);
-}
-
-void wcd9xxx_resmgr_post_ssr(struct wcd9xxx_resmgr *resmgr)
-{
- int old_bg_audio_users, old_bg_mbhc_users;
- int old_clk_rco_users, old_clk_mclk_users;
-
- pr_debug("%s: enter\n", __func__);
-
- WCD9XXX_BG_CLK_LOCK(resmgr);
- old_bg_audio_users = resmgr->bg_audio_users;
- old_bg_mbhc_users = resmgr->bg_mbhc_users;
- old_clk_rco_users = resmgr->clk_rco_users;
- old_clk_mclk_users = resmgr->clk_mclk_users;
- resmgr->bg_audio_users = 0;
- resmgr->bg_mbhc_users = 0;
- resmgr->bandgap_type = WCD9XXX_BANDGAP_OFF;
- resmgr->clk_rco_users = 0;
- resmgr->clk_mclk_users = 0;
- resmgr->clk_type = WCD9XXX_CLK_OFF;
-
- if (old_bg_audio_users) {
- while (old_bg_audio_users--)
- wcd9xxx_resmgr_get_bandgap(resmgr,
- WCD9XXX_BANDGAP_AUDIO_MODE);
- }
-
- if (old_bg_mbhc_users) {
- while (old_bg_mbhc_users--)
- wcd9xxx_resmgr_get_bandgap(resmgr,
- WCD9XXX_BANDGAP_MBHC_MODE);
- }
-
- if (old_clk_mclk_users) {
- while (old_clk_mclk_users--)
- wcd9xxx_resmgr_get_clk_block(resmgr, WCD9XXX_CLK_MCLK);
- }
-
- if (resmgr->codec_type == WCD9XXX_CDC_TYPE_TOMTOM) {
- wcd9xxx_resmgr_cdc_specific_get_clk(resmgr, old_clk_rco_users);
- } else if (old_clk_rco_users) {
- while (old_clk_rco_users--)
- wcd9xxx_resmgr_get_clk_block(resmgr,
- WCD9XXX_CLK_RCO);
- }
- WCD9XXX_BG_CLK_UNLOCK(resmgr);
- pr_debug("%s: leave\n", __func__);
-}
-
-/*
- * wcd9xxx_resmgr_get_bandgap : Vote for bandgap ref
- * choice : WCD9XXX_BANDGAP_AUDIO_MODE, WCD9XXX_BANDGAP_MBHC_MODE
- */
-void wcd9xxx_resmgr_get_bandgap(struct wcd9xxx_resmgr *resmgr,
- const enum wcd9xxx_bandgap_type choice)
-{
- enum wcd9xxx_clock_type clock_save = WCD9XXX_CLK_OFF;
-
- pr_debug("%s: enter, wants %d\n", __func__, choice);
-
- WCD9XXX_BG_CLK_ASSERT_LOCKED(resmgr);
- switch (choice) {
- case WCD9XXX_BANDGAP_AUDIO_MODE:
- resmgr->bg_audio_users++;
- if (resmgr->bg_audio_users == 1 && resmgr->bg_mbhc_users) {
- /*
- * Current bg is MBHC mode, about to switch to
- * audio mode.
- */
- WARN_ON(resmgr->bandgap_type !=
- WCD9XXX_BANDGAP_MBHC_MODE);
-
- /* BG mode can be changed only with clock off */
- if (resmgr->codec_type != WCD9XXX_CDC_TYPE_TOMTOM)
- clock_save = wcd9xxx_save_clock(resmgr);
- /* Swtich BG mode */
- wcd9xxx_disable_bg(resmgr);
- wcd9xxx_enable_bg_audio(resmgr);
- /* restore clock */
- if (resmgr->codec_type != WCD9XXX_CDC_TYPE_TOMTOM)
- wcd9xxx_restore_clock(resmgr, clock_save);
- } else if (resmgr->bg_audio_users == 1) {
- /* currently off, just enable it */
- WARN_ON(resmgr->bandgap_type != WCD9XXX_BANDGAP_OFF);
- wcd9xxx_enable_bg_audio(resmgr);
- }
- resmgr->bandgap_type = WCD9XXX_BANDGAP_AUDIO_MODE;
- break;
- case WCD9XXX_BANDGAP_MBHC_MODE:
- resmgr->bg_mbhc_users++;
- if (resmgr->bandgap_type == WCD9XXX_BANDGAP_MBHC_MODE ||
- resmgr->bandgap_type == WCD9XXX_BANDGAP_AUDIO_MODE)
- /* do nothing */
- break;
-
- /* bg mode can be changed only with clock off */
- clock_save = wcd9xxx_save_clock(resmgr);
- /* enable bg with MBHC mode */
- wcd9xxx_enable_bg_mbhc(resmgr);
- /* restore clock */
- wcd9xxx_restore_clock(resmgr, clock_save);
- /* save current mode */
- resmgr->bandgap_type = WCD9XXX_BANDGAP_MBHC_MODE;
- break;
- default:
- pr_err("%s: Error, Invalid bandgap settings\n", __func__);
- break;
- }
-
- pr_debug("%s: bg users audio %d, mbhc %d\n", __func__,
- resmgr->bg_audio_users, resmgr->bg_mbhc_users);
-}
-
-/*
- * wcd9xxx_resmgr_put_bandgap : Unvote bandgap ref that has been voted
- * choice : WCD9XXX_BANDGAP_AUDIO_MODE, WCD9XXX_BANDGAP_MBHC_MODE
- */
-void wcd9xxx_resmgr_put_bandgap(struct wcd9xxx_resmgr *resmgr,
- enum wcd9xxx_bandgap_type choice)
-{
- enum wcd9xxx_clock_type clock_save;
-
- pr_debug("%s: enter choice %d\n", __func__, choice);
-
- WCD9XXX_BG_CLK_ASSERT_LOCKED(resmgr);
- switch (choice) {
- case WCD9XXX_BANDGAP_AUDIO_MODE:
- if (--resmgr->bg_audio_users == 0) {
- if (resmgr->bg_mbhc_users) {
- /* bg mode can be changed only with clock off */
- clock_save = wcd9xxx_save_clock(resmgr);
- /* switch to MBHC mode */
- wcd9xxx_enable_bg_mbhc(resmgr);
- /* restore clock */
- wcd9xxx_restore_clock(resmgr, clock_save);
- resmgr->bandgap_type =
- WCD9XXX_BANDGAP_MBHC_MODE;
- } else {
- /* turn off */
- wcd9xxx_disable_bg(resmgr);
- resmgr->bandgap_type = WCD9XXX_BANDGAP_OFF;
- }
- }
- break;
- case WCD9XXX_BANDGAP_MBHC_MODE:
- WARN(resmgr->bandgap_type == WCD9XXX_BANDGAP_OFF,
- "Unexpected bandgap type %d\n", resmgr->bandgap_type);
- if (--resmgr->bg_mbhc_users == 0 &&
- resmgr->bandgap_type == WCD9XXX_BANDGAP_MBHC_MODE) {
- wcd9xxx_disable_bg(resmgr);
- resmgr->bandgap_type = WCD9XXX_BANDGAP_OFF;
- }
- break;
- default:
- pr_err("%s: Error, Invalid bandgap settings\n", __func__);
- break;
- }
-
- pr_debug("%s: bg users audio %d, mbhc %d\n", __func__,
- resmgr->bg_audio_users, resmgr->bg_mbhc_users);
-}
-
-void wcd9xxx_resmgr_enable_rx_bias(struct wcd9xxx_resmgr *resmgr, u32 enable)
-{
- struct snd_soc_codec *codec = resmgr->codec;
-
- if (enable) {
- resmgr->rx_bias_count++;
- if (resmgr->rx_bias_count == 1)
- snd_soc_update_bits(codec, WCD9XXX_A_RX_COM_BIAS,
- 0x80, 0x80);
- } else {
- resmgr->rx_bias_count--;
- if (!resmgr->rx_bias_count)
- snd_soc_update_bits(codec, WCD9XXX_A_RX_COM_BIAS,
- 0x80, 0x00);
- }
-}
-
-int wcd9xxx_resmgr_enable_config_mode(struct wcd9xxx_resmgr *resmgr, int enable)
-{
- struct snd_soc_codec *codec = resmgr->codec;
-
- pr_debug("%s: enable = %d\n", __func__, enable);
- if (enable) {
- snd_soc_update_bits(codec, WCD9XXX_A_RC_OSC_FREQ, 0x10, 0);
- /* bandgap mode to fast */
- if (resmgr->pdata->mclk_rate == WCD9XXX_MCLK_CLK_12P288MHZ)
- /* Set current value to 200nA for 12.288MHz clock */
- snd_soc_write(codec, WCD9XXX_A_BIAS_OSC_BG_CTL, 0x37);
- else
- snd_soc_write(codec, WCD9XXX_A_BIAS_OSC_BG_CTL, 0x17);
-
- usleep_range(5, 10);
- snd_soc_update_bits(codec, WCD9XXX_A_RC_OSC_FREQ, 0x80, 0x80);
- snd_soc_update_bits(codec, WCD9XXX_A_RC_OSC_TEST, 0x80, 0x80);
- usleep_range(10, 20);
- snd_soc_update_bits(codec, WCD9XXX_A_RC_OSC_TEST, 0x80, 0);
- usleep_range(10000, 10100);
-
- if (resmgr->pdata->mclk_rate != WCD9XXX_MCLK_CLK_12P288MHZ)
- snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1,
- 0x08, 0x08);
- } else {
- snd_soc_update_bits(codec, WCD9XXX_A_BIAS_OSC_BG_CTL, 0x1, 0);
- snd_soc_update_bits(codec, WCD9XXX_A_RC_OSC_FREQ, 0x80, 0);
- }
-
- return 0;
-}
-
-static void wcd9xxx_enable_clock_block(struct wcd9xxx_resmgr *resmgr,
- enum wcd9xxx_clock_config_mode config_mode)
-{
- struct snd_soc_codec *codec = resmgr->codec;
- unsigned long delay = WCD9XXX_RCO_CALIBRATION_DELAY_US;
- int num_retry = 0;
- unsigned int valr;
- unsigned int valr1;
- unsigned int valw[] = {0x01, 0x01, 0x10, 0x00};
-
- pr_debug("%s: config_mode = %d\n", __func__, config_mode);
-
- /* transit to RCO requires mclk off */
- if (resmgr->codec_type != WCD9XXX_CDC_TYPE_TOMTOM)
- WARN_ON(snd_soc_read(codec, WCD9XXX_A_CLK_BUFF_EN2) & (1 << 2));
-
- if (config_mode == WCD9XXX_CFG_RCO) {
- /* Notify */
- wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_PRE_RCO_ON);
- /* enable RCO and switch to it */
- wcd9xxx_resmgr_enable_config_mode(resmgr, 1);
- snd_soc_write(codec, WCD9XXX_A_CLK_BUFF_EN2, 0x02);
- usleep_range(1000, 1100);
- } else if (config_mode == WCD9XXX_CFG_CAL_RCO) {
- snd_soc_update_bits(codec, TOMTOM_A_BIAS_OSC_BG_CTL,
- 0x01, 0x01);
- /* 1ms sleep required after BG enabled */
- usleep_range(1000, 1100);
-
- if (resmgr->pdata->mclk_rate == WCD9XXX_MCLK_CLK_12P288MHZ) {
- /*
- * Set RCO clock rate as 12.288MHz rate explicitly
- * as the Qfuse values are incorrect for this rate
- */
- snd_soc_update_bits(codec, TOMTOM_A_RCO_CTRL,
- 0x50, 0x50);
- } else {
- snd_soc_update_bits(codec, TOMTOM_A_RCO_CTRL,
- 0x18, 0x10);
- valr = snd_soc_read(codec,
- TOMTOM_A_QFUSE_DATA_OUT0) & (0x04);
- valr1 = snd_soc_read(codec,
- TOMTOM_A_QFUSE_DATA_OUT1) & (0x08);
- valr = (valr >> 1) | (valr1 >> 3);
- snd_soc_update_bits(codec, TOMTOM_A_RCO_CTRL, 0x60,
- valw[valr] << 5);
- }
- snd_soc_update_bits(codec, TOMTOM_A_RCO_CTRL, 0x80, 0x80);
-
- do {
- snd_soc_update_bits(codec,
- TOMTOM_A_RCO_CALIBRATION_CTRL1,
- 0x80, 0x80);
- snd_soc_update_bits(codec,
- TOMTOM_A_RCO_CALIBRATION_CTRL1,
- 0x80, 0x00);
- /* RCO calibration takes approx. 5ms */
- usleep_range(delay, delay +
- WCD9XXX_USLEEP_RANGE_MARGIN_US);
- if (!(snd_soc_read(codec,
- TOMTOM_A_RCO_CALIBRATION_RESULT1) & 0x10))
- break;
- if (num_retry >= 3) {
- delay = delay +
- WCD9XXX_RCO_CALIBRATION_DELAY_INC_US;
- }
- } while (num_retry++ < WCD9XXX_RCO_CALIBRATION_RETRY_COUNT);
- } else {
- /* Notify */
- wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_PRE_MCLK_ON);
- /* switch to MCLK */
-
- switch (resmgr->codec_type) {
- case WCD9XXX_CDC_TYPE_TOMTOM:
- snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1,
- 0x08, 0x00);
- snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1,
- 0x40, 0x40);
- snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1,
- 0x40, 0x00);
- /* clk source to ext clk and clk buff ref to VBG */
- snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1,
- 0x0C, 0x04);
- break;
- default:
- snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1,
- 0x08, 0x00);
- /* if RCO is enabled, switch from it */
- if (snd_soc_read(codec, WCD9XXX_A_RC_OSC_FREQ) & 0x80) {
- snd_soc_write(codec, WCD9XXX_A_CLK_BUFF_EN2,
- 0x02);
- wcd9xxx_resmgr_enable_config_mode(resmgr, 0);
- }
- /* clk source to ext clk and clk buff ref to VBG */
- snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1,
- 0x0C, 0x04);
- break;
- }
- }
-
- if (config_mode != WCD9XXX_CFG_CAL_RCO) {
- snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1,
- 0x01, 0x01);
- /*
- * sleep required by codec hardware to
- * enable clock buffer
- */
- usleep_range(1000, 1200);
- snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN2,
- 0x02, 0x00);
- /* on MCLK */
- snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN2,
- 0x04, 0x04);
- snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLK_MCLK_CTL,
- 0x01, 0x01);
- }
- usleep_range(50, 55);
-
- /* Notify */
- if (config_mode == WCD9XXX_CFG_RCO)
- wcd9xxx_resmgr_notifier_call(resmgr,
- WCD9XXX_EVENT_POST_RCO_ON);
- else if (config_mode == WCD9XXX_CFG_MCLK)
- wcd9xxx_resmgr_notifier_call(resmgr,
- WCD9XXX_EVENT_POST_MCLK_ON);
-}
-
-/*
- * disable clock and return previous clock state
- */
-static enum wcd9xxx_clock_type wcd9xxx_save_clock(struct wcd9xxx_resmgr *resmgr)
-{
- WCD9XXX_BG_CLK_ASSERT_LOCKED(resmgr);
- if (resmgr->clk_type != WCD9XXX_CLK_OFF)
- wcd9xxx_disable_clock_block(resmgr);
- return resmgr->clk_type != WCD9XXX_CLK_OFF;
-}
-
-static void wcd9xxx_restore_clock(struct wcd9xxx_resmgr *resmgr,
- enum wcd9xxx_clock_type type)
-{
- if (type != WCD9XXX_CLK_OFF)
- wcd9xxx_enable_clock_block(resmgr, type == WCD9XXX_CLK_RCO);
-}
-
-void wcd9xxx_resmgr_get_clk_block(struct wcd9xxx_resmgr *resmgr,
- enum wcd9xxx_clock_type type)
-{
- struct snd_soc_codec *codec = resmgr->codec;
-
- pr_debug("%s: current %d, requested %d, rco_users %d, mclk_users %d\n",
- __func__, resmgr->clk_type, type,
- resmgr->clk_rco_users, resmgr->clk_mclk_users);
- WCD9XXX_BG_CLK_ASSERT_LOCKED(resmgr);
- switch (type) {
- case WCD9XXX_CLK_RCO:
- if (++resmgr->clk_rco_users == 1 &&
- resmgr->clk_type == WCD9XXX_CLK_OFF) {
- /* enable RCO and switch to it */
- wcd9xxx_enable_clock_block(resmgr, WCD9XXX_CFG_RCO);
- resmgr->clk_type = WCD9XXX_CLK_RCO;
- } else if (resmgr->clk_rco_users == 1 &&
- resmgr->clk_type == WCD9XXX_CLK_MCLK &&
- resmgr->codec_type == WCD9XXX_CDC_TYPE_TOMTOM) {
- /*
- * Enable RCO but do not switch CLK MUX to RCO
- * unless ext_clk_users is 1, which indicates
- * EXT CLK is enabled for RCO calibration
- */
- wcd9xxx_enable_clock_block(resmgr, WCD9XXX_CFG_CAL_RCO);
- if (resmgr->ext_clk_users == 1) {
- /* Notify */
- wcd9xxx_resmgr_notifier_call(resmgr,
- WCD9XXX_EVENT_PRE_RCO_ON);
- /* CLK MUX to RCO */
- if (resmgr->pdata->mclk_rate !=
- WCD9XXX_MCLK_CLK_12P288MHZ)
- snd_soc_update_bits(codec,
- WCD9XXX_A_CLK_BUFF_EN1,
- 0x08, 0x08);
- resmgr->clk_type = WCD9XXX_CLK_RCO;
- wcd9xxx_resmgr_notifier_call(resmgr,
- WCD9XXX_EVENT_POST_RCO_ON);
- }
- }
- break;
- case WCD9XXX_CLK_MCLK:
- if (++resmgr->clk_mclk_users == 1 &&
- resmgr->clk_type == WCD9XXX_CLK_OFF) {
- /* switch to MCLK */
- wcd9xxx_enable_clock_block(resmgr, WCD9XXX_CFG_MCLK);
- resmgr->clk_type = WCD9XXX_CLK_MCLK;
- } else if (resmgr->clk_mclk_users == 1 &&
- resmgr->clk_type == WCD9XXX_CLK_RCO) {
- /* RCO to MCLK switch, with RCO still powered on */
- if (resmgr->codec_type == WCD9XXX_CDC_TYPE_TOMTOM) {
- wcd9xxx_resmgr_notifier_call(resmgr,
- WCD9XXX_EVENT_PRE_MCLK_ON);
- snd_soc_update_bits(codec,
- WCD9XXX_A_BIAS_CENTRAL_BG_CTL,
- 0x40, 0x00);
- /* Enable clock buffer */
- snd_soc_update_bits(codec,
- WCD9XXX_A_CLK_BUFF_EN1,
- 0x01, 0x01);
- snd_soc_update_bits(codec,
- WCD9XXX_A_CLK_BUFF_EN1,
- 0x08, 0x00);
- wcd9xxx_resmgr_notifier_call(resmgr,
- WCD9XXX_EVENT_POST_MCLK_ON);
- } else {
- /* if RCO is enabled, switch from it */
- WARN_ON(!(snd_soc_read(resmgr->codec,
- WCD9XXX_A_RC_OSC_FREQ) & 0x80));
- /* disable clock block */
- wcd9xxx_disable_clock_block(resmgr);
- /* switch to MCLK */
- wcd9xxx_enable_clock_block(resmgr,
- WCD9XXX_CFG_MCLK);
- }
- resmgr->clk_type = WCD9XXX_CLK_MCLK;
- }
- break;
- default:
- pr_err("%s: Error, Invalid clock get request %d\n", __func__,
- type);
- break;
- }
- pr_debug("%s: leave\n", __func__);
-}
-
-void wcd9xxx_resmgr_put_clk_block(struct wcd9xxx_resmgr *resmgr,
- enum wcd9xxx_clock_type type)
-{
- struct snd_soc_codec *codec = resmgr->codec;
-
- pr_debug("%s: current %d, put %d\n", __func__, resmgr->clk_type, type);
-
- WCD9XXX_BG_CLK_ASSERT_LOCKED(resmgr);
- switch (type) {
- case WCD9XXX_CLK_RCO:
- if (--resmgr->clk_rco_users == 0 &&
- resmgr->clk_type == WCD9XXX_CLK_RCO) {
- wcd9xxx_disable_clock_block(resmgr);
- if (resmgr->codec_type == WCD9XXX_CDC_TYPE_TOMTOM) {
- /* Powerdown RCO */
- snd_soc_update_bits(codec, TOMTOM_A_RCO_CTRL,
- 0x80, 0x00);
- snd_soc_update_bits(codec,
- TOMTOM_A_BIAS_OSC_BG_CTL,
- 0x01, 0x00);
- } else {
- /* if RCO is enabled, switch from it */
- if (snd_soc_read(resmgr->codec,
- WCD9XXX_A_RC_OSC_FREQ)
- & 0x80) {
- snd_soc_write(resmgr->codec,
- WCD9XXX_A_CLK_BUFF_EN2,
- 0x02);
- wcd9xxx_resmgr_enable_config_mode(
- resmgr, 0);
- }
- }
- resmgr->clk_type = WCD9XXX_CLK_OFF;
- }
- break;
- case WCD9XXX_CLK_MCLK:
- if (--resmgr->clk_mclk_users == 0 &&
- resmgr->clk_rco_users == 0) {
- wcd9xxx_disable_clock_block(resmgr);
-
- if ((resmgr->codec_type == WCD9XXX_CDC_TYPE_TOMTOM) &&
- (snd_soc_read(codec, TOMTOM_A_RCO_CTRL) & 0x80)) {
- /* powerdown RCO*/
- snd_soc_update_bits(codec, TOMTOM_A_RCO_CTRL,
- 0x80, 0x00);
- snd_soc_update_bits(codec,
- TOMTOM_A_BIAS_OSC_BG_CTL,
- 0x01, 0x00);
- }
- resmgr->clk_type = WCD9XXX_CLK_OFF;
- } else if (resmgr->clk_mclk_users == 0 &&
- resmgr->clk_rco_users) {
- if (resmgr->codec_type == WCD9XXX_CDC_TYPE_TOMTOM) {
- if (!(snd_soc_read(codec, TOMTOM_A_RCO_CTRL) &
- 0x80)) {
- dev_dbg(codec->dev, "%s: Enabling RCO\n",
- __func__);
- wcd9xxx_enable_clock_block(resmgr,
- WCD9XXX_CFG_CAL_RCO);
- snd_soc_update_bits(codec,
- WCD9XXX_A_CLK_BUFF_EN1,
- 0x01, 0x00);
- } else {
- wcd9xxx_resmgr_notifier_call(resmgr,
- WCD9XXX_EVENT_PRE_MCLK_OFF);
- snd_soc_update_bits(codec,
- WCD9XXX_A_CLK_BUFF_EN1,
- 0x08, 0x08);
- snd_soc_update_bits(codec,
- WCD9XXX_A_CLK_BUFF_EN1,
- 0x01, 0x00);
- wcd9xxx_resmgr_notifier_call(resmgr,
- WCD9XXX_EVENT_POST_MCLK_OFF);
- /* CLK Mux changed to RCO, notify that
- * RCO is ON
- */
- wcd9xxx_resmgr_notifier_call(resmgr,
- WCD9XXX_EVENT_POST_RCO_ON);
- }
- } else {
- /* disable clock */
- wcd9xxx_disable_clock_block(resmgr);
- /* switch to RCO */
- wcd9xxx_enable_clock_block(resmgr,
- WCD9XXX_CFG_RCO);
- }
- resmgr->clk_type = WCD9XXX_CLK_RCO;
- }
- break;
- default:
- pr_err("%s: Error, Invalid clock get request %d\n", __func__,
- type);
- break;
- }
- WARN_ON(resmgr->clk_rco_users < 0);
- WARN_ON(resmgr->clk_mclk_users < 0);
-
- pr_debug("%s: new rco_users %d, mclk_users %d\n", __func__,
- resmgr->clk_rco_users, resmgr->clk_mclk_users);
-}
-
-/*
- * wcd9xxx_resmgr_get_clk_type()
- * Returns clk type that is currently enabled
- */
-int wcd9xxx_resmgr_get_clk_type(struct wcd9xxx_resmgr *resmgr)
-{
- return resmgr->clk_type;
-}
-
-static void wcd9xxx_resmgr_update_cfilt_usage(struct wcd9xxx_resmgr *resmgr,
- enum wcd9xxx_cfilt_sel cfilt_sel,
- bool inc)
-{
- u16 micb_cfilt_reg;
- enum wcd9xxx_notify_event e_pre_on, e_post_off;
- struct snd_soc_codec *codec = resmgr->codec;
-
- switch (cfilt_sel) {
- case WCD9XXX_CFILT1_SEL:
- micb_cfilt_reg = WCD9XXX_A_MICB_CFILT_1_CTL;
- e_pre_on = WCD9XXX_EVENT_PRE_CFILT_1_ON;
- e_post_off = WCD9XXX_EVENT_POST_CFILT_1_OFF;
- break;
- case WCD9XXX_CFILT2_SEL:
- micb_cfilt_reg = WCD9XXX_A_MICB_CFILT_2_CTL;
- e_pre_on = WCD9XXX_EVENT_PRE_CFILT_2_ON;
- e_post_off = WCD9XXX_EVENT_POST_CFILT_2_OFF;
- break;
- case WCD9XXX_CFILT3_SEL:
- micb_cfilt_reg = WCD9XXX_A_MICB_CFILT_3_CTL;
- e_pre_on = WCD9XXX_EVENT_PRE_CFILT_3_ON;
- e_post_off = WCD9XXX_EVENT_POST_CFILT_3_OFF;
- break;
- default:
- WARN(1, "Invalid CFILT selection %d\n", cfilt_sel);
- return; /* should not happen */
- }
-
- if (inc) {
- if ((resmgr->cfilt_users[cfilt_sel]++) == 0) {
- /* Notify */
- wcd9xxx_resmgr_notifier_call(resmgr, e_pre_on);
- /* Enable CFILT */
- snd_soc_update_bits(codec, micb_cfilt_reg, 0x80, 0x80);
- }
- } else {
- /*
- * Check if count not zero, decrease
- * then check if zero, go ahead disable cfilter
- */
- WARN(resmgr->cfilt_users[cfilt_sel] == 0,
- "Invalid CFILT use count 0\n");
- if ((--resmgr->cfilt_users[cfilt_sel]) == 0) {
- /* Disable CFILT */
- snd_soc_update_bits(codec, micb_cfilt_reg, 0x80, 0);
- /* Notify MBHC so MBHC can switch CFILT to fast mode */
- wcd9xxx_resmgr_notifier_call(resmgr, e_post_off);
- }
- }
-}
-
-void wcd9xxx_resmgr_cfilt_get(struct wcd9xxx_resmgr *resmgr,
- enum wcd9xxx_cfilt_sel cfilt_sel)
-{
- return wcd9xxx_resmgr_update_cfilt_usage(resmgr, cfilt_sel, true);
-}
-
-void wcd9xxx_resmgr_cfilt_put(struct wcd9xxx_resmgr *resmgr,
- enum wcd9xxx_cfilt_sel cfilt_sel)
-{
- return wcd9xxx_resmgr_update_cfilt_usage(resmgr, cfilt_sel, false);
-}
-
-int wcd9xxx_resmgr_get_k_val(struct wcd9xxx_resmgr *resmgr,
- unsigned int cfilt_mv)
-{
- int rc = -EINVAL;
- unsigned int ldoh_v = resmgr->micbias_pdata->ldoh_v;
- unsigned int min_mv, max_mv;
-
- switch (ldoh_v) {
- case WCD9XXX_LDOH_1P95_V:
- min_mv = 160;
- max_mv = 1800;
- break;
- case WCD9XXX_LDOH_2P35_V:
- min_mv = 200;
- max_mv = 2200;
- break;
- case WCD9XXX_LDOH_2P75_V:
- min_mv = 240;
- max_mv = 2600;
- break;
- case WCD9XXX_LDOH_3P0_V:
- min_mv = 260;
- max_mv = 2875;
- break;
- default:
- goto done;
- }
-
- if (cfilt_mv < min_mv || cfilt_mv > max_mv)
- goto done;
-
- for (rc = 4; rc <= 44; rc++) {
- min_mv = max_mv * (rc) / 44;
- if (min_mv >= cfilt_mv) {
- rc -= 4;
- break;
- }
- }
-done:
- return rc;
-}
-
-static void wcd9xxx_resmgr_cond_trigger_cond(struct wcd9xxx_resmgr *resmgr,
- enum wcd9xxx_resmgr_cond cond)
-{
- struct list_head *l;
- struct wcd9xxx_resmgr_cond_entry *e;
- bool set;
-
- pr_debug("%s: enter\n", __func__);
- /* update bit if cond isn't available or cond is set */
- set = !test_bit(cond, &resmgr->cond_avail_flags) ||
- !!test_bit(cond, &resmgr->cond_flags);
- list_for_each(l, &resmgr->update_bit_cond_h) {
- e = list_entry(l, struct wcd9xxx_resmgr_cond_entry, list);
- if (e->cond == cond)
- snd_soc_update_bits(resmgr->codec, e->reg,
- 1 << e->shift,
- (set ? !e->invert : e->invert)
- << e->shift);
- }
- pr_debug("%s: leave\n", __func__);
-}
-
-/*
- * wcd9xxx_regmgr_cond_register : notify resmgr conditions in the condbits are
- * available and notified.
- * condbits : contains bitmask of enum wcd9xxx_resmgr_cond
- */
-void wcd9xxx_regmgr_cond_register(struct wcd9xxx_resmgr *resmgr,
- unsigned long condbits)
-{
- unsigned int cond;
-
- for_each_set_bit(cond, &condbits, BITS_PER_BYTE * sizeof(condbits)) {
- mutex_lock(&resmgr->update_bit_cond_lock);
- WARN(test_bit(cond, &resmgr->cond_avail_flags),
- "Condition 0x%0x is already registered\n", cond);
- set_bit(cond, &resmgr->cond_avail_flags);
- wcd9xxx_resmgr_cond_trigger_cond(resmgr, cond);
- mutex_unlock(&resmgr->update_bit_cond_lock);
- pr_debug("%s: Condition 0x%x is registered\n", __func__, cond);
- }
-}
-
-void wcd9xxx_regmgr_cond_deregister(struct wcd9xxx_resmgr *resmgr,
- unsigned long condbits)
-{
- unsigned int cond;
-
- for_each_set_bit(cond, &condbits, BITS_PER_BYTE * sizeof(condbits)) {
- mutex_lock(&resmgr->update_bit_cond_lock);
- WARN(!test_bit(cond, &resmgr->cond_avail_flags),
- "Condition 0x%0x isn't registered\n", cond);
- clear_bit(cond, &resmgr->cond_avail_flags);
- wcd9xxx_resmgr_cond_trigger_cond(resmgr, cond);
- mutex_unlock(&resmgr->update_bit_cond_lock);
- pr_debug("%s: Condition 0x%x is deregistered\n", __func__,
- cond);
- }
-}
-
-void wcd9xxx_resmgr_cond_update_cond(struct wcd9xxx_resmgr *resmgr,
- enum wcd9xxx_resmgr_cond cond, bool set)
-{
- mutex_lock(&resmgr->update_bit_cond_lock);
- if ((set && !test_and_set_bit(cond, &resmgr->cond_flags)) ||
- (!set && test_and_clear_bit(cond, &resmgr->cond_flags))) {
- pr_debug("%s: Resource %d condition changed to %s\n", __func__,
- cond, set ? "set" : "clear");
- wcd9xxx_resmgr_cond_trigger_cond(resmgr, cond);
- }
- mutex_unlock(&resmgr->update_bit_cond_lock);
-}
-
-int wcd9xxx_resmgr_add_cond_update_bits(struct wcd9xxx_resmgr *resmgr,
- enum wcd9xxx_resmgr_cond cond,
- unsigned short reg, int shift,
- bool invert)
-{
- struct wcd9xxx_resmgr_cond_entry *entry;
-
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
-
- entry->cond = cond;
- entry->reg = reg;
- entry->shift = shift;
- entry->invert = invert;
-
- mutex_lock(&resmgr->update_bit_cond_lock);
- list_add_tail(&entry->list, &resmgr->update_bit_cond_h);
-
- wcd9xxx_resmgr_cond_trigger_cond(resmgr, cond);
- mutex_unlock(&resmgr->update_bit_cond_lock);
-
- return 0;
-}
-
-/*
- * wcd9xxx_resmgr_rm_cond_update_bits :
- * Clear bit and remove from the conditional bit update list
- */
-int wcd9xxx_resmgr_rm_cond_update_bits(struct wcd9xxx_resmgr *resmgr,
- enum wcd9xxx_resmgr_cond cond,
- unsigned short reg, int shift,
- bool invert)
-{
- struct list_head *l, *next;
- struct wcd9xxx_resmgr_cond_entry *e = NULL;
-
- pr_debug("%s: enter\n", __func__);
- mutex_lock(&resmgr->update_bit_cond_lock);
- list_for_each_safe(l, next, &resmgr->update_bit_cond_h) {
- e = list_entry(l, struct wcd9xxx_resmgr_cond_entry, list);
- if (e->reg == reg && e->shift == shift && e->invert == invert) {
- snd_soc_update_bits(resmgr->codec, e->reg,
- 1 << e->shift,
- e->invert << e->shift);
- list_del(&e->list);
- mutex_unlock(&resmgr->update_bit_cond_lock);
- kfree(e);
- return 0;
- }
- }
- mutex_unlock(&resmgr->update_bit_cond_lock);
- pr_err("%s: Cannot find update bit entry reg 0x%x, shift %d\n",
- __func__, e ? e->reg : 0, e ? e->shift : 0);
-
- return -EINVAL;
-}
-
-int wcd9xxx_resmgr_register_notifier(struct wcd9xxx_resmgr *resmgr,
- struct notifier_block *nblock)
-{
- return blocking_notifier_chain_register(&resmgr->notifier, nblock);
-}
-
-int wcd9xxx_resmgr_unregister_notifier(struct wcd9xxx_resmgr *resmgr,
- struct notifier_block *nblock)
-{
- return blocking_notifier_chain_unregister(&resmgr->notifier, nblock);
-}
-
-int wcd9xxx_resmgr_init(struct wcd9xxx_resmgr *resmgr,
- struct snd_soc_codec *codec,
- struct wcd9xxx_core_resource *core_res,
- struct wcd9xxx_pdata *pdata,
- struct wcd9xxx_micbias_setting *micbias_pdata,
- struct wcd9xxx_reg_address *reg_addr,
- const struct wcd9xxx_resmgr_cb *resmgr_cb,
- enum wcd9xxx_cdc_type cdc_type)
-{
- WARN(ARRAY_SIZE(wcd9xxx_event_string) != WCD9XXX_EVENT_LAST + 1,
- "Event string table isn't up to date!, %zd != %d\n",
- ARRAY_SIZE(wcd9xxx_event_string), WCD9XXX_EVENT_LAST + 1);
-
- resmgr->bandgap_type = WCD9XXX_BANDGAP_OFF;
- resmgr->codec = codec;
- resmgr->codec_type = cdc_type;
- /* This gives access of core handle to lock/unlock suspend */
- resmgr->core_res = core_res;
- resmgr->pdata = pdata;
- resmgr->micbias_pdata = micbias_pdata;
- resmgr->reg_addr = reg_addr;
- resmgr->resmgr_cb = resmgr_cb;
-
- INIT_LIST_HEAD(&resmgr->update_bit_cond_h);
-
- BLOCKING_INIT_NOTIFIER_HEAD(&resmgr->notifier);
-
- mutex_init(&resmgr->codec_resource_lock);
- mutex_init(&resmgr->codec_bg_clk_lock);
- mutex_init(&resmgr->update_bit_cond_lock);
-
- return 0;
-}
-
-void wcd9xxx_resmgr_deinit(struct wcd9xxx_resmgr *resmgr)
-{
- mutex_destroy(&resmgr->update_bit_cond_lock);
- mutex_destroy(&resmgr->codec_bg_clk_lock);
- mutex_destroy(&resmgr->codec_resource_lock);
-}
-
-void wcd9xxx_resmgr_bcl_lock(struct wcd9xxx_resmgr *resmgr)
-{
- mutex_lock(&resmgr->codec_resource_lock);
-}
-
-void wcd9xxx_resmgr_bcl_unlock(struct wcd9xxx_resmgr *resmgr)
-{
- mutex_unlock(&resmgr->codec_resource_lock);
-}
-
-MODULE_DESCRIPTION("wcd9xxx resmgr module");
-MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/wcd9xxx-resmgr.h b/sound/soc/codecs/wcd9xxx-resmgr.h
deleted file mode 100644
index e35d616..0000000
--- a/sound/soc/codecs/wcd9xxx-resmgr.h
+++ /dev/null
@@ -1,280 +0,0 @@
-/* Copyright (c) 2012-2014, 2016 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#ifndef __WCD9XXX_COMMON_H__
-#define __WCD9XXX_COMMON_H__
-
-#include <linux/notifier.h>
-#include <linux/mfd/wcd9xxx/core.h>
-#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
-
-enum wcd9xxx_bandgap_type {
- WCD9XXX_BANDGAP_OFF,
- WCD9XXX_BANDGAP_AUDIO_MODE,
- WCD9XXX_BANDGAP_MBHC_MODE,
-};
-
-enum wcd9xxx_cdc_type {
- WCD9XXX_CDC_TYPE_INVALID = 0,
- WCD9XXX_CDC_TYPE_TAIKO,
- WCD9XXX_CDC_TYPE_TAPAN,
- WCD9XXX_CDC_TYPE_HELICON,
- WCD9XXX_CDC_TYPE_TOMTOM,
-};
-
-enum wcd9xxx_clock_type {
- WCD9XXX_CLK_OFF,
- WCD9XXX_CLK_RCO,
- WCD9XXX_CLK_MCLK,
-};
-
-enum wcd9xxx_clock_config_mode {
- WCD9XXX_CFG_MCLK = 0,
- WCD9XXX_CFG_RCO,
- WCD9XXX_CFG_CAL_RCO,
-};
-
-enum wcd9xxx_cfilt_sel {
- WCD9XXX_CFILT1_SEL,
- WCD9XXX_CFILT2_SEL,
- WCD9XXX_CFILT3_SEL,
- WCD9XXX_NUM_OF_CFILT,
-};
-
-struct wcd9xxx_reg_address {
- u16 micb_4_ctl;
- u16 micb_4_int_rbias;
- u16 micb_4_mbhc;
-};
-
-enum wcd9xxx_notify_event {
- WCD9XXX_EVENT_INVALID,
-
- WCD9XXX_EVENT_PRE_RCO_ON,
- WCD9XXX_EVENT_POST_RCO_ON,
- WCD9XXX_EVENT_PRE_RCO_OFF,
- WCD9XXX_EVENT_POST_RCO_OFF,
-
- WCD9XXX_EVENT_PRE_MCLK_ON,
- WCD9XXX_EVENT_POST_MCLK_ON,
- WCD9XXX_EVENT_PRE_MCLK_OFF,
- WCD9XXX_EVENT_POST_MCLK_OFF,
-
- WCD9XXX_EVENT_PRE_BG_OFF,
- WCD9XXX_EVENT_POST_BG_OFF,
- WCD9XXX_EVENT_PRE_BG_AUDIO_ON,
- WCD9XXX_EVENT_POST_BG_AUDIO_ON,
- WCD9XXX_EVENT_PRE_BG_MBHC_ON,
- WCD9XXX_EVENT_POST_BG_MBHC_ON,
-
- WCD9XXX_EVENT_PRE_MICBIAS_1_OFF,
- WCD9XXX_EVENT_POST_MICBIAS_1_OFF,
- WCD9XXX_EVENT_PRE_MICBIAS_2_OFF,
- WCD9XXX_EVENT_POST_MICBIAS_2_OFF,
- WCD9XXX_EVENT_PRE_MICBIAS_3_OFF,
- WCD9XXX_EVENT_POST_MICBIAS_3_OFF,
- WCD9XXX_EVENT_PRE_MICBIAS_4_OFF,
- WCD9XXX_EVENT_POST_MICBIAS_4_OFF,
- WCD9XXX_EVENT_PRE_MICBIAS_1_ON,
- WCD9XXX_EVENT_POST_MICBIAS_1_ON,
- WCD9XXX_EVENT_PRE_MICBIAS_2_ON,
- WCD9XXX_EVENT_POST_MICBIAS_2_ON,
- WCD9XXX_EVENT_PRE_MICBIAS_3_ON,
- WCD9XXX_EVENT_POST_MICBIAS_3_ON,
- WCD9XXX_EVENT_PRE_MICBIAS_4_ON,
- WCD9XXX_EVENT_POST_MICBIAS_4_ON,
-
- WCD9XXX_EVENT_PRE_CFILT_1_OFF,
- WCD9XXX_EVENT_POST_CFILT_1_OFF,
- WCD9XXX_EVENT_PRE_CFILT_2_OFF,
- WCD9XXX_EVENT_POST_CFILT_2_OFF,
- WCD9XXX_EVENT_PRE_CFILT_3_OFF,
- WCD9XXX_EVENT_POST_CFILT_3_OFF,
- WCD9XXX_EVENT_PRE_CFILT_1_ON,
- WCD9XXX_EVENT_POST_CFILT_1_ON,
- WCD9XXX_EVENT_PRE_CFILT_2_ON,
- WCD9XXX_EVENT_POST_CFILT_2_ON,
- WCD9XXX_EVENT_PRE_CFILT_3_ON,
- WCD9XXX_EVENT_POST_CFILT_3_ON,
-
- WCD9XXX_EVENT_PRE_HPHL_PA_ON,
- WCD9XXX_EVENT_POST_HPHL_PA_OFF,
- WCD9XXX_EVENT_PRE_HPHR_PA_ON,
- WCD9XXX_EVENT_POST_HPHR_PA_OFF,
-
- WCD9XXX_EVENT_POST_RESUME,
-
- WCD9XXX_EVENT_PRE_TX_3_ON,
- WCD9XXX_EVENT_POST_TX_3_OFF,
-
- WCD9XXX_EVENT_LAST,
-};
-
-struct wcd9xxx_resmgr_cb {
- int (*cdc_rco_ctrl)(struct snd_soc_codec *, bool);
-};
-
-struct wcd9xxx_resmgr {
- struct snd_soc_codec *codec;
- struct wcd9xxx_core_resource *core_res;
-
- u32 rx_bias_count;
-
- /*
- * bandgap_type, bg_audio_users and bg_mbhc_users have to be
- * referred/manipulated after acquiring codec_bg_clk_lock mutex
- */
- enum wcd9xxx_bandgap_type bandgap_type;
- u16 bg_audio_users;
- u16 bg_mbhc_users;
-
- /*
- * clk_type, clk_rco_users and clk_mclk_users have to be
- * referred/manipulated after acquiring codec_bg_clk_lock mutex
- */
- enum wcd9xxx_clock_type clk_type;
- u16 clk_rco_users;
- u16 clk_mclk_users;
- u16 ext_clk_users;
-
- /* cfilt users per cfilts */
- u16 cfilt_users[WCD9XXX_NUM_OF_CFILT];
-
- struct wcd9xxx_reg_address *reg_addr;
-
- struct wcd9xxx_pdata *pdata;
-
- struct wcd9xxx_micbias_setting *micbias_pdata;
-
- struct blocking_notifier_head notifier;
- /* Notifier needs mbhc pointer with resmgr */
- struct wcd9xxx_mbhc *mbhc;
-
- unsigned long cond_flags;
- unsigned long cond_avail_flags;
- struct list_head update_bit_cond_h;
- struct mutex update_bit_cond_lock;
-
- /*
- * Currently, only used for mbhc purpose, to protect
- * concurrent execution of mbhc threaded irq handlers and
- * kill race between DAPM and MBHC. But can serve as a
- * general lock to protect codec resource
- */
- struct mutex codec_resource_lock;
- struct mutex codec_bg_clk_lock;
-
- enum wcd9xxx_cdc_type codec_type;
-
- const struct wcd9xxx_resmgr_cb *resmgr_cb;
-};
-
-int wcd9xxx_resmgr_init(struct wcd9xxx_resmgr *resmgr,
- struct snd_soc_codec *codec,
- struct wcd9xxx_core_resource *core_res,
- struct wcd9xxx_pdata *pdata,
- struct wcd9xxx_micbias_setting *micbias_pdata,
- struct wcd9xxx_reg_address *reg_addr,
- const struct wcd9xxx_resmgr_cb *resmgr_cb,
- enum wcd9xxx_cdc_type cdc_type);
-void wcd9xxx_resmgr_deinit(struct wcd9xxx_resmgr *resmgr);
-
-int wcd9xxx_resmgr_enable_config_mode(struct wcd9xxx_resmgr *resmgr,
- int enable);
-
-void wcd9xxx_resmgr_enable_rx_bias(struct wcd9xxx_resmgr *resmgr, u32 enable);
-void wcd9xxx_resmgr_get_clk_block(struct wcd9xxx_resmgr *resmgr,
- enum wcd9xxx_clock_type type);
-void wcd9xxx_resmgr_put_clk_block(struct wcd9xxx_resmgr *resmgr,
- enum wcd9xxx_clock_type type);
-void wcd9xxx_resmgr_get_bandgap(struct wcd9xxx_resmgr *resmgr,
- const enum wcd9xxx_bandgap_type choice);
-void wcd9xxx_resmgr_put_bandgap(struct wcd9xxx_resmgr *resmgr,
- enum wcd9xxx_bandgap_type choice);
-void wcd9xxx_resmgr_cfilt_get(struct wcd9xxx_resmgr *resmgr,
- enum wcd9xxx_cfilt_sel cfilt_sel);
-void wcd9xxx_resmgr_cfilt_put(struct wcd9xxx_resmgr *resmgr,
- enum wcd9xxx_cfilt_sel cfilt_sel);
-int wcd9xxx_resmgr_get_clk_type(struct wcd9xxx_resmgr *resmgr);
-
-void wcd9xxx_resmgr_bcl_lock(struct wcd9xxx_resmgr *resmgr);
-void wcd9xxx_resmgr_post_ssr(struct wcd9xxx_resmgr *resmgr);
-#define WCD9XXX_BCL_LOCK(resmgr) \
-{ \
- pr_debug("%s: Acquiring BCL\n", __func__); \
- wcd9xxx_resmgr_bcl_lock(resmgr); \
- pr_debug("%s: Acquiring BCL done\n", __func__); \
-}
-
-void wcd9xxx_resmgr_bcl_unlock(struct wcd9xxx_resmgr *resmgr);
-#define WCD9XXX_BCL_UNLOCK(resmgr) \
-{ \
- pr_debug("%s: Release BCL\n", __func__); \
- wcd9xxx_resmgr_bcl_unlock(resmgr); \
-}
-
-#define WCD9XXX_BCL_ASSERT_LOCKED(resmgr) \
-{ \
- WARN_ONCE(!mutex_is_locked(&resmgr->codec_resource_lock), \
- "%s: BCL should have acquired\n", __func__); \
-}
-
-#define WCD9XXX_BG_CLK_LOCK(resmgr) \
-{ \
- struct wcd9xxx_resmgr *__resmgr = resmgr; \
- pr_debug("%s: Acquiring BG_CLK\n", __func__); \
- mutex_lock(&__resmgr->codec_bg_clk_lock); \
- pr_debug("%s: Acquiring BG_CLK done\n", __func__); \
-}
-
-#define WCD9XXX_BG_CLK_UNLOCK(resmgr) \
-{ \
- struct wcd9xxx_resmgr *__resmgr = resmgr; \
- pr_debug("%s: Releasing BG_CLK\n", __func__); \
- mutex_unlock(&__resmgr->codec_bg_clk_lock); \
-}
-
-#define WCD9XXX_BG_CLK_ASSERT_LOCKED(resmgr) \
-{ \
- WARN_ONCE(!mutex_is_locked(&resmgr->codec_bg_clk_lock), \
- "%s: BG_CLK lock should have acquired\n", __func__); \
-}
-
-const char *wcd9xxx_get_event_string(enum wcd9xxx_notify_event type);
-int wcd9xxx_resmgr_get_k_val(struct wcd9xxx_resmgr *resmgr,
- unsigned int cfilt_mv);
-int wcd9xxx_resmgr_register_notifier(struct wcd9xxx_resmgr *resmgr,
- struct notifier_block *nblock);
-int wcd9xxx_resmgr_unregister_notifier(struct wcd9xxx_resmgr *resmgr,
- struct notifier_block *nblock);
-void wcd9xxx_resmgr_notifier_call(struct wcd9xxx_resmgr *resmgr,
- const enum wcd9xxx_notify_event e);
-
-enum wcd9xxx_resmgr_cond {
- WCD9XXX_COND_HPH = 0x01, /* Headphone */
- WCD9XXX_COND_HPH_MIC = 0x02, /* Microphone on the headset */
-};
-void wcd9xxx_regmgr_cond_register(struct wcd9xxx_resmgr *resmgr,
- unsigned long condbits);
-void wcd9xxx_regmgr_cond_deregister(struct wcd9xxx_resmgr *resmgr,
- unsigned long condbits);
-int wcd9xxx_resmgr_rm_cond_update_bits(struct wcd9xxx_resmgr *resmgr,
- enum wcd9xxx_resmgr_cond cond,
- unsigned short reg, int shift,
- bool invert);
-int wcd9xxx_resmgr_add_cond_update_bits(struct wcd9xxx_resmgr *resmgr,
- enum wcd9xxx_resmgr_cond cond,
- unsigned short reg, int shift,
- bool invert);
-void wcd9xxx_resmgr_cond_update_cond(struct wcd9xxx_resmgr *resmgr,
- enum wcd9xxx_resmgr_cond cond, bool set);
-
-#endif /* __WCD9XXX_COMMON_H__ */
diff --git a/sound/soc/codecs/wcd9xxx-soc-init.c b/sound/soc/codecs/wcd9xxx-soc-init.c
new file mode 100644
index 0000000..fa8abb7
--- /dev/null
+++ b/sound/soc/codecs/wcd9xxx-soc-init.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <sound/wcd-dsp-mgr.h>
+#include "audio-ext-clk-up.h"
+
+static int __init wcd9xxx_soc_init(void)
+{
+ int ret = 0;
+
+ ret = wcd_dsp_mgr_init();
+ if (!ret) {
+ ret = audio_ref_clk_platform_init();
+ if (ret) {
+ pr_err("%s: init extclk fail: %d\n", __func__, ret);
+ wcd_dsp_mgr_exit();
+ }
+ } else {
+ pr_err("%s: init dsp mgr fail: %d\n", __func__, ret);
+ }
+
+ return ret;
+}
+module_init(wcd9xxx_soc_init);
+
+static void __exit wcd9xxx_soc_exit(void)
+{
+ audio_ref_clk_platform_exit();
+ wcd_dsp_mgr_exit();
+}
+module_exit(wcd9xxx_soc_exit);
+
+MODULE_DESCRIPTION("WCD9XXX CODEC soc init driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/wcd_cpe_core.c b/sound/soc/codecs/wcd_cpe_core.c
index 153cc2e..f2a20d51 100644
--- a/sound/soc/codecs/wcd_cpe_core.c
+++ b/sound/soc/codecs/wcd_cpe_core.c
@@ -648,6 +648,7 @@
done:
return core;
}
+EXPORT_SYMBOL(wcd_cpe_get_core_handle);
/*
* svass_engine_irq: threaded interrupt handler for svass engine irq
diff --git a/sound/soc/codecs/wcd_cpe_services.c b/sound/soc/codecs/wcd_cpe_services.c
index 0028ebc..ad8962b 100644
--- a/sound/soc/codecs/wcd_cpe_services.c
+++ b/sound/soc/codecs/wcd_cpe_services.c
@@ -20,7 +20,6 @@
#include <linux/mfd/wcd9xxx/core.h>
#include <sound/cpe_cmi.h>
#include <sound/soc.h>
-#include <linux/mfd/wcd9xxx/wcd9330_registers.h>
#include <linux/mfd/wcd9335/registers.h>
#include "wcd_cpe_services.h"
#include "wcd_cmi_api.h"
@@ -46,9 +45,6 @@
#define LISTEN_CTL_SPE_VAL 0x0
#define LISTEN_CTL_MSM_VAL 0x1
-#define TOMTOM_A_SVASS_SPE_INBOX(N) (TOMTOM_A_SVASS_SPE_INBOX_0 + (N))
-#define TOMTOM_A_SVASS_SPE_OUTBOX(N) (TOMTOM_A_SVASS_SPE_OUTBOX_0 + (N))
-
#define WCD9335_CPE_SS_SPE_DRAM_OFFSET 0x48000
#define WCD9335_CPE_SS_SPE_DRAM_SIZE 0x34000
#define WCD9335_CPE_SS_SPE_IRAM_OFFSET 0x80000
@@ -316,8 +312,7 @@
{
int ret = 0;
- if (reg != TOMTOM_A_SVASS_MEM_BANK &&
- reg != WCD9335_CPE_SS_MEM_BANK_0)
+ if (reg != WCD9335_CPE_SS_MEM_BANK_0)
pr_debug("%s: reg = 0x%x, value = 0x%x\n",
__func__, reg, val);
@@ -2149,73 +2144,27 @@
static enum cpe_svc_result cpe_tgt_tomtom_boot(int debug_mode)
{
- enum cpe_svc_result rc = CPE_SVC_SUCCESS;
-
- if (!debug_mode)
- rc = cpe_update_bits(TOMTOM_A_SVASS_CPAR_WDOG_CFG,
- 0x3F, 0x31);
- else
- pr_info("%s: CPE in debug mode, WDOG disabled\n",
- __func__);
-
- rc = cpe_update_bits(TOMTOM_A_SVASS_CLKRST_CTL,
- 0x02, 0x00);
- rc = cpe_update_bits(TOMTOM_A_SVASS_CLKRST_CTL,
- 0x0C, 0x04);
- rc = cpe_update_bits(TOMTOM_A_SVASS_CPAR_CFG,
- 0x01, 0x01);
-
- return rc;
+ return CPE_SVC_SUCCESS;
}
static u32 cpe_tgt_tomtom_is_cpar_init_done(void)
{
- u8 status = 0;
-
- cpe_register_read(TOMTOM_A_SVASS_STATUS, &status);
- return status & 0x01;
+ return 0;
}
static u32 cpe_tgt_tomtom_is_active(void)
{
- u8 status = 0;
-
- cpe_register_read(TOMTOM_A_SVASS_STATUS, &status);
- return status & 0x04;
+ return 0;
}
static enum cpe_svc_result cpe_tgt_tomtom_reset(void)
{
- enum cpe_svc_result rc = CPE_SVC_SUCCESS;
-
- rc = cpe_update_bits(TOMTOM_A_SVASS_CPAR_WDOG_CFG,
- 0x30, 0x00);
-
- rc = cpe_update_bits(TOMTOM_A_SVASS_CPAR_CFG,
- 0x01, 0x00);
- rc = cpe_update_bits(TOMTOM_A_MEM_LEAKAGE_CTL,
- 0x07, 0x03);
- rc = cpe_update_bits(TOMTOM_A_SVASS_CLKRST_CTL,
- 0x08, 0x08);
- rc = cpe_update_bits(TOMTOM_A_SVASS_CLKRST_CTL,
- 0x02, 0x02);
- return rc;
+ return CPE_SVC_SUCCESS;
}
enum cpe_svc_result cpe_tgt_tomtom_voicetx(bool enable)
{
- enum cpe_svc_result rc = CPE_SVC_SUCCESS;
- u8 val = 0;
-
- if (enable)
- val = 0x02;
- else
- val = 0x00;
- rc = cpe_update_bits(TOMTOM_A_SVASS_CFG,
- 0x02, val);
- val = 0;
- cpe_register_read(TOMTOM_A_SVASS_CFG, &val);
- return rc;
+ return CPE_SVC_SUCCESS;
}
enum cpe_svc_result cpe_svc_toggle_lab(void *cpe_handle, bool enable)
@@ -2235,251 +2184,37 @@
static enum cpe_svc_result cpe_tgt_tomtom_read_mailbox(u8 *buffer,
size_t size)
{
- enum cpe_svc_result rc = CPE_SVC_SUCCESS;
- u32 cnt = 0;
-
- if (size >= TOMTOM_A_SVASS_SPE_OUTBOX_SIZE)
- size = TOMTOM_A_SVASS_SPE_OUTBOX_SIZE - 1;
- for (cnt = 0; (cnt < size) && (rc == CPE_SVC_SUCCESS); cnt++) {
- rc = cpe_register_read(TOMTOM_A_SVASS_SPE_OUTBOX(cnt),
- &(buffer[cnt]));
- }
- return rc;
+ return CPE_SVC_SUCCESS;
}
static enum cpe_svc_result cpe_tgt_tomtom_write_mailbox(u8 *buffer,
size_t size)
{
- enum cpe_svc_result rc = CPE_SVC_SUCCESS;
- u32 cnt = 0;
-
- if (size >= TOMTOM_A_SVASS_SPE_INBOX_SIZE)
- size = TOMTOM_A_SVASS_SPE_INBOX_SIZE - 1;
- for (cnt = 0; (cnt < size) && (rc == CPE_SVC_SUCCESS); cnt++) {
- rc = cpe_register_write(TOMTOM_A_SVASS_SPE_INBOX(cnt),
- buffer[cnt]);
- }
-
- if (rc == CPE_SVC_SUCCESS)
- rc = cpe_register_write(TOMTOM_A_SVASS_SPE_INBOX_TRG, 1);
-
- return rc;
-}
-
-static enum cpe_svc_result cpe_get_mem_addr(struct cpe_info *t_info,
- const struct cpe_svc_mem_segment *mem_seg,
- u32 *addr, u8 *mem)
-{
- enum cpe_svc_result rc = CPE_SVC_SUCCESS;
- u32 offset, mem_sz, address;
- u8 mem_type;
-
- switch (mem_seg->type) {
-
- case CPE_SVC_DATA_MEM:
- mem_type = MEM_ACCESS_DRAM_VAL;
- offset = TOMTOM_A_SVASS_SPE_DRAM_OFFSET;
- mem_sz = TOMTOM_A_SVASS_SPE_DRAM_SIZE;
- break;
-
- case CPE_SVC_INSTRUCTION_MEM:
- mem_type = MEM_ACCESS_IRAM_VAL;
- offset = TOMTOM_A_SVASS_SPE_IRAM_OFFSET;
- mem_sz = TOMTOM_A_SVASS_SPE_IRAM_SIZE;
- break;
-
- default:
- pr_err("%s: Invalid mem type = %u\n",
- __func__, mem_seg->type);
- return CPE_SVC_INVALID_HANDLE;
- }
-
- if (mem_seg->cpe_addr < offset) {
- pr_err("%s: Invalid addr %x for mem type %u\n",
- __func__, mem_seg->cpe_addr, mem_type);
- return CPE_SVC_INVALID_HANDLE;
- }
-
- address = mem_seg->cpe_addr - offset;
- if (address + mem_seg->size > mem_sz) {
- pr_err("%s: wrong size %zu, start address %x, mem_type %u\n",
- __func__, mem_seg->size, address, mem_type);
- return CPE_SVC_INVALID_HANDLE;
- }
-
- (*addr) = address;
- (*mem) = mem_type;
-
- return rc;
+ return CPE_SVC_SUCCESS;
}
static enum cpe_svc_result cpe_tgt_tomtom_read_RAM(struct cpe_info *t_info,
struct cpe_svc_mem_segment *mem_seg)
{
- enum cpe_svc_result rc = CPE_SVC_SUCCESS;
- u8 mem_reg_val = 0;
- u32 cnt = 0;
- bool autoinc;
- u8 mem = MEM_ACCESS_NONE_VAL;
- u32 addr = 0;
- u32 ptr_update = true;
-
- if (!mem_seg) {
- pr_err("%s: Invalid mem segment\n",
- __func__);
- return CPE_SVC_INVALID_HANDLE;
- }
-
- rc = cpe_get_mem_addr(t_info, mem_seg, &addr, &mem);
-
- if (rc != CPE_SVC_SUCCESS) {
- pr_err("%s: Cannot obtain address, mem_type %u\n",
- __func__, mem_seg->type);
- return rc;
- }
-
- rc = cpe_register_write(TOMTOM_A_SVASS_MEM_CTL, 0);
- autoinc = cpe_register_read_autoinc_supported();
- if (autoinc)
- mem_reg_val |= 0x04;
-
- mem_reg_val |= 0x08;
- mem_reg_val |= mem;
-
- do {
- if (!autoinc || ptr_update) {
- rc = cpe_register_write(TOMTOM_A_SVASS_MEM_PTR0,
- (addr & 0xFF));
- rc = cpe_register_write(TOMTOM_A_SVASS_MEM_PTR1,
- ((addr >> 8) & 0xFF));
- rc = cpe_register_write(TOMTOM_A_SVASS_MEM_PTR2,
- ((addr >> 16) & 0xFF));
-
- rc = cpe_register_write(TOMTOM_A_SVASS_MEM_CTL,
- mem_reg_val);
-
- ptr_update = false;
- }
- rc = cpe_register_read(TOMTOM_A_SVASS_MEM_BANK,
- &mem_seg->data[cnt]);
-
- if (!autoinc)
- rc = cpe_register_write(TOMTOM_A_SVASS_MEM_CTL, 0);
- } while (++cnt < mem_seg->size);
-
- rc = cpe_register_write(TOMTOM_A_SVASS_MEM_CTL, 0);
-
- return rc;
+ return CPE_SVC_SUCCESS;
}
static enum cpe_svc_result cpe_tgt_tomtom_write_RAM(struct cpe_info *t_info,
const struct cpe_svc_mem_segment *mem_seg)
{
- enum cpe_svc_result rc = CPE_SVC_SUCCESS;
- u8 mem_reg_val = 0;
- u8 mem = MEM_ACCESS_NONE_VAL;
- u32 addr = 0;
- u8 *temp_ptr = NULL;
- u32 temp_size = 0;
- bool autoinc;
-
- if (!mem_seg) {
- pr_err("%s: Invalid mem segment\n",
- __func__);
- return CPE_SVC_INVALID_HANDLE;
- }
-
- rc = cpe_get_mem_addr(t_info, mem_seg, &addr, &mem);
-
- if (rc != CPE_SVC_SUCCESS) {
- pr_err("%s: Cannot obtain address, mem_type %u\n",
- __func__, mem_seg->type);
- return rc;
- }
-
- autoinc = cpe_register_read_autoinc_supported();
- if (autoinc)
- mem_reg_val |= 0x04;
- mem_reg_val |= mem;
-
- rc = cpe_update_bits(TOMTOM_A_SVASS_MEM_CTL,
- 0x0F, mem_reg_val);
-
- rc = cpe_register_write(TOMTOM_A_SVASS_MEM_PTR0,
- (addr & 0xFF));
- rc = cpe_register_write(TOMTOM_A_SVASS_MEM_PTR1,
- ((addr >> 8) & 0xFF));
-
- rc = cpe_register_write(TOMTOM_A_SVASS_MEM_PTR2,
- ((addr >> 16) & 0xFF));
-
- temp_size = 0;
- temp_ptr = mem_seg->data;
-
- while (temp_size <= mem_seg->size) {
- u32 to_write = (mem_seg->size >= temp_size+CHUNK_SIZE)
- ? CHUNK_SIZE : (mem_seg->size-temp_size);
-
- if (t_info->state == CPE_STATE_OFFLINE) {
- pr_err("%s: CPE is offline\n", __func__);
- return CPE_SVC_FAILED;
- }
-
- cpe_register_write_repeat(TOMTOM_A_SVASS_MEM_BANK,
- temp_ptr, to_write);
- temp_size += CHUNK_SIZE;
- temp_ptr += CHUNK_SIZE;
- }
-
- rc = cpe_register_write(TOMTOM_A_SVASS_MEM_CTL, 0);
- return rc;
+ return CPE_SVC_SUCCESS;
}
static enum cpe_svc_result cpe_tgt_tomtom_route_notification(
enum cpe_svc_module module,
enum cpe_svc_route_dest dest)
{
- enum cpe_svc_result rc = CPE_SVC_SUCCESS;
- u8 ctl_reg_val = 0;
-
- switch (module) {
- case CPE_SVC_LISTEN_PROC:
- switch (dest) {
- case CPE_SVC_EXTERNAL:
- ctl_reg_val = LISTEN_CTL_MSM_VAL;
- break;
- case CPE_SVC_INTERNAL:
- ctl_reg_val = LISTEN_CTL_SPE_VAL;
- break;
- default:
- pr_err("%s: Invalid dest %d\n",
- __func__, dest);
- return CPE_SVC_FAILED;
- }
-
- rc = cpe_update_bits(TOMTOM_A_SVASS_CFG,
- 0x01, ctl_reg_val);
- break;
- default:
- pr_err("%s: Invalid module %d\n",
- __func__, module);
- rc = CPE_SVC_FAILED;
- break;
- }
-
- return rc;
+ return CPE_SVC_SUCCESS;
}
static enum cpe_svc_result cpe_tgt_tomtom_set_debug_mode(u32 enable)
{
- enum cpe_svc_result rc = CPE_SVC_SUCCESS;
- u8 dbg_reg_val = 0x00;
-
- if (enable)
- dbg_reg_val = 0x08;
- rc = cpe_update_bits(TOMTOM_A_SVASS_DEBUG,
- 0x08, dbg_reg_val);
- return rc;
+ return CPE_SVC_SUCCESS;
}
static const struct cpe_svc_hw_cfg *cpe_tgt_tomtom_get_cpe_info(void)
diff --git a/sound/soc/codecs/wsa881x-analog.c b/sound/soc/codecs/wsa881x-analog.c
deleted file mode 100644
index 4de9624..0000000
--- a/sound/soc/codecs/wsa881x-analog.c
+++ /dev/null
@@ -1,1446 +0,0 @@
-/*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/of_gpio.h>
-#include <linux/platform_device.h>
-#include <linux/device.h>
-#include <linux/printk.h>
-#include <linux/bitops.h>
-#include <linux/regulator/consumer.h>
-#include <linux/pm_runtime.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-#include <sound/soc-dapm.h>
-#include <sound/tlv.h>
-#include <sound/q6afe-v2.h>
-#include <linux/delay.h>
-#include <linux/i2c.h>
-#include <linux/kernel.h>
-#include <linux/gpio.h>
-#include <linux/regmap.h>
-#include "wsa881x-analog.h"
-#include "wsa881x-temp-sensor.h"
-#include "../msm/msm-audio-pinctrl.h"
-
-#define SPK_GAIN_12DB 4
-#define WIDGET_NAME_MAX_SIZE 80
-
-/*
- * Private data Structure for wsa881x. All parameters related to
- * WSA881X codec needs to be defined here.
- */
-struct wsa881x_pdata {
- struct regmap *regmap[2];
- struct i2c_client *client[2];
- struct snd_soc_codec *codec;
-
- /* track wsa881x status during probe */
- int status;
- bool boost_enable;
- bool visense_enable;
- int spk_pa_gain;
- struct i2c_msg xfer_msg[2];
- struct mutex xfer_lock;
- bool regmap_flag;
- bool wsa_active;
- int index;
- int (*enable_mclk)(struct snd_soc_card *, bool);
- struct wsa881x_tz_priv tz_pdata;
- int bg_cnt;
- int clk_cnt;
- int enable_cnt;
- int version;
- struct mutex bg_lock;
- struct mutex res_lock;
- struct delayed_work ocp_ctl_work;
-};
-
-enum {
- WSA881X_STATUS_PROBING,
- WSA881X_STATUS_I2C,
-};
-
-#define WSA881X_OCP_CTL_TIMER_SEC 2
-#define WSA881X_OCP_CTL_TEMP_CELSIUS 25
-#define WSA881X_OCP_CTL_POLL_TIMER_SEC 60
-
-static int wsa881x_ocp_poll_timer_sec = WSA881X_OCP_CTL_POLL_TIMER_SEC;
-module_param(wsa881x_ocp_poll_timer_sec, int, 0664);
-MODULE_PARM_DESC(wsa881x_ocp_poll_timer_sec, "timer for ocp ctl polling");
-
-static int32_t wsa881x_resource_acquire(struct snd_soc_codec *codec,
- bool enable);
-
-const char *wsa_tz_names[] = {"wsa881x.0e", "wsa881x.0f"};
-
-struct wsa881x_pdata wsa_pdata[MAX_WSA881X_DEVICE];
-
-static bool pinctrl_init;
-
-static int wsa881x_populate_dt_pdata(struct device *dev);
-static int wsa881x_reset(struct wsa881x_pdata *pdata, bool enable);
-static int wsa881x_startup(struct wsa881x_pdata *pdata);
-static int wsa881x_shutdown(struct wsa881x_pdata *pdata);
-
-static int delay_array_msec[] = {10, 20, 30, 40, 50};
-
-static int wsa881x_i2c_addr = -1;
-static int wsa881x_probing_count;
-static int wsa881x_presence_count;
-
-static const char * const wsa881x_spk_pa_gain_text[] = {
-"POS_13P5_DB", "POS_12_DB", "POS_10P5_DB", "POS_9_DB", "POS_7P5_DB",
-"POS_6_DB", "POS_4P5_DB", "POS_3_DB", "POS_1P5_DB", "POS_0_DB"};
-
-static const struct soc_enum wsa881x_spk_pa_gain_enum[] = {
- SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(wsa881x_spk_pa_gain_text),
- wsa881x_spk_pa_gain_text),
-};
-
-static int wsa881x_spk_pa_gain_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- struct wsa881x_pdata *wsa881x = snd_soc_codec_get_drvdata(codec);
-
- ucontrol->value.integer.value[0] = wsa881x->spk_pa_gain;
-
- dev_dbg(codec->dev, "%s: spk_pa_gain = %ld\n", __func__,
- ucontrol->value.integer.value[0]);
-
- return 0;
-}
-
-static int wsa881x_spk_pa_gain_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- struct wsa881x_pdata *wsa881x = snd_soc_codec_get_drvdata(codec);
-
- if (ucontrol->value.integer.value[0] < 0 ||
- ucontrol->value.integer.value[0] > 0xC) {
- dev_err(codec->dev, "%s: Unsupported gain val %ld\n",
- __func__, ucontrol->value.integer.value[0]);
- return -EINVAL;
- }
- wsa881x->spk_pa_gain = ucontrol->value.integer.value[0];
- dev_dbg(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
- __func__, ucontrol->value.integer.value[0]);
-
- return 0;
-}
-
-static int get_i2c_wsa881x_device_index(u16 reg)
-{
- u16 mask = 0x0f00;
- int value = 0;
-
- value = ((reg & mask) >> 8) & 0x000f;
-
- switch (value) {
- case 0:
- return 0;
- case 1:
- return 1;
- default:
- break;
- }
- return -EINVAL;
-}
-
-static int wsa881x_i2c_write_device(struct wsa881x_pdata *wsa881x,
- unsigned int reg, unsigned int val)
-{
- int i = 0, rc = 0;
- int wsa881x_index;
- struct i2c_msg *msg;
- int ret = 0;
- int bytes = 1;
- u8 reg_addr = 0;
- u8 data[bytes + 1];
-
- wsa881x_index = get_i2c_wsa881x_device_index(reg);
- if (wsa881x_index < 0) {
- pr_err("%s:invalid register to write\n", __func__);
- return -EINVAL;
- }
- if (wsa881x->regmap_flag) {
- rc = regmap_write(wsa881x->regmap[wsa881x_index], reg, val);
- for (i = 0; rc && i < ARRAY_SIZE(delay_array_msec); i++) {
- pr_err("Failed writing reg=%u - retry(%d)\n", reg, i);
- /* retry after delay of increasing order */
- msleep(delay_array_msec[i]);
- rc = regmap_write(wsa881x->regmap[wsa881x_index],
- reg, val);
- }
- if (rc)
- pr_err("Failed writing reg=%u rc=%d\n", reg, rc);
- else
- pr_err("write success register = %x val = %x\n",
- reg, val);
- } else {
- reg_addr = (u8)reg;
- msg = &wsa881x->xfer_msg[0];
- msg->addr = wsa881x->client[wsa881x_index]->addr;
- msg->len = bytes + 1;
- msg->flags = 0;
- data[0] = reg;
- data[1] = (u8)val;
- msg->buf = data;
- ret = i2c_transfer(wsa881x->client[wsa881x_index]->adapter,
- wsa881x->xfer_msg, 1);
- /* Try again if the write fails */
- if (ret != 1) {
- ret = i2c_transfer(
- wsa881x->client[wsa881x_index]->adapter,
- wsa881x->xfer_msg, 1);
- if (ret != 1) {
- pr_err("failed to write the device\n");
- return ret;
- }
- }
- pr_debug("write success reg = %x val = %x\n", reg, data[1]);
- }
- return rc;
-}
-
-static int wsa881x_i2c_read_device(struct wsa881x_pdata *wsa881x,
- unsigned int reg)
-{
- int wsa881x_index;
- int i = 0, rc = 0;
- unsigned int val;
- struct i2c_msg *msg;
- int ret = 0;
- u8 reg_addr = 0;
- u8 dest[5];
-
- wsa881x_index = get_i2c_wsa881x_device_index(reg);
- if (wsa881x_index < 0) {
- pr_err("%s:invalid register to read\n", __func__);
- return -EINVAL;
- }
- if (wsa881x->regmap_flag) {
- rc = regmap_read(wsa881x->regmap[wsa881x_index], reg, &val);
- for (i = 0; rc && i < ARRAY_SIZE(delay_array_msec); i++) {
- pr_err("Failed reading reg=%u - retry(%d)\n", reg, i);
- /* retry after delay of increasing order */
- msleep(delay_array_msec[i]);
- rc = regmap_read(wsa881x->regmap[wsa881x_index],
- reg, &val);
- }
- if (rc) {
- pr_err("Failed reading reg=%u rc=%d\n", reg, rc);
- return rc;
- }
- pr_debug("read success reg = %x val = %x\n",
- reg, val);
- } else {
- reg_addr = (u8)reg;
- msg = &wsa881x->xfer_msg[0];
- msg->addr = wsa881x->client[wsa881x_index]->addr;
- msg->len = 1;
- msg->flags = 0;
- msg->buf = ®_addr;
-
- msg = &wsa881x->xfer_msg[1];
- msg->addr = wsa881x->client[wsa881x_index]->addr;
- msg->len = 1;
- msg->flags = I2C_M_RD;
- msg->buf = dest;
- ret = i2c_transfer(wsa881x->client[wsa881x_index]->adapter,
- wsa881x->xfer_msg, 2);
-
- /* Try again if read fails first time */
- if (ret != 2) {
- ret = i2c_transfer(
- wsa881x->client[wsa881x_index]->adapter,
- wsa881x->xfer_msg, 2);
- if (ret != 2) {
- pr_err("failed to read wsa register:%d\n",
- reg);
- return ret;
- }
- }
- val = dest[0];
- }
- return val;
-}
-
-static unsigned int wsa881x_i2c_read(struct snd_soc_codec *codec,
- unsigned int reg)
-{
- struct wsa881x_pdata *wsa881x;
- unsigned int val;
- int ret;
-
- if (codec == NULL) {
- pr_err("%s: invalid codec\n", __func__);
- return -EINVAL;
- }
- wsa881x = snd_soc_codec_get_drvdata(codec);
- if (!wsa881x->wsa_active) {
- ret = snd_soc_cache_read(codec, reg, &val);
- if (ret >= 0)
- return val;
- dev_err(codec->dev,
- "cache read failed for reg: 0x%x ret: %d\n",
- reg, ret);
- return ret;
- }
- return wsa881x_i2c_read_device(wsa881x, reg);
-}
-
-static int wsa881x_i2c_write(struct snd_soc_codec *codec, unsigned int reg,
- unsigned int val)
-{
- struct wsa881x_pdata *wsa881x;
- int ret = 0;
-
- if (codec == NULL) {
- pr_err("%s: invalid codec\n", __func__);
- return -EINVAL;
- }
- wsa881x = snd_soc_codec_get_drvdata(codec);
- if (!wsa881x->wsa_active) {
- ret = snd_soc_cache_write(codec, reg, val);
- if (ret != 0)
- dev_err(codec->dev, "cache write to %x failed: %d\n",
- reg, ret);
- return ret;
- }
- return wsa881x_i2c_write_device(wsa881x, reg, val);
-}
-
-static int wsa881x_i2c_get_client_index(struct i2c_client *client,
- int *wsa881x_index)
-{
- int ret = 0;
-
- switch (client->addr) {
- case WSA881X_I2C_SPK0_SLAVE0_ADDR:
- case WSA881X_I2C_SPK0_SLAVE1_ADDR:
- *wsa881x_index = WSA881X_I2C_SPK0_SLAVE0;
- break;
- case WSA881X_I2C_SPK1_SLAVE0_ADDR:
- case WSA881X_I2C_SPK1_SLAVE1_ADDR:
- *wsa881x_index = WSA881X_I2C_SPK1_SLAVE0;
- break;
- default:
- ret = -EINVAL;
- break;
- }
- return ret;
-}
-
-static int wsa881x_boost_ctrl(struct snd_soc_codec *codec, bool enable)
-{
- struct wsa881x_pdata *wsa881x = snd_soc_codec_get_drvdata(codec);
-
- pr_debug("%s: enable:%d\n", __func__, enable);
- if (enable) {
- if (!WSA881X_IS_2_0(wsa881x->version)) {
- snd_soc_update_bits(codec, WSA881X_ANA_CTL,
- 0x01, 0x01);
- snd_soc_update_bits(codec, WSA881X_ANA_CTL,
- 0x04, 0x04);
- snd_soc_update_bits(codec, WSA881X_BOOST_PS_CTL,
- 0x40, 0x00);
- snd_soc_update_bits(codec, WSA881X_BOOST_PRESET_OUT1,
- 0xF0, 0xB0);
- snd_soc_update_bits(codec, WSA881X_BOOST_ZX_CTL,
- 0x20, 0x00);
- snd_soc_update_bits(codec, WSA881X_BOOST_EN_CTL,
- 0x80, 0x80);
- } else {
- snd_soc_update_bits(codec, WSA881X_BOOST_LOOP_STABILITY,
- 0x03, 0x03);
- snd_soc_update_bits(codec, WSA881X_BOOST_MISC2_CTL,
- 0xFF, 0x14);
- snd_soc_update_bits(codec, WSA881X_BOOST_START_CTL,
- 0x80, 0x80);
- snd_soc_update_bits(codec, WSA881X_BOOST_START_CTL,
- 0x03, 0x00);
- snd_soc_update_bits(codec,
- WSA881X_BOOST_SLOPE_COMP_ISENSE_FB,
- 0x0C, 0x04);
- snd_soc_update_bits(codec,
- WSA881X_BOOST_SLOPE_COMP_ISENSE_FB,
- 0x03, 0x00);
- snd_soc_update_bits(codec, WSA881X_BOOST_PRESET_OUT1,
- 0xF0, 0x70);
- snd_soc_update_bits(codec, WSA881X_ANA_CTL, 0x03, 0x01);
- snd_soc_update_bits(codec, WSA881X_SPKR_DRV_EN,
- 0x08, 0x08);
- snd_soc_update_bits(codec, WSA881X_ANA_CTL, 0x04, 0x04);
- snd_soc_update_bits(codec, WSA881X_BOOST_CURRENT_LIMIT,
- 0x0F, 0x08);
- snd_soc_update_bits(codec, WSA881X_BOOST_EN_CTL,
- 0x80, 0x80);
- }
- /* For WSA8810, start-up time is 1500us as per qcrg sequence */
- usleep_range(1500, 1510);
- } else {
- /* ENSURE: Class-D amp is shutdown. CLK is still on */
- snd_soc_update_bits(codec, WSA881X_BOOST_EN_CTL, 0x80, 0x00);
- /* boost settle time is 1500us as per qcrg sequence */
- usleep_range(1500, 1510);
- }
- return 0;
-}
-
-static int wsa881x_visense_txfe_ctrl(struct snd_soc_codec *codec, bool enable,
- u8 isense1_gain, u8 isense2_gain,
- u8 vsense_gain)
-{
- u8 value = 0;
- struct wsa881x_pdata *wsa881x = snd_soc_codec_get_drvdata(codec);
-
- pr_debug("%s: enable:%d\n", __func__, enable);
-
- if (enable) {
- if (WSA881X_IS_2_0(wsa881x->version)) {
- snd_soc_update_bits(codec, WSA881X_OTP_REG_28,
- 0x3F, 0x3A);
- snd_soc_update_bits(codec, WSA881X_BONGO_RESRV_REG1,
- 0xFF, 0xB2);
- snd_soc_update_bits(codec, WSA881X_BONGO_RESRV_REG2,
- 0xFF, 0x05);
- }
- snd_soc_update_bits(codec, WSA881X_SPKR_PROT_FE_VSENSE_VCM,
- 0x08, 0x00);
- if (WSA881X_IS_2_0(wsa881x->version)) {
- snd_soc_update_bits(codec, WSA881X_SPKR_PROT_ATEST2,
- 0x1C, 0x04);
- } else {
- snd_soc_update_bits(codec, WSA881X_SPKR_PROT_ATEST2,
- 0x08, 0x08);
- snd_soc_update_bits(codec, WSA881X_SPKR_PROT_ATEST2,
- 0x02, 0x02);
- }
- value = ((isense2_gain << 6) | (isense1_gain << 4) |
- (vsense_gain << 3));
- snd_soc_update_bits(codec, WSA881X_SPKR_PROT_FE_GAIN,
- 0xF8, value);
- snd_soc_update_bits(codec, WSA881X_SPKR_PROT_FE_GAIN,
- 0x01, 0x01);
- } else {
- if (WSA881X_IS_2_0(wsa881x->version))
- snd_soc_update_bits(codec,
- WSA881X_SPKR_PROT_FE_VSENSE_VCM, 0x10, 0x10);
- else
- snd_soc_update_bits(codec,
- WSA881X_SPKR_PROT_FE_VSENSE_VCM, 0x08, 0x08);
- /*
- * 200us sleep is needed after visense txfe disable as per
- * HW requirement.
- */
- usleep_range(200, 210);
-
- snd_soc_update_bits(codec, WSA881X_SPKR_PROT_FE_GAIN,
- 0x01, 0x00);
- }
- return 0;
-}
-
-static int wsa881x_visense_adc_ctrl(struct snd_soc_codec *codec, bool enable)
-{
- struct wsa881x_pdata *wsa881x = snd_soc_codec_get_drvdata(codec);
-
- pr_debug("%s: enable:%d\n", __func__, enable);
- if (enable) {
- if (!WSA881X_IS_2_0(wsa881x->version))
- snd_soc_update_bits(codec, WSA881X_ADC_SEL_IBIAS,
- 0x70, 0x40);
- snd_soc_update_bits(codec, WSA881X_ADC_EN_SEL_IBIAS,
- 0x07, 0x04);
- snd_soc_update_bits(codec, WSA881X_ADC_EN_MODU_V, 0x80, 0x80);
- snd_soc_update_bits(codec, WSA881X_ADC_EN_MODU_I, 0x80, 0x80);
- } else {
- /* Ensure: Speaker Protection has been stopped */
- snd_soc_update_bits(codec, WSA881X_ADC_EN_MODU_V, 0x80, 0x00);
- snd_soc_update_bits(codec, WSA881X_ADC_EN_MODU_I, 0x80, 0x00);
- }
-
- return 0;
-}
-
-static void wsa881x_bandgap_ctrl(struct snd_soc_codec *codec, bool enable)
-{
- struct wsa881x_pdata *wsa881x = snd_soc_codec_get_drvdata(codec);
-
- dev_dbg(codec->dev, "%s: enable:%d, bg_count:%d\n", __func__,
- enable, wsa881x->bg_cnt);
- mutex_lock(&wsa881x->bg_lock);
- if (enable) {
- ++wsa881x->bg_cnt;
- if (wsa881x->bg_cnt == 1) {
- snd_soc_update_bits(codec, WSA881X_TEMP_OP,
- 0x08, 0x08);
- /* 400usec sleep is needed as per HW requirement */
- usleep_range(400, 410);
- snd_soc_update_bits(codec, WSA881X_TEMP_OP, 0x04, 0x04);
- }
- } else {
- --wsa881x->bg_cnt;
- if (wsa881x->bg_cnt <= 0) {
- WARN_ON(wsa881x->bg_cnt < 0);
- wsa881x->bg_cnt = 0;
- snd_soc_update_bits(codec, WSA881X_TEMP_OP, 0x04, 0x00);
- snd_soc_update_bits(codec, WSA881X_TEMP_OP, 0x08, 0x00);
- }
- }
- mutex_unlock(&wsa881x->bg_lock);
-}
-
-static void wsa881x_clk_ctrl(struct snd_soc_codec *codec, bool enable)
-{
- struct wsa881x_pdata *wsa881x = snd_soc_codec_get_drvdata(codec);
-
- dev_dbg(codec->dev, "%s:ss enable:%d, clk_count:%d\n", __func__,
- enable, wsa881x->clk_cnt);
- mutex_lock(&wsa881x->res_lock);
- if (enable) {
- ++wsa881x->clk_cnt;
- if (wsa881x->clk_cnt == 1) {
- snd_soc_write(codec, WSA881X_CDC_RST_CTL, 0x02);
- snd_soc_write(codec, WSA881X_CDC_RST_CTL, 0x03);
- snd_soc_write(codec, WSA881X_CLOCK_CONFIG, 0x01);
- snd_soc_write(codec, WSA881X_CDC_DIG_CLK_CTL, 0x01);
- snd_soc_write(codec, WSA881X_CDC_ANA_CLK_CTL, 0x01);
- }
- } else {
- --wsa881x->clk_cnt;
- if (wsa881x->clk_cnt <= 0) {
- WARN_ON(wsa881x->clk_cnt < 0);
- wsa881x->clk_cnt = 0;
- snd_soc_write(codec, WSA881X_CDC_ANA_CLK_CTL, 0x00);
- snd_soc_write(codec, WSA881X_CDC_DIG_CLK_CTL, 0x00);
- if (WSA881X_IS_2_0(wsa881x->version))
- snd_soc_update_bits(codec,
- WSA881X_CDC_TOP_CLK_CTL, 0x01, 0x00);
- }
- }
- mutex_unlock(&wsa881x->res_lock);
-}
-
-static int wsa881x_rdac_ctrl(struct snd_soc_codec *codec, bool enable)
-{
- struct wsa881x_pdata *wsa881x = snd_soc_codec_get_drvdata(codec);
-
- pr_debug("%s: enable:%d\n", __func__, enable);
- if (enable) {
- snd_soc_update_bits(codec, WSA881X_ANA_CTL, 0x08, 0x00);
- snd_soc_update_bits(codec, WSA881X_SPKR_DRV_GAIN, 0x08, 0x08);
- snd_soc_update_bits(codec, WSA881X_SPKR_DAC_CTL, 0x20, 0x20);
- snd_soc_update_bits(codec, WSA881X_SPKR_DAC_CTL, 0x20, 0x00);
- snd_soc_update_bits(codec, WSA881X_SPKR_DAC_CTL, 0x40, 0x40);
- snd_soc_update_bits(codec, WSA881X_SPKR_DAC_CTL, 0x80, 0x80);
- if (WSA881X_IS_2_0(wsa881x->version)) {
- snd_soc_update_bits(codec, WSA881X_SPKR_BIAS_CAL,
- 0x01, 0x01);
- snd_soc_update_bits(codec, WSA881X_SPKR_OCP_CTL,
- 0x30, 0x30);
- snd_soc_update_bits(codec, WSA881X_SPKR_OCP_CTL,
- 0x0C, 0x00);
- }
- snd_soc_update_bits(codec, WSA881X_SPKR_DRV_GAIN, 0xF0, 0x40);
- snd_soc_update_bits(codec, WSA881X_SPKR_MISC_CTL1, 0x01, 0x01);
- } else {
- /* Ensure class-D amp is off */
- snd_soc_update_bits(codec, WSA881X_SPKR_DAC_CTL, 0x80, 0x00);
- }
- return 0;
-}
-
-static int wsa881x_spkr_pa_ctrl(struct snd_soc_codec *codec, bool enable)
-{
- int ret = 0;
- struct wsa881x_pdata *wsa881x = snd_soc_codec_get_drvdata(codec);
-
- pr_debug("%s: enable:%d\n", __func__, enable);
- if (enable) {
- /*
- * Ensure: Boost is enabled and stable, Analog input is up
- * and outputting silence
- */
- if (!WSA881X_IS_2_0(wsa881x->version)) {
- snd_soc_update_bits(codec, WSA881X_ADC_EN_DET_TEST_I,
- 0xFF, 0x01);
- snd_soc_update_bits(codec, WSA881X_ADC_EN_MODU_V,
- 0x02, 0x02);
- snd_soc_update_bits(codec, WSA881X_ADC_EN_DET_TEST_V,
- 0xFF, 0x10);
- snd_soc_update_bits(codec, WSA881X_SPKR_PWRSTG_DBG,
- 0xA0, 0xA0);
- snd_soc_update_bits(codec, WSA881X_SPKR_DRV_EN,
- 0x80, 0x80);
- usleep_range(700, 710);
- snd_soc_update_bits(codec, WSA881X_SPKR_PWRSTG_DBG,
- 0x00, 0x00);
- snd_soc_update_bits(codec, WSA881X_ADC_EN_DET_TEST_V,
- 0xFF, 0x00);
- snd_soc_update_bits(codec, WSA881X_ADC_EN_MODU_V,
- 0x02, 0x00);
- snd_soc_update_bits(codec, WSA881X_ADC_EN_DET_TEST_I,
- 0xFF, 0x00);
- } else
- snd_soc_update_bits(codec, WSA881X_SPKR_DRV_EN,
- 0x80, 0x80);
- /* add 1000us delay as per qcrg */
- usleep_range(1000, 1010);
- snd_soc_update_bits(codec, WSA881X_SPKR_DRV_EN, 0x01, 0x01);
- if (WSA881X_IS_2_0(wsa881x->version))
- snd_soc_update_bits(codec, WSA881X_SPKR_BIAS_CAL,
- 0x01, 0x00);
- usleep_range(1000, 1010);
- snd_soc_update_bits(codec, WSA881X_SPKR_DRV_GAIN, 0xF0,
- (wsa881x->spk_pa_gain << 4));
- if (wsa881x->visense_enable) {
- ret = msm_gpioset_activate(CLIENT_WSA_BONGO_1,
- "wsa_vi");
- if (ret) {
- pr_err("%s: gpio set cannot be activated %s\n",
- __func__, "wsa_vi");
- return ret;
- }
- wsa881x_visense_txfe_ctrl(codec, true,
- 0x00, 0x01, 0x00);
- wsa881x_visense_adc_ctrl(codec, true);
- }
- } else {
- /*
- * Ensure: Boost is still on, Stream from Analog input and
- * Speaker Protection has been stopped and input is at 0V
- */
- if (WSA881X_IS_2_0(wsa881x->version)) {
- snd_soc_update_bits(codec, WSA881X_SPKR_BIAS_CAL,
- 0x01, 0x01);
- usleep_range(1000, 1010);
- snd_soc_update_bits(codec, WSA881X_SPKR_BIAS_CAL,
- 0x01, 0x00);
- msleep(20);
- snd_soc_update_bits(codec, WSA881X_ANA_CTL,
- 0x03, 0x00);
- usleep_range(200, 210);
- }
- snd_soc_update_bits(codec, WSA881X_SPKR_DRV_EN, 0x80, 0x00);
- }
- return 0;
-}
-
-static int wsa881x_get_boost(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
-
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- struct wsa881x_pdata *wsa881x = snd_soc_codec_get_drvdata(codec);
-
- ucontrol->value.integer.value[0] = wsa881x->boost_enable;
- return 0;
-}
-
-static int wsa881x_set_boost(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- struct wsa881x_pdata *wsa881x = snd_soc_codec_get_drvdata(codec);
- int value = ucontrol->value.integer.value[0];
-
- dev_dbg(codec->dev, "%s: Boost enable current %d, new %d\n",
- __func__, wsa881x->boost_enable, value);
- wsa881x->boost_enable = value;
- return 0;
-}
-
-static int wsa881x_get_visense(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
-
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- struct wsa881x_pdata *wsa881x = snd_soc_codec_get_drvdata(codec);
-
- ucontrol->value.integer.value[0] = wsa881x->visense_enable;
- return 0;
-}
-
-static int wsa881x_set_visense(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
- struct wsa881x_pdata *wsa881x = snd_soc_codec_get_drvdata(codec);
- int value = ucontrol->value.integer.value[0];
-
- dev_dbg(codec->dev, "%s: VIsense enable current %d, new %d\n",
- __func__, wsa881x->visense_enable, value);
- wsa881x->visense_enable = value;
- return 0;
-}
-
-static const struct snd_kcontrol_new wsa881x_snd_controls[] = {
- SOC_SINGLE_EXT("BOOST Switch", SND_SOC_NOPM, 0, 1, 0,
- wsa881x_get_boost, wsa881x_set_boost),
-
- SOC_SINGLE_EXT("VISENSE Switch", SND_SOC_NOPM, 0, 1, 0,
- wsa881x_get_visense, wsa881x_set_visense),
-
- SOC_ENUM_EXT("WSA_SPK PA Gain", wsa881x_spk_pa_gain_enum[0],
- wsa881x_spk_pa_gain_get, wsa881x_spk_pa_gain_put),
-};
-
-static const char * const rdac_text[] = {
- "ZERO", "Switch",
-};
-
-static const struct soc_enum rdac_enum =
- SOC_ENUM_SINGLE(0, 0, ARRAY_SIZE(rdac_text), rdac_text);
-
-static const struct snd_kcontrol_new rdac_mux[] = {
- SOC_DAPM_ENUM("RDAC", rdac_enum)
-};
-
-static int wsa881x_rdac_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = w->codec;
- struct wsa881x_pdata *wsa881x = snd_soc_codec_get_drvdata(codec);
- int ret = 0;
-
- dev_dbg(codec->dev, "%s: %s %d boost %d visense %d\n",
- __func__, w->name, event,
- wsa881x->boost_enable, wsa881x->visense_enable);
-
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- ret = wsa881x_startup(wsa881x);
- if (ret) {
- pr_err("%s: wsa startup failed ret: %d", __func__, ret);
- return ret;
- }
- wsa881x_clk_ctrl(codec, true);
- snd_soc_update_bits(codec, WSA881X_SPKR_DAC_CTL, 0x02, 0x02);
- if (!WSA881X_IS_2_0(wsa881x->version))
- snd_soc_update_bits(codec, WSA881X_BIAS_REF_CTRL,
- 0x0F, 0x08);
- wsa881x_bandgap_ctrl(codec, true);
- if (!WSA881X_IS_2_0(wsa881x->version))
- snd_soc_update_bits(codec, WSA881X_SPKR_BBM_CTL,
- 0x02, 0x02);
- snd_soc_update_bits(codec, WSA881X_SPKR_MISC_CTL1, 0xC0, 0x80);
- snd_soc_update_bits(codec, WSA881X_SPKR_MISC_CTL1, 0x06, 0x06);
- if (!WSA881X_IS_2_0(wsa881x->version)) {
- snd_soc_update_bits(codec, WSA881X_SPKR_MISC_CTL2,
- 0x04, 0x04);
- snd_soc_update_bits(codec, WSA881X_SPKR_BIAS_INT,
- 0x09, 0x09);
- }
- snd_soc_update_bits(codec, WSA881X_SPKR_PA_INT, 0xF0, 0x20);
- if (WSA881X_IS_2_0(wsa881x->version))
- snd_soc_update_bits(codec, WSA881X_SPKR_PA_INT,
- 0x0E, 0x0E);
- if (wsa881x->boost_enable)
- wsa881x_boost_ctrl(codec, true);
- break;
- case SND_SOC_DAPM_POST_PMU:
- wsa881x_rdac_ctrl(codec, true);
- break;
- case SND_SOC_DAPM_PRE_PMD:
- wsa881x_rdac_ctrl(codec, false);
- if (wsa881x->visense_enable) {
- wsa881x_visense_adc_ctrl(codec, false);
- wsa881x_visense_txfe_ctrl(codec, false,
- 0x00, 0x01, 0x00);
- ret = msm_gpioset_suspend(CLIENT_WSA_BONGO_1,
- "wsa_vi");
- if (ret) {
- pr_err("%s: gpio set cannot be suspended %s\n",
- __func__, "wsa_vi");
- return ret;
- }
- }
- break;
- case SND_SOC_DAPM_POST_PMD:
- if (wsa881x->boost_enable)
- wsa881x_boost_ctrl(codec, false);
- wsa881x_clk_ctrl(codec, false);
- wsa881x_bandgap_ctrl(codec, false);
- ret = wsa881x_shutdown(wsa881x);
- if (ret < 0) {
- pr_err("%s: wsa shutdown failed ret: %d",
- __func__, ret);
- return ret;
- }
- break;
- default:
- pr_err("%s: invalid event:%d\n", __func__, event);
- return -EINVAL;
- }
- return 0;
-}
-
-static void wsa881x_ocp_ctl_work(struct work_struct *work)
-{
- struct wsa881x_pdata *wsa881x;
- struct delayed_work *dwork;
- struct snd_soc_codec *codec;
- unsigned long temp_val;
-
- dwork = to_delayed_work(work);
- wsa881x = container_of(dwork, struct wsa881x_pdata, ocp_ctl_work);
-
- if (!wsa881x)
- return;
-
- codec = wsa881x->codec;
- wsa881x_get_temp(wsa881x->tz_pdata.tz_dev, &temp_val);
- dev_dbg(codec->dev, " temp = %ld\n", temp_val);
-
- if (temp_val <= WSA881X_OCP_CTL_TEMP_CELSIUS)
- snd_soc_update_bits(codec, WSA881X_SPKR_OCP_CTL, 0xC0, 0x00);
- else
- snd_soc_update_bits(codec, WSA881X_SPKR_OCP_CTL, 0xC0, 0xC0);
-
- schedule_delayed_work(&wsa881x->ocp_ctl_work,
- msecs_to_jiffies(wsa881x_ocp_poll_timer_sec * 1000));
-}
-
-static int wsa881x_spkr_pa_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = w->codec;
- struct wsa881x_pdata *wsa881x = snd_soc_codec_get_drvdata(codec);
-
- pr_debug("%s: %s %d\n", __func__, w->name, event);
-
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- snd_soc_update_bits(codec, WSA881X_SPKR_OCP_CTL, 0xC0, 0x80);
- break;
- case SND_SOC_DAPM_POST_PMU:
- wsa881x_spkr_pa_ctrl(codec, true);
- schedule_delayed_work(&wsa881x->ocp_ctl_work,
- msecs_to_jiffies(WSA881X_OCP_CTL_TIMER_SEC * 1000));
- break;
- case SND_SOC_DAPM_PRE_PMD:
- wsa881x_spkr_pa_ctrl(codec, false);
- break;
- case SND_SOC_DAPM_POST_PMD:
- cancel_delayed_work_sync(&wsa881x->ocp_ctl_work);
- snd_soc_update_bits(codec, WSA881X_SPKR_OCP_CTL, 0xC0, 0xC0);
- break;
- default:
- pr_err("%s: invalid event:%d\n", __func__, event);
- return -EINVAL;
- }
- return 0;
-}
-
-
-static const struct snd_soc_dapm_widget wsa881x_dapm_widgets[] = {
- SND_SOC_DAPM_INPUT("WSA_IN"),
-
- SND_SOC_DAPM_DAC_E("RDAC Analog", NULL, SND_SOC_NOPM, 0, 0,
- wsa881x_rdac_event,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_MUX("WSA_RDAC", SND_SOC_NOPM, 0, 0,
- rdac_mux),
-
- SND_SOC_DAPM_PGA_S("WSA_SPKR PGA", 1, SND_SOC_NOPM, 0, 0,
- wsa881x_spkr_pa_event,
- SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD |
- SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_OUTPUT("WSA_SPKR"),
-};
-
-static const struct snd_soc_dapm_route wsa881x_audio_map[] = {
- {"WSA_RDAC", "Switch", "WSA_IN"},
- {"RDAC Analog", NULL, "WSA_RDAC"},
- {"WSA_SPKR PGA", NULL, "RDAC Analog"},
- {"WSA_SPKR", NULL, "WSA_SPKR PGA"},
-};
-
-
-static int wsa881x_startup(struct wsa881x_pdata *pdata)
-{
- int ret = 0;
- struct snd_soc_codec *codec = pdata->codec;
- struct snd_soc_card *card = codec->component.card;
-
- pr_debug("%s(): wsa startup, enable_cnt:%d\n", __func__,
- pdata->enable_cnt);
-
- if (pdata->enable_cnt++ > 0)
- return 0;
- ret = msm_gpioset_activate(CLIENT_WSA_BONGO_1, "wsa_clk");
- if (ret) {
- pr_err("%s: gpio set cannot be activated %s\n",
- __func__, "wsa_clk");
- return ret;
- }
- if (pdata->enable_mclk) {
- ret = pdata->enable_mclk(card, true);
- if (ret < 0) {
- dev_err_ratelimited(codec->dev,
- "%s: mclk enable failed %d\n",
- __func__, ret);
- return ret;
- }
- }
- ret = wsa881x_reset(pdata, true);
- return ret;
-}
-
-static int wsa881x_shutdown(struct wsa881x_pdata *pdata)
-{
- int ret = 0, reg;
- struct snd_soc_codec *codec = pdata->codec;
- struct snd_soc_card *card = codec->component.card;
-
- pr_debug("%s(): wsa shutdown, enable_cnt:%d\n", __func__,
- pdata->enable_cnt);
- if (--pdata->enable_cnt > 0)
- return 0;
- ret = wsa881x_reset(pdata, false);
- if (ret) {
- pr_err("%s: wsa reset failed suspend %d\n",
- __func__, ret);
- return ret;
- }
-
- if (pdata->enable_mclk) {
- ret = pdata->enable_mclk(card, false);
- if (ret < 0) {
- pr_err("%s: mclk disable failed %d\n",
- __func__, ret);
- return ret;
- }
- }
-
- ret = msm_gpioset_suspend(CLIENT_WSA_BONGO_1, "wsa_clk");
- if (ret) {
- pr_err("%s: gpio set cannot be suspended %s\n",
- __func__, "wsa_clk");
- return ret;
- }
- if (pdata->codec) {
- /* restore defaults to cache */
- for (reg = 0; reg < ARRAY_SIZE(wsa881x_ana_reg_defaults);
- reg++) {
- if (wsa881x_ana_reg_readable[reg])
- snd_soc_cache_write(pdata->codec,
- wsa881x_ana_reg_defaults[reg].reg,
- wsa881x_ana_reg_defaults[reg].def);
- }
- }
- return 0;
-}
-
-static int32_t wsa881x_resource_acquire(struct snd_soc_codec *codec,
- bool enable)
-{
- int ret = 0;
- struct wsa881x_pdata *wsa881x = snd_soc_codec_get_drvdata(codec);
-
- if (enable) {
- ret = wsa881x_startup(wsa881x);
- if (ret < 0) {
- dev_err_ratelimited(codec->dev,
- "%s: failed to startup\n", __func__);
- return ret;
- }
- }
- wsa881x_clk_ctrl(codec, enable);
- wsa881x_bandgap_ctrl(codec, enable);
- if (!enable) {
- ret = wsa881x_shutdown(wsa881x);
- if (ret < 0)
- dev_err_ratelimited(codec->dev,
- "%s: failed to shutdown\n", __func__);
- }
- return ret;
-}
-
-static int32_t wsa881x_temp_reg_read(struct snd_soc_codec *codec,
- struct wsa_temp_register *wsa_temp_reg)
-{
- struct wsa881x_pdata *wsa881x = snd_soc_codec_get_drvdata(codec);
- int ret = 0;
-
- if (!wsa881x) {
- dev_err(codec->dev, "%s: wsa881x is NULL\n", __func__);
- return -EINVAL;
- }
- ret = wsa881x_resource_acquire(codec, true);
- if (ret) {
- dev_err_ratelimited(codec->dev,
- "%s: resource acquire fail\n", __func__);
- return ret;
- }
-
- if (WSA881X_IS_2_0(wsa881x->version)) {
- snd_soc_update_bits(codec, WSA881X_TADC_VALUE_CTL, 0x01, 0x00);
- wsa_temp_reg->dmeas_msb = snd_soc_read(codec, WSA881X_TEMP_MSB);
- wsa_temp_reg->dmeas_lsb = snd_soc_read(codec, WSA881X_TEMP_LSB);
- snd_soc_update_bits(codec, WSA881X_TADC_VALUE_CTL, 0x01, 0x01);
- } else {
- wsa_temp_reg->dmeas_msb = snd_soc_read(codec,
- WSA881X_TEMP_DOUT_MSB);
- wsa_temp_reg->dmeas_lsb = snd_soc_read(codec,
- WSA881X_TEMP_DOUT_LSB);
- }
- wsa_temp_reg->d1_msb = snd_soc_read(codec, WSA881X_OTP_REG_1);
- wsa_temp_reg->d1_lsb = snd_soc_read(codec, WSA881X_OTP_REG_2);
- wsa_temp_reg->d2_msb = snd_soc_read(codec, WSA881X_OTP_REG_3);
- wsa_temp_reg->d2_lsb = snd_soc_read(codec, WSA881X_OTP_REG_4);
-
- ret = wsa881x_resource_acquire(codec, false);
- if (ret)
- dev_err_ratelimited(codec->dev,
- "%s: resource release fail\n", __func__);
-
- return ret;
-}
-
-static int wsa881x_probe(struct snd_soc_codec *codec)
-{
- struct i2c_client *client;
- int ret = 0;
- int wsa881x_index = 0;
- struct snd_soc_dapm_context *dapm = &codec->dapm;
- char *widget_name = NULL;
- struct snd_soc_card *card = codec->component.card;
- struct snd_soc_codec_conf *codec_conf = card->codec_conf;
-
- client = dev_get_drvdata(codec->dev);
- ret = wsa881x_i2c_get_client_index(client, &wsa881x_index);
- if (ret != 0) {
- dev_err(&client->dev, "%s: I2C get codec I2C\n"
- "client failed\n", __func__);
- return ret;
- }
- mutex_init(&wsa_pdata[wsa881x_index].bg_lock);
- mutex_init(&wsa_pdata[wsa881x_index].res_lock);
- snprintf(wsa_pdata[wsa881x_index].tz_pdata.name, 100, "%s",
- wsa_tz_names[wsa881x_index]);
- wsa_pdata[wsa881x_index].codec = codec;
- wsa_pdata[wsa881x_index].spk_pa_gain = SPK_GAIN_12DB;
- wsa_pdata[wsa881x_index].codec = codec;
- wsa_pdata[wsa881x_index].tz_pdata.codec = codec;
- wsa_pdata[wsa881x_index].tz_pdata.wsa_temp_reg_read =
- wsa881x_temp_reg_read;
- snd_soc_codec_set_drvdata(codec, &wsa_pdata[wsa881x_index]);
- wsa881x_init_thermal(&wsa_pdata[wsa881x_index].tz_pdata);
- INIT_DELAYED_WORK(&wsa_pdata[wsa881x_index].ocp_ctl_work,
- wsa881x_ocp_ctl_work);
-
- if (codec_conf->name_prefix) {
- widget_name = kcalloc(WIDGET_NAME_MAX_SIZE, sizeof(char),
- GFP_KERNEL);
- if (!widget_name)
- return -ENOMEM;
-
- snprintf(widget_name, WIDGET_NAME_MAX_SIZE,
- "%s WSA_SPKR", codec_conf->name_prefix);
- snd_soc_dapm_ignore_suspend(dapm, widget_name);
- snprintf(widget_name, WIDGET_NAME_MAX_SIZE,
- "%s WSA_IN", codec_conf->name_prefix);
- snd_soc_dapm_ignore_suspend(dapm, widget_name);
- kfree(widget_name);
- } else {
- snd_soc_dapm_ignore_suspend(dapm, "WSA_SPKR");
- snd_soc_dapm_ignore_suspend(dapm, "WSA_IN");
- }
-
- snd_soc_dapm_sync(dapm);
- return 0;
-}
-
-static int wsa881x_remove(struct snd_soc_codec *codec)
-{
- struct wsa881x_pdata *wsa881x = snd_soc_codec_get_drvdata(codec);
-
- if (wsa881x->tz_pdata.tz_dev)
- wsa881x_deinit_thermal(wsa881x->tz_pdata.tz_dev);
-
- mutex_destroy(&wsa881x->bg_lock);
- mutex_destroy(&wsa881x->res_lock);
- return 0;
-}
-
-static struct snd_soc_codec_driver soc_codec_dev_wsa881x = {
- .probe = wsa881x_probe,
- .remove = wsa881x_remove,
-
- .read = wsa881x_i2c_read,
- .write = wsa881x_i2c_write,
-
- .reg_cache_size = WSA881X_CACHE_SIZE,
- .reg_cache_default = wsa881x_ana_reg_defaults,
- .reg_word_size = 1,
-
- .component_driver = {
- .controls = wsa881x_snd_controls,
- .num_controls = ARRAY_SIZE(wsa881x_snd_controls),
- .dapm_widgets = wsa881x_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wsa881x_dapm_widgets),
- .dapm_routes = wsa881x_audio_map,
- .num_dapm_routes = ARRAY_SIZE(wsa881x_audio_map),
- },
-};
-
-static int wsa881x_reset(struct wsa881x_pdata *pdata, bool enable)
-{
- int ret = 0;
-
- /*
- * shutdown the GPIOs WSA_EN, WSA_MCLK, regulators
- * and restore defaults in soc cache when shutdown.
- * Enable regulators, GPIOs WSA_MCLK, WSA_EN when powerup.
- */
- if (enable) {
- if (pdata->wsa_active)
- return 0;
- ret = msm_gpioset_activate(CLIENT_WSA_BONGO_1, "wsa_reset");
- if (ret) {
- pr_err("%s: gpio set cannot be activated %s\n",
- __func__, "wsa_reset");
- return ret;
- }
- ret = msm_gpioset_suspend(CLIENT_WSA_BONGO_1, "wsa_reset");
- if (ret) {
- pr_err("%s: gpio set cannot be suspended(powerup) %s\n",
- __func__, "wsa_reset");
- return ret;
- }
- ret = msm_gpioset_activate(CLIENT_WSA_BONGO_1, "wsa_reset");
- if (ret) {
- pr_err("%s: gpio set cannot be activated %s\n",
- __func__, "wsa_reset");
- return ret;
- }
- pdata->wsa_active = true;
- } else {
- if (!pdata->wsa_active)
- return 0;
- ret = msm_gpioset_suspend(CLIENT_WSA_BONGO_1, "wsa_reset");
- if (ret) {
- pr_err("%s: gpio set cannot be suspended %s\n",
- __func__, "wsa_reset");
- return ret;
- }
- pdata->wsa_active = false;
- }
- return ret;
-}
-
-int wsa881x_get_client_index(void)
-{
- return wsa881x_i2c_addr;
-}
-EXPORT_SYMBOL(wsa881x_get_client_index);
-
-int wsa881x_get_probing_count(void)
-{
- return wsa881x_probing_count;
-}
-EXPORT_SYMBOL(wsa881x_get_probing_count);
-
-int wsa881x_get_presence_count(void)
-{
- return wsa881x_presence_count;
-}
-EXPORT_SYMBOL(wsa881x_get_presence_count);
-
-int wsa881x_set_mclk_callback(
- int (*enable_mclk_callback)(struct snd_soc_card *, bool))
-{
- int i;
-
- for (i = 0; i < MAX_WSA881X_DEVICE; i++) {
- if (wsa_pdata[i].status == WSA881X_STATUS_I2C)
- wsa_pdata[i].enable_mclk = enable_mclk_callback;
- }
- return 0;
-}
-EXPORT_SYMBOL(wsa881x_set_mclk_callback);
-
-static int check_wsa881x_presence(struct i2c_client *client)
-{
- int ret = 0;
- int wsa881x_index = 0;
-
- ret = wsa881x_i2c_get_client_index(client, &wsa881x_index);
- if (ret != 0) {
- dev_err(&client->dev, "%s: I2C get codec I2C\n"
- "client failed\n", __func__);
- return ret;
- }
- ret = wsa881x_i2c_read_device(&wsa_pdata[wsa881x_index],
- WSA881X_CDC_RST_CTL);
- if (ret < 0) {
- dev_err(&client->dev, "failed to read wsa881x with addr %x\n",
- client->addr);
- return ret;
- }
- ret = wsa881x_i2c_write_device(&wsa_pdata[wsa881x_index],
- WSA881X_CDC_RST_CTL, 0x01);
- if (ret < 0) {
- dev_err(&client->dev, "failed write addr %x reg:0x5 val:0x1\n",
- client->addr);
- return ret;
- }
- /* allow 20ms before trigger next write to verify WSA881x presence */
- msleep(20);
- ret = wsa881x_i2c_write_device(&wsa_pdata[wsa881x_index],
- WSA881X_CDC_RST_CTL, 0x00);
- if (ret < 0) {
- dev_err(&client->dev, "failed write addr %x reg:0x5 val:0x0\n",
- client->addr);
- return ret;
- }
- return ret;
-}
-
-static int wsa881x_populate_dt_pdata(struct device *dev)
-{
- int ret = 0;
-
- /* reading the gpio configurations from dtsi file */
- if (!pinctrl_init) {
- ret = msm_gpioset_initialize(CLIENT_WSA_BONGO_1, dev);
- if (ret < 0) {
- dev_err(dev,
- "%s: error reading dtsi files%d\n", __func__, ret);
- goto err;
- }
- pinctrl_init = true;
- }
-err:
- return ret;
-}
-
-static int wsa881x_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- int ret = 0;
- int wsa881x_index = 0;
- struct wsa881x_pdata *pdata = NULL;
-
- ret = wsa881x_i2c_get_client_index(client, &wsa881x_index);
- if (ret != 0) {
- dev_err(&client->dev, "%s: I2C get codec I2C\n"
- "client failed\n", __func__);
- return ret;
- }
-
- pdata = &wsa_pdata[wsa881x_index];
-
- if ((client->addr == WSA881X_I2C_SPK0_SLAVE1_ADDR ||
- client->addr == WSA881X_I2C_SPK1_SLAVE1_ADDR) &&
- (pdata->status == WSA881X_STATUS_PROBING))
- return ret;
-
- if (pdata->status == WSA881X_STATUS_I2C) {
- dev_dbg(&client->dev, "%s:probe for other slaves\n"
- "devices of codec I2C slave Addr = %x\n",
- __func__, client->addr);
-
- dev_dbg(&client->dev, "%s:wsa_idx = %d SLAVE = %d\n",
- __func__, wsa881x_index, WSA881X_ANALOG_SLAVE);
- pdata->regmap[WSA881X_ANALOG_SLAVE] =
- devm_regmap_init_i2c(
- client,
- &wsa881x_ana_regmap_config[WSA881X_ANALOG_SLAVE]);
- regcache_cache_bypass(pdata->regmap[WSA881X_ANALOG_SLAVE],
- true);
- if (IS_ERR(pdata->regmap[WSA881X_ANALOG_SLAVE])) {
- ret = PTR_ERR(pdata->regmap[WSA881X_ANALOG_SLAVE]);
- dev_err(&client->dev,
- "%s: regmap_init failed %d\n",
- __func__, ret);
- }
- client->dev.platform_data = pdata;
- i2c_set_clientdata(client, pdata);
- pdata->client[WSA881X_ANALOG_SLAVE] = client;
- if (pdata->version == WSA881X_2_0)
- wsa881x_update_regmap_2_0(
- pdata->regmap[WSA881X_ANALOG_SLAVE],
- WSA881X_ANALOG_SLAVE);
-
- return ret;
- } else if (pdata->status == WSA881X_STATUS_PROBING) {
- pdata->index = wsa881x_index;
- if (client->dev.of_node) {
- dev_dbg(&client->dev, "%s:Platform data\n"
- "from device tree\n", __func__);
- ret = wsa881x_populate_dt_pdata(&client->dev);
- if (ret < 0) {
- dev_err(&client->dev,
- "%s: Fail to obtain pdata from device tree\n",
- __func__);
- ret = -EINVAL;
- goto err;
- }
- client->dev.platform_data = pdata;
- } else {
- dev_dbg(&client->dev, "%s:Platform data from\n"
- "board file\n", __func__);
- pdata = client->dev.platform_data;
- }
- if (!pdata) {
- dev_dbg(&client->dev, "no platform data?\n");
- ret = -EINVAL;
- goto err;
- }
- i2c_set_clientdata(client, pdata);
- dev_set_drvdata(&client->dev, client);
-
- pdata->regmap[WSA881X_DIGITAL_SLAVE] =
- devm_regmap_init_i2c(
- client,
- &wsa881x_ana_regmap_config[WSA881X_DIGITAL_SLAVE]);
- regcache_cache_bypass(pdata->regmap[WSA881X_DIGITAL_SLAVE],
- true);
- if (IS_ERR(pdata->regmap[WSA881X_DIGITAL_SLAVE])) {
- ret = PTR_ERR(pdata->regmap[WSA881X_DIGITAL_SLAVE]);
- dev_err(&client->dev, "%s: regmap_init failed %d\n",
- __func__, ret);
- goto err;
- }
- /* bus reset sequence */
- ret = wsa881x_reset(pdata, true);
- if (ret < 0) {
- dev_err(&client->dev, "%s: WSA enable Failed %d\n",
- __func__, ret);
- goto err;
- }
- pdata->client[WSA881X_DIGITAL_SLAVE] = client;
- pdata->regmap_flag = true;
- ret = check_wsa881x_presence(client);
- if (ret < 0) {
- dev_err(&client->dev,
- "failed to ping wsa with addr:%x, ret = %d\n",
- client->addr, ret);
- wsa881x_probing_count++;
- goto err1;
- }
- pdata->version = wsa881x_i2c_read_device(pdata,
- WSA881X_CHIP_ID1);
- pr_debug("%s: wsa881x version: %d\n", __func__, pdata->version);
- if (pdata->version == WSA881X_2_0) {
- wsa881x_update_reg_defaults_2_0();
- wsa881x_update_regmap_2_0(
- pdata->regmap[WSA881X_DIGITAL_SLAVE],
- WSA881X_DIGITAL_SLAVE);
- }
- wsa881x_presence_count++;
- wsa881x_probing_count++;
- ret = snd_soc_register_codec(&client->dev,
- &soc_codec_dev_wsa881x,
- NULL, 0);
- if (ret < 0)
- goto err1;
- pdata->status = WSA881X_STATUS_I2C;
- }
-err1:
- wsa881x_reset(pdata, false);
-err:
- return 0;
-}
-
-static int wsa881x_i2c_remove(struct i2c_client *client)
-{
- struct wsa881x_pdata *wsa881x = i2c_get_clientdata(client);
-
- snd_soc_unregister_codec(&client->dev);
- i2c_set_clientdata(client, NULL);
- kfree(wsa881x);
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int wsa881x_i2c_suspend(struct device *dev)
-{
- pr_debug("%s: system suspend\n", __func__);
- return 0;
-}
-
-static int wsa881x_i2c_resume(struct device *dev)
-{
- pr_debug("%s: system resume\n", __func__);
- return 0;
-}
-
-static const struct dev_pm_ops wsa881x_i2c_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(wsa881x_i2c_suspend, wsa881x_i2c_resume)
-};
-#endif /* CONFIG_PM_SLEEP */
-
-static const struct i2c_device_id wsa881x_i2c_id[] = {
- {"wsa881x-i2c-dev", WSA881X_I2C_SPK0_SLAVE0_ADDR},
- {"wsa881x-i2c-dev", WSA881X_I2C_SPK0_SLAVE1_ADDR},
- {"wsa881x-i2c-dev", WSA881X_I2C_SPK1_SLAVE0_ADDR},
- {"wsa881x-i2c-dev", WSA881X_I2C_SPK1_SLAVE1_ADDR},
- {}
-};
-
-MODULE_DEVICE_TABLE(i2c, wsa881x_i2c_id);
-
-
-static const struct of_device_id msm_match_table[] = {
- {.compatible = "qcom,wsa881x-i2c-codec"},
- {}
-};
-MODULE_DEVICE_TABLE(of, msm_match_table);
-
-static struct i2c_driver wsa881x_codec_driver = {
- .driver = {
- .name = "wsa881x-i2c-codec",
- .owner = THIS_MODULE,
-#ifdef CONFIG_PM_SLEEP
- .pm = &wsa881x_i2c_pm_ops,
-#endif
- .of_match_table = msm_match_table,
- },
- .id_table = wsa881x_i2c_id,
- .probe = wsa881x_i2c_probe,
- .remove = wsa881x_i2c_remove,
-};
-
-static int __init wsa881x_codec_init(void)
-{
- int i = 0;
-
- for (i = 0; i < MAX_WSA881X_DEVICE; i++)
- wsa_pdata[i].status = WSA881X_STATUS_PROBING;
- return i2c_add_driver(&wsa881x_codec_driver);
-}
-module_init(wsa881x_codec_init);
-
-static void __exit wsa881x_codec_exit(void)
-{
- i2c_del_driver(&wsa881x_codec_driver);
-}
-
-module_exit(wsa881x_codec_exit);
-
-MODULE_DESCRIPTION("WSA881x Codec driver");
-MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/wsa881x-analog.h b/sound/soc/codecs/wsa881x-analog.h
deleted file mode 100644
index a2ef2a2..0000000
--- a/sound/soc/codecs/wsa881x-analog.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _WSA881X_H
-#define _WSA881X_H
-
-#include <linux/regmap.h>
-#include "wsa881x-registers-analog.h"
-#include <sound/soc.h>
-
-#define WSA881X_I2C_SPK0_SLAVE0_ADDR 0x0E
-#define WSA881X_I2C_SPK0_SLAVE1_ADDR 0x44
-#define WSA881X_I2C_SPK1_SLAVE0_ADDR 0x0F
-#define WSA881X_I2C_SPK1_SLAVE1_ADDR 0x45
-
-#define WSA881X_I2C_SPK0_SLAVE0 0
-#define WSA881X_I2C_SPK1_SLAVE0 1
-#define MAX_WSA881X_DEVICE 2
-#define WSA881X_DIGITAL_SLAVE 0
-#define WSA881X_ANALOG_SLAVE 1
-
-enum {
- WSA881X_1_X = 0,
- WSA881X_2_0,
-};
-
-#define WSA881X_IS_2_0(ver) \
- ((ver == WSA881X_2_0) ? 1 : 0)
-
-extern const u8 wsa881x_ana_reg_readable[WSA881X_CACHE_SIZE];
-extern struct reg_default wsa881x_ana_reg_defaults[WSA881X_CACHE_SIZE];
-extern struct regmap_config wsa881x_ana_regmap_config[2];
-int wsa881x_get_client_index(void);
-int wsa881x_get_probing_count(void);
-int wsa881x_get_presence_count(void);
-int wsa881x_set_mclk_callback(
- int (*enable_mclk_callback)(struct snd_soc_card *, bool));
-void wsa881x_update_reg_defaults_2_0(void);
-void wsa881x_update_regmap_2_0(struct regmap *regmap, int flag);
-
-#endif /* _WSA881X_H */
diff --git a/sound/soc/codecs/wsa881x-irq.c b/sound/soc/codecs/wsa881x-irq.c
deleted file mode 100644
index 9afbd92..0000000
--- a/sound/soc/codecs/wsa881x-irq.c
+++ /dev/null
@@ -1,610 +0,0 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/bitops.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/irq.h>
-#include <linux/delay.h>
-#include <linux/of.h>
-#include <linux/of_irq.h>
-#include <linux/slab.h>
-#include <linux/ratelimit.h>
-#include <linux/pm_qos.h>
-#include <soc/qcom/pm.h>
-#include "wsa881x-irq.h"
-#include "wsa881x-registers-analog.h"
-
-#define BYTE_BIT_MASK(nr) (1UL << ((nr) % BITS_PER_BYTE))
-#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
-
-
-#define WSA_MAX_NUM_IRQS 8
-
-#ifndef NO_IRQ
-#define NO_IRQ (-1)
-#endif
-
-static int virq_to_phyirq(
- struct wsa_resource *wsa_res, int virq);
-static int phyirq_to_virq(
- struct wsa_resource *wsa_res, int irq);
-static unsigned int wsa_irq_get_upstream_irq(
- struct wsa_resource *wsa_res);
-static void wsa_irq_put_upstream_irq(
- struct wsa_resource *wsa_res);
-static int wsa_map_irq(
- struct wsa_resource *wsa_res, int irq);
-
-static struct snd_soc_codec *ptr_codec;
-
-/**
- * wsa_set_codec() - to update codec pointer
- * @codec: codec pointer.
- *
- * To update the codec pointer, which is used to read/write
- * wsa register.
- *
- * Return: void.
- */
-void wsa_set_codec(struct snd_soc_codec *codec)
-{
- if (codec == NULL) {
- pr_err("%s: codec pointer is NULL\n", __func__);
- ptr_codec = NULL;
- return;
- }
- ptr_codec = codec;
- /* Initialize interrupt mask and level registers */
- snd_soc_write(codec, WSA881X_INTR_LEVEL, 0x8F);
- snd_soc_write(codec, WSA881X_INTR_MASK, 0x8F);
-}
-
-static void wsa_irq_lock(struct irq_data *data)
-{
- struct wsa_resource *wsa_res =
- irq_data_get_irq_chip_data(data);
-
- if (wsa_res == NULL) {
- pr_err("%s: wsa_res pointer is NULL\n", __func__);
- return;
- }
- mutex_lock(&wsa_res->irq_lock);
-}
-
-static void wsa_irq_sync_unlock(struct irq_data *data)
-{
- struct wsa_resource *wsa_res =
- irq_data_get_irq_chip_data(data);
-
- if (wsa_res == NULL) {
- pr_err("%s: wsa_res pointer is NULL\n", __func__);
- return;
- }
- if (wsa_res->codec == NULL) {
- pr_err("%s: codec pointer not registered\n", __func__);
- if (ptr_codec == NULL) {
- pr_err("%s: did not receive valid codec pointer\n",
- __func__);
- goto unlock;
- } else {
- wsa_res->codec = ptr_codec;
- }
- }
-
- /*
- * If there's been a change in the mask write it back
- * to the hardware.
- */
- if (wsa_res->irq_masks_cur !=
- wsa_res->irq_masks_cache) {
-
- wsa_res->irq_masks_cache =
- wsa_res->irq_masks_cur;
- snd_soc_write(wsa_res->codec,
- WSA881X_INTR_MASK,
- wsa_res->irq_masks_cur);
- }
-unlock:
- mutex_unlock(&wsa_res->irq_lock);
-}
-
-static void wsa_irq_enable(struct irq_data *data)
-{
- struct wsa_resource *wsa_res =
- irq_data_get_irq_chip_data(data);
- int wsa_irq;
-
- if (wsa_res == NULL) {
- pr_err("%s: wsa_res pointer is NULL\n", __func__);
- return;
- }
- wsa_irq = virq_to_phyirq(wsa_res, data->irq);
- pr_debug("%s: wsa_irq = %d\n", __func__, wsa_irq);
- wsa_res->irq_masks_cur &=
- ~(BYTE_BIT_MASK(wsa_irq));
-}
-
-static void wsa_irq_disable(struct irq_data *data)
-{
- struct wsa_resource *wsa_res =
- irq_data_get_irq_chip_data(data);
- int wsa_irq;
-
- if (wsa_res == NULL) {
- pr_err("%s: wsa_res pointer is NULL\n", __func__);
- return;
- }
- wsa_irq = virq_to_phyirq(wsa_res, data->irq);
- pr_debug("%s: wsa_irq = %d\n", __func__, wsa_irq);
- wsa_res->irq_masks_cur
- |= BYTE_BIT_MASK(wsa_irq);
-}
-
-static void wsa_irq_ack(struct irq_data *data)
-{
- int wsa_irq = 0;
- struct wsa_resource *wsa_res =
- irq_data_get_irq_chip_data(data);
-
- if (wsa_res == NULL) {
- pr_err("%s: wsa_res is NULL\n", __func__);
- return;
- }
- wsa_irq = virq_to_phyirq(wsa_res, data->irq);
- pr_debug("%s: IRQ_ACK called for WCD9XXX IRQ: %d\n",
- __func__, wsa_irq);
-}
-
-static void wsa_irq_mask(struct irq_data *d)
-{
- /* do nothing but required as linux calls irq_mask without NULL check */
-}
-
-static struct irq_chip wsa_irq_chip = {
- .name = "wsa",
- .irq_bus_lock = wsa_irq_lock,
- .irq_bus_sync_unlock = wsa_irq_sync_unlock,
- .irq_disable = wsa_irq_disable,
- .irq_enable = wsa_irq_enable,
- .irq_mask = wsa_irq_mask,
- .irq_ack = wsa_irq_ack,
-};
-
-static irqreturn_t wsa_irq_thread(int irq, void *data)
-{
- struct wsa_resource *wsa_res = data;
- int i;
- u8 status;
-
- if (wsa_res == NULL) {
- pr_err("%s: wsa_res is NULL\n", __func__);
- return IRQ_HANDLED;
- }
- if (wsa_res->codec == NULL) {
- pr_err("%s: codec pointer not registered\n", __func__);
- if (ptr_codec == NULL) {
- pr_err("%s: did not receive valid codec pointer\n",
- __func__);
- return IRQ_HANDLED;
- }
- wsa_res->codec = ptr_codec;
- }
- status = snd_soc_read(wsa_res->codec, WSA881X_INTR_STATUS);
- /* Apply masking */
- status &= ~wsa_res->irq_masks_cur;
-
- for (i = 0; i < wsa_res->num_irqs; i++) {
- if (status & BYTE_BIT_MASK(i)) {
- mutex_lock(&wsa_res->nested_irq_lock);
- handle_nested_irq(phyirq_to_virq(wsa_res, i));
- mutex_unlock(&wsa_res->nested_irq_lock);
- }
- }
-
- return IRQ_HANDLED;
-}
-
-/**
- * wsa_free_irq() - to free an interrupt
- * @irq: interrupt number.
- * @data: pointer to wsa resource.
- *
- * To free already requested interrupt.
- *
- * Return: void.
- */
-void wsa_free_irq(int irq, void *data)
-{
- struct wsa_resource *wsa_res = data;
-
- if (wsa_res == NULL) {
- pr_err("%s: wsa_res is NULL\n", __func__);
- return;
- }
- free_irq(phyirq_to_virq(wsa_res, irq), data);
-}
-
-/**
- * wsa_enable_irq() - to enable an interrupt
- * @wsa_res: pointer to wsa resource.
- * @irq: interrupt number.
- *
- * This function is to enable an interrupt.
- *
- * Return: void.
- */
-void wsa_enable_irq(struct wsa_resource *wsa_res, int irq)
-{
- if (wsa_res == NULL) {
- pr_err("%s: wsa_res is NULL\n", __func__);
- return;
- }
- enable_irq(phyirq_to_virq(wsa_res, irq));
-}
-
-/**
- * wsa_disable_irq() - to disable an interrupt
- * @wsa_res: pointer to wsa resource.
- * @irq: interrupt number.
- *
- * To disable an interrupt without waiting for executing
- * handler to complete.
- *
- * Return: void.
- */
-void wsa_disable_irq(struct wsa_resource *wsa_res, int irq)
-{
- if (wsa_res == NULL) {
- pr_err("%s: wsa_res is NULL\n", __func__);
- return;
- }
- disable_irq_nosync(phyirq_to_virq(wsa_res, irq));
-}
-
-/**
- * wsa_disable_irq_sync() - to disable an interrupt
- * @wsa_res: pointer to wsa resource.
- * @irq: interrupt number.
- *
- * To disable an interrupt, wait for executing IRQ
- * handler to complete.
- *
- * Return: void.
- */
-void wsa_disable_irq_sync(
- struct wsa_resource *wsa_res, int irq)
-{
- if (wsa_res == NULL) {
- pr_err("%s: wsa_res is NULL\n", __func__);
- return;
- }
- disable_irq(phyirq_to_virq(wsa_res, irq));
-}
-
-static int wsa_irq_setup_downstream_irq(struct wsa_resource *wsa_res)
-{
- int irq, virq, ret;
-
- if (wsa_res == NULL) {
- pr_err("%s: wsa_res is NULL\n", __func__);
- return -EINVAL;
- }
- pr_debug("%s: enter\n", __func__);
-
- for (irq = 0; irq < wsa_res->num_irqs; irq++) {
- /* Map OF irq */
- virq = wsa_map_irq(wsa_res, irq);
- pr_debug("%s: irq %d -> %d\n", __func__, irq, virq);
- if (virq == NO_IRQ) {
- pr_err("%s, No interrupt specifier for irq %d\n",
- __func__, irq);
- return NO_IRQ;
- }
-
- ret = irq_set_chip_data(virq, wsa_res);
- if (ret) {
- pr_err("%s: Failed to configure irq %d (%d)\n",
- __func__, irq, ret);
- return ret;
- }
-
- if (wsa_res->irq_level_high[irq])
- irq_set_chip_and_handler(virq, &wsa_irq_chip,
- handle_level_irq);
- else
- irq_set_chip_and_handler(virq, &wsa_irq_chip,
- handle_edge_irq);
-
- irq_set_nested_thread(virq, 1);
- }
-
- pr_debug("%s: leave\n", __func__);
-
- return 0;
-}
-
-static int wsa_irq_init(struct wsa_resource *wsa_res)
-{
- int i, ret;
-
- if (wsa_res == NULL) {
- pr_err("%s: wsa_res is NULL\n", __func__);
- return -EINVAL;
- }
- mutex_init(&wsa_res->irq_lock);
- mutex_init(&wsa_res->nested_irq_lock);
-
- wsa_res->irq = wsa_irq_get_upstream_irq(wsa_res);
- if (!wsa_res->irq) {
- pr_warn("%s: irq driver is not yet initialized\n", __func__);
- mutex_destroy(&wsa_res->irq_lock);
- mutex_destroy(&wsa_res->nested_irq_lock);
- return -EPROBE_DEFER;
- }
- pr_debug("%s: probed irq %d\n", __func__, wsa_res->irq);
-
- /* Setup downstream IRQs */
- ret = wsa_irq_setup_downstream_irq(wsa_res);
- if (ret) {
- pr_err("%s: Failed to setup downstream IRQ\n", __func__);
- goto fail_irq_init;
- }
-
- /* mask all the interrupts */
- for (i = 0; i < wsa_res->num_irqs; i++) {
- wsa_res->irq_masks_cur |= BYTE_BIT_MASK(i);
- wsa_res->irq_masks_cache |= BYTE_BIT_MASK(i);
- }
-
- ret = request_threaded_irq(wsa_res->irq, NULL, wsa_irq_thread,
- IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
- "wsa", wsa_res);
- if (ret != 0) {
- dev_err(wsa_res->dev, "Failed to request IRQ %d: %d\n",
- wsa_res->irq, ret);
- } else {
- ret = enable_irq_wake(wsa_res->irq);
- if (ret) {
- dev_err(wsa_res->dev,
- "Failed to set wake interrupt on IRQ %d: %d\n",
- wsa_res->irq, ret);
- free_irq(wsa_res->irq, wsa_res);
- }
- }
-
- if (ret)
- goto fail_irq_init;
-
- return ret;
-
-fail_irq_init:
- dev_err(wsa_res->dev,
- "%s: Failed to init wsa irq\n", __func__);
- wsa_irq_put_upstream_irq(wsa_res);
- mutex_destroy(&wsa_res->irq_lock);
- mutex_destroy(&wsa_res->nested_irq_lock);
- return ret;
-}
-
-/**
- * wsa_request_irq() - to request/register an interrupt
- * @wsa_res: pointer to wsa_resource.
- * @irq: interrupt number.
- * @handler: interrupt handler function pointer.
- * @name: interrupt name.
- * @data: device info.
- *
- * Convert physical irq to virtual irq and then
- * reguest for threaded handler.
- *
- * Return: Retuns success/failure.
- */
-int wsa_request_irq(struct wsa_resource *wsa_res,
- int irq, irq_handler_t handler,
- const char *name, void *data)
-{
- int virq;
-
- if (wsa_res == NULL) {
- pr_err("%s: wsa_res is NULL\n", __func__);
- return -EINVAL;
- }
- virq = phyirq_to_virq(wsa_res, irq);
-
- /*
- * ARM needs us to explicitly flag the IRQ as valid
- * and will set them noprobe when we do so.
- */
-#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
- set_irq_flags(virq, IRQF_VALID);
-#else
- set_irq_noprobe(virq);
-#endif
-
- return request_threaded_irq(virq, NULL, handler, IRQF_TRIGGER_RISING,
- name, data);
-}
-
-/**
- * wsa_irq_exit() - to disable/clear interrupt/resources
- * @wsa_res: pointer to wsa_resource
- *
- * Disable and free the interrupts and then release resources.
- *
- * Return: void.
- */
-void wsa_irq_exit(struct wsa_resource *wsa_res)
-{
- if (wsa_res == NULL) {
- pr_err("%s: wsa_res is NULL\n", __func__);
- return;
- }
- dev_dbg(wsa_res->dev, "%s: Cleaning up irq %d\n", __func__,
- wsa_res->irq);
-
- if (wsa_res->irq) {
- disable_irq_wake(wsa_res->irq);
- free_irq(wsa_res->irq, wsa_res);
- /* Release parent's of node */
- wsa_irq_put_upstream_irq(wsa_res);
- }
- mutex_destroy(&wsa_res->irq_lock);
- mutex_destroy(&wsa_res->nested_irq_lock);
-}
-
-static int phyirq_to_virq(struct wsa_resource *wsa_res, int offset)
-{
- if (wsa_res == NULL) {
- pr_err("%s: wsa_res is NULL\n", __func__);
- return -EINVAL;
- }
- return irq_linear_revmap(wsa_res->domain, offset);
-}
-
-static int virq_to_phyirq(struct wsa_resource *wsa_res, int virq)
-{
- struct irq_data *irq_data = irq_get_irq_data(virq);
-
- if (unlikely(!irq_data)) {
- pr_err("%s: irq_data is NULL\n", __func__);
- return -EINVAL;
- }
- return irq_data->hwirq;
-}
-
-static unsigned int wsa_irq_get_upstream_irq(struct wsa_resource *wsa_res)
-{
- if (wsa_res == NULL) {
- pr_err("%s: wsa_res is NULL\n", __func__);
- return -EINVAL;
- }
- return wsa_res->irq;
-}
-
-static void wsa_irq_put_upstream_irq(struct wsa_resource *wsa_res)
-{
- if (wsa_res == NULL) {
- pr_err("%s: wsa_res is NULL\n", __func__);
- return;
- }
- /* Hold parent's of node */
- of_node_put(wsa_res->dev->of_node);
-}
-
-static int wsa_map_irq(struct wsa_resource *wsa_res, int irq)
-{
- if (wsa_res == NULL) {
- pr_err("%s: wsa_res is NULL\n", __func__);
- return -EINVAL;
- }
- return of_irq_to_resource(wsa_res->dev->of_node, irq, NULL);
-}
-
-static int wsa_irq_probe(struct platform_device *pdev)
-{
- int irq;
- struct wsa_resource *wsa_res = NULL;
- int ret = -EINVAL;
-
- irq = platform_get_irq_byname(pdev, "wsa-int");
- if (irq < 0) {
- dev_err(&pdev->dev, "%s: Couldn't find wsa-int node(%d)\n",
- __func__, irq);
- return -EINVAL;
- }
- pr_debug("%s: node %s\n", __func__, pdev->name);
- wsa_res = kzalloc(sizeof(*wsa_res), GFP_KERNEL);
- if (!wsa_res) {
- pr_err("%s: could not allocate memory\n", __func__);
- return -ENOMEM;
- }
- /*
- * wsa interrupt controller supports N to N irq mapping with
- * single cell binding with irq numbers(offsets) only.
- * Use irq_domain_simple_ops that has irq_domain_simple_map and
- * irq_domain_xlate_onetwocell.
- */
- wsa_res->dev = &pdev->dev;
- wsa_res->domain = irq_domain_add_linear(wsa_res->dev->of_node,
- WSA_MAX_NUM_IRQS, &irq_domain_simple_ops,
- wsa_res);
- if (!wsa_res->domain) {
- dev_err(&pdev->dev, "%s: domain is NULL\n", __func__);
- ret = -ENOMEM;
- goto err;
- }
- wsa_res->dev = &pdev->dev;
-
- dev_dbg(&pdev->dev, "%s: virq = %d\n", __func__, irq);
- wsa_res->irq = irq;
- wsa_res->num_irq_regs = 1;
- wsa_res->num_irqs = WSA_NUM_IRQS;
- ret = wsa_irq_init(wsa_res);
- if (ret < 0) {
- dev_err(&pdev->dev, "%s: failed to do irq init %d\n",
- __func__, ret);
- goto err;
- }
-
- return ret;
-err:
- kfree(wsa_res);
- return ret;
-}
-
-static int wsa_irq_remove(struct platform_device *pdev)
-{
- struct irq_domain *domain;
- struct wsa_resource *data;
-
- domain = irq_find_host(pdev->dev.of_node);
- if (unlikely(!domain)) {
- pr_err("%s: domain is NULL\n", __func__);
- return -EINVAL;
- }
- data = (struct wsa_resource *)domain->host_data;
- data->irq = 0;
-
- return 0;
-}
-
-static const struct of_device_id of_match[] = {
- { .compatible = "qcom,wsa-irq" },
- { }
-};
-
-static struct platform_driver wsa_irq_driver = {
- .probe = wsa_irq_probe,
- .remove = wsa_irq_remove,
- .driver = {
- .name = "wsa_intc",
- .owner = THIS_MODULE,
- .of_match_table = of_match_ptr(of_match),
- },
-};
-
-static int wsa_irq_drv_init(void)
-{
- return platform_driver_register(&wsa_irq_driver);
-}
-subsys_initcall(wsa_irq_drv_init);
-
-static void wsa_irq_drv_exit(void)
-{
- platform_driver_unregister(&wsa_irq_driver);
-}
-module_exit(wsa_irq_drv_exit);
-
-MODULE_DESCRIPTION("WSA881x IRQ driver");
-MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/wsa881x-irq.h b/sound/soc/codecs/wsa881x-irq.h
deleted file mode 100644
index 270eb91..0000000
--- a/sound/soc/codecs/wsa881x-irq.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#ifndef __WSA881X_IRQ_H__
-#define __WSA881X_IRQ_H__
-
-#include <linux/irqdomain.h>
-#include <linux/interrupt.h>
-#include <sound/soc.h>
-
-/**
- * enum wsa_interrupts - wsa interrupt number
- * @WSA_INT_SAF2WAR: Temp irq interrupt, from safe state to warning state.
- * @WSA_INT_WAR2SAF: Temp irq interrupt, from warning state to safe state.
- * @WSA_INT_DISABLE: Disable Temp sensor interrupts.
- * @WSA_INT_OCP: OCP interrupt.
- * @WSA_INT_CLIP: CLIP detect interrupt.
- * @WSA_NUM_IRQS: MAX Interrupt number.
- *
- * WSA IRQ Interrupt numbers.
- */
-enum wsa_interrupts {
- WSA_INT_SAF2WAR = 0,
- WSA_INT_WAR2SAF,
- WSA_INT_DISABLE,
- WSA_INT_OCP,
- WSA_INT_CLIP,
- WSA_NUM_IRQS,
-};
-
-/**
- * struct wsa_resource - the basic wsa_resource structure
- * @irq_lock: lock used by irq_chip functions.
- * @nested_irq_lock: lock used while handling nested interrupts.
- * @irq: interrupt number.
- * @irq_masks_cur: current mask value to be written to mask registers.
- * @irq_masks_cache: cached mask value.
- * @num_irqs: number of supported interrupts.
- * @num_irq_regs: number of irq registers.
- * @parent: parent pointer.
- * @dev: device pointer.
- * @domain: irq domain pointer.
- * codec: codec pointer.
- *
- * Contains required members used in wsa irq driver.
- */
-
-struct wsa_resource {
- struct mutex irq_lock;
- struct mutex nested_irq_lock;
- unsigned int irq;
- u8 irq_masks_cur;
- u8 irq_masks_cache;
- bool irq_level_high[8];
- int num_irqs;
- int num_irq_regs;
- void *parent;
- struct device *dev;
- struct irq_domain *domain;
- struct snd_soc_codec *codec;
-};
-
-void wsa_set_codec(struct snd_soc_codec *codec);
-void wsa_free_irq(int irq, void *data);
-void wsa_enable_irq(struct wsa_resource *wsa_res, int irq);
-void wsa_disable_irq(struct wsa_resource *wsa_res, int irq);
-void wsa_disable_irq_sync(struct wsa_resource *wsa_res, int irq);
-int wsa_request_irq(struct wsa_resource *wsa_res,
- int irq, irq_handler_t handler,
- const char *name, void *data);
-
-void wsa_irq_exit(struct wsa_resource *wsa_res);
-
-#endif /* __WSA881X_IRQ_H__ */
diff --git a/sound/soc/codecs/wsa881x-registers-analog.h b/sound/soc/codecs/wsa881x-registers-analog.h
deleted file mode 100644
index a5ebf8e1..0000000
--- a/sound/soc/codecs/wsa881x-registers-analog.h
+++ /dev/null
@@ -1,206 +0,0 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef WSA881X_REGISTERS_H
-#define WSA881X_REGISTERS_H
-
-#define WSA881X_DIGITAL_BASE 0x0000
-#define WSA881X_ANALOG_BASE 0x0100
-
-#define WSA881X_CHIP_ID0 (WSA881X_DIGITAL_BASE+0x0000)
-#define WSA881X_CHIP_ID1 (WSA881X_DIGITAL_BASE+0x0001)
-#define WSA881X_CHIP_ID2 (WSA881X_DIGITAL_BASE+0x0002)
-#define WSA881X_CHIP_ID3 (WSA881X_DIGITAL_BASE+0x0003)
-#define WSA881X_BUS_ID (WSA881X_DIGITAL_BASE+0x0004)
-#define WSA881X_CDC_RST_CTL (WSA881X_DIGITAL_BASE+0x0005)
-#define WSA881X_CDC_TOP_CLK_CTL (WSA881X_DIGITAL_BASE+0x0006)
-#define WSA881X_CDC_ANA_CLK_CTL (WSA881X_DIGITAL_BASE+0x0007)
-#define WSA881X_CDC_DIG_CLK_CTL (WSA881X_DIGITAL_BASE+0x0008)
-#define WSA881X_CLOCK_CONFIG (WSA881X_DIGITAL_BASE+0x0009)
-#define WSA881X_ANA_CTL (WSA881X_DIGITAL_BASE+0x000A)
-#define WSA881X_SWR_RESET_EN (WSA881X_DIGITAL_BASE+0x000B)
-#define WSA881X_RESET_CTL (WSA881X_DIGITAL_BASE+0x000C)
-#define WSA881X_TADC_VALUE_CTL (WSA881X_DIGITAL_BASE+0x000F)
-#define WSA881X_TEMP_DETECT_CTL (WSA881X_DIGITAL_BASE+0x0010)
-#define WSA881X_TEMP_MSB (WSA881X_DIGITAL_BASE+0x0011)
-#define WSA881X_TEMP_LSB (WSA881X_DIGITAL_BASE+0x0012)
-#define WSA881X_TEMP_CONFIG0 (WSA881X_DIGITAL_BASE+0x0013)
-#define WSA881X_TEMP_CONFIG1 (WSA881X_DIGITAL_BASE+0x0014)
-#define WSA881X_CDC_CLIP_CTL (WSA881X_DIGITAL_BASE+0x0015)
-#define WSA881X_SDM_PDM9_LSB (WSA881X_DIGITAL_BASE+0x0016)
-#define WSA881X_SDM_PDM9_MSB (WSA881X_DIGITAL_BASE+0x0017)
-#define WSA881X_CDC_RX_CTL (WSA881X_DIGITAL_BASE+0x0018)
-#define WSA881X_DEM_BYPASS_DATA0 (WSA881X_DIGITAL_BASE+0x0019)
-#define WSA881X_DEM_BYPASS_DATA1 (WSA881X_DIGITAL_BASE+0x001A)
-#define WSA881X_DEM_BYPASS_DATA2 (WSA881X_DIGITAL_BASE+0x001B)
-#define WSA881X_DEM_BYPASS_DATA3 (WSA881X_DIGITAL_BASE+0x001C)
-#define WSA881X_OTP_CTRL0 (WSA881X_DIGITAL_BASE+0x001D)
-#define WSA881X_OTP_CTRL1 (WSA881X_DIGITAL_BASE+0x001E)
-#define WSA881X_HDRIVE_CTL_GROUP1 (WSA881X_DIGITAL_BASE+0x001F)
-#define WSA881X_INTR_MODE (WSA881X_DIGITAL_BASE+0x0020)
-#define WSA881X_INTR_MASK (WSA881X_DIGITAL_BASE+0x0021)
-#define WSA881X_INTR_STATUS (WSA881X_DIGITAL_BASE+0x0022)
-#define WSA881X_INTR_CLEAR (WSA881X_DIGITAL_BASE+0x0023)
-#define WSA881X_INTR_LEVEL (WSA881X_DIGITAL_BASE+0x0024)
-#define WSA881X_INTR_SET (WSA881X_DIGITAL_BASE+0x0025)
-#define WSA881X_INTR_TEST (WSA881X_DIGITAL_BASE+0x0026)
-#define WSA881X_PDM_TEST_MODE (WSA881X_DIGITAL_BASE+0x0030)
-#define WSA881X_ATE_TEST_MODE (WSA881X_DIGITAL_BASE+0x0031)
-#define WSA881X_PIN_CTL_MODE (WSA881X_DIGITAL_BASE+0x0032)
-#define WSA881X_PIN_CTL_OE (WSA881X_DIGITAL_BASE+0x0033)
-#define WSA881X_PIN_WDATA_IOPAD (WSA881X_DIGITAL_BASE+0x0034)
-#define WSA881X_PIN_STATUS (WSA881X_DIGITAL_BASE+0x0035)
-#define WSA881X_DIG_DEBUG_MODE (WSA881X_DIGITAL_BASE+0x0037)
-#define WSA881X_DIG_DEBUG_SEL (WSA881X_DIGITAL_BASE+0x0038)
-#define WSA881X_DIG_DEBUG_EN (WSA881X_DIGITAL_BASE+0x0039)
-#define WSA881X_SWR_HM_TEST1 (WSA881X_DIGITAL_BASE+0x003B)
-#define WSA881X_SWR_HM_TEST2 (WSA881X_DIGITAL_BASE+0x003C)
-#define WSA881X_TEMP_DETECT_DBG_CTL (WSA881X_DIGITAL_BASE+0x003D)
-#define WSA881X_TEMP_DEBUG_MSB (WSA881X_DIGITAL_BASE+0x003E)
-#define WSA881X_TEMP_DEBUG_LSB (WSA881X_DIGITAL_BASE+0x003F)
-#define WSA881X_SAMPLE_EDGE_SEL (WSA881X_DIGITAL_BASE+0x0044)
-#define WSA881X_IOPAD_CTL (WSA881X_DIGITAL_BASE+0x0045)
-#define WSA881X_SPARE_0 (WSA881X_DIGITAL_BASE+0x0050)
-#define WSA881X_SPARE_1 (WSA881X_DIGITAL_BASE+0x0051)
-#define WSA881X_SPARE_2 (WSA881X_DIGITAL_BASE+0x0052)
-#define WSA881X_OTP_REG_0 (WSA881X_DIGITAL_BASE+0x0080)
-#define WSA881X_OTP_REG_1 (WSA881X_DIGITAL_BASE+0x0081)
-#define WSA881X_OTP_REG_2 (WSA881X_DIGITAL_BASE+0x0082)
-#define WSA881X_OTP_REG_3 (WSA881X_DIGITAL_BASE+0x0083)
-#define WSA881X_OTP_REG_4 (WSA881X_DIGITAL_BASE+0x0084)
-#define WSA881X_OTP_REG_5 (WSA881X_DIGITAL_BASE+0x0085)
-#define WSA881X_OTP_REG_6 (WSA881X_DIGITAL_BASE+0x0086)
-#define WSA881X_OTP_REG_7 (WSA881X_DIGITAL_BASE+0x0087)
-#define WSA881X_OTP_REG_8 (WSA881X_DIGITAL_BASE+0x0088)
-#define WSA881X_OTP_REG_9 (WSA881X_DIGITAL_BASE+0x0089)
-#define WSA881X_OTP_REG_10 (WSA881X_DIGITAL_BASE+0x008A)
-#define WSA881X_OTP_REG_11 (WSA881X_DIGITAL_BASE+0x008B)
-#define WSA881X_OTP_REG_12 (WSA881X_DIGITAL_BASE+0x008C)
-#define WSA881X_OTP_REG_13 (WSA881X_DIGITAL_BASE+0x008D)
-#define WSA881X_OTP_REG_14 (WSA881X_DIGITAL_BASE+0x008E)
-#define WSA881X_OTP_REG_15 (WSA881X_DIGITAL_BASE+0x008F)
-#define WSA881X_OTP_REG_16 (WSA881X_DIGITAL_BASE+0x0090)
-#define WSA881X_OTP_REG_17 (WSA881X_DIGITAL_BASE+0x0091)
-#define WSA881X_OTP_REG_18 (WSA881X_DIGITAL_BASE+0x0092)
-#define WSA881X_OTP_REG_19 (WSA881X_DIGITAL_BASE+0x0093)
-#define WSA881X_OTP_REG_20 (WSA881X_DIGITAL_BASE+0x0094)
-#define WSA881X_OTP_REG_21 (WSA881X_DIGITAL_BASE+0x0095)
-#define WSA881X_OTP_REG_22 (WSA881X_DIGITAL_BASE+0x0096)
-#define WSA881X_OTP_REG_23 (WSA881X_DIGITAL_BASE+0x0097)
-#define WSA881X_OTP_REG_24 (WSA881X_DIGITAL_BASE+0x0098)
-#define WSA881X_OTP_REG_25 (WSA881X_DIGITAL_BASE+0x0099)
-#define WSA881X_OTP_REG_26 (WSA881X_DIGITAL_BASE+0x009A)
-#define WSA881X_OTP_REG_27 (WSA881X_DIGITAL_BASE+0x009B)
-#define WSA881X_OTP_REG_28 (WSA881X_DIGITAL_BASE+0x009C)
-#define WSA881X_OTP_REG_29 (WSA881X_DIGITAL_BASE+0x009D)
-#define WSA881X_OTP_REG_30 (WSA881X_DIGITAL_BASE+0x009E)
-#define WSA881X_OTP_REG_31 (WSA881X_DIGITAL_BASE+0x009F)
-#define WSA881X_OTP_REG_32 (WSA881X_DIGITAL_BASE+0x00A0)
-#define WSA881X_OTP_REG_33 (WSA881X_DIGITAL_BASE+0x00A1)
-#define WSA881X_OTP_REG_34 (WSA881X_DIGITAL_BASE+0x00A2)
-#define WSA881X_OTP_REG_35 (WSA881X_DIGITAL_BASE+0x00A3)
-#define WSA881X_OTP_REG_36 (WSA881X_DIGITAL_BASE+0x00A4)
-#define WSA881X_OTP_REG_37 (WSA881X_DIGITAL_BASE+0x00A5)
-#define WSA881X_OTP_REG_38 (WSA881X_DIGITAL_BASE+0x00A6)
-#define WSA881X_OTP_REG_39 (WSA881X_DIGITAL_BASE+0x00A7)
-#define WSA881X_OTP_REG_40 (WSA881X_DIGITAL_BASE+0x00A8)
-#define WSA881X_OTP_REG_41 (WSA881X_DIGITAL_BASE+0x00A9)
-#define WSA881X_OTP_REG_42 (WSA881X_DIGITAL_BASE+0x00AA)
-#define WSA881X_OTP_REG_43 (WSA881X_DIGITAL_BASE+0x00AB)
-#define WSA881X_OTP_REG_44 (WSA881X_DIGITAL_BASE+0x00AC)
-#define WSA881X_OTP_REG_45 (WSA881X_DIGITAL_BASE+0x00AD)
-#define WSA881X_OTP_REG_46 (WSA881X_DIGITAL_BASE+0x00AE)
-#define WSA881X_OTP_REG_47 (WSA881X_DIGITAL_BASE+0x00AF)
-#define WSA881X_OTP_REG_48 (WSA881X_DIGITAL_BASE+0x00B0)
-#define WSA881X_OTP_REG_49 (WSA881X_DIGITAL_BASE+0x00B1)
-#define WSA881X_OTP_REG_50 (WSA881X_DIGITAL_BASE+0x00B2)
-#define WSA881X_OTP_REG_51 (WSA881X_DIGITAL_BASE+0x00B3)
-#define WSA881X_OTP_REG_52 (WSA881X_DIGITAL_BASE+0x00B4)
-#define WSA881X_OTP_REG_53 (WSA881X_DIGITAL_BASE+0x00B5)
-#define WSA881X_OTP_REG_54 (WSA881X_DIGITAL_BASE+0x00B6)
-#define WSA881X_OTP_REG_55 (WSA881X_DIGITAL_BASE+0x00B7)
-#define WSA881X_OTP_REG_56 (WSA881X_DIGITAL_BASE+0x00B8)
-#define WSA881X_OTP_REG_57 (WSA881X_DIGITAL_BASE+0x00B9)
-#define WSA881X_OTP_REG_58 (WSA881X_DIGITAL_BASE+0x00BA)
-#define WSA881X_OTP_REG_59 (WSA881X_DIGITAL_BASE+0x00BB)
-#define WSA881X_OTP_REG_60 (WSA881X_DIGITAL_BASE+0x00BC)
-#define WSA881X_OTP_REG_61 (WSA881X_DIGITAL_BASE+0x00BD)
-#define WSA881X_OTP_REG_62 (WSA881X_DIGITAL_BASE+0x00BE)
-#define WSA881X_OTP_REG_63 (WSA881X_DIGITAL_BASE+0x00BF)
-/* Analog Register address space */
-#define WSA881X_BIAS_REF_CTRL (WSA881X_ANALOG_BASE+0x0000)
-#define WSA881X_BIAS_TEST (WSA881X_ANALOG_BASE+0x0001)
-#define WSA881X_BIAS_BIAS (WSA881X_ANALOG_BASE+0x0002)
-#define WSA881X_TEMP_OP (WSA881X_ANALOG_BASE+0x0003)
-#define WSA881X_TEMP_IREF_CTRL (WSA881X_ANALOG_BASE+0x0004)
-#define WSA881X_TEMP_ISENS_CTRL (WSA881X_ANALOG_BASE+0x0005)
-#define WSA881X_TEMP_CLK_CTRL (WSA881X_ANALOG_BASE+0x0006)
-#define WSA881X_TEMP_TEST (WSA881X_ANALOG_BASE+0x0007)
-#define WSA881X_TEMP_BIAS (WSA881X_ANALOG_BASE+0x0008)
-#define WSA881X_TEMP_ADC_CTRL (WSA881X_ANALOG_BASE+0x0009)
-#define WSA881X_TEMP_DOUT_MSB (WSA881X_ANALOG_BASE+0x000A)
-#define WSA881X_TEMP_DOUT_LSB (WSA881X_ANALOG_BASE+0x000B)
-#define WSA881X_ADC_EN_MODU_V (WSA881X_ANALOG_BASE+0x0010)
-#define WSA881X_ADC_EN_MODU_I (WSA881X_ANALOG_BASE+0x0011)
-#define WSA881X_ADC_EN_DET_TEST_V (WSA881X_ANALOG_BASE+0x0012)
-#define WSA881X_ADC_EN_DET_TEST_I (WSA881X_ANALOG_BASE+0x0013)
-#define WSA881X_ADC_SEL_IBIAS (WSA881X_ANALOG_BASE+0x0014)
-#define WSA881X_ADC_EN_SEL_IBIAS (WSA881X_ANALOG_BASE+0x0015)
-#define WSA881X_SPKR_DRV_EN (WSA881X_ANALOG_BASE+0x001A)
-#define WSA881X_SPKR_DRV_GAIN (WSA881X_ANALOG_BASE+0x001B)
-#define WSA881X_SPKR_DAC_CTL (WSA881X_ANALOG_BASE+0x001C)
-#define WSA881X_SPKR_DRV_DBG (WSA881X_ANALOG_BASE+0x001D)
-#define WSA881X_SPKR_PWRSTG_DBG (WSA881X_ANALOG_BASE+0x001E)
-#define WSA881X_SPKR_OCP_CTL (WSA881X_ANALOG_BASE+0x001F)
-#define WSA881X_SPKR_CLIP_CTL (WSA881X_ANALOG_BASE+0x0020)
-#define WSA881X_SPKR_BBM_CTL (WSA881X_ANALOG_BASE+0x0021)
-#define WSA881X_SPKR_MISC_CTL1 (WSA881X_ANALOG_BASE+0x0022)
-#define WSA881X_SPKR_MISC_CTL2 (WSA881X_ANALOG_BASE+0x0023)
-#define WSA881X_SPKR_BIAS_INT (WSA881X_ANALOG_BASE+0x0024)
-#define WSA881X_SPKR_PA_INT (WSA881X_ANALOG_BASE+0x0025)
-#define WSA881X_SPKR_BIAS_CAL (WSA881X_ANALOG_BASE+0x0026)
-#define WSA881X_SPKR_BIAS_PSRR (WSA881X_ANALOG_BASE+0x0027)
-#define WSA881X_SPKR_STATUS1 (WSA881X_ANALOG_BASE+0x0028)
-#define WSA881X_SPKR_STATUS2 (WSA881X_ANALOG_BASE+0x0029)
-#define WSA881X_BOOST_EN_CTL (WSA881X_ANALOG_BASE+0x002A)
-#define WSA881X_BOOST_CURRENT_LIMIT (WSA881X_ANALOG_BASE+0x002B)
-#define WSA881X_BOOST_PS_CTL (WSA881X_ANALOG_BASE+0x002C)
-#define WSA881X_BOOST_PRESET_OUT1 (WSA881X_ANALOG_BASE+0x002D)
-#define WSA881X_BOOST_PRESET_OUT2 (WSA881X_ANALOG_BASE+0x002E)
-#define WSA881X_BOOST_FORCE_OUT (WSA881X_ANALOG_BASE+0x002F)
-#define WSA881X_BOOST_LDO_PROG (WSA881X_ANALOG_BASE+0x0030)
-#define WSA881X_BOOST_SLOPE_COMP_ISENSE_FB (WSA881X_ANALOG_BASE+0x0031)
-#define WSA881X_BOOST_RON_CTL (WSA881X_ANALOG_BASE+0x0032)
-#define WSA881X_BOOST_LOOP_STABILITY (WSA881X_ANALOG_BASE+0x0033)
-#define WSA881X_BOOST_ZX_CTL (WSA881X_ANALOG_BASE+0x0034)
-#define WSA881X_BOOST_START_CTL (WSA881X_ANALOG_BASE+0x0035)
-#define WSA881X_BOOST_MISC1_CTL (WSA881X_ANALOG_BASE+0x0036)
-#define WSA881X_BOOST_MISC2_CTL (WSA881X_ANALOG_BASE+0x0037)
-#define WSA881X_BOOST_MISC3_CTL (WSA881X_ANALOG_BASE+0x0038)
-#define WSA881X_BOOST_ATEST_CTL (WSA881X_ANALOG_BASE+0x0039)
-#define WSA881X_SPKR_PROT_FE_GAIN (WSA881X_ANALOG_BASE+0x003A)
-#define WSA881X_SPKR_PROT_FE_CM_LDO_SET (WSA881X_ANALOG_BASE+0x003B)
-#define WSA881X_SPKR_PROT_FE_ISENSE_BIAS_SET1 (WSA881X_ANALOG_BASE+0x003C)
-#define WSA881X_SPKR_PROT_FE_ISENSE_BIAS_SET2 (WSA881X_ANALOG_BASE+0x003D)
-#define WSA881X_SPKR_PROT_ATEST1 (WSA881X_ANALOG_BASE+0x003E)
-#define WSA881X_SPKR_PROT_ATEST2 (WSA881X_ANALOG_BASE+0x003F)
-#define WSA881X_SPKR_PROT_FE_VSENSE_VCM (WSA881X_ANALOG_BASE+0x0040)
-#define WSA881X_SPKR_PROT_FE_VSENSE_BIAS_SET1 (WSA881X_ANALOG_BASE+0x0041)
-#define WSA881X_BONGO_RESRV_REG1 (WSA881X_ANALOG_BASE+0x0042)
-#define WSA881X_BONGO_RESRV_REG2 (WSA881X_ANALOG_BASE+0x0043)
-#define WSA881X_SPKR_PROT_SAR (WSA881X_ANALOG_BASE+0x0044)
-#define WSA881X_SPKR_STATUS3 (WSA881X_ANALOG_BASE+0x0045)
-
-#define WSA881X_NUM_REGISTERS (WSA881X_SPKR_STATUS3+1)
-#define WSA881X_MAX_REGISTER (WSA881X_NUM_REGISTERS-1)
-#define WSA881X_CACHE_SIZE WSA881X_NUM_REGISTERS
-#endif /* WSA881X_REGISTERS_H */
diff --git a/sound/soc/codecs/wsa881x-regmap-analog.c b/sound/soc/codecs/wsa881x-regmap-analog.c
deleted file mode 100644
index 2bc3c9e..0000000
--- a/sound/soc/codecs/wsa881x-regmap-analog.c
+++ /dev/null
@@ -1,499 +0,0 @@
-/*
- * Copyright (c) 2015, 2017 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/regmap.h>
-#include <linux/device.h>
-#include "wsa881x-registers-analog.h"
-#include "wsa881x-analog.h"
-
-struct reg_default wsa881x_ana_reg_defaults[] = {
- {WSA881X_CHIP_ID0, 0x00},
- {WSA881X_CHIP_ID1, 0x00},
- {WSA881X_CHIP_ID2, 0x00},
- {WSA881X_CHIP_ID3, 0x02},
- {WSA881X_BUS_ID, 0x00},
- {WSA881X_CDC_RST_CTL, 0x00},
- {WSA881X_CDC_TOP_CLK_CTL, 0x03},
- {WSA881X_CDC_ANA_CLK_CTL, 0x00},
- {WSA881X_CDC_DIG_CLK_CTL, 0x00},
- {WSA881X_CLOCK_CONFIG, 0x00},
- {WSA881X_ANA_CTL, 0x08},
- {WSA881X_SWR_RESET_EN, 0x00},
- {WSA881X_TEMP_DETECT_CTL, 0x01},
- {WSA881X_TEMP_MSB, 0x00},
- {WSA881X_TEMP_LSB, 0x00},
- {WSA881X_TEMP_CONFIG0, 0x00},
- {WSA881X_TEMP_CONFIG1, 0x00},
- {WSA881X_CDC_CLIP_CTL, 0x03},
- {WSA881X_SDM_PDM9_LSB, 0x00},
- {WSA881X_SDM_PDM9_MSB, 0x00},
- {WSA881X_CDC_RX_CTL, 0x7E},
- {WSA881X_DEM_BYPASS_DATA0, 0x00},
- {WSA881X_DEM_BYPASS_DATA1, 0x00},
- {WSA881X_DEM_BYPASS_DATA2, 0x00},
- {WSA881X_DEM_BYPASS_DATA3, 0x00},
- {WSA881X_OTP_CTRL0, 0x00},
- {WSA881X_OTP_CTRL1, 0x00},
- {WSA881X_HDRIVE_CTL_GROUP1, 0x00},
- {WSA881X_INTR_MODE, 0x00},
- {WSA881X_INTR_MASK, 0x1F},
- {WSA881X_INTR_STATUS, 0x00},
- {WSA881X_INTR_CLEAR, 0x00},
- {WSA881X_INTR_LEVEL, 0x00},
- {WSA881X_INTR_SET, 0x00},
- {WSA881X_INTR_TEST, 0x00},
- {WSA881X_PDM_TEST_MODE, 0x00},
- {WSA881X_ATE_TEST_MODE, 0x00},
- {WSA881X_PIN_CTL_MODE, 0x00},
- {WSA881X_PIN_CTL_OE, 0x00},
- {WSA881X_PIN_WDATA_IOPAD, 0x00},
- {WSA881X_PIN_STATUS, 0x00},
- {WSA881X_DIG_DEBUG_MODE, 0x00},
- {WSA881X_DIG_DEBUG_SEL, 0x00},
- {WSA881X_DIG_DEBUG_EN, 0x00},
- {WSA881X_SWR_HM_TEST1, 0x08},
- {WSA881X_SWR_HM_TEST2, 0x00},
- {WSA881X_TEMP_DETECT_DBG_CTL, 0x00},
- {WSA881X_TEMP_DEBUG_MSB, 0x00},
- {WSA881X_TEMP_DEBUG_LSB, 0x00},
- {WSA881X_SAMPLE_EDGE_SEL, 0x0C},
- {WSA881X_SPARE_0, 0x00},
- {WSA881X_SPARE_1, 0x00},
- {WSA881X_SPARE_2, 0x00},
- {WSA881X_OTP_REG_0, 0x01},
- {WSA881X_OTP_REG_1, 0xFF},
- {WSA881X_OTP_REG_2, 0xC0},
- {WSA881X_OTP_REG_3, 0xFF},
- {WSA881X_OTP_REG_4, 0xC0},
- {WSA881X_OTP_REG_5, 0xFF},
- {WSA881X_OTP_REG_6, 0xFF},
- {WSA881X_OTP_REG_7, 0xFF},
- {WSA881X_OTP_REG_8, 0xFF},
- {WSA881X_OTP_REG_9, 0xFF},
- {WSA881X_OTP_REG_10, 0xFF},
- {WSA881X_OTP_REG_11, 0xFF},
- {WSA881X_OTP_REG_12, 0xFF},
- {WSA881X_OTP_REG_13, 0xFF},
- {WSA881X_OTP_REG_14, 0xFF},
- {WSA881X_OTP_REG_15, 0xFF},
- {WSA881X_OTP_REG_16, 0xFF},
- {WSA881X_OTP_REG_17, 0xFF},
- {WSA881X_OTP_REG_18, 0xFF},
- {WSA881X_OTP_REG_19, 0xFF},
- {WSA881X_OTP_REG_20, 0xFF},
- {WSA881X_OTP_REG_21, 0xFF},
- {WSA881X_OTP_REG_22, 0xFF},
- {WSA881X_OTP_REG_23, 0xFF},
- {WSA881X_OTP_REG_24, 0x03},
- {WSA881X_OTP_REG_25, 0x01},
- {WSA881X_OTP_REG_26, 0x03},
- {WSA881X_OTP_REG_27, 0x11},
- {WSA881X_OTP_REG_28, 0xFF},
- {WSA881X_OTP_REG_29, 0xFF},
- {WSA881X_OTP_REG_30, 0xFF},
- {WSA881X_OTP_REG_31, 0xFF},
- {WSA881X_OTP_REG_63, 0x40},
- /* WSA881x Analog registers */
- {WSA881X_BIAS_REF_CTRL, 0x6C},
- {WSA881X_BIAS_TEST, 0x16},
- {WSA881X_BIAS_BIAS, 0xF0},
- {WSA881X_TEMP_OP, 0x00},
- {WSA881X_TEMP_IREF_CTRL, 0x56},
- {WSA881X_TEMP_ISENS_CTRL, 0x47},
- {WSA881X_TEMP_CLK_CTRL, 0x87},
- {WSA881X_TEMP_TEST, 0x00},
- {WSA881X_TEMP_BIAS, 0x51},
- {WSA881X_TEMP_ADC_CTRL, 0x00},
- {WSA881X_TEMP_DOUT_MSB, 0x00},
- {WSA881X_TEMP_DOUT_LSB, 0x00},
- {WSA881X_ADC_EN_MODU_V, 0x00},
- {WSA881X_ADC_EN_MODU_I, 0x00},
- {WSA881X_ADC_EN_DET_TEST_V, 0x00},
- {WSA881X_ADC_EN_DET_TEST_I, 0x00},
- {WSA881X_ADC_SEL_IBIAS, 0x25},
- {WSA881X_ADC_EN_SEL_IBIAS, 0x10},
- {WSA881X_SPKR_DRV_EN, 0x74},
- {WSA881X_SPKR_DRV_GAIN, 0x01},
- {WSA881X_SPKR_DAC_CTL, 0x40},
- {WSA881X_SPKR_DRV_DBG, 0x15},
- {WSA881X_SPKR_PWRSTG_DBG, 0x00},
- {WSA881X_SPKR_OCP_CTL, 0xD4},
- {WSA881X_SPKR_CLIP_CTL, 0x90},
- {WSA881X_SPKR_BBM_CTL, 0x00},
- {WSA881X_SPKR_MISC_CTL1, 0x80},
- {WSA881X_SPKR_MISC_CTL2, 0x00},
- {WSA881X_SPKR_BIAS_INT, 0x56},
- {WSA881X_SPKR_PA_INT, 0x54},
- {WSA881X_SPKR_BIAS_CAL, 0xAC},
- {WSA881X_SPKR_BIAS_PSRR, 0x54},
- {WSA881X_SPKR_STATUS1, 0x00},
- {WSA881X_SPKR_STATUS2, 0x00},
- {WSA881X_BOOST_EN_CTL, 0x18},
- {WSA881X_BOOST_CURRENT_LIMIT, 0x7A},
- {WSA881X_BOOST_PS_CTL, 0xC0},
- {WSA881X_BOOST_PRESET_OUT1, 0x77},
- {WSA881X_BOOST_PRESET_OUT2, 0x70},
- {WSA881X_BOOST_FORCE_OUT, 0x0E},
- {WSA881X_BOOST_LDO_PROG, 0x16},
- {WSA881X_BOOST_SLOPE_COMP_ISENSE_FB, 0x71},
- {WSA881X_BOOST_RON_CTL, 0x0F},
- {WSA881X_BOOST_LOOP_STABILITY, 0xAD},
- {WSA881X_BOOST_ZX_CTL, 0x34},
- {WSA881X_BOOST_START_CTL, 0x23},
- {WSA881X_BOOST_MISC1_CTL, 0x80},
- {WSA881X_BOOST_MISC2_CTL, 0x00},
- {WSA881X_BOOST_MISC3_CTL, 0x00},
- {WSA881X_BOOST_ATEST_CTL, 0x00},
- {WSA881X_SPKR_PROT_FE_GAIN, 0x46},
- {WSA881X_SPKR_PROT_FE_CM_LDO_SET, 0x3B},
- {WSA881X_SPKR_PROT_FE_ISENSE_BIAS_SET1, 0x8D},
- {WSA881X_SPKR_PROT_FE_ISENSE_BIAS_SET2, 0x8D},
- {WSA881X_SPKR_PROT_ATEST1, 0x01},
- {WSA881X_SPKR_PROT_ATEST2, 0x00},
- {WSA881X_SPKR_PROT_FE_VSENSE_VCM, 0x8D},
- {WSA881X_SPKR_PROT_FE_VSENSE_BIAS_SET1, 0x4D},
- {WSA881X_BONGO_RESRV_REG1, 0x00},
- {WSA881X_BONGO_RESRV_REG2, 0x00},
- {WSA881X_SPKR_PROT_SAR, 0x00},
- {WSA881X_SPKR_STATUS3, 0x00},
-};
-
-struct reg_default wsa881x_ana_reg_defaults_0[] = {
- {WSA881X_CHIP_ID0, 0x00},
- {WSA881X_CHIP_ID1, 0x00},
- {WSA881X_CHIP_ID2, 0x00},
- {WSA881X_CHIP_ID3, 0x02},
- {WSA881X_BUS_ID, 0x00},
- {WSA881X_CDC_RST_CTL, 0x00},
- {WSA881X_CDC_TOP_CLK_CTL, 0x03},
- {WSA881X_CDC_ANA_CLK_CTL, 0x00},
- {WSA881X_CDC_DIG_CLK_CTL, 0x00},
- {WSA881X_CLOCK_CONFIG, 0x00},
- {WSA881X_ANA_CTL, 0x08},
- {WSA881X_SWR_RESET_EN, 0x00},
- {WSA881X_TEMP_DETECT_CTL, 0x01},
- {WSA881X_TEMP_MSB, 0x00},
- {WSA881X_TEMP_LSB, 0x00},
- {WSA881X_TEMP_CONFIG0, 0x00},
- {WSA881X_TEMP_CONFIG1, 0x00},
- {WSA881X_CDC_CLIP_CTL, 0x03},
- {WSA881X_SDM_PDM9_LSB, 0x00},
- {WSA881X_SDM_PDM9_MSB, 0x00},
- {WSA881X_CDC_RX_CTL, 0x7E},
- {WSA881X_DEM_BYPASS_DATA0, 0x00},
- {WSA881X_DEM_BYPASS_DATA1, 0x00},
- {WSA881X_DEM_BYPASS_DATA2, 0x00},
- {WSA881X_DEM_BYPASS_DATA3, 0x00},
- {WSA881X_OTP_CTRL0, 0x00},
- {WSA881X_OTP_CTRL1, 0x00},
- {WSA881X_HDRIVE_CTL_GROUP1, 0x00},
- {WSA881X_INTR_MODE, 0x00},
- {WSA881X_INTR_MASK, 0x1F},
- {WSA881X_INTR_STATUS, 0x00},
- {WSA881X_INTR_CLEAR, 0x00},
- {WSA881X_INTR_LEVEL, 0x00},
- {WSA881X_INTR_SET, 0x00},
- {WSA881X_INTR_TEST, 0x00},
- {WSA881X_PDM_TEST_MODE, 0x00},
- {WSA881X_ATE_TEST_MODE, 0x00},
- {WSA881X_PIN_CTL_MODE, 0x00},
- {WSA881X_PIN_CTL_OE, 0x00},
- {WSA881X_PIN_WDATA_IOPAD, 0x00},
- {WSA881X_PIN_STATUS, 0x00},
- {WSA881X_DIG_DEBUG_MODE, 0x00},
- {WSA881X_DIG_DEBUG_SEL, 0x00},
- {WSA881X_DIG_DEBUG_EN, 0x00},
- {WSA881X_SWR_HM_TEST1, 0x08},
- {WSA881X_SWR_HM_TEST2, 0x00},
- {WSA881X_TEMP_DETECT_DBG_CTL, 0x00},
- {WSA881X_TEMP_DEBUG_MSB, 0x00},
- {WSA881X_TEMP_DEBUG_LSB, 0x00},
- {WSA881X_SAMPLE_EDGE_SEL, 0x0C},
- {WSA881X_SPARE_0, 0x00},
- {WSA881X_SPARE_1, 0x00},
- {WSA881X_SPARE_2, 0x00},
- {WSA881X_OTP_REG_0, 0x01},
- {WSA881X_OTP_REG_1, 0xFF},
- {WSA881X_OTP_REG_2, 0xC0},
- {WSA881X_OTP_REG_3, 0xFF},
- {WSA881X_OTP_REG_4, 0xC0},
- {WSA881X_OTP_REG_5, 0xFF},
- {WSA881X_OTP_REG_6, 0xFF},
- {WSA881X_OTP_REG_7, 0xFF},
- {WSA881X_OTP_REG_8, 0xFF},
- {WSA881X_OTP_REG_9, 0xFF},
- {WSA881X_OTP_REG_10, 0xFF},
- {WSA881X_OTP_REG_11, 0xFF},
- {WSA881X_OTP_REG_12, 0xFF},
- {WSA881X_OTP_REG_13, 0xFF},
- {WSA881X_OTP_REG_14, 0xFF},
- {WSA881X_OTP_REG_15, 0xFF},
- {WSA881X_OTP_REG_16, 0xFF},
- {WSA881X_OTP_REG_17, 0xFF},
- {WSA881X_OTP_REG_18, 0xFF},
- {WSA881X_OTP_REG_19, 0xFF},
- {WSA881X_OTP_REG_20, 0xFF},
- {WSA881X_OTP_REG_21, 0xFF},
- {WSA881X_OTP_REG_22, 0xFF},
- {WSA881X_OTP_REG_23, 0xFF},
- {WSA881X_OTP_REG_24, 0x03},
- {WSA881X_OTP_REG_25, 0x01},
- {WSA881X_OTP_REG_26, 0x03},
- {WSA881X_OTP_REG_27, 0x11},
- {WSA881X_OTP_REG_28, 0xFF},
- {WSA881X_OTP_REG_29, 0xFF},
- {WSA881X_OTP_REG_30, 0xFF},
- {WSA881X_OTP_REG_31, 0xFF},
- {WSA881X_OTP_REG_63, 0x40},
-};
-
-struct reg_default wsa881x_ana_reg_defaults_1[] = {
- {WSA881X_BIAS_REF_CTRL - WSA881X_ANALOG_BASE, 0x6C},
- {WSA881X_BIAS_TEST - WSA881X_ANALOG_BASE, 0x16},
- {WSA881X_BIAS_BIAS - WSA881X_ANALOG_BASE, 0xF0},
- {WSA881X_TEMP_OP - WSA881X_ANALOG_BASE, 0x00},
- {WSA881X_TEMP_IREF_CTRL - WSA881X_ANALOG_BASE, 0x56},
- {WSA881X_TEMP_ISENS_CTRL - WSA881X_ANALOG_BASE, 0x47},
- {WSA881X_TEMP_CLK_CTRL - WSA881X_ANALOG_BASE, 0x87},
- {WSA881X_TEMP_TEST - WSA881X_ANALOG_BASE, 0x00},
- {WSA881X_TEMP_BIAS - WSA881X_ANALOG_BASE, 0x51},
- {WSA881X_TEMP_ADC_CTRL - WSA881X_ANALOG_BASE, 0x00},
- {WSA881X_TEMP_DOUT_MSB - WSA881X_ANALOG_BASE, 0x00},
- {WSA881X_TEMP_DOUT_LSB - WSA881X_ANALOG_BASE, 0x00},
- {WSA881X_ADC_EN_MODU_V - WSA881X_ANALOG_BASE, 0x00},
- {WSA881X_ADC_EN_MODU_I - WSA881X_ANALOG_BASE, 0x00},
- {WSA881X_ADC_EN_DET_TEST_V - WSA881X_ANALOG_BASE, 0x00},
- {WSA881X_ADC_EN_DET_TEST_I - WSA881X_ANALOG_BASE, 0x00},
- {WSA881X_ADC_SEL_IBIAS - WSA881X_ANALOG_BASE, 0x25},
- {WSA881X_ADC_EN_SEL_IBIAS - WSA881X_ANALOG_BASE, 0x10},
- {WSA881X_SPKR_DRV_EN - WSA881X_ANALOG_BASE, 0x74},
- {WSA881X_SPKR_DRV_GAIN - WSA881X_ANALOG_BASE, 0x01},
- {WSA881X_SPKR_DAC_CTL - WSA881X_ANALOG_BASE, 0x40},
- {WSA881X_SPKR_DRV_DBG - WSA881X_ANALOG_BASE, 0x15},
- {WSA881X_SPKR_PWRSTG_DBG - WSA881X_ANALOG_BASE, 0x00},
- {WSA881X_SPKR_OCP_CTL - WSA881X_ANALOG_BASE, 0xD4},
- {WSA881X_SPKR_CLIP_CTL - WSA881X_ANALOG_BASE, 0x90},
- {WSA881X_SPKR_BBM_CTL - WSA881X_ANALOG_BASE, 0x00},
- {WSA881X_SPKR_MISC_CTL1 - WSA881X_ANALOG_BASE, 0x80},
- {WSA881X_SPKR_MISC_CTL2 - WSA881X_ANALOG_BASE, 0x00},
- {WSA881X_SPKR_BIAS_INT - WSA881X_ANALOG_BASE, 0x56},
- {WSA881X_SPKR_PA_INT - WSA881X_ANALOG_BASE, 0x54},
- {WSA881X_SPKR_BIAS_CAL - WSA881X_ANALOG_BASE, 0xAC},
- {WSA881X_SPKR_BIAS_PSRR - WSA881X_ANALOG_BASE, 0x54},
- {WSA881X_SPKR_STATUS1 - WSA881X_ANALOG_BASE, 0x00},
- {WSA881X_SPKR_STATUS2 - WSA881X_ANALOG_BASE, 0x00},
- {WSA881X_BOOST_EN_CTL - WSA881X_ANALOG_BASE, 0x18},
- {WSA881X_BOOST_CURRENT_LIMIT - WSA881X_ANALOG_BASE, 0x7A},
- {WSA881X_BOOST_PS_CTL - WSA881X_ANALOG_BASE, 0xC0},
- {WSA881X_BOOST_PRESET_OUT1 - WSA881X_ANALOG_BASE, 0x77},
- {WSA881X_BOOST_PRESET_OUT2 - WSA881X_ANALOG_BASE, 0x70},
- {WSA881X_BOOST_FORCE_OUT - WSA881X_ANALOG_BASE, 0x0E},
- {WSA881X_BOOST_LDO_PROG - WSA881X_ANALOG_BASE, 0x16},
- {WSA881X_BOOST_SLOPE_COMP_ISENSE_FB - WSA881X_ANALOG_BASE, 0x71},
- {WSA881X_BOOST_RON_CTL - WSA881X_ANALOG_BASE, 0x0F},
- {WSA881X_BOOST_LOOP_STABILITY - WSA881X_ANALOG_BASE, 0xAD},
- {WSA881X_BOOST_ZX_CTL - WSA881X_ANALOG_BASE, 0x34},
- {WSA881X_BOOST_START_CTL - WSA881X_ANALOG_BASE, 0x23},
- {WSA881X_BOOST_MISC1_CTL - WSA881X_ANALOG_BASE, 0x80},
- {WSA881X_BOOST_MISC2_CTL - WSA881X_ANALOG_BASE, 0x00},
- {WSA881X_BOOST_MISC3_CTL - WSA881X_ANALOG_BASE, 0x00},
- {WSA881X_BOOST_ATEST_CTL - WSA881X_ANALOG_BASE, 0x00},
- {WSA881X_SPKR_PROT_FE_GAIN - WSA881X_ANALOG_BASE, 0x46},
- {WSA881X_SPKR_PROT_FE_CM_LDO_SET - WSA881X_ANALOG_BASE, 0x3B},
- {WSA881X_SPKR_PROT_FE_ISENSE_BIAS_SET1 - WSA881X_ANALOG_BASE, 0x8D},
- {WSA881X_SPKR_PROT_FE_ISENSE_BIAS_SET2 - WSA881X_ANALOG_BASE, 0x8D},
- {WSA881X_SPKR_PROT_ATEST1 - WSA881X_ANALOG_BASE, 0x01},
- {WSA881X_SPKR_PROT_ATEST2 - WSA881X_ANALOG_BASE, 0x00},
- {WSA881X_SPKR_PROT_FE_VSENSE_VCM - WSA881X_ANALOG_BASE, 0x8D},
- {WSA881X_SPKR_PROT_FE_VSENSE_BIAS_SET1 - WSA881X_ANALOG_BASE, 0x4D},
- {WSA881X_BONGO_RESRV_REG1 - WSA881X_ANALOG_BASE, 0x00},
- {WSA881X_BONGO_RESRV_REG2 - WSA881X_ANALOG_BASE, 0x00},
- {WSA881X_SPKR_PROT_SAR - WSA881X_ANALOG_BASE, 0x00},
- {WSA881X_SPKR_STATUS3 - WSA881X_ANALOG_BASE, 0x00},
-};
-
-struct reg_default wsa881x_rev_2_0_dig[] = {
- {WSA881X_RESET_CTL, 0x00},
- {WSA881X_TADC_VALUE_CTL, 0x01},
- {WSA881X_INTR_MASK, 0x1B},
- {WSA881X_IOPAD_CTL, 0x00},
- {WSA881X_OTP_REG_28, 0x3F},
- {WSA881X_OTP_REG_29, 0x3F},
- {WSA881X_OTP_REG_30, 0x01},
- {WSA881X_OTP_REG_31, 0x01},
-};
-
-struct reg_default wsa881x_rev_2_0_ana[] = {
- {WSA881X_TEMP_ADC_CTRL, 0x03},
- {WSA881X_ADC_SEL_IBIAS, 0x45},
- {WSA881X_SPKR_DRV_GAIN, 0xC1},
- {WSA881X_SPKR_DAC_CTL, 0x42},
- {WSA881X_SPKR_BBM_CTL, 0x02},
- {WSA881X_SPKR_MISC_CTL1, 0x40},
- {WSA881X_SPKR_MISC_CTL2, 0x07},
- {WSA881X_SPKR_BIAS_INT, 0x5F},
- {WSA881X_SPKR_BIAS_PSRR, 0x44},
- {WSA881X_BOOST_PS_CTL, 0xA0},
- {WSA881X_BOOST_PRESET_OUT1, 0xB7},
- {WSA881X_BOOST_LOOP_STABILITY, 0x8D},
- {WSA881X_SPKR_PROT_ATEST2, 0x02},
- {WSA881X_BONGO_RESRV_REG1, 0x5E},
- {WSA881X_BONGO_RESRV_REG2, 0x07},
-};
-
-struct reg_default wsa881x_rev_2_0_regmap_ana[] = {
- {WSA881X_TEMP_ADC_CTRL - WSA881X_ANALOG_BASE, 0x03},
- {WSA881X_ADC_SEL_IBIAS - WSA881X_ANALOG_BASE, 0x45},
- {WSA881X_SPKR_DRV_GAIN - WSA881X_ANALOG_BASE, 0xC1},
- {WSA881X_SPKR_DAC_CTL - WSA881X_ANALOG_BASE, 0x42},
- {WSA881X_SPKR_BBM_CTL - WSA881X_ANALOG_BASE, 0x02},
- {WSA881X_SPKR_MISC_CTL1 - WSA881X_ANALOG_BASE, 0x40},
- {WSA881X_SPKR_MISC_CTL2 - WSA881X_ANALOG_BASE, 0x07},
- {WSA881X_SPKR_BIAS_INT - WSA881X_ANALOG_BASE, 0x5F},
- {WSA881X_SPKR_BIAS_PSRR - WSA881X_ANALOG_BASE, 0x44},
- {WSA881X_BOOST_PS_CTL - WSA881X_ANALOG_BASE, 0xA0},
- {WSA881X_BOOST_PRESET_OUT1 - WSA881X_ANALOG_BASE, 0xB7},
- {WSA881X_BOOST_LOOP_STABILITY - WSA881X_ANALOG_BASE, 0x8D},
- {WSA881X_SPKR_PROT_ATEST2 - WSA881X_ANALOG_BASE, 0x02},
- {WSA881X_BONGO_RESRV_REG1 - WSA881X_ANALOG_BASE, 0x5E},
- {WSA881X_BONGO_RESRV_REG2 - WSA881X_ANALOG_BASE, 0x07},
-};
-
-/**
- * wsa881x_update_reg_defaults_2_0 - update default values of regs for v2.0
- *
- * WSA881x v2.0 has different default values for certain analog and digital
- * registers compared to v1.x. Therefore, update the values of these registers
- * with the values from tables defined above for v2.0.
- */
-void wsa881x_update_reg_defaults_2_0(void)
-{
- int i, j;
-
- for (i = 0; i < ARRAY_SIZE(wsa881x_rev_2_0_dig); i++) {
- for (j = 0; j < ARRAY_SIZE(wsa881x_ana_reg_defaults); j++)
- if (wsa881x_ana_reg_defaults[j].reg ==
- wsa881x_rev_2_0_dig[i].reg)
- wsa881x_ana_reg_defaults[j].def =
- wsa881x_rev_2_0_dig[i].def;
- }
- for (i = 0; i < ARRAY_SIZE(wsa881x_rev_2_0_ana); i++) {
- for (j = 0; j < ARRAY_SIZE(wsa881x_ana_reg_defaults); j++)
- if (wsa881x_ana_reg_defaults[j].reg ==
- wsa881x_rev_2_0_ana[i].reg)
- wsa881x_ana_reg_defaults[j].def =
- wsa881x_rev_2_0_ana[i].def;
- }
-}
-EXPORT_SYMBOL(wsa881x_update_reg_defaults_2_0);
-
-/**
- * wsa881x_update_regmap_2_0 - update regmap framework with new tables
- * @regmap: pointer to WSA881x regmap structure
- * @flag: indicates digital or analog WSA881x slave
- *
- * WSA881x v2.0 has some new registers for both analog and digital slaves.
- * Update the regmap framework with all the new registers.
- */
-void wsa881x_update_regmap_2_0(struct regmap *regmap, int flag)
-{
- u16 ret = 0;
-
- switch (flag) {
- case WSA881X_DIGITAL_SLAVE:
- ret = regmap_register_patch(regmap, wsa881x_rev_2_0_dig,
- ARRAY_SIZE(wsa881x_rev_2_0_dig));
- break;
- case WSA881X_ANALOG_SLAVE:
- ret = regmap_register_patch(regmap, wsa881x_rev_2_0_ana,
- ARRAY_SIZE(wsa881x_rev_2_0_ana));
- break;
- default:
- pr_debug("%s: unknown version", __func__);
- ret = -EINVAL;
- break;
- }
- if (ret)
- pr_err("%s: Failed to update regmap defaults ret= %d\n",
- __func__, ret);
-}
-EXPORT_SYMBOL(wsa881x_update_regmap_2_0);
-
-static bool wsa881x_readable_register(struct device *dev, unsigned int reg)
-{
- return wsa881x_ana_reg_readable[reg];
-}
-
-static bool wsa881x_volatile_register(struct device *dev, unsigned int reg)
-{
- switch (reg) {
- case WSA881X_CHIP_ID0:
- case WSA881X_CHIP_ID1:
- case WSA881X_CHIP_ID2:
- case WSA881X_CHIP_ID3:
- case WSA881X_BUS_ID:
- case WSA881X_TEMP_MSB:
- case WSA881X_TEMP_LSB:
- case WSA881X_SDM_PDM9_LSB:
- case WSA881X_SDM_PDM9_MSB:
- case WSA881X_OTP_REG_0:
- case WSA881X_OTP_REG_1:
- case WSA881X_OTP_REG_2:
- case WSA881X_OTP_REG_3:
- case WSA881X_OTP_REG_4:
- case WSA881X_OTP_REG_5:
- case WSA881X_OTP_REG_31:
- case WSA881X_TEMP_DOUT_MSB:
- case WSA881X_TEMP_DOUT_LSB:
- case WSA881X_TEMP_OP:
- case WSA881X_OTP_CTRL1:
- case WSA881X_INTR_STATUS:
- case WSA881X_ATE_TEST_MODE:
- case WSA881X_PIN_STATUS:
- case WSA881X_SWR_HM_TEST2:
- case WSA881X_SPKR_STATUS1:
- case WSA881X_SPKR_STATUS2:
- case WSA881X_SPKR_STATUS3:
- case WSA881X_SPKR_PROT_SAR:
- return true;
- default:
- return false;
- }
-}
-
-struct regmap_config wsa881x_ana_regmap_config[] = {
-{
- .reg_bits = 8,
- .val_bits = 8,
- .cache_type = REGCACHE_NONE,
- .reg_defaults = wsa881x_ana_reg_defaults_0,
- .num_reg_defaults = ARRAY_SIZE(wsa881x_ana_reg_defaults_0),
- .max_register = WSA881X_MAX_REGISTER,
- .volatile_reg = wsa881x_volatile_register,
- .readable_reg = wsa881x_readable_register,
- .reg_format_endian = REGMAP_ENDIAN_NATIVE,
- .val_format_endian = REGMAP_ENDIAN_NATIVE,
-},
-{
- .reg_bits = 8,
- .val_bits = 8,
- .cache_type = REGCACHE_NONE,
- .reg_defaults = wsa881x_ana_reg_defaults_1,
- .num_reg_defaults = ARRAY_SIZE(wsa881x_ana_reg_defaults_1),
- .max_register = WSA881X_MAX_REGISTER,
- .volatile_reg = wsa881x_volatile_register,
- .readable_reg = wsa881x_readable_register,
- .reg_format_endian = REGMAP_ENDIAN_NATIVE,
- .val_format_endian = REGMAP_ENDIAN_NATIVE,
-}
-};
diff --git a/sound/soc/codecs/wsa881x-tables-analog.c b/sound/soc/codecs/wsa881x-tables-analog.c
deleted file mode 100644
index 061ed6f..0000000
--- a/sound/soc/codecs/wsa881x-tables-analog.c
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/regmap.h>
-#include <linux/device.h>
-#include "wsa881x-registers-analog.h"
-
-const u8 wsa881x_ana_reg_readable[WSA881X_CACHE_SIZE] = {
- [WSA881X_CHIP_ID0] = 1,
- [WSA881X_CHIP_ID1] = 1,
- [WSA881X_CHIP_ID2] = 1,
- [WSA881X_CHIP_ID3] = 1,
- [WSA881X_BUS_ID] = 1,
- [WSA881X_CDC_RST_CTL] = 1,
- [WSA881X_CDC_TOP_CLK_CTL] = 1,
- [WSA881X_CDC_ANA_CLK_CTL] = 1,
- [WSA881X_CDC_DIG_CLK_CTL] = 1,
- [WSA881X_CLOCK_CONFIG] = 1,
- [WSA881X_ANA_CTL] = 1,
- [WSA881X_SWR_RESET_EN] = 1,
- [WSA881X_RESET_CTL] = 1,
- [WSA881X_TADC_VALUE_CTL] = 1,
- [WSA881X_TEMP_DETECT_CTL] = 1,
- [WSA881X_TEMP_MSB] = 1,
- [WSA881X_TEMP_LSB] = 1,
- [WSA881X_TEMP_CONFIG0] = 1,
- [WSA881X_TEMP_CONFIG1] = 1,
- [WSA881X_CDC_CLIP_CTL] = 1,
- [WSA881X_SDM_PDM9_LSB] = 1,
- [WSA881X_SDM_PDM9_MSB] = 1,
- [WSA881X_CDC_RX_CTL] = 1,
- [WSA881X_DEM_BYPASS_DATA0] = 1,
- [WSA881X_DEM_BYPASS_DATA1] = 1,
- [WSA881X_DEM_BYPASS_DATA2] = 1,
- [WSA881X_DEM_BYPASS_DATA3] = 1,
- [WSA881X_OTP_CTRL0] = 1,
- [WSA881X_OTP_CTRL1] = 1,
- [WSA881X_HDRIVE_CTL_GROUP1] = 1,
- [WSA881X_INTR_MODE] = 1,
- [WSA881X_INTR_MASK] = 1,
- [WSA881X_INTR_STATUS] = 1,
- [WSA881X_INTR_CLEAR] = 1,
- [WSA881X_INTR_LEVEL] = 1,
- [WSA881X_INTR_SET] = 1,
- [WSA881X_INTR_TEST] = 1,
- [WSA881X_PDM_TEST_MODE] = 1,
- [WSA881X_ATE_TEST_MODE] = 1,
- [WSA881X_PIN_CTL_MODE] = 1,
- [WSA881X_PIN_CTL_OE] = 1,
- [WSA881X_PIN_WDATA_IOPAD] = 1,
- [WSA881X_PIN_STATUS] = 1,
- [WSA881X_DIG_DEBUG_MODE] = 1,
- [WSA881X_DIG_DEBUG_SEL] = 1,
- [WSA881X_DIG_DEBUG_EN] = 1,
- [WSA881X_SWR_HM_TEST1] = 1,
- [WSA881X_SWR_HM_TEST2] = 1,
- [WSA881X_TEMP_DETECT_DBG_CTL] = 1,
- [WSA881X_TEMP_DEBUG_MSB] = 1,
- [WSA881X_TEMP_DEBUG_LSB] = 1,
- [WSA881X_SAMPLE_EDGE_SEL] = 1,
- [WSA881X_IOPAD_CTL] = 1,
- [WSA881X_SPARE_0] = 1,
- [WSA881X_SPARE_1] = 1,
- [WSA881X_SPARE_2] = 1,
- [WSA881X_OTP_REG_0] = 1,
- [WSA881X_OTP_REG_1] = 1,
- [WSA881X_OTP_REG_2] = 1,
- [WSA881X_OTP_REG_3] = 1,
- [WSA881X_OTP_REG_4] = 1,
- [WSA881X_OTP_REG_5] = 1,
- [WSA881X_OTP_REG_6] = 1,
- [WSA881X_OTP_REG_7] = 1,
- [WSA881X_OTP_REG_8] = 1,
- [WSA881X_OTP_REG_9] = 1,
- [WSA881X_OTP_REG_10] = 1,
- [WSA881X_OTP_REG_11] = 1,
- [WSA881X_OTP_REG_12] = 1,
- [WSA881X_OTP_REG_13] = 1,
- [WSA881X_OTP_REG_14] = 1,
- [WSA881X_OTP_REG_15] = 1,
- [WSA881X_OTP_REG_16] = 1,
- [WSA881X_OTP_REG_17] = 1,
- [WSA881X_OTP_REG_18] = 1,
- [WSA881X_OTP_REG_19] = 1,
- [WSA881X_OTP_REG_20] = 1,
- [WSA881X_OTP_REG_21] = 1,
- [WSA881X_OTP_REG_22] = 1,
- [WSA881X_OTP_REG_23] = 1,
- [WSA881X_OTP_REG_24] = 1,
- [WSA881X_OTP_REG_25] = 1,
- [WSA881X_OTP_REG_26] = 1,
- [WSA881X_OTP_REG_27] = 1,
- [WSA881X_OTP_REG_28] = 1,
- [WSA881X_OTP_REG_29] = 1,
- [WSA881X_OTP_REG_30] = 1,
- [WSA881X_OTP_REG_31] = 1,
- [WSA881X_OTP_REG_63] = 1,
- /* Analog Registers */
- [WSA881X_BIAS_REF_CTRL] = 1,
- [WSA881X_BIAS_TEST] = 1,
- [WSA881X_BIAS_BIAS] = 1,
- [WSA881X_TEMP_OP] = 1,
- [WSA881X_TEMP_IREF_CTRL] = 1,
- [WSA881X_TEMP_ISENS_CTRL] = 1,
- [WSA881X_TEMP_CLK_CTRL] = 1,
- [WSA881X_TEMP_TEST] = 1,
- [WSA881X_TEMP_BIAS] = 1,
- [WSA881X_TEMP_ADC_CTRL] = 1,
- [WSA881X_TEMP_DOUT_MSB] = 1,
- [WSA881X_TEMP_DOUT_LSB] = 1,
- [WSA881X_ADC_EN_MODU_V] = 1,
- [WSA881X_ADC_EN_MODU_I] = 1,
- [WSA881X_ADC_EN_DET_TEST_V] = 1,
- [WSA881X_ADC_EN_DET_TEST_I] = 1,
- [WSA881X_ADC_SEL_IBIAS] = 1,
- [WSA881X_ADC_EN_SEL_IBIAS] = 1,
- [WSA881X_SPKR_DRV_EN] = 1,
- [WSA881X_SPKR_DRV_GAIN] = 1,
- [WSA881X_SPKR_DAC_CTL] = 1,
- [WSA881X_SPKR_DRV_DBG] = 1,
- [WSA881X_SPKR_PWRSTG_DBG] = 1,
- [WSA881X_SPKR_OCP_CTL] = 1,
- [WSA881X_SPKR_CLIP_CTL] = 1,
- [WSA881X_SPKR_BBM_CTL] = 1,
- [WSA881X_SPKR_MISC_CTL1] = 1,
- [WSA881X_SPKR_MISC_CTL2] = 1,
- [WSA881X_SPKR_BIAS_INT] = 1,
- [WSA881X_SPKR_PA_INT] = 1,
- [WSA881X_SPKR_BIAS_CAL] = 1,
- [WSA881X_SPKR_BIAS_PSRR] = 1,
- [WSA881X_SPKR_STATUS1] = 1,
- [WSA881X_SPKR_STATUS2] = 1,
- [WSA881X_BOOST_EN_CTL] = 1,
- [WSA881X_BOOST_CURRENT_LIMIT] = 1,
- [WSA881X_BOOST_PS_CTL] = 1,
- [WSA881X_BOOST_PRESET_OUT1] = 1,
- [WSA881X_BOOST_PRESET_OUT2] = 1,
- [WSA881X_BOOST_FORCE_OUT] = 1,
- [WSA881X_BOOST_LDO_PROG] = 1,
- [WSA881X_BOOST_SLOPE_COMP_ISENSE_FB] = 1,
- [WSA881X_BOOST_RON_CTL] = 1,
- [WSA881X_BOOST_LOOP_STABILITY] = 1,
- [WSA881X_BOOST_ZX_CTL] = 1,
- [WSA881X_BOOST_START_CTL] = 1,
- [WSA881X_BOOST_MISC1_CTL] = 1,
- [WSA881X_BOOST_MISC2_CTL] = 1,
- [WSA881X_BOOST_MISC3_CTL] = 1,
- [WSA881X_BOOST_ATEST_CTL] = 1,
- [WSA881X_SPKR_PROT_FE_GAIN] = 1,
- [WSA881X_SPKR_PROT_FE_CM_LDO_SET] = 1,
- [WSA881X_SPKR_PROT_FE_ISENSE_BIAS_SET1] = 1,
- [WSA881X_SPKR_PROT_FE_ISENSE_BIAS_SET2] = 1,
- [WSA881X_SPKR_PROT_ATEST1] = 1,
- [WSA881X_SPKR_PROT_ATEST2] = 1,
- [WSA881X_SPKR_PROT_FE_VSENSE_VCM] = 1,
- [WSA881X_SPKR_PROT_FE_VSENSE_BIAS_SET1] = 1,
- [WSA881X_BONGO_RESRV_REG1] = 1,
- [WSA881X_BONGO_RESRV_REG2] = 1,
- [WSA881X_SPKR_PROT_SAR] = 1,
- [WSA881X_SPKR_STATUS3] = 1,
-};
diff --git a/sound/soc/codecs/wsa881x.c b/sound/soc/codecs/wsa881x.c
index ef493a8..77aea10 100644
--- a/sound/soc/codecs/wsa881x.c
+++ b/sound/soc/codecs/wsa881x.c
@@ -1365,6 +1365,7 @@
/* Retry after 1 msec delay */
usleep_range(1000, 1100);
}
+ pdev->dev_num = devnum;
regcache_mark_dirty(wsa881x->regmap);
regcache_sync(wsa881x->regmap);
return 0;
diff --git a/sound/soc/codecs/wsa881x.h b/sound/soc/codecs/wsa881x.h
index be234ac..fbc60d8 100644
--- a/sound/soc/codecs/wsa881x.h
+++ b/sound/soc/codecs/wsa881x.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -20,9 +20,10 @@
#define WSA881X_MAX_SWR_PORTS 4
+#if IS_ENABLED(CONFIG_SND_SOC_WSA881X)
extern int wsa881x_set_channel_map(struct snd_soc_codec *codec, u8 *port,
- u8 num_port, unsigned int *ch_mask,
- unsigned int *ch_rate);
+ u8 num_port, unsigned int *ch_mask,
+ unsigned int *ch_rate);
extern const u8 wsa881x_reg_readable[WSA881X_CACHE_SIZE];
extern struct regmap_config wsa881x_regmap_config;
@@ -31,4 +32,25 @@
struct snd_soc_codec *codec);
void wsa881x_regmap_defaults(struct regmap *regmap, u8 version);
+#else
+static inline int wsa881x_set_channel_map(struct snd_soc_codec *codec, u8 *port,
+ u8 num_port, unsigned int *ch_mask,
+ unsigned int *ch_rate)
+{
+ return 0;
+}
+
+static inline int wsa881x_codec_info_create_codec_entry(
+ struct snd_info_entry *codec_root,
+ struct snd_soc_codec *codec)
+{
+ return 0;
+}
+
+static inline void wsa881x_regmap_defaults(struct regmap *regmap, u8 version)
+{
+}
+
+#endif
+
#endif /* _WSA881X_H */
diff --git a/sound/soc/msm/Kconfig b/sound/soc/msm/Kconfig
index 18585749..abadd08 100644
--- a/sound/soc/msm/Kconfig
+++ b/sound/soc/msm/Kconfig
@@ -112,14 +112,12 @@
select MSM_CDC_PINCTRL
select SND_SOC_MSM_SDW
select SND_SOC_SDM660_CDC
- select SND_SOC_MSM_HDMI_CODEC_RX
select QTI_PP
select DTS_SRS_TM
select DOLBY_LICENSE
select SND_HWDEP
select MSM_ULTRASOUND
select DTS_EAGLE
- select SND_SOC_SDM660_COMMON
select SND_SOC_COMPRESS
select PINCTRL_LPI
help
@@ -144,7 +142,6 @@
select SND_SOC_WCD9335
select SND_SOC_WCD934X
select SND_SOC_WSA881X
- select SND_SOC_MSM_HDMI_CODEC_RX
select MFD_CORE
select QTI_PP
select DTS_SRS_TM
@@ -154,7 +151,6 @@
select SND_HWDEP
select MSM_ULTRASOUND
select DTS_EAGLE
- select SND_SOC_SDM660_COMMON
select SND_SOC_COMPRESS
select PINCTRL_LPI
help
@@ -192,8 +188,21 @@
the machine driver and the corresponding
DAI-links
-config SND_SOC_MSM8998
+config SND_SOC_MACHINE_MSM8998
tristate "SoC Machine driver for MSM8998 boards"
+ select SND_SOC_WSA881X
+ select SND_SOC_WCD9335
+ select SND_SOC_WCD934X
+ select SND_SOC_CPE
+
+ help
+ To enable the machine driver and the
+ corresponding DAI-links on MSM8998.
+ All platform specific audio modules are
+ enabled here.
+
+config SND_SOC_MSM8998
+ tristate "Sound SoC drivers to interface with DSP"
depends on ARCH_QCOM
select SND_SOC_COMPRESS
select SND_SOC_QDSP6V2
@@ -205,13 +214,9 @@
select MSM_QDSP6_PDR
select MSM_QDSP6_NOTIFIER
select MSM_QDSP6V2_CODECS
- select SND_SOC_WCD9335
- select SND_SOC_WCD934X
- select SND_SOC_WSA881X
select SND_SOC_MSM_HDMI_CODEC_RX
select DTS_SRS_TM
select QTI_PP
- select SND_SOC_CPE
select MSM_ULTRASOUND
select DOLBY_LICENSE
select SND_HWDEP
@@ -224,17 +229,28 @@
DAI-links
config SND_SOC_660
- tristate "SoC Machine driver for SDM660 boards"
- depends on ARCH_SDM660
+ tristate "SoC Machine driver for SDM660/670 boards"
+ depends on ARCH_SDM660 || ARCH_SDM670
select SND_SOC_INT_CODEC
select SND_SOC_EXT_CODEC
help
- To add support for SoC audio on SDM660.
+ To add support for SoC audio on SDM660/670.
This will enable sound soc drivers which
interfaces with DSP, also it will enable
the machine driver and the corresponding
DAI-links
+config SND_SOC_MACHINE_SDM845
+ tristate "SoC Machine driver for SDM845 boards"
+ select SND_SOC_WSA881X
+ select SND_SOC_WCD934X
+
+ help
+ To enable the machine driver and the
+ corresponding DAI-links on SDM845.
+ All platform specific audio modules are
+ enabled here.
+
config SND_SOC_SDM845
tristate "SoC Machine driver for SDM845 boards"
depends on ARCH_QCOM
@@ -248,8 +264,6 @@
select MSM_QDSP6_PDR
select MSM_QDSP6_NOTIFIER
select MSM_QDSP6V2_CODECS
- select SND_SOC_WCD934X
- select SND_SOC_WSA881X
select DTS_SRS_TM
select QTI_PP
select MSM_ULTRASOUND
diff --git a/sound/soc/msm/Makefile b/sound/soc/msm/Makefile
index 5105cd9..312bb45 100644
--- a/sound/soc/msm/Makefile
+++ b/sound/soc/msm/Makefile
@@ -18,11 +18,11 @@
# for MSM8998 sound card driver
snd-soc-msm8998-objs := msm8998.o
-obj-$(CONFIG_SND_SOC_MSM8998) += snd-soc-msm8998.o
+obj-$(CONFIG_SND_SOC_MACHINE_MSM8998) += snd-soc-msm8998.o
# for SDM660 sound card driver
snd-soc-sdm660-common-objs := sdm660-common.o
-obj-$(CONFIG_SND_SOC_SDM660_COMMON) += snd-soc-sdm660-common.o
+obj-$(CONFIG_SND_SOC_660) += snd-soc-sdm660-common.o
# for SDM660 sound card driver
snd-soc-int-codec-objs := sdm660-internal.o
@@ -36,4 +36,4 @@
# for SDM845 sound card driver
snd-soc-sdm845-objs := sdm845.o
-obj-$(CONFIG_SND_SOC_SDM845) += snd-soc-sdm845.o
+obj-$(CONFIG_SND_SOC_MACHINE_SDM845) += snd-soc-sdm845.o
diff --git a/sound/soc/msm/msm-audio-pinctrl.c b/sound/soc/msm/msm-audio-pinctrl.c
deleted file mode 100644
index f0fba84..0000000
--- a/sound/soc/msm/msm-audio-pinctrl.c
+++ /dev/null
@@ -1,316 +0,0 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
-#include <linux/platform_device.h>
-#include "msm-audio-pinctrl.h"
-
-/*
- * pinctrl -- handle to query pinctrl apis
- * cdc lines -- stores pinctrl handles for pinctrl states
- * active_set -- maintain the overall pinctrl state
- */
-struct cdc_pinctrl_info {
- struct pinctrl *pinctrl;
- struct pinctrl_state **cdc_lines;
- int active_set;
-};
-
-/*
- * gpiosets -- stores all gpiosets mentioned in dtsi file
- * gpiosets_comb_names -- stores all possible gpioset combinations
- * gpioset_state -- maintains counter for each gpioset
- * gpiosets_max -- maintain the total supported gpiosets
- * gpiosets_comb_max -- maintain the total gpiosets combinations
- */
-struct cdc_gpioset_info {
- char **gpiosets;
- char **gpiosets_comb_names;
- uint8_t *gpioset_state;
- int gpiosets_max;
- int gpiosets_comb_max;
-};
-
-static struct cdc_pinctrl_info pinctrl_info[MAX_PINCTRL_CLIENT];
-static struct cdc_gpioset_info gpioset_info[MAX_PINCTRL_CLIENT];
-
-/* Finds the index for the gpio set in the dtsi file */
-int msm_get_gpioset_index(enum pinctrl_client client, char *keyword)
-{
- int i;
-
- for (i = 0; i < gpioset_info[client].gpiosets_max; i++) {
- if (!(strcmp(gpioset_info[client].gpiosets[i], keyword)))
- break;
- }
- /* Checking if the keyword is present in dtsi or not */
- if (i != gpioset_info[client].gpiosets_max)
- return i;
- else
- return -EINVAL;
-}
-
-/*
- * This function reads the following from dtsi file
- * 1. All gpio sets
- * 2. All combinations of gpio sets
- * 3. Pinctrl handles to gpio sets
- *
- * Returns error if there is
- * 1. Problem reading from dtsi file
- * 2. Memory allocation failure
- */
-int msm_gpioset_initialize(enum pinctrl_client client,
- struct device *dev)
-{
- struct pinctrl *pinctrl;
- const char *gpioset_names = "qcom,msm-gpios";
- const char *gpioset_combinations = "qcom,pinctrl-names";
- const char *gpioset_names_str = NULL;
- const char *gpioset_comb_str = NULL;
- int num_strings = 0;
- int ret = 0;
- int i = 0;
-
- pr_debug("%s\n", __func__);
- pinctrl = devm_pinctrl_get(dev);
- if (IS_ERR(pinctrl)) {
- pr_err("%s: Unable to get pinctrl handle\n",
- __func__);
- return -EINVAL;
- }
- pinctrl_info[client].pinctrl = pinctrl;
-
- /* Reading of gpio sets */
- num_strings = of_property_count_strings(dev->of_node,
- gpioset_names);
- if (num_strings < 0) {
- dev_err(dev,
- "%s: missing %s in dt node or length is incorrect\n",
- __func__, gpioset_names);
- goto err;
- }
- gpioset_info[client].gpiosets_max = num_strings;
- gpioset_info[client].gpiosets = devm_kzalloc(dev,
- gpioset_info[client].gpiosets_max *
- sizeof(char *), GFP_KERNEL);
- if (!gpioset_info[client].gpiosets) {
- dev_err(dev, "Can't allocate memory for gpio set names\n");
- ret = -ENOMEM;
- goto err;
- }
-
- for (i = 0; i < num_strings; i++) {
- ret = of_property_read_string_index(dev->of_node,
- gpioset_names, i, &gpioset_names_str);
-
- gpioset_info[client].gpiosets[i] = devm_kzalloc(dev,
- (strlen(gpioset_names_str) + 1), GFP_KERNEL);
-
- if (!gpioset_info[client].gpiosets[i]) {
- dev_err(dev, "%s: Can't allocate gpiosets[%d] data\n",
- __func__, i);
- ret = -ENOMEM;
- goto err;
- }
- strlcpy(gpioset_info[client].gpiosets[i],
- gpioset_names_str, strlen(gpioset_names_str)+1);
- gpioset_names_str = NULL;
- }
- num_strings = 0;
-
- /* Allocating memory for gpio set counter */
- gpioset_info[client].gpioset_state = devm_kzalloc(dev,
- gpioset_info[client].gpiosets_max *
- sizeof(uint8_t), GFP_KERNEL);
- if (!gpioset_info[client].gpioset_state) {
- dev_err(dev, "Can't allocate memory for gpio set counter\n");
- ret = -ENOMEM;
- goto err;
- }
-
- /* Reading of all combinations of gpio sets */
- num_strings = of_property_count_strings(dev->of_node,
- gpioset_combinations);
- if (num_strings < 0) {
- dev_err(dev,
- "%s: missing %s in dt node or length is incorrect\n",
- __func__, gpioset_combinations);
- goto err;
- }
- gpioset_info[client].gpiosets_comb_max = num_strings;
- gpioset_info[client].gpiosets_comb_names = devm_kzalloc(dev,
- num_strings * sizeof(char *), GFP_KERNEL);
- if (!gpioset_info[client].gpiosets_comb_names) {
- ret = -ENOMEM;
- goto err;
- }
-
- for (i = 0; i < gpioset_info[client].gpiosets_comb_max; i++) {
- ret = of_property_read_string_index(dev->of_node,
- gpioset_combinations, i, &gpioset_comb_str);
-
- gpioset_info[client].gpiosets_comb_names[i] = devm_kzalloc(dev,
- (strlen(gpioset_comb_str) + 1), GFP_KERNEL);
- if (!gpioset_info[client].gpiosets_comb_names[i]) {
- ret = -ENOMEM;
- goto err;
- }
-
- strlcpy(gpioset_info[client].gpiosets_comb_names[i],
- gpioset_comb_str,
- strlen(gpioset_comb_str)+1);
- pr_debug("%s: GPIO configuration %s\n",
- __func__,
- gpioset_info[client].gpiosets_comb_names[i]);
- gpioset_comb_str = NULL;
- }
-
- /* Allocating memory for handles to pinctrl states */
- pinctrl_info[client].cdc_lines = devm_kzalloc(dev,
- num_strings * sizeof(char *), GFP_KERNEL);
- if (!pinctrl_info[client].cdc_lines) {
- ret = -ENOMEM;
- goto err;
- }
-
- /* Get pinctrl handles for gpio sets in dtsi file */
- for (i = 0; i < num_strings; i++) {
- pinctrl_info[client].cdc_lines[i] = pinctrl_lookup_state(
- pinctrl,
- (const char *)gpioset_info[client].
- gpiosets_comb_names[i]);
- if (IS_ERR(pinctrl_info[client].cdc_lines[i]))
- pr_err("%s: Unable to get pinctrl handle for %s\n",
- __func__, gpioset_info[client].
- gpiosets_comb_names[i]);
- }
- goto success;
-
-err:
- /* Free up memory allocated for gpio set combinations */
- for (i = 0; i < gpioset_info[client].gpiosets_max; i++) {
- if (gpioset_info[client].gpiosets[i] != NULL) {
- devm_kfree(dev, gpioset_info[client].gpiosets[i]);
- gpioset_info[client].gpiosets[i] = NULL;
- }
- }
- if (gpioset_info[client].gpiosets != NULL) {
- devm_kfree(dev, gpioset_info[client].gpiosets);
- gpioset_info[client].gpiosets = NULL;
- }
-
- /* Free up memory allocated for gpio set combinations */
- for (i = 0; i < gpioset_info[client].gpiosets_comb_max; i++) {
- if (gpioset_info[client].gpiosets_comb_names[i] != NULL) {
- devm_kfree(dev,
- gpioset_info[client].gpiosets_comb_names[i]);
- gpioset_info[client].gpiosets_comb_names[i] = NULL;
- }
- }
- if (gpioset_info[client].gpiosets_comb_names != NULL) {
- devm_kfree(dev, gpioset_info[client].gpiosets_comb_names);
- gpioset_info[client].gpiosets_comb_names = NULL;
- }
-
- /* Free up memory allocated for handles to pinctrl states */
- if (pinctrl_info[client].cdc_lines != NULL) {
- devm_kfree(dev, pinctrl_info[client].cdc_lines);
- pinctrl_info[client].cdc_lines = NULL;
- }
-
- /* Free up memory allocated for counter of gpio sets */
- if (gpioset_info[client].gpioset_state != NULL) {
- devm_kfree(dev, gpioset_info[client].gpioset_state);
- gpioset_info[client].gpioset_state = NULL;
- }
-
-success:
- return ret;
-}
-
-int msm_gpioset_activate(enum pinctrl_client client, char *keyword)
-{
- int ret = 0;
- int gp_set = 0;
- int active_set = 0;
-
- gp_set = msm_get_gpioset_index(client, keyword);
- if (gp_set < 0) {
- pr_err("%s: gpio set name does not exist\n",
- __func__);
- return gp_set;
- }
-
- if (!gpioset_info[client].gpioset_state[gp_set]) {
- /*
- * If pinctrl pointer is not valid,
- * no need to proceed further
- */
- active_set = pinctrl_info[client].active_set;
- if (IS_ERR(pinctrl_info[client].cdc_lines[active_set]))
- return 0;
-
- pinctrl_info[client].active_set |= (1 << gp_set);
- active_set = pinctrl_info[client].active_set;
- pr_debug("%s: pinctrl.active_set: %d\n", __func__, active_set);
-
- /* Select the appropriate pinctrl state */
- ret = pinctrl_select_state(pinctrl_info[client].pinctrl,
- pinctrl_info[client].cdc_lines[active_set]);
- }
- gpioset_info[client].gpioset_state[gp_set]++;
-
- return ret;
-}
-
-int msm_gpioset_suspend(enum pinctrl_client client, char *keyword)
-{
- int ret = 0;
- int gp_set = 0;
- int active_set = 0;
-
- gp_set = msm_get_gpioset_index(client, keyword);
- if (gp_set < 0) {
- pr_err("%s: gpio set name does not exist\n",
- __func__);
- return gp_set;
- }
-
- if (gpioset_info[client].gpioset_state[gp_set] == 1) {
- pinctrl_info[client].active_set &= ~(1 << gp_set);
- /*
- * If pinctrl pointer is not valid,
- * no need to proceed further
- */
- active_set = pinctrl_info[client].active_set;
- if (IS_ERR(pinctrl_info[client].cdc_lines[active_set]))
- return -EINVAL;
-
- pr_debug("%s: pinctrl.active_set: %d\n", __func__,
- pinctrl_info[client].active_set);
- /* Select the appropriate pinctrl state */
- ret = pinctrl_select_state(pinctrl_info[client].pinctrl,
- pinctrl_info[client].cdc_lines[pinctrl_info[client].
- active_set]);
- }
- if (!(gpioset_info[client].gpioset_state[gp_set])) {
- pr_err("%s: Invalid call to de activate gpios: %d\n", __func__,
- gpioset_info[client].gpioset_state[gp_set]);
- return -EINVAL;
- }
-
- gpioset_info[client].gpioset_state[gp_set]--;
-
- return ret;
-}
diff --git a/sound/soc/msm/msm-audio-pinctrl.h b/sound/soc/msm/msm-audio-pinctrl.h
deleted file mode 100644
index ec7c6aa..0000000
--- a/sound/soc/msm/msm-audio-pinctrl.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __MSM_AUDIO_PINCTRL_H
-#define __MSM_AUDIO_PINCTRL_H
-
-enum pinctrl_client {
- CLIENT_WCD,
- CLIENT_WSA_BONGO_1,
- CLIENT_WSA_BONGO_2,
- MAX_PINCTRL_CLIENT,
-};
-
-
-/* finds the index for the gpio set in the dtsi file */
-int msm_get_gpioset_index(enum pinctrl_client client, char *keyword);
-
-/*
- * this function reads the following from dtsi file
- * 1. all gpio sets
- * 2. all combinations of gpio sets
- * 3. pinctrl handles to gpio sets
- *
- * returns error if there is
- * 1. problem reading from dtsi file
- * 2. memory allocation failure
- */
-int msm_gpioset_initialize(enum pinctrl_client client, struct device *dev);
-
-int msm_gpioset_activate(enum pinctrl_client client, char *keyword);
-
-int msm_gpioset_suspend(enum pinctrl_client client, char *keyword);
-
-#endif /* __MSM_AUDIO_PINCTRL_H */
diff --git a/sound/soc/msm/msm-dai-fe.c b/sound/soc/msm/msm-dai-fe.c
index c319ccf..89a9cc2 100644
--- a/sound/soc/msm/msm-dai-fe.c
+++ b/sound/soc/msm/msm-dai-fe.c
@@ -2376,8 +2376,20 @@
.rate_min = 8000,
.rate_max = 384000,
},
+ .capture = {
+ .stream_name = "MultiMedia10 Capture",
+ .aif_name = "MM_UL10",
+ .rates = (SNDRV_PCM_RATE_8000_48000 |
+ SNDRV_PCM_RATE_KNOT),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S24_3LE),
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
.ops = &msm_fe_Multimedia_dai_ops,
- .compress_new = snd_soc_new_compress,
.name = "MultiMedia10",
.probe = fe_dai_probe,
},
@@ -2496,8 +2508,21 @@
.rate_min = 8000,
.rate_max = 384000,
},
+ .capture = {
+ .stream_name = "MultiMedia16 Capture",
+ .aif_name = "MM_UL16",
+ .rates = (SNDRV_PCM_RATE_8000_48000|
+ SNDRV_PCM_RATE_KNOT),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S32_LE),
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
.ops = &msm_fe_Multimedia_dai_ops,
- .compress_new = snd_soc_new_compress,
.name = "MultiMedia16",
.probe = fe_dai_probe,
},
diff --git a/sound/soc/msm/msm8996.c b/sound/soc/msm/msm8996.c
index 45c5479..0890037 100644
--- a/sound/soc/msm/msm8996.c
+++ b/sound/soc/msm/msm8996.c
@@ -2915,12 +2915,13 @@
.id = MSM_FRONTEND_DAI_MULTIMEDIA15,
},
{
- .name = "MSM8996 Compress9",
- .stream_name = "Compress9",
+ .name = "MSM8996 ULL NOIRQ_2",
+ .stream_name = "MM_NOIRQ_2",
.cpu_dai_name = "MultiMedia16",
- .platform_name = "msm-compress-dsp",
+ .platform_name = "msm-pcm-dsp-noirq",
.dynamic = 1,
.dpcm_playback = 1,
+ .dpcm_capture = 1,
.trigger = {SND_SOC_DPCM_TRIGGER_POST,
SND_SOC_DPCM_TRIGGER_POST},
.codec_dai_name = "snd-soc-dummy-dai",
diff --git a/sound/soc/msm/msm8998.c b/sound/soc/msm/msm8998.c
index 222c65a..05b7d30 100644
--- a/sound/soc/msm/msm8998.c
+++ b/sound/soc/msm/msm8998.c
@@ -5036,12 +5036,13 @@
.id = MSM_FRONTEND_DAI_MULTIMEDIA7,
},
{
- .name = MSM_DAILINK_NAME(Compress3),
- .stream_name = "Compress3",
+ .name = MSM_DAILINK_NAME(MultiMedia10),
+ .stream_name = "MultiMedia10",
.cpu_dai_name = "MultiMedia10",
- .platform_name = "msm-compress-dsp",
+ .platform_name = "msm-pcm-dsp.1",
.dynamic = 1,
.dpcm_playback = 1,
+ .dpcm_capture = 1,
.trigger = {SND_SOC_DPCM_TRIGGER_POST,
SND_SOC_DPCM_TRIGGER_POST},
.codec_dai_name = "snd-soc-dummy-dai",
@@ -5312,12 +5313,13 @@
.id = MSM_FRONTEND_DAI_MULTIMEDIA15,
},
{
- .name = MSM_DAILINK_NAME(Compress9),
- .stream_name = "Compress9",
+ .name = MSM_DAILINK_NAME(ULL_NOIRQ_2),
+ .stream_name = "MM_NOIRQ_2",
.cpu_dai_name = "MultiMedia16",
- .platform_name = "msm-compress-dsp",
+ .platform_name = "msm-pcm-dsp-noirq",
.dynamic = 1,
.dpcm_playback = 1,
+ .dpcm_capture = 1,
.trigger = {SND_SOC_DPCM_TRIGGER_POST,
SND_SOC_DPCM_TRIGGER_POST},
.codec_dai_name = "snd-soc-dummy-dai",
diff --git a/sound/soc/msm/qdsp6v2/audio_cal_utils.c b/sound/soc/msm/qdsp6v2/audio_cal_utils.c
index f5c6d6f..7e69a7f 100644
--- a/sound/soc/msm/qdsp6v2/audio_cal_utils.c
+++ b/sound/soc/msm/qdsp6v2/audio_cal_utils.c
@@ -356,6 +356,15 @@
return cal_type;
}
+/**
+ * cal_utils_create_cal_types
+ *
+ * @num_cal_types: number of types
+ * @cal_type: pointer to the cal types pointer
+ * @info: pointer to info
+ *
+ * Returns 0 on success, -EINVAL otherwise
+ */
int cal_utils_create_cal_types(int num_cal_types,
struct cal_type_data **cal_type,
struct cal_type_info *info)
@@ -411,6 +420,7 @@
done:
return ret;
}
+EXPORT_SYMBOL(cal_utils_create_cal_types);
static void delete_cal_block(struct cal_block_data *cal_block)
{
@@ -497,6 +507,13 @@
return;
}
+/**
+ * cal_utils_get_only_cal_block
+ *
+ * @cal_type: pointer to the cal type
+ *
+ * Returns cal_block structure
+ */
struct cal_block_data *cal_utils_get_only_cal_block(
struct cal_type_data *cal_type)
{
@@ -516,7 +533,16 @@
done:
return cal_block;
}
+EXPORT_SYMBOL(cal_utils_get_only_cal_block);
+/**
+ * cal_utils_match_buf_num
+ *
+ * @cal_block: pointer to cal block struct
+ * @user_data: pointer to user data
+ *
+ * Returns true on match
+ */
bool cal_utils_match_buf_num(struct cal_block_data *cal_block,
void *user_data)
{
@@ -528,6 +554,7 @@
return ret;
}
+EXPORT_SYMBOL(cal_utils_match_buf_num);
static struct cal_block_data *get_matching_cal_block(
struct cal_type_data *cal_type,
@@ -759,6 +786,17 @@
return ret;
}
+/**
+ * cal_utils_alloc_cal
+ *
+ * @data_size: size of data to allocate
+ * @data: data pointer
+ * @cal_type: pointer to the cal type
+ * @client_info_size: client info size
+ * @client_info: pointer to client info
+ *
+ * Returns 0 on success, appropriate error code otherwise
+ */
int cal_utils_alloc_cal(size_t data_size, void *data,
struct cal_type_data *cal_type,
size_t client_info_size, void *client_info)
@@ -827,7 +865,17 @@
done:
return ret;
}
+EXPORT_SYMBOL(cal_utils_alloc_cal);
+/**
+ * cal_utils_dealloc_cal
+ *
+ * @data_size: size of data to allocate
+ * @data: data pointer
+ * @cal_type: pointer to the cal type
+ *
+ * Returns 0 on success, appropriate error code otherwise
+ */
int cal_utils_dealloc_cal(size_t data_size, void *data,
struct cal_type_data *cal_type)
{
@@ -887,7 +935,19 @@
done:
return ret;
}
+EXPORT_SYMBOL(cal_utils_dealloc_cal);
+/**
+ * cal_utils_set_cal
+ *
+ * @data_size: size of data to set
+ * @data: data pointer
+ * @cal_type: pointer to the cal type
+ * @client_info_size: client info size
+ * @client_info: pointer to client info
+ *
+ * Returns 0 on success, appropriate error code otherwise
+ */
int cal_utils_set_cal(size_t data_size, void *data,
struct cal_type_data *cal_type,
size_t client_info_size, void *client_info)
@@ -967,3 +1027,4 @@
done:
return ret;
}
+EXPORT_SYMBOL(cal_utils_set_cal);
diff --git a/sound/soc/msm/qdsp6v2/audio_calibration.c b/sound/soc/msm/qdsp6v2/audio_calibration.c
index 808a0e4..d709b09 100644
--- a/sound/soc/msm/qdsp6v2/audio_calibration.c
+++ b/sound/soc/msm/qdsp6v2/audio_calibration.c
@@ -460,6 +460,12 @@
data->cal_type.cal_hdr.buffer_number);
ret = -EINVAL;
goto done;
+ } else if ((data->hdr.cal_type_size + sizeof(data->hdr)) > size) {
+ pr_err("%s: cal type hdr size %zd + cal type size %d is greater than user buffer size %d\n",
+ __func__, sizeof(data->hdr), data->hdr.cal_type_size,
+ size);
+ ret = -EFAULT;
+ goto done;
}
@@ -497,13 +503,7 @@
goto unlock;
if (data == NULL)
goto unlock;
- if ((sizeof(data->hdr) + data->hdr.cal_type_size) > size) {
- pr_err("%s: header size %zd plus cal type size %d are greater than data buffer size %d\n",
- __func__, sizeof(data->hdr),
- data->hdr.cal_type_size, size);
- ret = -EFAULT;
- goto unlock;
- } else if (copy_to_user((void *)arg, data,
+ if (copy_to_user(arg, data,
sizeof(data->hdr) + data->hdr.cal_type_size)) {
pr_err("%s: Could not copy cal type to user\n",
__func__);
diff --git a/sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.c
index 9f08222..9b845ee 100644
--- a/sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.c
@@ -154,7 +154,7 @@
MAX_INBAND_PARAM_SZ,
"VIRT ENABLE", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_VIRTUALIZER;
*updt_params++ =
@@ -182,7 +182,7 @@
MAX_INBAND_PARAM_SZ,
"VIRT STRENGTH", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_VIRTUALIZER;
*updt_params++ =
@@ -210,7 +210,7 @@
MAX_INBAND_PARAM_SZ,
"VIRT OUT_TYPE", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_VIRTUALIZER;
*updt_params++ =
@@ -238,7 +238,7 @@
MAX_INBAND_PARAM_SZ,
"VIRT GAIN_ADJUST", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_VIRTUALIZER;
*updt_params++ =
@@ -316,7 +316,7 @@
MAX_INBAND_PARAM_SZ,
"REVERB_ENABLE", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -344,7 +344,7 @@
MAX_INBAND_PARAM_SZ,
"REVERB_MODE", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -372,7 +372,7 @@
MAX_INBAND_PARAM_SZ,
"REVERB_PRESET", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -400,7 +400,7 @@
MAX_INBAND_PARAM_SZ,
"REVERB_WET_MIX", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -428,7 +428,7 @@
MAX_INBAND_PARAM_SZ,
"REVERB_GAIN_ADJUST", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -456,7 +456,7 @@
MAX_INBAND_PARAM_SZ,
"REVERB_ROOM_LEVEL", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -484,7 +484,7 @@
MAX_INBAND_PARAM_SZ,
"REVERB_ROOM_HF_LEVEL", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -512,7 +512,7 @@
MAX_INBAND_PARAM_SZ,
"REVERB_DECAY_TIME", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -540,7 +540,7 @@
MAX_INBAND_PARAM_SZ,
"REVERB_DECAY_HF_RATIO", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -568,7 +568,7 @@
MAX_INBAND_PARAM_SZ,
"REVERB_REFLECTIONS_LEVEL", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -596,7 +596,7 @@
MAX_INBAND_PARAM_SZ,
"REVERB_REFLECTIONS_DELAY", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -624,7 +624,7 @@
MAX_INBAND_PARAM_SZ,
"REVERB_LEVEL", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -652,7 +652,7 @@
MAX_INBAND_PARAM_SZ,
"REVERB_DELAY", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -680,7 +680,7 @@
MAX_INBAND_PARAM_SZ,
"REVERB_DIFFUSION", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -708,7 +708,7 @@
MAX_INBAND_PARAM_SZ,
"REVERB_DENSITY", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -787,7 +787,7 @@
MAX_INBAND_PARAM_SZ,
"BASS_BOOST_ENABLE", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_BASS_BOOST;
*updt_params++ =
@@ -815,7 +815,7 @@
MAX_INBAND_PARAM_SZ,
"BASS_BOOST_MODE", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_BASS_BOOST;
*updt_params++ =
@@ -843,7 +843,7 @@
MAX_INBAND_PARAM_SZ,
"BASS_BOOST_STRENGTH", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_BASS_BOOST;
*updt_params++ =
@@ -920,7 +920,7 @@
MAX_INBAND_PARAM_SZ,
"PBE_ENABLE", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_PBE;
*updt_params++ =
@@ -946,7 +946,7 @@
MAX_INBAND_PARAM_SZ,
"PBE_PARAM", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_PBE;
*updt_params++ =
@@ -1031,7 +1031,7 @@
MAX_INBAND_PARAM_SZ,
"EQ_ENABLE", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_POPLESS_EQUALIZER;
*updt_params++ =
@@ -1099,7 +1099,7 @@
MAX_INBAND_PARAM_SZ,
"EQ_CONFIG", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_POPLESS_EQUALIZER;
*updt_params++ =
@@ -1150,7 +1150,7 @@
MAX_INBAND_PARAM_SZ,
"EQ_BAND_INDEX", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_POPLESS_EQUALIZER;
*updt_params++ =
@@ -1182,7 +1182,7 @@
MAX_INBAND_PARAM_SZ,
"EQ_SINGLE_BAND_FREQ", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_POPLESS_EQUALIZER;
*updt_params++ =
@@ -1271,7 +1271,7 @@
"VOLUME/VOLUME2_GAIN_2CH",
rc);
if (rc != 0)
- break;
+ goto invalid_config;
if (instance == SOFT_VOLUME_INSTANCE_2)
*updt_params++ =
ASM_MODULE_ID_VOL_CTRL2;
@@ -1320,7 +1320,7 @@
"VOLUME/VOLUME2_GAIN_MASTER",
rc);
if (rc != 0)
- break;
+ goto invalid_config;
if (instance == SOFT_VOLUME_INSTANCE_2)
*updt_params++ =
ASM_MODULE_ID_VOL_CTRL2;
diff --git a/sound/soc/msm/qdsp6v2/msm-lsm-client.c b/sound/soc/msm/qdsp6v2/msm-lsm-client.c
index 3a6cbe6..ef3475c 100644
--- a/sound/soc/msm/qdsp6v2/msm-lsm-client.c
+++ b/sound/soc/msm/qdsp6v2/msm-lsm-client.c
@@ -1682,7 +1682,7 @@
dev_err(rtd->dev,
"%s REG_SND_MODEL failed err %d\n",
__func__, err);
- return err;
+ goto done;
}
break;
case SNDRV_LSM_SET_PARAMS: {
@@ -1852,13 +1852,15 @@
dev_err(rtd->dev,
"%s: Invalid params event_status_v3\n",
__func__);
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
if (copy_from_user(&userarg, arg, sizeof(userarg))) {
dev_err(rtd->dev,
"%s: err copyuser event_status_v3\n",
__func__);
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
}
if (userarg.payload_size >
@@ -1866,7 +1868,8 @@
pr_err("%s: payload_size %d is invalid, max allowed = %d\n",
__func__, userarg.payload_size,
LISTEN_MAX_STATUS_PAYLOAD_SIZE);
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
size = sizeof(struct snd_lsm_event_status_v3) +
@@ -1876,7 +1879,8 @@
dev_err(rtd->dev,
"%s: Allocation failed event status size %d\n",
__func__, size);
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
}
user->payload_size = userarg.payload_size;
err = msm_lsm_ioctl_shared(substream, cmd, user);
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-q6-noirq.c b/sound/soc/msm/qdsp6v2/msm-pcm-q6-noirq.c
index 325d642..75a2bff 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-q6-noirq.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-q6-noirq.c
@@ -30,9 +30,12 @@
#include <sound/control.h>
#include <sound/q6audio-v2.h>
#include <sound/timer.h>
+#include <sound/hwdep.h>
+
#include <asm/dma.h>
#include <sound/tlv.h>
#include <sound/pcm_params.h>
+#include <sound/devdep_params.h>
#include "msm-pcm-q6-v2.h"
#include "msm-pcm-routing-v2.h"
@@ -421,6 +424,42 @@
return ret;
}
+
+static int msm_pcm_mmap_fd(struct snd_pcm_substream *substream,
+ struct snd_pcm_mmap_fd *mmap_fd)
+{
+ struct msm_audio *prtd;
+ struct audio_port_data *apd;
+ struct audio_buffer *ab;
+ int dir = -1;
+
+ if (!substream->runtime) {
+ pr_err("%s substream runtime not found\n", __func__);
+ return -EFAULT;
+ }
+
+ prtd = substream->runtime->private_data;
+ if (!prtd || !prtd->audio_client || !prtd->mmap_flag) {
+ pr_err("%s no audio client or not an mmap session\n", __func__);
+ return -EINVAL;
+ }
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ dir = IN;
+ else
+ dir = OUT;
+
+ apd = prtd->audio_client->port;
+ ab = &(apd[dir].buf[0]);
+ mmap_fd->fd = ion_share_dma_buf_fd(ab->client, ab->handle);
+ if (mmap_fd->fd >= 0) {
+ mmap_fd->dir = dir;
+ mmap_fd->actual_size = ab->actual_size;
+ mmap_fd->size = ab->size;
+ }
+ return mmap_fd->fd < 0 ? -EFAULT : 0;
+}
+
static int msm_pcm_ioctl(struct snd_pcm_substream *substream,
unsigned int cmd, void *arg)
{
@@ -445,6 +484,15 @@
return snd_pcm_lib_ioctl(substream, cmd, arg);
}
+#ifdef CONFIG_COMPAT
+static int msm_pcm_compat_ioctl(struct snd_pcm_substream *substream,
+ unsigned int cmd, void *arg)
+{
+ /* we only handle RESET which is common for both modes */
+ return msm_pcm_ioctl(substream, cmd, arg);
+}
+#endif
+
static snd_pcm_uframes_t msm_pcm_pointer(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -994,6 +1042,101 @@
return 0;
}
+static int msm_pcm_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int ret = 0;
+ struct snd_pcm *pcm = hw->private_data;
+ struct snd_pcm_mmap_fd __user *_mmap_fd = NULL;
+ struct snd_pcm_mmap_fd mmap_fd;
+ struct snd_pcm_substream *substream = NULL;
+ int32_t dir = -1;
+
+ switch (cmd) {
+ case SNDRV_PCM_IOCTL_MMAP_DATA_FD:
+ _mmap_fd = (struct snd_pcm_mmap_fd __user *)arg;
+ if (get_user(dir, (int32_t __user *)&(_mmap_fd->dir))) {
+ pr_err("%s: error copying mmap_fd from user\n",
+ __func__);
+ ret = -EFAULT;
+ break;
+ }
+ if (dir != OUT && dir != IN) {
+ pr_err("%s invalid stream dir\n", __func__);
+ ret = -EINVAL;
+ break;
+ }
+ substream = pcm->streams[dir].substream;
+ if (!substream) {
+ pr_err("%s substream not found\n", __func__);
+ ret = -ENODEV;
+ break;
+ }
+ pr_debug("%s : %s MMAP Data fd\n", __func__,
+ dir == 0 ? "P" : "C");
+ if (msm_pcm_mmap_fd(substream, &mmap_fd) < 0) {
+ pr_err("%s: error getting fd\n",
+ __func__);
+ ret = -EFAULT;
+ break;
+ }
+ if (put_user(mmap_fd.fd, &_mmap_fd->fd) ||
+ put_user(mmap_fd.size, &_mmap_fd->size) ||
+ put_user(mmap_fd.actual_size, &_mmap_fd->actual_size)) {
+ pr_err("%s: error copying fd\n", __func__);
+ return -EFAULT;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+static int msm_pcm_hwdep_compat_ioctl(struct snd_hwdep *hw,
+ struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ /* we only support mmap fd. Handling is common in both modes */
+ return msm_pcm_hwdep_ioctl(hw, file, cmd, arg);
+}
+#else
+static int msm_pcm_hwdep_compat_ioctl(struct snd_hwdep *hw,
+ struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ return -EINVAL;
+}
+#endif
+
+static int msm_pcm_add_hwdep_dev(struct snd_soc_pcm_runtime *runtime)
+{
+ struct snd_hwdep *hwdep;
+ int rc;
+ char id[] = "NOIRQ_NN";
+
+ snprintf(id, sizeof(id), "NOIRQ_%d", runtime->pcm->device);
+ pr_debug("%s: pcm dev %d\n", __func__, runtime->pcm->device);
+ rc = snd_hwdep_new(runtime->card->snd_card,
+ &id[0],
+ HWDEP_FE_BASE + runtime->pcm->device,
+ &hwdep);
+ if (!hwdep || rc < 0) {
+ pr_err("%s: hwdep intf failed to create %s - hwdep\n", __func__,
+ id);
+ return rc;
+ }
+
+ hwdep->iface = SNDRV_HWDEP_IFACE_AUDIO_BE; /* for lack of a FE iface */
+ hwdep->private_data = runtime->pcm; /* of type struct snd_pcm */
+ hwdep->ops.ioctl = msm_pcm_hwdep_ioctl;
+ hwdep->ops.ioctl_compat = msm_pcm_hwdep_compat_ioctl;
+ return 0;
+}
static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
@@ -1027,7 +1170,9 @@
pr_err("%s: Could not add app type controls failed %d\n",
__func__, ret);
}
-
+ ret = msm_pcm_add_hwdep_dev(rtd);
+ if (ret)
+ pr_err("%s: Could not add hw dep node\n", __func__);
pcm->nonatomic = true;
exit:
return ret;
@@ -1040,6 +1185,9 @@
.copy = msm_pcm_copy,
.hw_params = msm_pcm_hw_params,
.ioctl = msm_pcm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = msm_pcm_compat_ioctl,
+#endif
.trigger = msm_pcm_trigger,
.pointer = msm_pcm_pointer,
.mmap = msm_pcm_mmap,
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index 019cbae..16f82ce 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -1093,7 +1093,7 @@
port_type = MSM_AFE_PORT_TYPE_RX;
} else if (stream_type == SNDRV_PCM_STREAM_CAPTURE) {
session_type = SESSION_TYPE_TX;
- if (passthr_mode != LEGACY_PCM)
+ if ((passthr_mode != LEGACY_PCM) && (passthr_mode != LISTEN))
path_type = ADM_PATH_COMPRESSED_TX;
else
path_type = ADM_PATH_LIVE_REC;
@@ -1768,9 +1768,8 @@
static int msm_routing_put_audio_mixer(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
struct snd_soc_dapm_update *update = NULL;
@@ -1810,9 +1809,8 @@
static int msm_routing_put_listen_mixer(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
struct snd_soc_dapm_update *update = NULL;
@@ -1928,9 +1926,8 @@
static int msm_routing_put_voice_mixer(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
struct snd_soc_dapm_update *update = NULL;
@@ -1972,9 +1969,8 @@
static int msm_routing_put_voice_stub_mixer(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
struct snd_soc_dapm_update *update = NULL;
@@ -2075,9 +2071,8 @@
static int msm_routing_put_switch_mixer(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_dapm_update *update = NULL;
pr_debug("%s: FM Switch enable %ld\n", __func__,
@@ -2104,9 +2099,8 @@
static int msm_routing_put_hfp_switch_mixer(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_dapm_update *update = NULL;
pr_debug("%s: HFP Switch enable %ld\n", __func__,
@@ -2133,9 +2127,8 @@
static int msm_routing_put_int0_mi2s_switch_mixer(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_dapm_update *update = NULL;
pr_debug("%s: INT0 MI2S Switch enable %ld\n", __func__,
@@ -2162,9 +2155,8 @@
static int msm_routing_put_int4_mi2s_switch_mixer(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_dapm_update *update = NULL;
pr_debug("%s: INT4 MI2S Switch enable %ld\n", __func__,
@@ -2191,9 +2183,8 @@
static int msm_routing_put_usb_switch_mixer(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_dapm_update *update = NULL;
pr_debug("%s: USB Switch enable %ld\n", __func__,
@@ -2220,9 +2211,8 @@
static int msm_routing_put_pri_mi2s_switch_mixer(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_dapm_update *update = NULL;
pr_debug("%s: PRI MI2S Switch enable %ld\n", __func__,
@@ -2249,9 +2239,8 @@
static int msm_routing_put_sec_mi2s_switch_mixer(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_dapm_update *update = NULL;
pr_debug("%s: SEC MI2S Switch enable %ld\n", __func__,
@@ -2280,9 +2269,8 @@
struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_dapm_update *update = NULL;
pr_debug("%s: TERT MI2S Switch enable %ld\n", __func__,
@@ -2311,9 +2299,8 @@
struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_dapm_update *update = NULL;
pr_debug("%s: QUAT MI2S Switch enable %ld\n", __func__,
@@ -2340,9 +2327,8 @@
static int msm_routing_put_fm_pcmrx_switch_mixer(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct snd_soc_dapm_update *update = NULL;
pr_debug("%s: FM Switch enable %ld\n", __func__,
@@ -3490,9 +3476,8 @@
struct snd_ctl_elem_value *ucontrol)
{
int ec_ref_port_id;
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
struct snd_soc_dapm_update *update = NULL;
@@ -3655,6 +3640,16 @@
msm_route_ec_ref_rx_enum[0],
msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put);
+static const struct snd_kcontrol_new ext_ec_ref_mux_ul16 =
+ SOC_DAPM_ENUM_EXT("AUDIO_REF_EC_UL16 MUX Mux",
+ msm_route_ec_ref_rx_enum[0],
+ msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put);
+
+static const struct snd_kcontrol_new ext_ec_ref_mux_ul10 =
+ SOC_DAPM_ENUM_EXT("AUDIO_REF_EC_UL10 MUX Mux",
+ msm_route_ec_ref_rx_enum[0],
+ msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put);
+
static const struct snd_kcontrol_new ext_ec_ref_mux_ul17 =
SOC_DAPM_ENUM_EXT("AUDIO_REF_EC_UL17 MUX Mux",
msm_route_ec_ref_rx_enum[0],
@@ -3684,9 +3679,8 @@
static int msm_routing_ext_ec_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_widget *widget =
+ snd_soc_dapm_kcontrol_widget(kcontrol);
int mux = ucontrol->value.enumerated.item[0];
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
int ret = 1;
@@ -7103,6 +7097,114 @@
msm_routing_put_audio_mixer),
};
+static const struct snd_kcontrol_new mmul16_mixer_controls[] = {
+ SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_SECONDARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("INT2_MI2S_TX", MSM_BACKEND_DAI_INT2_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT3_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_INT_FM_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_INT_BT_SCO_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_AFE_PCM_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("VOC_REC_DL", MSM_BACKEND_DAI_INCALL_RECORD_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("VOC_REC_UL", MSM_BACKEND_DAI_INCALL_RECORD_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SLIM_6_TX", MSM_BACKEND_DAI_SLIMBUS_6_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_TX_1,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_TX_2,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_TX_3,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SEC_TDM_TX_0", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SEC_TDM_TX_1", MSM_BACKEND_DAI_SEC_TDM_TX_1,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SEC_TDM_TX_2", MSM_BACKEND_DAI_SEC_TDM_TX_2,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SEC_TDM_TX_3", MSM_BACKEND_DAI_SEC_TDM_TX_3,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_TDM_TX_1", MSM_BACKEND_DAI_TERT_TDM_TX_1,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_TDM_TX_2", MSM_BACKEND_DAI_TERT_TDM_TX_2,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_TDM_TX_3", MSM_BACKEND_DAI_TERT_TDM_TX_3,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_QUAT_TDM_TX_1,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_QUAT_TDM_TX_2,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_QUAT_TDM_TX_3,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SLIM_7_TX", MSM_BACKEND_DAI_SLIMBUS_7_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("USB_AUDIO_TX", MSM_BACKEND_DAI_USB_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("AUX_PCM_TX", MSM_BACKEND_DAI_AUXPCM_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SEC_AUX_PCM_TX", MSM_BACKEND_DAI_SEC_AUXPCM_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_AUX_PCM_TX", MSM_BACKEND_DAI_QUAT_AUXPCM_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
static const struct snd_kcontrol_new mmul9_mixer_controls[] = {
SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
@@ -7154,6 +7256,59 @@
msm_routing_put_audio_mixer),
};
+static const struct snd_kcontrol_new mmul10_mixer_controls[] = {
+ SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_INT_FM_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_INT_BT_SCO_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_AFE_PCM_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("VOC_REC_DL", MSM_BACKEND_DAI_INCALL_RECORD_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("VOC_REC_UL", MSM_BACKEND_DAI_INCALL_RECORD_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("SLIM_6_TX", MSM_BACKEND_DAI_SLIMBUS_6_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_TDM_TX_1", MSM_BACKEND_DAI_TERT_TDM_TX_1,
+ MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_TDM_TX_2", MSM_BACKEND_DAI_TERT_TDM_TX_2,
+ MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_TDM_TX_3", MSM_BACKEND_DAI_TERT_TDM_TX_3,
+ MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+ MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_QUAT_TDM_TX_1,
+ MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_QUAT_TDM_TX_2,
+ MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_QUAT_TDM_TX_3,
+ MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("USB_AUDIO_TX", MSM_BACKEND_DAI_USB_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
static const struct snd_kcontrol_new mmul17_mixer_controls[] = {
SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
MSM_FRONTEND_DAI_MULTIMEDIA17, 1, 0, msm_routing_get_audio_mixer,
@@ -10605,10 +10760,60 @@
return 0;
}
+static int msm_routing_put_app_type_gain_control(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int j, fe_id, be_id, port_type;
+ int ret = 0;
+ unsigned long copp;
+ struct msm_pcm_routing_bdai_data *bedai;
+ int dir = ucontrol->value.integer.value[0] ? SESSION_TYPE_TX :
+ SESSION_TYPE_RX;
+ int app_type = ucontrol->value.integer.value[1];
+ int gain = (ucontrol->value.integer.value[2] +
+ ucontrol->value.integer.value[3])/2;
+
+ port_type = (dir == SESSION_TYPE_RX) ? MSM_AFE_PORT_TYPE_RX :
+ MSM_AFE_PORT_TYPE_TX;
+
+ mutex_lock(&routing_lock);
+ for (be_id = 0; be_id < MSM_BACKEND_DAI_MAX; be_id++) {
+ if (is_be_dai_extproc(be_id))
+ continue;
+
+ bedai = &msm_bedais[be_id];
+ if (afe_get_port_type(bedai->port_id) != port_type)
+ continue;
+
+ if (!bedai->active)
+ continue;
+
+ for (fe_id = 0; fe_id < MSM_FRONTEND_DAI_MAX; fe_id++) {
+ if (!test_bit(fe_id, &bedai->fe_sessions[0]))
+ continue;
+
+ if (app_type !=
+ fe_dai_app_type_cfg[fe_id][dir][be_id].app_type)
+ continue;
+
+ copp = session_copp_map[fe_id][dir][be_id];
+ for (j = 0; j < MAX_COPPS_PER_PORT; j++) {
+ if (!test_bit(j, &copp))
+ continue;
+ ret |= adm_set_volume(bedai->port_id, j, gain);
+ }
+ }
+ }
+ mutex_unlock(&routing_lock);
+ return ret ? -EINVAL : 0;
+}
+
static const struct snd_kcontrol_new app_type_cfg_controls[] = {
SOC_SINGLE_MULTI_EXT("App Type Config", SND_SOC_NOPM, 0,
0xFFFFFFFF, 0, 128, msm_routing_get_app_type_cfg_control,
msm_routing_put_app_type_cfg_control),
+ SOC_SINGLE_MULTI_EXT("App Type Gain", SND_SOC_NOPM, 0,
+ 0x2000, 0, 4, NULL, msm_routing_put_app_type_gain_control)
};
static int msm_routing_get_lsm_app_type_cfg_control(
@@ -11342,6 +11547,8 @@
SND_SOC_DAPM_AIF_OUT("MM_UL6", "MultiMedia6 Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("MM_UL8", "MultiMedia8 Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("MM_UL9", "MultiMedia9 Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("MM_UL10", "MultiMedia10 Capture", 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("MM_UL16", "MultiMedia16 Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("MM_UL17", "MultiMedia17 Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("MM_UL18", "MultiMedia18 Capture", 0, 0, 0, 0),
SND_SOC_DAPM_AIF_OUT("MM_UL19", "MultiMedia19 Capture", 0, 0, 0, 0),
@@ -12077,6 +12284,10 @@
mmul8_mixer_controls, ARRAY_SIZE(mmul8_mixer_controls)),
SND_SOC_DAPM_MIXER("MultiMedia9 Mixer", SND_SOC_NOPM, 0, 0,
mmul9_mixer_controls, ARRAY_SIZE(mmul9_mixer_controls)),
+ SND_SOC_DAPM_MIXER("MultiMedia10 Mixer", SND_SOC_NOPM, 0, 0,
+ mmul10_mixer_controls, ARRAY_SIZE(mmul10_mixer_controls)),
+ SND_SOC_DAPM_MIXER("MultiMedia16 Mixer", SND_SOC_NOPM, 0, 0,
+ mmul16_mixer_controls, ARRAY_SIZE(mmul16_mixer_controls)),
SND_SOC_DAPM_MIXER("MultiMedia17 Mixer", SND_SOC_NOPM, 0, 0,
mmul17_mixer_controls, ARRAY_SIZE(mmul17_mixer_controls)),
SND_SOC_DAPM_MIXER("MultiMedia18 Mixer", SND_SOC_NOPM, 0, 0,
@@ -12407,6 +12618,10 @@
&ext_ec_ref_mux_ul8),
SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL9 MUX", SND_SOC_NOPM, 0, 0,
&ext_ec_ref_mux_ul9),
+ SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL10 MUX", SND_SOC_NOPM, 0, 0,
+ &ext_ec_ref_mux_ul10),
+ SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL16 MUX", SND_SOC_NOPM, 0, 0,
+ &ext_ec_ref_mux_ul16),
SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL17 MUX", SND_SOC_NOPM, 0, 0,
&ext_ec_ref_mux_ul17),
SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL18 MUX", SND_SOC_NOPM, 0, 0,
@@ -12658,8 +12873,11 @@
{"MultiMedia8 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
{"MultiMedia3 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
{"MultiMedia5 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+ {"MultiMedia10 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+ {"MultiMedia16 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
{"MultiMedia5 Mixer", "SLIM_7_TX", "SLIMBUS_7_TX"},
{"MultiMedia5 Mixer", "SLIM_8_TX", "SLIMBUS_8_TX"},
+ {"MultiMedia10 Mixer", "SLIM_7_TX", "SLIMBUS_7_TX"},
{"MI2S_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
{"MI2S_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
{"MI2S_RX Audio Mixer", "MultiMedia3", "MM_DL3"},
@@ -13226,6 +13444,8 @@
{"MultiMedia2 Mixer", "MI2S_TX", "MI2S_TX"},
{"MultiMedia3 Mixer", "MI2S_TX", "MI2S_TX"},
{"MultiMedia5 Mixer", "MI2S_TX", "MI2S_TX"},
+ {"MultiMedia10 Mixer", "MI2S_TX", "MI2S_TX"},
+ {"MultiMedia16 Mixer", "MI2S_TX", "MI2S_TX"},
{"MultiMedia1 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
{"MultiMedia2 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
{"MultiMedia6 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
@@ -13241,15 +13461,22 @@
{"MultiMedia1 Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
{"MultiMedia3 Mixer", "AUX_PCM_TX", "AUX_PCM_TX"},
{"MultiMedia5 Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+ {"MultiMedia10 Mixer", "AUX_PCM_TX", "AUX_PCM_TX"},
{"MultiMedia1 Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
{"MultiMedia3 Mixer", "SEC_AUX_PCM_TX", "SEC_AUX_PCM_TX"},
{"MultiMedia5 Mixer", "SEC_AUX_PCM_TX", "SEC_AUX_PCM_TX"},
+ {"MultiMedia10 Mixer", "SEC_AUX_PCM_TX", "SEC_AUX_PCM_TX"},
+ {"MultiMedia16 Mixer", "AUX_PCM_TX", "AUX_PCM_TX"},
+ {"MultiMedia16 Mixer", "SEC_AUX_PCM_TX", "SEC_AUX_PCM_TX"},
{"MultiMedia1 Mixer", "TERT_AUXPCM_UL_TX", "TERT_AUX_PCM_TX"},
{"MultiMedia3 Mixer", "TERT_AUX_PCM_TX", "TERT_AUX_PCM_TX"},
{"MultiMedia5 Mixer", "TERT_AUX_PCM_TX", "TERT_AUX_PCM_TX"},
+ {"MultiMedia10 Mixer", "TERT_AUX_PCM_TX", "TERT_AUX_PCM_TX"},
{"MultiMedia1 Mixer", "QUAT_AUXPCM_UL_TX", "QUAT_AUX_PCM_TX"},
{"MultiMedia3 Mixer", "QUAT_AUX_PCM_TX", "QUAT_AUX_PCM_TX"},
{"MultiMedia5 Mixer", "QUAT_AUX_PCM_TX", "QUAT_AUX_PCM_TX"},
+ {"MultiMedia10 Mixer", "QUAT_AUX_PCM_TX", "QUAT_AUX_PCM_TX"},
+ {"MultiMedia16 Mixer", "QUAT_AUX_PCM_TX", "QUAT_AUX_PCM_TX"},
{"MultiMedia2 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
{"MultiMedia2 Mixer", "SLIM_6_TX", "SLIMBUS_6_TX"},
{"MultiMedia2 Mixer", "SLIM_1_TX", "SLIMBUS_1_TX"},
@@ -13261,12 +13488,17 @@
{"MultiMedia6 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
{"MultiMedia3 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
{"MultiMedia5 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+ {"MultiMedia10 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
{"MultiMedia6 Mixer", "INT2_MI2S_TX", "INT2_MI2S_TX"},
{"MultiMedia3 Mixer", "INT2_MI2S_TX", "INT2_MI2S_TX"},
{"MultiMedia5 Mixer", "INT2_MI2S_TX", "INT2_MI2S_TX"},
+ {"MultiMedia10 Mixer", "INT2_MI2S_TX", "INT2_MI2S_TX"},
+ {"MultiMedia16 Mixer", "INT2_MI2S_TX", "INT2_MI2S_TX"},
{"MultiMedia6 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
{"MultiMedia3 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
{"MultiMedia5 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
+ {"MultiMedia10 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
+ {"MultiMedia16 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
{"MultiMedia6 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
{"MultiMedia6 Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
{"MultiMedia6 Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
@@ -13400,6 +13632,14 @@
{"MultiMedia9 Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
{"MultiMedia9 Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+ {"MultiMedia10 Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"},
+ {"MultiMedia10 Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"},
+ {"MultiMedia10 Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"},
+ {"MultiMedia10 Mixer", "TERT_TDM_TX_3", "TERT_TDM_TX_3"},
+ {"MultiMedia10 Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+ {"MultiMedia10 Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+ {"MultiMedia10 Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+ {"MultiMedia10 Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
{"MultiMedia20 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
{"MultiMedia20 Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
{"MultiMedia20 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
@@ -13427,6 +13667,25 @@
{"MultiMedia5 Mixer", "USB_AUDIO_TX", "USB_AUDIO_TX"},
{"MultiMedia6 Mixer", "USB_AUDIO_TX", "USB_AUDIO_TX"},
{"MultiMedia8 Mixer", "USB_AUDIO_TX", "USB_AUDIO_TX"},
+ {"MultiMedia10 Mixer", "USB_AUDIO_TX", "USB_AUDIO_TX"},
+
+ {"MultiMedia16 Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"},
+ {"MultiMedia16 Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"},
+ {"MultiMedia16 Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"},
+ {"MultiMedia16 Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"},
+ {"MultiMedia16 Mixer", "SEC_TDM_TX_0", "SEC_TDM_TX_0"},
+ {"MultiMedia16 Mixer", "SEC_TDM_TX_1", "SEC_TDM_TX_1"},
+ {"MultiMedia16 Mixer", "SEC_TDM_TX_2", "SEC_TDM_TX_2"},
+ {"MultiMedia16 Mixer", "SEC_TDM_TX_3", "SEC_TDM_TX_3"},
+ {"MultiMedia16 Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"},
+ {"MultiMedia16 Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"},
+ {"MultiMedia16 Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"},
+ {"MultiMedia16 Mixer", "TERT_TDM_TX_3", "TERT_TDM_TX_3"},
+ {"MultiMedia16 Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+ {"MultiMedia16 Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+ {"MultiMedia16 Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+ {"MultiMedia16 Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+ {"MultiMedia16 Mixer", "USB_AUDIO_TX", "USB_AUDIO_TX"},
{"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
{"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
@@ -13505,13 +13764,16 @@
{"MultiMedia1 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
{"MultiMedia3 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
{"MultiMedia4 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+ {"MultiMedia10 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
{"MultiMedia17 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
{"MultiMedia18 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
{"MultiMedia19 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
{"MultiMedia5 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
{"MultiMedia8 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+ {"MultiMedia16 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
{"MultiMedia1 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
{"MultiMedia4 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+ {"MultiMedia16 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
{"MultiMedia17 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
{"MultiMedia18 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
{"MultiMedia19 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
@@ -13522,11 +13784,13 @@
{"MultiMedia1 Mixer", "AFE_PCM_TX", "PCM_TX"},
{"MultiMedia3 Mixer", "AFE_PCM_TX", "PCM_TX"},
{"MultiMedia4 Mixer", "AFE_PCM_TX", "PCM_TX"},
+ {"MultiMedia10 Mixer", "AFE_PCM_TX", "PCM_TX"},
{"MultiMedia17 Mixer", "AFE_PCM_TX", "PCM_TX"},
{"MultiMedia18 Mixer", "AFE_PCM_TX", "PCM_TX"},
{"MultiMedia19 Mixer", "AFE_PCM_TX", "PCM_TX"},
{"MultiMedia5 Mixer", "AFE_PCM_TX", "PCM_TX"},
{"MultiMedia8 Mixer", "AFE_PCM_TX", "PCM_TX"},
+ {"MultiMedia16 Mixer", "AFE_PCM_TX", "PCM_TX"},
{"MM_UL1", NULL, "MultiMedia1 Mixer"},
{"MultiMedia2 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
{"MM_UL2", NULL, "MultiMedia2 Mixer"},
@@ -13536,6 +13800,8 @@
{"MM_UL6", NULL, "MultiMedia6 Mixer"},
{"MM_UL8", NULL, "MultiMedia8 Mixer"},
{"MM_UL9", NULL, "MultiMedia9 Mixer"},
+ {"MM_UL10", NULL, "MultiMedia10 Mixer"},
+ {"MM_UL16", NULL, "MultiMedia16 Mixer"},
{"MM_UL17", NULL, "MultiMedia17 Mixer"},
{"MM_UL18", NULL, "MultiMedia18 Mixer"},
{"MM_UL19", NULL, "MultiMedia19 Mixer"},
@@ -13923,6 +14189,16 @@
{"AUDIO_REF_EC_UL9 MUX", "TERT_MI2S_TX", "TERT_MI2S_TX"},
{"AUDIO_REF_EC_UL9 MUX", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+ {"AUDIO_REF_EC_UL10 MUX", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+ {"AUDIO_REF_EC_UL10 MUX", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+ {"AUDIO_REF_EC_UL10 MUX", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+ {"AUDIO_REF_EC_UL10 MUX", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+ {"AUDIO_REF_EC_UL10 MUX", "SLIM_1_TX", "SLIMBUS_1_TX"},
+ {"AUDIO_REF_EC_UL10 MUX", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+ {"AUDIO_REF_EC_UL10 MUX", "QUAT_TDM_RX_0", "QUAT_TDM_RX_0"},
+ {"AUDIO_REF_EC_UL10 MUX", "QUAT_TDM_RX_1", "QUAT_TDM_RX_1"},
+ {"AUDIO_REF_EC_UL10 MUX", "QUAT_TDM_RX_2", "QUAT_TDM_RX_2"},
+ {"AUDIO_REF_EC_UL10 MUX", "TERT_TDM_TX_0", "TERT_TDM_TX_0"},
{"AUDIO_REF_EC_UL17 MUX", "PRI_MI2S_TX", "PRI_MI2S_TX"},
{"AUDIO_REF_EC_UL17 MUX", "SEC_MI2S_TX", "SEC_MI2S_TX"},
{"AUDIO_REF_EC_UL17 MUX", "TERT_MI2S_TX", "TERT_MI2S_TX"},
@@ -13946,6 +14222,8 @@
{"MM_UL6", NULL, "AUDIO_REF_EC_UL6 MUX"},
{"MM_UL8", NULL, "AUDIO_REF_EC_UL8 MUX"},
{"MM_UL9", NULL, "AUDIO_REF_EC_UL9 MUX"},
+ {"MM_UL10", NULL, "AUDIO_REF_EC_UL10 MUX"},
+ {"MM_UL16", NULL, "AUDIO_REF_EC_UL16 MUX"},
{"MM_UL17", NULL, "AUDIO_REF_EC_UL17 MUX"},
{"MM_UL18", NULL, "AUDIO_REF_EC_UL18 MUX"},
{"MM_UL19", NULL, "AUDIO_REF_EC_UL19 MUX"},
diff --git a/sound/soc/msm/qdsp6v2/q6adm.c b/sound/soc/msm/qdsp6v2/q6adm.c
index 1590605..46bc540 100644
--- a/sound/soc/msm/qdsp6v2/q6adm.c
+++ b/sound/soc/msm/qdsp6v2/q6adm.c
@@ -2388,7 +2388,8 @@
struct adm_cmd_device_open_v5 open;
struct adm_cmd_device_open_v6 open_v6;
int ret = 0;
- int port_idx, copp_idx, flags;
+ int port_idx, flags;
+ int copp_idx = -1;
int tmp_port = q6audio_get_port_id(port_id);
pr_debug("%s:port %#x path:%d rate:%d mode:%d perf_mode:%d,topo_id %d\n",
@@ -2442,8 +2443,17 @@
(topology == VPM_TX_DM_RFECNS_COPP_TOPOLOGY))
rate = 16000;
- copp_idx = adm_get_idx_if_copp_exists(port_idx, topology, perf_mode,
- rate, bit_width, app_type);
+ /*
+ * Routing driver reuses the same adm for streams with the same
+ * app_type, sample_rate etc.
+ * This isn't allowed for ULL streams as per the DSP interface
+ */
+ if (perf_mode != ULTRA_LOW_LATENCY_PCM_MODE)
+ copp_idx = adm_get_idx_if_copp_exists(port_idx, topology,
+ perf_mode,
+ rate, bit_width,
+ app_type);
+
if (copp_idx < 0) {
copp_idx = adm_get_next_available_copp(port_idx);
if (copp_idx >= MAX_COPPS_PER_PORT) {
diff --git a/sound/soc/msm/qdsp6v2/q6afe.c b/sound/soc/msm/qdsp6v2/q6afe.c
index ebb8eff..e1ce947 100644
--- a/sound/soc/msm/qdsp6v2/q6afe.c
+++ b/sound/soc/msm/qdsp6v2/q6afe.c
@@ -2125,6 +2125,7 @@
return ret;
}
+EXPORT_SYMBOL(afe_set_config);
/*
* afe_clear_config - If SSR happens ADSP loses AFE configs, let AFE driver know
@@ -2135,6 +2136,7 @@
{
clear_bit(config, &afe_configured_cmd);
}
+EXPORT_SYMBOL(afe_clear_config);
bool afe_has_config(enum afe_config_type config)
{
@@ -5749,6 +5751,14 @@
return ret;
}
+/**
+ * afe_set_lpass_clk_cfg - Set AFE clk config
+ *
+ * @index: port index
+ * @cfg: pointer to clk set struct
+ *
+ * Returns 0 on success, appropriate error code otherwise
+ */
int afe_set_lpass_clk_cfg(int index, struct afe_clk_set *cfg)
{
struct afe_lpass_clk_config_command_v2 clk_cfg;
@@ -5829,7 +5839,16 @@
mutex_unlock(&this_afe.afe_cmd_lock);
return ret;
}
+EXPORT_SYMBOL(afe_set_lpass_clk_cfg);
+/**
+ * afe_set_lpass_clock_v2 - Enable AFE lpass clock
+ *
+ * @port_id: AFE port id
+ * @cfg: pointer to clk set struct
+ *
+ * Returns 0 on success, appropriate error code otherwise
+ */
int afe_set_lpass_clock_v2(u16 port_id, struct afe_clk_set *cfg)
{
int index = 0;
@@ -5855,6 +5874,7 @@
return ret;
}
+EXPORT_SYMBOL(afe_set_lpass_clock_v2);
int afe_set_lpass_internal_digital_codec_clock(u16 port_id,
struct afe_digital_clk_cfg *cfg)
diff --git a/sound/soc/msm/qdsp6v2/q6core.c b/sound/soc/msm/qdsp6v2/q6core.c
index f6675a2..4c3a3a1 100644
--- a/sound/soc/msm/qdsp6v2/q6core.c
+++ b/sound/soc/msm/qdsp6v2/q6core.c
@@ -21,6 +21,8 @@
#include <linux/qdsp6v2/apr.h>
#include <sound/q6core.h>
#include <sound/audio_cal_utils.h>
+#include <sound/adsp_err.h>
+#include <sound/apr_audio-v2.h>
#define TIMEOUT_MS 1000
/*
@@ -36,16 +38,30 @@
CORE_MAX_CAL
};
+enum ver_query_status {
+ VER_QUERY_UNATTEMPTED,
+ VER_QUERY_UNSUPPORTED,
+ VER_QUERY_SUPPORTED
+};
+
+struct q6core_avcs_ver_info {
+ enum ver_query_status status;
+ struct avcs_fwk_ver_info ver_info;
+};
+
struct q6core_str {
struct apr_svc *core_handle_q;
wait_queue_head_t bus_bw_req_wait;
wait_queue_head_t cmd_req_wait;
+ wait_queue_head_t avcs_fwk_ver_req_wait;
u32 bus_bw_resp_received;
enum cmd_flags {
FLAG_NONE,
FLAG_CMDRSP_LICENSE_RESULT
} cmd_resp_received_flag;
+ u32 avcs_fwk_ver_resp_received;
struct mutex cmd_lock;
+ struct mutex ver_lock;
union {
struct avcs_cmdrsp_get_license_validation_result
cmdrsp_license_result;
@@ -54,6 +70,7 @@
struct cal_type_data *cal_data[CORE_MAX_CAL];
uint32_t mem_map_cal_handle;
int32_t adsp_status;
+ struct q6core_avcs_ver_info q6core_avcs_ver_info;
};
static struct q6core_str q6core_lcl;
@@ -65,9 +82,61 @@
};
static struct generic_get_data_ *generic_get_data;
+static int parse_fwk_version_info(uint32_t *payload)
+{
+ size_t fwk_ver_size;
+ size_t svc_size;
+ int num_services;
+ int ret = 0;
+
+ pr_debug("%s: Payload info num services %d\n",
+ __func__, payload[4]);
+ /*
+	 * payload[4] is the number of services running on DSP
+ * Based on this info, we copy the payload into core
+ * avcs version info structure.
+ */
+ num_services = payload[4];
+ q6core_lcl.q6core_avcs_ver_info.ver_info.avcs_fwk_version.
+ num_services = num_services;
+ if (num_services > VSS_MAX_AVCS_NUM_SERVICES) {
+ pr_err("%s: num_services: %d greater than max services: %d\n",
+ __func__, num_services, VSS_MAX_AVCS_NUM_SERVICES);
+ ret = -EINVAL;
+ goto done;
+ }
+ fwk_ver_size = sizeof(struct avcs_get_fwk_version);
+ svc_size = num_services * sizeof(struct avs_svc_api_info);
+ /*
+ * Dynamically allocate memory for all
+ * the services based on num_services
+ */
+ q6core_lcl.q6core_avcs_ver_info.ver_info.services = NULL;
+ q6core_lcl.q6core_avcs_ver_info.ver_info.services =
+ kzalloc(svc_size, GFP_ATOMIC);
+ if (q6core_lcl.q6core_avcs_ver_info.ver_info.services == NULL) {
+ ret = -ENOMEM;
+ goto done;
+ }
+ /*
+ * memcpy is done twice because the memory allocated for
+ * q6core_lcl.q6core_avcs_ver_info.ver_info is not
+ * contiguous.
+ */
+ memcpy(&q6core_lcl.q6core_avcs_ver_info.ver_info,
+ (uint8_t *)payload, fwk_ver_size);
+ memcpy(q6core_lcl.q6core_avcs_ver_info.ver_info.services,
+ (uint8_t *)&payload[sizeof(struct avcs_get_fwk_version)/
+ sizeof(uint32_t)], svc_size);
+ ret = 0;
+done:
+ return ret;
+}
+
static int32_t aprv2_core_fn_q(struct apr_client_data *data, void *priv)
{
uint32_t *payload1;
+ int ret = 0;
if (data == NULL) {
pr_err("%s: data argument is null\n", __func__);
@@ -118,6 +187,17 @@
q6core_lcl.bus_bw_resp_received = 1;
wake_up(&q6core_lcl.bus_bw_req_wait);
break;
+ case AVCS_CMD_GET_FWK_VERSION:
+ pr_debug("%s: Cmd = AVCS_CMD_GET_FWK_VERSION status[%s]\n",
+ __func__, adsp_err_get_err_str(payload1[1]));
+ /* ADSP status to match Linux error standard */
+ q6core_lcl.adsp_status = -payload1[1];
+ if (payload1[1] == ADSP_EUNSUPPORTED)
+ q6core_lcl.q6core_avcs_ver_info.status =
+ VER_QUERY_UNSUPPORTED;
+ q6core_lcl.avcs_fwk_ver_resp_received = 1;
+ wake_up(&q6core_lcl.avcs_fwk_ver_req_wait);
+ break;
default:
pr_err("%s: Invalid cmd rsp[0x%x][0x%x] opcode %d\n",
__func__,
@@ -130,7 +210,7 @@
case RESET_EVENTS:{
pr_debug("%s: Reset event received in Core service\n",
__func__);
- apr_reset(q6core_lcl.core_handle_q);
+	/* No apr_reset() here: the cached data remains valid after SSR */
q6core_lcl.core_handle_q = NULL;
break;
}
@@ -161,6 +241,18 @@
q6core_lcl.cmd_resp_received_flag = FLAG_CMDRSP_LICENSE_RESULT;
wake_up(&q6core_lcl.cmd_req_wait);
break;
+ case AVCS_CMDRSP_GET_FWK_VERSION:
+ pr_debug("%s: Received AVCS_CMDRSP_GET_FWK_VERSION\n",
+ __func__);
+ payload1 = data->payload;
+ q6core_lcl.q6core_avcs_ver_info.status = VER_QUERY_SUPPORTED;
+ q6core_lcl.avcs_fwk_ver_resp_received = 1;
+ ret = parse_fwk_version_info(payload1);
+ if (ret < 0)
+ pr_err("%s: Failed to parse payload:%d\n",
+ __func__, ret);
+ wake_up(&q6core_lcl.avcs_fwk_ver_req_wait);
+ break;
default:
pr_err("%s: Message id from adsp core svc: 0x%x\n",
__func__, data->opcode);
@@ -217,6 +309,157 @@
return NULL;
}
+static int q6core_send_get_avcs_fwk_ver_cmd(void)
+{
+ struct apr_hdr avcs_ver_cmd;
+ int ret;
+
+ avcs_ver_cmd.hdr_field =
+ APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ avcs_ver_cmd.pkt_size = sizeof(struct apr_hdr);
+ avcs_ver_cmd.src_port = 0;
+ avcs_ver_cmd.dest_port = 0;
+ avcs_ver_cmd.token = 0;
+ avcs_ver_cmd.opcode = AVCS_CMD_GET_FWK_VERSION;
+
+ q6core_lcl.adsp_status = 0;
+ q6core_lcl.avcs_fwk_ver_resp_received = 0;
+
+ ret = apr_send_pkt(q6core_lcl.core_handle_q,
+ (uint32_t *) &avcs_ver_cmd);
+ if (ret < 0) {
+ pr_err("%s: failed to send apr packet, ret=%d\n", __func__,
+ ret);
+ goto done;
+ }
+
+ ret = wait_event_timeout(q6core_lcl.avcs_fwk_ver_req_wait,
+ (q6core_lcl.avcs_fwk_ver_resp_received == 1),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout for AVCS fwk version info\n",
+ __func__);
+ ret = -ETIMEDOUT;
+ goto done;
+ }
+
+ if (q6core_lcl.adsp_status < 0) {
+ /*
+ * adsp_err_get_err_str expects a positive value but we store
+ * the DSP error as negative to match the Linux error standard.
+ * Pass in the negated value so adsp_err_get_err_str returns
+ * the correct string.
+ */
+ pr_err("%s: DSP returned error[%s]\n", __func__,
+ adsp_err_get_err_str(-q6core_lcl.adsp_status));
+ ret = adsp_err_get_lnx_err_code(q6core_lcl.adsp_status);
+ goto done;
+ }
+
+ ret = 0;
+
+done:
+ return ret;
+}
+
+int q6core_get_service_version(uint32_t service_id,
+ struct avcs_fwk_ver_info *ver_info,
+ size_t size)
+{
+ int i;
+ uint32_t num_services;
+ size_t svc_size;
+
+ svc_size = q6core_get_avcs_service_size(service_id);
+ if (svc_size != size) {
+ pr_err("%s: Expected size: %ld, Provided size: %ld",
+ __func__, svc_size, size);
+ return -EINVAL;
+ }
+
+ num_services =
+ q6core_lcl.q6core_avcs_ver_info.ver_info.
+ avcs_fwk_version.num_services;
+
+ if (ver_info == NULL) {
+ pr_err("%s: NULL parameter ver_info\n", __func__);
+ return -EINVAL;
+ }
+
+ memcpy(ver_info, &q6core_lcl.q6core_avcs_ver_info.
+ ver_info.avcs_fwk_version, sizeof(struct avcs_get_fwk_version));
+
+ if (service_id == AVCS_SERVICE_ID_ALL) {
+ memcpy(&ver_info->services[0], &q6core_lcl.
+ q6core_avcs_ver_info.ver_info.services[0],
+ (num_services * sizeof(struct avs_svc_api_info)));
+ } else {
+ for (i = 0; i < num_services; i++) {
+ if (q6core_lcl.q6core_avcs_ver_info.
+ ver_info.services[i].service_id == service_id) {
+ memcpy(&ver_info->services[0],
+ &q6core_lcl.q6core_avcs_ver_info.
+ ver_info.services[i], size);
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(q6core_get_service_version);
+
+size_t q6core_get_avcs_service_size(uint32_t service_id)
+{
+ int ret = 0;
+ uint32_t num_services;
+
+ num_services =
+ q6core_lcl.q6core_avcs_ver_info.ver_info.
+ avcs_fwk_version.num_services;
+
+ mutex_lock(&(q6core_lcl.ver_lock));
+ pr_debug("%s: q6core_avcs_ver_info.status(%d)\n", __func__,
+ q6core_lcl.q6core_avcs_ver_info.status);
+
+ switch (q6core_lcl.q6core_avcs_ver_info.status) {
+ case VER_QUERY_SUPPORTED:
+ pr_debug("%s: AVCS FWK version query already attempted\n",
+ __func__);
+ ret = num_services * sizeof(struct avs_svc_api_info);
+ break;
+ case VER_QUERY_UNSUPPORTED:
+ ret = -EOPNOTSUPP;
+ break;
+ case VER_QUERY_UNATTEMPTED:
+ pr_debug("%s: Attempting AVCS FWK version query\n", __func__);
+ if (q6core_is_adsp_ready()) {
+ ret = q6core_send_get_avcs_fwk_ver_cmd();
+ if (ret == 0)
+ ret = num_services *
+ sizeof(struct avs_svc_api_info);
+ } else {
+ pr_err("%s: ADSP is not ready to query version\n",
+ __func__);
+ ret = -ENODEV;
+ }
+ break;
+ default:
+ pr_err("%s: Invalid version query status %d\n", __func__,
+ q6core_lcl.q6core_avcs_ver_info.status);
+ ret = -EINVAL;
+ break;
+ }
+ mutex_unlock(&(q6core_lcl.ver_lock));
+
+ if (service_id != AVCS_SERVICE_ID_ALL)
+ return sizeof(struct avs_svc_api_info);
+
+ return ret;
+}
+EXPORT_SYMBOL(q6core_get_avcs_service_size);
+
int32_t core_set_license(uint32_t key, uint32_t module_id)
{
struct avcs_cmd_set_license *cmd_setl = NULL;
@@ -381,6 +624,11 @@
return rc;
}
+/**
+ * q6core_is_adsp_ready - check adsp ready status
+ *
+ * Returns true if adsp is ready otherwise returns false
+ */
bool q6core_is_adsp_ready(void)
{
int rc = 0;
@@ -419,7 +667,7 @@
mutex_unlock(&(q6core_lcl.cmd_lock));
return ret;
}
-
+EXPORT_SYMBOL(q6core_is_adsp_ready);
static int q6core_map_memory_regions(phys_addr_t *buf_add, uint32_t mempool_id,
uint32_t *bufsz, uint32_t bufcnt, uint32_t *map_handle)
@@ -822,18 +1070,16 @@
static int __init core_init(void)
{
+ memset(&q6core_lcl, 0, sizeof(struct q6core_str));
init_waitqueue_head(&q6core_lcl.bus_bw_req_wait);
- q6core_lcl.bus_bw_resp_received = 0;
-
- q6core_lcl.core_handle_q = NULL;
-
init_waitqueue_head(&q6core_lcl.cmd_req_wait);
+ init_waitqueue_head(&q6core_lcl.avcs_fwk_ver_req_wait);
q6core_lcl.cmd_resp_received_flag = FLAG_NONE;
mutex_init(&q6core_lcl.cmd_lock);
- q6core_lcl.mem_map_cal_handle = 0;
- q6core_lcl.adsp_status = 0;
+ mutex_init(&q6core_lcl.ver_lock);
q6core_init_cal_data();
+
return 0;
}
module_init(core_init);
@@ -841,6 +1087,7 @@
static void __exit core_exit(void)
{
mutex_destroy(&q6core_lcl.cmd_lock);
+ mutex_destroy(&q6core_lcl.ver_lock);
q6core_delete_cal_data();
}
module_exit(core_exit);
diff --git a/sound/soc/msm/qdsp6v2/q6voice.c b/sound/soc/msm/qdsp6v2/q6voice.c
index 15c9e13..0d444d0 100644
--- a/sound/soc/msm/qdsp6v2/q6voice.c
+++ b/sound/soc/msm/qdsp6v2/q6voice.c
@@ -21,6 +21,7 @@
#include <soc/qcom/socinfo.h>
#include <linux/qdsp6v2/apr_tal.h>
+#include "sound/q6core.h"
#include "sound/q6audio-v2.h"
#include "sound/apr_audio-v2.h"
#include "sound/q6afe-v2.h"
@@ -33,6 +34,9 @@
#define CMD_STATUS_SUCCESS 0
#define CMD_STATUS_FAIL 1
+#define NUM_CHANNELS_MONO 1
+#define NUM_CHANNELS_STEREO 2
+#define CVP_VERSION_2 2
enum {
VOC_TOKEN_NONE,
@@ -83,6 +87,11 @@
static int voice_send_cvp_media_format_cmd(struct voice_data *v,
uint32_t param_type);
static int voice_send_cvp_topology_commit_cmd(struct voice_data *v);
+static int voice_send_cvp_channel_info_cmd(struct voice_data *v);
+static int voice_send_cvp_channel_info_v2(struct voice_data *v,
+ uint32_t param_type);
+static int voice_get_avcs_version_per_service(uint32_t service_id);
+
static int voice_cvs_stop_playback(struct voice_data *v);
static int voice_cvs_start_playback(struct voice_data *v);
@@ -3793,6 +3802,295 @@
return result;
}
+static int voice_send_cvp_channel_info_v2(struct voice_data *v,
+ uint32_t param_type)
+{
+ int ret;
+ struct cvp_set_channel_info_cmd_v2 cvp_set_channel_info_cmd;
+ void *apr_cvp;
+ u16 cvp_handle;
+ struct vss_icommon_param_data_channel_info_v2_t
+ *channel_info_param_data =
+ &cvp_set_channel_info_cmd.
+ cvp_set_ch_info_param_v2.param_data;
+ struct vss_param_vocproc_dev_channel_info_t *channel_info =
+ &channel_info_param_data->channel_info;
+
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ apr_cvp = common.apr_q6_cvp;
+ if (!apr_cvp) {
+ pr_err("%s: apr_cvp is NULL\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ cvp_handle = voice_get_cvp_handle(v);
+ memset(&cvp_set_channel_info_cmd, 0, sizeof(cvp_set_channel_info_cmd));
+
+ cvp_set_channel_info_cmd.hdr.hdr_field =
+ APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ cvp_set_channel_info_cmd.hdr.pkt_size =
+ APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(cvp_set_channel_info_cmd) - APR_HDR_SIZE);
+ cvp_set_channel_info_cmd.hdr.src_svc = 0;
+ cvp_set_channel_info_cmd.hdr.src_domain = APR_DOMAIN_APPS;
+ cvp_set_channel_info_cmd.hdr.src_port =
+ voice_get_idx_for_session(v->session_id);
+ cvp_set_channel_info_cmd.hdr.dest_svc = 0;
+ cvp_set_channel_info_cmd.hdr.dest_domain = APR_DOMAIN_ADSP;
+ cvp_set_channel_info_cmd.hdr.dest_port = cvp_handle;
+ cvp_set_channel_info_cmd.hdr.token = 0;
+ cvp_set_channel_info_cmd.hdr.opcode = VSS_ICOMMON_CMD_SET_PARAM_V2;
+
+ cvp_set_channel_info_cmd.cvp_set_ch_info_param_v2.mem_size =
+ sizeof(struct vss_icommon_param_data_channel_info_v2_t);
+
+ channel_info_param_data->module_id = VSS_MODULE_CVD_GENERIC;
+ channel_info_param_data->param_size =
+ sizeof(struct vss_param_vocproc_dev_channel_info_t);
+
+ /* Device specific data */
+ switch (param_type) {
+ case RX_PATH:
+ channel_info_param_data->param_id =
+ VSS_PARAM_VOCPROC_RX_CHANNEL_INFO;
+ channel_info->num_channels = v->dev_rx.no_of_channels;
+ channel_info->bits_per_sample = v->dev_rx.bits_per_sample;
+ break;
+
+ case TX_PATH:
+ channel_info_param_data->param_id =
+ VSS_PARAM_VOCPROC_TX_CHANNEL_INFO;
+ channel_info->num_channels = v->dev_tx.no_of_channels;
+ channel_info->bits_per_sample = v->dev_tx.bits_per_sample;
+ break;
+
+ case EC_REF_PATH:
+ channel_info_param_data->param_id =
+ VSS_PARAM_VOCPROC_EC_REF_CHANNEL_INFO;
+ channel_info->num_channels = v->dev_rx.no_of_channels;
+ channel_info->bits_per_sample = v->dev_rx.bits_per_sample;
+ break;
+ default:
+ pr_err("%s: Invalid param type\n",
+ __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if (channel_info->num_channels == NUM_CHANNELS_MONO) {
+ channel_info->channel_mapping[0] = PCM_CHANNEL_FC;
+ } else if (channel_info->num_channels == NUM_CHANNELS_STEREO) {
+ channel_info->channel_mapping[0] = PCM_CHANNEL_FL;
+ channel_info->channel_mapping[1] = PCM_CHANNEL_FR;
+ } else {
+ pr_err("%s: Unsupported num channels: %d\n",
+ __func__, channel_info->num_channels);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ v->cvp_state = CMD_STATUS_FAIL;
+ v->async_err = 0;
+ ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_set_channel_info_cmd);
+ if (ret < 0) {
+ pr_err("%s: Failed to send VSS_ICOMMON_CMD_SET_PARAM_V2\n",
+ __func__);
+ goto done;
+ }
+
+ ret = wait_event_timeout(v->cvp_wait,
+ (v->cvp_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ ret = -ETIMEDOUT;
+ goto done;
+ }
+
+ if (v->async_err > 0) {
+ pr_err("%s: DSP returned error[%s] handle = %d\n", __func__,
+ adsp_err_get_err_str(v->async_err), cvp_handle);
+ ret = adsp_err_get_lnx_err_code(v->async_err);
+ goto done;
+ }
+ ret = 0;
+done:
+ return ret;
+}
+
+static int voice_send_cvp_channel_info_cmd(struct voice_data *v)
+{
+ int ret = 0;
+
+ ret = voice_send_cvp_channel_info_v2(v, RX_PATH);
+ if (ret < 0) {
+ pr_err("%s: Error in sending cvp_channel_info RX: %d\n",
+ __func__, ret);
+ goto done;
+ }
+
+ ret = voice_send_cvp_channel_info_v2(v, TX_PATH);
+ if (ret < 0) {
+ pr_err("%s: Error in sending cvp_channel_info TX: %d\n",
+ __func__, ret);
+ goto done;
+ }
+
+ ret = voice_send_cvp_channel_info_v2(v, EC_REF_PATH);
+ if (ret < 0) {
+ pr_err("%s: Error in sending cvp_channel_info EC Ref: %d\n",
+ __func__, ret);
+ goto done;
+ }
+done:
+ return ret;
+}
+
+static int voice_send_cvp_mfc_config_v2(struct voice_data *v)
+{
+ int ret;
+ struct cvp_set_mfc_config_cmd_v2 cvp_set_mfc_config_cmd;
+ void *apr_cvp;
+ u16 cvp_handle;
+ struct vss_icommon_param_data_mfc_config_v2_t *cvp_config_param_data =
+ &cvp_set_mfc_config_cmd.cvp_set_mfc_param_v2.param_data;
+ struct vss_param_mfc_config_info_t *mfc_config_info =
+ &cvp_config_param_data->mfc_config_info;
+
+ if (v == NULL) {
+ pr_err("%s: v is NULL\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ apr_cvp = common.apr_q6_cvp;
+ if (!apr_cvp) {
+ pr_err("%s: apr_cvp is NULL\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ cvp_handle = voice_get_cvp_handle(v);
+ memset(&cvp_set_mfc_config_cmd, 0, sizeof(cvp_set_mfc_config_cmd));
+
+ cvp_set_mfc_config_cmd.hdr.hdr_field =
+ APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ cvp_set_mfc_config_cmd.hdr.pkt_size =
+ APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(cvp_set_mfc_config_cmd) - APR_HDR_SIZE);
+ cvp_set_mfc_config_cmd.hdr.src_svc = 0;
+ cvp_set_mfc_config_cmd.hdr.src_domain = APR_DOMAIN_APPS;
+ cvp_set_mfc_config_cmd.hdr.src_port =
+ voice_get_idx_for_session(v->session_id);
+ cvp_set_mfc_config_cmd.hdr.dest_svc = 0;
+ cvp_set_mfc_config_cmd.hdr.dest_domain = APR_DOMAIN_ADSP;
+ cvp_set_mfc_config_cmd.hdr.dest_port = cvp_handle;
+ cvp_set_mfc_config_cmd.hdr.token = 0;
+ cvp_set_mfc_config_cmd.hdr.opcode = VSS_ICOMMON_CMD_SET_PARAM_V2;
+ cvp_set_mfc_config_cmd.cvp_set_mfc_param_v2.mem_size =
+ sizeof(struct vss_icommon_param_data_mfc_config_v2_t);
+
+ cvp_config_param_data->module_id = AUDPROC_MODULE_ID_MFC;
+ cvp_config_param_data->param_id =
+ AUDPROC_PARAM_ID_MFC_OUTPUT_MEDIA_FORMAT;
+ cvp_config_param_data->param_size =
+ sizeof(struct vss_param_mfc_config_info_t);
+
+ mfc_config_info->num_channels = v->dev_rx.no_of_channels;
+ mfc_config_info->bits_per_sample = 16;
+ mfc_config_info->sample_rate = v->dev_rx.sample_rate;
+
+ if (mfc_config_info->num_channels == NUM_CHANNELS_MONO) {
+ mfc_config_info->channel_type[0] = PCM_CHANNEL_FC;
+ } else if (mfc_config_info->num_channels == NUM_CHANNELS_STEREO) {
+ mfc_config_info->channel_type[0] = PCM_CHANNEL_FL;
+ mfc_config_info->channel_type[1] = PCM_CHANNEL_FR;
+ } else {
+ pr_err("%s: Unsupported num channels: %d\n",
+ __func__, mfc_config_info->num_channels);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ v->cvp_state = CMD_STATUS_FAIL;
+ v->async_err = 0;
+ ret = apr_send_pkt(apr_cvp, (uint32_t *)&cvp_set_mfc_config_cmd);
+ if (ret < 0) {
+ pr_err("%s: Failed to send VSS_ICOMMON_CMD_SET_PARAM_V2 %d\n",
+ __func__, ret);
+ goto done;
+ }
+ ret = wait_event_timeout(v->cvp_wait,
+ (v->cvp_state == CMD_STATUS_SUCCESS),
+ msecs_to_jiffies(TIMEOUT_MS));
+
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ ret = -ETIMEDOUT;
+ goto done;
+ }
+
+ if (v->async_err > 0) {
+ pr_err("%s: DSP returned error[%s] handle = %d\n", __func__,
+ adsp_err_get_err_str(v->async_err), cvp_handle);
+ ret = adsp_err_get_lnx_err_code(v->async_err);
+ goto done;
+ }
+ ret = 0;
+done:
+ return ret;
+}
+
+static int voice_send_cvp_mfc_config_cmd(struct voice_data *v)
+{
+ int ret = 0;
+
+ if (common.cvp_version >= CVP_VERSION_2) {
+ ret = voice_send_cvp_mfc_config_v2(v);
+ } else {
+ pr_warn("%s: CVP Version not supported\n", __func__);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int voice_get_avcs_version_per_service(uint32_t service_id)
+{
+ int ret = 0;
+ size_t svc_size;
+ struct avcs_fwk_ver_info ver_info = {{0}, NULL};
+
+ if (service_id == AVCS_SERVICE_ID_ALL) {
+ pr_err("%s: Invalid service id: %d", __func__,
+ AVCS_SERVICE_ID_ALL);
+ return -EINVAL;
+ }
+
+ svc_size = sizeof(struct avs_svc_api_info);
+ ver_info.services = kzalloc(svc_size, GFP_KERNEL);
+ if (ver_info.services == NULL)
+ return -ENOMEM;
+
+ ret = q6core_get_service_version(service_id, &ver_info, svc_size);
+ if (ret < 0)
+ goto done;
+
+ ret = ver_info.services[0].api_version;
+ common.is_avcs_version_queried = true;
+done:
+ kfree(ver_info.services);
+ return ret;
+}
+
static int voice_setup_vocproc(struct voice_data *v)
{
int ret = 0;
@@ -3803,6 +4101,18 @@
goto fail;
}
+ if (common.is_avcs_version_queried == false)
+ common.cvp_version = voice_get_avcs_version_per_service(
+ APRV2_IDS_SERVICE_ID_ADSP_CVP_V);
+
+ if (common.cvp_version < 0) {
+ pr_err("%s: Invalid CVP version %d\n",
+ __func__, common.cvp_version);
+ ret = -EINVAL;
+ goto fail;
+ }
+ pr_debug("%s: CVP Version %d\n", __func__, common.cvp_version);
+
ret = voice_send_cvp_media_fmt_info_cmd(v);
if (ret < 0) {
pr_err("%s: Set media format info failed err:%d\n", __func__,
@@ -3817,6 +4127,15 @@
goto fail;
}
+ /* Send MFC config only when the no of channels are more than 1 */
+ if (v->dev_rx.no_of_channels > NUM_CHANNELS_MONO) {
+ ret = voice_send_cvp_mfc_config_cmd(v);
+ if (ret < 0) {
+ pr_warn("%s: Set mfc config failed err:%d\n",
+ __func__, ret);
+ }
+ }
+
voice_send_cvs_register_cal_cmd(v);
voice_send_cvp_register_dev_cfg_cmd(v);
voice_send_cvp_register_cal_cmd(v);
@@ -3962,11 +4281,18 @@
static int voice_send_cvp_media_fmt_info_cmd(struct voice_data *v)
{
- int ret;
+ int ret = 0;
- ret = voice_send_cvp_device_channels_cmd(v);
- if (ret < 0)
+ if (common.cvp_version < CVP_VERSION_2)
+ ret = voice_send_cvp_device_channels_cmd(v);
+ else
+ ret = voice_send_cvp_channel_info_cmd(v);
+
+ if (ret < 0) {
+ pr_err("%s: Set channel info failed err: %d\n", __func__,
+ ret);
goto done;
+ }
if (voice_get_cvd_int_version(common.cvd_version) >=
CVD_INT_VERSION_2_3) {
@@ -3994,7 +4320,7 @@
void *apr_cvp;
u16 cvp_handle;
struct vss_icommon_param_data_t *media_fmt_param_data =
- &cvp_set_media_format_cmd.cvp_set_param_v2.param_data;
+ &cvp_set_media_format_cmd.cvp_set_media_param_v2.param_data;
struct vss_param_endpoint_media_format_info_t *media_fmt_info =
&media_fmt_param_data->media_format_info;
@@ -4032,7 +4358,7 @@
cvp_set_media_format_cmd.hdr.opcode = VSS_ICOMMON_CMD_SET_PARAM_V2;
/* Fill param data */
- cvp_set_media_format_cmd.cvp_set_param_v2.mem_size =
+ cvp_set_media_format_cmd.cvp_set_media_param_v2.mem_size =
sizeof(struct vss_icommon_param_data_t);
media_fmt_param_data->module_id = VSS_MODULE_CVD_GENERIC;
media_fmt_param_data->param_size =
@@ -6197,6 +6523,15 @@
goto done;
}
+ /* Send MFC config only when the no of channels are > 1 */
+ if (v->dev_rx.no_of_channels > NUM_CHANNELS_MONO) {
+ ret = voice_send_cvp_mfc_config_cmd(v);
+ if (ret < 0) {
+ pr_warn("%s: Set mfc config failed err: %d\n",
+ __func__, ret);
+ }
+ }
+
voice_send_cvp_register_dev_cfg_cmd(v);
voice_send_cvp_register_cal_cmd(v);
voice_send_cvp_register_vol_cal_cmd(v);
@@ -7054,7 +7389,8 @@
case VSS_ICOMMON_CMD_SET_PARAM_V2:
switch (data->token) {
case VOC_SET_MEDIA_FORMAT_PARAM_TOKEN:
- pr_debug("%s: VSS_ICOMMON_CMD_SET_PARAM_V2 called by voice_send_cvp_media_format_cmd\n",
+ case VOC_GENERIC_SET_PARAM_TOKEN:
+ pr_debug("%s: VSS_ICOMMON_CMD_SET_PARAM_V2 called\n",
__func__);
v->cvp_state = CMD_STATUS_SUCCESS;
v->async_err = ptr[1];
@@ -8566,7 +8902,8 @@
common.default_vol_step_val = 0;
common.default_vol_ramp_duration_ms = DEFAULT_VOLUME_RAMP_DURATION;
common.default_mute_ramp_duration_ms = DEFAULT_MUTE_RAMP_DURATION;
-
+ common.cvp_version = 0;
+ common.is_avcs_version_queried = false;
/* Initialize EC Ref media format info */
common.ec_ref_ext = false;
common.ec_media_fmt_info.port_id = AFE_PORT_INVALID;
diff --git a/sound/soc/msm/qdsp6v2/q6voice.h b/sound/soc/msm/qdsp6v2/q6voice.h
index 74d80be..db48091 100644
--- a/sound/soc/msm/qdsp6v2/q6voice.h
+++ b/sound/soc/msm/qdsp6v2/q6voice.h
@@ -124,7 +124,7 @@
};
enum {
- VOC_NO_SET_PARAM_TOKEN = 0,
+ VOC_GENERIC_SET_PARAM_TOKEN = 0,
VOC_RTAC_SET_PARAM_TOKEN,
VOC_SET_MEDIA_FORMAT_PARAM_TOKEN,
VOC_SET_PARAM_TOKEN_MAX
@@ -239,6 +239,19 @@
uint8_t channel_mapping[VSS_NUM_CHANNELS_MAX];
} __packed;
+struct vss_param_vocproc_dev_channel_info_t {
+ uint32_t num_channels;
+ uint32_t bits_per_sample;
+ uint8_t channel_mapping[VSS_NUM_CHANNELS_MAX];
+} __packed;
+
+struct vss_param_mfc_config_info_t {
+ uint32_t sample_rate;
+ uint16_t bits_per_sample;
+ uint16_t num_channels;
+ uint16_t channel_type[VSS_NUM_CHANNELS_MAX];
+} __packed;
+
struct vss_icommon_param_data_t {
/* Valid ID of the module. */
uint32_t module_id;
@@ -260,6 +273,88 @@
};
} __packed;
+struct vss_icommon_param_data_channel_info_v2_t {
+ /* Valid ID of the module. */
+ uint32_t module_id;
+ /* Valid ID of the parameter. */
+ uint32_t param_id;
+ /*
+ * Data size of the structure relating to the param_id/module_id
+ * combination in uint8_t bytes.
+ */
+ uint16_t param_size;
+ /* This field must be set to zero. */
+ uint16_t reserved;
+ struct vss_param_vocproc_dev_channel_info_t channel_info;
+} __packed;
+
+struct vss_icommon_cmd_set_param_channel_info_v2_t {
+ /*
+ * Pointer to the unique identifier for an address (physical/virtual).
+ *
+ * If the parameter data payload is within the message payload
+ * (in-band), set this field to 0. The parameter data begins at the
+ * specified data payload address.
+ *
+ * If the parameter data is out-of-band, this field is the handle to
+ * the physical address in the shared memory that holds the parameter
+ * data.
+ */
+ uint32_t mem_handle;
+ /*
+ * Location of the parameter data payload.
+ *
+ * The payload is an array of vss_icommon_param_data_t. If the
+ * mem_handle is 0, this field is ignored.
+ */
+ uint64_t mem_address;
+ /* Size of the parameter data payload in bytes. */
+ uint32_t mem_size;
+ struct vss_icommon_param_data_channel_info_v2_t param_data;
+} __packed;
+
+struct vss_icommon_param_data_mfc_config_v2_t {
+ /* Valid ID of the module. */
+ uint32_t module_id;
+ /* Valid ID of the parameter. */
+ uint32_t param_id;
+ /*
+ * Data size of the structure relating to the param_id/module_id
+ * combination in uint8_t bytes.
+ */
+ uint16_t param_size;
+ /* This field must be set to zero. */
+ uint16_t reserved;
+ struct vss_param_mfc_config_info_t mfc_config_info;
+} __packed;
+
+struct vss_icommon_cmd_set_param_mfc_config_v2_t {
+ /*
+ * Pointer to the unique identifier for an address (physical/virtual).
+ *
+ * If the parameter data payload is within the message payload
+ * (in-band), set this field to 0. The parameter data begins at the
+ * specified data payload address.
+ *
+ * If the parameter data is out-of-band, this field is the handle to
+ * the physical address in the shared memory that holds the parameter
+ * data.
+ */
+
+ uint32_t mem_handle;
+ /*
+ * Location of the parameter data payload.
+ *
+ * The payload is an array of vss_icommon_param_data_t. If the
+ * mem_handle is 0, this field is ignored.
+ */
+ uint64_t mem_address;
+ /* Size of the parameter data payload in bytes. */
+ uint32_t mem_size;
+
+ struct vss_icommon_param_data_mfc_config_v2_t param_data;
+} __packed;
+
/* Payload structure for the VSS_ICOMMON_CMD_SET_PARAM_V2 command. */
struct vss_icommon_cmd_set_param_v2_t {
/*
@@ -674,6 +769,12 @@
#define VSS_IRECORD_MODE_TX_RX_MIXING 0x00010F7B
/* Select mixed Tx and Rx paths. */
+#define VSS_PARAM_VOCPROC_TX_CHANNEL_INFO 0x0001328E
+
+#define VSS_PARAM_VOCPROC_RX_CHANNEL_INFO 0x0001328F
+
+#define VSS_PARAM_VOCPROC_EC_REF_CHANNEL_INFO 0x00013290
+
#define VSS_PARAM_TX_PORT_ENDPOINT_MEDIA_INFO 0x00013253
#define VSS_PARAM_RX_PORT_ENDPOINT_MEDIA_INFO 0x00013254
@@ -1485,7 +1586,18 @@
struct cvp_set_media_format_cmd {
struct apr_hdr hdr;
- struct vss_icommon_cmd_set_param_v2_t cvp_set_param_v2;
+ struct vss_icommon_cmd_set_param_v2_t cvp_set_media_param_v2;
+} __packed;
+
+struct cvp_set_channel_info_cmd_v2 {
+ struct apr_hdr hdr;
+ struct vss_icommon_cmd_set_param_channel_info_v2_t
+ cvp_set_ch_info_param_v2;
+} __packed;
+
+struct cvp_set_mfc_config_cmd_v2 {
+ struct apr_hdr hdr;
+ struct vss_icommon_cmd_set_param_mfc_config_v2_t cvp_set_mfc_param_v2;
} __packed;
struct cvp_set_vp3_data_cmd {
@@ -1756,6 +1868,8 @@
bool srvcc_rec_flag;
bool is_destroy_cvd;
char cvd_version[CVD_VERSION_STRING_MAX_SIZE];
+ int cvp_version;
+ bool is_avcs_version_queried;
bool is_per_vocoder_cal_enabled;
bool is_sound_focus_resp_success;
bool is_source_tracking_resp_success;
diff --git a/sound/soc/msm/sdm660-common.c b/sound/soc/msm/sdm660-common.c
index eddcb45..43df772 100644
--- a/sound/soc/msm/sdm660-common.c
+++ b/sound/soc/msm/sdm660-common.c
@@ -12,6 +12,9 @@
#include <linux/input.h>
#include <linux/of_gpio.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
#include <linux/mfd/msm-cdc-pinctrl.h>
#include <sound/pcm_params.h>
#include <sound/q6afe-v2.h>
@@ -41,6 +44,8 @@
EXT_DISP_RX_IDX_MAX,
};
+bool codec_reg_done;
+
/* TDM default config */
static struct dev_config tdm_rx_cfg[TDM_INTERFACE_MAX][TDM_PORT_MAX] = {
{ /* PRI TDM */
@@ -190,7 +195,7 @@
static struct snd_soc_aux_dev *msm_aux_dev;
static struct snd_soc_codec_conf *msm_codec_conf;
-static bool msm_swap_gnd_mic(struct snd_soc_codec *codec);
+static bool msm_swap_gnd_mic(struct snd_soc_codec *codec, bool active);
static struct wcd_mbhc_config mbhc_cfg = {
.read_fw_bin = false,
@@ -2013,6 +2018,12 @@
}
EXPORT_SYMBOL(msm_common_snd_controls_size);
+void msm_set_codec_reg_done(bool done)
+{
+ codec_reg_done = done;
+}
+EXPORT_SYMBOL(msm_set_codec_reg_done);
+
static inline int param_is_mask(int p)
{
return (p >= SNDRV_PCM_HW_PARAM_FIRST_MASK) &&
@@ -2038,16 +2049,16 @@
}
}
-static int msm_ext_disp_get_idx_from_beid(int32_t be_id)
+static int msm_ext_disp_get_idx_from_beid(int32_t id)
{
int idx;
- switch (be_id) {
+ switch (id) {
case MSM_BACKEND_DAI_DISPLAY_PORT_RX:
idx = DP_RX_IDX;
break;
default:
- pr_err("%s: Incorrect ext_disp be_id %d\n", __func__, be_id);
+ pr_err("%s: Incorrect ext_disp id %d\n", __func__, id);
idx = -EINVAL;
break;
}
@@ -2077,7 +2088,7 @@
pr_debug("%s: format = %d, rate = %d\n",
__func__, params_format(params), params_rate(params));
- switch (dai_link->be_id) {
+ switch (dai_link->id) {
case MSM_BACKEND_DAI_USB_RX:
param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
usb_rx_cfg.bit_format);
@@ -2093,8 +2104,8 @@
break;
case MSM_BACKEND_DAI_DISPLAY_PORT_RX:
- idx = msm_ext_disp_get_idx_from_beid(dai_link->be_id);
- if (IS_ERR_VALUE(idx)) {
+ idx = msm_ext_disp_get_idx_from_beid(dai_link->id);
+ if (idx < 0) {
pr_err("%s: Incorrect ext disp idx %d\n",
__func__, idx);
rc = idx;
@@ -2341,11 +2352,11 @@
}
EXPORT_SYMBOL(msm_aux_pcm_snd_shutdown);
-static int msm_get_port_id(int be_id)
+static int msm_get_port_id(int id)
{
int afe_port_id;
- switch (be_id) {
+ switch (id) {
case MSM_BACKEND_DAI_PRI_MI2S_RX:
afe_port_id = AFE_PORT_ID_PRIMARY_MI2S_RX;
break;
@@ -2371,7 +2382,7 @@
afe_port_id = AFE_PORT_ID_QUATERNARY_MI2S_TX;
break;
default:
- pr_err("%s: Invalid be_id: %d\n", __func__, be_id);
+ pr_err("%s: Invalid id: %d\n", __func__, id);
afe_port_id = -EINVAL;
}
@@ -2422,7 +2433,7 @@
int port_id = 0;
int index = cpu_dai->id;
- port_id = msm_get_port_id(rtd->dai_link->be_id);
+ port_id = msm_get_port_id(rtd->dai_link->id);
if (port_id < 0) {
dev_err(rtd->card->dev, "%s: Invalid port_id\n", __func__);
ret = port_id;
@@ -2461,7 +2472,7 @@
int ret = 0;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- int port_id = msm_get_port_id(rtd->dai_link->be_id);
+ int port_id = msm_get_port_id(rtd->dai_link->id);
int index = cpu_dai->id;
unsigned int fmt = SND_SOC_DAIFMT_CBS_CFS;
@@ -2539,7 +2550,7 @@
{
int ret;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- int port_id = msm_get_port_id(rtd->dai_link->be_id);
+ int port_id = msm_get_port_id(rtd->dai_link->id);
int index = rtd->cpu_dai->id;
pr_debug("%s(): substream = %s stream = %d\n", __func__,
@@ -2594,7 +2605,7 @@
return ret;
}
-static bool msm_swap_gnd_mic(struct snd_soc_codec *codec)
+static bool msm_swap_gnd_mic(struct snd_soc_codec *codec, bool active)
{
struct snd_soc_card *card = codec->component.card;
struct msm_asoc_mach_data *pdata =
@@ -2699,13 +2710,13 @@
dai_link[i].codec_name = NULL;
}
if (pdata->snd_card_val == INT_SND_CARD) {
- if ((dai_link[i].be_id ==
+ if ((dai_link[i].id ==
MSM_BACKEND_DAI_INT0_MI2S_RX) ||
- (dai_link[i].be_id ==
+ (dai_link[i].id ==
MSM_BACKEND_DAI_INT1_MI2S_RX) ||
- (dai_link[i].be_id ==
+ (dai_link[i].id ==
MSM_BACKEND_DAI_INT2_MI2S_TX) ||
- (dai_link[i].be_id ==
+ (dai_link[i].id ==
MSM_BACKEND_DAI_INT3_MI2S_TX)) {
index = of_property_match_string(cdev->of_node,
"asoc-codec-names",
@@ -3024,6 +3035,12 @@
.data = "tasha_codec"},
{ .compatible = "qcom,sdm660-asoc-snd-tavil",
.data = "tavil_codec"},
+ { .compatible = "qcom,sdm670-asoc-snd",
+ .data = "internal_codec"},
+ { .compatible = "qcom,sdm670-asoc-snd-tasha",
+ .data = "tasha_codec"},
+ { .compatible = "qcom,sdm670-asoc-snd-tavil",
+ .data = "tavil_codec"},
{},
};
@@ -3041,6 +3058,7 @@
if (!pdata)
return -ENOMEM;
+ msm_set_codec_reg_done(false);
match = of_match_node(sdm660_asoc_machine_of_match,
pdev->dev.of_node);
if (!match)
diff --git a/sound/soc/msm/sdm660-common.h b/sound/soc/msm/sdm660-common.h
index bca8cd7..ffe77bc 100644
--- a/sound/soc/msm/sdm660-common.h
+++ b/sound/soc/msm/sdm660-common.h
@@ -122,4 +122,5 @@
int msm_mi2s_snd_startup(struct snd_pcm_substream *substream);
void msm_mi2s_snd_shutdown(struct snd_pcm_substream *substream);
int msm_common_snd_controls_size(void);
+void msm_set_codec_reg_done(bool done);
#endif
diff --git a/sound/soc/msm/sdm660-ext-dai-links.c b/sound/soc/msm/sdm660-ext-dai-links.c
index 1c03d8c..68a0f37 100644
--- a/sound/soc/msm/sdm660-ext-dai-links.c
+++ b/sound/soc/msm/sdm660-ext-dai-links.c
@@ -29,8 +29,15 @@
#define WCN_CDC_SLIM_RX_CH_MAX 2
#define WCN_CDC_SLIM_TX_CH_MAX 3
-static struct snd_soc_card snd_soc_card_msm_card_tavil;
-static struct snd_soc_card snd_soc_card_msm_card_tasha;
+static struct snd_soc_card snd_soc_card_msm_card_tavil = {
+ .name = "sdm670-tavil-snd-card",
+ .late_probe = msm_snd_card_tavil_late_probe,
+};
+
+static struct snd_soc_card snd_soc_card_msm_card_tasha = {
+ .name = "sdm670-tasha-snd-card",
+ .late_probe = msm_snd_card_tasha_late_probe,
+};
static struct snd_soc_ops msm_ext_slimbus_be_ops = {
.hw_params = msm_snd_hw_params,
@@ -86,8 +93,8 @@
goto exit;
}
- dev_dbg(rtd->dev, "%s: tx_ch_cnt(%d) be_id %d\n",
- __func__, tx_ch_cnt, dai_link->be_id);
+ dev_dbg(rtd->dev, "%s: tx_ch_cnt(%d) id %d\n",
+ __func__, tx_ch_cnt, dai_link->id);
ret = snd_soc_dai_set_channel_map(cpu_dai,
tx_ch_cnt, tx_ch, rx_ch_cnt, rx_ch);
@@ -279,7 +286,7 @@
.platform_name = "msm-pcm-hostless",
.codec_name = "tasha_codec",
.codec_dai_name = "tasha_vifeedback",
- .be_id = MSM_BACKEND_DAI_SLIMBUS_4_TX,
+ .id = MSM_BACKEND_DAI_SLIMBUS_4_TX,
.be_hw_params_fixup = msm_ext_be_hw_params_fixup,
.ops = &msm_ext_slimbus_be_ops,
.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
@@ -368,7 +375,7 @@
.platform_name = "msm-pcm-hostless",
.codec_name = "tavil_codec",
.codec_dai_name = "tavil_vifeedback",
- .be_id = MSM_BACKEND_DAI_SLIMBUS_4_TX,
+ .id = MSM_BACKEND_DAI_SLIMBUS_4_TX,
.be_hw_params_fixup = msm_ext_be_hw_params_fixup,
.ops = &msm_ext_slimbus_be_ops,
.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
@@ -411,7 +418,7 @@
.codec_dai_name = "tasha_mix_rx1",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_SLIMBUS_0_RX,
+ .id = MSM_BACKEND_DAI_SLIMBUS_0_RX,
.init = &msm_audrx_init,
.be_hw_params_fixup = msm_ext_be_hw_params_fixup,
/* this dainlink has playback support */
@@ -428,7 +435,7 @@
.codec_dai_name = "tasha_tx1",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_SLIMBUS_0_TX,
+ .id = MSM_BACKEND_DAI_SLIMBUS_0_TX,
.be_hw_params_fixup = msm_ext_be_hw_params_fixup,
.ignore_suspend = 1,
.ops = &msm_ext_slimbus_be_ops,
@@ -442,7 +449,7 @@
.codec_dai_name = "tasha_mix_rx1",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_SLIMBUS_1_RX,
+ .id = MSM_BACKEND_DAI_SLIMBUS_1_RX,
.be_hw_params_fixup = msm_ext_be_hw_params_fixup,
.ops = &msm_ext_slimbus_be_ops,
/* dai link has playback support */
@@ -458,7 +465,7 @@
.codec_dai_name = "tasha_tx3",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_SLIMBUS_1_TX,
+ .id = MSM_BACKEND_DAI_SLIMBUS_1_TX,
.be_hw_params_fixup = msm_ext_be_hw_params_fixup,
.ops = &msm_ext_slimbus_be_ops,
.ignore_suspend = 1,
@@ -472,7 +479,7 @@
.codec_dai_name = "tasha_mix_rx1",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_SLIMBUS_3_RX,
+ .id = MSM_BACKEND_DAI_SLIMBUS_3_RX,
.be_hw_params_fixup = msm_ext_be_hw_params_fixup,
.ops = &msm_ext_slimbus_be_ops,
/* dai link has playback support */
@@ -489,7 +496,7 @@
.no_pcm = 1,
.dpcm_capture = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_SLIMBUS_3_TX,
+ .id = MSM_BACKEND_DAI_SLIMBUS_3_TX,
.be_hw_params_fixup = msm_ext_be_hw_params_fixup,
.ops = &msm_ext_slimbus_be_ops,
.ignore_suspend = 1,
@@ -503,7 +510,7 @@
.codec_dai_name = "tasha_mix_rx1",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_SLIMBUS_4_RX,
+ .id = MSM_BACKEND_DAI_SLIMBUS_4_RX,
.be_hw_params_fixup = msm_ext_be_hw_params_fixup,
.ops = &msm_ext_slimbus_be_ops,
/* dai link has playback support */
@@ -519,7 +526,7 @@
.codec_dai_name = "tasha_rx3",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_SLIMBUS_5_RX,
+ .id = MSM_BACKEND_DAI_SLIMBUS_5_RX,
.be_hw_params_fixup = msm_ext_be_hw_params_fixup,
.ops = &msm_ext_slimbus_be_ops,
/* dai link has playback support */
@@ -536,7 +543,7 @@
.codec_dai_name = "tasha_mad1",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_SLIMBUS_5_TX,
+ .id = MSM_BACKEND_DAI_SLIMBUS_5_TX,
.be_hw_params_fixup = msm_ext_be_hw_params_fixup,
.ops = &msm_ext_slimbus_be_ops,
.ignore_suspend = 1,
@@ -550,7 +557,7 @@
.codec_dai_name = "tasha_rx4",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_SLIMBUS_6_RX,
+ .id = MSM_BACKEND_DAI_SLIMBUS_6_RX,
.be_hw_params_fixup = msm_ext_be_hw_params_fixup,
.ops = &msm_ext_slimbus_be_ops,
/* dai link has playback support */
@@ -569,7 +576,7 @@
.codec_dai_name = "tavil_rx1",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_SLIMBUS_0_RX,
+ .id = MSM_BACKEND_DAI_SLIMBUS_0_RX,
.init = &msm_audrx_init,
.be_hw_params_fixup = msm_ext_be_hw_params_fixup,
/* this dainlink has playback support */
@@ -586,7 +593,7 @@
.codec_dai_name = "tavil_tx1",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_SLIMBUS_0_TX,
+ .id = MSM_BACKEND_DAI_SLIMBUS_0_TX,
.be_hw_params_fixup = msm_ext_be_hw_params_fixup,
.ignore_suspend = 1,
.ops = &msm_ext_slimbus_be_ops,
@@ -600,7 +607,7 @@
.codec_dai_name = "tavil_rx1",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_SLIMBUS_1_RX,
+ .id = MSM_BACKEND_DAI_SLIMBUS_1_RX,
.be_hw_params_fixup = msm_ext_be_hw_params_fixup,
.ops = &msm_ext_slimbus_be_ops,
/* dai link has playback support */
@@ -616,7 +623,7 @@
.codec_dai_name = "tavil_tx3",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_SLIMBUS_1_TX,
+ .id = MSM_BACKEND_DAI_SLIMBUS_1_TX,
.be_hw_params_fixup = msm_ext_be_hw_params_fixup,
.ops = &msm_ext_slimbus_be_ops,
.ignore_suspend = 1,
@@ -630,7 +637,7 @@
.codec_dai_name = "tavil_rx2",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_SLIMBUS_2_RX,
+ .id = MSM_BACKEND_DAI_SLIMBUS_2_RX,
.be_hw_params_fixup = msm_ext_be_hw_params_fixup,
.ops = &msm_ext_slimbus_be_ops,
.ignore_pmdown_time = 1,
@@ -645,7 +652,7 @@
.codec_dai_name = "tavil_rx1",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_SLIMBUS_3_RX,
+ .id = MSM_BACKEND_DAI_SLIMBUS_3_RX,
.be_hw_params_fixup = msm_ext_be_hw_params_fixup,
.ops = &msm_ext_slimbus_be_ops,
/* dai link has playback support */
@@ -661,7 +668,7 @@
.codec_dai_name = "tavil_tx1",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_SLIMBUS_3_TX,
+ .id = MSM_BACKEND_DAI_SLIMBUS_3_TX,
.be_hw_params_fixup = msm_ext_be_hw_params_fixup,
.ops = &msm_ext_slimbus_be_ops,
.ignore_suspend = 1,
@@ -675,7 +682,7 @@
.codec_dai_name = "tavil_rx1",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_SLIMBUS_4_RX,
+ .id = MSM_BACKEND_DAI_SLIMBUS_4_RX,
.be_hw_params_fixup = msm_ext_be_hw_params_fixup,
.ops = &msm_ext_slimbus_be_ops,
/* dai link has playback support */
@@ -691,7 +698,7 @@
.codec_dai_name = "tavil_rx3",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_SLIMBUS_5_RX,
+ .id = MSM_BACKEND_DAI_SLIMBUS_5_RX,
.be_hw_params_fixup = msm_ext_be_hw_params_fixup,
.ops = &msm_ext_slimbus_be_ops,
/* dai link has playback support */
@@ -708,7 +715,7 @@
.codec_dai_name = "tavil_mad1",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_SLIMBUS_5_TX,
+ .id = MSM_BACKEND_DAI_SLIMBUS_5_TX,
.be_hw_params_fixup = msm_ext_be_hw_params_fixup,
.ops = &msm_ext_slimbus_be_ops,
.ignore_suspend = 1,
@@ -722,7 +729,7 @@
.codec_dai_name = "tavil_rx4",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_SLIMBUS_6_RX,
+ .id = MSM_BACKEND_DAI_SLIMBUS_6_RX,
.be_hw_params_fixup = msm_ext_be_hw_params_fixup,
.ops = &msm_ext_slimbus_be_ops,
/* dai link has playback support */
@@ -748,7 +755,7 @@
.dpcm_capture = 1,
/* this dai link has playback support */
.ignore_pmdown_time = 1,
- .be_id = MSM_FRONTEND_DAI_MULTIMEDIA1
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA1
},
{/* hw:x,1 */
.name = MSM_DAILINK_NAME(Media2),
@@ -765,7 +772,7 @@
.ignore_suspend = 1,
/* this dai link has playback support */
.ignore_pmdown_time = 1,
- .be_id = MSM_FRONTEND_DAI_MULTIMEDIA2,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA2,
},
{/* hw:x,2 */
.name = "VoiceMMode1",
@@ -782,7 +789,7 @@
.ignore_pmdown_time = 1,
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
- .be_id = MSM_FRONTEND_DAI_VOICEMMODE1,
+ .id = MSM_FRONTEND_DAI_VOICEMMODE1,
},
{/* hw:x,3 */
.name = "MSM VoIP",
@@ -799,7 +806,7 @@
.ignore_suspend = 1,
/* this dai link has playback support */
.ignore_pmdown_time = 1,
- .be_id = MSM_FRONTEND_DAI_VOIP,
+ .id = MSM_FRONTEND_DAI_VOIP,
},
{/* hw:x,4 */
.name = MSM_DAILINK_NAME(ULL),
@@ -815,7 +822,7 @@
.ignore_suspend = 1,
/* this dai link has playback support */
.ignore_pmdown_time = 1,
- .be_id = MSM_FRONTEND_DAI_MULTIMEDIA3,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA3,
},
/* Hostless PCM purpose */
{/* hw:x,5 */
@@ -871,7 +878,7 @@
.ignore_suspend = 1,
.ignore_pmdown_time = 1,
/* this dai link has playback support */
- .be_id = MSM_FRONTEND_DAI_MULTIMEDIA4,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA4,
},
{/* hw:x,9*/
.name = "AUXPCM Hostless",
@@ -953,7 +960,7 @@
.ignore_suspend = 1,
/* this dai link has playback support */
.ignore_pmdown_time = 1,
- .be_id = MSM_FRONTEND_DAI_MULTIMEDIA5,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA5,
},
/* LSM FE */
{/* hw:x,14 */
@@ -970,7 +977,7 @@
.ignore_pmdown_time = 1,
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
- .be_id = MSM_FRONTEND_DAI_LSM1,
+ .id = MSM_FRONTEND_DAI_LSM1,
},
{/* hw:x,15 */
.name = MSM_DAILINK_NAME(Compress2),
@@ -987,16 +994,16 @@
 .ignore_suspend = 1,
 /* this dai link has playback support */
 .ignore_pmdown_time = 1,
- .be_id = MSM_FRONTEND_DAI_MULTIMEDIA7,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA7,
 },
 {/* hw:x,16 */
- .name = MSM_DAILINK_NAME(Compress3),
- .stream_name = "Compress3",
+ .name = MSM_DAILINK_NAME(MultiMedia10),
+ .stream_name = "MultiMedia10",
 .cpu_dai_name = "MultiMedia10",
- .platform_name = "msm-compress-dsp",
+ .platform_name = "msm-pcm-dsp.1",
 .dynamic = 1,
 .dpcm_capture = 1,
 .dpcm_playback = 1,
.trigger = {SND_SOC_DPCM_TRIGGER_POST,
SND_SOC_DPCM_TRIGGER_POST},
.codec_dai_name = "snd-soc-dummy-dai",
@@ -1004,7 +1012,7 @@
.ignore_suspend = 1,
.ignore_pmdown_time = 1,
/* this dai link has playback support */
- .be_id = MSM_FRONTEND_DAI_MULTIMEDIA10,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA10,
},
{/* hw:x,17 */
.name = MSM_DAILINK_NAME(ULL_NOIRQ),
@@ -1021,7 +1029,7 @@
.ignore_suspend = 1,
.ignore_pmdown_time = 1,
/* this dai link has playback support */
- .be_id = MSM_FRONTEND_DAI_MULTIMEDIA8,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA8,
},
{/* hw:x,18 */
.name = "HDMI_RX_HOSTLESS",
@@ -1053,7 +1061,7 @@
.ignore_pmdown_time = 1,
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
- .be_id = MSM_FRONTEND_DAI_VOICEMMODE2,
+ .id = MSM_FRONTEND_DAI_VOICEMMODE2,
},
{/* hw:x,20 */
.name = "Listen 2 Audio Service",
@@ -1069,7 +1077,7 @@
.ignore_pmdown_time = 1,
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
- .be_id = MSM_FRONTEND_DAI_LSM2,
+ .id = MSM_FRONTEND_DAI_LSM2,
},
{/* hw:x,21 */
.name = "Listen 3 Audio Service",
@@ -1085,7 +1093,7 @@
.ignore_pmdown_time = 1,
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
- .be_id = MSM_FRONTEND_DAI_LSM3,
+ .id = MSM_FRONTEND_DAI_LSM3,
},
{/* hw:x,22 */
.name = "Listen 4 Audio Service",
@@ -1101,7 +1109,7 @@
.ignore_pmdown_time = 1,
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
- .be_id = MSM_FRONTEND_DAI_LSM4,
+ .id = MSM_FRONTEND_DAI_LSM4,
},
{/* hw:x,23 */
.name = "Listen 5 Audio Service",
@@ -1117,7 +1125,7 @@
.ignore_pmdown_time = 1,
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
- .be_id = MSM_FRONTEND_DAI_LSM5,
+ .id = MSM_FRONTEND_DAI_LSM5,
},
{/* hw:x,24 */
.name = "Listen 6 Audio Service",
@@ -1133,7 +1141,7 @@
.ignore_pmdown_time = 1,
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
- .be_id = MSM_FRONTEND_DAI_LSM6
+ .id = MSM_FRONTEND_DAI_LSM6
},
{/* hw:x,25 */
.name = "Listen 7 Audio Service",
@@ -1149,7 +1157,7 @@
.ignore_pmdown_time = 1,
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
- .be_id = MSM_FRONTEND_DAI_LSM7,
+ .id = MSM_FRONTEND_DAI_LSM7,
},
{/* hw:x,26 */
.name = "Listen 8 Audio Service",
@@ -1165,7 +1173,7 @@
.ignore_pmdown_time = 1,
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
- .be_id = MSM_FRONTEND_DAI_LSM8,
+ .id = MSM_FRONTEND_DAI_LSM8,
},
{/* hw:x,27 */
.name = MSM_DAILINK_NAME(Media9),
@@ -1182,7 +1190,7 @@
.ignore_suspend = 1,
.ignore_pmdown_time = 1,
/* this dai link has playback support */
- .be_id = MSM_FRONTEND_DAI_MULTIMEDIA9,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA9,
},
{/* hw:x,28 */
.name = MSM_DAILINK_NAME(Compress4),
@@ -1199,7 +1207,7 @@
.ignore_suspend = 1,
.ignore_pmdown_time = 1,
/* this dai link has playback support */
- .be_id = MSM_FRONTEND_DAI_MULTIMEDIA11,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA11,
},
{/* hw:x,29 */
.name = MSM_DAILINK_NAME(Compress5),
@@ -1216,7 +1224,7 @@
.ignore_suspend = 1,
.ignore_pmdown_time = 1,
/* this dai link has playback support */
- .be_id = MSM_FRONTEND_DAI_MULTIMEDIA12,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA12,
},
{/* hw:x,30 */
.name = MSM_DAILINK_NAME(Compress6),
@@ -1233,7 +1241,7 @@
.ignore_suspend = 1,
.ignore_pmdown_time = 1,
/* this dai link has playback support */
- .be_id = MSM_FRONTEND_DAI_MULTIMEDIA13,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA13,
},
{/* hw:x,31 */
.name = MSM_DAILINK_NAME(Compress7),
@@ -1250,7 +1258,7 @@
.ignore_suspend = 1,
.ignore_pmdown_time = 1,
/* this dai link has playback support */
- .be_id = MSM_FRONTEND_DAI_MULTIMEDIA14,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA14,
},
{/* hw:x,32 */
.name = MSM_DAILINK_NAME(Compress8),
@@ -1267,13 +1275,13 @@
.ignore_suspend = 1,
.ignore_pmdown_time = 1,
/* this dai link has playback support */
- .be_id = MSM_FRONTEND_DAI_MULTIMEDIA15,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA15,
},
{/* hw:x,33 */
- .name = MSM_DAILINK_NAME(Compress9),
- .stream_name = "Compress9",
+ .name = MSM_DAILINK_NAME(ULL_NOIRQ_2),
+ .stream_name = "MM_NOIRQ_2",
.cpu_dai_name = "MultiMedia16",
- .platform_name = "msm-compress-dsp",
+ .platform_name = "msm-pcm-dsp-noirq",
.dynamic = 1,
.dpcm_capture = 1,
.dpcm_playback = 1,
@@ -1284,7 +1292,7 @@
.ignore_suspend = 1,
.ignore_pmdown_time = 1,
/* this dai link has playback support */
- .be_id = MSM_FRONTEND_DAI_MULTIMEDIA16,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA16,
},
{/* hw:x,34 */
.name = "SLIMBUS_8 Hostless",
@@ -1332,7 +1340,7 @@
.ignore_suspend = 1,
.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
.ignore_pmdown_time = 1,
- .be_id = MSM_FRONTEND_DAI_MULTIMEDIA6,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA6,
},
};
@@ -1346,7 +1354,7 @@
.codec_dai_name = "msm-stub-rx",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_AFE_PCM_RX,
+ .id = MSM_BACKEND_DAI_AFE_PCM_RX,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
/* this dai link has playback support */
.ignore_pmdown_time = 1,
@@ -1361,7 +1369,7 @@
.codec_dai_name = "msm-stub-tx",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_AFE_PCM_TX,
+ .id = MSM_BACKEND_DAI_AFE_PCM_TX,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ignore_suspend = 1,
},
@@ -1375,7 +1383,7 @@
.codec_dai_name = "msm-stub-tx",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_INCALL_RECORD_TX,
+ .id = MSM_BACKEND_DAI_INCALL_RECORD_TX,
.be_hw_params_fixup = msm_ext_be_hw_params_fixup,
.ignore_suspend = 1,
},
@@ -1389,7 +1397,7 @@
.codec_dai_name = "msm-stub-tx",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_INCALL_RECORD_RX,
+ .id = MSM_BACKEND_DAI_INCALL_RECORD_RX,
.be_hw_params_fixup = msm_ext_be_hw_params_fixup,
.ignore_suspend = 1,
},
@@ -1403,7 +1411,7 @@
.codec_dai_name = "msm-stub-rx",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_VOICE_PLAYBACK_TX,
+ .id = MSM_BACKEND_DAI_VOICE_PLAYBACK_TX,
.be_hw_params_fixup = msm_ext_be_hw_params_fixup,
.ignore_suspend = 1,
},
@@ -1417,7 +1425,7 @@
.codec_dai_name = "msm-stub-rx",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_VOICE2_PLAYBACK_TX,
+ .id = MSM_BACKEND_DAI_VOICE2_PLAYBACK_TX,
.be_hw_params_fixup = msm_ext_be_hw_params_fixup,
.ignore_suspend = 1,
},
@@ -1430,7 +1438,7 @@
.codec_dai_name = "msm-stub-rx",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_USB_RX,
+ .id = MSM_BACKEND_DAI_USB_RX,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ignore_pmdown_time = 1,
.ignore_suspend = 1,
@@ -1444,7 +1452,7 @@
.codec_dai_name = "msm-stub-tx",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_USB_TX,
+ .id = MSM_BACKEND_DAI_USB_TX,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ignore_suspend = 1,
},
@@ -1457,7 +1465,7 @@
.codec_dai_name = "msm-stub-rx",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_PRI_TDM_RX_0,
+ .id = MSM_BACKEND_DAI_PRI_TDM_RX_0,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ops = &msm_tdm_be_ops,
.ignore_suspend = 1,
@@ -1471,7 +1479,7 @@
.codec_dai_name = "msm-stub-tx",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_PRI_TDM_TX_0,
+ .id = MSM_BACKEND_DAI_PRI_TDM_TX_0,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ops = &msm_tdm_be_ops,
.ignore_suspend = 1,
@@ -1485,7 +1493,7 @@
.codec_dai_name = "msm-stub-rx",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_SEC_TDM_RX_0,
+ .id = MSM_BACKEND_DAI_SEC_TDM_RX_0,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ops = &msm_tdm_be_ops,
.ignore_suspend = 1,
@@ -1499,7 +1507,7 @@
.codec_dai_name = "msm-stub-tx",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_SEC_TDM_TX_0,
+ .id = MSM_BACKEND_DAI_SEC_TDM_TX_0,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ops = &msm_tdm_be_ops,
.ignore_suspend = 1,
@@ -1513,7 +1521,7 @@
.codec_dai_name = "msm-stub-rx",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_TERT_TDM_RX_0,
+ .id = MSM_BACKEND_DAI_TERT_TDM_RX_0,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ops = &msm_tdm_be_ops,
.ignore_suspend = 1,
@@ -1527,7 +1535,7 @@
.codec_dai_name = "msm-stub-tx",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_TERT_TDM_TX_0,
+ .id = MSM_BACKEND_DAI_TERT_TDM_TX_0,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ops = &msm_tdm_be_ops,
.ignore_suspend = 1,
@@ -1541,7 +1549,7 @@
.codec_dai_name = "msm-stub-rx",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+ .id = MSM_BACKEND_DAI_QUAT_TDM_RX_0,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ops = &msm_tdm_be_ops,
.ignore_suspend = 1,
@@ -1555,7 +1563,7 @@
.codec_dai_name = "msm-stub-tx",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+ .id = MSM_BACKEND_DAI_QUAT_TDM_TX_0,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ops = &msm_tdm_be_ops,
.ignore_suspend = 1,
@@ -1572,7 +1580,7 @@
.codec_dai_name = "msm-stub-rx",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_PRI_MI2S_RX,
+ .id = MSM_BACKEND_DAI_PRI_MI2S_RX,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ops = &msm_mi2s_be_ops,
.ignore_suspend = 1,
@@ -1587,7 +1595,7 @@
.codec_dai_name = "msm-stub-tx",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_PRI_MI2S_TX,
+ .id = MSM_BACKEND_DAI_PRI_MI2S_TX,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ops = &msm_mi2s_be_ops,
.ignore_suspend = 1,
@@ -1601,7 +1609,7 @@
.codec_dai_name = "msm-stub-rx",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+ .id = MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ops = &msm_mi2s_be_ops,
.ignore_suspend = 1,
@@ -1616,7 +1624,7 @@
.codec_dai_name = "msm-stub-tx",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_SECONDARY_MI2S_TX,
+ .id = MSM_BACKEND_DAI_SECONDARY_MI2S_TX,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ops = &msm_mi2s_be_ops,
.ignore_suspend = 1,
@@ -1630,7 +1638,7 @@
.codec_dai_name = "msm-stub-rx",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+ .id = MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ops = &msm_mi2s_be_ops,
.ignore_suspend = 1,
@@ -1645,7 +1653,7 @@
.codec_dai_name = "msm-stub-tx",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+ .id = MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ops = &msm_mi2s_be_ops,
.ignore_suspend = 1,
@@ -1659,7 +1667,7 @@
.codec_dai_name = "msm-stub-rx",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+ .id = MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ops = &msm_mi2s_be_ops,
.ignore_suspend = 1,
@@ -1674,7 +1682,7 @@
.codec_dai_name = "msm-stub-tx",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+ .id = MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ops = &msm_mi2s_be_ops,
.ignore_suspend = 1,
@@ -1692,7 +1700,7 @@
.codec_dai_name = "msm-stub-rx",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_AUXPCM_RX,
+ .id = MSM_BACKEND_DAI_AUXPCM_RX,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ignore_pmdown_time = 1,
.ignore_suspend = 1,
@@ -1707,7 +1715,7 @@
.codec_dai_name = "msm-stub-tx",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_AUXPCM_TX,
+ .id = MSM_BACKEND_DAI_AUXPCM_TX,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ignore_pmdown_time = 1,
.ignore_suspend = 1,
@@ -1723,7 +1731,7 @@
.codec_dai_name = "msm-stub-rx",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+ .id = MSM_BACKEND_DAI_SEC_AUXPCM_RX,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ignore_pmdown_time = 1,
.ignore_suspend = 1,
@@ -1738,7 +1746,7 @@
.codec_dai_name = "msm-stub-tx",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_SEC_AUXPCM_TX,
+ .id = MSM_BACKEND_DAI_SEC_AUXPCM_TX,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ignore_suspend = 1,
.ignore_pmdown_time = 1,
@@ -1754,7 +1762,7 @@
.codec_dai_name = "msm-stub-rx",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_TERT_AUXPCM_RX,
+ .id = MSM_BACKEND_DAI_TERT_AUXPCM_RX,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ignore_pmdown_time = 1,
.ignore_suspend = 1,
@@ -1769,7 +1777,7 @@
.codec_dai_name = "msm-stub-tx",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_TERT_AUXPCM_TX,
+ .id = MSM_BACKEND_DAI_TERT_AUXPCM_TX,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ignore_suspend = 1,
.ignore_pmdown_time = 1,
@@ -1785,7 +1793,7 @@
.codec_dai_name = "msm-stub-rx",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_QUAT_AUXPCM_RX,
+ .id = MSM_BACKEND_DAI_QUAT_AUXPCM_RX,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ignore_pmdown_time = 1,
.ignore_suspend = 1,
@@ -1800,7 +1808,7 @@
.codec_dai_name = "msm-stub-tx",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_QUAT_AUXPCM_TX,
+ .id = MSM_BACKEND_DAI_QUAT_AUXPCM_TX,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ignore_suspend = 1,
.ignore_pmdown_time = 1,
@@ -1822,7 +1830,7 @@
.codec_dai_name = "btfm_bt_sco_a2dp_slim_rx",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_SLIMBUS_7_RX,
+ .id = MSM_BACKEND_DAI_SLIMBUS_7_RX,
.be_hw_params_fixup = msm_ext_be_hw_params_fixup,
.ops = &msm_wcn_ops,
/* dai link has playback support */
@@ -1838,7 +1846,7 @@
.codec_dai_name = "btfm_bt_sco_slim_tx",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_SLIMBUS_7_TX,
+ .id = MSM_BACKEND_DAI_SLIMBUS_7_TX,
.be_hw_params_fixup = msm_ext_be_hw_params_fixup,
.ops = &msm_wcn_ops,
.ignore_suspend = 1,
@@ -1852,7 +1860,7 @@
.codec_dai_name = "btfm_fm_slim_tx",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_SLIMBUS_8_TX,
+ .id = MSM_BACKEND_DAI_SLIMBUS_8_TX,
.be_hw_params_fixup = msm_ext_be_hw_params_fixup,
.init = &msm_wcn_init,
.ops = &msm_wcn_ops,
@@ -1871,7 +1879,7 @@
.codec_dai_name = "msm_dp_audio_codec_rx_dai",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_DISPLAY_PORT_RX,
+ .id = MSM_BACKEND_DAI_DISPLAY_PORT_RX,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ignore_pmdown_time = 1,
.ignore_suspend = 1,
diff --git a/sound/soc/msm/sdm660-external.c b/sound/soc/msm/sdm660-external.c
index 2c3d7fc..4224289 100644
--- a/sound/soc/msm/sdm660-external.c
+++ b/sound/soc/msm/sdm660-external.c
@@ -20,7 +20,6 @@
#include <sound/q6core.h>
#include <linux/qdsp6v2/audio_notifier.h>
#include "qdsp6v2/msm-pcm-routing-v2.h"
-#include "msm-audio-pinctrl.h"
#include "sdm660-common.h"
#include "sdm660-external.h"
#include "../codecs/wcd9335.h"
@@ -56,7 +55,6 @@
static int msm_ext_spk_control = 1;
static struct wcd_mbhc_config *wcd_mbhc_cfg_ptr;
-bool codec_reg_done;
struct msm_asoc_wcd93xx_codec {
void* (*get_afe_config_fn)(struct snd_soc_codec *codec,
@@ -604,23 +602,23 @@
static void *def_ext_mbhc_cal(void)
{
- void *tavil_wcd_cal;
+ void *wcd_mbhc_cal;
struct wcd_mbhc_btn_detect_cfg *btn_cfg;
u16 *btn_high;
- tavil_wcd_cal = kzalloc(WCD_MBHC_CAL_SIZE(WCD_MBHC_DEF_BUTTONS,
+ wcd_mbhc_cal = kzalloc(WCD_MBHC_CAL_SIZE(WCD_MBHC_DEF_BUTTONS,
WCD9XXX_MBHC_DEF_RLOADS), GFP_KERNEL);
- if (!tavil_wcd_cal)
+ if (!wcd_mbhc_cal)
return NULL;
-#define S(X, Y) ((WCD_MBHC_CAL_PLUG_TYPE_PTR(tavil_wcd_cal)->X) = (Y))
+#define S(X, Y) ((WCD_MBHC_CAL_PLUG_TYPE_PTR(wcd_mbhc_cal)->X) = (Y))
S(v_hs_max, 1600);
#undef S
-#define S(X, Y) ((WCD_MBHC_CAL_BTN_DET_PTR(tavil_wcd_cal)->X) = (Y))
+#define S(X, Y) ((WCD_MBHC_CAL_BTN_DET_PTR(wcd_mbhc_cal)->X) = (Y))
S(num_btn, WCD_MBHC_DEF_BUTTONS);
#undef S
- btn_cfg = WCD_MBHC_CAL_BTN_DET_PTR(tavil_wcd_cal);
+ btn_cfg = WCD_MBHC_CAL_BTN_DET_PTR(wcd_mbhc_cal);
btn_high = ((void *)&btn_cfg->_v_btn_low) +
(sizeof(btn_cfg->_v_btn_low[0]) * btn_cfg->num_btn);
@@ -633,7 +631,7 @@
btn_high[6] = 500;
btn_high[7] = 500;
- return tavil_wcd_cal;
+ return wcd_mbhc_cal;
}
static inline int param_is_mask(int p)
@@ -747,11 +745,11 @@
msm_bt_sample_rate_put),
};
-static int msm_slim_get_ch_from_beid(int32_t be_id)
+static int msm_slim_get_ch_from_beid(int32_t id)
{
int ch_id = 0;
- switch (be_id) {
+ switch (id) {
case MSM_BACKEND_DAI_SLIMBUS_0_RX:
ch_id = SLIM_RX_0;
break;
@@ -821,14 +819,14 @@
pr_debug("%s: format = %d, rate = %d\n",
__func__, params_format(params), params_rate(params));
- switch (dai_link->be_id) {
+ switch (dai_link->id) {
case MSM_BACKEND_DAI_SLIMBUS_0_RX:
case MSM_BACKEND_DAI_SLIMBUS_1_RX:
case MSM_BACKEND_DAI_SLIMBUS_2_RX:
case MSM_BACKEND_DAI_SLIMBUS_3_RX:
case MSM_BACKEND_DAI_SLIMBUS_4_RX:
case MSM_BACKEND_DAI_SLIMBUS_6_RX:
- idx = msm_slim_get_ch_from_beid(dai_link->be_id);
+ idx = msm_slim_get_ch_from_beid(dai_link->id);
param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
slim_rx_cfg[idx].bit_format);
rate->min = rate->max = slim_rx_cfg[idx].sample_rate;
@@ -837,7 +835,7 @@
case MSM_BACKEND_DAI_SLIMBUS_0_TX:
case MSM_BACKEND_DAI_SLIMBUS_3_TX:
- idx = msm_slim_get_ch_from_beid(dai_link->be_id);
+ idx = msm_slim_get_ch_from_beid(dai_link->id);
param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
slim_tx_cfg[idx].bit_format);
rate->min = rate->max = slim_tx_cfg[idx].sample_rate;
@@ -938,15 +936,15 @@
__func__, ret);
goto err_ch_map;
}
- if (dai_link->be_id == MSM_BACKEND_DAI_SLIMBUS_5_RX) {
+ if (dai_link->id == MSM_BACKEND_DAI_SLIMBUS_5_RX) {
pr_debug("%s: rx_5_ch=%d\n", __func__,
slim_rx_cfg[5].channels);
rx_ch_count = slim_rx_cfg[5].channels;
- } else if (dai_link->be_id == MSM_BACKEND_DAI_SLIMBUS_2_RX) {
+ } else if (dai_link->id == MSM_BACKEND_DAI_SLIMBUS_2_RX) {
pr_debug("%s: rx_2_ch=%d\n", __func__,
slim_rx_cfg[2].channels);
rx_ch_count = slim_rx_cfg[2].channels;
- } else if (dai_link->be_id == MSM_BACKEND_DAI_SLIMBUS_6_RX) {
+ } else if (dai_link->id == MSM_BACKEND_DAI_SLIMBUS_6_RX) {
pr_debug("%s: rx_6_ch=%d\n", __func__,
slim_rx_cfg[6].channels);
rx_ch_count = slim_rx_cfg[6].channels;
@@ -973,19 +971,19 @@
goto err_ch_map;
}
/* For <codec>_tx1 case */
- if (dai_link->be_id == MSM_BACKEND_DAI_SLIMBUS_0_TX)
+ if (dai_link->id == MSM_BACKEND_DAI_SLIMBUS_0_TX)
user_set_tx_ch = slim_tx_cfg[0].channels;
/* For <codec>_tx3 case */
- else if (dai_link->be_id == MSM_BACKEND_DAI_SLIMBUS_1_TX)
+ else if (dai_link->id == MSM_BACKEND_DAI_SLIMBUS_1_TX)
user_set_tx_ch = slim_tx_cfg[1].channels;
- else if (dai_link->be_id == MSM_BACKEND_DAI_SLIMBUS_4_TX)
+ else if (dai_link->id == MSM_BACKEND_DAI_SLIMBUS_4_TX)
user_set_tx_ch = msm_vi_feed_tx_ch;
else
user_set_tx_ch = tx_ch_cnt;
- pr_debug("%s: msm_slim_0_tx_ch(%d) user_set_tx_ch(%d) tx_ch_cnt(%d), be_id (%d)\n",
+ pr_debug("%s: msm_slim_0_tx_ch(%d) user_set_tx_ch(%d) tx_ch_cnt(%d), id (%d)\n",
__func__, slim_tx_cfg[0].channels, user_set_tx_ch,
- tx_ch_cnt, dai_link->be_id);
+ tx_ch_cnt, dai_link->id);
ret = snd_soc_dai_set_channel_map(cpu_dai,
user_set_tx_ch, tx_ch, 0, 0);
@@ -1097,8 +1095,8 @@
goto end;
}
- pr_debug("%s: tx_ch_cnt(%d) be_id %d\n",
- __func__, tx_ch_cnt, dai_link->be_id);
+ pr_debug("%s: tx_ch_cnt(%d) id %d\n",
+ __func__, tx_ch_cnt, dai_link->id);
ret = snd_soc_dai_set_channel_map(cpu_dai,
tx_ch_cnt, tx_ch, 0, 0);
@@ -1479,6 +1477,79 @@
{"MIC BIAS4", NULL, "MCLK"},
};
+int msm_snd_card_tasha_late_probe(struct snd_soc_card *card)
+{
+ const char *be_dl_name = LPASS_BE_SLIMBUS_0_RX;
+ struct snd_soc_pcm_runtime *rtd;
+ int ret = 0;
+ void *mbhc_calibration;
+
+ rtd = snd_soc_get_pcm_runtime(card, be_dl_name);
+ if (!rtd) {
+ dev_err(card->dev,
+ "%s: snd_soc_get_pcm_runtime for %s failed!\n",
+ __func__, be_dl_name);
+ ret = -EINVAL;
+ goto err_pcm_runtime;
+ }
+
+ mbhc_calibration = def_ext_mbhc_cal();
+ if (!mbhc_calibration) {
+ ret = -ENOMEM;
+ goto err_mbhc_cal;
+ }
+ wcd_mbhc_cfg_ptr->calibration = mbhc_calibration;
+ ret = tasha_mbhc_hs_detect(rtd->codec, wcd_mbhc_cfg_ptr);
+ if (ret) {
+ dev_err(card->dev, "%s: mbhc hs detect failed, err:%d\n",
+ __func__, ret);
+ goto err_hs_detect;
+ }
+ return 0;
+
+err_hs_detect:
+ kfree(mbhc_calibration);
+err_mbhc_cal:
+err_pcm_runtime:
+ return ret;
+}
+
+int msm_snd_card_tavil_late_probe(struct snd_soc_card *card)
+{
+ const char *be_dl_name = LPASS_BE_SLIMBUS_0_RX;
+ struct snd_soc_pcm_runtime *rtd;
+ int ret = 0;
+ void *mbhc_calibration;
+
+ rtd = snd_soc_get_pcm_runtime(card, be_dl_name);
+ if (!rtd) {
+ dev_err(card->dev,
+ "%s: snd_soc_get_pcm_runtime for %s failed!\n",
+ __func__, be_dl_name);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ mbhc_calibration = def_ext_mbhc_cal();
+ if (!mbhc_calibration) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ wcd_mbhc_cfg_ptr->calibration = mbhc_calibration;
+ ret = tavil_mbhc_hs_detect(rtd->codec, wcd_mbhc_cfg_ptr);
+ if (ret) {
+ dev_err(card->dev, "%s: mbhc hs detect failed, err:%d\n",
+ __func__, ret);
+ goto err_free_mbhc_cal;
+ }
+ return 0;
+
+err_free_mbhc_cal:
+ kfree(mbhc_calibration);
+err:
+ return ret;
+}
+
/**
* msm_audrx_init - Audio init function of sound card instantiate.
*
@@ -1495,7 +1566,7 @@
snd_soc_codec_get_dapm(codec);
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_pcm_runtime *rtd_aux = rtd->card->rtd_aux;
+ struct snd_soc_component *aux_comp;
struct snd_card *card;
struct snd_info_entry *entry;
struct msm_asoc_mach_data *pdata =
@@ -1678,13 +1749,20 @@
* Send speaker configuration only for WSA8810.
* Defalut configuration is for WSA8815.
*/
+ pr_debug("%s: Number of aux devices: %d\n",
+ __func__, rtd->card->num_aux_devs);
+
if (!strcmp(dev_name(codec_dai->dev), "tavil_codec")) {
- if (rtd_aux && rtd_aux->component)
- if (!strcmp(rtd_aux->component->name, WSA8810_NAME_1) ||
- !strcmp(rtd_aux->component->name, WSA8810_NAME_2)) {
+ if (rtd->card->num_aux_devs &&
+ !list_empty(&rtd->card->aux_comp_list)) {
+ aux_comp = list_first_entry(&rtd->card->aux_comp_list,
+ struct snd_soc_component, list_aux);
+ if (!strcmp(aux_comp->name, WSA8810_NAME_1) ||
+ !strcmp(aux_comp->name, WSA8810_NAME_2)) {
tavil_set_spkr_mode(rtd->codec, SPKR_MODE_1);
tavil_set_spkr_gain_offset(rtd->codec,
RX_GAIN_OFFSET_M1P5_DB);
+ }
}
card = rtd->card->snd_card;
entry = snd_info_create_subdir(card->module, "codecs",
@@ -1692,18 +1770,21 @@
if (!entry) {
pr_debug("%s: Cannot create codecs module entry\n",
__func__);
- pdata->codec_root = NULL;
goto done;
}
pdata->codec_root = entry;
tavil_codec_info_create_codec_entry(pdata->codec_root, codec);
} else {
- if (rtd_aux && rtd_aux->component)
- if (!strcmp(rtd_aux->component->name, WSA8810_NAME_1) ||
- !strcmp(rtd_aux->component->name, WSA8810_NAME_2)) {
+ if (rtd->card->num_aux_devs &&
+ !list_empty(&rtd->card->aux_comp_list)) {
+ aux_comp = list_first_entry(&rtd->card->aux_comp_list,
+ struct snd_soc_component, list_aux);
+ if (!strcmp(aux_comp->name, WSA8810_NAME_1) ||
+ !strcmp(aux_comp->name, WSA8810_NAME_2)) {
tasha_set_spkr_mode(rtd->codec, SPKR_MODE_1);
tasha_set_spkr_gain_offset(rtd->codec,
RX_GAIN_OFFSET_M1P5_DB);
+ }
}
card = rtd->card->snd_card;
entry = snd_info_create_subdir(card->module, "codecs",
@@ -1711,50 +1792,17 @@
if (!entry) {
pr_debug("%s: Cannot create codecs module entry\n",
__func__);
- ret = 0;
- goto err_snd_module;
+ goto done;
}
pdata->codec_root = entry;
tasha_codec_info_create_codec_entry(pdata->codec_root, codec);
tasha_mbhc_zdet_gpio_ctrl(msm_config_hph_en0_gpio, rtd->codec);
}
-
- wcd_mbhc_cfg_ptr->calibration = def_ext_mbhc_cal();
- if (!strcmp(dev_name(codec_dai->dev), "tavil_codec")) {
- if (wcd_mbhc_cfg_ptr->calibration) {
- pdata->codec = codec;
- ret = tavil_mbhc_hs_detect(codec, wcd_mbhc_cfg_ptr);
- if (ret < 0)
- pr_err("%s: Failed to intialise mbhc %d\n",
- __func__, ret);
- } else {
- pr_err("%s: wcd_mbhc_cfg calibration is NULL\n",
- __func__);
- ret = -ENOMEM;
- goto err_mbhc_cal;
- }
- } else {
- if (wcd_mbhc_cfg_ptr->calibration) {
- pdata->codec = codec;
- ret = tasha_mbhc_hs_detect(codec, wcd_mbhc_cfg_ptr);
- if (ret < 0)
- pr_err("%s: Failed to intialise mbhc %d\n",
- __func__, ret);
- } else {
- pr_err("%s: wcd_mbhc_cfg calibration is NULL\n",
- __func__);
- ret = -ENOMEM;
- goto err_mbhc_cal;
- }
-
- }
- codec_reg_done = true;
done:
+ msm_set_codec_reg_done(true);
return 0;
-err_snd_module:
err_afe_cfg:
-err_mbhc_cal:
return ret;
}
EXPORT_SYMBOL(msm_audrx_init);
diff --git a/sound/soc/msm/sdm660-external.h b/sound/soc/msm/sdm660-external.h
index acf5735..d53e7c7 100644
--- a/sound/soc/msm/sdm660-external.h
+++ b/sound/soc/msm/sdm660-external.h
@@ -30,6 +30,8 @@
int snd_card_val);
int msm_ext_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
struct snd_pcm_hw_params *params);
+int msm_snd_card_tavil_late_probe(struct snd_soc_card *card);
+int msm_snd_card_tasha_late_probe(struct snd_soc_card *card);
#ifdef CONFIG_SND_SOC_EXT_CODEC
int msm_ext_cdc_init(struct platform_device *, struct msm_asoc_mach_data *,
struct snd_soc_card **, struct wcd_mbhc_config *);
diff --git a/sound/soc/msm/sdm660-internal.c b/sound/soc/msm/sdm660-internal.c
index 802137b..14e7308 100644
--- a/sound/soc/msm/sdm660-internal.c
+++ b/sound/soc/msm/sdm660-internal.c
@@ -539,11 +539,11 @@
return 0;
}
-static int int_mi2s_get_idx_from_beid(int32_t be_id)
+static int int_mi2s_get_idx_from_beid(int32_t id)
{
int idx = 0;
- switch (be_id) {
+ switch (id) {
case MSM_BACKEND_DAI_INT0_MI2S_RX:
idx = INT0_MI2S;
break;
@@ -596,13 +596,13 @@
pr_debug("%s: format = %d, rate = %d\n",
__func__, params_format(params), params_rate(params));
- switch (dai_link->be_id) {
+ switch (dai_link->id) {
case MSM_BACKEND_DAI_INT0_MI2S_RX:
case MSM_BACKEND_DAI_INT2_MI2S_TX:
case MSM_BACKEND_DAI_INT3_MI2S_TX:
case MSM_BACKEND_DAI_INT4_MI2S_RX:
case MSM_BACKEND_DAI_INT5_MI2S_TX:
- idx = int_mi2s_get_idx_from_beid(dai_link->be_id);
+ idx = int_mi2s_get_idx_from_beid(dai_link->id);
rate->min = rate->max = int_mi2s_cfg[idx].sample_rate;
channels->min = channels->max =
int_mi2s_cfg[idx].channels;
@@ -625,7 +625,7 @@
struct snd_interval *channels = hw_param_interval(params,
SNDRV_PCM_HW_PARAM_CHANNELS);
- switch (dai_link->be_id) {
+ switch (dai_link->id) {
case MSM_BACKEND_DAI_SLIMBUS_7_RX:
case MSM_BACKEND_DAI_SLIMBUS_7_TX:
param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
@@ -982,11 +982,11 @@
return 0;
}
-static int int_mi2s_get_port_id(int be_id)
+static int int_mi2s_get_port_id(int id)
{
int afe_port_id;
- switch (be_id) {
+ switch (id) {
case MSM_BACKEND_DAI_INT0_MI2S_RX:
afe_port_id = AFE_PORT_ID_INT0_MI2S_RX;
break;
@@ -1003,7 +1003,7 @@
afe_port_id = AFE_PORT_ID_INT5_MI2S_TX;
break;
default:
- pr_err("%s: Invalid be_id: %d\n", __func__, be_id);
+ pr_err("%s: Invalid id: %d\n", __func__, id);
afe_port_id = -EINVAL;
}
@@ -1073,7 +1073,7 @@
int port_id = 0;
int index;
- port_id = int_mi2s_get_port_id(rtd->dai_link->be_id);
+ port_id = int_mi2s_get_port_id(rtd->dai_link->id);
if (port_id < 0) {
dev_err(rtd->card->dev, "%s: Invalid port_id\n", __func__);
ret = port_id;
@@ -1303,7 +1303,7 @@
card = rtd->card->snd_card;
if (!codec_root)
- codec_root = snd_register_module_info(card->module, "codecs",
+ codec_root = snd_info_create_subdir(card->module, "codecs",
card->proc_root);
if (!codec_root) {
pr_debug("%s: Cannot create codecs module entry\n",
@@ -1314,6 +1314,7 @@
msm_dig_codec_info_create_codec_entry(codec_root, dig_cdc);
msm_anlg_codec_info_create_codec_entry(codec_root, ana_cdc);
done:
+ msm_set_codec_reg_done(true);
return 0;
}
@@ -1323,7 +1324,7 @@
struct snd_soc_dapm_context *dapm =
snd_soc_codec_get_dapm(codec);
struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(rtd->card);
- struct snd_soc_pcm_runtime *rtd_aux = rtd->card->rtd_aux;
+ struct snd_soc_component *aux_comp;
struct snd_card *card;
snd_soc_add_codec_controls(codec, msm_sdw_controls,
@@ -1342,16 +1343,22 @@
* Send speaker configuration only for WSA8810.
* Default configuration is for WSA8815.
*/
- if (rtd_aux && rtd_aux->component)
- if (!strcmp(rtd_aux->component->name, WSA8810_NAME_1) ||
- !strcmp(rtd_aux->component->name, WSA8810_NAME_2)) {
+ pr_debug("%s: Number of aux devices: %d\n",
+ __func__, rtd->card->num_aux_devs);
+ if (rtd->card->num_aux_devs &&
+ !list_empty(&rtd->card->aux_comp_list)) {
+ aux_comp = list_first_entry(&rtd->card->aux_comp_list,
+ struct snd_soc_component, list_aux);
+ if (!strcmp(aux_comp->name, WSA8810_NAME_1) ||
+ !strcmp(aux_comp->name, WSA8810_NAME_2)) {
msm_sdw_set_spkr_mode(rtd->codec, SPKR_MODE_1);
msm_sdw_set_spkr_gain_offset(rtd->codec,
RX_GAIN_OFFSET_M1P5_DB);
+ }
}
card = rtd->card->snd_card;
if (!codec_root)
- codec_root = snd_register_module_info(card->module, "codecs",
+ codec_root = snd_info_create_subdir(card->module, "codecs",
card->proc_root);
if (!codec_root) {
pr_debug("%s: Cannot create codecs module entry\n",
@@ -1396,8 +1403,8 @@
goto exit;
}
- dev_dbg(rtd->dev, "%s: tx_ch_cnt(%d) be_id %d\n",
- __func__, tx_ch_cnt, dai_link->be_id);
+ dev_dbg(rtd->dev, "%s: tx_ch_cnt(%d) id %d\n",
+ __func__, tx_ch_cnt, dai_link->id);
ret = snd_soc_dai_set_channel_map(cpu_dai,
tx_ch_cnt, tx_ch, rx_ch_cnt, rx_ch);
@@ -1669,7 +1676,7 @@
.dpcm_capture = 1,
/* this dai link has playback support */
.ignore_pmdown_time = 1,
- .be_id = MSM_FRONTEND_DAI_MULTIMEDIA1
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA1
},
{/* hw:x,1 */
.name = MSM_DAILINK_NAME(Media2),
@@ -1686,7 +1693,7 @@
.ignore_suspend = 1,
/* this dai link has playback support */
.ignore_pmdown_time = 1,
- .be_id = MSM_FRONTEND_DAI_MULTIMEDIA2,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA2,
},
{/* hw:x,2 */
.name = "VoiceMMode1",
@@ -1703,7 +1710,7 @@
.ignore_pmdown_time = 1,
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
- .be_id = MSM_FRONTEND_DAI_VOICEMMODE1,
+ .id = MSM_FRONTEND_DAI_VOICEMMODE1,
},
{/* hw:x,3 */
.name = "MSM VoIP",
@@ -1720,7 +1727,7 @@
.ignore_suspend = 1,
/* this dai link has playback support */
.ignore_pmdown_time = 1,
- .be_id = MSM_FRONTEND_DAI_VOIP,
+ .id = MSM_FRONTEND_DAI_VOIP,
},
{/* hw:x,4 */
.name = MSM_DAILINK_NAME(ULL),
@@ -1736,7 +1743,7 @@
.ignore_suspend = 1,
/* this dai link has playback support */
.ignore_pmdown_time = 1,
- .be_id = MSM_FRONTEND_DAI_MULTIMEDIA3,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA3,
},
/* Hostless PCM purpose */
{/* hw:x,5 */
@@ -1792,7 +1799,7 @@
.ignore_suspend = 1,
.ignore_pmdown_time = 1,
/* this dai link has playback support */
- .be_id = MSM_FRONTEND_DAI_MULTIMEDIA4,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA4,
},
{/* hw:x,9*/
.name = "AUXPCM Hostless",
@@ -1873,7 +1880,7 @@
.ignore_suspend = 1,
/* this dai link has playback support */
.ignore_pmdown_time = 1,
- .be_id = MSM_FRONTEND_DAI_MULTIMEDIA5,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA5,
},
/* LSM FE */
{/* hw:x,14 */
@@ -1890,7 +1897,7 @@
.ignore_pmdown_time = 1,
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
- .be_id = MSM_FRONTEND_DAI_LSM1,
+ .id = MSM_FRONTEND_DAI_LSM1,
},
{/* hw:x,15 */
.name = MSM_DAILINK_NAME(Compress2),
@@ -1905,16 +1912,17 @@
.trigger = {SND_SOC_DPCM_TRIGGER_POST,
SND_SOC_DPCM_TRIGGER_POST},
.ignore_suspend = 1,
- .be_id = MSM_FRONTEND_DAI_MULTIMEDIA7,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA7,
},
{/* hw:x,16 */
- .name = MSM_DAILINK_NAME(Compress3),
- .stream_name = "Compress3",
+ .name = MSM_DAILINK_NAME(MultiMedia10),
+ .stream_name = "MultiMedia10",
.cpu_dai_name = "MultiMedia10",
- .platform_name = "msm-compress-dsp",
+ .platform_name = "msm-pcm-dsp.1",
.dynamic = 1,
.dpcm_capture = 1,
.dpcm_playback = 1,
+ .dpcm_capture = 1,
.trigger = {SND_SOC_DPCM_TRIGGER_POST,
SND_SOC_DPCM_TRIGGER_POST},
.codec_dai_name = "snd-soc-dummy-dai",
@@ -1922,7 +1930,7 @@
.ignore_suspend = 1,
.ignore_pmdown_time = 1,
/* this dai link has playback support */
- .be_id = MSM_FRONTEND_DAI_MULTIMEDIA10,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA10,
},
{/* hw:x,17 */
.name = MSM_DAILINK_NAME(ULL_NOIRQ),
@@ -1939,7 +1947,7 @@
.ignore_suspend = 1,
.ignore_pmdown_time = 1,
/* this dai link has playback support */
- .be_id = MSM_FRONTEND_DAI_MULTIMEDIA8,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA8,
},
{/* hw:x,18 */
.name = "HDMI_RX_HOSTLESS",
@@ -1971,7 +1979,7 @@
.ignore_pmdown_time = 1,
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
- .be_id = MSM_FRONTEND_DAI_VOICEMMODE2,
+ .id = MSM_FRONTEND_DAI_VOICEMMODE2,
},
{/* hw:x,20 */
.name = "Listen 2 Audio Service",
@@ -1987,7 +1995,7 @@
.ignore_pmdown_time = 1,
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
- .be_id = MSM_FRONTEND_DAI_LSM2,
+ .id = MSM_FRONTEND_DAI_LSM2,
},
{/* hw:x,21 */
.name = "Listen 3 Audio Service",
@@ -2003,7 +2011,7 @@
.ignore_pmdown_time = 1,
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
- .be_id = MSM_FRONTEND_DAI_LSM3,
+ .id = MSM_FRONTEND_DAI_LSM3,
},
{/* hw:x,22 */
.name = "Listen 4 Audio Service",
@@ -2019,7 +2027,7 @@
.ignore_pmdown_time = 1,
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
- .be_id = MSM_FRONTEND_DAI_LSM4,
+ .id = MSM_FRONTEND_DAI_LSM4,
},
{/* hw:x,23 */
.name = "Listen 5 Audio Service",
@@ -2035,7 +2043,7 @@
.ignore_pmdown_time = 1,
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
- .be_id = MSM_FRONTEND_DAI_LSM5,
+ .id = MSM_FRONTEND_DAI_LSM5,
},
{/* hw:x,24 */
.name = "Listen 6 Audio Service",
@@ -2051,7 +2059,7 @@
.ignore_pmdown_time = 1,
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
- .be_id = MSM_FRONTEND_DAI_LSM6
+ .id = MSM_FRONTEND_DAI_LSM6
},
{/* hw:x,25 */
.name = "Listen 7 Audio Service",
@@ -2067,7 +2075,7 @@
.ignore_pmdown_time = 1,
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
- .be_id = MSM_FRONTEND_DAI_LSM7,
+ .id = MSM_FRONTEND_DAI_LSM7,
},
{/* hw:x,26 */
.name = "Listen 8 Audio Service",
@@ -2083,7 +2091,7 @@
.ignore_pmdown_time = 1,
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
- .be_id = MSM_FRONTEND_DAI_LSM8,
+ .id = MSM_FRONTEND_DAI_LSM8,
},
{/* hw:x,27 */
.name = MSM_DAILINK_NAME(Media9),
@@ -2100,7 +2108,7 @@
.ignore_suspend = 1,
.ignore_pmdown_time = 1,
/* this dai link has playback support */
- .be_id = MSM_FRONTEND_DAI_MULTIMEDIA9,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA9,
},
{/* hw:x,28 */
.name = MSM_DAILINK_NAME(Compress4),
@@ -2117,7 +2125,7 @@
.ignore_suspend = 1,
.ignore_pmdown_time = 1,
/* this dai link has playback support */
- .be_id = MSM_FRONTEND_DAI_MULTIMEDIA11,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA11,
},
{/* hw:x,29 */
.name = MSM_DAILINK_NAME(Compress5),
@@ -2134,7 +2142,7 @@
.ignore_suspend = 1,
.ignore_pmdown_time = 1,
/* this dai link has playback support */
- .be_id = MSM_FRONTEND_DAI_MULTIMEDIA12,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA12,
},
{/* hw:x,30 */
.name = MSM_DAILINK_NAME(Compress6),
@@ -2151,7 +2159,7 @@
.ignore_suspend = 1,
.ignore_pmdown_time = 1,
/* this dai link has playback support */
- .be_id = MSM_FRONTEND_DAI_MULTIMEDIA13,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA13,
},
{/* hw:x,31 */
.name = MSM_DAILINK_NAME(Compress7),
@@ -2168,7 +2176,7 @@
.ignore_suspend = 1,
.ignore_pmdown_time = 1,
/* this dai link has playback support */
- .be_id = MSM_FRONTEND_DAI_MULTIMEDIA14,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA14,
},
{/* hw:x,32 */
.name = MSM_DAILINK_NAME(Compress8),
@@ -2185,13 +2193,13 @@
.ignore_suspend = 1,
.ignore_pmdown_time = 1,
/* this dai link has playback support */
- .be_id = MSM_FRONTEND_DAI_MULTIMEDIA15,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA15,
},
{/* hw:x,33 */
- .name = MSM_DAILINK_NAME(Compress9),
- .stream_name = "Compress9",
+ .name = MSM_DAILINK_NAME(ULL_NOIRQ_2),
+ .stream_name = "MM_NOIRQ_2",
.cpu_dai_name = "MultiMedia16",
- .platform_name = "msm-compress-dsp",
+ .platform_name = "msm-pcm-dsp-noirq",
.dynamic = 1,
.dpcm_capture = 1,
.dpcm_playback = 1,
@@ -2202,7 +2210,7 @@
.ignore_suspend = 1,
.ignore_pmdown_time = 1,
/* this dai link has playback support */
- .be_id = MSM_FRONTEND_DAI_MULTIMEDIA16,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA16,
},
{/* hw:x,34 */
.name = "SLIMBUS_8 Hostless",
@@ -2302,7 +2310,7 @@
.ignore_suspend = 1,
.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
.ignore_pmdown_time = 1,
- .be_id = MSM_FRONTEND_DAI_MULTIMEDIA6,
+ .id = MSM_FRONTEND_DAI_MULTIMEDIA6,
},
};
@@ -2315,7 +2323,7 @@
.platform_name = "msm-pcm-hostless",
.codec_name = "msm_sdw_codec",
.codec_dai_name = "msm_sdw_vifeedback",
- .be_id = MSM_BACKEND_DAI_INT5_MI2S_TX,
+ .id = MSM_BACKEND_DAI_INT5_MI2S_TX,
.be_hw_params_fixup = int_mi2s_be_hw_params_fixup,
.ops = &msm_sdw_mi2s_be_ops,
.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
@@ -2338,7 +2346,7 @@
.dpcm_playback = 1,
.async_ops = ASYNC_DPCM_SND_SOC_PREPARE |
ASYNC_DPCM_SND_SOC_HW_PARAMS,
- .be_id = MSM_BACKEND_DAI_INT0_MI2S_RX,
+ .id = MSM_BACKEND_DAI_INT0_MI2S_RX,
.init = &msm_audrx_init,
.be_hw_params_fixup = int_mi2s_be_hw_params_fixup,
.ops = &msm_int_mi2s_be_ops,
@@ -2355,7 +2363,7 @@
.dpcm_capture = 1,
.async_ops = ASYNC_DPCM_SND_SOC_PREPARE |
ASYNC_DPCM_SND_SOC_HW_PARAMS,
- .be_id = MSM_BACKEND_DAI_INT3_MI2S_TX,
+ .id = MSM_BACKEND_DAI_INT3_MI2S_TX,
.be_hw_params_fixup = int_mi2s_be_hw_params_fixup,
.ops = &msm_int_mi2s_be_ops,
.ignore_suspend = 1,
@@ -2371,7 +2379,7 @@
.dpcm_capture = 1,
.async_ops = ASYNC_DPCM_SND_SOC_PREPARE |
ASYNC_DPCM_SND_SOC_HW_PARAMS,
- .be_id = MSM_BACKEND_DAI_INT2_MI2S_TX,
+ .id = MSM_BACKEND_DAI_INT2_MI2S_TX,
.be_hw_params_fixup = int_mi2s_be_hw_params_fixup,
.ops = &msm_int_mi2s_be_ops,
.ignore_suspend = 1,
@@ -2385,7 +2393,7 @@
.codec_dai_name = "msm-stub-rx",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_AFE_PCM_RX,
+ .id = MSM_BACKEND_DAI_AFE_PCM_RX,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
/* this dainlink has playback support */
.ignore_pmdown_time = 1,
@@ -2400,7 +2408,7 @@
.codec_dai_name = "msm-stub-tx",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_AFE_PCM_TX,
+ .id = MSM_BACKEND_DAI_AFE_PCM_TX,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ignore_suspend = 1,
},
@@ -2414,7 +2422,7 @@
.codec_dai_name = "msm-stub-tx",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_INCALL_RECORD_TX,
+ .id = MSM_BACKEND_DAI_INCALL_RECORD_TX,
.be_hw_params_fixup = msm_be_hw_params_fixup,
.ignore_suspend = 1,
},
@@ -2428,7 +2436,7 @@
.codec_dai_name = "msm-stub-tx",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_INCALL_RECORD_RX,
+ .id = MSM_BACKEND_DAI_INCALL_RECORD_RX,
.be_hw_params_fixup = msm_be_hw_params_fixup,
.ignore_suspend = 1,
},
@@ -2442,7 +2450,7 @@
.codec_dai_name = "msm-stub-rx",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_VOICE_PLAYBACK_TX,
+ .id = MSM_BACKEND_DAI_VOICE_PLAYBACK_TX,
.be_hw_params_fixup = msm_be_hw_params_fixup,
.ignore_suspend = 1,
},
@@ -2456,7 +2464,7 @@
.codec_dai_name = "msm-stub-rx",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_VOICE2_PLAYBACK_TX,
+ .id = MSM_BACKEND_DAI_VOICE2_PLAYBACK_TX,
.be_hw_params_fixup = msm_be_hw_params_fixup,
.ignore_suspend = 1,
},
@@ -2469,7 +2477,7 @@
.codec_dai_name = "msm-stub-rx",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_USB_RX,
+ .id = MSM_BACKEND_DAI_USB_RX,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ignore_pmdown_time = 1,
.ignore_suspend = 1,
@@ -2483,7 +2491,7 @@
.codec_dai_name = "msm-stub-tx",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_USB_TX,
+ .id = MSM_BACKEND_DAI_USB_TX,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ignore_suspend = 1,
},
@@ -2496,7 +2504,7 @@
.codec_dai_name = "msm-stub-rx",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_PRI_TDM_RX_0,
+ .id = MSM_BACKEND_DAI_PRI_TDM_RX_0,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ops = &msm_tdm_be_ops,
.ignore_suspend = 1,
@@ -2510,7 +2518,7 @@
.codec_dai_name = "msm-stub-tx",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_PRI_TDM_TX_0,
+ .id = MSM_BACKEND_DAI_PRI_TDM_TX_0,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ops = &msm_tdm_be_ops,
.ignore_suspend = 1,
@@ -2524,7 +2532,7 @@
.codec_dai_name = "msm-stub-rx",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_SEC_TDM_RX_0,
+ .id = MSM_BACKEND_DAI_SEC_TDM_RX_0,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ops = &msm_tdm_be_ops,
.ignore_suspend = 1,
@@ -2538,7 +2546,7 @@
.codec_dai_name = "msm-stub-tx",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_SEC_TDM_TX_0,
+ .id = MSM_BACKEND_DAI_SEC_TDM_TX_0,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ops = &msm_tdm_be_ops,
.ignore_suspend = 1,
@@ -2552,7 +2560,7 @@
.codec_dai_name = "msm-stub-rx",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_TERT_TDM_RX_0,
+ .id = MSM_BACKEND_DAI_TERT_TDM_RX_0,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ops = &msm_tdm_be_ops,
.ignore_suspend = 1,
@@ -2566,7 +2574,7 @@
.codec_dai_name = "msm-stub-tx",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_TERT_TDM_TX_0,
+ .id = MSM_BACKEND_DAI_TERT_TDM_TX_0,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ops = &msm_tdm_be_ops,
.ignore_suspend = 1,
@@ -2580,7 +2588,7 @@
.codec_dai_name = "msm-stub-rx",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+ .id = MSM_BACKEND_DAI_QUAT_TDM_RX_0,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ops = &msm_tdm_be_ops,
.ignore_suspend = 1,
@@ -2594,7 +2602,7 @@
.codec_dai_name = "msm-stub-tx",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+ .id = MSM_BACKEND_DAI_QUAT_TDM_TX_0,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ops = &msm_tdm_be_ops,
.ignore_suspend = 1,
@@ -2611,7 +2619,7 @@
.codec_dai_name = "msm-stub-rx",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_PRI_MI2S_RX,
+ .id = MSM_BACKEND_DAI_PRI_MI2S_RX,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ops = &msm_mi2s_be_ops,
.ignore_suspend = 1,
@@ -2626,7 +2634,7 @@
.codec_dai_name = "msm-stub-tx",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_PRI_MI2S_TX,
+ .id = MSM_BACKEND_DAI_PRI_MI2S_TX,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ops = &msm_mi2s_be_ops,
.ignore_suspend = 1,
@@ -2640,7 +2648,7 @@
.codec_dai_name = "msm-stub-rx",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+ .id = MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ops = &msm_mi2s_be_ops,
.ignore_suspend = 1,
@@ -2655,7 +2663,7 @@
.codec_dai_name = "msm-stub-tx",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_SECONDARY_MI2S_TX,
+ .id = MSM_BACKEND_DAI_SECONDARY_MI2S_TX,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ops = &msm_mi2s_be_ops,
.ignore_suspend = 1,
@@ -2669,7 +2677,7 @@
.codec_dai_name = "msm-stub-rx",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+ .id = MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ops = &msm_mi2s_be_ops,
.ignore_suspend = 1,
@@ -2684,7 +2692,7 @@
.codec_dai_name = "msm-stub-tx",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+ .id = MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ops = &msm_mi2s_be_ops,
.ignore_suspend = 1,
@@ -2698,7 +2706,7 @@
.codec_dai_name = "msm-stub-rx",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+ .id = MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ops = &msm_mi2s_be_ops,
.ignore_suspend = 1,
@@ -2713,7 +2721,7 @@
.codec_dai_name = "msm-stub-tx",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+ .id = MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ops = &msm_mi2s_be_ops,
.ignore_suspend = 1,
@@ -2731,7 +2739,7 @@
.codec_dai_name = "msm-stub-rx",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_AUXPCM_RX,
+ .id = MSM_BACKEND_DAI_AUXPCM_RX,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ignore_pmdown_time = 1,
.ignore_suspend = 1,
@@ -2746,7 +2754,7 @@
.codec_dai_name = "msm-stub-tx",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_AUXPCM_TX,
+ .id = MSM_BACKEND_DAI_AUXPCM_TX,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ignore_pmdown_time = 1,
.ignore_suspend = 1,
@@ -2762,7 +2770,7 @@
.codec_dai_name = "msm-stub-rx",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+ .id = MSM_BACKEND_DAI_SEC_AUXPCM_RX,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ignore_pmdown_time = 1,
.ignore_suspend = 1,
@@ -2777,7 +2785,7 @@
.codec_dai_name = "msm-stub-tx",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_SEC_AUXPCM_TX,
+ .id = MSM_BACKEND_DAI_SEC_AUXPCM_TX,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ignore_suspend = 1,
.ignore_pmdown_time = 1,
@@ -2793,7 +2801,7 @@
.codec_dai_name = "msm-stub-rx",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_TERT_AUXPCM_RX,
+ .id = MSM_BACKEND_DAI_TERT_AUXPCM_RX,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ignore_pmdown_time = 1,
.ignore_suspend = 1,
@@ -2808,7 +2816,7 @@
.codec_dai_name = "msm-stub-tx",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_TERT_AUXPCM_TX,
+ .id = MSM_BACKEND_DAI_TERT_AUXPCM_TX,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ignore_suspend = 1,
.ignore_pmdown_time = 1,
@@ -2824,7 +2832,7 @@
.codec_dai_name = "msm-stub-rx",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_QUAT_AUXPCM_RX,
+ .id = MSM_BACKEND_DAI_QUAT_AUXPCM_RX,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ignore_pmdown_time = 1,
.ignore_suspend = 1,
@@ -2839,7 +2847,7 @@
.codec_dai_name = "msm-stub-tx",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_QUAT_AUXPCM_TX,
+ .id = MSM_BACKEND_DAI_QUAT_AUXPCM_TX,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ignore_suspend = 1,
.ignore_pmdown_time = 1,
@@ -2862,7 +2870,7 @@
.codec_dai_name = "btfm_bt_sco_a2dp_slim_rx",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_SLIMBUS_7_RX,
+ .id = MSM_BACKEND_DAI_SLIMBUS_7_RX,
.be_hw_params_fixup = msm_btfm_be_hw_params_fixup,
.ops = &msm_wcn_ops,
/* dai link has playback support */
@@ -2878,7 +2886,7 @@
.codec_dai_name = "btfm_bt_sco_slim_tx",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_SLIMBUS_7_TX,
+ .id = MSM_BACKEND_DAI_SLIMBUS_7_TX,
.be_hw_params_fixup = msm_btfm_be_hw_params_fixup,
.ops = &msm_wcn_ops,
.ignore_suspend = 1,
@@ -2892,7 +2900,7 @@
.codec_dai_name = "btfm_fm_slim_tx",
.no_pcm = 1,
.dpcm_capture = 1,
- .be_id = MSM_BACKEND_DAI_SLIMBUS_8_TX,
+ .id = MSM_BACKEND_DAI_SLIMBUS_8_TX,
.be_hw_params_fixup = msm_btfm_be_hw_params_fixup,
.init = &msm_wcn_init,
.ops = &msm_wcn_ops,
@@ -2910,7 +2918,7 @@
.codec_dai_name = "msm_sdw_i2s_rx1",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_INT4_MI2S_RX,
+ .id = MSM_BACKEND_DAI_INT4_MI2S_RX,
.init = &msm_sdw_audrx_init,
.be_hw_params_fixup = int_mi2s_be_hw_params_fixup,
.ops = &msm_sdw_mi2s_be_ops,
@@ -2929,7 +2937,7 @@
.codec_dai_name = "msm_dp_audio_codec_rx_dai",
.no_pcm = 1,
.dpcm_playback = 1,
- .be_id = MSM_BACKEND_DAI_DISPLAY_PORT_RX,
+ .id = MSM_BACKEND_DAI_DISPLAY_PORT_RX,
.be_hw_params_fixup = msm_common_be_hw_params_fixup,
.ignore_pmdown_time = 1,
.ignore_suspend = 1,
diff --git a/sound/soc/msm/sdm845.c b/sound/soc/msm/sdm845.c
index e699760..7a5ccd8 100644
--- a/sound/soc/msm/sdm845.c
+++ b/sound/soc/msm/sdm845.c
@@ -134,6 +134,13 @@
u32 msm_is_mi2s_master;
};
+static u32 mi2s_ebit_clk[MI2S_MAX] = {
+ Q6AFE_LPASS_CLK_ID_PRI_MI2S_EBIT,
+ Q6AFE_LPASS_CLK_ID_SEC_MI2S_EBIT,
+ Q6AFE_LPASS_CLK_ID_TER_MI2S_EBIT,
+ Q6AFE_LPASS_CLK_ID_QUAD_MI2S_EBIT
+};
+
struct auxpcm_conf {
struct mutex lock;
u32 ref_cnt;
@@ -434,6 +441,7 @@
"Five", "Six", "Seven",
"Eight"};
static const char *const hifi_text[] = {"Off", "On"};
+static const char *const qos_text[] = {"Disable", "Enable"};
static SOC_ENUM_SINGLE_EXT_DECL(slim_0_rx_chs, slim_rx_ch_text);
static SOC_ENUM_SINGLE_EXT_DECL(slim_2_rx_chs, slim_rx_ch_text);
@@ -493,10 +501,16 @@
static SOC_ENUM_SINGLE_EXT_DECL(tert_mi2s_tx_chs, mi2s_ch_text);
static SOC_ENUM_SINGLE_EXT_DECL(quat_mi2s_rx_chs, mi2s_ch_text);
static SOC_ENUM_SINGLE_EXT_DECL(quat_mi2s_tx_chs, mi2s_ch_text);
+static SOC_ENUM_SINGLE_EXT_DECL(mi2s_rx_format, bit_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(mi2s_tx_format, bit_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(aux_pcm_rx_format, bit_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(aux_pcm_tx_format, bit_format_text);
static SOC_ENUM_SINGLE_EXT_DECL(hifi_function, hifi_text);
+static SOC_ENUM_SINGLE_EXT_DECL(qos_vote, qos_text);
static struct platform_device *spdev;
static int msm_hifi_control;
+static int qos_vote_status;
static bool is_initial_boot;
static bool codec_reg_done;
@@ -536,10 +550,10 @@
};
static struct snd_soc_dapm_route wcd_audio_paths[] = {
- {"MIC BIAS1", NULL, "MCLK"},
- {"MIC BIAS2", NULL, "MCLK"},
- {"MIC BIAS3", NULL, "MCLK"},
- {"MIC BIAS4", NULL, "MCLK"},
+ {"MIC BIAS1", NULL, "MCLK TX"},
+ {"MIC BIAS2", NULL, "MCLK TX"},
+ {"MIC BIAS3", NULL, "MCLK TX"},
+ {"MIC BIAS4", NULL, "MCLK TX"},
};
static struct afe_clk_set mi2s_clk[MI2S_MAX] = {
@@ -2250,6 +2264,54 @@
return sample_rate;
}
+static int mi2s_auxpcm_get_format(int value)
+{
+ int format;
+
+ switch (value) {
+ case 0:
+ format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ case 1:
+ format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 2:
+ format = SNDRV_PCM_FORMAT_S24_3LE;
+ break;
+ case 3:
+ format = SNDRV_PCM_FORMAT_S32_LE;
+ break;
+ default:
+ format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ return format;
+}
+
+static int mi2s_auxpcm_get_format_value(int format)
+{
+ int value;
+
+ switch (format) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ value = 0;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ value = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S24_3LE:
+ value = 2;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ value = 3;
+ break;
+ default:
+ value = 0;
+ break;
+ }
+ return value;
+}
+
static int mi2s_rx_sample_rate_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -2382,6 +2444,150 @@
return 1;
}
+static int msm_mi2s_rx_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = mi2s_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ ucontrol->value.enumerated.item[0] =
+ mi2s_auxpcm_get_format_value(mi2s_rx_cfg[idx].bit_format);
+
+ pr_debug("%s: idx[%d]_rx_format = %d, item = %d\n", __func__,
+ idx, mi2s_rx_cfg[idx].bit_format,
+ ucontrol->value.enumerated.item[0]);
+
+ return 0;
+}
+
+static int msm_mi2s_rx_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = mi2s_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ mi2s_rx_cfg[idx].bit_format =
+ mi2s_auxpcm_get_format(ucontrol->value.enumerated.item[0]);
+
+ pr_debug("%s: idx[%d]_rx_format = %d, item = %d\n", __func__,
+ idx, mi2s_rx_cfg[idx].bit_format,
+ ucontrol->value.enumerated.item[0]);
+
+ return 0;
+}
+
+static int msm_mi2s_tx_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = mi2s_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ ucontrol->value.enumerated.item[0] =
+ mi2s_auxpcm_get_format_value(mi2s_tx_cfg[idx].bit_format);
+
+ pr_debug("%s: idx[%d]_tx_format = %d, item = %d\n", __func__,
+ idx, mi2s_tx_cfg[idx].bit_format,
+ ucontrol->value.enumerated.item[0]);
+
+ return 0;
+}
+
+static int msm_mi2s_tx_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = mi2s_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ mi2s_tx_cfg[idx].bit_format =
+ mi2s_auxpcm_get_format(ucontrol->value.enumerated.item[0]);
+
+ pr_debug("%s: idx[%d]_tx_format = %d, item = %d\n", __func__,
+ idx, mi2s_tx_cfg[idx].bit_format,
+ ucontrol->value.enumerated.item[0]);
+
+ return 0;
+}
+
+static int msm_aux_pcm_rx_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = aux_pcm_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ ucontrol->value.enumerated.item[0] =
+ mi2s_auxpcm_get_format_value(aux_pcm_rx_cfg[idx].bit_format);
+
+ pr_debug("%s: idx[%d]_rx_format = %d, item = %d\n", __func__,
+ idx, aux_pcm_rx_cfg[idx].bit_format,
+ ucontrol->value.enumerated.item[0]);
+
+ return 0;
+}
+
+static int msm_aux_pcm_rx_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = aux_pcm_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ aux_pcm_rx_cfg[idx].bit_format =
+ mi2s_auxpcm_get_format(ucontrol->value.enumerated.item[0]);
+
+ pr_debug("%s: idx[%d]_rx_format = %d, item = %d\n", __func__,
+ idx, aux_pcm_rx_cfg[idx].bit_format,
+ ucontrol->value.enumerated.item[0]);
+
+ return 0;
+}
+
+static int msm_aux_pcm_tx_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = aux_pcm_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ ucontrol->value.enumerated.item[0] =
+ mi2s_auxpcm_get_format_value(aux_pcm_tx_cfg[idx].bit_format);
+
+ pr_debug("%s: idx[%d]_tx_format = %d, item = %d\n", __func__,
+ idx, aux_pcm_tx_cfg[idx].bit_format,
+ ucontrol->value.enumerated.item[0]);
+
+ return 0;
+}
+
+static int msm_aux_pcm_tx_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = aux_pcm_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ aux_pcm_tx_cfg[idx].bit_format =
+ mi2s_auxpcm_get_format(ucontrol->value.enumerated.item[0]);
+
+ pr_debug("%s: idx[%d]_tx_format = %d, item = %d\n", __func__,
+ idx, aux_pcm_tx_cfg[idx].bit_format,
+ ucontrol->value.enumerated.item[0]);
+
+ return 0;
+}
+
static int msm_hifi_ctrl(struct snd_soc_codec *codec)
{
struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
@@ -2432,6 +2638,72 @@
return 0;
}
+static s32 msm_qos_value(struct snd_pcm_runtime *runtime)
+{
+ s32 usecs;
+
+ if (!runtime->rate)
+ return -EINVAL;
+
+ /* take 75% of period time as the deadline */
+ usecs = (750000 / runtime->rate) * runtime->period_size;
+ usecs += ((750000 % runtime->rate) * runtime->period_size) /
+ runtime->rate;
+
+ return usecs;
+}
+
+static int msm_qos_ctl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.enumerated.item[0] = qos_vote_status;
+
+ return 0;
+}
+
+static int msm_qos_ctl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct snd_soc_card *card = codec->component.card;
+ const char *be_name = MSM_DAILINK_NAME(LowLatency);
+ struct snd_soc_pcm_runtime *rtd;
+ struct snd_pcm_substream *substream;
+ s32 usecs;
+
+ rtd = snd_soc_get_pcm_runtime(card, be_name);
+ if (!rtd) {
+ pr_err("%s: fail to get pcm runtime for %s\n",
+ __func__, be_name);
+ return -EINVAL;
+ }
+
+ substream = rtd->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+ if (!substream) {
+ pr_err("%s: substream is null\n", __func__);
+ return -EINVAL;
+ }
+
+ qos_vote_status = ucontrol->value.enumerated.item[0];
+ if (qos_vote_status) {
+ if (pm_qos_request_active(&substream->latency_pm_qos_req))
+ pm_qos_remove_request(&substream->latency_pm_qos_req);
+ if (!substream->runtime) {
+ pr_err("%s: runtime is null\n", __func__);
+ return -EINVAL;
+ }
+ usecs = msm_qos_value(substream->runtime);
+ if (usecs >= 0)
+ pm_qos_add_request(&substream->latency_pm_qos_req,
+ PM_QOS_CPU_DMA_LATENCY, usecs);
+ } else {
+ if (pm_qos_request_active(&substream->latency_pm_qos_req))
+ pm_qos_remove_request(&substream->latency_pm_qos_req);
+ }
+
+ return 0;
+}
+
static const struct snd_kcontrol_new msm_snd_controls[] = {
SOC_ENUM_EXT("SLIM_0_RX Channels", slim_0_rx_chs,
msm_slim_rx_ch_get, msm_slim_rx_ch_put),
@@ -2627,8 +2899,42 @@
msm_mi2s_rx_ch_get, msm_mi2s_rx_ch_put),
SOC_ENUM_EXT("QUAT_MI2S_TX Channels", quat_mi2s_tx_chs,
msm_mi2s_tx_ch_get, msm_mi2s_tx_ch_put),
+ SOC_ENUM_EXT("PRIM_MI2S_RX Format", mi2s_rx_format,
+ msm_mi2s_rx_format_get, msm_mi2s_rx_format_put),
+ SOC_ENUM_EXT("PRIM_MI2S_TX Format", mi2s_tx_format,
+ msm_mi2s_tx_format_get, msm_mi2s_tx_format_put),
+ SOC_ENUM_EXT("SEC_MI2S_RX Format", mi2s_rx_format,
+ msm_mi2s_rx_format_get, msm_mi2s_rx_format_put),
+ SOC_ENUM_EXT("SEC_MI2S_TX Format", mi2s_tx_format,
+ msm_mi2s_tx_format_get, msm_mi2s_tx_format_put),
+ SOC_ENUM_EXT("TERT_MI2S_RX Format", mi2s_rx_format,
+ msm_mi2s_rx_format_get, msm_mi2s_rx_format_put),
+ SOC_ENUM_EXT("TERT_MI2S_TX Format", mi2s_tx_format,
+ msm_mi2s_tx_format_get, msm_mi2s_tx_format_put),
+ SOC_ENUM_EXT("QUAT_MI2S_RX Format", mi2s_rx_format,
+ msm_mi2s_rx_format_get, msm_mi2s_rx_format_put),
+ SOC_ENUM_EXT("QUAT_MI2S_TX Format", mi2s_tx_format,
+ msm_mi2s_tx_format_get, msm_mi2s_tx_format_put),
+ SOC_ENUM_EXT("PRIM_AUX_PCM_RX Format", aux_pcm_rx_format,
+ msm_aux_pcm_rx_format_get, msm_aux_pcm_rx_format_put),
+ SOC_ENUM_EXT("PRIM_AUX_PCM_TX Format", aux_pcm_tx_format,
+ msm_aux_pcm_tx_format_get, msm_aux_pcm_tx_format_put),
+ SOC_ENUM_EXT("SEC_AUX_PCM_RX Format", aux_pcm_rx_format,
+ msm_aux_pcm_rx_format_get, msm_aux_pcm_rx_format_put),
+ SOC_ENUM_EXT("SEC_AUX_PCM_TX Format", aux_pcm_tx_format,
+ msm_aux_pcm_tx_format_get, msm_aux_pcm_tx_format_put),
+ SOC_ENUM_EXT("TERT_AUX_PCM_RX Format", aux_pcm_rx_format,
+ msm_aux_pcm_rx_format_get, msm_aux_pcm_rx_format_put),
+ SOC_ENUM_EXT("TERT_AUX_PCM_TX Format", aux_pcm_tx_format,
+ msm_aux_pcm_tx_format_get, msm_aux_pcm_tx_format_put),
+ SOC_ENUM_EXT("QUAT_AUX_PCM_RX Format", aux_pcm_rx_format,
+ msm_aux_pcm_rx_format_get, msm_aux_pcm_rx_format_put),
+ SOC_ENUM_EXT("QUAT_AUX_PCM_TX Format", aux_pcm_tx_format,
+ msm_aux_pcm_tx_format_get, msm_aux_pcm_tx_format_put),
SOC_ENUM_EXT("HiFi Function", hifi_function, msm_hifi_get,
msm_hifi_put),
+ SOC_ENUM_EXT("MultiMedia5_RX QOS Vote", qos_vote, msm_qos_ctl_get,
+ msm_qos_ctl_put),
};
static int msm_snd_enable_codec_ext_clk(struct snd_soc_codec *codec,
@@ -2646,6 +2952,38 @@
return ret;
}
+static int msm_snd_enable_codec_ext_tx_clk(struct snd_soc_codec *codec,
+ int enable, bool dapm)
+{
+ int ret = 0;
+
+ if (!strcmp(dev_name(codec->dev), "tavil_codec")) {
+ ret = tavil_cdc_mclk_tx_enable(codec, enable);
+ } else {
+ dev_err(codec->dev, "%s: unknown codec to enable TX ext clk\n",
+ __func__);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int msm_mclk_tx_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+ pr_debug("%s: event = %d\n", __func__, event);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ return msm_snd_enable_codec_ext_tx_clk(codec, 1, true);
+ case SND_SOC_DAPM_POST_PMD:
+ return msm_snd_enable_codec_ext_tx_clk(codec, 0, true);
+ }
+ return 0;
+}
+
static int msm_mclk_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
@@ -2702,7 +3040,7 @@
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_SUPPLY("MCLK TX", SND_SOC_NOPM, 0, 0,
- NULL, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ msm_mclk_tx_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_SPK("Lineout_1 amp", NULL),
SND_SOC_DAPM_SPK("Lineout_2 amp", NULL),
@@ -2996,6 +3334,8 @@
break;
case MSM_BACKEND_DAI_AUXPCM_RX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ aux_pcm_rx_cfg[PRIM_AUX_PCM].bit_format);
rate->min = rate->max =
aux_pcm_rx_cfg[PRIM_AUX_PCM].sample_rate;
channels->min = channels->max =
@@ -3003,6 +3343,8 @@
break;
case MSM_BACKEND_DAI_AUXPCM_TX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ aux_pcm_tx_cfg[PRIM_AUX_PCM].bit_format);
rate->min = rate->max =
aux_pcm_tx_cfg[PRIM_AUX_PCM].sample_rate;
channels->min = channels->max =
@@ -3010,6 +3352,8 @@
break;
case MSM_BACKEND_DAI_SEC_AUXPCM_RX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ aux_pcm_rx_cfg[SEC_AUX_PCM].bit_format);
rate->min = rate->max =
aux_pcm_rx_cfg[SEC_AUX_PCM].sample_rate;
channels->min = channels->max =
@@ -3017,6 +3361,8 @@
break;
case MSM_BACKEND_DAI_SEC_AUXPCM_TX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ aux_pcm_tx_cfg[SEC_AUX_PCM].bit_format);
rate->min = rate->max =
aux_pcm_tx_cfg[SEC_AUX_PCM].sample_rate;
channels->min = channels->max =
@@ -3024,6 +3370,8 @@
break;
case MSM_BACKEND_DAI_TERT_AUXPCM_RX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ aux_pcm_rx_cfg[TERT_AUX_PCM].bit_format);
rate->min = rate->max =
aux_pcm_rx_cfg[TERT_AUX_PCM].sample_rate;
channels->min = channels->max =
@@ -3031,6 +3379,8 @@
break;
case MSM_BACKEND_DAI_TERT_AUXPCM_TX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ aux_pcm_tx_cfg[TERT_AUX_PCM].bit_format);
rate->min = rate->max =
aux_pcm_tx_cfg[TERT_AUX_PCM].sample_rate;
channels->min = channels->max =
@@ -3038,6 +3388,8 @@
break;
case MSM_BACKEND_DAI_QUAT_AUXPCM_RX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ aux_pcm_rx_cfg[QUAT_AUX_PCM].bit_format);
rate->min = rate->max =
aux_pcm_rx_cfg[QUAT_AUX_PCM].sample_rate;
channels->min = channels->max =
@@ -3045,6 +3397,8 @@
break;
case MSM_BACKEND_DAI_QUAT_AUXPCM_TX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ aux_pcm_tx_cfg[QUAT_AUX_PCM].bit_format);
rate->min = rate->max =
aux_pcm_tx_cfg[QUAT_AUX_PCM].sample_rate;
channels->min = channels->max =
@@ -3052,48 +3406,64 @@
break;
case MSM_BACKEND_DAI_PRI_MI2S_RX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_rx_cfg[PRIM_MI2S].bit_format);
rate->min = rate->max = mi2s_rx_cfg[PRIM_MI2S].sample_rate;
channels->min = channels->max =
mi2s_rx_cfg[PRIM_MI2S].channels;
break;
case MSM_BACKEND_DAI_PRI_MI2S_TX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_tx_cfg[PRIM_MI2S].bit_format);
rate->min = rate->max = mi2s_tx_cfg[PRIM_MI2S].sample_rate;
channels->min = channels->max =
mi2s_tx_cfg[PRIM_MI2S].channels;
break;
case MSM_BACKEND_DAI_SECONDARY_MI2S_RX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_rx_cfg[SEC_MI2S].bit_format);
rate->min = rate->max = mi2s_rx_cfg[SEC_MI2S].sample_rate;
channels->min = channels->max =
mi2s_rx_cfg[SEC_MI2S].channels;
break;
case MSM_BACKEND_DAI_SECONDARY_MI2S_TX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_tx_cfg[SEC_MI2S].bit_format);
rate->min = rate->max = mi2s_tx_cfg[SEC_MI2S].sample_rate;
channels->min = channels->max =
mi2s_tx_cfg[SEC_MI2S].channels;
break;
case MSM_BACKEND_DAI_TERTIARY_MI2S_RX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_rx_cfg[TERT_MI2S].bit_format);
rate->min = rate->max = mi2s_rx_cfg[TERT_MI2S].sample_rate;
channels->min = channels->max =
mi2s_rx_cfg[TERT_MI2S].channels;
break;
case MSM_BACKEND_DAI_TERTIARY_MI2S_TX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_tx_cfg[TERT_MI2S].bit_format);
rate->min = rate->max = mi2s_tx_cfg[TERT_MI2S].sample_rate;
channels->min = channels->max =
mi2s_tx_cfg[TERT_MI2S].channels;
break;
case MSM_BACKEND_DAI_QUATERNARY_MI2S_RX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_rx_cfg[QUAT_MI2S].bit_format);
rate->min = rate->max = mi2s_rx_cfg[QUAT_MI2S].sample_rate;
channels->min = channels->max =
mi2s_rx_cfg[QUAT_MI2S].channels;
break;
case MSM_BACKEND_DAI_QUATERNARY_MI2S_TX:
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_tx_cfg[QUAT_MI2S].bit_format);
rate->min = rate->max = mi2s_tx_cfg[QUAT_MI2S].sample_rate;
channels->min = channels->max =
mi2s_tx_cfg[QUAT_MI2S].channels;
@@ -3855,6 +4225,7 @@
u32 bit_per_sample;
switch (bit_format) {
+ case SNDRV_PCM_FORMAT_S32_LE:
case SNDRV_PCM_FORMAT_S24_3LE:
case SNDRV_PCM_FORMAT_S24_LE:
bit_per_sample = 32;
@@ -3883,9 +4254,6 @@
mi2s_clk[dai_id].clk_freq_in_hz =
mi2s_tx_cfg[dai_id].sample_rate * 2 * bit_per_sample;
}
-
- if (!mi2s_intf_conf[dai_id].msm_is_mi2s_master)
- mi2s_clk[dai_id].clk_freq_in_hz = 0;
}
static int msm_mi2s_set_sclk(struct snd_pcm_substream *substream, bool enable)
@@ -4236,6 +4604,11 @@
*/
mutex_lock(&mi2s_intf_conf[index].lock);
if (++mi2s_intf_conf[index].ref_cnt == 1) {
+ /* Check if msm needs to provide the clock to the interface */
+ if (!mi2s_intf_conf[index].msm_is_mi2s_master) {
+ mi2s_clk[index].clk_id = mi2s_ebit_clk[index];
+ fmt = SND_SOC_DAIFMT_CBM_CFM;
+ }
ret = msm_mi2s_set_sclk(substream, true);
if (ret < 0) {
dev_err(rtd->card->dev,
@@ -4255,9 +4628,6 @@
ret = -EINVAL;
goto clk_off;
}
- /* Check if msm needs to provide the clock to the interface */
- if (!mi2s_intf_conf[index].msm_is_mi2s_master)
- fmt = SND_SOC_DAIFMT_CBM_CFM;
ret = snd_soc_dai_set_fmt(cpu_dai, fmt);
if (ret < 0) {
pr_err("%s: set fmt cpu dai failed for MI2S (%d), err:%d\n",
@@ -4756,12 +5126,13 @@
.id = MSM_FRONTEND_DAI_MULTIMEDIA7,
},
{
- .name = MSM_DAILINK_NAME(Compress3),
- .stream_name = "Compress3",
+ .name = MSM_DAILINK_NAME(MultiMedia10),
+ .stream_name = "MultiMedia10",
.cpu_dai_name = "MultiMedia10",
- .platform_name = "msm-compress-dsp",
+ .platform_name = "msm-pcm-dsp.1",
.dynamic = 1,
.dpcm_playback = 1,
+ .dpcm_capture = 1,
.trigger = {SND_SOC_DPCM_TRIGGER_POST,
SND_SOC_DPCM_TRIGGER_POST},
.codec_dai_name = "snd-soc-dummy-dai",
@@ -5032,12 +5403,13 @@
.id = MSM_FRONTEND_DAI_MULTIMEDIA15,
},
{
- .name = MSM_DAILINK_NAME(Compress9),
- .stream_name = "Compress9",
+ .name = MSM_DAILINK_NAME(ULL_NOIRQ_2),
+ .stream_name = "MM_NOIRQ_2",
.cpu_dai_name = "MultiMedia16",
- .platform_name = "msm-compress-dsp",
+ .platform_name = "msm-pcm-dsp-noirq",
.dynamic = 1,
.dpcm_playback = 1,
+ .dpcm_capture = 1,
.trigger = {SND_SOC_DPCM_TRIGGER_POST,
SND_SOC_DPCM_TRIGGER_POST},
.codec_dai_name = "snd-soc-dummy-dai",
@@ -6325,16 +6697,18 @@
ret = of_property_read_u32(pdev->dev.of_node,
"qcom,wsa-max-devs", &wsa_max_devs);
if (ret) {
- dev_dbg(&pdev->dev,
+ dev_info(&pdev->dev,
"%s: wsa-max-devs property missing in DT %s, ret = %d\n",
__func__, pdev->dev.of_node->full_name, ret);
- goto err;
+ card->num_aux_devs = 0;
+ return 0;
}
if (wsa_max_devs == 0) {
dev_warn(&pdev->dev,
"%s: Max WSA devices is 0 for this target?\n",
__func__);
- goto err;
+ card->num_aux_devs = 0;
+ return 0;
}
/* Get count of WSA device phandles for this platform */
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 6b23bf5..4f6c777 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -3326,7 +3326,8 @@
*/
void snd_soc_card_change_online_state(struct snd_soc_card *soc_card, int online)
{
- snd_card_change_online_state(soc_card->snd_card, online);
+ if (soc_card && soc_card->snd_card)
+ snd_card_change_online_state(soc_card->snd_card, online);
}
EXPORT_SYMBOL(snd_soc_card_change_online_state);
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 94ea909..d40bfef 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -423,14 +423,13 @@
kfree(data);
}
-struct snd_soc_dapm_widget_list *dapm_kcontrol_get_wlist(
+static struct snd_soc_dapm_widget_list *dapm_kcontrol_get_wlist(
const struct snd_kcontrol *kcontrol)
{
struct dapm_kcontrol_data *data = snd_kcontrol_chip(kcontrol);
return data->wlist;
}
-EXPORT_SYMBOL(dapm_kcontrol_get_wlist);
static int dapm_kcontrol_add_widget(struct snd_kcontrol *kcontrol,
struct snd_soc_dapm_widget *widget)
diff --git a/sound/usb/card.c b/sound/usb/card.c
index eaf18aa..a87a526 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -619,7 +619,7 @@
assoc = intf->intf_assoc;
if (assoc && assoc->bFunctionClass == USB_CLASS_AUDIO &&
assoc->bFunctionProtocol == UAC_VERSION_3 &&
- assoc->bFunctionSubClass == FULL_ADC_PROFILE) {
+ assoc->bFunctionSubClass == FULL_ADC_3_0) {
dev_info(&dev->dev, "No support for full-fledged ADC 3.0 yet!!\n");
return -EINVAL;
}
diff --git a/techpack/.gitignore b/techpack/.gitignore
new file mode 100644
index 0000000..58da0b8
--- /dev/null
+++ b/techpack/.gitignore
@@ -0,0 +1,2 @@
+# ignore all subdirs except stub
+!/stub/
diff --git a/techpack/Kbuild b/techpack/Kbuild
new file mode 100644
index 0000000..3c7c8e6
--- /dev/null
+++ b/techpack/Kbuild
@@ -0,0 +1,5 @@
+techpack-dirs := $(shell find $(srctree)/$(src) -maxdepth 1 -mindepth 1 -type d -not -name ".*")
+obj-y += stub/ $(addsuffix /,$(subst $(srctree)/$(src)/,,$(techpack-dirs)))
+
+techpack-header-dirs := $(shell find $(srctree)/techpack -maxdepth 1 -mindepth 1 -type d -not -name ".*")
+header-y += $(addsuffix /include/uapi/,$(subst $(srctree)/techpack/,,$(techpack-header-dirs)))
diff --git a/techpack/stub/Makefile b/techpack/stub/Makefile
new file mode 100644
index 0000000..184b5c7
--- /dev/null
+++ b/techpack/stub/Makefile
@@ -0,0 +1,2 @@
+ccflags-y := -Wno-unused-function
+obj-y := stub.o
diff --git a/techpack/stub/include/uapi/Kbuild b/techpack/stub/include/uapi/Kbuild
new file mode 100644
index 0000000..87bfa65
--- /dev/null
+++ b/techpack/stub/include/uapi/Kbuild
@@ -0,0 +1 @@
+#Stub place holder
diff --git a/techpack/stub/stub.c b/techpack/stub/stub.c
new file mode 100644
index 0000000..6024341
--- /dev/null
+++ b/techpack/stub/stub.c
@@ -0,0 +1,3 @@
+static void _techpack_stub(void)
+{
+}
diff --git a/tools/arch/x86/lib/memcpy_64.S b/tools/arch/x86/lib/memcpy_64.S
index 49e6eba..98dcc11 100644
--- a/tools/arch/x86/lib/memcpy_64.S
+++ b/tools/arch/x86/lib/memcpy_64.S
@@ -286,7 +286,7 @@
_ASM_EXTABLE_FAULT(.L_copy_leading_bytes, .L_memcpy_mcsafe_fail)
_ASM_EXTABLE_FAULT(.L_cache_w0, .L_memcpy_mcsafe_fail)
_ASM_EXTABLE_FAULT(.L_cache_w1, .L_memcpy_mcsafe_fail)
- _ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail)
+ _ASM_EXTABLE_FAULT(.L_cache_w2, .L_memcpy_mcsafe_fail)
_ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail)
_ASM_EXTABLE_FAULT(.L_cache_w4, .L_memcpy_mcsafe_fail)
_ASM_EXTABLE_FAULT(.L_cache_w5, .L_memcpy_mcsafe_fail)
diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h
index e33fc1d..d94179f 100644
--- a/tools/include/linux/compiler.h
+++ b/tools/include/linux/compiler.h
@@ -126,4 +126,13 @@
#define WRITE_ONCE(x, val) \
({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
+
+#ifndef __fallthrough
+# if defined(__GNUC__) && __GNUC__ >= 7
+# define __fallthrough __attribute__ ((fallthrough))
+# else
+# define __fallthrough
+# endif
+#endif
+
#endif /* _TOOLS_LINUX_COMPILER_H */
diff --git a/tools/lib/lockdep/uinclude/linux/lockdep.h b/tools/lib/lockdep/uinclude/linux/lockdep.h
index c808c7d..d302142 100644
--- a/tools/lib/lockdep/uinclude/linux/lockdep.h
+++ b/tools/lib/lockdep/uinclude/linux/lockdep.h
@@ -8,7 +8,7 @@
#include <linux/utsname.h>
#include <linux/compiler.h>
-#define MAX_LOCK_DEPTH 2000UL
+#define MAX_LOCK_DEPTH 63UL
#define asmlinkage
#define __visible
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index 5e0dea2..039636f 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -150,9 +150,9 @@
*type = INSN_RETURN;
break;
- case 0xc5: /* iret */
case 0xca: /* retf */
case 0xcb: /* retf */
+ case 0xcf: /* iret */
*type = INSN_CONTEXT_SWITCH;
break;
diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c
index e8a1f69..b8dadb0 100644
--- a/tools/objtool/builtin-check.c
+++ b/tools/objtool/builtin-check.c
@@ -757,11 +757,20 @@
insn->jump_dest->offset > orig_insn->offset))
break;
+ /* look for a relocation which references .rodata */
text_rela = find_rela_by_dest_range(insn->sec, insn->offset,
insn->len);
- if (text_rela && text_rela->sym == file->rodata->sym)
- return find_rela_by_dest(file->rodata,
- text_rela->addend);
+ if (!text_rela || text_rela->sym != file->rodata->sym)
+ continue;
+
+ /*
+ * Make sure the .rodata address isn't associated with a
+ * symbol. gcc jump tables are anonymous data.
+ */
+ if (find_symbol_containing(file->rodata, text_rela->addend))
+ continue;
+
+ return find_rela_by_dest(file->rodata, text_rela->addend);
}
return NULL;
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index 0d7983a..d897702 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -85,6 +85,18 @@
return NULL;
}
+struct symbol *find_symbol_containing(struct section *sec, unsigned long offset)
+{
+ struct symbol *sym;
+
+ list_for_each_entry(sym, &sec->symbol_list, list)
+ if (sym->type != STT_SECTION &&
+ offset >= sym->offset && offset < sym->offset + sym->len)
+ return sym;
+
+ return NULL;
+}
+
struct rela *find_rela_by_dest_range(struct section *sec, unsigned long offset,
unsigned int len)
{
diff --git a/tools/objtool/elf.h b/tools/objtool/elf.h
index aa1ff65..731973e 100644
--- a/tools/objtool/elf.h
+++ b/tools/objtool/elf.h
@@ -79,6 +79,7 @@
struct elf *elf_open(const char *name);
struct section *find_section_by_name(struct elf *elf, const char *name);
struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset);
+struct symbol *find_symbol_containing(struct section *sec, unsigned long offset);
struct rela *find_rela_by_dest(struct section *sec, unsigned long offset);
struct rela *find_rela_by_dest_range(struct section *sec, unsigned long offset,
unsigned int len);
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index 8efe904..9e5a02d 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -1573,13 +1573,13 @@
"GB/sec,", "total-speed", "GB/sec total speed");
if (g->p.show_details >= 2) {
- char tname[32];
+ char tname[14 + 2 * 10 + 1];
struct thread_data *td;
for (p = 0; p < g->p.nr_proc; p++) {
for (t = 0; t < g->p.nr_threads; t++) {
- memset(tname, 0, 32);
+ memset(tname, 0, sizeof(tname));
td = g->threads + p*g->p.nr_threads + t;
- snprintf(tname, 32, "process%d:thread%d", p, t);
+ snprintf(tname, sizeof(tname), "process%d:thread%d", p, t);
print_res(tname, td->speed_gbs,
"GB/sec", "thread-speed", "GB/sec/thread speed");
print_res(tname, td->system_time_ns / NSEC_PER_SEC,
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index fe3af95..0b613e7 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -643,7 +643,7 @@
case -1:
if (errno == EINTR)
continue;
- /* Fall trhu */
+ __fallthrough;
default:
c = getc(stdin);
tcsetattr(0, TCSAFLUSH, &save);
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index 20c2e64..aa9276b 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -1779,15 +1779,14 @@
}
while (!ret && (ent = readdir(dir))) {
-#define MAX_NAME 100
struct evlist_test e;
- char name[MAX_NAME];
+ char name[2 * NAME_MAX + 1 + 12 + 3];
/* Names containing . are special and cannot be used directly */
if (strchr(ent->d_name, '.'))
continue;
- snprintf(name, MAX_NAME, "cpu/event=%s/u", ent->d_name);
+ snprintf(name, sizeof(name), "cpu/event=%s/u", ent->d_name);
e.name = name;
e.check = test__checkevent_pmu_events;
@@ -1795,11 +1794,10 @@
ret = test_event(&e);
if (ret)
break;
- snprintf(name, MAX_NAME, "%s:u,cpu/event=%s/u", ent->d_name, ent->d_name);
+ snprintf(name, sizeof(name), "%s:u,cpu/event=%s/u", ent->d_name, ent->d_name);
e.name = name;
e.check = test__checkevent_pmu_events_mix;
ret = test_event(&e);
-#undef MAX_NAME
}
closedir(dir);
diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c
index 3eb3edb..a130901 100644
--- a/tools/perf/ui/browser.c
+++ b/tools/perf/ui/browser.c
@@ -702,7 +702,7 @@
ui_browser__gotorc(browser, row, column + 1);
SLsmg_draw_hline(2);
- if (row++ == 0)
+ if (++row == 0)
goto out;
} else
row = 0;
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 2f3eded..5337f49 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -3184,6 +3184,7 @@
case PERF_EVENT_UPDATE__SCALE:
ev_scale = (struct event_update_event_scale *) ev->data;
evsel->scale = ev_scale->scale;
+ break;
case PERF_EVENT_UPDATE__CPUS:
ev_cpus = (struct event_update_event_cpus *) ev->data;
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index 16c06d3..7e27207 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -22,6 +22,7 @@
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
+#include <linux/compiler.h>
#include "../cache.h"
#include "../util.h"
@@ -63,6 +64,25 @@
INTEL_PT_STATE_FUP_NO_TIP,
};
+static inline bool intel_pt_sample_time(enum intel_pt_pkt_state pkt_state)
+{
+ switch (pkt_state) {
+ case INTEL_PT_STATE_NO_PSB:
+ case INTEL_PT_STATE_NO_IP:
+ case INTEL_PT_STATE_ERR_RESYNC:
+ case INTEL_PT_STATE_IN_SYNC:
+ case INTEL_PT_STATE_TNT:
+ return true;
+ case INTEL_PT_STATE_TIP:
+ case INTEL_PT_STATE_TIP_PGD:
+ case INTEL_PT_STATE_FUP:
+ case INTEL_PT_STATE_FUP_NO_TIP:
+ return false;
+ default:
+ return true;
+ };
+}
+
#ifdef INTEL_PT_STRICT
#define INTEL_PT_STATE_ERR1 INTEL_PT_STATE_NO_PSB
#define INTEL_PT_STATE_ERR2 INTEL_PT_STATE_NO_PSB
@@ -91,6 +111,7 @@
bool have_tma;
bool have_cyc;
bool fixup_last_mtc;
+ bool have_last_ip;
uint64_t pos;
uint64_t last_ip;
uint64_t ip;
@@ -98,6 +119,7 @@
uint64_t timestamp;
uint64_t tsc_timestamp;
uint64_t ref_timestamp;
+ uint64_t sample_timestamp;
uint64_t ret_addr;
uint64_t ctc_timestamp;
uint64_t ctc_delta;
@@ -138,6 +160,7 @@
unsigned int fup_tx_flags;
unsigned int tx_flags;
uint64_t timestamp_insn_cnt;
+ uint64_t sample_insn_cnt;
uint64_t stuck_ip;
int no_progress;
int stuck_ip_prd;
@@ -397,6 +420,7 @@
static inline void intel_pt_set_last_ip(struct intel_pt_decoder *decoder)
{
decoder->last_ip = intel_pt_calc_ip(&decoder->packet, decoder->last_ip);
+ decoder->have_last_ip = true;
}
static inline void intel_pt_set_ip(struct intel_pt_decoder *decoder)
@@ -897,6 +921,7 @@
decoder->tot_insn_cnt += insn_cnt;
decoder->timestamp_insn_cnt += insn_cnt;
+ decoder->sample_insn_cnt += insn_cnt;
decoder->period_insn_cnt += insn_cnt;
if (err) {
@@ -1443,7 +1468,8 @@
case INTEL_PT_FUP:
decoder->pge = true;
- intel_pt_set_last_ip(decoder);
+ if (decoder->packet.count)
+ intel_pt_set_last_ip(decoder);
break;
case INTEL_PT_MODE_TSX:
@@ -1647,6 +1673,8 @@
break;
case INTEL_PT_PSB:
+ decoder->last_ip = 0;
+ decoder->have_last_ip = true;
intel_pt_clear_stack(&decoder->stack);
err = intel_pt_walk_psbend(decoder);
if (err == -EAGAIN)
@@ -1727,8 +1755,9 @@
static inline bool intel_pt_have_ip(struct intel_pt_decoder *decoder)
{
- return decoder->last_ip || decoder->packet.count == 0 ||
- decoder->packet.count == 3 || decoder->packet.count == 6;
+ return decoder->packet.count &&
+ (decoder->have_last_ip || decoder->packet.count == 3 ||
+ decoder->packet.count == 6);
}
/* Walk PSB+ packets to get in sync. */
@@ -1744,6 +1773,7 @@
switch (decoder->packet.type) {
case INTEL_PT_TIP_PGD:
decoder->continuous_period = false;
+ __fallthrough;
case INTEL_PT_TIP_PGE:
case INTEL_PT_TIP:
intel_pt_log("ERROR: Unexpected packet\n");
@@ -1797,6 +1827,8 @@
decoder->pge = false;
decoder->continuous_period = false;
intel_pt_clear_tx_flags(decoder);
+ __fallthrough;
+
case INTEL_PT_TNT:
decoder->have_tma = false;
intel_pt_log("ERROR: Unexpected packet\n");
@@ -1837,6 +1869,7 @@
switch (decoder->packet.type) {
case INTEL_PT_TIP_PGD:
decoder->continuous_period = false;
+ __fallthrough;
case INTEL_PT_TIP_PGE:
case INTEL_PT_TIP:
decoder->pge = decoder->packet.type != INTEL_PT_TIP_PGD;
@@ -1847,14 +1880,10 @@
break;
case INTEL_PT_FUP:
- if (decoder->overflow) {
- if (intel_pt_have_ip(decoder))
- intel_pt_set_ip(decoder);
- if (decoder->ip)
- return 0;
- }
- if (decoder->packet.count)
- intel_pt_set_last_ip(decoder);
+ if (intel_pt_have_ip(decoder))
+ intel_pt_set_ip(decoder);
+ if (decoder->ip)
+ return 0;
break;
case INTEL_PT_MTC:
@@ -1903,6 +1932,9 @@
break;
case INTEL_PT_PSB:
+ decoder->last_ip = 0;
+ decoder->have_last_ip = true;
+ intel_pt_clear_stack(&decoder->stack);
err = intel_pt_walk_psb(decoder);
if (err)
return err;
@@ -1928,6 +1960,8 @@
{
int err;
+ decoder->set_fup_tx_flags = false;
+
intel_pt_log("Scanning for full IP\n");
err = intel_pt_walk_to_ip(decoder);
if (err)
@@ -2036,6 +2070,7 @@
decoder->pge = false;
decoder->continuous_period = false;
+ decoder->have_last_ip = false;
decoder->last_ip = 0;
decoder->ip = 0;
intel_pt_clear_stack(&decoder->stack);
@@ -2044,6 +2079,7 @@
if (err)
return err;
+ decoder->have_last_ip = true;
decoder->pkt_state = INTEL_PT_STATE_NO_IP;
err = intel_pt_walk_psb(decoder);
@@ -2062,7 +2098,7 @@
static uint64_t intel_pt_est_timestamp(struct intel_pt_decoder *decoder)
{
- uint64_t est = decoder->timestamp_insn_cnt << 1;
+ uint64_t est = decoder->sample_insn_cnt << 1;
if (!decoder->cbr || !decoder->max_non_turbo_ratio)
goto out;
@@ -2070,7 +2106,7 @@
est *= decoder->max_non_turbo_ratio;
est /= decoder->cbr;
out:
- return decoder->timestamp + est;
+ return decoder->sample_timestamp + est;
}
const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
@@ -2086,7 +2122,9 @@
err = intel_pt_sync(decoder);
break;
case INTEL_PT_STATE_NO_IP:
+ decoder->have_last_ip = false;
decoder->last_ip = 0;
+ decoder->ip = 0;
/* Fall through */
case INTEL_PT_STATE_ERR_RESYNC:
err = intel_pt_sync_ip(decoder);
@@ -2123,15 +2161,24 @@
}
} while (err == -ENOLINK);
- decoder->state.err = err ? intel_pt_ext_err(err) : 0;
- decoder->state.timestamp = decoder->timestamp;
+ if (err) {
+ decoder->state.err = intel_pt_ext_err(err);
+ decoder->state.from_ip = decoder->ip;
+ decoder->sample_timestamp = decoder->timestamp;
+ decoder->sample_insn_cnt = decoder->timestamp_insn_cnt;
+ } else {
+ decoder->state.err = 0;
+ if (intel_pt_sample_time(decoder->pkt_state)) {
+ decoder->sample_timestamp = decoder->timestamp;
+ decoder->sample_insn_cnt = decoder->timestamp_insn_cnt;
+ }
+ }
+
+ decoder->state.timestamp = decoder->sample_timestamp;
decoder->state.est_timestamp = intel_pt_est_timestamp(decoder);
decoder->state.cr3 = decoder->cr3;
decoder->state.tot_insn_cnt = decoder->tot_insn_cnt;
- if (err)
- decoder->state.from_ip = decoder->ip;
-
return &decoder->state;
}
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
index 4f7b320..7528ae4 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
@@ -17,6 +17,7 @@
#include <string.h>
#include <endian.h>
#include <byteswap.h>
+#include <linux/compiler.h>
#include "intel-pt-pkt-decoder.h"
@@ -498,6 +499,7 @@
case INTEL_PT_FUP:
if (!(packet->count))
return snprintf(buf, buf_len, "%s no ip", name);
+ __fallthrough;
case INTEL_PT_CYC:
case INTEL_PT_VMCS:
case INTEL_PT_MTC:
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index d281ae2..7ea13f4 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -618,6 +618,67 @@
return ret ? : -ENOENT;
}
+/* Adjust symbol name and address */
+static int post_process_probe_trace_point(struct probe_trace_point *tp,
+ struct map *map, unsigned long offs)
+{
+ struct symbol *sym;
+ u64 addr = tp->address + tp->offset - offs;
+
+ sym = map__find_symbol(map, addr);
+ if (!sym)
+ return -ENOENT;
+
+ if (strcmp(sym->name, tp->symbol)) {
+ /* If we have no realname, use symbol for it */
+ if (!tp->realname)
+ tp->realname = tp->symbol;
+ else
+ free(tp->symbol);
+ tp->symbol = strdup(sym->name);
+ if (!tp->symbol)
+ return -ENOMEM;
+ }
+ tp->offset = addr - sym->start;
+ tp->address -= offs;
+
+ return 0;
+}
+
+/*
+ * Rename DWARF symbols to ELF symbols -- gcc sometimes optimizes functions
+ * and generate new symbols with suffixes such as .constprop.N or .isra.N
+ * etc. Since those symbols are not recorded in DWARF, we have to find
+ * correct generated symbols from offline ELF binary.
+ * For online kernel or uprobes we don't need this because those are
+ * rebased on _text, or already a section relative address.
+ */
+static int
+post_process_offline_probe_trace_events(struct probe_trace_event *tevs,
+ int ntevs, const char *pathname)
+{
+ struct map *map;
+ unsigned long stext = 0;
+ int i, ret = 0;
+
+ /* Prepare a map for offline binary */
+ map = dso__new_map(pathname);
+ if (!map || get_text_start_address(pathname, &stext) < 0) {
+ pr_warning("Failed to get ELF symbols for %s\n", pathname);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ntevs; i++) {
+ ret = post_process_probe_trace_point(&tevs[i].point,
+ map, stext);
+ if (ret < 0)
+ break;
+ }
+ map__put(map);
+
+ return ret;
+}
+
static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs,
int ntevs, const char *exec)
{
@@ -645,18 +706,31 @@
return ret;
}
-static int add_module_to_probe_trace_events(struct probe_trace_event *tevs,
- int ntevs, const char *module)
+static int
+post_process_module_probe_trace_events(struct probe_trace_event *tevs,
+ int ntevs, const char *module,
+ struct debuginfo *dinfo)
{
+ Dwarf_Addr text_offs = 0;
int i, ret = 0;
char *mod_name = NULL;
+ struct map *map;
if (!module)
return 0;
- mod_name = find_module_name(module);
+ map = get_target_map(module, false);
+ if (!map || debuginfo__get_text_offset(dinfo, &text_offs, true) < 0) {
+ pr_warning("Failed to get ELF symbols for %s\n", module);
+ return -EINVAL;
+ }
+ mod_name = find_module_name(module);
for (i = 0; i < ntevs; i++) {
+ ret = post_process_probe_trace_point(&tevs[i].point,
+ map, (unsigned long)text_offs);
+ if (ret < 0)
+ break;
tevs[i].point.module =
strdup(mod_name ? mod_name : module);
if (!tevs[i].point.module) {
@@ -666,6 +740,8 @@
}
free(mod_name);
+ map__put(map);
+
return ret;
}
@@ -679,7 +755,8 @@
/* Skip post process if the target is an offline kernel */
if (symbol_conf.ignore_vmlinux_buildid)
- return 0;
+ return post_process_offline_probe_trace_events(tevs, ntevs,
+ symbol_conf.vmlinux_name);
reloc_sym = kernel_get_ref_reloc_sym();
if (!reloc_sym) {
@@ -722,7 +799,7 @@
static int post_process_probe_trace_events(struct perf_probe_event *pev,
struct probe_trace_event *tevs,
int ntevs, const char *module,
- bool uprobe)
+ bool uprobe, struct debuginfo *dinfo)
{
int ret;
@@ -730,7 +807,8 @@
ret = add_exec_to_probe_trace_events(tevs, ntevs, module);
else if (module)
/* Currently ref_reloc_sym based probe is not for drivers */
- ret = add_module_to_probe_trace_events(tevs, ntevs, module);
+ ret = post_process_module_probe_trace_events(tevs, ntevs,
+ module, dinfo);
else
ret = post_process_kernel_probe_trace_events(tevs, ntevs);
@@ -774,30 +852,27 @@
}
}
- debuginfo__delete(dinfo);
-
if (ntevs > 0) { /* Succeeded to find trace events */
pr_debug("Found %d probe_trace_events.\n", ntevs);
ret = post_process_probe_trace_events(pev, *tevs, ntevs,
- pev->target, pev->uprobes);
+ pev->target, pev->uprobes, dinfo);
if (ret < 0 || ret == ntevs) {
+ pr_debug("Post processing failed or all events are skipped. (%d)\n", ret);
clear_probe_trace_events(*tevs, ntevs);
zfree(tevs);
+ ntevs = 0;
}
- if (ret != ntevs)
- return ret < 0 ? ret : ntevs;
- ntevs = 0;
- /* Fall through */
}
+ debuginfo__delete(dinfo);
+
if (ntevs == 0) { /* No error but failed to find probe point. */
pr_warning("Probe point '%s' not found.\n",
synthesize_perf_probe_point(&pev->point));
return -ENOENT;
- }
- /* Error path : ntevs < 0 */
- pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs);
- if (ntevs < 0) {
+ } else if (ntevs < 0) {
+ /* Error path : ntevs < 0 */
+ pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs);
if (ntevs == -EBADF)
pr_warning("Warning: No dwarf info found in the vmlinux - "
"please rebuild kernel with CONFIG_DEBUG_INFO=y.\n");
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index df4debe..0d9d6e0 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -1501,7 +1501,8 @@
}
/* For the kernel module, we need a special code to get a DIE */
-static int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs)
+int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs,
+ bool adjust_offset)
{
int n, i;
Elf32_Word shndx;
@@ -1530,6 +1531,8 @@
if (!shdr)
return -ENOENT;
*offs = shdr->sh_addr;
+ if (adjust_offset)
+ *offs -= shdr->sh_offset;
}
}
return 0;
@@ -1543,16 +1546,12 @@
Dwarf_Addr _addr = 0, baseaddr = 0;
const char *fname = NULL, *func = NULL, *basefunc = NULL, *tmp;
int baseline = 0, lineno = 0, ret = 0;
- bool reloc = false;
-retry:
+ /* We always need to relocate the address for aranges */
+ if (debuginfo__get_text_offset(dbg, &baseaddr, false) == 0)
+ addr += baseaddr;
/* Find cu die */
if (!dwarf_addrdie(dbg->dbg, (Dwarf_Addr)addr, &cudie)) {
- if (!reloc && debuginfo__get_text_offset(dbg, &baseaddr) == 0) {
- addr += baseaddr;
- reloc = true;
- goto retry;
- }
pr_warning("Failed to find debug information for address %lx\n",
addr);
ret = -EINVAL;
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
index f1d8558..2956c51 100644
--- a/tools/perf/util/probe-finder.h
+++ b/tools/perf/util/probe-finder.h
@@ -46,6 +46,9 @@
int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
struct perf_probe_point *ppt);
+int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs,
+ bool adjust_offset);
+
/* Find a line range */
int debuginfo__find_line_range(struct debuginfo *dbg, struct line_range *lr);
diff --git a/tools/perf/util/scripting-engines/Build b/tools/perf/util/scripting-engines/Build
index 6516e22..82d28c6 100644
--- a/tools/perf/util/scripting-engines/Build
+++ b/tools/perf/util/scripting-engines/Build
@@ -1,6 +1,6 @@
libperf-$(CONFIG_LIBPERL) += trace-event-perl.o
libperf-$(CONFIG_LIBPYTHON) += trace-event-python.o
-CFLAGS_trace-event-perl.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-undef -Wno-switch-default
+CFLAGS_trace-event-perl.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-nested-externs -Wno-undef -Wno-switch-default
CFLAGS_trace-event-python.o += $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow
diff --git a/tools/perf/util/strfilter.c b/tools/perf/util/strfilter.c
index bcae659..efb5377 100644
--- a/tools/perf/util/strfilter.c
+++ b/tools/perf/util/strfilter.c
@@ -269,6 +269,7 @@
len = strfilter_node__sprint_pt(node->l, buf);
if (len < 0)
return len;
+ __fallthrough;
case '!':
if (buf) {
*(buf + len++) = *node->p;
diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c
index 7f7e072..f4e3444 100644
--- a/tools/perf/util/string.c
+++ b/tools/perf/util/string.c
@@ -21,6 +21,8 @@
case 'b': case 'B':
if (*p)
goto out_err;
+
+ __fallthrough;
case '\0':
return length;
default:
diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c
index 40585f5..ddec5c5 100644
--- a/tools/perf/util/thread_map.c
+++ b/tools/perf/util/thread_map.c
@@ -93,7 +93,7 @@
{
DIR *proc;
int max_threads = 32, items, i;
- char path[256];
+ char path[NAME_MAX + 1 + 6];
struct dirent *dirent, **namelist = NULL;
struct thread_map *threads = thread_map__alloc(max_threads);
diff --git a/tools/testing/selftests/capabilities/test_execve.c b/tools/testing/selftests/capabilities/test_execve.c
index 10a21a9..763f37f 100644
--- a/tools/testing/selftests/capabilities/test_execve.c
+++ b/tools/testing/selftests/capabilities/test_execve.c
@@ -138,9 +138,6 @@
if (chdir(cwd) != 0)
err(1, "chdir to private tmpfs");
-
- if (umount2(".", MNT_DETACH) != 0)
- err(1, "detach private tmpfs");
}
static void copy_fromat_to(int fromfd, const char *fromname, const char *toname)
@@ -248,7 +245,7 @@
err(1, "chown");
if (chmod("validate_cap_sgidnonroot", S_ISGID | 0710) != 0)
err(1, "chmod");
-}
+ }
capng_get_caps_process();
@@ -384,7 +381,7 @@
} else {
printf("[RUN]\tNon-root +ia, sgidnonroot => i\n");
exec_other_validate_cap("./validate_cap_sgidnonroot",
- false, false, true, false);
+ false, false, true, false);
if (fork_wait()) {
printf("[RUN]\tNon-root +ia, sgidroot => i\n");
diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
index 1dd087d..111e09c 100644
--- a/virt/kvm/vfio.c
+++ b/virt/kvm/vfio.c
@@ -47,6 +47,22 @@
return vfio_group;
}
+static bool kvm_vfio_external_group_match_file(struct vfio_group *group,
+ struct file *filep)
+{
+ bool ret, (*fn)(struct vfio_group *, struct file *);
+
+ fn = symbol_get(vfio_external_group_match_file);
+ if (!fn)
+ return false;
+
+ ret = fn(group, filep);
+
+ symbol_put(vfio_external_group_match_file);
+
+ return ret;
+}
+
static void kvm_vfio_group_put_external_user(struct vfio_group *vfio_group)
{
void (*fn)(struct vfio_group *);
@@ -171,18 +187,13 @@
if (!f.file)
return -EBADF;
- vfio_group = kvm_vfio_group_get_external_user(f.file);
- fdput(f);
-
- if (IS_ERR(vfio_group))
- return PTR_ERR(vfio_group);
-
ret = -ENOENT;
mutex_lock(&kv->lock);
list_for_each_entry(kvg, &kv->group_list, node) {
- if (kvg->vfio_group != vfio_group)
+ if (!kvm_vfio_external_group_match_file(kvg->vfio_group,
+ f.file))
continue;
list_del(&kvg->node);
@@ -196,7 +207,7 @@
mutex_unlock(&kv->lock);
- kvm_vfio_group_put_external_user(vfio_group);
+ fdput(f);
kvm_vfio_update_coherency(dev);